max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
tests/timetables/test_interval_timetable.py | holly-evans/airflow | 8,092 | 135175 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from typing import Optional
import dateutil.relativedelta
import freezegun
import pendulum
import pytest
from airflow.exceptions import AirflowTimetableInvalid
from airflow.settings import TIMEZONE
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
from airflow.timetables.interval import CronDataIntervalTimetable, DeltaDataIntervalTimetable
START_DATE = pendulum.DateTime(2021, 9, 4, tzinfo=TIMEZONE)
PREV_DATA_INTERVAL_START = START_DATE
PREV_DATA_INTERVAL_END = START_DATE + datetime.timedelta(days=1)
PREV_DATA_INTERVAL = DataInterval(start=PREV_DATA_INTERVAL_START, end=PREV_DATA_INTERVAL_END)
CURRENT_TIME = pendulum.DateTime(2021, 9, 7, tzinfo=TIMEZONE)
YESTERDAY = CURRENT_TIME - datetime.timedelta(days=1)
HOURLY_CRON_TIMETABLE = CronDataIntervalTimetable("@hourly", TIMEZONE)
HOURLY_TIMEDELTA_TIMETABLE = DeltaDataIntervalTimetable(datetime.timedelta(hours=1))
HOURLY_RELATIVEDELTA_TIMETABLE = DeltaDataIntervalTimetable(dateutil.relativedelta.relativedelta(hours=1))
CRON_TIMETABLE = CronDataIntervalTimetable("30 16 * * *", TIMEZONE)
DELTA_FROM_MIDNIGHT = datetime.timedelta(minutes=30, hours=16)
@pytest.mark.parametrize(
"last_automated_data_interval",
[pytest.param(None, id="first-run"), pytest.param(PREV_DATA_INTERVAL, id="subsequent")],
)
@freezegun.freeze_time(CURRENT_TIME)
def test_no_catchup_first_starts_at_current_time(
last_automated_data_interval: Optional[DataInterval],
) -> None:
"""If ``catchup=False`` and start_date is a day before"""
next_info = CRON_TIMETABLE.next_dagrun_info(
last_automated_data_interval=last_automated_data_interval,
restriction=TimeRestriction(earliest=YESTERDAY, latest=None, catchup=False),
)
expected_start = YESTERDAY + DELTA_FROM_MIDNIGHT
assert next_info == DagRunInfo.interval(start=expected_start, end=CURRENT_TIME + DELTA_FROM_MIDNIGHT)
@pytest.mark.parametrize(
"timetable",
[
pytest.param(HOURLY_CRON_TIMETABLE, id="cron"),
pytest.param(HOURLY_TIMEDELTA_TIMETABLE, id="timedelta"),
pytest.param(HOURLY_RELATIVEDELTA_TIMETABLE, id="relativedelta"),
],
)
@pytest.mark.parametrize(
"last_automated_data_interval",
[pytest.param(None, id="first-run"), pytest.param(PREV_DATA_INTERVAL, id="subsequent")],
)
@freezegun.freeze_time(CURRENT_TIME)
def test_no_catchup_next_info_starts_at_current_time(
timetable: Timetable,
last_automated_data_interval: Optional[DataInterval],
) -> None:
"""If ``catchup=False``, the next data interval ends at the current time."""
next_info = timetable.next_dagrun_info(
last_automated_data_interval=last_automated_data_interval,
restriction=TimeRestriction(earliest=START_DATE, latest=None, catchup=False),
)
expected_start = CURRENT_TIME - datetime.timedelta(hours=1)
assert next_info == DagRunInfo.interval(start=expected_start, end=CURRENT_TIME)
@pytest.mark.parametrize(
"timetable",
[
pytest.param(HOURLY_CRON_TIMETABLE, id="cron"),
pytest.param(HOURLY_TIMEDELTA_TIMETABLE, id="timedelta"),
pytest.param(HOURLY_RELATIVEDELTA_TIMETABLE, id="relativedelta"),
],
)
def test_catchup_next_info_starts_at_previous_interval_end(timetable: Timetable) -> None:
"""If ``catchup=True``, the next interval starts at the previous's end."""
next_info = timetable.next_dagrun_info(
last_automated_data_interval=PREV_DATA_INTERVAL,
restriction=TimeRestriction(earliest=START_DATE, latest=None, catchup=True),
)
expected_end = PREV_DATA_INTERVAL_END + datetime.timedelta(hours=1)
assert next_info == DagRunInfo.interval(start=PREV_DATA_INTERVAL_END, end=expected_end)
@pytest.mark.parametrize(
"timetable",
[
pytest.param(HOURLY_CRON_TIMETABLE, id="cron"),
pytest.param(HOURLY_TIMEDELTA_TIMETABLE, id="timedelta"),
pytest.param(HOURLY_RELATIVEDELTA_TIMETABLE, id="relativedelta"),
],
)
def test_validate_success(timetable: Timetable) -> None:
timetable.validate()
@pytest.mark.parametrize(
"timetable, error_message",
[
pytest.param(
CronDataIntervalTimetable("0 0 1 13 0", TIMEZONE),
"[0 0 1 13 0] is not acceptable, out of range",
id="invalid-cron",
),
pytest.param(
DeltaDataIntervalTimetable(datetime.timedelta()),
"schedule interval must be positive, not datetime.timedelta(0)",
id="zero-timedelta",
),
pytest.param(
DeltaDataIntervalTimetable(dateutil.relativedelta.relativedelta()),
"schedule interval must be positive, not relativedelta()",
id="zero-relativedelta",
),
pytest.param(
DeltaDataIntervalTimetable(datetime.timedelta(days=-1)),
# Dynamically formatted since different Python versions display timedelta differently.
f"schedule interval must be positive, not {datetime.timedelta(days=-1)!r}",
id="negative-timedelta",
),
pytest.param(
DeltaDataIntervalTimetable(dateutil.relativedelta.relativedelta(days=-1)),
"schedule interval must be positive, not relativedelta(days=-1)",
id="negative-relativedelta",
),
],
)
def test_validate_failure(timetable: Timetable, error_message: str) -> None:
with pytest.raises(AirflowTimetableInvalid) as ctx:
timetable.validate()
assert str(ctx.value) == error_message
def test_cron_interval_timezone_from_string():
timetable = CronDataIntervalTimetable("@hourly", "UTC")
assert timetable.serialize()['timezone'] == 'UTC'
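# Usage sketch (not part of the original test suite, reusing the fixtures above):
# with catchup enabled, the timetable schedules the hour that directly follows
# the previous automated data interval.
def _example_catchup_schedules_next_hour() -> None:
    info = HOURLY_TIMEDELTA_TIMETABLE.next_dagrun_info(
        last_automated_data_interval=PREV_DATA_INTERVAL,
        restriction=TimeRestriction(earliest=START_DATE, latest=None, catchup=True),
    )
    assert info == DagRunInfo.interval(
        start=PREV_DATA_INTERVAL_END,
        end=PREV_DATA_INTERVAL_END + datetime.timedelta(hours=1),
    )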
|
rllib/examples/export/cartpole_dqn_export.py | 77loopin/ray | 21,382 | 135177 |
<gh_stars>1000+
#!/usr/bin/env python
import os
import ray
from ray.rllib.agents.registry import get_trainer_class
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf()
ray.init(num_cpus=10)
def train_and_export(algo_name, num_steps, model_dir, ckpt_dir, prefix):
cls = get_trainer_class(algo_name)
alg = cls(config={}, env="CartPole-v0")
for _ in range(num_steps):
alg.train()
# Export tensorflow checkpoint for fine-tuning
alg.export_policy_checkpoint(ckpt_dir, filename_prefix=prefix)
# Export tensorflow SavedModel for online serving
alg.export_policy_model(model_dir)
def restore_saved_model(export_dir):
signature_key = \
tf1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
g = tf1.Graph()
with g.as_default():
with tf1.Session(graph=g) as sess:
meta_graph_def = \
tf1.saved_model.load(sess,
[tf1.saved_model.tag_constants.SERVING],
export_dir)
print("Model restored!")
print("Signature Def Information:")
print(meta_graph_def.signature_def[signature_key])
print("You can inspect the model using TensorFlow SavedModel CLI.")
print("https://www.tensorflow.org/guide/saved_model")
def restore_checkpoint(export_dir, prefix):
sess = tf1.Session()
meta_file = "%s.meta" % prefix
saver = tf1.train.import_meta_graph(os.path.join(export_dir, meta_file))
saver.restore(sess, os.path.join(export_dir, prefix))
print("Checkpoint restored!")
print("Variables Information:")
for v in tf1.trainable_variables():
value = sess.run(v)
print(v.name, value)
if __name__ == "__main__":
algo = "DQN"
model_dir = os.path.join(ray._private.utils.get_user_temp_dir(),
"model_export_dir")
ckpt_dir = os.path.join(ray._private.utils.get_user_temp_dir(),
"ckpt_export_dir")
prefix = "model.ckpt"
num_steps = 3
train_and_export(algo, num_steps, model_dir, ckpt_dir, prefix)
restore_saved_model(model_dir)
restore_checkpoint(ckpt_dir, prefix)
|
src/tests/ftest/util/write_host_file.py | fedepad/daos | 429 | 135199 |
<gh_stars>100-1000
#!/usr/bin/python
"""
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from logging import getLogger
import os
import random
from collections import Counter
def write_host_file(hostlist, path='/tmp', slots=1):
"""Write out a hostfile suitable for orterun.
Args:
hostlist (list): list of hosts to write to the hostfile
path (str, optional): where to write the hostfile. Defaults to '/tmp'.
slots (int, optional): slots per host to specify in the hostfile.
Defaults to 1.
Raises:
ValueError: if no hosts have been specified
Returns:
str: the full path of the written hostfile
"""
log = getLogger()
unique = random.randint(1, 100000) #nosec
if not os.path.exists(path):
os.makedirs(path)
hostfile = os.path.join(path, "".join(["hostfile", str(unique)]))
if hostlist is None:
raise ValueError("host list parameter must be provided.")
log.info(
"Writing hostfile:\n hosts: %s\n slots: %s\n file: %s",
hostlist, slots, hostfile)
with open(hostfile, "w") as hostfile_handle:
for host in hostlist:
if slots is None:
hostfile_handle.write("{0}\n".format(host))
else:
hostfile_handle.write("{0} slots={1}\n".format(host, slots))
return hostfile
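# Hedged usage sketch (not part of the original module): writing two hosts with
# four slots each produces an orterun/mpirun-style hostfile such as
#   node-1 slots=4
#   node-2 slots=4
# where the file name carries a random numeric suffix, e.g. /tmp/hostfile12345.
#
#   hostfile_path = write_host_file(["node-1", "node-2"], path="/tmp", slots=4)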
|
tests/core/data/test_batch.py | Actis92/lightning-flash | 1,457 | 135209 |
<reponame>Actis92/lightning-flash
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import pytest
import torch
from flash.core.data.batch import default_uncollate
Case = namedtuple("Case", ["collated_batch", "uncollated_batch"])
cases = [
# Primitives
Case({"preds": [1, 2, 3]}, [{"preds": 1}, {"preds": 2}, {"preds": 3}]),
Case(
{"preds": [1, 2, 3], "metadata": [4, 5, 6]},
[{"preds": 1, "metadata": 4}, {"preds": 2, "metadata": 5}, {"preds": 3, "metadata": 6}],
),
Case(([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]),
Case([[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]),
Case([[1, 2], [4, 5, 6]], [[1, 2], [4, 5, 6]]),
Case([["a", "b"], ["a", "c", "d"]], [["a", "b"], ["a", "c", "d"]]),
# Tensors
Case({"preds": torch.tensor([1, 2, 3])}, [{"preds": 1}, {"preds": 2}, {"preds": 3}]),
Case(
{"preds": torch.tensor([1, 2, 3]), "metadata": torch.tensor([4, 5, 6])},
[{"preds": 1, "metadata": 4}, {"preds": 2, "metadata": 5}, {"preds": 3, "metadata": 6}],
),
Case(torch.tensor([1, 2, 3]), [torch.tensor(1), torch.tensor(2), torch.tensor(3)]),
# Mixed
Case(
{"preds": torch.tensor([1, 2, 3]), "metadata": [4, 5, 6]},
[{"preds": 1, "metadata": 4}, {"preds": 2, "metadata": 5}, {"preds": 3, "metadata": 6}],
),
]
@pytest.mark.parametrize("case", cases)
def test_default_uncollate(case):
assert default_uncollate(case.collated_batch) == case.uncollated_batch
ErrorCase = namedtuple("ErrorCase", ["collated_batch", "match"])
error_cases = [
ErrorCase({"preds": [1, 2, 3], "metadata": [4, 5, 6, 7]}, "expected to have the same length."),
ErrorCase({"preds": [1, 2, 3], "metadata": "test"}, "expected to be list-like."),
ErrorCase("test", "expected to be a `dict` or list-like"),
]
@pytest.mark.parametrize("error_case", error_cases)
def test_default_uncollate_raises(error_case):
with pytest.raises(ValueError, match=error_case.match):
default_uncollate(error_case.collated_batch)
|
examples/get-started/hello_world/example_test.py | iPlon-org/esp-idf | 8,747 | 135210 |
<filename>examples/get-started/hello_world/example_test.py
#!/usr/bin/env python
from __future__ import division, print_function, unicode_literals
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32s2', 'esp32c3'], ci_target=['esp32'])
def test_examples_hello_world(env, extra_data):
app_name = 'hello_world'
dut = env.get_dut(app_name, 'examples/get-started/hello_world')
dut.start_app()
res = dut.expect(ttfw_idf.MINIMUM_FREE_HEAP_SIZE_RE)
if not res:
        raise ValueError('Minimum free heap size info not found')
ttfw_idf.print_heap_size(app_name, dut.app.config_name, dut.TARGET, res[0])
if __name__ == '__main__':
test_examples_hello_world()
|
qa/L0_libtorch_io_names/io_names_client.py | nskool/server | 358 | 135229 |
#!/usr/bin/python
# Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from future.utils import iteritems
import unittest
import test_util as tu
import numpy as np
import tritonclient.http as httpclient
from tritonclient.utils import np_to_triton_dtype
from tritonclient.utils import InferenceServerException
class IONamingConvention(tu.TestResultCollector):
def _infer_helper(self, model_name, io_names, reversed_order=False):
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=False)
# Create the data for the two inputs. Initialize the first to unique
        # integers and the second to all negative ones.
input0_data = np.arange(start=0, stop=16, dtype=np.float32)
input0_data = np.expand_dims(input0_data, axis=0)
input1_data = np.full(shape=(1, 16), fill_value=-1, dtype=np.float32)
inputs = []
output_req = []
inputs.append(
httpclient.InferInput(
io_names[0] if not reversed_order else io_names[1], [1, 16],
"FP32"))
inputs[-1].set_data_from_numpy(input0_data)
inputs.append(
httpclient.InferInput(
io_names[1] if not reversed_order else io_names[0], [1, 16],
"FP32"))
inputs[-1].set_data_from_numpy(input1_data)
output_req.append(
httpclient.InferRequestedOutput(io_names[2], binary_data=True))
output_req.append(
httpclient.InferRequestedOutput(io_names[3], binary_data=True))
results = triton_client.infer(model_name, inputs, outputs=output_req)
output0_data = results.as_numpy(
io_names[2] if not reversed_order else io_names[3])
output1_data = results.as_numpy(
io_names[3] if not reversed_order else io_names[2])
for i in range(16):
self.assertEqual(input0_data[0][i] - input1_data[0][i],
output0_data[0][i])
self.assertEqual(input0_data[0][i] + input1_data[0][i],
output1_data[0][i])
def test_io_index(self):
io_names = ["INPUT__0", "INPUT__1", "OUTPUT__0", "OUTPUT__1"]
self._infer_helper("libtorch_io_index", io_names)
def test_output_index(self):
io_names = ["INPUT0", "INPUT1", "OUTPUT__0", "OUTPUT__1"]
self._infer_helper("libtorch_output_index", io_names)
def test_no_output_index(self):
io_names = ["INPUT0", "INPUT1", "OUTPUT0", "OUTPUT1"]
self._infer_helper("libtorch_no_output_index", io_names)
def test_no_arguments_no_output_index(self):
io_names = ["INPUTA", "INPUTB", "OUTPUTA", "OUTPUTB"]
self._infer_helper("libtorch_no_arguments_output_index", io_names)
def test_mix_index(self):
io_names = ["INPUTA", "INPUT__1", "OUTPUTA", "OUTPUT__1"]
self._infer_helper("libtorch_mix_index", io_names)
def test_mix_arguments(self):
io_names = ["INPUT0", "INPUTB", "OUTPUTA", "OUTPUT__1"]
self._infer_helper("libtorch_mix_arguments", io_names)
def test_mix_arguments_index(self):
io_names = ["INPUT0", "INPUT__1", "OUTPUT0", "OUTPUT__1"]
self._infer_helper("libtorch_mix_arguments_index", io_names)
def test_unordered_index(self):
io_names = ["INPUT1", "INPUT0", "OUT__1", "OUT__0"]
self._infer_helper("libtorch_unordered_index",
io_names,
reversed_order=True)
if __name__ == '__main__':
unittest.main()
|
src/molecule/test/unit/verifier/test_ansible.py | Shaps/molecule | 1,599 | 135275 |
import os
import pytest
from molecule import config
from molecule.verifier import ansible
@pytest.fixture
def _patched_ansible_verify(mocker):
m = mocker.patch("molecule.provisioner.ansible.Ansible.verify")
m.return_value = "patched-ansible-verify-stdout"
return m
@pytest.fixture
def _verifier_section_data():
return {"verifier": {"name": "ansible", "env": {"FOO": "bar"}}}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(_verifier_section_data, patched_config_validate, config_instance):
return ansible.Ansible(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
assert {} == _instance.default_options
def test_default_env_property(_instance):
assert "MOLECULE_FILE" in _instance.default_env
assert "MOLECULE_INVENTORY_FILE" in _instance.default_env
assert "MOLECULE_SCENARIO_DIRECTORY" in _instance.default_env
assert "MOLECULE_INSTANCE_CONFIG" in _instance.default_env
@pytest.mark.parametrize("config_instance", ["_verifier_section_data"], indirect=True)
def test_env_property(_instance):
assert "bar" == _instance.env["FOO"]
def test_name_property(_instance):
assert "ansible" == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
def test_directory_property(_instance):
parts = _instance.directory.split(os.path.sep)
# Unused by Ansible verifier
assert ["molecule", "default", "tests"] == parts[-3:]
@pytest.mark.parametrize("config_instance", ["_verifier_section_data"], indirect=True)
def test_options_property(_instance):
x = {}
assert x == _instance.options
@pytest.mark.parametrize("config_instance", ["_verifier_section_data"], indirect=True)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {"debug": True}
x = {}
assert x == _instance.options
def test_execute(patched_logger_info, _patched_ansible_verify, _instance):
_instance.execute()
_patched_ansible_verify.assert_called_once_with(None)
msg = "Running Ansible Verifier"
patched_logger_info.assert_any_call(msg)
msg = "Verifier completed successfully."
patched_logger_info.assert_any_call(msg)
def test_execute_does_not_execute(
patched_ansible_converge, patched_logger_warning, _instance
):
_instance._config.config["verifier"]["enabled"] = False
_instance.execute()
assert not patched_ansible_converge.called
msg = "Skipping, verifier is disabled."
patched_logger_warning.assert_called_once_with(msg)
|
tgen/eval.py | AnneBeyer/tgen | 222 | 135280 |
#!/usr/bin/env python
# coding=utf-8
"""
Evaluation (t-tree comparison functions).
"""
from __future__ import unicode_literals
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
from collections import defaultdict
from enum import Enum
from tgen.logf import log_debug, log_warn, log_info
from tgen.tree import TreeData, TreeNode
from tgen.futil import add_bundle_text
import numpy as np
try:
from pytreex.core.node import T
except ImportError:
log_warn('Pytreex modules not available, will not be able to evaluate trees.')
EvalTypes = Enum('EvalTypes', 'TOKEN NODE DEP')
EvalTypes.__doc__ = """Evaluation flavors (tokens, tree node-only, tree dependency)"""
def collect_counts(sent, eval_type=EvalTypes.NODE):
"""Collects counts of different node/dependency types in the given t-tree.
@param sent: the tree/sentence to collect counts from
@param eval_type: if set to EvalTypes.NODE (default), count nodes (formemes, lemmas, dependency \
direction), if set to EvalTypes.DEP, count dependencies (including parent's formeme, lemma, \
dependency direction), if set to EvalTypes.TOKEN, count just word forms (in list of tokens).
@rtype: defaultdict
"""
counts = defaultdict(int)
nodes = sent if isinstance(sent, list) else sent.get_descendants()
for node in nodes:
if eval_type == EvalTypes.TOKEN:
node_id = node[0] # for tokens, use form only (ignore tag)
elif eval_type == EvalTypes.NODE:
node_id = (node.formeme, node.t_lemma, node > node.parent)
else:
parent = node.parent
node_id = (node.formeme, node.t_lemma, node > node.parent,
parent.formeme, parent.t_lemma, (parent.parent is not None and parent > parent.parent))
counts[node_id] += 1
return counts
def corr_pred_gold(gold, pred, eval_type=EvalTypes.NODE):
"""Given a golden tree/sentence and a predicted tree/sentence, this counts correctly
predicted nodes/tokens (true positives), all predicted nodes/tokens (true + false
positives), and all golden nodes/tokens (true positives + false negatives).
@param gold: a golden t-tree/sentence
@param pred: a predicted t-tree/sentence
@param eval_type: type of matching (see EvalTypes)
@rtype: tuple
@return: numbers of correctly predicted, total predicted, and total golden nodes/tokens
"""
gold_counts = collect_counts(gold, eval_type)
pred_counts = collect_counts(pred, eval_type)
ccount, pcount = 0, 0
for node_id, node_count in pred_counts.items():
pcount += node_count
ccount += min(node_count, gold_counts[node_id])
gcount = sum(node_count for node_count in gold_counts.values())
return ccount, pcount, gcount
def precision(gold, pred, eval_type=EvalTypes.NODE):
ccount, pcount, _ = corr_pred_gold(gold, pred, eval_type)
return ccount / float(pcount)
def recall(gold, pred, eval_type=EvalTypes.NODE):
# # correct / # gold
ccount, _, gcount = corr_pred_gold(gold, pred, eval_type)
return ccount / float(gcount)
def f1(gold, pred, eval_type=EvalTypes.NODE):
    return f1_from_counts(*corr_pred_gold(gold, pred, eval_type))
def f1_from_counts(correct, predicted, gold):
return p_r_f1_from_counts(correct, predicted, gold)[2]
def p_r_f1_from_counts(correct, predicted, gold):
"""Return precision, recall, and F1 given counts of true positives (correct),
total predicted nodes, and total gold nodes.
@param correct: true positives (correctly predicted nodes/tokens)
@param predicted: true + false positives (all predicted nodes/tokens)
@param gold: true positives + false negatives (all golden nodes/tokens)
@rtype: tuple
@return: precision, recall, F1
"""
if correct == 0.0: # escape division by zero
return 0.0, 0.0, 0.0
precision = correct / float(predicted)
recall = correct / float(gold)
return precision, recall, old_div((2 * precision * recall), (precision + recall))
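# Worked example (illustrative only): with correct=8, predicted=10, gold=12,
# precision = 8/10 = 0.8, recall = 8/12 ~= 0.667, and
# F1 = 2 * 0.8 * 0.667 / (0.8 + 0.667) ~= 0.727,
# i.e. p_r_f1_from_counts(8, 10, 12) ~= (0.8, 0.667, 0.727).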
def to_treedata(t):
if isinstance(t, TreeNode):
return t.tree
elif isinstance(t, T):
return TreeData.from_ttree(t)
def common_subtree_size(a, b):
a = to_treedata(a)
b = to_treedata(b)
return a.common_subtree_size(b)
def max_common_subphrase_length(a, b):
"""Return the length of the longest common subphrase of a and b; where a and b are
lists of tokens (form+tag)."""
longest = 0
for sp_a in range(len(a)):
for sp_b in range(len(b)):
pos_a = sp_a
pos_b = sp_b
# disregard tags for comparison
while pos_a < len(a) and pos_b < len(b) and a[pos_a][0] == b[pos_b][0]:
pos_a += 1
pos_b += 1
if pos_a - sp_a > longest:
longest = pos_a - sp_a
return longest
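# Worked example (illustrative only): tokens are (form, tag) pairs and only the
# forms are compared, so for
#   a = [('the', 'DT'), ('big', 'JJ'), ('dog', 'NN')]
#   b = [('a', 'DT'), ('big', 'JJ'), ('dog', 'NN'), ('ran', 'VBD')]
# the longest common subphrase is 'big dog' and the function returns 2.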
class Stats(object):
"""A set of important statistic values, with simple access and printing."""
def __init__(self, data):
self.mean = np.mean(data)
self.median = np.median(data)
self.min = min(data)
self.max = max(data)
self.perc25 = np.percentile(data, 25)
self.perc75 = np.percentile(data, 75)
def __str__(self):
return "\t".join("%s: %9.3f" % (key.capitalize(), getattr(self, key))
for key in ['mean', 'median', 'min', 'max', 'perc25', 'perc75'])
class Evaluator(object):
"""A fancy object-oriented interface to computing node F-scores.
Accumulates scores over trees/sentences using append(), then can return
a total score using f1(), precision(), recall(), and p_r_f1()."""
def __init__(self):
self.reset()
def reset(self):
"""Zero out all current statistics, start from scratch."""
self.correct = {eval_type: 0 for eval_type in EvalTypes}
self.predicted = {eval_type: 0 for eval_type in EvalTypes}
self.gold = {eval_type: 0 for eval_type in EvalTypes}
self.sizes = []
self.scores = []
def process_eval_doc(self, eval_doc, gen_trees, language, ref_selector, target_selector):
"""Evaluate generated trees against a reference document; save per-tree statistics
in the reference document and print out global statistics.
Does not reset statistics at the beginning (must be reset manually if needed).
@param eval_doc: reference t-tree document
@param gen_trees: a list of generated TreeData objects
@param language: language for the reference document
@param ref_selector: selector for reference trees in the reference document
@param target_selector: selector for generated trees (used to save statistics)
"""
log_info('Evaluating...')
for eval_bundle, gen_tree, in zip(eval_doc.bundles, gen_trees):
# add some stats about the tree directly into the output file
eval_ttree = eval_bundle.get_zone(language, ref_selector).ttree
gen_ttree = TreeNode(gen_tree)
add_bundle_text(eval_bundle, language, target_selector + 'Xscore',
"P: %.4f R: %.4f F1: %.4f" %
p_r_f1_from_counts(*corr_pred_gold(eval_ttree, gen_ttree)))
# collect overall stats
# TODO maybe add cost somehow?
self.append(eval_ttree, gen_ttree)
# print out the overall stats
log_info("NODE precision: %.4f, Recall: %.4f, F1: %.4f" % self.p_r_f1())
log_info("DEP precision: %.4f, Recall: %.4f, F1: %.4f" % self.p_r_f1(EvalTypes.DEP))
log_info("Tree size stats:\n * GOLD %s\n * PRED %s\n * DIFF %s" % self.size_stats())
log_info("Score stats:\n * GOLD %s\n * PRED %s\n * DIFF %s" % self.score_stats())
log_info("Common subtree stats:\n -- SIZE: %s\n -- ΔGLD: %s\n -- ΔPRD: %s" %
self.common_substruct_stats())
def append(self, gold, pred, gold_score=0.0, pred_score=0.0):
"""Add a pair of golden and predicted tree/sentence to the current statistics.
@param gold: a T or TreeNode object representing the golden tree, or list of golden tokens
@param pred: a T or TreeNode object representing the predicted tree, or list of predicted \
tokens
"""
if isinstance(gold, list): # tokens
eval_types = [EvalTypes.TOKEN]
gold_len = len(gold)
pred_len = len(pred)
css = max_common_subphrase_length(gold, pred)
else: # trees
eval_types = [EvalTypes.NODE, EvalTypes.DEP]
gold_len = len(gold.get_descendants())
pred_len = len(pred.get_descendants())
css = common_subtree_size(gold, pred)
self.sizes.append((gold_len, pred_len, css))
for eval_type in eval_types:
ccount, pcount, gcount = corr_pred_gold(gold, pred, eval_type)
self.correct[eval_type] += ccount
self.predicted[eval_type] += pcount
self.gold[eval_type] += gcount
self.scores.append((gold_score, pred_score))
def merge(self, other):
"""Merge in statistics from another Evaluator object."""
for eval_type in EvalTypes:
self.correct[eval_type] += other.correct[eval_type]
self.predicted[eval_type] += other.predicted[eval_type]
self.gold[eval_type] += other.gold[eval_type]
self.sizes.extend(other.sizes)
self.scores.extend(other.scores)
def f1(self, eval_type=EvalTypes.NODE):
return self.p_r_f1(eval_type)[2]
def precision(self, eval_type=EvalTypes.NODE):
return self.p_r_f1(eval_type)[0]
def recall(self, eval_type=EvalTypes.NODE):
return self.p_r_f1(eval_type)[1]
def p_r_f1(self, eval_type=EvalTypes.NODE):
return p_r_f1_from_counts(self.correct[eval_type],
self.predicted[eval_type],
self.gold[eval_type])
def size_stats(self):
"""Return current tree/sentence size statistics.
@rtype: a 3-tuple of Stats objects
@return: statistics for golden trees/sentences, predicted trees/sentences, and differences
"""
return (Stats([inst[0] for inst in self.sizes]),
Stats([inst[1] for inst in self.sizes]),
Stats([inst[0] - inst[1] for inst in self.sizes]))
def common_substruct_stats(self):
"""Return common subtree/subphrase size statistics.
@rtype: a 3-tuple of Stats objects
@return: statistics for common subtree/subphrase size + sizes of what's missing to full \
gold/predicted tree/sentence
"""
return (Stats([inst[2] for inst in self.sizes]),
Stats([inst[0] - inst[2] for inst in self.sizes]),
Stats([inst[1] - inst[2] for inst in self.sizes]))
def score_stats(self):
"""Return tree/sentence score statistics.
@rtype: a 3-tuple of Stats objects
@return: statistics for golden trees/sentences, predicted trees/sentences, and differences
"""
return (Stats([inst[0] for inst in self.scores]),
Stats([inst[1] for inst in self.scores]),
Stats([inst[0] - inst[1] for inst in self.scores]))
def tree_accuracy(self):
"""Return tree-level accuracy (percentage of gold trees scored higher or equal to
        the best predicted tree)."""
return (sum(1 for gold_score, pred_score in self.scores if gold_score >= pred_score) /
float(len(self.scores)))
class ASearchListsAnalyzer(object):
"""Analysis of the final open and close lists of the A*search generator."""
def __init__(self):
self.reset()
def reset(self):
"""Zero all statistics."""
self.total = 0
self.gold_best = 0
self.gold_on_close = 0
self.gold_on_open = 0
def append(self, gold_tree, open_list, close_list):
"""Analyze the open and close lists of a generator for the presence of the gold-standard
tree and add the results to statistics."""
self.total += 1
best_tree = close_list.peek()[0]
if gold_tree == best_tree:
self.gold_best += 1
log_debug('GOLD TREE IS BEST')
if gold_tree in close_list:
self.gold_on_close += 1
log_debug('GOLD TREE IS ON CLOSE LIST')
if gold_tree in open_list:
self.gold_on_open += 1
log_debug('GOLD TREE IS ON OPEN LIST')
def merge(self, other):
"""Merge in another ASearchListsAnalyzer object."""
self.total += other.total
self.gold_best += other.gold_best
self.gold_on_close += other.gold_on_close
self.gold_on_open += other.gold_on_open
def stats(self):
"""Return statistics (as percentages): gold tree was best, gold tree was on
close list, gold tree was on open list.
@rtype: tuple
"""
if self.total == 0:
return (0.0, 0.0, 0.0)
tot = float(self.total)
return (old_div(self.gold_best, tot),
old_div(self.gold_on_close, tot),
old_div((self.gold_on_close + self.gold_on_open), tot))
class SlotErrAnalyzer(object):
"""Analyze slot error (as in Wen 2015 EMNLP paper), accumulator object."""
def __init__(self, delex_slots=set()):
self.reset()
self.delex_slots = delex_slots
def reset(self):
"""Zero all statistics."""
self.missing = 0
self.superfluous = 0
self.total = 0
def append(self, da, sent):
"""Include statistics from the given sentence (assuming tokens, not trees)."""
if sent and isinstance(sent[0], tuple):
sent = [form for form, pos in sent] # ignore POS
if isinstance(da, tuple):
da = da[1] # ignore contexts
if self.delex_slots:
da = da.get_delexicalized(self.delex_slots)
slots_in_da = set([dai.value for dai in da if dai.value and dai.value.startswith('X-')])
slots_in_sent = set([tok for tok in sent if tok.startswith('X-')])
self.total += len(slots_in_da)
self.missing += len(slots_in_da - slots_in_sent)
self.superfluous += len(slots_in_sent - slots_in_da)
def slot_error(self):
"""Return the currently accumulated slot error."""
if self.total == 0: # avoid zero division error
return 0
return (self.missing + self.superfluous) / float(self.total)
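# Hedged illustration (not part of the original module): slot_error() reduces to
# set arithmetic over delexicalized 'X-' placeholders. With hypothetical values
#   slots_in_da   = {'X-name', 'X-area', 'X-food'}
#   slots_in_sent = {'X-name', 'X-area', 'X-price'}
# one slot is missing ('X-food') and one is superfluous ('X-price'), so the
# accumulated slot error for this single pair would be (1 + 1) / 3 ~= 0.667.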
|
linear_dynamical_systems/process_mit_bih.py | deepneuralmachine/google-research | 23,901 | 135293 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate timeseries in 2 clusters: NSR and SVT from mit-bih data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import app
from absl import flags
import numpy as np
import wfdb
FLAGS = flags.FLAGS
flags.DEFINE_string(
'input_dir', None,
'Local input directory containing the mit-bih file that can be copied from '
'/namespace/health-research/unencrypted/reference/user/milah/mit_bih/.')
flags.DEFINE_string('outfile_dir', None,
'Output filepath.')
def main(argv):
del argv
all_ = [100, 101, 102, 103, 104, 105, 106, 107, 108, 111, 112, 113, 114, 115,
116, 117, 118, 119, 121, 122, 123, 124, 200, 201, 202, 203, 205, 207,
208, 209, 210, 212, 213, 214, 215, 217, 219, 220, 221, 222, 223, 228,
230, 231, 232, 233, 234]
target_rhythms = ['AB', 'AFIB', 'AFL', 'B', 'BII', 'IVR', 'N', 'NOD', 'P',
'PREX', 'SBR', 'SVTA', 'T', 'VFL', 'VT']
rhythms = dict()
for a in all_:
ann_ref = wfdb.rdann(FLAGS.input_dir + str(a), 'atr')
for k, label in enumerate(ann_ref.aux_note):
label = str(label).strip('\x00').strip('(')
if label in target_rhythms:
sampfrom = max(0, ann_ref.sample[k] - 140)
sampto = ann_ref.sample[k] + 361
sig, _ = wfdb.rdsamp(FLAGS.input_dir + str(a), channels=[0, 1],
sampfrom=sampfrom, sampto=sampto)
for channel in [0, 1]:
key = str(a) + ':' + str(k) + ':' + str(channel) + ':' + str(
ann_ref.sample[k])
x = np.array(sig)
x = x[:, channel]
record = ','.join([key, str(channel), str(label)] + [
str(i) for i in x])
if label not in rhythms:
rhythms[label] = []
rhythms[label].append(record)
all_rhythms = sorted(rhythms.keys())
print(all_rhythms)
random.seed(1984)
  with open(FLAGS.outfile_dir + 'all.csv', 'w') as f_all:
    for label in all_rhythms:
      records = rhythms[label]
      idxs = list(range(len(records) // 2))
      random.shuffle(idxs)
      outfile = FLAGS.outfile_dir + label + '.csv'
      with open(outfile, 'w') as f:
for i in idxs:
f.write(records[2*i] + '\n')
f.write(records[2*i+1] + '\n')
f_all.write(records[2*i] + '\n')
f_all.write(records[2*i+1] + '\n')
if __name__ == '__main__':
app.run(main)
|
test/test_misc.py | nodamu/dl_coursera | 111 | 135327 |
import unittest
class TestMisc(unittest.TestCase):
def test_pypi_api(self):
from dl_coursera.lib.misc import get_latest_app_version
ver = get_latest_app_version()
self.assertRegex(ver, r'\d+\.\d+\.\d+')
|
flows/statestore/tests/utils.py | sergioisidoro/django-flows | 104 | 135329 |
<reponame>sergioisidoro/django-flows
from flows.statestore.tests.models import TestModel
def store_state_works(case, store):
test_model = TestModel.objects.create(fruit='apple', count=34)
task_id = '10293847565647382910abdcef1029384756'
state = {'a': 1,
'b': 'cake',
'model': test_model,
'pies': {'r': 2, 'theta': 20 }
}
store.put_state(task_id, state)
fetched_state = store.get_state(task_id)
case.assertTrue('a' in fetched_state)
case.assertEqual(1, fetched_state['a'])
case.assertTrue('b' in fetched_state)
case.assertEqual('cake', fetched_state['b'])
case.assertTrue('model' in fetched_state)
fetched_model = fetched_state['model']
case.assertEqual(test_model.id, fetched_model.id)
case.assertEqual(test_model.fruit, fetched_model.fruit)
case.assertEqual(test_model.count, fetched_model.count)
case.assertTrue('pies' in fetched_state)
case.assertEqual({'r': 2, 'theta': 20 }, fetched_state['pies'])
|
rpython/jit/backend/zarch/test/test_calling_convention.py | nanjekyejoannah/pypy | 381 | 135344 |
<filename>rpython/jit/backend/zarch/test/test_calling_convention.py
from rpython.jit.backend.test.calling_convention_test import CallingConvTests
from rpython.jit.backend.zarch.codebuilder import InstrBuilder
from rpython.rtyper.lltypesystem import lltype, rffi
import rpython.jit.backend.zarch.registers as r
import rpython.jit.backend.zarch.conditions as c
class TestZARCHCallingConvention(CallingConvTests):
# ../../test/calling_convention_test.py
def make_function_returning_stack_pointer(self):
mc = InstrBuilder()
mc.LGR(r.r2, r.SP)
mc.BCR(c.ANY, r.r14)
return rffi.cast(lltype.Signed, mc.get_assembler_function())
def get_alignment_requirements(self):
return 2 # two byte alignment
|
vega/algorithms/nas/sgas/__init__.py | jie311/vega | 724 | 135369 |
<filename>vega/algorithms/nas/sgas/__init__.py
from .sgas_trainer_callback import *
|
models/sgm_model/gen_labelmap.py | techstack-studios/AnimeInterp | 245 | 135378 |
import os, sys
import argparse
import numpy as np
import cv2
from skimage import filters
from linefiller.thinning import thinning
from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, \
show_fill_map, my_merge_fill
def dline_of(x, low_thr=1, high_thr=20, bf_args=[30,40,30]):
xm = cv2.medianBlur(x, 5)
# xga = cv2.GaussianBlur(x,(5, 5),cv2.BORDER_DEFAULT)
xb = cv2.bilateralFilter(x, bf_args[0], bf_args[1], bf_args[2])
# xb = cv2.bilateralFilter(xb, 20, 60, 10 )
xg = cv2.cvtColor(xb, cv2.COLOR_RGB2GRAY)
xl = cv2.Laplacian(xb, ddepth = cv2.CV_32F, ksize=5)
xgg = xl
xgg = xgg.astype(np.float32) * (255. / (xgg.astype(np.float32).max() * 1.0))
xh = filters.apply_hysteresis_threshold(xgg, low_thr, high_thr)
xgg[xh == False] = 0
# xgg[xh == True] = 255
xgg1 = xgg.copy() * 20
xgg1 = np.max(xgg1, axis=2)
return np.clip(255 - xgg1, 0, 255)
def squeeze_label_map(label_map):
ret_label_map = label_map.copy()
labels, counts = np.unique(ret_label_map, return_counts=True)
label_orders = np.argsort(counts)
for ord_id, ord_val in enumerate(label_orders):
mask = (label_map == labels[ord_val])
ret_label_map[mask] = ord_id
return ret_label_map
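# Worked example (illustrative only): labels are re-indexed by ascending
# frequency, so for label_map = np.array([[7, 7, 7], [3, 3, 9]]) the counts are
# {7: 3, 3: 2, 9: 1}; the rarest label (9) maps to 0, 3 maps to 1, and the most
# frequent label (7) maps to 2, giving [[2, 2, 2], [1, 1, 0]].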
def trapped_ball_processed(binary, in_image=None, do_merge=True):
fills = []
result = binary
fill = trapped_ball_fill_multi(result, 3, method='max')
fills += fill
result = mark_fill(result, fill)
print('result num 3: ', len(fills))
fill = trapped_ball_fill_multi(result, 2, method=None)
fills += fill
result = mark_fill(result, fill)
print('result num 2: ', len(fills))
fill = trapped_ball_fill_multi(result, 1, method=None)
fills += fill
result = mark_fill(result, fill)
print('result num 1: ', len(fills))
fill = flood_fill_multi(result)
fills += fill
print('flood_fill_multi num 1: ', len(fills))
fillmap = build_fill_map(result, fills)
# print('fillmap num: ', len(np.unique(fillmap)))
if do_merge:
if in_image is None:
fillmap = merge_fill(fillmap, max_iter=10)
else:
fillmap = my_merge_fill(in_image, fillmap)
fillmap = thinning(fillmap)
return fillmap
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_root')
parser.add_argument('output_root')
parser.add_argument('--start_idx', default=0,
help='')
parser.add_argument('--end_idx', default=None,
help='')
parser.add_argument('--height', default=960,
help='height of the generated flow, default: 960')
parser.add_argument('--width', default=540,
help='width of the generated flow, default: 540')
parser.add_argument('--use_gpu', action='store_true')
args = parser.parse_args()
######
folder_root = args.input_root
save_root = args.output_root
use_gpu = args.use_gpu
start_idx = int(args.start_idx)
end_idx = None if args.end_idx is None else int(args.end_idx)
# tar_size = (1280, 720)
tar_size = (args.height, args.width)
# tar_size = (640, 360)
######
print('use gpu: ', use_gpu)
sys.stdout.flush()
if not os.path.exists(save_root):
os.makedirs(save_root)
folderList = sorted(os.listdir(folder_root))
print('folderList length: ', len(folderList))
for f_idx, folder in enumerate(folderList[start_idx:end_idx]):
f_idx += start_idx
input_subfolder = os.path.join(folder_root, folder)
imgFileNames = sorted(os.listdir(input_subfolder))
print('-- [%d] %s'%(f_idx, folder))
print(imgFileNames)
saveFolder = os.path.join(save_root, folder)
labelMap1_savePath = os.path.join(saveFolder, 'labelmap_1.npy')
labelMap2_savePath = os.path.join(saveFolder, 'labelmap_3.npy')
# if os.path.exists(labelMap1_savePath) and os.path.exists(labelMap2_savePath):
# try:
# binMap1 = np.load(labelMap1_savePath)
# binMap3 = np.load(labelMap2_savePath)
# except IOError:
# print("labelmap file corrupted")
# else:
# print("already generated")
# continue
sys.stdout.flush()
img1 = cv2.imread(os.path.join(input_subfolder, imgFileNames[0]))
img3 = cv2.imread(os.path.join(input_subfolder, imgFileNames[-1]))
# segmentation
img1_rs = cv2.resize(img1, tar_size)
img3_rs = cv2.resize(img3, tar_size)
if 'Disney' in folder:
boundImg1 = dline_of(img1_rs, 1, 20, [30,40,30]).astype(np.uint8)
boundImg3 = dline_of(img3_rs, 1, 20, [30,40,30]).astype(np.uint8)
else:
boundImg1 = dline_of(img1_rs, 2, 20, [10,10,10]).astype(np.uint8)
boundImg3 = dline_of(img3_rs, 2, 20, [10,10,10]).astype(np.uint8)
ret, binMap1 = cv2.threshold(boundImg1, 220, 255, cv2.THRESH_BINARY)
ret, binMap3 = cv2.threshold(boundImg3, 220, 255, cv2.THRESH_BINARY)
print('- trapped_ball_processed()')
sys.stdout.flush()
fillMap1 = trapped_ball_processed(binMap1, img1_rs)
fillMap3 = trapped_ball_processed(binMap3, img3_rs)
labelMap1 = squeeze_label_map(fillMap1)
labelMap3 = squeeze_label_map(fillMap3)
# save flows
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
np.save(labelMap1_savePath, labelMap1)
np.save(labelMap2_savePath, labelMap3)
print('save to %s, %s'%(labelMap1_savePath, labelMap2_savePath))
sys.stdout.flush()
labelMap1_img = show_fill_map(labelMap1)
labelMap3_img = show_fill_map(labelMap3)
cv2.imwrite(os.path.join(saveFolder, 'labelmap_1.jpg'), labelMap1_img)
cv2.imwrite(os.path.join(saveFolder, 'labelmap_3.jpg'), labelMap3_img)
|
pyxtal/interface/vasp.py | ubikpt/PyXtal | 127 | 135381 |
<reponame>ubikpt/PyXtal
from pyxtal import pyxtal
from ase import Atoms
from pyxtal.util import good_lattice
from ase.calculators.vasp import Vasp
import os, time
import numpy as np
"""
A script to perform multi-stage VASP calculations.
"""
class VASP():
"""
    This is a calculator to perform structure optimization with VASP.
    At the moment, only inorganic crystals are considered.
    Args:
        struc: structure object generated by Pyxtal (or an ASE Atoms object)
        path: working directory for the calculation
        cmd: command used to launch VASP
"""
def __init__(self, struc, path='tmp', cmd='mpirun -np 16 vasp_std'):
if isinstance(struc, pyxtal):
struc = struc.to_ase()
if not isinstance(struc, Atoms):
raise NotImplementedError("only support ASE atoms object")
self.structure = struc
self.folder = path
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self.pstress = 0.0
self.energy = None
self.energy_per_atom = None
self.stress = None
self.forces = None
self.gap = None
self.cputime = 0
self.error = True
self.cmd = cmd
def set_vasp(self, level=0, pstress=0.0000, setup=None):
self.pstress = pstress
default0 = {'xc': 'pbe',
'npar': 8,
'kgamma': True,
'lcharg': False,
'lwave': False,
'ibrion': 2,
'pstress': pstress*10,
'setups': setup,
}
if level==0:
default1 = {'prec': 'low',
'algo': 'normal',
'kspacing': 0.4,
'isif': 4,
'ediff': 1e-2,
'nsw': 10,
'potim': 0.02,
}
elif level==1:
default1 = {'prec': 'normal',
'algo': 'normal',
'kspacing': 0.3,
'isif': 3,
'ediff': 1e-3,
'nsw': 25,
'potim': 0.05,
}
elif level==2:
default1 = {'prec': 'accurate',
'kspacing': 0.2,
'isif': 3,
'ediff': 1e-3,
'nsw': 50,
'potim': 0.1,
}
elif level==3:
default1 = {'prec': 'accurate',
'encut': 600,
'kspacing': 0.15,
'isif': 3,
'ediff': 1e-4,
'nsw': 50,
}
elif level==4:
default1 = {'prec': 'accurate',
'encut': 600,
'kspacing': 0.15,
'isif': 3,
'ediff': 1e-4,
'nsw': 0,
}
dict_vasp = dict(default0, **default1)
return Vasp(**dict_vasp)
def read_OUTCAR(self, path='OUTCAR'):
"""read time and ncores info from OUTCAR"""
time = 0
ncore = 0
for line in open(path, 'r'):
if line.rfind('running on ') > -1:
ncore = int(line.split()[2])
elif line.rfind('Elapsed time ') > -1:
time = float(line.split(':')[-1])
self.cputime = time
self.ncore = ncore
def read_OSZICAR(self, path='OSZICAR'):
"""read the enthalpy from OSZICAR"""
energy = 100000
for line in open(path, 'r'):
if line.rfind(' F= ') > -1:
energy = float(line.split()[2])
self.energy = energy # this is actually enthalpy
def read_bandgap(self, path='vasprun.xml'):
from pyxtal.interface.vasprun import vasprun
myrun = vasprun(path)
self.gap = myrun.values['gap']
def run(self, setup=None, pstress=0, level=0, clean=True, read_gap=False, walltime=None):
if walltime is not None:
os.environ["VASP_COMMAND"] = "timeout " + max_time + " " + self.cmd
else:
os.environ["VASP_COMMAND"] = self.cmd
cwd = os.getcwd()
setups = self.set_vasp(level, pstress, setup)
self.structure.set_calculator(setups)
os.chdir(self.folder)
try:
self.structure.get_potential_energy()
self.error = False
self.read_OSZICAR()
except RuntimeError:
# VASP is not full done
self.read_OSZICAR()
if self.energy < 10000:
self.error = False
except (IndexError, ValueError, UnboundLocalError):
print("Error in parsing vasp output or VASP calc is wrong")
os.system("cp OUTCAR Error-OUTCAR")
if not self.error:
try:
self.forces = self.structure.get_forces()
except:
self.forces = np.zeros([len(self.structure),3])
self.energy_per_atom = self.energy/len(self.structure)
self.read_OUTCAR()
if read_gap:
self.read_bandgap()
if clean:
self.clean()
os.chdir(cwd)
def clean(self):
os.remove("POSCAR")
os.remove("POTCAR")
os.remove("INCAR")
os.remove("OUTCAR")
if os.path.exists("OSZICAR"):
os.remove("OSZICAR")
    def to_pymatgen(self):
        from pymatgen.core.structure import Structure
        # Build a pymatgen Structure from the ASE atoms object held in self.structure.
        return Structure(self.structure.cell[:], self.structure.get_chemical_symbols(),
                         self.structure.get_scaled_positions())
def to_pyxtal(self):
struc = pyxtal()
struc.from_seed(self.structure)
return struc
def single_optimize(struc, level, pstress, setup, path, clean,
cmd='mpirun -np 16 vasp_std', walltime="30m"):
"""
    single optimization
Args:
struc: pyxtal structure
level: vasp calc level
pstress: external pressure
setup: vasp setup
path: calculation directory
Returns:
the structure, energy and time costs
"""
calc = VASP(struc, path, cmd=cmd)
calc.run(setup, pstress, level, clean=clean)
if calc.error:
return None, None, 0, True
else:
try:
struc = calc.to_pyxtal()
struc.optimize_lattice()
return struc, calc.energy_per_atom, calc.cputime, calc.error
except:
return None, None, 0, True
def single_point(struc, setup=None, path=None, clean=True):
"""
    single optimization
Args:
struc: pyxtal structure
level: vasp calc level
pstress: external pressure
setup: vasp setup
path: calculation directory
Returns:
the energy and forces
"""
calc = VASP(struc, path)
calc.run(setup, level=4, clean=clean)
return calc.energy, calc.forces, calc.error
def optimize(struc, path, levels=[0,2,3], pstress=0, setup=None,
clean=True, cmd='mpirun -np 16 vasp_std', walltime="30m"):
"""
multi optimization
Args:
struc: pyxtal structure
path: calculation directory
levels: list of vasp calc levels
pstress: external pressure
setup: vasp setup
Returns:
list of structures, energies and time costs
"""
time_total = 0
for i, level in enumerate(levels):
struc, eng, time, error = single_optimize(struc, level, pstress, setup, path,
clean, cmd, walltime)
time_total += time
#print(eng, time, time_total, '++++++++++++++++++++++++++++++')
if error or not good_lattice(struc):
return None, None, 0, True
return struc, eng, time_total, error
if __name__ == "__main__":
while True:
struc = pyxtal()
struc.from_random(3, 19, ["C"], [4])
if struc.valid:
break
# set up the commands
os.system("source /share/intel/mkl/bin/mklvars.sh intel64")
cmd='mpirun -n 4 /share/apps/bin/vasp544-2019u2/vasp_std'
calc = VASP(struc, path='tmp', cmd=cmd)
calc.run()
print("Energy:", calc.energy)
print("Forces", calc.forces)
struc, eng, time, _ = optimize(struc, path='tmp', levels=[0,1,2], cmd=cmd, walltime='30s')
print(struc)
print("Energy:", eng)
print("Time:", time)
calc = VASP(struc, path='tmp', cmd=cmd)
calc.run(level=4, read_gap=True)
print("Energy:", calc.energy)
print("Gap:", calc.gap)
|
dotviewer/graphserver.py | nanjekyejoannah/pypy | 333 | 135382 |
#! /usr/bin/env python
"""Graph server.
From the command-line it's easier to use sshgraphserver.py instead of this.
"""
from __future__ import print_function, absolute_import
import os, sys
PARENTDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# make dotviewer importable
sys.path.insert(0, PARENTDIR)
from dotviewer import msgstruct
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
import thread
except ImportError:
import _thread as thread
class Server(object):
def __init__(self, io):
self.io = io
self.display = None
def run(self, only_one_graph=False):
# wait for the CMSG_INIT message
msg = self.io.recvmsg()
if msg[0] != msgstruct.CMSG_INIT or msg[1] != msgstruct.MAGIC:
raise ValueError("bad MAGIC number")
# process messages until we have a pygame display
while self.display is None:
self.process_next_message()
# start a background thread to process further messages
if not only_one_graph:
thread.start_new_thread(self.process_all_messages, ())
# give control to pygame
self.display.run1()
def process_all_messages(self):
try:
while True:
self.process_next_message()
except EOFError:
from dotviewer.drawgraph import display_async_quit
display_async_quit()
def process_next_message(self):
msg = self.io.recvmsg()
fn = self.MESSAGES.get(msg[0])
if fn:
fn(self, *msg[1:])
else:
self.log("unknown message code %r" % (msg[0],))
def log(self, info):
print(info, file=sys.stderr)
def setlayout(self, layout):
if self.display is None:
# make the initial display
from dotviewer.graphdisplay import GraphDisplay
self.display = GraphDisplay(layout)
else:
# send an async command to the display running the main thread
from dotviewer.drawgraph import display_async_cmd
display_async_cmd(layout=layout)
def cmsg_start_graph(self, graph_id, scale, width, height, *rest):
from dotviewer.drawgraph import GraphLayout
self.newlayout = GraphLayout(float(scale), float(width), float(height))
def request_reload():
self.io.sendmsg(msgstruct.MSG_RELOAD, graph_id)
def request_followlink(word):
self.io.sendmsg(msgstruct.MSG_FOLLOW_LINK, graph_id, word)
self.newlayout.request_reload = request_reload
self.newlayout.request_followlink = request_followlink
def cmsg_add_node(self, *args):
self.newlayout.add_node(*args)
def cmsg_add_edge(self, *args):
self.newlayout.add_edge(*args)
def cmsg_add_link(self, word, *info):
if len(info) == 1:
info = info[0]
elif len(info) >= 4:
info = (info[0], info[1:4])
self.newlayout.links[word] = info
def cmsg_fixed_font(self, *rest):
self.newlayout.fixedfont = True
def cmsg_stop_graph(self, *rest):
self.setlayout(self.newlayout)
del self.newlayout
self.io.sendmsg(msgstruct.MSG_OK)
def cmsg_missing_link(self, *rest):
self.setlayout(None)
def cmsg_say(self, errmsg, *rest):
        from dotviewer.drawgraph import display_async_cmd
display_async_cmd(say=errmsg)
MESSAGES = {
msgstruct.CMSG_START_GRAPH: cmsg_start_graph,
msgstruct.CMSG_ADD_NODE: cmsg_add_node,
msgstruct.CMSG_ADD_EDGE: cmsg_add_edge,
msgstruct.CMSG_ADD_LINK: cmsg_add_link,
msgstruct.CMSG_FIXED_FONT: cmsg_fixed_font,
msgstruct.CMSG_STOP_GRAPH: cmsg_stop_graph,
msgstruct.CMSG_MISSING_LINK:cmsg_missing_link,
msgstruct.CMSG_SAY: cmsg_say,
}
def listen_server(local_address, s1=None):
    import socket
    from dotviewer import graphclient
if isinstance(local_address, str):
if ':' in local_address:
interface, port = local_address.split(':')
else:
interface, port = '', local_address
local_address = interface, int(port)
if s1 is None:
s1 = socket.socket()
s1.bind(local_address)
s1.listen(5)
print('listening on %r...' % (s1.getsockname(),))
while True:
conn, addr = s1.accept()
print('accepted connection from %r' % (addr,))
sock_io = msgstruct.SocketIO(conn)
handler_io = graphclient.spawn_local_handler()
thread.start_new_thread(copy_all, (sock_io, handler_io))
thread.start_new_thread(copy_all, (handler_io, sock_io))
del sock_io, handler_io, conn
def copy_all(io1, io2):
try:
while True:
io2.sendall(io1.recv())
except EOFError:
io2.close_sending()
if __name__ == '__main__':
if len(sys.argv) != 2:
if len(sys.argv) == 1:
# start locally
import sshgraphserver
sshgraphserver.ssh_graph_server(['LOCAL'])
sys.exit(0)
print(__doc__, file=sys.stderr)
sys.exit(2)
if sys.argv[1] == '--stdio':
# a one-shot server running on stdin/stdout
io = msgstruct.FileIO(getattr(sys.stdin, 'buffer', sys.stdin),
getattr(sys.stdout, 'buffer', sys.stdout))
srv = Server(io)
try:
srv.run()
except Exception as e:
import traceback
f = StringIO()
traceback.print_exc(file=f)
# try to add some explanations
help = (" | if you want to debug on a remote machine, see\n"
" | instructions in dotviewer/sshgraphserver.py\n")
try:
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
if isinstance(e, pygame.error):
print(help, file=f)
except Exception as e:
f.seek(0)
f.truncate()
print("%s: %s" % (e.__class__.__name__, e), file=f)
print(" | Pygame is not installed; either install it, or", file=f)
print(help, file=f)
io.sendmsg(msgstruct.MSG_ERROR, f.getvalue())
else:
listen_server(sys.argv[1])
|
desktop/core/ext-py/monotonic-1.5/monotonic.py | kokosing/hue | 5,079 | 135422 |
<reponame>kokosing/hue
# -*- coding: utf-8 -*-
"""
monotonic
~~~~~~~~~
This module provides a ``monotonic()`` function which returns the
value (in fractional seconds) of a clock which never goes backwards.
On Python 3.3 or newer, ``monotonic`` will be an alias of
``time.monotonic`` from the standard library. On older versions,
it will fall back to an equivalent implementation:
+-------------+----------------------------------------+
| Linux, BSD | ``clock_gettime(3)`` |
+-------------+----------------------------------------+
| Windows | ``GetTickCount`` or ``GetTickCount64`` |
+-------------+----------------------------------------+
| OS X | ``mach_absolute_time`` |
+-------------+----------------------------------------+
If no suitable implementation exists for the current platform,
attempting to import this module (or to import from it) will
cause a ``RuntimeError`` exception to be raised.
Copyright 2014, 2015, 2016 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
__all__ = ('monotonic',)
try:
monotonic = time.monotonic
except AttributeError:
import ctypes
import ctypes.util
import os
import sys
import threading
try:
if sys.platform == 'darwin': # OS X, iOS
# See Technical Q&A QA1398 of the Mac Developer Library:
# <https://developer.apple.com/library/mac/qa/qa1398/>
libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
class mach_timebase_info_data_t(ctypes.Structure):
"""System timebase info. Defined in <mach/mach_time.h>."""
_fields_ = (('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32))
mach_absolute_time = libc.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
timebase = mach_timebase_info_data_t()
libc.mach_timebase_info(ctypes.byref(timebase))
ticks_per_second = timebase.numer / timebase.denom * 1.0e9
def monotonic():
"""Monotonic clock, cannot go backward."""
return mach_absolute_time() / ticks_per_second
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
if sys.platform.startswith('cygwin'):
# Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
# version 1.7.6. Using raw WinAPI for maximum version compatibility.
# Ugly hack using the wrong calling convention (in 32-bit mode)
# because ctypes has no windll under cygwin (and it also seems that
# the code letting you select stdcall in _ctypes doesn't exist under
# the preprocessor definitions relevant to cygwin).
# This is 'safe' because:
# 1. The ABI of GetTickCount and GetTickCount64 is identical for
# both calling conventions because they both have no parameters.
# 2. libffi masks the problem because after making the call it doesn't
# touch anything through esp and epilogue code restores a correct
# esp from ebp afterwards.
try:
kernel32 = ctypes.cdll.kernel32
except OSError: # 'No such file or directory'
kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
else:
kernel32 = ctypes.windll.kernel32
GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
if GetTickCount64:
# Windows Vista / Windows Server 2008 or newer.
GetTickCount64.restype = ctypes.c_ulonglong
def monotonic():
"""Monotonic clock, cannot go backward."""
return GetTickCount64() / 1000.0
else:
# Before Windows Vista.
GetTickCount = kernel32.GetTickCount
GetTickCount.restype = ctypes.c_uint32
get_tick_count_lock = threading.Lock()
get_tick_count_last_sample = 0
get_tick_count_wraparounds = 0
def monotonic():
"""Monotonic clock, cannot go backward."""
global get_tick_count_last_sample
global get_tick_count_wraparounds
with get_tick_count_lock:
current_sample = GetTickCount()
if current_sample < get_tick_count_last_sample:
get_tick_count_wraparounds += 1
get_tick_count_last_sample = current_sample
final_milliseconds = get_tick_count_wraparounds << 32
final_milliseconds += get_tick_count_last_sample
return final_milliseconds / 1000.0
else:
try:
clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
use_errno=True).clock_gettime
except Exception:
clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
use_errno=True).clock_gettime
class timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
_fields_ = (('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long))
if sys.platform.startswith('linux'):
CLOCK_MONOTONIC = 1
elif sys.platform.startswith('freebsd'):
CLOCK_MONOTONIC = 4
elif sys.platform.startswith('sunos5'):
CLOCK_MONOTONIC = 4
elif 'bsd' in sys.platform:
CLOCK_MONOTONIC = 3
elif sys.platform.startswith('aix'):
CLOCK_MONOTONIC = ctypes.c_longlong(10)
def monotonic():
"""Monotonic clock, cannot go backward."""
ts = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
return ts.tv_sec + ts.tv_nsec / 1.0e9
# Perform a sanity-check.
if monotonic() - monotonic() > 0:
raise ValueError('monotonic() is not monotonic!')
except Exception as e:
raise RuntimeError('no suitable implementation for this system: ' + repr(e))
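# Illustrative usage sketch (not part of the original module): timing a block of
# work with the monotonic() defined above; only the module-level `time` import
# and the public monotonic() function are assumed.
if __name__ == '__main__':
    started = monotonic()
    time.sleep(0.1)  # stand-in for real work
    elapsed = monotonic() - started
    print('elapsed: %.3f seconds' % elapsed)  # never negative, even if the wall clock is adjusted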
|
src/platform/axis2/fingerprints/AX12.py
|
0x27/clusterd
| 539 |
135423
|
<filename>src/platform/axis2/fingerprints/AX12.py
from src.platform.axis2.interfaces import DefaultServer
class FPrint(DefaultServer):
def __init__(self):
super(FPrint, self).__init__()
self.version = '1.2'
|
dace/version.py
|
jnice-81/dace
| 144 |
135428
|
<gh_stars>100-1000
__version__ = '0.11.3'
|
custom_components/ovapi/sensor.py
|
klaasnicolaas/Smarthome-homeassistant-config
| 167 |
135435
|
<filename>custom_components/ovapi/sensor.py
from datetime import datetime, timedelta
import logging
import operator
import json
import itertools
import http.client
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, STATE_UNKNOWN)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
__version__ = '1.4.2'
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'v0.ovapi.nl'
CONF_STOP_CODE = 'stop_code'
CONF_TIMING_POINT_CODE = 'timing_point_code'
CONF_ROUTE_CODE = 'route_code'
CONF_SHOW_FUTURE_DEPARTURES = 'show_future_departures'
CONF_LINE_FILTER = 'line_filter'
CONF_DATE_FORMAT = 'date_format'
CONF_CREDITS = 'Data provided by v0.ovapi.nl'
DEFAULT_NAME = 'Line info'
DEFAULT_DATE_FORMAT = "%y-%m-%dT%H:%M:%S"
DEFAULT_SHOW_FUTURE_DEPARTURES = 0
ATTR_NAME = 'name'
ATTR_STOP_CODE = 'stop_code'
ATTR_ROUTE_CODE = 'route_code'
ATTR_TIMING_POINT_CODE = 'timing_point_code'
ATTR_LINE_FILTER = 'line_filter'
ATTR_ICON = 'icon'
ATTR_DESTINATION = 'destination'
ATTR_PROVIDER = 'provider'
ATTR_TRANSPORT_TYPE = 'transport_type'
ATTR_LINE_NAME = 'line_name'
ATTR_STOP_NAME = 'stop_name'
ATTR_DEPARTURE = 'departure'
ATTR_DELAY = 'delay'
ATTR_DEPARTURES = 'departures'
ATTR_UPDATE_CYCLE = 'update_cycle'
ATTR_CREDITS = 'credits'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STOP_CODE, default=CONF_STOP_CODE): cv.string,
vol.Optional(CONF_TIMING_POINT_CODE, default=CONF_TIMING_POINT_CODE): cv.string,
vol.Optional(CONF_ROUTE_CODE, default=CONF_ROUTE_CODE): cv.string,
vol.Optional(CONF_LINE_FILTER, default=CONF_LINE_FILTER): cv.string,
vol.Optional(CONF_SHOW_FUTURE_DEPARTURES, default=DEFAULT_SHOW_FUTURE_DEPARTURES): cv.positive_int,
vol.Optional(CONF_DATE_FORMAT, default=DEFAULT_DATE_FORMAT): cv.string,
})
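# Illustrative configuration sketch (not part of the original component): one way the
# schema above could be filled in from configuration.yaml. The stop code and line
# numbers below are made-up placeholders.
#
#   sensor:
#     - platform: ovapi
#       name: Bus to work
#       stop_code: "12345678"
#       show_future_departures: 2
#       line_filter: "18, 22"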
def setup_platform(hass, config, add_entities, discovery_info=None):
name = config.get(CONF_NAME)
stop_code = config.get(CONF_STOP_CODE)
timing_point_code = config.get(CONF_TIMING_POINT_CODE)
route_code = config.get(CONF_ROUTE_CODE)
future_departures = config.get(CONF_SHOW_FUTURE_DEPARTURES)
line_filter = config.get(CONF_LINE_FILTER)
ov_api = OvApiData(stop_code, timing_point_code)
ov_api.update()
if ov_api is None:
raise PlatformNotReady
sensors = []
for counter in range(future_departures + 1):
if counter == 0:
sensors.append(OvApiSensor(ov_api, name, stop_code, timing_point_code, route_code, line_filter, counter))
else:
sensors.append(OvApiSensor(ov_api, (name + "_future_" + str(counter)), stop_code, timing_point_code,
route_code, line_filter, counter))
add_entities(sensors, True)
class OvApiSensor(Entity):
def __init__(self, ov_api, name, stop_code, timing_point_code, route_code, line_filter, counter):
self._json_data = ov_api
self._name = name
self._stop_code = stop_code
self._timing_point_code = timing_point_code
self._route_code = route_code
self._line_filter = line_filter
self._sensor_number = counter
self._icon = None
self._destination = None
self._provider = None
self._transport_type = None
self._line_name = None
self._stop_name = None
self._departure = None
self._delay = None
self._departures = None
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
return self._icon
@property
def destination(self):
return self._destination
@property
def provider(self):
return self._provider
@property
def transport_type(self):
return self._transport_type
@property
def line_name(self):
return self._line_name
@property
def stop_name(self):
return self._stop_name
@property
def departure(self):
return self._departure
@property
def delay(self):
return self._delay
@property
def departures(self):
return self._departures
@property
def state(self):
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._line_filter == ATTR_LINE_FILTER:
filter = None
else:
filter = self._line_filter
if self._sensor_number == 0:
return{
ATTR_NAME: self._name,
ATTR_STOP_CODE: self._stop_code,
ATTR_TIMING_POINT_CODE: self._timing_point_code,
ATTR_ROUTE_CODE: self._route_code,
ATTR_LINE_FILTER: filter,
ATTR_ICON: self._icon,
ATTR_DESTINATION: self._destination,
ATTR_PROVIDER: self._provider,
ATTR_TRANSPORT_TYPE: self._transport_type,
ATTR_LINE_NAME: self._line_name,
ATTR_STOP_NAME: self._stop_name,
ATTR_DEPARTURE: self._departure,
ATTR_DELAY: self._delay,
ATTR_DEPARTURES: self._departures,
ATTR_UPDATE_CYCLE: str(MIN_TIME_BETWEEN_UPDATES.seconds) + ' seconds',
ATTR_CREDITS: CONF_CREDITS
}
else:
return {
ATTR_NAME: self._name,
ATTR_STOP_CODE: self._stop_code,
ATTR_TIMING_POINT_CODE: self._timing_point_code,
ATTR_ROUTE_CODE: self._route_code,
ATTR_LINE_FILTER: filter,
ATTR_ICON: self._icon,
ATTR_DESTINATION: self._destination,
ATTR_PROVIDER: self._provider,
ATTR_TRANSPORT_TYPE: self._transport_type,
ATTR_LINE_NAME: self._line_name,
ATTR_STOP_NAME: self._stop_name,
ATTR_DEPARTURE: self._departure,
ATTR_DELAY: self._delay,
ATTR_UPDATE_CYCLE: str(MIN_TIME_BETWEEN_UPDATES.seconds) + ' seconds',
ATTR_CREDITS: CONF_CREDITS
}
def update(self):
"""Get the latest data from the OvApi."""
self._json_data.update()
data = json.loads(self._json_data.result)
stops = {}
if self._stop_code != CONF_STOP_CODE and self._stop_code is not None:
self._timing_point_code = None
stops = itertools.islice(data[self._stop_code][self._route_code]['Passes'].values(), 50)
elif self._timing_point_code != CONF_TIMING_POINT_CODE and self._timing_point_code is not None:
self._stop_code = None
stops = itertools.islice(data[self._timing_point_code]['Passes'].values(), 50)
else:
_LOGGER.error("Impossible to get data from OvApi, no stop code and no timing point code!")
stops_list = []
for stop in stops:
if self._line_filter == ATTR_LINE_FILTER or stop['LinePublicNumber'] in self._line_filter.split(", "):
target_departure_time = datetime.strptime(stop['TargetDepartureTime'], "%Y-%m-%dT%H:%M:%S")
expected_arrival_time = datetime.strptime(stop['ExpectedDepartureTime'], "%Y-%m-%dT%H:%M:%S")
calculate_delay = expected_arrival_time - target_departure_time
delay = round(calculate_delay.seconds / 60)
stops_item = {
"destination": stop['DestinationName50'],
"provider": stop['DataOwnerCode'],
"transport_type": stop['TransportType'].title(),
"line_name": stop['TransportType'].title() + ' ' + stop['LinePublicNumber'] + ' - ' +
stop['DestinationName50'],
"stop_name": stop['TimingPointName'],
"TargetDepartureTime": target_departure_time.time(),
"ExpectedArrivalTime": expected_arrival_time.time(),
"Delay": delay
}
stops_list.append(stops_item)
if data is None:
self._departure = STATE_UNKNOWN
self._delay = STATE_UNKNOWN
self._departures = STATE_UNKNOWN
self._state = STATE_UNKNOWN
else:
if self._sensor_number >= len(stops_list):
self._departure = STATE_UNKNOWN
self._delay = STATE_UNKNOWN
self._departures = STATE_UNKNOWN
self._state = STATE_UNKNOWN
else:
stops_list.sort(key=operator.itemgetter('TargetDepartureTime'))
self._destination = stops_list[self._sensor_number]["destination"]
self._provider = stops_list[self._sensor_number]["provider"]
self._transport_type = stops_list[self._sensor_number]["transport_type"]
self._line_name = stops_list[self._sensor_number]["line_name"]
self._stop_name = stops_list[self._sensor_number]["stop_name"]
self._departure = stops_list[self._sensor_number]["TargetDepartureTime"].strftime('%H:%M')
self._delay = str(stops_list[self._sensor_number]["Delay"])
if self._sensor_number == 0:
departure_list = []
next_stops_list = stops_list[1:]
for counter, stop in enumerate(next_stops_list):
if counter <= 11:
if next_stops_list[counter]["Delay"] == 0:
departure_list.append(next_stops_list[counter]["TargetDepartureTime"].strftime('%H:%M'))
else:
departure_list.append(next_stops_list[counter]["TargetDepartureTime"].strftime('%H:%M') +
" + " + str(next_stops_list[counter]["Delay"]) + "m")
self._departures = departure_list
if stops_list[self._sensor_number]["Delay"] == 0:
self._state = self._departure
else:
self._state = stops_list[self._sensor_number]["ExpectedArrivalTime"].strftime('%H:%M')
else:
if stops_list[self._sensor_number]["Delay"] == 0:
self._state = self._departure
else:
self._state = self._departure + ' +' + str(self._delay) + 'm'
if self._transport_type == "Tram":
self._icon = 'mdi:train'
if self._transport_type == "Bus":
self._icon = 'mdi:bus'
if self._transport_type == "Metro":
self._icon = 'mdi:subway-variant'
class OvApiData:
def __init__(self, stop_code, timing_point_code):
self._resource = _RESOURCE
self._stop_code = stop_code
self._timing_point_code = timing_point_code
self.result = ""
self._headers = {
'cache-control': "no-cache",
'accept': "application/json"
}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
if self._stop_code == CONF_STOP_CODE and self._timing_point_code == CONF_TIMING_POINT_CODE:
_LOGGER.error("Impossible to get data from OvApi, no stop code and no timing point code.")
self.result = "Impossible to get data from OvApi, no stop code and no timing point code."
elif self._stop_code != CONF_STOP_CODE:
try:
response = http.client.HTTPConnection(self._resource, timeout=1)
response.request("GET", "/stopareacode/" + self._stop_code, headers = self._headers)
result = response.getresponse()
self.result = result.read().decode('utf-8')
except http.client.HTTPException:
_LOGGER.error("Impossible to get data from OvApi using stop code.")
self.result = "Impossible to get data from OvApi using stop code."
else:
try:
response = http.client.HTTPConnection(self._resource, timeout=1)
response.request("GET", "/tpc/" + self._timing_point_code, headers = self._headers)
result = response.getresponse()
self.result = result.read().decode('utf-8')
except http.client.HTTPException:
_LOGGER.error("Impossible to get data from OvApi using timing point code.")
self.result = "Impossible to get data from OvApi using timing point code."
|
setup.py
|
cclauss/removestar
| 102 |
135455
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
import versioneer
setuptools.setup(
name="removestar",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="<NAME>",
author_email="<EMAIL>",
description="A tool to automatically replace 'import *' imports with explicit imports in files",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://www.asmeurer.com/removestar/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={'console_scripts': [ 'removestar = removestar.__main__:main']},
python_requires= '>=3.6',
install_requires=[
'pyflakes'
],
license='MIT',
)
|
venv/Lib/site-packages/nipype/algorithms/tests/test_ErrorMap.py
|
richung99/digitizePlots
| 585 |
135459
|
<filename>venv/Lib/site-packages/nipype/algorithms/tests/test_ErrorMap.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from nipype.testing import example_data
from nipype.algorithms.metrics import ErrorMap
import nibabel as nb
import numpy as np
import os
def test_errormap(tmpdir):
    # Single-Spectral
# Make two fake 2*2*2 voxel volumes
# <NAME>'s birthday
volume1 = np.array([[[2.0, 8.0], [1.0, 2.0]], [[1.0, 9.0], [0.0, 3.0]]])
# <NAME>'s birthday
volume2 = np.array([[[0.0, 7.0], [2.0, 3.0]], [[1.0, 9.0], [1.0, 2.0]]])
mask = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 1]]])
img1 = nb.Nifti1Image(volume1, np.eye(4))
img2 = nb.Nifti1Image(volume2, np.eye(4))
maskimg = nb.Nifti1Image(mask, np.eye(4))
nb.save(img1, tmpdir.join("von.nii.gz").strpath)
nb.save(img2, tmpdir.join("alan.nii.gz").strpath)
nb.save(maskimg, tmpdir.join("mask.nii.gz").strpath)
# Default metric
errmap = ErrorMap()
errmap.inputs.in_tst = tmpdir.join("von.nii.gz").strpath
errmap.inputs.in_ref = tmpdir.join("alan.nii.gz").strpath
errmap.out_map = tmpdir.join("out_map.nii.gz").strpath
result = errmap.run()
assert result.outputs.distance == 1.125
# Square metric
errmap.inputs.metric = "sqeuclidean"
result = errmap.run()
assert result.outputs.distance == 1.125
# Linear metric
errmap.inputs.metric = "euclidean"
result = errmap.run()
assert result.outputs.distance == 0.875
# Masked
errmap.inputs.mask = tmpdir.join("mask.nii.gz").strpath
result = errmap.run()
assert result.outputs.distance == 1.0
    # Multi-Spectral
# <NAME>'s birthday
volume3 = np.array([[[1.0, 6.0], [0.0, 3.0]], [[1.0, 9.0], [3.0, 6.0]]])
msvolume1 = np.zeros(shape=(2, 2, 2, 2))
msvolume1[:, :, :, 0] = volume1
msvolume1[:, :, :, 1] = volume3
msimg1 = nb.Nifti1Image(msvolume1, np.eye(4))
msvolume2 = np.zeros(shape=(2, 2, 2, 2))
msvolume2[:, :, :, 0] = volume3
msvolume2[:, :, :, 1] = volume1
msimg2 = nb.Nifti1Image(msvolume2, np.eye(4))
nb.save(msimg1, tmpdir.join("von-ray.nii.gz").strpath)
nb.save(msimg2, tmpdir.join("alan-ray.nii.gz").strpath)
errmap.inputs.in_tst = tmpdir.join("von-ray.nii.gz").strpath
errmap.inputs.in_ref = tmpdir.join("alan-ray.nii.gz").strpath
errmap.inputs.metric = "sqeuclidean"
result = errmap.run()
assert result.outputs.distance == 5.5
errmap.inputs.metric = "euclidean"
result = errmap.run()
assert result.outputs.distance == np.float32(1.25 * (2 ** 0.5))
|
tests/test_codecs.py
|
jaegertracing/jaeger-client-python
| 372 |
135460
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import namedtuple
from itertools import product
import mock
import pytest
from jaeger_client import Span, SpanContext, Tracer, ConstSampler
from jaeger_client.codecs import (
Codec, TextCodec, BinaryCodec, ZipkinCodec, ZipkinSpanFormat, B3Codec,
span_context_from_string,
span_context_to_string,
)
from jaeger_client.config import Config
from jaeger_client.reporter import InMemoryReporter
from jaeger_client import constants
from opentracing import Format
from opentracing.propagation import (
InvalidCarrierException,
SpanContextCorruptedException,
)
class TestCodecs(unittest.TestCase):
def test_abstract_codec(self):
codec = Codec()
with self.assertRaises(NotImplementedError):
codec.inject({}, {})
with self.assertRaises(NotImplementedError):
codec.extract({})
def test_wrong_carrier(self):
codec = TextCodec()
with self.assertRaises(InvalidCarrierException):
codec.inject(span_context={}, carrier=[]) # array is no good
with self.assertRaises(InvalidCarrierException):
codec.extract(carrier=[])
def test_trace_context_from_bad_string(self):
tests = [
(123.321, 'not a string'),
('bad value', 'bad string'),
('1:1:1:1:1', 'Too many colons'),
('1:1:1', 'Too few colons'),
('x:1:1:1', 'Not all numbers'),
('1:x:1:1', 'Not all numbers'),
('1:1:x:1', 'Not all numbers'),
('1:1:1:x', 'Not all numbers'),
('0:1:1:1', 'Trace ID cannot be zero'),
('1:0:1:1', 'Span ID cannot be zero'),
('1:1:-1:1', 'Parent ID cannot be negative'),
('fdf8:f53e:61e4::18', 'Parent ID is missing'),
('1:1:1:-1', 'Flags cannot be negative'),
]
for test in tests:
try:
val = span_context_from_string(test[0])
except SpanContextCorruptedException:
val = None
self.assertEqual(val, None, test[1])
def test_trace_context_from_to_string(self):
to_string = span_context_to_string
from_string = span_context_from_string
tests = [
[(256, 127, None, 1), '100:7f:0:1'],
[(256, 127, 256, 0), '100:7f:100:0'],
[(0xffffffffffffffffffffffffffffffff, 127, 256, 0),
'ffffffffffffffffffffffffffffffff:7f:100:0'],
]
for test in tests:
ctx = test[0]
value = to_string(*ctx)
self.assertEqual(value, test[1])
ctx_rev = from_string(value)
self.assertEqual(ctx_rev, ctx)
ctx_rev = from_string(['100:7f:100:0'])
assert ctx_rev == (256, 127, 256, 0), 'Array is acceptable'
with self.assertRaises(SpanContextCorruptedException):
from_string(['100:7f:100:0', 'garbage'])
ctx_rev = from_string(u'100:7f:100:0')
assert ctx_rev == (256, 127, 256, 0), 'Unicode is acceptable'
def test_context_to_readable_headers(self):
for url_encoding in [False, True]:
codec = TextCodec(
url_encoding=url_encoding,
trace_id_header='Trace_ID',
baggage_header_prefix='Trace-Attr-')
ctx = SpanContext(
trace_id=256, span_id=127, parent_id=None, flags=1
)
carrier = {}
codec.inject(ctx, carrier)
assert carrier == {'trace-id': '100:7f:0:1'}
ctx._baggage = {
'bender': 'Countess de la Roca',
'fry': u'Leela',
b'key1': bytes([75]),
u'key2': 'cafe',
u'key3': u'\U0001F47E',
}
carrier = {}
codec.inject(ctx, carrier)
# NB: the reverse transformation is not exact, e.g. this fails:
# assert ctx._baggage == codec.extract(carrier)._baggage
# But fully supporting lossless Unicode baggage is not the goal.
if url_encoding:
assert carrier == {
'trace-id': '100:7f:0:1',
'trace-attr-bender': 'Countess%20de%20la%20Roca',
'trace-attr-fry': 'Leela',
'trace-attr-key1': 'K',
'trace-attr-key2': 'cafe',
'trace-attr-key3': '%F0%9F%91%BE',
}, 'with url_encoding = %s' % url_encoding
for key, val in carrier.items():
assert isinstance(key, str)
assert isinstance(val, str), '%s' % type(val)
else:
assert carrier == {
'trace-id': '100:7f:0:1',
'trace-attr-bender': 'Countess de la Roca',
'trace-attr-fry': 'Leela',
'trace-attr-key1': 'K',
u'trace-attr-key2': 'cafe',
'trace-attr-key3': u'\U0001F47E',
}, 'with url_encoding = %s' % url_encoding
def test_context_from_bad_readable_headers(self):
codec = TextCodec(trace_id_header='Trace_ID',
baggage_header_prefix='Trace-Attr-')
ctx = codec.extract(dict())
assert ctx is None, 'No headers'
bad_headers = {
'_Trace_ID': '100:7f:0:1',
'_trace-attr-Kiff': 'Amy'
}
ctx = codec.extract(bad_headers)
assert ctx is None, 'Bad header names'
with self.assertRaises(InvalidCarrierException):
codec.extract(carrier=[]) # not a dict
good_headers_bad_values = {
'Trace-ID': '100:7f:0:1xxx',
'trace-attr-Kiff': 'Amy'
}
with self.assertRaises(SpanContextCorruptedException):
codec.extract(good_headers_bad_values)
def test_context_from_readable_headers(self):
# provide headers all the way through Config object
config = Config(
service_name='test',
config={
'trace_id_header': 'Trace_ID',
'baggage_header_prefix': 'Trace-Attr-',
})
tracer = config.create_tracer(
reporter=InMemoryReporter(),
sampler=ConstSampler(True),
)
for url_encoding in [False, True]:
if url_encoding:
codec = tracer.codecs[Format.HTTP_HEADERS]
headers = {
'Trace-ID': '100%3A7f:0:1',
'trace-attr-Kiff': 'Amy%20Wang',
'trace-atTR-HERMES': 'LaBarbara%20Hermes'
}
else:
codec = tracer.codecs[Format.HTTP_HEADERS]
headers = {
'Trace-ID': '100:7f:0:1',
'trace-attr-Kiff': '<NAME>',
'trace-atTR-HERMES': 'LaBarbara Hermes'
}
ctx = codec.extract(headers)
assert ctx.trace_id == 256
assert ctx.span_id == 127
assert ctx.parent_id is None
assert ctx.flags == 1
assert ctx.baggage == {
'kiff': '<NAME>',
'hermes': '<NAME>',
}
def test_context_from_large_ids(self):
codec = TextCodec(trace_id_header='Trace_ID',
baggage_header_prefix='Trace-Attr-')
headers = {
'Trace-ID': 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:FFFFFFFFFFFFFFFF:FFFFFFFFFFFFFFFF:1',
}
context = codec.extract(headers)
assert context.trace_id == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
assert context.trace_id == (1 << 128) - 1
assert context.trace_id > 0
assert context.span_id == 0xFFFFFFFFFFFFFFFF
assert context.span_id == (1 << 64) - 1
assert context.span_id > 0
assert context.parent_id == 0xFFFFFFFFFFFFFFFF
assert context.parent_id == (1 << 64) - 1
assert context.parent_id > 0
def test_zipkin_codec_extract(self):
codec = ZipkinCodec()
t = namedtuple('Tracing', 'span_id parent_id trace_id traceflags')
carrier = t(span_id=1, parent_id=2, trace_id=3, traceflags=1)
context = codec.extract(carrier)
assert 3 == context.trace_id
assert 2 == context.parent_id
assert 1 == context.span_id
assert 1 == context.flags
assert context.baggage == {}
t = namedtuple('Tracing', 'something')
carrier = t(something=1)
with self.assertRaises(InvalidCarrierException):
codec.extract(carrier)
t = namedtuple('Tracing', 'trace_id')
carrier = t(trace_id=1)
with self.assertRaises(InvalidCarrierException):
codec.extract(carrier)
t = namedtuple('Tracing', 'trace_id span_id')
carrier = t(trace_id=1, span_id=1)
with self.assertRaises(InvalidCarrierException):
codec.extract(carrier)
t = namedtuple('Tracing', 'trace_id span_id parent_id')
carrier = t(trace_id=1, span_id=1, parent_id=1)
with self.assertRaises(InvalidCarrierException):
codec.extract(carrier)
carrier = {'span_id': 1, 'parent_id': 2, 'trace_id': 3,
'traceflags': 1}
context = codec.extract(carrier)
assert 3 == context.trace_id
assert 2 == context.parent_id
assert 1 == context.span_id
assert 1 == context.flags
assert context.baggage == {}
carrier['trace_id'] = 0
assert codec.extract(carrier) is None
def test_zipkin_codec_inject(self):
codec = ZipkinCodec()
with self.assertRaises(InvalidCarrierException):
codec.inject(span_context=None, carrier=[])
ctx = SpanContext(trace_id=256, span_id=127, parent_id=None, flags=1)
span = Span(context=ctx, operation_name='x', tracer=None, start_time=1)
carrier = {}
codec.inject(span_context=span, carrier=carrier)
assert carrier == {'span_id': 127, 'parent_id': None,
'trace_id': 256, 'traceflags': 1}
def test_zipkin_b3_codec_inject(self):
codec = B3Codec()
with self.assertRaises(InvalidCarrierException):
codec.inject(span_context=None, carrier=[])
ctx = SpanContext(trace_id=256, span_id=127, parent_id=None, flags=2)
span = Span(context=ctx, operation_name='x', tracer=None, start_time=1)
carrier = {}
codec.inject(span_context=span, carrier=carrier)
assert carrier == {'X-B3-SpanId': format(127, 'x').zfill(16),
'X-B3-TraceId': format(256, 'x').zfill(16), 'X-B3-Flags': '1'}
def test_b3_codec_inject_parent(self):
codec = B3Codec()
with self.assertRaises(InvalidCarrierException):
codec.inject(span_context=None, carrier=[])
ctx = SpanContext(trace_id=256, span_id=127, parent_id=32, flags=1)
span = Span(context=ctx, operation_name='x', tracer=None, start_time=1)
carrier = {}
codec.inject(span_context=span, carrier=carrier)
assert carrier == {'X-B3-SpanId': format(127, 'x').zfill(16),
'X-B3-ParentSpanId': format(32, 'x').zfill(16),
'X-B3-TraceId': format(256, 'x').zfill(16), 'X-B3-Sampled': '1'}
def test_b3_extract(self):
codec = B3Codec()
with self.assertRaises(InvalidCarrierException):
codec.extract([])
# Implicit case insensitivity testing
carrier = {'X-b3-SpanId': 'a2fb4a1d1a96d312', 'X-B3-ParentSpanId': '0020000000000001',
'X-B3-traceId': '463ac35c9f6413ad48485a3953bb6124', 'X-B3-flags': '1'}
span_context = codec.extract(carrier)
assert span_context.span_id == int('a2fb4a1d1a96d312', 16)
assert span_context.trace_id == int('463ac35c9f6413ad48485a3953bb6124', 16)
assert span_context.parent_id == int('0020000000000001', 16)
assert span_context.flags == 0x02
# validate that missing parentspanid does not cause an error
carrier.pop('X-B3-ParentSpanId')
span_context = codec.extract(carrier)
assert span_context.parent_id is None
carrier.update({'X-b3-sampled': '1'})
span_context = codec.extract(carrier)
assert span_context.flags == 0x03
carrier.pop('X-B3-flags')
span_context = codec.extract(carrier)
assert span_context.flags == 0x01
# validate present debug header with falsy value
carrier = {'X-b3-SpanId': 'a2fb4a1d1a96d312', 'X-B3-flags': '0',
'X-B3-traceId': '463ac35c9f6413ad48485a3953bb6124'}
span_context = codec.extract(carrier)
assert span_context.flags == 0x00
# validate missing context
assert codec.extract({}) is None
# validate explicit none in context
carrier = {'X-b3-SpanId': None,
'X-B3-traceId': '463ac35c9f6413ad48485a3953bb6124'}
assert codec.extract(carrier) is None
# validate invalid hex string
with self.assertRaises(SpanContextCorruptedException):
codec.extract({'x-B3-TraceId': 'a2fb4a1d1a96d312z'})
# validate non-string header
with self.assertRaises(SpanContextCorruptedException):
codec.extract({'x-B3-traceId': 123})
def test_zipkin_b3_codec_extract_injected(self):
codec = B3Codec()
ctx = SpanContext(trace_id=256, span_id=127, parent_id=None, flags=0)
span = Span(context=ctx, operation_name='x', tracer=None, start_time=1)
carrier = {}
codec.inject(span_context=span, carrier=carrier)
extracted = codec.extract(carrier)
assert extracted.trace_id == ctx.trace_id
assert extracted.span_id == ctx.span_id
assert extracted.parent_id == ctx.parent_id
assert extracted.flags == ctx.flags
def test_128bit_trace_id_with_zero_padding(self):
codec = B3Codec(generate_128bit_trace_id=True)
carrier_1 = {'X-B3-SpanId': '39fe73de0012a0e5', 'X-B3-ParentSpanId': '3dbf8a511e159b05',
'X-B3-TraceId': '023f352eaefd8b887a06732f5312e2de', 'X-B3-Flags': '0'}
span_context = codec.extract(carrier_1)
carrier_2 = {}
codec.inject(span_context=span_context, carrier=carrier_2)
assert carrier_1['X-B3-TraceId'] == carrier_2['X-B3-TraceId']
def test_binary_codec(self):
codec = BinaryCodec()
with self.assertRaises(InvalidCarrierException):
codec.inject({}, {})
with self.assertRaises(InvalidCarrierException):
codec.extract({})
tracer = Tracer(
service_name='test',
reporter=InMemoryReporter(),
sampler=ConstSampler(True),
)
baggage = {'baggage_1': u'data',
u'baggage_2': 'foobar',
'baggage_3': '\x00\x01\x09\xff',
u'baggage_4': u'\U0001F47E'}
span_context = SpanContext(trace_id=260817200211625699950706086749966912306, span_id=567890,
parent_id=1234567890, flags=1,
baggage=baggage)
carrier = bytearray()
tracer.inject(span_context, Format.BINARY, carrier)
assert len(carrier) != 0
extracted_span_context = tracer.extract(Format.BINARY, carrier)
assert extracted_span_context.trace_id == span_context.trace_id
assert extracted_span_context.span_id == span_context.span_id
assert extracted_span_context.parent_id == span_context.parent_id
assert extracted_span_context.flags == span_context.flags
assert extracted_span_context.baggage == span_context.baggage
def test_binary_codec_extract_compatibility_with_golang_client(self):
tracer = Tracer(
service_name='test',
reporter=InMemoryReporter(),
sampler=ConstSampler(True),
)
tests = {
b'\x00\x00\x00\x00\x00\x00\x00\x00u\x18\xa9\x13\xa0\xd2\xaf4u\x18\xa9\x13\xa0\xd2\xaf4'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00':
{'trace_id_high': 0,
'trace_id_low': 8437679803646258996,
'span_id': 8437679803646258996,
'parent_id': None,
'flags': 1,
'baggage_count': 0,
'baggage': {}},
b'K2\x88\x8b\x8f\xb5\x96\xe9+\xc6\xe6\xf5\x9d\xed\x8a\xd0+\xc6\xe6\xf5\x9d\xed\x8a\xd0'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00':
{'trace_id_high': 5418543434673002217,
'trace_id_low': 3154462531610577616,
'span_id': 3154462531610577616,
'parent_id': None,
'flags': 1,
'baggage_count': 0,
'baggage': {}},
b'd\xb7^Y\x1afI\x0bi\xe4lc`\x1e\xbep[\x0fw\xc8\x87\xfd\xb2Ti\xe4lc`\x1e\xbep\x01\x00'
b'\x00\x00\x00':
{'trace_id_high': 7257373061318854923,
'trace_id_low': 7630342842742652528,
'span_id': 6561594885260816980,
'parent_id': 7630342842742652528,
'flags': 1,
'baggage_count': 0,
'baggage': {}},
b'a]\x85\xe0\xe0\x06\xd5[6k\x9d\x86\xaa\xbc\\\x8f#c\x06\x80jV\xdf\x826k\x9d\x86\xaa\xbc'
b'\\\x8f\x01\x00\x00\x00\x01\x00\x00\x00\x07key_one\x00\x00\x00\tvalue_one':
{'trace_id_high': 7015910995390813531,
'trace_id_low': 3921401102271798415,
'span_id': 2549888962631491458,
'parent_id': 3921401102271798415,
'flags': 1,
'baggage_count': 1,
'baggage': {'key_one': 'value_one'}
},
}
for span_context_serialized, expected in tests.items():
span_context = tracer.extract(Format.BINARY, bytearray(span_context_serialized))
            # Because Python stores the 128-bit trace ID as a single integer while Go splits it
            # into two 64-bit integers, split the Python value here to compare with the Go output.
max_int64 = 0xFFFFFFFFFFFFFFFF
trace_id_high = (span_context.trace_id >> 64) & max_int64
trace_id_low = span_context.trace_id & max_int64
assert trace_id_high == expected['trace_id_high']
assert trace_id_low == expected['trace_id_low']
assert span_context.span_id == expected['span_id']
assert span_context.parent_id == expected['parent_id']
assert span_context.flags == expected['flags']
assert len(span_context.baggage) == expected['baggage_count']
assert span_context.baggage == expected['baggage']
carrier = bytearray()
tracer.inject(span_context, Format.BINARY, carrier)
assert carrier == bytearray(span_context_serialized)
def test_default_baggage_without_trace_id(tracer):
_test_baggage_without_trace_id(
tracer=tracer,
trace_id_header='Trace_ID',
baggage_header_prefix='Trace-baggage-',
headers={
'Trace-ID': '1:7f:0:1',
'trace-baggage-Kiff': 'Amy',
'trace-BAGGAGE-HERMES': 'LaBarbara',
},
match={
'kiff': 'Amy',
'hermes': 'LaBarbara',
},
)
def test_ad_hoc_baggage_without_trace_id(tracer):
_test_baggage_without_trace_id(
tracer=tracer,
trace_id_header='Trace_ID',
baggage_header_prefix='Trace-baggage-',
headers={
'Trace-ID': '1:7f:0:1',
'jaeger-baggage': 'kiff=Amy, hermes=LaBarbara, bender=Bender',
},
match={
'kiff': 'Amy',
'hermes': 'LaBarbara',
'bender': 'Bender',
},
)
def _test_baggage_without_trace_id(tracer, trace_id_header, baggage_header_prefix, headers, match):
codec = TextCodec(
trace_id_header=trace_id_header,
baggage_header_prefix=baggage_header_prefix,
)
with mock.patch('jaeger_client.codecs.span_context_from_string') as \
from_str:
from_str.return_value = (0, 1, 1, 1) # make trace ID == 0 (i.e. invalid)
span_context = codec.extract(headers)
span = tracer.start_span('test', child_of=span_context)
assert span.context.baggage == match
# also check baggage through API
for k, v in match.items():
assert span.get_baggage_item(k) == v
@pytest.mark.parametrize('fmt,carrier', [
(Format.TEXT_MAP, {}),
(Format.HTTP_HEADERS, {}),
(ZipkinSpanFormat, {}),
])
def test_round_trip(tracer, fmt, carrier):
tracer_128bit = Tracer(
service_name='test',
reporter=InMemoryReporter(),
sampler=ConstSampler(True),
generate_128bit_trace_id=True)
for tracer1, tracer2 in product([tracer, tracer_128bit], repeat=2):
span = tracer1.start_span('test-%s' % fmt)
tracer1.inject(span, fmt, carrier)
context = tracer2.extract(fmt, carrier)
span2 = tracer2.start_span('test-%s' % fmt, child_of=context)
assert span.trace_id == span2.trace_id
def _text_codec_to_trace_id_string(carrier):
return carrier[constants.TRACE_ID_HEADER].split(':')[0]
def _zipkin_codec_to_trace_id_string(carrier):
return '{:x}'.format(carrier['trace_id'])
@pytest.mark.parametrize('fmt,carrier,get_trace_id', [
(Format.TEXT_MAP, {}, _text_codec_to_trace_id_string),
(Format.HTTP_HEADERS, {}, _text_codec_to_trace_id_string),
(ZipkinSpanFormat, {}, _zipkin_codec_to_trace_id_string),
])
def test_inject_with_128bit_trace_id(tracer, fmt, carrier, get_trace_id):
tracer_128bit = Tracer(
service_name='test',
reporter=InMemoryReporter(),
sampler=ConstSampler(True),
generate_128bit_trace_id=True)
for tracer in [tracer, tracer_128bit]:
length = tracer.max_trace_id_bits / 4
trace_id = (1 << 64) - 1 if length == 16 else (1 << 128) - 1
ctx = SpanContext(trace_id=trace_id, span_id=127, parent_id=None,
flags=1)
span = Span(ctx, operation_name='test-%s' % fmt, tracer=None, start_time=1)
tracer.inject(span, fmt, carrier)
assert len(get_trace_id(carrier)) == length
        # test that the trace_id that arrived on the wire stays the same even if
        # the tracer is configured for 64-bit or 128-bit IDs
ctx = SpanContext(trace_id=(1 << 128) - 1, span_id=127, parent_id=None,
flags=0)
span = tracer.start_span('test-%s' % fmt, child_of=ctx)
carrier = dict()
tracer.inject(span, fmt, carrier)
assert len(get_trace_id(carrier)) == 32
ctx = SpanContext(trace_id=(1 << 64) - 1, span_id=127, parent_id=None,
flags=0)
span = tracer.start_span('test-%s' % fmt, child_of=ctx)
carrier = dict()
tracer.inject(span, fmt, carrier)
assert len(get_trace_id(carrier)) == 16
def test_debug_id():
debug_header = 'correlation-id'
tracer = Tracer(
service_name='test',
reporter=InMemoryReporter(),
sampler=ConstSampler(True),
debug_id_header=debug_header,
)
tracer.codecs[Format.TEXT_MAP] = TextCodec(
url_encoding=False,
debug_id_header=debug_header,
)
carrier = {debug_header: 'Coraline'}
context = tracer.extract(Format.TEXT_MAP, carrier)
assert context.is_debug_id_container_only
assert context.debug_id == 'Coraline'
span = tracer.start_span('test', child_of=context)
assert span.is_debug()
assert span.is_sampled()
tags = [t for t in span.tags if t.key == debug_header]
assert len(tags) == 1
assert tags[0].vStr == 'Coraline'
def test_baggage_as_unicode_strings_with_httplib(httpserver):
import urllib.request
urllib_under_test = urllib.request
# httpserver is provided by pytest-localserver
httpserver.serve_content(content='Hello', code=200, headers=None)
tracer = Tracer(
service_name='test',
reporter=InMemoryReporter(),
# don't sample to avoid logging baggage to the span
sampler=ConstSampler(False),
)
tracer.codecs[Format.TEXT_MAP] = TextCodec(url_encoding=True)
baggage = [
(b'key1', b'value'),
(u'key2', b'value'),
('key3', u'value'),
(b'key4', bytes([255])),
(u'key5', u'\U0001F47E')
]
for b in baggage:
span = tracer.start_span('test')
span.set_baggage_item(b[0], b[1])
headers = {}
tracer.inject(span_context=span.context,
format=Format.TEXT_MAP,
carrier=headers)
# make sure httplib doesn't blow up
request = urllib_under_test.Request(httpserver.url, None, headers)
response = urllib_under_test.urlopen(request)
assert response.read() == b'Hello'
response.close()
|
teuthology/orchestra/test/util.py
|
varshar16/teuthology
| 117 |
135471
|
def assert_raises(excClass, callableObj, *args, **kwargs):
"""
Like unittest.TestCase.assertRaises, but returns the exception.
"""
try:
callableObj(*args, **kwargs)
except excClass as e:
return e
else:
if hasattr(excClass,'__name__'): excName = excClass.__name__
else: excName = str(excClass)
raise AssertionError("%s not raised" % excName)
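# Illustrative usage sketch (not part of the original module): assert_raises()
# returns the exception instance, so its attributes can be inspected afterwards.
if __name__ == '__main__':
    err = assert_raises(ZeroDivisionError, lambda: 1 / 0)
    assert 'division' in str(err)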
|
PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/util/_doctools.py
|
AbdulHoffmann/carla_carissma
| 6,989 |
135472
|
import numpy as np
import pandas.compat as compat
import pandas as pd
class TablePlotter(object):
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""
        Calculate table shape considering index levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
"""
Calculate appropriate figure size based on left and right data.
"""
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(l)[0] for l in left),
self._shape(right)[0])
hcells = (max(self._shape(l)[1] for l in left) +
self._shape(right)[1])
else:
vcells = max([self._shape(l)[0] for l in left] +
[self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] +
[self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max(self._shape(l)[1] for l in left)
max_left_rows = max(self._shape(l)[0] for l in left)
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label,
height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max(self._shape(df)[0] for df in left + [right])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i + sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i),
data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [data.columns._get_level_values(i).values
for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
        # must be converted here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
class _WritableDoc(type):
# Remove this when Python2 support is dropped
# __doc__ is not mutable for new-style classes in Python2, which means
# we can't use @Appender to share class docstrings. This can be used
# with `add_metaclass` to make cls.__doc__ mutable.
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
|
corehq/apps/translations/app_translations/upload_app.py
|
dimagilg/commcare-hq
| 471 |
135492
|
import io
from django.contrib import messages
from django.template.defaultfilters import linebreaksbr
from django.utils.translation import ugettext as _
import ghdiff
from CommcareTranslationChecker import validate_workbook
from CommcareTranslationChecker.exceptions import FatalError
from corehq.apps.app_manager.exceptions import (
FormNotFoundException,
ModuleNotFoundException,
)
from corehq.apps.hqwebapp.tasks import send_html_email_async
from corehq.apps.translations.app_translations.upload_form import (
BulkAppTranslationFormUpdater,
)
from corehq.apps.translations.app_translations.upload_module import (
BulkAppTranslationModuleUpdater,
)
from corehq.apps.translations.app_translations.utils import (
BulkAppTranslationUpdater,
get_bulk_app_sheet_headers,
get_menu_or_form_by_sheet_name,
get_menu_or_form_by_unique_id,
get_unicode_dicts,
is_form_sheet,
is_module_sheet,
is_modules_and_forms_sheet,
is_single_sheet,
is_single_sheet_workbook,
)
from corehq.apps.translations.const import (
MODULES_AND_FORMS_SHEET_NAME,
SINGLE_SHEET_NAME,
)
from corehq.apps.translations.exceptions import BulkAppTranslationsException
from corehq.util.files import read_workbook_content_as_file
from corehq.util.workbook_json.excel import (
WorkbookJSONError,
get_single_worksheet,
)
def validate_bulk_app_translation_upload(app, workbook, email, lang_to_compare, file_obj):
from corehq.apps.translations.validator import UploadedTranslationsValidator
msgs = UploadedTranslationsValidator(app, workbook, lang_to_compare).compare()
checker_messages, result_wb = run_translation_checker(file_obj)
if msgs or checker_messages:
_email_app_translations_discrepancies(msgs, checker_messages, email, app.name, result_wb)
return [(messages.error, _("Issues found. You should receive an email shortly."))]
else:
return [(messages.success, _("No issues found."))]
def run_translation_checker(file_obj):
translation_checker_messages = []
result_wb = None
try:
result_wb, translation_checker_messages = validate_workbook(file_obj)
except FatalError as e:
translation_checker_messages.append(
_("Workbook check failed to finish due to the following error : %s" % e))
return translation_checker_messages, result_wb
def _email_app_translations_discrepancies(msgs, checker_messages, email, app_name, result_wb):
"""
:param msgs: messages for app translation discrepancies
:param checker_messages: messages for issues found by translation checker
:param email: email to
:param app_name: name of the application
:param result_wb: result wb of translation checker to attach with the email
"""
def form_email_content(msgs, checker_messages):
if msgs:
html_file_content = ghdiff.default_css
for sheet_name, msg in msgs.items():
html_file_content += "<strong>{}</strong>".format(sheet_name) + msg
text_content = _("Hi, PFA file for discrepancies found for app translations.") + "\n"
else:
html_file_content = None
text_content = _("Hi, No discrepancies found for app translations.") + "\n"
if checker_messages:
text_content += _("Issues found with the workbook are as follows :") + "\n"
text_content += '\n'.join([_(msg) for msg in checker_messages])
else:
text_content += _("No issues found with the workbook.")
return html_file_content, text_content
def attachment(title, content, mimetype='text/html'):
return {'title': title, 'file_obj': content, 'mimetype': mimetype}
subject = _("App Translations Discrepancies for {}").format(app_name)
html_file_content, text_content = form_email_content(msgs, checker_messages)
attachments = []
if html_file_content:
attachments.append(attachment("{} Discrepancies.html".format(app_name), io.StringIO(html_file_content)))
if result_wb:
attachments.append(attachment("{} TranslationChecker.xlsx".format(app_name),
io.BytesIO(read_workbook_content_as_file(result_wb)), result_wb.mime_type))
send_html_email_async.delay(subject, email, linebreaksbr(text_content), file_attachments=attachments)
def process_bulk_app_translation_upload(app, workbook, sheet_name_to_unique_id, lang=None):
"""
Process the bulk upload file for the given app.
We return these message tuples instead of calling them now to allow this
function to be used independently of request objects.
:return: Returns a list of message tuples. The first item in each tuple is
a function like django.contrib.messages.error, and the second is a string.
"""
def get_expected_headers(sheet_name):
# This function does its best to return the headers we expect, based
# on the current app, for an uploaded sheet. If the sheet is old, it
# might not include the unique IDs of the modules/forms. In that case
# `sheet_name_to_unique_id` will be empty and we fall back to using the
# name of the sheet and hope that modules/forms have not been moved
# since the sheet was originally downloaded.
#
# If a user created a new sheet, or renamed a sheet, or a form/module
# has been deleted since this sheet was downloaded, then expected
# headers will not be found. We return an empty list, and
# `_check_for_sheet_error()` will handle it.
if sheet_name in sheet_name_to_unique_id:
unique_id = sheet_name_to_unique_id[sheet_name]
if unique_id in expected_headers_by_id:
return expected_headers_by_id[unique_id]
return expected_headers_by_sheet_name.get(sheet_name, [])
msgs = []
single_sheet = is_single_sheet_workbook(workbook)
expected_headers_by_sheet_name = {k: v for k, v in get_bulk_app_sheet_headers(app, single_sheet=single_sheet,
lang=lang)}
expected_headers_by_id = {k: v for k, v in get_bulk_app_sheet_headers(app, single_sheet=single_sheet,
lang=lang, by_id=True)}
processed_sheets = set()
for sheet in workbook.worksheets:
expected_headers = get_expected_headers(sheet.worksheet.title)
try:
_check_for_sheet_error(sheet, expected_headers, processed_sheets)
except BulkAppTranslationsException as e:
msgs.append((messages.error, str(e)))
continue
processed_sheets.add(sheet.worksheet.title)
warnings = _check_for_sheet_warnings(sheet, expected_headers)
for warning in warnings:
msgs.append((messages.warning, warning))
if is_single_sheet(sheet.worksheet.title):
msgs.extend(_process_single_sheet(app, sheet, names_map=sheet_name_to_unique_id, lang=lang))
else:
msgs.extend(_process_rows(app, sheet.worksheet.title, sheet, names_map=sheet_name_to_unique_id))
msgs.append(
(messages.success, _("App Translations Updated!"))
)
return msgs
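# Illustrative only (not part of the original module): each tuple returned by
# process_bulk_app_translation_upload() pairs a function like
# django.contrib.messages.error with a string, so a caller that does have a request
# object could apply them as below. The helper name and `request` argument are
# assumptions made for this sketch.
def _apply_upload_messages(request, msgs):
    for add_message, text in msgs:
        add_message(request, text)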
def get_sheet_name_to_unique_id_map(file_or_filename, lang):
"""
Returns a map of sheet names to unique IDs, so that when modules or
forms have been moved we can use their ID and not their (changed) name.
This function is called before we process the upload so that we can use
the sheet-name-to-unique-ID map to check the sheets before they are
processed.
`file_or_filename` is a file not a workbook because we read uploaded
Excel files using WorkbookJSONReader, and it can only iterate sheet
rows once. This function opens its own Reader to parse the first sheet.
"""
def get_sheet_name():
return MODULES_AND_FORMS_SHEET_NAME if is_multisheet() else SINGLE_SHEET_NAME
def is_multisheet():
return not lang
def is_modules_and_forms_row(row):
"""
Returns the rows about modules and forms in single-sheet uploads.
They are the rows that include the unique IDs.
"""
return not row['case_property'] and not row['list_or_detail'] and not row['label']
sheet_name_to_unique_id = {}
try:
worksheet = get_single_worksheet(file_or_filename, title=get_sheet_name())
except WorkbookJSONError:
# There is something wrong with the file. The problem will happen
# again when we try to process the upload. To preserve current
# behaviour, just return silently.
return sheet_name_to_unique_id
if is_multisheet():
rows = worksheet
else:
rows = (row for row in worksheet if is_modules_and_forms_row(row))
for row in get_unicode_dicts(rows):
sheet_name = row.get('menu_or_form', '')
unique_id = row.get('unique_id')
if unique_id and sheet_name not in sheet_name_to_unique_id:
sheet_name_to_unique_id[sheet_name] = unique_id
return sheet_name_to_unique_id
def _process_single_sheet(app, sheet, names_map, lang=None):
"""
A single-sheet translation file deals with only one language, and
fits all the items to be translated onto the same sheet. All items
share the same columns. If the column is not applicable to the row,
it is left empty.
:param app: The application being translated
:param sheet: The worksheet containing the translations
:param names_map: A map of sheet_name (like "menu1" or "menu1_form1") to
module/form unique_id, used to fetch a module/form
even if it has been moved since the worksheet was created
:param lang: The language that the app is being translated into
:return: A list of error messages or an empty list
"""
msgs = []
module_or_form = None
modules_and_forms_rows = []
rows = []
for row in sheet:
if not row['case_property'] and not row['list_or_detail'] and not row['label']:
modules_and_forms_rows.append(row)
elif module_or_form != row['menu_or_form']:
msgs.extend(_process_rows(app, module_or_form, rows, names_map, lang=lang))
module_or_form = row['menu_or_form']
rows = [row]
else:
rows.append(row)
msgs.extend(_process_rows(app, module_or_form, rows, names_map, lang=lang))
msgs.extend(_process_rows(app, MODULES_AND_FORMS_SHEET_NAME,
modules_and_forms_rows, names_map, lang=lang))
return msgs
def _process_rows(app, sheet_name, rows, names_map, lang=None):
"""
Processes the rows of a worksheet of translations.
This is the complement of get_bulk_app_sheets_by_name() and
get_bulk_app_single_sheet_by_name(), from
corehq/apps/translations/app_translations/download.py, which creates
these worksheets and rows.
:param app: The application being translated
:param sheet_name: The tab name of the sheet being processed.
e.g. "menu1", "menu1_form1", or "Menus_and_forms"
:param rows: The rows in the worksheet
:param names_map: A map of sheet_name to module/form unique_id, used
to fetch a module/form even if it has been moved
since the worksheet was created
:param lang: The language that the app is being translated into
:return: A list of error messages or an empty list
"""
if not sheet_name or not rows:
return []
if is_modules_and_forms_sheet(sheet_name):
updater = BulkAppTranslationModulesAndFormsUpdater(app, names_map, lang=lang)
return updater.update(rows)
if is_module_sheet(sheet_name):
unique_id = names_map.get(sheet_name)
try:
updater = BulkAppTranslationModuleUpdater(app, sheet_name, unique_id, lang=lang)
except ModuleNotFoundException:
return [(
messages.error,
_('Invalid menu in row "%s", skipping row.') % sheet_name
)]
return updater.update(rows)
if is_form_sheet(sheet_name):
unique_id = names_map.get(sheet_name)
try:
updater = BulkAppTranslationFormUpdater(app, sheet_name, unique_id, lang=lang)
except FormNotFoundException:
return [(
messages.error,
_('Invalid form in row "%s", skipping row.') % sheet_name
)]
return updater.update(rows)
return [(
messages.error,
_('Did not recognize "%s", skipping row.') % sheet_name
)]
def _check_for_sheet_error(sheet, expected_headers, processed_sheets=Ellipsis):
if sheet.worksheet.title in processed_sheets:
raise BulkAppTranslationsException(_('Sheet "%s" was repeated. Only the first occurrence has been '
'processed.') % sheet.worksheet.title)
if not expected_headers:
raise BulkAppTranslationsException(_('Skipping sheet "%s", could not recognize title') %
sheet.worksheet.title)
num_required_headers = 0
if is_modules_and_forms_sheet(sheet.worksheet.title):
num_required_headers = 1 # type
elif is_module_sheet(sheet.worksheet.title):
num_required_headers = 2 # case property, list or detail
elif is_form_sheet(sheet.worksheet.title):
num_required_headers = 1 # label
elif is_single_sheet(sheet.worksheet.title):
num_required_headers = 4 # menu or form, case property, list or detail, label
expected_required_headers = tuple(expected_headers[:num_required_headers])
actual_required_headers = tuple(sheet.headers[:num_required_headers])
if expected_required_headers != actual_required_headers:
raise BulkAppTranslationsException(_('Skipping sheet {title}: expected first columns to be '
'{expected}').format(
title=sheet.worksheet.title,
expected=", ".join(expected_required_headers)))
def _check_for_sheet_warnings(sheet, expected_headers):
warnings = []
missing_cols = set(expected_headers) - set(sheet.headers)
extra_cols = set(sheet.headers) - set(expected_headers)
if len(missing_cols) > 0:
warnings.append((_('Sheet "{sheet}" has fewer columns than expected. Sheet will be processed but the '
'following translations will be unchanged: {columns}').format(sheet=sheet.worksheet.title,
columns=", ".join(missing_cols))))
if len(extra_cols) > 0:
warnings.append(_('Sheet "{sheet}" has unrecognized columns. Sheet will be processed but will ignore the '
'following columns: {columns}').format(sheet=sheet.worksheet.title, columns=", ".join(extra_cols)))
return warnings
class BulkAppTranslationModulesAndFormsUpdater(BulkAppTranslationUpdater):
def __init__(self, app, names_map, lang=None):
super(BulkAppTranslationModulesAndFormsUpdater, self).__init__(app, lang)
self.sheet_name_to_unique_id = names_map
def update(self, rows):
"""
This handles updating module/form names and menu media
(the contents of the "Menus and forms" sheet in the multi-tab upload).
"""
self.msgs = []
for row in get_unicode_dicts(rows):
sheet_name = row.get('menu_or_form', '')
# The unique_id column is populated on the "Menus_and_forms" sheet in multi-sheet translation files,
# and in the "name / menu media" row in single-sheet translation files.
unique_id = row.get('unique_id')
if not unique_id and sheet_name in self.sheet_name_to_unique_id:
# If we don't have a value for unique_id, try to fetch it from self.sheet_name_to_unique_id
unique_id = self.sheet_name_to_unique_id[sheet_name]
try:
if unique_id:
document = get_menu_or_form_by_unique_id(self.app, unique_id, sheet_name)
else:
document = get_menu_or_form_by_sheet_name(self.app, sheet_name)
except (ModuleNotFoundException, FormNotFoundException, ValueError) as err:
self.msgs.append((messages.error, str(err)))
continue
self.update_translation_dict('default_', document.name, row)
# Update menu media
for lang in self.langs:
image_header = 'image_%s' % lang
if image_header in row:
document.set_icon(lang, row[image_header])
audio_header = 'audio_%s' % lang
if audio_header in row:
document.set_audio(lang, row[audio_header])
return self.msgs
|
torchsso/autograd/samplegrad.py
|
jjxu217/pytorch-sso
| 121 |
135495
|
<gh_stars>100-1000
from contextlib import contextmanager
import torch
import torch.nn as nn
import torch.nn.functional as F
@contextmanager
def save_sample_grads(model: nn.Module):
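    """Collect per-sample gradients for supported child modules.

    While the context is active, forward/backward hooks are registered on the model's
    direct children (Linear, Conv2d, BatchNorm1d/2d); after a backward pass inside the
    context, each supported parameter ``p`` carries a ``p.grads`` tensor of shape
    ``(batch_size, *p.shape)``.

    A minimal usage sketch (model, data and shapes are illustrative, not from this repo):

        model = nn.Sequential(nn.Linear(10, 2))
        x, y = torch.randn(8, 10), torch.randn(8, 2)
        with save_sample_grads(model):
            F.mse_loss(model(x), y, reduction='sum').backward()
        per_sample = model[0].weight.grads  # shape (8, 2, 10)
    """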
handles = []
for module in model.children():
params = list(module.parameters())
params = [p for p in params if p.requires_grad]
if len(params) == 0:
continue
handles.append(module.register_forward_hook(_forward_postprocess))
handles.append(module.register_backward_hook(_backward_postprocess))
yield
for handle in handles:
handle.remove()
def _forward_postprocess(module: nn.Module, input: torch.Tensor, output: torch.Tensor):
data_input = input[0].clone().detach()
if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
bnorm = module
f = bnorm.num_features
if isinstance(module, nn.BatchNorm1d):
shape = (1, f)
elif isinstance(module, nn.BatchNorm2d):
shape = (1, f, 1, 1)
else:
shape = (1, f, 1, 1, 1)
# restore normalized input
data_input_norm = (output - bnorm.bias.view(shape)).div(bnorm.weight.view(shape))
data_input = data_input_norm
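        # note: the per-sample gradients of the affine parameters (see grad_batchnorm1d/2d
        # below) are taken w.r.t. the normalized input, which is why it is reconstructed
        # from the output here rather than storing the raw input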
setattr(module, 'data_input', data_input)
def _backward_postprocess(module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor):
grad_output = grad_output[0].clone().detach()
data_input = getattr(module, 'data_input', None)
assert data_input is not None, 'backward is called before forward.'
assert data_input.size(0) == grad_output.size(0)
args = [module, data_input, grad_output]
if isinstance(module, nn.Linear):
grad_linear(*args)
elif isinstance(module, nn.Conv2d):
grad_conv2d(*args)
elif isinstance(module, nn.BatchNorm1d):
grad_batchnorm1d(*args)
elif isinstance(module, nn.BatchNorm2d):
grad_batchnorm2d(*args)
else:
raise ValueError(f'Unsupported module class: {module.__class__}.')
def grad_linear(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.Linear)
linear = module
assert data_input.ndimension() == 2 # n x f_in
assert grad_output.ndimension() == 2 # n x f_out
if linear.weight.requires_grad:
grads = torch.einsum('bi,bj->bij', grad_output, data_input) # n x f_out x f_in
setattr(linear.weight, 'grads', grads) # n x f_out x f_in
    if linear.bias is not None and linear.bias.requires_grad:
setattr(linear.bias, 'grads', grad_output) # n x f_out
def grad_conv2d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.Conv2d)
conv2d = module
assert data_input.ndimension() == 4 # n x c_in x h_in x w_in
assert grad_output.ndimension() == 4 # n x c_out x h_out x w_out
if conv2d.weight.requires_grad:
# n x (c_in)(k_h)(k_w) x (h_out)(w_out)
input2d = F.unfold(data_input,
kernel_size=conv2d.kernel_size, stride=conv2d.stride,
padding=conv2d.padding, dilation=conv2d.dilation)
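        # each of the (h_out)(w_out) columns of input2d is the flattened receptive-field
        # patch for one output position; the einsum below contracts over those positions
        # to form the per-sample weight gradient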
# n x c_out x h_out x w_out
n, c_out, h, w = grad_output.size()
# n x c_out x (h_out)(w_out)
grad_output2d = grad_output.view(n, c_out, -1)
c_out, c_in, k_h, k_w = conv2d.weight.size()
grads_2d = torch.einsum('bik,bjk->bij', grad_output2d, input2d) # n x c_out x (c_in)(k_h)(k_w)
setattr(conv2d.weight, 'grads', grads_2d.view(n, c_out, c_in, k_h, k_w)) # n x c_out x c_in x k_h x k_w
    if conv2d.bias is not None and conv2d.bias.requires_grad:
setattr(conv2d.bias, 'grads', grad_output.sum(dim=(2, 3))) # n x c_out
def grad_batchnorm1d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.BatchNorm1d)
batchnorm1d = module
assert data_input.ndimension() == 2 # n x f
assert grad_output.ndimension() == 2 # n x f
assert batchnorm1d.affine
if batchnorm1d.weight.requires_grad:
grads = data_input.mul(grad_output) # n x f
setattr(batchnorm1d.weight, 'grads', grads)
if batchnorm1d.bias.requires_grad:
setattr(batchnorm1d.bias, 'grads', grad_output) # n x f
def grad_batchnorm2d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):
assert isinstance(module, nn.BatchNorm2d)
batchnorm2d = module
assert data_input.ndimension() == 4 # n x c x h x w
assert grad_output.ndimension() == 4 # n x c x h x w
assert batchnorm2d.affine
if batchnorm2d.weight.requires_grad:
grads = data_input.mul(grad_output).sum(dim=(2, 3)) # n x c
setattr(batchnorm2d.weight, 'grads', grads)
if batchnorm2d.bias.requires_grad:
setattr(batchnorm2d.bias, 'grads', grad_output.sum(dim=(2, 3))) # n x c
|
conans/test/conan_v2/tools/test_tools_win.py
|
matthiasng/conan
| 6,205 |
135499
|
<reponame>matthiasng/conan
import six
from conans import tools
from conans.errors import ConanV2Exception
from conans.test.utils.conan_v2_tests import ConanV2ModeTestCase
class ToolsWinTestCase(ConanV2ModeTestCase):
def test_msvc_build_command(self):
with six.assertRaisesRegex(self, ConanV2Exception, "Conan v2 incompatible: 'tools.msvc_build_command' is deprecated"):
tools.msvc_build_command(settings=None, sln_path=None)
def test_build_sln_command(self):
with six.assertRaisesRegex(self, ConanV2Exception, "Conan v2 incompatible: 'tools.build_sln_command' is deprecated"):
tools.build_sln_command(settings=None, sln_path=None)
|
vlcp/service/sdn/l3router.py
|
hubo1016/vlcp
| 252 |
135510
|
<filename>vlcp/service/sdn/l3router.py
import itertools
import time
import vlcp.service.sdn.ioprocessing as iop
from vlcp.config import defaultconfig
from vlcp.event import Event
from vlcp.event import RoutineContainer
from vlcp.event import withIndices
from vlcp.protocol.openflow import OpenflowAsyncMessageEvent
from vlcp.protocol.openflow import OpenflowConnectionStateEvent
from vlcp.server.module import call_api, depend
from vlcp.service.kvdb import objectdb
from vlcp.service.sdn.flowbase import FlowBase
from vlcp.service.sdn.ofpmanager import FlowInitialize
from vlcp.service.sdn import arpresponder
from vlcp.service.sdn import icmpresponder
from vlcp.utils.dataobject import set_new, WeakReferenceObject
from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes, ip4_addr, arp_packet_l4, mac_addr, ethernet_l4, \
ethernet_l7
from vlcp.utils.flowupdater import FlowUpdater
from vlcp.utils.netutils import parse_ip4_network,get_netmask, parse_ip4_address, ip_in_network
from vlcp.utils.networkmodel import VRouter, RouterPort, SubNet, SubNetMap,DVRouterForwardInfo, \
DVRouterForwardSet, DVRouterForwardInfoRef, DVRouterExternalAddressInfo, LogicalNetworkMap, LogicalNetwork, \
LogicalPort
from contextlib import closing, suppress
from vlcp.event.event import M_
from vlcp.utils.exceptions import WalkKeyNotRetrieved
@withIndices("connection")
class ARPRequest(Event):
pass
class RouterUpdater(FlowUpdater):
def __init__(self, connection, parent):
super(RouterUpdater, self).__init__(connection, (), ("routerupdater", connection), parent._logger)
self._parent = parent
self._lastlogicalport = dict()
self._lastlogicalnet = dict()
self._lastphyport = dict()
self._lastphynet = dict()
self._lastrouterinfo = dict()
self._lastsubnetinfo = dict()
self._lastlgportinfo = dict()
self._lastexternallgportinfo = dict()
self._lastrouterstoreinterfacenetinfo = dict()
self._lastnetworkrouterinfo = dict()
self._lastnetworkroutertableinfo = dict()
self._lastnetworkstaticroutesinfo = dict()
self._laststaticroutes = dict()
self._lastallrouterinfo = dict()
self._laststoreinfo = dict()
self._lastnetworkforwardinfo = dict()
self._lastdvrforwardinfo = dict()
self._original_keys = ()
self._packet_buffer = dict()
self._arp_cache = dict()
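        # _arp_cache maps (logicalnetworkid, ip) -> (status, timeout, isstatic, realmac, cidr),
        # where status 1 = request outstanding, 2 = resolved, 3 = entry needs a refresh;
        # _packet_buffer maps the same key -> list of (packet, buffer_id, expire_time)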
async def main(self):
try:
self.subroutine(self._update_handler(), True, "updater_handler")
self.subroutine(self._router_packetin_handler(), True, "router_packetin_handler")
self.subroutine(self._arp_cache_handler(),True,"arp_cache_handler")
self.subroutine(self._time_cycle_handler(),True,"time_cycle_handler")
if self._parent.enable_router_forward:
self.subroutine(self._keep_forwardinfo_alive_handler(),True,"keep_forwardinfo_alive_handler")
self.subroutine(self._keep_addressinfo_alive_handler(),True,"keep_addressinfo_alive_handler")
await FlowUpdater.main(self)
finally:
if hasattr(self, "updater_handler"):
self.updater_handler.close()
if hasattr(self, "router_packetin_handler"):
self.router_packetin_handler.close()
if hasattr(self,"arp_cache_handler"):
self.arp_cache_handler.close()
if hasattr(self,"time_cycle_handler"):
self.time_cycle_handler.close()
if self._parent.enable_router_forward:
if hasattr(self,"keep_forwardinfo_alive_handler"):
self.keep_forwardinfo_alive_handler.close()
if hasattr(self,"keep_addressinfo_alive_handler"):
self.keep_addressinfo_alive_handler.close()
def _getinterfaceinfo(self,netid):
find = False
mac = None
ip = None
phyportno = None
for _,interfaces in self._lastrouterinfo.values():
for macaddress,ipaddress,_,_,nid,phyport in interfaces:
if netid == nid:
mac = macaddress
ip = ipaddress
phyportno = phyport
find = True
break
if find:
return (mac, ip, phyportno)
else:
return ()
def _getinterfaceinfobynetid(self,netid):
for r,v in self._lastallrouterinfo.items():
for e in v:
_,isexternal,gateway,_,outmac,external_ip,nid,phyport,_,_, _ = e
if nid == netid:
if isexternal:
return outmac, external_ip, phyport
else:
return outmac, gateway, phyport
def _getallinterfaceinfobynetid(self,netid):
router = []
ret_info = dict()
for r, v in self._lastallrouterinfo.items():
for e in v:
if e[6] == netid:
router.append(r)
for r in router:
v = self._lastallrouterinfo[r]
for e in v:
# ret_info.append((e[4], e[6]))
ret_info.setdefault(r, []).append((e[4], e[6]))
return ret_info
async def _keep_forwardinfo_alive_handler(self):
while True:
await self.wait_with_timeout(self._parent.forwardinfo_discover_update_time)
datapath_id = self._connection.openflow_datapathid
vhost = self._connection.protocol.vhost
try:
bridge, system_id, _ = await call_api(self, 'ovsdbmanager', 'waitbridgeinfo', {'datapathid': datapath_id,
'vhost': vhost})
except Exception:
self._logger.warning("OVSDB bridge is not ready", exc_info=True)
return
forward_keys = [DVRouterForwardInfo.default_key(k[0],k[1]) for k in self._laststoreinfo.keys()]
ref_forward_keys = [DVRouterForwardInfoRef.default_key(k[0],k[1]) for k in self._laststoreinfo.keys()]
transact_keys = [DVRouterForwardSet.default_key()] + forward_keys + ref_forward_keys
def updater(keys,values,timestamp):
retdict = {}
for i in range((len(transact_keys) - 1) // 2):
if values[i + 1]:
values[i + 1].info = [e for e in values[i + 1].info
if (e[0], e[1], e[2]) != (system_id, bridge, vhost)
and e[4] > timestamp]
indices = DVRouterForwardInfo._getIndices(keys[i + 1])[1]
e = (system_id,bridge,vhost,list(self._laststoreinfo[(indices[0],indices[1])])[0],
timestamp + self._parent.forwardinfo_discover_update_time * 2 * 1000000)
values[i + 1].info.append(e)
values[i + 1].info = sorted(values[i + 1].info,key=lambda x: x[3])
if values[i + 1].info:
retdict[keys[i + 1]] = values[i + 1]
refe = [e[3] for e in values[i + 1].info]
if values[i + 1 + (len(transact_keys) - 1) // 2].info != refe:
values[i + 1 + (len(transact_keys) - 1) // 2].info = refe
retdict[keys[i + 1 + (len(transact_keys) - 1) // 2]] = \
values[i + 1 + (len(transact_keys) - 1) // 2]
# else:
# # there is no info in this struct , drop it from db
# retdict[keys[i + 1]] = None
# retdict[keys[i + 1 + (len(transact_keys) - 1) // 2]] = None
# if WeakReferenceObject(keys[i + 1 + (len(transact_keys) - 1) // 2]) in \
# values[0].set.dataset():
# values[0].set.dataset().discard(
# WeakReferenceObject(keys[i + 1 + (len(transact_keys) - 1) // 2]))
# retdict[keys[0]] = values[0]
return retdict.keys(), retdict.values()
if forward_keys + ref_forward_keys:
await call_api(self,"objectdb","transact",{"keys":transact_keys,"updater":updater,"withtime":True})
async def _keep_addressinfo_alive_handler(self):
while True:
await self.wait_with_timeout(self._parent.addressinfo_discover_update_time)
datapath_id = self._connection.openflow_datapathid
vhost = self._connection.protocol.vhost
try:
bridge, system_id, _ = await call_api(self, 'ovsdbmanager', 'waitbridgeinfo', {'datapathid': datapath_id,
'vhost': vhost})
except Exception:
self._logger.warning("OVSDB bridge is not ready", exc_info=True)
return
for k,v in self._lastsubnetinfo.items():
if v[1] and v[7] and not v[10]:
allocated_ip_address = v[5]
subnetmapkey = SubNetMap.default_key(k.id)
DVRouterExternalAddressInfokey = DVRouterExternalAddressInfo.default_key()
def updater(keys,values,timestamp):
newlist = []
for e in values[0].info:
                            # remove our own entry; a fresh one with a new timestamp is appended below
if (e[0],e[1],e[2],e[3]) == (system_id,bridge,vhost,values[1].id):
x = (system_id, bridge, vhost, values[1].id, allocated_ip_address,
timestamp + self._parent.addressinfo_discover_update_time * 2 * 1000000)
newlist.append(x)
elif e[5] < timestamp and e[3] == values[1].id:
ipaddress = parse_ip4_address(e[4])
if str(ipaddress) in values[1].allocated_ips:
del values[1].allocated_ips[str(ipaddress)]
else:
newlist.append(e)
values[0].info = newlist
return keys,values
await call_api(self,"objectdb","transact",
{"keys":[DVRouterExternalAddressInfokey,subnetmapkey],
"updater":updater,"withtime":True})
async def _packet_out_message(self,netid,packet,portno):
l2output_next = self._parent._getnexttable('', 'l2output', self._connection.protocol.vhost)
ofdef = self._connection.openflowdef
await self.execute_commands(self._connection,
[
ofdef.ofp_packet_out(
buffer_id=ofdef.OFP_NO_BUFFER,
in_port=ofdef.OFPP_CONTROLLER,
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG4,
netid)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG5,
netid)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG6,
portno)
),
ofdef.nx_action_resubmit(
in_port=ofdef.OFPP_IN_PORT & 0xffff,
table=l2output_next
)
],
data=packet._tobytes()
)
]
)
async def _time_cycle_handler(self):
while True:
await self.wait_with_timeout(self._parent.arp_cycle_time)
ct = int(time.time())
            # check incomplete arp entries; keep sending arp requests every cycle until they time out
            for k, v in list(self._arp_cache.items()):  # copy: entries may be popped while iterating
status,timeout,isstatic,realmac,cidr = v
if isstatic:
if status == 1:
outnetid, request_ip = k
ofdef = self._connection.openflowdef
info = self._getinterfaceinfobynetid(outnetid)
if info:
mac, ipaddress, phyport = info
if phyport:
arp_request_packet = arp_packet_l4(
dl_src=mac_addr(mac),
dl_dst=mac_addr("FF:FF:FF:FF:FF:FF"),
arp_op=ofdef.ARPOP_REQUEST,
arp_sha=mac_addr(mac),
arp_spa=ip4_addr(ipaddress),
arp_tpa=request_ip
)
await self._packet_out_message(outnetid, arp_request_packet, phyport)
if status == 2:
if ct > timeout:
if ct - timeout >= self._parent.static_host_arp_refresh_interval:
realmac = mac_addr("FF:FF:FF:FF:FF:FF")
outnetid, request_ip = k
ofdef = self._connection.openflowdef
info = self._getinterfaceinfobynetid(outnetid)
if info:
mac, ipaddress, phyport = info
if phyport:
arp_request_packet = arp_packet_l4(
dl_src=mac_addr(mac),
dl_dst=realmac,
arp_op=ofdef.ARPOP_REQUEST,
arp_sha=mac_addr(mac),
arp_spa=ip4_addr(ipaddress),
arp_tpa=request_ip
)
await self._packet_out_message(outnetid, arp_request_packet, phyport)
else:
if status == 1:
if ct > timeout:
self._arp_cache.pop(k)
if k in self._packet_buffer:
del self._packet_buffer[k]
else:
# packet out an arp request
outnetid,request_ip= k
ofdef = self._connection.openflowdef
info = self._getinterfaceinfobynetid(outnetid)
if info:
mac,ipaddress,phyport = info
if phyport:
arp_request_packet = arp_packet_l4(
dl_src=mac_addr(mac),
dl_dst=mac_addr("FF:FF:FF:FF:FF:FF"),
arp_op=ofdef.ARPOP_REQUEST,
arp_sha=mac_addr(mac),
arp_spa=ip4_addr(ipaddress),
arp_tpa=request_ip
)
await self._packet_out_message(outnetid,arp_request_packet,phyport)
                                # else: the arp cache never holds an entry whose target network
                                # has no phyport, so that branch can never be reached
else:
self._logger.warning("arp request can find avaliable network %d drop it",outnetid)
self._arp_cache.pop(k)
del self._packet_buffer[k]
if status == 3:
if ct > timeout:
realmac = mac_addr("FF:FF:FF:FF:FF:FF")
# packet out an arp request
outnetid,request_ip= k
ofdef = self._connection.openflowdef
info = self._getinterfaceinfobynetid(outnetid)
if info:
mac,ipaddress,phyport = info
if phyport:
arp_request_packet = arp_packet_l4(
dl_src=mac_addr(mac),
dl_dst=realmac,
arp_op=ofdef.ARPOP_REQUEST,
arp_sha=mac_addr(mac),
arp_spa=ip4_addr(ipaddress),
arp_tpa=request_ip
)
await self._packet_out_message(outnetid,arp_request_packet,phyport)
            # when an arp request never gets a reply, the buffer may still hold timed-out
            # packets, so clean them out here
for k, v in self._packet_buffer.items():
nv = [(p,bid,t) for p,bid,t in v if ct < t]
self._packet_buffer[k] = nv
async def _arp_cache_handler(self):
arp_request_matcher = ARPRequest.createMatcher(connection=self._connection)
arp_incomplete_timeout = self._parent.arp_incomplete_timeout
while True:
ev = await arp_request_matcher
ct = int(time.time())
ipaddress = ev.ipaddress
netid = ev.logicalnetworkid
            # isstatic: this kind of arp entry is added when we arp for static routes and gateways
            # cidr: when a static arp entry is answered, use the cidr to add flows into the switch
isstatic = ev.isstatic
cidr = ev.cidr
if (netid,ipaddress) not in self._arp_cache:
entry = (1,ct + arp_incomplete_timeout,isstatic,"",cidr)
self._arp_cache[(netid,ipaddress)] = entry
ofdef = self._connection.openflowdef
info = self._getinterfaceinfobynetid(netid)
if info:
mac,interface_ip,phyport = info
if phyport:
arp_request_packet = arp_packet_l4(
dl_src=mac_addr(mac),
dl_dst=mac_addr("FF:FF:FF:FF:FF:FF"),
arp_op=ofdef.ARPOP_REQUEST,
arp_sha=mac_addr(mac),
arp_spa=ip4_addr(interface_ip),
arp_tpa=ipaddress
)
self.subroutine(self._packet_out_message(netid,arp_request_packet,phyport))
else:
                    # the logical network has no phyport: don't send an arp request,
                    # drop the arp cache entry and any buffered packets
del self._arp_cache[(netid,ipaddress)]
if (netid,ipaddress) in self._packet_buffer:
del self._packet_buffer[(netid,ipaddress)]
self._logger.warning(" lgnet %r don't have phyport, drop everything to it",netid)
else:
s,_,isstatic,mac,cidr = self._arp_cache[(netid,ipaddress)]
                # this arp request is already cached; just refresh the timeout
entry = (s, ct + arp_incomplete_timeout, isstatic,mac,cidr)
self._arp_cache[(netid,ipaddress)] = entry
async def _router_packetin_handler(self):
conn = self._connection
ofdef = self._connection.openflowdef
l3output = self._parent._gettableindex("l3output", self._connection.protocol.vhost)
l3input = self._parent._gettableindex("l3input", self._connection.protocol.vhost)
l2output = self._parent._gettableindex("l2output", self._connection.protocol.vhost)
l3router = self._parent._gettableindex("l3router", self._connection.protocol.vhost)
packetin_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN, None, None, l3output, None,
self._connection, self._connection.connmark)
arpreply_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN, None, None, l3input, 0x4,
self._connection, self._connection.connmark)
arpflow_remove_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_FLOW_REMOVED, None, None,
l3output, None, self._connection, self._connection.connmark)
arpflow_request_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN, None, None, l3output,
0x1,self._connection, self._connection.connmark)
async def _send_broadcast_packet_out(netid,packet):
# in_port == controller
# input network( reg4 ) == outnetwork (reg5)
# output port (reg6) = 0xffffffff
await self.execute_commands(conn,
[
ofdef.ofp_packet_out(
buffer_id=ofdef.OFP_NO_BUFFER,
in_port=ofdef.OFPP_CONTROLLER,
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG4, netid)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG5, netid)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG6, 0xffffffff)
),
ofdef.nx_action_resubmit(
in_port=ofdef.OFPP_IN_PORT & 0xffff,
table=l2output
)
],
data = packet._tobytes()
)
]
)
async def _send_buffer_packet_out(netid,macaddress,ipaddress,srcmacaddress,packet,bid = ofdef.OFP_NO_BUFFER):
await self.execute_commands(conn,
[
ofdef.ofp_packet_out(
buffer_id = bid,
in_port = ofdef.OFPP_CONTROLLER,
actions = [
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.NXM_NX_REG5,netid)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG6, 0xffffffff)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC,srcmacaddress)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,macaddress)
),
ofdef.ofp_action(
type = ofdef.OFPAT_DEC_NW_TTL
),
ofdef.nx_action_resubmit(
in_port = ofdef.OFPP_IN_PORT & 0xffff,
table = l2output
)
],
data = packet._tobytes() if bid == ofdef.OFP_NO_BUFFER else b''
)
]
)
async def _add_host_flow(netid,macaddress,ipaddress,srcmaddress):
await self.execute_commands(conn,
[
ofdef.ofp_flow_mod(
table_id=l3output,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id=ofdef.OFP_NO_BUFFER,
hard_timeout = self._parent.arp_complete_timeout,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ipaddress)
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions = [
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, srcmaddress)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,macaddress)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
),
ofdef.ofp_flow_mod(
cookie = 0x1,
cookie_mask=0xffffffffffffffff,
table_id=l3output,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY,
buffer_id=ofdef.OFP_NO_BUFFER,
idle_timeout=self._parent.arp_complete_timeout * 2,
flags = ofdef.OFPFF_SEND_FLOW_REM,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST, ipaddress)
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, srcmaddress)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, macaddress)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
),
ofdef.ofp_action_output(
port = ofdef.OFPP_CONTROLLER,
max_len = 60
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
]
)
async def _add_static_routes_flow(from_net_id,cidr,to_net_id,smac,dmac):
network,prefix = parse_ip4_network(cidr)
await self.execute_commands(conn,[
ofdef.ofp_flow_mod(
table_id=l3router,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + prefix,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG4, from_net_id),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST_W,
network,
get_netmask(prefix))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG5, to_net_id)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, smac)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, dmac)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
])
async def _add_static_host_flow(ipaddress, dmac, netid, smac):
await self.execute_commands(conn, [
ofdef.ofp_flow_mod(
table_id=l3output,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST, ip4_addr(ipaddress))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, smac)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, dmac)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
])
while True:
ev, m = await M_(packetin_matcher, arpreply_matcher,arpflow_request_matcher,arpflow_remove_matcher)
msg = ev.message
try:
if m is packetin_matcher:
outnetworkid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.NXM_NX_REG5))
ippacket = ethernet_l4.create(msg.data)
ct = time.time()
if (outnetworkid,ippacket.ip_dst) in self._arp_cache:
status,_,_,mac,_ = self._arp_cache[(outnetworkid,ippacket.ip_dst)]
# this mac is real mac
if status == 2:
info = self._getinterfaceinfobynetid(outnetworkid)
if info:
smac,ip,_= info
self.subroutine(_send_buffer_packet_out(outnetworkid,mac,ip,mac_addr(smac),
ippacket,msg.buffer_id))
continue
if (outnetworkid,ippacket.ip_dst) in self._packet_buffer:
                        # drop packets that have already timed out from the buffer
nv = [(p,bid,t) for p,bid,t in self._packet_buffer[(outnetworkid,ippacket.ip_dst)]
if ct < t]
nv.append((ippacket,msg.buffer_id,ct + self._parent.buffer_packet_timeout))
self._packet_buffer[(outnetworkid,ippacket.ip_dst)] = nv
else:
self._packet_buffer[(outnetworkid,ippacket.ip_dst)] = \
[(ippacket,msg.buffer_id,ct + self._parent.buffer_packet_timeout)]
e = ARPRequest(self._connection,ipaddress=ippacket.ip_dst,
logicalnetworkid=outnetworkid,isstatic=False,
cidr=ip4_addr.formatter(ippacket.ip_dst))
self.subroutine(self.wait_for_send(e), False)
elif m is arpflow_request_matcher:
outnetworkid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.NXM_NX_REG5))
#ipaddress = ofdef.get_oxm(msg.match.oxm_fields,ofdef.OXM_OF_IPV4_DST)
ippacket = ethernet_l4.create(msg.data)
ipaddress = ippacket.ip_dst
ct = time.time()
                    if (outnetworkid, ipaddress) in self._arp_cache:
status,timeout,isstatic,mac,cidr = self._arp_cache[(outnetworkid,ipaddress)]
if status == 2:
                            # change this arp entry's status in the cache; the next cycle will send a unicast arp request
entry = (3,timeout,isstatic,mac,cidr)
self._arp_cache[(outnetworkid,ipaddress)] = entry
elif m is arpflow_remove_matcher:
nid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.NXM_NX_REG5))
ip_address = ip4_addr(ip4_addr_bytes.formatter(
ofdef.get_oxm(msg.match.oxm_fields, ofdef.OXM_OF_IPV4_DST)))
                    if (nid, ip_address) in self._arp_cache:
_, _, isstatic, _, _ = self._arp_cache[(nid,ip_address)]
                        # never delete a static arp entry
if not isstatic:
del self._arp_cache[(nid,ip_address)]
if (nid,ip_address) in self._packet_buffer:
del self._packet_buffer[(nid,ip_address)]
elif m is arpreply_matcher:
netid = ofdef.uint32.create(ofdef.get_oxm(msg.match.oxm_fields,ofdef.NXM_NX_REG5))
arp_reply_packet = ethernet_l7.create(msg.data)
reply_ipaddress = arp_reply_packet.arp_spa
reply_macaddress = arp_reply_packet.arp_sha
dst_macaddress = arp_reply_packet.dl_dst
if (netid,reply_ipaddress) in self._arp_cache:
status, timeout, isstatic,_,cidr = self._arp_cache[(netid,reply_ipaddress)]
ct = time.time()
if isstatic:
entry = (2,ct + self._parent.static_host_arp_refresh_interval,
isstatic,reply_macaddress,cidr)
self._arp_cache[(netid,reply_ipaddress)] = entry
# add static routes in l3router
network_relate_router = self._getallinterfaceinfobynetid(netid)
for k, v in network_relate_router.items():
for smac, nid in v:
self.subroutine(_add_static_routes_flow(nid, cidr, netid,
mac_addr(smac), reply_macaddress))
if netid == nid:
self.subroutine(_add_static_host_flow(ip4_addr.formatter(reply_ipaddress),
reply_macaddress, nid, mac_addr(smac)))
else:
# this is the first arp reply
if status == 1 or status == 3:
                                # complete timeout: after the flow hard_timeout expires, packets reach the
                                # controller again, and a packet arriving within that window triggers a unicast
                                # arp request; ideally arp_complete_timeout < t < 2 * arp_complete_timeout
self._arp_cache[(netid,reply_ipaddress)] = (2,
ct + self._parent.arp_complete_timeout + 20,False,reply_macaddress,cidr)
                                # search the message buffer and packet-out any messages waiting for this arp reply
if (netid,reply_ipaddress) in self._packet_buffer:
for packet,bid, t in self._packet_buffer[(netid,reply_ipaddress)]:
self.subroutine(_send_buffer_packet_out(netid,reply_macaddress,
reply_ipaddress,dst_macaddress,packet,bid))
del self._packet_buffer[(netid,reply_ipaddress)]
                                # add a flow for this host in l3output
                                # asyncStart was changed from false to true so buffered packets are sent before the flow is added
self.subroutine(_add_host_flow(netid,reply_macaddress,reply_ipaddress,dst_macaddress))
except Exception:
self._logger.warning(" handler router packetin message error , ignore !",exc_info=True)
async def _update_handler(self):
dataobjectchange = iop.DataObjectChanged.createMatcher(None, None, self._connection)
while True:
ev = await dataobjectchange
self._lastlogicalport, self._lastphyport, self._lastlogicalnet, self._lastphynet = ev.current
self._update_walk()
self.updateobjects((p for p,_ in self._lastlogicalport))
def _update_walk(self):
logicalportkeys = [p.getkey() for p, _ in self._lastlogicalport]
logicalnetkeys = [n.getkey() for n, _ in self._lastlogicalnet]
phyportkeys = [p.getkey() for p,_ in self._lastphyport]
phynetkeys = [n.getkey() for n,_ in self._lastphynet]
dvrforwardinfokeys = [DVRouterForwardSet.default_key()]
self._initialkeys = logicalportkeys + logicalnetkeys + phyportkeys + phyportkeys + dvrforwardinfokeys
self._original_keys = logicalportkeys + logicalnetkeys + phyportkeys + phyportkeys + dvrforwardinfokeys
self._walkerdict = dict(itertools.chain(((p, self._walk_lgport) for p in logicalportkeys),
((n, self._walk_lgnet) for n in logicalnetkeys),
((n, self._walk_phynet) for n in phynetkeys),
((f, self._walk_dvrforwardinfo) for f in dvrforwardinfokeys),
((p, self._walk_phyport) for p in phyportkeys)))
self.subroutine(self.restart_walk(), False)
def _walk_dvrforwardinfo(self,key,value,walk,save):
save(key)
for weakref in value.set.dataset():
try:
weakobj = walk(weakref.getkey())
except KeyError:
pass
else:
save(weakobj.getkey())
def _walk_lgport(self, key, value, walk, save):
if value is None:
return
save(key)
def _walk_lgnet(self, key, value, walk, save):
if value is None:
return
save(key)
lgnetmapkey = LogicalNetworkMap.default_key(LogicalNetwork._getIndices(key)[1][0])
with suppress(WalkKeyNotRetrieved):
lgnetmap = walk(lgnetmapkey)
save(lgnetmap.getkey())
if self._parent.prepush:
for lgport_weak in lgnetmap.ports.dataset():
with suppress(WalkKeyNotRetrieved):
lgport = walk(lgport_weak.getkey())
save(lgport.getkey())
for subnet_weak in lgnetmap.subnets.dataset():
with suppress(WalkKeyNotRetrieved):
subnetobj = walk(subnet_weak.getkey())
save(subnetobj.getkey())
if hasattr(subnetobj, "router"):
routerport = walk(subnetobj.router.getkey())
save(routerport.getkey())
if hasattr(routerport, "router"):
router = walk(routerport.router.getkey())
save(router.getkey())
for weakobj in router.interfaces.dataset():
routerport_weakkey = weakobj.getkey()
                                    # we started walking from this key, so skip it
if routerport_weakkey != routerport.getkey():
with suppress(WalkKeyNotRetrieved):
weakrouterport = walk(routerport_weakkey)
save(routerport_weakkey)
if hasattr(weakrouterport, "subnet"):
weaksubnet = walk(weakrouterport.subnet.getkey())
save(weaksubnet.getkey())
if hasattr(weaksubnet, "network"):
logicalnetwork = walk(weaksubnet.network.getkey())
save(logicalnetwork.getkey())
def _walk_phyport(self, key, value, walk, save):
if value is None:
return
save(key)
def _walk_phynet(self,key,value,walk,save):
if value is None:
return
save(key)
def reset_initialkeys(self,keys,values):
subnetkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(SubNet)]
routerportkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(RouterPort)]
routerkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(VRouter)]
forwardinfokeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(DVRouterForwardInfoRef)]
self._initialkeys = tuple(itertools.chain(self._original_keys,subnetkeys,
routerportkeys,routerkeys,forwardinfokeys))
async def updateflow(self, connection, addvalues, removevalues, updatedvalues):
try:
datapath_id = connection.openflow_datapathid
ofdef = connection.openflowdef
vhost = connection.protocol.vhost
lastsubnetinfo = self._lastsubnetinfo
lastlgportinfo = self._lastlgportinfo
lastrouterstoreinterfaceinfo = self._lastrouterstoreinterfacenetinfo
lastnetworkrouterinfo = self._lastnetworkrouterinfo
lastnetworkroutertableinfo = self._lastnetworkroutertableinfo
            lastnetworkstaticroutesinfo = self._lastnetworkstaticroutesinfo
            laststaticroutes = self._laststaticroutes
laststoreinfo = self._laststoreinfo
lastnetworkforwardinfo = self._lastnetworkforwardinfo
lastexternallgportinfo = self._lastexternallgportinfo
allobjects = set(o for o in self._savedresult if o is not None and not o.isdeleted())
dvrforwardinfo = dict(((f.from_pynet,f.to_pynet),f.info) for f in allobjects
if f.isinstance(DVRouterForwardInfoRef))
self._lastdvrforwardinfo = dvrforwardinfo
currentphynetinfo = dict((n,n.id) for n,_ in self._lastphynet if n in allobjects)
# phyport : phynet = 1:1, so we use phynet as key
currentphyportinfo = dict((p.physicalnetwork, (p,id)) for p, id in self._lastphyport if p in allobjects
and p.physicalnetwork in currentphynetinfo)
currentlognetinfo = {}
lognetinfo = dict((n,id) for n,id in self._lastlogicalnet if n in allobjects)
for n,id in lognetinfo.items():
                # this logical network has a phyport, so use the phyport mac as the base
                # for the mac the router uses when sending packets; otherwise use the
                # inner router mac
if n.physicalnetwork in currentphyportinfo:
_,phyportid = currentphyportinfo[n.physicalnetwork]
openflow_port = await call_api(self, "openflowportmanager", "waitportbyno",
{"datapathid": datapath_id,
"vhost": vhost,
"portno": phyportid})
portmac = openflow_port.hw_addr
                    # derive the router's outgoing mac from the physical port mac
outmac = [s ^ m for s, m in zip(portmac, mac_addr(self._parent.outroutermacmask))]
currentlognetinfo[n] = (id,mac_addr.formatter(outmac),phyportid)
else:
currentlognetinfo[n] = (id,self._parent.inroutermac,None)
currentlgportinfo = dict((p,(p.ip_address,p.mac_address,currentlognetinfo[p.network][0],p.network.id))
for p,id in self._lastlogicalport if p in allobjects
and hasattr(p,"ip_address")
and hasattr(p,"mac_address")
and p.network in currentlognetinfo)
currentexternallgportinfo = dict((p,(p.ip_address,p.mac_address,currentlognetinfo[p.network][0],
currentlognetinfo[p.network][1]))
for p in allobjects if p.isinstance(LogicalPort)
and hasattr(p,"ip_address")
and hasattr(p,"mac_address")
and p.network in currentlognetinfo
and p not in currentlgportinfo)
self._lastlgportinfo = currentlgportinfo
self._lastexternallgportinfo = currentexternallgportinfo
subnet_to_routerport = dict((p.subnet,p) for p in allobjects if p.isinstance(RouterPort))
router_to_routerport = dict((p.router,p) for p in allobjects if p.isinstance(RouterPort))
routerport_to_subnet = dict((p, p.subnet) for p in allobjects if p.isinstance(RouterPort))
routerport_to_router = dict((p,p.router) for p in allobjects if p.isinstance(RouterPort))
staticroutes = dict((r,r.routes) for r in allobjects if r.isinstance(VRouter)
and r in router_to_routerport)
try:
bridge, system_id, _ = await call_api(self, 'ovsdbmanager', 'waitbridgeinfo',
{'datapathid': datapath_id,
'vhost': vhost})
except Exception:
self._logger.warning("OVSDB bridge is not ready", exc_info=True)
return
currentsubnetinfo = dict()
for s in allobjects:
if s.isinstance(SubNet) and s.network in currentlognetinfo and s in subnet_to_routerport:
cidr = s.cidr
gateway = getattr(s, 'gateway')
external_ip_address = None
local_external_ip = False
if hasattr(s, "local_address"):
external_ip_address = s.local_address
local_external_ip = True
if hasattr(s, "pre_host_config"):
for config in s.pre_host_config:
config_systemid = config["systemid"]
config_bridge = config['bridge']
config_vhost = config['vhost']
if config_systemid in [system_id, '%'] and config_bridge in [bridge, '%'] and \
config_vhost in [vhost, "%"]:
if 'cidr' in config:
cidr = config['cidr']
if 'local_address' in config:
external_ip_address = config['local_address']
local_external_ip = True
if 'gateway' in config:
gateway = config['gateway']
currentsubnetinfo[s] = (cidr,
getattr(s, "isexternal", False),
gateway,
self._parent.inroutermac,
currentlognetinfo[s.network][1], # out router mac
external_ip_address,
currentlognetinfo[s.network][0], # network id
currentlognetinfo[s.network][2], # physical port no
s.id, # subnet id
s.network, # logical network
local_external_ip # external ip address from local
)
if self._parent.enable_router_forward:
update_external_subnet = dict()
for k, v in currentsubnetinfo.items():
if v[1] and v[7] and not v[10]:
                        # this subnet is external and an external_ip is required; there is no
                        # local config, so allocate one from the cidr
if k in lastsubnetinfo and lastsubnetinfo[k][1] and lastsubnetinfo[k][5] \
and not lastsubnetinfo[k][10]:
                            # this subnet already had an ip allocated last time
allocated_ip_address = lastsubnetinfo[k][5]
else:
subnetkey = SubNet.default_key(k.id)
subnetmapkey = SubNetMap.default_key(k.id)
DVRouterExternalAddressInfokey = DVRouterExternalAddressInfo.default_key()
allocated_ip_address = [None]
def allocate_ip(keys,values,timestamp):
start = parse_ip4_address(values[1].allocated_start)
end = parse_ip4_address(values[1].allocated_end)
#values[0].info = [e for e in values[0].info if e[5] > timestamp]
                                # filter out timed-out info, but only discard entries for this subnet id,
                                # so the ip address can be released back to the subnet
for e in list(values[0].info):
if e[5] < timestamp and values[2].id == e[3]:
ipaddress = parse_ip4_address(e[4])
if str(ipaddress) in values[2].allocated_ips:
del values[2].allocated_ips[str(ipaddress)]
values[0].info.remove(e)
if (system_id,bridge,vhost,values[1].id) in [(e[0],e[1],e[2],e[3]) for e in values[0].info]:
for e in list(values[0].info):
if (e[0],e[1],e[2],e[3]) == (system_id,bridge,vhost,values[1].id):
allocated_ip_address[0] = e[4]
values[0].info.remove(e)
values[0].info.append((system_id,bridge,vhost,values[1].id,allocated_ip_address[0],
timestamp + self._parent.addressinfo_discover_update_time * 2 * 1000000))
else:
for ipaddress in range(start,end):
if str(ipaddress) not in values[2].allocated_ips:
allocated_ip_address[0] = ip4_addr.formatter(ipaddress)
values[2].allocated_ips[str(ipaddress)] = (system_id,bridge,vhost)
break
else:
raise ValueError("allocate external subnet ipaddress error!")
values[0].info.append((system_id,bridge,vhost,values[1].id,allocated_ip_address[0],
timestamp + self._parent.addressinfo_discover_update_time * 2 * 1000000))
return tuple([keys[0],keys[2]]),tuple([values[0],values[2]])
await call_api(self,"objectdb","transact",
{"keys":[DVRouterExternalAddressInfokey,subnetkey, subnetmapkey],
"updater":allocate_ip,"withtime":True})
allocated_ip_address = allocated_ip_address[0]
nv = list(v)
nv[5] = allocated_ip_address
update_external_subnet[k] = tuple(nv)
currentsubnetinfo.update(update_external_subnet)
for k, v in lastsubnetinfo.items():
if v[1] and v[7] and not v[10]:
if k not in currentsubnetinfo or (k in currentsubnetinfo
and not currentsubnetinfo[k][1]
) or currentsubnetinfo[k][10]:
                            # this external subnet went offline, release its ip address back to the subnet
allocated_ip_address = v[5]
subnetmapkey = SubNetMap.default_key(k.id)
DVRouterExternalAddressInfokey = DVRouterExternalAddressInfo.default_key()
def release_ip(keys,values,timestamp):
new_list = []
for e in values[0].info:
if (e[0],e[1],e[2],e[3]) == (system_id,bridge,vhost,values[1].id):
ipaddress = parse_ip4_address(allocated_ip_address)
if str(ipaddress) in values[1].allocated_ips:
del values[1].allocated_ips[str(ipaddress)]
elif e[5] < timestamp and e[3] == values[1].id:
ipaddress = parse_ip4_address(e[4])
if str(ipaddress) in values[1].allocated_ips:
del values[1].allocated_ips[str(ipaddress)]
else:
new_list.append(e)
values[0].info = new_list
return keys,values
await call_api(self,"objectdb","transact",
{"keys":[DVRouterExternalAddressInfokey,subnetmapkey],
"updater":release_ip,"withtime":True})
self._lastsubnetinfo = currentsubnetinfo
allrouterinterfaceinfo = dict()
for router in staticroutes.keys():
for k,v in routerport_to_router.items():
if v == router:
s = routerport_to_subnet[k]
interface = currentsubnetinfo[s]
if hasattr(k,"ip_address"):
interface = list(interface)
interface[2] = k.ip_address
interface = tuple(interface)
allrouterinterfaceinfo.setdefault(router,[]).append(interface)
#self._lastallrouterinfo = allrouterinterfaceinfo
router_to_phynet = dict()
router_to_no_phynet = dict()
for k, v in allrouterinterfaceinfo.items():
for e in v:
if e[7] and e[9].physicalnetwork:
                        #
                        # if router interfaces share the same physicalnetwork and physicalport,
                        # they must have the same outmac
                        #
router_to_phynet.setdefault(k, set()).add((e[9].physicalnetwork, e[4],e[9],e[0],e[1],e[6]))
else:
router_to_no_phynet.setdefault(k,set()).add((e[9].physicalnetwork, e[4],e[9],e[0],e[1],e[6]))
currentnetworkforwardinfo = dict()
for k,v in router_to_no_phynet.items():
if k in router_to_phynet:
for x in v:
for e in router_to_phynet[k]:
if (e[0].id,x[0].id) in dvrforwardinfo:
if dvrforwardinfo[(e[0].id,x[0].id)]:
if x[4]:
currentnetworkforwardinfo.setdefault(e[2],set()).\
add((e[5],x[5],dvrforwardinfo[(e[0].id,x[0].id)][0],"0.0.0.0/0"))
currentnetworkforwardinfo.setdefault(e[2],set()).\
add((e[5],x[5],dvrforwardinfo[(e[0].id,x[0].id)][0],x[3]))
self._lastnetworkforwardinfo = currentnetworkforwardinfo
if self._parent.enable_router_forward:
                # router forwarding is enabled, so publish this router's forwarding capability to the db
currentstoreinfo = dict()
for k,v in router_to_phynet.items():
for e in v:
for x in v:
if x[0].id != e[0].id:
currentstoreinfo.setdefault((e[0].id,x[0].id),set()).add(e[1])
currentstoreinfo.setdefault((x[0].id, e[0].id), set()).add(x[1])
self._laststoreinfo = currentstoreinfo
add_store_info = dict()
remove_store_info = dict()
for k in laststoreinfo.keys():
if k not in currentstoreinfo or (k in currentstoreinfo and currentstoreinfo[k] != laststoreinfo[k]):
remove_store_info[k] = laststoreinfo[k]
for k in currentstoreinfo.keys():
if k not in laststoreinfo or (k in laststoreinfo and laststoreinfo[k] != currentstoreinfo[k]):
add_store_info[k] = currentstoreinfo[k]
if add_store_info or remove_store_info:
forward_keys = list(set([DVRouterForwardInfo.default_key(k[0],k[1]) for k in
list(add_store_info.keys())+ list(remove_store_info.keys())]))
ref_forward_keys = [DVRouterForwardInfoRef.default_key(DVRouterForwardInfo._getIndices(k)[1][0],
DVRouterForwardInfo._getIndices(k)[1][1]) for k in forward_keys]
transact_keys = [DVRouterForwardSet.default_key()] + forward_keys + ref_forward_keys
try:
bridge, system_id, _ = await call_api(self, 'ovsdbmanager', 'waitbridgeinfo',
{'datapathid': datapath_id,
'vhost': vhost})
except Exception:
self._logger.warning("OVSDB bridge is not ready", exc_info=True)
return
def store_transact(keys,values,timestamp):
transact_object = dict()
#transact_object[keys[0]] = values[0]
for i in range((len(transact_keys) - 1) //2):
if values[i + 1] is None:
                                # this phynet -> phynet info is being created for the first time
indices = DVRouterForwardInfo._getIndices(keys[i + 1])[1]
if (indices[0],indices[1]) in add_store_info:
obj = DVRouterForwardInfo.create_from_key(keys[i + 1])
e = (system_id,bridge,vhost,list(add_store_info[(indices[0],indices[1])])[0],
timestamp + self._parent.forwardinfo_discover_update_time * 2 * 1000000)
obj.info.append(e)
values[i + 1] = set_new(values[i + 1], obj)
transact_object[keys[i + 1]] = values[i + 1]
refobj = DVRouterForwardInfoRef.create_from_key(keys[i + 1 +
(len(transact_keys) - 1)//2])
refobj.info.append(e[3])
values[i + 1 + (len(transact_keys) - 1)//2] = set_new(
values[i + 1 + (len(transact_keys) - 1)//2],refobj)
transact_object[keys[i + 1 + (len(transact_keys) - 1) // 2]] = \
values[i + 1 + (len(transact_keys) - 1)//2]
values[0].set.dataset().add(refobj.create_weakreference())
transact_object[keys[0]] = values[0]
else:
                                # the DVRouterForwardInfo and DVRouterForwardInfoRef objects already exist,
                                # so they must be in the DVRouterForwardSet; check for timed-out entries
values[i+1].info = [e for e in values[i+1].info
if (e[0],e[1],e[2]) != (system_id,bridge,vhost)
and e[4] > timestamp]
indices = DVRouterForwardInfo._getIndices(keys[i + 1])[1]
if (indices[0],indices[1]) in add_store_info:
e = (system_id,bridge,vhost,list(add_store_info[(indices[0],indices[1])])[0],
timestamp + self._parent.forwardinfo_discover_update_time * 2 * 1000000)
values[i+1].info.append(e)
values[i+1].info = sorted(values[i+1].info,key=lambda x: x[3])
if values[i+1].info:
transact_object[keys[i+1]] = values[i+1]
refe = [e[3] for e in values[i + 1].info]
if values[i + 1 + (len(transact_keys) - 1)//2].info != refe:
values[i + 1 + (len(transact_keys) - 1)//2].info = refe
transact_object[keys[i + 1 + (len(transact_keys) - 1)//2]] = \
values[i + 1 + (len(transact_keys) - 1)//2]
else:
transact_object[keys[i+1]] = None
transact_object[keys[i + 1 + (len(transact_keys) - 1) // 2]] = None
if WeakReferenceObject(keys[i + 1 + (len(transact_keys) - 1) // 2]) in \
values[0].set.dataset():
values[0].set.dataset().discard(
WeakReferenceObject(keys[i + 1 + (len(transact_keys) - 1) // 2]))
transact_object[keys[0]] = values[0]
if transact_object:
return tuple(zip(*transact_object.items()))
else:
return (), ()
await call_api(self,"objectdb","transact",
{"keys":transact_keys,"updater":store_transact,"withtime":True})
currentnetworkrouterinfo = dict()
network_to_router = dict()
for k,v in allrouterinterfaceinfo.items():
for e in v:
# isexternal,gateway,inmac,outmac,external_ip,networkid
entry = (e[1], e[2], e[3], e[4], e[5], e[6])
currentnetworkrouterinfo[e[9]] = entry
                    # a network may be attached to more than one router
network_to_router.setdefault(e[9], set()).add(k)
# network_to_router[e[9]] = k
self._lastnetworkrouterinfo = currentnetworkrouterinfo
currentnetworkroutertableinfo = dict()
for network in currentnetworkrouterinfo.keys():
all_router = network_to_router[network]
for router in all_router:
for e in allrouterinterfaceinfo[router]:
if e[9] != network:
# self_network_id, cidr, to_network_id
entry = (currentnetworkrouterinfo[network][5], e[0], e[6])
currentnetworkroutertableinfo.setdefault(network,set()).add(entry)
# for e in allrouterinterfaceinfo[network_to_router[network]]:
# if e[9] != network:
# entry = (currentnetworkrouterinfo[network][5],e[0],e[6])
# currentnetworkroutertableinfo.setdefault(network,set()).add(entry)
self._lastnetworkroutertableinfo = currentnetworkroutertableinfo
currentstaticroutes = dict()
for r,routes in staticroutes.items():
# for e in routes:
# prefix,nexthop = e
# for v in allrouterinterfaceinfo[r]:
# cidr = v[0]
# network,mask = parse_ip4_network(cidr)
# if ip_in_network(parse_ip4_address(nexthop),network,mask):
# currentstaticroutes.setdefault(r,set()).add((prefix,nexthop,v[6]))
#
# # external interface , add default router to static routes
# if v[1]:
# currentstaticroutes.setdefault(r,set()).add(("0.0.0.0/0",v[5],v[6]))
for v in allrouterinterfaceinfo[r]:
cidr = v[0]
for e in routes:
prefix,nexthop = e
network,mask = parse_ip4_network(cidr)
if ip_in_network(parse_ip4_address(nexthop),network,mask):
currentstaticroutes.setdefault(r,set()).add((prefix,nexthop,v[6]))
# external, physical_port_no
if v[1] and v[7]:
# prefix, gateway, network_id
currentstaticroutes.setdefault(r, set()).add(("0.0.0.0/0", v[2], v[6]))
self._laststaticroutes = currentstaticroutes
currentnetworkstaticroutesinfo = dict()
for network in currentnetworkrouterinfo.keys():
all_router = network_to_router[network]
for router in all_router:
if router in currentstaticroutes:
for route in currentstaticroutes[router]:
# from_network_id, prefix, gateway, to_network_id
currentnetworkstaticroutesinfo.setdefault(network, set()).\
add((currentnetworkrouterinfo[network][5], route[0], route[1], route[2]))
self._lastnetworkstaticroutesinfo = currentnetworkstaticroutesinfo
# add_transact_router_store = dict()
# remove_transact_router_store = dict()
# for o in lastrouterstoreinterfaceinfo:
# if o not in currentrouterstoreinterfaceinfo:
# remove_transact_router_store[o] = lastrouterstoreinterfaceinfo[o]
#
# for o in currentrouterstoreinterfaceinfo:
# if o not in lastrouterstoreinterfaceinfo or (o in lastrouterstoreinterfaceinfo
# and lastrouterstoreinterfaceinfo[o] != currentrouterstoreinterfaceinfo[o]):
# add_transact_router_store[o] = currentrouterstoreinterfaceinfo[o]
#
# transact_dvrouter_info_keys = [DVRouterInfo.default_key(r.id)
# for r in list(add_transact_router_store.keys())+
# list(remove_transact_router_store.keys())]
#
# if transact_dvrouter_info_keys:
# try:
# for m in call_api(self, 'ovsdbmanager', 'waitbridgeinfo', {'datapathid': datapath_id,
# 'vhost': vhost}):
# yield m
# except Exception:
# self._logger.warning("OVSDB bridge is not ready", exc_info=True)
# return
# else:
# bridge, system_id, _ = self.retvalue
#
# def transact_store__dvr_info(keys,values,timestamp):
# for i in range(0,len(transact_dvrouter_info_keys)):
# v = [e for e in values[i].dvrinfo
# if (e[0],e[1],[2]) != (system_id,vhost,bridge) and e[8] > timestamp]
# values[i].dvrinfo = v
#
# if keys[i] in [DVRouterInfo.default_key(r.id) for r in list(add_transact_router_store.keys())]:
# k = DVRouterInfo._getIndices(keys[i])[1][0]
# info = add_transact_router_store[ReferenceObject(VRouter.default_key(k))]
# for x in info:
# e = (system_id,vhost,bridge,x[0],x[1],x[2],x[3],x[4],
# timestamp + 5 * 1000000)
# values[i].dvrinfo.append(e)
#
# return keys,values
#
#
# for m in call_api(self,"objectdb","transact",
# {"keys":transact_dvrouter_info_keys,"updater":transact_store__dvr_info,
# "withtime":True}):
# yield m
cmds = []
l3input = self._parent._gettableindex("l3input", vhost)
l3router = self._parent._gettableindex("l3router", vhost)
l3output = self._parent._gettableindex("l3output", vhost)
l2output = self._parent._gettableindex("l2output", vhost)
arpreply = self._parent._gettableindex("arp",vhost)
if connection.protocol.disablenxext:
def match_network(nid):
return ofdef.create_oxm(ofdef.OXM_OF_METADATA_W, (nid & 0xffff) << 32,
b'\x00\x00\xff\xff\x00\x00\x00\x00')
else:
def match_network(nid):
return ofdef.create_oxm(ofdef.NXM_NX_REG4, nid)
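            # without Nicira register extensions the logical network id is matched against
            # bits 32-47 of the OXM metadata field (see the mask above); otherwise NXM_NX_REG4
            # carries it directly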
def _createinputflow(mac,netid):
return [
ofdef.ofp_flow_mod(
cookie=0x3,
cookie_mask=0xffffffffffffffff,
table_id=l3input,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
match_network(netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, mac_addr_bytes(mac)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP)
]
),
instructions=[
ofdef.ofp_instruction_goto_table(table_id=l3router)
]
)
]
def _deleteinputflow(mac,netid):
return [
ofdef.ofp_flow_mod(
cookie=0x3,
cookie_mask=0xffffffffffffffff,
table_id=l3input,
command=ofdef.OFPFC_DELETE,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
match_network(netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, mac_addr_bytes(mac)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP)
]
)
)
]
def _createarpreplyflow(ipaddress,mac,netid):
return [
ofdef.ofp_flow_mod(
cookie=0x4,
cookie_mask=0xffffffffffffffff,
table_id=l3input,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
match_network(netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_ARP),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, mac_addr_bytes(mac)),
ofdef.create_oxm(ofdef.OXM_OF_ARP_OP, ofdef.ARPOP_REPLY),
ofdef.create_oxm(ofdef.OXM_OF_ARP_TPA, ip4_addr(ipaddress))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_output(
port=ofdef.OFPP_CONTROLLER,
max_len=ofdef.OFPCML_NO_BUFFER
)
]
)
]
)
]
def _deletearpreplyflow(ipaddress,mac,netid):
return [
ofdef.ofp_flow_mod(
cookie=0x4,
cookie_mask=0xffffffffffffffff,
table_id=l3input,
command=ofdef.OFPFC_DELETE,
priority=ofdef.OFP_DEFAULT_PRIORITY,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
match_network(netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_ARP),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, mac_addr_bytes(mac)),
ofdef.create_oxm(ofdef.OXM_OF_ARP_OP, ofdef.ARPOP_REPLY),
ofdef.create_oxm(ofdef.OXM_OF_ARP_TPA, ip4_addr(ipaddress))
]
)
)
]
def _createfilterarprequestflow(netid):
return [
ofdef.ofp_flow_mod(
table_id=arpreply,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY - 1,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG7_W, 0,0x4000),
ofdef.create_oxm(ofdef.NXM_NX_REG4, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_ARP),
ofdef.create_oxm(ofdef.OXM_OF_ARP_OP, ofdef.ARPOP_REQUEST),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST_W, b'\x01\x00\x00\x00\x00\x00',
b'\x01\x00\x00\x00\x00\x00')
]
),
instructions=[ofdef.ofp_instruction_actions(type=ofdef.OFPIT_CLEAR_ACTIONS)]
)
]
def _deletefilterarprequestflow(netid):
return [
ofdef.ofp_flow_mod(
table_id=arpreply,
command=ofdef.OFPFC_DELETE_STRICT,
priority=ofdef.OFP_DEFAULT_PRIORITY - 1,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG7_W, 0,0x4000),
ofdef.create_oxm(ofdef.NXM_NX_REG4, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_ARP),
ofdef.create_oxm(ofdef.OXM_OF_ARP_OP, ofdef.ARPOP_REQUEST),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST_W, b'\x01\x00\x00\x00\x00\x00',
b'\x01\x00\x00\x00\x00\x00')
]
)
)
]
def _add_router_route(fnetid,cidr,tnetid):
network, prefix = parse_ip4_network(cidr)
return [
ofdef.ofp_flow_mod(
table_id=l3router,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + prefix,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
match_network(fnetid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST_W,
network,
get_netmask(prefix))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG5, tnetid)
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l3output)
]
)
]
def _delete_router_route(fnetid,cidr,tnetid):
network, prefix = parse_ip4_network(cidr)
return [
ofdef.ofp_flow_mod(
table_id=l3router,
command=ofdef.OFPFC_DELETE_STRICT,
priority=ofdef.OFP_DEFAULT_PRIORITY + prefix,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
match_network(fnetid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST_W, network,get_netmask(prefix))
]
)
)
]
def _add_host_flow(ipaddress,dmac,netid,smac):
return [
ofdef.ofp_flow_mod(
table_id=l3output,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST, ip4_addr(ipaddress))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC,mac_addr(smac))
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr(dmac))
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
]
def _remove_host_flow(ipaddress,dmac,netid,smac):
return [
ofdef.ofp_flow_mod(
table_id=l3output,
command=ofdef.OFPFC_DELETE,
priority=ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG5, netid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST, ip4_addr(ipaddress))
]
)
)
]
def _add_static_routes_flow(from_net_id,cidr,to_net_id,smac,dmac):
network, prefix = parse_ip4_network(cidr)
return [
ofdef.ofp_flow_mod(
table_id=l3router,
command=ofdef.OFPFC_ADD,
priority=ofdef.OFP_DEFAULT_PRIORITY + prefix,
buffer_id=ofdef.OFP_NO_BUFFER,
out_port=ofdef.OFPP_ANY,
out_group=ofdef.OFPG_ANY,
match=ofdef.ofp_match_oxm(
oxm_fields=[
ofdef.create_oxm(ofdef.NXM_NX_REG4, from_net_id),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST_W,
network,
get_netmask(prefix))
]
),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.NXM_NX_REG5, to_net_id)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC, smac)
),
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, dmac)
),
ofdef.ofp_action(
type=ofdef.OFPAT_DEC_NW_TTL
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
]
def _add_forward_route(from_net_id,cidr,outmac):
network,prefix = parse_ip4_network(cidr)
if network:
priority = ofdef.OFP_DEFAULT_PRIORITY + 33 + prefix
else:
# add the default forward route; keep its priority low
priority = ofdef.OFP_DEFAULT_PRIORITY + prefix
return [
ofdef.ofp_flow_mod(
table_id = l3router,
command = ofdef.OFPFC_ADD,
priority = priority,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
match_network(from_net_id),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST_W,network,get_netmask(prefix))
]
),
instructions = [
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_set_field(
field=ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, mac_addr(outmac))
)
]
),
ofdef.ofp_instruction_goto_table(table_id=l2output)
]
)
]
def _delete_forward_route(from_net_id,cidr,outmac):
network,prefix = parse_ip4_network(cidr)
if network:
priority = ofdef.OFP_DEFAULT_PRIORITY + 33 + prefix
else:
# default forward route; keep its priority low (must match the priority used when adding)
priority = ofdef.OFP_DEFAULT_PRIORITY + prefix
return [
ofdef.ofp_flow_mod(
table_id = l3router,
command = ofdef.OFPFC_DELETE_STRICT,
priority = priority,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
match_network(from_net_id),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST_W,network,get_netmask(prefix))
]
)
)
]
for n in lastnetworkrouterinfo:
if n not in currentnetworkrouterinfo or (n in currentnetworkrouterinfo
and currentnetworkrouterinfo[n] != lastnetworkrouterinfo[n]):
isexternal,gateway,inmac,outmac,external_ip,networkid = lastnetworkrouterinfo[n]
if not isexternal:
# remove flow innermac + ip >>> l3input
cmds.extend(_deleteinputflow(inmac,networkid))
cmds.extend(_deleteinputflow(outmac,networkid))
# remove arp reply flow on outmac >>> l3input
cmds.extend(_deletearpreplyflow(gateway,outmac,networkid))
# remove the ARP filter that discards broadcast ARP requests to inner hosts
cmds.extend(_deletefilterarprequestflow(networkid))
else:
if external_ip:
cmds.extend(_deleteinputflow(outmac,networkid))
# remove arp reply flow on outmac >>> l3input
cmds.extend(_deletearpreplyflow(external_ip,outmac,networkid))
# remove arp proxy for external ip
await call_api(self, 'arpresponder', 'removeproxyarp', {'connection': connection,
'arpentries': [(external_ip,outmac,n.id,False)]})
for n in lastnetworkroutertableinfo:
if n not in currentnetworkroutertableinfo:
# this router network was deleted; clear its router table entries
for from_network_id,cidr,to_network_id in lastnetworkroutertableinfo[n]:
cmds.extend(_delete_router_route(from_network_id,cidr,to_network_id))
elif lastnetworkroutertableinfo[n] != currentnetworkroutertableinfo[n]:
last_route_set = lastnetworkroutertableinfo[n]
new_route_set = currentnetworkroutertableinfo[n]
for from_network_id,cidr,to_network_id in last_route_set.difference(new_route_set):
cmds.extend(_delete_router_route(from_network_id,cidr,to_network_id))
for r in laststaticroutes:
if r not in currentstaticroutes:
for prefix,nexthop,netid in laststaticroutes[r]:
network_relate_router = self._getallinterfaceinfobynetid(netid)
# the network may be attached to more than one router; if so, don't remove the ARP cache entry
if len(network_relate_router) <= 1:
if (netid, ip4_addr(nexthop)) in self._arp_cache:
del self._arp_cache[(netid, ip4_addr(nexthop))]
# delete all network routes from this prefix
for mac, nid in network_relate_router[r]:
# this static route may exist in this network; try to delete it
cmds.extend(_delete_router_route(nid, prefix, netid))
if nid == netid and len(network_relate_router) <= 1:
cmds.extend(_remove_host_flow(nexthop, mac, nid, mac))
elif laststaticroutes[r] != currentstaticroutes[r]:
last_router_routes_set = laststaticroutes[r]
new_router_routes_set = currentstaticroutes[r]
for prefix,nexthop,netid in last_router_routes_set.difference(new_router_routes_set):
network_relate_router = self._getallinterfaceinfobynetid(netid)
# the network may be attached to more than one router; if so, don't remove the ARP cache entry
if len(network_relate_router) <= 1:
if (netid, ip4_addr(nexthop)) in self._arp_cache:
del self._arp_cache[(netid, ip4_addr(nexthop))]
# delete all network routes from this prefix
for mac, nid in network_relate_router[r]:
# this static route may exist in this network; try to delete it
cmds.extend(_delete_router_route(nid, prefix, netid))
if nid == netid and len(network_relate_router) <= 1:
cmds.extend(_remove_host_flow(nexthop, mac, nid, mac))
for n in lastnetworkstaticroutesinfo:
if n not in currentnetworkstaticroutesinfo:
for from_network_id,prefix,nexthop,to_network_id in lastnetworkstaticroutesinfo[n]:
cmds.extend(_delete_router_route(from_network_id,prefix,to_network_id))
for p in lastlgportinfo:
if p not in currentlgportinfo or (p in currentlgportinfo
and currentlgportinfo[p] != lastlgportinfo[p]):
ipaddress,macaddrss,netid,netkey = lastlgportinfo[p]
# remove host flow
cmds.extend(_remove_host_flow(ipaddress,macaddrss,netid,self._parent.inroutermac))
# remove arp proxy
await call_api(self, 'arpresponder', 'removeproxyarp', {'connection': connection,
'arpentries': [(ipaddress,macaddrss,netkey,False)]})
for p in lastexternallgportinfo:
if p not in currentexternallgportinfo or\
(p in currentexternallgportinfo and currentexternallgportinfo[p] != lastexternallgportinfo[p]):
ipaddress,macaddrss,netid,outmac = lastexternallgportinfo[p]
cmds.extend(_remove_host_flow(ipaddress,macaddrss,netid,outmac))
for n in lastnetworkforwardinfo:
if n not in currentnetworkforwardinfo or (n in currentnetworkforwardinfo
and currentnetworkforwardinfo[n] != lastnetworkforwardinfo[n]):
for x in lastnetworkforwardinfo[n]:
from_network_id = x[0]
outmac = x[2]
cidr = x[3]
cmds.extend(_delete_forward_route(from_network_id,cidr,outmac))
await self.execute_commands(connection, cmds)
del cmds[:]
for n in currentnetworkrouterinfo:
if n not in lastnetworkrouterinfo or (n in lastnetworkrouterinfo
and lastnetworkrouterinfo[n] != currentnetworkrouterinfo[n]):
isexternal,gateway,inmac,outmac,external_ip,networkid = currentnetworkrouterinfo[n]
if not isexternal:
# add flow innermac + ip >>> l3input
cmds.extend(_createinputflow(inmac,networkid))
cmds.extend(_createinputflow(outmac, networkid))
# add arp reply flow on outmac >>> l3input
cmds.extend(_createarpreplyflow(gateway,outmac,networkid))
# add an ARP filter to discard broadcast ARP requests to inner hosts
cmds.extend(_createfilterarprequestflow(networkid))
else:
# external_ip being None means this external subnet has no physical port
if external_ip:
# external network: packets will be received on outmac, so add the input flow
cmds.extend(_createinputflow(outmac, networkid))
# add arp reply flow on outmac >>> l3input
cmds.extend(_createarpreplyflow(external_ip,outmac,networkid))
# add ARP proxy for the external IP
await call_api(self, 'arpresponder', 'createproxyarp', {'connection': connection,
'arpentries': [(external_ip,outmac,n.id,False)]})
for n in currentnetworkroutertableinfo:
if n not in lastnetworkroutertableinfo:
# this router network was added; add all of its router table entries
for from_network_id,cidr,to_network_id in currentnetworkroutertableinfo[n]:
cmds.extend(_add_router_route(from_network_id,cidr,to_network_id))
elif currentnetworkroutertableinfo[n] != lastnetworkroutertableinfo[n]:
new_route_set = currentnetworkroutertableinfo[n]
last_route_set = lastnetworkroutertableinfo[n]
for from_network_id,cidr,to_network_id in new_route_set.difference(last_route_set):
cmds.extend(_add_router_route(from_network_id,cidr,to_network_id))
arp_request_event = []
for n in currentnetworkstaticroutesinfo:
if n not in lastnetworkstaticroutesinfo:
for from_network_id,prefix,nexthop,to_network_id in currentnetworkstaticroutesinfo[n]:
if (to_network_id,ip4_addr(nexthop)) in self._arp_cache \
and (self._arp_cache[(to_network_id,ip4_addr(nexthop))][0] == 2 or
self._arp_cache[(to_network_id,ip4_addr(nexthop))][0] == 3):
_, _, _, mac, _ = self._arp_cache[(to_network_id, ip4_addr(nexthop))]
smac, _, _ = self._getinterfaceinfobynetid(to_network_id)
cmds.extend(_add_static_routes_flow(from_network_id,prefix,
to_network_id,mac_addr(smac),mac))
if from_network_id == to_network_id:
cmds.extend(_add_host_flow(nexthop,mac_addr.formatter(mac),
from_network_id,smac))
# promote this ARP entry from a host entry to a static entry
entry = (2,time.time() + self._parent.arp_incomplete_timeout,True,mac,prefix)
self._arp_cache[(to_network_id,ip4_addr(nexthop))] = entry
else:
e = ARPRequest(self._connection, ipaddress=ip4_addr(nexthop),
logicalnetworkid=to_network_id, isstatic=True,
cidr=prefix)
arp_request_event.append(e)
elif currentnetworkstaticroutesinfo[n] != lastnetworkstaticroutesinfo[n]:
last_router_routes_set = lastnetworkstaticroutesinfo[n]
new_router_routes_set = currentnetworkstaticroutesinfo[n]
for from_network_id,prefix, nexthop, to_network_id in \
new_router_routes_set.difference(last_router_routes_set):
if (to_network_id,ip4_addr(nexthop)) in self._arp_cache \
and (self._arp_cache[(to_network_id,ip4_addr(nexthop))][0] == 2 or
self._arp_cache[(to_network_id,ip4_addr(nexthop))][0] == 3):
_, _, _, mac, _ = self._arp_cache[(to_network_id, ip4_addr(nexthop))]
smac, _, _ = self._getinterfaceinfobynetid(to_network_id)
cmds.extend(_add_static_routes_flow(from_network_id,prefix,
to_network_id,mac_addr(smac),mac))
if from_network_id == to_network_id:
cmds.extend(_add_host_flow(nexthop,mac_addr.formatter(mac),
from_network_id,smac))
# promote this ARP entry from a host entry to a static entry
entry = (2,time.time() + self._parent.arp_incomplete_timeout,True,mac,prefix)
self._arp_cache[(to_network_id,ip4_addr(nexthop))] = entry
else:
e = ARPRequest(self._connection, ipaddress=ip4_addr(nexthop),
logicalnetworkid=to_network_id, isstatic=True,
cidr=prefix)
arp_request_event.append(e)
for n in currentnetworkforwardinfo:
if n not in lastnetworkforwardinfo or (n in currentnetworkforwardinfo
and currentnetworkforwardinfo[n] != lastnetworkforwardinfo[n]):
for x in currentnetworkforwardinfo[n]:
from_network_id = x[0]
outmac = x[2]
cidr = x[3]
cmds.extend(_add_forward_route(from_network_id,cidr,outmac))
for p in currentlgportinfo:
if p not in lastlgportinfo or (p in lastlgportinfo
and currentlgportinfo[p] != lastlgportinfo[p]):
ipaddress,macaddrss,netid,netkey = currentlgportinfo[p]
# add ARP proxy on the physical port
await call_api(self, 'arpresponder', 'createproxyarp', {'connection': connection,
'arpentries': [(ipaddress,macaddrss,netkey,False)]})
# add host flow
cmds.extend(_add_host_flow(ipaddress,macaddrss,netid,self._parent.inroutermac))
for p in currentexternallgportinfo:
if p not in lastexternallgportinfo or\
(p in lastexternallgportinfo and currentexternallgportinfo[p] != lastexternallgportinfo[p]):
ipaddress,macaddrss,netid,outmac = currentexternallgportinfo[p]
cmds.extend(_add_host_flow(ipaddress,macaddrss,netid,outmac))
if arp_request_event:
for e in arp_request_event:
self.subroutine(self.waitForSend(e))
del arp_request_event[:]
# _getinterfaceinfobynetid above still reads self._lastallrouterinfo,
# so only switch it to the new value at the very end
self._lastallrouterinfo = allrouterinterfaceinfo
await self.execute_commands(connection, cmds)
except Exception:
self._logger.warning("Router flow update failed; ignoring and continuing", exc_info=True)
@defaultconfig
@depend(arpresponder.ARPResponder,icmpresponder.ICMPResponder,objectdb.ObjectDB)
class L3Router(FlowBase):
"""
L3 connectivity with virtual routers.
"""
_tablerequest = (
("l3router", ("l3input",), "router"),
("l3output", ("l3router",), "l3"),
("l2output", ("l3output",), "")
)
# Router responding MAC address for logical ports on this switch
_default_inroutermac = '1a:23:67:59:63:33'
# Router responding MAC address mask for outside network. The MAC address
# is formed with the physical MAC address (NIC MAC address) XORed with this
# mask
_default_outroutermacmask = '0a:00:00:00:00:00'
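# Illustrative example (not part of the original code): with a NIC MAC address of
# aa:bb:cc:dd:ee:ff, XORing it octet-by-octet with the mask above only flips one bit
# in the first octet, giving an outward-facing router MAC of a0:bb:cc:dd:ee:ff.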
# Retry ARP requests with this interval when there is no response
_default_arp_cycle_time = 5
# Prepush ARP entries for all the logical ports which are accessible from the router
_default_prepush = False
# If an ARP entry gets no reply, requests are re-sent every ARP cycle until this timeout;
# a new packet that needs the entry refreshes the timeout
_default_arp_incomplete_timeout = 60
# an ARP entry stays in "COMPLETE" state without sending further ARP requests
# until this time
_default_arp_complete_timeout = 30
# The L3 gateway buffers a packet and waits for an ARP response for up to this time
_default_buffer_packet_timeout = 30
# Interval for refreshing the ARP (MAC-IP mapping) of external IPs (external gateway addresses)
_default_static_host_arp_refresh_interval = 60
# Enable forwarding in this server, so it becomes a forwarding node (also known as an N/S gateway)
# This should be set together with module.ioprocessing.enable_router_forward
_default_enable_router_forward = False
# A forwarding node acquires an IP address from an external network and periodically refreshes
# the information to keep it acquired. This is the refresh interval.
_default_addressinfo_discover_update_time = 150
# A forwarding node announces to other nodes that it is ready to forward their network traffic;
# this is the refresh interval
_default_forwardinfo_discover_update_time = 15
def __init__(self, server):
super(L3Router, self).__init__(server)
self.app_routine = RoutineContainer(self.scheduler)
self.app_routine.main = self._main
self.routines.append(self.app_routine)
self._flowupdater = dict()
async def _main(self):
flowinit = FlowInitialize.createMatcher(_ismatch=lambda x: self.vhostbind is None or
x.vhost in self.vhostbind)
conndown = OpenflowConnectionStateEvent.createMatcher(state=OpenflowConnectionStateEvent.CONNECTION_DOWN,
_ismatch=lambda x: self.vhostbind is None
or x.createby.vhost in self.vhostbind)
while True:
ev, m = await M_(flowinit, conndown)
if m is flowinit:
c = ev.connection
self.app_routine.subroutine(self._init_conn(c))
elif m is conndown:
c = ev.connection
self.app_routine.subroutine(self._uninit_conn(c))
async def _init_conn(self, conn):
if conn in self._flowupdater:
updater = self._flowupdater.pop(conn)
updater.close()
updater = RouterUpdater(conn, self)
self._flowupdater[conn] = updater
updater.start()
ofdef = conn.openflowdef
l3router = self._gettableindex("l3router", conn.protocol.vhost)
l3output = self._gettableindex("l3output", conn.protocol.vhost)
l3router_default_flow = ofdef.ofp_flow_mod(
table_id=l3router,
command = ofdef.OFPFC_ADD,
priority=0,
buffer_id = ofdef.OFP_NO_BUFFER,
match = ofdef.ofp_match_oxm(),
instructions=[
ofdef.ofp_instruction_actions(
type=ofdef.OFPIT_CLEAR_ACTIONS
)
]
)
# as the default miss flow, max_len = miss_send_len
# ofdef.OFPCML_NO_BUFFER is invalid
l3output_default_flow = ofdef.ofp_flow_mod(
table_id=l3output,
command = ofdef.OFPFC_ADD,
priority=0,
buffer_id = ofdef.OFP_NO_BUFFER,
match = ofdef.ofp_match_oxm(),
instructions=[
ofdef.ofp_instruction_actions(
actions=[
ofdef.ofp_action_output(
port=ofdef.OFPP_CONTROLLER,
max_len=ofdef.OFPCML_NO_BUFFER
)
]
)
]
)
await conn.protocol.batch([l3router_default_flow, l3output_default_flow],conn,self.app_routine)
async def _uninit_conn(self, conn):
if conn in self._flowupdater:
updater = self._flowupdater.pop(conn)
updater.close()
|
nuplan/planning/training/data_loader/test/skeleton_test_dataloader.py
|
motional/nuplan-devkit
| 128 |
135511
|
<gh_stars>100-1000
import unittest
import numpy as np
import pytorch_lightning as pl
import ray
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario_builder
from nuplan.planning.scenario_builder.scenario_filter import ScenarioFilter
from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling
from nuplan.planning.training.data_augmentation.kinematic_agent_augmentation import KinematicAgentAugmentor
from nuplan.planning.training.data_loader.datamodule import DataModule
from nuplan.planning.training.data_loader.log_splitter import LogSplitter
from nuplan.planning.training.preprocessing.feature_builders.agents_feature_builder import AgentsFeatureBuilder
from nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder import RasterFeatureBuilder
from nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder import VectorMapFeatureBuilder
from nuplan.planning.training.preprocessing.feature_preprocessor import FeaturePreprocessor
from nuplan.planning.training.preprocessing.features.raster import Raster
from nuplan.planning.training.preprocessing.features.trajectory import Trajectory
from nuplan.planning.training.preprocessing.target_builders.ego_trajectory_target_builder import (
EgoTrajectoryTargetBuilder,
)
from nuplan.planning.training.preprocessing.test.dummy_vectormap_builder import DummyVectorMapBuilder
from nuplan.planning.utils.multithreading.worker_pool import WorkerPool
class SkeletonTestDataloader(unittest.TestCase):
"""
Skeleton with initialized dataloader used in testing.
"""
def setUp(self) -> None:
"""
Set up basic configs.
"""
pl.seed_everything(2022, workers=True)
# Create splitter
self.splitter = LogSplitter(
log_splits={
'train': ["2021.07.16.20.45.29_veh-35_01095_01486"],
'val': ["2021.08.31.14.40.58_veh-40_00285_00668"],
'test': ["2021.10.06.07.26.10_veh-52_00006_00398"],
}
)
# Create feature builder
feature_builders = [
DummyVectorMapBuilder(),
VectorMapFeatureBuilder(radius=20),
AgentsFeatureBuilder(TrajectorySampling(num_poses=4, time_horizon=1.5)),
RasterFeatureBuilder(
map_features={'LANE': 1, 'INTERSECTION': 1.0, 'STOP_LINE': 0.5, 'CROSSWALK': 0.5},
num_input_channels=4,
target_width=224,
target_height=224,
target_pixel_size=0.5,
ego_width=2.297,
ego_front_length=4.049,
ego_rear_length=1.127,
ego_longitudinal_offset=0.0,
baseline_path_thickness=1,
),
]
target_builders = [EgoTrajectoryTargetBuilder(TrajectorySampling(num_poses=10, time_horizon=5.0))]
self.feature_preprocessor = FeaturePreprocessor(
cache_path=None,
force_feature_computation=True,
feature_builders=feature_builders,
target_builders=target_builders,
)
# Extract scenarios
self.scenario_filter = ScenarioFilter(
scenario_types=None,
scenario_tokens=None,
log_names=None,
map_names=None,
num_scenarios_per_type=None,
limit_total_scenarios=150,
expand_scenarios=True,
remove_invalid_goals=False,
shuffle=True,
)
self.augmentors = [
KinematicAgentAugmentor(
trajectory_length=10,
dt=0.1,
mean=[0.3, 0.1, np.pi / 12],
std=[0.5, 0.1, np.pi / 12],
augment_prob=0.5,
)
]
self.scenario_builder = get_test_nuplan_scenario_builder()
def _test_dataloader(self, worker: WorkerPool) -> None:
"""
Tests that the training dataloader can be iterated without errors
"""
scenarios = self.scenario_builder.get_scenarios(self.scenario_filter, worker)
self.assertGreater(len(scenarios), 0)
# Construct data module
batch_size = 4
num_workers = 4
datamodule = DataModule(
feature_preprocessor=self.feature_preprocessor,
splitter=self.splitter,
train_fraction=1.0,
val_fraction=0.1,
test_fraction=0.1,
all_scenarios=scenarios,
augmentors=self.augmentors,
dataloader_params={"batch_size": batch_size, "num_workers": num_workers, "drop_last": True},
)
# Initialize data module
datamodule.setup('fit')
self.assertGreater(len(datamodule.train_dataloader()), 0)
# Run
for features, targets in datamodule.train_dataloader():
# Validate that all features and targets are present
self.assertTrue("raster" in features.keys())
self.assertTrue("vector_map" in features.keys())
self.assertTrue("trajectory" in targets.keys())
# Validate the dimensions
scenario_features: Raster = features["raster"]
trajectory_target: Trajectory = targets["trajectory"]
self.assertEqual(scenario_features.num_batches, trajectory_target.num_batches)
self.assertIsInstance(scenario_features, Raster)
self.assertIsInstance(trajectory_target, Trajectory)
self.assertEqual(scenario_features.num_batches, batch_size)
def tearDown(self) -> None:
"""
Clean up.
"""
if ray.is_initialized():
ray.shutdown()
if __name__ == '__main__':
unittest.main()
|
cmasher/colormaps/waterlily/waterlily.py
|
cclauss/CMasher
| 257 |
135516
|
# %% IMPORTS
# Package imports
from matplotlib.cm import register_cmap
from matplotlib.colors import ListedColormap
# All declaration
__all__ = ['cmap']
# Author declaration
__author__ = "<NAME> (@1313e)"
# Package declaration
__package__ = 'cmasher'
# %% GLOBALS AND DEFINITIONS
# Type of this colormap
cm_type = 'diverging'
# RGB-values of this colormap
cm_data = [[0.12434357, 0.00588452, 0.21400096],
[0.12931129, 0.00675787, 0.22011754],
[0.13427605, 0.00761301, 0.22627813],
[0.13923743, 0.00844618, 0.23248445],
[0.14419489, 0.00925374, 0.23873832],
[0.14914773, 0.01003226, 0.24504157],
[0.15409628, 0.01077599, 0.25139847],
[0.15903865, 0.01148361, 0.25780936],
[0.16397490, 0.01214941, 0.26427895],
[0.16890307, 0.01277188, 0.27080848],
[0.17382209, 0.01334730, 0.27740161],
[0.17873060, 0.01387220, 0.28406219],
[0.18362636, 0.01434494, 0.29079310],
[0.18850717, 0.01476335, 0.29759833],
[0.19336978, 0.01512770, 0.30448066],
[0.19821038, 0.01543943, 0.31144297],
[0.20302543, 0.01569841, 0.31849056],
[0.20780909, 0.01591090, 0.32562507],
[0.21255503, 0.01608416, 0.33284912],
[0.21725536, 0.01622978, 0.34016407],
[0.22190089, 0.01636307, 0.34757095],
[0.22647956, 0.01650931, 0.35506605],
[0.23097774, 0.01669935, 0.36264494],
[0.23537868, 0.01697681, 0.37029727],
[0.23966261, 0.01740014, 0.37800537],
[0.24380764, 0.01804107, 0.38574545],
[0.24778982, 0.01898922, 0.39348282],
[0.25158529, 0.02034719, 0.40117401],
[0.25517266, 0.02222479, 0.40876847],
[0.25853565, 0.02473093, 0.41621127],
[0.26166567, 0.02795975, 0.42345155],
[0.26456273, 0.03198252, 0.43044714],
[0.26723479, 0.03684228, 0.43716929],
[0.26969569, 0.04248896, 0.44360388],
[0.27196284, 0.04848981, 0.44974895],
[0.27405475, 0.05471025, 0.45561291],
[0.27598966, 0.06106965, 0.46121003],
[0.27778467, 0.06750507, 0.46655789],
[0.27945476, 0.07396949, 0.47167607],
[0.28101352, 0.08042854, 0.47658345],
[0.28247268, 0.08685793, 0.48129838],
[0.28384236, 0.09324093, 0.48583798],
[0.28513137, 0.09956639, 0.49021794],
[0.28634701, 0.10582750, 0.49445266],
[0.28749631, 0.11201977, 0.49855477],
[0.28858462, 0.11814153, 0.50253603],
[0.28961712, 0.12419213, 0.50640676],
[0.29059833, 0.13017201, 0.51017629],
[0.29153215, 0.13608239, 0.51385302],
[0.29242208, 0.14192500, 0.51744454],
[0.29327113, 0.14770193, 0.52095768],
[0.29408197, 0.15341549, 0.52439862],
[0.29485717, 0.15906795, 0.52777294],
[0.29559924, 0.16466147, 0.53108570],
[0.29630976, 0.17019880, 0.53434154],
[0.29699110, 0.17568189, 0.53754465],
[0.29764460, 0.18111329, 0.54069888],
[0.29827178, 0.18649525, 0.54380773],
[0.29887444, 0.19182963, 0.54687447],
[0.29945363, 0.19711869, 0.54990205],
[0.30001054, 0.20236443, 0.55289318],
[0.30054631, 0.20756873, 0.55585038],
[0.30106201, 0.21273339, 0.55877600],
[0.30155859, 0.21786013, 0.56167217],
[0.30203696, 0.22295063, 0.56454090],
[0.30249793, 0.22800646, 0.56738403],
[0.30294227, 0.23302915, 0.57020328],
[0.30337068, 0.23802016, 0.57300024],
[0.30378382, 0.24298087, 0.57577639],
[0.30418229, 0.24791260, 0.57853310],
[0.30456679, 0.25281657, 0.58127171],
[0.30493787, 0.25769398, 0.58399340],
[0.30529589, 0.26254606, 0.58669927],
[0.30564131, 0.26737393, 0.58939035],
[0.30597473, 0.27217857, 0.59206767],
[0.30629654, 0.27696100, 0.59473213],
[0.30660696, 0.28172230, 0.59738453],
[0.30690651, 0.28646330, 0.60002572],
[0.30719552, 0.29118493, 0.60265644],
[0.30747413, 0.29588812, 0.60527731],
[0.30774293, 0.30057358, 0.60788908],
[0.30800193, 0.30524222, 0.61049227],
[0.30825149, 0.30989477, 0.61308745],
[0.30849187, 0.31453196, 0.61567516],
[0.30872319, 0.31915455, 0.61825584],
[0.30894582, 0.32376318, 0.62082997],
[0.30915975, 0.32835858, 0.62339790],
[0.30936539, 0.33294131, 0.62596007],
[0.30956268, 0.33751209, 0.62851674],
[0.30975200, 0.34207144, 0.63106829],
[0.30993331, 0.34662001, 0.63361494],
[0.31010692, 0.35115830, 0.63615698],
[0.31027282, 0.35568691, 0.63869459],
[0.31043127, 0.36020633, 0.64122798],
[0.31058231, 0.36471711, 0.64375731],
[0.31072612, 0.36921970, 0.64628271],
[0.31086280, 0.37371461, 0.64880430],
[0.31099249, 0.37820230, 0.65132216],
[0.31111533, 0.38268321, 0.65383636],
[0.31123142, 0.38715778, 0.65634691],
[0.31134094, 0.39162643, 0.65885384],
[0.31144402, 0.39608958, 0.66135714],
[0.31154081, 0.40054761, 0.66385675],
[0.31163148, 0.40500090, 0.66635262],
[0.31171621, 0.40944984, 0.66884467],
[0.31179520, 0.41389476, 0.67133278],
[0.31186865, 0.41833603, 0.67381681],
[0.31193680, 0.42277397, 0.67629662],
[0.31199992, 0.42720890, 0.67877201],
[0.31205825, 0.43164113, 0.68124278],
[0.31211216, 0.43607095, 0.68370870],
[0.31216189, 0.44049867, 0.68616949],
[0.31220796, 0.44492453, 0.68862492],
[0.31225061, 0.44934882, 0.69107462],
[0.31229048, 0.45377175, 0.69351832],
[0.31232786, 0.45819361, 0.69595559],
[0.31236353, 0.46261456, 0.69838612],
[0.31239787, 0.46703487, 0.70080943],
[0.31243174, 0.47145469, 0.70322513],
[0.31246573, 0.47587424, 0.70563272],
[0.31250071, 0.48029366, 0.70803171],
[0.31253759, 0.48471311, 0.71042159],
[0.31257717, 0.48913278, 0.71280175],
[0.31262078, 0.49355270, 0.71517170],
[0.31266920, 0.49797308, 0.71753069],
[0.31272396, 0.50239396, 0.71987816],
[0.31278644, 0.50681539, 0.72221344],
[0.31285779, 0.51123752, 0.72453568],
[0.31293998, 0.51566030, 0.72684425],
[0.31303476, 0.52008375, 0.72913836],
[0.31314366, 0.52450796, 0.73141704],
[0.31326906, 0.52893282, 0.73367953],
[0.31341322, 0.53335830, 0.73592494],
[0.31357856, 0.53778430, 0.73815229],
[0.31376746, 0.54221081, 0.74036052],
[0.31398291, 0.54663764, 0.74254862],
[0.31422803, 0.55106464, 0.74471556],
[0.31450612, 0.55549161, 0.74686019],
[0.31482075, 0.55991833, 0.74898134],
[0.31517574, 0.56434455, 0.75107779],
[0.31557523, 0.56876997, 0.75314825],
[0.31602366, 0.57319425, 0.75519142],
[0.31652582, 0.57761700, 0.75720593],
[0.31708684, 0.58203779, 0.75919038],
[0.31771222, 0.58645611, 0.76114332],
[0.31840787, 0.59087143, 0.76306326],
[0.31918007, 0.59528313, 0.76494869],
[0.32003532, 0.59969060, 0.76679791],
[0.32098080, 0.60409308, 0.76860934],
[0.32202424, 0.60848970, 0.77038148],
[0.32317357, 0.61287960, 0.77211265],
[0.32443695, 0.61726187, 0.77380097],
[0.32582371, 0.62163533, 0.77544516],
[0.32734269, 0.62599895, 0.77704320],
[0.32900405, 0.63035134, 0.77859393],
[0.33081742, 0.63469128, 0.78009540],
[0.33279358, 0.63901718, 0.78154661],
[0.33494302, 0.64332754, 0.78294608],
[0.33727653, 0.64762069, 0.78429266],
[0.33980514, 0.65189480, 0.78558566],
[0.34253965, 0.65614801, 0.78682444],
[0.34549065, 0.66037837, 0.78800872],
[0.34866835, 0.66458383, 0.78913863],
[0.35208231, 0.66876231, 0.79021477],
[0.35574127, 0.67291169, 0.79123827],
[0.35965281, 0.67702987, 0.79221088],
[0.36382312, 0.68111477, 0.79313499],
[0.36825671, 0.68516444, 0.79401368],
[0.37295618, 0.68917704, 0.79485074],
[0.37792196, 0.69315094, 0.79565068],
[0.38315216, 0.69708476, 0.79641869],
[0.38864250, 0.70097742, 0.79716057],
[0.39438623, 0.70482817, 0.79788271],
[0.40037420, 0.70863664, 0.79859195],
[0.40659514, 0.71240286, 0.79929533],
[0.41303585, 0.71612724, 0.80000005],
[0.41968153, 0.71981060, 0.80071327],
[0.42651620, 0.72345411, 0.80144199],
[0.43352311, 0.72705927, 0.80219282],
[0.44068514, 0.73062784, 0.80297193],
[0.44798499, 0.73416184, 0.80378517],
[0.45540582, 0.73766347, 0.80463763],
[0.46293189, 0.74113497, 0.80553339],
[0.47054756, 0.74457873, 0.80647656],
[0.47823856, 0.74799715, 0.80747031],
[0.48599174, 0.75139259, 0.80851720],
[0.49379555, 0.75476731, 0.80961897],
[0.50163802, 0.75812375, 0.81077799],
[0.50951018, 0.76146396, 0.81199484],
[0.51740343, 0.76479002, 0.81327029],
[0.52531010, 0.76810393, 0.81460481],
[0.53322356, 0.77140754, 0.81599852],
[0.54113819, 0.77470259, 0.81745128],
[0.54904863, 0.77799078, 0.81896303],
[0.55695031, 0.78127373, 0.82053353],
[0.56484078, 0.78455267, 0.82216148],
[0.57271595, 0.78782912, 0.82384685],
[0.58057376, 0.79110425, 0.82558859],
[0.58841174, 0.79437931, 0.82738612],
[0.59622922, 0.79765517, 0.82923797],
[0.60402262, 0.80093330, 0.83114456],
[0.61179265, 0.80421427, 0.83310394],
[0.61953824, 0.80749903, 0.83511535],
[0.62725857, 0.81078844, 0.83717801],
[0.63495307, 0.81408331, 0.83929113],
[0.64262139, 0.81738440, 0.84145388],
[0.65026337, 0.82069239, 0.84366540],
[0.65787900, 0.82400793, 0.84592486],
[0.66546844, 0.82733162, 0.84823141],
[0.67303197, 0.83066400, 0.85058419],
[0.68056996, 0.83400559, 0.85298238],
[0.68808289, 0.83735685, 0.85542516],
[0.69557018, 0.84071852, 0.85791228],
[0.70303254, 0.84409099, 0.86044288],
[0.71047142, 0.84747442, 0.86301580],
[0.71788585, 0.85086963, 0.86563112],
[0.72527708, 0.85427682, 0.86828781],
[0.73264583, 0.85769631, 0.87098521],
[0.73999166, 0.86112874, 0.87372319],
[0.74731621, 0.86457417, 0.87650067],
[0.75461912, 0.86803319, 0.87931756],
[0.76190122, 0.87150605, 0.88217322],
[0.76916383, 0.87499286, 0.88506677],
[0.77640537, 0.87849452, 0.88799879],
[0.78362842, 0.88201078, 0.89096784],
[0.79083329, 0.88554198, 0.89397361],
[0.79801871, 0.88908894, 0.89701653],
[0.80518675, 0.89265150, 0.90009549],
[0.81233783, 0.89622995, 0.90321016],
[0.81947230, 0.89982462, 0.90636025],
[0.82659011, 0.90343594, 0.90954568],
[0.83369156, 0.90706423, 0.91276619],
[0.84077761, 0.91070961, 0.91602124],
[0.84784862, 0.91437236, 0.91931059],
[0.85490492, 0.91805279, 0.92263401],
[0.86194682, 0.92175119, 0.92599126],
[0.86897464, 0.92546784, 0.92938214],
[0.87598869, 0.92920304, 0.93280643],
[0.88298922, 0.93295708, 0.93626395],
[0.88997650, 0.93673027, 0.93975451],
[0.89695077, 0.94052289, 0.94327794],
[0.90391222, 0.94433526, 0.94683409],
[0.91086103, 0.94816769, 0.95042279],
[0.91779733, 0.95202051, 0.95404393],
[0.92472121, 0.95589406, 0.95769737],
[0.93163272, 0.95978870, 0.96138299],
[0.93853161, 0.96370486, 0.96510081],
[0.94541738, 0.96764310, 0.96885089],
[0.95229045, 0.97160364, 0.97263292],
[0.95915055, 0.97558695, 0.97644685],
[0.96599731, 0.97959353, 0.98029261],
[0.97283020, 0.98362395, 0.98417016],
[0.97964739, 0.98767922, 0.98807995],
[0.98644895, 0.99175974, 0.99202153],
[0.99323371, 0.99586633, 0.99599488],
[1.00000000, 1.00000000, 1.00000000],
[0.99382469, 0.99580955, 0.99432110],
[0.98760164, 0.99166330, 0.98862986],
[0.98133740, 0.98755931, 0.98291625],
[0.97503997, 0.98349458, 0.97717497],
[0.96871626, 0.97946629, 0.97140333],
[0.96237164, 0.97547209, 0.96560034],
[0.95601185, 0.97150923, 0.95976730],
[0.94963976, 0.96757616, 0.95390497],
[0.94325909, 0.96367088, 0.94801568],
[0.93687235, 0.95979187, 0.94210153],
[0.93048036, 0.95593835, 0.93616375],
[0.92408561, 0.95210878, 0.93020504],
[0.91768886, 0.94830240, 0.92422693],
[0.91129032, 0.94451872, 0.91823051],
[0.90489147, 0.94075664, 0.91221790],
[0.89849256, 0.93701566, 0.90619023],
[0.89209319, 0.93329557, 0.90014800],
[0.88569452, 0.92959547, 0.89409297],
[0.87929669, 0.92591493, 0.88802599],
[0.87289905, 0.92225388, 0.88194719],
[0.86650216, 0.91861170, 0.87585767],
[0.86010629, 0.91498793, 0.86975822],
[0.85371140, 0.91138223, 0.86364935],
[0.84731670, 0.90779464, 0.85753082],
[0.84092280, 0.90422454, 0.85140359],
[0.83452970, 0.90067160, 0.84526808],
[0.82813733, 0.89713553, 0.83912458],
[0.82174544, 0.89361612, 0.83297320],
[0.81535347, 0.89011330, 0.82681372],
[0.80896190, 0.88662654, 0.82064685],
[0.80257057, 0.88315559, 0.81447273],
[0.79617936, 0.87970020, 0.80829144],
[0.78978811, 0.87626012, 0.80210307],
[0.78339667, 0.87283512, 0.79590767],
[0.77700472, 0.86942502, 0.78970513],
[0.77061205, 0.86602960, 0.78349540],
[0.76421875, 0.86264852, 0.77727873],
[0.75782467, 0.85928154, 0.77105510],
[0.75142964, 0.85592843, 0.76482449],
[0.74503349, 0.85258894, 0.75858685],
[0.73863604, 0.84926286, 0.75234213],
[0.73223714, 0.84594994, 0.74609027],
[0.72583661, 0.84264994, 0.73983119],
[0.71943428, 0.83936264, 0.73356480],
[0.71302998, 0.83608780, 0.72729102],
[0.70662354, 0.83282518, 0.72100975],
[0.70021479, 0.82957454, 0.71472089],
[0.69380357, 0.82633564, 0.70842432],
[0.68738971, 0.82310825, 0.70211992],
[0.68097304, 0.81989211, 0.69580759],
[0.67455294, 0.81668717, 0.68948672],
[0.66812958, 0.81349304, 0.68315752],
[0.66170286, 0.81030946, 0.67681990],
[0.65527262, 0.80713617, 0.67047373],
[0.64883872, 0.80397291, 0.66411886],
[0.64240101, 0.80081943, 0.65775515],
[0.63595841, 0.79767582, 0.65138150],
[0.62951158, 0.79454151, 0.64499858],
[0.62306045, 0.79141621, 0.63860627],
[0.61660476, 0.78829970, 0.63220428],
[0.61014331, 0.78519209, 0.62579136],
[0.60367711, 0.78209270, 0.61936849],
[0.59720594, 0.77900127, 0.61293539],
[0.59072831, 0.77591802, 0.60649044],
[0.58424556, 0.77284215, 0.60003495],
[0.57775664, 0.76977369, 0.59356774],
[0.57126149, 0.76671233, 0.58708865],
[0.56476033, 0.76365767, 0.58059777],
[0.55825218, 0.76060971, 0.57409393],
[0.55173780, 0.75756786, 0.56757778],
[0.54521614, 0.75453214, 0.56104804],
[0.53868762, 0.75150206, 0.55450494],
[0.53215208, 0.74847735, 0.54794811],
[0.52560864, 0.74545795, 0.54137634],
[0.51905823, 0.74244322, 0.53479036],
[0.51250000, 0.73943309, 0.52818895],
[0.50593365, 0.73642732, 0.52157144],
[0.49935973, 0.73342539, 0.51493803],
[0.49277805, 0.73042701, 0.50828809],
[0.48618824, 0.72743197, 0.50162076],
[0.47958991, 0.72444003, 0.49493507],
[0.47298359, 0.72145070, 0.48823100],
[0.46636921, 0.71846366, 0.48150783],
[0.45974679, 0.71547856, 0.47476485],
[0.45311640, 0.71249506, 0.46800133],
[0.44647816, 0.70951278, 0.46121648],
[0.43983228, 0.70653133, 0.45440955],
[0.43317907, 0.70355031, 0.44757974],
[0.42651828, 0.70056948, 0.44072551],
[0.41985059, 0.69758833, 0.43384622],
[0.41317687, 0.69460632, 0.42694128],
[0.40649778, 0.69162299, 0.42000968],
[0.39981272, 0.68863823, 0.41304872],
[0.39312423, 0.68565110, 0.40605913],
[0.38643180, 0.68266150, 0.39903778],
[0.37973812, 0.67966850, 0.39198500],
[0.37304275, 0.67667203, 0.38489700],
[0.36634870, 0.67367115, 0.37777377],
[0.35965765, 0.67066532, 0.37061319],
[0.35297165, 0.66765394, 0.36341301],
[0.34629317, 0.66463637, 0.35617090],
[0.33962524, 0.66161188, 0.34888442],
[0.33297150, 0.65857969, 0.34155107],
[0.32633633, 0.65553892, 0.33416823],
[0.31972491, 0.65248860, 0.32673327],
[0.31314248, 0.64942789, 0.31924201],
[0.30659623, 0.64635564, 0.31169136],
[0.30009369, 0.64327077, 0.30407659],
[0.29364480, 0.64017187, 0.29639406],
[0.28726040, 0.63705756, 0.28863847],
[0.28095360, 0.63392625, 0.28080436],
[0.27474016, 0.63077607, 0.27288617],
[0.26863813, 0.62760506, 0.26487647],
[0.26267002, 0.62441075, 0.25676914],
[0.25686147, 0.62119053, 0.24855490],
[0.25124393, 0.61794122, 0.24022510],
[0.24585511, 0.61465902, 0.23177057],
[0.24074014, 0.61133945, 0.22318124],
[0.23595315, 0.60797718, 0.21444629],
[0.23155932, 0.60456563, 0.20555579],
[0.22763671, 0.60109683, 0.19649969],
[0.22427864, 0.59756085, 0.18727215],
[0.22159446, 0.59394541, 0.17787608],
[0.21970884, 0.59023557, 0.16832692],
[0.21875529, 0.58641365, 0.15866581],
[0.21885793, 0.58246043, 0.14897700],
[0.22009608, 0.57835850, 0.13940533],
[0.22245044, 0.57409882, 0.13016467],
[0.22576171, 0.56968811, 0.12150790],
[0.22974550, 0.56515118, 0.11365853],
[0.23407808, 0.56052386, 0.10674215],
[0.23848634, 0.55584188, 0.10077283],
[0.24278697, 0.55113355, 0.09568718],
[0.24687702, 0.54641845, 0.09138767],
[0.25070795, 0.54170912, 0.08777129],
[0.25426366, 0.53701323, 0.08474270],
[0.25754688, 0.53233510, 0.08221878],
[0.26056796, 0.52767728, 0.08012845],
[0.26334064, 0.52304119, 0.07841111],
[0.26588045, 0.51842744, 0.07701528],
[0.26820294, 0.51383615, 0.07589709],
[0.27032300, 0.50926715, 0.07501892],
[0.27225402, 0.50472022, 0.07434809],
[0.27400849, 0.50019497, 0.07385633],
[0.27559818, 0.49569090, 0.07351934],
[0.27703238, 0.49120778, 0.07331473],
[0.27832162, 0.48674492, 0.07322459],
[0.27947412, 0.48230192, 0.07323212],
[0.28049758, 0.47787835, 0.07332273],
[0.28139919, 0.47347376, 0.07348371],
[0.28218562, 0.46908766, 0.07370402],
[0.28286261, 0.46471970, 0.07397352],
[0.28343564, 0.46036946, 0.07428352],
[0.28390985, 0.45603654, 0.07462643],
[0.28429006, 0.45172052, 0.07499567],
[0.28458007, 0.44742115, 0.07538470],
[0.28478415, 0.44313801, 0.07578856],
[0.28490602, 0.43887075, 0.07620262],
[0.28494907, 0.43461906, 0.07662274],
[0.28491652, 0.43038262, 0.07704527],
[0.28481132, 0.42616112, 0.07746700],
[0.28463599, 0.42195434, 0.07788471],
[0.28439348, 0.41776190, 0.07829633],
[0.28408628, 0.41358350, 0.07869974],
[0.28371630, 0.40941900, 0.07909240],
[0.28328605, 0.40526800, 0.07947311],
[0.28279736, 0.40113032, 0.07984003],
[0.28225231, 0.39700566, 0.08019202],
[0.28165244, 0.39289384, 0.08052756],
[0.28099980, 0.38879452, 0.08084604],
[0.28029571, 0.38470756, 0.08114612],
[0.27954177, 0.38063269, 0.08142708],
[0.27873954, 0.37656965, 0.08168838],
[0.27789035, 0.37251823, 0.08192932],
[0.27699548, 0.36847822, 0.08214928],
[0.27605604, 0.36444944, 0.08234761],
[0.27507335, 0.36043163, 0.08252408],
[0.27404853, 0.35642457, 0.08267831],
[0.27298263, 0.35242806, 0.08280996],
[0.27187665, 0.34844188, 0.08291875],
[0.27073158, 0.34446585, 0.08300442],
[0.26954834, 0.34049973, 0.08306678],
[0.26832781, 0.33654334, 0.08310564],
[0.26707080, 0.33259648, 0.08312079],
[0.26577808, 0.32865896, 0.08311205],
[0.26445049, 0.32473055, 0.08307943],
[0.26308876, 0.32081105, 0.08302283],
[0.26169362, 0.31690026, 0.08294219],
[0.26026570, 0.31299798, 0.08283742],
[0.25880553, 0.30910406, 0.08270826],
[0.25731388, 0.30521825, 0.08255493],
[0.25579133, 0.30134033, 0.08237742],
[0.25423833, 0.29747015, 0.08217546],
[0.25265547, 0.29360749, 0.08194916],
[0.25104334, 0.28975212, 0.08169866],
[0.24940226, 0.28590390, 0.08142361],
[0.24773284, 0.28206257, 0.08112428],
[0.24603549, 0.27822793, 0.08080061],
[0.24431057, 0.27439981, 0.08045246],
[0.24255862, 0.27057794, 0.08008011],
[0.24077983, 0.26676218, 0.07968318],
[0.23897479, 0.26295224, 0.07926209],
[0.23714362, 0.25914798, 0.07881643],
[0.23528689, 0.25534911, 0.07834662],
[0.23340468, 0.25155548, 0.07785225],
[0.23149749, 0.24776679, 0.07733371],
[0.22956545, 0.24398289, 0.07679069],
[0.22760892, 0.24020347, 0.07622341],
[0.22562811, 0.23642835, 0.07563177],
[0.22362324, 0.23265727, 0.07501578],
[0.22159459, 0.22888997, 0.07437557],
[0.21954224, 0.22512625, 0.07371087],
[0.21746649, 0.22136579, 0.07302195],
[0.21536746, 0.21760838, 0.07230872],
[0.21324527, 0.21385374, 0.07157112],
[0.21110013, 0.21010159, 0.07080928],
[0.20893210, 0.20635167, 0.07002313],
[0.20674127, 0.20260369, 0.06921265],
[0.20452777, 0.19885735, 0.06837794],
[0.20229165, 0.19511235, 0.06751901],
[0.20003290, 0.19136842, 0.06663574],
[0.19775160, 0.18762521, 0.06572831],
[0.19544775, 0.18388240, 0.06479672],
[0.19312130, 0.18013969, 0.06384095],
[0.19077219, 0.17639672, 0.06286104],
[0.18840038, 0.17265315, 0.06185709],
[0.18600578, 0.16890862, 0.06082917],
[0.18358823, 0.16516277, 0.05977729],
[0.18114755, 0.16141524, 0.05870151],
[0.17868358, 0.15766562, 0.05760198],
[0.17619607, 0.15391353, 0.05647879],
[0.17368476, 0.15015856, 0.05533204],
[0.17114930, 0.14640032, 0.05416183],
[0.16858932, 0.14263837, 0.05296828],
[0.16600443, 0.13887229, 0.05175158],
[0.16339415, 0.13510163, 0.05051188],
[0.16075793, 0.13132595, 0.04924932],
[0.15809517, 0.12754478, 0.04796409],
[0.15540519, 0.12375769, 0.04665629],
[0.15268725, 0.11996418, 0.04532612],
[0.14994054, 0.11616378, 0.04397374],
[0.14716414, 0.11235600, 0.04259926],
[0.14435705, 0.10854034, 0.04120279],
[0.14151819, 0.10471630, 0.03977633],
[0.13864637, 0.10088335, 0.03834104],
[0.13574030, 0.09704098, 0.03691515],
[0.13279861, 0.09318862, 0.03549973],
[0.12981983, 0.08932571, 0.03409578],
[0.12680239, 0.08545164, 0.03270415],
[0.12374463, 0.08156576, 0.03132562],
[0.12064478, 0.07766739, 0.02996087],
[0.11750102, 0.07375577, 0.02861046],
[0.11431142, 0.06983006, 0.02727485],
[0.11107399, 0.06588932, 0.02595442],
[0.10778667, 0.06193249, 0.02464948]]
# Create ListedColormap object for this colormap
cmap = ListedColormap(cm_data, name='cmr.waterlily', N=511)
cmap_r = cmap.reversed()
# Register (reversed) cmap in MPL
register_cmap(cmap=cmap)
register_cmap(cmap=cmap_r)
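# Example usage (illustrative sketch, not part of the original module):
# import numpy as np
# import matplotlib.pyplot as plt
# plt.imshow(np.random.rand(32, 32), cmap='cmr.waterlily')  # name registered above
# plt.colorbar()
# plt.show()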
|
openprompt/utils/reproduciblity.py
|
puraminy/OpenPrompt
| 979 |
135526
|
import random
import numpy as np
import torch
from openprompt.utils.logging import logger
from typing import *
def set_seed(seed:Optional[int] = None):
"""set seed for reproducibility
Args:
seed (:obj:`int`): the seed to seed everything for reproducibility. If None, do nothing.
"""
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
logger.info(f"Global seed set to {seed}")
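# Example usage (illustrative sketch, not part of the original module):
# from openprompt.utils.reproduciblity import set_seed
# set_seed(42)  # seeds random, numpy and torch (CPU and CUDA) so runs are repeatable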
|
solutions/problem_101.py
|
ksvr444/daily-coding-problem
| 1,921 |
135529
|
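# Find two primes that sum to the given number (a Goldbach-style decomposition).
# Of the valid pairs, get_primes returns the one with the smallest first prime,
# e.g. get_primes(10) == (3, 7) (see the asserts below).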
def is_prime(num, primes):
for prime in primes:
if prime == num:
return True
if not num % prime:
return False
return True
def get_primes(num):
limit = (num // 2) + 1
candidates = list()
primes = list()
for i in range(2, limit):
if is_prime(i, primes):
primes.append(i)
candidates.append((i, num - i))
new_candidates = list()
for first, second in candidates[::-1]:
if is_prime(second, primes):
primes.append(second)
new_candidates.append((first, second))
return new_candidates[-1]
assert get_primes(4) == (2, 2)
assert get_primes(10) == (3, 7)
assert get_primes(100) == (3, 97)
|
ax/modelbridge/random.py
|
trsvchn/Ax
| 1,803 |
135540
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig, TGenMetadata
from ax.modelbridge.base import ModelBridge
from ax.modelbridge.modelbridge_utils import (
extract_parameter_constraints,
extract_search_space_digest,
get_fixed_features,
parse_observation_features,
transform_callback,
)
from ax.models.random.base import RandomModel
from ax.utils.common.docutils import copy_doc
FIT_MODEL_ERROR = "Model must be fit before {action}."
# pyre-fixme[13]: Attribute `model` is never initialized.
# pyre-fixme[13]: Attribute `parameters` is never initialized.
class RandomModelBridge(ModelBridge):
"""A model bridge for using purely random 'models'.
Data and optimization configs are not required.
This model bridge interfaces with RandomModel.
Attributes:
model: A RandomModel used to generate candidates
(note: this is an awkward use of the word 'model').
parameters: Params found in search space on modelbridge init.
"""
model: RandomModel
parameters: List[str]
def _fit(
self,
model: RandomModel,
search_space: SearchSpace,
observation_features: Optional[List[ObservationFeatures]] = None,
observation_data: Optional[List[ObservationData]] = None,
) -> None:
self.model = model
# Extract and fix parameters from initial search space.
self.parameters = list(search_space.parameters.keys())
@copy_doc(ModelBridge.update)
def update(self, new_data: Data, experiment: Experiment) -> None:
pass # pragma: no cover
def _gen(
self,
n: int,
search_space: SearchSpace,
pending_observations: Dict[str, List[ObservationFeatures]],
fixed_features: ObservationFeatures,
optimization_config: Optional[OptimizationConfig],
model_gen_options: Optional[TConfig],
) -> Tuple[
List[ObservationFeatures],
List[float],
Optional[ObservationFeatures],
TGenMetadata,
]:
"""Generate new candidates according to a search_space."""
# Extract parameter values
search_space_digest = extract_search_space_digest(search_space, self.parameters)
# Get fixed features
fixed_features_dict = get_fixed_features(fixed_features, self.parameters)
# Extract param constraints
linear_constraints = extract_parameter_constraints(
search_space.parameter_constraints, self.parameters
)
# Generate the candidates
X, w = self.model.gen(
n=n,
bounds=search_space_digest.bounds,
linear_constraints=linear_constraints,
fixed_features=fixed_features_dict,
model_gen_options=model_gen_options,
rounding_func=transform_callback(self.parameters, self.transforms),
)
observation_features = parse_observation_features(X, self.parameters)
return observation_features, w.tolist(), None, {}
def _predict(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationData]:
"""Apply terminal transform, predict, and reverse terminal transform on
output.
"""
raise NotImplementedError("RandomModelBridge does not support prediction.")
def _cross_validate(
self,
search_space: SearchSpace,
obs_feats: List[ObservationFeatures],
obs_data: List[ObservationData],
cv_test_points: List[ObservationFeatures],
) -> List[ObservationData]:
raise NotImplementedError
|
tests/system/fashion/test_fashion.py
|
vishalbelsare/jina
| 15,179 |
135556
|
<gh_stars>1000+
import os
import pytest
import jina
from jina import Document
from jina.helloworld.fashion.app import hello_world
from jina.helloworld.fashion.my_executors import *
from jina.parsers.helloworld import set_hw_parser
def check_hello_world_results(html_path: str):
from bs4 import BeautifulSoup
import re
with open(html_path, 'r') as fp:
page = fp.read()
soup = BeautifulSoup(page, 'html.parser')
table = soup.find('table')
rows = table.find_all('tr')
assert len(rows) > 1
for row in rows[1:]:
cols = row.find_all('img')
assert len(cols) > 1 # query + results
evaluation = soup.find_all('h3')[0].text
assert 'Precision@50' in evaluation
assert 'Recall@50' in evaluation
evaluation_results = re.findall(r'\d+\.\d+', evaluation)
assert len(evaluation_results) == 2
# not exact, to avoid flakiness, but accurate enough relative to current results to raise an alarm
assert float(evaluation_results[0]) > 50.0
assert float(evaluation_results[1]) >= 0.5
@pytest.fixture
def helloworld_args(tmpdir):
return set_hw_parser().parse_args(['--workdir', str(tmpdir)])
@pytest.fixture
def query_document():
return Document(content=np.random.rand(28, 28))
root_dir = os.path.abspath(os.path.dirname(jina.__file__))
os.environ['PATH'] += os.pathsep + os.path.join(root_dir, 'helloworld/fashion/')
def test_fashion(helloworld_args, query_document, tmpdir):
"""Regression test for fashion example."""
hello_world(helloworld_args)
check_hello_world_results(os.path.join(str(tmpdir), 'demo.html'))
|
vyapp/areavi.py
|
iogf/vy
| 927 |
135575
|
"""
"""
from vyapp.mixins import DataEvent, IdleEvent
from vyapp.stderr import printd
from tkinter import Text, IntVar
import os
class AreaVi(Text, DataEvent, IdleEvent):
INPUT = None
# Plugins should normally use self.project;
# if that is not set, fall back to HOME.
HOME = ''
def __init__(self, default_filename, *args, **kwargs):
"""
This class receives all Text widget arguments
plus one named default_filename: the filename that
contents are saved to when no filename is specified.
default_filename:
    The default file path where contents are saved.
"""
Text.__init__(self, *args, **kwargs)
DataEvent.__init__(self, self)
IdleEvent.__init__(self, self)
self.setup = dict()
# Maybe this should be abspath(default_filename)?
self.default_filename = default_filename
# The file's path and name.
self.filename = default_filename
self.extension = os.path.splitext(self.filename)
self.mark_set('(CURSOR_LAST_COL)', '1.0')
self.charset = 'utf-8'
self.map = {}
self.db = {}
self.project = ''
self.assoc_c = 0
# The character used for indentation.
self.tabchar = ' '
self.tabsize = 4
def set_input(e):
AreaVi.INPUT = e.widget
self.hook('AreaVi', '-1', '<FocusIn>', set_input)
def settab(self, tabsize, tabchar):
self.tabchar = tabchar
self.tabsize = tabsize
def indent(self):
self.edit_separator()
self.insert('insert', self.tabchar * self.tabsize)
def update_map(self, namespace, map):
scheme = self.map.setdefault(namespace, {})
scheme.update(map)
def chmode(self, id):
"""
This method changes the AreaVi instance's mode.
It receives one parameter named id, the mode name.
area = AreaVi('None')
area.chmode('INSERT')
The code above puts area in INSERT mode.
"""
opt = self.setup[id]
self.id = id
mode0 = 'mode%s-1' % self
mode1 = 'mode%s%s' % (self, id)
if opt: self.bindtags((mode0, mode1, self, 'Text', '.'))
else: self.bindtags((mode0, mode1, self, '.'))
self.event_generate('<<Chmode>>')
self.event_generate('<<Chmode-%s>>' % id)
def add_mode(self, id, opt=False):
"""
It adds a new mode. The opt argument controls whether events are
propagated to the internal text widget callbacks.
def install(area):
    area.add_mode('MODE')
The code above adds a mode named MODE to the AreaVi instance.
def install(area):
    area.add_mode('TYPING', opt=True)
The code above adds a mode named 'TYPING' in which the content of the
AreaVi instance can be edited: keystrokes that map to printable
characters are inserted into the AreaVi instance that has focus.
"""
self.setup[id] = opt
def del_mode(self, id):
"""
"""
pass
def hook(self, namespace, id, seq, callback, add=True):
"""
This method hooks a callback to an event sequence within a given
namespace and mode:
def callback(event):
    event.widget.insert('insert', 'An event happened!')
def install(area):
    area.hook('AreaVi', 'INSERT', '<Key-i>', callback)
In the example above, whenever the <Key-i> event happens in INSERT mode,
the function named callback is called with the event object.
"""
scheme = self.map.get(namespace, {})
for id, seq in scheme.get((id, seq), ((id, seq), )):
self.hook_class(id, seq, callback, add)
def hook_class(self, id, seq, callback, add=True):
modn = 'mode%s%s' % (self, id)
if self.bind_class(modn, seq):
printd('Warning: %s %s already bound!' % (id, seq))
self.bind_class(modn, seq, callback, add)
def unhook(self, id, seq):
"""
The opposite of AreaVi.hook.
area.unhook('mode', '<Event>')
"""
mode = 'mode%s%s' % (self, id)
self.unbind_class(mode, seq)
def install(self, namespace, *args):
"""
It is a shorthand for AreaVi.hook. It is used as follows:
def install(area):
area.install(namespace, ('MODE1', '<Event1>', callback1),
('MODE2', '<Event2>', callback2),
('MODE3', '<Event3>', callback3), ...)
"""
for ind in args:
self.hook(namespace, *ind)
def uninstall(self, *args):
"""
The opposite of AreaVi.install.
area.uninstall(('mode', '<Event>'), ...)
"""
for id, seq in args:
    self.unhook(id, seq)
def append(self, data, *args):
"""
This method is used to insert data to the end of the AreaVi instance widget
and place the cursor at the end of the data that was appended. It makes the cursor
visible.
"""
# This is sort of odd, it seems that
# I have to add -1l for it to work.
# It shouldn't be necessary.
index0 = self.index('end -1l')
self.insert('end', data)
for ind in args:
self.tag_add(ind, index0, 'end -1c')
# self.mark_set('insert', 'end')
self.see('insert')
def get_assoc_data(self, index='insert'):
lst = (self.db[ind] for ind in self.tag_names(index)
if 'ASSOC_DATA' in ind)
return lst
def set_assoc_data(self, index0, index1, data):
id = '(ASSOC_DATA-%s)' % self.assoc_c
self.tag_add(id, index0, index1)
self.assoc_c = self.assoc_c + 1
self.db[id] = data
return id
def reset_assoc_data(self):
for ind in self.db.keys():
self.tag_delete(ind)
self.db.clear()
def tags_config(self, config):
for indi, indj in config.items():
self.tag_config(indi, **indj)
def tag_swap(self, name, index0, index1, *args):
"""
It removes a given tag from index0 to index1 and re-adds
the tag to the ranges of text delimited in args.
Example:
DATA_X = 'It is black.\n'
DATA_Y = 'It is blue.\n'
text = Text()
text.pack()
text.insert('1.0', DATA_X)
text.insert('2.0', DATA_Y)
text.tag_add('X', '1.0', '1.0 lineend')
text.tag_add('Y', '2.0', '2.0 lineend')
text.tag_config('X', background='black')
text.tag_config('Y', foreground='blue')
text.tag_swap('X', '1.0', 'end', ('2.0', '2.0 lineend'))
It removes the X tag from '1.0' to 'end' then adds
the X tag to the range '2.0' '2.0 lineend'.
"""
self.tag_remove(name, index0, index1)
for indi, indj in args:
self.tag_add(name, indi, indj)
def indexref(self, index='insert'):
"""
This is a shorthand function. It is used to convert a Text index
into two integers:
a, b = area.indexref('insert')
Now, a and b can be manipulated
as numbers.
"""
a, b = self.index(index).split('.')
return int(a), int(b)
def setcur(self, line, col='0'):
"""
It is used to set the cursor position at a given index using line
and col.
"""
self.mark_set('insert', '%s.%s' % (line, col))
self.see('insert')
def indexsplit(self, index):
"""
Just a shorthand for:
a, b = '2.3'.split('.')
a, b = int(a), int(b)
"""
a, b = index.split('.')
return int(a), int(b)
def seecur(self, index):
"""
Just a shorthand for:
area.mark_set('insert', index)
area.see('insert')
"""
self.mark_set('insert', index)
self.see('insert')
def down(self):
"""
It sets the cursor position one line down.
"""
# I have to use 'end -1l linestart' since it seems the 'end' tag
# corresponds to one line after the last visible line.
# So last line lineend != 'end'.
is_end = self.compare('insert linestart', '!=', 'end -1l linestart')
if not is_end: return
a, b = self.indexref('(CURSOR_LAST_COL)')
c, d = self.indexref()
self.setcur(c + 1, b)
def up(self):
"""
It sets the cursor one line up.
"""
is_start = self.compare('insert linestart', '!=', '1.0')
if not is_start: return
a, b = self.indexref('(CURSOR_LAST_COL)')
c, d = self.indexref()
self.setcur(c - 1, b)
def left(self):
"""
It moves the cursor one character left.
"""
self.mark_set('insert', 'insert -1c')
# The mark used by self.down, self.up.
self.mark_set('(CURSOR_LAST_COL)', 'insert')
def right(self):
"""
It moves the cursor one character right.
"""
self.mark_set('insert', 'insert +1c')
# The mark used by self.down, self.up.
self.mark_set('(CURSOR_LAST_COL)', 'insert')
def rmsel(self, index0, index1):
"""
It removes the tag sel from the range that is delimited by index0 and index1
regardless of whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_remove('sel', index2, index3)
def addsel(self, index0, index1):
"""
It adds the tag sel to the range delimited by index0 and index1 regardless
of whether index0 <= index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_add('sel', index2, index3)
def min(self, index0, index1):
"""
It returns the min between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index0
else:
return index1
def max(self, index0, index1):
"""
It returns the max between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index1
else:
return index0
def clear_selection(self):
"""
Unselect all text.
"""
try:
self.tag_remove('sel',
'sel.first', 'sel.last')
except Exception:
pass
def cpsel(self, sep=''):
"""
Copy selected text to the clipboard.
"""
data = self.join_ranges('sel', sep)
self.clipboard_clear()
self.clipboard_append(data)
self.tag_remove('sel', 'sel.first', 'sel.last')
def ctsel(self, sep=''):
"""
It cuts the selected text.
"""
data = self.join_ranges('sel', sep)
self.clipboard_clear()
self.clipboard_append(data)
self.edit_separator()
self.swap_ranges('sel', '', '1.0', 'end')
def toggle_range(self, name, index0, index1):
"""
Toggle tag name in the range defined by index0 and index1.
The tag is added to the range if it is not already mapped to it;
otherwise the tag is removed from the range.
"""
index2 = index0
index0 = self.min(index0, index1)
index1 = self.max(index2, index1)
map = self.is_tag_range(name, index0, index1)
if map:
self.tag_remove(name, index0, index1)
else:
self.tag_add(name, index0, index1)
def get_word_range(self, index='insert'):
index1 = self.search(r'\W', index, regexp=True, stopindex='%s linestart' % index, backwards=True)
index2 = self.search(r'\W', index, regexp=True, stopindex='%s lineend' % index)
index1 = '%s linestart' % index if not index1 else '%s +1c' % index1
index2 = '%s lineend' % index if not index2 else index2
return index1, index2
def get_seq_range(self, index='insert'):
index1 = self.search(' ', index, regexp=True, stopindex='%s linestart' %index, backwards=True)
index2 = self.search(' ', index, regexp=True, stopindex='%s lineend' % index)
index1 = '%s linestart' % index if not index1 else '%s +1c' % index1
index2= '%s lineend' % index if not index2 else index2
return index1, index2
def get_line(self, index='insert'):
return self.get('%s linestart' % index,
'%s lineend' % index)
def shift_right(self, srow, erow, width, char=' '):
"""
Given a start row and an end row it shifts
a block of text to the right.
This is especially useful when working with
source code files.
"""
self.edit_separator()
for ind in range(srow, erow + 1):
self.insert('%s.0' % ind, width * char)
def shift_left(self, srow, erow, width):
"""
Given a start row and an end row it shifts
a block of text to the left.
This is especially useful when working with
source code files.
"""
self.edit_separator()
for ind in range(srow, erow + 1):
self.delete('%s.0' % ind, '%s.%s' % (ind, width))
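# A minimal usage sketch for shift_right/shift_left above (hypothetical
# AreaVi instance named `area`; the row numbers are arbitrary): indent rows
# 3 through 7 by four spaces, then shift them back.
#
#     area.shift_right(3, 7, 4)
#     area.shift_left(3, 7, 4)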
def collect(self, name, regex, index='1.0', stopindex='end', exact=False,
regexp=True, nocase=False, elide=False, nolinestop=False, step=''):
"""
The code below searches for 'PATTERN' in all selected text of an
AreaVi instance:
for data, pos0, pos1 in area.collect('sel', 'PATTERN'):
pass
"""
# It should be built on top of nextrange.
map = self.tag_ranges(name)
for indi in range(0, len(map) - 1, 2):
seq = self.find(regex, map[indi], map[indi + 1], exact=exact,
regexp=regexp, nocase=nocase, elide=elide, nolinestop=nolinestop,
step=step)
for indj in seq:
yield indj
def replace_ranges(self, name, regex, data, exact=False, regexp=True,
nocase=False, elide=False, nolinestop=False):
"""
It replaces all occurrences of regex in the ranges that are mapped to tag name.
"""
while True:
map = self.tag_nextrange(name, '1.0', 'end')
if not map: break
self.tag_remove(name, *map)
self.replace_all(regex, data, map[0], map[1],
exact, regexp, nocase, elide, nolinestop)
def select_matches(self, name, matches):
""""
It adds a tag to the match ranges from either AreaVi.find or
AreaVi.collect.
name - The tag to be added.
matches - An iterator from AreaVi.find or AreaVi.collect.
"""
for _, index0, index1 in matches:
self.tag_add(name, index0, index1)
def split(self, regex, index='1.0', stopindex='end', *args, **kwargs):
"""
It tokenizes the contents of an AreaVi widget based on a regex.
The *args, **kwargs are the same passed to the AreaVi.find method.
for token, index0, index1 in area.split(PATTERN):
pass
"""
index0 = index
for chk, index1, index2 in self.find(regex, index,
stopindex, *args, **kwargs):
if self.compare(index1, '>', index0):
yield(self.get(index0, index1), index0, index1)
index0 = index2
else:
yield(chk, index2, stopindex)
def find_forwards(self, regex, index='1.0', stopindex='end', exact=False,
regexp=True, nocase=False, elide=False, nolinestop=False, step=''):
"""
"""
if not regex:
raise TypeError('Regex should be non blank!')
while True:
match = self.isearch(regex, index, stopindex,
exact, regexp=regexp, nocase=nocase, elide=elide,
nolinestop=nolinestop)
if match:
yield(match)
else:
break
# To avoid infinite loop when using '$' as regex.
if self.compare(match[2], '==', 'end'):
break
elif self.compare(match[1], '==', match[2]):
index = '%s %s +1c' % (match[2], step)
else:
index = '%s %s' % (match[2], step)
def find(self, regex, index='1.0', stopindex='end', backwards=False,
exact=False, regexp=True, nocase=False, elide=False,
nolinestop=False, step=''):
"""
It returns an iterator of matches. It is based on the Text.search method.
for match, index0, index1 in area.find('pattern'):
pass
The step parameter is used to add a distance between
each one of the matches.
area.find('c+', step='+1l linestart')
Given the text below:
cc1 cc2 cc3
cc4 cc5 cc6
cc7 cc8 cc9
it would match cc1, cc4, cc7.
"""
if backwards:
return self.find_backwards(regex, index, stopindex,
exact, regexp, nocase, elide, nolinestop, step)
else:
return self.find_forwards(regex, index, stopindex,
exact, regexp, nocase, elide, nolinestop, step)
def find_backwards(self, regex, index='end', stopindex='1.0', exact=False,
regexp=True, nocase=False, elide=False, nolinestop=False, step=''):
"""
"""
if not regex:
raise TypeError('Regex should be non blank!')
while True:
match = self.isearch(regex, index, stopindex,
backwards=True, exact=exact, regexp=regexp, nocase=nocase,
elide=elide, nolinestop=nolinestop)
if match:
yield(match)
else:
break
# This one avoids infinite loop when using '^'
# as regex.
if self.compare(match[1], '==', '1.0'):
break
elif self.compare(match[1], '==', match[2]):
index = '%s %s -1c' % (match[1], step)
else:
index = '%s %s' % (match[1], step)
def isearch(self, pattern, index, stopindex='end', forwards=None,
backwards=None, exact=None, regexp=None, nocase=None,
count=None, elide=None, nolinestop=None):
"""
A search shortcut: it returns the matched chunk, the initial position,
and the end position.
"""
count = IntVar()
index = self.search(pattern, index, stopindex,
forwards, backwards, exact, regexp, nocase, count=count,
elide=elide, nolinestop=nolinestop)
if not index: return
len = count.get()
tmp = '%s +%sc' % (index, len)
chunk = self.get(index, tmp)
pos0 = self.index(index)
pos1 = self.index('%s +%sc' % (index, len))
return chunk, pos0, pos1
def search(self, pattern, index, stopindex='end', forwards=None,
backwards=None, exact=None, regexp=None, nocase=None,
count=None, elide=None, nolinestop=None):
"""
Standard search method, but with support for the nolinestop
option which is new in tk 8.5 but not supported by tkinter out
of the box.
"""
args = [self._w, 'search']
if forwards: args.append('-forwards')
if backwards: args.append('-backwards')
if exact: args.append('-exact')
if regexp: args.append('-regexp')
if nocase: args.append('-nocase')
if elide: args.append('-elide')
if nolinestop: args.append("-nolinestop")
if count: args.append('-count'); args.append(count)
if pattern and pattern[0] == '-': args.append('--')
args.append(pattern)
args.append(index)
if stopindex: args.append(stopindex)
return str(self.tk.call(tuple(args)))
def ipick(self, name, regex, index='insert', stopindex='end',
verbose=False, backwards=None, exact=None, regexp=True,
nocase=None, elide=None, nolinestop=None):
"""
"""
# Removing the tag forces the search to start from index.
if verbose: self.tag_remove(name, '1.0', 'end')
if not backwards: ranges = self.tag_nextrange(name, index, 'end')
else: ranges = self.tag_prevrange(name, index, '1.0')
if ranges: index0, index1 = ranges[:2]
else: index0 = index1 = index
index = self.isearch(regex, index=index0 if backwards else index1,
stopindex=stopindex, backwards=backwards, exact=exact, regexp=regexp,
nocase=nocase, elide=elide, nolinestop=nolinestop)
if not index: return
_, start, end = index
self.mark_set('insert', start if backwards else end)
self.see('insert')
self.tag_remove(name, '1.0', 'end')
self.tag_add(name, start, end)
return start, end
def replace(self, regex, data, index=None, stopindex=None,
forwards=None, backwards=None, exact=None, regexp=True,
nocase=None, elide=None, nolinestop=None):
"""
It replaces one occurrence of regex with data. The data argument may also
be a callable that receives the matched chunk and its range and returns
the replacement text.
If no further replacement can be performed it returns None, otherwise
it returns the index and the length of the replacement:
index, length
"""
if not regex:
raise TypeError('Regex should be non blank!')
count = IntVar()
index = self.search(regex, index, stopindex, forwards=forwards,
backwards=backwards, exact=exact, nocase=nocase, nolinestop=nolinestop,
regexp=regexp, elide=elide, count=count)
if not index: return
index0 = self.index('%s +%sc' % (index, count.get()))
if callable(data):
data = data(self.get(index, index0), index, index0)
# A zero-length match with an empty replacement would cause an
# infinite loop in replace_all.
if len(data) == count.get() == 0:
raise TypeError('Badly formed regex!')
self.delete(index, index0)
self.insert(index, data)
return index, len(data)
def replace_all(self, regex, data, index='1.0', stopindex='end',
exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
"""
It replaces all regex matches with data. The data argument may be a callable
object. When it is a callable object it looks like:
def handle(chunk, start, end):
pass
"""
# It avoids overlapping of replacements.
self.mark_set('(REP_STOPINDEX)', stopindex)
while True:
map = self.replace(regex, data, index,
'(REP_STOPINDEX)', exact=exact, nocase=nocase,
nolinestop=nolinestop, regexp=regexp, elide=elide)
if not map:
return self.index('(REP_STOPINDEX)')
index, size = map
index = self.index('%s +%sc' % (index, size))
if self.compare(index, '==', 'end'): break
if self.compare(index, '==', '%s lineend' % index):
index = '%s +1c' % index
def case_pair(self, index, max, start='(', end=')'):
"""
It returns the index of the delimiter that matches the character at
index, or None if that character is neither the start nor the end
delimiter (parentheses by default). A usage sketch follows this method.
"""
char = self.get(index, '%s +1c' % index)
sign, dir = None, None
if char == start:
sign, dir = '+', False
elif char == end:
sign, dir = '-', True
else:
return None
# If we are searching forwards we don't need
# to add 1c.
index0 = '%s %s' % (index, '+1c' if dir else '')
stopindex = self.index('%s %s%sc' % (index, sign, max))
count = 0
matches = self.find(r'\%s|\%s' % (start, end),
index=index0, stopindex=stopindex,
backwards=dir, regexp=True)
for data, pos0, pos1 in matches:
count = count + (1 if data == start else -1)
if not count:
return pos0
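# A minimal usage sketch (hypothetical `area` instance): find the index of
# the delimiter matching the parenthesis under the cursor, searching at most
# 1500 characters away, and highlight it if found.
#
#     pos = area.case_pair('insert', 1500)
#     if pos:
#         area.tag_add('sel', pos, '%s +1c' % pos)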
def clear_data(self):
"""
It clears all text inside an AreaVi instance.
"""
import os
self.delete('1.0', 'end')
self.filename = os.path.abspath(self.default_filename)
self.event_generate('<<ClearData>>')
def load_data(self, filename):
"""
It dumps all text from a file into an AreaVi instance.
filename - Name of the file.
"""
self.filename = os.path.abspath(filename)
_, self.extension = os.path.splitext(self.filename)
self.event_generate('<<Pre-LoadData>>')
self.event_generate('<<Pre-LoadData/*%s>>' % self.extension)
fd = open(self.filename, 'rb')
data = fd.read()
fd.close()
try:
data = data.decode(self.charset)
except UnicodeDecodeError:
self.charset = ''
self.delete('1.0', 'end')
self.insert('end', data)
self.mark_set('insert', '1.0')
self.see('insert')
self.event_generate('<<LoadData>>')
self.event_generate('<<Load/*%s>>' % self.extension)
def decode(self, name):
"""
Used to change the AreaVi charset and reload the current file.
"""
self.charset = name
self.load_data(self.filename)
def save_data(self):
"""
It saves the current text content to the current file.
"""
_, self.extension = os.path.splitext(self.filename)
self.event_generate('<<Pre-SaveData>>')
self.event_generate('<<Pre-Save/*%s>>' % self.extension)
data = self.get('1.0', 'end -1c')
data = data.encode(self.charset)
fd = open(self.filename, 'wb')
fd.write(data)
fd.close()
self.event_generate('<<SaveData>>')
self.event_generate('<<Save/*%s>>' % self.extension)
def save_data_as(self, filename):
"""
It saves the content of the given AreaVi instance into
a file whose name is specified in filename.
filename - Name of the file to save the data.
"""
self.filename = filename
self.save_data()
def is_tag_range(self, name, index0, index1):
"""
Consider:
area.tag_add('tag', '2.0', '5.0')
# It returns True.
area.is_tag_range('tag', '2.0', '3.0')
# It returns False.
area.is_tag_range('tag', '1.0', '2.0')
"""
ranges = self.tag_ranges(name)
for ind in range(0, len(ranges) - 1, 2):
if self.is_subrange(index0, index1, ranges[ind].string,
ranges[ind + 1].string):
return ranges[ind].string, ranges[ind + 1].string
def is_in_range(self, index, index0, index1):
"""
It returns True if index0 <= index <= index1 otherwise
it returns False.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
r1 = self.compare(index2, '<=', index)
r2 = self.compare(index3, '>=', index)
return r1 and r2
def is_subrange(self, index0, index1, index2, index3):
"""
It returns True if index2 <= index0 <= index1 <= index3, otherwise
it returns False.
"""
r1 = self.is_in_range(index0, index2, index3)
r2 = self.is_in_range(index1, index2, index3)
return r1 and r2
def swap(self, data, index0, index1):
"""
Swap the text in the range index0, index1 for data.
"""
self.delete(index0, index1)
self.insert(index0, data)
def swap_ranges(self, name, data, index0='1.0', index1='end'):
"""
It swaps ranges of text that are mapped to a tag name for data between index0
and index1.
"""
while True:
range = self.tag_nextrange(name, index0, index1)
if not range: break
self.swap(data, *range)
def join_ranges(self, name, sep=''):
"""
Join the ranges of text that correspond to the tag name using a separator.
"""
data = ''
for ind in self.get_ranges(name):
data = data + ind + sep
return data
def get_ranges(self, name):
"""
It returns an iterator whose elements are ranges of text that
correspond to the ranges of the tag name.
"""
ranges = self.tag_ranges(name)
for ind in range(0, len(ranges) - 1, 2):
data = self.get(ranges[ind], ranges[ind + 1])
yield(data)
def tag_prev_occur(self, tag_names, index0, index1, default):
"""
Should be renamed.
"""
for ind in tag_names:
pos = self.tag_prevrange(ind, index0, index1)
if pos: return pos[1]
return default
def tag_next_occur(self, tag_names, index0, index1, default):
"""
Should be renamed.
"""
for ind in tag_names:
pos = self.tag_nextrange(ind, index0, index1)
if pos: return pos[0]
return default
@staticmethod
def areavi_widgets(wid):
"""
This static method receives a widget as argument and returns an iterator
of the AreaVi instances that have the wid parameter as one of their
master widgets. It is used like:
from vyapp.app import root
for ind in AreaVi.areavi_widgets(root):
ind.insert('end', 'FOO')
The code above would insert 'FOO' at the end of all AreaVi widgets
that have root as one of their master widgets.
"""
for ind in wid.winfo_children():
if isinstance(ind, AreaVi):
yield ind
else:
for ind in AreaVi.areavi_widgets(ind):
yield ind
@staticmethod
def get_opened_files(wid):
"""
This method returns a dictionary that maps the filenames of all AreaVi
instances that have wid as master to the instances, like:
from vyapp.app import root
map = area.get_opened_files(root)
Where map is a dictionary like:
map = { '/home/tau/file.c':AreaVi_Instance,
'/home/tau/file.b': AreaVi_Instance}
"""
map = dict()
for ind in AreaVi.areavi_widgets(wid):
map[ind.filename] = ind
return map
@staticmethod
def find_all(wid, regex, index='1.0', stopindex='end', *args, **kwargs):
"""
This method is used to perform pattern searches over all AreaVi instances that have
wid as master. It basically returns an iterator that corresponds to:
from vyapp.app import root
for ind, (match, index0, index1) in area.find_all(root, 'pattern'):
pass
Where ind is the AreaVi widget in which the pattern matched, match is the
matched text, and index0 and index1 are its positions in the text.
"""
for indi in AreaVi.areavi_widgets(wid):
it = indi.find(regex, index, stopindex, *args, **kwargs)
for indj in it:
yield indi, indj
def tag_bounds(self, tag, index='insert'):
range0 = self.tag_nextrange(tag, index)
if range0:
if self.compare(range0[0], '<=', index):
return range0
range1 = self.tag_prevrange(tag, index)
if range1:
if self.compare(index, '<=', range1[1]):
return range1
|
server/auvsi_suas/models/mission_config.py
|
UnmannedAerialSystems/interop
| 175 |
135583
|
"""Mission configuration model."""
import logging
from auvsi_suas.models.fly_zone import FlyZone
from auvsi_suas.models.gps_position import GpsPosition
from auvsi_suas.models.odlc import Odlc
from auvsi_suas.models.stationary_obstacle import StationaryObstacle
from auvsi_suas.models.waypoint import Waypoint
from django.contrib import admin
from django.core import validators
from django.db import models
logger = logging.getLogger(__name__)
class MissionConfig(models.Model):
"""The details for the mission."""
# The home position for use as a reference point.
home_pos = models.ForeignKey(GpsPosition,
related_name="missionconfig_home_pos",
on_delete=models.CASCADE)
# The lost comms RTH/RTL and flight termination position.
lost_comms_pos = models.ForeignKey(
GpsPosition,
related_name="missionconfig_lost_comms_pos",
on_delete=models.CASCADE)
# Valid areas for the UAS to fly.
fly_zones = models.ManyToManyField(FlyZone)
# The waypoints that define the mission waypoint path
mission_waypoints = models.ManyToManyField(
Waypoint, related_name='missionconfig_mission_waypoints')
# The polygon that defines the search grid.
search_grid_points = models.ManyToManyField(
Waypoint, related_name='missionconfig_search_grid_points')
# The judge created objects for detection.
odlcs = models.ManyToManyField(Odlc,
related_name='missionconfig_odlc',
blank=True)
# The last known position of the emergent object.
emergent_last_known_pos = models.ForeignKey(
GpsPosition,
related_name='missionconfig_emergent_last_known_pos',
on_delete=models.CASCADE)
# Off-axis object position.
off_axis_odlc_pos = models.ForeignKey(
GpsPosition,
related_name='missionconfig_off_axis_odlc_pos',
on_delete=models.CASCADE)
# The desired center position of the generated map.
map_center_pos = models.ForeignKey(
GpsPosition,
related_name='missionconfig_map_center_pos',
on_delete=models.CASCADE)
# The desired height in feet of the generated map.
map_height_ft = models.FloatField(validators=[
validators.MinValueValidator(1),
])
# The boundary the air drop and UGV drive must be within.
air_drop_boundary_points = models.ManyToManyField(
Waypoint, related_name='missionconfig_air_drop_boundary_points')
# The air drop position.
air_drop_pos = models.ForeignKey(GpsPosition,
related_name='missionconfig_air_drop_pos',
on_delete=models.CASCADE)
# The position the UGV must drive to.
ugv_drive_pos = models.ForeignKey(
GpsPosition,
related_name='missionconfig_ugv_drive_pos',
on_delete=models.CASCADE)
# The stationary obstacles.
stationary_obstacles = models.ManyToManyField(StationaryObstacle)
def __str__(self):
return 'Mission %d' % self.pk
@admin.register(MissionConfig)
class MissionConfigModelAdmin(admin.ModelAdmin):
raw_id_fields = ('home_pos', 'emergent_last_known_pos',
'off_axis_odlc_pos', 'map_center_pos', 'air_drop_pos')
filter_horizontal = ('fly_zones', 'mission_waypoints',
'search_grid_points', 'odlcs', 'stationary_obstacles')
list_display = (
'pk',
'home_pos',
)
|
neptune/internal/abort.py
|
Raalsky/neptune-client
| 254 |
135659
|
<reponame>Raalsky/neptune-client<filename>neptune/internal/abort.py
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
import psutil
PSUTIL_INSTALLED = True
except ImportError:
PSUTIL_INSTALLED = False
class CustomAbortImpl(object):
def __init__(self, runnable):
self.__runnable = runnable
def abort(self):
self.__runnable()
class DefaultAbortImpl(object):
KILL_TIMEOUT = 5
def __init__(self, pid):
self._pid = pid
@staticmethod
def requirements_installed():
return PSUTIL_INSTALLED
def abort(self):
try:
processes = self._get_process_with_children(psutil.Process(self._pid))
except psutil.NoSuchProcess:
processes = []
for p in processes:
self._abort(p)
_, alive = psutil.wait_procs(processes, timeout=self.KILL_TIMEOUT)
for p in alive:
self._kill(p)
@staticmethod
def _get_process_with_children(process):
try:
return [process] + process.children(recursive=True)
except psutil.NoSuchProcess:
return []
@staticmethod
def _abort(process):
try:
process.terminate()
except psutil.NoSuchProcess:
pass
def _kill(self, process):
for process in self._get_process_with_children(process):
try:
if process.is_running():
process.kill()
except psutil.NoSuchProcess:
pass
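# A minimal usage sketch, not part of the library (the PID value is
# hypothetical): abort a worker process tree by PID, skipping the call
# when psutil is missing.
#
#     if DefaultAbortImpl.requirements_installed():
#         DefaultAbortImpl(pid=12345).abort()
#     else:
#         print('psutil is not installed; cannot abort the process tree')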
|
mayan/apps/mime_types/apps.py
|
bonitobonita24/Mayan-EDMS
| 343 |
135665
|
<filename>mayan/apps/mime_types/apps.py
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.apps import MayanAppConfig
class MIMETypesApp(MayanAppConfig):
name = 'mayan.apps.mime_types'
has_tests = True
verbose_name = _('MIME types')
def ready(self, *args, **kwargs):
super().ready(*args, **kwargs)
|
docs/tools/nav.py
|
pdv-ru/ClickHouse
| 15,577 |
135680
|
<reponame>pdv-ru/ClickHouse
import collections
import datetime
import hashlib
import logging
import os
import mkdocs.structure.nav
import util
def find_first_header(content):
for line in content.split('\n'):
if line.startswith('#'):
no_hash = line.lstrip('#')
return no_hash.split('{', 1)[0].strip()
def build_nav_entry(root, args):
if root.endswith('images'):
return None, None, None
result_items = []
index_meta, index_content = util.read_md_file(os.path.join(root, 'index.md'))
current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title'))
current_title = current_title or index_meta.get('title', find_first_header(index_content))
for filename in os.listdir(root):
path = os.path.join(root, filename)
if os.path.isdir(path):
prio, title, payload = build_nav_entry(path, args)
if title and payload:
result_items.append((prio, title, payload))
elif filename.endswith('.md'):
path = os.path.join(root, filename)
meta = ''
content = ''
try:
meta, content = util.read_md_file(path)
except:
print('Error in file: {}'.format(path))
raise
path = path.split('/', 2)[-1]
title = meta.get('toc_title', find_first_header(content))
if title:
title = title.strip().rstrip('.')
else:
title = meta.get('toc_folder_title', 'hidden')
prio = meta.get('toc_priority', 9999)
logging.debug(f'Nav entry: {prio}, {title}, {path}')
if meta.get('toc_hidden') or not content.strip():
title = 'hidden'
if title == 'hidden':
title = 'hidden-' + hashlib.sha1(content.encode('utf-8')).hexdigest()
if args.nav_limit and len(result_items) >= args.nav_limit:
break
result_items.append((prio, title, path))
result_items = sorted(result_items, key=lambda x: (x[0], x[1]))
result = collections.OrderedDict([(item[1], item[2]) for item in result_items])
if index_meta.get('toc_hidden_folder'):
current_title += '|hidden-folder'
return index_meta.get('toc_priority', 10000), current_title, result
def build_docs_nav(lang, args):
docs_dir = os.path.join(args.docs_dir, lang)
_, _, nav = build_nav_entry(docs_dir, args)
result = []
index_key = None
for key, value in list(nav.items()):
if key and value:
if value == 'index.md':
index_key = key
continue
result.append({key: value})
if args.nav_limit and len(result) >= args.nav_limit:
break
if index_key:
key = list(result[0].keys())[0]
result[0][key][index_key] = 'index.md'
result[0][key].move_to_end(index_key, last=False)
return result
def build_blog_nav(lang, args):
blog_dir = os.path.join(args.blog_dir, lang)
years = sorted(os.listdir(blog_dir), reverse=True)
result_nav = [{'hidden': 'index.md'}]
post_meta = collections.OrderedDict()
for year in years:
year_dir = os.path.join(blog_dir, year)
if not os.path.isdir(year_dir):
continue
result_nav.append({year: collections.OrderedDict()})
posts = []
post_meta_items = []
for post in os.listdir(year_dir):
post_path = os.path.join(year_dir, post)
if not post.endswith('.md'):
raise RuntimeError(f'Unexpected non-md file in posts folder: {post_path}')
meta, _ = util.read_md_file(post_path)
post_date = meta['date']
post_title = meta['title']
if datetime.date.fromisoformat(post_date) > datetime.date.today():
continue
posts.append(
(post_date, post_title, os.path.join(year, post),)
)
if post_title in post_meta:
raise RuntimeError(f'Duplicate post title: {post_title}')
if not post_date.startswith(f'{year}-'):
raise RuntimeError(f'Post date {post_date} doesn\'t match the folder year {year}: {post_title}')
post_url_part = post.replace('.md', '')
post_meta_items.append((post_date, {
'date': post_date,
'title': post_title,
'image': meta.get('image'),
'url': f'/blog/{lang}/{year}/{post_url_part}/'
},))
for _, title, path in sorted(posts, reverse=True):
result_nav[-1][year][title] = path
for _, post_meta_item in sorted(post_meta_items,
reverse=True,
key=lambda item: item[0]):
post_meta[post_meta_item['title']] = post_meta_item
return result_nav, post_meta
def _custom_get_navigation(files, config):
nav_config = config['nav'] or mkdocs.structure.nav.nest_paths(f.src_path for f in files.documentation_pages())
items = mkdocs.structure.nav._data_to_navigation(nav_config, files, config)
if not isinstance(items, list):
items = [items]
pages = mkdocs.structure.nav._get_by_type(items, mkdocs.structure.nav.Page)
mkdocs.structure.nav._add_previous_and_next_links(pages)
mkdocs.structure.nav._add_parent_links(items)
missing_from_config = [file for file in files.documentation_pages() if file.page is None]
if missing_from_config:
files._files = [file for file in files._files if file not in missing_from_config]
links = mkdocs.structure.nav._get_by_type(items, mkdocs.structure.nav.Link)
for link in links:
scheme, netloc, path, params, query, fragment = mkdocs.structure.nav.urlparse(link.url)
if scheme or netloc:
mkdocs.structure.nav.log.debug(
"An external link to '{}' is included in "
"the 'nav' configuration.".format(link.url)
)
elif link.url.startswith('/'):
mkdocs.structure.nav.log.debug(
"An absolute path to '{}' is included in the 'nav' configuration, "
"which presumably points to an external resource.".format(link.url)
)
else:
msg = (
"A relative path to '{}' is included in the 'nav' configuration, "
"which is not found in the documentation files".format(link.url)
)
mkdocs.structure.nav.log.warning(msg)
return mkdocs.structure.nav.Navigation(items, pages)
mkdocs.structure.nav.get_navigation = _custom_get_navigation
|
fastrunner/utils/ding_message.py
|
FuxiongYang/faster
| 227 |
135683
|
<gh_stars>100-1000
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:梨花菜
# @File: ding_message.py
# @Time : 2018/12/21 15:11
# @Email: <EMAIL>
# @Software: PyCharm
from dingtalkchatbot.chatbot import DingtalkChatbot
from fastrunner.utils.parser import format_summary_to_ding
class DingMessage:
"""
Calls the DingTalk robot to send the test results.
"""
def __init__(self, run_type):
self.run_type = run_type
if run_type == 'auto':
webhook = 'https://oapi.dingtalk.com/robot/send?access_token=998422738ca7d32f8641e9369da7f1b5545aa09c8fcec5ae17324e609c5d1af0'
# webhook = 'https://oapi.dingtalk.com/robot/send?access_token=<KEY>' # debugging robot
elif run_type == 'deploy':
webhook = 'https://oapi.dingtalk.com/robot/send?access_token=<KEY>'
# webhook = 'https://oapi.dingtalk.com/robot/send?access_token=<KEY>' # debugging robot
self.robot = DingtalkChatbot(webhook)
def send_ding_msg(self, summary, report_name=None):
msg_and_fail_count = format_summary_to_ding('markdown', summary, report_name=report_name)
msg = msg_and_fail_count[0]
fail_count = msg_and_fail_count[1]
title = 'FasterRunner自动化测试报告'
if fail_count == 0:
if self.run_type == 'deploy':
print("deploy_success")
elif self.run_type == 'auto':
self.robot.send_markdown(title=title, text=msg)
else:
if self.run_type == 'deploy':
self.robot.send_markdown(title=title, text=msg, is_at_all=True)
elif self.run_type == 'auto':
receive_msg_mobiles = [18666126234, 18122118571, 13763312220, 15989041619, 18665742877,
13512756535] # List of phone numbers that receive the DingTalk message
at_phone = ''
for phone in [f'@{phone} ' for phone in receive_msg_mobiles]:
at_phone += phone
msg += at_phone
self.robot.send_markdown(title=title, text=msg, at_mobiles=receive_msg_mobiles)
if __name__ == '__main__':
robot = DingMessage('auto')
summary = {'stat':{'testsRun': 2, 'failures': 0, 'errors': 0, 'skipped': 0, 'expectedFailures': 0,
'unexpectedSuccesses': 0, 'successes': 1}}
robot.send_ding_msg(summary)
|
python/paddle_fl/mpc/tests/unittests/privc/test_sigmoid_op.py
|
barrierye/PaddleFL
| 379 |
135688
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module tests the sigmoid_cross_entropy_with_logits op.
"""
import unittest
from multiprocessing import Manager
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from scipy.special import logit
from scipy.special import expit
class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
"""
def setUp(self):
self.op_type = "mpc_sigmoid_cross_entropy_with_logits"
self.init_input_output()
self.inputs = {
'X': self.lazy_share(self.x),
'Label': self.lazy_share(self.label)
}
self.outputs = {'Out': self.lazy_share(self.out)}
def init_input_output(self):
batch_size = 10
num_classes = 4
self.x = logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype("float64"))
self.label = np.random.randint(0, 2, (batch_size, num_classes))
# approximate sigmoid with f(x) = 0 for x < -0.5; x + 0.5 for -0.5 <= x <= 0.5; 1 for x > 0.5
self.out = np.minimum(np.maximum(0, self.x + 0.5), 1)
def test_check_output(self):
place = core.CPUPlace()
self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self):
place = core.CPUPlace()
# TODO max_relative_error is too large, find reason
self.check_grad_with_place(place, ['X'], "Out", max_relative_error = 50)
if __name__ == '__main__':
unittest.main()
|
data/atis_Intent_Detection_and_Slot_Filling/train/check_train_raw_data.py
|
SmallStom/slot_learning
| 418 |
135706
|
label_data = open("label", encoding='utf-8').readlines()
label_data = [x.strip() for x in label_data]
print(len(label_data))
label_kinds = set(label_data)
print(label_kinds)
|
tests/integration/cli/test_test.py
|
gamechanger/dusty
| 421 |
135721
|
<filename>tests/integration/cli/test_test.py
import subprocess
import sys
from ...testcases import DustyIntegrationTestCase
from ...fixtures import busybox_single_app_bundle_fixture
class TestTestCLI(DustyIntegrationTestCase):
def setUp(self):
super(TestTestCLI, self).setUp()
busybox_single_app_bundle_fixture(num_bundles=1)
def test_basic_test_run(self):
result = self.run_command('test --recreate busyboxa test1')
self.assertEqual(self.handler.log_to_client_output.count('TESTS test1 PASSED'), 1)
self.assertEqual(self.handler.log_to_client_output.count('OK'), 1)
self.assertTrue('Running commands to create new image:' in result)
def test_basic_test_args(self):
result = self.run_command('test --recreate busyboxa test3')
self.assertEqual(self.handler.log_to_client_output.count('var\n'), 0)
self.assertEqual(self.handler.log_to_client_output.count('etc\n'), 0)
self.assertEqual(self.handler.log_to_client_output.count('sbin\n'), 0)
self.handler.log_to_client_output = ''
self.run_command('test --recreate busyboxa test3 /')
self.assertEqual(self.handler.log_to_client_output.count('var\n'), 1)
self.assertEqual(self.handler.log_to_client_output.count('etc\n'), 1)
self.assertEqual(self.handler.log_to_client_output.count('sbin\n'), 1)
def test_basic_test_all(self):
result = self.run_command('test --recreate busyboxa all')
self.assertEqual(self.handler.log_to_client_output.count('TESTS PASSED'), 1)
self.assertEqual(self.handler.log_to_client_output.count('OK'), 2)
self.assertTrue('Running commands to create new image:' in result)
def test_basic_test_no_recreate(self):
result = self.run_command('test --recreate busyboxa test1')
self.assertTrue('Running commands to create new image:' in result)
self.handler.log_to_client_output = ''
result = self.run_command('test busyboxa test1')
self.assertFalse('Running commands to create new image:' in result)
self.assertEqual(self.handler.log_to_client_output.count('TESTS test1 PASSED'), 1)
self.assertEqual(self.handler.log_to_client_output.count('OK'), 1)
|
lib/pymedphys/_experimental/vendor/pylinac_vendored/winstonlutz.py
|
ethanio12345/pymedphys
| 207 |
135756
|
# Copyright (c) 2019-2020 <NAME>
# Copyright (c) 2014-2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Adaptations made by <NAME> to <NAME>'s original work
# Adapted from https://github.com/jrkerns/pylinac/tree/698254258ff4cb87812840c42b34c93ae32a4693
# Changes to revert to v2.2.6 code determined from https://github.com/jrkerns/pylinac/compare/v2.2.6...v2.2.7#diff-49572d03390f5858885f645e7034ff24
# and https://github.com/jrkerns/pylinac/blob/v2.2.6/pylinac/winston_lutz.py
# Work to make WLImage work for v2.3.2 had its __init__ method monkey
# patched from the following code:
# <https://github.com/jrkerns/pylinac/blob/14a5296ae4ee0ecb01865d08f15070c82e19fc45/pylinac/winston_lutz.py#L594-L612>
"""The functions here have been either 'monkey patched' or vendored from
pylinac. They are not a replacement for using pylinac directly.
These allow for simultaneous use of pylinac wlutz algorithms from
version 2.2.6, 2.2.7, and 2.3.2. They also allow for use with image
arrays instead of DICOM files on disk.
These are designed to be used as an "independent" check of PyMedPhys'
internal WLutz algorithm. They should not be used as a standalone tool;
instead, if that is what is desired, pylinac itself should be used
directly.
"""
import functools
from typing import List, Tuple
from pymedphys._imports import numpy as np
from pymedphys._imports import scipy, skimage
from pymedphys._experimental.vendor.pylinac_vendored._pylinac_installed import (
pylinac as _pylinac_installed,
)
from .core import geometry as _vendor_geometry
from .core import image as _vendor_image
from .core import mask as _vendor_mask
from .core import profile as _vendor_profile
GANTRY = "Gantry"
COLLIMATOR = "Collimator"
COUCH = "Couch"
COMBO = "Combo"
EPID = "Epid"
REFERENCE = "Reference"
ALL = "All"
class WLImageCurrent:
"""This is a composition of pylinac's WLImage class and its ImageArray class.
It is designed to be able to support raw image in-memory arrays
instead of DICOM files on disk as the original WLImage class
required.
See the following issue where this API was proposed upstream
but for now has not been implemented:
<https://github.com/jrkerns/pylinac/issues/277>
"""
def __init__(self, array, *, dpi=None, sid=None, dtype=None):
"""Adapted from
<https://github.com/jrkerns/pylinac/blob/14a5296ae4ee0ecb01865d08f15070c82e19fc45/pylinac/winston_lutz.py#L594-L612>
"""
self._array_image = _pylinac_installed.image.ArrayImage(
array, dpi=dpi, sid=sid, dtype=dtype
)
self._array_image.check_inversion_by_histogram(percentiles=(0.01, 50, 99.99))
self._array_image._clean_edges = (
_pylinac_installed.winston_lutz.WLImage._clean_edges # pylint: disable = protected-access
)
self._array_image._clean_edges(self._array_image)
self._array_image.ground()
self._array_image.normalize()
self._array_image.rad_field_bounding_box = None
self._field_cax = None
self._bb = None
self._array_image.find_field_centroid = (
_pylinac_installed.winston_lutz.WLImage._find_field_centroid # pylint: disable = protected-access
)
self._array_image.find_bb = (
_pylinac_installed.winston_lutz.WLImage._find_bb # pylint: disable = protected-access
)
def set_bounding_box_by_padding(self, padding):
dims = np.shape(self._array_image.array)
self._array_image.rad_field_bounding_box = [
int(padding[1]),
int(dims[0] - padding[1]),
int(padding[0]),
int(dims[1] - padding[0]),
]
def _run_field_finding(self):
(
self._field_cax,
self._array_image.rad_field_bounding_box,
) = self._array_image.find_field_centroid(self._array_image)
@property
def field_cax(self):
if self._field_cax is None:
self._run_field_finding()
return self._field_cax
@property
def bb(self):
if self._bb is None:
if self._array_image.rad_field_bounding_box is None:
self._run_field_finding()
# print(
# "Bounding box found:"
# "\n\n===================\n\n"
# f"{self._array_image.rad_field_bounding_box}"
# "\n\n===================\n\n"
# )
self._bb = self._array_image.find_bb(self._array_image)
return self._bb
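# A minimal usage sketch (the array below is a random stand-in for an EPID
# frame, not a validated phantom image): build a WLImageCurrent from an
# in-memory numpy array and query the field CAX and BB positions.
#
#     import numpy as np
#     synthetic = np.random.rand(512, 512)
#     wl_image = WLImageCurrent(synthetic, dpi=254, sid=1000)
#     print(wl_image.field_cax, wl_image.bb)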
class WLImage_2_2_7(_vendor_image.ArrayImage):
"""Holds individual Winston-Lutz EPID images, image properties, and
automatically finds the field CAX and BB."""
def __init__(self, array, *, dpi=None, sid=None, dtype=None):
super().__init__(array, dpi=dpi, sid=sid, dtype=dtype)
self.check_inversion_by_histogram(percentiles=(0.01, 50, 99.99))
self._clean_edges()
self.field_cax, self.rad_field_bounding_box = self._find_field_centroid()
self._bb = None
@property
def bb(self):
if self._bb is None:
self._bb = self._find_bb()
return self._bb
def _clean_edges(self, window_size: int = 2):
"""Clean the edges of the image to be near the background level."""
def has_noise(self, window_size):
"""Helper method to determine if there is spurious signal at
any of the image edges.
Returns True when the min or max of the edge region departs from the
baseline values by more than 10% of the image range.
"""
near_min, near_max = np.percentile(self.array, [5, 99.5])
img_range = near_max - near_min
top = self[:window_size, :]
left = self[:, :window_size]
bottom = self[-window_size:, :]
right = self[:, -window_size:]
edge_array = np.concatenate(
(top.flatten(), left.flatten(), bottom.flatten(), right.flatten())
)
edge_too_low = edge_array.min() < (near_min - img_range / 10)
edge_too_high = edge_array.max() > (near_max + img_range / 10)
return edge_too_low or edge_too_high
safety_stop = np.min(self.shape) / 10
while has_noise(self, window_size) and safety_stop > 0:
self.remove_edges(window_size)
safety_stop -= 1
def _find_field_centroid(self) -> Tuple[_vendor_geometry.Point, List]:
"""Find the centroid of the radiation field based on a 50%
height threshold.
Returns
-------
p
The CAX point location.
edges
The bounding box of the field, plus a small margin.
"""
min_val, max_val = np.percentile(self.array, [5, 99.9])
threshold_img = self.as_binary((max_val - min_val) / 2 + min_val)
# clean single-pixel noise from outside field
cleaned_img = scipy.ndimage.binary_erosion(threshold_img)
[*edges] = _vendor_mask.bounding_box(cleaned_img)
edges[0] -= 10
edges[1] += 10
edges[2] -= 10
edges[3] += 10
coords = scipy.ndimage.measurements.center_of_mass(threshold_img)
p = _vendor_geometry.Point(x=coords[-1], y=coords[0])
return p, edges
def _find_bb(self) -> _vendor_geometry.Point:
"""Find the BB within the radiation field. Iteratively searches
for a circle-like object by lowering a low-pass threshold value
until found.
Returns
-------
Point
The weighted-pixel value location of the BB.
"""
# get initial starting conditions
hmin, hmax = np.percentile(self.array, [5, 99.9])
spread = hmax - hmin
max_thresh = hmax
lower_thresh = hmax - spread / 1.5
# search for the BB by iteratively lowering the low-pass
# threshold value until the BB is found.
found = False
while not found:
try:
binary_arr = np.logical_and((max_thresh > self), (self >= lower_thresh))
labeled_arr, num_roi = scipy.ndimage.measurements.label(binary_arr)
roi_sizes, _ = np.histogram(labeled_arr, bins=num_roi + 1)
bw_bb_img = np.where(
labeled_arr == np.argsort(roi_sizes)[-3], 1, 0
) # we pick the 3rd largest one because the largest is the background, 2nd is rad field, 3rd is the BB
bb_regionprops = skimage.measure.regionprops(bw_bb_img)[0]
if not is_round(bb_regionprops):
raise ValueError
if not is_modest_size(bw_bb_img, self.rad_field_bounding_box):
raise ValueError
if not is_symmetric(bw_bb_img):
raise ValueError
except (IndexError, ValueError):
max_thresh -= 0.05 * spread
if max_thresh < hmin:
raise ValueError(
"Pylinac 2.2.7: Unable to locate the BB. Make sure the field "
"edges do not obscure the BB and that there is no artifacts in "
"the images."
)
else:
found = True
# determine the center of mass of the BB
inv_img = _vendor_image.ArrayImage(self.array)
# we invert so BB intensity increases w/ attenuation
inv_img.check_inversion_by_histogram(percentiles=(0.01, 50, 99.99))
bb_rprops = skimage.measure.regionprops(bw_bb_img, intensity_image=inv_img)[0]
return _vendor_geometry.Point(
bb_rprops.weighted_centroid[1], bb_rprops.weighted_centroid[0]
)
@property
def epid(self) -> _vendor_geometry.Point:
"""Center of the EPID panel"""
return self.center
@property
def cax2epid_vector(self) -> _vendor_geometry.Vector:
"""The vector in mm from the CAX to the EPID center pixel"""
dist = (self.epid - self.field_cax) / self.dpmm
return _vendor_geometry.Vector(dist.x, dist.y, dist.z)
@property
def cax2bb_distance(self):
"""The scalar distance in mm from the CAX to the BB."""
dist = self.field_cax.distance_to(self.bb)
return dist / self.dpmm
@property
def cax2epid_distance(self):
"""The scalar distance in mm from the CAX to the EPID center pixel"""
return self.field_cax.distance_to(self.epid) / self.dpmm
def is_symmetric(logical_array) -> bool:
"""Whether the binary object's dimensions are symmetric, i.e. a perfect circle. Used to find the BB."""
ymin, ymax, xmin, xmax = _vendor_mask.bounding_box(logical_array)
y = abs(ymax - ymin)
x = abs(xmax - xmin)
if x > max(y * 1.05, y + 3) or x < min(y * 0.95, y - 3):
return False
return True
def is_modest_size(logical_array, field_bounding_box):
"""Decide whether the ROI is roughly the size of a BB; not noise and not an artifact. Used to find the BB."""
bbox = field_bounding_box
rad_field_area = (bbox[1] - bbox[0]) * (bbox[3] - bbox[2])
return rad_field_area * 0.003 < np.sum(logical_array) < rad_field_area * 0.3
def is_round(rprops):
"""Decide if the ROI is circular in nature by testing the filled area vs bounding box. Used to find the BB."""
expected_fill_ratio = np.pi / 4 # area of a circle inside a square
actual_fill_ratio = rprops.filled_area / rprops.bbox_area
return expected_fill_ratio * 1.2 > actual_fill_ratio > expected_fill_ratio * 0.8
def is_round_old(logical_array):
"""Decide if the ROI is circular in nature by testing the filled area vs bounding box. Used to find the BB."""
expected_fill_ratio = np.pi / 4
actual_fill_ratio = _vendor_mask.filled_area_ratio(logical_array)
return expected_fill_ratio * 1.2 > actual_fill_ratio > expected_fill_ratio * 0.8
class WLImage_2_2_6(WLImage_2_2_7):
def _find_bb(self) -> _vendor_geometry.Point:
"""Find the BB within the radiation field. Iteratively searches for a circle-like object
by lowering a low-pass threshold value until found.
Returns
-------
Point
The weighted-pixel value location of the BB.
"""
# get initial starting conditions
hmin, hmax = np.percentile(self.array, [5, 99.9])
spread = hmax - hmin
max_thresh = hmax
lower_thresh = hmax - spread / 1.5
# search for the BB by iteratively lowering the low-pass threshold value until the BB is found.
found = False
while not found:
try:
binary_arr = np.logical_and((max_thresh > self), (self >= lower_thresh))
labeled_arr, num_roi = scipy.ndimage.measurements.label(binary_arr)
roi_sizes, _ = np.histogram(labeled_arr, bins=num_roi + 1)
bw_bb_img = np.where(labeled_arr == np.argsort(roi_sizes)[-3], 1, 0)
if not is_round_old(bw_bb_img):
raise ValueError
if not is_modest_size(bw_bb_img, self.rad_field_bounding_box):
raise ValueError
if not is_symmetric(bw_bb_img):
raise ValueError
except (IndexError, ValueError):
max_thresh -= 0.05 * spread
if max_thresh < hmin:
raise ValueError(
"Pylinac 2.2.6: Unable to locate the BB. Make sure the field "
"edges do not obscure the BB and that there is no artifacts in "
"the images."
)
else:
found = True
# determine the center of mass of the BB
inv_img = _vendor_image.ArrayImage(self.array)
inv_img.invert()
x_arr = np.abs(np.average(bw_bb_img, weights=inv_img, axis=0))
x_com = _vendor_profile.SingleProfile(x_arr).fwxm_center(interpolate=True)
y_arr = np.abs(np.average(bw_bb_img, weights=inv_img, axis=1))
y_com = _vendor_profile.SingleProfile(y_arr).fwxm_center(interpolate=True)
return _vendor_geometry.Point(x_com, y_com)
@functools.lru_cache()
def get_version_to_class_map():
VERSION_TO_CLASS_MAP = {
"2.2.6": WLImage_2_2_6,
"2.2.7": WLImage_2_2_7,
_pylinac_installed.__version__: WLImageCurrent,
}
return VERSION_TO_CLASS_MAP
def get_latest_wlimage():
return WLImageCurrent
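# A minimal usage sketch: pick the vendored WLImage implementation that
# matches a requested pylinac algorithm version, falling back to the class
# wrapping the currently installed pylinac.
#
#     version_map = get_version_to_class_map()
#     WLImageClass = version_map.get("2.2.6", get_latest_wlimage())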
|
merf/merf_test.py
|
ritviksahajpal/merf
| 189 |
135765
|
<filename>merf/merf_test.py
"""
MERF Unit Tests
Run with this command for verbose output:
> python tests.py -v
:copyright: 2017 Manifold, Inc.
:author: <NAME> <<EMAIL>>
"""
import pickle
import unittest
import numpy as np
import pandas as pd
from lightgbm import LGBMRegressor
from numpy.testing import assert_almost_equal
from sklearn.exceptions import NotFittedError
from merf import MERF
from utils import MERFDataGenerator
from viz import plot_merf_training_stats
class DataGenerationTest(unittest.TestCase):
def test_create_cluster_sizes(self):
clusters = MERFDataGenerator.create_cluster_sizes_array([1, 2, 3], 1)
self.assertListEqual(clusters, [1, 2, 3])
clusters = MERFDataGenerator.create_cluster_sizes_array([30, 20, 7], 3)
self.assertListEqual(clusters, [30, 30, 30, 20, 20, 20, 7, 7, 7])
def test_generate_samples(self):
dg = MERFDataGenerator(m=0.6, sigma_b=4.5, sigma_e=1)
df, ptev, prev = dg.generate_samples([1, 2, 3])
# check columns
self.assertListEqual(df.columns.tolist(), ["y", "X_0", "X_1", "X_2", "Z", "cluster"])
# check length
self.assertEqual(len(df), 6)
# check cluster sizes
self.assertEqual(len(df[df["cluster"] == 0]), 1)
self.assertEqual(len(df[df["cluster"] == 1]), 2)
self.assertEqual(len(df[df["cluster"] == 2]), 3)
def test_generate_split_samples(self):
dg = MERFDataGenerator(m=0.7, sigma_b=2.7, sigma_e=1)
train, test_known, test_new, training_ids, ptev, prev = dg.generate_split_samples([1, 3], [3, 2], [1, 1])
# check all have same columns
self.assertListEqual(train.columns.tolist(), ["y", "X_0", "X_1", "X_2", "Z", "cluster"])
self.assertListEqual(test_known.columns.tolist(), ["y", "X_0", "X_1", "X_2", "Z", "cluster"])
self.assertListEqual(test_new.columns.tolist(), ["y", "X_0", "X_1", "X_2", "Z", "cluster"])
# check length
self.assertEqual(len(train), 4)
self.assertEqual(len(test_known), 5)
self.assertEqual(len(test_new), 2)
# check cluster sizes
self.assertEqual(len(train[train["cluster"] == 0]), 1)
self.assertEqual(len(train[train["cluster"] == 1]), 3)
self.assertEqual(len(test_known[test_known["cluster"] == 0]), 3)
self.assertEqual(len(test_known[test_known["cluster"] == 1]), 2)
self.assertEqual(len(test_new[test_new["cluster"] == 2]), 1)
self.assertEqual(len(test_new[test_new["cluster"] == 3]), 1)
# Check training ids
self.assertListEqual(training_ids.tolist(), [0, 1])
def test_ohe_clusters(self):
training_cluster_ids = np.array([0, 1, 2, 3])
# Training like encoding -- all categories in matrix
X_ohe = MERFDataGenerator.ohe_clusters(
pd.Series([0, 0, 1, 2, 2, 2, 3]), training_cluster_ids=training_cluster_ids
)
# check columns and sums
self.assertListEqual(X_ohe.columns.tolist(), ["cluster_0", "cluster_1", "cluster_2", "cluster_3"])
self.assertListEqual(X_ohe.sum().tolist(), [2, 1, 3, 1])
# New encoding -- no categories in matrix
X_ohe = MERFDataGenerator.ohe_clusters(pd.Series([4, 4, 5, 6, 6, 7]), training_cluster_ids=training_cluster_ids)
# check columns and sums
self.assertListEqual(X_ohe.columns.tolist(), ["cluster_0", "cluster_1", "cluster_2", "cluster_3"])
self.assertListEqual(X_ohe.sum().tolist(), [0, 0, 0, 0])
# Mixed encoding -- some categories in matrix
X_ohe = MERFDataGenerator.ohe_clusters(
pd.Series([1, 1, 3, 0, 0, 4, 5, 6, 6, 7]), training_cluster_ids=training_cluster_ids
)
# check columns and sums
self.assertListEqual(X_ohe.columns.tolist(), ["cluster_0", "cluster_1", "cluster_2", "cluster_3"])
self.assertListEqual(X_ohe.sum().tolist(), [2, 2, 0, 1])
class MERFTest(unittest.TestCase):
def setUp(self):
np.random.seed(3187)
dg = MERFDataGenerator(m=0.6, sigma_b=4.5, sigma_e=1)
train, test_known, test_new, train_cluster_ids, ptev, prev = dg.generate_split_samples([1, 3], [3, 2], [1, 1])
self.X_train = train[["X_0", "X_1", "X_2"]]
self.Z_train = train[["Z"]]
self.clusters_train = train["cluster"]
self.y_train = train["y"]
self.X_known = test_known[["X_0", "X_1", "X_2"]]
self.Z_known = test_known[["Z"]]
self.clusters_known = test_known["cluster"]
self.y_known = test_known["y"]
self.X_new = test_new[["X_0", "X_1", "X_2"]]
self.Z_new = test_new[["Z"]]
self.clusters_new = test_new["cluster"]
self.y_new = test_new["y"]
def test_not_fitted_error(self):
m = MERF()
with self.assertRaises(NotFittedError):
m.predict(self.X_known, self.Z_known, self.clusters_known)
def test_fit_and_predict_pandas(self):
m = MERF(max_iterations=5)
# Train
m.fit(self.X_train, self.Z_train, self.clusters_train, self.y_train)
self.assertEqual(len(m.gll_history), 5)
self.assertEqual(len(m.val_loss_history), 0)
# Predict Known Clusters
yhat_known = m.predict(self.X_known, self.Z_known, self.clusters_known)
self.assertEqual(len(yhat_known), 5)
# Predict New Clusters
yhat_new = m.predict(self.X_new, self.Z_new, self.clusters_new)
self.assertEqual(len(yhat_new), 2)
def test_fit_and_predict_numpy(self):
m = MERF(max_iterations=5)
# Train
m.fit(np.array(self.X_train), np.array(self.Z_train), self.clusters_train, self.y_train)
self.assertEqual(len(m.val_loss_history), 0)
# Predict Known Clusters
yhat_known = m.predict(np.array(self.X_known), np.array(self.Z_known), self.clusters_known)
self.assertEqual(len(yhat_known), 5)
# Predict New Clusters
yhat_new = m.predict(np.array(self.X_new), np.array(self.Z_new), self.clusters_new)
self.assertEqual(len(yhat_new), 2)
def test_type_error(self):
m = MERF(max_iterations=5)
self.assertRaises(
TypeError,
m.fit,
np.array(self.X_train),
np.array(self.Z_train),
np.array(self.clusters_train),
self.y_train,
)
def test_early_stopping(self):
np.random.seed(3187)
# Create a MERF model with a high early stopping threshold
m = MERF(max_iterations=5, gll_early_stop_threshold=0.1)
# Fit
m.fit(self.X_train, self.Z_train, self.clusters_train, self.y_train)
# The number of iterations should be less than max_iterations
self.assertTrue(len(m.gll_history) < 5)
def test_pickle(self):
m = MERF(max_iterations=5)
# Train
m.fit(self.X_train, self.Z_train, self.clusters_train, self.y_train)
# Write to pickle file
with open("model.pkl", "wb") as fin:
pickle.dump(m, fin)
# Read back from pickle file
with open("model.pkl", "rb") as fout:
m_pkl = pickle.load(fout)
# Check that m is not the same object as m_pkl
self.assertIsNot(m_pkl, m)
# Predict Known Clusters
yhat_known_pkl = m_pkl.predict(self.X_known, self.Z_known, self.clusters_known)
yhat_known = m.predict(self.X_known, self.Z_known, self.clusters_known)
assert_almost_equal(yhat_known_pkl, yhat_known)
# Predict New Clusters
yhat_new_pkl = m_pkl.predict(self.X_new, self.Z_new, self.clusters_new)
yhat_new = m.predict(self.X_new, self.Z_new, self.clusters_new)
assert_almost_equal(yhat_new_pkl, yhat_new)
def test_user_defined_fe_model(self):
lgbm = LGBMRegressor()
m = MERF(fixed_effects_model=lgbm, max_iterations=5)
# Train
m.fit(self.X_train, self.Z_train, self.clusters_train, self.y_train)
self.assertEqual(len(m.gll_history), 5)
# Predict Known Clusters
yhat_known = m.predict(self.X_known, self.Z_known, self.clusters_known)
self.assertEqual(len(yhat_known), 5)
# Predict New Clusters
yhat_new = m.predict(self.X_new, self.Z_new, self.clusters_new)
self.assertEqual(len(yhat_new), 2)
def test_validation(self):
lgbm = LGBMRegressor()
m = MERF(fixed_effects_model=lgbm, max_iterations=5)
# Train
m.fit(
self.X_train,
self.Z_train,
self.clusters_train,
self.y_train,
self.X_known,
self.Z_known,
self.clusters_known,
self.y_known,
)
self.assertEqual(len(m.val_loss_history), 5)
# Predict Known Clusters
yhat_known = m.predict(self.X_known, self.Z_known, self.clusters_known)
self.assertEqual(len(yhat_known), 5)
# Predict New Clusters
yhat_new = m.predict(self.X_new, self.Z_new, self.clusters_new)
self.assertEqual(len(yhat_new), 2)
def test_validation_numpy(self):
m = MERF(max_iterations=3)
# Train
m.fit(
np.array(self.X_train),
np.array(self.Z_train),
self.clusters_train,
self.y_train,
np.array(self.X_new),
np.array(self.Z_new),
self.clusters_new,
self.y_new,
)
self.assertEqual(len(m.val_loss_history), 3)
# Predict Known Clusters
yhat_known = m.predict(self.X_known, self.Z_known, self.clusters_known)
self.assertEqual(len(yhat_known), 5)
# Predict New Clusters
yhat_new = m.predict(self.X_new, self.Z_new, self.clusters_new)
self.assertEqual(len(yhat_new), 2)
def test_viz(self):
lgbm = LGBMRegressor()
m = MERF(fixed_effects_model=lgbm, max_iterations=5)
# Train
m.fit(
self.X_train,
self.Z_train,
self.clusters_train,
self.y_train,
self.X_known,
self.Z_known,
self.clusters_known,
self.y_known,
)
plot_merf_training_stats(m)
if __name__ == "__main__":
unittest.main()
|
tracardi/domain/pii.py
|
bytepl/tracardi
| 153 |
135802
|
<gh_stars>100-1000
from typing import Optional, Any
from pydantic import BaseModel
class PII(BaseModel):
"""
Personally identifiable information, or PII, is any data that could
potentially be used to identify a particular person. Examples include a full name,
Social Security number, driver's license number, bank account number,
passport number, and email address.
"""
name: Optional[Any] = None
last_name: Optional[Any] = None
birth_date: Optional[Any] = None
marital_status: Optional[str] = None
email: Optional[Any] = None
telephone: Optional[Any] = None
twitter: Optional[Any] = None
facebook: Optional[Any] = None
whatsapp: Optional[Any] = None
other: Optional[dict] = {}
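# A minimal usage sketch (the field values are made up): pydantic validates
# the provided fields and fills the remaining optional ones with defaults.
#
#     pii = PII(name="Ada", email="ada@example.com")
#     print(pii.dict())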
|
sample_factory/envs/create_env.py
|
eles13/sample-factory
| 320 |
135833
|
from sample_factory.envs.env_registry import global_env_registry
def create_env(full_env_name, cfg=None, env_config=None):
"""
Factory function that creates environment instances.
Matches full_env_name with env family prefixes registered in the REGISTRY and calls make_env_func()
for the first match.
:param full_env_name: complete name of the environment, starting with the prefix of registered environment family,
e.g. atari_breakout, or doom_battle. Passed to make_env_func() for further processing by the specific env family
factory (see doom_utils.py or dmlab_env.py)
:param cfg: namespace with full system configuration, output of argparser (or AttrDict when loaded from JSON)
:param env_config: AttrDict with additional system information:
env_config = AttrDict(worker_index=self.worker_idx, vector_index=vector_idx, env_id=env_id)
:return: environment instance
"""
env_registry = global_env_registry()
env_registry_entry = env_registry.resolve_env_name(full_env_name)
env = env_registry_entry.make_env_func(full_env_name, cfg=cfg, env_config=env_config)
return env
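# A minimal usage sketch (the environment name and empty config namespace
# are assumptions; the name must start with a registered env family prefix,
# e.g. the doom_battle mentioned in the docstring):
#
#     from argparse import Namespace
#     cfg = Namespace()  # normally the argparser output with the full config
#     env = create_env('doom_battle', cfg=cfg, env_config=None)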
|
src/sagemaker_training/__init__.py
|
unoebauer/sagemaker-training-toolkit
| 248 |
135858
|
<gh_stars>100-1000
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This file is executed when the sagemaker_training package is imported."""
from __future__ import absolute_import
# list of errors: To show user error message on the SM Training job page
# [x for x in dir(__builtins__) if 'Error' in x]
_PYTHON_ERRORS_ = [
"ArithmeticError",
"AssertionError",
"AttributeError",
"BlockingIOError",
"BrokenPipeError",
"BufferError",
"ChildProcessError",
"ConnectionAbortedError",
"ConnectionError",
"ConnectionRefusedError",
"ConnectionResetError",
"EOFError",
"EnvironmentError",
"FileExistsError",
"FileNotFoundError",
"FloatingPointError",
"IOError",
"ImportError",
"IndentationError",
"IndexError",
"InterruptedError",
"IsADirectoryError",
"KeyError",
"LookupError",
"MemoryError",
"ModuleNotFoundError",
"NameError",
"NotADirectoryError",
"NotImplementedError",
"OSError",
"OverflowError",
"PermissionError",
"ProcessLookupError",
"RecursionError",
"ReferenceError",
"RuntimeError",
"SyntaxError",
"SystemError",
"TabError",
"TimeoutError",
"TypeError",
"UnboundLocalError",
"UnicodeDecodeError",
"UnicodeEncodeError",
"UnicodeError",
"UnicodeTranslateError",
"ValueError",
"ZeroDivisionError",
"Invalid requirement",
"ResourceExhaustedError",
"OutOfRangeError",
"InvalidArgumentError",
]
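# A minimal sketch of how this list might be applied (not part of the
# toolkit's public API): surface the first known error name found in a
# captured log line.
#
#     log_line = "ModuleNotFoundError: No module named 'torch'"
#     matched = next((err for err in _PYTHON_ERRORS_ if err in log_line), None)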
|
mmf/trainers/core/device.py
|
tirkarthi/mmf
| 3,252 |
135956
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import warnings
from abc import ABC
import torch
from mmf.common.registry import registry
from mmf.utils.distributed import (
broadcast_xla_master_model_param,
get_world_size,
is_xla,
)
from omegaconf import open_dict
logger = logging.getLogger(__name__)
class TrainerDeviceMixin(ABC):
def configure_seed(self) -> None:
seed = self.config.training.seed
if seed is None:
return
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = self.config.training.cudnn_benchmark
# TODO: Review self.device assignment and then override
def configure_device(self) -> None:
if self.config.training.get("device", "cuda") == "xla":
import torch_xla.core.xla_model as xm
self.device = xm.xla_device()
self.distributed = True
self.local_rank = xm.get_local_ordinal()
is_xla = True
else:
is_xla = False
if "device_id" not in self.config:
warnings.warn(
"No 'device_id' in 'config', setting to -1. "
"This can cause issues later in training. Ensure that "
"distributed setup is properly initialized."
)
self.local_rank = -1
else:
self.local_rank = self.config.device_id
self.device = self.local_rank
self.distributed = False
# Will be updated later based on distributed setup
registry.register("global_device", self.device)
if self.config.distributed.init_method is not None:
self.distributed = True
self.device = torch.device("cuda", self.local_rank)
torch.cuda.set_device(self.local_rank)
elif torch.cuda.is_available():
self.device = torch.device("cuda")
torch.cuda.set_device(0)
elif not is_xla:
self.device = torch.device("cpu")
if "rank" not in self.config.distributed:
if torch.distributed.is_available() and torch.distributed.is_initialized():
global_rank = torch.distributed.get_rank()
else:
global_rank = -1
with open_dict(self.config.distributed):
self.config.distributed.rank = global_rank
registry.register("global_device", self.config.distributed.rank)
def parallelize_model(self) -> None:
registry.register("data_parallel", False)
registry.register("distributed", False)
if (
"cuda" in str(self.device)
and torch.cuda.device_count() > 1
and not self.distributed
):
registry.register("data_parallel", True)
self.model = torch.nn.DataParallel(self.model)
if "cuda" in str(self.device) and self.distributed:
registry.register("distributed", True)
set_torch_ddp = True
try:
from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim.oss import OSS
if isinstance(self.optimizer, OSS):
self.model = ShardedDataParallel(self.model, self.optimizer)
set_torch_ddp = False
logger.info("Using FairScale ShardedDataParallel")
except ImportError:
logger.info("Using PyTorch DistributedDataParallel")
warnings.warn(
"You can enable ZeRO and Sharded DDP, by installing fairscale "
+ "and setting optimizer.enable_state_sharding=True."
)
if set_torch_ddp:
self.model = torch.nn.parallel.DistributedDataParallel(
self.model,
device_ids=[self.local_rank],
output_device=self.local_rank,
find_unused_parameters=self.config.training.find_unused_parameters,
)
if is_xla() and get_world_size() > 1:
broadcast_xla_master_model_param(self.model)
|
keystone/identity/backends/resource_options.py
|
10088/keystone
| 615 |
136009
|
<filename>keystone/identity/backends/resource_options.py<gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import resource_options
from keystone.common.validation import parameter_types
from keystone.i18n import _
def _mfa_rules_validator_list_of_lists_of_strings_no_duplicates(value):
# NOTE(notmorgan): This should possibly validate that the auth-types
# are enabled? For now it simply validates the following:
#
# Must be a list of lists, each sub list must be a list of strings
# e.g. [['str1', 'str2'], ['str3', 'str4']]
# No sub-list may be empty. Duplication of sub-lists and duplication of
# string elements are not permitted.
msg = _('Invalid data type, must be a list of lists comprised of strings. '
'Sub-lists may not be duplicated. Strings in sub-lists may not be '
'duplicated.')
if not isinstance(value, list):
# Value is not a List, TypeError
raise TypeError(msg)
sublists = []
for sublist in value:
# Sublist element tracker is reset for each sublist.
string_set = set()
if not isinstance(sublist, list):
# Sublist is not a List, TypeError
raise TypeError(msg)
if not sublist:
# Sublist is Empty, ValueError
raise ValueError(msg)
if sublist in sublists:
# Sublist is duplicated, ValueError
raise ValueError(msg)
# Add the sublist to the tracker
sublists.append(sublist)
for element in sublist:
if not isinstance(element, str):
# Element of sublist is not a string, TypeError
raise TypeError(msg)
if element in string_set:
# Element of sublist is duplicated, ValueError
raise ValueError(msg)
# add element to the sublist element tracker
string_set.add(element)
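# Illustrative examples (not part of the original file) of values accepted or
# rejected by the validator above:
#
#   _mfa_rules_validator_list_of_lists_of_strings_no_duplicates(
#       [['password', 'totp'], ['token']])   # accepted
#   _mfa_rules_validator_list_of_lists_of_strings_no_duplicates(
#       [['password', 'password']])          # raises ValueError (duplicate string)
#   _mfa_rules_validator_list_of_lists_of_strings_no_duplicates(
#       'password')                          # raises TypeError (not a list)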
USER_OPTIONS_REGISTRY = resource_options.ResourceOptionRegistry('USER')
IGNORE_CHANGE_PASSWORD_OPT = (
resource_options.ResourceOption(
option_id='1000',
option_name='ignore_change_password_upon_first_use',
validator=resource_options.boolean_validator,
json_schema_validation=parameter_types.boolean))
IGNORE_PASSWORD_EXPIRY_OPT = (
resource_options.ResourceOption(
option_id='1001',
option_name='ignore_password_expiry',
validator=resource_options.boolean_validator,
json_schema_validation=parameter_types.boolean))
IGNORE_LOCKOUT_ATTEMPT_OPT = (
resource_options.ResourceOption(
option_id='1002',
option_name='ignore_lockout_failure_attempts',
validator=resource_options.boolean_validator,
json_schema_validation=parameter_types.boolean))
LOCK_PASSWORD_OPT = (
resource_options.ResourceOption(
option_id='1003',
option_name='lock_password',
validator=resource_options.boolean_validator,
json_schema_validation=parameter_types.boolean))
IGNORE_USER_INACTIVITY_OPT = (
resource_options.ResourceOption(
option_id='1004',
option_name='ignore_user_inactivity',
validator=resource_options.boolean_validator,
json_schema_validation=parameter_types.boolean))
MFA_RULES_OPT = (
resource_options.ResourceOption(
option_id='MFAR',
option_name='multi_factor_auth_rules',
validator=_mfa_rules_validator_list_of_lists_of_strings_no_duplicates,
json_schema_validation={
# List
'type': 'array',
'items': {
# Of Lists
'type': 'array',
'items': {
# Of Strings, each string must be unique, minimum 1
# element
'type': 'string',
},
'minItems': 1,
'uniqueItems': True
},
'uniqueItems': True
}))
MFA_ENABLED_OPT = (
resource_options.ResourceOption(
option_id='MFAE',
option_name='multi_factor_auth_enabled',
validator=resource_options.boolean_validator,
json_schema_validation=parameter_types.boolean))
# NOTE(notmorgan): wrap this in a function for testing purposes.
# This is called on import by design.
def register_user_options():
for opt in [
IGNORE_CHANGE_PASSWORD_OPT,
IGNORE_PASSWORD_EXPIRY_OPT,
IGNORE_LOCKOUT_ATTEMPT_OPT,
LOCK_PASSWORD_OPT,
IGNORE_USER_INACTIVITY_OPT,
MFA_RULES_OPT,
MFA_ENABLED_OPT,
]:
USER_OPTIONS_REGISTRY.register_option(opt)
register_user_options()
|
virtual/lib/python3.6/site-packages/setuptools/wheel.py
|
najma-amin/News-Highlights
| 445 |
136040
|
<filename>virtual/lib/python3.6/site-packages/setuptools/wheel.py
'''Wheels support.'''
from distutils.util import get_platform
import email
import itertools
import os
import re
import zipfile
from pkg_resources import Distribution, PathMetadata, parse_version
from setuptools.extern.six import PY3
from setuptools import Distribution as SetuptoolsDistribution
from setuptools import pep425tags
from setuptools.command.egg_info import write_requirements
WHEEL_NAME = re.compile(
r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
)\.whl$""",
re.VERBOSE).match
NAMESPACE_PACKAGE_INIT = '''\
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
'''
def unpack(src_dir, dst_dir):
'''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
for dirpath, dirnames, filenames in os.walk(src_dir):
subdir = os.path.relpath(dirpath, src_dir)
for f in filenames:
src = os.path.join(dirpath, f)
dst = os.path.join(dst_dir, subdir, f)
os.renames(src, dst)
for n, d in reversed(list(enumerate(dirnames))):
src = os.path.join(dirpath, d)
dst = os.path.join(dst_dir, subdir, d)
if not os.path.exists(dst):
# Directory does not exist in destination,
# rename it and prune it from os.walk list.
os.renames(src, dst)
del dirnames[n]
# Cleanup.
for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
assert not filenames
os.rmdir(dirpath)
class Wheel(object):
def __init__(self, filename):
match = WHEEL_NAME(os.path.basename(filename))
if match is None:
raise ValueError('invalid wheel name: %r' % filename)
self.filename = filename
for k, v in match.groupdict().items():
setattr(self, k, v)
def tags(self):
'''List tags (py_version, abi, platform) supported by this wheel.'''
return itertools.product(self.py_version.split('.'),
self.abi.split('.'),
self.platform.split('.'))
def is_compatible(self):
        '''Check whether the wheel is compatible with the current platform.'''
supported_tags = pep425tags.get_supported()
return next((True for t in self.tags() if t in supported_tags), False)
def egg_name(self):
return Distribution(
project_name=self.project_name, version=self.version,
platform=(None if self.platform == 'any' else get_platform()),
).egg_name() + '.egg'
def install_as_egg(self, destination_eggdir):
'''Install wheel as an egg directory.'''
with zipfile.ZipFile(self.filename) as zf:
dist_basename = '%s-%s' % (self.project_name, self.version)
dist_info = '%s.dist-info' % dist_basename
dist_data = '%s.data' % dist_basename
def get_metadata(name):
with zf.open('%s/%s' % (dist_info, name)) as fp:
value = fp.read().decode('utf-8') if PY3 else fp.read()
return email.parser.Parser().parsestr(value)
wheel_metadata = get_metadata('WHEEL')
dist_metadata = get_metadata('METADATA')
# Check wheel format version is supported.
wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
if not parse_version('1.0') <= wheel_version < parse_version('2.0dev0'):
raise ValueError('unsupported wheel format version: %s' % wheel_version)
# Extract to target directory.
os.mkdir(destination_eggdir)
zf.extractall(destination_eggdir)
# Convert metadata.
dist_info = os.path.join(destination_eggdir, dist_info)
dist = Distribution.from_location(
destination_eggdir, dist_info,
metadata=PathMetadata(destination_eggdir, dist_info)
)
# Note: we need to evaluate and strip markers now,
# as we can't easily convert back from the syntax:
# foobar; "linux" in sys_platform and extra == 'test'
def raw_req(req):
req.marker = None
return str(req)
install_requires = list(sorted(map(raw_req, dist.requires())))
extras_require = {
extra: list(sorted(
req
for req in map(raw_req, dist.requires((extra,)))
if req not in install_requires
))
for extra in dist.extras
}
egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
os.rename(dist_info, egg_info)
os.rename(os.path.join(egg_info, 'METADATA'),
os.path.join(egg_info, 'PKG-INFO'))
setup_dist = SetuptoolsDistribution(attrs=dict(
install_requires=install_requires,
extras_require=extras_require,
))
write_requirements(setup_dist.get_command_obj('egg_info'),
None, os.path.join(egg_info, 'requires.txt'))
# Move data entries to their correct location.
dist_data = os.path.join(destination_eggdir, dist_data)
dist_data_scripts = os.path.join(dist_data, 'scripts')
if os.path.exists(dist_data_scripts):
egg_info_scripts = os.path.join(destination_eggdir,
'EGG-INFO', 'scripts')
os.mkdir(egg_info_scripts)
for entry in os.listdir(dist_data_scripts):
# Remove bytecode, as it's not properly handled
# during easy_install scripts install phase.
if entry.endswith('.pyc'):
os.unlink(os.path.join(dist_data_scripts, entry))
else:
os.rename(os.path.join(dist_data_scripts, entry),
os.path.join(egg_info_scripts, entry))
os.rmdir(dist_data_scripts)
for subdir in filter(os.path.exists, (
os.path.join(dist_data, d)
for d in ('data', 'headers', 'purelib', 'platlib')
)):
unpack(subdir, destination_eggdir)
if os.path.exists(dist_data):
os.rmdir(dist_data)
# Fix namespace packages.
namespace_packages = os.path.join(egg_info, 'namespace_packages.txt')
if os.path.exists(namespace_packages):
with open(namespace_packages) as fp:
namespace_packages = fp.read().split()
for mod in namespace_packages:
mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
mod_init = os.path.join(mod_dir, '__init__.py')
if os.path.exists(mod_dir) and not os.path.exists(mod_init):
with open(mod_init, 'w') as fp:
fp.write(NAMESPACE_PACKAGE_INIT)
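# Illustrative usage sketch (not part of the original file); the wheel file
# name below is a placeholder:
#
#   w = Wheel('example_pkg-1.0-py2.py3-none-any.whl')
#   if w.is_compatible():
#       w.install_as_egg('/tmp/' + w.egg_name())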
|
Documents/Router/CVE-2017-7494/impacket/testcases/SMB_RPC/test_wkst.py
|
edinjapan/NSABlocklist
| 201 |
136056
|
###############################################################################
# Tested so far:
#
# NetrWkstaGetInfo
# NetrWkstaUserEnum
# NetrWkstaTransportEnum
# NetrWkstaTransportAdd
# NetrUseAdd
# NetrUseGetInfo
# NetrUseDel
# NetrUseEnum
# NetrWorkstationStatisticsGet
# NetrGetJoinInformation
# NetrJoinDomain2
# NetrUnjoinDomain2
# NetrRenameMachineInDomain2
# NetrValidateName2
# NetrGetJoinableOUs2
# NetrAddAlternateComputerName
# NetrRemoveAlternateComputerName
# NetrSetPrimaryComputerName
# NetrEnumerateComputerNames
#
# Not yet:
#
# Shouldn't dump errors against a win7
#
################################################################################
import unittest
import ConfigParser
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5 import wkst
from impacket.dcerpc.v5.ndr import NULL
class WKSTTests(unittest.TestCase):
def connect(self):
rpctransport = transport.DCERPCTransportFactory(self.stringBinding)
if len(self.hashes) > 0:
lmhash, nthash = self.hashes.split(':')
else:
lmhash = ''
nthash = ''
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.username,self.password, self.domain, lmhash, nthash)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(wkst.MSRPC_UUID_WKST, transfer_syntax = self.ts)
return dce, rpctransport
def test_NetrWkstaGetInfo(self):
dce, rpctransport = self.connect()
request = wkst.NetrWkstaGetInfo()
request['ServerName'] = '\x00'*10
request['Level'] = 100
resp = dce.request(request)
resp.dump()
request['Level'] = 101
resp = dce.request(request)
resp.dump()
request['Level'] = 102
resp = dce.request(request)
resp.dump()
request['Level'] = 502
resp = dce.request(request)
resp.dump()
def test_hNetrWkstaGetInfo(self):
dce, rpctransport = self.connect()
resp = wkst.hNetrWkstaGetInfo(dce, 100)
resp.dump()
resp = wkst.hNetrWkstaGetInfo(dce, 101)
resp.dump()
resp = wkst.hNetrWkstaGetInfo(dce, 102)
resp.dump()
resp = wkst.hNetrWkstaGetInfo(dce, 502)
resp.dump()
def test_NetrWkstaUserEnum(self):
dce, rpctransport = self.connect()
request = wkst.NetrWkstaUserEnum()
request['ServerName'] = '\x00'*10
request['UserInfo']['Level'] = 0
request['UserInfo']['WkstaUserInfo']['tag'] = 0
request['PreferredMaximumLength'] = 8192
resp = dce.request(request)
resp.dump()
request['UserInfo']['Level'] = 1
request['UserInfo']['WkstaUserInfo']['tag'] = 1
resp = dce.request(request)
resp.dump()
def test_hNetrWkstaUserEnum(self):
dce, rpctransport = self.connect()
resp = wkst.hNetrWkstaUserEnum(dce, 0)
resp.dump()
resp = wkst.hNetrWkstaUserEnum(dce, 1)
resp.dump()
def test_NetrWkstaTransportEnum(self):
dce, rpctransport = self.connect()
request = wkst.NetrWkstaTransportEnum()
request['ServerName'] = '\x00'*10
request['TransportInfo']['Level'] = 0
request['TransportInfo']['WkstaTransportInfo']['tag'] = 0
request['PreferredMaximumLength'] = 500
request['ResumeHandle'] = NULL
resp = dce.request(request)
resp.dump()
def test_hNetrWkstaTransportEnum(self):
dce, rpctransport = self.connect()
resp = wkst.hNetrWkstaTransportEnum(dce, 0)
resp.dump()
def test_NetrWkstaSetInfo(self):
dce, rpctransport = self.connect()
request = wkst.NetrWkstaGetInfo()
request['ServerName'] = '\x00'*10
request['Level'] = 502
resp = dce.request(request)
resp.dump()
oldVal = resp['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit']
req = wkst.NetrWkstaSetInfo()
req['ServerName'] = '\x00'*10
req['Level'] = 502
req['WkstaInfo'] = resp['WkstaInfo']
req['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit'] = 500
resp2 = dce.request(req)
resp2.dump()
resp = dce.request(request)
self.assertTrue(500 == resp['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit'] )
req['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit'] = oldVal
resp2 = dce.request(req)
resp2.dump()
def test_hNetrWkstaSetInfo(self):
dce, rpctransport = self.connect()
resp = wkst.hNetrWkstaGetInfo(dce, 502)
resp.dump()
oldVal = resp['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit']
resp['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit'] = 500
resp2 = wkst.hNetrWkstaSetInfo(dce, 502,resp['WkstaInfo']['WkstaInfo502'])
resp2.dump()
resp = wkst.hNetrWkstaGetInfo(dce, 502)
resp.dump()
self.assertTrue(500 == resp['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit'] )
resp['WkstaInfo']['WkstaInfo502']['wki502_dormant_file_limit'] = oldVal
resp2 = wkst.hNetrWkstaSetInfo(dce, 502,resp['WkstaInfo']['WkstaInfo502'])
resp2.dump()
def test_NetrWkstaTransportAdd(self):
dce, rpctransport = self.connect()
req = wkst.NetrWkstaTransportAdd()
req['ServerName'] = '\x00'*10
req['Level'] = 0
req['TransportInfo']['wkti0_transport_name'] = 'BETO\x00'
req['TransportInfo']['wkti0_transport_address'] = '000C29BC5CE5\x00'
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_FUNCTION') < 0:
raise
def test_hNetrUseAdd_hNetrUseDel_hNetrUseGetInfo_hNetrUseEnum(self):
dce, rpctransport = self.connect()
info1 = wkst.LPUSE_INFO_1()
info1['ui1_local'] = 'Z:\x00'
info1['ui1_remote'] = '\\\\127.0.0.1\\c$\x00'
info1['ui1_password'] = NULL
resp = wkst.hNetrUseAdd(dce, 1, info1)
resp.dump()
# We're not testing this call with NDR64, it fails and I can't see the contents
if self.ts == ('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0'):
return
resp = wkst.hNetrUseEnum(dce, 2)
resp.dump()
resp2 = wkst.hNetrUseGetInfo(dce, 'Z:', 3)
resp2.dump()
resp = wkst.hNetrUseDel(dce,'Z:')
resp.dump()
def test_NetrUseAdd_NetrUseDel_NetrUseGetInfo_NetrUseEnum(self):
dce, rpctransport = self.connect()
req = wkst.NetrUseAdd()
req['ServerName'] = '\x00'*10
req['Level'] = 1
req['InfoStruct']['tag'] = 1
req['InfoStruct']['UseInfo1']['ui1_local'] = 'Z:\x00'
req['InfoStruct']['UseInfo1']['ui1_remote'] = '\\\\127.0.0.1\\c$\x00'
req['InfoStruct']['UseInfo1']['ui1_password'] = NULL
resp2 = dce.request(req)
resp2.dump()
# We're not testing this call with NDR64, it fails and I can't see the contents
if self.ts == ('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0'):
return
req = wkst.NetrUseEnum()
req['ServerName'] = NULL
req['InfoStruct']['Level'] = 2
req['InfoStruct']['UseInfo']['tag'] = 2
req['InfoStruct']['UseInfo']['Level2']['Buffer'] = NULL
req['PreferredMaximumLength'] = 0xffffffff
req['ResumeHandle'] = NULL
resp2 = dce.request(req)
resp2.dump()
req = wkst.NetrUseGetInfo()
req['ServerName'] = '\x00'*10
req['UseName'] = 'Z:\x00'
req['Level'] = 3
resp2 = dce.request(req)
resp2.dump()
req = wkst.NetrUseDel()
req['ServerName'] = '\x00'*10
req['UseName'] = 'Z:\x00'
req['ForceLevel'] = wkst.USE_LOTS_OF_FORCE
resp2 = dce.request(req)
resp2.dump()
def test_NetrWorkstationStatisticsGet(self):
dce, rpctransport = self.connect()
req = wkst.NetrWorkstationStatisticsGet()
req['ServerName'] = '\x00'*10
req['ServiceName'] = '\x00'
req['Level'] = 0
req['Options'] = 0
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PARAMETER') < 0:
raise
def test_hNetrWorkstationStatisticsGet(self):
dce, rpctransport = self.connect()
try:
resp2 = wkst.hNetrWorkstationStatisticsGet(dce, '\x00', 0, 0)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PARAMETER') < 0:
raise
def test_NetrGetJoinInformation(self):
dce, rpctransport = self.connect()
req = wkst.NetrGetJoinInformation()
req['ServerName'] = '\x00'*10
req['NameBuffer'] = '\x00'
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PARAMETER') < 0:
raise
def test_hNetrGetJoinInformation(self):
dce, rpctransport = self.connect()
try:
resp = wkst.hNetrGetJoinInformation(dce, '\x00')
resp.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PARAMETER') < 0:
raise
def test_NetrJoinDomain2(self):
dce, rpctransport = self.connect()
req = wkst.NetrJoinDomain2()
req['ServerName'] = '\x00'*10
req['DomainNameParam'] = '172.16.123.1\\FREEFLY\x00'
req['MachineAccountOU'] = 'OU=BETUS,DC=FREEFLY\x00'
req['AccountName'] = NULL
req['Password']['Buffer'] = '\x00'*512
req['Options'] = wkst.NETSETUP_DOMAIN_JOIN_IF_JOINED
#req.dump()
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_hNetrJoinDomain2(self):
dce, rpctransport = self.connect()
try:
resp = wkst.hNetrJoinDomain2(dce,'172.16.123.1\\FREEFLY\x00','OU=BETUS,DC=FREEFLY\x00',NULL,'\x00'*512, wkst.NETSETUP_DOMAIN_JOIN_IF_JOINED)
resp.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_NetrUnjoinDomain2(self):
dce, rpctransport = self.connect()
req = wkst.NetrUnjoinDomain2()
req['ServerName'] = '\x00'*10
req['AccountName'] = NULL
req['Password']['Buffer'] = '\<PASSWORD>'*512
#req['Password'] = NULL
req['Options'] = wkst.NETSETUP_ACCT_DELETE
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_hNetrUnjoinDomain2(self):
dce, rpctransport = self.connect()
try:
resp = wkst.hNetrUnjoinDomain2(dce, NULL, '\x00'*512, wkst.NETSETUP_ACCT_DELETE)
resp.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_NetrRenameMachineInDomain2(self):
dce, rpctransport = self.connect()
req = wkst.NetrRenameMachineInDomain2()
req['ServerName'] = '\x00'*10
req['MachineName'] = 'BETUS\x00'
req['AccountName'] = NULL
req['Password']['Buffer'] = '\x00'*512
#req['Password'] = NULL
req['Options'] = wkst.NETSETUP_ACCT_CREATE
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_hNetrRenameMachineInDomain2(self):
dce, rpctransport = self.connect()
try:
resp = wkst.hNetrRenameMachineInDomain2(dce, 'BETUS\x00', NULL, '\x00'*512, wkst.NETSETUP_ACCT_CREATE)
resp.dump()
except Exception, e:
if str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_NetrValidateName2(self):
dce, rpctransport = self.connect()
req = wkst.NetrValidateName2()
req['ServerName'] = '\x00'*10
req['NameToValidate'] = 'BETO\x00'
req['AccountName'] = NULL
req['Password'] = <PASSWORD>
req['NameType'] = wkst.NETSETUP_NAME_TYPE.NetSetupDomain
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('0x8001011c') < 0:
raise
def test_hNetrValidateName2(self):
dce, rpctransport = self.connect()
try:
resp2 = wkst.hNetrValidateName2(dce, 'BETO\x00', NULL, NULL, wkst.NETSETUP_NAME_TYPE.NetSetupDomain)
resp2.dump()
except Exception, e:
if str(e).find('0x8001011c') < 0:
raise
def test_NetrGetJoinableOUs2(self):
dce, rpctransport = self.connect()
req = wkst.NetrGetJoinableOUs2()
req['ServerName'] = '\x00'*10
req['DomainNameParam'] = 'FREEFLY\x00'
req['AccountName'] = NULL
req['Password'] = NULL
req['OUCount'] = 0
#req.dump()
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('0x8001011c') < 0:
raise
def test_hNetrGetJoinableOUs2(self):
dce, rpctransport = self.connect()
try:
resp = wkst.hNetrGetJoinableOUs2(dce,'FREEFLY\x00', NULL, NULL,0 )
resp.dump()
except Exception, e:
if str(e).find('0x8001011c') < 0:
raise
def test_NetrAddAlternateComputerName(self):
dce, rpctransport = self.connect()
req = wkst.NetrAddAlternateComputerName()
req['ServerName'] = '\x00'*10
req['AlternateName'] = 'FREEFLY\x00'
req['DomainAccount'] = NULL
req['EncryptedPassword'] = <PASSWORD>
#req.dump()
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0 and str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_hNetrAddAlternateComputerName(self):
dce, rpctransport = self.connect()
try:
resp2= wkst.hNetrAddAlternateComputerName(dce, 'FREEFLY\x00', NULL, NULL)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0 and str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_NetrRemoveAlternateComputerName(self):
dce, rpctransport = self.connect()
req = wkst.NetrRemoveAlternateComputerName()
req['ServerName'] = '\x00'*10
req['AlternateName'] = 'FREEFLY\x00'
req['DomainAccount'] = NULL
req['EncryptedPassword'] = NULL
#req.dump()
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0 and str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_hNetrRemoveAlternateComputerName(self):
dce, rpctransport = self.connect()
try:
resp2 = wkst.hNetrRemoveAlternateComputerName(dce,'FREEFLY\x00', NULL, NULL )
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0 and str(e).find('ERROR_INVALID_PASSWORD') < 0:
raise
def test_NetrSetPrimaryComputerName(self):
dce, rpctransport = self.connect()
req = wkst.NetrSetPrimaryComputerName()
req['ServerName'] = '\x00'*10
req['PrimaryName'] = 'FREEFLY\x00'
req['DomainAccount'] = NULL
req['EncryptedPassword'] = <PASSWORD>
#req.dump()
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0:
if str(e).find('ERROR_INVALID_PARAMETER') < 0:
raise
def test_hNetrSetPrimaryComputerName(self):
dce, rpctransport = self.connect()
try:
resp2 = wkst.hNetrSetPrimaryComputerName(dce,'FREEFLY\x00', NULL, NULL )
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0:
if str(e).find('ERROR_INVALID_PARAMETER') < 0:
raise
def test_NetrEnumerateComputerNames(self):
dce, rpctransport = self.connect()
req = wkst.NetrEnumerateComputerNames()
req['ServerName'] = '\x00'*10
req['NameType'] = wkst.NET_COMPUTER_NAME_TYPE.NetAllComputerNames
#req.dump()
try:
resp2 = dce.request(req)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0:
raise
def test_hNetrEnumerateComputerNames(self):
dce, rpctransport = self.connect()
try:
resp2 = wkst.hNetrEnumerateComputerNames(dce,wkst.NET_COMPUTER_NAME_TYPE.NetAllComputerNames)
resp2.dump()
except Exception, e:
if str(e).find('ERROR_NOT_SUPPORTED') < 0:
raise
class SMBTransport(WKSTTests):
def setUp(self):
WKSTTests.setUp(self)
configFile = ConfigParser.ConfigParser()
configFile.read('dcetests.cfg')
self.username = configFile.get('SMBTransport', 'username')
self.domain = configFile.get('SMBTransport', 'domain')
self.serverName = configFile.get('SMBTransport', 'servername')
self.password = configFile.get('SMBTransport', 'password')
self.machine = configFile.get('SMBTransport', 'machine')
self.hashes = configFile.get('SMBTransport', 'hashes')
self.stringBinding = r'ncacn_np:%s[\PIPE\wkssvc]' % self.machine
self.ts = ('8a885d04-1ceb-11c9-9fe8-08002b104860', '2.0')
class SMBTransport64(WKSTTests):
def setUp(self):
WKSTTests.setUp(self)
configFile = ConfigParser.ConfigParser()
configFile.read('dcetests.cfg')
self.username = configFile.get('SMBTransport', 'username')
self.domain = configFile.get('SMBTransport', 'domain')
self.serverName = configFile.get('SMBTransport', 'servername')
self.password = configFile.get('SMBTransport', 'password')
self.machine = configFile.get('SMBTransport', 'machine')
self.hashes = configFile.get('SMBTransport', 'hashes')
self.stringBinding = r'ncacn_np:%s[\PIPE\wkssvc]' % self.machine
self.ts = ('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')
# Process command-line arguments.
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
testcase = sys.argv[1]
suite = unittest.TestLoader().loadTestsFromTestCase(globals()[testcase])
else:
suite = unittest.TestLoader().loadTestsFromTestCase(SMBTransport)
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(SMBTransport64))
unittest.TextTestRunner(verbosity=1).run(suite)
|
part2-steps-challenge/load-testing/plot-locust-data.py
|
viartemev/vertx-in-action
| 201 |
136093
|
<reponame>viartemev/vertx-in-action
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import argparse
parser = argparse.ArgumentParser(description="Plot some data from Locust csv output")
parser.add_argument("title", help="Plot title")
parser.add_argument("input", help="Data file in CSV format")
parser.add_argument("output", help="Output file name")
parser.add_argument("dpi", help="DPI resolution", type=int)
args = parser.parse_args()
data = pd.read_csv(args.input)
data = data.set_index("Name")
ax = data.plot.barh(y=["95%", "98%", "99%", "100%"], stacked=True, color=["tab:green", "tab:blue", "tab:orange", "tab:red"])
ax.set_xlabel("Latency (ms)")
ax.set_ylabel("Name")
plt.title(args.title)
plt.tight_layout()
plt.savefig(f"{args.output}", dpi=args.dpi)
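# Illustrative invocation (not part of the original file); file names are placeholders:
#
#   ./plot-locust-data.py "Locust latencies" locust-stats.csv latencies.png 300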
|
sdk-extension/opentelemetry-sdk-extension-aws/tests/resource/test_ec2.py
|
epsagon/opentelemetry-python-contrib
| 208 |
136111
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import OrderedDict
from unittest.mock import patch
from opentelemetry.sdk.extension.aws.resource.ec2 import AwsEc2ResourceDetector
from opentelemetry.semconv.resource import (
CloudPlatformValues,
CloudProviderValues,
ResourceAttributes,
)
MockEc2ResourceAttributes = {
ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EC2.value,
ResourceAttributes.CLOUD_ACCOUNT_ID: "123456789012",
ResourceAttributes.CLOUD_REGION: "mock-west-2",
ResourceAttributes.CLOUD_AVAILABILITY_ZONE: "mock-west-2a",
ResourceAttributes.HOST_ID: "i-1234ab56cd7e89f01",
ResourceAttributes.HOST_TYPE: "t2.micro-mock",
ResourceAttributes.HOST_NAME: "ip-172-12-34-567.mock-west-2.compute.internal",
}
class AwsEc2ResourceDetectorTest(unittest.TestCase):
@patch(
"opentelemetry.sdk.extension.aws.resource.ec2._get_host",
return_value=MockEc2ResourceAttributes[ResourceAttributes.HOST_NAME],
)
@patch(
"opentelemetry.sdk.extension.aws.resource.ec2._get_identity",
return_value=f"""{{
"accountId" : "{MockEc2ResourceAttributes[ResourceAttributes.CLOUD_ACCOUNT_ID]}",
"architecture" : "x86_64",
"availabilityZone" : "{MockEc2ResourceAttributes[ResourceAttributes.CLOUD_AVAILABILITY_ZONE]}",
"billingProducts" : null,
"devpayProductCodes" : null,
"marketplaceProductCodes" : null,
"imageId" : "ami-0957cee1854021123",
"instanceId" : "{MockEc2ResourceAttributes[ResourceAttributes.HOST_ID]}",
"instanceType" : "{MockEc2ResourceAttributes[ResourceAttributes.HOST_TYPE]}",
"kernelId" : null,
"pendingTime" : "2021-07-13T21:53:41Z",
"privateIp" : "172.12.34.567",
"ramdiskId" : null,
"region" : "{MockEc2ResourceAttributes[ResourceAttributes.CLOUD_REGION]}",
"version" : "2017-09-30"
}}""",
)
@patch(
"opentelemetry.sdk.extension.aws.resource.ec2._get_token",
return_value="mock-token",
)
def test_simple_create(
self, mock_get_token, mock_get_identity, mock_get_host
):
actual = AwsEc2ResourceDetector().detect()
self.assertDictEqual(
actual.attributes.copy(), OrderedDict(MockEc2ResourceAttributes)
)
|
custom_components/tahoma/water_heater.py
|
iMicknl/ha-tahoma
| 130 |
136114
|
"""Support for Overkiz water heater devices."""
from homeassistant.components.water_heater import DOMAIN as WATER_HEATER
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .water_heater_devices.domestic_hot_water_production import (
DomesticHotWaterProduction,
)
from .water_heater_devices.hitachi_dhw import HitachiDHW
TYPE = {
"DomesticHotWaterProduction": DomesticHotWaterProduction,
"HitachiDHW": HitachiDHW,
}
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
):
"""Set up the Overkiz water heater from a config entry."""
data = hass.data[DOMAIN][entry.entry_id]
coordinator = data["coordinator"]
water_heater_devices = [device for device in data["platforms"][WATER_HEATER]]
entities = [
TYPE[device.widget](device.deviceurl, coordinator)
for device in water_heater_devices
if device.widget in TYPE
]
async_add_entities(entities)
|
strawberryfields/program_utils.py
|
corvust/strawberryfields
| 646 |
136124
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains various utility classes and functions used
within the :class:`~.Program` class.
"""
from collections.abc import Sequence
import networkx as nx
from .parameters import MeasuredParameter
__all__ = [
"Program_current_context",
"RegRefError",
"CircuitError",
"MergeFailure",
"Command",
"RegRef",
"list_to_grid",
"grid_to_DAG",
"DAG_to_list",
"list_to_DAG",
"group_operations",
"optimize_circuit",
]
Program_current_context = None
"""Context for inputting a Program. Used to be a class attribute of :class:`.Program`, placed
here to avoid cyclic imports."""
# todo: Avoid issues with Program contexts and threading,
# cf. _pydecimal.py in the python standard distribution.
class RegRefError(IndexError):
"""Exception raised by :class:`.Program` when it encounters an invalid register reference.
E.g., trying to apply a gate to a nonexistent or deleted subsystem.
"""
class CircuitError(RuntimeError):
"""Exception raised by :class:`.Program` when it encounters an illegal
operation in the quantum circuit.
E.g., trying to use an Operation type that is unsupported by the current compilation target.
"""
class MergeFailure(RuntimeError):
"""Exception raised by :meth:`strawberryfields.ops.Operation.merge` when an
attempted merge fails.
E.g., trying to merge two gates of different families.
"""
class Command:
"""Represents a quantum operation applied on specific subsystems of the register.
A Command instance is immutable once created, and can be shared between
several :class:`.Program` instances.
Args:
op (~strawberryfields.ops.Operation): quantum operation to apply
reg (Sequence[RegRef]): Subsystems to which the operation is applied.
Note that the order matters here.
"""
# pylint: disable=too-few-public-methods
def __init__(self, op, reg):
# accept a single RegRef in addition to a Sequence
if not isinstance(reg, Sequence):
reg = [reg]
#: Operation: quantum operation to apply
self.op = op
#: Sequence[RegRef]: subsystems to which the operation is applied
self.reg = reg
def __str__(self):
"""
Return a string containing the command in Blackbird syntax.
"""
operation = str(self.op)
if self.op.ns == 0:
# op takes no subsystems as parameters, do not print anything more
code = operation
else:
subsystems = ", ".join([str(r) for r in self.reg])
code = "{} | ({})".format(operation, subsystems)
return code
def __lt__(self, other):
# Needed as a tiebreaker for NetworkX lexicographical_topological_sort()
# due to a buggy implementation! Any order will do. Remove when NetworkX is fixed.
return True
def get_dependencies(self):
"""Subsystems the command depends on.
Combination of ``self.reg`` and ``self.op.measurement_deps``.
.. note:: ``measurement_deps`` are used to ensure that the measurement
happens before the result is used, but this is a bit too strict:
two gates depending on the same measurement result but otherwise
acting on different subsystems should commute.
Returns:
set[RegRef]: set of subsystems the command depends on
"""
deps = self.op.measurement_deps | set(self.reg)
return deps
class RegRef:
"""Quantum register reference.
The objects of this class refer to a specific subsystem (mode) of
a quantum register.
Within the scope of each :class:`.Program` instance, only one RegRef instance
should exist per subsystem. Program keeps the authoritative mapping
of subsystem indices to RegRef instances.
Subsystem measurement results are stored in the "official" RegRef object.
If other RegRef objects referring to the same subsystem exist, they will
not be updated. Once a RegRef is assigned a subsystem index it will never
change, not even if the subsystem is deleted.
The RegRefs are constructed in :meth:`.Program._add_subsystems`.
Args:
ind (int): index of the register subsystem referred to
"""
# pylint: disable=too-few-public-methods
def __init__(self, ind):
self.ind = ind #: int: subsystem index
self.val = None #: float, complex: Measurement result. None if the subsystem has not been measured yet.
self.active = True #: bool: True at construction, False after the subsystem is deleted
def __str__(self):
return "q[{}]".format(self.ind)
def __hash__(self):
"""Hashing method.
NOTE: Has to match :meth:`__eq__` such that if two RegRefs compare equal they must have equal hashes.
"""
return hash((self.ind, self.active))
def __eq__(self, other):
"""Equality comparison.
Compares the index and the activity state of the two RegRefs, the val field does not matter.
NOTE: Affects the hashability of RegRefs, see also :meth:`__hash__`.
"""
if other.__class__ != self.__class__:
print("--------------- regref.__eq__: compared reqref to ", other.__class__)
return False
return self.ind == other.ind and self.active == other.active
@property
def par(self):
"""Convert the RegRef into a measured parameter.
Returns:
MeasuredParameter: measured parameter linked to this RegRef
"""
return MeasuredParameter(self)
# =================
# Utility functions
# =================
def list_to_grid(ls):
"""Transforms a list of Commands to a grid representation.
The grid is a mapping from subsystem indices to lists of :class:`Command` instances touching
that subsystem, in temporal order. The same Command instance will appear in each list that
corresponds to one of its subsystems.
Args:
ls (Iterable[Command]): quantum circuit
Returns:
dict[int, list[Command]]: same circuit in grid form
"""
grid = {}
# enter every operation in the list to its proper position in the grid
for cmd in ls:
for r in cmd.get_dependencies():
# Add cmd to the grid to the end of the line r.ind.
grid.setdefault(r.ind, []).append(cmd)
return grid
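# Illustrative note (not part of the original file): for a circuit
# [cmd1 on q[0], cmd2 on q[0] and q[1]], list_to_grid returns
# {0: [cmd1, cmd2], 1: [cmd2]}, i.e. a multi-mode Command appears on every
# wire it touches.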
def grid_to_DAG(grid):
"""Transforms a grid of Commands to a DAG representation.
In the DAG (directed acyclic graph) each node is a :class:`Command` instance,
and edges point from Commands to their immediate dependents/followers.
Args:
grid (dict[int, list[Command]]): quantum circuit
Returns:
networkx.DiGraph[Command]: same circuit in DAG form
"""
DAG = nx.DiGraph()
for _, q in grid.items():
if q:
# add the first operation on the wire that does not depend on anything
DAG.add_node(q[0])
for i in range(1, len(q)):
# add the edge between the operations, and the operation nodes themselves
DAG.add_edge(q[i - 1], q[i])
return DAG
def list_to_DAG(ls):
"""Transforms a list of Commands to a DAG representation.
In the DAG (directed acyclic graph) each node is a :class:`Command` instance,
and edges point from Commands to their immediate dependents/followers.
Args:
ls (Iterable[Command]): quantum circuit
Returns:
networkx.DiGraph[Command]: same circuit in DAG form
"""
return grid_to_DAG(list_to_grid(ls))
def DAG_to_list(dag):
"""Transforms a Command DAG to a list representation.
The list contains the :class:`Command` instances in (one possible) topological order,
i.e., dependants following the operations they depend on.
Args:
dag (networkx.DiGraph[Command]): quantum circuit
Returns:
list[Command]: same circuit in list form
"""
# sort the operation graph into topological order
temp = nx.algorithms.dag.topological_sort(dag)
return list(temp)
def group_operations(seq, predicate):
"""Group a set of Operations in a circuit together (if possible).
For the purposes of this method, we call a :class:`Operation` instance *marked* iff
``predicate`` returns True on it.
This method converts the quantum circuit in ``seq`` into an equivalent circuit ``A+B+C``,
where the :class:`Command` instances in sequences ``A`` and ``C`` do not contain any
marked Operations.
The sequence ``B`` contains all marked Operations in the circuit, and possibly
additional unmarked instances that could not be moved into ``A`` or ``C`` using the
available commutation rules.
Any of the three returned sequences can be empty (but if ``B`` is empty then so is ``C``).
Args:
seq (Sequence[Command]): quantum circuit
predicate (Callable[[Operation], bool]): Grouping predicate. Returns True for the
Operations to be grouped together, False for the others.
Returns:
Tuple[Sequence[Command]]: A, B, C such that A+B+C is equivalent to seq,
and A and C do not contain any marked Operation instances.
"""
def find_first_index(seq):
"""Index of the first element in the sequence for which the predicate function returns True.
If no such element exists, returns the length of the sequence.
"""
return next((i for i, e in enumerate(seq) if predicate(e.op)), len(seq))
def marked_last(node):
"""Mapping from nodes to sorting keys to resolve ambiguities in the topological sort.
Larger key values come later in the lexicographical-topological ordering.
"""
if predicate(node.op):
return 1
return 0
def lex_topo(seq, key):
"""Sorts a Command sequence lexicographical-topologically using the given lexicographic key function."""
DAG = list_to_DAG(seq)
return list(nx.algorithms.dag.lexicographical_topological_sort(DAG, key=key))
C = lex_topo(seq, key=marked_last)
ind = find_first_index(C)
A = C[:ind] # initial unmarked instances
B = C[ind:] # marked and possibly unmarked
# re-sort B, marked instances first
C = lex_topo(B, key=lambda x: -marked_last(x))
# find last marked
ind = len(C) - find_first_index(list(reversed(C)))
B = C[:ind] # marked and still possibly unmarked
C = C[ind:] # final unmarked instances
return A, B, C
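# Illustrative usage sketch (not part of the original file): with a circuit
# ``seq`` of Command instances, grouping all two-mode operations (op.ns == 2)
# into the middle block looks like:
#
#   A, B, C = group_operations(seq, lambda op: op.ns == 2)
#   # A + B + C is equivalent to seq; A and C contain no two-mode operations.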
def optimize_circuit(seq):
"""Try to simplify and optimize a quantum circuit.
The purpose of the optimizer is to simplify the circuit
to make it cheaper and faster to execute. Different backends may require
different types of optimization, but in general the fewer operations a circuit has,
the faster it should run. The optimizer thus should convert the circuit into a
simpler :term:`equivalent circuit`.
The optimizations are based on the abstract algebraic properties of the Operations
constituting the circuit, e.g., combining two consecutive gates of the same gate family,
and at no point should require a matrix representation of any kind.
The optimization must also not change the state of the RegRefs in any way.
Currently the optimization is very simple. It
* merges neighboring :class:`state preparations <.Preparation>` and :class:`gates <.Gate>`
belonging to the same family and acting on the same sequence of subsystems
* cancels neighboring pairs of a gate and its inverse
Args:
seq (Sequence[Command]): quantum circuit to optimize
Returns:
List[Command]: optimized circuit
"""
def _print_list(i, q, print_fn=print):
"For debugging."
# pylint: disable=unreachable
return
print_fn("i: {}, len: {} ".format(i, len(q)), end="")
for x in q:
print_fn(x.op, ", ", end="")
print_fn()
grid = list_to_grid(seq)
# try merging neighboring operations on each wire
# TODO the merging could also be done using the circuit DAG, which
# might be smarter (ns>1 would be easy)
for k in grid:
q = grid[k]
i = 0 # index along the wire
_print_list(i, q)
while i + 1 < len(q):
# at least two operations left to merge on this wire
try:
a = q[i]
b = q[i + 1]
# the ops must have equal size and act on the same wires
if a.op.ns == b.op.ns and a.reg == b.reg:
if a.op.ns != 1:
                        # ns > 1 is tougher: no wire may have anything
                        # between the two ops, and deletion is more complicated.
                        # TODO: treat it as a failed merge for now
i += 1
continue
op = a.op.merge(b.op)
# merge was successful, delete the old ops
del q[i : i + 2]
# insert the merged op (unless it's identity)
if op is not None:
q.insert(i, Command(op, a.reg))
# move one spot backwards to try another merge
if i > 0:
i -= 1
_print_list(i, q)
continue
except MergeFailure:
pass
i += 1 # failed at merging the ops, move forward
# convert the circuit back into a list (via a DAG)
DAG = grid_to_DAG(grid)
return DAG_to_list(DAG)
|
scripts/capnp_test_pycapnp.py
|
BartWeyder/pycapnp
| 188 |
136129
|
#!/usr/bin/env python
import os
import sys
import capnp
capnp.add_import_hook(
[os.getcwd(), "/usr/local/include/"]
) # change this to be auto-detected?
import test_capnp # noqa: E402
def decode(name):
class_name = name[0].upper() + name[1:]
print(getattr(test_capnp, class_name).from_bytes(sys.stdin.read())._short_str())
def encode(name):
val = getattr(test_capnp, name)
class_name = name[0].upper() + name[1:]
message = getattr(test_capnp, class_name).from_dict(val.to_dict())
print(message.to_bytes())
if sys.argv[1] == "decode":
decode(sys.argv[2])
else:
encode(sys.argv[2])
|
seqlike/alphabets.py
|
ericmjl/seqlike
| 186 |
136130
|
"""SeqLike catalog of alphabets."""
import string
from Bio.Data.IUPACData import ambiguous_dna_values, ambiguous_rna_values
from Bio.Data.IUPACData import protein_letters, extended_protein_letters
gap_letter = "-"
stop_letter = "*"
generic_protein_letter = "X"
generic_nt_letter = "N"
# use full alphabet as default for functions that require one
every_letter_alphabet = string.ascii_uppercase
# The rationale for this ordering is that the gap character and standard symbols (4 bases / 20 amino acids) should come first,
# followed by the extra letters. If we were to use something like alphanumeric ordering, then the standard and full alphabets
# would be mutually incompatible.
STANDARD_NT = gap_letter + "ACGTU" + generic_nt_letter
NT = STANDARD_NT + "BDHKMRSVWY"
STANDARD_AA = stop_letter + gap_letter + protein_letters + generic_protein_letter
AA = STANDARD_AA + "BJOUZ"
STANDARD_NT_SET = set(STANDARD_NT)
NT_SET = set(NT)
STANDARD_AA_SET = set(STANDARD_AA)
AA_SET = set(AA)
# combine ambiguous_dna_values and ambiguous_rna_values into one dict
# :sa: https://stackoverflow.com/questions/1495510/combining-dictionaries-of-lists-in-python
def merge_dicts_of_str(d1, d2, ignore_keys=None):
if ignore_keys is None:
ignore_keys = list()
keys = set(d1).union(d2) - set(ignore_keys)
return dict((k, "".join(sorted(set(d1.get(k, "") + d2.get(k, ""))))) for k in keys)
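# Illustrative example (not part of the original file):
#
#   merge_dicts_of_str({'R': 'AG'}, {'R': 'GA', 'Y': 'CT'})
#   # -> {'R': 'AG', 'Y': 'CT'}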
ambiguous_nt_values = merge_dicts_of_str(ambiguous_dna_values, ambiguous_rna_values, ignore_keys="X")
# this seq->set->upper->set is necessary to avoid Seq.upper() errors
# (fails for string alphabets by trying to apply alphabet._upper())
# while extracting just the sequence letters (str(SeqRecord) returns
# a string description of the SeqRecord ID, name, etc.).
def is_NT(sequence):
# str, Seq, SeqRecord or SeqLike
return _is_seqtype(sequence, NT_SET)
def is_AA(sequence):
return _is_seqtype(sequence, AA_SET)
def is_STANDARD_AA(sequence):
return _is_seqtype(sequence, STANDARD_AA_SET)
def is_STANDARD_NT(sequence):
return _is_seqtype(sequence, STANDARD_NT_SET)
def _is_seqtype(sequence, seq_letters):
# seqlike
if hasattr(sequence, "_seqrecord"):
sequence = sequence._seqrecord.seq._data
# seqrecord
elif hasattr(sequence, "seq"):
# seqrecord was initialized from a Seq
try:
sequence = sequence.seq._data
# seqrecord was initialized from a string
except AttributeError:
sequence = sequence.seq
# seq
elif hasattr(sequence, "_data"):
sequence = sequence._data
if isinstance(sequence, bytes):
sequence = sequence.decode()
sequence = sequence.upper()
# The meat of the logic lies here.
return set(sequence).issubset(seq_letters)
def parse_alphabet(alphabet: str) -> str:
"""
This function parses and validates the 'alphabet' parameter of a SeqLike.
:param alphabet: str specifying 'NT', 'DNA', 'RNA', or 'AA', case insensitive.
:returns: either the NT or AA alphabet string.
"""
# parse string designation to desired alphabet
if isinstance(alphabet, str):
alphabet = alphabet.upper()
assert alphabet in ["NT", "DNA", "RNA", "AA"], "Invalid alphabet!"
if alphabet in ["DNA", "NT", "RNA"]:
return NT
else:
return AA
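# Illustrative examples (not part of the original file):
#
#   parse_alphabet('dna')   # -> NT alphabet string
#   parse_alphabet('AA')    # -> AA alphabet string
#   is_NT('ACGT')           # -> True
#   is_AA('MKTAYIAK')       # -> True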
|
lib/append_text.py
|
millengustavo/arauto
| 106 |
136135
|
<gh_stars>100-1000
def append_text(new_text):
'''
Write the code instruction to be exported later on
Args.
new_text (str): the text that will be appended to the base string
'''
global code_base_text
code_base_text = code_base_text + new_text
|
examples/nnrt/nnrt_datasource_yolo.py
|
cuiboyuan/plato
| 135 |
136163
|
"""
The COCO dataset or other datasets for the YOLOv5 model, using NNRT.
"""
import logging
import os
import math
from plato.config import Config
from plato.datasources import base
from nnrt_datasource_yolo_utils import LoadImagesAndLabels
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def check_img_size(img_size, s=32):
# Verify img_size is a multiple of stride s
new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
if new_size != img_size:
print(
'WARNING: --img-size %g must be multiple of max stride %g, updating to %g'
% (img_size, s, new_size))
return new_size
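# Illustrative examples (not part of the original file):
#
#   make_divisible(100, 32)   # -> 128
#   check_img_size(416, 32)   # -> 416 (already a multiple of the stride)
#   check_img_size(500, 32)   # -> 512 (rounded up, with a warning printed)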
class DataSource(base.DataSource):
"""The YOLO dataset."""
def __init__(self):
super().__init__()
_path = Config().data.data_path
if not os.path.exists(_path):
os.makedirs(_path)
logging.info(
"Downloading the YOLO dataset. This may take a while.")
urls = Config().data.download_urls
for url in urls:
if not os.path.exists(_path + url.split('/')[-1]):
DataSource.download(url, _path)
assert 'grid_size' in Config().params
self.grid_size = Config().params['grid_size']
self.image_size = check_img_size(Config().data.image_size,
self.grid_size)
self.train_set = None
self.test_set = None
def num_train_examples(self):
return Config().data.num_train_examples
def num_test_examples(self):
return Config().data.num_test_examples
def classes(self):
"""Obtains a list of class names in the dataset."""
return Config().data.classes
def get_train_set(self):
single_class = (Config().data.num_classes == 1)
if self.train_set is None:
self.train_set = LoadImagesAndLabels(
Config().data.train_path,
self.image_size,
Config().trainer.batch_size,
augment=False, # augment images
hyp=None, # augmentation hyperparameters
rect=False, # rectangular training
cache_images=False,
single_cls=single_class,
stride=int(self.grid_size),
pad=0.0,
image_weights=False,
prefix='')
return self.train_set
def get_test_set(self):
single_class = (Config().data.num_classes == 1)
if self.test_set is None:
self.test_set = LoadImagesAndLabels(
Config().data.test_path,
self.image_size,
Config().trainer.batch_size,
augment=False, # augment images
hyp=None, # augmentation hyperparameters
rect=False, # rectangular training
cache_images=False,
single_cls=single_class,
stride=int(self.grid_size),
pad=0.0,
image_weights=False,
prefix='')
return self.test_set
|
tests/pyccel/scripts/classes/classes.py
|
dina-fouad/pyccel
| 206 |
136192
|
# pylint: disable=missing-class-docstring, disable=missing-function-docstring, missing-module-docstring/
#$ header class Point(public)
#$ header method __init__(Point, double, double)
#$ header method __del__(Point)
#$ header method translate(Point, double, double)
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __del__(self):
pass
def translate(self, a, b):
self.x = self.x + a
self.y = self.y + b
if __name__ == '__main__':
p = Point(0.0, 0.0)
x=p.x
p.x=x
a = p.x
a = p.x - 2
a = 2 * p.x - 2
a = 2 * (p.x + 6) - 2
p.y = a + 5
p.y = p.x + 5
p.translate(1.0, 2.0)
print(p.x, p.y)
print(a)
del p
|
python/pmercury/protocols/ssh.py
|
raj-apoorv/mercury
| 299 |
136198
|
"""
Copyright (c) 2019 Cisco Systems, Inc. All rights reserved.
License at https://github.com/cisco/mercury/blob/master/LICENSE
"""
import os
import sys
import socket
# SSH helper classes
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from pmercury.protocols.protocol import Protocol
class SSH(Protocol):
def __init__(self, fp_database=None, config=None):
self.session_data = {}
def get_flow_key(self, data, ip_offset, tcp_offset, ip_type, ip_length):
src_port = data[tcp_offset:tcp_offset+2]
dst_port = data[tcp_offset+2:tcp_offset+4]
if ip_type == 'ipv4':
o_ = ip_offset+ip_length-8
src_addr = data[o_:o_+4]
o_ = ip_offset+ip_length-4
dst_addr = data[o_:o_+4]
else:
o_ = ip_offset+ip_length-32
src_addr = data[o_:o_+16]
o_ = ip_offset+ip_length-16
dst_addr = data[o_:o_+16]
pr = b'\x06' # currently only support TCP
return b''.join([src_addr,dst_addr,src_port,dst_port,pr])
def proto_identify(self, data, offset):
if (data[offset] == 83 and
data[offset+1] == 83 and
data[offset+2] == 72 and
data[offset+3] == 45):
return True
return False
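    # Illustrative note (not part of the original file): the byte values
    # checked above are the ASCII codes for 'SSH-', the start of the version
    # banner, e.g.:
    #
    #   SSH().proto_identify(b'SSH-2.0-OpenSSH_8.2\r\n', 0)  # -> True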
def fingerprint(self, data, ip_offset, tcp_offset, app_offset, ip_type, ip_length, data_len):
protocol_type = 'ssh'
fp_str_ = None
if app_offset+4 >= data_len:
return protocol_type, fp_str_, None
flow_key = self.get_flow_key(data, ip_offset, tcp_offset, ip_type, ip_length)
data = data[app_offset:]
if flow_key not in self.session_data and self.proto_identify(data,0) == False:
return protocol_type, fp_str_, None
elif self.proto_identify(data,0):
self.session_data[flow_key] = {}
self.session_data[flow_key]['protocol'] = data
self.session_data[flow_key]['kex'] = b''
return protocol_type, fp_str_, None
data = self.session_data[flow_key]['kex'] + data
if len(data) >= 4096:
del self.session_data[flow_key]
return protocol_type, fp_str_, None
# check SSH packet length to limit possibility of parsing junk and handle fragmentation
if int.from_bytes(data[0:4], byteorder='big') + 4 > len(data):
self.session_data[flow_key]['kex'] += data
return protocol_type, fp_str_, None
# check to make sure message code is key exchange init
if data[5] != 20:
del self.session_data[flow_key]
return protocol_type, fp_str_, None
# extract fingerprint string
self.session_data[flow_key]['kex'] = data
fp_str_ = self.extract_fingerprint(self.session_data[flow_key])
del self.session_data[flow_key]
return protocol_type, fp_str_, None
def extract_fingerprint(self, ssh_):
fp_str_ = ''
fp_str_ += '(' + ssh_['protocol'][:-2].hex() + ')'
data = ssh_['kex']
kex_length = int.from_bytes(data[0:4], byteorder='big')
# skip over message headers and Cookie field
offset = 22
if offset > len(data):
return None
# parse kex algorithms
for i in range(10):
fp_str_, offset = self.parse_kex_field(data, offset, fp_str_)
if offset == None:
return None
return fp_str_
def parse_kex_field(self, data, offset, fp_str_):
len_ = int.from_bytes(data[offset:offset+4], byteorder='big')
fp_str_ += '(' + data[offset+4:offset+4+len_].hex() + ')'
offset += 4 + len_
if offset > len(data):
return None, None
return fp_str_, offset
def get_human_readable(self, fp_str_):
fields = [bytes.fromhex(s_[1:]) for s_ in fp_str_.split(')')[:-1]]
fp_h = {}
fp_h['protocol'] = fields[0].decode().split(',')
fp_h['kex_algos'] = fields[1].decode().split(',')
fp_h['s_host_key_algos'] = fields[2].decode().split(',')
fp_h['c_enc_algos'] = fields[3].decode().split(',')
fp_h['s_enc_algos'] = fields[4].decode().split(',')
fp_h['c_mac_algos'] = fields[5].decode().split(',')
fp_h['s_mac_algos'] = fields[6].decode().split(',')
fp_h['c_comp_algos'] = fields[7].decode().split(',')
fp_h['s_comp_algos'] = fields[8].decode().split(',')
fp_h['c_languages'] = fields[9].decode().split(',')
fp_h['s_languages'] = fields[10].decode().split(',')
return fp_h
|
mmselfsup/models/algorithms/base.py
|
mitming/mmselfsup
| 355 |
136254
|
<gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
class BaseModel(BaseModule, metaclass=ABCMeta):
"""Base model class for self-supervised learning."""
def __init__(self, init_cfg=None):
super(BaseModel, self).__init__(init_cfg)
self.fp16_enabled = False
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
@property
def with_head(self):
return hasattr(self, 'head') and self.head is not None
@abstractmethod
def extract_feat(self, imgs):
"""Function to extract features from backbone.
Args:
img (Tensor): Input images. Typically these should be mean centered
and std scaled.
"""
pass
@abstractmethod
def forward_train(self, imgs, **kwargs):
"""
Args:
img (list[Tensor]): List of tensors. Typically these should be
mean centered and std scaled.
kwargs (keyword arguments): Specific to concrete implementation.
"""
pass
def forward_test(self, imgs, **kwargs):
"""
Args:
img (Tensor): List of tensors. Typically these should be
mean centered and std scaled.
kwargs (keyword arguments): Specific to concrete implementation.
"""
pass
@auto_fp16(apply_to=('img', ))
def forward(self, img, mode='train', **kwargs):
"""Forward function of model.
Calls either forward_train, forward_test or extract_feat function
according to the mode.
"""
if mode == 'train':
return self.forward_train(img, **kwargs)
elif mode == 'test':
return self.forward_test(img, **kwargs)
elif mode == 'extract':
return self.extract_feat(img)
else:
raise Exception(f'No such mode: {mode}')
def _parse_losses(self, losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
elif isinstance(loss_value, dict):
for name, value in loss_value.items():
log_vars[name] = value
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def train_step(self, data, optimizer):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as for GANs.
Args:
data (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: Dict of outputs. The following fields are contained.
- loss (torch.Tensor): A tensor for back propagation, which \
can be a weighted sum of multiple losses.
- log_vars (dict): Dict contains all the variables to be sent \
to the logger.
- num_samples (int): Indicates the batch size (when the model \
is DDP, it means the batch size on each GPU), which is \
used for averaging the logs.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
if isinstance(data['img'], list):
num_samples = len(data['img'][0].data)
else:
num_samples = len(data['img'].data)
outputs = dict(loss=loss, log_vars=log_vars, num_samples=num_samples)
return outputs
def val_step(self, data, optimizer):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
if isinstance(data['img'], list):
num_samples = len(data['img'][0].data)
else:
num_samples = len(data['img'].data)
outputs = dict(loss=loss, log_vars=log_vars, num_samples=num_samples)
return outputs
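# Illustrative sketch (not part of the class; ``model`` is a hypothetical concrete subclass
# instance). _parse_losses sums every log entry whose key contains 'loss' and keeps the rest
# for logging only:
#
#   losses = dict(loss_cls=torch.tensor(0.5), loss_aux=torch.tensor(0.1), acc=torch.tensor(0.9))
#   loss, log_vars = model._parse_losses(losses)   # loss == 0.6; 'acc' is only logged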
|
cirq-google/cirq_google/ops/sycamore_gate.py
|
LLcat1217/Cirq
| 3,326 |
136277
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An instance of FSimGate that works naturally on Google's Sycamore chip"""
import numpy as np
import cirq
from cirq._doc import document
class SycamoreGate(cirq.FSimGate):
"""The Sycamore gate is a two-qubit gate equivalent to FSimGate(π/2, π/6).
The unitary of this gate is
[[1, 0, 0, 0],
[0, 0, -1j, 0],
[0, -1j, 0, 0],
[0, 0, 0, exp(- 1j * π/6)]]
This gate can be performed on Google's Sycamore chip and is
close to the gates that were used to demonstrate quantum
supremacy in this paper:
https://www.nature.com/articles/s41586-019-1666-5
"""
def __init__(self):
super().__init__(theta=np.pi / 2, phi=np.pi / 6)
def __repr__(self) -> str:
return 'cirq_google.SYC'
def __str__(self) -> str:
return 'SYC'
def _circuit_diagram_info_(self, args: cirq.CircuitDiagramInfoArgs):
return 'SYC', 'SYC'
def _json_dict_(self):
return cirq.obj_to_dict_helper(self, [])
SYC = SycamoreGate()
document(
SYC,
"""The Sycamore gate is a two-qubit gate equivalent to FSimGate(π/2, π/6).
The unitary of this gate is
[[1, 0, 0, 0],
[0, 0, -1j, 0],
[0, -1j, 0, 0],
[0, 0, 0, exp(- 1j * π/6)]]
This gate can be performed on Google's Sycamore chip and is
close to the gates that were used to demonstrate quantum
supremacy in this paper:
https://www.nature.com/articles/s41586-019-1666-5
""",
)
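# Usage sketch (illustrative, not part of this module): SYC is applied like any other
# two-qubit cirq gate, e.g.
#
#   q0, q1 = cirq.LineQubit.range(2)
#   circuit = cirq.Circuit(SYC(q0, q1))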
|
kansha/card_addons/vote/models.py
|
AnomalistDesignLLC/kansha
| 161 |
136299
|
# -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from sqlalchemy import func
from elixir import ManyToOne
from elixir import using_options
from nagare.database import session
from kansha.models import Entity
class DataVote(Entity):
using_options(tablename='vote')
user = ManyToOne('DataUser', ondelete='CASCADE')
card = ManyToOne('DataCard', ondelete='CASCADE')
@classmethod
def new(cls, card, user):
"""Create and persist."""
vote = cls(card=card, user=user)
session.add(vote)
session.flush()
return vote
@classmethod
def get_vote(cls, card, user):
'''Return the DataVote instance matching the given user and card
In:
- ``card`` -- DataCard instance
- ``user`` -- DataUser instance
Return:
- DataVote instance
'''
return cls.get_by(user=user, card=card)
@classmethod
def has_voted(cls, card, user):
'''Return whether a user has voted for a given card
In:
- ``card`` -- DataCard instance
- ``user`` -- DataUser instance
Return:
- True if a vote is found, False otherwise
'''
return cls.get_vote(card, user) is not None
@classmethod
def count_votes(cls, card):
q = cls.query
q = q.filter(cls.card == card)
return q.with_entities(func.count()).scalar()
@classmethod
def purge(cls, card):
for vote in cls.query.filter_by(card=card):
vote.delete()
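# Usage sketch (illustrative; ``card`` and ``user`` are assumed to be existing DataCard and
# DataUser rows and an active database session is required):
#
#   if not DataVote.has_voted(card, user):
#       DataVote.new(card, user)
#   print(DataVote.count_votes(card))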
|
library/rpi_ws281x/__init__.py
|
paulhayes/rpi-ws281x-python
| 219 |
136305
|
# New canonical package, to support `import rpi_ws281x`
from .rpi_ws281x import PixelStrip, Adafruit_NeoPixel, Color, ws
from _rpi_ws281x import *
__version__ = '4.3.0'
|
python/ql/test/3/query-tests/Expressions/Arguments/wrong_arguments.py
|
vadi2/codeql
| 4,036 |
136315
|
#Make sure that we handle keyword-only arguments correctly
def f(a, *varargs, kw1, kw2="has-default"):
pass
#OK
f(1, 2, 3, kw1=1)
f(1, 2, kw1=1, kw2=2)
#Not OK
f(1, 2, 3, kw1=1, kw3=3)
f(1, 2, 3, kw3=3)
#ODASA-5897
def analyze_member_access(msg, *, original, override, chk: 'default' = None):
pass
def ok():
return analyze_member_access(msg, original=original, chk=chk)
def bad():
return analyze_member_access(msg, original, chk=chk)
|
insights/parsr/examples/multipath_conf.py
|
lhuett/insights-core
| 121 |
136322
|
"""
multipath_conf parses multipath.conf configuration files into nested
dictionaries.
"""
import string
from insights.parsr import (EOF, Forward, LeftCurly, Lift, Literal, LineEnd,
RightCurly, Many, Number, OneLineComment, PosMarker, skip_none, String,
QuotedString, WS, WSChar)
from insights.parsr.query import Entry
def loads(data):
return Entry(children=Top(data)[0])
def load(f):
return loads(f.read())
def to_entry(name, rest):
if isinstance(rest, list):
return Entry(name=name.value, children=rest, lineno=name.lineno)
return Entry(name=name.value, attrs=[rest], lineno=name.lineno)
Stmt = Forward()
Num = Number & (WSChar | LineEnd)
NULL = Literal("none", value=None)
Comment = (WS >> OneLineComment("#").map(lambda x: None))
BeginBlock = (WS >> LeftCurly << WS)
EndBlock = (WS >> RightCurly << WS)
Bare = String(set(string.printable) - (set(string.whitespace) | set("#{}'\"")))
Name = WS >> PosMarker(String(string.ascii_letters + "_")) << WS
Value = WS >> (Num | NULL | QuotedString | Bare) << WS
Block = BeginBlock >> Many(Stmt).map(skip_none) << EndBlock
Stanza = (Lift(to_entry) * Name * (Block | Value)) | Comment
Stmt <= WS >> Stanza << WS
Doc = Many(Stmt).map(skip_none)
Top = Doc + EOF
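# Minimal usage sketch (not part of the original module). ``loads`` returns an
# ``insights.parsr.query.Entry`` whose children are the top-level configuration blocks;
# the sample configuration below is illustrative only.
#
#   sample = """
#   defaults {
#       user_friendly_names yes
#       path_grouping_policy multibus
#   }
#   """
#   doc = loads(sample)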
|
Tests/modules/misc/test_system_namespaces.py
|
aisk/ironpython3
| 1,872 |
136332
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
'''
Ensures we can import from .NET 2.0 namespaces and types
'''
import unittest
from iptest import IronPythonTestCase, is_cli, is_mono, is_netcoreapp, run_test, skipUnlessIronPython
if is_cli and not is_netcoreapp:
import clr
clr.AddReference("System.Configuration")
clr.AddReference("System.Configuration.Install")
clr.AddReference("System.Data")
clr.AddReference("System.Data.OracleClient")
if is_mono:
clr.AddReference("System.Data.SqlClient")
else:
clr.AddReference("System.Data.SqlXml")
clr.AddReference("System.Deployment")
clr.AddReference("System.Design")
clr.AddReference("System.DirectoryServices")
clr.AddReference("System.DirectoryServices.Protocols")
clr.AddReference("System.Drawing.Design")
clr.AddReference("System.Drawing")
clr.AddReference("System.EnterpriseServices")
clr.AddReference("System.Management")
clr.AddReference("System.Messaging")
clr.AddReference("System.Runtime.Remoting")
clr.AddReference("System.Runtime.Serialization.Formatters.Soap")
clr.AddReference("System.Security")
clr.AddReference("System.ServiceProcess")
clr.AddReference("System.Transactions")
clr.AddReference("System.Web")
clr.AddReference("System.Web.Mobile")
clr.AddReference("System.Web.RegularExpressions")
clr.AddReference("System.Web.Services")
clr.AddReference("System.Windows.Forms")
clr.AddReference("System.Xml")
from System import *
from System.CodeDom import *
from System.CodeDom.Compiler import *
from System.Collections import *
from System.Collections.Generic import *
from System.Collections.ObjectModel import *
from System.Collections.Specialized import *
from System.ComponentModel import *
from System.ComponentModel.Design import *
from System.ComponentModel.Design.Data import *
from System.ComponentModel.Design.Serialization import *
from System.Configuration import *
from System.Configuration.Assemblies import *
from System.Configuration.Install import *
from System.Configuration.Internal import *
from System.Configuration.Provider import *
from System.Data import *
from System.Data.Common import *
from System.Data.Design import *
from System.Data.Odbc import *
from System.Data.OleDb import *
from System.Data.OracleClient import *
from System.Data.Sql import *
from System.Data.SqlClient import *
from System.Data.SqlTypes import *
if not is_mono:
from System.Deployment.Application import *
from System.Deployment.Internal import *
from System.Diagnostics import *
from System.Diagnostics.CodeAnalysis import *
from System.Diagnostics.Design import *
from System.Diagnostics.SymbolStore import *
from System.DirectoryServices import *
from System.DirectoryServices.ActiveDirectory import *
from System.DirectoryServices.Protocols import *
from System.Drawing import *
from System.Drawing.Design import *
from System.Drawing.Drawing2D import *
from System.Drawing.Imaging import *
from System.Drawing.Printing import *
from System.Drawing.Text import *
from System.EnterpriseServices import *
from System.EnterpriseServices.CompensatingResourceManager import *
from System.EnterpriseServices.Internal import *
from System.Globalization import *
from System.IO import *
from System.IO.Compression import *
from System.IO.IsolatedStorage import *
from System.IO.Ports import *
from System.Management import *
from System.Management.Instrumentation import *
from System.Media import *
from System.Messaging import *
from System.Messaging.Design import *
from System.Net import *
from System.Net.Cache import *
from System.Net.Configuration import *
from System.Net.Mail import *
from System.Net.Mime import *
from System.Net.NetworkInformation import *
from System.Net.Security import *
from System.Net.Sockets import *
from System.Reflection import *
from System.Reflection.Emit import *
from System.Resources import *
from System.Resources.Tools import *
from System.Runtime import *
from System.Runtime.CompilerServices import *
from System.Runtime.ConstrainedExecution import *
from System.Runtime.Hosting import *
from System.Runtime.InteropServices import *
from System.Runtime.InteropServices.ComTypes import *
from System.Runtime.InteropServices.Expando import *
from System.Runtime.Remoting import *
from System.Runtime.Remoting.Activation import *
from System.Runtime.Remoting.Channels import *
from System.Runtime.Remoting.Channels.Http import *
from System.Runtime.Remoting.Channels.Ipc import *
from System.Runtime.Remoting.Channels.Tcp import *
from System.Runtime.Remoting.Contexts import *
from System.Runtime.Remoting.Lifetime import *
from System.Runtime.Remoting.Messaging import *
from System.Runtime.Remoting.Metadata import *
from System.Runtime.Remoting.Metadata.W3cXsd2001 import *
from System.Runtime.Remoting.MetadataServices import *
from System.Runtime.Remoting.Proxies import *
from System.Runtime.Remoting.Services import *
from System.Runtime.Serialization import *
from System.Runtime.Serialization.Formatters import *
from System.Runtime.Serialization.Formatters.Binary import *
from System.Runtime.Serialization.Formatters.Soap import *
from System.Runtime.Versioning import *
from System.Security import *
from System.Security.AccessControl import *
from System.Security.Authentication import *
from System.Security.Cryptography import *
from System.Security.Cryptography.Pkcs import *
from System.Security.Cryptography.X509Certificates import *
from System.Security.Cryptography.Xml import *
from System.Security.Permissions import *
from System.Security.Policy import *
from System.Security.Principal import *
from System.ServiceProcess import *
from System.ServiceProcess.Design import *
from System.Text import *
from System.Text.RegularExpressions import *
from System.Threading import *
from System.Timers import *
from System.Transactions import *
from System.Transactions.Configuration import *
from System.Web import *
from System.Web.Caching import *
from System.Web.Compilation import *
from System.Web.Configuration import *
from System.Web.Configuration.Internal import *
from System.Web.Handlers import *
from System.Web.Hosting import *
from System.Web.Mail import *
from System.Web.Management import *
if not is_mono:
from System.Web.Mobile import *
from System.Web.RegularExpressions import *
from System.Web.Profile import *
from System.Web.Security import *
from System.Web.Services import *
from System.Web.Services.Configuration import *
from System.Web.Services.Description import *
from System.Web.Services.Discovery import *
from System.Web.Services.Protocols import *
from System.Web.SessionState import *
from System.Web.UI import *
from System.Web.UI.Adapters import *
from System.Web.UI.Design import *
if not is_mono:
from System.Web.UI.Design.MobileControls import *
from System.Web.UI.Design.MobileControls.Converters import *
from System.Web.UI.MobileControls import *
from System.Web.UI.MobileControls.Adapters import *
from System.Web.UI.MobileControls.Adapters.XhtmlAdapters import *
from System.Web.UI.Design.WebControls.WebParts import *
from System.Web.UI.Design.WebControls import *
from System.Web.UI.HtmlControls import *
from System.Web.UI.WebControls import *
from System.Web.UI.WebControls.Adapters import *
from System.Web.UI.WebControls.WebParts import *
from System.Web.Util import *
from System.Windows.Forms import *
from System.Windows.Forms.ComponentModel.Com2Interop import *
from System.Windows.Forms.Design import *
from System.Windows.Forms.Design.Behavior import *
from System.Windows.Forms.Layout import *
from System.Windows.Forms.PropertyGridInternal import *
from System.Windows.Forms.VisualStyles import *
from System.Xml import *
from System.Xml.Schema import *
from System.Xml.Serialization import *
from System.Xml.Serialization.Advanced import *
from System.Xml.Serialization.Configuration import *
from System.Xml.XPath import *
from System.Xml.Xsl import *
from System.Xml.Xsl.Runtime import *
broken_types = []
def deep_dive(in_name, in_type):
if is_cli:
import System
stuff_list = dir(in_type)
for member in stuff_list:
member_type = eval("type(%s.%s)" % (in_name, member))
member_fullname = in_name + "." + member
if (member_type in [type, type(System)]) and (member not in ["__class__"]):
if member_fullname in broken_types:
print("SKIPPING", member_fullname)
continue
net_type = Type.GetType(member_fullname)
#We can only import * from static classes.
if not net_type or (not (net_type.IsAbstract and net_type.IsSealed) and not net_type.IsEnum):
continue
print(member_fullname)
exec("from " + member_fullname + " import *")
deep_dive(member_fullname, member_type)
@unittest.skipIf(is_netcoreapp, 'references are different')
@skipUnlessIronPython()
class SystemNamespacesTest(IronPythonTestCase):
def test_system_deep(self):
import System
deep_dive("System", System)
run_test(__name__)
|
src/app/beer_garden/api/http/authentication/login_handlers/__init__.py
|
ExpressHermes/beer-garden
| 230 |
136347
|
from .basic import BasicLoginHandler
from .certificate import CertificateLoginHandler
LOGIN_HANDLERS = [BasicLoginHandler, CertificateLoginHandler]
|
src/deepqmc/extra/memory.py
|
zenoone/deepqmc
| 224 |
136353
|
import collections
import torch
__all__ = ()
EMPTY_TYPES = (str, type(None), type({}.keys()))
UNKNOWN_CLASSES = set()
def get_children(obj):
if type(obj) in (dict, collections.defaultdict):
return obj.items()
if type(obj) is list or isinstance(obj, tuple):
return ((i, v) for i, v in enumerate(obj))
if type(obj) in (set, frozenset, collections.deque):
return (('?', v) for v in obj)
try:
return obj.__dict__.items()
except AttributeError:
pass
UNKNOWN_CLASSES.add(str(type(obj)))
return ()
def find_large_cuda_tensors(obj, depth=False, threshold=1e6):
visited = set()
queue = collections.deque()
queue.append((obj, ''))
while queue:
n, addr = queue.pop() if depth else queue.popleft()
visited.add(id(n))
if torch.is_tensor(n) and n.is_cuda and n.numel() > threshold:
print(addr, type(n), n.shape)
queue.extend(
(v, f'{addr}.{k}') for k, v in get_children(n) if id(v) not in visited
)
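# Usage sketch (illustrative; ``trainer`` stands for any Python object graph, e.g. a model
# or training loop state): print the address and shape of every CUDA tensor holding more
# than ``threshold`` elements.
#
#   find_large_cuda_tensors(trainer, depth=True, threshold=1e7)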
|
fetch_cord/run_rpc.py
|
TabulateJarl8/FetchCord
| 286 |
136365
|
#from __future__ import annotations
from typing import Callable, Dict
from pypresence import Presence, exceptions
import time, sys
# import info about system
from .args import parse_args
from .config import ConfigError, load_config
from .computer.Computer import Computer
args = parse_args()
class Run_rpc:
rpcs: Dict[str, Presence]
config: Dict
loops: Dict[str, Callable[['Run_rpc', str, Computer], None]] # Cannot use Run_rpc for type hinting unless doing the __future__.annotations import
loops_indexes: Dict[int, str]
poll_rate: int
update: Callable
def __init__(self):
self.rpcs = {}
try:
self.config = load_config()
except ConfigError as e:
print("Error loading config file, using default values: %s" % str(e))
def set_loop(
self, loops: Dict, loops_indexes: Dict, update: Callable, poll_rate: int = 3
):
self.loops = loops
self.loops_indexes = loops_indexes
self.poll_rate = poll_rate
self.update = update
def run_loop(self, computer: Computer):
try:
loop = 0
while True:
for i in range(len(self.loops_indexes)):
if loop == self.poll_rate:
self.update()
loop = 0
try:
client_id, func = self.loops[self.loops_indexes[i]]
if args.debug:
print(self.rpcs)
print(
"{} not in : {}".format(
self.loops_indexes[i],
self.loops_indexes[i] not in self.rpcs,
)
)
if self.loops_indexes[i] not in self.rpcs:
self.rpcs[self.loops_indexes[i]] = Presence(client_id)
self.try_connect(self.loops_indexes[i])
func(self, self.loops_indexes[i], computer)
loop += 1
except ConnectionResetError:
self.try_connect(self.loops_indexes[i])
except KeyboardInterrupt:
print("Closing connection.")
sys.exit(0)
def try_connect(self, key: str):
while True:
try:
if args.debug:
print('try_connect(key="{}") on {}'.format(key, self.rpcs[key]))
self.rpcs[key].connect()
break
except ConnectionRefusedError:
print(
"RPC connection refused (is Discord open?); trying again in 30 seconds"
)
time.sleep(30)
def try_clear(self, key: str):
# Pypresence clear doesn't work anymore
# try:
# if args.debug:
# print(
# "[key={}] try_clear(pid={} on {}".format(
# key, os.getpid(), self.rpcs[key]
# )
# )
# self.rpcs[key].clear(pid=os.getpid())
# except exceptions.InvalidID:
# pass
# except exceptions.ServerError as e:
# print(e)
# pass
self.rpcs[key].close()
def try_update(
self,
key: str,
state,
details,
large_image,
large_text,
small_image,
small_text,
start,
):
try:
if args.debug:
print('try_update(key="{}") on {}'.format(key, self.rpcs[key]))
self.rpcs[key].update(
state=state,
details=details,
large_image=large_image,
large_text=large_text,
small_image=small_image,
small_text=small_text,
start=start,
)
# ConnectionResetError is here to avoid crashing if Discord is still just starting
except (ConnectionResetError, exceptions.InvalidID):
pass
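# Wiring sketch (illustrative only; the client id and callback below are hypothetical and a
# running Discord client is required). ``loops`` maps a key to a (client_id, callback) pair
# and ``loops_indexes`` fixes the iteration order:
#
#   rpc = Run_rpc()
#   loops = {"cpu": ("000000000000000000", cpu_loop_callback)}
#   rpc.set_loop(loops, {0: "cpu"}, update=lambda: None, poll_rate=3)
#   rpc.run_loop(Computer())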
|
tests/syntax/indentation_error_4.py
|
matan-h/friendly
| 287 |
136373
|
'''Should raise IndentationError'''
def f():
s = "a"\
"b"
"c"
f()
|
lenstronomy/GalKin/galkin_model.py
|
DarthLazar/lenstronomy
| 107 |
136379
|
from lenstronomy.GalKin.numeric_kinematics import NumericKinematics
from lenstronomy.GalKin.analytic_kinematics import AnalyticKinematics
__all__ = ['GalkinModel']
class GalkinModel(object):
"""
this class handles all the kinematic modeling aspects of Galkin
Excluded are observational conditions (seeing, aperture etc)
Major class to compute velocity dispersion measurements given light and mass models
The class supports any mass and light distribution (and superposition thereof) that has a 3d correspondence to its
2d lens model distribution. For models that do not have this correspondence, you may want to apply a
Multi-Gaussian Expansion (MGE) to the model and de-project the MGE to 3d.
The computation follows Mamon&Lokas 2005.
The class supports various types of anisotropy models (see Anisotropy class).
Solving the Jeans Equation requires a numerical integral over the 3d light and mass profile (see Mamon&Lokas 2005).
This class (as well as the dedicated LightModel and MassModel classes) perform those integral numerically with an
interpolated grid.
The cosmology assumed to compute the physical mass and distances are set via the kwargs_cosmo keyword arguments.
d_d: Angular diameter distance to the deflector (in Mpc)
d_s: Angular diameter distance to the source (in Mpc)
d_ds: Angular diameter distance from the deflector to the source (in Mpc)
The numerical options can be chosen through the kwargs_numerics keywords
interpol_grid_num: number of interpolation points in the light and mass profile (radially). This number should
be chosen high enough to accurately describe the true light profile underneath.
log_integration: bool, if True, performs the interpolation and numerical integration in log-scale.
max_integrate: maximum 3d radius to where the numerical integration of the Jeans Equation solver is made.
This value should be large enough to contain most of the light and to lead to a converged result.
min_integrate: minimal integration value. This value should be very close to zero but some mass and light
profiles are diverging and a numerically stable value should be chosen.
These numerical options should be chosen to allow for a converged result (within your tolerance) but not so
conservative that they impact the computational cost too much. Reasonable values might depend on the specific problem.
"""
def __init__(self, kwargs_model, kwargs_cosmo, kwargs_numerics=None, analytic_kinematics=False):
"""
:param kwargs_model: keyword arguments describing the model components
:param kwargs_cosmo: keyword arguments that define the cosmology in terms of the angular diameter distances involved
:param kwargs_numerics: numerics keyword arguments
:param analytic_kinematics: bool, if True uses the analytic kinematic model
"""
if kwargs_numerics is None:
kwargs_numerics = {'interpol_grid_num': 200, # numerical interpolation, should converge -> infinity
'log_integration': True,
# log or linear interpolation of surface brightness and mass models
'max_integrate': 100,
'min_integrate': 0.001} # lower/upper bound of numerical integrals
if analytic_kinematics is True:
anisotropy_model = kwargs_model.get('anisotropy_model')
if not anisotropy_model == 'OM':
raise ValueError('analytic kinematics only available for OsipkovMerritt ("OM") anisotropy model.')
self.numerics = AnalyticKinematics(kwargs_cosmo=kwargs_cosmo, **kwargs_numerics)
else:
self.numerics = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics)
self._analytic_kinematics = analytic_kinematics
def check_df(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy):
"""
checks whether the phase space distribution function of a given anisotropy model is positive.
Currently this is implemented by the relation provided by Ciotti and Morganti 2010 equation (10)
https://arxiv.org/pdf/1006.2344.pdf
:param r: 3d radius to check slope-anisotropy constraint
:param kwargs_mass: keyword arguments of the mass profile
:param kwargs_light: keyword arguments of the light profile
:param kwargs_anisotropy: keyword arguments of the anisotropy model
:return: equation (10) >= 0 for physical interpretation
"""
dr = 0.01 # finite differential in radial direction
r_dr = r + dr
sigmar2 = self.numerics.sigma_r2(r, kwargs_mass, kwargs_light, kwargs_anisotropy)
sigmar2_dr = self.numerics.sigma_r2(r_dr, kwargs_mass, kwargs_light, kwargs_anisotropy)
grav_pot = self.numerics.grav_potential(r, kwargs_mass)
grav_pot_dr = self.numerics.grav_potential(r_dr, kwargs_mass)
self.numerics.delete_cache()
return r * (sigmar2_dr - sigmar2 - grav_pot + grav_pot_dr) / dr
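# Example numerics settings (a sketch; the values are illustrative and should be tuned until
# the velocity dispersion converges for the problem at hand, and ``kwargs_model`` /
# ``kwargs_cosmo`` are assumed to be defined as described above):
#
#   kwargs_numerics = {'interpol_grid_num': 500, 'log_integration': True,
#                      'max_integrate': 100, 'min_integrate': 0.001}
#   galkin = GalkinModel(kwargs_model, kwargs_cosmo, kwargs_numerics=kwargs_numerics)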
|
test/test_jit_disabled.py
|
xiaohanhuang/pytorch
| 183 |
136402
|
# Owner(s): ["oncall: jit"]
import sys
import os
import contextlib
import subprocess
from torch.testing._internal.common_utils import TestCase, run_tests, TemporaryFileName
@contextlib.contextmanager
def _jit_disabled():
cur_env = os.environ.get("PYTORCH_JIT", "1")
os.environ["PYTORCH_JIT"] = "0"
try:
yield
finally:
os.environ["PYTORCH_JIT"] = cur_env
class TestJitDisabled(TestCase):
"""
These tests are separate from the rest of the JIT tests because we need to
run a new subprocess and `import torch` with the correct environment
variables set.
"""
def compare_enabled_disabled(self, src):
"""
Runs the script in `src` with PYTORCH_JIT enabled and disabled and
compares their stdout for equality.
"""
# Write `src` out to a temporary so our source inspection logic works
# correctly.
with TemporaryFileName() as fname:
with open(fname, 'w') as f:
f.write(src)
with _jit_disabled():
out_disabled = subprocess.check_output([
sys.executable,
fname])
out_enabled = subprocess.check_output([
sys.executable,
fname])
self.assertEqual(out_disabled, out_enabled)
def test_attribute(self):
_program_string = """
import torch
class Foo(torch.jit.ScriptModule):
def __init__(self, x):
super(Foo, self).__init__()
self.x = torch.jit.Attribute(x, torch.Tensor)
def forward(self, input):
return input
s = Foo(torch.ones(2, 3))
print(s.x)
"""
self.compare_enabled_disabled(_program_string)
def test_script_module_construction(self):
_program_string = """
import torch
class AModule(torch.jit.ScriptModule):
def __init__(self):
super(AModule, self).__init__()
@torch.jit.script_method
def forward(self, input):
pass
AModule()
print("Didn't throw exception")
"""
self.compare_enabled_disabled(_program_string)
def test_recursive_script(self):
_program_string = """
import torch
class AModule(torch.nn.Module):
def __init__(self):
super(AModule, self).__init__()
def forward(self, input):
pass
sm = torch.jit.script(AModule())
print("Didn't throw exception")
"""
self.compare_enabled_disabled(_program_string)
if __name__ == '__main__':
run_tests()
|
panel/depends.py
|
sthagen/holoviz-panel
| 601 |
136426
|
import param
from param.parameterized import iscoroutinefunction
from .widgets import Widget
ipywidget_classes = {}
def param_value_if_widget(arg):
if isinstance(arg, Widget):
return arg.param.value
from .pane.ipywidget import IPyWidget
if IPyWidget.applies(arg) and hasattr(arg, 'value'):
name = type(arg).__name__
if name in ipywidget_classes:
ipy_param = ipywidget_classes[name]
else:
ipy_param = param.parameterized_class(name, {'value': param.Parameter()})
ipywidget_classes[name] = ipy_param
ipy_inst = ipy_param(value=arg.value)
arg.observe(lambda event: ipy_inst.param.update(value=event['new']), 'value')
return ipy_inst.param.value
return arg
def depends(*args, **kwargs):
"""
Python decorator annotating a function or `Parameterized` method to
express its dependencies on a set of Parameters.
Returns a "reactive" function that binds (some of) its arguments to
Parameter values. This means that the "reactive" function can
(or will if `watch=True`) be automatically invoked whenever the underlying
parameter values change.
See also `pn.bind`.
Reference: https://panel.holoviz.org/user_guide/APIs.html#reactive-functions
:Example:
>>> widget = pn.widgets.IntSlider(value=1, start=1, end=5)
>>> @pn.depends(a=widget)
... def add(a,b=1):
... return a+b
>>> pn.Column(widget, add)
This function is the same as the corresponding `param.depends`
decorator, but extended so that if widgets are provided as
dependencies, the underlying `value` Parameter of the widget is
extracted as the actual dependency.
This extension is solely for syntactic convenience, allowing the widget to
be passed in as a synonym for the underlying parameter. Apart from that
extension, this decorator otherwise behaves the same as the underlying
Param depends decorator.
For the Panel version of the decorator, the specified dependencies
can either be Parameter instances, Panel or ipywidgets widgets,
or, if a Parameterized method is supplied rather than a function,
they can be defined either as string names of Parameters of this
object or as Parameter objects of this object's subobjects (i.e.,
Parameterized objects that are values of this object's
Parameters). See the docs for the corresponding param.depends
decorator for further details.
"""
updated_args = [param_value_if_widget(a) for a in args]
updated_kwargs = {k: param_value_if_widget(v) for k, v in kwargs.items()}
return param.depends(*updated_args, **updated_kwargs)
def bind(function, *args, watch=False, **kwargs):
"""
Returns a "reactive" function that binds (some of) its arguments to
Parameter values. This means that the "reactive" function can
(or will if `watch=True`) be automatically invoked whenever the underlying
parameter values change.
Reference: https://panel.holoviz.org/user_guide/APIs.html#reactive-functions
:Example:
>>> def add(a,b):
... return a+b
>>> widget = pn.widgets.IntSlider(value=1, start=1, end=5)
>>> iadd = pn.bind(add, a=widget, b=1)
>>> pn.Column(widget, iadd)
This function is the same as `param.bind`, but extended so that if
widgets are provided as values, the underlying `value` Parameter
of the widget is extracted as the actual argument value and
dependency. This extension is solely for syntactic convenience,
allowing the widget to be passed in as a synonym for the
underlying parameter. Apart from that extension, this function
otherwise behaves the same as the corresponding Param function.
This function allows dynamically recomputing the output of the
provided function whenever one of the bound parameters
changes. For Panel, the parameters are typically values of
widgets, making it simple to have output that reacts to changes in
the widgets. Arguments can also be bound to other parameters (not
part of widgets) or even to constants.
Arguments
---------
function: callable
The function to bind constant or dynamic args and kwargs to.
args: object, param.Parameter, panel.widget.Widget, or ipywidget
Positional arguments to bind to the function.
watch: boolean
Whether to evaluate the function automatically whenever one of
the bound parameters changes.
kwargs: object, param.Parameter, panel.widget.Widget, or ipywidget
Keyword arguments to bind to the function.
Returns
-------
Returns a new function with the args and kwargs bound to it and
annotated with all dependencies.
"""
updated_args = [param_value_if_widget(a) for a in args]
updated_kwargs = {k: param_value_if_widget(v) for k, v in kwargs.items()}
return _param_bind(function, *updated_args, watch=watch, **updated_kwargs)
# Temporary; to move to Param
def _param_bind(function, *args, watch=False, **kwargs):
"""
Given a function, returns a wrapper function that binds the values
of some or all arguments to Parameter values and expresses Param
dependencies on those values, so that the function can be invoked
whenever the underlying values change and the output will reflect
those updated values.
As for functools.partial, arguments can also be bound to constants,
which allows all of the arguments to be bound, leaving a simple
callable object.
Arguments
---------
function: callable
The function to bind constant or dynamic args and kwargs to.
args: object, param.Parameter
Positional arguments to bind to the function.
watch: boolean
Whether to evaluate the function automatically whenever one of
the bound parameters changes.
kwargs: object, param.Parameter
Keyword arguments to bind to the function.
Returns
-------
Returns a new function with the args and kwargs bound to it and
annotated with all dependencies.
"""
dependencies = {}
for i, arg in enumerate(args):
p = param_value_if_widget(arg)
if hasattr(p, '_dinfo'):
for j, arg in enumerate(p._dinfo['dependencies']):
dependencies[f'__arg{i}_arg{j}'] = arg
for kw, kwarg in p._dinfo['kw'].items():
dependencies[f'__arg{i}_arg_{kw}'] = kwarg
elif isinstance(p, param.Parameter):
dependencies[f'__arg{i}'] = p
for kw, v in kwargs.items():
p = param_value_if_widget(v)
if hasattr(p, '_dinfo'):
for j, arg in enumerate(p._dinfo['dependencies']):
dependencies[f'__kwarg_{kw}_arg{j}'] = arg
for pkw, kwarg in p._dinfo['kw'].items():
dependencies[f'__kwarg_{kw}_{pkw}'] = kwarg
elif isinstance(p, param.Parameter):
dependencies[kw] = p
def combine_arguments(wargs, wkwargs):
combined_args = []
for arg in args:
if hasattr(arg, '_dinfo'):
arg = arg()
elif isinstance(arg, param.Parameter):
arg = getattr(arg.owner, arg.name)
combined_args.append(arg)
combined_args += list(wargs)
combined_kwargs = {}
for kw, arg in kwargs.items():
if hasattr(arg, '_dinfo'):
arg = arg()
elif isinstance(arg, param.Parameter):
arg = getattr(arg.owner, arg.name)
combined_kwargs[kw] = arg
for kw, arg in wkwargs.items():
if kw.startswith('__arg') or kw.startswith('__kwarg'):
continue
combined_kwargs[kw] = arg
return combined_args, combined_kwargs
if iscoroutinefunction(function):
@depends(**dependencies, watch=watch)
async def wrapped(*wargs, **wkwargs):
combined_args, combined_kwargs = combine_arguments(wargs, wkwargs)
return await function(*combined_args, **combined_kwargs)
else:
@depends(**dependencies, watch=watch)
def wrapped(*wargs, **wkwargs):
combined_args, combined_kwargs = combine_arguments(wargs, wkwargs)
return function(*combined_args, **combined_kwargs)
wrapped.__bound_function__ = function
return wrapped
__all__ = ["bind", "depends"]
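# Hedged sketch: ``bind`` also accepts plain Parameters and constants, behaving like
# ``functools.partial`` with dependency tracking (the class and names below are illustrative):
#
#   import param
#
#   class Settings(param.Parameterized):
#       scale = param.Number(default=2)
#
#   settings = Settings()
#   scaled = bind(lambda x, scale: x * scale, 5, scale=settings.param.scale)
#   scaled()            # -> 10
#   settings.scale = 3
#   scaled()            # -> 15, re-evaluated with the new parameter value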
|
mordred/PBF.py
|
ademidun/mordred
| 199 |
136429
|
from rdkit.Chem.rdMolDescriptors import CalcPBF
from ._base import Descriptor
__all__ = ("PBF",)
class PBF(Descriptor):
r"""PBF descriptor."""
__slots__ = ()
since = "1.1.2"
require_3D = True
@classmethod
def preset(cls, version):
yield cls()
def description(self):
return self.__class__.__name__
def __str__(self):
return self.__class__.__name__
def parameters(self):
return ()
def calculate(self):
return CalcPBF(self.get_3D_mol())
rtype = float
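# Usage sketch (hedged; PBF needs 3D coordinates, so the molecule must carry a conformer,
# here embedded with RDKit):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   from mordred import Calculator
#
#   mol = Chem.AddHs(Chem.MolFromSmiles("c1ccccc1CCO"))
#   AllChem.EmbedMolecule(mol)
#   print(Calculator(PBF)(mol)[0])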
|
fastreid/data/datasets/AirportALERT.py
|
NTU-ROSE/fast-reid
| 2,194 |
136433
|
# encoding: utf-8
"""
@author: <NAME>
@contact: <EMAIL>
"""
import os
from fastreid.data.datasets import DATASET_REGISTRY
from fastreid.data.datasets.bases import ImageDataset
__all__ = ['AirportALERT', ]
@DATASET_REGISTRY.register()
class AirportALERT(ImageDataset):
"""Airport
"""
dataset_dir = "AirportALERT"
dataset_name = "airport"
def __init__(self, root='datasets', **kwargs):
self.root = root
self.train_path = os.path.join(self.root, self.dataset_dir)
self.train_file = os.path.join(self.root, self.dataset_dir, 'filepath.txt')
required_files = [self.train_file, self.train_path]
self.check_before_run(required_files)
train = self.process_train(self.train_path, self.train_file)
super().__init__(train, [], [], **kwargs)
def process_train(self, dir_path, train_file):
data = []
with open(train_file, "r") as f:
img_paths = [line.strip('\n') for line in f.readlines()]
for path in img_paths:
split_path = path.split('\\')
img_path = '/'.join(split_path)
camid = self.dataset_name + "_" + split_path[0]
pid = self.dataset_name + "_" + split_path[1]
img_path = os.path.join(dir_path, img_path)
# if 11001 <= int(split_path[1]) <= 401999:
if 11001 <= int(split_path[1]):
data.append([img_path, pid, camid])
return data
|
tests/test_uy.py
|
smithdc1/django-localflavor
| 619 |
136448
|
from django.test import SimpleTestCase
from localflavor.uy.forms import UYCIField, UYDepartmentSelect
from localflavor.uy.util import get_validation_digit
class UYLocalFlavorTests(SimpleTestCase):
def test_UYDepartmentSelect(self):
f = UYDepartmentSelect()
out = '''<select name="departamentos">
<option value="G">Artigas</option>
<option value="A">Canelones</option>
<option value="E">Cerro Largo</option>
<option value="L">Colonia</option>
<option value="Q">Durazno</option>
<option value="N">Flores</option>
<option value="O">Florida</option>
<option value="P">Lavalleja</option>
<option value="B">Maldonado</option>
<option value="S" selected="selected">Montevideo</option>
<option value="I">Paysand\xfa</option>
<option value="J">R\xedo Negro</option>
<option value="F">Rivera</option>
<option value="C">Rocha</option>
<option value="H">Salto</option>
<option value="M">San Jos\xe9</option>
<option value="K">Soriano</option>
<option value="R">Tacuaremb\xf3</option>
<option value="D">Treinta y Tres</option>
</select>'''
self.assertHTMLEqual(f.render('departamentos', 'S'), out)
def test_UYCIField(self):
valid = {
'4098053': '4098053',
'409805-3': '409805-3',
'409.805-3': '409.805-3',
'10054112': '10054112',
'1005411-2': '1005411-2',
'1.005.411-2': '1.005.411-2',
}
invalid = {
'foo': ['Enter a valid CI number in X.XXX.XXX-X,XXXXXXX-X or XXXXXXXX format.'],
'409805-2': ['Enter a valid CI number.'],
'1.005.411-5': ['Enter a valid CI number.'],
}
self.assertFieldOutput(UYCIField, valid, invalid)
self.assertEqual(get_validation_digit(409805), 3)
self.assertEqual(get_validation_digit(1005411), 2)
|
tests/test_guardduty/test_guardduty.py
|
gtourkas/moto
| 5,460 |
136455
|
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_guardduty
@mock_guardduty
def test_create_detector():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.create_detector(
Enable=True,
ClientToken="745645734574758463758",
FindingPublishingFrequency="ONE_HOUR",
DataSources={"S3Logs": {"Enable": True}},
Tags={},
)
response.should.have.key("DetectorId")
response["DetectorId"].shouldnt.equal(None)
@mock_guardduty
def test_create_detector_with_minimal_params():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.create_detector(Enable=True)
response.should.have.key("DetectorId")
response["DetectorId"].shouldnt.equal(None)
@mock_guardduty
def test_list_detectors_initial():
client = boto3.client("guardduty", region_name="us-east-1")
response = client.list_detectors()
response.should.have.key("DetectorIds").equals([])
@mock_guardduty
def test_list_detectors():
client = boto3.client("guardduty", region_name="us-east-1")
d1 = client.create_detector(
Enable=True,
ClientToken="745645734574758463758",
FindingPublishingFrequency="ONE_HOUR",
DataSources={"S3Logs": {"Enable": True}},
Tags={},
)["DetectorId"]
d2 = client.create_detector(Enable=False,)["DetectorId"]
response = client.list_detectors()
response.should.have.key("DetectorIds")
set(response["DetectorIds"]).should.equal({d1, d2})
|
geoq/training/models.py
|
kaydoh/geoq
| 471 |
136477
|
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django.db import models
from django.contrib.auth.models import User
from jsonfield import JSONField
class Training(models.Model):
name = models.CharField(max_length=250)
category = models.CharField(max_length=120, help_text="Category of training, eg. FEMA", null=True, blank=True, default="Uncategorized")
primary_contact = models.ForeignKey(User, help_text="Contact for training.",
on_delete=models.PROTECT)
gamification_signals = models.CharField(max_length=250, help_text="After training which Signals should be sent to gamification server?", null=True, blank=True)
content_link = models.CharField(max_length=500, help_text="Link to PDF/PPT/training or web page for training that will open in a new window", null=True, blank=True)
quiz_data = JSONField(help_text="If the user should be quizzed after, list of questions and answers and percent_complete if not 100%", null=True, blank=True)
users_completed = models.ManyToManyField(User, blank=True, related_name="users_completed", help_text='Users that completed this training.')
description = models.TextField(null=True, blank=True, help_text="Details to show potential student.")
updated_at = models.DateTimeField(auto_now=True, help_text="Last updated time/date")
private = models.BooleanField(default=False, help_text="Check to hide in public list")
def __str__(self):
return "%s, %s" % (self.name, self.category)
|
recipe_scrapers/farmhousedelivery.py
|
mathiazom/recipe-scrapers
| 811 |
136479
|
import re
from bs4 import Tag
from ._abstract import AbstractScraper
from ._utils import normalize_string
"""
NOTE: This website has at least 2 prominent layout styles, so there are two logic blocks and 2 test cases to
support them in the ingredients and instructions processing sections.
"""
class FarmhouseDelivery(AbstractScraper):
@classmethod
def host(cls, domain="com"):
return f"recipes.farmhousedelivery.{domain}"
def title(self):
return self.soup.find("h1", {"class": "entry-title"}).get_text(strip=True)
def ingredients(self):
# Style 1
ingredients_marker = self.soup.find("p", text=re.compile(r"Ingredients:"))
if ingredients_marker is not None:
ingredients_marker_siblings = ingredients_marker.next_siblings
for ingredients_marker_sibling in ingredients_marker_siblings:
if (
isinstance(ingredients_marker_sibling, Tag)
and ingredients_marker_sibling.name == "ul"
):
ingredients = ingredients_marker_sibling.findAll("li")
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients
]
# Style 2
ingredients_marker = self.soup.find("p", text=re.compile(r"Ingredients"))
if ingredients_marker is not None:
ingredients = []
ingredients_marker_siblings = ingredients_marker.next_siblings
for ingredients_marker_sibling in ingredients_marker_siblings:
if (
isinstance(ingredients_marker_sibling, Tag)
and ingredients_marker_sibling.name == "p"
):
if ingredients_marker_sibling.get_text() == "Instructions":
break
else:
ingredients.append(
normalize_string(ingredients_marker_sibling.get_text())
)
return ingredients
return None
def _instructions_list(self):
# Style 1
instructions_marker = self.soup.find("p", text=re.compile(r"Instructions:"))
if instructions_marker is not None:
instructions_marker_siblings = instructions_marker.next_siblings
for instructions_marker_sibling in instructions_marker_siblings:
if (
isinstance(instructions_marker_sibling, Tag)
and instructions_marker_sibling.name == "p"
and instructions_marker_sibling.get_text(strip=True) != ""
):
instructions = instructions_marker_sibling.findAll("span")
return [
normalize_string(instruction.get_text())
for instruction in instructions
]
# Style 2
instructions_marker = self.soup.find("p", text=re.compile(r"Instructions"))
if instructions_marker is not None:
instructions = []
instructions_marker_siblings = instructions_marker.next_siblings
for instructions_marker_sibling in instructions_marker_siblings:
if (
isinstance(instructions_marker_sibling, Tag)
and instructions_marker_sibling.name == "p"
and instructions_marker_sibling.get_text(strip=True) != ""
):
instructions.append(
normalize_string(instructions_marker_sibling.get_text())
)
return instructions
return None
def instructions(self):
data = self._instructions_list()
return "\n".join(data) if data else None
def image(self):
container = self.soup.find("div", {"class": "entry-content"})
if not container:
return None
image = container.find("img", {"src": True})
return image["src"] if image else None
|
tests/test_data_tokenizers.py
|
leezu/gluon-nlp
| 2,461 |
136484
|
import pytest
import random
import collections
import pickle
from uuid import uuid4
import os
import unicodedata
import tempfile
from pkg_resources import parse_version
import gluonnlp
from gluonnlp.data.tokenizers import WhitespaceTokenizer, MosesTokenizer, JiebaTokenizer,\
SpacyTokenizer, SubwordNMTTokenizer, YTTMTokenizer, SentencepieceTokenizer, \
HuggingFaceBPETokenizer, HuggingFaceByteBPETokenizer, HuggingFaceWordPieceTokenizer, \
HuggingFaceTokenizer
from gluonnlp.base import get_repo_url
from gluonnlp.data import Vocab, load_vocab
from gluonnlp.utils.misc import download
from gluonnlp.models.t5 import T5Tokenizer
EN_SAMPLES = ['Four score and seven years ago our fathers brought forth on this continent, '
'a new nation, conceived in Liberty, and dedicated to the proposition '
'that all men are created equal.',
'In spite of the debate going on for months about the photos of Özil with the '
'Turkish President Recep <NAME>, he regrets the return of '
'the 92-match national player Özil.']
DE_SAMPLES = ['Goethe stammte aus einer angesehenen bürgerlichen Familie; sein Großvater'
' mütterlicherseits war als Stadtschultheiß höchster Justizbeamter der'
' Stadt Frankfurt, sein Vater Doktor der Rechte und kaiserlicher Rat.',
'"Das ist eine Frage, die natürlich davon abhängt, dass man einmal ins '
'Gespräch kommt, dass man mit ihm auch darüber spricht, warum er das eine '
'oder andere offenbar so empfunden hat, wie das in seinem Statement niedergelegt'
' ist", sagte Grindel im Fußball-Podcast "Phrasenmäher" der "Bild-Zeitung.']
ZH_SAMPLES = ['苟活者在淡红的血色中,会依稀看见微茫的希望;真的猛士,将更奋然而前行。',
'参加工作,哈尔滨工业大学无线电工程系电子仪器及测量技术专业毕业。']
SUBWORD_TEST_SAMPLES = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
def random_inject_space(sentence):
words = sentence.split()
ret = ''
for i, word in enumerate(words):
ret += word
if i < len(words) - 1:
n_space_tokens = random.randint(1, 10)
for j in range(n_space_tokens):
ret += random.choice([' ', '\t', '\r', '\n'])
return ret
def verify_encode_token_with_offsets(tokenizer, all_sentences, gt_offsets=None):
if gt_offsets is None:
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence in\
zip(tokens, enc_tokens, offsets, sentences):
for tok, offset, enc_tok in zip(ele_tokens, ele_offsets, ele_enc_tokens):
assert ele_sentence[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for tok, offset, enc_tok in zip(tokens, offsets, enc_tokens):
assert sentences[offset[0]:offset[1]] == tok
assert tok == enc_tok
else:
for sentences, ele_gt_offsets in [(all_sentences[0], gt_offsets[0]),
(all_sentences, gt_offsets)]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
assert ele_gt_offsets == offsets
assert enc_tokens == tokens
def verify_sentencepiece_tokenizer_with_offsets(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, str)
tokens, offsets = tokenizer.encode_with_offsets(sentences, str)
if isinstance(sentences, list):
for ele_tokens, ele_enc_tokens, ele_offsets, ele_sentence\
in zip(tokens, enc_tokens, offsets, sentences):
for i, (tok, offset, enc_tok) in enumerate(zip(ele_tokens, ele_offsets,
ele_enc_tokens)):
assert tok == enc_tok
ele_sel_tok = unicodedata.normalize('NFKC',
ele_sentence[offset[0]:offset[1]]).strip()
if tokenizer.is_first_subword(tok):
real_tok = tok[1:]
else:
real_tok = tok
assert ele_sel_tok == real_tok,\
'ele_sel_tok={}, real_tok={}'.format(ele_sel_tok, real_tok)
def verify_encode_with_offsets_consistency(tokenizer, all_sentences):
for sentences in [all_sentences[0], all_sentences]:
enc_tokens = tokenizer.encode(sentences, int)
tokens, offsets = tokenizer.encode_with_offsets(sentences, int)
str_tokens, str_offsets = tokenizer.encode_with_offsets(sentences, str)
assert offsets == str_offsets
assert tokens == enc_tokens
def verify_encode_token(tokenizer, all_sentences, all_gt_tokens):
for sentences, gt_tokens in [(all_sentences[0], all_gt_tokens[0]),
(all_sentences, all_gt_tokens)]:
tokenizer_encode_ret = tokenizer.encode(sentences)
assert tokenizer_encode_ret == gt_tokens,\
'Whole Encoded: {}, \nWhole GT: {}'.format(tokenizer_encode_ret, gt_tokens)
def verify_decode(tokenizer, all_sentences, out_type=str):
for sentences in [all_sentences[0], all_sentences]:
assert tokenizer.decode(tokenizer.encode(sentences, out_type)) == sentences
def verify_decode_spm(tokenizer, all_sentences, gt_int_decode_sentences):
for sentences, case_gt_int_decode in [(all_sentences[0], gt_int_decode_sentences[0]),
(all_sentences, gt_int_decode_sentences)]:
if isinstance(sentences, str):
gt_str_decode_sentences = sentences
if tokenizer.lowercase:
gt_str_decode_sentences = gt_str_decode_sentences.lower()
gt_str_decode_sentences = unicodedata.normalize('NFKC', gt_str_decode_sentences)
elif isinstance(sentences, list):
gt_str_decode_sentences = []
for ele in sentences:
ele_gt_decode = ele
if tokenizer.lowercase:
ele_gt_decode = ele_gt_decode.lower()
ele_gt_decode = unicodedata.normalize('NFKC', ele_gt_decode)
gt_str_decode_sentences.append(ele_gt_decode)
else:
raise NotImplementedError
assert tokenizer.decode(tokenizer.encode(sentences, str)) == gt_str_decode_sentences
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_subword_nmt(tokenizer, all_sentences, gt_int_decode, gt_str_decode):
for sentences, case_gt_int_decode, case_gt_str_decode in [(all_sentences[0], gt_int_decode[0], gt_str_decode[0]),
(all_sentences, gt_int_decode, gt_str_decode)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_str_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_int_decode
def verify_decode_hf(tokenizer, all_sentences, gt_decode_sentences):
for sentences, case_gt_decode in [(all_sentences[0], gt_decode_sentences[0]),
(all_sentences, gt_decode_sentences)]:
assert tokenizer.decode(tokenizer.encode(sentences, str)) == case_gt_decode
assert tokenizer.decode(tokenizer.encode(sentences, int)) == case_gt_decode
if isinstance(sentences, list):
for sentence in sentences:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentence, int))\
== tokenizer.encode(sentence, str)
assert tokenizer.vocab[tokenizer.encode(sentence, str)]\
== tokenizer.encode(sentence, int)
else:
assert tokenizer.vocab.to_tokens(tokenizer.encode(sentences, int)) \
== tokenizer.encode(sentences, str)
assert tokenizer.vocab[tokenizer.encode(sentences, str)] \
== tokenizer.encode(sentences, int)
def verify_decode_no_vocab_raise(tokenizer):
# When the vocab is not attached, should raise ValueError
for sentences in [EN_SAMPLES[0], EN_SAMPLES]:
with pytest.raises(ValueError):
tokenizer.encode(sentences, int)
with pytest.raises(ValueError):
tokenizer.decode([0])
with pytest.raises(ValueError):
tokenizer.decode([[0], [1]])
def verify_pickleble(tokenizer, cls):
print(tokenizer)
# Verify if the tokenizer is pickleable and has the same behavior after dumping/loading
tokenizer_p = pickle.loads(pickle.dumps(tokenizer))
assert isinstance(tokenizer_p, cls)
assert tokenizer.encode(SUBWORD_TEST_SAMPLES, str) == tokenizer_p.encode(SUBWORD_TEST_SAMPLES, str)
def test_whitespace_tokenizer():
tokenizer = WhitespaceTokenizer()
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers', 'brought',
'forth', 'on', 'this', 'continent,', 'a', 'new', 'nation,', 'conceived',
'in', 'Liberty,', 'and', 'dedicated', 'to', 'the', 'proposition', 'that',
'all', 'men', 'are', 'created', 'equal.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan,', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie;', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt,', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat.'],
['"Das', 'ist', 'eine', 'Frage,', 'die', 'natürlich', 'davon', 'abhängt,',
'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt,', 'dass', 'man', 'mit',
'ihm', 'auch', 'darüber', 'spricht,', 'warum', 'er', 'das', 'eine', 'oder',
'andere', 'offenbar', 'so', 'empfunden', 'hat,', 'wie', 'das', 'in',
'seinem', 'Statement', 'niedergelegt', 'ist",', 'sagte', 'Grindel', 'im',
'Fußball-Podcast', '"Phrasenmäher"', 'der', '"Bild-Zeitung.']]
for _ in range(2):
# Inject noise and test for encode
noisy_en_samples = [random_inject_space(ele) for ele in EN_SAMPLES]
noisy_de_samples = [random_inject_space(ele) for ele in DE_SAMPLES]
verify_encode_token(tokenizer, noisy_en_samples + noisy_de_samples,
gt_en_tokenized + gt_de_tokenized)
# Test for decode
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, str)
# Test for encode_with_offsets
verify_encode_token_with_offsets(tokenizer, noisy_en_samples + noisy_de_samples)
verify_decode_no_vocab_raise(tokenizer)
# Test for output_type = int
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized,
[])))
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, EN_SAMPLES + DE_SAMPLES, int)
verify_pickleble(tokenizer, WhitespaceTokenizer)
verify_encode_token_with_offsets(tokenizer, EN_SAMPLES + DE_SAMPLES)
def test_moses_tokenizer():
en_tokenizer = MosesTokenizer('en')
de_tokenizer = MosesTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt',
'Frankfurt', ',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und',
'kaiserlicher', 'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden',
'hat', ',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt',
'ist', '"', ',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast',
'"', 'Phrasenmäher', '"', 'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
verify_decode(en_tokenizer, EN_SAMPLES, str)
verify_decode(de_tokenizer, DE_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
verify_decode_no_vocab_raise(en_tokenizer)
verify_decode_no_vocab_raise(de_tokenizer)
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_decode(en_tokenizer, EN_SAMPLES, int)
verify_decode(de_tokenizer, DE_SAMPLES, int)
verify_pickleble(en_tokenizer, MosesTokenizer)
verify_pickleble(de_tokenizer, MosesTokenizer)
def test_jieba_tokenizer():
tokenizer = JiebaTokenizer()
gt_zh_tokenized = [['苟活', '者', '在', '淡红', '的', '血色', '中', ',',
'会', '依稀', '看见', '微茫', '的', '希望', ';', '真的',
'猛士', ',', '将', '更奋', '然而', '前行', '。'],
['参加', '工作', ',', '哈尔滨工业大学', '无线电', '工程系', '电子仪器',
'及', '测量', '技术', '专业', '毕业', '。']]
verify_encode_token(tokenizer, ZH_SAMPLES, gt_zh_tokenized)
verify_decode(tokenizer, ZH_SAMPLES, str)
vocab = Vocab(collections.Counter(sum(gt_zh_tokenized, [])))
verify_decode_no_vocab_raise(tokenizer)
tokenizer.set_vocab(vocab)
verify_decode(tokenizer, ZH_SAMPLES, int)
verify_pickleble(tokenizer, JiebaTokenizer)
def test_spacy_tokenizer():
en_tokenizer = SpacyTokenizer('en')
de_tokenizer = SpacyTokenizer('de')
gt_en_tokenized = [['Four', 'score', 'and', 'seven', 'years', 'ago', 'our', 'fathers',
'brought', 'forth', 'on', 'this', 'continent', ',', 'a', 'new', 'nation',
',', 'conceived', 'in', 'Liberty', ',', 'and', 'dedicated', 'to', 'the',
'proposition', 'that', 'all', 'men', 'are', 'created', 'equal', '.'],
['In', 'spite', 'of', 'the', 'debate', 'going', 'on', 'for', 'months',
'about', 'the', 'photos', 'of', 'Özil', 'with', 'the', 'Turkish',
'President', 'Recep', 'Tayyip', 'Erdogan', ',', 'he', 'regrets', 'the',
'return', 'of', 'the', '92-match', 'national', 'player', 'Özil', '.']]
gt_de_tokenized = [['Goethe', 'stammte', 'aus', 'einer', 'angesehenen', 'bürgerlichen',
'Familie', ';', 'sein', 'Großvater', 'mütterlicherseits', 'war', 'als',
'Stadtschultheiß', 'höchster', 'Justizbeamter', 'der', 'Stadt', 'Frankfurt',
',', 'sein', 'Vater', 'Doktor', 'der', 'Rechte', 'und', 'kaiserlicher',
'Rat', '.'],
['"', 'Das', 'ist', 'eine', 'Frage', ',', 'die', 'natürlich', 'davon',
'abhängt', ',', 'dass', 'man', 'einmal', 'ins', 'Gespräch', 'kommt', ',',
'dass', 'man', 'mit', 'ihm', 'auch', 'darüber', 'spricht', ',', 'warum',
'er', 'das', 'eine', 'oder', 'andere', 'offenbar', 'so', 'empfunden', 'hat',
',', 'wie', 'das', 'in', 'seinem', 'Statement', 'niedergelegt', 'ist', '"',
',', 'sagte', 'Grindel', 'im', 'Fußball-Podcast', '"', 'Phrasenmäher', '"',
'der', '"', 'Bild-Zeitung', '.']]
verify_encode_token(en_tokenizer, EN_SAMPLES, gt_en_tokenized)
verify_encode_token(de_tokenizer, DE_SAMPLES, gt_de_tokenized)
vocab = Vocab(collections.Counter(sum(gt_en_tokenized + gt_de_tokenized, [])))
en_tokenizer.set_vocab(vocab)
de_tokenizer.set_vocab(vocab)
verify_pickleble(en_tokenizer, SpacyTokenizer)
verify_pickleble(de_tokenizer, SpacyTokenizer)
verify_encode_token_with_offsets(en_tokenizer, EN_SAMPLES)
verify_encode_token_with_offsets(de_tokenizer, DE_SAMPLES)
    # Test loading the spacy tokenizer by specifying the "model" flag
en_tokenizer = SpacyTokenizer(model='en_core_web_lg')
out = en_tokenizer.encode(EN_SAMPLES)
def test_yttm_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'yttm.model')
download(url=get_repo_url() + 'tokenizer_test_models/yttm/test_ende_yttm-6f2c39.model',
path=model_path)
tokenizer = YTTMTokenizer(model_path=model_path)
gt_tokenized = [['▁He', 'll', 'o', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁',
'Ⅷ', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁Gl', 'u', 'on', 'N', 'L', 'P', '-A', 'm', 'az', 'on', '-H', 'a', 'ib',
'in', '-L', 'e', 'on', 'ard', '-S', 'hen', 'g', '-S', 'h', 'u', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!',
'@', '#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 2), (2, 4), (4, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
(17, 21), (21, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 10), (10, 11), (11, 13),
(13, 15), (15, 17), (17, 18), (18, 20), (20, 22), (22, 24), (24, 25), (25, 27),
(27, 30), (30, 32), (32, 35), (35, 36), (36, 38), (38, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53), (53, 54),
(54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62),
(62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y<UNK>all! How are you <UNK> <UNK> <UNK> <UNK> ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# <UNK>abc<UNK>']
gt_str_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, YTTMTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
# Begin to verify decode
for sample_sentences, ele_gt_int_decode, ele_gt_str_decode in [(SUBWORD_TEST_SAMPLES[0], gt_int_decode[0], gt_str_decode[0]),
(SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)]:
int_decode = tokenizer.decode(tokenizer.encode(sample_sentences, int))
str_decode = tokenizer.decode(tokenizer.encode(sample_sentences, str))
assert int_decode == ele_gt_int_decode
assert str_decode == ele_gt_str_decode
os.remove(model_path)
assert tokenizer.decode([]) == ''
assert tokenizer.decode([[]]) == ['']
@pytest.mark.seed(123)
def test_sentencepiece_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'spm.model')
download(url=get_repo_url()
+ 'tokenizer_test_models/sentencepiece/case1/test_ende-a9bee4.model',
path=model_path)
# Case1
tokenizer = SentencepieceTokenizer(model_path)
gt_tokenized = [['▁Hel', 'lo', ',', '▁y', "'", 'all', '!', '▁How', '▁are', '▁you',
'▁', 'VI', 'II', '▁', '😁', '▁', '😁', '▁', '😁', '▁?'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '▁is', '▁great', '!', '!', '!', '!',
'!', '!'],
['▁G', 'lu', 'on', 'N', 'L', 'P', '-', 'A', 'ma', 'zo', 'n', '-', 'H', 'ai',
'bin', '-', 'L', 'e', 'on', 'ard', '-', 'S', 'hen', 'g', '-', 'S', 'hu', 'ai',
'-', 'X', 'ing', 'j', 'ian', '.', '.', '.', '.', '.', '/', ':', '!', '@',
'#', '▁', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 26), (26, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31),
(31, 32), (32, 33), (33, 35)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 12),
(12, 14), (14, 15), (15, 16), (16, 17), (17, 19), (19, 22), (22, 23), (23, 24),
(24, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36), (36, 37),
(37, 38), (38, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48), (48, 51),
(51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (61, 62), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ['Hello, y ⁇ all! How are you VIII ⁇ ⁇ ⁇ ?',
'GluonNLP is great!!!!!!',
'GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:! ⁇ # ⁇ abc ⁇ ']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SentencepieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
        # Case 2, lowercase=True
gt_lower_case_int_decode = ['hello, y ⁇ all! how are you viii ⁇ ⁇ ⁇ ?',
'gluonnlp is great!!!!!!',
'gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:! ⁇ # ⁇ abc ⁇ ']
tokenizer = SentencepieceTokenizer(model_path, lowercase=True)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_lower_case_int_decode)
        # Case 3, use sentencepiece subword regularization and check that we can obtain different encoding results.
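        # (nbest=-1 asks sentencepiece to sample from effectively all segmentation
        # candidates and alpha is the sampling smoothing parameter, so repeated
        # encodes of the same sentence are expected to differ occasionally.)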
tokenizer = SentencepieceTokenizer(model_path, lowercase=True, nbest=-1, alpha=1.0)
has_different_encode_out = False
encode_out = None
for _ in range(10):
if encode_out is None:
encode_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
else:
ele_out = tokenizer.encode(SUBWORD_TEST_SAMPLES[0])
if ele_out != encode_out:
has_different_encode_out = True
break
assert has_different_encode_out
os.remove(model_path)
# Case of T5 Tokenizer
with tempfile.TemporaryDirectory() as dir_path:
vocab_path = os.path.join(dir_path, 't5_spm.model')
download(
url=get_repo_url() + 'tokenizer_test_models/sentencepiece/case_t5/test_t5spm-5f05e7.model',
path=vocab_path
)
extra_ids = 100
tokenizer = T5Tokenizer(vocab_path, extra_ids)
gt_tokenized = [
['▁Hello', ',', '▁', 'y', "'", 'all', '!', '▁How', '▁are', '▁you', '▁VIII', '▁', '😁',
'▁', '😁', '▁', '😁', '▁', '?'],
['▁', 'Glu', 'on', 'N', 'LP', '▁is', '▁great', '!', '!!!!!'],
['▁', 'Glu', 'on', 'N', 'LP', '-', 'Am', 'a', 'zon', '-', 'H', 'a', 'i', 'bin', '-',
'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u', 'a', 'i', '-', 'X', 'ing', 'j',
'i', 'an', '.....', '/', ':', '!', '@', '#', '▁', "'", 'a', 'b', 'c', "'"]
]
gt_offsets = [
[(0, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21), (21, 25),
(25, 27), (27, 28), (28, 29), (29, 30), (30, 31), (31, 32), (32, 33), (33, 34), (34, 35)],
[(0, 0), (0, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18), (18, 23)],
[(0, 0), (0, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 11), (11, 12), (12, 15), (15, 16),
(16, 17), (17, 18), (18, 19), (19, 22), (22, 23), (23, 25), (25, 27), (27, 30), (30, 31),
(31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 41), (41, 42), (42, 43), (43, 44),
(44, 47), (47, 48), (48, 49), (49, 51), (51, 56), (56, 57), (57, 58), (58, 59), (59, 60),
(60, 61), (61, 62), (62, 63), (63, 64), (64, 65), (65, 66), (66, 67)]
]
gt_int_decode = [
"Hello, y'all! How are you VIII ⁇ ⁇ ⁇ ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"
]
inserted_special_tokens = list('<extra_id_{}>'.format(i) for i in range(extra_ids - 1, -1, -1))
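        # With extra_ids=100 this yields ['<extra_id_99>', ..., '<extra_id_0>'],
        # i.e. the sentinel tokens are appended to the vocab in descending index order.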
assert list(
tokenizer.vocab.to_tokens(i) for i in range(len(tokenizer._sp_model), len(tokenizer._vocab))
) == inserted_special_tokens, 'Some <extra_id> tokens are not properly inserted.'
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SentencepieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_spm(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode)
os.remove(vocab_path)
def test_subword_nmt_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'subword_nmt.model')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende-d189ff.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'subword_nmt.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/subword-nmt/test_ende_vocab-900f81.json',
path=vocab_path)
# Case 1
tokenizer = SubwordNMTTokenizer(model_path, vocab_path)
gt_tokenized = [["Hel", "lo", ",</w>", "y", "\'", "all", "!</w>", "How</w>", "are</w>", "you</w>",
"Ⅷ</w>", "😁</w>", "😁</w>", "😁</w>", "?</w>"],
["Gl", "u", "on", "N", "L", "P</w>", "is</w>", "great", "!", "!", "!", "!!",
"!</w>"],
["Gl", "u", "on", "N", "L", "P", "-", "Amaz", "on-", "H", "ai", "b", "in-", "Le",
"on", "ard", "-", "Sh", "eng", "-", "Sh", "u", "ai", "-", "X", "ing", "ji",
"an", "..", "...", "/", ":", "!", "@", "#</w>", "\'", "ab", "c", "\'</w>"]]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 16),
(16, 17), (17, 19), (19, 20), (20, 23), (23, 25), (25, 27), (27, 30), (30, 31),
(31, 33), (33, 36), (36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 49), (49, 51), (51, 53), (53, 56), (56, 57), (57, 58), (58, 59),
(59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
gt_int_decode = ["Hello, y\'all! How are you Ⅷ 😁 😁 😁 ?",
"GluonNLP is great!!!!!!",
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# \'abc\'"]
gt_str_decode = SUBWORD_TEST_SAMPLES
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, SubwordNMTTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_subword_nmt(tokenizer, SUBWORD_TEST_SAMPLES, gt_int_decode, gt_str_decode)
# Case 2, bpe_dropout
# We use str decode here because we may not perfectly recover the original sentence with int decode.
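        # bpe_dropout randomly skips a fraction of the learned merge operations at encode
        # time (with probability 0.5 here), so the produced subword segmentation is stochastic.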
tokenizer = SubwordNMTTokenizer(model_path, vocab_path, bpe_dropout=0.5)
verify_decode(tokenizer, SUBWORD_TEST_SAMPLES, out_type=str)
os.remove(model_path)
os.remove(vocab_path)
def test_huggingface_bpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'test_hf_bpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'test_hf_bpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'test_hf_bpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bpe/test_hf_bpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello</w>', ',</w>', 'y</w>', "'</w>", 'all</w>', '!</w>', 'How</w>',
'are</w>', 'you</w>', '<unk>', '<unk>', '<unk>', '<unk>', '?</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', 'is</w>', 'great</w>', '!</w>', '!</w>',
'!</w>', '!</w>', '!</w>', '!</w>'],
['Gl', 'u', 'on', 'N', 'LP</w>', '-</w>', 'Amazon</w>', '-</w>', 'H', 'ai',
'bin</w>', '-</w>', 'Leonard</w>', '-</w>', 'Sh', 'en', 'g</w>', '-</w>',
'Sh', 'u', 'ai</w>', '-</w>', 'X', 'ing', 'j', 'ian</w>', '.</w>', '.</w>',
'.</w>', '.</w>', '.</w>', '/</w>', ':</w>', '!</w>', '@</w>', '#</w>',
"'</w>", 'ab', 'c</w>', "'</w>"]]
gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21), (22, 25),
(26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (9, 11), (12, 17), (17, 18), (18, 19),
(19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16), (16, 17),
(17, 19), (19, 22), (22, 23), (23, 30), (30, 31), (31, 33), (33, 35), (35, 36),
(36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44), (44, 47), (47, 48),
(48, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
(58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66), (66, 67)]]
        # For huggingface tokenizers gt_int_decode equals gt_str_decode
        # because hf removes the unk tokens from the decode result.
gt_decode = ["Hello , y ' all ! How are you ?",
'GluonNLP is great ! ! ! ! ! !',
"GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian . . . . . / : ! @ # ' abc '"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_decode = ["hello , y ' all ! how are you ?",
'gluonnlp is great ! ! ! ! ! !',
"gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian . . . . . / : ! @ # ' abc '"]
tokenizer = HuggingFaceBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_bytebpe_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bytebpe.model')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.model',
path=model_path)
vocab_path = os.path.join(dir_path, 'hf_bytebpe.vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'hf_bytebpe.hf_vocab')
download(url=get_repo_url() + 'tokenizer_test_models/hf_bytebpe/test_hf_bytebpe.hf_vocab',
path=hf_vocab_path)
# Case 1, default lowercase=False
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path)
gt_tokenized = [['Hello', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
'Ġâ', 'ħ', '§', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'ĠðŁĺ', 'ģ', 'Ġ?'],
['Gl', 'u', 'on', 'N', 'LP', 'Ġis', 'Ġgreat', 'ï¼', 'ģ', 'ï¼',
'ģ', 'ï¼', 'ģ', '!!!'],
['Gl', 'u', 'on', 'N', 'LP', '-', 'Amazon', '-', 'Ha', 'ib', 'in',
'-', 'Le', 'on', 'ard', '-', 'She', 'ng', '-', 'Sh', 'u',
'ai', '-', 'X', 'ing', 'j', 'ian', '.....', '/', ':', '!', '@',
'#', "Ġ'", 'ab', 'c', "'"]]
        # The definition of byte-level offsets is not entirely clear: multi-byte
        # characters (e.g. the emoji) are split into several byte-level tokens
        # whose character offsets overlap.
gt_offsets = [[(0, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17), (17, 21),
(21, 25), (25, 27), (26, 27), (26, 27), (27, 29), (28, 29), (29, 31),
(30, 31), (31, 33), (32, 33), (33, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 11), (11, 17), (17, 18),
(17, 18), (18, 19), (18, 19), (19, 20), (19, 20), (20, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 9), (9, 15), (15, 16),
(16, 18), (18, 20), (20, 22), (22, 23), (23, 25), (25, 27), (27, 30),
(30, 31), (31, 34), (34, 36), (36, 37), (37, 39), (39, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 56),
(56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 63),
(63, 65), (65, 66), (66, 67)]]
gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=True
gt_lowercase_int_decode = ["hello, y'all! how are you ⅷ 😁 😁 😁 ?",
'gluonnlp is great!!!!!!',
"gluonnlp-amazon-haibin-leonard-sheng-shuai-xingjian...../:!@# 'abc'"]
tokenizer = HuggingFaceByteBPETokenizer(model_path, vocab_path, lowercase=True)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_int_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceByteBPETokenizer(model_path, hf_vocab_path)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceByteBPETokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(model_path)
os.remove(vocab_path)
os.remove(hf_vocab_path)
def test_huggingface_wordpiece_tokenizer():
with tempfile.TemporaryDirectory() as dir_path:
vocab_path = os.path.join(dir_path, 'hf_wordpiece.vocab')
download(url=get_repo_url()
+ 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.vocab',
path=vocab_path)
hf_vocab_path = os.path.join(dir_path, 'hf_wordpiece.hf_vocab')
download(url=get_repo_url()
+ 'tokenizer_test_models/hf_wordpiece/test_hf_wordpiece.hf_vocab',
path=hf_vocab_path)
# Case 1, lowercase=True
tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=True)
gt_tokenized = [["hello", ",", "y", "'", "all", "!", "how", "are", "you",
"<unk>", "<unk>", "<unk>", "<unk>", "?"],
["gl", "##uo", "##nn", "##l", "##p", "is", "great", "\uff01",
"\uff01", "\uff01", "!", "!", "!"],
["gl", "##uo", "##nn", "##l", "##p", "-", "amazon", "-", "hai",
"##bin", "-", "leonard", "-", "shen", "##g", "-", "shu", "##ai", "-",
"xin", "##g", "##ji", "##an", ".", ".", ".", ".", ".", "/", ":", "!",
"@", "#", "'", "abc", "'"]]
gt_offsets = [[(0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13), (14, 17), (18, 21),
(22, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35)],
[(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (9, 11), (12, 17), (17, 18),
(18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 4), (4, 6), (6, 7), (7, 8), (8, 9), (9, 15), (15, 16), (16, 19),
(19, 22), (22, 23), (23, 30), (30, 31), (31, 35), (35, 36), (36, 37), (37, 40),
(40, 42), (42, 43), (43, 46), (46, 47), (47, 49), (49, 51), (51, 52), (52, 53),
(53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61),
(62, 63), (63, 66), (66, 67)]]
gt_decode = ["hello, y'all! how are you?",
"gluonnlp is great ! ! !!!!",
"gluonnlp - amazon - haibin - leonard - sheng - shuai - xingjian..... / :! @ #'abc '"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
# Case 2, lowercase=False
gt_lowercase_decode = [", y'all! are you?",
"is great ! ! !!!!",
"- - - - - -..... / :! @ #'abc '"]
tokenizer = HuggingFaceWordPieceTokenizer(vocab_path, lowercase=False)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_lowercase_decode)
# Case 3, using original hf vocab
tokenizer = HuggingFaceWordPieceTokenizer(hf_vocab_path, lowercase=True)
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceWordPieceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
os.remove(vocab_path)
os.remove(hf_vocab_path)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_wordpiece_tokenizer_v08():
"""Test for huggingface tokenizer >=0.8"""
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.model',
path=model_path,
sha1_hash='66ccadf6e5e354ff9604e4a82f107a2ac873abd5')
vocab_path = os.path.join(dir_path, 'hf_wordpiece_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_wordpiece_new_0.8/hf_wordpiece.vocab',
path=vocab_path,
sha1_hash='dd6fdf4bbc74eaa8806d12cb3d38a4d9a306aea8')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['Hel', '##lo', ',', 'y', '[UNK]', 'all', '!',
'How', 'are', 'you', '[UNK]', '[UNK]', '[UNK]', '[UNK]', '?'],
['Gl', '##u', '##on', '##N', '##L', '##P', 'is', 'great', '[UNK]',
'[UNK]', '[UNK]', '!', '!', '!'],
['Gl', '##u', '##on', '##N', '##L', '##P', '-',
'Am', '##az', '##on', '-', 'Ha', '##ibi', '##n', '-', 'Leon', '##ard',
'-', 'She', '##n', '##g', '-', 'Sh', '##ua', '##i', '-', 'X',
'##ing', '##j', '##ian', '.', '.', '.', '.', '.', '/', ':', '!',
'@', '#', '[UNK]', 'ab', '##c', '[UNK]']]
gt_offsets = [[(0, 3), (3, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13),
(14, 17), (18, 21), (22, 25), (26, 27), (28, 29), (30, 31),
(32, 33), (34, 35)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
(17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9),
(9, 11), (11, 13), (13, 15), (15, 16), (16, 18), (18, 21),
(21, 22), (22, 23), (23, 27), (27, 30), (30, 31), (31, 34),
(34, 35), (35, 36), (36, 37), (37, 39), (39, 41), (41, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52),
(52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58),
(58, 59), (59, 60), (60, 61), (62, 63), (63, 65), (65, 66),
(66, 67)]]
gt_decode = ['Hello, y all! How are you?',
'GluonNLP is great!!!',
'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian..... / '
':! @ # abc']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bpe_tokenizer_v08():
"""Test for huggingface BPE tokenizer >=0.8"""
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bpe_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.model',
path=model_path,
sha1_hash='ecda90979561ca4c5a8d769b5e3c9fa2270d5317')
vocab_path = os.path.join(dir_path, 'hf_bpe_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bpe_new_0.8/hf_bpe.vocab',
path=vocab_path,
sha1_hash='b92dde0b094f405208f3ec94b5eae88430bf4262')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['H', 'ello</w>', ',</w>', 'y</w>', 'all</w>', '!</w>',
'How</w>', 'are</w>', 'you</w>', '?</w>'],
['G', 'lu', 'on', 'N', 'L', 'P</w>', 'is</w>', 'great</w>',
'!</w>', '!</w>', '!</w>'],
['G', 'lu', 'on', 'N', 'L', 'P</w>', '-</w>', 'Amaz', 'on</w>',
'-</w>', 'Ha', 'i', 'bin</w>', '-</w>', 'Leon', 'ard</w>', '-</w>',
'Sh', 'eng</w>', '-</w>', 'S', 'hu', 'ai</w>', '-</w>', 'X', 'ing',
'j', 'ian</w>', '.</w>', '.</w>', '.</w>', '.</w>', '.</w>', '/</w>',
':</w>', '!</w>', '@</w>', '#</w>', 'ab', 'c</w>']]
gt_offsets = [[(0, 1), (1, 5), (5, 6), (7, 8), (9, 12), (12, 13), (14, 17),
(18, 21), (22, 25), (34, 35)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (9, 11), (12, 17),
(20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 13), (13, 15),
(15, 16), (16, 18), (18, 19), (19, 22), (22, 23), (23, 27), (27, 30),
(30, 31), (31, 33), (33, 36), (36, 37), (37, 38), (38, 40), (40, 42),
(42, 43), (43, 44), (44, 47), (47, 48), (48, 51), (51, 52), (52, 53),
(53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60),
(60, 61), (63, 65), (65, 66)]]
gt_decode = ['Hello , y all ! How are you ?',
'GluonNLP is great ! ! !',
'GluonNLP - Amazon - Haibin - Leonard - Sheng - Shuai - Xingjian'
' . . . . . / : ! @ # abc']
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
@pytest.mark.skipif(parse_version(gluonnlp.utils.lazy_imports.try_import_huggingface_tokenizers().__version__)
>= parse_version('0.9.0.dev0'), reason="Test is only valid for tokenizers 0.8.x")
def test_huggingface_bytebpe_tokenizer_v08():
"""Test for huggingface bytebpe tokenizer >=0.8"""
with tempfile.TemporaryDirectory() as dir_path:
model_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.model')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.model',
path=model_path,
sha1_hash='a1c4da1f6c21df923e150f56dbb5b7a53c61808b')
vocab_path = os.path.join(dir_path, 'hf_bytebpe_new_0.8.vocab')
download(url=get_repo_url() +
'tokenizer_test_models/hf_bytebpe_new_0.8/hf_bytebpe.vocab',
path=vocab_path,
sha1_hash='7831b19078a3222f450e65b2188dc0770473123b')
tokenizer = HuggingFaceTokenizer(model_path, vocab_path)
gt_tokenized = [['He', 'llo', ',', 'Ġy', "'", 'all', '!', 'ĠHow', 'Ġare', 'Ġyou',
'Ġâ', 'ħ', '§', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ', 'ð', 'Ł', 'ĺ',
'ģ', 'Ġ', 'ð', 'Ł', 'ĺ', 'ģ', 'Ġ?'],
['G', 'l', 'u', 'on', 'N', 'L', 'P', 'Ġis', 'Ġgreat', 'ï', '¼', 'ģ',
'ï', '¼', 'ģ', 'ï', '¼', 'ģ', '!', '!', '!'],
['G', 'l', 'u', 'on', 'N', 'L', 'P', '-', 'Am', 'az', 'on', '-',
'Ha', 'ib', 'in', '-', 'Le', 'on', 'ard', '-', 'S', 'hen', 'g', '-',
'Sh', 'u', 'ai', '-', 'X', 'ing', 'j', 'ian',
'..', '...', '/', ':', '!', '@', '#', 'Ġ', "'", 'ab', 'c', "'"]]
gt_offsets = [[(0, 2), (2, 5), (5, 6), (6, 8), (8, 9), (9, 12), (12, 13), (13, 17),
(17, 21), (21, 25), (25, 27), (26, 27), (26, 27), (27, 28), (28, 29),
(28, 29), (28, 29), (28, 29), (29, 30), (30, 31), (30, 31), (30, 31),
(30, 31), (31, 32), (32, 33), (32, 33), (32, 33), (32, 33), (33, 35)],
[(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 11), (11, 17),
(17, 18), (17, 18), (17, 18), (18, 19), (18, 19), (18, 19), (19, 20),
(19, 20), (19, 20), (20, 21), (21, 22), (22, 23)],
[(0, 1), (1, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 11),
(11, 13), (13, 15), (15, 16), (16, 18), (18, 20), (20, 22), (22, 23),
(23, 25), (25, 27), (27, 30), (30, 31), (31, 32), (32, 35), (35, 36),
(36, 37), (37, 39), (39, 40), (40, 42), (42, 43), (43, 44),
(44, 47), (47, 48), (48, 51), (51, 53), (53, 56), (56, 57),
(57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63),
(63, 65), (65, 66), (66, 67)]]
gt_decode = ["Hello, y'all! How are you Ⅷ 😁 😁 😁 ?",
'GluonNLP is great!!!!!!',
"GluonNLP-Amazon-Haibin-Leonard-Sheng-Shuai-Xingjian...../:!@# 'abc'"]
verify_encode_token(tokenizer, SUBWORD_TEST_SAMPLES, gt_tokenized)
verify_pickleble(tokenizer, HuggingFaceTokenizer)
verify_encode_token_with_offsets(tokenizer, SUBWORD_TEST_SAMPLES, gt_offsets)
verify_decode_hf(tokenizer, SUBWORD_TEST_SAMPLES, gt_decode)
def test_tokenizers_create():
tokenizer = gluonnlp.data.tokenizers.create('moses', 'en')
tokenizer.encode('hello world!')
|
netbox_onboarding/tests/test_netbox_keeper.py
|
fallenfuzz/ntc-netbox-plugin-onboarding
| 180 |
136511
|
"""Unit tests for netbox_onboarding.onboard module and its classes.
(c) 2020 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf import settings
from django.test import TestCase
from django.utils.text import slugify
from dcim.models import Site, Manufacturer, DeviceType, DeviceRole, Device, Interface, Platform
from ipam.models import IPAddress
# from netbox_onboarding.netbox_keeper import NetdevKeeper
from netbox_onboarding.exceptions import OnboardException
from netbox_onboarding.netbox_keeper import NetboxKeeper
PLUGIN_SETTINGS = settings.PLUGINS_CONFIG["netbox_onboarding"]
class NetboxKeeperTestCase(TestCase):
"""Test the NetboxKeeper Class."""
def setUp(self):
"""Create a superuser and token for API calls."""
self.site1 = Site.objects.create(name="USWEST", slug="uswest")
def test_ensure_device_manufacturer_strict_missing(self):
"""Verify ensure_device_manufacturer function when Manufacturer object is not present."""
PLUGIN_SETTINGS["object_match_strategy"] = "strict"
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
with self.assertRaises(OnboardException) as exc_info:
nbk.ensure_device_manufacturer(create_manufacturer=False)
self.assertEqual(exc_info.exception.message, "ERROR manufacturer not found: Cisco")
self.assertEqual(exc_info.exception.reason, "fail-config")
nbk.ensure_device_manufacturer(create_manufacturer=True)
self.assertIsInstance(nbk.nb_manufacturer, Manufacturer)
self.assertEqual(nbk.nb_manufacturer.slug, slugify(onboarding_kwargs["netdev_vendor"]))
def test_ensure_device_manufacturer_loose_missing(self):
"""Verify ensure_device_manufacturer function when Manufacturer object is not present."""
PLUGIN_SETTINGS["object_match_strategy"] = "loose"
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
with self.assertRaises(OnboardException) as exc_info:
nbk.ensure_device_manufacturer(create_manufacturer=False)
self.assertEqual(exc_info.exception.message, "ERROR manufacturer not found: Cisco")
self.assertEqual(exc_info.exception.reason, "fail-config")
nbk.ensure_device_manufacturer(create_manufacturer=True)
self.assertIsInstance(nbk.nb_manufacturer, Manufacturer)
self.assertEqual(nbk.nb_manufacturer.slug, slugify(onboarding_kwargs["netdev_vendor"]))
def test_ensure_device_type_strict_missing(self):
"""Verify ensure_device_type function when DeviceType object is not present."""
PLUGIN_SETTINGS["object_match_strategy"] = "strict"
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.nb_manufacturer = Manufacturer.objects.create(name="Cisco", slug="cisco")
with self.assertRaises(OnboardException) as exc_info:
nbk.ensure_device_type(create_device_type=False)
self.assertEqual(exc_info.exception.message, "ERROR device type not found: CSR1000v")
self.assertEqual(exc_info.exception.reason, "fail-config")
nbk.ensure_device_type(create_device_type=True)
self.assertIsInstance(nbk.nb_device_type, DeviceType)
self.assertEqual(nbk.nb_device_type.slug, slugify(onboarding_kwargs["netdev_model"]))
def test_ensure_device_type_loose_missing(self):
"""Verify ensure_device_type function when DeviceType object is not present."""
PLUGIN_SETTINGS["object_match_strategy"] = "loose"
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.nb_manufacturer = Manufacturer.objects.create(name="Cisco", slug="cisco")
with self.assertRaises(OnboardException) as exc_info:
nbk.ensure_device_type(create_device_type=False)
self.assertEqual(exc_info.exception.message, "ERROR device type not found: CSR1000v")
self.assertEqual(exc_info.exception.reason, "fail-config")
nbk.ensure_device_type(create_device_type=True)
self.assertIsInstance(nbk.nb_device_type, DeviceType)
self.assertEqual(nbk.nb_device_type.slug, slugify(onboarding_kwargs["netdev_model"]))
def test_ensure_device_type_strict_present(self):
"""Verify ensure_device_type function when DeviceType object is already present."""
PLUGIN_SETTINGS["object_match_strategy"] = "strict"
manufacturer = Manufacturer.objects.create(name="Juniper", slug="juniper")
device_type = DeviceType.objects.create(slug="srx3600", model="SRX3600", manufacturer=manufacturer)
onboarding_kwargs = {
"netdev_hostname": "device2",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Juniper",
"netdev_nb_device_type_slug": device_type.slug,
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.nb_manufacturer = manufacturer
nbk.ensure_device_type(create_device_type=False)
self.assertEqual(nbk.nb_device_type, device_type)
def test_ensure_device_type_loose_present(self):
"""Verify ensure_device_type function when DeviceType object is already present."""
PLUGIN_SETTINGS["object_match_strategy"] = "loose"
manufacturer = Manufacturer.objects.create(name="Juniper", slug="juniper")
device_type = DeviceType.objects.create(slug="srx3600", model="SRX3600", manufacturer=manufacturer)
onboarding_kwargs = {
"netdev_hostname": "device2",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Juniper",
"netdev_nb_device_type_slug": device_type.slug,
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.nb_manufacturer = manufacturer
nbk.ensure_device_type(create_device_type=False)
self.assertEqual(nbk.nb_device_type, device_type)
def test_ensure_device_role_not_exist(self):
"""Verify ensure_device_role function when DeviceRole does not already exist."""
test_role_name = "mytestrole"
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": test_role_name,
"netdev_nb_role_color": PLUGIN_SETTINGS["default_device_role_color"],
"netdev_vendor": "Cisco",
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
with self.assertRaises(OnboardException) as exc_info:
nbk.ensure_device_role(create_device_role=False)
self.assertEqual(exc_info.exception.message, f"ERROR device role not found: {test_role_name}")
self.assertEqual(exc_info.exception.reason, "fail-config")
nbk.ensure_device_role(create_device_role=True)
self.assertIsInstance(nbk.nb_device_role, DeviceRole)
self.assertEqual(nbk.nb_device_role.slug, slugify(test_role_name))
def test_ensure_device_role_exist(self):
"""Verify ensure_device_role function when DeviceRole exist but is not assigned to the OT."""
device_role = DeviceRole.objects.create(name="Firewall", slug="firewall")
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": device_role.slug,
"netdev_nb_role_color": PLUGIN_SETTINGS["default_device_role_color"],
"netdev_vendor": "Cisco",
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device_role(create_device_role=False)
self.assertEqual(nbk.nb_device_role, device_role)
def test_ensure_device_role_assigned(self):
"""Verify ensure_device_role function when DeviceRole exist and is already assigned."""
device_role = DeviceRole.objects.create(name="Firewall", slug="firewall")
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": device_role.slug,
"netdev_nb_role_color": PLUGIN_SETTINGS["default_device_role_color"],
"netdev_vendor": "Cisco",
"netdev_nb_site_slug": self.site1.slug,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device_role(create_device_role=True)
self.assertEqual(nbk.nb_device_role, device_role)
def test_ensure_device_instance_not_exist(self):
"""Verify ensure_device_instance function."""
serial_number = "123456"
platform_slug = "cisco_ios"
hostname = "device1"
onboarding_kwargs = {
"netdev_hostname": hostname,
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_nb_role_color": PLUGIN_SETTINGS["default_device_role_color"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
"netdev_netmiko_device_type": platform_slug,
"netdev_serial_number": serial_number,
"netdev_mgmt_ip_address": "192.0.2.10",
"netdev_mgmt_ifname": "GigaEthernet0",
"netdev_mgmt_pflen": 24,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device()
self.assertIsInstance(nbk.device, Device)
self.assertEqual(nbk.device.name, hostname)
self.assertEqual(nbk.device.status, PLUGIN_SETTINGS["default_device_status"])
self.assertEqual(nbk.device.platform.slug, platform_slug)
self.assertEqual(nbk.device.serial, serial_number)
def test_ensure_device_instance_exist(self):
"""Verify ensure_device_instance function."""
manufacturer = Manufacturer.objects.create(name="Cisco", slug="cisco")
device_role = DeviceRole.objects.create(name="Switch", slug="switch")
device_type = DeviceType.objects.create(slug="c2960", model="c2960", manufacturer=manufacturer)
device_name = "test_name"
device = Device.objects.create(
name=device_name,
site=self.site1,
device_type=device_type,
device_role=device_role,
status="planned",
serial="987654",
)
onboarding_kwargs = {
"netdev_hostname": device_name,
"netdev_nb_role_slug": "switch",
"netdev_vendor": "Cisco",
"netdev_model": "c2960",
"netdev_nb_site_slug": self.site1.slug,
"netdev_netmiko_device_type": "cisco_ios",
"netdev_serial_number": "123456",
"netdev_mgmt_ip_address": "192.0.2.10",
"netdev_mgmt_ifname": "GigaEthernet0",
"netdev_mgmt_pflen": 24,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device()
self.assertIsInstance(nbk.device, Device)
self.assertEqual(nbk.device.pk, device.pk)
self.assertEqual(nbk.device.name, device_name)
self.assertEqual(nbk.device.platform.slug, "cisco_ios")
self.assertEqual(nbk.device.serial, "123456")
def test_ensure_interface_not_exist(self):
"""Verify ensure_interface function when the interface do not exist."""
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_nb_role_color": PLUGIN_SETTINGS["default_device_role_color"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
"netdev_netmiko_device_type": "cisco_ios",
"netdev_serial_number": "123456",
"netdev_mgmt_ip_address": "192.0.2.10",
"netdev_mgmt_ifname": "ge-0/0/0",
"netdev_mgmt_pflen": 24,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device()
self.assertIsInstance(nbk.nb_mgmt_ifname, Interface)
self.assertEqual(nbk.nb_mgmt_ifname.name, "ge-0/0/0")
def test_ensure_interface_exist(self):
"""Verify ensure_interface function when the interface already exist."""
manufacturer = Manufacturer.objects.create(name="Cisco", slug="cisco")
device_role = DeviceRole.objects.create(name="Switch", slug="switch")
device_type = DeviceType.objects.create(slug="c2960", model="c2960", manufacturer=manufacturer)
device_name = "test_name"
netdev_mgmt_ifname = "GigaEthernet0"
device = Device.objects.create(
name=device_name,
site=self.site1,
device_type=device_type,
device_role=device_role,
status="planned",
serial="987654",
)
intf = Interface.objects.create(name=netdev_mgmt_ifname, device=device)
onboarding_kwargs = {
"netdev_hostname": device_name,
"netdev_nb_role_slug": "switch",
"netdev_vendor": "Cisco",
"netdev_model": "c2960",
"netdev_nb_site_slug": self.site1.slug,
"netdev_netmiko_device_type": "cisco_ios",
"netdev_serial_number": "123456",
"netdev_mgmt_ip_address": "192.0.2.10",
"netdev_mgmt_ifname": netdev_mgmt_ifname,
"netdev_mgmt_pflen": 24,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device()
self.assertEqual(nbk.nb_mgmt_ifname, intf)
def test_ensure_primary_ip_not_exist(self):
"""Verify ensure_primary_ip function when the IP address do not already exist."""
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_nb_role_color": PLUGIN_SETTINGS["default_device_role_color"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
"netdev_netmiko_device_type": "cisco_ios",
"netdev_serial_number": "123456",
"netdev_mgmt_ip_address": "192.0.2.10",
"netdev_mgmt_ifname": "ge-0/0/0",
"netdev_mgmt_pflen": 24,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device()
self.assertIsInstance(nbk.nb_primary_ip, IPAddress)
self.assertIn(nbk.nb_primary_ip, Interface.objects.get(device=nbk.device, name="ge-0/0/0").ip_addresses.all())
self.assertEqual(nbk.device.primary_ip, nbk.nb_primary_ip)
def test_ensure_device_platform_missing(self):
"""Verify ensure_device_platform function when Platform object is not present."""
platform_name = "cisco_ios"
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
"netdev_nb_platform_slug": platform_name,
"netdev_netmiko_device_type": platform_name,
}
nbk = NetboxKeeper(**onboarding_kwargs)
with self.assertRaises(OnboardException) as exc_info:
nbk.ensure_device_platform(create_platform_if_missing=False)
self.assertEqual(exc_info.exception.message, f"ERROR device platform not found: {platform_name}")
self.assertEqual(exc_info.exception.reason, "fail-config")
nbk.ensure_device_platform(create_platform_if_missing=True)
self.assertIsInstance(nbk.nb_platform, Platform)
self.assertEqual(nbk.nb_platform.slug, slugify(platform_name))
def test_ensure_platform_present(self):
"""Verify ensure_device_platform function when Platform object is present."""
platform_name = "juniper_junos"
manufacturer = Manufacturer.objects.create(name="Juniper", slug="juniper")
device_type = DeviceType.objects.create(slug="srx3600", model="SRX3600", manufacturer=manufacturer)
platform = Platform.objects.create(slug=platform_name, name=platform_name,)
onboarding_kwargs = {
"netdev_hostname": "device2",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Juniper",
"netdev_nb_device_type_slug": device_type.slug,
"netdev_nb_site_slug": self.site1.slug,
"netdev_nb_platform_slug": platform_name,
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device_platform(create_platform_if_missing=False)
self.assertIsInstance(nbk.nb_platform, Platform)
self.assertEqual(nbk.nb_platform, platform)
self.assertEqual(nbk.nb_platform.slug, slugify(platform_name))
def test_platform_map(self):
"""Verify platform mapping of netmiko to slug functionality."""
# Create static mapping
PLUGIN_SETTINGS["platform_map"] = {"cisco_ios": "ios", "arista_eos": "eos", "cisco_nxos": "cisco-nxos"}
onboarding_kwargs = {
"netdev_hostname": "device1",
"netdev_nb_role_slug": PLUGIN_SETTINGS["default_device_role"],
"netdev_vendor": "Cisco",
"netdev_model": "CSR1000v",
"netdev_nb_site_slug": self.site1.slug,
"netdev_netmiko_device_type": "cisco_ios",
}
nbk = NetboxKeeper(**onboarding_kwargs)
nbk.ensure_device_platform(create_platform_if_missing=True)
self.assertIsInstance(nbk.nb_platform, Platform)
self.assertEqual(nbk.nb_platform.slug, slugify(PLUGIN_SETTINGS["platform_map"]["cisco_ios"]))
self.assertEqual(
Platform.objects.get(name=PLUGIN_SETTINGS["platform_map"]["cisco_ios"]).name,
slugify(PLUGIN_SETTINGS["platform_map"]["cisco_ios"]),
)
|
aries_cloudagent/vc/vc_ld/issue.py
|
kuraakhilesh8230/aries-cloudagent-python
| 247 |
136520
|
"""Verifiable Credential issuance methods."""
from ..ld_proofs import (
LinkedDataProof,
ProofPurpose,
sign,
CredentialIssuancePurpose,
DocumentLoaderMethod,
LinkedDataProofException,
)
from .models.credential import CredentialSchema
async def issue(
*,
credential: dict,
suite: LinkedDataProof,
document_loader: DocumentLoaderMethod,
purpose: ProofPurpose = None,
) -> dict:
"""Issue a verifiable credential.
    Takes the base credential document, validates its structure, and adds
    a digital signature to it.
Args:
credential (dict): Base credential document.
suite (LinkedDataProof): Signature suite to sign the credential with.
        document_loader (DocumentLoaderMethod): Document loader to use
purpose (ProofPurpose, optional): A proof purpose instance that will match
proofs to be verified and ensure they were created according to the
            appropriate purpose. Defaults to CredentialIssuancePurpose.
Raises:
LinkedDataProofException: When the credential has an invalid structure
OR signing fails
Returns:
dict: The signed verifiable credential
"""
# Validate credential
errors = CredentialSchema().validate(credential)
if len(errors) > 0:
raise LinkedDataProofException(
f"Credential contains invalid structure: {errors}"
)
# Set default proof purpose if not set
if not purpose:
purpose = CredentialIssuancePurpose()
# Sign the credential with LD proof
signed_credential = await sign(
document=credential,
suite=suite,
purpose=purpose,
document_loader=document_loader,
)
return signed_credential
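# Example usage (illustrative sketch only -- the credential dict, the proof suite and
# the document loader are assumed to be constructed elsewhere, e.g. in a wallet
# controller or a test fixture; they are not part of this module):
#
#     signed = await issue(
#         credential=credential,
#         suite=suite,                      # any configured LinkedDataProof suite
#         document_loader=document_loader,
#     )
#     assert "proof" in signed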
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
|
wenfeifei/miniblink49
| 5,964 |
136526
|
<reponame>wenfeifei/miniblink49
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
from webkitpy.layout_tests.models.test_expectations import *
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
class Base(unittest.TestCase):
# Note that all of these tests are written assuming the configuration
# being tested is Windows XP, Release build.
def __init__(self, testFunc):
host = MockHost()
self._port = host.port_factory.get('test-win-xp', None)
self._exp = None
unittest.TestCase.__init__(self, testFunc)
def get_basic_tests(self):
return ['failures/expected/text.html',
'failures/expected/image_checksum.html',
'failures/expected/crash.html',
'failures/expected/needsrebaseline.html',
'failures/expected/needsmanualrebaseline.html',
'failures/expected/missing_text.html',
'failures/expected/image.html',
'failures/expected/timeout.html',
'passes/text.html']
def get_basic_expectations(self):
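        # Each line follows the TestExpectations format:
        #   Bug(owner) <test or directory path> [ <expected results / modifiers> ]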
return """
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/crash.html [ WontFix ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
Bug(test) failures/expected/image_checksum.html [ WontFix ]
Bug(test) failures/expected/image.html [ WontFix Mac ]
"""
def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
self._port.expectations_dict = lambda: expectations_dict
expectations_to_lint = expectations_dict if is_lint_mode else None
self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_dict=expectations_to_lint, is_lint_mode=is_lint_mode)
def assert_exp_list(self, test, results):
self.assertEqual(self._exp.get_expectations(test), set(results))
def assert_exp(self, test, result):
self.assert_exp_list(test, [result])
def assert_bad_expectations(self, expectations, overrides=None):
self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp_list('failures/expected/image_checksum.html', [WONTFIX, SKIP])
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
class MiscTests(Base):
def test_multiple_results(self):
self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
self.assertEqual(self._exp.get_expectations('failures/expected/text.html'), set([FAIL, CRASH]))
def test_result_was_expected(self):
# test basics
self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False), True)
self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False), False)
# test handling of SKIPped tests and results
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False), True)
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([LEAK]), test_needs_rebaselining=False), True)
# test handling of MISSING results and the REBASELINE specifier
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True), True)
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False), False)
self.assertTrue(TestExpectations.result_was_expected(PASS, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(MISSING, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(IMAGE, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(IMAGE_PLUS_TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(AUDIO, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(TIMEOUT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(CRASH, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(LEAK, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
def test_remove_pixel_failures(self):
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
def test_suffixes_for_expectations(self):
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
def test_category_expectations(self):
        # This test checks that unknown tests are not present in the
        # expectations and that a known test that is part of a test category
        # is present in the expectations.
exp_str = 'Bug(x) failures/expected [ WontFix ]'
self.parse_exp(exp_str)
test_name = 'failures/expected/unknown-test.html'
unknown_test = test_name
self.assertRaises(KeyError, self._exp.get_expectations,
unknown_test)
self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
def test_get_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_expectations_string('failures/expected/text.html'), 'FAIL')
def test_expectation_to_string(self):
# Normal cases are handled by other tests.
self.parse_exp(self.get_basic_expectations())
self.assertRaises(ValueError, self._exp.expectation_to_string,
-1)
def test_get_test_set(self):
# Handle some corner cases for this routine not covered by other tests.
self.parse_exp(self.get_basic_expectations())
s = self._exp.get_test_set(WONTFIX)
self.assertEqual(s, set(['failures/expected/crash.html', 'failures/expected/image_checksum.html']))
def test_needs_rebaseline_reftest(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline.html'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline-expected.html'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline.html'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline-expected.html'), 'content')
self.parse_exp("""Bug(user) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(user) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]""", is_lint_mode=True)
self.assertFalse(True, "ParseError wasn't raised")
except ParseError as e:
warnings = """expectations:1 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsrebaseline.html
expectations:2 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsmanualrebaseline.html"""
self.assertEqual(str(e), warnings)
def test_parse_warning(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'test-to-rebaseline.html'), 'content')
self.parse_exp("Bug(user) [ FOO ] failures/expected/text.html [ Failure ]\n"
"Bug(user) non-existent-test.html [ Failure ]\n"
"Bug(user) disabled-test.html-disabled [ ImageOnlyFailure ]\n"
"Bug(user) [ Release ] test-to-rebaseline.html [ NeedsRebaseline ]", is_lint_mode=True)
self.assertFalse(True, "ParseError wasn't raised")
except ParseError as e:
warnings = ("expectations:1 Unrecognized specifier 'foo' failures/expected/text.html\n"
"expectations:2 Path does not exist. non-existent-test.html\n"
"expectations:4 A test cannot be rebaselined for Debug/Release. test-to-rebaseline.html")
self.assertEqual(str(e), warnings)
def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
oc = OutputCapture()
try:
oc.capture_output()
self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
finally:
_, _, logs = oc.restore_output()
self.assertNotEquals(logs, '')
def test_error_on_different_platform(self):
# parse_exp uses a Windows port. Assert that errors for Mac-only lines still show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_error_on_different_build_type(self):
# parse_exp uses a Release port. Assert that errors for Debug-only lines still show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_overrides(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
self.assert_exp_list('failures/expected/text.html', [FAIL, IMAGE])
def test_overrides__directory(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected [ Crash ]")
self.assert_exp_list('failures/expected/text.html', [FAIL, CRASH])
self.assert_exp_list('failures/expected/image.html', [CRASH])
def test_overrides__duplicate(self):
self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
"Bug(override) failures/expected/text.html [ Crash ]\n")
def test_pixel_tests_flag(self):
def match(test, result, pixel_tests_enabled):
return self._exp.matches_an_expected_result(
test, result, pixel_tests_enabled, sanitizer_is_enabled=False)
self.parse_exp(self.get_basic_expectations())
self.assertTrue(match('failures/expected/text.html', FAIL, True))
self.assertTrue(match('failures/expected/text.html', FAIL, False))
self.assertFalse(match('failures/expected/text.html', CRASH, True))
self.assertFalse(match('failures/expected/text.html', CRASH, False))
self.assertTrue(match('failures/expected/image_checksum.html', PASS, True))
self.assertTrue(match('failures/expected/image_checksum.html', PASS, False))
self.assertTrue(match('failures/expected/crash.html', PASS, False))
self.assertTrue(match('failures/expected/needsrebaseline.html', TEXT, True))
self.assertFalse(match('failures/expected/needsrebaseline.html', CRASH, True))
self.assertTrue(match('failures/expected/needsmanualrebaseline.html', TEXT, True))
self.assertFalse(match('failures/expected/needsmanualrebaseline.html', CRASH, True))
self.assertTrue(match('passes/text.html', PASS, False))
def test_sanitizer_flag(self):
def match(test, result):
return self._exp.matches_an_expected_result(
test, result, pixel_tests_are_enabled=False, sanitizer_is_enabled=True)
self.parse_exp("""
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
""")
self.assertTrue(match('failures/expected/crash.html', CRASH))
self.assertTrue(match('failures/expected/image.html', PASS))
self.assertTrue(match('failures/expected/text.html', PASS))
self.assertTrue(match('failures/expected/timeout.html', TIMEOUT))
def test_more_specific_override_resets_skip(self):
self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
"Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
self.assert_exp('failures/expected/text.html', IMAGE)
self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
'failures/expected/text.html') in
self._exp.get_tests_with_result_type(SKIP))
def test_bot_test_expectations(self):
"""Test that expectations are merged rather than overridden when using flaky option 'unexpected'."""
test_name1 = 'failures/expected/text.html'
test_name2 = 'passes/text.html'
expectations_dict = OrderedDict()
expectations_dict['expectations'] = "Bug(x) %s [ ImageOnlyFailure ]\nBug(x) %s [ Slow ]\n" % (test_name1, test_name2)
self._port.expectations_dict = lambda: expectations_dict
expectations = TestExpectations(self._port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations(test_name1), set([IMAGE]))
self.assertEqual(expectations.get_expectations(test_name2), set([SLOW]))
def bot_expectations():
return {test_name1: ['PASS', 'TIMEOUT'], test_name2: ['CRASH']}
self._port.bot_expectations = bot_expectations
self._port._options.ignore_flaky_tests = 'unexpected'
expectations = TestExpectations(self._port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations(test_name1), set([PASS, IMAGE, TIMEOUT]))
self.assertEqual(expectations.get_expectations(test_name2), set([CRASH, SLOW]))
class SkippedTests(Base):
def check(self, expectations, overrides, skips, lint=False, expected_results=[WONTFIX, SKIP, FAIL]):
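# Builds a mock test-win-xp port with the given expectations, overrides and skipped
# tests, then asserts the expectation set computed for failures/expected/text.html.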
port = MockHost().port_factory.get('test-win-xp')
port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(skips)
expectations_to_lint = expectations_dict if lint else None
exp = TestExpectations(port, ['failures/expected/text.html'], expectations_dict=expectations_to_lint, is_lint_mode=lint)
self.assertEqual(exp.get_expectations('failures/expected/text.html'), set(expected_results))
def test_skipped_tests_work(self):
self.check(expectations='', overrides=None, skips=['failures/expected/text.html'], expected_results=[WONTFIX, SKIP])
def test_duplicate_skipped_test_fails_lint(self):
self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
overrides=None, skips=['failures/expected/text.html'], lint=True)
def test_skipped_file_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected'])
def test_skipped_file_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected'])
def test_skipped_entry_dont_exist(self):
port = MockHost().port_factory.get('test-win-xp')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = ''
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
capture = OutputCapture()
capture.capture_output()
exp = TestExpectations(port)
_, _, logs = capture.restore_output()
self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
def test_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
notrun = 'failures/expected/text.html'
self._exp.add_extra_skipped_tests([notrun])
self.assertEqual('NOTRUN', self._exp.get_expectations_string(notrun))
class ExpectationSyntaxTests(Base):
def test_unrecognized_expectation(self):
self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
def test_macro(self):
exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
def assert_tokenize_exp(self, line, bugs=None, specifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
bugs = bugs or []
specifiers = specifiers or []
expectations = expectations or []
warnings = warnings or []
filename = 'TestExpectations'
line_number = '1'
expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
self.assertEqual(expectation_line.warnings, warnings)
self.assertEqual(expectation_line.name, name)
self.assertEqual(expectation_line.filename, filename)
self.assertEqual(expectation_line.line_numbers, line_number)
if not warnings:
self.assertEqual(expectation_line.specifiers, specifiers)
self.assertEqual(expectation_line.expectations, expectations)
def test_comments(self):
self.assert_tokenize_exp("# comment", name=None, comment="# comment")
self.assert_tokenize_exp("foo.html [ Pass ] # comment", comment="# comment", expectations=['PASS'], specifiers=[])
def test_config_specifiers(self):
self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', specifiers=['MAC'], expectations=['FAIL'])
def test_unknown_config(self):
self.assert_tokenize_exp('[ Foo ] foo.html [ Pass ]', specifiers=['Foo'], expectations=['PASS'])
def test_unknown_expectation(self):
self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])
def test_skip(self):
self.assert_tokenize_exp('foo.html [ Skip ]', specifiers=[], expectations=['SKIP'])
def test_slow(self):
self.assert_tokenize_exp('foo.html [ Slow ]', specifiers=[], expectations=['SLOW'])
def test_wontfix(self):
self.assert_tokenize_exp('foo.html [ WontFix ]', specifiers=[], expectations=['WONTFIX', 'SKIP'])
self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', specifiers=[], expectations=['WONTFIX', 'SKIP'],
warnings=['A test marked Skip or WontFix must not have other expectations.'])
def test_blank_line(self):
self.assert_tokenize_exp('', name=None)
def test_warnings(self):
self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.', 'Missing expectations.'], name=None)
self.assert_tokenize_exp('[ [', warnings=['unexpected "["', 'Missing expectations.'], name=None)
self.assert_tokenize_exp('crbug.com/12345 ]', warnings=['unexpected "]"', 'Missing expectations.'], name=None)
self.assert_tokenize_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.', 'Missing expectations.'])
self.assert_tokenize_exp('foo.html', warnings=['Missing expectations.'])
class SemanticTests(Base):
def test_bug_format(self):
self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
def test_bad_bugid(self):
try:
self.parse_exp('crbug/1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
self.fail('should have raised an error about a bad bug identifier')
except ParseError as exp:
self.assertEqual(len(exp.warnings), 3)
def test_missing_bugid(self):
self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=False)
self.assertFalse(self._exp.has_warnings())
try:
self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=True)
except ParseError as exp:
self.assertEqual(exp.warnings, ['expectations:1 Test lacks BUG specifier. failures/expected/text.html'])
def test_skip_and_wontfix(self):
# Skip is not allowed to have other expectations as well, because those
# expectations won't be exercised and may become stale.
self.parse_exp('failures/expected/text.html [ Failure Skip ]')
self.assertTrue(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
self.assertTrue(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
self.assertTrue(self._exp.has_warnings())
def test_rebaseline(self):
# Can't lint a file w/ 'REBASELINE' in it.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
is_lint_mode=True)
def test_duplicates(self):
self.assertRaises(ParseError, self.parse_exp, """
Bug(exp) failures/expected/text.html [ Failure ]
Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
self.assertRaises(ParseError, self.parse_exp,
self.get_basic_expectations(), overrides="""
Bug(override) failures/expected/text.html [ Failure ]
Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
def test_duplicate_with_line_before_preceding_line(self):
self.assert_bad_expectations("""Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
Bug(exp) [ Release ] failures/expected/text.html [ Failure ]
Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
""")
def test_missing_file(self):
self.parse_exp('Bug(test) missing_file.html [ Failure ]')
self.assertTrue(self._exp.has_warnings(), 1)
class PrecedenceTests(Base):
def test_file_over_directory(self):
# This tests handling of precedence of specific lines over directories
# and of expectations covering entire directories.
exp_str = """
Bug(x) failures/expected/text.html [ Failure ]
Bug(y) failures/expected [ WontFix ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
exp_str = """
Bug(x) failures/expected [ WontFix ]
Bug(y) failures/expected/text.html [ Failure ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
def test_ambiguous(self):
self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
"Bug(test) [ Win ] passes/text.html [ Failure ]\n")
def test_more_specifiers(self):
self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
"Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")
def test_order_in_file(self):
self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
"Bug(test) [ Release ] : passes/text.html [ Pass ]\n")
def test_macro_overrides(self):
self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
"Bug(test) [ XP ] passes/text.html [ Failure ]\n")
class RemoveConfigurationsTest(Base):
def test_remove(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port, self.get_basic_tests())
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
self.assertEqual("""Bug(x) [ Linux Win7 Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_needs_rebaseline(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Win ] failures/expected/foo.html [ NeedsRebaseline ]
"""}
expectations = TestExpectations(test_port, self.get_basic_tests())
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
self.assertEqual("""Bug(x) [ XP Debug ] failures/expected/foo.html [ NeedsRebaseline ]
Bug(x) [ Win7 ] failures/expected/foo.html [ NeedsRebaseline ]
""", actual_expectations)
def test_remove_multiple_configurations(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([
('failures/expected/foo.html', test_config),
('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration()),
])
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_with_comments(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_with_comments_at_start(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
self.assertEqual("""
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_with_comments_at_end_with_no_trailing_newline(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]""", actual_expectations)
def test_remove_line_leaves_comments_for_next_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """
# This comment line should not get stripped.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
self.assertEqual("""
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_no_whitespace_lines(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """
# This comment line should get stripped.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
self.assertEqual(""" # This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_first_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
self.assertEqual(""" # This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_flaky_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win ] failures/expected/foo.html [ Failure Timeout ]
Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
self.assertEqual("""Bug(x) [ Win Debug ] failures/expected/foo.html [ Failure Timeout ]
Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
class RebaseliningTest(Base):
def test_get_rebaselining_failures(self):
# Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
self.parse_exp(self.get_basic_expectations())
self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
class TestExpectationsParserTests(unittest.TestCase):
def __init__(self, testFunc):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
unittest.TestCase.__init__(self, testFunc)
self._parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], is_lint_mode=False)
def test_expectation_line_for_test(self):
# This is kind of a silly test, but it at least ensures that we don't throw an error.
test_name = 'foo/test.html'
expectations = set(["PASS", "IMAGE"])
expectation_line = TestExpectationLine()
expectation_line.original_string = test_name
expectation_line.name = test_name
expectation_line.filename = '<Bot TestExpectations>'
expectation_line.line_numbers = '0'
expectation_line.expectations = expectations
self._parser._parse_line(expectation_line)
self.assertEqual(self._parser.expectation_line_for_test(test_name, expectations), expectation_line)
class TestExpectationSerializationTests(unittest.TestCase):
def __init__(self, testFunc):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
unittest.TestCase.__init__(self, testFunc)
def _tokenize(self, line):
return TestExpectationParser._tokenize_line('path', line, 0)
def assert_round_trip(self, in_string, expected_string=None):
expectation = self._tokenize(in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, expectation.to_string(self._converter))
def assert_list_round_trip(self, in_string, expected_string=None):
host = MockHost()
parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], is_lint_mode=False)
expectations = parser.parse('path', in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))
def test_unparsed_to_string(self):
expectation = TestExpectationLine()
self.assertEqual(expectation.to_string(self._converter), '')
expectation.comment = ' Qux.'
self.assertEqual(expectation.to_string(self._converter), '# Qux.')
expectation.name = 'bar'
self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
expectation.specifiers = ['foo']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
expectation.expectations = ['bAz']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
expectation.expectations = ['bAz1', 'baZ2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.specifiers = ['foo1', 'foO2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.warnings.append('Oh the horror.')
self.assertEqual(expectation.to_string(self._converter), '')
expectation.original_string = 'Yes it is!'
self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')
def test_unparsed_list_to_string(self):
expectation = TestExpectationLine()
expectation.comment = 'Qux.'
expectation.name = 'bar'
expectation.specifiers = ['foo']
expectation.expectations = ['bAz1', 'baZ2']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')
def test_parsed_to_string(self):
expectation_line = TestExpectationLine()
expectation_line.bugs = ['Bug(x)']
expectation_line.name = 'test/name/for/realz.html'
expectation_line.parsed_expectations = set([IMAGE])
self.assertEqual(expectation_line.to_string(self._converter), None)
expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]')
expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]')
def test_serialize_parsed_expectations(self):
expectation_line = TestExpectationLine()
expectation_line.parsed_expectations = set([])
parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
expectation_line.parsed_expectations = set([FAIL])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
expectation_line.parsed_expectations = set([PASS, IMAGE])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'image pass')
expectation_line.parsed_expectations = set([FAIL, PASS])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')
def test_serialize_parsed_specifier_string(self):
expectation_line = TestExpectationLine()
expectation_line.bugs = ['garden-o-matic']
expectation_line.parsed_specifiers = ['the', 'for']
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), 'for the')
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'for the win')
expectation_line.bugs = []
expectation_line.parsed_specifiers = []
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), '')
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'win')
def test_format_line(self):
self.assertEqual(TestExpectationLine._format_line([], ['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
self.assertEqual(TestExpectationLine._format_line([], ['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]')
def test_string_roundtrip(self):
self.assert_round_trip('')
self.assert_round_trip('[')
self.assert_round_trip('FOO [')
self.assert_round_trip('FOO ] bar')
self.assert_round_trip(' FOO [')
self.assert_round_trip(' [ FOO ] ')
self.assert_round_trip('[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] ] ] bar BAZ')
self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
self.assert_round_trip('FOO ] ] bar ==== BAZ')
self.assert_round_trip('=')
self.assert_round_trip('#')
self.assert_round_trip('# ')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo :')
self.assert_round_trip('# Foo : =')
def test_list_roundtrip(self):
self.assert_list_round_trip('')
self.assert_list_round_trip('\n')
self.assert_list_round_trip('\n\n')
self.assert_list_round_trip('bar')
self.assert_list_round_trip('bar\n# Qux.')
self.assert_list_round_trip('bar\n# Qux.\n')
def test_reconstitute_only_these(self):
lines = []
reconstitute_only_these = []
def add_line(matching_configurations, reconstitute):
expectation_line = TestExpectationLine()
expectation_line.original_string = "Nay"
expectation_line.bugs = ['Bug(x)']
expectation_line.name = 'Yay'
expectation_line.parsed_expectations = set([IMAGE])
expectation_line.matching_configurations = matching_configurations
lines.append(expectation_line)
if reconstitute:
reconstitute_only_these.append(expectation_line)
add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
serialized = TestExpectations.list_to_string(lines, self._converter)
self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]")
serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")
def disabled_test_string_whitespace_stripping(self):
# FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
self.assert_round_trip('\n', '')
self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
|
notebook/list_transpose.py
|
vhn0912/python-snippets
| 174 |
136545
|
import numpy as np
import pandas as pd
l_2d = [[0, 1, 2], [3, 4, 5]]
arr_t = np.array(l_2d).T
print(arr_t)
print(type(arr_t))
# [[0 3]
# [1 4]
# [2 5]]
# <class 'numpy.ndarray'>
l_2d_t = np.array(l_2d).T.tolist()
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
df_t = pd.DataFrame(l_2d).T
print(df_t)
print(type(df_t))
# 0 1
# 0 0 3
# 1 1 4
# 2 2 5
# <class 'pandas.core.frame.DataFrame'>
l_2d_t = pd.DataFrame(l_2d).T.values.tolist()
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
l_2d_t_tuple = list(zip(*l_2d))
print(l_2d_t_tuple)
print(type(l_2d_t_tuple))
# [(0, 3), (1, 4), (2, 5)]
# <class 'list'>
print(l_2d_t_tuple[0])
print(type(l_2d_t_tuple[0]))
# (0, 3)
# <class 'tuple'>
l_2d_t = [list(x) for x in zip(*l_2d)]
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
print(l_2d_t[0])
print(type(l_2d_t[0]))
# [0, 3]
# <class 'list'>
print(*l_2d)
# [0, 1, 2] [3, 4, 5]
print(list(zip([0, 1, 2], [3, 4, 5])))
# [(0, 3), (1, 4), (2, 5)]
print([list(x) for x in [(0, 3), (1, 4), (2, 5)]])
# [[0, 3], [1, 4], [2, 5]]
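# Added caveat (not in the original snippet): zip() stops at the shortest row, so
# transposing a ragged nested list silently drops elements.
l_ragged = [[0, 1, 2], [3, 4]]
print([list(x) for x in zip(*l_ragged)])
# [[0, 3], [1, 4]]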
|
mindinsight/wizard/create_project.py
|
fapbatista/mindinsight
| 216 |
136553
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Create project command module."""
import os
import re
import sys
import textwrap
from pathlib import Path
import click
from mindinsight.utils.command import BaseCommand
from mindinsight.wizard.base.utility import find_network_maker_names, load_network_maker, process_prompt_choice
from mindinsight.wizard.common.exceptions import CommandError
from mindinsight.wizard.conf.constants import SUPPORT_MINDSPORE_VERSION, QUESTION_START
class CreateProject(BaseCommand):
"""Create project class."""
name = 'createproject'
description = 'create project'
def __init__(self):
self._network_types = find_network_maker_names()
def add_arguments(self, parser):
"""
Add arguments to parser.
Args:
parser (ArgumentParser): Specify parser to which arguments are added.
"""
parser.add_argument(
'name',
type=str,
help='Specify the new project name.')
def _make_project_dir(self, project_name):
self._check_project_dir(project_name)
permissions = os.R_OK | os.W_OK | os.X_OK
mode = permissions << 6
project_dir = os.path.join(os.getcwd(), project_name)
os.makedirs(project_dir, mode=mode, exist_ok=True)
return project_dir
@staticmethod
def _check_project_dir(project_name):
"""Check project directory whether empty or exist."""
if not re.search('^[A-Za-z0-9][A-Za-z0-9._-]*$', project_name):
raise CommandError("'%s' is not a valid project name. Please input a valid name matching "
"regex ^[A-Za-z0-9][A-Za-z0-9._-]*$" % project_name)
project_dir = os.path.join(os.getcwd(), project_name)
if os.path.exists(project_dir):
output_path = Path(project_dir)
if output_path.is_dir():
if os.listdir(project_dir):
raise CommandError('%s already exists, %s is not empty directory, please try another name.'
% (project_name, project_dir))
else:
raise CommandError('There is a file in the current directory with the same name as the project %s, '
'please try another name.' % project_name)
return True
def ask_network(self):
"""Ask user question for selecting a network to create."""
network_type_choices = self._network_types[:]
network_type_choices.sort(reverse=False)
prompt_msg = '{}:\n{}\n'.format(
'%sPlease select a network' % QUESTION_START,
'\n'.join(f'{idx: >4}: {choice}' for idx, choice in enumerate(network_type_choices, start=1))
)
prompt_type = click.IntRange(min=1, max=len(network_type_choices))
choice = 0
while not choice:
choice = click.prompt(prompt_msg, default=0, type=prompt_type,
hide_input=False, show_choices=False,
confirmation_prompt=False, show_default=False,
value_proc=lambda x: process_prompt_choice(x, prompt_type))
if not choice:
click.secho(textwrap.dedent("Network is required."), fg='red')
click.secho(textwrap.dedent("Your choice is %s." % network_type_choices[choice - 1]), fg='yellow')
return network_type_choices[choice - 1]
@staticmethod
def echo_notice():
"""Echo notice for depending environment."""
click.secho(textwrap.dedent(
"[NOTICE] The final generated scripts should be run under environment "
"where mindspore==%s and related device drivers are installed. " % SUPPORT_MINDSPORE_VERSION), fg='yellow')
def run(self, args):
"""Override run method to start."""
project_name = args.get('name')
try:
self._check_project_dir(project_name)
except CommandError as error:
click.secho(error.message, fg='red')
sys.exit(1)
try:
self.echo_notice()
network_maker_name = self.ask_network()
network_maker = load_network_maker(network_maker_name)
network_maker.configure()
except click.exceptions.Abort:
sys.exit(1)
project_dir = self._make_project_dir(project_name)
source_files = network_maker.generate(**args)
for source_file in source_files:
source_file.write(project_dir)
click.secho(f"{project_name} is generated in {project_dir}")
|
src/dispatch/plugins/dispatch_slack/modals/feedback/views.py
|
axellaurelut/dispatch
| 3,417 |
136555
|
import json
from dispatch.enums import DispatchEnum
from dispatch.incident.models import Incident
from dispatch.feedback.enums import FeedbackRating
class RatingFeedbackBlockId(DispatchEnum):
anonymous = "anonymous_field"
feedback = "feedback_field"
rating = "rating_field"
class RatingFeedbackCallbackId(DispatchEnum):
submit_form = "rating_feedback_submit_form"
def rating_feedback_view(incident: Incident, channel_id: str):
"""Builds all blocks required to rate and provide feedback about an incident."""
modal_template = {
"type": "modal",
"title": {"type": "plain_text", "text": "Incident Feedback"},
"blocks": [
{
"type": "context",
"elements": [
{
"type": "plain_text",
"text": "Use this form to rate your experience and provide feedback about the incident.",
}
],
},
],
"close": {"type": "plain_text", "text": "Cancel"},
"submit": {"type": "plain_text", "text": "Submit"},
"callback_id": RatingFeedbackCallbackId.submit_form,
"private_metadata": json.dumps({"incident_id": str(incident.id), "channel_id": channel_id}),
}
rating_picker_options = []
for rating in FeedbackRating:
rating_picker_options.append(
{"text": {"type": "plain_text", "text": rating}, "value": rating}
)
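# Each FeedbackRating member becomes one option of the static_select element below;
# the "text" dict is what Slack displays and "value" is what the view submission returns.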
rating_picker_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.rating,
"label": {"type": "plain_text", "text": "Rate your experience"},
"element": {
"type": "static_select",
"placeholder": {"type": "plain_text", "text": "Select a rating"},
"options": rating_picker_options,
},
"optional": False,
}
modal_template["blocks"].append(rating_picker_block)
feedback_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.feedback,
"label": {"type": "plain_text", "text": "Give us feedback"},
"element": {
"type": "plain_text_input",
"action_id": RatingFeedbackBlockId.feedback,
"placeholder": {
"type": "plain_text",
"text": "How would you describe your experience?",
},
"multiline": True,
},
"optional": False,
}
modal_template["blocks"].append(feedback_block)
anonymous_checkbox_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.anonymous,
"label": {
"type": "plain_text",
"text": "Check the box if you wish to provide your feedback anonymously",
},
"element": {
"type": "checkboxes",
"action_id": RatingFeedbackBlockId.anonymous,
"options": [
{
"value": "anonymous",
"text": {"type": "plain_text", "text": "Anonymize my feedback"},
},
],
},
"optional": True,
}
modal_template["blocks"].append(anonymous_checkbox_block)
return modal_template
|
build/sage_bootstrap/levenshtein.py
|
bopopescu/sage
| 1,742 |
136557
|
# -*- coding: utf-8 -*-
u"""
Levenshtein Distance
The Levenshtein distance between two words is the minimal number of
edits that turn one word into the other. Here, "edit" means a
single-letter addition, single-letter deletion, or exchange of a
letter with another letter.
http://en.wikipedia.org/wiki/Levenshtein_distance
EXAMPLES::
>>> from sage_bootstrap.levenshtein import Levenshtein
>>> Levenshtein(5)(u'Queensryche', u'Queensrÿche')
1
"""
#*****************************************************************************
# Copyright (C) 2015 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
class DistanceExceeded(Exception):
pass
class Levenshtein(object):
def __init__(self, limit):
"""
Levenshtein Distance with Maximum Distance Cutoff
Args:
limit (int): if the distance exceeds the limit, a
:class:`DistanceExceeded` is raised and the
computation is aborted.
EXAMPLES::
>>> from sage_bootstrap.levenshtein import Levenshtein
>>> lev3 = Levenshtein(3)
>>> lev3(u'saturday', u'sunday')
3
>>> lev3(u'kitten', u'sitting')
3
>>> lev2 = Levenshtein(2)
>>> lev2(u'kitten', u'sitting')
Traceback (most recent call last):
...
DistanceExceeded
"""
self._limit = limit
def __call__(self, a, b):
"""
Calculate the Levenshtein distance.
Args:
a, b (str): the two strings to compare
Returns:
int: the Levenshtein distance if it is less or equal to
the distance limit.
EXAMPLES::
>>> from sage_bootstrap.levenshtein import Levenshtein
>>> lev3 = Levenshtein(3)
>>> lev3(u'Saturday', u'Sunday')
3
"""
n, m = len(a), len(b)
if n > m:
# Optimization to use O(min(n,m)) space
a, b, n, m = b, a, m, n
curr = range(n+1)
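# curr starts as row 0 of the DP table: turning a[:j] into the empty string costs j deletions.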
for i in range(1, m+1):
prev, curr = curr, [i]+[0]*n
for j in range(1, n+1):
cost_add, cost_del = prev[j]+1, curr[j-1]+1
cost_change = prev[j-1]
if a[j-1] != b[i-1]:
cost_change += 1
curr[j] = min(cost_add, cost_del, cost_change)
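# Early cutoff: the minimum of a DP row never decreases in later rows, so once every
# entry exceeds the limit the final distance must exceed it too.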
if min(curr) > self._limit:
raise DistanceExceeded
if curr[n] > self._limit:
raise DistanceExceeded
return curr[n]
|
Algo and DSA/LeetCode-Solutions-master/Python/fixed-point.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
136562
|
<filename>Algo and DSA/LeetCode-Solutions-master/Python/fixed-point.py
# Time: O(logn)
# Space: O(1)
class Solution(object):
def fixedPoint(self, A):
"""
:type A: List[int]
:rtype: int
"""
left, right = 0, len(A)-1
while left <= right:
mid = left + (right-left)//2
if A[mid] >= mid:
right = mid-1
else:
left = mid+1
return left if left < len(A) and A[left] == left else -1
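# Illustrative check (added, not part of the original solution):
# Solution().fixedPoint([-10, -5, 0, 3, 7]) returns 3 because A[3] == 3.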
|
rj_gameplay/rj_gameplay/tactic/nmark_tactic.py
|
RoboJackets/robocup-software
| 200 |
136587
|
<reponame>RoboJackets/robocup-software<filename>rj_gameplay/rj_gameplay/tactic/nmark_tactic.py
from dataclasses import dataclass
from typing import List, Optional
import stp.rc as rc
import stp.tactic as tactic
import stp.role as role
import rj_gameplay.eval
import rj_gameplay.skill as skills
from rj_gameplay.skill import mark
import stp.skill as skill
import numpy as np
import stp.global_parameters as global_parameters
def get_closest_enemies_to_ball(num_enemies: int, world_state: rc.WorldState) -> List[rc.Robot]:
ball_pt = world_state.ball.pos
dist_to_enemies = {
np.linalg.norm(ball_pt - robot.pose[0:2]): robot
for robot in world_state.their_robots
}
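# Note (added observation): keying the dict by raw distance means two enemies at
# exactly the same distance would collapse into one entry; float distances make
# that unlikely in practice.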
# sort dict keys by dist (shortest first)
# return enemies that correspond to n shortest dists
return [dist_to_enemies[dist] for dist in sorted(dist_to_enemies.keys())[0:num_enemies]]
class marker_cost(role.CostFn):
"""Pick mark robots based on dist to the ball point
"""
def __init__(self, enemy_to_mark: rc.Robot=None):
self.enemy_to_mark = enemy_to_mark
def __call__(
self,
robot: rc.Robot,
prev_result: Optional["RoleResult"],
world_state: rc.WorldState,
) -> float:
# TODO: make a better way to avoid assignment of goalie to other roles
if world_state.game_info is not None:
if robot.id == world_state.goalie_id:
return 99
# TODO: prevent gameplay crashing w/out this check
if robot is None or self.enemy_to_mark is None:
return 99
# TODO(#1669): Remove this once role assignment no longer assigns non-visible robots
if not robot.visible:
return 99 # float('inf') threw ValueError
# TODO: use the convenience func in stp/role/ that has a stickiness for the last assignment
# TODO: this is actually using a local var, not the param given
# figure out how the param should be used
# if prev_result is not None and prev_result.role is not None:
# if robot.id == self.prev_result.role.robot.id:
# # return 0
# pass
return np.linalg.norm(robot.pose[0:2]-self.enemy_to_mark.pose[0:2]) / global_parameters.soccer.robot.max_speed
def unassigned_cost_fn(
self,
prev_result: Optional["RoleResult"],
world_state: rc.WorldState,
) -> float:
#TODO: Implement real unassigned cost function
return role.BIG_STUPID_NUMBER_CONST_FOR_UNASSIGNED_COST_PLS_CHANGE
class NMarkTactic(tactic.ITactic):
"""Marks the n closest enemies to ball with the closest robots on our team to said enemies.
"""
def __init__(self, n: int):
self.num_markers = n
# create empty mark SkillEntry for each robot
self.mark_list = [
tactic.SkillEntry(mark.Mark())
for i in range(self.num_markers)
]
# create cost func for each robot
self.cost_list = [marker_cost() for _ in range(self.num_markers)]
def compute_props(self):
pass
def create_request(self, **kwargs) -> role.RoleRequest:
"""Creates a sane default RoleRequest.
:return: A list of size 1 of a sane default RoleRequest.
"""
pass
def get_requests(
self, world_state: rc.WorldState, props
) -> List[tactic.RoleRequests]:
"""
:return: role request for n markers
"""
if world_state is not None and world_state.ball.visible:
# assign n closest enemies to respective skill and role costFn
closest_enemies = get_closest_enemies_to_ball(self.num_markers, world_state)
for i in range(len(closest_enemies)):
self.mark_list[i].skill.target_robot = closest_enemies[i]
self.cost_list[i].enemy_to_mark = closest_enemies[i]
# create RoleRequest for each SkillEntry
role_requests = {
self.mark_list[i]: [role.RoleRequest(role.Priority.LOW, False, self.cost_list[i])]
for i in range(self.num_markers)
}
return role_requests
def tick(self, world_state: rc.WorldState,
role_results: tactic.RoleResults) -> List[tactic.SkillEntry]:
"""
:return: skills for the number of markers assigned from the n markers
"""
# create list of skills based on if RoleResult exists for SkillEntry
skills = [
mark_skill_entry
for mark_skill_entry in self.mark_list
if role_results[mark_skill_entry][0]
]
return skills
def is_done(self, world_state):
# TODO: replace all similar is_done() with a .all() and generator expr
# see https://www.w3schools.com/python/ref_func_all.asp
for mark_skill in self.mark_list:
if not mark_skill.skill.is_done(world_state):
return False
return True
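# Generator-based equivalent suggested by the TODO above (illustrative sketch):
# return all(mark_skill.skill.is_done(world_state) for mark_skill in self.mark_list)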
|
plato/processors/inbound_feature_tensors.py
|
cuiboyuan/plato
| 135 |
136617
|
<filename>plato/processors/inbound_feature_tensors.py
"""
Implements a Processor for converting MistNet features from numpy ndarrays to PyTorch tensors.
"""
import logging
from typing import Any
import torch
from plato.processors import base
class Processor(base.Processor):
"""
Implements a Processor for converting MistNet features from numpy ndarrays to PyTorch tensors.
"""
def __init__(self, server_id=None, **kwargs) -> None:
super().__init__(**kwargs)
self.server_id = server_id
def process(self, data: Any) -> Any:
"""
Converts MistNet features from numpy ndarrays to PyTorch tensors.
"""
feature_dataset = []
for logit, target in data:
# Uses torch.as_tensor() as opposed to torch.tensor() to avoid data copying
# according to https://pytorch.org/docs/stable/generated/torch.tensor.html
feature_dataset.append(
(torch.as_tensor(logit), torch.as_tensor(target)))
logging.info(
"[Server #%d] Features converted from ndarrays to PyTorch tensors.",
self.server_id)
return feature_dataset
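# Illustrative usage (values assumed, not part of the original module):
# Processor(server_id=1).process([(np.zeros((2, 3)), np.zeros(2))]) would return a
# one-element list of (torch.Tensor, torch.Tensor) pairs; torch.as_tensor avoids
# copying the underlying numpy buffers where possible.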
|
scipy/stats/_variation.py
|
jake-is-ESD-protected/scipy
| 9,095 |
136647
|
<filename>scipy/stats/_variation.py
import numpy as np
from numpy.core.multiarray import normalize_axis_index
from scipy._lib._util import _nan_allsame
from ._stats_py import _chk_asarray, _contains_nan
def _nanvariation(a, *, axis=0, ddof=0, keepdims=False):
"""
Private version of `variation` that ignores nan.
`a` must be a numpy array.
`axis` is assumed to be normalized, i.e. 0 <= axis < a.ndim.
"""
#
# In theory, this should be as simple as something like
# nanstd(a, ddof=ddof, axis=axis, keepdims=keepdims) /
# nanmean(a, axis=axis, keepdims=keepdims)
# In practice, annoying issues arise. Specifically, numpy
# generates warnings in certain edge cases that we don't want
# to propagate to the user. Unfortunately, there does not
# appear to be a thread-safe way to filter out the warnings,
# so we have to do the calculation in a way that doesn't
# generate numpy warnings.
#
# Let N be the number of non-nan inputs in a slice.
# Conditions that generate nan:
# * empty input (i.e. N = 0)
# * All non-nan values 0
# * N < ddof
# * N == ddof and the input is constant
# Conditions that generate inf:
# * non-constant input and either
# * the mean is 0, or
# * N == ddof
#
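# Worked example of the rules above (values assumed for illustration): for the slice
# [1.0, nan, 1.0] with ddof=2 we have N == ddof == 2 and the non-nan values are
# constant, so the result is nan; for [1.0, nan, 3.0] with the same ddof, N == ddof
# but the values are not constant, so the result is +/-inf with the sign of the mean.
#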
a_isnan = np.isnan(a)
all_nan = a_isnan.all(axis=axis, keepdims=True)
all_nan_full = np.broadcast_to(all_nan, a.shape)
all_zero = (a_isnan | (a == 0)).all(axis=axis, keepdims=True) & ~all_nan
# ngood is the number of non-nan values in each slice.
ngood = (a.shape[axis] -
np.expand_dims(np.count_nonzero(a_isnan, axis=axis), axis))
# The return value is nan where ddof > ngood.
ddof_too_big = ddof > ngood
# If ddof == ngood, the return value is nan where the input is constant and
# inf otherwise.
ddof_equal_n = ddof == ngood
is_const = _nan_allsame(a, axis=axis, keepdims=True)
a2 = a.copy()
# If an entire slice is nan, `np.nanmean` will generate a warning,
# so we replace those nan's with 1.0 before computing the mean.
# We'll fix the corresponding output later.
a2[all_nan_full] = 1.0
mean_a = np.nanmean(a2, axis=axis, keepdims=True)
# If ddof >= ngood (the number of non-nan values in the slice), `np.nanstd`
# will generate a warning, so set all the values in such a slice to 1.0.
# We'll fix the corresponding output later.
a2[np.broadcast_to(ddof_too_big, a2.shape) | ddof_equal_n] = 1.0
with np.errstate(invalid='ignore'):
std_a = np.nanstd(a2, axis=axis, ddof=ddof, keepdims=True)
del a2
sum_zero = np.nansum(a, axis=axis, keepdims=True) == 0
# Where the sum along the axis is 0, replace mean_a with 1. This avoids
# division by zero. We'll fix the corresponding output later.
mean_a[sum_zero] = 1.0
# Here--finally!--is the calculation of the variation.
result = std_a / mean_a
# Now fix the values that were given fake data to avoid warnings.
result[~is_const & sum_zero] = np.inf
signed_inf_mask = ~is_const & ddof_equal_n
result[signed_inf_mask] = np.sign(mean_a[signed_inf_mask]) * np.inf
nan_mask = all_zero | all_nan | ddof_too_big | (ddof_equal_n & is_const)
result[nan_mask] = np.nan
if not keepdims:
result = np.squeeze(result, axis=axis)
if result.shape == ():
result = result[()]
return result
def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):
"""
Compute the coefficient of variation.
The coefficient of variation is the standard deviation divided by the
mean. This function is equivalent to::
np.std(x, axis=axis, ddof=ddof) / np.mean(x)
The default for ``ddof`` is 0, but many definitions of the coefficient
of variation use the square root of the unbiased sample variance
for the sample standard deviation, which corresponds to ``ddof=1``.
The function does not take the absolute value of the mean of the data,
so the return value is negative if the mean is negative.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation.
Default is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains ``nan``.
The following options are available:
* 'propagate': return ``nan``
* 'raise': raise an exception
* 'omit': perform the calculation with ``nan`` values omitted
The default is 'propagate'.
ddof : int, optional
Gives the "Delta Degrees Of Freedom" used when computing the
standard deviation. The divisor used in the calculation of the
standard deviation is ``N - ddof``, where ``N`` is the number of
elements. `ddof` must be less than ``N``; if it isn't, the result
will be ``nan`` or ``inf``, depending on ``N`` and the values in
the array. By default `ddof` is zero for backwards compatibility,
but it is recommended to use ``ddof=1`` to ensure that the sample
standard deviation is computed as the square root of the unbiased
sample variance.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
There are several edge cases that are handled without generating a
warning:
* If both the mean and the standard deviation are zero, ``nan``
is returned.
* If the mean is zero and the standard deviation is nonzero, ``inf``
is returned.
* If the input has length zero (either because the array has zero
length, or all the input values are ``nan`` and ``nan_policy`` is
``'omit'``), ``nan`` is returned.
* If the input contains ``inf``, ``nan`` is returned.
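For example (added illustration), ``variation([2, 2, 2])`` returns ``0.0`` because
the standard deviation is zero while the mean is nonzero, and ``variation([0, 0, 0])``
returns ``nan`` because both the mean and the standard deviation are zero.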
References
----------
.. [1] <NAME>. and <NAME>. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> from scipy.stats import variation
>>> variation([1, 2, 3, 4, 5], ddof=1)
0.5270462766947299
Compute the variation along a given dimension of an array that contains
a few ``nan`` values:
>>> x = np.array([[ 10.0, np.nan, 11.0, 19.0, 23.0, 29.0, 98.0],
... [ 29.0, 30.0, 32.0, 33.0, 35.0, 56.0, 57.0],
... [np.nan, np.nan, 12.0, 13.0, 16.0, 16.0, 17.0]])
>>> variation(x, axis=1, ddof=1, nan_policy='omit')
array([1.05109361, 0.31428986, 0.146483 ])
"""
a, axis = _chk_asarray(a, axis)
axis = normalize_axis_index(axis, ndim=a.ndim)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
return _nanvariation(a, axis=axis, ddof=ddof, keepdims=keepdims)
if a.size == 0 or ddof > n:
# Handle as a special case to avoid spurious warnings.
# The return values, if any, are all nan.
shp = list(a.shape)
if keepdims:
shp[axis] = 1
else:
del shp[axis]
if len(shp) == 0:
result = np.nan
else:
result = np.full(shp, fill_value=np.nan)
return result
mean_a = a.mean(axis, keepdims=True)
if ddof == n:
# Another special case. Result is either inf or nan.
std_a = a.std(axis=axis, ddof=0, keepdims=True)
result = np.full_like(std_a, fill_value=np.nan)
nonzero_std = std_a.flat > 0
result.flat[nonzero_std] = (np.sign(mean_a) * np.inf).flat[nonzero_std]
if result.shape == ():
result = result[()]
return result
with np.errstate(divide='ignore', invalid='ignore'):
std_a = a.std(axis, ddof=ddof, keepdims=True)
result = std_a / mean_a
if not keepdims:
result = np.squeeze(result, axis=axis)
if result.shape == ():
result = result[()]
return result
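# Illustrative sketch (not part of the original module): exercising the edge
# cases documented in the Notes section of `variation` above. Expected values
# follow that docstring; `np` is assumed to be the numpy import already used
# throughout this module.
if __name__ == '__main__':
    x = np.array([
        [0.0, 0.0, 0.0],    # mean 0, std 0       -> nan
        [-1.0, 0.0, 1.0],   # mean 0, std nonzero -> inf
        [2.0, 2.0, 2.0],    # constant, nonzero   -> 0.0
    ])
    print(variation(x, axis=1))       # expected: [nan, inf, 0.]
    print(variation([1.0, np.inf]))   # input containing inf -> nan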
|
python/tests/test_data.py
|
gbull25/ouster_example
| 344 |
136649
|
<filename>python/tests/test_data.py
"""Tests for lidar data parsing.
Checks that the output of parsing hasn't changed unexpectedly.
"""
from os import path
import numpy as np
import pytest
from ouster import client
from ouster.client import _client
pytest.register_assert_rewrite('ouster.client._digest')
import ouster.client._digest as digest # noqa
DATA_DIR = path.join(path.dirname(path.abspath(__file__)), "data")
@pytest.fixture(scope="module")
def stream_digest():
digest_path = path.join(DATA_DIR, "os-992011000121_digest.json")
with open(digest_path, 'r') as f:
return digest.StreamDigest.from_json(f.read())
@pytest.fixture(scope="module")
def info(stream_digest) -> client.SensorInfo:
return stream_digest.meta
def test_make_packets(info: client.SensorInfo) -> None:
pf = _client.PacketFormat.from_info(info)
client.ImuPacket(bytes(pf.imu_packet_size), info)
client.ImuPacket(bytearray(pf.imu_packet_size), info)
with pytest.raises(ValueError):
client.ImuPacket(bytes(), info)
with pytest.raises(ValueError):
client.ImuPacket(bytes(pf.imu_packet_size - 1), info)
client.LidarPacket(bytes(pf.lidar_packet_size), info)
client.LidarPacket(bytearray(pf.lidar_packet_size), info)
with pytest.raises(ValueError):
client.LidarPacket(bytes(), info)
with pytest.raises(ValueError):
client.LidarPacket(bytes(pf.lidar_packet_size - 1), info)
def test_imu_packet(info: client.SensorInfo) -> None:
pf = _client.PacketFormat.from_info(info)
p = client.ImuPacket(bytes(pf.imu_packet_size), info)
assert p.sys_ts == 0
assert p.accel_ts == 0
assert p.gyro_ts == 0
assert np.array_equal(p.accel, np.array([0.0, 0.0, 0.0]))
assert np.array_equal(p.angular_vel, np.array([0.0, 0.0, 0.0]))
with pytest.raises(AttributeError):
p.accel_ts = 0 # type: ignore
def test_lidar_packet(info: client.SensorInfo) -> None:
"""Test reading and writing values from empty packets."""
pf = _client.PacketFormat.from_info(info)
p = client.LidarPacket(bytes(pf.lidar_packet_size), info)
w = pf.columns_per_packet
h = pf.pixels_per_column
assert len(
client.ChanField.__members__) == 4, "Don't forget to update tests!"
assert np.array_equal(p.field(client.ChanField.RANGE), np.zeros((h, w)))
assert np.array_equal(p.field(client.ChanField.REFLECTIVITY),
np.zeros((h, w)))
assert np.array_equal(p.field(client.ChanField.SIGNAL), np.zeros((h, w)))
assert np.array_equal(p.field(client.ChanField.NEAR_IR), np.zeros((h, w)))
assert len(
client.ColHeader.__members__) == 5, "Don't forget to update tests!"
assert np.array_equal(p.header(client.ColHeader.TIMESTAMP), np.zeros(w))
assert np.array_equal(p.header(client.ColHeader.FRAME_ID), np.zeros(w))
assert np.array_equal(p.header(client.ColHeader.MEASUREMENT_ID),
np.zeros(w))
assert np.array_equal(p.header(client.ColHeader.ENCODER_COUNT),
np.zeros(w))
assert np.array_equal(p.header(client.ColHeader.STATUS), np.zeros(w))
# should not be able to modify a packet built from a read-only buffer
with pytest.raises(ValueError):
p.field(client.ChanField.SIGNAL)[0] = 1
with pytest.raises(ValueError):
p.header(client.ColHeader.MEASUREMENT_ID)[0] = 1
# a writeable lidar packet
q = client.LidarPacket(bytearray(pf.lidar_packet_size), info)
q.field(client.ChanField.SIGNAL)[:] = np.ones((h, w))
assert np.array_equal(q.field(client.ChanField.SIGNAL), np.ones((h, w)))
# TODO: masking prevents writing RANGE. Need separate get/set for fields
# q.view(client.ChanField.RANGE)[:] = np.ones((h, w))
# assert np.array_equal(q.view(client.ChanField.RANGE), np.ones((h, w)))
with pytest.raises(ValueError):
q.field(client.ChanField.SIGNAL)[:] = np.ones((w, h))
with pytest.raises(ValueError):
q.field(client.ChanField.SIGNAL)[:] = np.ones((h - 1, w))
with pytest.raises(ValueError):
q.field(client.ChanField.SIGNAL)[:] = np.ones((h, w + 1))
q.header(client.ColHeader.MEASUREMENT_ID)[:] = np.ones(w)
assert np.array_equal(q.header(client.ColHeader.MEASUREMENT_ID),
np.ones(w))
@pytest.fixture(scope="module")
def packet(stream_digest):
bin_path = path.join(DATA_DIR, "os-992011000121_data.bin")
with open(bin_path, 'rb') as b:
return next(iter(digest.LidarBufStream(b, stream_digest.meta)))
def test_read_real_packet(packet: client.LidarPacket) -> None:
"""Read some arbitrary values from a packet and check header invariants."""
assert packet.field(client.ChanField.RANGE)[0, 0] == 1723
assert packet.field(client.ChanField.REFLECTIVITY)[0, 0] == 196
assert packet.field(client.ChanField.SIGNAL)[0, 0] == 66
assert packet.field(client.ChanField.NEAR_IR)[0, 0] == 1768
assert np.all(np.diff(packet.header(client.ColHeader.FRAME_ID)) == 0)
assert np.all(np.diff(packet.header(client.ColHeader.MEASUREMENT_ID)) == 1)
# in 512xN mode, the angle between measurements is exactly 176 encoder ticks
assert np.all(
np.diff(packet.header(client.ColHeader.ENCODER_COUNT)) == 176)
assert np.all(packet.header(client.ColHeader.STATUS) == 0xffffffff)
def test_scan_native() -> None:
"""Check that a native scan is a writeable view of data."""
native = client._client.LidarScan(1024, 32)
assert not native.data.flags.owndata
assert native.data.flags.aligned
assert native.data.flags.writeable
native.data[0, 0] = 1
assert native.data[0, 0] == 1
native.data[:] = 42
assert (native.data == 42).all()
N_FIELDS = client.LidarScan.N_FIELDS
def test_scan_from_native() -> None:
"""Check that converting from a native scan does not copy data."""
native = client._client.LidarScan(1024, 32)
native.data[:] = np.arange(native.data.size).reshape(N_FIELDS, -1)
ls = client.LidarScan.from_native(native)
assert not ls._data.flags.owndata
assert ls._data.flags.aligned
assert ls._data.flags.writeable
assert ls._data.base is native.data.base
assert np.array_equal(ls._data, native.data)
del native
ls._data[:] = 42
assert (ls._data == 42).all()
def test_scan_to_native() -> None:
"""Check that converting to a native scan copies data."""
ls = client.LidarScan(32, 1024)
ls._data[:] = np.arange(ls._data.size).reshape(N_FIELDS, -1)
native = ls.to_native()
assert ls._data.base is not native.data.base
assert np.array_equal(ls._data, native.data)
ls._data[0, 0] = 42
native.data[:] = 1
assert ls._data[0, 0] == 42
def test_scan_not_complete() -> None:
"""Test that not all scans are considered complete."""
ls = client.LidarScan(32, 1024)
status = ls.header(client.ColHeader.STATUS)
assert not ls._complete()
status[0] = 0x01
assert not ls._complete()
assert not ls._complete((0, 0))
status[1:] = 0xFFFFFFFF
assert not ls._complete()
status[:] = 0xFFFFFFFF
status[-1] = 0x01
assert not ls._complete()
# windows are inclusive but python slicing is not
status[:] = 0x00
status[:10] = 0xFFFFFFFF
assert not ls._complete((0, 10))
status[:] = 0x00
status[11:21] = 0xFFFFFFFF
assert not ls._complete((10, 20))
# window [i, i]
status[:] = 0x00
status[0] = 0xFFFFFFFF
assert not ls._complete()
assert not ls._complete((0, 1))
assert ls._complete((0, 0))
status[:] = 0x00
status[128] = 0xFFFFFFFF
assert not ls._complete()
assert not ls._complete((127, 128))
assert ls._complete((128, 128))
@pytest.mark.parametrize("w, win_start, win_end", [
(512, 0, 511),
(512, 1, 0),
(512, 256, 0),
(512, 256, 1),
(1024, 0, 1023),
(1024, 0, 512),
(1024, 0, 0),
(1024, 1023, 1023),
(1024, 1023, 0),
(1024, 1023, 1),
(2048, 0, 2047),
(2048, 1024, 512),
(2048, 1024, 0),
(2048, 1024, 1),
(2048, 511, 511),
])
def test_scan_complete(w, win_start, win_end) -> None:
"""Set the status headers to the specified window and check _complete()."""
ls = client.LidarScan(32, w)
status = ls.header(client.ColHeader.STATUS)
if win_start <= win_end:
status[win_start:win_end + 1] = 0xFFFFFFFF
else:
status[0:win_end + 1] = 0xFFFFFFFF
status[win_start:] = 0xFFFFFFFF
assert ls._complete((win_start, win_end))
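# Illustrative sketch (not part of the original test file): the wrap-around
# window logic that test_scan_complete exercises, written as a standalone
# helper. `window_complete` is a hypothetical name, not part of the ouster
# client API; it only mirrors the semantics asserted above.
def window_complete(status: np.ndarray, win_start: int, win_end: int) -> bool:
    """True if every column in the inclusive window has a nonzero status.
    A window with win_start > win_end wraps around the end of the scan."""
    if win_start <= win_end:
        idx = np.arange(win_start, win_end + 1)
    else:
        idx = np.concatenate([np.arange(win_start, len(status)),
                              np.arange(0, win_end + 1)])
    return bool(np.all(status[idx] != 0))

def test_window_complete_helper() -> None:
    status = np.zeros(1024, dtype=np.uint32)
    status[1023] = 0xFFFFFFFF
    status[0] = 0xFFFFFFFF
    assert window_complete(status, 1023, 0)      # wrapping window [1023, 0]
    assert not window_complete(status, 1022, 0)  # column 1022 still zero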
|
tests/github/test_issue_0011.py
|
next-franciscoalgaba/python-benedict
| 365 |
136670
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from benedict import benedict
import unittest
class github_issue_0011_test_case(unittest.TestCase):
def test_github_issue_0011(self):
"""
https://github.com/fabiocaccamo/python-benedict/issues/11
"""
d = {
'lorem': [
{ 'ipsum': 'a' },
{ 'ipsum': 'b' },
{ 'not_ipsum': 'c' },
],
'nested': [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
],
}
b = benedict(d)
v = b.match('lorem[*].ipsum', indexes=True)
v.sort()
self.assertEqual(v, ['a', 'b'])
v = b.match('nested[*][*]', indexes=True)
v.sort()
self.assertEqual(v, [1, 2, 3, 4, 5, 6, 7, 8, 9])
|
examples/synthetic/multiobjective_hartmann/in_code_demo.py
|
hase1128/dragonfly
| 675 |
136675
|
"""
In code demo for multiobjective_hartmann
-- <EMAIL>
"""
from dragonfly import load_config_file, multiobjective_maximise_functions
# From current directory
# from multiobjective_hartmann import compute_objectives, num_objectives
from multiobjective_hartmann import hartmann3_by_2_1, hartmann6, hartmann3_by_2_2
def main():
""" Main function. """
# First, specify the domain via a JSON configuration file.
# See examples/synthetic/multiobjective_branin_currinexp/in_code_demo.py for specifying
# the domain directly in code without a file.
config = load_config_file('config.json')
# Specify objectives -- either of the following options could work. Uncomment
# appropriately from imports and multiobjective_hartmann.py
# 1. compute_objectives returns a list of objective values, num_objectives is the number
# of objectives. This has to be a 2-tuple.
# moo_objectives = (compute_objectives, num_objectives)
# 2. Specify each function separately. This has to be a list.
moo_objectives = [hartmann3_by_2_1, hartmann6, hartmann3_by_2_2]
# Optimise
max_num_evals = 100 # Optimisation budget (max number of evaluations)
pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
moo_objectives, config.domain,
max_num_evals, config=config)
print(pareto_opt_pts)
print(pareto_opt_vals)
if __name__ == '__main__':
main()
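# Illustrative sketch (not part of the original demo): option 1 from the
# comments in main() above, passing the objectives as a 2-tuple instead of a
# list. It assumes multiobjective_hartmann.py defines compute_objectives
# (returning a list of objective values) and num_objectives, as those
# comments state.
def main_with_tuple_objectives():
    """Same optimisation as main(), but using the (function, count) form."""
    from multiobjective_hartmann import compute_objectives, num_objectives
    config = load_config_file('config.json')
    moo_objectives = (compute_objectives, num_objectives)  # must be a 2-tuple
    pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
        moo_objectives, config.domain, 100, config=config)
    print(pareto_opt_pts)
    print(pareto_opt_vals)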
|
pydis_site/apps/content/tests/test_utils.py
|
Robin5605/site
| 700 |
136686
|
<reponame>Robin5605/site<filename>pydis_site/apps/content/tests/test_utils.py<gh_stars>100-1000
from pathlib import Path
from django.http import Http404
from pydis_site.apps.content import utils
from pydis_site.apps.content.tests.helpers import (
BASE_PATH, MockPagesTestCase, PARSED_CATEGORY_INFO, PARSED_HTML, PARSED_METADATA
)
class GetCategoryTests(MockPagesTestCase):
"""Tests for the get_category function."""
def test_get_valid_category(self):
result = utils.get_category(Path(BASE_PATH, "category"))
self.assertEqual(result, {"title": "Category Name", "description": "Description"})
def test_get_nonexistent_category(self):
with self.assertRaises(Http404):
utils.get_category(Path(BASE_PATH, "invalid"))
def test_get_category_with_path_to_file(self):
# Valid categories are directories, not files
with self.assertRaises(Http404):
utils.get_category(Path(BASE_PATH, "root.md"))
def test_get_category_without_info_yml(self):
# Categories should provide an _info.yml file
with self.assertRaises(FileNotFoundError):
utils.get_category(Path(BASE_PATH, "tmp/category/subcategory_without_info"))
class GetCategoriesTests(MockPagesTestCase):
"""Tests for the get_categories function."""
def test_get_root_categories(self):
result = utils.get_categories(BASE_PATH)
info = PARSED_CATEGORY_INFO
categories = {
"category": info,
"tmp": info,
"not_a_page.md": info,
}
self.assertEqual(result, categories)
def test_get_categories_with_subcategories(self):
result = utils.get_categories(Path(BASE_PATH, "category"))
self.assertEqual(result, {"subcategory": PARSED_CATEGORY_INFO})
def test_get_categories_without_subcategories(self):
result = utils.get_categories(Path(BASE_PATH, "category/subcategory"))
self.assertEqual(result, {})
class GetCategoryPagesTests(MockPagesTestCase):
"""Tests for the get_category_pages function."""
def test_get_pages_in_root_category_successfully(self):
"""The method should successfully retrieve page metadata."""
root_category_pages = utils.get_category_pages(BASE_PATH)
self.assertEqual(
root_category_pages, {"root": PARSED_METADATA, "root_without_metadata": {}}
)
def test_get_pages_in_subcategories_successfully(self):
"""The method should successfully retrieve page metadata."""
category_pages = utils.get_category_pages(Path(BASE_PATH, "category"))
# Page metadata is properly retrieved
self.assertEqual(category_pages, {"with_metadata": PARSED_METADATA})
class GetPageTests(MockPagesTestCase):
"""Tests for the get_page function."""
def test_get_page(self):
# TOC is a special case because the markdown converter outputs the TOC as HTML
updated_metadata = {**PARSED_METADATA, "toc": '<div class="toc">\n<ul></ul>\n</div>\n'}
cases = [
("Root page with metadata", "root.md", PARSED_HTML, updated_metadata),
("Root page without metadata", "root_without_metadata.md", PARSED_HTML, {}),
("Page with metadata", "category/with_metadata.md", PARSED_HTML, updated_metadata),
("Page without metadata", "category/subcategory/without_metadata.md", PARSED_HTML, {}),
]
for msg, page_path, expected_html, expected_metadata in cases:
with self.subTest(msg=msg):
html, metadata = utils.get_page(Path(BASE_PATH, page_path))
self.assertEqual(html, expected_html)
self.assertEqual(metadata, expected_metadata)
def test_get_nonexistent_page_returns_404(self):
with self.assertRaises(Http404):
utils.get_page(Path(BASE_PATH, "invalid"))
|
django_migration_linter/cache.py
|
christianbundy/django-migration-linter
| 357 |
136712
|
<reponame>christianbundy/django-migration-linter<filename>django_migration_linter/cache.py<gh_stars>100-1000
import os
import pickle
class Cache(dict):
def __init__(self, django_folder, database, cache_path):
self.filename = os.path.join(
cache_path,
"{0}_{1}.pickle".format(django_folder.replace(os.sep, "_"), database),
)
if not os.path.exists(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename))
super(Cache, self).__init__()
def load(self):
try:
with open(self.filename, "rb") as f:
tmp_dict = pickle.load(f)
self.update(tmp_dict)
except IOError:
pass
def save(self):
with open(self.filename, "wb") as f:
pickle.dump(self, f, protocol=2)
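# Illustrative sketch (not part of the original module): typical use of the
# Cache class defined above. The paths and keys are made up for the example.
if __name__ == "__main__":
    cache = Cache(
        django_folder="/path/to/project",
        database="default",
        cache_path="/tmp/migration_linter_cache",
    )
    cache.load()                             # silently does nothing if no pickle exists yet
    cache["0001_initial"] = {"errors": []}   # Cache is a plain dict subclass
    cache.save()                             # written to <cache_path>/_path_to_project_default.pickle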
|