max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M)
---|---|---|---|---|
probing/utils.py | wietsedv/bertje | 104 | 70763 |
import sys
import yaml
class Config:
def __init__(self, cfg=None):
self.cfg = {}
if cfg is not None:
self.update(cfg)
def __getattribute__(self, name):
cfg = object.__getattribute__(self, 'cfg')
if name not in cfg:
return object.__getattribute__(self, name)
return cfg[name]
def items(self):
return object.__getattribute__(self, 'cfg').items()
def update(self, new_cfg):
cfg = self.cfg
for key, val in new_cfg.items():
if type(val) == dict:
val = Config(val)
if key in cfg:
cfg[key].update(val)
continue
cfg[key] = val
def add(self, arg, val=None):
# Manual item
if val is not None:
subkeys = arg.split('.')
subconfig = self
for subkey in subkeys[:-1]:
subconfig = subconfig.cfg[subkey]
if subkeys[-1] in subconfig.cfg:
if type(subconfig.cfg[subkeys[-1]]) == int:
val = int(val)
elif type(subconfig.cfg[subkeys[-1]]) == float:
val = float(val)
subconfig.cfg[subkeys[-1]] = val
print('{} is set to {}'.format(arg, val))
return
# Config file shortcut
if not arg.endswith('.yaml'):
arg = 'configs/{}.yaml'.format(arg)
# Config file
print('importing config from "{}"'.format(arg))
with open(arg) as f:
self.update(yaml.load(f, Loader=yaml.Loader))
def as_dict(self):
return {key: (val.as_dict() if isinstance(val, Config) else val) for key, val in self.cfg.items()}
def show(self, depth=0):
yaml.dump(self.as_dict(), sys.stdout)
def get_path(self, name):
return self.data.cfg[name].format(self.data.name, self.model.shortname)
def init_config():
config = Config()
config.add('configs/default.yaml')
for arg in sys.argv[1:]:
config.add(*arg.split('='))
return config
def reset_config():
global config
config = init_config()
config = init_config()
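# Illustrative usage sketch (hypothetical keys and values, not part of the
# original module):
#
#   cfg = Config({'model': {'name': 'bertje', 'lr': 0.001}})
#   cfg.add('model.lr', '0.01')   # dotted override; cast to float because the
#                                 # existing value is a float
#   cfg.model.lr                  # -> 0.01
#   cfg.show()                    # dumps the nested config as YAML to stdout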
|
tests/utils/test_classproperty.py | koskotG/ebonite | 270 | 70768 |
<reponame>koskotG/ebonite
from ebonite.utils.classproperty import classproperty
class MyClass:
@classproperty
def prop1(self):
return 'a'
@classproperty
@classmethod
def prop2(self):
return 'b'
def test_classproperty__get():
assert MyClass.prop1 == 'a'
assert MyClass.prop2 == 'b'
|
neurst/data/dataset_utils.py | ishine/neurst | 208 | 70783 |
# Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import tensorflow as tf
from absl import logging
from neurst.utils.compat import get_distributed_worker_setting
from neurst.utils.misc import deprecated, flatten_string_list
_MIN_BUCKET_BOUNDARY = 8
_BUCKET_BOUNDARY_SCALE = 1.1
_MAX_BUCKET_BOUNDARY = 256
def map_data_for_keras(dataset):
""" Maps data for training.
For TF v2, the 2nd parameter is omitted to make Keras training work.
Args:
dataset: A tf.data.Dataset object.
Returns:
A tf.data.Dataset object.
"""
def _fn(*args):
return (args,)
return dataset.map(
_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
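# Note (illustrative): _fn packs each element's components back into one tuple
# and wraps it as a 1-tuple, i.e. an element x of a single-component dataset is
# mapped to ((x,),), so Keras' fit() sees only model inputs and never looks for
# a separate target tensor.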
@deprecated
def _batch_examples_by_token(dataset,
batch_size,
bucket_boundaries,
padding_values,
padding_length,
example_length_func,
drop_remainder=True,
num_replicas_in_sync=1):
"""Group examples by similar lengths, and return batched dataset.
Each batch of similar-length examples is padded to the same length and may
contain a different number of elements in each batch, such that:
group_batch_size * padded_length <= batch_size.
This decreases the number of padding tokens per batch, which improves the
training speed.
Args:
dataset: Dataset of unbatched examples.
batch_size: Max number of tokens per batch of examples.
bucket_boundaries: A list of integers of the boundaries of each bucket.
padding_values: A tuple of constants for padding.
padding_length: A list/tuple of padding lengths, which will be passed to padded_batch.
example_length_func: A callable function that maps an input example to its maximum length.
drop_remainder: Whether the last batch should be dropped in the case it has fewer than batch_size.
num_replicas_in_sync: The number of GPUs or other workers. We will generate
global batches, and each global batch is equally divisible by number of replicas.
Returns:
Dataset of batched examples with similar lengths.
"""
# Get min and max boundary lists for each example. These are used to calculate
# the `bucket_id`, which is the index at which:
# buckets_min[bucket_id] <= len(example) < buckets_max[bucket_id]
# Note that using both min and max lists improves the performance.
buckets_min = [0] + bucket_boundaries[:-1]
buckets_max = bucket_boundaries
# Create list of batch sizes for each bucket_id, so that
# bucket_batch_size[bucket_id] * buckets_max[bucket_id] <= batch_size
bucket_batch_sizes = [batch_size // x // num_replicas_in_sync * num_replicas_in_sync
for x in buckets_max]
# bucket_id will be a tensor, so convert this list to a tensor as well.
bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
def example_to_bucket_id(examples):
"""Return int64 bucket id for this example, calculated based on length."""
seq_length = tf.cast(example_length_func(examples), tf.int32)
conditions_c = tf.logical_and(
tf.less_equal(buckets_min, seq_length),
tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
def window_size_fn(bucket_id):
"""Return number of examples to be grouped when given a bucket id."""
return bucket_batch_sizes[bucket_id]
def batching_fn(bucket_id, grouped_dataset):
"""Batch and add padding to a dataset of elements with similar lengths."""
bucket_batch_size = window_size_fn(bucket_id)
# Batch the dataset and add padding so that all input sequences in the
# examples have the same length, and all target sequences have the same
# lengths as well. Resulting lengths of inputs and targets can differ.
return grouped_dataset.padded_batch(
bucket_batch_size, padding_length,
padding_values=padding_values, drop_remainder=drop_remainder)
return dataset.apply(tf.data.experimental.group_by_window(
key_func=example_to_bucket_id,
reduce_func=batching_fn,
window_size=None,
window_size_func=window_size_fn))
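# Worked example (hypothetical numbers): with batch_size=4096 tokens,
# bucket_boundaries=[8, 16, 32] and num_replicas_in_sync=1, the per-bucket
# batch sizes become [4096 // 8, 4096 // 16, 4096 // 32] = [512, 256, 128],
# so buckets of shorter sequences hold more examples while the token budget
# per batch stays roughly constant.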
def create_batch_bucket_boundaries(max_length,
min_boundary=_MIN_BUCKET_BOUNDARY,
boundary_scale=_BUCKET_BOUNDARY_SCALE):
""" Creates training batch bucket boundaries.
Args:
max_length: The maximum length of example in dataset.
min_boundary: Minimum length in boundary.
boundary_scale: Amount to scale consecutive boundaries in the list.
Returns:
A list of bucket boundaries.
"""
# Create bucket boundaries list by scaling the previous boundary or adding 1
# (to ensure increasing boundary sizes).
bucket_boundaries = []
x = min_boundary
while x < max_length:
bucket_boundaries.append(x)
x = max(x + 1, int(x * boundary_scale))
if bucket_boundaries[-1] < max_length + 1:
bucket_boundaries = bucket_boundaries + [max_length + 1]
return bucket_boundaries
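# Worked example (hypothetical numbers):
#   create_batch_bucket_boundaries(max_length=20, min_boundary=8, boundary_scale=1.1)
# yields [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21]: each boundary is
# max(x + 1, int(x * 1.1)) of the previous one, and max_length + 1 is appended
# so the longest examples still fall into the final bucket.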
def associated_bucket_boundaries(a, b):
""" Creates training batch bucket boundaries.
Args:
a: A list of bucket boundaries.
b: Another list of bucket boundaries.
Returns:
Two refactored lists of bucket boundaries with the same size.
"""
length1 = len(a)
length2 = len(b)
if length1 == length2:
return a, b
elif length1 > length2:
step_size1 = length1 * 1. / length2
step_size2 = 1
else:
step_size1 = 1
step_size2 = length2 * 1. / length1
new_boundaries1 = []
new_boundaries2 = []
i = 1
while i < min(length1, length2) + 1:
new_boundaries1.append(a[int(math.ceil(i * step_size1)) - 1])
new_boundaries2.append(b[int(math.ceil(i * step_size2)) - 1])
i += 1
return new_boundaries1, new_boundaries2
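# Worked example (hypothetical numbers):
#   associated_bucket_boundaries([8, 16, 32, 64], [10, 20])
# walks the longer list with step 4 / 2 = 2.0 and the shorter one with step 1,
# returning ([16, 64], [10, 20]); both lists now have the same length and the
# i-th entries describe the same bucket.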
@deprecated
def load_from_tfrecord_and_auto_shard(features_file, shuffle=True,
example_parse_fn=None, deterministic=True):
""" Loads TFRecords and does autot-sharding according to worker num.
Args:
features_file: The TFRecords file path.
shuffle: Whether to shuffle files.
example_parse_fn: The example parse function for TF Record.
deterministic: Whether the outputs need to be produced in deterministic order.
Returns: A dataset.
"""
_files = features_file.split(",")
_features_files = []
for _file in _files:
if tf.io.gfile.isdir(_file):
_features_files.append(os.path.join(_file, "*train*"))
elif tf.io.gfile.exists(_file):
_features_files.append(_file)
else:
_features_files.append(_file + "*")
logging.info("Load TFRecords from {}".format(str(_features_files)))
dataset = tf.data.Dataset.list_files(_features_files, shuffle=shuffle)
# auto sharding
worker_id, num_workers, strategy = get_distributed_worker_setting()
if num_workers > 1 and strategy in ["horovod", "byteps"] and not shuffle:
logging.info("Shard %d of the whole dataset(total %d workers).", worker_id, num_workers)
dataset = dataset.shard(num_workers, worker_id)
# Read files and interleave results.
# When training, the order of the examples will be non-deterministic.
options = tf.data.Options()
options.experimental_deterministic = deterministic
dataset = dataset.interleave(
lambda f: tf.data.TFRecordDataset(f, buffer_size=32 * 1024 * 1024),
cycle_length=10,
num_parallel_calls=tf.data.experimental.AUTOTUNE).with_options(options)
if example_parse_fn is None:
return dataset
return dataset.map(example_parse_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def parse_tfexample(serialized_example, name_to_features,
name_mapping=None, map_func=None,
auxiliary_elements=None):
""" Parses TF example from TF Record. """
parsed = tf.io.parse_single_example(serialized_example, name_to_features)
elements = {}
for k, v in parsed.items():
if name_mapping is None or k not in name_mapping:
elements[k] = tf.sparse.to_dense(v)
else:
elements[name_mapping[k]] = tf.sparse.to_dense(v)
if isinstance(auxiliary_elements, dict):
elements.update(auxiliary_elements)
if map_func is None:
return elements
return map_func(elements)
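# Illustrative call (hypothetical feature spec and alias, not from the
# original module):
#   name_to_features = {"audio": tf.io.VarLenFeature(tf.float32),
#                       "transcript": tf.io.VarLenFeature(tf.int64)}
#   parse_tfexample(serialized, name_to_features,
#                   name_mapping={"transcript": "label"})
# would return {"audio": <dense float tensor>, "label": <dense int tensor>},
# with each sparse feature densified via tf.sparse.to_dense.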
def glob_tfrecords(file_path):
_files = flatten_string_list(file_path)
_features_files = []
for _file in _files:
if tf.io.gfile.isdir(_file):
_features_files.extend(tf.io.gfile.glob(os.path.join(_file, "*train*")))
elif tf.io.gfile.exists(_file):
_features_files.append(_file)
else:
_features_files.extend(tf.io.gfile.glob(_file + "*"))
return _features_files
def load_tfrecords(file_path,
name_to_features,
shuffle=False,
deterministic=True,
feature_name_mapping=None,
map_func=None,
sharding_index=0,
num_shards=1,
auto_shard=False,
auxiliary_elements=None) -> tf.data.Dataset:
""" Loads TFRecords and does autot-sharding according to worker num.
Args:
file_path: The TFRecords file path.
name_to_features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
shuffle: Whether to shuffle files.
deterministic: Whether the outputs need to be produced in deterministic order.
feature_name_mapping: A dict that maps the names in `name_to_features` to aliases.
map_func: A callable function to process the data.
sharding_index: The manually defined index for sharding.
num_shards: The manually defined number of shards operating in parallel.
auto_shard: Automatically shard the TFRecord parts if True.
auxiliary_elements: A dict containing auxiliary elements that will be
appended to the data sample.
Returns: A dataset.
"""
_features_files = []
for _file in flatten_string_list(file_path):
if tf.io.gfile.isdir(_file):
_features_files.append(os.path.join(_file, "*train*"))
elif tf.io.gfile.exists(_file):
_features_files.append(_file)
else:
_features_files.append(_file + "*")
# shuffle = (shuffle is True) and (num_shards == 1)
# dataset = tf.data.Dataset.list_files(_features_files, shuffle=shuffle)
dataset = tf.data.Dataset.list_files(_features_files, shuffle=False)
if num_shards > 1:
logging.info("Shard %d of the whole dataset(total %d workers).", sharding_index, num_shards)
dataset = dataset.shard(num_shards, sharding_index)
else:
# auto sharding
worker_id, num_workers, strategy = get_distributed_worker_setting()
if num_workers > 1 and strategy in ["horovod", "byteps"] and auto_shard:
logging.info("Shard %d of the whole dataset(total %d workers).", worker_id, num_workers)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
dataset = dataset.with_options(options)
dataset = dataset.shard(num_workers, worker_id)
logging.info("Loading TF Records from: ")
if shuffle:
dataset = dataset.shuffle(5000)
for _f in dataset:
logging.info(f" {_f.numpy()}")
# Read files and interleave results.
# When training, the order of the examples will be non-deterministic.
options = tf.data.Options()
options.experimental_deterministic = deterministic
dataset = dataset.interleave(
lambda f: tf.data.TFRecordDataset(f, buffer_size=128 * 1024 * 1024),
cycle_length=10,
num_parallel_calls=tf.data.experimental.AUTOTUNE).with_options(options)
if name_to_features is None:
return dataset
return dataset.map(lambda x: parse_tfexample(x, name_to_features, feature_name_mapping, map_func,
auxiliary_elements=auxiliary_elements),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def clean_dataset_by_length(dataset, data_max_lengths):
""" Filters empty datas, or datas exceeded max length. """
logging.info(f"Filtering empty data and datas exceeded max length={data_max_lengths}")
return dataset.filter(
lambda data_sample: tf.reduce_all([
(length == -1 or length is None
or tf.less_equal(tf.size(data_sample[k]), length)) # filter by max length
and (length == -1 or (length != -1 and tf.size(data_sample[k]) > 1)) # filter out empty lines
for k, length in data_max_lengths.items()
]))
@deprecated
def batch_sequential_dataset(dataset,
padding_values,
example_length_func=None,
batch_size=None,
batch_size_per_gpu=None,
batch_by_tokens=False,
bucket_boundaries=None,
data_max_lengths=None,
shuffer_buffer=0,
drop_remainder=True,
num_replicas_in_sync=1):
""" Calls padded_batch under special settings for sequential dataset.
Args:
dataset: A parallel dataset.
padding_values: A list of padding values, will be passed to dataset.padded_batch.
example_length_func: A callable function that takes a dict as input and returns
the "length" of this data sample.
batch_size: The number of sentences or word tokens according to `batch_by_tokens`.
batch_size_per_gpu: The per-GPU batch size.
batch_by_tokens: A bool, whether to batch the data by word tokens.
bucket_boundaries: A list of integers indicating the boundaries of the buckets when
`batch_by_tokens` is True.
data_max_lengths: The maximum length of training data, None or a list/tuple of
integers of the same size as the data samples. -1 indicates scalar data with
no 'length' checking.
shuffer_buffer: The buffer size for shuffling.
drop_remainder: Whether the last batch should be dropped in the case it has fewer than batch_size.
num_replicas_in_sync: The number of GPUs or other workers. We will generate global
batches, and each global batch is equally divisible by number of replicas.
Returns:
The batched dataset.
"""
if data_max_lengths is None:
data_max_lengths = {k: None for k in padding_values}
assert len(data_max_lengths) == len(padding_values)
if example_length_func is None:
def example_length_func(examples):
return tf.reduce_max([
tf.size(examples[k]) for k, length in data_max_lengths.items() if length != -1])
if batch_size is None and batch_size_per_gpu is None:
raise ValueError("Either `batch_size` or `batch_size_per_gpu` needs to be provided.")
elif batch_size is not None and batch_size_per_gpu is not None:
logging.info("Both `batch_size` and `batch_size_per_gpu` are provided, use `batch_size_per_gpu`.")
if batch_size_per_gpu is not None:
batch_size = int(batch_size_per_gpu * num_replicas_in_sync)
logging.info("The global batch size is {}, with batch_by_tokens={}".format(batch_size, batch_by_tokens))
# filter out empty lines
dataset = clean_dataset_by_length(dataset, data_max_lengths)
dynamic_padding_length = {k: ([] if length == -1 else [None])
for k, length in data_max_lengths.items()}
if batch_by_tokens:
# shuffle
if shuffer_buffer:
dataset = dataset.shuffle(buffer_size=shuffer_buffer)
max_length = max(max([_len or 0 for _len in data_max_lengths.values()]), 0)
if not max_length:
logging.info("Using pre-defined max length={}".format(_MAX_BUCKET_BOUNDARY))
max_length = _MAX_BUCKET_BOUNDARY
logging.info("Final check of the max length of the training data. "
"Filter out whose length is larger than {}".format(max_length))
dataset = dataset.filter(
lambda data_sample: tf.reduce_all([
(length == -1) or (length is None) or tf.size(data_sample[k]) <= max_length
for k, length in data_max_lengths.items()]))
if bucket_boundaries is None:
bucket_boundaries = create_batch_bucket_boundaries(max_length)
return _batch_examples_by_token(
dataset,
batch_size=batch_size,
drop_remainder=drop_remainder,
padding_values=padding_values,
padding_length=dynamic_padding_length,
bucket_boundaries=bucket_boundaries,
example_length_func=example_length_func,
num_replicas_in_sync=num_replicas_in_sync)
else:
# shuffle
if shuffer_buffer:
dataset = dataset.shuffle(buffer_size=shuffer_buffer)
padding_length = dynamic_padding_length
logging.info("The padding length of the dataset is {}".format(padding_length))
dataset = dataset.padded_batch(
int(batch_size // num_replicas_in_sync * num_replicas_in_sync),
padding_length, drop_remainder=drop_remainder, padding_values=padding_values)
return dataset
def adjust_batch_size(batch_size=None,
batch_size_per_gpu=None,
bucket_boundaries=None,
boundaries_reduce_to_length_fn=None,
num_replicas_in_sync=1,
verbose=True):
if batch_size is None and batch_size_per_gpu is None:
raise ValueError("At least one of the `batch_size` and `batch_size_per_gpu` should be provided.")
elif batch_size is not None and batch_size_per_gpu is not None:
logging.info("Both `batch_size` and `batch_size_per_gpu` are provided, use `batch_size_per_gpu`.")
if batch_size_per_gpu is not None:
batch_size = int(batch_size_per_gpu * num_replicas_in_sync)
if bucket_boundaries is None:
batch_size = int(batch_size // num_replicas_in_sync * num_replicas_in_sync)
if verbose:
logging.info(f"The global batch size is {batch_size} samples.")
return batch_size
logging.info(f"The global batch size is {batch_size} tokens.")
bucket_batch_sizes = []
try:
i = 0
while True:
bucket_batch_sizes.append(
int(batch_size // boundaries_reduce_to_length_fn({k: v[i] for k, v in bucket_boundaries.items()})
// num_replicas_in_sync * num_replicas_in_sync))
i += 1
except IndexError:
pass
return bucket_batch_sizes
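# Worked example (hypothetical numbers): adjust_batch_size(
#     batch_size=4096,
#     bucket_boundaries={"feature": [8, 16, 32]},
#     boundaries_reduce_to_length_fn=lambda d: d["feature"])
# returns the per-bucket batch sizes [512, 256, 128]; the while-loop stops on
# the IndexError raised once the boundary lists are exhausted.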
def batch_examples_by_token(dataset,
bucket_boundaries,
bucket_batch_sizes,
padding_values,
example_length_func,
extra_padded_shapes=None,
drop_remainder=True):
"""Group examples by similar lengths, and return batched dataset.
Each batch of similar-length examples is padded to the same length and may
contain a different number of elements in each batch, such that:
group_batch_size * padded_length <= batch_size.
This decreases the number of padding tokens per batch, which improves the
training speed.
Args:
dataset: Dataset of unbatched examples.
bucket_batch_sizes: Max number of tokens per batch of examples, or a list of batch sizes, one per bucket.
bucket_boundaries: A list of integers of the boundaries of each bucket.
padding_values: A tuple of constants for padding.
example_length_func: A callable function that maps an input example to its length(s).
extra_padded_shapes: A dict containing extra shapes (not included in bucket boundaries) for padding.
drop_remainder: Whether the last batch should be dropped in the case it has fewer than batch_size.
Returns:
Dataset of batched examples with similar lengths.
"""
cnt = 0
try:
logging.info("The details of batching logic:")
while True:
_batch = bucket_batch_sizes
if isinstance(bucket_batch_sizes, list):
_batch = bucket_batch_sizes[cnt]
_bounds = {k: v[cnt] for k, v in bucket_boundaries.items()}
logging.info(f" - batch={_batch}, bucket boundary={_bounds}")
cnt += 1
except IndexError:
logging.info(f" Total {cnt} input shapes are compiled.")
if not isinstance(bucket_batch_sizes, list):
bucket_batch_sizes = [bucket_batch_sizes] * cnt
# bucket_id will be a tensor, so convert this list to a tensor as well.
bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
bucket_boundaries = {k: tf.constant(v, dtype=tf.int32) for k, v in bucket_boundaries.items()}
def example_to_bucket_id(examples):
"""Return int64 bucket id for this example, calculated based on length."""
seq_length = example_length_func(examples)
conditions_c = tf.reduce_all([
tf.less_equal(v, bucket_boundaries[k])
for k, v in seq_length.items()], axis=0)
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
def window_size_fn(bucket_id):
"""Return number of examples to be grouped when given a bucket id."""
return bucket_batch_sizes[bucket_id]
def batching_fn(bucket_id, grouped_dataset):
"""Batch and add padding to a dataset of elements with similar lengths."""
bucket_batch_size = window_size_fn(bucket_id)
padded_shapes = {k: [v[bucket_id]] for k, v in bucket_boundaries.items()}
if extra_padded_shapes:
for k, v in extra_padded_shapes.items():
padded_shapes[k] = v
# Batch the dataset and add padding so that all input sequences in the
# examples have the same length, and all target sequences have the same
# lengths as well. Resulting lengths of inputs and targets can differ.
return grouped_dataset.padded_batch(
bucket_batch_size, padded_shapes,
padding_values=padding_values,
drop_remainder=drop_remainder)
return dataset.apply(tf.data.experimental.group_by_window(
key_func=example_to_bucket_id,
reduce_func=batching_fn,
window_size=None,
window_size_func=window_size_fn))
def take_one_record(data_path):
_file_path = flatten_string_list(data_path)[0]
if tf.io.gfile.isdir(_file_path):
_feature_file = os.path.join(_file_path, "*train*")
elif tf.io.gfile.exists(_file_path):
_feature_file = _file_path
else:
_feature_file = _file_path + "*"
dataset = tf.data.Dataset.list_files([_feature_file], shuffle=False)
dataset = dataset.interleave(
lambda f: tf.data.TFRecordDataset(f, buffer_size=128 * 1024 * 1024),
cycle_length=10,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
for x in dataset.take(1):
example = tf.train.Example()
example.ParseFromString(x.numpy())
return example
|
CPAC/func_preproc/utils.py | FCP-INDI/C-PAC | 125 | 70795 |
import numpy as np
from scipy.signal import iirnotch, firwin, filtfilt, lfilter, freqz
from matplotlib import pyplot as plt
import nibabel as nb
import os
import subprocess
import math
def add_afni_prefix(tpattern):
if tpattern:
if ".txt" in tpattern:
tpattern = "@{0}".format(tpattern)
return tpattern
def nullify(value, function=None):
from traits.trait_base import Undefined
if value is None:
return Undefined
if function:
return function(value)
return value
def chunk_ts(func_file, n_chunks=None, chunk_size=None):
func_img = nb.load(func_file)
trs = func_img.shape[3]
TR_ranges = []
if n_chunks:
chunk_size = trs/n_chunks
elif chunk_size:
n_chunks = int(trs/chunk_size)
else:
raise Exception("\n[!] Dev error: Either 'n_chunks' or 'chunk_size' "
"arguments must be passed to 'chunk_ts' function.\n")
for chunk_idx in range(0, n_chunks):
if chunk_idx == n_chunks - 1:
TR_ranges.append((int(chunk_idx*chunk_size), int(trs - 1)))
else:
TR_ranges.append((int(chunk_idx*chunk_size), int((chunk_idx+1)*chunk_size - 1)))
return TR_ranges
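# Worked example (hypothetical numbers): for a functional image with 100 time
# points, chunk_ts(func_file, n_chunks=4) gives chunk_size = 25.0 and
# TR_ranges = [(0, 24), (25, 49), (50, 74), (75, 99)]; the last chunk always
# runs to trs - 1 so no trailing volumes are dropped.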
def split_ts_chunks(func_file, tr_ranges):
if '.nii' in func_file:
ext = '.nii'
if '.nii.gz' in func_file:
ext = '.nii.gz'
split_funcs = []
for chunk_idx, tr_range in enumerate(tr_ranges):
out_file = os.path.join(os.getcwd(), os.path.basename(func_file).replace(ext, "_{0}{1}".format(chunk_idx, ext)))
in_file = "{0}[{1}..{2}]".format(func_file, tr_range[0], tr_range[1])
cmd = ["3dcalc", "-a", in_file, "-expr", "a", "-prefix", out_file]
retcode = subprocess.check_output(cmd)
split_funcs.append(out_file)
return split_funcs
def oned_text_concat(in_files):
out_file = os.path.join(os.getcwd(), os.path.basename(in_files[0].replace("_0", "")))
out_txt = []
for txt in in_files:
with open(txt, 'r') as f:
txt_lines = f.readlines()
if not out_txt:
out_txt = [x for x in txt_lines]
else:
for line in txt_lines:
if "#" in line:
continue
out_txt.append(line)
with open(out_file, 'wt') as f:
for line in out_txt:
f.write(line)
return out_file
def degrees_to_mm(degrees, head_radius):
# function to convert degrees of motion to mm
mm = 2*math.pi*head_radius*(degrees/360)
return mm
def mm_to_degrees(mm, head_radius):
# function to convert mm of motion to degrees
degrees = 360*mm/(2*math.pi*head_radius)
return degrees
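# Worked example: with the commonly assumed head radius of 50 mm,
# degrees_to_mm(1, 50) = 2 * pi * 50 * (1 / 360) ~= 0.873 mm, and
# mm_to_degrees(0.873, 50) recovers ~1 degree.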
def notch_filter_motion(motion_params, filter_type, TR, fc_RR_min=None,
fc_RR_max=None, center_freq=None, freq_bw=None,
lowpass_cutoff=None, filter_order=4):
# Adapted from DCAN Labs:
# https://github.com/DCAN-Labs/dcan_bold_processing/blob/master/
# ...matlab_code/filtered_movement_regressors.m
if "ms" in TR:
TR = float(TR.replace("ms", ""))/1000
elif "ms" not in TR and "s" in TR:
TR = float(TR.replace("s", ""))
params_data = np.loadtxt(motion_params)
# Sampling frequency
fs = 1 / TR
# Nyquist frequency
fNy = fs / 2
if filter_type == "notch":
# Respiratory Rate
if fc_RR_min and fc_RR_max:
rr = [float(fc_RR_min) / float(60),
float(fc_RR_max) / float(60)]
rr_fNy = [rr[0] + fNy, rr[1] + fNy]
fa = abs(rr - np.floor(np.divide(rr_fNy, fs)) * fs)
elif center_freq and freq_bw:
tail = float(freq_bw)/float(2)
fa = [center_freq-tail, center_freq+tail]
W_notch = np.divide(fa, fNy)
Wn = np.mean(W_notch)
bw = np.diff(W_notch)
# for filter info
center_freq = Wn * fNy
bandwidth = fa[1] - fa[0]
Q = Wn/bw
[b_filt, a_filt] = iirnotch(Wn, Q)
num_f_apply = np.floor(filter_order / 2)
filter_info = f"Motion estimate filter information\n\nType: Notch\n" \
f"\nCenter freq: {center_freq}\nBandwidth: {bandwidth}\n\n" \
f"Wn: {Wn}\nQ: {Q}\n\n" \
f"Based on:\nSampling freq: {fs}\nNyquist freq: {fNy}"
elif filter_type == "lowpass":
if fc_RR_min:
rr = float(fc_RR_min) / float(60)
rr_fNy = rr + fNy
fa = abs(rr - np.floor(np.divide(rr_fNy, fs)) * fs)
elif lowpass_cutoff:
fa = lowpass_cutoff
Wn = fa/fNy
if filter_order:
b_filt = firwin(filter_order+1, Wn)
a_filt = 1
num_f_apply = 0
filter_info = f"Motion estimate filter information\n\nType: Lowpass" \
f"\n\nCutoff freq: {fa}\nWn: {Wn}\n\n" \
f"Based on:\nSampling freq: {fs}\nNyquist freq: {fNy}"
filter_design = os.path.join(os.getcwd(),
"motion_estimate_filter_design.txt")
filter_plot = os.path.join(os.getcwd(),
"motion_estimate_filter_freq-response.png")
# plot frequency response for user info
w, h = freqz(b_filt, a_filt, fs=fs)
fig, ax1 = plt.subplots()
ax1.set_title('Motion estimate filter frequency response')
ax1.plot(w, 20 * np.log10(abs(h)), 'b')
ax1.set_ylabel('Amplitude [dB]', color='b')
ax1.set_xlabel('Frequency [Hz]')
plt.savefig(filter_plot)
with open(filter_design, 'wt') as f:
f.write(filter_info)
# convert rotation params from degrees to mm
params_data[:, 0:3] = degrees_to_mm(params_data[:, 0:3], head_radius=50)
filtered_params = lfilter(b_filt, a_filt, params_data.T, zi=None)
for i in range(0, int(num_f_apply) - 1):
filtered_params = lfilter(b_filt, a_filt, filtered_params, zi=None)
# back rotation params to degrees
filtered_params[0:3,:] = mm_to_degrees(filtered_params[0:3,:], head_radius = 50)
filtered_motion_params = os.path.join(os.getcwd(),
"{0}_filtered.1D".format(os.path.basename(motion_params)))
np.savetxt(filtered_motion_params, filtered_params.T, fmt='%f')
return (filtered_motion_params, filter_design, filter_plot)
|
examples/synthetic/park1_constrained/park1_constrained.py | hase1128/dragonfly | 675 | 70818 |
<gh_stars>100-1000
"""
Park1 function with three domains.
-- <EMAIL>
"""
# pylint: disable=invalid-name
import numpy as np
def park1_constrained(x):
""" Computes the park1 function. """
return park1_constrained_z_x([1.0, 1.0, 1.0], x)
def park1_constrained_z_x(z, x):
""" Computes the park1 function. """
x1 = max(x[0][0], 0.01) * np.sqrt(z[0])
x2 = x[0][1] * np.sqrt(z[1])
x3 = x[1]/100 * np.sqrt(z[2])
x4 = (x[2] - 10)/6.0 * np.sqrt((z[0] + z[1] + z[2]) / 3.0)
ret1 = (x1/2) * (np.sqrt(1 + (x2 + x3**2)*x4/(x1**2)) - 1)
ret2 = (x1 + 3*x4) * np.exp(1 + np.sin(x3))
return ret1 + ret2
# Write a function like this called obj.
def objective(x):
""" Objective. """
return park1_constrained(x)
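# Illustrative call (hypothetical point): objective([[0.5, 0.5], 50.0, 16.0])
# evaluates park1 at x1 = 0.5, x2 = 0.5, x3 = 50 / 100 = 0.5 and
# x4 = (16 - 10) / 6 = 1.0, with z fixed to [1.0, 1.0, 1.0].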
|
example/pyctp2/trader/ports_info.py | mmmaaaggg/pyctp_lovelylain | 358 | 70835 |
<filename>example/pyctp2/trader/ports_info.py
# -*- coding: utf-8 -*-
import getpass
class PortsInfo(object):
def __init__(self, name, ports, broker, investor=""):
"""
CTP connection information.
ports is a list of ports, each element of the form "tcp://aaa.bbb.ccc.ddd:ppppp".
MDUser does not need an investor.
"""
self.name = name
self.ports = ports
self.broker = broker
self.investor = investor
self.passwd = ""
def input_account(self):
self.investor = input("Enter username: ")
self.passwd = getpass.getpass("Enter login password: ")
class PortsStub(object):
def __init__(self):
self.name = "PortsStub"
self.ports = []
self.broker = "BrokerOfStub"
self.investor = "InvestorOfStub"
self.passwd = ""
def input_account(self):
pass
|
beta_rec/models/userKNN.py | mengzaiqiao/TVBR | 126 | 70844 |
import numpy as np
import scipy.sparse as ssp
import torch
from beta_rec.models.torch_engine import ModelEngine
from beta_rec.utils.common_util import timeit
def top_k(values, k, exclude=[]):
"""Return the indices of the k items with the highest value in the list of values.
Exclude the ids from the list "exclude".
"""
# Put low similarity to viewed items to exclude them from recommendations
values[exclude] = -np.inf
return list(np.argpartition(-values, range(k))[:k])
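# Illustrative example (hypothetical values):
#   top_k(np.array([0.1, 0.9, 0.4, 0.7]), 2, exclude=[1])
# masks index 1 with -inf (in place) and returns [3, 2], the indices of the
# two highest remaining values in descending order.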
def get_sparse_vector(ids, length, values=None):
"""Sparse vector generation.
If "values" is None, the elements are set to 1.
"""
n = len(ids)
if values is None:
return ssp.coo_matrix((np.ones(n), (ids, np.zeros(n))), (length, 1)).tocsc()
else:
return ssp.coo_matrix((values, (ids, np.zeros(n))), (length, 1)).tocsc()
class UserKNN(torch.nn.Module):
"""A PyTorch Module for UserKNN model."""
def __init__(self, config):
"""Initialize UserKNN Class."""
super(UserKNN, self).__init__()
self.config = config
self.device = self.config["device_str"]
self.n_users = self.config["n_users"]
self.n_items = self.config["n_items"]
self.neighbourhood_size = self.config["neighbourhood_size"]
def prepare_model(self, data):
"""Load data into matrices.
:param data:
:return:
"""
row = data.train["col_user"].to_numpy()
col = data.train["col_item"].to_numpy()
self.binary_user_item = ssp.coo_matrix(
(np.ones(len(data.train)), (row, col)), shape=(self.n_users, self.n_items)
).tocsr()
def _items_count_per_user(self):
"""Calculate the number of interacted items for an user.
:return:
"""
if not hasattr(self, "__items_count_per_user"):
self.__items_count_per_user = np.asarray(
self.binary_user_item.sum(axis=1)
).ravel()
return self.__items_count_per_user
def similarity_with_users(self, sequence):
"""Calculate the similarity between the a given user and all users according to the overlap ratio.
:param sequence: the user's interacted items
:return:
"""
sparse_sequence = get_sparse_vector(sequence, self.n_items)
overlap = self.binary_user_item.dot(sparse_sequence).toarray().ravel()
overlap[overlap != 0] /= np.sqrt(self._items_count_per_user()[overlap != 0])
return overlap
def forward(self, batch_data):
"""Redundant method for UserKNN.
Args:
batch_data: tuple consists of (users, pos_items, neg_items), which must be LongTensor.
"""
return 0.0
def predict(self, users, items):
"""Predict result with the model.
Args:
users (int, or list of int): user id(s).
items (int, or list of int): item id(s).
Return:
scores (int, or list of int): predicted scores of these user-item pairs.
"""
scores = []
for i in range(len(users)):
sequence = self.binary_user_item.getrow(users[i]).nonzero()[0]
sim_with_users = self.similarity_with_users(sequence)
nearest_neighbour = top_k(sim_with_users, self.neighbourhood_size)
neighbour_items = get_sparse_vector(
nearest_neighbour,
self.n_users,
values=sim_with_users[nearest_neighbour],
)
sim_with_items = (
self.binary_user_item.T.dot(neighbour_items).toarray().ravel()
)
sim_with_items[sequence] = -np.inf
scores.append(sim_with_items[items[i]])
return torch.tensor(scores)
class UserKNNEngine(ModelEngine):
"""UserKNNEngine Class."""
def __init__(self, config):
"""Initialize UserKNNEngine Class."""
print("userKNNEngine init")
self.config = config
self.model = UserKNN(config["model"])
# super(UserKNNEngine, self).__init__(config)
def train_single_batch(self, batch_data):
"""Train a single batch.
However, UserKNN is a neighbourhood model that bases its predictions on the similarity relationships among users.
It requires no training procedure.
Args:
batch_data (list): batch users, positive items and negative items.
Return:
0
"""
assert hasattr(self, "model"), "Please specify the exact model !"
return 0
@timeit
def train_an_epoch(self, train_loader, epoch_id):
"""Train a epoch, generate batch_data from data_loader, and call train_single_batch.
Like the train_single_batch method, UserKNN requires no training procedure.
Args:
train_loader (DataLoader):
epoch_id (int): set to 1.
"""
assert hasattr(self, "model"), "Please specify the exact model !"
# self.model.train()
print(f"[Training Epoch {epoch_id}] skipped")
self.writer.add_scalar("model/loss", 0.0, epoch_id)
self.writer.add_scalar("model/regularizer", 0.0, epoch_id)
|
moldesign/data/chemical_components.py | Autodesk/molecular-design-toolkit | 147 | 70857 |
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides access to the chemical component database, which is stored in
``moldesign/_static_data/chemical_components``
and can be re-generated by running
``cd moldesign/_static_data/ && scripts/generate_residue_data.py --download``
"""
import os
from . import PACKAGEPATH
from moldesign import utils
class _DatabaseEntry(object):
""" Maps into a field stored in the database
"""
def __init__(self, hostdb, keyname):
self.hostdb = hostdb
self.keyname = keyname
self.index = self.hostdb['__FIELDS__']['RESFIELDS'].index(keyname)
def __repr__(self):
return '<Chemical component dictionary: "%s" entries>' % self.keyname
def __getitem__(self, item):
return self.hostdb[item][self.index]
__contains__ = utils.Alias('hostdb.__contains__')
def keys(self):
for key in self.hostdb.keys():
if key == '__FIELDS__':
continue
yield key
def items(self):
for key in self:
yield key, self[key]
__iter__ = keys
# This is a very big dict, so we load it as a compressed database
_bondfilename = os.path.join(PACKAGEPATH, '_static_data', 'chemical_components')
CCD_DATABASE = utils.CompressedJsonDbm(_bondfilename, 'r', dbm=utils.ReadOnlyDumb)
RESIDUE_BONDS = _DatabaseEntry(CCD_DATABASE, 'bonds')
RESIDUE_ATOMS = _DatabaseEntry(CCD_DATABASE, 'atoms')
RESIDUE_CCD_NAMES = _DatabaseEntry(CCD_DATABASE, 'name')
RESIDUE_CCD_TYPES = _DatabaseEntry(CCD_DATABASE, 'type')
|
prediction_flow/pytorch/tests/test_wide_deep.py | dydcfg/prediction-flow | 211 | 70865 |
<filename>prediction_flow/pytorch/tests/test_wide_deep.py
from prediction_flow.features import Number, Category, Sequence, Features
from prediction_flow.transformers.column import (
StandardScaler, CategoryEncoder, SequenceEncoder)
from prediction_flow.pytorch import WideDeep
from .utils import prepare_dataloader
def test_normal():
number_features = [
Number('userAge', StandardScaler()),
Number('rating', StandardScaler())]
category_features = [
Category('userId', CategoryEncoder(min_cnt=1)),
Category('movieId', CategoryEncoder(min_cnt=1)),
Category('topGenre', CategoryEncoder(min_cnt=1))]
sequence_features = [
Sequence('title', SequenceEncoder(sep='|', min_cnt=1)),
Sequence('genres', SequenceEncoder(sep='|', min_cnt=1)),
Sequence('clickedMovieIds',
SequenceEncoder(sep='|', min_cnt=1, max_len=5)),
Sequence('clickedMovieTopGenres',
SequenceEncoder(sep='|', min_cnt=1, max_len=5))]
features = Features(
number_features=number_features,
category_features=category_features,
sequence_features=sequence_features)
wide_features = ['rating', 'title', 'genres']
deep_features = ['userAge', 'rating', 'userId', 'movieId', 'topGenre',
'clickedMovieIds', 'clickedMovieTopGenres']
cross_features = [('movieId', 'clickedMovieIds'),
('topGenre', 'clickedMovieTopGenres')]
dataloader, _ = prepare_dataloader(features)
model = WideDeep(
features, wide_features, deep_features, cross_features,
num_classes=2, embedding_size=4, hidden_layers=(8, 4),
final_activation='sigmoid', dropout=0.3)
model(next(iter(dataloader)))
def test_without_number_feature():
number_features = []
category_features = [
Category('userId', CategoryEncoder(min_cnt=1)),
Category('movieId', CategoryEncoder(min_cnt=1)),
Category('topGenre', CategoryEncoder(min_cnt=1))]
sequence_features = [
Sequence('title', SequenceEncoder(sep='|', min_cnt=1)),
Sequence('genres', SequenceEncoder(sep='|', min_cnt=1)),
Sequence('clickedMovieIds',
SequenceEncoder(sep='|', min_cnt=1, max_len=5)),
Sequence('clickedMovieTopGenres',
SequenceEncoder(sep='|', min_cnt=1, max_len=5))]
features = Features(
number_features=number_features,
category_features=category_features,
sequence_features=sequence_features)
wide_features = ['title', 'genres']
deep_features = ['userId', 'movieId', 'topGenre',
'clickedMovieIds', 'clickedMovieTopGenres']
cross_features = [('movieId', 'clickedMovieIds'),
('topGenre', 'clickedMovieTopGenres')]
dataloader, _ = prepare_dataloader(features)
model = WideDeep(
features, wide_features, deep_features, cross_features,
num_classes=2, embedding_size=4, hidden_layers=(8, 4),
final_activation='sigmoid', dropout=0.3)
model(next(iter(dataloader)))
def test_without_category_feature():
number_features = []
category_features = []
sequence_features = [
Sequence('title', SequenceEncoder(sep='|', min_cnt=1)),
Sequence('genres', SequenceEncoder(sep='|', min_cnt=1)),
Sequence('clickedMovieIds',
SequenceEncoder(sep='|', min_cnt=1, max_len=5)),
Sequence('clickedMovieTopGenres',
SequenceEncoder(sep='|', min_cnt=1, max_len=5))]
features = Features(
number_features=number_features,
category_features=category_features,
sequence_features=sequence_features)
wide_features = ['title', 'genres']
deep_features = ['clickedMovieIds', 'clickedMovieTopGenres']
dataloader, _ = prepare_dataloader(features)
model = WideDeep(
features, wide_features, deep_features, [],
num_classes=2, embedding_size=4, hidden_layers=(8, 4),
final_activation='sigmoid', dropout=0.3)
model(next(iter(dataloader)))
def test_only_with_number_features():
number_features = [
Number('userAge', StandardScaler()),
Number('rating', StandardScaler())]
category_features = []
sequence_features = []
features = Features(
number_features=number_features,
category_features=category_features,
sequence_features=sequence_features)
wide_features = ['rating', 'userAge']
dataloader, _ = prepare_dataloader(features)
model = WideDeep(
features, wide_features, [], [],
num_classes=2, embedding_size=4, hidden_layers=(8, 4),
final_activation='sigmoid', dropout=0.3)
model(next(iter(dataloader)))
|
tests/workflow.py | mullikine/chronology | 189 | 70867 |
<gh_stars>100-1000
import asyncio
# TODO import __init__ above
async def logic():
# TODO add tests
pass
# main(logic)
|
dfirtrack_main/tests/task/test_task_creator_views.py | thomas-kropeit/dfirtrack | 273 | 70909 |
import urllib.parse
from datetime import datetime
from unittest.mock import patch
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from django.utils import timezone
from dfirtrack_main.models import (
System,
Systemstatus,
Task,
Taskname,
Taskpriority,
Taskstatus,
)
class TaskCreatorViewTestCase(TestCase):
"""task creator view tests"""
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(
username='testuser_task_creator', password='<PASSWORD>'
)
# create objects
Taskname.objects.create(taskname_name='task_creator_taskname_1')
Taskname.objects.create(taskname_name='task_creator_taskname_2')
Taskname.objects.create(taskname_name='task_creator_taskname_3')
Taskpriority.objects.create(taskpriority_name='taskpriority_1')
# create object
systemstatus_1 = Systemstatus.objects.create(
systemstatus_name='task_creator_systemstatus_1'
)
# create objects
System.objects.create(
system_name='task_creator_system_1',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
System.objects.create(
system_name='task_creator_system_2',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
System.objects.create(
system_name='task_creator_system_3',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
def test_task_creator_not_logged_in(self):
"""test creator view"""
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/creator/', safe='')
# get response
response = self.client.get('/task/creator/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_task_creator_logged_in(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get response
response = self.client.get('/task/creator/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_creator_template(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get response
response = self.client.get('/task/creator/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_creator.html')
def test_task_creator_get_user_context(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get response
response = self.client.get('/task/creator/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task_creator')
def test_task_creator_redirect(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# create url
destination = urllib.parse.quote('/task/creator/', safe='/')
# get response
response = self.client.get('/task/creator', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_task_creator_post_redirect(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_1 = Taskname.objects.get(taskname_name='task_creator_taskname_1')
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='taskpriority_1')
taskstatus_pending = Taskstatus.objects.get(taskstatus_name='10_pending')
system_1 = System.objects.get(system_name='task_creator_system_1')
# create post data
data_dict = {
'taskname': [
taskname_1.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_pending.taskstatus_id,
'system': [
system_1.system_id,
],
}
# create url
destination = '/task/'
# get response
response = self.client.post('/task/creator/', data_dict)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_task_creator_post_system_and_tasks(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_1 = Taskname.objects.get(taskname_name='task_creator_taskname_1')
taskname_2 = Taskname.objects.get(taskname_name='task_creator_taskname_2')
taskname_3 = Taskname.objects.get(taskname_name='task_creator_taskname_3')
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='taskpriority_1')
taskstatus_pending = Taskstatus.objects.get(taskstatus_name='10_pending')
system_1 = System.objects.get(system_name='task_creator_system_1')
system_2 = System.objects.get(system_name='task_creator_system_2')
system_3 = System.objects.get(system_name='task_creator_system_3')
# create post data
data_dict = {
'taskname': [taskname_1.taskname_id, taskname_2.taskname_id],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_pending.taskstatus_id,
'system': [system_1.system_id, system_2.system_id],
}
# get response
self.client.post('/task/creator/', data_dict)
# get object
task_1 = Task.objects.get(
system=system_1,
taskname=taskname_1,
)
# compare
self.assertTrue(system_1.task_set.filter(taskname=taskname_1).exists())
self.assertTrue(system_1.task_set.filter(taskname=taskname_2).exists())
self.assertFalse(system_1.task_set.filter(taskname=taskname_3).exists())
self.assertTrue(system_2.task_set.filter(taskname=taskname_1).exists())
self.assertTrue(system_2.task_set.filter(taskname=taskname_2).exists())
self.assertFalse(system_2.task_set.filter(taskname=taskname_3).exists())
self.assertFalse(system_3.task_set.filter(taskname=taskname_1).exists())
self.assertFalse(system_3.task_set.filter(taskname=taskname_2).exists())
self.assertFalse(system_3.task_set.filter(taskname=taskname_3).exists())
self.assertEqual(task_1.task_started_time, None)
self.assertEqual(task_1.task_finished_time, None)
def test_task_creator_post_times_working(self):
"""test creator view"""
# mock timezone.now()
dt = datetime(2020, 1, 2, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=dt):
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_started = Taskname.objects.create(
taskname_name='task_creator_started_time_working'
)
taskpriority_1 = Taskpriority.objects.get(
taskpriority_name='taskpriority_1'
)
taskstatus_working = Taskstatus.objects.get(taskstatus_name='20_working')
system_1 = System.objects.get(system_name='task_creator_system_1')
# create post data
data_dict = {
'taskname': [
taskname_started.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_working.taskstatus_id,
'system': [
system_1.system_id,
],
}
# get response
self.client.post('/task/creator/', data_dict)
# get object
task_started = Task.objects.get(
system=system_1,
taskname=taskname_started,
)
# compare
self.assertEqual(task_started.task_started_time, timezone.now())
self.assertEqual(task_started.task_finished_time, None)
def test_task_creator_post_times_done(self):
"""test creator view"""
# mock timezone.now()
dt = datetime(2020, 3, 4, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=dt):
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_finished = Taskname.objects.create(
taskname_name='task_creator_finished_time_working'
)
taskpriority_1 = Taskpriority.objects.get(
taskpriority_name='taskpriority_1'
)
taskstatus_done = Taskstatus.objects.get(taskstatus_name='30_done')
system_1 = System.objects.get(system_name='task_creator_system_1')
# create post data
data_dict = {
'taskname': [
taskname_finished.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_done.taskstatus_id,
'system': [
system_1.system_id,
],
}
# get response
self.client.post('/task/creator/', data_dict)
# get object
task_finished = Task.objects.get(
system=system_1,
taskname=taskname_finished,
)
# compare
self.assertEqual(task_finished.task_started_time, timezone.now())
self.assertEqual(task_finished.task_finished_time, timezone.now())
def test_task_creator_post_invalid_reload(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# create post data
data_dict = {}
# get response
response = self.client.post('/task/creator/', data_dict)
# compare
self.assertEqual(response.status_code, 200)
def test_task_creator_post_invalid_template(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# create post data
data_dict = {}
# get response
response = self.client.post('/task/creator/', data_dict)
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_creator.html')
def test_task_creator_post_messages(self):
"""test creator view"""
# login testuser
self.client.login(
username='testuser_task_creator', password='<PASSWORD>'
)
# get objects
taskname_1 = Taskname.objects.get(taskname_name='task_creator_taskname_1')
taskname_2 = Taskname.objects.get(taskname_name='task_creator_taskname_2')
taskname_3 = Taskname.objects.get(taskname_name='task_creator_taskname_3')
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='taskpriority_1')
taskstatus_pending = Taskstatus.objects.get(taskstatus_name='10_pending')
system_1 = System.objects.get(system_name='task_creator_system_1')
system_2 = System.objects.get(system_name='task_creator_system_2')
system_3 = System.objects.get(system_name='task_creator_system_3')
# create post data
data_dict = {
'taskname': [
taskname_1.taskname_id,
taskname_2.taskname_id,
taskname_3.taskname_id,
],
'taskpriority': taskpriority_1.taskpriority_id,
'taskstatus': taskstatus_pending.taskstatus_id,
'system': [system_1.system_id, system_2.system_id, system_3.system_id],
}
# get response
response = self.client.post('/task/creator/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(messages[0]), 'Task creator started')
self.assertEqual(str(messages[1]), '9 tasks created for 3 systems.')
|
src/python/python-proto/python_proto/tests/strategies.py | inickles/grapl | 313 | 70944 |
<filename>src/python/python-proto/python_proto/tests/strategies.py
import datetime
import uuid
from typing import Mapping, Sequence, Union
import hypothesis.strategies as st
from python_proto import SerDe
from python_proto.api import (
DecrementOnlyIntProp,
DecrementOnlyUintProp,
Edge,
EdgeList,
GraphDescription,
IdentifiedGraph,
IdentifiedNode,
IdStrategy,
ImmutableIntProp,
ImmutableStrProp,
ImmutableUintProp,
IncrementOnlyIntProp,
IncrementOnlyUintProp,
MergedEdge,
MergedEdgeList,
MergedGraph,
MergedNode,
NodeDescription,
NodeProperty,
Session,
Static,
)
from python_proto.common import Duration, Timestamp, Uuid
from python_proto.metrics import (
Counter,
Gauge,
GaugeType,
Histogram,
Label,
MetricWrapper,
)
from python_proto.pipeline import Envelope, Metadata, RawLog
#
# constants
#
# These values are used to parametrize the strategies defined below. Some of
# them ensure the generated data actually makes sense. Others are used to
# ensure strategies perform well. Please don't change these without a Very Good
# Reason(TM).
UINT64_MIN = 0
UINT64_MAX = 2**64 - 1
INT64_MIN = -(2**63) + 1
INT64_MAX = 2**63 - 1
INT32_MIN = -(2**31) + 1
INT32_MAX = 2**31 - 1
DURATION_SECONDS_MIN = 0
DURATION_SECONDS_MAX = UINT64_MAX
DURATION_NANOS_MIN = 0
DURATION_NANOS_MAX = 10**9 - 1
MAX_LIST_SIZE = 5
MIN_LOG_EVENT_SIZE = 0
MAX_LOG_EVENT_SIZE = 1024
#
# common
#
def uuids(
lsbs: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
msbs: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
) -> st.SearchStrategy[Uuid]:
return st.builds(Uuid, lsb=lsbs, msb=msbs)
def durations(
seconds: st.SearchStrategy[int] = st.integers(
min_value=DURATION_SECONDS_MIN, max_value=DURATION_SECONDS_MAX
),
nanos: st.SearchStrategy[int] = st.integers(
min_value=DURATION_NANOS_MIN,
max_value=DURATION_NANOS_MAX,
),
) -> st.SearchStrategy[Duration]:
return st.builds(Duration, seconds=seconds, nanos=nanos)
def timestamps(
durations: st.SearchStrategy[Duration] = durations(),
before_epochs: st.SearchStrategy[bool] = st.booleans(),
) -> st.SearchStrategy[Timestamp]:
return st.builds(Timestamp, duration=durations, before_epoch=before_epochs)
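# Illustrative use in a property-based test (hypothetical test body; the
# round-trip method names are placeholders, not the actual SerDe API):
#
#   from hypothesis import given
#
#   @given(timestamps())
#   def test_timestamp_roundtrip(ts: Timestamp) -> None:
#       assert Timestamp.deserialize(ts.serialize()) == ts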
#
# pipeline
#
def metadatas(
trace_ids: st.SearchStrategy[uuid.UUID] = st.uuids(),
tenant_ids: st.SearchStrategy[uuid.UUID] = st.uuids(),
event_source_ids: st.SearchStrategy[uuid.UUID] = st.uuids(),
created_times: st.SearchStrategy[datetime.datetime] = st.datetimes(),
last_updated_times: st.SearchStrategy[datetime.datetime] = st.datetimes(),
) -> st.SearchStrategy[Metadata]:
return st.builds(
Metadata,
trace_id=trace_ids,
tenant_id=tenant_ids,
event_source_id=event_source_ids,
created_time=created_times,
last_updated_time=last_updated_times,
)
def raw_logs(
log_events: st.SearchStrategy[bytes] = st.binary(
min_size=MIN_LOG_EVENT_SIZE, max_size=MAX_LOG_EVENT_SIZE
)
) -> st.SearchStrategy[RawLog]:
return st.builds(RawLog, log_event=log_events)
def envelopes(
metadatas: st.SearchStrategy[Metadata] = metadatas(),
inner_messages: st.SearchStrategy[SerDe] = uuids()
| timestamps()
| durations()
| raw_logs(), # TODO: add more here as they're implemented
) -> st.SearchStrategy[Envelope]:
return st.builds(
Envelope,
metadata=metadatas,
inner_message=inner_messages,
)
#
# api
#
def sessions(
primary_key_properties: st.SearchStrategy[Sequence[str]] = st.lists(
st.text(), max_size=MAX_LIST_SIZE
),
primary_key_requires_asset_ids: st.SearchStrategy[bool] = st.booleans(),
create_times: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
last_seen_times: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
terminate_times: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
) -> st.SearchStrategy[Session]:
return st.builds(
Session,
primary_key_properties=primary_key_properties,
primary_key_requires_asset_id=primary_key_requires_asset_ids,
create_time=create_times,
last_seen_time=last_seen_times,
terminate_time=terminate_times,
)
def statics(
primary_key_properties: st.SearchStrategy[Sequence[str]] = st.lists(
st.text(), max_size=MAX_LIST_SIZE
),
primary_key_requires_asset_ids: st.SearchStrategy[bool] = st.booleans(),
) -> st.SearchStrategy[Static]:
return st.builds(
Static,
primary_key_properties=primary_key_properties,
primary_key_requires_asset_id=primary_key_requires_asset_ids,
)
def id_strategies(
strategies: st.SearchStrategy[Union[Session, Static]] = st.one_of(
sessions(), statics()
)
) -> st.SearchStrategy[IdStrategy]:
return st.builds(
IdStrategy,
strategy=strategies,
)
def increment_only_uint_props(
props: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
) -> st.SearchStrategy[IncrementOnlyUintProp]:
return st.builds(IncrementOnlyUintProp, prop=props)
def immutable_uint_props(
props: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
) -> st.SearchStrategy[ImmutableUintProp]:
return st.builds(ImmutableUintProp, prop=props)
def decrement_only_uint_props(
props: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
) -> st.SearchStrategy[DecrementOnlyUintProp]:
return st.builds(DecrementOnlyUintProp, prop=props)
def increment_only_int_props(
props: st.SearchStrategy[int] = st.integers(
min_value=INT64_MIN, max_value=INT64_MAX
),
) -> st.SearchStrategy[IncrementOnlyIntProp]:
return st.builds(IncrementOnlyIntProp, prop=props)
def immutable_int_props(
props: st.SearchStrategy[int] = st.integers(
min_value=INT64_MIN, max_value=INT64_MAX
),
) -> st.SearchStrategy[ImmutableIntProp]:
return st.builds(ImmutableIntProp, prop=props)
def decrement_only_int_props(
props: st.SearchStrategy[int] = st.integers(
min_value=INT64_MIN, max_value=INT64_MAX
),
) -> st.SearchStrategy[DecrementOnlyIntProp]:
return st.builds(DecrementOnlyIntProp, prop=props)
def immutable_str_props(
props: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[ImmutableStrProp]:
return st.builds(ImmutableStrProp, prop=props)
def node_properties(
properties: st.SearchStrategy[
Union[
IncrementOnlyUintProp,
DecrementOnlyUintProp,
ImmutableUintProp,
IncrementOnlyIntProp,
DecrementOnlyIntProp,
ImmutableIntProp,
ImmutableStrProp,
]
] = st.one_of(
increment_only_uint_props(),
decrement_only_uint_props(),
immutable_uint_props(),
increment_only_int_props(),
decrement_only_int_props(),
immutable_int_props(),
immutable_str_props(),
)
) -> st.SearchStrategy[NodeProperty]:
return st.builds(NodeProperty, property_=properties)
def node_descriptions(
properties: st.SearchStrategy[Mapping[str, NodeProperty]] = st.dictionaries(
keys=st.text(), values=node_properties()
),
node_keys: st.SearchStrategy[str] = st.text(),
node_types: st.SearchStrategy[str] = st.text(),
id_strategies: st.SearchStrategy[Sequence[IdStrategy]] = st.lists(
id_strategies(), max_size=MAX_LIST_SIZE
),
) -> st.SearchStrategy[NodeDescription]:
return st.builds(
NodeDescription,
properties=properties,
node_key=node_keys,
node_type=node_types,
id_strategy=id_strategies,
)
def identified_nodes(
properties: st.SearchStrategy[Mapping[str, NodeProperty]] = st.dictionaries(
keys=st.text(), values=node_properties()
),
node_keys: st.SearchStrategy[str] = st.text(),
node_types: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[IdentifiedNode]:
return st.builds(
IdentifiedNode,
properties=properties,
node_key=node_keys,
node_type=node_types,
)
def merged_nodes(
properties: st.SearchStrategy[Mapping[str, NodeProperty]] = st.dictionaries(
keys=st.text(), values=node_properties()
),
uids: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
node_keys: st.SearchStrategy[str] = st.text(),
node_types: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[MergedNode]:
return st.builds(
MergedNode,
properties=properties,
uid=uids,
node_key=node_keys,
node_type=node_types,
)
def edges(
from_node_keys: st.SearchStrategy[str] = st.text(),
to_node_keys: st.SearchStrategy[str] = st.text(),
edge_names: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[Edge]:
return st.builds(
Edge,
from_node_key=from_node_keys,
to_node_key=to_node_keys,
edge_name=edge_names,
)
def edge_lists(
edges: st.SearchStrategy[Sequence[Edge]] = st.lists(
edges(), max_size=MAX_LIST_SIZE
),
) -> st.SearchStrategy[EdgeList]:
return st.builds(
EdgeList,
edges=edges,
)
def merged_edges(
from_uids: st.SearchStrategy[str] = st.text(),
from_node_keys: st.SearchStrategy[str] = st.text(),
to_uids: st.SearchStrategy[str] = st.text(),
to_node_keys: st.SearchStrategy[str] = st.text(),
edge_names: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[MergedEdge]:
return st.builds(
MergedEdge,
from_uid=from_uids,
from_node_key=from_node_keys,
to_uid=to_uids,
to_node_key=to_node_keys,
edge_name=edge_names,
)
def merged_edge_lists(
edges: st.SearchStrategy[Sequence[MergedEdge]] = st.lists(
merged_edges(), max_size=MAX_LIST_SIZE
),
) -> st.SearchStrategy[MergedEdgeList]:
return st.builds(
MergedEdgeList,
edges=edges,
)
def graph_descriptions(
nodes: st.SearchStrategy[Mapping[str, NodeDescription]] = st.dictionaries(
keys=st.text(), values=node_descriptions()
),
edges: st.SearchStrategy[Mapping[str, EdgeList]] = st.dictionaries(
keys=st.text(), values=edge_lists()
),
) -> st.SearchStrategy[GraphDescription]:
return st.builds(
GraphDescription,
nodes=nodes,
edges=edges,
)
def identified_graphs(
nodes: st.SearchStrategy[Mapping[str, IdentifiedNode]] = st.dictionaries(
keys=st.text(), values=identified_nodes()
),
edges: st.SearchStrategy[Mapping[str, EdgeList]] = st.dictionaries(
keys=st.text(), values=edge_lists()
),
) -> st.SearchStrategy[IdentifiedGraph]:
return st.builds(
IdentifiedGraph,
nodes=nodes,
edges=edges,
)
def merged_graphs(
nodes: st.SearchStrategy[Mapping[str, MergedNode]] = st.dictionaries(
keys=st.text(), values=merged_nodes()
),
edges: st.SearchStrategy[Mapping[str, MergedEdgeList]] = st.dictionaries(
keys=st.text(), values=merged_edge_lists()
),
) -> st.SearchStrategy[MergedGraph]:
return st.builds(
MergedGraph,
nodes=nodes,
edges=edges,
)
#
# metrics
#
def labels(
keys: st.SearchStrategy[str] = st.text(),
values: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[Label]:
return st.builds(Label, key=keys, value=values)
def counters(
names: st.SearchStrategy[str] = st.text(),
increments: st.SearchStrategy[int] = st.integers(
min_value=UINT64_MIN, max_value=UINT64_MAX
),
labels: st.SearchStrategy[Sequence[Label]] = st.lists(
labels(), max_size=MAX_LIST_SIZE
),
) -> st.SearchStrategy[Counter]:
return st.builds(Counter, name=names, increment=increments, labels=labels)
def gauge_types() -> st.SearchStrategy[GaugeType]:
return st.sampled_from(GaugeType)
def gauges(
gauge_types: st.SearchStrategy[GaugeType] = gauge_types(),
names: st.SearchStrategy[str] = st.text(),
values: st.SearchStrategy[float] = st.floats(allow_nan=False, allow_infinity=False),
labels: st.SearchStrategy[Sequence[Label]] = st.lists(
labels(), max_size=MAX_LIST_SIZE
),
) -> st.SearchStrategy[Gauge]:
return st.builds(
Gauge, gauge_type=gauge_types, name=names, value=values, labels=labels
)
def histograms(
names: st.SearchStrategy[str] = st.text(),
values: st.SearchStrategy[float] = st.floats(allow_nan=False, allow_infinity=False),
labels: st.SearchStrategy[Sequence[Label]] = st.lists(
labels(), max_size=MAX_LIST_SIZE
),
) -> st.SearchStrategy[Histogram]:
return st.builds(Histogram, name=names, value=values, labels=labels)
def metric_wrappers(
metrics: st.SearchStrategy[Union[Counter, Gauge, Histogram]] = st.one_of(
counters(), gauges(), histograms()
)
) -> st.SearchStrategy[MetricWrapper]:
return st.builds(MetricWrapper, metric=metrics)
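# Informal usage sketch (not part of the original module): each function above
# is a Hypothesis strategy factory, so a property-based test would consume them
# roughly like this:
#
#     from hypothesis import given
#
#     @given(graph=merged_graphs())
#     def test_merged_graph_roundtrip(graph):
#         ...  # exercise serialization or graph-merging code with `graph`
#
# The test body here is an assumption for illustration; only the strategy
# factories themselves come from this module.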
|
fuzzers/042-clk-bufg-config/generate.py
|
rw1nkler/prjxray
| 583 |
70957
|
<filename>fuzzers/042-clk-bufg-config/generate.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by an ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
from prjxray.segmaker import Segmaker
# Decouple interconnect bits from PRESELECT property bits
def bitfilter(frame, bit):
if frame == 7 or frame == 21:
return False
return True
def main():
segmk = Segmaker("design.bits")
print("Loading tags")
with open('params.json') as f:
params = json.load(f)
for row in params:
base_name = 'BUFGCTRL_X{}Y{}'.format(row['x'], row['y'])
segmk.add_site_tag(
row['site'], '{}.IN_USE'.format(base_name), row['IN_USE'])
if not row['IN_USE']:
continue
for param in (
'INIT_OUT',
'IS_IGNORE0_INVERTED',
'IS_IGNORE1_INVERTED',
):
segmk.add_site_tag(
row['site'], '{}.{}'.format(base_name, param), row[param])
for param in ('PRESELECT_I0', ):
segmk.add_site_tag(
row['site'], '{}.Z{}'.format(base_name, param), 1 ^ row[param])
for param in ('PRESELECT_I1', ):
segmk.add_site_tag(
row['site'], '{}.{}'.format(base_name, param), row[param])
for param, tag in (('IS_CE0_INVERTED', 'ZINV_CE0'), ('IS_S0_INVERTED',
'ZINV_S0'),
('IS_CE1_INVERTED', 'ZINV_CE1'), ('IS_S1_INVERTED',
'ZINV_S1')):
segmk.add_site_tag(
row['site'], '{}.{}'.format(base_name, tag), 1 ^ row[param])
segmk.compile(bitfilter=bitfilter)
segmk.write()
if __name__ == '__main__':
main()
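# Informal note (not part of the original fuzzer): the tags written with a
# leading 'Z' above (ZPRESELECT_I0 and the ZINV_* tags) are given the inverted
# value (1 ^ param); this appears to follow the prjxray convention that a 'Z'
# prefix marks a tag whose bit sense is inverted in the segbits database.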
|
espnet/nets/pytorch_backend/transformer/layer_norm.py
|
Syzygianinfern0/espnet
| 252 |
70969
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Layer normalization module."""
import torch
class LayerNorm(torch.nn.LayerNorm):
"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""
def __init__(self, nout, dim=-1):
"""Construct an LayerNorm object."""
super(LayerNorm, self).__init__(nout, eps=1e-12)
self.dim = dim
def forward(self, x):
"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""
if self.dim == -1:
return super(LayerNorm, self).forward(x)
return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
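# Informal usage note (not part of the original file): with dim=1 this module
# normalizes channel-first tensors, e.g.
#
#     ln = LayerNorm(nout=80, dim=1)
#     y = ln(torch.randn(4, 80, 100))   # (batch, feature, time)
#
# internally the input is transposed to (..., 80), passed through nn.LayerNorm,
# and transposed back, so the output shape matches the input.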
|
caffe2/python/operator_test/conditional_test.py
|
KevinKecc/caffe2
| 585 |
70973
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestConditionalOp(hu.HypothesisTestCase):
@given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
data_t = np.random.random((rows_num, 10, 20)).astype(np.float32)
data_f = np.random.random((rows_num, 10, 20)).astype(np.float32)
condition = np.random.choice(a=[True, False], size=rows_num)
def ref(condition, data_t, data_f):
output = [
data_t[i] if condition[i] else data_f[i]
for i in range(rows_num)
]
return (output,)
self.assertReferenceChecks(gc, op, [condition, data_t, data_f], ref)
|
packages/python/chart-studio/chart_studio/tests/test_core/test_tools/test_file_tools.py
|
mastermind88/plotly.py
| 11,750 |
70979
|
<filename>packages/python/chart-studio/chart_studio/tests/test_core/test_tools/test_file_tools.py
from chart_studio import tools
from chart_studio.tests.utils import PlotlyTestCase
import warnings
class FileToolsTest(PlotlyTestCase):
def test_set_config_file_all_entries(self):
# Check set_config and get_config return the same values
domain, streaming_domain, api, sharing = ("this", "thing", "that", "private")
ssl_verify, proxy_auth, world_readable, auto_open = (True, True, False, False)
tools.set_config_file(
plotly_domain=domain,
plotly_streaming_domain=streaming_domain,
plotly_api_domain=api,
plotly_ssl_verification=ssl_verify,
plotly_proxy_authorization=proxy_auth,
world_readable=world_readable,
auto_open=auto_open,
)
config = tools.get_config_file()
self.assertEqual(config["plotly_domain"], domain)
self.assertEqual(config["plotly_streaming_domain"], streaming_domain)
self.assertEqual(config["plotly_api_domain"], api)
self.assertEqual(config["plotly_ssl_verification"], ssl_verify)
self.assertEqual(config["plotly_proxy_authorization"], proxy_auth)
self.assertEqual(config["world_readable"], world_readable)
self.assertEqual(config["sharing"], sharing)
self.assertEqual(config["auto_open"], auto_open)
tools.reset_config_file()
def test_set_config_file_two_entries(self):
# Check set_config and get_config given only two entries return the
# same values
domain, streaming_domain = "this", "thing"
tools.set_config_file(
plotly_domain=domain, plotly_streaming_domain=streaming_domain
)
config = tools.get_config_file()
self.assertEqual(config["plotly_domain"], domain)
self.assertEqual(config["plotly_streaming_domain"], streaming_domain)
tools.reset_config_file()
def test_set_config_file_world_readable(self):
# Return TypeError when world_readable type is not a bool
kwargs = {"world_readable": "True"}
self.assertRaises(TypeError, tools.set_config_file, **kwargs)
def test_set_config_expected_warning_msg(self):
# Check that UserWarning is being called with http plotly_domain
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
kwargs = {"plotly_domain": "http://www.foo-bar.com"}
tools.set_config_file(**kwargs)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "plotly_domain" in str(w[-1].message)
def test_set_config_no_warning_msg_if_plotly_domain_is_https(self):
# Check that no UserWarning is being called with https plotly_domain
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
kwargs = {"plotly_domain": "https://www.foo-bar.com"}
tools.set_config_file(**kwargs)
assert len(w) == 0
def test_reset_config_file(self):
# Check reset_config and get_config return the same values
tools.reset_config_file()
config = tools.get_config_file()
self.assertEqual(config["plotly_domain"], "https://plotly.com")
self.assertEqual(config["plotly_streaming_domain"], "stream.plotly.com")
def test_get_credentials_file(self):
# Check get_credentials returns all the keys
original_creds = tools.get_credentials_file()
expected = [
"username",
"stream_ids",
"api_key",
"proxy_username",
"proxy_password",
]
self.assertTrue(all(x in original_creds for x in expected))
def test_reset_credentials_file(self):
        # Check get_credentials_file returns all the keys after a reset
tools.reset_credentials_file()
reset_creds = tools.get_credentials_file()
expected = [
"username",
"stream_ids",
"api_key",
"proxy_username",
"proxy_password",
]
self.assertTrue(all(x in reset_creds for x in expected))
|
python/ray/util/dask/scheduler_utils.py
|
daobook/ray
| 21,382 |
70994
|
"""
The following is adapted from Dask release 2021.03.1:
https://github.com/dask/dask/blob/2021.03.1/dask/local.py
"""
import os
from queue import Queue, Empty
from dask import config
from dask.callbacks import local_callbacks, unpack_callbacks
from dask.core import (_execute_task, flatten, get_dependencies, has_tasks,
reverse_dict)
from dask.order import order
if os.name == "nt":
    # Python 3 on Windows: Queue.get doesn't handle interrupts properly. To
    # work around this we poll at an interval large enough that it shouldn't
    # affect performance, but small enough that users trying to kill an
    # application won't notice the delay.
def queue_get(q):
while True:
try:
return q.get(block=True, timeout=0.1)
except Empty:
pass
else:
def queue_get(q):
return q.get()
def start_state_from_dask(dsk, cache=None, sortkey=None):
"""Start state from a dask
Examples
--------
>>> dsk = {
'x': 1,
'y': 2,
'z': (inc, 'x'),
'w': (add, 'z', 'y')} # doctest: +SKIP
>>> from pprint import pprint # doctest: +SKIP
>>> pprint(start_state_from_dask(dsk)) # doctest: +SKIP
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}},
'dependents': {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}},
'finished': set(),
'ready': ['z'],
'released': set(),
'running': set(),
'waiting': {'w': {'z'}},
'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}}
"""
if sortkey is None:
sortkey = order(dsk).get
if cache is None:
cache = config.get("cache", None)
if cache is None:
cache = dict()
data_keys = set()
for k, v in dsk.items():
if not has_tasks(dsk, v):
cache[k] = v
data_keys.add(k)
dsk2 = dsk.copy()
dsk2.update(cache)
dependencies = {k: get_dependencies(dsk2, k) for k in dsk}
waiting = {
k: v.copy()
for k, v in dependencies.items() if k not in data_keys
}
dependents = reverse_dict(dependencies)
for a in cache:
for b in dependents.get(a, ()):
waiting[b].remove(a)
waiting_data = {k: v.copy() for k, v in dependents.items() if v}
ready_set = {k for k, v in waiting.items() if not v}
ready = sorted(ready_set, key=sortkey, reverse=True)
waiting = {k: v for k, v in waiting.items() if v}
state = {
"dependencies": dependencies,
"dependents": dependents,
"waiting": waiting,
"waiting_data": waiting_data,
"cache": cache,
"ready": ready,
"running": set(),
"finished": set(),
"released": set(),
}
return state
def execute_task(key, task_info, dumps, loads, get_id, pack_exception):
"""
Compute task and handle all administration
See Also
--------
_execute_task : actually execute task
"""
try:
task, data = loads(task_info)
result = _execute_task(task, data)
id = get_id()
result = dumps((result, id))
failed = False
except BaseException as e:
result = pack_exception(e, dumps)
failed = True
return key, result, failed
def release_data(key, state, delete=True):
"""Remove data from temporary storage
See Also
--------
finish_task
"""
if key in state["waiting_data"]:
assert not state["waiting_data"][key]
del state["waiting_data"][key]
state["released"].add(key)
if delete:
del state["cache"][key]
DEBUG = False
def finish_task(dsk,
key,
state,
results,
sortkey,
delete=True,
release_data=release_data):
"""
Update execution state after a task finishes
Mutates. This should run atomically (with a lock).
"""
for dep in sorted(state["dependents"][key], key=sortkey, reverse=True):
s = state["waiting"][dep]
s.remove(key)
if not s:
del state["waiting"][dep]
state["ready"].append(dep)
for dep in state["dependencies"][key]:
if dep in state["waiting_data"]:
s = state["waiting_data"][dep]
s.remove(key)
if not s and dep not in results:
if DEBUG:
from chest.core import nbytes
print("Key: %s\tDep: %s\t NBytes: %.2f\t Release" %
(key, dep,
                         sum(map(nbytes, state["cache"].values())) / 1e6))
release_data(dep, state, delete=delete)
elif delete and dep not in results:
release_data(dep, state, delete=delete)
state["finished"].add(key)
state["running"].remove(key)
return state
def nested_get(ind, coll):
"""Get nested index from collection
Examples
--------
>>> nested_get(1, 'abc')
'b'
>>> nested_get([1, 0], 'abc')
('b', 'a')
>>> nested_get([[1, 0], [0, 1]], 'abc')
(('b', 'a'), ('a', 'b'))
"""
if isinstance(ind, list):
return tuple(nested_get(i, coll) for i in ind)
else:
return coll[ind]
def default_get_id():
"""Default get_id"""
return None
def default_pack_exception(e, dumps):
raise
def reraise(exc, tb=None):
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def identity(x):
"""Identity function. Returns x.
>>> identity(3)
3
"""
return x
def get_async(apply_async,
num_workers,
dsk,
result,
cache=None,
get_id=default_get_id,
rerun_exceptions_locally=None,
pack_exception=default_pack_exception,
raise_exception=reraise,
callbacks=None,
dumps=identity,
loads=identity,
**kwargs):
"""Asynchronous get function
This is a general version of various asynchronous schedulers for dask. It
takes a an apply_async function as found on Pool objects to form a more
specific ``get`` method that walks through the dask array with parallel
workers, avoiding repeat computation and minimizing memory use.
Parameters
----------
apply_async : function
Asynchronous apply function as found on Pool or ThreadPool
num_workers : int
The number of active tasks we should have at any one time
dsk : dict
A dask dictionary specifying a workflow
result : key or list of keys
Keys corresponding to desired data
cache : dict-like, optional
Temporary storage of results
get_id : callable, optional
Function to return the worker id, takes no arguments. Examples are
`threading.current_thread` and `multiprocessing.current_process`.
rerun_exceptions_locally : bool, optional
Whether to rerun failing tasks in local process to enable debugging
(False by default)
pack_exception : callable, optional
Function to take an exception and ``dumps`` method, and return a
serialized tuple of ``(exception, traceback)`` to send back to the
scheduler. Default is to just raise the exception.
raise_exception : callable, optional
Function that takes an exception and a traceback, and raises an error.
dumps: callable, optional
Function to serialize task data and results to communicate between
worker and parent. Defaults to identity.
loads: callable, optional
Inverse function of `dumps`. Defaults to identity.
callbacks : tuple or list of tuples, optional
Callbacks are passed in as tuples of length 5. Multiple sets of
callbacks may be passed in as a list of tuples. For more information,
see the dask.diagnostics documentation.
See Also
--------
threaded.get
"""
queue = Queue()
if isinstance(result, list):
result_flat = set(flatten(result))
else:
result_flat = {result}
results = set(result_flat)
dsk = dict(dsk)
with local_callbacks(callbacks) as callbacks:
_, _, pretask_cbs, posttask_cbs, _ = unpack_callbacks(callbacks)
started_cbs = []
succeeded = False
# if start_state_from_dask fails, we will have something
# to pass to the final block.
state = {}
try:
for cb in callbacks:
if cb[0]:
cb[0](dsk)
started_cbs.append(cb)
keyorder = order(dsk)
state = start_state_from_dask(
dsk, cache=cache, sortkey=keyorder.get)
for _, start_state, _, _, _ in callbacks:
if start_state:
start_state(dsk, state)
if rerun_exceptions_locally is None:
rerun_exceptions_locally = config.get(
"rerun_exceptions_locally", False)
if state["waiting"] and not state["ready"]:
raise ValueError("Found no accessible jobs in dask")
def fire_task():
""" Fire off a task to the thread pool """
# Choose a good task to compute
key = state["ready"].pop()
state["running"].add(key)
for f in pretask_cbs:
f(key, dsk, state)
# Prep data to send
data = {
dep: state["cache"][dep]
for dep in get_dependencies(dsk, key)
}
# Submit
apply_async(
execute_task,
args=(
key,
dumps((dsk[key], data)),
dumps,
loads,
get_id,
pack_exception,
),
callback=queue.put,
)
# Seed initial tasks into the thread pool
while state["ready"] and len(state["running"]) < num_workers:
fire_task()
# Main loop, wait on tasks to finish, insert new ones
while state["waiting"] or state["ready"] or state["running"]:
key, res_info, failed = queue_get(queue)
if failed:
exc, tb = loads(res_info)
if rerun_exceptions_locally:
data = {
dep: state["cache"][dep]
for dep in get_dependencies(dsk, key)
}
task = dsk[key]
_execute_task(task, data) # Re-execute locally
else:
raise_exception(exc, tb)
res, worker_id = loads(res_info)
state["cache"][key] = res
finish_task(dsk, key, state, results, keyorder.get)
for f in posttask_cbs:
f(key, res, dsk, state, worker_id)
while state["ready"] and len(state["running"]) < num_workers:
fire_task()
succeeded = True
finally:
for _, _, _, _, finish in started_cbs:
if finish:
finish(dsk, state, not succeeded)
return nested_get(result, state["cache"])
def apply_sync(func, args=(), kwds=None, callback=None):
""" A naive synchronous version of apply_async """
if kwds is None:
kwds = {}
res = func(*args, **kwds)
if callback is not None:
callback(res)
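# Illustrative usage, not part of the adapted Dask module: paired with
# apply_sync(), get_async() behaves like Dask's synchronous single-worker
# scheduler. The toy graph below is made up for the example.
if __name__ == "__main__":
    from operator import add
    demo_dsk = {"x": 1, "y": 2, "z": (add, "x", "y")}
    # Walk the graph with one synchronous "worker"; expected output: 3
    print(get_async(apply_sync, 1, demo_dsk, "z"))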
|
arybo/lib/mba_impl_petanque.py
|
Liblor/arybo
| 223 |
71007
|
# Copyright (c) 2016 <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import six
from six.moves import range
from pytanque import symbol, imm, Vector, Matrix, simplify, simplify_inplace, expand_esf_inplace, subs_vectors, subs_exprs, subs_exprs_inplace, analyses, esf_vector, esf, expand_esf, or_to_esf_inplace, Expr
def get_vector_from_cst(nbits, n):
vec = Vector(nbits)
vec.set_int_be(n, nbits)
return vec
def get_int(nbits, v):
return v.get_int_be()
def popcount(n):
ret = 0
while n>0:
if (n&1) == 1:
ret += 1
n >>= 1
return ret
def next_zero_bit(v):
v = ~v
v = (v ^ (v - 1)) >> 1
return popcount(v)
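# Informal sanity checks for the helpers above: popcount(0b1011) == 3, and
# since next_zero_bit() returns the index of the lowest clear bit,
# next_zero_bit(0b0111) == 3 and next_zero_bit(0b1010) == 0.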
def evaluate_expr(E, nbits, map_):
# keys of map_ can be mba variables or symbols
# => an mba variable must map to an integer or an mba variable
# => a symbol must map to an expression
keys = []
values = []
for k,v in six.iteritems(map_):
# TOFIX: not a clean test
if hasattr(k, "vec"):
keys.extend(k.vec)
if isinstance(v, six.integer_types):
values.extend(imm((v>>i)&1) for i in range(k.nbits))
continue
if hasattr(v, "vec"):
v = v.vec
if isinstance(v, Vector):
assert(len(v) == len(k.vec))
values.extend(v)
continue
raise ValueError("an MBAVariable must map to an integer value or an MBAVariable!")
elif isinstance(k, Expr):
if not k.is_sym():
raise ValueError("only symbols or MBAVariable can be a key")
if not isinstance(v, Expr):
raise ValueError("a symbol can only be mapped to an expression")
keys.append(k)
values.append(v)
E = expand_esf(E)
simplify_inplace(E)
subs_exprs_inplace(E, keys, values)
simplify_inplace(E)
try:
return E.get_int_be()
except RuntimeError:
return E
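# test_N(nbits, X, n) below builds the boolean indicator (minterm) of the
# constant n over the bit vector X: the product is 1 exactly when X == n,
# e.g. for nbits=2 and n=0b10 it expands to X[1]*(X[0]+1).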
def test_N(nbits, X, n):
ret = imm(1)
for i in range(nbits):
if ((n>>i)&1) == 1:
ret *= X[i]
else:
ret *= X[i]+imm(1)
simplify_inplace(ret)
return ret
class MBAImpl(object):
def __init__(self, nbits):
self.__set_nbits(nbits)
self.gen_x = Vector(nbits)
self.use_esf = False
self.use_opt_mba = True
for i in range(0, nbits):
self.gen_x[i] = symbol("__gen_X_%d" % i)
def __set_nbits(self, nbits):
self.nbits = nbits
self.max_uint = (1<<nbits) - 1
def var_symbols(self, name):
symbols = [symbol("%s%d" % (name, i)) for i in range(0, self.nbits)]
M = Vector(self.nbits)
for i in range(0, self.nbits):
M[i] = symbols[i]
return M
def get_vector_from_cst(self, n):
return get_vector_from_cst(self.nbits, n)
def get_int(self, v):
return get_int(self.nbits, v)
def identity(self):
return Matrix.identity(self.nbits)
def cst_matrix(self, cst):
return Matrix(self.nbits, self.nbits, lambda i,j: cst)
def null_matrix(self):
return Matrix(self.nbits, self.nbits)
# def iadd_Y(self, X, Y):
# carry = imm(0)
# for i in range(0, self.nbits):
# Xi = X[i]
# mul_XY = simplify_inplace(Xi*Y[i])
# Xi += Y[i]
# simplify_inplace(Xi)
# carry_new = simplify_inplace(mul_XY + (carry * Xi))
# Xi += carry
# simplify_inplace(Xi)
# carry = carry_new
def iadd_Y(self, X, Y):
carry = imm(0)
ret = Vector(self.nbits)
if self.use_esf:
for i in range(0, self.nbits):
new_carry = esf(2, [X[i], Y[i], carry])
X[i] += simplify_inplace(Y[i]+carry)
carry = new_carry
else:
for i in range(0, self.nbits):
sum_XY = simplify_inplace(X[i]+Y[i])
new_carry = simplify_inplace(X[i]*Y[i] + (carry * sum_XY))
X[i] = sum_XY + carry
carry = new_carry
return ret
def add_Y(self, X, Y):
carry = imm(0)
ret = Vector(self.nbits)
if self.use_esf:
for i in range(0, self.nbits):
ret[i] = simplify_inplace(X[i]+Y[i]+carry)
carry = esf(2, [X[i], Y[i], carry])
else:
for i in range(0, self.nbits):
sum_XY = simplify_inplace(X[i]+Y[i])
ret[i] = simplify_inplace(sum_XY+carry)
carry = simplify_inplace(X[i]*Y[i] + (carry * sum_XY))
return ret
def add_n(self, X, n):
n = n & self.max_uint
if self.use_esf or not self.use_opt_mba:
return self.add_Y(X, self.get_vector_from_cst(n))
else:
return self.add_n_mba(X, n)
def add_n_mba(self, X, n):
null = Vector(self.nbits)
n = self.get_vector_from_cst(n)
while (n != null):
new_X = simplify_inplace(self.xor_Y(X, n))
n = simplify_inplace(self.and_Y(self.lshift_n(X, 1), self.lshift_n(n, 1)))
X = new_X
return (X)
def sub_n_mba(self, X, n):
null = Vector(self.nbits)
n = self.get_vector_from_cst(n)
while (n != null):
X = simplify_inplace(self.xor_Y(X, n))
n = simplify_inplace(self.and_Y(self.lshift_n(X, 1), self.lshift_n(n, 1)))
return (X)
def iadd_n(self, X, n):
n = n & self.max_uint
if self.use_esf:
return self.iadd_Y(X, self.get_vector_from_cst(n))
return self.iadd_n_mba(X, n)
def iadd_n_mba(self, X, n):
null = Vector(self.nbits)
n = self.get_vector_from_cst(n)
while (n != null):
carry = simplify_inplace(self.and_Y(X, n))
self.ixor_Y(X, n)
simplify_inplace(X)
n = self.lshift_n(carry, 1)
return X
def iadd_lshifted_Y(self, X, Y, offset):
if self.use_esf:
self.iadd_Y(X, self.lshift_n(Y, offset))
simplify_inplace(X)
return
carry = imm(0)
for i in range(0, self.nbits):
if i < offset:
Yi = imm(0)
else:
Yi = Y[i-offset]
Xi = X[i]
mul_XY = simplify_inplace(Xi*Yi)
Xi += Yi
simplify_inplace(Xi)
carry_new = simplify_inplace(mul_XY + (carry * Xi))
Xi += carry
simplify_inplace(Xi)
carry = carry_new
def sub_Y(self, X, Y):
carry = imm(0)
ret = Vector(self.nbits)
if self.use_esf:
for i in range(0, self.nbits):
ret[i] = simplify_inplace(X[i]+Y[i]+carry)
carry = esf(2, [X[i]+imm(1), Y[i], carry])
else:
for i in range(0, self.nbits):
sum_XY = simplify_inplace(X[i]+Y[i])
ret[i] = simplify_inplace(sum_XY+carry)
carry = simplify_inplace((X[i]+imm(1))*Y[i] + (carry * (sum_XY+imm(1))))
return ret
def sub_n(self, X, n):
n = n & self.max_uint
return self.sub_Y(X, self.get_vector_from_cst(n))
def mul_Y(self, X, Y):
ret = Vector(self.nbits)
i = 0
for i in range(0, self.nbits):
Yi_vec = Vector(self.nbits, Y[i])
self.iadd_Y(ret, self.lshift_n(X, i) * Yi_vec)
return ret
def mul_n_org(self, X, n):
n = n & self.max_uint
ret = Vector(self.nbits)
i = 0
while n > 0:
if (n & 1) == 1:
self.iadd_lshifted_Y(ret, X, i)
n >>= 1
i += 1
return ret
def mul_n(self, X, n):
if (n == 1):
return X
ret = Vector(self.nbits)
if (n == 0):
return ret
n = n & self.max_uint
i = 0
final_sum = 0
not_x = None
def compute_not_x(not_x):
if not_x is None:
not_x = self.not_X(X)
return not_x
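        # Run-of-ones trick (cf. Hacker's Delight): a run of nz >= 3 set bits
        # at offset i contributes X * ((2**nz - 1) << i), i.e.
        # (X << (nz+i)) - (X << i). The subtraction -(X << i) is realised as
        # (not_X(X) << i) + (1 << i), with every deferred +(1 << i)
        # accumulated in final_sum and added once at the end.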
while n > 0:
# Optimisations from the Hacker's delight
nz = next_zero_bit(n)
if (nz >= 3):
not_x = compute_not_x(not_x)
self.iadd_lshifted_Y(ret, X, nz+i)
self.iadd_lshifted_Y(ret, not_x, i)
final_sum += 1<<i
n >>= nz
i += nz
else:
bits4 = n&0b1111
if bits4 == 0b1011:
not_x = compute_not_x(not_x)
self.iadd_lshifted_Y(ret, X, 4+i)
self.iadd_lshifted_Y(ret, not_x, 2+i)
self.iadd_lshifted_Y(ret, not_x, i)
final_sum += 1<<(i+2)
final_sum += 1<<i
n >>= 4
i += 4
elif bits4 == 0b1101:
not_x = compute_not_x(not_x)
self.iadd_lshifted_Y(ret, X, 4+i)
self.iadd_lshifted_Y(ret, not_x, 1+i)
self.iadd_lshifted_Y(ret, not_x, i)
final_sum += 1<<(i+1)
final_sum += 1<<i
n >>= 4
i += 4
else:
if (n & 1) == 1:
self.iadd_lshifted_Y(ret, X, i)
n >>= 1
i += 1
if final_sum > 0:
self.iadd_n(ret, final_sum & self.max_uint)
return ret
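    # div_n() below divides by a constant with the classic unsigned "magic
    # number" method (Hacker's Delight): pick p and
    # m = (2**p + n - 1 - ((2**p - 1) % n)) // n such that
    # X // n == (X * m) >> p for every X < 2**nbits, computed on a temporarily
    # widened (2*nbits + 1)-bit vector before narrowing back.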
def div_n(self, X, n):
ret = Vector(self.nbits*2+1)
for i in range(self.nbits):
ret[i] = X[i]
        nc = (2**self.nbits//n)*n - 1
for p in range(self.nbits, 2*self.nbits+1):
if(2**p > nc*(n - 1 - ((2**p - 1) % n))):
break
else:
raise RuntimeError("division: unable to find the shifting count")
m = (2**p + n - 1 - ((2**p - 1) % n))//n
self.__set_nbits(2*self.nbits+1)
ret = self.mul_n(ret, m)
ret = self.rshift_n(ret, p)
self.__set_nbits((self.nbits - 1)//2)
final_ret = Vector(self.nbits)
for i in range(self.nbits):
final_ret[i] = ret[i]
return final_ret
def phi_X(self, X):
def f(i, j):
if i != j:
return imm(0)
return X[i]
return Matrix(self.nbits, self.nbits, f)
def and_Y(self, X, Y):
return X*Y
#return self.phi_X(Y)*X
def and_n(self, X, n):
if n < 0:
n = n & self.max_uint
ret = Vector(self.nbits)
for i in range(self.nbits):
if n & (1<<i):
ret[i] = X[i]
return ret
#return self.phi_X(self.get_vector_from_cst(n))*X
def and_exp(self, X, e):
return X*e
def not_X(self, X):
return X + self.get_vector_from_cst(self.max_uint)
def xor_n(self, X, n):
if n < 0:
            n = n & self.max_uint
ret = Vector(self.nbits)
for i in range(self.nbits):
if n & (1 << i):
ret[i] = X[i] + imm(1)
else:
ret[i] = X[i]
return ret
def xor_exp(self, X, e):
return X+e
def xor_Y(self, X, Y):
return X + Y
def ixor_Y(self, X, Y):
X += Y
def ixor_exp(self, X, e):
X += e
def oppose_X(self, X):
return self.add_n(self.not_X(X), 1)
def notand_n(self, X, n):
return self.not_X(self.and_n(X, n))
def notand_Y(self, X, Y):
return self.not_X(self.and_Y(X, Y))
def notand_exp(self, X, e):
return self.not_exp(self.and_exp(X, e))
def or_Y(self, X, Y):
if self.use_esf:
return esf_vector(2, [X, Y]) + esf_vector(1, [X, Y])
else:
return self.xor_Y(self.and_Y(X, Y), self.xor_Y(X, Y))
def or_exp(self, X, e):
if self.use_esf:
E = Vector(self.nbits, e)
return self.or_Y(X, E)
else:
return self.xor_exp(self.and_exp(X, e), self.xor_exp(X, e))
def or_n(self, X, n):
ret = Vector(self.nbits)
for i in range(self.nbits):
if n & (1<<i):
ret[i] = imm(1)
else:
ret[i] = X[i]
return ret
def lshift_n(self, X, n):
return X>>n
def rshift_n(self, X, n):
return X<<n
def arshift_n(self, X, n):
n = min(n, self.nbits)
ret = X<<n
last_bit = X[self.nbits-1]
for i in range(self.nbits-n, self.nbits):
ret[i] = last_bit
return ret
def rshift_Y(self, X, Y):
# Generate 2**Y and multiply X by this
        raise NotImplementedError("rshift_Y is not implemented")
def rol_n(self, X, n):
# rol(0b(d b c a), 1) = 0b(b c a d)
        # rol(vec(a,b,c,d), 1) = vec(d,a,b,c)
ret = Vector(self.nbits)
for i in range(self.nbits):
ret[i] = X[(i-n)%self.nbits]
return ret
def ror_n(self, X, n):
ret = Vector(self.nbits)
for i in range(self.nbits):
ret[i] = X[(i+n)%self.nbits]
return ret
def evaluate(self, E, values):
return evaluate_expr(E, self.nbits, values)
def vectorial_decomp(self, symbols, X):
return analyses.vectorial_decomp(symbols, X)
def permut2expr(self, P, X):
ret = Vector(self.nbits)
v0 = P[0]
nbits_in = (len(P)-1).bit_length()
for k,v in enumerate(P[1:]):
v ^= v0
if v == 0:
continue
k += 1
test = test_N(nbits_in, X, k)
for i in range(self.nbits):
if ((v>>i)&1) == 1:
ret[i] += test
for i in range(self.nbits):
ret[i] += imm((v0 >> i) & 1)
simplify_inplace(ret)
return ret
def symbpermut2expr(self, P, X):
ret = Vector(self.nbits)
nbits_in = (len(P)-1).bit_length()
for k,v in enumerate(P):
test = test_N(nbits_in, X, k)
for i in range(self.nbits):
ret[i] += v[i]*test
simplify_inplace(ret)
return ret
def add_n_matrix(self, n):
def matrix_v(i, j):
if i == j:
return imm(1)
if i < j:
return imm(0)
if i > j:
mask = (~((1<<(j))-1)) & self.max_uint
mask2 = ((1<<(i))-1) & self.max_uint
mask &= mask2
return imm((n & mask) == mask)
return Matrix(self.nbits, self.nbits, matrix_v)
def from_bytes(self, s):
ret = Vector(self.nbits)
for i,c in enumerate(six.iterbytes(s)):
for j in range(8):
ret[i*8+j] = imm((c>>j)&1)
return ret
def to_bytes(self, vec):
l = (self.nbits+7)//8
ret = bytearray(l)
for i,b in enumerate(vec):
if not b.is_imm():
raise ValueError("variable does not contain only immediates!")
b = b.imm_value()
if b:
bit_idx = i&7
byte_idx = i>>3
ret[byte_idx] |= (b<<bit_idx)
return bytes(ret)
|
wemake_python_styleguide/visitors/ast/iterables.py
|
cdhiraj40/wemake-python-styleguide
| 1,931 |
71025
|
<reponame>cdhiraj40/wemake-python-styleguide
import ast
from typing import ClassVar
from typing_extensions import final
from wemake_python_styleguide.logic.nodes import get_parent
from wemake_python_styleguide.types import AnyNodes
from wemake_python_styleguide.violations.consistency import (
IterableUnpackingViolation,
)
from wemake_python_styleguide.visitors import base
@final
class IterableUnpackingVisitor(base.BaseNodeVisitor):
"""Checks iterables unpacking."""
_unpackable_iterable_parent_types: ClassVar[AnyNodes] = (
ast.List,
ast.Set,
ast.Tuple,
)
def visit_Starred(self, node: ast.Starred) -> None:
"""Checks iterable's unpacking."""
self._check_unnecessary_iterable_unpacking(node)
self.generic_visit(node)
def _check_unnecessary_iterable_unpacking(self, node: ast.Starred) -> None:
parent = get_parent(node)
if isinstance(parent, self._unpackable_iterable_parent_types):
if len(getattr(parent, 'elts', [])) == 1:
self.add_violation(IterableUnpackingViolation(node))
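# Informal examples (not part of the original module): the violation fires on a
# container whose only element is a starred expression, e.g. ``[*numbers]`` or
# ``{*items}``, where plain ``list(numbers)`` / ``set(items)`` is clearer;
# ``[first, *rest]`` is left alone because the parent literal has two elements.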
|
pygeos/tests/test_linear.py
|
caibengbu/pygeos
| 292 |
71064
|
<reponame>caibengbu/pygeos
import numpy as np
import pytest
import pygeos
from pygeos.testing import assert_geometries_equal
from .common import (
empty_line_string,
empty_point,
line_string,
linear_ring,
multi_line_string,
multi_point,
multi_polygon,
point,
polygon,
)
def test_line_interpolate_point_geom_array():
actual = pygeos.line_interpolate_point(
[line_string, linear_ring, multi_line_string], -1
)
assert_geometries_equal(actual[0], pygeos.Geometry("POINT (1 0)"))
assert_geometries_equal(actual[1], pygeos.Geometry("POINT (0 1)"))
assert_geometries_equal(
actual[2], pygeos.Geometry("POINT (0.5528 1.1056)"), tolerance=0.001
)
def test_line_interpolate_point_geom_array_normalized():
actual = pygeos.line_interpolate_point(
[line_string, linear_ring, multi_line_string], 1, normalized=True
)
assert_geometries_equal(actual[0], pygeos.Geometry("POINT (1 1)"))
assert_geometries_equal(actual[1], pygeos.Geometry("POINT (0 0)"))
assert_geometries_equal(actual[2], pygeos.Geometry("POINT (1 2)"))
def test_line_interpolate_point_float_array():
actual = pygeos.line_interpolate_point(line_string, [0.2, 1.5, -0.2])
assert_geometries_equal(actual[0], pygeos.Geometry("POINT (0.2 0)"))
assert_geometries_equal(actual[1], pygeos.Geometry("POINT (1 0.5)"))
assert_geometries_equal(actual[2], pygeos.Geometry("POINT (1 0.8)"))
@pytest.mark.parametrize("normalized", [False, True])
@pytest.mark.parametrize(
"geom",
[
pygeos.Geometry("LINESTRING EMPTY"),
pygeos.Geometry("LINEARRING EMPTY"),
pygeos.Geometry("MULTILINESTRING EMPTY"),
pygeos.Geometry("MULTILINESTRING (EMPTY, (0 0, 1 1))"),
pygeos.Geometry("GEOMETRYCOLLECTION EMPTY"),
pygeos.Geometry("GEOMETRYCOLLECTION (LINESTRING EMPTY, POINT (1 1))"),
],
)
def test_line_interpolate_point_empty(geom, normalized):
# These geometries segfault in some versions of GEOS (in 3.8.0, still
# some of them segfault). Instead, we patched this to return POINT EMPTY.
# This matches GEOS 3.8.0 behavior on simple empty geometries.
assert_geometries_equal(
pygeos.line_interpolate_point(geom, 0.2, normalized=normalized), empty_point
)
@pytest.mark.parametrize("normalized", [False, True])
@pytest.mark.parametrize(
"geom",
[
empty_point,
point,
polygon,
multi_point,
multi_polygon,
pygeos.geometrycollections([point]),
pygeos.geometrycollections([polygon]),
pygeos.geometrycollections([multi_line_string]),
pygeos.geometrycollections([multi_point]),
pygeos.geometrycollections([multi_polygon]),
],
)
def test_line_interpolate_point_invalid_type(geom, normalized):
with pytest.raises(TypeError):
assert pygeos.line_interpolate_point(geom, 0.2, normalized=normalized)
def test_line_interpolate_point_none():
assert pygeos.line_interpolate_point(None, 0.2) is None
def test_line_interpolate_point_nan():
assert pygeos.line_interpolate_point(line_string, np.nan) is None
def test_line_locate_point_geom_array():
point = pygeos.points(0, 1)
actual = pygeos.line_locate_point([line_string, linear_ring], point)
np.testing.assert_allclose(actual, [0.0, 3.0])
def test_line_locate_point_geom_array2():
points = pygeos.points([[0, 0], [1, 0]])
actual = pygeos.line_locate_point(line_string, points)
np.testing.assert_allclose(actual, [0.0, 1.0])
@pytest.mark.parametrize("normalized", [False, True])
def test_line_locate_point_none(normalized):
assert np.isnan(pygeos.line_locate_point(line_string, None, normalized=normalized))
assert np.isnan(pygeos.line_locate_point(None, point, normalized=normalized))
@pytest.mark.parametrize("normalized", [False, True])
def test_line_locate_point_empty(normalized):
assert np.isnan(
pygeos.line_locate_point(line_string, empty_point, normalized=normalized)
)
assert np.isnan(
pygeos.line_locate_point(empty_line_string, point, normalized=normalized)
)
@pytest.mark.parametrize("normalized", [False, True])
def test_line_locate_point_invalid_geometry(normalized):
with pytest.raises(pygeos.GEOSException):
pygeos.line_locate_point(line_string, line_string, normalized=normalized)
with pytest.raises(pygeos.GEOSException):
pygeos.line_locate_point(polygon, point, normalized=normalized)
def test_line_merge_geom_array():
actual = pygeos.line_merge([line_string, multi_line_string])
assert_geometries_equal(actual[0], line_string)
assert_geometries_equal(actual[1], pygeos.Geometry("LINESTRING (0 0, 1 2)"))
def test_shared_paths_linestring():
g1 = pygeos.linestrings([(0, 0), (1, 0), (1, 1)])
g2 = pygeos.linestrings([(0, 0), (1, 0)])
actual1 = pygeos.shared_paths(g1, g2)
assert_geometries_equal(
pygeos.get_geometry(actual1, 0), pygeos.multilinestrings([g2])
)
def test_shared_paths_none():
assert pygeos.shared_paths(line_string, None) is None
assert pygeos.shared_paths(None, line_string) is None
assert pygeos.shared_paths(None, None) is None
def test_shared_paths_non_linestring():
g1 = pygeos.linestrings([(0, 0), (1, 0), (1, 1)])
g2 = pygeos.points(0, 1)
with pytest.raises(pygeos.GEOSException):
pygeos.shared_paths(g1, g2)
def _prepare_input(geometry, prepare):
"""Prepare without modifying inplace"""
if prepare:
geometry = pygeos.apply(geometry, lambda x: x) # makes a copy
pygeos.prepare(geometry)
return geometry
else:
return geometry
@pytest.mark.parametrize("prepare", [True, False])
def test_shortest_line(prepare):
g1 = pygeos.linestrings([(0, 0), (1, 0), (1, 1)])
g2 = pygeos.linestrings([(0, 3), (3, 0)])
actual = pygeos.shortest_line(_prepare_input(g1, prepare), g2)
expected = pygeos.linestrings([(1, 1), (1.5, 1.5)])
assert pygeos.equals(actual, expected)
@pytest.mark.parametrize("prepare", [True, False])
def test_shortest_line_none(prepare):
assert pygeos.shortest_line(_prepare_input(line_string, prepare), None) is None
assert pygeos.shortest_line(None, line_string) is None
assert pygeos.shortest_line(None, None) is None
@pytest.mark.parametrize("prepare", [True, False])
def test_shortest_line_empty(prepare):
g1 = _prepare_input(line_string, prepare)
assert pygeos.shortest_line(g1, empty_line_string) is None
g1_empty = _prepare_input(empty_line_string, prepare)
assert pygeos.shortest_line(g1_empty, line_string) is None
assert pygeos.shortest_line(g1_empty, empty_line_string) is None
|
scikit-dbscan-example.py
|
davemarr621/dbscan_2
| 107 |
71065
|
# -*- coding: utf-8 -*-
"""
This script is used to validate that my implementation of DBSCAN produces
the same results as the implementation found in scikit-learn.
It's based on the scikit-learn example code, here:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html
@author: <NAME>
"""
from sklearn.datasets.samples_generator import make_blobs
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from dbscan import MyDBSCAN
# Create three gaussian blobs to use as our clustering data.
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
###############################################################################
# My implementation of DBSCAN
#
# Run my DBSCAN implementation.
print 'Running my implementation...'
my_labels = MyDBSCAN(X, eps=0.3, MinPts=10)
###############################################################################
# Scikit-learn implementation of DBSCAN
#
print 'Running scikit-learn implementation...'
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
skl_labels = db.labels_
# Scikit-learn uses -1 for NOISE, and starts cluster labeling at 0. I start
# numbering at 1, so increment the skl cluster numbers by 1.
for i in range(0, len(skl_labels)):
if not skl_labels[i] == -1:
skl_labels[i] += 1
###############################################################################
# Did we get the same results?
num_disagree = 0
# Go through each label and make sure they match (print the labels if they
# don't)
for i in range(0, len(skl_labels)):
if not skl_labels[i] == my_labels[i]:
print 'Scikit learn:', skl_labels[i], 'mine:', my_labels[i]
num_disagree += 1
if num_disagree == 0:
print 'PASS - All labels match!'
else:
print 'FAIL -', num_disagree, 'labels don\'t match.'
|
tests/gold_tests/pluginTest/cert_update/cert_update.test.py
|
cmcfarlen/trafficserver
| 1,351 |
71071
|
<reponame>cmcfarlen/trafficserver<gh_stars>1000+
'''
Test the cert_update plugin.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ports
Test.Summary = '''
Test cert_update plugin.
'''
Test.SkipUnless(
    Condition.HasProgram("openssl", "Openssl needs to be installed on the system for this test to work"),
Condition.PluginExists('cert_update.so')
)
# Set up origin server
server = Test.MakeOriginServer("server")
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
# Set up ATS
ts = Test.MakeATSProcess("ts", command="traffic_manager", enable_tls=1)
# Set up ssl files
ts.addSSLfile("ssl/server1.pem")
ts.addSSLfile("ssl/server2.pem")
ts.addSSLfile("ssl/client1.pem")
ts.addSSLfile("ssl/client2.pem")
# reserve port, attach it to 'ts' so it is released later
ports.get_port(ts, 's_server_port')
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'cert_update',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.url_remap.pristine_host_hdr': 1
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server1.pem ssl_key_name=server1.pem'
)
ts.Disk.remap_config.AddLines([
'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port),
'map https://foo.com https://127.0.0.1:{0}'.format(ts.Variables.s_server_port)
])
ts.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: "*foo.com"',
' client_cert: "client1.pem"',
])
# Set up plugin
Test.PrepareInstalledPlugin('cert_update.so', ts)
# Server-Cert-Pre
# curl should see that Traffic Server presents bar.com cert from alice
tr = Test.AddTestRun("Server-Cert-Pre")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = (
'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.Streams.stderr = "gold/server-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
# Server-Cert-Update
tr = Test.AddTestRun("Server-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'{0}/traffic_ctl plugin msg cert_update.server {1}/server2.pem'.format(ts.Variables.BINDIR, ts.Variables.SSLDir)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
# Server-Cert-After
# after using traffic_ctl to update the server cert, curl should see the bar.com cert from bob
tr = Test.AddTestRun("Server-Cert-After")
tr.Processes.Default.Env = ts.Env
tr.Command = 'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.Streams.stderr = "gold/server-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
# Client-Cert-Pre
# s_server should see client (Traffic Server) as alice.com
tr = Test.AddTestRun("Client-Cert-Pre")
s_server = tr.Processes.Process(
"s_server",
"openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(
ts.Variables.SSLDir,
ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
# Client-Cert-Update
tr = Test.AddTestRun("Client-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'mv {0}/client2.pem {0}/client1.pem && {1}/traffic_ctl plugin msg cert_update.client {0}/client1.pem'.format(
ts.Variables.SSLDir, ts.Variables.BINDIR)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
# Client-Cert-After
# after using traffic_ctl to update the client cert, s_server should see the client (Traffic Server) as bob.com
tr = Test.AddTestRun("Client-Cert-After")
s_server = tr.Processes.Process(
"s_server",
"openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(
ts.Variables.SSLDir,
ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Processes.Default.Env = ts.Env
# Move client2.pem to replace client1.pem since cert path matters in client context mapping
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
|
vgg_model.py
|
daxavic/LQ-Nets
| 116 |
71081
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: vgg_model.py
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer
from tensorpack.models import *
from tensorpack.tfutils.argscope import argscope, get_arg_scope
from learned_quantization import Conv2DQuant, getBNReLUQuant, getfcBNReLUQuant, getfcBNReLU
def vgg_backbone(image, qw=1):
with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
W_init=variance_scaling_initializer(mode='FAN_IN'),
data_format=get_arg_scope()['Conv2D']['data_format'],
nbit=qw):
logits = (LinearWrap(image)
.Conv2DQuant('conv1', 96, 7, stride=2, nl=tf.nn.relu, is_quant=False)
.MaxPooling('pool1', shape=2, stride=2, padding='VALID')
# 56
.BNReLUQuant('bnquant2_0')
.Conv2DQuant('conv2_1', 256, 3, nl=getBNReLUQuant)
.Conv2DQuant('conv2_2', 256, 3, nl=getBNReLUQuant)
.Conv2DQuant('conv2_3', 256, 3)
.MaxPooling('pool2', shape=2, stride=2, padding='VALID')
# 28
.BNReLUQuant('bnquant3_0')
.Conv2DQuant('conv3_1', 512, 3, nl=getBNReLUQuant)
.Conv2DQuant('conv3_2', 512, 3, nl=getBNReLUQuant)
.Conv2DQuant('conv3_3', 512, 3)
.MaxPooling('pool3', shape=2, stride=2, padding='VALID')
# 14
.BNReLUQuant('bnquant4_0')
.Conv2DQuant('conv4_1', 512, 3, nl=getBNReLUQuant)
.Conv2DQuant('conv4_2', 512, 3, nl=getBNReLUQuant)
.Conv2DQuant('conv4_3', 512, 3)
.MaxPooling('pool4', shape=2, stride=2, padding='VALID')
# 7
.BNReLUQuant('bnquant5')
.Conv2DQuant('fc5', 4096, 7, nl=getfcBNReLUQuant, padding='VALID', use_bias=True)
.Conv2DQuant('fc6', 4096, 1, nl=getfcBNReLU, padding='VALID', use_bias=True)
.FullyConnected('fc7', out_dim=1000, nl=tf.identity, W_init=variance_scaling_initializer(mode='FAN_IN'))())
return logits
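# Informal note (not part of the original file): the numeric comments above
# (# 56, # 28, # 14, # 7) track the spatial size after each pooling stage for a
# 224x224 input, and qw is passed to Conv2DQuant as nbit (the weight
# quantization bit-width); the first conv stays full precision via
# is_quant=False, and the final FullyConnected layer is unquantized.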
|
tests/test_manager.py
|
Mitul16/pwncat
| 1,454 |
71093
|
#!/usr/bin/env python3
import io
import pwncat.manager
def test_config_fileobj():
configuration = io.StringIO(
"""
set -g db "memory://"
set -g prefix c-k
set -g on_load { }
set -g backdoor_user "config_test"
"""
)
with pwncat.manager.Manager(config=configuration) as manager:
assert manager.config["backdoor_user"] == "config_test"
def test_user_config(tmp_path):
import os
# Ensure we don't muck up the environment for this process
old_home = os.environ.get("XDG_DATA_HOME", None)
try:
# Set the data home to our temp path
os.environ["XDG_DATA_HOME"] = str(tmp_path)
# Create the pwncat directory
(tmp_path / "pwncat").mkdir(exist_ok=True, parents=True)
# Create our user configuration
with (tmp_path / "pwncat" / "pwncatrc").open("w") as filp:
filp.writelines(["""set -g backdoor_user "config_test"\n"""])
os.chdir(tmp_path)
# Create a manager object with default config to load our
# user configuration.
with pwncat.manager.Manager(config=None) as manager:
assert manager.config["backdoor_user"] == "config_test"
finally:
# Restore the environment
if old_home is not None:
os.environ["XDG_DATA_HOME"] = old_home
else:
del os.environ["XDG_DATA_HOME"]
|
src/sage/combinat/species/sum_species.py
|
UCD4IDS/sage
| 1,742 |
71108
|
<gh_stars>1000+
"""
Sum species
"""
#*****************************************************************************
# Copyright (C) 2008 <NAME> <<EMAIL>>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from .species import GenericCombinatorialSpecies
from .structure import SpeciesStructureWrapper
from sage.structure.unique_representation import UniqueRepresentation
class SumSpeciesStructure(SpeciesStructureWrapper):
pass
class SumSpecies(GenericCombinatorialSpecies, UniqueRepresentation):
def __init__(self, F, G, min=None, max=None, weight=None):
"""
Returns the sum of two species.
EXAMPLES::
sage: S = species.PermutationSpecies()
sage: A = S+S
sage: A.generating_series().coefficients(5)
[2, 2, 2, 2, 2]
sage: P = species.PermutationSpecies()
sage: F = P + P
sage: F._check()
True
sage: F == loads(dumps(F))
True
TESTS::
sage: A = species.SingletonSpecies() + species.SingletonSpecies()
sage: B = species.SingletonSpecies() + species.SingletonSpecies()
sage: C = species.SingletonSpecies() + species.SingletonSpecies(min=2)
sage: A is B
True
sage: (A is C) or (A == C)
False
"""
self._F = F
self._G = G
self._state_info = [F, G]
GenericCombinatorialSpecies.__init__(self, min=None, max=None, weight=None)
_default_structure_class = SumSpeciesStructure
def left_summand(self):
"""
Returns the left summand of this species.
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P*P
sage: F.left_summand()
Permutation species
"""
return self._F
def right_summand(self):
"""
Returns the right summand of this species.
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P*P
sage: F.right_summand()
Product of (Permutation species) and (Permutation species)
"""
return self._G
def _name(self):
"""
Note that we use a function to return the name of this species
        because we cannot do it in the __init__ method: it requires that
        self.left_summand() and self.right_summand() already be unpickled.
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P
sage: F._name()
'Sum of (Permutation species) and (Permutation species)'
"""
return "Sum of (%s) and (%s)"%(self.left_summand(), self.right_summand())
def _structures(self, structure_class, labels):
"""
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P
sage: F.structures([1,2]).list()
[[1, 2], [2, 1], [1, 2], [2, 1]]
"""
for res in self.left_summand().structures(labels):
yield structure_class(self, res, tag="left")
for res in self.right_summand().structures(labels):
yield structure_class(self, res, tag="right")
def _isotypes(self, structure_class, labels):
"""
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P
sage: F.isotypes([1,2]).list()
[[2, 1], [1, 2], [2, 1], [1, 2]]
"""
for res in self._F.isotypes(labels):
yield structure_class(self, res, tag="left")
for res in self._G.isotypes(labels):
yield structure_class(self, res, tag="right")
def _gs(self, series_ring, base_ring):
"""
        Returns the generating series of this species.
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P
sage: F.generating_series().coefficients(5)
[2, 2, 2, 2, 2]
"""
return (self.left_summand().generating_series(base_ring) +
self.right_summand().generating_series(base_ring))
def _itgs(self, series_ring, base_ring):
"""
Returns the isomorphism type generating series of this species.
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P
sage: F.isotype_generating_series().coefficients(5)
[2, 2, 4, 6, 10]
"""
return (self.left_summand().isotype_generating_series(base_ring) +
self.right_summand().isotype_generating_series(base_ring))
def _cis(self, series_ring, base_ring):
"""
Returns the cycle index series of this species.
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P + P
sage: F.cycle_index_series().coefficients(5)
[2*p[],
2*p[1],
2*p[1, 1] + 2*p[2],
2*p[1, 1, 1] + 2*p[2, 1] + 2*p[3],
2*p[1, 1, 1, 1] + 2*p[2, 1, 1] + 2*p[2, 2] + 2*p[3, 1] + 2*p[4]]
"""
return (self.left_summand().cycle_index_series(base_ring) +
self.right_summand().cycle_index_series(base_ring))
def weight_ring(self):
"""
Returns the weight ring for this species. This is determined by
asking Sage's coercion model what the result is when you add
elements of the weight rings for each of the operands.
EXAMPLES::
sage: S = species.SetSpecies()
sage: C = S+S
sage: C.weight_ring()
Rational Field
::
sage: S = species.SetSpecies(weight=QQ['t'].gen())
sage: C = S + S
sage: C.weight_ring()
Univariate Polynomial Ring in t over Rational Field
"""
return self._common_parent([self.left_summand().weight_ring(),
self.right_summand().weight_ring()])
def _equation(self, var_mapping):
"""
Returns the right hand side of an algebraic equation satisfied by
this species. This is a utility function called by the
algebraic_equation_system method.
EXAMPLES::
sage: X = species.SingletonSpecies()
sage: S = X + X
sage: S.algebraic_equation_system()
[node1 + (-2*z)]
"""
return sum(var_mapping[operand] for operand in self._state_info)
#Backward compatibility
SumSpecies_class = SumSpecies
|
tools.python3/lav_sort.py
|
urbanslug/lastz
| 139 |
71136
|
<filename>tools.python3/lav_sort.py<gh_stars>100-1000
#!/usr/bin/env python3
"""
Sort the a-stanzas in a lav file, according to the user's choice of key
-----------------------------------------------------------------------
:Author: <NAME> (<EMAIL>)
"""
import sys
validKeys = ["score","pos1","pos2","beg1","beg2","end1","end2"]
def usage(s=None):
message = """
lav_sort --key=[-]<score|pos1|pos2|beg1|beg2|end1|end2> < lav_file > sorted_lav_file
"""
if (s == None): sys.exit (message)
else: sys.exit ("%s\n%s" % (s,message))
def main():
# parse the command line
if (len(sys.argv) < 2):
usage("you must specify a key")
elif (len(sys.argv) > 2):
usage("wrong number of arguments")
arg = sys.argv[1]
if (not arg.startswith("--key=")):
usage("unrecognized argument: \"%s\"" % arg)
keyName = arg[arg.find("=")+1:]
keyReverse = False
if (keyName.startswith("-")):
keyName = keyName[1:]
keyReverse = True
if (keyName.startswith("+")):
keyName = keyName[1:]
keyReverse = False
if (keyName not in validKeys):
usage("unrecognized key: \"%s\"" % keyName)
# process the stanzas
blocks = []
for (kind,stanza) in read_stanzas(sys.stdin):
if (kind == "a"):
key = get_key_value(keyName,stanza)
blocks += [(key,stanza)]
continue
if (len(blocks) > 0):
blocks.sort()
if (keyReverse): blocks.reverse()
for (key,s) in blocks:
print ("\n".join(s))
blocks = []
print ("\n".join(stanza))
if (len(blocks) > 0):
blocks.sort()
if (keyReverse): blocks.reverse()
for (key,s) in blocks:
print ("\n".join(s))
# read_stanzas--
# Collect the lines that belong to the next stanza. A stanza has the form
# shown below. It consists of several lines bracketed by a pair of curlies,
# and has a type indicated by a single letter.
#
# x {
# ...
# }
#
# In this routine we generalize the stanza concept to include lines not
# strictly within a pair of curlies. First, lines beginning with a "#:" are
# considered to be single line stanzas with no type (e.g. the "#:lav" and
# "#:eof" lines). Second, any other blank lines are appended to whatever
# stanza preceded them. This allows for lav+text and other debugging output
# from lastz to be carried around with the appropriate stanza.
def read_stanzas(f):
kind = None
stanza = []
inCurly = False
for line in f:
line = line.rstrip()
if (not inCurly):
isWaffle = line.startswith("#:")
inCurly = (len(line) == 3) and (line.endswith(" {"))
if (isWaffle) or (inCurly):
if (len(stanza) > 0):
yield (kind,stanza)
stanza = []
if (isWaffle):
yield (line[2:],[line])
kind = None
continue
kind = line[0]
stanza += [line]
else: # (inCurly)
stanza += [line]
if (line == "}"): inCurly = False
assert (len(stanza) == 0), "premature end of file"
# get_key_value--
# Extract the specified key value from an a-stanza. A typical a-stanza looks
# like this one:
#
# a {
# s 14400
# b 425 4438
# e 697 4714
# l 425 4438 448 4461 96
# l 449 4464 579 4594 83
# l 581 4595 604 4618 96
# l 605 4627 609 4631 100
# l 617 4632 648 4663 91
# l 649 4666 697 4714 90
# }
def get_key_value(keyName,aStanza):
if (keyName == "score"):
assert (len(aStanza) >= 2) and (aStanza[1].startswith(" s"))
score = aStanza[1].split()[1]
try:
return int(score)
except:
try:
return float(score)
except:
pass
return score
if (keyName in ["pos1","beg1"]):
assert (len(aStanza) >= 3) and (aStanza[2].startswith(" b"))
beg1 = aStanza[2].split()[1]
return int(beg1)
if (keyName in ["pos2","beg2"]):
assert (len(aStanza) >= 3) and (aStanza[2].startswith(" b"))
beg2 = aStanza[2].split()[2]
return int(beg2)
if (keyName in ["end1"]):
assert (len(aStanza) >= 4) and (aStanza[3].startswith(" e"))
end1 = aStanza[3].split()[1]
return int(end1)
if (keyName in ["end2"]):
assert (len(aStanza) >= 4) and (aStanza[3].startswith(" e"))
end2 = aStanza[3].split()[2]
return int(end2)
assert False
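# Hedged usage sketch (not part of the original tool); the lav fragment and its
# alignment numbers below are invented purely for illustration.
def _demo_read_stanzas():
    import io
    fragment = io.StringIO("#:lav\na {\n s 1500\n b 10 20\n e 60 70\n}\n#:eof\n")
    for (kind,stanza) in read_stanzas(fragment):
        if (kind == "a"):
            print(get_key_value("score",stanza))  # -> 1500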
if __name__ == "__main__": main()
|
openstackclient/network/v2/address_scope.py
|
openstack/python-openstackclient
| 262 |
71144
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Address scope action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common
LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {
'is_shared': 'shared',
'tenant_id': 'project_id',
}
hidden_columns = ['location']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
hidden_columns
)
def _get_attrs(client_manager, parsed_args):
attrs = {}
attrs['name'] = parsed_args.name
attrs['ip_version'] = parsed_args.ip_version
if parsed_args.share:
attrs['shared'] = True
if parsed_args.no_share:
attrs['shared'] = False
if 'project' in parsed_args and parsed_args.project is not None:
identity_client = client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['tenant_id'] = project_id
return attrs
# TODO(rtheis): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class CreateAddressScope(command.ShowOne, common.NeutronCommandWithExtraArgs):
_description = _("Create a new Address Scope")
def get_parser(self, prog_name):
parser = super(CreateAddressScope, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar="<name>",
help=_("New address scope name")
)
parser.add_argument(
'--ip-version',
type=int,
default=4,
choices=[4, 6],
help=_("IP version (default is 4)")
)
parser.add_argument(
'--project',
metavar="<project>",
help=_("Owner's project (name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
share_group = parser.add_mutually_exclusive_group()
share_group.add_argument(
'--share',
action='store_true',
help=_('Share the address scope between projects')
)
share_group.add_argument(
'--no-share',
action='store_true',
help=_('Do not share the address scope between projects (default)')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
attrs = _get_attrs(self.app.client_manager, parsed_args)
attrs.update(
self._parse_extra_properties(parsed_args.extra_properties))
obj = client.create_address_scope(**attrs)
display_columns, columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns, formatters={})
return (display_columns, data)
class DeleteAddressScope(command.Command):
_description = _("Delete address scope(s)")
def get_parser(self, prog_name):
parser = super(DeleteAddressScope, self).get_parser(prog_name)
parser.add_argument(
'address_scope',
metavar="<address-scope>",
nargs='+',
help=_("Address scope(s) to delete (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
result = 0
for scope in parsed_args.address_scope:
try:
obj = client.find_address_scope(scope, ignore_missing=False)
client.delete_address_scope(obj)
except Exception as e:
result += 1
LOG.error(_("Failed to delete address scope with "
"name or ID '%(scope)s': %(e)s"),
{'scope': scope, 'e': e})
if result > 0:
total = len(parsed_args.address_scope)
msg = (_("%(result)s of %(total)s address scopes failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
# TODO(yanxing'an): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class ListAddressScope(command.Lister):
_description = _("List address scopes")
def get_parser(self, prog_name):
parser = super(ListAddressScope, self).get_parser(prog_name)
parser.add_argument(
'--name',
metavar='<name>',
help=_("List only address scopes of given name in output")
)
parser.add_argument(
'--ip-version',
type=int,
choices=[4, 6],
metavar='<ip-version>',
dest='ip_version',
help=_("List address scopes of given IP version networks (4 or 6)")
)
parser.add_argument(
'--project',
metavar="<project>",
help=_("List address scopes according to their project "
"(name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
shared_group = parser.add_mutually_exclusive_group()
shared_group.add_argument(
'--share',
action='store_true',
help=_("List address scopes shared between projects")
)
shared_group.add_argument(
'--no-share',
action='store_true',
help=_("List address scopes not shared between projects")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
columns = (
'id',
'name',
'ip_version',
'is_shared',
'project_id',
)
column_headers = (
'ID',
'Name',
'IP Version',
'Shared',
'Project',
)
attrs = {}
if parsed_args.name:
attrs['name'] = parsed_args.name
if parsed_args.ip_version:
attrs['ip_version'] = parsed_args.ip_version
if parsed_args.share:
attrs['is_shared'] = True
if parsed_args.no_share:
attrs['is_shared'] = False
if 'project' in parsed_args and parsed_args.project is not None:
identity_client = self.app.client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['tenant_id'] = project_id
attrs['project_id'] = project_id
data = client.address_scopes(**attrs)
return (column_headers,
(utils.get_item_properties(
s, columns, formatters={},
) for s in data))
# TODO(rtheis): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class SetAddressScope(common.NeutronCommandWithExtraArgs):
_description = _("Set address scope properties")
def get_parser(self, prog_name):
parser = super(SetAddressScope, self).get_parser(prog_name)
parser.add_argument(
'address_scope',
metavar="<address-scope>",
help=_("Address scope to modify (name or ID)")
)
parser.add_argument(
'--name',
metavar="<name>",
help=_('Set address scope name')
)
share_group = parser.add_mutually_exclusive_group()
share_group.add_argument(
'--share',
action='store_true',
help=_('Share the address scope between projects')
)
share_group.add_argument(
'--no-share',
action='store_true',
help=_('Do not share the address scope between projects')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
obj = client.find_address_scope(
parsed_args.address_scope,
ignore_missing=False)
attrs = {}
if parsed_args.name is not None:
attrs['name'] = parsed_args.name
if parsed_args.share:
attrs['shared'] = True
if parsed_args.no_share:
attrs['shared'] = False
attrs.update(
self._parse_extra_properties(parsed_args.extra_properties))
client.update_address_scope(obj, **attrs)
class ShowAddressScope(command.ShowOne):
_description = _("Display address scope details")
def get_parser(self, prog_name):
parser = super(ShowAddressScope, self).get_parser(prog_name)
parser.add_argument(
'address_scope',
metavar="<address-scope>",
help=_("Address scope to display (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
obj = client.find_address_scope(
parsed_args.address_scope,
ignore_missing=False)
display_columns, columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns, formatters={})
return (display_columns, data)
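# A hedged, illustrative sketch (not part of python-openstackclient): shows the
# attribute dict that _get_attrs builds from parsed CLI arguments. An
# argparse.Namespace stands in for the osc_lib parsed arguments; no OpenStack
# connection is needed because --project is left unset.
if __name__ == "__main__":
    import argparse
    demo_args = argparse.Namespace(name="scope1", ip_version=6,
                                   share=True, no_share=False,
                                   project=None, project_domain=None)
    # Expected output: {'name': 'scope1', 'ip_version': 6, 'shared': True}
    print(_get_attrs(None, demo_args))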
|
pex/tools/commands/digraph.py
|
zmanji/pex
| 2,160 |
71175
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Dict, IO, List, Mapping, Optional, Tuple
Value = Optional[str]
Attributes = Mapping[str, Value]
class DiGraph(object):
"""Renders a dot digraph built up from nodes and edges."""
@staticmethod
def _render_ID(value):
# type: (str) -> str
# See https://graphviz.org/doc/info/lang.html for the various forms of `ID`.
return '"{}"'.format(value.replace('"', '\\"'))
@classmethod
def _render_a_list(cls, attributes):
# type: (Attributes) -> str
# See https://graphviz.org/doc/info/lang.html for the `a_list` production.
return ", ".join(
"{name}={value}".format(name=name, value=cls._render_ID(value))
for name, value in attributes.items()
if value is not None
)
def __init__(
self,
name, # type: str
strict=True, # type: bool
**attributes # type: Value
):
# type: (...) -> None
"""
:param name: A name for the graph.
:param strict: Whether or not duplicate edges are collapsed into one edge.
"""
self._name = name
self._strict = strict
self._attributes = attributes # type: Attributes
self._nodes = {} # type: Dict[str, Attributes]
self._edges = [] # type: List[Tuple[str, str, Attributes]]
@property
def name(self):
return self._name
def add_node(
self,
name, # type: str
**attributes # type: Value
):
# type: (...) -> None
"""Adds a node to the graph.
This is done implicitly by add_edge for the nodes the edge connects, but may be useful when
the node is either isolated or else needs to be decorated with attributes.
:param name: The name of the node.
"""
self._nodes[name] = attributes
def add_edge(
self,
start, # type: str
end, # type: str
**attributes # type: Value
):
# type: (...) -> None
"""
:param start: The name of the start node.
:param end: The name of the end node.
:param attributes: Any extra attributes for the edge connecting the start node to the end
node.
"""
self._edges.append((start, end, attributes))
def emit(self, out):
# type: (IO[str]) -> None
"""Render the current state of this digraph to the given `out` stream.
:param out: A stream to render this digraph to. N.B.: Will not be flushed or closed.
"""
def emit_attr_stmt(
stmt, # type: str
attributes, # type: Attributes
):
# type: (...) -> None
# See https://graphviz.org/doc/info/lang.html for the `attr_stmt` production.
out.write(
"{statement} [{a_list}];\n".format(
statement=stmt, a_list=self._render_a_list(attributes)
)
)
if self._strict:
out.write("strict ")
out.write("digraph {name} {{\n".format(name=self._render_ID(self._name)))
emit_attr_stmt("graph", self._attributes)
for node, attributes in self._nodes.items():
emit_attr_stmt(self._render_ID(node), attributes)
for start, end, attributes in self._edges:
emit_attr_stmt(
"{start} -> {end}".format(start=self._render_ID(start), end=self._render_ID(end)),
attributes,
)
out.write("}\n")
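# A short, hedged usage sketch (not part of the upstream module): build a tiny
# dependency graph and render it as DOT text; the node names are placeholders.
if __name__ == "__main__":
    import sys
    graph = DiGraph("deps", rankdir="LR")
    graph.add_node("pex", label="pex 2.x")
    graph.add_edge("pex", "setuptools", style="dashed")
    graph.emit(sys.stdout)  # prints: strict digraph "deps" { ... }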
|
models/models.py
|
awdxqqqaz/MoCoGAN-HD
| 170 |
71185
|
"""
Copyright Snap Inc. 2021. This sample code is made available by Snap Inc. for informational purposes only.
No license, whether implied or otherwise, is granted in or to such code (including any rights to copy, modify,
publish, distribute and/or commercialize such code), unless you have entered into a separate agreement for such rights.
Such code is provided as-is, without warranty of any kind, express or implied, including any warranties of merchantability,
title, fitness for a particular purpose, non-infringement, or that such code is free of defects, errors or viruses.
In no event will Snap Inc. be liable for any damages or losses of any kind arising from the sample code or your use thereof.
"""
import os
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from .rnn import RNNModule
from models.stylegan2 import model
def load_checkpoints(path, gpu):
if gpu is None:
ckpt = torch.load(path)
else:
loc = 'cuda:{}'.format(gpu)
ckpt = torch.load(path, map_location=loc)
return ckpt
def model_to_gpu(model, opt):
if opt.isTrain:
if opt.gpu is not None:
model.cuda(opt.gpu)
model = DDP(model,
device_ids=[opt.gpu],
find_unused_parameters=True)
else:
model.cuda()
model = DDP(model, find_unused_parameters=True)
else:
model.cuda()
model = nn.DataParallel(model)
return model
def create_model(opt):
ckpt = load_checkpoints(opt.img_g_weights, opt.gpu)
modelG = model.Generator(size=opt.style_gan_size,
style_dim=opt.latent_dimension,
n_mlp=opt.n_mlp)
modelG.load_state_dict(ckpt['g_ema'], strict=False)
modelG.eval()
for p in modelG.parameters():
p.requires_grad = False
if opt.isPCA:
modelS = modelG.style
modelS.eval()
if opt.gpu is not None:
modelS.cuda(opt.gpu)
return modelS
pca_com_path = os.path.join(opt.save_pca_path, 'pca_comp.npy')
pca_stdev_path = os.path.join(opt.save_pca_path, 'pca_stdev.npy')
modelR = RNNModule(pca_com_path,
pca_stdev_path,
z_dim=opt.latent_dimension,
h_dim=opt.h_dim,
n_pca=opt.n_pca,
w_residual=opt.w_residual)
if opt.isTrain:
from .D_3d import ModelD_3d
modelR.init_optim(opt.lr, opt.beta1, opt.beta2)
modelG.modelR = modelR
modelD_3d = ModelD_3d(opt)
if opt.cross_domain:
from .D_img import ModelD_img
else:
from .D import ModelD_img
modelD_img = ModelD_img(opt)
modelG = model_to_gpu(modelG, opt)
modelD_3d = model_to_gpu(modelD_3d, opt)
modelD_img = model_to_gpu(modelD_img, opt)
if opt.load_pretrain_path != 'None' and opt.load_pretrain_epoch > -1:
opt.checkpoints_dir = opt.load_pretrain_path
m_name = '/modelR_epoch_%d.pth' % (opt.load_pretrain_epoch)
ckpt = load_checkpoints(opt.load_pretrain_path + m_name, opt.gpu)
modelG.module.modelR.load_state_dict(ckpt)
m_name = '/modelD_img_epoch_%d.pth' % (opt.load_pretrain_epoch)
ckpt = load_checkpoints(opt.load_pretrain_path + m_name, opt.gpu)
modelD_img.load_state_dict(ckpt)
m_name = '/modelD_3d_epoch_%d.pth' % (opt.load_pretrain_epoch)
ckpt = load_checkpoints(opt.load_pretrain_path + m_name, opt.gpu)
modelD_3d.load_state_dict(ckpt)
return [modelG, modelD_img, modelD_3d]
else:
modelR.eval()
for p in modelR.parameters():
p.requires_grad = False
modelG.modelR = modelR
modelG = model_to_gpu(modelG, opt)
if opt.load_pretrain_path != 'None' and opt.load_pretrain_epoch > -1:
m_name = '/modelR_epoch_%d.pth' % (opt.load_pretrain_epoch)
ckpt = load_checkpoints(opt.load_pretrain_path + m_name, opt.gpu)
modelG.module.modelR.load_state_dict(ckpt)
return modelG
|
code/dx/dx_valuation.py
|
meaninginuse/py4fi2nd
| 893 |
71192
|
#
# DX Package
#
# Valuation Classes
#
# dx_valuation.py
#
# Python for Finance, 2nd ed.
# (c) Dr. <NAME>
#
import numpy as np
import pandas as pd
from dx_simulation import *
from valuation_class import valuation_class
from valuation_mcs_european import valuation_mcs_european
from valuation_mcs_american import valuation_mcs_american
|
gitfs/repository.py
|
whywaita/gitfs
| 921 |
71212
|
<reponame>whywaita/gitfs
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import namedtuple
from shutil import rmtree
from stat import S_IFDIR, S_IFREG, S_IFLNK
from pygit2 import (
clone_repository,
Signature,
GIT_SORT_TOPOLOGICAL,
GIT_FILEMODE_TREE,
GIT_STATUS_CURRENT,
GIT_FILEMODE_LINK,
GIT_FILEMODE_BLOB,
GIT_BRANCH_REMOTE,
GIT_BRANCH_LOCAL,
GIT_FILEMODE_BLOB_EXECUTABLE,
)
from six import iteritems
from gitfs.cache import CommitCache
from gitfs.log import log
from gitfs.utils.path import split_path_into_components
from gitfs.utils.commits import CommitsList
DivergeCommits = namedtuple(
"DivergeCommits", ["common_parent", "first_commits", "second_commits"]
)
class Repository(object):
def __init__(self, repository, commits=None):
self._repo = repository
self.commits = commits or CommitCache(self)
self.behind = False
def __getitem__(self, item):
"""
Proxy method for pygit2.Repository
"""
return self._repo[item]
def __getattr__(self, attr):
"""
Proxy method for pygit2.Repository
"""
if attr not in self.__dict__:
return getattr(self._repo, attr)
else:
return self.__dict__[attr]
def ahead(self, upstream, branch):
ahead, _ = self.diverge(upstream, branch)
return ahead
def diverge(self, upstream, branch):
reference = "{}/{}".format(upstream, branch)
remote_branch = self._repo.branches.remote.get(reference)
local_branch = self._repo.branches.local.get(branch)
# TODO: check for missing branches
if remote_branch.target == local_branch.target:
return False, False
diverge_commits = self.find_diverge_commits(local_branch, remote_branch)
behind = len(diverge_commits.second_commits) > 0
ahead = len(diverge_commits.first_commits) > 0
return ahead, behind
def checkout(self, ref, *args, **kwargs):
result = self._repo.checkout(ref, *args, **kwargs)
# update ignore cache after a checkout
self.ignore.update()
status = self._repo.status()
for path, status in iteritems(status):
# path is in current status, move on
if status == GIT_STATUS_CURRENT:
continue
# check if file exists or not
full_path = self._full_path(path)
if path not in self._repo.index:
if path not in self.ignore:
try:
os.unlink(full_path)
except OSError:
# path points to a directory containing untracked files
rmtree(
full_path,
onerror=lambda function, fpath, excinfo: log.info(
"Repository: Checkout couldn't delete %s", fpath
),
)
continue
# check files stats
stats = self.get_git_object_default_stats(ref, path)
current_stat = os.lstat(full_path)
if stats["st_mode"] != current_stat.st_mode:
try:
os.chmod(full_path, current_stat.st_mode)
except OSError:
log.info("Repository: Checkout couldn't chmod %s", full_path)
self._repo.index.add(self._sanitize(path))
return result
def _sanitize(self, path):
if path is not None and path.startswith("/"):
path = path[1:]
return path
def push(self, upstream, branch, credentials):
""" Push changes from a branch to a remote
Examples::
repo.push("origin", "master")
"""
remote = self.get_remote(upstream)
remote.push(["refs/heads/%s" % branch], callbacks=credentials)
def fetch(self, upstream, branch_name, credentials):
"""
Fetch from remote and return True if we are behind or False otherwise
"""
remote = self.get_remote(upstream)
remote.fetch(callbacks=credentials)
_, behind = self.diverge(upstream, branch_name)
self.behind = behind
return behind
def commit(self, message, author, commiter, parents=None, ref="HEAD"):
""" Wrapper for create_commit. It creates a commit from a given ref
(default is HEAD)
"""
status = self._repo.status()
if status == {}:
return None
# sign the author
author = Signature(author[0], author[1])
commiter = Signature(commiter[0], commiter[1])
# write index locally
tree = self._repo.index.write_tree()
self._repo.index.write()
# get parent
if parents is None:
parents = [self._repo.revparse_single(ref).id]
return self._repo.create_commit(ref, author, commiter, message, tree, parents)
@classmethod
def clone(cls, remote_url, path, branch=None, credentials=None):
"""Clone a repo into a given path and update the working directory with
a checkout to head (GIT_CHECKOUT_SAFE_CREATE)
:param str remote_url: URL of the repository to clone
:param str path: Local path to clone into
:param str branch: Branch to checkout after the
clone. The default is to use the remote's default branch.
"""
try:
repo = clone_repository(
remote_url, path, checkout_branch=branch, callbacks=credentials
)
except Exception:
log.error("Error on cloning the repository: ", exc_info=True)
raise
repo.checkout_head()
return cls(repo)
def _is_searched_entry(self, entry_name, searched_entry, path_components):
"""
Checks if a tree entry is the one that is being searched for. For
that, the name has to correspond and it has to be the last element
in the path_components list (this means that the path corresponds
exactly).
:param entry_name: the name of the tree entry
:param searched_entry: the name of the object that is being searched
for
:type searched_entry: str
:param path_components: the path of the object being searched for
:type path_components: list
"""
return (
entry_name == searched_entry
and len(path_components) == 1
and entry_name == path_components[0]
)
def _get_git_object(self, tree, obj_name, path_components, modifier):
"""
It recursively searches for the object in the repository. To declare
an object as found, the name and the relative path have to correspond.
It also includes the relative path as a condition for success, to avoid
finding an object with the correct name but with a wrong location.
:param tree: a `pygit2.Tree` instance
:param entry_name: the name of the object
:type entry_name: str
:param path_components: the path of the object being searched for as
a list (e.g: for '/a/b/c/file.txt' => ['a', 'b', 'c', 'file.txt'])
:type path_components: list
:param modifier: a function used to retrieve some specific
characteristic of the git object
:type modifier: function
:returns: an instance corresponding to the object that is being
searched for in case of success, or None otherwise.
:rtype: one of the following:
an instance of `pygit2.Tree`
an instance of `pygit2.Blob`
None
"""
git_obj = None
for entry in tree:
if self._is_searched_entry(entry.name, obj_name, path_components):
return modifier(entry)
elif entry.filemode == GIT_FILEMODE_TREE:
git_obj = self._get_git_object(
self._repo[entry.id], obj_name, path_components[1:], modifier
)
if git_obj:
return git_obj
return git_obj
def get_git_object_type(self, tree, path):
"""
Returns the filemode of the git object with the relative path <path>.
:param tree: a `pygit2.Tree` instance
:param path: the relative path of the object
:type entry_name: str
:returns: the filemode for the entry in case of success
(which can be one of the following) or None otherwise.
0 (0000000) GIT_FILEMODE_NEW
16384 (0040000) GIT_FILEMODE_TREE
33188 (0100644) GIT_FILEMODE_BLOB
33261 (0100755) GIT_FILEMODE_BLOB_EXECUTABLE
40960 (0120000) GIT_FILEMODE_LINK
57344 (0160000) GIT_FILEMODE_COMMIT
:rtype: int, None
"""
path_components = split_path_into_components(path)
try:
return self._get_git_object(
tree, path_components[-1], path_components, lambda entry: entry.filemode
)
except:
return GIT_FILEMODE_TREE
def get_git_object(self, tree, path):
"""
Returns the git object with the relative path <path>.
:param tree: a `pygit2.Tree` instance
:param path: the relative path of the object
:type path: str
:returns: an instance corresponding to the object that is being
searched for in case of success, or None otherwise.
:rtype: one of the following:
an instance of `pygit2.Tree`
an instance of `pygit2.Blob`
None
"""
# It acts as a proxy for the _get_git_object method, which
# does the actual searching.
path_components = split_path_into_components(path)
return self._get_git_object(
tree,
path_components[-1],
path_components,
lambda entry: self._repo[entry.id],
)
def get_git_object_default_stats(self, ref, path):
types = {
GIT_FILEMODE_LINK: {"st_mode": S_IFLNK | 0o444},
GIT_FILEMODE_TREE: {"st_mode": S_IFDIR | 0o555, "st_nlink": 2},
GIT_FILEMODE_BLOB: {"st_mode": S_IFREG | 0o444},
GIT_FILEMODE_BLOB_EXECUTABLE: {"st_mode": S_IFREG | 0o555},
}
if path == "/":
return types[GIT_FILEMODE_TREE]
obj_type = self.get_git_object_type(ref, path)
if obj_type is None:
return obj_type
stats = types[obj_type]
if obj_type in [GIT_FILEMODE_BLOB, GIT_FILEMODE_BLOB_EXECUTABLE]:
stats["st_size"] = self.get_blob_size(ref, path)
return stats
def get_blob_size(self, tree, path):
"""
Returns the size of the data contained by a blob object
with the relative path <path>.
:param tree: a `pygit2.Tree` instance
:param path: the relative path of the object
:type path: str
:returns: the size of data contained by the blob object.
:rtype: int
"""
return self.get_git_object(tree, path).size
def get_blob_data(self, tree, path):
"""
Returns the data contained by a blob object with the relative
path <path>.
:param tree: a `pygit2.Tree` instance
:param path: the relative path of the object
:type path: str
:returns: the data contained by the blob object.
:rtype: str
"""
return self.get_git_object(tree, path).data
def get_commit_dates(self):
"""
Walk through all commits from current repo in order to compose the
_history_ directory.
"""
return list(self.commits.keys())
def get_commits_by_date(self, date):
"""
Retrieves all the commits from a particular date.
:param date: date with the format: yyyy-mm-dd
:type date: str
:returns: a list containing the commits for that day. Each list item
will have the format: hh:mm:ss-<short_sha1>, where short_sha1 is
the short sha1 of the commit (first 10 characters).
:rtype: list
"""
return list(map(str, self.commits[date]))
def walk_branches(self, sort, *branches):
"""
Simple iterator which takes a sorting strategy and some branches and
iterates through those branches one commit at a time, yielding a list
of commits
:param sort: a sorting option `GIT_SORT_NONE, GIT_SORT_TOPOLOGICAL,
GIT_SORT_TIME, GIT_SORT_REVERSE`. Default is 'GIT_SORT_TOPOLOGICAL'
:param branches: branches to iterate through
:type branches: list
:returns: yields a list of commits corresponding to given branches
:rtype: list
"""
iterators = [iter(self._repo.walk(branch.target, sort)) for branch in branches]
stop_iteration = [False for branch in branches]
commits = []
for iterator in iterators:
try:
commit = next(iterator)
except StopIteration:
commit = None
commits.append(commit)
yield (commit for commit in commits)
while not all(stop_iteration):
for index, iterator in enumerate(iterators):
try:
commit = next(iterator)
commits[index] = commit
except StopIteration:
stop_iteration[index] = True
if not all(stop_iteration):
yield (commit for commit in commits)
def remote_head(self, upstream, branch):
ref = "%s/%s" % (upstream, branch)
remote = self._repo.lookup_branch(ref, GIT_BRANCH_REMOTE)
return remote.get_object()
def get_remote(self, name):
""" Retrieve a remote by name. Raise a ValueError if the remote was not
added to repo
Examples::
repo.get_remote("fork")
"""
remote = [remote for remote in self._repo.remotes if remote.name == name]
if not remote:
raise ValueError("Missing remote")
return remote[0]
def _full_path(self, partial):
if partial.startswith("/"):
partial = partial[1:]
return os.path.join(self._repo.workdir, partial)
def find_diverge_commits(self, first_branch, second_branch):
"""
Take two branches and find diverge commits.
         2--3--4--5
        /
    1--+             Return:
        \              - common parent: 1
         6             - first list of commits: (2, 3, 4, 5)
                       - second list of commits: (6)
:param first_branch: first branch to look for common parent
:type first_branch: `pygit2.Branch`
:param second_branch: second branch to look for common parent
:type second_branch: `pygit2.Branch`
:returns: a namedtuple with common parent, a list of first's branch
commits and another list with second's branch commits
:rtype: DivergeCommits (namedtuple)
"""
common_parent = None
first_commits = CommitsList()
second_commits = CommitsList()
walker = self.walk_branches(GIT_SORT_TOPOLOGICAL, first_branch, second_branch)
for first_commit, second_commit in walker:
if first_commit in second_commits or second_commit in first_commits:
break
if first_commit not in first_commits:
first_commits.append(first_commit)
if second_commit not in second_commits:
second_commits.append(second_commit)
if second_commit.hex == first_commit.hex:
break
try:
index = second_commits.index(first_commit)
except ValueError:
pass
else:
second_commits = second_commits[:index]
common_parent = first_commit
try:
index = first_commits.index(second_commit)
except ValueError:
pass
else:
first_commits = first_commits[:index]
common_parent = second_commit
return DivergeCommits(common_parent, first_commits, second_commits)
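# Hedged usage sketch (comments only, not executed): wrapping a fresh clone.
# The URL, local path and agent key below are placeholders.
#
#   from pygit2 import KeypairFromAgent, RemoteCallbacks
#   callbacks = RemoteCallbacks(credentials=KeypairFromAgent("git"))
#   repo = Repository.clone("git@example.com:org/project.git", "/tmp/project",
#                           branch="master", credentials=callbacks)
#   behind = repo.fetch("origin", "master", callbacks)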
|
matrixprofile/algorithms/mstomp.py
|
MORE-EU/matrixprofile
| 262 |
71235
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import logging
import numpy as np
from matrixprofile import core
logger = logging.getLogger(__name__)
_EPS = 1e-14
def _batch_compute(args):
"""
Internal function to compute a batch of the time series in parallel.
Parameters
----------
args : tuple
Various attributes used for computing the batch.
(
batch_start : int
The starting index for this batch.
batch_end : int
The ending index for this batch.
ts : array_like
The time series to compute the matrix profile for.
query : array_like
The query.
window_size : int
The size of the window to compute the profile over.
data_length : int
The number of elements in the time series.
profile_length : int
The number of elements that will be in the final matrix
profile.
exclusion_zone : int
Used to exclude trivial matches.
data_mu : array_like
The moving average over the time series for the given window
size.
data_sig : array_like
The moving standard deviation over the time series for the
given window size.
first_product : array_like
The first sliding dot product for the time series over index
0 to window_size.
skip_locs : array_like
Indices that should be skipped for distance profile calculation
due to a nan or inf.
)
Returns
-------
dict : profile
The matrix profile, left and right matrix profiles and their respective
profile indices.
>>> {
>>> 'mp': The matrix profile,
>>> 'pi': The matrix profile 1NN indices,
>>> 'rmp': The right matrix profile,
>>> 'rpi': The right matrix profile 1NN indices,
>>> 'lmp': The left matrix profile,
>>> 'lpi': The left matrix profile 1NN indices,
>>> }
"""
num_dim, batch_start, batch_end, ts, query, window_size, data_length, \
profile_length, exclusion_zone, data_mu, data_sig, \
first_product, skip_locs, profile_dimension, return_dimension = args
# initialize matrices
matrix_profile = np.full((num_dim, profile_length), np.inf)
profile_index = np.full((num_dim, profile_length), 0)
left_matrix_profile = None
right_matrix_profile = None
left_profile_index = None
right_profile_index = None
left_matrix_profile = np.copy(matrix_profile)
right_matrix_profile = np.copy(matrix_profile)
left_profile_index = np.copy(profile_index)
right_profile_index = np.copy(profile_index)
# with batch 0 we do not need to recompute the dot product
# however with other batch windows, we need the previous iterations sliding
# dot product
last_product = np.copy(first_product)
if batch_start == 0:
first_window = query[:, batch_start:batch_start + window_size]
else:
first_window = query[:, batch_start - 1:batch_start + window_size - 1]
for i in range(num_dim):
last_product[i, :] = core.fft_convolve(ts[i, :], first_window[i, :])
query_sum = np.sum(first_window, axis=1)
query_2sum = np.sum(first_window**2, axis=1)
query_mu, query_sig = np.empty(num_dim), np.empty(num_dim)
for i in range(num_dim):
query_mu[i], query_sig[i] = core.moving_avg_std(first_window[i, :], window_size)
drop_value = np.empty(num_dim)
for i in range(num_dim):
drop_value[i] = first_window[i, 0]
distance_profile = np.empty((num_dim, profile_length))
# make sure to compute inclusively from batch start to batch end
# otherwise there are gaps in the profile
if batch_end < profile_length:
batch_end += 1
# iteratively compute distance profile and update with element-wise mins
for i in range(batch_start, batch_end):
# check for nan or inf and skip
if skip_locs[i]:
continue
for j in range(num_dim):
if i == 0:
query_window = query[j, i:i + window_size]
distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
data_sig[j, :], query_mu[j], query_sig[j])
# apply exclusion zone
distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, 0,
distance_profile[j, :])
else:
query_window = query[j, i:i + window_size]
query_sum[j] = query_sum[j] - drop_value[j] + query_window[-1]
query_2sum[j] = query_2sum[j] - drop_value[j]**2 + query_window[-1]**2
query_mu[j] = query_sum[j] / window_size
query_sig2 = query_2sum[j] / window_size - query_mu[j]**2
if query_sig2 < _EPS:
query_sig2 = _EPS
query_sig[j] = np.sqrt(query_sig2)
last_product[j, 1:] = last_product[j, 0:data_length - window_size] \
- ts[j, 0:data_length - window_size] * drop_value[j] \
+ ts[j, window_size:] * query_window[-1]
last_product[j, 0] = first_product[j, i]
distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
data_sig[j, :], query_mu[j], query_sig[j])
# apply the exclusion zone
distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, i,
distance_profile[j, :])
distance_profile[j, distance_profile[j, :] < _EPS] = 0
drop_value[j] = query_window[0]
if np.any(query_sig < _EPS):
continue
distance_profile[:, skip_locs] = np.inf
distance_profile[data_sig < np.sqrt(_EPS)] = np.inf
distance_profile_dim = np.argsort(distance_profile, axis=0)
distance_profile_sort = np.sort(distance_profile, axis=0)
distance_profile_cumsum = np.zeros(profile_length)
for j in range(num_dim):
distance_profile_cumsum += distance_profile_sort[j, :]
distance_profile_mean = distance_profile_cumsum / (j + 1)
# update the matrix profile
indices = (distance_profile_mean < matrix_profile[j, :])
matrix_profile[j, indices] = distance_profile_mean[indices]
profile_index[j, indices] = i
if return_dimension:
profile_dimension[j][:, indices] = distance_profile_dim[:j + 1, indices]
# update the left and right matrix profiles
# find differences, shift left and update
indices = distance_profile_mean[i:] < left_matrix_profile[j, i:]
falses = np.zeros(i).astype('bool')
indices = np.append(falses, indices)
left_matrix_profile[j, indices] = distance_profile_mean[indices]
left_profile_index[j, np.argwhere(indices)] = i
# find differences, shift right and update
indices = distance_profile_mean[0:i] < right_matrix_profile[j, 0:i]
falses = np.zeros(profile_length - i).astype('bool')
indices = np.append(indices, falses)
right_matrix_profile[j, indices] = distance_profile_mean[indices]
right_profile_index[j, np.argwhere(indices)] = i
return {
'mp': matrix_profile,
'pi': profile_index,
'pd': profile_dimension,
'rmp': right_matrix_profile,
'rpi': right_profile_index,
'lmp': left_matrix_profile,
'lpi': left_profile_index,
}
def mstomp(ts, window_size, return_dimension=False, n_jobs=1):
"""
Computes multidimensional matrix profile with mSTAMP (stomp based). Ray or Python's multiprocessing library may be used. When you have initialized Ray on your machine, it takes priority over using Python's multiprocessing.
Parameters
----------
ts : array_like, shape (n_dim, seq_len)
The multidimensional time series to compute the multidimensional matrix profile for.
window_size: int
The size of the window to compute the matrix profile over.
return_dimension : bool
if True, also return the matrix profile dimension. It takes O(d^2 n)
to store and O(d^2 n^2) to compute. (default is False)
n_jobs : int, Default = 1
Number of cpu cores to use.
Returns
-------
dict : profile
A MatrixProfile data structure.
>>> {
>>> 'mp': The matrix profile,
>>> 'pi': The matrix profile 1NN indices,
>>> 'rmp': The right matrix profile,
>>> 'rpi': The right matrix profile 1NN indices,
>>> 'lmp': The left matrix profile,
>>> 'lpi': The left matrix profile 1NN indices,
>>> 'metric': The distance metric computed for the mp,
>>> 'w': The window size used to compute the matrix profile,
>>> 'ez': The exclusion zone used,
>>> 'sample_pct': Percentage of samples used in computing the MP,
>>> 'data': {
>>> 'ts': Time series data,
>>> 'query': Query data if supplied
>>> }
>>> 'class': "MatrixProfile"
>>> 'algorithm': "stomp_based_mstamp"
>>> }
Raises
------
ValueError
If window_size < 4.
If window_size > time series length / 2.
If ts is not a list or np.array.
"""
query = ts
# data conversion to np.array
ts = core.to_np_array(ts)
query = core.to_np_array(query)
if window_size < 4:
error = "window size must be at least 4."
raise ValueError(error)
if ts.ndim == 1:
ts = np.expand_dims(ts, axis=0)
query = np.expand_dims(query, axis=0)
if window_size > query.shape[1] / 2:
error = "Time series is too short relative to desired window size"
raise ValueError(error)
# multiprocessing or single threaded approach
if n_jobs == 1:
pass
else:
n_jobs = core.valid_n_jobs(n_jobs)
# precompute some common values - profile length, query length etc.
profile_length = core.get_profile_length(ts, query, window_size)
data_length = ts.shape[1]
query_length = query.shape[1]
num_queries = query_length - window_size + 1
exclusion_zone = int(np.ceil(window_size / 2.0))
num_dim = ts.shape[0]
# find skip locations, clean up nan and inf in the ts and query
skip_locs = core.find_multid_skip_locations(ts, profile_length, window_size)
ts = core.clean_nan_inf(ts)
query = core.clean_nan_inf(query)
# initialize matrices
matrix_profile = np.full((num_dim, profile_length), np.inf)
profile_index = np.full((num_dim, profile_length), 0)
# profile_index = np.full((num_dim, profile_length), -1)
# compute left and right matrix profile when similarity join does not happen
left_matrix_profile = np.copy(matrix_profile)
right_matrix_profile = np.copy(matrix_profile)
left_profile_index = np.copy(profile_index)
right_profile_index = np.copy(profile_index)
profile_dimension = []
if return_dimension:
n_jobs = 1
for i in range(num_dim):
profile_dimension.append(np.empty((i + 1, profile_length), dtype=int))
# precompute some statistics on ts
data_mu, data_sig, first_product = np.empty((num_dim, profile_length)), np.empty(
(num_dim, profile_length)), np.empty((num_dim, profile_length))
for i in range(num_dim):
data_mu[i, :], data_sig[i, :] = core.moving_avg_std(ts[i, :], window_size)
first_window = query[i, 0:window_size]
first_product[i, :] = core.fft_convolve(ts[i, :], first_window)
batch_windows = []
results = []
# batch compute with multiprocessing
args = []
for start, end in core.generate_batch_jobs(num_queries, n_jobs):
args.append((num_dim, start, end, ts, query, window_size, data_length, profile_length, exclusion_zone, data_mu,
data_sig, first_product, skip_locs, profile_dimension, return_dimension))
batch_windows.append((start, end))
# we are running single threaded stomp - no need to initialize any
# parallel environments.
if n_jobs == 1 or len(args) == 1:
results.append(_batch_compute(args[0]))
else:
# parallelize
with core.mp_pool()(n_jobs) as pool:
results = pool.map(_batch_compute, args)
# now we combine the batch results
if len(results) == 1:
result = results[0]
matrix_profile = result['mp']
profile_index = result['pi']
profile_dimension = result['pd']
left_matrix_profile = result['lmp']
left_profile_index = result['lpi']
right_matrix_profile = result['rmp']
right_profile_index = result['rpi']
else:
for index, result in enumerate(results):
start = batch_windows[index][0]
end = batch_windows[index][1]
# update the matrix profile
indices = result['mp'] < matrix_profile
matrix_profile[indices] = result['mp'][indices]
profile_index[indices] = result['pi'][indices]
# update the left and right matrix profiles
indices = result['lmp'] < left_matrix_profile
left_matrix_profile[indices] = result['lmp'][indices]
left_profile_index[indices] = result['lpi'][indices]
indices = result['rmp'] < right_matrix_profile
right_matrix_profile[indices] = result['rmp'][indices]
right_profile_index[indices] = result['rpi'][indices]
return {
'mp': matrix_profile,
'pi': profile_index,
'pd': profile_dimension,
'rmp': right_matrix_profile,
'rpi': right_profile_index,
'lmp': left_matrix_profile,
'lpi': left_profile_index,
'metric': 'euclidean',
'w': window_size,
'ez': exclusion_zone,
'sample_pct': 1,
'data': {
'ts': ts,
'query': query
},
'class': "MatrixProfile",
'algorithm': "stomp_based_mstamp"
}
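if __name__ == "__main__":
    # Hedged usage sketch, assuming the matrixprofile package and its core
    # helpers are importable: profile a small random 2-dimensional series.
    demo_ts = np.random.uniform(size=(2, 64))
    demo_profile = mstomp(demo_ts, window_size=8)
    print(demo_profile['mp'].shape)  # (2, 57), i.e. one row per dimension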
|
third_party/tests/IbexGoogle/scripts/sail_log_to_trace_csv.py
|
parzival3/Surelog
| 156 |
71236
|
"""
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Convert sail sim log to standard riscv instruction trace format
"""
import argparse
import os
import re
import sys
import logging
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
from riscv_trace_csv import *
START_RE = re.compile(r"\[4\] \[M\]: 0x.*00001010")
END_RE = re.compile(r"ecall")
INSTR_RE = re.compile(r"\[[0-9].*\] \[(?P<pri>.)\]: 0x(?P<addr>[A-F0-9]+?)"
" \(0x(?P<bin>[A-F0-9]+?)\) (?P<instr>.+?$)")
RD_RE = re.compile(r"x(?P<reg>[0-9]+?) <- 0x(?P<val>[A-F0-9]*)")
def process_sail_sim_log(sail_log, csv):
"""Process SAIL RISCV simulation log.
Extract instruction and affected register information from sail simulation
log and save to a list.
"""
logging.info("Processing sail log : %s" % sail_log)
instr_cnt = 0
sail_instr = ""
with open(sail_log, "r") as f, open(csv, "w") as csv_fd:
search_start = 0
instr_start = 0
trace_csv = RiscvInstructionTraceCsv(csv_fd)
trace_csv.start_new_trace()
instr = None
for line in f:
# Extract instruction information
m = START_RE.search(line)
if m:
search_start = 1
continue
m = END_RE.search(line)
if m:
break
if search_start:
instr = INSTR_RE.search(line)
if instr:
instr_start = 1
pri = instr.group("pri")
addr = instr.group("addr").lower()
binary = instr.group("bin").lower()
instr_str = instr.group("instr")
continue
if instr_start:
m = RD_RE.search(line)
if m:
# Write the extracted instruction to a csv buffer file
instr_cnt += 1
rv_instr_trace = RiscvInstructionTraceEntry()
rv_instr_trace.rd = gpr_to_abi("x%0s" % m.group("reg"))
rv_instr_trace.rd_val = m.group("val").lower()
rv_instr_trace.privileged_mode = pri
rv_instr_trace.addr = addr
rv_instr_trace.binary = binary
rv_instr_trace.instr_str = instr_str
trace_csv.write_trace_entry(rv_instr_trace)
instr_start = 0
logging.info("Processed instruction count : %d" % instr_cnt)
def main():
instr_trace = []
# Parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("--log", type=str, help="Input sail simulation log")
parser.add_argument("--csv", type=str, help="Output trace csv file")
args = parser.parse_args()
# Process sail log
process_sail_sim_log(args.log, args.csv)
if __name__ == "__main__":
main()
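# Hedged usage note (not part of the original script); the file names below are
# placeholders:
#
#   python3 sail_log_to_trace_csv.py --log sail_sim.log --csv sail_trace.csv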
|
src/fauxmo/plugins/__init__.py
|
danielrgullo/fauxmo
| 318 |
71263
|
"""fauxmo.plugins :: Provide ABC for Fauxmo plugins."""
import abc
from typing import Callable
class FauxmoPlugin(abc.ABC):
"""Provide ABC for Fauxmo plugins.
This will become the `plugin` attribute of a `Fauxmo` instance. Its `on`
and `off` methods will be called when Alexa turns something `on` or `off`.
All keys (other than the list of `DEVICES`) from the config will be passed
into FauxmoPlugin as kwargs at initialization, which should let users do
some interesting things. However, that means users employing custom config
keys will need to override `__init__` and either set the `name` and
"private" `_port` attributes manually or pass the appropriate args to
`super().__init__()`.
"""
def __init__(self, *, name: str, port: int) -> None:
"""Initialize FauxmoPlugin.
Keyword Args:
name: Required, device name
port: Required, port that the Fauxmo associated with this plugin
should run on
Note about `port`: if not given in config, it will be set to an
apparently free port in `fauxmo.fauxmo` before FauxmoPlugin
initialization. This attribute serves no default purpose in the
FauxmoPlugin but is passed in to be accessible by user code (i.e. for
logging / debugging). Alternatively, one could accept and throw away
the passed in `port` value and generate their own port in a plugin,
since the Fauxmo device determines its port from the plugin's instance
attribute.
The `_latest_action` attribute stores the most recent successful
action, which is set by the `__getattribute__` hackery for successful
`.on()` and `.off()` commands.
"""
self._name = name
self._port = port
self._latest_action = "off"
def __getattribute__(self, name: str) -> Callable:
"""Intercept `.on()` and `.off()` to set `_latest_action` attribute."""
if name in ["on", "off"]:
success = object.__getattribute__(self, name)()
if success is True:
self._latest_action = name
return lambda: success
else:
return object.__getattribute__(self, name)
@property
def port(self) -> int:
"""Return port attribute in read-only manner."""
return self._port
@property
def name(self) -> str:
"""Return name attribute in read-only manner."""
return self._name
@abc.abstractmethod
def on(self) -> bool:
"""Run function when Alexa turns this Fauxmo device on."""
pass
@abc.abstractmethod
def off(self) -> bool:
"""Run function when Alexa turns this Fauxmo device off."""
pass
@abc.abstractmethod
def get_state(self) -> str:
"""Run function when Alexa requests device state.
Should return "on" or "off" if it can be determined, or "unknown" if
there is no mechanism for determining the device state, in which case
Alexa will complain that the device is not responding.
If state cannot be determined, a plugin can opt into this
implementation, which falls back on the `_latest_action` attribute.
It is intentionally left as an abstract method so that plugins cannot
omit a `get_state` method completely, which could lead to unexpected
behavior; instead, they should explicitly `return super().get_state()`.
"""
return self.latest_action
def close(self) -> None:
"""Run when shutting down; allows plugin to clean up state."""
pass
@property
def latest_action(self) -> str:
"""Return latest action in read-only manner.
Must be a function instead of e.g. property because it overrides
`get_state`, and therefore must be callable.
"""
return self._latest_action
def __repr__(self) -> str:
"""Provide a default human-readable representation of the plugin."""
attrs = ", ".join(f"{k}={v!r}" for k, v in self.__dict__.items())
return f"{self.__class__.__name__}({attrs})"
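# A hedged example (not part of fauxmo itself): the smallest possible concrete
# plugin, which only tracks its own state; real plugins would talk to hardware
# or a network service in on()/off().
class ExamplePlugin(FauxmoPlugin):
    """Toy plugin that relies entirely on the base class bookkeeping."""
    def on(self) -> bool:
        return True
    def off(self) -> bool:
        return True
    def get_state(self) -> str:
        # No external state to query, so fall back on _latest_action
        return super().get_state()
if __name__ == "__main__":
    plugin = ExamplePlugin(name="demo lamp", port=49152)
    plugin.on()
    print(plugin.get_state())  # "on"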
|
marrow/mailer/exc.py
|
cynepiaadmin/mailer
| 166 |
71289
|
<filename>marrow/mailer/exc.py
# encoding: utf-8
"""Exceptions used by marrow.mailer to report common errors."""
__all__ = [
'MailException',
'MailConfigurationException',
'TransportException',
'TransportFailedException',
'MessageFailedException',
'TransportExhaustedException',
'ManagerException'
]
class MailException(Exception):
"""The base for all marrow.mailer exceptions."""
pass
# Application Exceptions
class DeliveryException(MailException):
"""The base class for all public-facing exceptions."""
pass
class DeliveryFailedException(DeliveryException):
"""The message stored in args[0] could not be delivered for the reason
given in args[1]. (These can be accessed as e.msg and e.reason.)"""
def __init__(self, message, reason):
self.msg = message
self.reason = reason
super(DeliveryFailedException, self).__init__(message, reason)
# Internal Exceptions
class MailerNotRunning(MailException):
"""Raised when attempting to deliver messages using a dead interface."""
pass
class MailConfigurationException(MailException):
"""There was an error in the configuration of marrow.mailer."""
pass
class TransportException(MailException):
"""The base for all marrow.mailer Transport exceptions."""
pass
class TransportFailedException(TransportException):
"""The transport has failed to deliver the message due to an internal
error; a new instance of the transport should be used to retry."""
pass
class MessageFailedException(TransportException):
"""The transport has failed to deliver the message due to a problem with
the message itself, and no attempt should be made to retry delivery of
this message. The transport may still be re-used, however.
The reason for the failure should be the first argument.
"""
pass
class TransportExhaustedException(TransportException):
"""The transport has successfully delivered the message, but can no longer
be used for future message delivery; a new instance should be used on the
next request."""
pass
class ManagerException(MailException):
"""The base for all marrow.mailer Manager exceptions."""
pass
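# Hedged illustration (not part of marrow.mailer): how a delivery failure
# carries both the offending message and the reason; the values are placeholders.
if __name__ == "__main__":
    try:
        raise DeliveryFailedException("<message object>", "SMTP connection refused")
    except DeliveryFailedException as exc:
        print(exc.msg, "-", exc.reason)  # <message object> - SMTP connection refused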
|
mmdeploy/codebase/mmdet/core/bbox/transforms.py
|
zhiqwang/mmdeploy
| 746 |
71312
|
<filename>mmdeploy/codebase/mmdet/core/bbox/transforms.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdeploy.codebase.mmdet.deploy import clip_bboxes
def distance2bbox(points, distance, max_shape=None):
"""Rewrite `mmdet.core.bbox.transforms.distance2bbox`
Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
x1 = points[..., 0] - distance[..., 0]
y1 = points[..., 1] - distance[..., 1]
x2 = points[..., 0] + distance[..., 2]
y2 = points[..., 1] + distance[..., 3]
bboxes = torch.stack([x1, y1, x2, y2], -1)
if max_shape is not None:
# clip bboxes with dynamic `min` and `max`
x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return bboxes
return bboxes
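if __name__ == "__main__":
    # Hedged usage sketch, assuming mmdeploy and torch are importable: decode a
    # single prior point; max_shape is omitted so clip_bboxes is not exercised.
    points_demo = torch.tensor([[8.0, 8.0]])              # (N, 2) centre points
    distance_demo = torch.tensor([[2.0, 3.0, 4.0, 5.0]])  # (N, 4) l, t, r, b
    print(distance2bbox(points_demo, distance_demo))      # tensor([[ 6.,  5., 12., 13.]])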
|
alpa/global_env.py
|
alpa-projects/alpa
| 114 |
71323
|
<filename>alpa/global_env.py
"""All global configurations for this project."""
import os
class GlobalConfig:
"""The global configuration of alpa."""
def __init__(self):
########## Options of device mesh ##########
self.xla_client_mem_fraction = float(
os.environ.get("XLA_PYTHON_CLIENT_MEM_FRACTION", 0.9))
self.xla_gpu_autotune_level = 4
self.delete_remote_buffers_threshold = 200
# use AWS EFA network interface
self.use_aws_efa = os.environ.get("ALPA_USE_AWS_EFA", "").lower() in [
"true", "1"
]
########## Options of shard_parallel ##########
self.shard_parallel_sync_for_timer = False
########## Options of pipeline_parallel ##########
# Whether to debug with local runtime. The local runtime checks
# correctness of stage construction and other graph level operations.
self.debug_with_local_runtime = False
# Whether to debug with pipeshard runtime. If turned on, no physical resource
# is required until launching PipeshardExecutable.
# TODO(yonghao): deprecate it later.
self.debug_with_pipeshard_runtime = False
# Whether to use the whole cluster for stage profiling. If not, only use the given mesh.
self.profile_with_whole_ray_cluster = True
# Stage construction profiling time threshold.
self.profile_timeout = 500
# Stage construction profiling retry threshold.
# Some communication patterns may meet deadlock, so it needs retry.
self.profile_maximum_retry = 2
# Whether to forcibly set stage construction's submesh choices
self.overwrite_submesh_choices = None
########## Options of pipeline runtime ##########
self.pipeline_check_alive = True
# Whether to sync before and after the executable for accurate internal timer
self.pipeline_sync_for_timer = False
# Whether to use distributed compilation in pipeline parallel for
# each stage. Disabling it helps debug.
self.pipeline_distributed_compile = True
self.pipeline_use_signal_send_recv = False
self.precompile_resharding_tasks = True
self.use_scatter_gather = True
self.eagerly_create_communicators = True
self.use_memzero_for_gradient_accumulation = False
# Cross mesh resharding mode. Possible choices: {"send_recv", "broadcast"}
self.resharding_mode = "send_recv"
########## Options of XLA compilation ##########
self.build_random_seed = 42
# Whether to use xla while instruction for preventing CSE in rematerialization
self.remat_using_while = False
########## Options of benchmark ##########
# If true, the system is allowed to use dummy values during
# tensor creation and copy to reduce the initialization and copy time.
# This will produce wrong results but is acceptable for
# data-independent benchmarks.
self.use_dummy_value_for_benchmarking = False
########## Options of logging ##########
self.print_compilation_time = False
########## Options of ray namespace ##########
self.default_ray_namespace_prefix = "alpa-train"
self.unittest_ray_namespace_prefix = "alpa-unittest"
global_config = GlobalConfig()
# Other environment setup
is_worker = os.environ.get("ALPA_IS_WORKER", "False") == "True"
os.environ["XLA_FLAGS"] = os.environ.get(
"XLA_FLAGS", "") + " --xla_gpu_enable_async_all_reduce=false"
|
flaskbb/core/exceptions.py
|
rehee/try_discuz
| 1,140 |
71343
|
<reponame>rehee/try_discuz
# -*- coding: utf-8 -*-
"""
flaskbb.core.exceptions
~~~~~~~~~~~~~~~~~~~~~~~
Exceptions raised by flaskbb.core,
forms the root of all exceptions in
FlaskBB.
:copyright: (c) 2014-2018 the FlaskBB Team
:license: BSD, see LICENSE for more details
"""
class BaseFlaskBBError(Exception):
"""
Root exception for FlaskBB.
"""
class ValidationError(BaseFlaskBBError):
"""
Used to signal validation errors for things such as
token verification, user registration, etc.
:param str attribute: The attribute the validation error applies to,
if the validation error applies to multiple attributes or to
the entire object, this should be set to None
:param str reason: Why the attribute, collection of attributes or object
is invalid.
"""
def __init__(self, attribute, reason):
self.attribute = attribute
self.reason = reason
super(ValidationError, self).__init__((attribute, reason))
class StopValidation(BaseFlaskBBError):
"""
Raised from validation handlers to signal that
validation should end immediately and no further
processing should be done.
Can also be used to communicate all errors
raised during a validation run.
:param reasons: A sequence of `(attribute, reason)` pairs explaining
why the object is invalid.
"""
def __init__(self, reasons):
self.reasons = reasons
super(StopValidation, self).__init__(reasons)
class PersistenceError(BaseFlaskBBError):
"""
Used to catch down errors when persisting models to the database instead
of letting all issues percolate up, this should be raised from those
exceptions without smashing their tracebacks. Example::
try:
db.session.add(new_user)
db.session.commit()
except Exception:
raise PersistenceError("Couldn't save user account")
"""
def accumulate_errors(caller, validators, throw=True):
errors = []
for validator in validators:
try:
caller(validator)
except ValidationError as e:
errors.append((e.attribute, e.reason))
if len(errors) and throw:
raise StopValidation(errors)
return errors
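# Illustrative sketch (added for clarity; the validator callables below are made up, not part
# of FlaskBB): accumulate_errors invokes `caller` once per validator, collects every
# ValidationError's (attribute, reason) pair and raises them together as a StopValidation.
if __name__ == "__main__":
    def require_username(value):
        raise ValidationError("username", "must not be empty")
    def require_email(value):
        raise ValidationError("email", "must contain an @")
    try:
        accumulate_errors(lambda validate: validate(""), [require_username, require_email])
    except StopValidation as exc:
        print(exc.reasons)
        # [('username', 'must not be empty'), ('email', 'must contain an @')]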
|
Lib/fontmake/instantiator.py
|
googlei18n/fontmake
| 333 |
71401
|
# This code is based on ufoProcessor code, which is licensed as follows:
# Copyright (c) 2017-2018 LettError and <NAME>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for generating static font instances.
It is an alternative to mutatorMath (used internally by fontmake) and ufoProcessor. The
aim is to be a minimal implementation that is focussed on using ufoLib2 for font data
abstraction, varLib for instance computation and fontMath as a font data shell for
instance computation directly and exclusively.
At the time of this writing, varLib lacks support for anisotropic (x, y) locations and
extrapolation.
"""
import copy
import logging
import typing
from typing import Any, Dict, List, Mapping, Set, Tuple, Union
import attr
import fontMath
import fontTools.designspaceLib as designspaceLib
import fontTools.misc.fixedTools
import fontTools.varLib as varLib
import ufoLib2
logger = logging.getLogger(__name__)
# Use the same rounding function used by varLib to round things for the variable font
# to reduce differences between the variable and static instances.
fontMath.mathFunctions.setRoundIntegerFunction(fontTools.misc.fixedTools.otRound)
# Stand-in type for any of the fontMath classes we use.
FontMathObject = Union[fontMath.MathGlyph, fontMath.MathInfo, fontMath.MathKerning]
# MutatorMath-style location mapping type, i.e.
# `{"wght": 1.0, "wdth": 0.0, "bleep": 0.5}`.
# LocationKey is a Location turned into a tuple so we can use it as a dict key.
Location = Mapping[str, float]
LocationKey = Tuple[Tuple[str, float], ...]
# Type of mapping of axes to their minimum, default and maximum values, i.e.
# `{"wght": (100.0, 400.0, 900.0), "wdth": (75.0, 100.0, 100.0)}`.
AxisBounds = Dict[str, Tuple[float, float, float]]
# For mapping `wdth` axis user values to the OS2 table's width class field.
WDTH_VALUE_TO_OS2_WIDTH_CLASS = {
50: 1,
62.5: 2,
75: 3,
87.5: 4,
100: 5,
112.5: 6,
125: 7,
150: 8,
200: 9,
}
# Font info fields that are not interpolated and should be copied from the
# default font to the instance.
#
# fontMath at the time of this writing handles the following attributes:
# https://github.com/robotools/fontMath/blob/0.5.0/Lib/fontMath/mathInfo.py#L360-L422
#
# From the attributes that are left, we skip instance-specific ones on purpose:
# - guidelines
# - postscriptFontName
# - styleMapFamilyName
# - styleMapStyleName
# - styleName
# - openTypeNameCompatibleFullName
# - openTypeNamePreferredFamilyName
# - openTypeNamePreferredSubfamilyName
# - openTypeNameUniqueID
# - openTypeNameWWSFamilyName
# - openTypeNameWWSSubfamilyName
# - openTypeOS2Panose
# - postscriptFullName
# - postscriptUniqueID
# - woffMetadataUniqueID
#
# Some, we skip because they are deprecated:
# - macintoshFONDFamilyID
# - macintoshFONDName
# - year
#
# This means we implicitly require the `stylename` attribute in the Designspace
# `<instance>` element.
UFO_INFO_ATTRIBUTES_TO_COPY_TO_INSTANCES = {
"copyright",
"familyName",
"note",
"openTypeGaspRangeRecords",
"openTypeHeadCreated",
"openTypeHeadFlags",
"openTypeNameDescription",
"openTypeNameDesigner",
"openTypeNameDesignerURL",
"openTypeNameLicense",
"openTypeNameLicenseURL",
"openTypeNameManufacturer",
"openTypeNameManufacturerURL",
"openTypeNameRecords",
"openTypeNameSampleText",
"openTypeNameVersion",
"openTypeOS2CodePageRanges",
"openTypeOS2FamilyClass",
"openTypeOS2Selection",
"openTypeOS2Type",
"openTypeOS2UnicodeRanges",
"openTypeOS2VendorID",
"postscriptDefaultCharacter",
"postscriptForceBold",
"postscriptIsFixedPitch",
"postscriptWindowsCharacterSet",
"trademark",
"versionMajor",
"versionMinor",
"woffMajorVersion",
"woffMetadataCopyright",
"woffMetadataCredits",
"woffMetadataDescription",
"woffMetadataExtensions",
"woffMetadataLicense",
"woffMetadataLicensee",
"woffMetadataTrademark",
"woffMetadataVendor",
"woffMinorVersion",
}
# Custom exception for this module
class InstantiatorError(Exception):
pass
def process_rules_swaps(rules, location, glyphNames):
"""Apply these rules at this location to these glyphnames
- rule order matters
Return a list of (oldName, newName) in the same order as the rules.
"""
swaps = []
glyphNames = set(glyphNames)
for rule in rules:
if designspaceLib.evaluateRule(rule, location):
for oldName, newName in rule.subs:
# Here I don't check if the new name is also in glyphNames...
# I guess it should be, so that we can swap, and if it isn't,
# then it's better to error out later when we try to swap,
# instead of silently ignoring the rule here.
if oldName in glyphNames:
swaps.append((oldName, newName))
return swaps
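# Illustrative example of the rule machinery above (the axis and glyph names are made up,
# not taken from any real Designspace): a RuleDescriptor whose condition set matches the
# given location yields one (oldName, newName) swap.
#
#   rule = designspaceLib.RuleDescriptor()
#   rule.conditionSets = [[dict(name="Weight", minimum=600, maximum=900)]]
#   rule.subs = [("dollar", "dollar.heavy")]
#   process_rules_swaps([rule], {"Weight": 700}, ["dollar", "dollar.heavy"])
#   # -> [("dollar", "dollar.heavy")]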
@attr.s(auto_attribs=True, frozen=True, slots=True)
class Instantiator:
"""Data class that holds all necessary information to generate a static
    font instance object at an arbitrary location within the design space."""
axis_bounds: AxisBounds # Design space!
copy_feature_text: str
copy_nonkerning_groups: Mapping[str, List[str]]
copy_info: ufoLib2.objects.Info
copy_lib: Mapping[str, Any]
default_design_location: Location
designspace_rules: List[designspaceLib.RuleDescriptor]
glyph_mutators: Mapping[str, "Variator"]
glyph_name_to_unicodes: Dict[str, List[int]]
info_mutator: "Variator"
kerning_mutator: "Variator"
round_geometry: bool
skip_export_glyphs: List[str]
special_axes: Mapping[str, designspaceLib.AxisDescriptor]
@classmethod
def from_designspace(
cls,
designspace: designspaceLib.DesignSpaceDocument,
round_geometry: bool = True,
):
"""Instantiates a new data class from a Designspace object."""
if designspace.default is None:
raise InstantiatorError(_error_msg_no_default(designspace))
if any(hasattr(axis, "values") for axis in designspace.axes):
raise InstantiatorError(
"The given designspace has one or more discrete (= non-interpolating) "
"axes. You should split this designspace into smaller interpolating "
"spaces and use the Instantiator on each. See the method "
"`fontTools.designspaceLib.split.splitInterpolable()`"
)
if any(anisotropic(instance.location) for instance in designspace.instances):
raise InstantiatorError(
"The Designspace contains anisotropic instance locations, which are "
"not supported by varLib. Look for and remove all 'yvalue=\"...\"' or "
"use MutatorMath instead."
)
designspace.loadSourceFonts(ufoLib2.Font.open)
# The default font (default layer) determines which glyphs are interpolated,
# because the math behind varLib and MutatorMath uses the default font as the
# point of reference for all data.
default_font = designspace.default.font
glyph_names: Set[str] = set(default_font.keys())
for source in designspace.sources:
other_names = set(source.font.keys())
diff_names = other_names - glyph_names
if diff_names:
logger.warning(
"The source %s (%s) contains glyphs that are missing from the "
"default source, which will be ignored: %s. If this is unintended, "
"check that these glyphs have the exact same name as the "
"corresponding glyphs in the default source.",
source.name,
source.filename,
", ".join(sorted(diff_names)),
)
# Construct Variators
axis_bounds: AxisBounds = {} # Design space!
axis_order: List[str] = []
special_axes = {}
for axis in designspace.axes:
axis_order.append(axis.name)
axis_bounds[axis.name] = (
axis.map_forward(axis.minimum),
axis.map_forward(axis.default),
axis.map_forward(axis.maximum),
)
# Some axes relate to existing OpenType fields and get special attention.
if axis.tag in {"wght", "wdth", "slnt"}:
special_axes[axis.tag] = axis
masters_info = collect_info_masters(designspace, axis_bounds)
try:
info_mutator = Variator.from_masters(masters_info, axis_order)
except varLib.errors.VarLibError as e:
raise InstantiatorError(
f"Cannot set up fontinfo for interpolation: {e}'"
) from e
masters_kerning = collect_kerning_masters(designspace, axis_bounds)
try:
kerning_mutator = Variator.from_masters(masters_kerning, axis_order)
except varLib.errors.VarLibError as e:
raise InstantiatorError(
f"Cannot set up kerning for interpolation: {e}'"
) from e
glyph_mutators: Dict[str, Variator] = {}
glyph_name_to_unicodes: Dict[str, List[int]] = {}
for glyph_name in glyph_names:
items = collect_glyph_masters(designspace, glyph_name, axis_bounds)
try:
glyph_mutators[glyph_name] = Variator.from_masters(items, axis_order)
except varLib.errors.VarLibError as e:
raise InstantiatorError(
f"Cannot set up glyph '{glyph_name}' for interpolation: {e}'"
) from e
glyph_name_to_unicodes[glyph_name] = default_font[glyph_name].unicodes
# Construct defaults to copy over
copy_feature_text: str = default_font.features.text
copy_nonkerning_groups: Mapping[str, List[str]] = {
key: glyph_names
for key, glyph_names in default_font.groups.items()
if not key.startswith(("public.kern1.", "public.kern2."))
} # Kerning groups are taken care of by the kerning Variator.
copy_info: ufoLib2.objects.Info = default_font.info
copy_lib: Mapping[str, Any] = default_font.lib
# The list of glyphs-not-to-export-and-decompose-where-used-as-a-component is
# supposed to be taken from the Designspace when a Designspace is used as the
# starting point of the compilation process. It should be exported to all
# instance libs, where the ufo2ft compilation functions will pick it up.
skip_export_glyphs = designspace.lib.get("public.skipExportGlyphs", [])
return cls(
axis_bounds,
copy_feature_text,
copy_nonkerning_groups,
copy_info,
copy_lib,
designspace.default.location,
designspace.rules,
glyph_mutators,
glyph_name_to_unicodes,
info_mutator,
kerning_mutator,
round_geometry,
skip_export_glyphs,
special_axes,
)
def generate_instance(
self, instance: designspaceLib.InstanceDescriptor
) -> ufoLib2.Font:
"""Generate an interpolated instance font object for an
InstanceDescriptor."""
if anisotropic(instance.location):
raise InstantiatorError(
f"Instance {instance.familyName}-"
f"{instance.styleName}: Anisotropic location "
f"{instance.location} not supported by varLib."
)
font = ufoLib2.Font()
# Instances may leave out locations that match the default source, so merge
# default location with the instance's location.
location = {**self.default_design_location, **instance.location}
location_normalized = varLib.models.normalizeLocation(
location, self.axis_bounds
)
# Kerning
kerning_instance = self.kerning_mutator.instance_at(location_normalized)
if self.round_geometry:
kerning_instance.round()
kerning_instance.extractKerning(font)
# Info
self._generate_instance_info(instance, location_normalized, location, font)
# Non-kerning groups. Kerning groups have been taken care of by the kerning
# instance.
for key, glyph_names in self.copy_nonkerning_groups.items():
font.groups[key] = [name for name in glyph_names]
# Features
font.features.text = self.copy_feature_text
# Lib
# 1. Copy the default lib to the instance.
font.lib = typing.cast(dict, copy.deepcopy(self.copy_lib))
# 2. Copy the Designspace's skipExportGlyphs list over to the UFO to
# make sure it wins over the default UFO one.
font.lib["public.skipExportGlyphs"] = [name for name in self.skip_export_glyphs]
# 3. Write _design_ location to instance's lib.
font.lib["designspace.location"] = [loc for loc in location.items()]
# Glyphs
for glyph_name, glyph_mutator in self.glyph_mutators.items():
glyph = font.newGlyph(glyph_name)
try:
glyph_instance = glyph_mutator.instance_at(location_normalized)
if self.round_geometry:
glyph_instance = glyph_instance.round()
# onlyGeometry=True does not set name and unicodes, in ufoLib2 we can't
# modify a glyph's name. Copy unicodes from default font.
glyph_instance.extractGlyph(glyph, onlyGeometry=True)
except Exception as e:
# TODO: Figure out what exceptions fontMath/varLib can throw.
# By default, explode if we cannot generate a glyph instance for
# whatever reason (usually outline incompatibility)...
if glyph_name not in self.skip_export_glyphs:
raise InstantiatorError(
f"Failed to generate instance of glyph '{glyph_name}': "
f"{str(e)}. (Note: the most common cause for an error here is "
"that the glyph outlines are not point-for-point compatible or "
"have the same starting point or are in the same order in all "
"masters.)"
) from e
# ...except if the glyph is in public.skipExportGlyphs and would
# therefore be removed from the compiled font anyway. There's not much
# we can do except leave it empty in the instance and tell the user.
logger.warning(
"Failed to generate instance of glyph '%s', which is marked as "
"non-exportable. Glyph will be left empty. Failure reason: %s",
glyph_name,
e,
)
glyph.unicodes = [uv for uv in self.glyph_name_to_unicodes[glyph_name]]
# Process rules
glyph_names_list = self.glyph_mutators.keys()
# The order of the swaps below is independent of the order of glyph names.
# It depends on the order of the <sub>s in the designspace rules.
swaps = process_rules_swaps(self.designspace_rules, location, glyph_names_list)
for name_old, name_new in swaps:
if name_old != name_new:
swap_glyph_names(font, name_old, name_new)
return font
def _generate_instance_info(
self,
instance: designspaceLib.InstanceDescriptor,
location_normalized: Location,
location: Location,
font: ufoLib2.Font,
) -> None:
"""Generate fontinfo related attributes.
Separate, as fontinfo treatment is more extensive than the rest.
"""
info_instance = self.info_mutator.instance_at(location_normalized)
if self.round_geometry:
info_instance = info_instance.round()
info_instance.extractInfo(font.info)
# Copy non-interpolating metadata from the default font.
for attribute in UFO_INFO_ATTRIBUTES_TO_COPY_TO_INSTANCES:
if hasattr(self.copy_info, attribute):
setattr(
font.info,
attribute,
copy.deepcopy(getattr(self.copy_info, attribute)),
)
# TODO: multilingual names to replace possibly existing name records.
if instance.familyName:
font.info.familyName = instance.familyName
if instance.styleName is None:
logger.warning(
"The given instance or instance at location %s is missing the "
"stylename attribute, which is required. Copying over the styleName "
"from the default font, which is probably wrong.",
location,
)
font.info.styleName = self.copy_info.styleName
else:
font.info.styleName = instance.styleName
if instance.postScriptFontName:
font.info.postscriptFontName = instance.postScriptFontName
if instance.styleMapFamilyName:
font.info.styleMapFamilyName = instance.styleMapFamilyName
if instance.styleMapStyleName:
font.info.styleMapStyleName = instance.styleMapStyleName
# If the masters haven't set the OS/2 weight and width class, use the
# user-space values ("input") of the axis mapping in the Designspace file for
# weight and width axes, if they exist. The slnt axis' value maps 1:1 to
# italicAngle. Clamp the values to the valid ranges.
if info_instance.openTypeOS2WeightClass is None and "wght" in self.special_axes:
weight_axis = self.special_axes["wght"]
font.info.openTypeOS2WeightClass = weight_class_from_wght_value(
weight_axis.map_backward(location[weight_axis.name])
)
if info_instance.openTypeOS2WidthClass is None and "wdth" in self.special_axes:
width_axis = self.special_axes["wdth"]
font.info.openTypeOS2WidthClass = width_class_from_wdth_value(
width_axis.map_backward(location[width_axis.name])
)
if info_instance.italicAngle is None and "slnt" in self.special_axes:
slant_axis = self.special_axes["slnt"]
font.info.italicAngle = italic_angle_from_slnt_value(
slant_axis.map_backward(location[slant_axis.name])
)
def _error_msg_no_default(designspace: designspaceLib.DesignSpaceDocument) -> str:
if any(axis.map for axis in designspace.axes):
bonus_msg = (
"For axes with a mapping, the 'default' values should have an "
"'input=\"...\"' map value, where the corresponding 'output=\"...\"' "
"value then points to the master source."
)
else:
bonus_msg = ""
default_location = ", ".join(
f"{k}: {v}" for k, v in designspace.newDefaultLocation().items()
)
return (
"Can't generate UFOs from this Designspace because there is no default "
f"master source at location '{default_location}'. Check that all 'default' "
"values of all axes together point to a single actual master source. "
f"{bonus_msg}"
)
def location_to_key(location: Location) -> LocationKey:
"""Converts a Location into a sorted tuple so it can be used as a dict
key."""
return tuple(sorted(location.items()))
def anisotropic(location: Location) -> bool:
"""Tests if any single location value is a MutatorMath-style anisotropic
value, i.e. is a tuple of (x, y)."""
return any(isinstance(v, tuple) for v in location.values())
def collect_info_masters(
designspace: designspaceLib.DesignSpaceDocument, axis_bounds: AxisBounds
) -> List[Tuple[Location, FontMathObject]]:
"""Return master Info objects wrapped by MathInfo."""
locations_and_masters = []
for source in designspace.sources:
if source.layerName is not None:
continue # No font info in source layers.
normalized_location = varLib.models.normalizeLocation(
source.location, axis_bounds
)
locations_and_masters.append(
(normalized_location, fontMath.MathInfo(source.font.info))
)
return locations_and_masters
def collect_kerning_masters(
designspace: designspaceLib.DesignSpaceDocument, axis_bounds: AxisBounds
) -> List[Tuple[Location, FontMathObject]]:
"""Return master kerning objects wrapped by MathKerning."""
# Always take the groups from the default source. This also avoids fontMath
# making a union of all groups it is given.
groups = designspace.default.font.groups
locations_and_masters = []
for source in designspace.sources:
if source.layerName is not None:
continue # No kerning in source layers.
# If a source has groups, they should match the default's.
if source.font.groups and source.font.groups != groups:
logger.warning(
"The source %s (%s) contains different groups than the default source. "
"The default source's groups will be used for the instances.",
source.name,
source.filename,
)
# This assumes that groups of all sources are the same.
normalized_location = varLib.models.normalizeLocation(
source.location, axis_bounds
)
locations_and_masters.append(
(normalized_location, fontMath.MathKerning(source.font.kerning, groups))
)
return locations_and_masters
def collect_glyph_masters(
designspace: designspaceLib.DesignSpaceDocument,
glyph_name: str,
axis_bounds: AxisBounds,
) -> List[Tuple[Location, FontMathObject]]:
"""Return master glyph objects for glyph_name wrapped by MathGlyph.
Note: skips empty source glyphs if the default glyph is not empty to almost match
what ufoProcessor is doing. In e.g. Mutator Sans, the 'S.closed' glyph is left
empty in one source layer. One could treat this as a source error, but ufoProcessor
specifically has code to skip that empty glyph and carry on.
"""
locations_and_masters = []
default_glyph_empty = False
other_glyph_empty = False
for source in designspace.sources:
if source.layerName is None: # Source font.
source_layer = source.font.layers.defaultLayer
else: # Source layer.
source_layer = source.font.layers[source.layerName]
        # Sparse fonts and sparse layers may not contain every glyph.
if glyph_name not in source_layer:
continue
source_glyph = source_layer[glyph_name]
if not (source_glyph.contours or source_glyph.components):
if source is designspace.findDefault():
default_glyph_empty = True
else:
other_glyph_empty = True
normalized_location = varLib.models.normalizeLocation(
source.location, axis_bounds
)
locations_and_masters.append(
(normalized_location, fontMath.MathGlyph(source_glyph))
)
# Filter out empty glyphs if the default glyph is not empty.
if not default_glyph_empty and other_glyph_empty:
locations_and_masters = [
(loc, master)
for loc, master in locations_and_masters
if master.contours or master.components
]
return locations_and_masters
def width_class_from_wdth_value(wdth_user_value) -> int:
"""Return the OS/2 width class from the wdth axis user value.
The OpenType 1.8.3 specification states:
When mapping from 'wdth' values to usWidthClass, interpolate fractional
values between the mapped values and then round, and clamp to the range
1 to 9.
"Mapped values" probably means the in-percent numbers layed out for the OS/2
width class, so we are forcing these numerical semantics on the user values
of the wdth axis.
"""
width_user_value = min(max(wdth_user_value, 50), 200)
width_user_value_mapped = varLib.models.piecewiseLinearMap(
width_user_value, WDTH_VALUE_TO_OS2_WIDTH_CLASS
)
return fontTools.misc.fixedTools.otRound(width_user_value_mapped)
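# Worked example for the mapping above (values chosen purely for illustration): a wdth user
# value of 81.25 lies halfway between the mapped classes 3 (at 75) and 4 (at 87.5), so it
# interpolates to 3.5 and otRound gives 4:
#   width_class_from_wdth_value(81.25)  # -> 4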
def weight_class_from_wght_value(wght_user_value) -> int:
"""Return the OS/2 weight class from the wght axis user value."""
weight_user_value = min(max(wght_user_value, 1), 1000)
return fontTools.misc.fixedTools.otRound(weight_user_value)
def italic_angle_from_slnt_value(slnt_user_value) -> Union[int, float]:
"""Return the italic angle from the slnt axis user value."""
slant_user_value = min(max(slnt_user_value, -90), 90)
return slant_user_value
def swap_glyph_names(font: ufoLib2.Font, name_old: str, name_new: str):
"""Swap two existing glyphs in the default layer of a font (outlines,
width, component references, kerning references, group membership).
The idea behind swapping instead of overwriting is explained in
https://github.com/fonttools/fonttools/tree/main/Doc/source/designspaceLib#ufo-instances.
We need to keep the old glyph around in case any other glyph references
it; glyphs that are not explicitly substituted by rules should not be
affected by the rule application.
The .unicodes are not swapped. The rules mechanism is supposed to swap
glyphs, not characters.
"""
if name_old not in font or name_new not in font:
raise InstantiatorError(
f"Cannot swap glyphs '{name_old}' and '{name_new}', as either or both are "
"missing."
)
# 1. Swap outlines and glyph width. Ignore lib content and other properties.
glyph_swap = ufoLib2.objects.Glyph(name="temporary_swap_glyph")
glyph_old = font[name_old]
glyph_new = font[name_new]
p = glyph_swap.getPointPen()
glyph_old.drawPoints(p)
glyph_swap.width = glyph_old.width
glyph_old.clearContours()
glyph_old.clearComponents()
p = glyph_old.getPointPen()
glyph_new.drawPoints(p)
glyph_old.width = glyph_new.width
glyph_new.clearContours()
glyph_new.clearComponents()
p = glyph_new.getPointPen()
glyph_swap.drawPoints(p)
glyph_new.width = glyph_swap.width
# 2. Swap anchors.
glyph_swap.anchors = glyph_old.anchors
glyph_old.anchors = glyph_new.anchors
glyph_new.anchors = glyph_swap.anchors
# 3. Remap components.
for g in font:
for c in g.components:
if c.baseGlyph == name_old:
c.baseGlyph = name_new
elif c.baseGlyph == name_new:
c.baseGlyph = name_old
# 4. Swap literal names in kerning.
kerning_new = {}
for first, second in font.kerning.keys():
value = font.kerning[(first, second)]
if first == name_old:
first = name_new
elif first == name_new:
first = name_old
if second == name_old:
second = name_new
elif second == name_new:
second = name_old
kerning_new[(first, second)] = value
font.kerning = kerning_new
# 5. Swap names in groups.
for group_name, group_members in font.groups.items():
group_members_new = []
for name in group_members:
if name == name_old:
group_members_new.append(name_new)
elif name == name_new:
group_members_new.append(name_old)
else:
group_members_new.append(name)
font.groups[group_name] = group_members_new
@attr.s(auto_attribs=True, frozen=True, slots=True)
class Variator:
"""A middle-man class that ingests a mapping of normalized locations to
masters plus axis definitions and uses varLib to spit out interpolated
instances at specified normalized locations.
fontMath objects stand in for the actual master objects from the
UFO. Upon generating an instance, these objects have to be extracted
into an actual UFO object.
"""
masters: List[FontMathObject]
location_to_master: Mapping[LocationKey, FontMathObject]
model: varLib.models.VariationModel
@classmethod
def from_masters(
cls, items: List[Tuple[Location, FontMathObject]], axis_order: List[str]
):
masters = []
master_locations = []
location_to_master = {}
for normalized_location, master in items:
master_locations.append(normalized_location)
masters.append(master)
location_to_master[location_to_key(normalized_location)] = master
model = varLib.models.VariationModel(master_locations, axis_order)
return cls(masters, location_to_master, model)
def instance_at(self, normalized_location: Location) -> FontMathObject:
"""Return a FontMathObject for the specified location ready to be
inflated.
If an instance location matches a master location, this method
returns the master data instead of running through varLib. This
is both an optimization _and_ it enables having a Designspace
with instances matching their masters without requiring them to
be compatible. Glyphs.app works this way; it will only generate
a font from an instance, but compatibility is only required if
there is actual interpolation to be done. This enables us to
store incompatible bare masters in one Designspace and having
arbitrary instance data applied to them.
"""
normalized_location_key = location_to_key(normalized_location)
if normalized_location_key in self.location_to_master:
return copy.deepcopy(self.location_to_master[normalized_location_key])
return self.model.interpolateFromMasters(normalized_location, self.masters)
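# Illustrative sketch (added for clarity, not part of fontmake): the varLib model backing
# Variator interpolates plain numbers the same way it interpolates MathInfo/MathGlyph
# masters, so a two-master weight setup can be sanity-checked without any UFO data.
if __name__ == "__main__":
    _model = varLib.models.VariationModel([{}, {"wght": 1.0}], axisOrder=["wght"])
    # Default master carries 400, the wght=1.0 master carries 700; halfway lands on 550.
    print(_model.interpolateFromMasters({"wght": 0.5}, [400.0, 700.0]))  # -> 550.0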
|
test_raml_models.py
|
pcyin/pytorch_nmt
| 122 |
71414
|
import os
train_src="../dynet_nmt/data/train.de-en.de.wmixerprep"
train_tgt="../dynet_nmt/data/train.de-en.en.wmixerprep"
dev_src="../dynet_nmt/data/valid.de-en.de"
dev_tgt="../dynet_nmt/data/valid.de-en.en"
test_src="../dynet_nmt/data/test.de-en.de"
test_tgt="../dynet_nmt/data/test.de-en.en"
for temp in [0.5]:
job_name = 'iwslt14.raml.corrupt_ngram.t%.3f' % temp
train_log = 'train.' + job_name + '.log'
model_name = 'model.' + job_name
decode_file = 'iwslt14.test.en.raml.corrupt_ngram.t%.3f' % temp
job_file = 'scripts/train.%s.sh' % job_name
with open(job_file, 'w') as f:
f.write("""#!/bin/sh
python nmt.py \
--cuda \
--mode test \
--load_model models/{model_name}.bin \
--beam_size 5 \
--decode_max_time_step 100 \
--save_to_file decode/{decode_file} \
--test_src {test_src} \
--test_tgt {test_tgt}
echo "test result" >> logs/{train_log}
perl multi-bleu.perl {test_tgt} < decode/{decode_file} >> logs/{train_log}
""".format(model_name=model_name, temp=temp,
train_src=train_src, train_tgt=train_tgt,
dev_src=dev_src, dev_tgt=dev_tgt,
test_src=test_src, test_tgt=test_tgt,
train_log=train_log, decode_file=decode_file))
os.system('bash submit_job.sh %s' % job_file)
|
tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/androidDevice.py
|
Passer-D/GameAISDK
| 1,210 |
71417
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import os
import sys
import logging
from logging.handlers import RotatingFileHandler
import cv2
from .androidDeviceAPI import AndroidDeviceAPI
from .APIDefine import LOG_DEBUG, LOG_DEFAULT, TOUCH_CMD_LIST, DEVICE_CMD_LIST, TOUCH_KEY, \
TOUCH_CLICK, TOUCH_UP, TOUCH_MOVE, TOUCH_DOWN, TOUCH_SWIPE, TOUCH_SWIPEMOVE, TOUCH_RESET, DEVICE_KEY, \
DEVICE_CLICK, DEVICE_CLEARAPP, DEVICE_CURAPP, DEVICE_EXIT, DEVICE_INSTALL, DEVICE_START, \
DEVICE_TEXT, DEVICE_SCREENORI, DEVICE_SCREENSHOT, DEVICE_MAXCONTACT, DEVICE_PARAM, DEVICE_SLEEP, \
DEVICE_SWIPE, DEVICE_WAKE, DEVICE_WMSIZE, LOG_LIST, LOG_FORMAT
from ...iDevice import IDevice
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir)
PP_RET_OK = 0
class AndroidDevice(IDevice):
def __init__(self, platform_type):
super(AndroidDevice, self).__init__(platform_type)
self.__deviceApi = AndroidDeviceAPI(platform_type)
self.__height = -1
self.__width = -1
self.__pid = os.getpid()
self.__serial = '*'
self.__showScreen = False
self.__maxContact = 10
self.__logger = None
def initialize(self, log_dir, **kwargs):
"""
        :param device_serial: str, phone serial number; defaults to None. It can be omitted when
        only one device is attached, but must be given when several devices are attached
        :param long_edge: int, length of the longer edge of the screen
        :param log_dir: str, directory where logs are stored
        :param level: enum, log level, one of
        [LOG_DEBUG, LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_CRITICAL]; defaults to LOG_DEBUG
        :param show_raw_screen: bool, whether to display the captured phone image
        :param kwargs: dict, extra parameters needed by some components (e.g. port numbers); define as needed
"""
level = kwargs.pop('level') if 'level' in kwargs else logging.DEBUG
long_edge = kwargs.pop('long_edge') if 'long_edge' in kwargs else 1280
device_serial = kwargs.pop('device_serial') if 'device_serial' in kwargs else None
show_raw_screen = kwargs.pop('show_raw_screen') if 'show_raw_screen' in kwargs else False
if device_serial is not None:
log_dir = os.path.join(log_dir, device_serial.replace(':', "_")) + os.path.sep
self.__serial = device_serial
if not self._LogInit(log_dir, level, device_serial):
raise RuntimeError("init log failed")
else:
log_dir = os.path.join(log_dir, LOG_DEFAULT) + os.path.sep
if not self._LogInit(log_dir, level, LOG_DEFAULT):
raise RuntimeError("init log failed")
kwargs['standalone'] = 0 if os.environ.get("PLATFORM_IP") else 1
if not self.__deviceApi.Initialize(device_serial, long_edge, **kwargs):
self.__logger.error('DeviceAPI initial failed')
raise RuntimeError("DeviceAPI initial failed")
self.__showScreen = show_raw_screen
self.__maxContact = self.__deviceApi.GetMaxContact()
self.__height, self.__width, strError = self.__deviceApi.GetScreenResolution()
if self.__height == -1 and self.__width == -1:
self.__logger.error(strError)
raise RuntimeError(strError)
height = long_edge
width = self.__width * height / self.__height
self.__width = width
self.__height = height
self.__logger.info("init successful")
return True
def deInitialize(self):
return self.__deviceApi.DeInitialize()
def getScreen(self, **kwargs):
"""
        :return: image as an OpenCV Mat, or None on failure
"""
err, image = self.__deviceApi.GetFrame()
if err != PP_RET_OK:
self.__logger.error('failed to get frame')
return None
if image is not None and self.__showScreen:
self.__logger.info("get image")
cv2.imshow('pid:' + str(self.__pid) + ' serial:' + str(self.__serial), image)
cv2.waitKey(1)
return image
def doAction(self, **kwargs):
aType = kwargs['aType']
if aType in TOUCH_CMD_LIST:
return self.TouchCMD(**kwargs)
if aType in DEVICE_CMD_LIST:
return self.DeviceCMD(**kwargs)
raise Exception("unknown action type: %s, %s", aType, kwargs)
def TouchCMD(self, **kwargs):
""" 执行操作
:kwargs: dict,
aType参数表示动作类型[TOUCH_CLICK, TOUCH_DOWN, TOUCH_UP, TOUCH_SWIPE, TOUCH_MOVE]
sx为x坐标,当aType为[TOUCH_CLICK, TOUCH_DOWN]时表示按压点的x坐标,
当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示起始点的x坐标
sy为y坐标,当aType为[TOUCH_CLICK, TOUCH_DOWN]时表示按压点的y坐标,
当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示起始点的y坐标
ex为x坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示结束点的x坐标
ex为y坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示结束点的y坐标
DaType为执行该操作的方式,有minitouch方式和ADB命令方式,分别表示为[DACT_TOUCH, DACT_ADB],默认为DACT_TOUCH
contact为触点,默认为0
durationMS为执行一次动作持续的时间,在aType为[TOUCH_CLICK, TOUCH_SWIPE]时使用,
当aType为TOUCH_CLICK时默认为-1,当aType为TOUCH_SWIPE时默认为50
needUp仅在aType为TOUCH_SWIPE时使用,表示滑动后是否需要抬起,默认为True
:return: True or False
"""
for key in kwargs:
if key not in TOUCH_KEY:
self.__logger.error('wrong key of kwargs: %s', key)
return False
actionType = kwargs.get('aType')
if not actionType:
self.__logger.error('aType is needed when exec TouchCommand')
return False
px = sx = kwargs.get('sx', None)
py = sy = kwargs.get('sy', None)
ex = kwargs.get('ex', None)
ey = kwargs.get('ey', None)
contact = kwargs.get('contact', 0)
durationMS = kwargs.get('durationMS', 0)
needUp = kwargs.get('needUp', True)
wait_time = kwargs.get('wait_time', 0)
if actionType == TOUCH_CLICK:
self.__logger.info("platform Click, x: %s, y: %s, contact: %s, durationMS: %s, waitTime: %s",
px,
py,
contact,
durationMS,
wait_time)
self.__deviceApi.Click(px, py, contact, durationMS, wait_time)
elif actionType == TOUCH_DOWN:
self.__logger.info(
"platform Down, x: %s, y: %s, contact: %s, waitTime: %s", px, py, contact, wait_time)
self.__deviceApi.Down(px, py, contact, wait_time)
elif actionType == TOUCH_UP:
self.__logger.info("platform Up, contact: %s, waitTime: %s", contact, wait_time)
self.__deviceApi.Up(contact, wait_time)
elif actionType == TOUCH_SWIPE:
if durationMS <= 0:
durationMS = 50
self.__logger.info("platform Swipe, sx: %s, sy: %s, ex: %s, ey: %s, "
"contact: %s, durationMS: %s, waitTime: %s",
sx,
sy,
ex,
ey,
contact,
durationMS,
wait_time)
self.__deviceApi.Swipe(sx, sy, ex, ey, contact, durationMS, needUp, wait_time)
elif actionType == TOUCH_MOVE:
self.__logger.info(
"platform Move, x: %s, y: %s, contact: %s, waitTime: %s", px, py, contact, wait_time)
self.__deviceApi.Move(px, py, contact, wait_time)
elif actionType == TOUCH_SWIPEMOVE:
if durationMS <= 0:
durationMS = 50
self.__logger.info(
"platform SwipeMove, px: %s, py: %s, contact: %s, durationMS: %s waitTime: %s", px,
py,
contact,
durationMS,
wait_time)
self.__deviceApi.SwipeMove(px, py, contact, durationMS, wait_time)
elif actionType == TOUCH_RESET:
self.__logger.info("platform Reset, waitTime: %s", wait_time)
self.__deviceApi.Reset(wait_time=wait_time)
else:
self.__logger.error('Wrong aType when TouchCommand, aType:%s', actionType)
return False
return True
def DeviceCMD(self, **kwargs):
""" 执行设备相关的操作
aType:操作类型[DEVICE_INSTALL, DEVICE_START, DEVICE_EXIT, DEVICE_CURAPP, DEVICE_CLEARAPP, DEVICE_KEY,
DEVICE_TEXT, DEVICE_SLEEP, DEVICE_WAKE, DEVICE_WMSIZE, DEVICE_BINDRO, DEVICE_SCREENSHOT,
DEVICE_SCREENORI, DEVICE_PARAM]
APKPath:安装包路径
PKGName:包名
ActivityName:包的activity
key:字母
text:键盘输入的字符串
"""
actionType = kwargs.get('aType')
if not actionType:
self.__logger.error('aType is needed when exec DeviceCommand')
return False
if actionType == DEVICE_INSTALL:
APKPath = kwargs.get('APKPath', None)
if not self.__deviceApi.InstallAPP(APKPath):
self.__logger.error('install app failed: %s', APKPath)
return False
elif actionType == DEVICE_START:
PKGName = kwargs.get('PKGName', None)
ActivityName = kwargs.get('ActivityName', None)
self.__deviceApi.LaunchAPP(PKGName, ActivityName)
elif actionType == DEVICE_EXIT:
PKGName = kwargs.get('PKGName', None)
self.__deviceApi.ExitAPP(PKGName)
elif actionType == DEVICE_CURAPP:
return self.__deviceApi.CurrentApp()
elif actionType == DEVICE_CLEARAPP:
PKGName = kwargs.get('PKGName', None)
self.__deviceApi.ClearAppData(PKGName)
elif actionType == DEVICE_KEY:
key = kwargs.get('key', None)
self.__deviceApi.Key(key)
elif actionType == DEVICE_TEXT:
text = kwargs.get('text', None)
self.__deviceApi.Text(text)
elif actionType == DEVICE_SLEEP:
self.__deviceApi.Sleep()
elif actionType == DEVICE_WAKE:
self.__deviceApi.Wake()
elif actionType == DEVICE_WMSIZE:
return self.__deviceApi.WMSize()
elif actionType == DEVICE_SCREENSHOT:
targetPath = kwargs.get('targetPath', None)
self.__deviceApi.TakeScreenshot(targetPath)
elif actionType == DEVICE_SCREENORI:
return self.__deviceApi.GetScreenOri()
elif actionType == DEVICE_MAXCONTACT:
return self.__maxContact
elif actionType == DEVICE_CLICK:
px = kwargs.get('sx', None)
py = kwargs.get('sy', None)
self.__deviceApi.ADBClick(px, py)
elif actionType == DEVICE_SWIPE:
sx = kwargs.get('sx', None)
sy = kwargs.get('sy', None)
ex = kwargs.get('ex', None)
ey = kwargs.get('ey', None)
durationMS = kwargs.get('durationMS', 50)
self.__deviceApi.ADBSwipe(sx, sy, ex, ey, durationMS=durationMS)
elif actionType == DEVICE_PARAM:
packageName = kwargs.get('PKGName', None)
return self.__deviceApi.GetDeviceParame(packageName)
else:
self.__logger.error('wrong aType when exec DeviceCommand, aType:%s', actionType)
return False
return True
# def _GetValuesInkwargs(self, key, isNessesary, defaultValue, kwargs):
# try:
# if not isNessesary:
# if key not in kwargs:
# return True, defaultValue
# else:
# return True, kwargs[key]
# else:
# return True, kwargs[key]
# except KeyError as e:
# self.__logger.error(e)
# return False, 'key error'
def _LogInit(self, log_dir, level, device_serial):
if not isinstance(log_dir, str):
logging.error('wrong log_dir when init LOG, log_dir:%s', log_dir)
return False
if level not in LOG_LIST:
logging.warning('wrong level when init LOG, level:%s, use default level: DEBUG', level)
level = LOG_DEBUG
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.__logger = logging.getLogger(device_serial)
if not self.__logger.handlers:
console = logging.StreamHandler()
formatter = logging.Formatter(LOG_FORMAT)
console.setFormatter(formatter)
fileHandler = RotatingFileHandler(filename=os.path.join(log_dir, 'DeviceAPI.log'),
maxBytes=2048000,
backupCount=10)
fileHandler.setFormatter(formatter)
self.__logger.addHandler(fileHandler)
self.__logger.addHandler(console)
self.__logger.setLevel(level)
loggerWeTest = logging.getLogger('PlatformWeTest')
if not loggerWeTest.handlers:
fileHandler = RotatingFileHandler(filename=os.path.join(log_dir, 'PlatformWeTest.log'),
maxBytes=2048000,
backupCount=10)
fileHandler.setFormatter(formatter)
loggerWeTest.addHandler(fileHandler)
loggerWeTest.setLevel(level)
return True
# def _CheckException(self):
# if exceptionQueue.empty() is False:
# errorStr = exceptionQueue.get()
# while exceptionQueue.empty() is False:
# errorStr = exceptionQueue.get()
# raise Exception(errorStr)
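# Illustrative sketch (added for clarity, not part of the SDK). Assumptions: an Android
# device is reachable via adb, and 'Local' is an accepted platform_type for this build; the
# kwargs mirror the TouchCMD documentation above.
if __name__ == '__main__':
    device = AndroidDevice('Local')
    device.initialize('/tmp/device_api_logs', long_edge=1280)
    device.doAction(aType=TOUCH_CLICK, sx=100, sy=200, durationMS=50)
    device.doAction(aType=TOUCH_SWIPE, sx=100, sy=600, ex=500, ey=600, durationMS=80)
    device.deInitialize()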
|
tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sharded_mutable_dense_hashtable_test.py
|
burgerkingeater/estimator
| 288 |
71424
|
<reponame>burgerkingeater/estimator<gh_stars>100-1000
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sharded_mutable_dense_hashtable.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.platform import googletest
from tensorflow_estimator.python.estimator.canned.linear_optimizer.python.utils.sharded_mutable_dense_hashtable import _ShardedMutableDenseHashTable
class _ShardedMutableDenseHashTableTest(tf.test.TestCase):
"""Tests for the ShardedMutableHashTable class."""
def testShardedMutableHashTable(self):
for num_shards in [1, 3, 10]:
with self.cached_session():
default_val = -1
empty_key = 0
deleted_key = -1
keys = tf.constant([11, 12, 13], tf.dtypes.int64)
values = tf.constant([0, 1, 2], tf.dtypes.int64)
table = _ShardedMutableDenseHashTable(
tf.dtypes.int64,
tf.dtypes.int64,
default_val,
empty_key,
deleted_key,
num_shards=num_shards)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = tf.constant([11, 12, 14], tf.dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
self.assertAllEqual([0, 1, -1], self.evaluate(output))
def testShardedMutableHashTableVectors(self):
for num_shards in [1, 3, 10]:
with self.cached_session():
default_val = [-0.1, 0.2]
empty_key = [0, 1]
deleted_key = [1, 0]
keys = tf.constant([[11, 12], [13, 14], [15, 16]], tf.dtypes.int64)
values = tf.constant([[0.5, 0.6], [1.5, 1.6], [2.5, 2.6]],
tf.dtypes.float32)
table = _ShardedMutableDenseHashTable(
tf.dtypes.int64,
tf.dtypes.float32,
default_val,
empty_key,
deleted_key,
num_shards=num_shards)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = tf.constant([[11, 12], [13, 14], [11, 14]],
tf.dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3, 2], output.get_shape())
self.assertAllClose([[0.5, 0.6], [1.5, 1.6], [-0.1, 0.2]],
self.evaluate(output))
def testExportSharded(self):
with self.cached_session():
empty_key = -2
deleted_key = -3
default_val = -1
num_shards = 2
keys = tf.constant([10, 11, 12], tf.dtypes.int64)
values = tf.constant([2, 3, 4], tf.dtypes.int64)
table = _ShardedMutableDenseHashTable(
tf.dtypes.int64,
tf.dtypes.int64,
default_val,
empty_key,
deleted_key,
num_shards=num_shards)
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
keys_list, values_list = table.export_sharded()
self.assertAllEqual(num_shards, len(keys_list))
self.assertAllEqual(num_shards, len(values_list))
# Exported keys include empty key buckets set to the empty_key
self.assertAllEqual(
set([-2, 10, 12]), set(self.evaluate(keys_list[0]).flatten()))
self.assertAllEqual(
set([-2, 11]), set(self.evaluate(keys_list[1]).flatten()))
# Exported values include empty value buckets set to 0
self.assertAllEqual(
set([0, 2, 4]), set(self.evaluate(values_list[0]).flatten()))
self.assertAllEqual(
set([0, 3]), set(self.evaluate(values_list[1]).flatten()))
if __name__ == '__main__':
googletest.main()
|
app/grandchallenge/profiles/migrations/0001_initial.py
|
kaczmarj/grand-challenge.org
| 101 |
71426
|
<gh_stars>100-1000
# Generated by Django 1.11.11 on 2018-03-20 18:38
import django.db.models.deletion
import django_countries.fields
import stdimage.models
from django.conf import settings
from django.db import migrations, models
import grandchallenge.core.storage
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="UserProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"mugshot",
stdimage.models.JPEGField(
blank=True,
help_text="A personal image displayed in your profile.",
upload_to=grandchallenge.core.storage.get_mugshot_path,
verbose_name="mugshot",
),
),
(
"privacy",
models.CharField(
choices=[
("open", "Open"),
("registered", "Registered"),
("closed", "Closed"),
],
default="open",
help_text="Designates who can view your profile.",
max_length=15,
verbose_name="privacy",
),
),
("institution", models.CharField(max_length=100)),
("department", models.CharField(max_length=100)),
(
"country",
django_countries.fields.CountryField(max_length=2),
),
("website", models.CharField(blank=True, max_length=150)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="user_profile",
to=settings.AUTH_USER_MODEL,
verbose_name="user",
),
),
],
options={
"permissions": (("view_profile", "Can view profile"),),
"abstract": False,
},
)
]
|
parsers/IN_PB.py
|
electricitymap/electricitymap-contrib
| 143 |
71433
|
#!/usr/bin/env python3
from collections import defaultdict
import arrow
import requests
GENERATION_URL = "https://sldcapi.pstcl.org/wsDataService.asmx/pbGenData2"
DATE_URL = "https://sldcapi.pstcl.org/wsDataService.asmx/dynamicData"
GENERATION_MAPPING = {
"totalHydro": "hydro",
"totalThermal": "coal",
"totalIpp": "coal",
"resSolar": "solar",
"resNonSolar": "biomass",
}
def calculate_average_timestamp(timestamps):
"""Takes a list of string timestamps and returns the average as an arrow object."""
arrow_timestamps = [
arrow.get(ts, tzinfo="Asia/Kolkata") for ts in timestamps if ts is not None
]
unix_timestamps = [ts.timestamp for ts in arrow_timestamps]
average_timestamp = sum(unix_timestamps) / len(unix_timestamps)
arr_average_timestamp = arrow.get(average_timestamp).to("Asia/Kolkata")
return arr_average_timestamp
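# Worked example (illustrative timestamps, using the pre-1.0 arrow API this module relies on,
# where `.timestamp` is a property): averaging 00:00 and 02:00 IST yields 01:00 IST.
#   calculate_average_timestamp(["2021-01-01 00:00:00", "2021-01-01 02:00:00"])
#   # -> <Arrow [2021-01-01T01:00:00+05:30]>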
def fetch_production(
zone_key="IN-PB", session=None, target_datetime=None, logger=None
) -> dict:
"""Requests the last known production mix (in MW) of a given zone."""
if target_datetime:
raise NotImplementedError(
"The IN-PB production parser is not yet able to parse past dates"
)
s = session or requests.Session()
data_req = s.get(GENERATION_URL)
timestamp_req = s.get(DATE_URL)
raw_data = data_req.json()
timestamp_data = timestamp_req.json()
data = {
"zoneKey": zone_key,
"datetime": arrow.get(
timestamp_data["updateDate"], "DD-MM-YYYY HH:mm:ss", tzinfo="Asia/Kolkata"
).datetime,
"production": {
"hydro": 0.0,
"coal": 0.0,
"biomass": 0.0,
"solar": 0.0,
},
"storage": {},
"source": "punjasldc.<EMAIL>",
}
for from_key, to_key in GENERATION_MAPPING.items():
data["production"][to_key] += max(0, raw_data[from_key]["value"])
return [data]
def fetch_consumption(
zone_key="IN-PB", session=None, target_datetime=None, logger=None
) -> dict:
"""Requests the last known consumption (in MW) of a given zone."""
if target_datetime:
raise NotImplementedError(
"The IN-PB consumption parser is not yet able to parse past dates"
)
s = session or requests.Session()
req = s.get(GENERATION_URL)
raw_data = req.json()
consumption = float(raw_data["grossGeneration"]["value"])
data = {
"zoneKey": zone_key,
"datetime": arrow.now("Asia/Kolkata").datetime,
"consumption": consumption,
"source": "punjasldc.org",
}
return data
if __name__ == "__main__":
print(fetch_production("IN-PB"))
print(fetch_consumption("IN-PB"))
|
sdk/agrifood/azure-agrifood-farming/azure/agrifood/farming/aio/_farm_beats_client.py
|
rsdoherty/azure-sdk-for-python
| 2,728 |
71477
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import FarmBeatsClientConfiguration
from .operations import ApplicationDataOperations
from .operations import AttachmentsOperations
from .operations import BoundariesOperations
from .operations import CropsOperations
from .operations import CropVarietiesOperations
from .operations import FarmersOperations
from .operations import FarmOperationsOperations
from .operations import FarmsOperations
from .operations import FieldsOperations
from .operations import HarvestDataOperations
from .operations import ImageProcessingOperations
from .operations import OAuthProvidersOperations
from .operations import OAuthTokensOperations
from .operations import PlantingDataOperations
from .operations import ScenesOperations
from .operations import SeasonalFieldsOperations
from .operations import SeasonsOperations
from .operations import TillageDataOperations
from .operations import WeatherOperations
from .. import models
class FarmBeatsClient(object):
"""APIs documentation for Azure AgPlatform DataPlane Service.
:ivar application_data: ApplicationDataOperations operations
:vartype application_data: azure.agrifood.farming.aio.operations.ApplicationDataOperations
:ivar attachments: AttachmentsOperations operations
:vartype attachments: azure.agrifood.farming.aio.operations.AttachmentsOperations
:ivar boundaries: BoundariesOperations operations
:vartype boundaries: azure.agrifood.farming.aio.operations.BoundariesOperations
:ivar crops: CropsOperations operations
:vartype crops: azure.agrifood.farming.aio.operations.CropsOperations
:ivar crop_varieties: CropVarietiesOperations operations
:vartype crop_varieties: azure.agrifood.farming.aio.operations.CropVarietiesOperations
:ivar farmers: FarmersOperations operations
:vartype farmers: azure.agrifood.farming.aio.operations.FarmersOperations
:ivar farm_operations: FarmOperationsOperations operations
:vartype farm_operations: azure.agrifood.farming.aio.operations.FarmOperationsOperations
:ivar farms: FarmsOperations operations
:vartype farms: azure.agrifood.farming.aio.operations.FarmsOperations
:ivar fields: FieldsOperations operations
:vartype fields: azure.agrifood.farming.aio.operations.FieldsOperations
:ivar harvest_data: HarvestDataOperations operations
:vartype harvest_data: azure.agrifood.farming.aio.operations.HarvestDataOperations
:ivar image_processing: ImageProcessingOperations operations
:vartype image_processing: azure.agrifood.farming.aio.operations.ImageProcessingOperations
:ivar oauth_providers: OAuthProvidersOperations operations
:vartype oauth_providers: azure.agrifood.farming.aio.operations.OAuthProvidersOperations
:ivar oauth_tokens: OAuthTokensOperations operations
:vartype oauth_tokens: azure.agrifood.farming.aio.operations.OAuthTokensOperations
:ivar planting_data: PlantingDataOperations operations
:vartype planting_data: azure.agrifood.farming.aio.operations.PlantingDataOperations
:ivar scenes: ScenesOperations operations
:vartype scenes: azure.agrifood.farming.aio.operations.ScenesOperations
:ivar seasonal_fields: SeasonalFieldsOperations operations
:vartype seasonal_fields: azure.agrifood.farming.aio.operations.SeasonalFieldsOperations
:ivar seasons: SeasonsOperations operations
:vartype seasons: azure.agrifood.farming.aio.operations.SeasonsOperations
:ivar tillage_data: TillageDataOperations operations
:vartype tillage_data: azure.agrifood.farming.aio.operations.TillageDataOperations
:ivar weather: WeatherOperations operations
:vartype weather: azure.agrifood.farming.aio.operations.WeatherOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param endpoint: The endpoint of your FarmBeats resource (protocol and hostname, for example: https://{resourceName}.farmbeats.azure.net).
:type endpoint: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any
) -> None:
base_url = '{Endpoint}'
self._config = FarmBeatsClientConfiguration(credential, endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.application_data = ApplicationDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.attachments = AttachmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.boundaries = BoundariesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.crops = CropsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.crop_varieties = CropVarietiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farmers = FarmersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farm_operations = FarmOperationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farms = FarmsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.fields = FieldsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.harvest_data = HarvestDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.image_processing = ImageProcessingOperations(
self._client, self._config, self._serialize, self._deserialize)
self.oauth_providers = OAuthProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.oauth_tokens = OAuthTokensOperations(
self._client, self._config, self._serialize, self._deserialize)
self.planting_data = PlantingDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scenes = ScenesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.seasonal_fields = SeasonalFieldsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.seasons = SeasonsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tillage_data = TillageDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.weather = WeatherOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "FarmBeatsClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
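# Hypothetical usage sketch: shows how the async client defined above can be used
# as an async context manager. Assumes the azure-identity package for
# DefaultAzureCredential; the endpoint below is a placeholder for your resource.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    async def _demo() -> None:
        credential = DefaultAzureCredential()
        endpoint = "https://{resourceName}.farmbeats.azure.net"  # placeholder
        async with FarmBeatsClient(credential=credential, endpoint=endpoint) as client:
            # Operation groups (e.g. client.farmers, client.weather) are ready here.
            pass
        await credential.close()
    asyncio.run(_demo())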
|
synapse/tests/test_glob.py
|
ackroute/synapse
| 216 |
71493
|
<filename>synapse/tests/test_glob.py
import synapse.glob as s_glob
import synapse.tests.utils as s_t_utils
class GlobTest(s_t_utils.SynTest):
def test_glob_sync(self):
async def afoo():
return 42
retn = s_glob.sync(afoo())
self.eq(retn, 42)
|
build_tools/sim_object_param_struct_cc.py
|
hyu-iot/gem5
| 765 |
71527
|
# Copyright 2021 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import importlib
import os.path
import sys
import importer
from code_formatter import code_formatter
parser = argparse.ArgumentParser()
parser.add_argument('modpath', help='module the simobject belongs to')
parser.add_argument('param_cc', help='parameter cc file to generate')
parser.add_argument('use_python',
help='whether python is enabled in gem5 (True or False)')
args = parser.parse_args()
use_python = args.use_python.lower()
if use_python == 'true':
use_python = True
elif use_python == 'false':
use_python = False
else:
print(f'Unrecognized "use_python" value {use_python}', file=sys.stderr)
sys.exit(1)
basename = os.path.basename(args.param_cc)
no_ext = os.path.splitext(basename)[0]
sim_object_name = '_'.join(no_ext.split('_')[1:])
importer.install()
module = importlib.import_module(args.modpath)
sim_object = getattr(module, sim_object_name)
code = code_formatter()
sim_object.params_create_decl(code, use_python)
code.write(args.param_cc)
|
packs/alertlogic/actions/scan_list_scan_executions.py
|
userlocalhost2000/st2contrib
| 164 |
71565
|
<filename>packs/alertlogic/actions/scan_list_scan_executions.py
#!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2actions.runners.pythonrunner import Action
from lib.get_scan_list import GetScanList
from lib.get_scan_executions import GetScanExecutions
class ListScanExecutions(Action):
def run(self, scan_title, customer_id=None):
"""
The template class for
Returns: An blank Dict.
Raises:
ValueError: On lack of key in config.
"""
scans = GetScanList(self.config, customer_id)
return GetScanExecutions(self.config, scans[scan_title]['id'])
|
auditlog/migrations/0006_object_pk_index.py
|
washdrop/django-auditlog
| 252 |
71593
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("auditlog", "0005_logentry_additional_data_verbose_name"),
]
operations = [
migrations.AlterField(
model_name="logentry",
name="object_pk",
field=models.CharField(
verbose_name="object pk", max_length=255, db_index=True
),
),
]
|
setup.py
|
qq758689805/Pocsuite-dev
| 1,991 |
71607
|
<reponame>qq758689805/Pocsuite-dev<gh_stars>1000+
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup, find_packages
from pocsuite import (
__version__ as version, __author__ as author,
__author_email__ as author_email, __license__ as license)
setup(
name='pocsuite',
version=version,
description="Pocsuite is an open-sourced remote vulnerability testing framework developed by the Knownsec Security Team.",
long_description="""\
Pocsuite is an open-sourced remote vulnerability testing and proof-of-concept development framework developed by the Knownsec Security Team. It comes with a powerful proof-of-concept engine and many nice features for the ultimate penetration testers and security researchers.""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='PoC,Exp,Pocsuite',
author=author,
author_email=author_email,
url='http://pocsuite.org',
license=license,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'lxml',
],
entry_points={
'console_scripts': [
'pocsuite = pocsuite.pocsuite_cli:main',
'pcs-console = pocsuite.pocsuite_console:main',
'pcs-verify = pocsuite.pocsuite_verify:main',
'pcs-attack = pocsuite.pocsuite_attack:main',
],
},
)
|
venv/Lib/site-packages/debugpy/launcher/winapi.py
|
ajayiagbebaku/NFL-Model
| 695 |
71611
|
<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import ctypes
from ctypes.wintypes import BOOL, DWORD, HANDLE, LARGE_INTEGER, LPCSTR, UINT
from debugpy.common import log
JOBOBJECTCLASS = ctypes.c_int
LPDWORD = ctypes.POINTER(DWORD)
LPVOID = ctypes.c_void_p
SIZE_T = ctypes.c_size_t
ULONGLONG = ctypes.c_ulonglong
class IO_COUNTERS(ctypes.Structure):
_fields_ = [
("ReadOperationCount", ULONGLONG),
("WriteOperationCount", ULONGLONG),
("OtherOperationCount", ULONGLONG),
("ReadTransferCount", ULONGLONG),
("WriteTransferCount", ULONGLONG),
("OtherTransferCount", ULONGLONG),
]
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("PerProcessUserTimeLimit", LARGE_INTEGER),
("PerJobUserTimeLimit", LARGE_INTEGER),
("LimitFlags", DWORD),
("MinimumWorkingSetSize", SIZE_T),
("MaximumWorkingSetSize", SIZE_T),
("ActiveProcessLimit", DWORD),
("Affinity", SIZE_T),
("PriorityClass", DWORD),
("SchedulingClass", DWORD),
]
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
("IoInfo", IO_COUNTERS),
("ProcessMemoryLimit", SIZE_T),
("JobMemoryLimit", SIZE_T),
("PeakProcessMemoryUsed", SIZE_T),
("PeakJobMemoryUsed", SIZE_T),
]
JobObjectExtendedLimitInformation = JOBOBJECTCLASS(9)
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
PROCESS_TERMINATE = 0x0001
PROCESS_SET_QUOTA = 0x0100
def _errcheck(is_error_result=(lambda result: not result)):
def impl(result, func, args):
if is_error_result(result):
log.debug("{0} returned {1}", func.__name__, result)
raise ctypes.WinError()
else:
return result
return impl
kernel32 = ctypes.windll.kernel32
kernel32.AssignProcessToJobObject.errcheck = _errcheck()
kernel32.AssignProcessToJobObject.restype = BOOL
kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
kernel32.CreateJobObjectA.errcheck = _errcheck(lambda result: result == 0)
kernel32.CreateJobObjectA.restype = HANDLE
kernel32.CreateJobObjectA.argtypes = (LPVOID, LPCSTR)
kernel32.OpenProcess.errcheck = _errcheck(lambda result: result == 0)
kernel32.OpenProcess.restype = HANDLE
kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
kernel32.QueryInformationJobObject.errcheck = _errcheck()
kernel32.QueryInformationJobObject.restype = BOOL
kernel32.QueryInformationJobObject.argtypes = (
HANDLE,
JOBOBJECTCLASS,
LPVOID,
DWORD,
LPDWORD,
)
kernel32.SetInformationJobObject.errcheck = _errcheck()
kernel32.SetInformationJobObject.restype = BOOL
kernel32.SetInformationJobObject.argtypes = (HANDLE, JOBOBJECTCLASS, LPVOID, DWORD)
kernel32.TerminateJobObject.errcheck = _errcheck()
kernel32.TerminateJobObject.restype = BOOL
kernel32.TerminateJobObject.argtypes = (HANDLE, UINT)
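# Hypothetical usage sketch: combines the structures and bindings above to create a
# job object whose processes are killed when the last handle to it is closed.
# Windows-only, and not exercised by debugpy itself in this exact form.
if __name__ == "__main__":
    job = kernel32.CreateJobObjectA(None, None)
    info = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
    info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
    kernel32.SetInformationJobObject(
        job,
        JobObjectExtendedLimitInformation,
        ctypes.pointer(info),
        ctypes.sizeof(info),
    )
    log.info("Job object {0!r} configured with kill-on-close", job)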
|
tests/pytests/unit/utils/test_thin.py
|
babs/salt
| 9,425 |
71631
|
<reponame>babs/salt<filename>tests/pytests/unit/utils/test_thin.py
import pytest
import salt.exceptions
import salt.utils.stringutils
import salt.utils.thin
from tests.support.mock import MagicMock, patch
def _mock_popen(return_value=None, side_effect=None, returncode=0):
proc = MagicMock()
proc.communicate = MagicMock(return_value=return_value, side_effect=side_effect)
proc.returncode = returncode
popen = MagicMock(return_value=proc)
return popen
@pytest.mark.parametrize("version", [[2, 7], [3, 0], [3, 7]])
def test_get_tops_python(version):
"""
Tests 'distro' is only included when targeting
python 3 in get_tops_python
"""
python3 = False
if tuple(version) >= (3, 0):
python3 = True
mods = ["jinja2"]
if python3:
mods.append("distro")
popen_ret = tuple(salt.utils.stringutils.to_bytes(x) for x in ("", ""))
mock_popen = _mock_popen(return_value=popen_ret)
patch_proc = patch("salt.utils.thin.subprocess.Popen", mock_popen)
patch_which = patch("salt.utils.path.which", return_value=True)
with patch_proc, patch_which:
salt.utils.thin.get_tops_python("python2", ext_py_ver=version)
cmds = [x[0][0] for x in mock_popen.call_args_list]
assert [x for x in cmds if "jinja2" in x[2]]
if python3:
assert [x for x in cmds if "distro" in x[2]]
else:
assert not [x for x in cmds if "distro" in x[2]]
@pytest.mark.parametrize("version", [[2, 7], [3, 0], [3, 7]])
def test_get_ext_tops(version):
"""
Tests 'distro' is only included when targeting
python 3 in get_ext_tops
"""
python3 = False
if tuple(version) >= (3, 0):
python3 = True
cfg = {
"namespace": {
"path": "/foo",
"py-version": version,
"dependencies": {
"jinja2": "/jinja/foo.py",
"yaml": "/yaml/",
"tornado": "/tornado/tornado.py",
"msgpack": "msgpack.py",
},
}
}
with patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=True)):
if python3:
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
salt.utils.thin.get_ext_tops(cfg)
else:
ret = salt.utils.thin.get_ext_tops(cfg)
if python3:
assert "distro" in err.value.code
else:
assert not [x for x in ret["namespace"]["dependencies"] if "distro" in x]
assert [x for x in ret["namespace"]["dependencies"] if "msgpack" in x]
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMyFirstTimeTranslating.py
|
fake-name/ReadableWebProxy
| 193 |
71640
|
<gh_stars>100-1000
def extractMyFirstTimeTranslating(item):
"""
'My First Time Translating'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
|
edb/edgeql/tokenizer.py
|
aaronbrighton/edgedb
| 7,302 |
71646
|
<gh_stars>1000+
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import re
import hashlib
from edb._edgeql_rust import tokenize as _tokenize, TokenizerError, Token
from edb._edgeql_rust import normalize as _normalize, Entry
from edb import errors
TRAILING_WS_IN_CONTINUATION = re.compile(r'\\ \s+\n')
class Source:
def __init__(self, text: str, tokens: List[Token]) -> None:
self._cache_key = hashlib.blake2b(text.encode('utf-8')).digest()
self._text = text
self._tokens = tokens
def text(self) -> str:
return self._text
def cache_key(self) -> bytes:
return self._cache_key
def variables(self) -> Dict[str, Any]:
return {}
def tokens(self) -> List[Token]:
return self._tokens
def first_extra(self) -> Optional[int]:
return None
def extra_count(self) -> int:
return 0
def extra_blob(self) -> bytes:
return b''
@classmethod
def from_string(cls, text: str) -> Source:
return cls(text=text, tokens=tokenize(text))
def __repr__(self):
return f'<edgeql.Source text={self._text!r}>'
class NormalizedSource(Source):
def __init__(self, normalized: Entry, text: str) -> None:
self._text = text
self._cache_key = normalized.key()
self._tokens = normalized.tokens()
self._variables = normalized.variables()
self._first_extra = normalized.first_extra()
self._extra_count = normalized.extra_count()
self._extra_blob = normalized.extra_blob()
def text(self) -> str:
return self._text
def cache_key(self) -> bytes:
return self._cache_key
def variables(self) -> Dict[str, Any]:
return self._variables
def tokens(self) -> List[Token]:
return self._tokens
def first_extra(self) -> Optional[int]:
return self._first_extra
def extra_count(self) -> int:
return self._extra_count
def extra_blob(self) -> bytes:
return self._extra_blob
@classmethod
def from_string(cls, text: str) -> NormalizedSource:
return cls(normalize(text), text)
def tokenize(eql: str) -> List[Token]:
try:
return _tokenize(eql)
except TokenizerError as e:
message, position = e.args
hint = _derive_hint(eql, message, position)
raise errors.EdgeQLSyntaxError(
message, position=position, hint=hint) from e
def normalize(eql: str) -> Entry:
try:
return _normalize(eql)
except TokenizerError as e:
message, position = e.args
hint = _derive_hint(eql, message, position)
raise errors.EdgeQLSyntaxError(
message, position=position, hint=hint) from e
def _derive_hint(
input: str,
message: str,
position: Tuple[int, int, int],
) -> Optional[str]:
_, _, off = position
if message == r"invalid string literal: invalid escape sequence '\ '":
if TRAILING_WS_IN_CONTINUATION.search(input[off:]):
return "consider removing trailing whitespace"
return None
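# Hypothetical usage sketch: builds a Source from query text and inspects its cache
# key and token stream. Assumes a development build of EdgeDB, since _edgeql_rust is
# a compiled extension module; the sample queries are illustrative only.
if __name__ == "__main__":
    src = Source.from_string("SELECT 1 + 1;")
    print(src.cache_key().hex(), len(src.tokens()), "tokens")
    try:
        Source.from_string("SELECT 'unterminated")
    except errors.EdgeQLSyntaxError as err:
        # Tokenizer failures are surfaced as EdgeQLSyntaxError with position and hint.
        print("syntax error:", err)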
|
mmaction/models/heads/timesformer_head.py
|
rlleshi/mmaction2
| 1,870 |
71657
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import trunc_normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class TimeSformerHead(BaseHead):
"""Classification head for TimeSformer.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Defaults to `dict(type='CrossEntropyLoss')`.
        init_std (float): Std value for weight initialization. Defaults to 0.02.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
init_std=0.02,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.init_std = init_std
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initiate the parameters from scratch."""
trunc_normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
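# Hypothetical usage sketch: builds the head directly (bypassing the config/registry
# machinery) and runs a forward pass on random [N, in_channels] features. The class
# count and channel width below are illustrative values, not MMAction2 defaults.
if __name__ == '__main__':
    import torch
    head = TimeSformerHead(num_classes=400, in_channels=768)
    head.init_weights()
    cls_scores = head(torch.randn(2, 768))  # -> [2, 400]
    print(cls_scores.shape)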
|
certbot-apache/tests/autohsts_test.py
|
luisriverag/certbot
| 16,789 |
71663
|
# pylint: disable=too-many-lines
"""Test for certbot_apache._internal.configurator AutoHSTS functionality"""
import re
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from certbot import errors
from certbot_apache._internal import constants
import util
class AutoHSTSTest(util.ApacheTest):
"""Tests for AutoHSTS feature"""
# pylint: disable=protected-access
def setUp(self): # pylint: disable=arguments-differ
super().setUp()
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir, self.work_dir)
self.config.parser.modules["headers_module"] = None
self.config.parser.modules["mod_headers.c"] = None
self.config.parser.modules["ssl_module"] = None
self.config.parser.modules["mod_ssl.c"] = None
self.vh_truth = util.get_vh_truth(
self.temp_dir, "debian_apache_2_4/multiple_vhosts")
def get_autohsts_value(self, vh_path):
""" Get value from Strict-Transport-Security header """
header_path = self.config.parser.find_dir("Header", None, vh_path)
if header_path:
pat = '(?:[ "]|^)(strict-transport-security)(?:[ "]|$)'
for head in header_path:
if re.search(pat, self.config.parser.aug.get(head).lower()):
return self.config.parser.aug.get(
head.replace("arg[3]", "arg[4]"))
return None # pragma: no cover
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.enable_mod")
def test_autohsts_enable_headers_mod(self, mock_enable, _restart):
self.config.parser.modules.pop("headers_module", None)
self.config.parser.modules.pop("mod_header.c", None)
self.config.enable_autohsts(mock.MagicMock(), ["ocspvhost.com"])
self.assertTrue(mock_enable.called)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
def test_autohsts_deploy_already_exists(self, _restart):
self.config.enable_autohsts(mock.MagicMock(), ["ocspvhost.com"])
self.assertRaises(errors.PluginEnhancementAlreadyPresent,
self.config.enable_autohsts,
mock.MagicMock(), ["ocspvhost.com"])
@mock.patch("certbot_apache._internal.constants.AUTOHSTS_FREQ", 0)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.prepare")
def test_autohsts_increase(self, mock_prepare, _mock_restart):
self.config._prepared = False
maxage = "\"max-age={0}\""
initial_val = maxage.format(constants.AUTOHSTS_STEPS[0])
inc_val = maxage.format(constants.AUTOHSTS_STEPS[1])
self.config.enable_autohsts(mock.MagicMock(), ["ocspvhost.com"])
# Verify initial value
self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path),
initial_val)
# Increase
self.config.update_autohsts(mock.MagicMock())
# Verify increased value
self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path),
inc_val)
self.assertTrue(mock_prepare.called)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._autohsts_increase")
def test_autohsts_increase_noop(self, mock_increase, _restart):
maxage = "\"max-age={0}\""
initial_val = maxage.format(constants.AUTOHSTS_STEPS[0])
self.config.enable_autohsts(mock.MagicMock(), ["ocspvhost.com"])
# Verify initial value
self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path),
initial_val)
self.config.update_autohsts(mock.MagicMock())
# Freq not patched, so value shouldn't increase
self.assertFalse(mock_increase.called)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
@mock.patch("certbot_apache._internal.constants.AUTOHSTS_FREQ", 0)
def test_autohsts_increase_no_header(self, _restart):
self.config.enable_autohsts(mock.MagicMock(), ["ocspvhost.com"])
# Remove the header
dir_locs = self.config.parser.find_dir("Header", None,
self.vh_truth[7].path)
dir_loc = "/".join(dir_locs[0].split("/")[:-1])
self.config.parser.aug.remove(dir_loc)
self.assertRaises(errors.PluginError,
self.config.update_autohsts,
mock.MagicMock())
@mock.patch("certbot_apache._internal.constants.AUTOHSTS_FREQ", 0)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
def test_autohsts_increase_and_make_permanent(self, _mock_restart):
maxage = "\"max-age={0}\""
max_val = maxage.format(constants.AUTOHSTS_PERMANENT)
mock_lineage = mock.MagicMock()
mock_lineage.key_path = "/etc/apache2/ssl/key-certbot_15.pem"
self.config.enable_autohsts(mock.MagicMock(), ["ocspvhost.com"])
for i in range(len(constants.AUTOHSTS_STEPS)-1):
# Ensure that value is not made permanent prematurely
self.config.deploy_autohsts(mock_lineage)
self.assertNotEqual(self.get_autohsts_value(self.vh_truth[7].path),
max_val)
self.config.update_autohsts(mock.MagicMock())
# Value should match pre-permanent increment step
cur_val = maxage.format(constants.AUTOHSTS_STEPS[i+1])
self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path),
cur_val)
# Ensure that the value is raised to max
self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path),
maxage.format(constants.AUTOHSTS_STEPS[-1]))
# Make permanent
self.config.deploy_autohsts(mock_lineage)
self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path),
max_val)
def test_autohsts_update_noop(self):
with mock.patch("time.time") as mock_time:
# Time mock is used to make sure that the execution does not
# continue when no autohsts entries exist in pluginstorage
self.config.update_autohsts(mock.MagicMock())
self.assertFalse(mock_time.called)
def test_autohsts_make_permanent_noop(self):
self.config.storage.put = mock.MagicMock()
self.config.deploy_autohsts(mock.MagicMock())
# Make sure that the execution does not continue when no entries in store
self.assertFalse(self.config.storage.put.called)
@mock.patch("certbot_apache._internal.display_ops.select_vhost")
def test_autohsts_no_ssl_vhost(self, mock_select):
mock_select.return_value = self.vh_truth[0]
with mock.patch("certbot_apache._internal.configurator.logger.error") as mock_log:
self.assertRaises(errors.PluginError,
self.config.enable_autohsts,
mock.MagicMock(), "invalid.example.com")
self.assertTrue(
"Certbot was not able to find SSL" in mock_log.call_args[0][0])
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.add_vhost_id")
def test_autohsts_dont_enhance_twice(self, mock_id, _restart):
mock_id.return_value = "1234567"
self.config.enable_autohsts(mock.MagicMock(),
["ocspvhost.com", "ocspvhost.com"])
self.assertEqual(mock_id.call_count, 1)
def test_autohsts_remove_orphaned(self):
# pylint: disable=protected-access
self.config._autohsts_fetch_state()
self.config._autohsts["orphan_id"] = {"laststep": 0, "timestamp": 0}
self.config._autohsts_save_state()
self.config.update_autohsts(mock.MagicMock())
self.assertFalse("orphan_id" in self.config._autohsts)
# Make sure it's removed from the pluginstorage file as well
self.config._autohsts = None
self.config._autohsts_fetch_state()
self.assertFalse(self.config._autohsts)
def test_autohsts_make_permanent_vhost_not_found(self):
# pylint: disable=protected-access
self.config._autohsts_fetch_state()
self.config._autohsts["orphan_id"] = {"laststep": 999, "timestamp": 0}
self.config._autohsts_save_state()
with mock.patch("certbot_apache._internal.configurator.logger.error") as mock_log:
self.config.deploy_autohsts(mock.MagicMock())
self.assertTrue(mock_log.called)
self.assertTrue(
"VirtualHost with id orphan_id was not" in mock_log.call_args[0][0])
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
flask_dance/consumer/requests.py
|
kerryhatcher/flask-dance
| 836 |
71667
|
<filename>flask_dance/consumer/requests.py
from functools import wraps
from flask import redirect, url_for
from urlobject import URLObject
from requests_oauthlib import OAuth1Session as BaseOAuth1Session
from requests_oauthlib import OAuth2Session as BaseOAuth2Session
from oauthlib.common import to_unicode
from werkzeug.utils import cached_property
from flask_dance.utils import invalidate_cached_property
class OAuth1Session(BaseOAuth1Session):
"""
A :class:`requests.Session` subclass that can do some special things:
* lazy-loads OAuth1 tokens from the storage via the blueprint
* handles OAuth1 authentication
(from :class:`requests_oauthlib.OAuth1Session` superclass)
* has a ``base_url`` property used for relative URL resolution
Note that this is a session between the consumer (your website) and the
provider (e.g. Twitter), and *not* a session between a user of your website
and your website.
"""
def __init__(self, blueprint=None, base_url=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.blueprint = blueprint
self.base_url = URLObject(base_url)
@cached_property
def token(self):
"""
Get and set the values in the OAuth token, structured as a dictionary.
"""
return self.blueprint.token
def load_token(self):
t = self.token
if t and "oauth_token" in t and "oauth_token_secret" in t:
# This really, really violates the Law of Demeter, but
# I don't see a better way to set these parameters. :(
self.auth.client.resource_owner_key = to_unicode(t["oauth_token"])
self.auth.client.resource_owner_secret = to_unicode(t["oauth_token_secret"])
return True
return False
@property
def authorized(self):
"""This is the property used when you have a statement in your code
that reads "if <provider>.authorized:", e.g. "if twitter.authorized:".
The way it works is kind of complicated: this function just tries
to load the token, and then the 'super()' statement basically just
tests if the token exists (see BaseOAuth1Session.authorized).
To load the token, it calls the load_token() function within this class,
which in turn checks the 'token' property of this class (another
function), which in turn checks the 'token' property of the blueprint
(see base.py), which calls 'storage.get()' to actually try to load
the token from the cache/db (see the 'get()' function in
storage/sqla.py).
"""
self.load_token()
return super().authorized
@property
def authorization_required(self):
"""
.. versionadded:: 1.3.0
This is a decorator for a view function. If the current user does not
have an OAuth token, then they will be redirected to the
:meth:`~flask_dance.consumer.oauth1.OAuth1ConsumerBlueprint.login`
view to obtain one.
"""
def wrapper(func):
@wraps(func)
def check_authorization(*args, **kwargs):
if not self.authorized:
endpoint = f"{self.blueprint.name}.login"
return redirect(url_for(endpoint))
return func(*args, **kwargs)
return check_authorization
return wrapper
def prepare_request(self, request):
if self.base_url:
request.url = self.base_url.relative(request.url)
return super().prepare_request(request)
def request(
self, method, url, data=None, headers=None, should_load_token=True, **kwargs
):
if should_load_token:
self.load_token()
return super().request(
method=method, url=url, data=data, headers=headers, **kwargs
)
class OAuth2Session(BaseOAuth2Session):
"""
A :class:`requests.Session` subclass that can do some special things:
* lazy-loads OAuth2 tokens from the storage via the blueprint
* handles OAuth2 authentication
(from :class:`requests_oauthlib.OAuth2Session` superclass)
* has a ``base_url`` property used for relative URL resolution
Note that this is a session between the consumer (your website) and the
provider (e.g. Twitter), and *not* a session between a user of your website
and your website.
"""
def __init__(self, blueprint=None, base_url=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.blueprint = blueprint
self.base_url = URLObject(base_url)
invalidate_cached_property(self, "token")
@cached_property
def token(self):
"""
Get and set the values in the OAuth token, structured as a dictionary.
"""
return self.blueprint.token
def load_token(self):
self._client.token = self.token
if self.token:
self._client.populate_token_attributes(self.token)
return True
return False
@property
def access_token(self):
"""
Returns the ``access_token`` from the OAuth token.
"""
return self.token and self.token.get("access_token")
@property
def authorized(self):
"""This is the property used when you have a statement in your code
that reads "if <provider>.authorized:", e.g. "if twitter.authorized:".
The way it works is kind of complicated: this function just tries
to load the token, and then the 'super()' statement basically just
        tests if the token exists (see BaseOAuth2Session.authorized).
To load the token, it calls the load_token() function within this class,
which in turn checks the 'token' property of this class (another
function), which in turn checks the 'token' property of the blueprint
(see base.py), which calls 'storage.get()' to actually try to load
the token from the cache/db (see the 'get()' function in
storage/sqla.py).
"""
self.load_token()
return super().authorized
@property
def authorization_required(self):
"""
.. versionadded:: 1.3.0
This is a decorator for a view function. If the current user does not
have an OAuth token, then they will be redirected to the
:meth:`~flask_dance.consumer.oauth2.OAuth2ConsumerBlueprint.login`
view to obtain one.
"""
def wrapper(func):
@wraps(func)
def check_authorization(*args, **kwargs):
if not self.authorized:
endpoint = f"{self.blueprint.name}.login"
return redirect(url_for(endpoint))
return func(*args, **kwargs)
return check_authorization
return wrapper
def request(self, method, url, data=None, headers=None, **kwargs):
if self.base_url:
url = self.base_url.relative(url)
self.load_token()
return super().request(
method=method,
url=url,
data=data,
headers=headers,
client_id=self.blueprint.client_id,
client_secret=self.blueprint.client_secret,
**kwargs,
)
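# Hypothetical usage sketch: the consumer-side pattern the docstrings above describe.
# `github.authorized` lazily loads the token from storage, and relative URLs such as
# "/user" are resolved against the blueprint's base_url. The client id/secret are
# placeholders and the GitHub provider is used purely as an example.
if __name__ == "__main__":
    from flask import Flask
    from flask_dance.contrib.github import make_github_blueprint, github
    app = Flask(__name__)
    app.secret_key = "dev-only"
    blueprint = make_github_blueprint(client_id="placeholder", client_secret="placeholder")
    app.register_blueprint(blueprint, url_prefix="/login")
    @app.route("/")
    def index():
        if not github.authorized:
            return redirect(url_for("github.login"))
        resp = github.get("/user")  # resolved to https://api.github.com/user
        return resp.json()["login"]
    app.run()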
|
hardware/chip/rtl872xd/build_bin.py
|
wstong999/AliOS-Things
| 4,538 |
71668
|
<filename>hardware/chip/rtl872xd/build_bin.py
#! /usr/bin/env python
import os
import platform
import argparse
import sys
import shutil
print(sys.argv)
parser = argparse.ArgumentParser()
parser.add_argument('--target', dest='target', action='store')
args = parser.parse_args()
mypath = os.path.dirname(sys.argv[0])
os.chdir(mypath)
print(os.getcwd())
target = args.target
cur_os = platform.system()
arch = platform.architecture()
path = ''
magic = '0xefefefef'
if cur_os == 'Linux':
if '64bit' in arch:
path = 'linux64'
else:
path = 'linux32'
elif cur_os == 'Darwin':
path = 'osx'
elif cur_os == 'Windows':
path = 'win32'
if path:
path = os.path.join("tools", path, "xz")
hw_module = 0
cmd_str = "python haas1000_genbin.py %d \"%s\"" % (hw_module, target)
os.system(cmd_str)
bin_path = os.path.join("..", "write_flash_gui", "ota_bin")
shutil.copy(os.path.join(bin_path, "ota_rtos.bin"), os.path.join(bin_path, "ota_rtos_ota.bin"))
cmd_str = "\"%s\" -f --lzma2=dict=32KiB --check=crc32 -k %s" % (os.path.abspath(path), os.path.join(bin_path, "ota_rtos_ota.bin"))
os.system(cmd_str)
cmd_str = "python ota_gen_md5_bin.py \"%s\" -m %s" % (os.path.join(bin_path, "ota_rtos_ota.bin"), magic)
os.system(cmd_str)
cmd_str = "python ota_gen_md5_bin.py \"%s\" -m %s" % (os.path.join(bin_path, "ota_rtos_ota.bin.xz"), magic)
os.system(cmd_str)
print("run external script success")
|
src/test/pythonFiles/testFiles/specificTest/tests/test_unittest_one.py
|
ChaseKnowlden/vscode-jupyter
| 615 |
71677
|
<reponame>ChaseKnowlden/vscode-jupyter<filename>src/test/pythonFiles/testFiles/specificTest/tests/test_unittest_one.py<gh_stars>100-1000
import unittest
class Test_test_one_1(unittest.TestCase):
def test_1_1_1(self):
self.assertEqual(1,1,'Not equal')
def test_1_1_2(self):
self.assertEqual(1,2,'Not equal')
@unittest.skip("demonstrating skipping")
def test_1_1_3(self):
self.assertEqual(1,2,'Not equal')
class Test_test_one_2(unittest.TestCase):
def test_1_2_1(self):
self.assertEqual(1,1,'Not equal')
if __name__ == '__main__':
unittest.main()
|
alipay/aop/api/response/AlipayEcoCityserviceUserAppinfoQueryResponse.py
|
antopen/alipay-sdk-python-all
| 213 |
71714
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcoCityserviceUserAppinfoQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEcoCityserviceUserAppinfoQueryResponse, self).__init__()
self._biz_type = None
self._result_code = None
self._result_context = None
self._result_msg = None
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def result_code(self):
return self._result_code
@result_code.setter
def result_code(self, value):
self._result_code = value
@property
def result_context(self):
return self._result_context
@result_context.setter
def result_context(self, value):
self._result_context = value
@property
def result_msg(self):
return self._result_msg
@result_msg.setter
def result_msg(self, value):
self._result_msg = value
def parse_response_content(self, response_content):
response = super(AlipayEcoCityserviceUserAppinfoQueryResponse, self).parse_response_content(response_content)
if 'biz_type' in response:
self.biz_type = response['biz_type']
if 'result_code' in response:
self.result_code = response['result_code']
if 'result_context' in response:
self.result_context = response['result_context']
if 'result_msg' in response:
self.result_msg = response['result_msg']
|
official/vision/image_classification/learning_rate.py
|
akshit-protonn/models
| 82,518 |
71720
|
<filename>official/vision/image_classification/learning_rate.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Learning rate utilities for vision tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Mapping, Optional
import numpy as np
import tensorflow as tf
BASE_LEARNING_RATE = 0.1
class WarmupDecaySchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""A wrapper for LearningRateSchedule that includes warmup steps."""
def __init__(self,
lr_schedule: tf.keras.optimizers.schedules.LearningRateSchedule,
warmup_steps: int,
warmup_lr: Optional[float] = None):
"""Add warmup decay to a learning rate schedule.
Args:
lr_schedule: base learning rate scheduler
warmup_steps: number of warmup steps
warmup_lr: an optional field for the final warmup learning rate. This
should be provided if the base `lr_schedule` does not contain this
field.
"""
super(WarmupDecaySchedule, self).__init__()
self._lr_schedule = lr_schedule
self._warmup_steps = warmup_steps
self._warmup_lr = warmup_lr
def __call__(self, step: int):
lr = self._lr_schedule(step)
if self._warmup_steps:
if self._warmup_lr is not None:
initial_learning_rate = tf.convert_to_tensor(
self._warmup_lr, name="initial_learning_rate")
else:
initial_learning_rate = tf.convert_to_tensor(
self._lr_schedule.initial_learning_rate,
name="initial_learning_rate")
dtype = initial_learning_rate.dtype
global_step_recomp = tf.cast(step, dtype)
warmup_steps = tf.cast(self._warmup_steps, dtype)
warmup_lr = initial_learning_rate * global_step_recomp / warmup_steps
lr = tf.cond(global_step_recomp < warmup_steps, lambda: warmup_lr,
lambda: lr)
return lr
def get_config(self) -> Mapping[str, Any]:
config = self._lr_schedule.get_config()
config.update({
"warmup_steps": self._warmup_steps,
"warmup_lr": self._warmup_lr,
})
return config
class CosineDecayWithWarmup(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Class to generate learning rate tensor."""
def __init__(self, batch_size: int, total_steps: int, warmup_steps: int):
"""Creates the consine learning rate tensor with linear warmup.
Args:
batch_size: The training batch size used in the experiment.
total_steps: Total training steps.
warmup_steps: Steps for the warm up period.
"""
super(CosineDecayWithWarmup, self).__init__()
base_lr_batch_size = 256
self._total_steps = total_steps
self._init_learning_rate = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self._warmup_steps = warmup_steps
def __call__(self, global_step: int):
global_step = tf.cast(global_step, dtype=tf.float32)
warmup_steps = self._warmup_steps
init_lr = self._init_learning_rate
total_steps = self._total_steps
linear_warmup = global_step / warmup_steps * init_lr
cosine_learning_rate = init_lr * (tf.cos(np.pi *
(global_step - warmup_steps) /
(total_steps - warmup_steps)) +
1.0) / 2.0
learning_rate = tf.where(global_step < warmup_steps, linear_warmup,
cosine_learning_rate)
return learning_rate
def get_config(self):
return {
"total_steps": self._total_steps,
"warmup_learning_rate": self._warmup_learning_rate,
"warmup_steps": self._warmup_steps,
"init_learning_rate": self._init_learning_rate,
}
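# Hypothetical usage sketch: evaluates both schedules at a few steps to show the
# linear warmup followed by the decay phase. The wrapped ExponentialDecay schedule
# and all step/batch values are illustrative.
if __name__ == "__main__":
  cosine = CosineDecayWithWarmup(batch_size=256, total_steps=1000, warmup_steps=100)
  for step in (0, 50, 100, 500, 1000):
    print("cosine-with-warmup", step, float(cosine(step)))
  base = tf.keras.optimizers.schedules.ExponentialDecay(
      initial_learning_rate=0.1, decay_steps=500, decay_rate=0.5)
  warmed = WarmupDecaySchedule(base, warmup_steps=100)
  for step in (0, 50, 100, 500):
    print("warmup-wrapped", step, float(warmed(step)))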
|
bilm/__init__.py
|
nelson-liu/bilm-tf
| 1,676 |
71729
|
from .data import Batcher, TokenBatcher
from .model import BidirectionalLanguageModel, dump_token_embeddings, \
dump_bilm_embeddings
from .elmo import weight_layers
|
zerver/migrations/0362_send_typing_notifications_user_setting.py
|
dumpmemory/zulip
| 17,004 |
71730
|
# Generated by Django 3.2.7 on 2021-10-03 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0361_realm_create_web_public_stream_policy"),
]
operations = [
migrations.AddField(
model_name="realmuserdefault",
name="send_private_typing_notifications",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="realmuserdefault",
name="send_stream_typing_notifications",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="userprofile",
name="send_private_typing_notifications",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="userprofile",
name="send_stream_typing_notifications",
field=models.BooleanField(default=True),
),
]
|
hubspot/cms/performance/api/__init__.py
|
Ronfer/hubspot-api-python
| 117 |
71748
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.cms.performance.api.public_performance_api import PublicPerformanceApi
|
ib/ext/cfg/CommissionReport.py
|
LewisW/IbPy
| 1,260 |
71760
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" ib.ext.cfg.CommissionReport -> config module for CommissionReport.java.
"""
|
tensorflow/contrib/util/loader.py
|
AlexChrisF/udacity
| 522 |
71784
|
<filename>tensorflow/contrib/util/loader.py<gh_stars>100-1000
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loading op libraries.
@@load_op_library
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
def load_op_library(path):
"""Loads a contrib op library from the given path.
NOTE(mrry): On Windows, we currently assume that some contrib op
libraries are statically linked into the main TensorFlow Python
extension DLL - use dynamically linked ops if the .so is present.
Args:
path: An absolute path to a shared object file.
Returns:
A Python module containing the Python wrappers for Ops defined in the
plugin.
"""
if os.name == 'nt':
    # To avoid making every user_ops aware of windows, re-write
# the file extension from .so to .dll.
path = re.sub(r'\.so$', '.dll', path)
# Currently we have only some user_ops as dlls on windows - don't try
# to load them if the dll is not found.
# TODO(mrry): Once we have all of them this check should be removed.
if not os.path.exists(path):
return None
path = resource_loader.get_path_to_datafile(path)
ret = load_library.load_op_library(path)
assert ret, 'Could not load %s' % path
return ret
|
dbaas/logical/tests/test_database.py
|
didindinn/database-as-a-service
| 303 |
71785
|
<filename>dbaas/logical/tests/test_database.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import mock
import logging
from django.test import TestCase
from django.db import IntegrityError
from drivers import base
from maintenance.tests import factory as maintenance_factory
from physical.tests import factory as physical_factory
from physical.models import DatabaseInfra
from logical.tests import factory
from notification.tests.factory import TaskHistoryFactory
from notification.models import TaskHistory
from logical.models import Database, DatabaseHistory
LOG = logging.getLogger(__name__)
ERROR_CLONE_WITHOUT_PERSISTENCE = \
"Database does not have persistence cannot be cloned"
ERROR_CLONE_IN_QUARANTINE = "Database in quarantine cannot be cloned"
ERROR_CLONE_NOT_ALIVE = "Database is not alive and cannot be cloned"
ERROR_DELETE_PROTECTED = "Database {} is protected and cannot be deleted"
ERROR_DELETE_DEAD = "Database {} is not alive and cannot be deleted"
ERROR_UPGRADE_MONGO24 = "MongoDB 2.4 cannot be upgraded by this task."
ERROR_UPGRADE_IN_QUARANTINE = "Database in quarantine and cannot be upgraded."
ERROR_UPGRADE_IS_DEAD = "Database is dead and cannot be upgraded."
ERROR_UPGRADE_NO_EQUIVALENT_PLAN = "Source plan do not has equivalent plan to upgrade."
UPGRADE_URL = "/admin/logical/database/{}/upgrade/"
UPGRADE_RETRY_URL = "/admin/logical/database/{}/upgrade_retry/"
class FakeDriver(base.BaseDriver):
def get_connection(self):
return 'connection-url'
class DatabaseTestCase(TestCase):
def setUp(self):
self.instance = physical_factory.InstanceFactory()
self.databaseinfra = self.instance.databaseinfra
self.engine = FakeDriver(databaseinfra=self.databaseinfra)
self.environment = physical_factory.EnvironmentFactory()
self.plan_upgrade = physical_factory.PlanFactory()
def tearDown(self):
self.engine = None
def test_create_database(self):
database = Database(name="blabla", databaseinfra=self.databaseinfra,
environment=self.environment)
database.save()
self.assertTrue(database.pk)
def test_create_duplicate_database_error(self):
database = Database(name="bleble", databaseinfra=self.databaseinfra,
environment=self.environment)
database.save()
self.assertTrue(database.pk)
self.assertRaises(IntegrityError, Database(name="bleble",
databaseinfra=self.databaseinfra,
environment=self.environment).save)
def test_slugify_database_name_with_spaces(self):
database = factory.DatabaseFactory.build(name="w h a t",
databaseinfra=self.databaseinfra,
environment=self.environment)
database.full_clean()
database.save()
self.assertTrue(database.id)
self.assertEqual(database.name, 'w_h_a_t')
def test_slugify_database_name_with_dots(self):
database = factory.DatabaseFactory.build(name="w.h.e.r.e",
databaseinfra=self.databaseinfra,
environment=self.environment)
database.full_clean()
database.save()
self.assertTrue(database.id)
self.assertEqual(database.name, 'w_h_e_r_e')
def test_cannot_edit_database_name(self):
database = factory.DatabaseFactory(name="w h a t",
databaseinfra=self.databaseinfra,
environment=self.environment)
self.assertTrue(database.id)
database.name = "super3"
self.assertRaises(AttributeError, database.save)
@mock.patch.object(DatabaseInfra, 'get_info')
def test_new_database_bypass_datainfra_info_cache(self, get_info):
def side_effect_get_info(force_refresh=False):
m = mock.Mock()
if not force_refresh:
m.get_database_status.return_value = None
return m
m.get_database_status.return_value = object()
return m
get_info.side_effect = side_effect_get_info
database = factory.DatabaseFactory(name="db1cache",
databaseinfra=self.databaseinfra,
environment=self.environment)
self.assertIsNotNone(database.database_status)
self.assertEqual(
[mock.call(), mock.call(force_refresh=True)], get_info.call_args_list)
def test_can_update_volume_used_disk_size(self):
database = factory.DatabaseFactory()
database.databaseinfra = self.databaseinfra
volume = physical_factory.VolumeFactory()
volume.host = self.instance.hostname
volume.save()
old_used_size = volume.used_size_kb
volume = database.update_host_disk_used_size(
host_address=self.instance.address, used_size_kb=300
)
self.assertNotEqual(volume.used_size_kb, old_used_size)
self.assertEqual(volume.used_size_kb, 300)
old_used_size = volume.used_size_kb
volume = database.update_host_disk_used_size(
host_address=self.instance.address, used_size_kb=500
)
self.assertNotEqual(volume.used_size_kb, old_used_size)
self.assertEqual(volume.used_size_kb, 500)
def test_cannot_update_volume_used_disk_size_host_not_volume(self):
database = factory.DatabaseFactory()
database.databaseinfra = self.databaseinfra
volume = database.update_host_disk_used_size(
host_address=self.instance.address, used_size_kb=300
)
self.assertIsNone(volume)
def test_can_clone(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
can_be_cloned, error = database.can_be_cloned()
self.assertTrue(can_be_cloned)
self.assertIsNone(error)
def test_cannot_clone_no_persistence(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
database.plan.has_persistence = False
can_be_cloned, error = database.can_be_cloned()
self.assertFalse(can_be_cloned)
self.assertEqual(error, ERROR_CLONE_WITHOUT_PERSISTENCE)
def test_cannot_clone_in_quarantine(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
database.is_in_quarantine = True
can_be_cloned, error = database.can_be_cloned()
self.assertFalse(can_be_cloned)
self.assertEqual(error, ERROR_CLONE_IN_QUARANTINE)
def test_cannot_clone_dead(self):
database = factory.DatabaseFactory()
database.status = database.DEAD
database.database_status = None
can_be_cloned, error = database.can_be_cloned()
self.assertFalse(can_be_cloned)
self.assertEqual(error, ERROR_CLONE_NOT_ALIVE)
def test_can_delete(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
can_be_deleted, error = database.can_be_deleted()
self.assertTrue(can_be_deleted)
self.assertIsNone(error)
@mock.patch('logical.models.factory_for')
@mock.patch('logical.models.Database.automatic_create_first_credential')
def test_insert_on_database_history_when_delete(self, cred_mock, factory_mock):
database = factory.DatabaseFactory(
name='test_fake_name',
description='__test__ fake desc'
)
database_id = database.id
database.is_in_quarantine = True
database.is_protected = False
database.status = database.ALIVE
database.environment.name = '__test__ fake env'
database.project.name = '__test__ proj name'
database.team.name = '__test__ team name'
database.plan.name = '__test__ plan name'
database.databaseinfra.name = '__test__ infra name'
database.databaseinfra.engine.version = 'v1.2.3'
database.databaseinfra.plan.has_persistence = False
database.databaseinfra.engine.engine_type.name = '__test__ fake engine type'
database.databaseinfra.disk_offering.size_kb = 1234
database.delete()
deleted_databases = DatabaseHistory.objects.filter(database_id=database_id)
self.assertEqual(len(deleted_databases), 1)
deleted_database = deleted_databases[0]
self.assertEqual(deleted_database.database_id, database_id)
self.assertEqual(deleted_database.name, 'test_fake_name')
self.assertEqual(deleted_database.description, '__test__ fake desc')
self.assertEqual(deleted_database.engine, '__test__ fake engine type v1.2.3')
self.assertEqual(deleted_database.project, '__test__ proj name')
self.assertEqual(deleted_database.team, '__test__ team name')
self.assertEqual(deleted_database.databaseinfra_name, '__test__ infra name')
self.assertEqual(deleted_database.plan, '__test__ plan name')
self.assertEqual(deleted_database.disk_size_kb, 1234)
self.assertFalse(deleted_database.has_persistence)
self.assertEqual(deleted_database.environment, '__test__ fake env')
def test_cannot_delete_protected(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
database.is_protected = True
can_be_deleted, error = database.can_be_deleted()
self.assertFalse(can_be_deleted)
self.assertEqual(error, ERROR_DELETE_PROTECTED.format(database.name))
def test_can_delete_protected_in_quarantine(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
database.is_protected = True
database.is_in_quarantine = True
can_be_deleted, error = database.can_be_deleted()
self.assertTrue(can_be_deleted)
self.assertIsNone(error)
def test_can_delete_in_quarantine(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
database.is_in_quarantine = True
can_be_deleted, error = database.can_be_deleted()
self.assertTrue(can_be_deleted)
self.assertIsNone(error)
def test_can_upgrade(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
database.databaseinfra.plan.engine_equivalent_plan = self.plan_upgrade
can_do_upgrade, error = database.can_do_upgrade()
self.assertTrue(can_do_upgrade)
self.assertIsNone(error)
def test_cannot_upgrade_mongo24(self):
mongo = physical_factory.EngineTypeFactory()
mongo.name = 'mongodb'
mongo24 = physical_factory.EngineFactory()
mongo24.engine_type = mongo
mongo24.version = '2.4.xxx'
database = factory.DatabaseFactory()
database.status = database.ALIVE
infra = database.databaseinfra
infra.engine = mongo24
database.databaseinfra = infra
can_do_upgrade, error = database.can_do_upgrade()
self.assertFalse(can_do_upgrade)
self.assertEqual(error, ERROR_UPGRADE_MONGO24)
def test_cannot_upgrade_in_quarantine(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
database.is_in_quarantine = True
can_do_upgrade, error = database.can_do_upgrade()
self.assertFalse(can_do_upgrade)
self.assertEqual(error, ERROR_UPGRADE_IN_QUARANTINE)
def test_cannot_upgrade_dead(self):
database = factory.DatabaseFactory()
database.databaseinfra.plan.engine_equivalent_plan = self.plan_upgrade
database.status = database.DEAD
can_do_upgrade, error = database.can_do_upgrade()
self.assertFalse(can_do_upgrade)
self.assertEqual(error, ERROR_UPGRADE_IS_DEAD)
def test_cannot_upgrade_no_equivalent_plan(self):
database = factory.DatabaseFactory()
database.status = database.ALIVE
can_do_upgrade, error = database.can_do_upgrade()
self.assertFalse(can_do_upgrade)
self.assertEqual(error, ERROR_UPGRADE_NO_EQUIVALENT_PLAN)
def test_get_upgrade_url(self):
database = factory.DatabaseFactory()
expected_url = UPGRADE_URL.format(database.id)
returned_url = database.get_upgrade_url()
self.assertEqual(returned_url, expected_url)
def test_get_upgrade_retry_url(self):
database = factory.DatabaseFactory()
expected_url = UPGRADE_RETRY_URL.format(database.id)
returned_url = database.get_upgrade_retry_url()
self.assertEqual(returned_url, expected_url)
def test_last_successful_upgrade(self):
database = factory.DatabaseFactory()
self.assertIsNone(database.last_successful_upgrade)
upgrade = maintenance_factory.DatabaseUpgradeFactory()
upgrade.database = database
upgrade.save()
self.assertIsNone(database.last_successful_upgrade)
upgrade.set_success()
self.assertEqual(database.last_successful_upgrade, upgrade)
def test_last_successful_upgrade_with_error(self):
database = factory.DatabaseFactory()
upgrade = maintenance_factory.DatabaseUpgradeFactory()
upgrade.database = database
upgrade.set_error()
self.assertIsNone(database.last_successful_upgrade)
def test_current_task_lock(self):
database = factory.DatabaseFactory()
task1 = TaskHistoryFactory()
task2 = TaskHistoryFactory()
database.pin_task(task1)
self.assertFalse(database.pin_task(task2))
database.unpin_task()
self.assertTrue(database.pin_task(task2))
def test_lock_retry(self):
database = factory.DatabaseFactory()
task1 = TaskHistoryFactory()
task2 = TaskHistoryFactory()
task3 = TaskHistoryFactory()
task1.task_status = TaskHistory.STATUS_ERROR
task1.save()
task2.task_name = task1.task_name
task2.save()
database.pin_task(task1)
self.assertFalse(database.update_task(task3))
self.assertTrue(database.update_task(task2))
self.assertFalse(database.update_task(task2))
database.unpin_task()
self.assertTrue(database.pin_task(task3))
|
tests/chainer_tests/functions_tests/loss_tests/test_hinge.py
|
zaltoprofen/chainer
| 3,705 |
71787
|
<reponame>zaltoprofen/chainer<gh_stars>1000+
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16,
'forward_options': {'rtol': 3e-3, 'atol': 3e-3},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
{'dtype': numpy.float32,
'forward_options': {},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
{'dtype': numpy.float64,
'forward_options': {},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
],
[{'reduce': 'no'},
{'reduce': 'mean'},
],
[{'norm': 'L1'},
{'norm': 'L2'},
],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64},
],
))
class TestHinge(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
shape = (10, 5)
self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
# Avoid values around -1.0 for stability
self.x[numpy.logical_and(-1.01 < self.x, self.x < -0.99)] = 0.5
self.t = numpy.random.randint(
0, shape[1], shape[:1]).astype(self.label_dtype)
if self.reduce == 'no':
self.gy = numpy.random.uniform(
-1, 1, self.x.shape).astype(self.dtype)
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
x_val = chainer.Variable(x_data)
t_val = chainer.Variable(t_data, requires_grad=False)
loss = functions.hinge(x_val, t_val, self.norm, self.reduce)
if self.reduce == 'mean':
self.assertEqual(loss.data.shape, ())
else:
self.assertEqual(loss.data.shape, self.x.shape)
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
for i in six.moves.range(self.x.shape[0]):
self.x[i, self.t[i]] *= -1
for i in six.moves.range(self.x.shape[0]):
for j in six.moves.range(self.x.shape[1]):
self.x[i, j] = max(0, 1.0 + self.x[i, j])
if self.norm == 'L1':
loss_expect = self.x
elif self.norm == 'L2':
loss_expect = self.x ** 2
if self.reduce == 'mean':
loss_expect = numpy.sum(loss_expect) / self.x.shape[0]
testing.assert_allclose(
loss_expect, loss_value, **self.forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_forward_chainerx_native(self):
self.check_forward(
backend.to_chx(self.x), backend.to_chx(self.t))
@attr.gpu
@attr.chainerx
def test_forward_chainerx_cuda(self):
self.check_forward(
backend.to_chx(cuda.to_gpu(self.x)),
backend.to_chx(cuda.to_gpu(self.t)))
def check_backward(self, x_data, t_data):
def f(x, t):
return functions.hinge(x, t, self.norm)
gradient_check.check_backward(
f, (x_data, t_data), None, dtype='d', **self.backward_options)
def check_backward_chainerx(self, x_data, t_data):
# TODO(niboshi): gradient_check does not support integer input
# (no_grads) for ChainerX. Support it and merge this method with
# `self.check_backward`.
def f(x):
return functions.hinge(x, t_data, self.norm)
gradient_check.check_backward(
f, (x_data,), None, dtype='d', **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.t)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_backward_chainerx_native(self):
self.check_backward_chainerx(
backend.to_chx(self.x),
backend.to_chx(self.t))
@attr.gpu
@attr.chainerx
def test_backward_chainerx_cuda(self):
self.check_backward_chainerx(
backend.to_chx(cuda.to_gpu(self.x)),
backend.to_chx(cuda.to_gpu(self.t)))
class TestHingeInvalidOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
self.t = numpy.random.randint(0, 5, (10,)).astype(numpy.int32)
def check_invalid_norm_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(NotImplementedError):
functions.hinge(x, t, 'invalid_norm', 'mean')
def test_invalid_norm_option_cpu(self):
self.check_invalid_norm_option(numpy)
@attr.gpu
def test_invalid_norm_option_gpu(self):
self.check_invalid_norm_option(cuda.cupy)
def check_invalid_reduce_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(ValueError):
functions.hinge(x, t, 'L1', 'invalid_option')
def test_invalid_reduce_option_cpu(self):
self.check_invalid_reduce_option(numpy)
@attr.gpu
def test_invalid_reduce_option_gpu(self):
self.check_invalid_reduce_option(cuda.cupy)
testing.run_module(__name__, __file__)
|
odps/tunnel/tests/test_pb.py
|
wjsi/aliyun-odps-python-sdk
| 412 |
71796
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from odps.tests.core import TestBase, to_str
from odps.compat import unittest
from odps.tunnel.pb.wire_format import *
from odps.utils import to_binary
class Test(TestBase):
def testPyEncodeAndDecode(self):
from odps.tunnel.pb.encoder import Encoder
from odps.tunnel.pb.decoder import Decoder
encoder = Encoder()
encoder.append_tag(0, WIRETYPE_VARINT)
encoder.append_int32(2 ** 20)
encoder.append_tag(1, WIRETYPE_VARINT)
encoder.append_sint64(-2 ** 40)
encoder.append_tag(2, WIRETYPE_LENGTH_DELIMITED)
encoder.append_string(to_binary("hello"))
encoder.append_tag(3, WIRETYPE_VARINT)
encoder.append_bool(True)
encoder.append_tag(4, WIRETYPE_FIXED64)
encoder.append_float(3.14)
encoder.append_double(0.31415926)
encoder.append_tag(5, WIRETYPE_VARINT)
encoder.append_uint32(2 ** 30)
encoder.append_tag(6, WIRETYPE_VARINT)
encoder.append_uint64(2 ** 40)
buffer_size = len(encoder)
tube = io.BytesIO(encoder.tostring())
decoder = Decoder(tube)
self.assertEqual((0, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2**20, decoder.read_int32())
self.assertEqual((1, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(-2**40, decoder.read_sint64())
self.assertEqual((2, WIRETYPE_LENGTH_DELIMITED), decoder.read_field_number_and_wire_type())
self.assertEqual(to_str("hello"), to_str(decoder.read_string()))
self.assertEqual((3, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(True, decoder.read_bool())
self.assertEqual((4, WIRETYPE_FIXED64), decoder.read_field_number_and_wire_type())
self.assertAlmostEqual(3.14, decoder.read_float(), delta=0.001)
self.assertEqual(0.31415926, decoder.read_double())
self.assertEqual((5, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2**30, decoder.read_uint32())
self.assertEqual((6, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2**40, decoder.read_uint64())
self.assertEqual(buffer_size, decoder.position())
def testCEncodeAndDecode(self):
try:
from odps.tunnel.pb.encoder_c import Encoder
from odps.tunnel.pb.decoder_c import Decoder
encoder = Encoder()
encoder.append_tag(0, WIRETYPE_VARINT)
encoder.append_tag(1, WIRETYPE_VARINT)
encoder.append_sint64(-2 ** 40)
encoder.append_tag(2, WIRETYPE_LENGTH_DELIMITED)
encoder.append_string(to_binary("hello"))
encoder.append_tag(3, WIRETYPE_VARINT)
encoder.append_bool(True)
encoder.append_tag(4, WIRETYPE_FIXED64)
encoder.append_float(3.14)
encoder.append_double(0.31415926)
encoder.append_tag(5, WIRETYPE_VARINT)
encoder.append_uint32(2 ** 30)
encoder.append_tag(6, WIRETYPE_VARINT)
encoder.append_uint64(2 ** 40)
buffer_size = len(encoder)
tube = io.BytesIO(encoder.tostring())
decoder = Decoder(tube)
self.assertEqual((0, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual((1, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(-2 ** 40, decoder.read_sint64())
self.assertEqual((2, WIRETYPE_LENGTH_DELIMITED), decoder.read_field_number_and_wire_type())
self.assertEqual(to_str("hello"), to_str(decoder.read_string()))
self.assertEqual((3, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(True, decoder.read_bool())
self.assertEqual((4, WIRETYPE_FIXED64), decoder.read_field_number_and_wire_type())
self.assertAlmostEqual(3.14, decoder.read_float(), delta=0.001)
self.assertEqual(0.31415926, decoder.read_double())
self.assertEqual((5, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2 ** 30, decoder.read_uint32())
self.assertEqual((6, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2 ** 40, decoder.read_uint64())
self.assertEqual(buffer_size, decoder.position())
except ImportError:
warnings.warn('No Encoder or Decoder built by cython found')
if __name__ == '__main__':
unittest.main()
|
pypugjs/convert.py
|
wwoods/pypugjs
| 247 |
71802
|
from __future__ import print_function
import codecs
import logging
import os
import sys
from optparse import OptionParser
from pypugjs.utils import process
def convert_file():
support_compilers_list = [
'django',
'jinja',
'underscore',
'mako',
'tornado',
'html',
]
available_compilers = {}
for i in support_compilers_list:
try:
compiler_class = __import__(
'pypugjs.ext.%s' % i, fromlist=['pypugjs']
).Compiler
except ImportError as e:
logging.warning(e)
else:
available_compilers[i] = compiler_class
usage = "usage: %prog [options] [file [output]]"
parser = OptionParser(usage)
parser.add_option(
"-o", "--output", dest="output", help="Write output to FILE", metavar="FILE"
)
# use a default compiler here to sidestep making a particular
# compiler absolutely necessary (ex. django)
default_compiler = sorted(available_compilers.keys())[0]
parser.add_option(
"-c",
"--compiler",
dest="compiler",
choices=list(available_compilers.keys()),
default=default_compiler,
type="choice",
help=(
"COMPILER must be one of %s, default is %s"
% (', '.join(list(available_compilers.keys())), default_compiler)
),
)
parser.add_option(
"-e",
"--ext",
dest="extension",
help="Set import/extends default file extension",
metavar="FILE",
)
options, args = parser.parse_args()
file_output = options.output or (args[1] if len(args) > 1 else None)
compiler = options.compiler
if options.extension:
extension = '.%s' % options.extension
elif options.output:
extension = os.path.splitext(options.output)[1]
else:
extension = None
if compiler in available_compilers:
import six
if len(args) >= 1:
template = codecs.open(args[0], 'r', encoding='utf-8').read()
elif six.PY3:
template = sys.stdin.read()
else:
template = codecs.getreader('utf-8')(sys.stdin).read()
output = process(
template,
compiler=available_compilers[compiler],
staticAttrs=True,
extension=extension,
)
if file_output:
outfile = codecs.open(file_output, 'w', encoding='utf-8')
outfile.write(output)
elif six.PY3:
sys.stdout.write(output)
else:
codecs.getwriter('utf-8')(sys.stdout).write(output)
else:
raise Exception('You must have %s installed!' % compiler)
if __name__ == '__main__':
convert_file()
|
plugin.video.yatp/site-packages/hachoir_parser/game/__init__.py
|
mesabib/kodi.yatp
| 194 |
71809
|
from hachoir_parser.game.zsnes import ZSNESFile
from hachoir_parser.game.spider_man_video import SpiderManVideoFile
from hachoir_parser.game.laf import LafFile
from hachoir_parser.game.blp import BLP1File, BLP2File
from hachoir_parser.game.uasset import UAssetFile
|
train_procgen/graph_util.py
|
tuthoang/train-procgen
| 146 |
71812
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from math import ceil
from constants import ENV_NAMES
import seaborn # sets some style parameters automatically
COLORS = [(57, 106, 177), (218, 124, 48)]
def switch_to_outer_plot(fig):
ax0 = fig.add_subplot(111, frame_on=False)
ax0.set_xticks([])
ax0.set_yticks([])
return ax0
def ema(data_in, smoothing=0):
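    # Exponential moving average: curr = (1 - smoothing) * x + smoothing * curr, seeded with the first value.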
data_out = np.zeros_like(data_in)
curr = np.nan
for i in range(len(data_in)):
x = data_in[i]
if np.isnan(curr):
curr = x
else:
curr = (1 - smoothing) * x + smoothing * curr
data_out[i] = curr
return data_out
def plot_data_mean_std(ax, data_y, color_idx=0, data_x=None, x_scale=1, smoothing=0, first_valid=0, label=None):
color = COLORS[color_idx]
hexcolor = '#%02x%02x%02x' % color
data_y = data_y[:,first_valid:]
nx, num_datapoint = np.shape(data_y)
if smoothing > 0:
for i in range(nx):
data_y[i,...] = ema(data_y[i,...], smoothing)
if data_x is None:
data_x = (np.array(range(num_datapoint)) + first_valid) * x_scale
data_mean = np.mean(data_y, axis=0)
data_std = np.std(data_y, axis=0, ddof=1)
ax.plot(data_x, data_mean, color=hexcolor, label=label, linestyle='solid', alpha=1, rasterized=True)
ax.fill_between(data_x, data_mean - data_std, data_mean + data_std, color=hexcolor, alpha=.25, linewidth=0.0, rasterized=True)
def read_csv(filename, key_name):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
key_index = -1
values = []
for line_num, row in enumerate(csv_reader):
row = [x.lower() for x in row]
if line_num == 0:
idxs = [i for i, val in enumerate(row) if val == key_name]
key_index = idxs[0]
else:
values.append(row[key_index])
return np.array(values, dtype=np.float32)
def plot_values(ax, all_values, title=None, max_x=0, label=None, **kwargs):
if max_x > 0:
all_values = all_values[...,:max_x]
if ax is not None:
plot_data_mean_std(ax, all_values, label=label, **kwargs)
ax.set_title(title)
return all_values
def plot_experiment(run_directory_prefix, titles=None, suffixes=[''], normalization_ranges=None, key_name='eprewmean', **kwargs):
run_folders = [f'{run_directory_prefix}{x}' for x in range(3)]
num_envs = len(ENV_NAMES)
will_normalize_and_reduce = normalization_ranges is not None
if will_normalize_and_reduce:
num_visible_plots = 1
f, axarr = plt.subplots()
else:
num_visible_plots = num_envs
dimx = dimy = ceil(np.sqrt(num_visible_plots))
f, axarr = plt.subplots(dimx, dimy, sharex=True)
for suffix_idx, suffix in enumerate(suffixes):
all_values = []
game_weights = [1] * num_envs
for env_idx in range(num_envs):
env_name = ENV_NAMES[env_idx]
label = suffix if env_idx == 0 else None # only label the first graph to avoid legend duplicates
print(f'loading results from {env_name}...')
if num_visible_plots == 1:
ax = axarr
else:
dimy = len(axarr[0])
ax = axarr[env_idx // dimy][env_idx % dimy]
csv_files = [f"results/{resid}/progress-{env_name}{'-' if len(suffix) > 0 else ''}{suffix}.csv" for resid in run_folders]
curr_ax = None if will_normalize_and_reduce else ax
raw_data = np.array([read_csv(file, key_name) for file in csv_files])
values = plot_values(curr_ax, raw_data, title=env_name, color_idx=suffix_idx, label=label, **kwargs)
if will_normalize_and_reduce:
game_range = normalization_ranges[env_name]
game_min = game_range[0]
game_max = game_range[1]
game_delta = game_max - game_min
sub_values = game_weights[env_idx] * (np.array(values) - game_min) / (game_delta)
all_values.append(sub_values)
if will_normalize_and_reduce:
normalized_data = np.sum(all_values, axis=0)
normalized_data = normalized_data / np.sum(game_weights)
title = 'Mean Normalized Score'
            plot_values(ax, normalized_data, title=title, color_idx=suffix_idx, label=suffix, **kwargs)
if len(suffixes) > 1:
if num_visible_plots == 1:
ax.legend(loc='lower right')
else:
f.legend(loc='lower right', bbox_to_anchor=(.5, 0, .5, 1))
return f, axarr
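# Illustrative usage sketch (not part of the original module): the run-directory prefix and
# suffixes below are hypothetical and assume CSV logs at results/<run>/progress-<env>.csv,
# as read by plot_experiment above.
#
#   fig, axes = plot_experiment('run-', suffixes=[''], key_name='eprewmean', smoothing=0.9)
#   plt.show()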
|
semseg/models/ddrnet.py
|
Genevievekim/semantic-segmentation-1
| 196 |
71813
|
import torch
from torch import nn, Tensor
from torch.nn import functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, c1, c2, s=1, downsample= None, no_relu=False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(c1, c2, 3, s, 1, bias=False)
self.bn1 = nn.BatchNorm2d(c2)
self.conv2 = nn.Conv2d(c2, c2, 3, 1, 1, bias=False)
self.bn2 = nn.BatchNorm2d(c2)
self.downsample = downsample
self.no_relu = no_relu
def forward(self, x: Tensor) -> Tensor:
identity = x
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
if self.downsample is not None: identity = self.downsample(x)
out += identity
return out if self.no_relu else F.relu(out)
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, c1, c2, s=1, downsample=None, no_relu=False) -> None:
super().__init__()
self.conv1 = nn.Conv2d(c1, c2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(c2)
self.conv2 = nn.Conv2d(c2, c2, 3, s, 1, bias=False)
self.bn2 = nn.BatchNorm2d(c2)
self.conv3 = nn.Conv2d(c2, c2 * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(c2 * self.expansion)
self.downsample = downsample
self.no_relu = no_relu
def forward(self, x: Tensor) -> Tensor:
identity = x
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
if self.downsample is not None: identity = self.downsample(x)
out += identity
return out if self.no_relu else F.relu(out)
class ConvBN(nn.Sequential):
def __init__(self, c1, c2, k, s=1, p=0):
super().__init__(
nn.Conv2d(c1, c2, k, s, p, bias=False),
nn.BatchNorm2d(c2)
)
class Conv2BN(nn.Sequential):
def __init__(self, c1, ch, c2, k, s=1, p=0):
super().__init__(
nn.Conv2d(c1, ch, k, s, p, bias=False),
nn.BatchNorm2d(ch),
nn.ReLU(),
nn.Conv2d(ch, c2, k, s, p, bias=False),
nn.BatchNorm2d(c2)
)
class Stem(nn.Sequential):
def __init__(self, c1, c2):
super().__init__(
nn.Conv2d(c1, c2, 3, 2, 1),
nn.BatchNorm2d(c2),
nn.ReLU(),
nn.Conv2d(c2, c2, 3, 2, 1),
nn.BatchNorm2d(c2),
nn.ReLU()
)
class Scale(nn.Sequential):
def __init__(self, c1, c2, k, s=None, p=0):
super().__init__(
nn.AvgPool2d(k, s, p, ),
nn.BatchNorm2d(c1),
nn.ReLU(),
nn.Conv2d(c1, c2, 1, bias=False)
)
class ConvModule(nn.Sequential):
def __init__(self, c1, c2, k, s=1, p=0):
super().__init__(
nn.BatchNorm2d(c1),
nn.ReLU(),
nn.Conv2d(c1, c2, k, s, p, bias=False)
)
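# DAPPM (Deep Aggregation Pyramid Pooling Module, as used in DDRNet): the scale* branches
# pool the input at increasing receptive fields, each pooled branch is upsampled back to the
# input resolution and aggregated hierarchically through the process* convolutions, and the
# concatenated result is fused by a 1x1 compression plus a shortcut from the original features.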
class DAPPM(nn.Module):
def __init__(self, c1, ch, c2):
super().__init__()
self.scale1 = Scale(c1, ch, 5, 2, 2)
self.scale2 = Scale(c1, ch, 9, 4, 4)
self.scale3 = Scale(c1, ch, 17, 8, 8)
self.scale4 = Scale(c1, ch, 1)
self.scale0 = ConvModule(c1, ch, 1)
self.process1 = ConvModule(ch, ch, 3, 1, 1)
self.process2 = ConvModule(ch, ch, 3, 1, 1)
self.process3 = ConvModule(ch, ch, 3, 1, 1)
self.process4 = ConvModule(ch, ch, 3, 1, 1)
self.compression = ConvModule(ch*5, c2, 1)
self.shortcut = ConvModule(c1, c2, 1)
def forward(self, x: Tensor) -> Tensor:
outs = [self.scale0(x)]
outs.append(self.process1((F.interpolate(self.scale1(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
outs.append(self.process2((F.interpolate(self.scale2(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
outs.append(self.process3((F.interpolate(self.scale3(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
outs.append(self.process4((F.interpolate(self.scale4(x), size=x.shape[-2:], mode='bilinear', align_corners=True) + outs[-1])))
out = self.compression(torch.cat(outs, dim=1)) + self.shortcut(x)
return out
class SegHead(nn.Module):
def __init__(self, c1, ch, c2, scale_factor=None):
super().__init__()
self.bn1 = nn.BatchNorm2d(c1)
self.conv1 = nn.Conv2d(c1, ch, 3, 1, 1, bias=False)
self.bn2 = nn.BatchNorm2d(ch)
self.conv2 = nn.Conv2d(ch, c2, 1)
self.scale_factor = scale_factor
def forward(self, x: Tensor) -> Tensor:
x = self.conv1(F.relu(self.bn1(x)))
x = self.conv2(F.relu(self.bn2(x)))
if self.scale_factor is not None:
H, W = x.shape[-2] * self.scale_factor, x.shape[-1] * self.scale_factor
x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
return x
class DDRNet(nn.Module):
def __init__(self, backbone: str = None, num_classes: int = 19) -> None:
super().__init__()
planes, spp_planes, head_planes = [32, 64, 128, 256, 512], 128, 64
self.conv1 = Stem(3, planes[0])
self.layer1 = self._make_layer(BasicBlock, planes[0], planes[0], 2)
self.layer2 = self._make_layer(BasicBlock, planes[0], planes[1], 2, 2)
self.layer3 = self._make_layer(BasicBlock, planes[1], planes[2], 2, 2)
self.layer4 = self._make_layer(BasicBlock, planes[2], planes[3], 2, 2)
self.layer5 = self._make_layer(Bottleneck, planes[3], planes[3], 1, 2)
self.layer3_ = self._make_layer(BasicBlock, planes[1], planes[1], 2)
self.layer4_ = self._make_layer(BasicBlock, planes[1], planes[1], 2)
self.layer5_ = self._make_layer(Bottleneck, planes[1], planes[1], 1)
self.compression3 = ConvBN(planes[2], planes[1], 1)
self.compression4 = ConvBN(planes[3], planes[1], 1)
self.down3 = ConvBN(planes[1], planes[2], 3, 2, 1)
self.down4 = Conv2BN(planes[1], planes[2], planes[3], 3, 2, 1)
self.spp = DAPPM(planes[-1], spp_planes, planes[2])
self.seghead_extra = SegHead(planes[1], head_planes, num_classes, 8)
self.final_layer = SegHead(planes[2], head_planes, num_classes, 8)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_pretrained(self, pretrained: str = None) -> None:
if pretrained:
self.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)
def _make_layer(self, block, inplanes, planes, depths, s=1) -> nn.Sequential:
downsample = None
if inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion, 1, s, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
layers = [block(inplanes, planes, s, downsample)]
inplanes = planes * block.expansion
for i in range(1, depths):
if i == depths - 1:
layers.append(block(inplanes, planes, no_relu=True))
else:
                layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x: Tensor) -> Tensor:
H, W = x.shape[-2] // 8, x.shape[-1] // 8
layers = []
x = self.conv1(x)
x = self.layer1(x)
layers.append(x)
x = self.layer2(F.relu(x))
layers.append(x)
x = self.layer3(F.relu(x))
layers.append(x)
x_ = self.layer3_(F.relu(layers[1]))
x = x + self.down3(F.relu(x_))
x_ = x_ + F.interpolate(self.compression3(F.relu(layers[2])), size=(H, W), mode='bilinear', align_corners=True)
if self.training: x_aux = self.seghead_extra(x_)
x = self.layer4(F.relu(x))
layers.append(x)
x_ = self.layer4_(F.relu(x_))
x = x + self.down4(F.relu(x_))
x_ = x_ + F.interpolate(self.compression4(F.relu(layers[3])), size=(H, W), mode='bilinear', align_corners=True)
x_ = self.layer5_(F.relu(x_))
x = F.interpolate(self.spp(self.layer5(F.relu(x))), size=(H, W), mode='bilinear', align_corners=True)
x_ = self.final_layer(x + x_)
return (x_, x_aux) if self.training else x_
if __name__ == '__main__':
model = DDRNet()
# model.init_pretrained('checkpoints/backbones/ddrnet/ddrnet_23slim.pth')
# model.load_state_dict(torch.load('checkpoints/pretrained/ddrnet/ddrnet_23slim_city.pth', map_location='cpu'))
x = torch.zeros(2, 3, 224, 224)
outs = model(x)
for y in outs:
print(y.shape)
|
nussl/datasets/hooks.py
|
ZhaoJY1/nussl
| 259 |
71840
|
"""
While *nussl* does not come with any data sets, it does have the capability to interface with
many common source separation data sets used within the MIR and speech separation communities.
These data set "hooks" subclass BaseDataset and by default return AudioSignal objects in
labeled dictionaries for ease of use. Transforms can be applied to these datasets for use
in machine learning pipelines.
"""
import os
from .. import musdb
import jams
from ..core import constants, utils
from .base_dataset import BaseDataset, DataSetException
class MUSDB18(BaseDataset):
"""
Hook for MUSDB18. Uses the musdb.DB object to access the
dataset. If ``download=True``, then the 7s snippets of each track
are downloaded to ``self.folder``. If no folder is given, then
the tracks are downloaded to ~/.nussl/musdb18.
Getting an item from this dataset with no transforms returns the
following dictionary:
.. code-block:: none
{
'mix': [AudioSignal object containing mix audio],
            'sources': {
                'bass': [AudioSignal object containing bass],
'drums': [AudioSignal object containing drums],
'other': [AudioSignal object containing other],
'vocals': [AudioSignal object containing vocals],
}
'metadata': {
'labels': ['bass', 'drums', 'other', 'vocals']
}
}
Args:
folder (str, optional): Location that should be processed to produce the
list of files. Defaults to None.
is_wav (bool, optional): Expect subfolder with wav files for each source
instead of stems, defaults to False.
download (bool, optional): Download sample version of MUSDB18 which
includes 7s excerpts. Defaults to False.
subsets (list, optional): Select a musdb subset train or test.
Defaults to ['train', 'test'] (all tracks).
split (str, optional): When subset train is loaded, split selects the
            train/validation split. split='train' loads the training split,
            split='valid' loads the validation split. split=None applies no
splitting. Defaults to None.
**kwargs: Any additional arguments that are passed up to BaseDataset
(see ``nussl.datasets.BaseDataset``).
"""
DATASET_HASHES = {
"musdb": "56777516ad56fe6a8590badf877e6be013ff932c010e0fbdb0aba03ef878d4cd",
}
def __init__(self, folder=None, is_wav=False, download=False,
subsets=None, split=None, **kwargs):
subsets = ['train', 'test'] if subsets is None else subsets
if folder is None:
folder = os.path.join(
constants.DEFAULT_DOWNLOAD_DIRECTORY, 'musdb18'
)
self.musdb = musdb.DB(root=folder, is_wav=is_wav, download=download,
subsets=subsets, split=split)
super().__init__(folder, **kwargs)
self.metadata['subsets'] = subsets
self.metadata['split'] = split
def get_items(self, folder):
items = range(len(self.musdb))
return list(items)
def process_item(self, item):
track = self.musdb[item]
mix, sources = utils.musdb_track_to_audio_signals(track)
self._setup_audio_signal(mix)
for source in list(sources.values()):
self._setup_audio_signal(source)
output = {
'mix': mix,
'sources': sources,
'metadata': {
'labels': ['bass', 'drums', 'other', 'vocals']
}
}
return output
class MixSourceFolder(BaseDataset):
"""
This dataset expects your data to be formatted in the following way:
.. code-block:: none
data/
mix/
[file0].wav
[file1].wav
[file2].wav
...
[label0]/
[file0].wav
[file1].wav
[file2].wav
...
[label1]/
[file0].wav
[file1].wav
[file2].wav
...
[label2]/
[file0].wav
[file1].wav
[file2].wav
...
...
    Note that the filenames match between the mix folder and each source folder.
The source folder names can be whatever you want. Given a file in the
``self.mix_folder`` folder, this dataset will look up the corresponding files
with the same name in the source folders. These are the source audio files.
The sum of the sources should equal the mixture. Each source will be labeled
according to the folder name it comes from.
Getting an item from this dataset with no transforms returns the
following dictionary:
.. code-block:: none
{
'mix': [AudioSignal object containing mix audio],
            'sources': {
'[label0]': [AudioSignal object containing label0 audio],
'[label1]': [AudioSignal object containing label1 audio],
'[label2]': [AudioSignal object containing label2 audio],
'[label3]': [AudioSignal object containing label3 audio],
...
}
'metadata': {
'labels': ['label0', 'label1', 'label2', 'label3']
}
}
Args:
folder (str, optional): Location that should be processed to produce the
list of files. Defaults to None.
mix_folder (str, optional): Folder to look in for mixtures. Defaults to 'mix'.
source_folders (list, optional): List of folders to look in for sources.
Path is defined relative to folder. If None, all folders other than
mix_folder are treated as the source folders. Defaults to None.
ext (list, optional): Audio extensions to look for in mix_folder.
            Defaults to ['.wav', '.flac', '.mp3'].
        make_mix (bool, optional): If True, the mixture is constructed on the fly by
            summing the sources rather than being read from ``mix_folder``.
            Defaults to False.
**kwargs: Any additional arguments that are passed up to BaseDataset
(see ``nussl.datasets.BaseDataset``).
"""
def __init__(self, folder, mix_folder='mix', source_folders=None,
ext=None, make_mix=False, **kwargs):
self.mix_folder = mix_folder
self.source_folders = source_folders
self.ext = ['.wav', '.flac', '.mp3'] if ext is None else ext
self.make_mix = make_mix
super().__init__(folder, **kwargs)
def get_items(self, folder):
if self.source_folders is None:
self.source_folders = sorted([
f for f in os.listdir(folder)
if os.path.isdir(os.path.join(folder, f))
and f != self.mix_folder
])
if self.make_mix:
mix_folder = os.path.join(folder, self.source_folders[0])
else:
mix_folder = os.path.join(folder, self.mix_folder)
items = sorted([
x for x in os.listdir(mix_folder)
if os.path.splitext(x)[1] in self.ext
])
return items
def get_mix_and_sources(self, item):
sources = {}
for k in self.source_folders:
source_path = os.path.join(self.folder, k, item)
if os.path.exists(source_path):
sources[k] = self._load_audio_file(source_path)
if self.make_mix:
mix = sum(list(sources.values()))
else:
mix_path = os.path.join(self.folder, self.mix_folder, item)
mix = self._load_audio_file(mix_path)
return mix, sources
def process_item(self, item):
mix, sources = self.get_mix_and_sources(item)
output = {
'mix': mix,
'sources': sources,
'metadata': {
'labels': self.source_folders
}
}
return output
class Scaper(BaseDataset):
"""
Source separation datasets can be generated using Scaper, a library for
automatic soundscape generation. Datasets that are generated with Scaper
can be fed into this class easily. Scaper generates a large list of JAMS
files which specify the parameters of the soundscape. If the soundscape is
generated with `save_isolated_events=True`, then the audio corresponding
to each event in the soundscape will be saved as well.
Below is an example of using Scaper to generate a small dataset of 10
mixtures with 2 sources each. The generated dataset can then be immediately
loaded into an instance of ``nussl.datasets.Scaper`` for integration into
a training or evaluation pipeline.
The sources are output in a dictionary that looks like this:
.. code-block:: none
        data['sources'] = {
'{label}::{count}': AudioSignal,
'{label}::{count}': AudioSignal,
...
}
For example:
.. code-block:: none
        data['sources'] = {
'siren::0': AudioSignal,
'siren::1': AudioSignal,
'car_horn::0': AudioSignal,
...
}
Getting an item from this dataset with no transforms returns the
following dictionary:
.. code-block:: none
{
'mix': [AudioSignal object containing mix audio],
            'sources': {
'[label0::count]': [AudioSignal object containing label0 audio],
'[label1::count]': [AudioSignal object containing label1 audio],
'[label2::count]': [AudioSignal object containing label2 audio],
'[label3::count]': [AudioSignal object containing label3 audio],
...
}
'metadata': {
            'scaper': [the jams object loaded from the file used to generate the soundscape]
'labels': ['label0', 'label1', 'label2', 'label3']
}
}
Example of generating a Scaper dataset and then loading it with nussl:
>>> n_sources = 2
>>> n_mixtures = 10
>>> duration = 3
>>> ref_db = -40
>>> fg_path = '/path/to/foreground/'
>>> output_dir = '/output/path'
>>> for i in range(n_mixtures):
>>> sc = scaper.Scaper(
>>> duration, fg_path, fg_path, random_state=i)
>>> sc.ref_db = ref_db
>>> sc.sr = 16000
>>> for j in range(n_sources):
>>> sc.add_event(
>>> label=('choose', []),
>>> source_file=('choose', []),
>>> source_time=('const', 0),
>>> event_time=('const', 0),
>>> event_duration=('const', duration),
>>> snr=('const', 0),
>>> pitch_shift=None,
>>> time_stretch=None
>>> )
>>> audio_path = os.path.join(output_dir, f'{i}.wav')
>>> jams_path = os.path.join(output_dir, f'{i}.jams')
>>> sc.generate(audio_path, jams_path, save_isolated_events=True)
>>> dataset = nussl.datasets.Scaper(output_dir)
>>> dataset[0] # contains mix, sources, and metadata corresponding to 0.jams.
Raises:
DataSetException: if Scaper dataset wasn't saved with isolated event audio.
"""
def get_items(self, folder):
items = sorted([
x for x in os.listdir(folder)
if os.path.splitext(x)[1] in ['.jams']
])
return items
def _get_info_from_item(self, item):
jam = jams.load(os.path.join(self.folder, item))
ann = jam.annotations.search(namespace='scaper')[0]
mix_path = ann.sandbox.scaper['soundscape_audio_path']
source_paths = ann.sandbox.scaper['isolated_events_audio_path']
return jam, ann, mix_path, source_paths
def process_item(self, item):
jam, ann, mix_path, source_paths = self._get_info_from_item(item)
if not source_paths:
raise DataSetException(
"No paths to isolated events found! Did you generate "
"the soundscape with save_isolated_events=True?")
mix = self._load_audio_file(mix_path)
sources = {}
for event_spec, event_audio_path in zip(ann, source_paths):
label = event_spec.value['label']
label_count = 0
for k in sources:
if label in k:
label_count += 1
label = f"{label}::{label_count}"
sources[label] = self._load_audio_file(event_audio_path)
output = {
'mix': mix,
'sources': sources,
'metadata': {
'scaper': jam,
'labels': ann.sandbox.scaper['fg_labels'],
}
}
return output
class OnTheFly(BaseDataset):
"""
Hook for a dataset that creates mixtures on the fly from source
data. The function that creates the mixture is a closure which
is defined by the end-user. The number of mixtures in the
dataset is also defined by the end-user. The mix closure function
should take two arguments - the dataset object and the index of the
item being processed - and the output of the mix closure should be a
dictionary containing at least a 'mix', 'sources' and (optionally)
a 'metadata' key, or other keys that can be defined up to you.
Here's an example of a closure, which can be configured via
variable scoping:
>>> def make_sine_wave(freq, sample_rate, duration):
>>> dt = 1 / sample_rate
>>> x = np.arange(0.0, duration, dt)
>>> x = np.sin(2 * np.pi * freq * x)
>>> return x
>>> n_sources = 2
>>> duration = 3
>>> sample_rate = 44100
>>> min_freq, max_freq = 110, 1000
>>> def make_mix(dataset, i):
>>> sources = {}
>>> freqs = []
>>> for i in range(n_sources):
>>> freq = np.random.randint(min_freq, max_freq)
>>> freqs.append(freq)
>>> source_data = make_sine_wave(freq, sample_rate, duration)
>>> source_signal = dataset._load_audio_from_array(
>>> audio_data=source_data, sample_rate=sample_rate)
>>> sources[f'sine{i}'] = source_signal * 1 / n_sources
>>> mix = sum(sources.values())
>>> output = {
>>> 'mix': mix,
>>> 'sources': sources,
>>> 'metadata': {
>>> 'frequencies': freqs
>>> }
>>> }
>>> return output
>>> dataset = nussl.datasets.OnTheFly(make_mix, 10)
Args:
mix_closure (function): A closure that determines how to create
a single mixture, given the index. It has a strict input
signature (the index is given as an int) and a strict output
            signature (a dictionary containing 'mix' and 'sources' keys).
num_mixtures (int): Number of mixtures that will be created on
            the fly. This determines one 'run' through the dataset, or an
epoch.
kwargs: Keyword arguments to BaseDataset.
"""
def __init__(self, mix_closure, num_mixtures, **kwargs):
self.num_mixtures = num_mixtures
self.mix_closure = mix_closure
super().__init__('none', **kwargs)
self.metadata['num_mixtures'] = num_mixtures
def get_items(self, folder):
return list(range(self.num_mixtures))
def process_item(self, item):
output = self.mix_closure(self, item)
if not isinstance(output, dict):
raise DataSetException("output of mix_closure must be a dict!")
if 'mix' not in output or 'sources' not in output:
raise DataSetException(
"output of mix_closure must be a dict containing "
"'mix', 'sources' as keys!")
return output
class FUSS(Scaper):
"""
The Free Universal Sound Separation (FUSS) Dataset is a database of arbitrary
sound mixtures and source-level references, for use in experiments on
arbitrary sound separation.
This is the official sound separation data for the DCASE2020 Challenge Task 4:
Sound Event Detection and Separation in Domestic Environments.
This is a hook for reading in this dataset, and making sure that the mix and
source paths are massaged to be relative paths.
References:
[1] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, "What's All the FUSS About Free Universal Sound Separation
Data?", 2020, in preparation.
[2] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, and
<NAME>. "Freesound Datasets: A Platform for the Creation of Open Audio
Datasets." International Society for Music Information Retrieval Conference
(ISMIR), pp. 486–493. Suzhou, China, 2017.
Args:
root (str): Folder where the FUSS data is. Either points to ssdata or
ssdata_reverb.
split (str): Either the ``train``, ``validation``, or ``eval`` split.
kwargs: Additional keyword arguments to BaseDataset.
"""
def __init__(self, root, split='train', **kwargs):
if split not in ['train', 'validation', 'eval']:
raise DataSetException(
f"split '{split}' not one of the accepted splits: "
f"'train', 'validation', 'eval'.")
folder = os.path.join(root, split)
super().__init__(folder, sample_rate=16000, strict_sample_rate=True,
**kwargs)
self.metadata['split'] = split
def _get_info_from_item(self, item):
path_to_item = os.path.join(self.folder, item)
item_base_name = os.path.splitext(item)[0]
jam = jams.load(path_to_item)
ann = jam.annotations.search(namespace='scaper')[0]
mix_path = ann.sandbox.scaper['soundscape_audio_path']
source_paths = ann.sandbox.scaper['isolated_events_audio_path']
mix_path = os.path.join(
self.folder, item_base_name + mix_path.split(item_base_name)[-1])
for i, source_path in enumerate(source_paths):
source_paths[i] = os.path.join(
self.folder, item_base_name + source_path.split(item_base_name)[-1])
return jam, ann, mix_path, source_paths
class WHAM(MixSourceFolder):
"""
Hook for the WHAM dataset. Essentially subclasses MixSourceFolder but with presets
    that are helpful for WHAM, which has the following directory structure:
.. code-block:: none
[wav8k, wav16k]/
[min, max]/
[tr, cv, tt]/
mix_both/
mix_clean/
mix_single/
noise/
s1/
s2/
wham_noise/
tr/
cv/
tt/
metadata/
Args:
root (str): Root of WHAM directory.
mix_folder (str): Which folder is the mix? Either 'mix_clean', 'mix_both', or
'mix_single'.
mode (str): Either 'min' or 'max' mode.
split (str): Split to use (tr, cv, or tt).
sample_rate (int): Sample rate of audio, either 8000 or 16000.
"""
MIX_TO_SOURCE_MAP = {
'mix_clean': ['s1', 's2'],
'mix_both': ['s1', 's2', 'noise'],
'mix_single': ['s1'],
}
DATASET_HASHES = {
"wav8k": "acd49e0dae066e16040c983d71cc5a8adb903abff6e5cbb92b3785a1997b7547",
"wav16k": "5691d6a35382f2408a99594f21d820b58371b5ea061841db37d548c0b8d6ec7f"
}
def __init__(self, root, mix_folder='mix_clean', mode='min', split='tr',
sample_rate=8000, **kwargs):
if mix_folder not in self.MIX_TO_SOURCE_MAP.keys():
raise DataSetException(
f"{mix_folder} must be in {list(self.MIX_TO_SOURCE_MAP.keys())}")
if sample_rate not in [8000, 16000]:
raise DataSetException(
f"{sample_rate} not available for WHAM (only 8000 and 16000 Hz allowed)")
if mode not in ['min', 'max']:
raise DataSetException(
f"{mode} not available, only 'min' or 'max' allowed.")
if split not in ['tr', 'cv', 'tt']:
raise DataSetException(
f"{split} not available, must be one of 'tr' (train), "
f"'cv' (validation), and 'tt' (test)")
wav_folder = 'wav8k' if sample_rate == 8000 else 'wav16k'
folder = os.path.join(root, wav_folder, mode, split)
source_folders = self.MIX_TO_SOURCE_MAP[mix_folder]
super().__init__(folder, mix_folder=mix_folder, source_folders=source_folders,
sample_rate=sample_rate, strict_sample_rate=True, **kwargs)
self.metadata.update({
'mix_folder': mix_folder,
'mode': mode,
'split': split,
'wav_folder': wav_folder
})
|
libs/configs_old/DOTA/gwd/cfgs_res50_dota_v20.py
|
Artcs1/RotationDetection
| 850 |
71861
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
"""
RetinaNet-H + gwd fix bug + sqrt + tau=2 + train set
FLOPs: 860451163; Trainable params: 33002916
iou threshold: 0.5
classname: plane
npos num: 2450
ap: 0.8948394008103565
classname: baseball-diamond
npos num: 209
ap: 0.6580467157774382
classname: bridge
npos num: 424
ap: 0.388917639526009
classname: ground-track-field
npos num: 131
ap: 0.582799082808811
classname: small-vehicle
npos num: 5090
ap: 0.6058372268499183
classname: large-vehicle
npos num: 4293
ap: 0.6297220646782561
classname: ship
npos num: 8861
ap: 0.8143495259256781
classname: tennis-court
npos num: 739
ap: 0.897082428301694
classname: basketball-court
npos num: 124
ap: 0.6194974348503025
classname: storage-tank
npos num: 1869
ap: 0.7888520103937031
classname: soccer-ball-field
npos num: 87
ap: 0.6721727619016967
classname: roundabout
npos num: 164
ap: 0.6740140076462648
classname: harbor
npos num: 2065
ap: 0.6030928319524497
classname: swimming-pool
npos num: 366
ap: 0.532690992577956
classname: helicopter
npos num: 72
ap: 0.45393048522054874
map: 0.6543896406147388
{'0.65': {'mAP': 0.5531255908346647, 'ground-track-field': 0.46874541967164557, 'small-vehicle': 0.5254805842312422, 'soccer-ball-field': 0.49674069740653076, 'harbor': 0.3325998985859663, 'large-vehicle': 0.49237446722103323, 'swimming-pool': 0.3786694115862947, 'roundabout': 0.6127737951332743, 'tennis-court': 0.8955950695702153, 'basketball-court': 0.5642336574393851, 'helicopter': 0.4095234559651532, 'storage-tank': 0.768350569402555, 'bridge': 0.229887299838382, 'baseball-diamond': 0.5172297968073052, 'ship': 0.718831628735693, 'plane': 0.885848110925295},
'0.5': {'mAP': 0.6543896406147388, 'ground-track-field': 0.582799082808811, 'small-vehicle': 0.6058372268499183, 'soccer-ball-field': 0.6721727619016967, 'harbor': 0.6030928319524497, 'large-vehicle': 0.6297220646782561, 'swimming-pool': 0.532690992577956, 'roundabout': 0.6740140076462648, 'tennis-court': 0.897082428301694, 'basketball-court': 0.6194974348503025, 'helicopter': 0.45393048522054874, 'storage-tank': 0.7888520103937031, 'bridge': 0.388917639526009, 'baseball-diamond': 0.6580467157774382, 'ship': 0.8143495259256781, 'plane': 0.8948394008103565},
'0.8': {'mAP': 0.28292248169049333, 'ground-track-field': 0.2325775080634852, 'small-vehicle': 0.1979511661753693, 'soccer-ball-field': 0.29786281543794524, 'harbor': 0.11494252873563218, 'large-vehicle': 0.16034195972421744, 'swimming-pool': 0.10212121212121213, 'roundabout': 0.29187883858274505, 'tennis-court': 0.8003975003061949, 'basketball-court': 0.47053242084058733, 'helicopter': 0.08282828282828283, 'storage-tank': 0.4630236938472425, 'bridge': 0.045454545454545456, 'baseball-diamond': 0.0980392156862745, 'ship': 0.3419243781838527, 'plane': 0.5439611593698137},
'0.85': {'mAP': 0.17732891599288997, 'ground-track-field': 0.13084951639168507, 'small-vehicle': 0.06282073067119796, 'soccer-ball-field': 0.18311688311688312, 'harbor': 0.09090909090909091, 'large-vehicle': 0.05997549072961212, 'swimming-pool': 0.01515151515151515, 'roundabout': 0.1523809523809524, 'tennis-court': 0.777850986366134, 'basketball-court': 0.27146743865010114, 'helicopter': 0.025974025974025972, 'storage-tank': 0.3194857000235097, 'bridge': 0.025974025974025972, 'baseball-diamond': 0.07032306536438768, 'ship': 0.09238611869237975, 'plane': 0.38126819949784874},
'0.9': {'mAP': 0.09261312239028942, 'ground-track-field': 0.045454545454545456, 'small-vehicle': 0.007575757575757575, 'soccer-ball-field': 0.08787878787878788, 'harbor': 0.09090909090909091, 'large-vehicle': 0.006888231631382316, 'swimming-pool': 0.01515151515151515, 'roundabout': 0.05694896083698572, 'tennis-court': 0.6190068314484273, 'basketball-court': 0.1277056277056277, 'helicopter': 0.018181818181818184, 'storage-tank': 0.10310064772905649, 'bridge': 0.012987012987012986, 'baseball-diamond': 0.05454545454545454, 'ship': 0.00899621212121212, 'plane': 0.133866341697667},
'0.6': {'mAP': 0.602003225559061, 'ground-track-field': 0.5117731722941454, 'small-vehicle': 0.5692796674261347, 'soccer-ball-field': 0.591601532425069, 'harbor': 0.42439117183385383, 'large-vehicle': 0.5379528999441402, 'swimming-pool': 0.4552774282858074, 'roundabout': 0.6590275695186874, 'tennis-court': 0.8967502975397331, 'basketball-court': 0.6163602294422292, 'helicopter': 0.42175379721391987, 'storage-tank': 0.7814590420239126, 'bridge': 0.30900189391187255, 'baseball-diamond': 0.6270284107602824, 'ship': 0.7357085211727478, 'plane': 0.892682749593379},
'0.7': {'mAP': 0.47209699491529994, 'ground-track-field': 0.37315990473910204, 'small-vehicle': 0.4462857945106512, 'soccer-ball-field': 0.43301958208470137, 'harbor': 0.24212265985665615, 'large-vehicle': 0.41707228898274396, 'swimming-pool': 0.2672845272755605, 'roundabout': 0.4752231061636024, 'tennis-court': 0.8954629342636613, 'basketball-court': 0.5565887540061711, 'helicopter': 0.3137137929820856, 'storage-tank': 0.6891634802537836, 'bridge': 0.16824841824841824, 'baseball-diamond': 0.3967626112242669, 'ship': 0.6233882592021442, 'plane': 0.7839588099359523},
'0.75': {'mAP': 0.38682933856456475, 'ground-track-field': 0.3505001362890805, 'small-vehicle': 0.32936925454926796, 'soccer-ball-field': 0.35644113950565565, 'harbor': 0.16082435022158342, 'large-vehicle': 0.312014321085313, 'swimming-pool': 0.15053744756715054, 'roundabout': 0.421342806894755, 'tennis-court': 0.8933998458347037, 'basketball-court': 0.5018426096266209, 'helicopter': 0.17586580086580086, 'storage-tank': 0.6481067305855587, 'bridge': 0.11431682090364725, 'baseball-diamond': 0.21312574893137554, 'ship': 0.5086325250920672, 'plane': 0.6661205405158923},
'mmAP': 0.38707336824937255,
'0.95': {'mAP': 0.020635306242343165, 'ground-track-field': 0.045454545454545456, 'small-vehicle': 0.0005790387955993052, 'soccer-ball-field': 0.0, 'harbor': 0.0004434589800443459, 'large-vehicle': 0.00036638424547744445, 'swimming-pool': 0.0, 'roundabout': 0.0053475935828877, 'tennis-court': 0.2304241077310939, 'basketball-court': 0.003189792663476874, 'helicopter': 0.0, 'storage-tank': 0.012987012987012986, 'bridge': 0.0, 'baseball-diamond': 0.0, 'ship': 0.0009404388714733542, 'plane': 0.009797220323536112},
'0.55': {'mAP': 0.6287890656893798, 'ground-track-field': 0.5643322633863954, 'small-vehicle': 0.5913067741856398, 'soccer-ball-field': 0.6335613572261539, 'harbor': 0.5190220297608497, 'large-vehicle': 0.5649195362143626, 'swimming-pool': 0.49227487366542605, 'roundabout': 0.667984152802187, 'tennis-court': 0.897082428301694, 'basketball-court': 0.6163602294422292, 'helicopter': 0.44399239228256077, 'storage-tank': 0.7862921590716214, 'bridge': 0.35810648582284893, 'baseball-diamond': 0.6568440654367499, 'ship': 0.7454706366368675, 'plane': 0.8942866011051104}}
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_2x_20210124'
NET_NAME = 'resnet50_v1d' # 'MobilenetV2'
# ---------------------------------------- System
ROOT_PATH = os.path.abspath('../../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,3"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 20673 * 2
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
# ------------------------------------------ Train and Test
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
ADD_BOX_IN_TENSORBOARD = True
MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 2.0
REG_LOSS_MODE = 2
ALPHA = 1.0
BETA = 1.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 1e-3
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 8.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Dataset
DATASET_NAME = 'DOTATrain' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
NUM_SUBNET_CONV = 4
FPN_MODE = 'fpn'
# --------------------------------------------- Anchor
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 90 # or 180
# -------------------------------------------- Head
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
# -------------------------------------------- GWD
GWD_TAU = 2.0
GWD_FUNC = tf.sqrt
|
tqsdk/sim/trade.py
|
shinny-mayanqiong/tqsdk-python
| 3,208 |
71869
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mayanqiong'
import math
from datetime import datetime
from typing import Callable
from tqsdk.datetime import _is_in_trading_time
from tqsdk.diff import _simple_merge_diff
from tqsdk.sim.utils import _get_price_range, _get_option_margin, _get_premium, _get_close_profit, _get_commission, \
_get_future_margin
class SimTrade(object):
"""
    TianQin simulated trading account: maintains the order book, matches trades,
    and computes account funds and position information.
    This module is the trading sub-module of TqSim. It is purely synchronous and does not
    depend on a live quote connection, so callers must observe a few requirements when using its interface.
    Provided interface:
    + init_snapshot: returns the initial account snapshot
    + insert_order: handles an order-insert request; update_quote must have been called before
      TqSimAccount.insert_order so that a quote for the instrument has been received; for options,
      a quote for the underlying must have been received as well
    + cancel_order: handles an order-cancel request
    + update_quote: handles quote updates
      Return value: diffs
      diffs is a list; every element is a packet conforming to the trade part of the diff protocol,
      and each returned object carries its full set of fields, e.g. when an order is filled the
      complete order object is returned rather than only the changed fields
    + settle: handles a settlement request
      Return values: diffs, trade_log
      diffs as above; trade_log is the account log captured before settlement
"""
def __init__(self, account_key: str, init_balance: float = 10000000.0, get_trade_timestamp: Callable = None,
is_in_trading_time: Callable = None) -> None:
self._account_key = account_key
        self._quotes = {}  # records every quote received so far
self._account = {
"currency": "CNY",
"pre_balance": init_balance,
"static_balance": init_balance,
"balance": init_balance,
"available": init_balance,
"float_profit": 0.0,
"position_profit": 0.0, # 期权没有持仓盈亏
"close_profit": 0.0,
"frozen_margin": 0.0,
"margin": 0.0,
"frozen_commission": 0.0,
"commission": 0.0,
"frozen_premium": 0.0,
"premium": 0.0,
"deposit": 0.0,
"withdraw": 0.0,
"risk_ratio": 0.0,
"market_value": 0.0,
"ctp_balance": float("nan"),
"ctp_available": float("nan")
}
self._positions = {} # {symbol: position, ...}
self._orders = {} # {symbol: {order_id: order}, ...}
        self._trades = []  # kept as a list, consistent with the pre-refactor code; a list preserves the order in which trades are produced
        self._diffs = []
        self._orders_events = []  # records order updates in sequence, returned to the caller
        self._max_datetime = ""  # the latest quote-update time across all quotes
        # When computing trade timestamps and checking whether we are inside trading hours, this module by default
        # uses the latest quote-update time across all quotes as the current time, and does not simulate the latency
        # between quote time and trade time.
        # If the calling module needs more precise timing, it may supply its own functions.
self._get_trade_timestamp = get_trade_timestamp if get_trade_timestamp else self._default_get_trade_timestamp
self._is_in_trading_time = is_in_trading_time if is_in_trading_time else self._default_is_in_trading_time
def insert_order(self, symbol, pack):
quote, underlying_quote = self._get_quotes_by_symbol(symbol)
order = self._pre_insert_order(pack)
orders = self._orders.setdefault(symbol, {})
orders[order["order_id"]] = order # order 存入全局
self._orders_events.append(order.copy())
self._insert_order(order, symbol, quote, underlying_quote)
if order["status"] == "ALIVE":
self._match_order(order, symbol, quote, underlying_quote)
if order["status"] == "FINISHED":
self._orders_events.append(order)
            del self._orders[symbol][order["order_id"]]  # remove the order
return self._return_results()
def cancel_order(self, symbol, pack):
order = self._orders.get(symbol, {}).get(pack["order_id"], {})
if order.get("status") == "ALIVE":
order["last_msg"] = "已撤单"
order["status"] = "FINISHED"
self._on_order_failed(symbol, order)
self._orders_events.append(order)
            del self._orders[symbol][order["order_id"]]  # remove the order
return self._return_results()
def update_quotes(self, symbol, pack):
for q in pack.get("quotes", {}).values():
self._max_datetime = max(q.get("datetime", ""), self._max_datetime)
_simple_merge_diff(self._quotes, pack.get("quotes", {}), reduce_diff=False)
quote, underlying_quote = self._get_quotes_by_symbol(symbol)
        # In some non-trading periods (e.g. in tick backtests) the quote's last_price may be nan; skip such invalid quotes
if math.isnan(quote["last_price"]):
return [], []
        # match pending orders
        orders = self._orders.get(symbol, {})
        for order_id in list(orders.keys()):  # _match_order may delete entries under orders
self._match_order(orders[order_id], symbol, quote, underlying_quote)
if orders[order_id]["status"] == "FINISHED":
self._orders_events.append(orders[order_id])
del self._orders[symbol][order_id]
        # adjust position margin and profit/loss
position = self._ensure_position(symbol)
underlying_last_price = underlying_quote["last_price"] if underlying_quote else float('nan')
future_margin = _get_future_margin(quote)
if position["volume_long"] > 0 or position["volume_short"] > 0:
if position["last_price"] != quote["last_price"] \
or (math.isnan(future_margin) or future_margin != position["future_margin"])\
or (underlying_quote and (math.isnan(underlying_last_price) or underlying_last_price != position["underlying_last_price"])):
self._adjust_position_account(symbol, quote, underlying_quote,
pre_last_price=position["last_price"],
last_price=quote["last_price"],
pre_underlying_last_price=position["underlying_last_price"],
underlying_last_price=underlying_last_price)
position["future_margin"] = future_margin
position["last_price"] = quote["last_price"]
position["underlying_last_price"] = underlying_last_price
else:
            # update helper fields
            position["future_margin"] = future_margin
            position["last_price"] = quote["last_price"]
            position["underlying_last_price"] = underlying_last_price
        self._send_position(position)  # position must always be sent; downstream uses the future_margin field to check whether a margin change succeeded
self._send_account()
return self._return_results()
def settle(self):
trade_log = {
"trades": self._trades,
"account": self._account.copy(),
"positions": {k: v.copy() for k, v in self._positions.items()}
}
        # adjust the account for the next trading day
self._trades = []
for symbol in self._orders:
for order in self._orders[symbol].values():
order["frozen_margin"] = 0.0
order["frozen_premium"] = 0.0
order["last_msg"] = "交易日结束,自动撤销当日有效的委托单(GFD)"
order["status"] = "FINISHED"
self._orders_events.append(order)
self._send_order(order)
self._orders[symbol] = {}
        # account raw fields
self._account["pre_balance"] = self._account["balance"] - self._account["market_value"]
self._account["close_profit"] = 0.0
self._account["commission"] = 0.0
self._account["premium"] = 0.0
self._account["frozen_margin"] = 0.0
self._account["frozen_premium"] = 0.0
        # account computed fields
self._account["static_balance"] = self._account["pre_balance"]
self._account["position_profit"] = 0.0
self._account["risk_ratio"] = self._account["margin"] / self._account["balance"]
self._account["available"] = self._account["static_balance"] - self._account["margin"]
        # by the equity formula self._account["balance"] = static_balance + market_value, so balance needs no recalculation here
self._send_account()
        # Position settlement is done here rather than in the quote_handler for two reasons:
        # 1. If it were sent asynchronously and sim had not yet received pending_peek, the settled account
        #    information could not be sent out, so the positions returned by api.get_position in user code would
        #    differ from the positions inside sim and set_target_pos would place wrong orders. Also, at settlement
        #    time a quote packet has always been received, and the final step of the synchronous code sends out
        #    that quote packet (pending_peek), so when quote_handler handles settle, pending_peek is always False;
        #    peek_message is only received after the api has processed it.
        # 2. Sending synchronously lets it go out together with the packet that switches to the next trading day.
        # Orders are handled in the next reply to peek_message.
for position in self._positions.values():
            # position raw fields
position["volume_long_frozen_today"] = 0
position["volume_long_frozen_his"] = 0
position["volume_short_frozen_today"] = 0
position["volume_short_frozen_his"] = 0
position["volume_long_today"] = 0
position["volume_long_his"] = position["volume_long"]
position["volume_short_today"] = 0
position["volume_short_his"] = position["volume_short"]
            # position computed fields
position["pos_long_his"] = position["volume_long_his"]
position["pos_long_today"] = 0
position["pos_short_his"] = position["volume_short_his"]
position["pos_short_today"] = 0
position["volume_long_frozen"] = 0
position["volume_short_frozen"] = 0
position["position_price_long"] = position["last_price"]
position["position_price_short"] = position["last_price"]
quote, _ = self._get_quotes_by_symbol(f"{position['exchange_id']}.{position['instrument_id']}")
position["position_cost_long"] = position["last_price"] * position["volume_long"] * quote["volume_multiple"] # position 原始字段
position["position_cost_short"] = position["last_price"] * position["volume_short"] * quote["volume_multiple"] # position 原始字段
position["position_profit_long"] = 0
position["position_profit_short"] = 0
position["position_profit"] = 0
self._send_position(position)
diffs, orders_events = self._return_results()
return diffs, orders_events, trade_log
def init_snapshot(self):
"""返回初始账户截面信息"""
return {
"trade": {
self._account_key: {
"accounts": {"CNY": self._account.copy()},
"positions": {},
"orders": {},
"trades": {}
}
}
}
def _return_results(self):
"""
        Returns two items: diffs: list, orders_events: list
        diffs: the changes to the snapshot
        orders_events: the order changes
"""
diffs = self._diffs
self._diffs = []
orders_events = self._orders_events
self._orders_events = []
return diffs, orders_events
def _ensure_position(self, symbol):
position = self._positions.setdefault(symbol, {
"exchange_id": symbol.split(".", maxsplit=1)[0],
"instrument_id": symbol.split(".", maxsplit=1)[1],
"pos_long_his": 0,
"pos_long_today": 0,
"pos_short_his": 0,
"pos_short_today": 0,
"volume_long_today": 0,
"volume_long_his": 0,
"volume_long": 0,
"volume_long_frozen_today": 0,
"volume_long_frozen_his": 0,
"volume_long_frozen": 0,
"volume_short_today": 0,
"volume_short_his": 0,
"volume_short": 0,
"volume_short_frozen_today": 0,
"volume_short_frozen_his": 0,
"volume_short_frozen": 0,
"open_price_long": float("nan"),
"open_price_short": float("nan"),
"open_cost_long": 0.0,
"open_cost_short": 0.0,
"position_price_long": float("nan"),
"position_price_short": float("nan"),
"position_cost_long": 0.0,
"position_cost_short": 0.0,
"float_profit_long": 0.0,
"float_profit_short": 0.0,
"float_profit": 0.0,
"position_profit_long": 0.0,
"position_profit_short": 0.0,
"position_profit": 0.0,
"margin_long": 0.0,
"margin_short": 0.0,
"margin": 0.0,
"last_price": float('nan'),
"underlying_last_price": float('nan'),
"market_value_long": 0.0, # 权利方市值(始终 >= 0)
"market_value_short": 0.0, # 义务方市值(始终 <= 0)
"market_value": 0.0,
})
if math.isnan(position["last_price"]):
            # this position was just created; add the helper fields last_price and underlying_last_price
quote, underlying_quote = self._get_quotes_by_symbol(symbol)
position["future_margin"] = _get_future_margin(quote)
position["last_price"] = quote["last_price"]
position["underlying_last_price"] = underlying_quote["last_price"] if underlying_quote else float("nan")
return position
def _get_quotes_by_symbol(self, symbol):
"""返回指定合约及标的合约,在本模块执行过程中,应该保证一定有合约行情"""
quote = self._quotes.get(symbol)
assert quote and quote.get("datetime"), "未收到指定合约行情"
underlying_quote = None
if quote["ins_class"].endswith("OPTION"):
underlying_quote = self._quotes.get(quote["underlying_symbol"])
assert underlying_quote and underlying_quote.get("datetime"), "未收到指定合约的标的行情"
return quote, underlying_quote
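    # Note (assumption for illustration, not from the original code): `symbol` follows the
    # "<exchange_id>.<instrument_id>" convention used elsewhere in this class, e.g. a hypothetical
    # future "SHFE.cu2106"; for an option, the quote's "underlying_symbol" points at its underlying contract.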
def _pre_insert_order(self, pack):
"""order 对象预处理"""
order = pack.copy()
order["exchange_order_id"] = order["order_id"]
order["volume_orign"] = order["volume"]
order["volume_left"] = order["volume"]
order["frozen_margin"] = 0.0
order["frozen_premium"] = 0.0
order["last_msg"] = "报单成功"
order["status"] = "ALIVE"
order["insert_date_time"] = self._get_trade_timestamp()
del order["aid"]
del order["volume"]
self._send_order(order)
return order
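    # Minimal sketch of an insert-order pack as consumed above (all values are hypothetical and only
    # illustrate the fields this simulator reads; this is not an official API example):
    #
    #   pack = {
    #       "aid": "insert_order", "user_id": "sim_user", "order_id": "order_1",
    #       "exchange_id": "SHFE", "instrument_id": "cu2106",
    #       "direction": "BUY", "offset": "OPEN", "volume": 1,
    #       "price_type": "LIMIT", "limit_price": 71000.0, "time_condition": "GFD",
    #   }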
def _insert_order(self, order, symbol, quote, underlying_quote=None):
"""判断 order 是否可以记录在 orderbook"""
if ("commission" not in quote or "margin" not in quote) and not quote["ins_class"].endswith("OPTION"):
order["last_msg"] = "不支持的合约类型,TqSim 目前不支持组合,股票,etf期权模拟交易"
order["status"] = "FINISHED"
if order["status"] == "ALIVE" and not self._is_in_trading_time(quote):
order["last_msg"] = "下单失败, 不在可交易时间段内"
order["status"] = "FINISHED"
position = self._ensure_position(symbol)
if order["status"] == "ALIVE" and order["offset"].startswith('CLOSE'):
if order["exchange_id"] in ["SHFE", "INE"]:
if order["offset"] == "CLOSETODAY":
if order["direction"] == "BUY" and position["volume_short_today"] - position["volume_long_frozen_today"] < order["volume_orign"]:
order["last_msg"] = "平今仓手数不足"
elif order["direction"] == "SELL" and position["volume_long_today"] - position["volume_long_frozen_today"] < order["volume_orign"]:
order["last_msg"] = "平今仓手数不足"
if order["offset"] == "CLOSE":
if order["direction"] == "BUY" and position["volume_short_his"] - position["volume_short_frozen_his"] < order["volume_orign"]:
order["last_msg"] = "平昨仓手数不足"
elif order["direction"] == "SELL" and position["volume_long_his"] - position["volume_long_frozen_his"] < order["volume_orign"]:
order["last_msg"] = "平昨仓手数不足"
else:
if order["direction"] == "BUY" and position["volume_short"] - position["volume_short_frozen"] < order["volume_orign"]:
order["last_msg"] = "平仓手数不足"
elif order["direction"] == "SELL" and position["volume_long"] - position["volume_long_frozen"] < order["volume_orign"]:
order["last_msg"] = "平仓手数不足"
if order["last_msg"].endswith("手数不足"):
order["status"] = "FINISHED"
if order["status"] == "ALIVE" and order["offset"] == "OPEN":
            # compute the frozen margin and frozen premium
if quote["ins_class"].endswith("OPTION"):
if order["direction"] == "SELL": # 期权的SELL义务仓,开仓需要冻结保证金
order["frozen_margin"] = order["volume_orign"] * _get_option_margin(quote, quote["last_price"], underlying_quote["last_price"])
                else:  # BUY side of an option (holder/right position); a market order uses last_price to compute the premium to freeze
price = quote["last_price"] if order["price_type"] == "ANY" else order["limit_price"]
order["frozen_premium"] = order["volume_orign"] * quote["volume_multiple"] * price
else:
order["frozen_margin"] = order["volume_orign"] * _get_future_margin(quote)
if order["frozen_margin"] + order["frozen_premium"] > self._account["available"]:
order["frozen_margin"] = 0.0
order["frozen_premium"] = 0.0
order["last_msg"] = '开仓资金不足'
order["status"] = "FINISHED"
if order["status"] == "FINISHED":
self._send_order(order)
if order["status"] == "ALIVE" and order["offset"] == "OPEN":
            # update the account computed fields
self._adjust_account_by_order(frozen_margin=order["frozen_margin"], frozen_premium=order["frozen_premium"])
self._send_account()
if order["status"] == 'ALIVE' and order["offset"].startswith('CLOSE'):
            # update the position raw fields
if order["exchange_id"] in ["SHFE", "INE"]:
if order["direction"] == "BUY":
position[f"volume_short_frozen_{'today' if order['offset'] == 'CLOSETODAY' else 'his'}"] += order["volume_orign"]
else:
position[f"volume_long_frozen_{'today' if order['offset'] == 'CLOSETODAY' else 'his'}"] += order["volume_orign"]
elif order["direction"] == "BUY":
volume_short_his_available = position["volume_short_his"] - position["volume_short_frozen_his"]
if volume_short_his_available < order["volume_orign"]:
position["volume_short_frozen_his"] += volume_short_his_available
position["volume_short_frozen_today"] += order["volume_orign"] - volume_short_his_available
else:
position["volume_short_frozen_his"] += order["volume_orign"]
else:
volume_long_his_available = position["volume_long_his"] - position["volume_long_frozen_his"]
if volume_long_his_available < order["volume_orign"]:
position["volume_long_frozen_his"] += volume_long_his_available
position["volume_long_frozen_today"] += order["volume_orign"] - volume_long_his_available
else:
position["volume_long_frozen_his"] += order["volume_orign"]
            # update the position computed fields
self._adjust_position_volume_frozen(position)
self._send_position(position)
def _match_order(self, order, symbol, quote, underlying_quote=None):
assert order["status"] == "ALIVE"
ask_price, bid_price = _get_price_range(quote)
        # expected fill price of the order
if order["price_type"] in ["ANY", "BEST", "FIVELEVEL"]:
price = ask_price if order["direction"] == "BUY" else bid_price
else:
price = order["limit_price"]
if order["price_type"] == "ANY" and math.isnan(price):
order["last_msg"] = "市价指令剩余撤销"
order["status"] = "FINISHED"
if order["time_condition"] == "IOC": # IOC 立即成交,限价下单且不能成交的价格,直接撤单
if order["direction"] == "BUY" and price < ask_price or order["direction"] == "SELL" and price > bid_price:
order["last_msg"] = "已撤单报单已提交"
order["status"] = "FINISHED"
if order["status"] == "FINISHED":
self._on_order_failed(symbol, order)
elif order["direction"] == "BUY" and price >= ask_price or order["direction"] == "SELL" and price <= bid_price:
trade_id = order["order_id"] + "|" + str(order["volume_left"])
trade = {
"user_id": order["user_id"],
"order_id": order["order_id"],
"trade_id": trade_id,
"exchange_trade_id": order["order_id"] + "|" + str(order["volume_left"]),
"exchange_id": order["exchange_id"],
"instrument_id": order["instrument_id"],
"direction": order["direction"],
"offset": order["offset"],
"price": price,
"volume": order["volume_left"],
"trade_date_time": self._get_trade_timestamp(), # todo: 可能导致测试结果不确定
"commission": order["volume_left"] * _get_commission(quote)
}
self._trades.append(trade)
self._send_trade(trade)
self._on_order_traded(order, trade, quote, underlying_quote)
def _on_order_traded(self, order, trade, quote, underlying_quote):
symbol = order["exchange_id"] + "." + order["instrument_id"]
origin_frozen_margin = order["frozen_margin"]
origin_frozen_premium = order["frozen_premium"]
order["frozen_margin"] = 0.0
order["frozen_premium"] = 0.0
order["volume_left"] = 0
order["last_msg"] = "全部成交"
order["status"] = "FINISHED"
self._send_order(order)
position = self._ensure_position(symbol)
if order["offset"] == 'OPEN':
if order["direction"] == "BUY":
                # update the position raw fields
                position["volume_long_today"] += order["volume_orign"]
                position["open_cost_long"] += trade["price"] * order["volume_orign"] * quote["volume_multiple"]  # long open cost
                position["position_cost_long"] += trade["price"] * order["volume_orign"] * quote["volume_multiple"]  # long position cost
            else:
                # update the position raw fields
                position["volume_short_today"] += order["volume_orign"]
                position["open_cost_short"] += trade["price"] * order["volume_orign"] * quote["volume_multiple"]  # short open cost
                position["position_cost_short"] += trade["price"] * order["volume_orign"] * quote["volume_multiple"]  # short position cost
        # account computed fields that must be updated because of the order change
self._adjust_account_by_order(frozen_margin=-origin_frozen_margin, frozen_premium=-origin_frozen_premium)
        # account raw fields changed by the trade, plus the account computed fields that must be updated
premium = _get_premium(trade, quote)
self._adjust_account_by_trade(commission=trade["commission"], premium=premium)
        # account computed fields that must be updated at the same time as the position fields change
buy_open = order["volume_orign"] if order["direction"] == "BUY" else 0
sell_open = 0 if order["direction"] == "BUY" else order["volume_orign"]
self._adjust_position_account(symbol, quote, underlying_quote,
pre_last_price=trade["price"],
last_price=position["last_price"],
pre_underlying_last_price=underlying_quote["last_price"] if underlying_quote else float('nan'),
underlying_last_price=position["underlying_last_price"],
buy_open=buy_open, sell_open=sell_open)
else: # order["offset"].startswith('CLOSE')
            # update the position raw fields
if order["exchange_id"] in ["SHFE", "INE"]:
if order["offset"] == "CLOSETODAY":
if order["direction"] == "BUY":
position["volume_short_frozen_today"] -= order["volume_orign"]
position["volume_short_today"] -= order["volume_orign"]
elif order["direction"] == "SELL":
position["volume_long_frozen_today"] -= order["volume_orign"]
position["volume_long_today"] -= order["volume_orign"]
if order["offset"] == "CLOSE":
if order["direction"] == "BUY":
position["volume_short_frozen_his"] -= order["volume_orign"]
position["volume_short_his"] -= order["volume_orign"]
elif order["direction"] == "SELL":
position["volume_long_frozen_his"] -= order["volume_orign"]
position["volume_long_his"] -= order["volume_orign"]
elif order["direction"] == "BUY":
if position["volume_short_frozen_his"] >= order["volume_orign"]:
position["volume_short_frozen_his"] -= order["volume_orign"]
position["volume_short_his"] -= order["volume_orign"]
else:
position["volume_short_frozen_today"] -= order["volume_orign"] - position["volume_short_frozen_his"]
position["volume_short_today"] -= order["volume_orign"] - position["volume_short_frozen_his"]
position["volume_short_his"] -= position["volume_short_frozen_his"]
position["volume_short_frozen_his"] = 0
else:
if position["volume_long_frozen_his"] >= order["volume_orign"]:
position["volume_long_frozen_his"] -= order["volume_orign"]
position["volume_long_his"] -= order["volume_orign"]
else:
position["volume_long_frozen_today"] -= order["volume_orign"] - position["volume_long_frozen_his"]
position["volume_long_today"] -= order["volume_orign"] - position["volume_long_frozen_his"]
position["volume_long_his"] -= position["volume_long_frozen_his"]
position["volume_long_frozen_his"] = 0
            # update the position raw fields
if order["direction"] == "SELL":
position["open_cost_long"] -= position["open_price_long"] * order["volume_orign"] * quote["volume_multiple"] # 多头开仓成本
position["position_cost_long"] -= position["position_price_long"] * order["volume_orign"] * quote["volume_multiple"] # 多头持仓成本
else:
position["open_cost_short"] -= position["open_price_short"] * order["volume_orign"] * quote["volume_multiple"] # 空头开仓成本
position["position_cost_short"] -= position["position_price_short"] * order["volume_orign"] * quote["volume_multiple"] # 空头持仓成本
            # account raw fields changed by the trade, plus the account computed fields that must be updated
premium = _get_premium(trade, quote)
close_profit = _get_close_profit(trade, quote, position)
self._adjust_account_by_trade(commission=trade["commission"], premium=premium, close_profit=close_profit)
            # account computed fields that must be updated at the same time as the position fields change
buy_close = order["volume_orign"] if order["direction"] == "BUY" else 0
sell_close = 0 if order["direction"] == "BUY" else order["volume_orign"]
self._adjust_position_account(symbol, quote, underlying_quote, pre_last_price=position["last_price"],
last_price=0, pre_underlying_last_price=position["underlying_last_price"],
underlying_last_price=0, buy_close=buy_close, sell_close=sell_close)
self._send_position(position)
self._send_account()
def _on_order_failed(self, symbol, order):
origin_frozen_margin = order["frozen_margin"]
origin_frozen_premium = order["frozen_premium"]
order["frozen_margin"] = 0.0
order["frozen_premium"] = 0.0
self._send_order(order)
        # adjust the account and the position
if order["offset"] == 'OPEN':
self._adjust_account_by_order(frozen_margin=-origin_frozen_margin, frozen_premium=-origin_frozen_premium)
self._send_account()
else:
position = self._positions[symbol]
if order["exchange_id"] in ["SHFE", "INE"]:
if order["offset"] == "CLOSETODAY":
if order["direction"] == "BUY":
position["volume_short_frozen_today"] -= order["volume_orign"]
else:
position["volume_long_frozen_today"] -= order["volume_orign"]
if order["offset"] == "CLOSE":
if order["direction"] == "BUY":
position["volume_short_frozen_his"] -= order["volume_orign"]
else:
position["volume_long_frozen_his"] -= order["volume_orign"]
else:
if order["direction"] == "BUY":
if position["volume_short_frozen_today"] >= order["volume_orign"]:
position["volume_short_frozen_today"] -= order["volume_orign"]
else:
position["volume_short_frozen_his"] -= order["volume_orign"] - position["volume_short_frozen_today"]
position["volume_short_frozen_today"] = 0
else:
if position["volume_long_frozen_today"] >= order["volume_orign"]:
position["volume_long_frozen_today"] -= order["volume_orign"]
else:
position["volume_long_frozen_his"] -= order["volume_orign"] - position["volume_long_frozen_today"]
position["volume_long_frozen_today"] = 0
self._adjust_position_volume_frozen(position)
self._send_position(position)
def _adjust_position_account(self, symbol, quote, underlying_quote=None, pre_last_price=float('nan'), last_price=float('nan'),
pre_underlying_last_price=float('nan'), underlying_last_price=float('nan'),
buy_open=0, buy_close=0, sell_open=0, sell_close=0):
"""
        A price change requires the computed fields of the position listed below to be updated. This function works out
        the delta of each field and applies the deltas to the position and the account.
        It is called in two situations:
        1. an order is FINISHED and fully filled, split into 4 kinds: buy_open, buy_close, sell_open, sell_close
        2. a quote tick
"""
position = self._positions[symbol]
        float_profit_long = 0  # long floating profit/loss
        float_profit_short = 0  # short floating profit/loss
        position_profit_long = 0  # long position profit/loss; 0 for options
        position_profit_short = 0  # short position profit/loss; 0 for options
        margin_long = 0  # margin occupied by the long side
        margin_short = 0  # margin occupied by the short side
        market_value_long = 0  # market value held as option buyer (always >= 0)
        market_value_short = 0  # market value held as option writer (always <= 0)
        assert [buy_open, buy_close, sell_open, sell_close].count(0) >= 3  # at most one is non-zero; all zero means the update is driven by a price change
if buy_open > 0:
            # buy open: pre_last_price should be the fill price, last_price should be position['last_price']
float_profit_long = (last_price - pre_last_price) * buy_open * quote["volume_multiple"]
if quote["ins_class"].endswith("OPTION"):
market_value_long = last_price * buy_open * quote["volume_multiple"]
else:
margin_long = buy_open * _get_future_margin(quote)
position_profit_long = (last_price - pre_last_price) * buy_open * quote["volume_multiple"]
elif sell_close > 0:
            # sell close: pre_last_price should be position['last_price'], last_price should be 0
float_profit_long = -position["float_profit_long"] / position["volume_long"] * sell_close
if quote["ins_class"].endswith("OPTION"):
market_value_long = -pre_last_price * sell_close * quote["volume_multiple"]
else:
margin_long = -sell_close * _get_future_margin(quote)
position_profit_long = -position["position_profit_long"] / position["volume_long"] * sell_close
elif sell_open > 0:
            # sell open
float_profit_short = (pre_last_price - last_price) * sell_open * quote["volume_multiple"]
if quote["ins_class"].endswith("OPTION"):
market_value_short = -last_price * sell_open * quote["volume_multiple"]
margin_short = sell_open * _get_option_margin(quote, last_price, underlying_last_price)
else:
margin_short = sell_open * _get_future_margin(quote)
position_profit_short = (pre_last_price - last_price) * sell_open * quote["volume_multiple"]
elif buy_close > 0:
            # buy close
float_profit_short = -position["float_profit_short"] / position["volume_short"] * buy_close
if quote["ins_class"].endswith("OPTION"):
market_value_short = pre_last_price * buy_close * quote["volume_multiple"]
margin_short = -buy_close * _get_option_margin(quote, pre_last_price, pre_underlying_last_price)
else:
margin_short = -buy_close * _get_future_margin(quote)
position_profit_short = -position["position_profit_short"] / position["volume_short"] * buy_close
else:
            float_profit_long = (last_price - pre_last_price) * position["volume_long"] * quote["volume_multiple"]  # long floating profit/loss
            float_profit_short = (pre_last_price - last_price) * position["volume_short"] * quote["volume_multiple"]  # short floating profit/loss
if quote["ins_class"].endswith("OPTION"):
margin_short = _get_option_margin(quote, last_price, underlying_last_price) * position["volume_short"] - position["margin_short"]
market_value_long = (last_price - pre_last_price) * position["volume_long"] * quote["volume_multiple"]
market_value_short = (pre_last_price - last_price) * position["volume_short"] * quote["volume_multiple"]
else:
                # options have zero position profit/loss
                position_profit_long = float_profit_long  # long position profit/loss
                position_profit_short = float_profit_short  # short position profit/loss
margin_long = _get_future_margin(quote) * position["volume_long"] - position["margin_long"]
margin_short = _get_future_margin(quote) * position["volume_short"] - position["margin_short"]
if any([buy_open, buy_close, sell_open, sell_close]):
            # update the volume-related computed fields of the position
            # in the sell_close and buy_close branches above the pre-update volumes are used to compute the floating P/L,
            # which is why the volume-field update is placed here
self._adjust_position_volume(position)
self._adjust_position(quote, position, float_profit_long, float_profit_short, position_profit_long,
position_profit_short, margin_long, margin_short, market_value_long, market_value_short)
self._adjust_account_by_position(float_profit=float_profit_long + float_profit_short,
position_profit=position_profit_long + position_profit_short,
margin=margin_long + margin_short,
market_value=market_value_long + market_value_short)
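        # Worked example for the buy_open branch above (hypothetical numbers, futures case):
        # buy_open=2, fill price pre_last_price=100.0, position last_price=101.0, volume_multiple=10
        #   float_profit_long    = (101.0 - 100.0) * 2 * 10 = 20.0
        #   position_profit_long = (101.0 - 100.0) * 2 * 10 = 20.0
        #   margin_long          = 2 * _get_future_margin(quote)
        # and the same deltas are then propagated to the account by _adjust_account_by_position.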
    # -------- Updates to the position computed fields fall into two classes:
    # 1. volume-related updates, made when orders are placed or filled
    # 2. profit/loss, margin and market-value updates, which reference the latest contract price and are made on fills and on quote ticks
def _adjust_position_volume_frozen(self, position):
"""position 原始字段修改后,只有冻结手数需要重新计算,有两种情况需要调用
1. 下平仓单 2. 平仓单 FINISHED, 但没有成交
"""
position["volume_long_frozen"] = position["volume_long_frozen_today"] + position["volume_long_frozen_his"]
position["volume_short_frozen"] = position["volume_short_frozen_today"] + position["volume_short_frozen_his"]
def _adjust_position_volume(self, position):
"""position 原始字段修改后,手数之后需要重新计算
1. 委托单 FINISHED,且全部成交
"""
position["pos_long_today"] = position["volume_long_today"]
position["pos_long_his"] = position["volume_long_his"]
position["pos_short_today"] = position["volume_short_today"]
position["pos_short_his"] = position["volume_short_his"]
position["volume_long"] = position["volume_long_today"] + position["volume_long_his"]
position["volume_long_frozen"] = position["volume_long_frozen_today"] + position["volume_long_frozen_his"]
position["volume_short"] = position["volume_short_today"] + position["volume_short_his"]
position["volume_short_frozen"] = position["volume_short_frozen_today"] + position["volume_short_frozen_his"]
def _adjust_position(self, quote, position, float_profit_long=0, float_profit_short=0, position_profit_long=0,
position_profit_short=0, margin_long=0, margin_short=0, market_value_long=0,
market_value_short=0):
        # update position computed fields that are adjusted by deltas
position["float_profit_long"] += float_profit_long
position["float_profit_short"] += float_profit_short
position["position_profit_long"] += position_profit_long
position["position_profit_short"] += position_profit_short
position["margin_long"] += margin_long
position["margin_short"] += margin_short
position["market_value_long"] += market_value_long
position["market_value_short"] += market_value_short
        # update position computed fields that are recomputed from scratch
if position["volume_long"] > 0:
position["open_price_long"] = position["open_cost_long"] / position["volume_long"] / quote["volume_multiple"]
position["position_price_long"] = position["position_cost_long"] / position["volume_long"] / quote["volume_multiple"]
else:
position["open_price_long"] = float("nan")
position["position_price_long"] = float("nan")
if position["volume_short"] > 0:
position["open_price_short"] = position["open_cost_short"] / position["volume_short"] / quote["volume_multiple"]
position["position_price_short"] = position["position_cost_short"] / position["volume_short"] / quote["volume_multiple"]
else:
position["open_price_short"] = float("nan")
position["position_price_short"] = float("nan")
position["float_profit"] = position["float_profit_long"] + position["float_profit_short"]
position["position_profit"] = position["position_profit_long"] + position["position_profit_short"]
position["margin"] = position["margin_long"] + position["margin_short"]
position["market_value"] = position["market_value_long"] + position["market_value_short"]
    # -------- Updates to the account fall into the following three classes
def _adjust_account_by_trade(self, commission=0, close_profit=0, premium=0):
"""由成交引起的 account 原始字段变化,account 需要更新的计算字段"""
# account 原始字段
self._account["close_profit"] += close_profit
self._account["commission"] += commission
self._account["premium"] += premium # premium变量的值有正负,正数表示收入的权利金,负数表示付出的权利金
        # account computed fields
self._account["balance"] += close_profit - commission + premium
self._account["available"] += close_profit - commission + premium
self._account["risk_ratio"] = self._account["margin"] / self._account["balance"]
def _adjust_account_by_position(self, float_profit=0, position_profit=0, margin=0, market_value=0):
"""由 position 变化,account 需要更新的计算字段"""
# account 计算字段,持仓字段求和的字段
self._account["float_profit"] += float_profit
self._account["position_profit"] += position_profit
self._account["margin"] += margin
self._account["market_value"] += market_value
        # account computed fields
self._account["balance"] += position_profit + market_value
self._account["available"] += position_profit - margin
self._account["risk_ratio"] = self._account["margin"] / self._account["balance"]
def _adjust_account_by_order(self, frozen_margin=0, frozen_premium=0):
"""由 order 变化,account 需要更新的计算字段"""
self._account["frozen_margin"] += frozen_margin
self._account["frozen_premium"] += frozen_premium
self._account["available"] -= (frozen_margin + frozen_premium)
def _send_trade(self, trade):
self._diffs.append({
"trade": {
self._account_key: {
"trades": {
trade["trade_id"]: trade.copy()
}
}
}
})
def _send_order(self, order):
self._diffs.append({
"trade": {
self._account_key: {
"orders": {
order["order_id"]: order.copy()
}
}
}
})
def _send_position(self, position):
self._diffs.append({
"trade": {
self._account_key: {
"positions": {
position["exchange_id"] + "." + position["instrument_id"]: position.copy()
}
}
}
})
def _send_account(self):
self._diffs.append({
"trade": {
self._account_key: {
"accounts": {
"CNY": self._account.copy()
}
}
}
})
def _default_get_trade_timestamp(self):
"""获取交易时间的默认方法,为当前所有 quote 的最大行情时间"""
return int(datetime.strptime(self._max_datetime, "%Y-%m-%d %H:%M:%S.%f").timestamp() * 1e6) * 1000
def _default_is_in_trading_time(self, quote):
"""判断是否在交易时间段"""
return _is_in_trading_time(quote, self._max_datetime, float("nan"))
|
chrome/common/extensions/docs/server2/appengine_wrappers.py
|
google-ar/chromium
| 2,151 |
71883
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This will attempt to import the actual App Engine modules, and if it fails,
# they will be replaced with fake modules. This is useful during testing.
try:
import google.appengine.api.memcache as memcache
except ImportError:
class _RPC(object):
def __init__(self, result=None):
self.result = result
def get_result(self):
return self.result
def wait(self):
pass
class InMemoryMemcache(object):
"""An in-memory memcache implementation.
"""
def __init__(self):
self._namespaces = {}
class Client(object):
def set_multi_async(self, mapping, namespace='', time=0):
return _RPC(result=dict(
(k, memcache.set(k, v, namespace=namespace, time=time))
for k, v in mapping.iteritems()))
def get_multi_async(self, keys, namespace='', time=0):
return _RPC(result=dict(
(k, memcache.get(k, namespace=namespace, time=time)) for k in keys))
def set(self, key, value, namespace='', time=0):
self._GetNamespace(namespace)[key] = value
def get(self, key, namespace='', time=0):
return self._GetNamespace(namespace).get(key)
def delete(self, key, namespace=''):
self._GetNamespace(namespace).pop(key, None)
def delete_multi(self, keys, namespace=''):
for k in keys:
self.delete(k, namespace=namespace)
def _GetNamespace(self, namespace):
if namespace not in self._namespaces:
self._namespaces[namespace] = {}
return self._namespaces[namespace]
def flush_all(self):
self._namespaces = {}
return False
memcache = InMemoryMemcache()
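  # Illustrative usage of the in-memory fallback above (a sketch, not part of the original file):
  #   memcache.set('key', 'value', namespace='docs')
  #   memcache.get('key', namespace='docs')     # -> 'value'
  #   memcache.delete('key', namespace='docs')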
|
insights/tests/test_filters.py
|
maxamillion/insights-core
| 121 |
71889
|
<gh_stars>100-1000
from collections import defaultdict
from insights.core import filters
from insights.parsers.ps import PsAux, PsAuxcww
from insights.specs import Specs
from insights.specs.default import DefaultSpecs
import pytest
import sys
def setup_function(func):
if func is test_get_filter:
filters.add_filter(Specs.ps_aux, "COMMAND")
if func is test_get_filter_registry_point:
filters.add_filter(Specs.ps_aux, "COMMAND")
filters.add_filter(DefaultSpecs.ps_aux, "MEM")
if func is test_filter_dumps_loads:
filters.add_filter(Specs.ps_aux, "COMMAND")
def teardown_function(func):
if func is test_get_filter:
del filters.FILTERS[Specs.ps_aux]
if func is test_get_filter_registry_point:
del filters.FILTERS[Specs.ps_aux]
del filters.FILTERS[DefaultSpecs.ps_aux]
if func is test_filter_dumps_loads:
del filters.FILTERS[Specs.ps_aux]
if func is test_add_filter_to_parser:
del filters.FILTERS[Specs.ps_aux]
if func is test_add_filter_to_parser_patterns_list:
del filters.FILTERS[Specs.ps_aux]
@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier code uses oyaml library which is incompatible with this test')
def test_filter_dumps_loads():
r = filters.dumps()
assert r is not None
filters.FILTERS = defaultdict(set)
filters.loads(r)
assert Specs.ps_aux in filters.FILTERS
assert filters.FILTERS[Specs.ps_aux] == set(["COMMAND"])
def test_get_filter():
f = filters.get_filters(Specs.ps_aux)
assert "COMMAND" in f
f = filters.get_filters(DefaultSpecs.ps_aux)
assert "COMMAND" in f
def test_get_filter_registry_point():
s = set(["COMMAND", "MEM"])
f = filters.get_filters(DefaultSpecs.ps_aux)
assert f & s == s
f = filters.get_filters(Specs.ps_aux)
assert "COMMAND" in f
assert "MEM" not in f
def test_add_filter_to_parser():
filter_string = "bash"
filters.add_filter(PsAux, filter_string)
spec_filters = filters.get_filters(Specs.ps_aux)
assert filter_string in spec_filters
parser_filters = filters.get_filters(PsAux)
assert not parser_filters
def test_add_filter_to_parser_patterns_list():
filters_list = ["bash", "systemd", "Network"]
filters.add_filter(PsAux, filters_list)
spec_filters = filters.get_filters(Specs.ps_aux)
assert all(f in spec_filters for f in filters_list)
parser_filters = filters.get_filters(PsAux)
assert not parser_filters
def test_add_filter_to_parser_non_filterable():
filter_string = "bash"
filters.add_filter(PsAuxcww, filter_string)
spec_filters = filters.get_filters(Specs.ps_auxcww)
assert not spec_filters
parser_filters = filters.get_filters(PsAuxcww)
assert not parser_filters
def test_add_filter_exception_not_filterable():
with pytest.raises(Exception):
filters.add_filter(Specs.ps_auxcww, "bash")
def test_add_filter_exception_raw():
with pytest.raises(Exception):
filters.add_filter(Specs.metadata_json, "[]")
def test_add_filter_exception_empty():
with pytest.raises(Exception):
filters.add_filter(Specs.ps_aux, "")
|
crabageprediction/venv/Lib/site-packages/mpl_toolkits/axes_grid/anchored_artists.py
|
13rianlucero/CrabAgePrediction
| 603 |
71897
|
from matplotlib.offsetbox import AnchoredOffsetbox, AuxTransformBox, VPacker,\
TextArea, AnchoredText, DrawingArea, AnnotationBbox
from mpl_toolkits.axes_grid1.anchored_artists import \
AnchoredDrawingArea, AnchoredAuxTransformBox, \
AnchoredEllipse, AnchoredSizeBar
|
python/src/nnabla/utils/converter/nnablart/save_variable_buffer.py
|
daniel-falk/nnabla
| 2,792 |
71898
|
<gh_stars>1000+
# Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class _LifeSpan:
def __init__(self):
self.begin_func_idx = -1
self.end_func_idx = -1
def needed_at(self, func_idx):
needed = self.begin_func_idx <= func_idx
needed &= self.end_func_idx >= func_idx
return needed
def __make_buf_var_lives(info):
# buf_var_lives is to remember from when and until when each
# Buffer Variables must be alive
buf_var_num = len(info._variable_buffer_index)
buf_var_lives = [_LifeSpan() for _ in range(buf_var_num)]
name_to_vidx = {v.name: i for i,
v in enumerate(info._network.variable)}
name_to_var = {v.name: v for v in info._network.variable}
# set _LifeSpan.begin_func_idx and .end_func_idx along info._network
final_func_idx = len(info._network.function)
for func_idx, func in enumerate(info._network.function):
for var_name in list(func.input) + list(func.output):
if var_name in info._generator_variables:
# no need to assign buffer for generator data
pass
            elif name_to_var[var_name].type == 'Buffer':
var_idx = name_to_vidx[var_name]
buf_idx = info._buffer_ids[var_idx]
buf_var_life = buf_var_lives[buf_idx]
if buf_var_life.begin_func_idx < 0:
if var_name in info._input_variables:
buf_var_life.begin_func_idx = 0
else:
buf_var_life.begin_func_idx = func_idx
else:
# only identify a Function which first refers to the Variable
pass
if var_name in info._output_variables:
buf_var_life.end_func_idx = final_func_idx
else:
buf_var_life.end_func_idx = func_idx
else:
pass # ignore 'Parameter'
return buf_var_lives
def __count_actual_buf(info, buf_var_lives):
# count how many buffers are required at maximum based on buf_var_lives
actual_buf_num = 0
for func_idx, _ in enumerate(info._network.function):
buf_num = 0
for buf_idx, buf_var_life in enumerate(buf_var_lives):
buf_num += int(buf_var_life.needed_at(func_idx))
actual_buf_num = max(actual_buf_num, buf_num)
return actual_buf_num
def __make_buf_var_refs(info, buf_var_lives):
# buf_var_refs is to store buffer indices of buffers required in each Function
actual_buf_num = __count_actual_buf(info, buf_var_lives)
shape = (len(info._network.function), actual_buf_num)
buf_var_refs = np.empty(shape, dtype=np.int32)
buf_var_refs[:] = -1
# fill buf_var_refs based on buf_var_lives
for func_idx, _ in enumerate(info._network.function):
crsr = 0
for buf_idx, buf_var_life in enumerate(buf_var_lives):
if buf_var_life.needed_at(func_idx):
buf_var_refs[func_idx][crsr] = buf_idx
crsr += 1
else:
pass # only focus on buffers used in this func
return buf_var_refs
def __compute_actual_buf_sizes(info, buf_var_lives):
# buf_size_array is to store size values of each actual buffer
actual_buf_num = __count_actual_buf(info, buf_var_lives)
buf_size_array = np.zeros(actual_buf_num, dtype=np.int32)
# tmp_size_array is size values when only focusing on a single Function
tmp_size_array = np.empty_like(buf_size_array, dtype=np.int32)
for func_idx, _ in enumerate(info._network.function):
tmp_size_array[:] = -1
crsr = 0
for buf_idx, buf_var_life in enumerate(buf_var_lives):
if buf_var_life.needed_at(func_idx):
tmp_size_array[crsr] = info._variable_buffer_size[buf_idx]
crsr += 1
else:
pass # only focus on buffers used in this func
# update sizes of actual buffers
tmp_size_array = np.sort(tmp_size_array)
for i in range(actual_buf_num):
buf_size_array[i] = max(buf_size_array[i], tmp_size_array[i])
return buf_size_array
def __assign_actual_buf_to_variable(info, actual_buf_sizes, buf_var_refs):
# create a dictionary to store assignment of actual buffers to Variables
# vidx_to_abidx is short for variable index to actual buffer index
vidx_to_abidx = {}
# actual_assigned_flags is to remember if actual buffers are assigned or not
actual_buf_num = len(actual_buf_sizes)
    actual_assigned_flags = np.empty(actual_buf_num, dtype=bool)  # plain bool: np.bool is removed in recent NumPy releases
for func_idx, _ in enumerate(info._network.function):
actual_assigned_flags[:] = False
for ref_crsr in range(actual_buf_num):
            # a negative buf_idx means the corresponding buffer is not needed
buf_idx = buf_var_refs[func_idx][ref_crsr]
if buf_idx < 0:
continue
# restore assignment determined in the previous func_idx
vidx = info._variable_buffer_index[buf_idx][0]
if vidx in vidx_to_abidx:
abidx = vidx_to_abidx[vidx]
actual_assigned_flags[abidx] = True
else:
pass # determine assignment for this vidx in the following for loop
# determine new assignments of actual buffers to Variables
for ref_crsr in range(actual_buf_num):
            # a negative buf_idx means the corresponding buffer is not needed
buf_idx = buf_var_refs[func_idx][ref_crsr]
if buf_idx < 0:
continue
# skip Variables to which an actual buffer is already assigned
vidx = info._variable_buffer_index[buf_idx][0]
if vidx in vidx_to_abidx:
continue
# search for actual buffers vacant and large enough
needed_size = info._variable_buffer_size[buf_idx]
abidx = 0
while abidx != actual_buf_num:
cond = not actual_assigned_flags[abidx]
cond &= needed_size <= actual_buf_sizes[abidx]
if cond:
actual_assigned_flags[abidx] = True
vidx_to_abidx[vidx] = abidx
break
else:
abidx += 1
# increase size if buffers large enough was NOT found
if abidx == actual_buf_num:
for abidx in range(actual_buf_num):
if not actual_assigned_flags[abidx]:
actual_buf_sizes[abidx] = needed_size
actual_assigned_flags[abidx] = True
vidx_to_abidx[vidx] = abidx
break
return vidx_to_abidx
def save_variable_buffer(info):
# make the followings to save memory usage for Variable Buffer:
# - actual_buf_sizes(list): sizes of actual buffers, which lie under Variable Buffer.
# indices in this list are hereinafter called 'actual buffer index'
# - vidx_to_abidx(dict): assignment of actual buffers to Variable Buffer.
# the key and the value are Variable index and actual buffer index, respectively
buf_var_lives = __make_buf_var_lives(info)
actual_buf_sizes = __compute_actual_buf_sizes(info, buf_var_lives)
buf_var_refs = __make_buf_var_refs(info, buf_var_lives)
vidx_to_abidx = __assign_actual_buf_to_variable(
info, actual_buf_sizes, buf_var_refs)
return list(actual_buf_sizes), vidx_to_abidx
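# Illustrative note (hypothetical values, not from the original file): if save_variable_buffer(info)
# returned ([1024, 512], {0: 1, 3: 0}), two reusable buffers of sizes 1024 and 512 (in the units of
# info._variable_buffer_size) would cover the whole network, with Variable 0 placed in actual buffer 1
# and Variable 3 in actual buffer 0.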
|
examples/explorer/explorer.py
|
wcastello/splunk-sdk-python
| 495 |
71929
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import server
import webbrowser
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
try:
import utils
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
from splunklib.six.moves import urllib
PORT = 8080
def main(argv):
usage = "usage: %prog [options]"
redirect_port_args = {
"redirectport": {
"flags": ["--redirectport"],
"default": PORT,
"help": "Port to use for redirect server (default: %s)" % PORT,
},
}
opts = utils.parse(argv, redirect_port_args, ".splunkrc", usage=usage)
args = [("scheme", opts.kwargs["scheme"]),
("host", opts.kwargs["host"]),
("port", opts.kwargs["port"]),
("redirecthost", "localhost"),
("redirectport", opts.kwargs["redirectport"]),
("username", opts.kwargs["username"]),
("password", opts.kwargs["password"])]
if 'app' in list(opts.kwargs.keys()):
args.append(('app', opts.kwargs['app']))
if 'owner' in list(opts.kwargs.keys()):
args.append(('owner', opts.kwargs['owner']))
# Encode these arguments
args = urllib.parse.urlencode(args)
# Launch the browser
webbrowser.open("file://%s" % os.path.join(os.getcwd(), "explorer.html?%s" % args))
    # And serve the files
server.serve(opts.kwargs["redirectport"])
if __name__ == "__main__":
try:
main(sys.argv[1:])
except KeyboardInterrupt:
pass
except:
raise
|
Algo and DSA/LeetCode-Solutions-master/Python/self-crossing.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
71944
|
<filename>Algo and DSA/LeetCode-Solutions-master/Python/self-crossing.py
# Time: O(n)
# Space: O(1)
class Solution(object):
def isSelfCrossing(self, x):
"""
:type x: List[int]
:rtype: bool
"""
if len(x) >= 5 and x[3] == x[1] and x[4] + x[0] >= x[2]:
# Crossing in a loop:
# 2
# 3 ┌────┐
# └─══>┘1
# 4 0 (overlapped)
return True
for i in xrange(3, len(x)):
if x[i] >= x[i - 2] and x[i - 3] >= x[i - 1]:
# Case 1:
# i-2
# i-1┌─┐
# └─┼─>i
# i-3
return True
elif i >= 5 and x[i - 4] <= x[i - 2] and x[i] + x[i - 4] >= x[i - 2] and \
x[i - 1] <= x[i - 3] and x[i - 5] + x[i - 1] >= x[i - 3]:
# Case 2:
# i-4
# ┌──┐
# │i<┼─┐
# i-3│ i-5│i-1
# └────┘
# i-2
return True
return False
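# Example cases (taken from the well-known problem statement; results follow from the logic above):
#   Solution().isSelfCrossing([2, 1, 1, 2])  # True  - the path crosses itself
#   Solution().isSelfCrossing([1, 2, 3, 4])  # False - the spiral keeps expanding
#   Solution().isSelfCrossing([1, 1, 1, 1])  # True  - the last move touches the starting point
# Note the loop uses xrange, so this file targets Python 2 as written.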
|
pypykatz/rdp/parser.py
|
wisdark/pypykatz
| 1,861 |
71948
|
<reponame>wisdark/pypykatz
import platform
from pypykatz import logger
from minidump.minidumpfile import MinidumpFile
from pypykatz.commons.common import KatzSystemInfo
from pypykatz.rdp.packages.creds.templates import RDPCredsTemplate
from pypykatz.rdp.packages.creds.decryptor import RDPCredentialDecryptor
class RDPCredParser:
def __init__(self, process, reader, sysinfo):
self.process = process
self.reader = reader
self.sysinfo = sysinfo
self.credentials = []
@staticmethod
def go_live(pid = None, all_rdp = False):
if platform.system() != 'Windows':
raise Exception('Live parsing will only work on Windows')
from pypykatz.commons.readers.local.common.live_reader_ctypes import OpenProcess, PROCESS_ALL_ACCESS
from pypykatz.commons.winapi.machine import LiveMachine
from pypykatz.commons.winapi.constants import PROCESS_VM_READ , PROCESS_VM_WRITE , PROCESS_VM_OPERATION , PROCESS_QUERY_INFORMATION , PROCESS_CREATE_THREAD
from pypykatz.commons.readers.local.common.privileges import enable_debug_privilege
from pypykatz.commons.readers.local.live_reader import LiveReader
from pypykatz.commons.readers.local.process import Process
req_access_rights = PROCESS_VM_READ | PROCESS_VM_WRITE | PROCESS_VM_OPERATION | PROCESS_QUERY_INFORMATION | PROCESS_CREATE_THREAD
enable_debug_privilege()
targets = []
if pid is not None:
process = Process(pid=pid, access = req_access_rights )
process.list_modules()
reader = LiveReader(process_handle=process.phandle)
sysinfo = KatzSystemInfo.from_live_reader(reader)
targets.append(RDPCredParser(process, reader.get_buffered_reader(), sysinfo))
else:
machine = LiveMachine()
for service_name, display_name, pid in machine.list_services():
if service_name == 'TermService':
process = Process(pid=pid, access = req_access_rights )
reader = LiveReader(process_handle=process.phandle)
sysinfo = KatzSystemInfo.from_live_reader(reader)
targets.append(RDPCredParser(process, reader.get_buffered_reader(), sysinfo))
if all_rdp is True:
for pid in machine.list_all_pids():
try:
process = Process(pid=pid, access = req_access_rights )
for module in process.list_modules():
if module.name.lower().find("mstscax.dll") != -1:
reader = LiveReader(process_handle=process.phandle)
sysinfo = KatzSystemInfo.from_live_reader(reader)
targets.append(RDPCredParser(process, reader.get_buffered_reader(), sysinfo))
break
except Exception as e:
#import traceback
#traceback.print_exc()
print(e)
for target in targets:
target.start()
return targets
@staticmethod
def parse_minidump_file(filename, chunksize = 10*1024):
try:
minidump = MinidumpFile.parse(filename)
reader = minidump.get_reader().get_buffered_reader(segment_chunk_size=chunksize)
sysinfo = KatzSystemInfo.from_minidump(minidump)
except Exception as e:
logger.exception('Minidump parsing error!')
raise e
try:
mimi = RDPCredParser(None, reader, sysinfo)
mimi.start()
except Exception as e:
logger.info('Credentials parsing error!')
raise e
return [mimi]
def rdpcreds(self):
decryptor_template = RDPCredsTemplate.get_template(self.sysinfo)
decryptor = RDPCredentialDecryptor(self.process, self.reader, decryptor_template, self.sysinfo)
decryptor.start()
for cred in decryptor.credentials:
self.credentials.append(cred)
def start(self):
self.rdpcreds()
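# Minimal usage sketch (an assumption based on the two entry points above; the dump file name is hypothetical):
#   parsers = RDPCredParser.parse_minidump_file('termservice.dmp')
#   for p in parsers:
#       print(p.credentials)
# or, on a live Windows host with sufficient privileges:
#   parsers = RDPCredParser.go_live(all_rdp=True)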
|
test/python/test_rn50_infer.py
|
avijit-chakroborty/ngraph-bridge
| 142 |
71962
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Get pretrained model file: wget https://zenodo.org/record/2535873/files/resnet50_v1.pb
import time
from argparse import ArgumentParser
import numpy as np
import tensorflow as tf
from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference
from tensorflow.python.framework import dtypes
import ngraph_bridge
INPUTS = 'input_tensor'
OUTPUTS = 'softmax_tensor'
RESNET_IMAGE_SIZE = 224
class RN50Graph:
"""Evaluate image classifier with optimized TensorFlow graph"""
def __init__(self):
arg_parser = ArgumentParser(description='Parse arguments')
arg_parser.add_argument(
"--batch-size", dest="batch_size", type=int, default=8)
arg_parser.add_argument(
"--num-images", dest='num_images', type=int, default=512)
arg_parser.add_argument(
"--num-inter-threads",
dest='num_inter_threads',
type=int,
default=0)
arg_parser.add_argument(
"--num-intra-threads",
dest='num_intra_threads',
type=int,
default=0)
arg_parser.add_argument(
"--input-graph",
dest='input_graph',
type=str,
default="resnet50_v1.pb")
arg_parser.add_argument(
"--warmup-iters", dest='warmup_iters', type=int, default=8)
self.args = arg_parser.parse_args()
def run(self):
"""run benchmark with optimized graph"""
print("Run inference with dummy data")
config = tf.compat.v1.ConfigProto()
config.intra_op_parallelism_threads = self.args.num_intra_threads
config.inter_op_parallelism_threads = self.args.num_inter_threads
config.use_per_session_threads = True
data_graph = tf.Graph()
with data_graph.as_default():
input_shape = [
self.args.batch_size, RESNET_IMAGE_SIZE, RESNET_IMAGE_SIZE, 3
]
images = tf.random.uniform(
input_shape,
0.0,
255.0,
dtype=tf.float32,
seed=42,
name='synthetic_images')
infer_graph = tf.Graph()
with infer_graph.as_default():
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(self.args.input_graph, 'rb') as input_file:
input_graph_content = input_file.read()
graph_def.ParseFromString(input_graph_content)
print(
"Optimizing graph %s for inference..." % self.args.input_graph)
output_graph = optimize_for_inference(
graph_def, [INPUTS], [OUTPUTS], dtypes.float32.as_datatype_enum,
False)
tf.import_graph_def(output_graph, name='')
input_tensor = infer_graph.get_tensor_by_name('input_tensor:0')
output_tensor = infer_graph.get_tensor_by_name('softmax_tensor:0')
# Run without nGraph first
print("Run inference (without nGraph)")
ngraph_bridge.disable()
data_sess = tf.compat.v1.Session(graph=data_graph, config=config)
infer_sess = tf.compat.v1.Session(graph=infer_graph, config=config)
iteration = 0
num_processed_images = 0
num_remaining_images = self.args.num_images
tf_time = 0.0
tf_labels = np.array([], dtype=np.int32)
while num_remaining_images >= self.args.batch_size:
np_images = data_sess.run(images)
if iteration > self.args.warmup_iters:
num_processed_images += self.args.batch_size
num_remaining_images -= self.args.batch_size
tf_start_time = time.time()
predictions = infer_sess.run(output_tensor,
{input_tensor: np_images})
tf_elapsed_time = time.time() - tf_start_time
if iteration > self.args.warmup_iters:
tf_time += tf_elapsed_time
tf_labels = np.append(tf_labels, np.argmax(
predictions, axis=-1))
iteration += 1
print("Total execution time (TF): ", tf_time)
# Run with nGraph now
print("Run inference (with nGraph)")
ngraph_bridge.enable()
data_sess = tf.compat.v1.Session(graph=data_graph, config=config)
infer_sess = tf.compat.v1.Session(graph=infer_graph, config=config)
iteration = 0
num_processed_images = 0
num_remaining_images = self.args.num_images
ngtf_time = 0.0
ngtf_labels = np.array([], dtype=np.int32)
while num_remaining_images >= self.args.batch_size:
np_images = data_sess.run(images)
if iteration > self.args.warmup_iters:
num_processed_images += self.args.batch_size
num_remaining_images -= self.args.batch_size
ngtf_start_time = time.time()
predictions = infer_sess.run(output_tensor,
{input_tensor: np_images})
ngtf_elapsed_time = time.time() - ngtf_start_time
if iteration > self.args.warmup_iters:
ngtf_time += ngtf_elapsed_time
ngtf_labels = np.append(ngtf_labels,
np.argmax(predictions, axis=-1))
iteration += 1
print("Total execution time (NGTF): ", ngtf_time)
print("Processed %d images. Batch size = %d" % (num_processed_images,
self.args.batch_size))
print("Avg throughput (TF): %0.4f img/s" %
(num_processed_images / tf_time))
print("Avg throughput (NGTF): %0.4f img/s" %
(num_processed_images / ngtf_time))
assert ((tf_labels == ngtf_labels).all())
if __name__ == "__main__":
graph = RN50Graph()
graph.run()
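# Example invocation (hypothetical paths and sizes), matching the argument parser defined above:
#   python test_rn50_infer.py --input-graph resnet50_v1.pb --batch-size 8 \
#       --num-images 512 --warmup-iters 8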
|
docs/examples/plot_video.py
|
siahuat0727/torchio
| 1,340 |
71984
|
"""
Transform video
===============
In this example, we use ``torchio.Resample((2, 2, 1))`` to divide the spatial
size of the clip (height and width) by two and
``RandomAffine(degrees=(0, 0, 20))`` to rotate a maximum of 20 degrees around
the time axis.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import torch
import torchio as tio
from PIL import Image
def read_clip(path, undersample=4):
"""Read a GIF a return an array of shape (C, W, H, T)."""
gif = Image.open(path)
frames = []
for i in range(gif.n_frames):
gif.seek(i)
frames.append(np.array(gif.convert('RGB')))
frames = frames[::undersample]
array = np.stack(frames).transpose(3, 1, 2, 0)
delay = gif.info['duration']
return array, delay
def plot_gif(image):
def _update_frame(num):
frame = get_frame(image, num)
im.set_data(frame)
return
def get_frame(image, i):
return image.data[..., i].permute(1, 2, 0).byte()
plt.rcParams['animation.embed_limit'] = 25
fig, ax = plt.subplots()
im = ax.imshow(get_frame(image, 0))
return animation.FuncAnimation(
fig,
_update_frame,
repeat_delay=image['delay'],
frames=image.shape[-1],
)
# Source: https://thehigherlearning.wordpress.com/2014/06/25/watching-a-cell-divide-under-an-electron-microscope-is-mesmerizing-gif/ # noqa: E501
array, delay = read_clip('nBTu3oi.gif')
plt.imshow(array[..., 0].transpose(1, 2, 0))
plt.plot()
image = tio.ScalarImage(tensor=array, delay=delay)
original_animation = plot_gif(image)
transform = tio.Compose((
tio.Resample((2, 2, 1)),
tio.RandomAffine(degrees=(0, 0, 20)),
))
torch.manual_seed(0)
transformed = transform(image)
transformed_animation = plot_gif(transformed)
|
attic/iterables/CACM/closed_file.py
|
l65775622/example-code
| 5,651 |
71985
|
<reponame>l65775622/example-code
"""
<NAME>. 2014. The curse of the excluded middle.
Commun. ACM 57, 6 (June 2014), 50-55. DOI=10.1145/2605176
http://doi.acm.org/10.1145/2605176
"""
with open('citation.txt', encoding='ascii') as fp:
get_contents = lambda: fp.read()
print(get_contents())
|
ffn/training/model.py
|
pgunn/ffn
| 266 |
72024
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for FFN model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from . import optimizer
class FFNModel(object):
"""Base class for FFN models."""
# Dimensionality of the model (2 or 3).
dim = None
############################################################################
# (x, y, z) tuples defining various properties of the network.
# Note that 3-tuples should be used even for 2D networks, in which case
# the third (z) value is ignored.
# How far to move the field of view in the respective directions.
deltas = None
# Size of the input image and seed subvolumes to be used during inference.
# This is enough information to execute a single prediction step, without
# moving the field of view.
input_image_size = None
input_seed_size = None
# Size of the predicted patch as returned by the model.
pred_mask_size = None
###########################################################################
# TF op to compute loss optimized during training. This should include all
# loss components in case more than just the pixelwise loss is used.
loss = None
# TF op to call to perform loss optimization on the model.
train_op = None
def __init__(self, deltas, batch_size=None, define_global_step=True):
assert self.dim is not None
self.deltas = deltas
self.batch_size = batch_size
# Initialize the shift collection. This is used during training with the
# fixed step size policy.
self.shifts = []
for dx in (-self.deltas[0], 0, self.deltas[0]):
for dy in (-self.deltas[1], 0, self.deltas[1]):
for dz in (-self.deltas[2], 0, self.deltas[2]):
if dx == 0 and dy == 0 and dz == 0:
continue
self.shifts.append((dx, dy, dz))
if define_global_step:
self.global_step = tf.Variable(0, name='global_step', trainable=False)
# The seed is always a placeholder which is fed externally from the
# training/inference drivers.
self.input_seed = tf.placeholder(tf.float32, name='seed')
self.input_patches = tf.placeholder(tf.float32, name='patches')
# For training, labels should be defined as a TF object.
self.labels = None
# Optional. Provides per-pixel weights with which the loss is multiplied.
# If specified, should have the same shape as self.labels.
self.loss_weights = None
self.logits = None # type: tf.Operation
# List of image tensors to save in summaries. The images are concatenated
# along the X axis.
self._images = []
def set_uniform_io_size(self, patch_size):
"""Initializes unset input/output sizes to 'patch_size', sets input shapes.
This assumes that the inputs and outputs are of equal size, and that exactly
one step is executed in every direction during training.
Args:
patch_size: (x, y, z) specifying the input/output patch size
Returns:
None
"""
if self.pred_mask_size is None:
self.pred_mask_size = patch_size
if self.input_seed_size is None:
self.input_seed_size = patch_size
if self.input_image_size is None:
self.input_image_size = patch_size
self.set_input_shapes()
def set_input_shapes(self):
"""Sets the shape inference for input_seed and input_patches.
Assumes input_seed_size and input_image_size are already set.
"""
self.input_seed.set_shape([self.batch_size] +
list(self.input_seed_size[::-1]) + [1])
self.input_patches.set_shape([self.batch_size] +
list(self.input_image_size[::-1]) + [1])
def set_up_sigmoid_pixelwise_loss(self, logits):
"""Sets up the loss function of the model."""
assert self.labels is not None
assert self.loss_weights is not None
pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
labels=self.labels)
pixel_loss *= self.loss_weights
self.loss = tf.reduce_mean(pixel_loss)
tf.summary.scalar('pixel_loss', self.loss)
self.loss = tf.verify_tensor_all_finite(self.loss, 'Invalid loss detected')
def set_up_optimizer(self, loss=None, max_gradient_entry_mag=0.7):
"""Sets up the training op for the model."""
if loss is None:
loss = self.loss
tf.summary.scalar('optimizer_loss', self.loss)
opt = optimizer.optimizer_from_flags()
grads_and_vars = opt.compute_gradients(loss)
for g, v in grads_and_vars:
if g is None:
tf.logging.error('Gradient is None: %s', v.op.name)
if max_gradient_entry_mag > 0.0:
grads_and_vars = [(tf.clip_by_value(g,
-max_gradient_entry_mag,
+max_gradient_entry_mag), v)
for g, v, in grads_and_vars]
trainables = tf.trainable_variables()
if trainables:
for var in trainables:
tf.summary.histogram(var.name.replace(':0', ''), var)
for grad, var in grads_and_vars:
tf.summary.histogram(
'gradients/%s' % var.name.replace(':0', ''), grad)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = opt.apply_gradients(grads_and_vars,
global_step=self.global_step,
name='train')
def show_center_slice(self, image, sigmoid=True):
image = image[:, image.get_shape().dims[1] // 2, :, :, :]
if sigmoid:
image = tf.sigmoid(image)
self._images.append(image)
def add_summaries(self):
pass
def update_seed(self, seed, update):
"""Updates the initial 'seed' with 'update'."""
dx = self.input_seed_size[0] - self.pred_mask_size[0]
dy = self.input_seed_size[1] - self.pred_mask_size[1]
dz = self.input_seed_size[2] - self.pred_mask_size[2]
if dx == 0 and dy == 0 and dz == 0:
seed += update
else:
seed += tf.pad(update, [[0, 0],
[dz // 2, dz - dz // 2],
[dy // 2, dy - dy // 2],
[dx // 2, dx - dx // 2],
[0, 0]])
return seed
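    # Illustrative example for update_seed above (hypothetical sizes): with input_seed_size=(49, 49, 49)
    # and pred_mask_size=(33, 33, 33), dx = dy = dz = 16, so the update is zero-padded by 8 voxels on
    # each side of every spatial axis before being added to the seed.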
def define_tf_graph(self):
"""Creates the TensorFlow graph representing the model.
If self.labels is not None, the graph should include operations for
computing and optimizing the loss.
"""
raise NotImplementedError(
'DefineTFGraph needs to be defined by a subclass.')
|
build_tools/setup_helpers/extension.py
|
pmeier/text
| 3,172 |
72063
|
<reponame>pmeier/text
import os
import platform
import subprocess
from pathlib import Path
from torch.utils.cpp_extension import (
CppExtension,
BuildExtension as TorchBuildExtension
)
__all__ = [
'get_ext_modules',
'BuildExtension',
]
_ROOT_DIR = Path(__file__).parent.parent.parent.resolve()
_CSRC_DIR = _ROOT_DIR / 'torchtext' / 'csrc'
_TP_BASE_DIR = _ROOT_DIR / 'third_party'
_TP_INSTALL_DIR = _TP_BASE_DIR / 'build'
def _get_eca(debug):
eca = []
if platform.system() == "Windows":
eca += ['/MT']
if debug:
eca += ["-O0", "-g"]
else:
if platform.system() == "Windows":
eca += ['-O2']
else:
eca += ["-O3", "-fvisibility=hidden"]
return eca
def _get_ela(debug):
ela = []
if debug:
if platform.system() == "Windows":
ela += ["/DEBUG:FULL"]
else:
ela += ["-O0", "-g"]
else:
if platform.system() != "Windows":
ela += ["-O3"]
return ela
def _get_srcs():
return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]
def _get_include_dirs():
return [
str(_CSRC_DIR),
str(_TP_INSTALL_DIR / 'include'),
]
def _get_library_dirs():
return [
str(_TP_INSTALL_DIR / 'lib'),
str(_TP_INSTALL_DIR / 'lib64')
]
def _get_libraries():
    # NOTE: The order of the libraries listed below matters.
#
# For example, the symbol `sentencepiece::unigram::Model` is
# defined in sentencepiece but UNDEFINED in sentencepiece_train.
# GCC only remembers the last encountered symbol.
    # Therefore placing 'sentencepiece_train' after 'sentencepiece' causes a runtime error.
#
# $ nm third_party/build/lib/libsentencepiece_train.a | grep _ZTIN13sentencepiece7unigram5ModelE
# U _ZTIN13sentencepiece7unigram5ModelE
# $ nm third_party/build/lib/libsentencepiece.a | grep _ZTIN13sentencepiece7unigram5ModelE
# 0000000000000000 V _ZTIN13sentencepiece7unigram5ModelE
return [
'sentencepiece_train',
'sentencepiece',
're2',
'double-conversion'
]
def _get_cxx11_abi():
try:
import torch
value = int(torch._C._GLIBCXX_USE_CXX11_ABI)
except ImportError:
value = 0
return '-D_GLIBCXX_USE_CXX11_ABI=' + str(value)
def _build_third_party(debug):
build_dir = _TP_BASE_DIR / 'build'
build_dir.mkdir(exist_ok=True)
build_env = os.environ.copy()
config = 'Debug' if debug else 'Release'
if platform.system() == 'Windows':
extra_args = [
'-GNinja',
]
build_env.setdefault('CC', 'cl')
build_env.setdefault('CXX', 'cl')
else:
extra_args = ['-DCMAKE_CXX_FLAGS=-fPIC ' + _get_cxx11_abi()]
subprocess.run(
args=[
'cmake',
'-DBUILD_SHARED_LIBS=OFF',
'-DRE2_BUILD_TESTING=OFF',
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
f'-DCMAKE_BUILD_TYPE={config}',
'-DCMAKE_CXX_VISIBILITY_PRESET=hidden',
'-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',
] + extra_args + ['..'],
cwd=str(build_dir),
check=True,
env=build_env,
)
print('*** Command list Thirdparty ***')
with open(build_dir / 'compile_commands.json', 'r') as fileobj:
print(fileobj.read())
print('running cmake --build', flush=True)
subprocess.run(
args=['cmake', '--build', '.', '--target', 'install', '--config', config],
cwd=str(build_dir),
check=True,
env=build_env,
)
def _build_sentence_piece(debug):
build_dir = _TP_BASE_DIR / 'sentencepiece' / 'build'
build_dir.mkdir(exist_ok=True)
build_env = os.environ.copy()
config = 'Debug' if debug else 'Release'
if platform.system() == 'Windows':
extra_args = ['-GNinja']
build_env.setdefault('CC', 'cl')
build_env.setdefault('CXX', 'cl')
else:
extra_args = []
subprocess.run(
args=['cmake', '-DSPM_ENABLE_SHARED=OFF', f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
'-DCMAKE_CXX_VISIBILITY_PRESET=hidden',
'-DCMAKE_CXX_FLAGS=' + _get_cxx11_abi(),
'-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',
f'-DCMAKE_BUILD_TYPE={config}'] + extra_args + ['..'],
cwd=str(build_dir),
check=True,
env=build_env,
)
subprocess.run(
args=['cmake', '--build', '.', '--target', 'install', '--config', config],
cwd=str(build_dir),
check=True,
env=build_env,
)
def _configure_third_party(debug):
_build_third_party(debug)
_build_sentence_piece(debug)
_EXT_NAME = 'torchtext._torchtext'
def get_ext_modules(debug=False):
return [
CppExtension(
_EXT_NAME,
_get_srcs(),
libraries=_get_libraries(),
include_dirs=_get_include_dirs(),
library_dirs=_get_library_dirs(),
extra_compile_args=_get_eca(debug),
extra_link_args=_get_ela(debug),
),
]
class BuildExtension(TorchBuildExtension):
def build_extension(self, ext):
if ext.name == _EXT_NAME:
_configure_third_party(self.debug)
super().build_extension(ext)
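# Usage sketch: how a setup.py might consume these helpers. This wiring is an
# illustrative assumption, not the project's actual setup.py, which may pass
# additional arguments.
#
#   from setuptools import setup
#   from build_tools.setup_helpers.extension import get_ext_modules, BuildExtension
#
#   setup(
#       name="torchtext",
#       ext_modules=get_ext_modules(debug=False),
#       cmdclass={"build_ext": BuildExtension},
#   )
#
# BuildExtension.build_extension builds the vendored third-party libraries
# (via _configure_third_party) before compiling the C++ extension itself.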
|
note_seq/chord_inference_test.py
|
tetromino/note-seq
| 113 |
72067
|
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for chord_inference."""
from absl.testing import absltest
from note_seq import chord_inference
from note_seq import sequences_lib
from note_seq import testing_lib
from note_seq.protobuf import music_pb2
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
class ChordInferenceTest(absltest.TestCase):
def testSequenceNotePitchVectors(self):
sequence = music_pb2.NoteSequence()
testing_lib.add_track_to_sequence(
sequence, 0,
[(60, 100, 0.0, 0.0), (62, 100, 0.0, 0.5),
(60, 100, 1.5, 2.5),
(64, 100, 2.0, 2.5), (67, 100, 2.25, 2.75), (70, 100, 2.5, 4.5),
(60, 100, 6.0, 6.0),
])
note_pitch_vectors = chord_inference.sequence_note_pitch_vectors(
sequence, seconds_per_frame=1.0)
expected_note_pitch_vectors = [
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
self.assertEqual(expected_note_pitch_vectors, note_pitch_vectors.tolist())
def testSequenceNotePitchVectorsVariableLengthFrames(self):
sequence = music_pb2.NoteSequence()
testing_lib.add_track_to_sequence(
sequence, 0,
[(60, 100, 0.0, 0.0), (62, 100, 0.0, 0.5),
(60, 100, 1.5, 2.5),
(64, 100, 2.0, 2.5), (67, 100, 2.25, 2.75), (70, 100, 2.5, 4.5),
(60, 100, 6.0, 6.0),
])
note_pitch_vectors = chord_inference.sequence_note_pitch_vectors(
sequence, seconds_per_frame=[1.5, 2.0, 3.0, 5.0])
expected_note_pitch_vectors = [
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
self.assertEqual(expected_note_pitch_vectors, note_pitch_vectors.tolist())
def testInferChordsForSequence(self):
sequence = music_pb2.NoteSequence()
testing_lib.add_track_to_sequence(
sequence, 0,
[(60, 100, 0.0, 1.0), (64, 100, 0.0, 1.0), (67, 100, 0.0, 1.0), # C
(62, 100, 1.0, 2.0), (65, 100, 1.0, 2.0), (69, 100, 1.0, 2.0), # Dm
(60, 100, 2.0, 3.0), (65, 100, 2.0, 3.0), (69, 100, 2.0, 3.0), # F
(59, 100, 3.0, 4.0), (62, 100, 3.0, 4.0), (67, 100, 3.0, 4.0)]) # G
quantized_sequence = sequences_lib.quantize_note_sequence(
sequence, steps_per_quarter=4)
chord_inference.infer_chords_for_sequence(
quantized_sequence, chords_per_bar=2)
expected_chords = [('C', 0.0), ('Dm', 1.0), ('F', 2.0), ('G', 3.0)]
chords = [(ta.text, ta.time) for ta in quantized_sequence.text_annotations]
self.assertEqual(expected_chords, chords)
def testInferChordsForSequenceAddKeySignatures(self):
sequence = music_pb2.NoteSequence()
testing_lib.add_track_to_sequence(
sequence, 0,
[(60, 100, 0.0, 1.0), (64, 100, 0.0, 1.0), (67, 100, 0.0, 1.0), # C
(62, 100, 1.0, 2.0), (65, 100, 1.0, 2.0), (69, 100, 1.0, 2.0), # Dm
(60, 100, 2.0, 3.0), (65, 100, 2.0, 3.0), (69, 100, 2.0, 3.0), # F
(59, 100, 3.0, 4.0), (62, 100, 3.0, 4.0), (67, 100, 3.0, 4.0), # G
(66, 100, 4.0, 5.0), (70, 100, 4.0, 5.0), (73, 100, 4.0, 5.0), # F#
(68, 100, 5.0, 6.0), (71, 100, 5.0, 6.0), (75, 100, 5.0, 6.0), # G#m
(66, 100, 6.0, 7.0), (71, 100, 6.0, 7.0), (75, 100, 6.0, 7.0), # B
(65, 100, 7.0, 8.0), (68, 100, 7.0, 8.0), (73, 100, 7.0, 8.0)]) # C#
quantized_sequence = sequences_lib.quantize_note_sequence(
sequence, steps_per_quarter=4)
chord_inference.infer_chords_for_sequence(
quantized_sequence, chords_per_bar=2, add_key_signatures=True)
expected_key_signatures = [(0, 0.0), (6, 4.0)]
key_signatures = [(ks.key, ks.time)
for ks in quantized_sequence.key_signatures]
self.assertEqual(expected_key_signatures, key_signatures)
def testInferChordsForSequenceWithBeats(self):
sequence = music_pb2.NoteSequence()
testing_lib.add_track_to_sequence(
sequence, 0,
[(60, 100, 0.0, 1.1), (64, 100, 0.0, 1.1), (67, 100, 0.0, 1.1), # C
(62, 100, 1.1, 1.9), (65, 100, 1.1, 1.9), (69, 100, 1.1, 1.9), # Dm
(60, 100, 1.9, 3.0), (65, 100, 1.9, 3.0), (69, 100, 1.9, 3.0), # F
(59, 100, 3.0, 4.5), (62, 100, 3.0, 4.5), (67, 100, 3.0, 4.5)]) # G
testing_lib.add_beats_to_sequence(sequence, [0.0, 1.1, 1.9, 1.9, 3.0])
chord_inference.infer_chords_for_sequence(sequence)
expected_chords = [('C', 0.0), ('Dm', 1.1), ('F', 1.9), ('G', 3.0)]
chords = [(ta.text, ta.time) for ta in sequence.text_annotations
if ta.annotation_type == CHORD_SYMBOL]
self.assertEqual(expected_chords, chords)
if __name__ == '__main__':
absltest.main()
|
data_providers/utils.py
|
nammbash/vision_networks
| 299 |
72070
|
from .cifar import Cifar10DataProvider, Cifar100DataProvider, \
Cifar10AugmentedDataProvider, Cifar100AugmentedDataProvider
from .svhn import SVHNDataProvider
def get_data_provider_by_name(name, train_params):
"""Return required data provider class"""
if name == 'C10':
return Cifar10DataProvider(**train_params)
if name == 'C10+':
return Cifar10AugmentedDataProvider(**train_params)
if name == 'C100':
return Cifar100DataProvider(**train_params)
if name == 'C100+':
return Cifar100AugmentedDataProvider(**train_params)
if name == 'SVHN':
return SVHNDataProvider(**train_params)
else:
print("Sorry, data provider for `%s` dataset "
"was not implemented yet" % name)
exit()
|
tools/grit/grit/format/gen_predetermined_ids.py
|
zealoussnow/chromium
| 14,668 |
72108
|
<reponame>zealoussnow/chromium
#!/usr/bin/env python3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A tool to generate a predetermined resource ids file that can be used as an
input to grit via the -p option. This is meant to be run manually every once in
a while and its output checked in. See tools/gritsettings/README.md for details.
"""
from __future__ import print_function
import os
import re
import sys
# Regular expression for parsing the #define macro format. Matches both the
# version of the macro with allowlist support and the one without. For example,
# Without generate allowlist flag:
# #define IDS_FOO_MESSAGE 1234
# With generate allowlist flag:
# #define IDS_FOO_MESSAGE (::ui::AllowlistedResource<1234>(), 1234)
RESOURCE_EXTRACT_REGEX = re.compile(r'^#define (\S*).* (\d+)\)?$', re.MULTILINE)
ORDERED_RESOURCE_IDS_REGEX = re.compile(r'^Resource=(\d*)$', re.MULTILINE)
def _GetResourceNameIdPairsIter(string_to_scan):
"""Gets an iterator of the resource name and id pairs of the given string.
Scans the input string for lines of the form "#define NAME ID" and returns
an iterator over all matching (NAME, ID) pairs.
Args:
string_to_scan: The input string to scan.
Yields:
A tuple of name and id.
"""
for match in RESOURCE_EXTRACT_REGEX.finditer(string_to_scan):
yield match.group(1, 2)
def _ReadOrderedResourceIds(path):
"""Reads ordered resource ids from the given file.
The resources are expected to be of the format produced by running Chrome
with --print-resource-ids command line.
Args:
path: File path to read resource ids from.
Returns:
An array of ordered resource ids.
"""
ordered_resource_ids = []
with open(path, "r") as f:
for match in ORDERED_RESOURCE_IDS_REGEX.finditer(f.read()):
ordered_resource_ids.append(int(match.group(1)))
return ordered_resource_ids
def GenerateResourceMapping(original_resources, ordered_resource_ids):
"""Generates a resource mapping from the ordered ids and the original mapping.
The returned dict will assign new ids to ordered_resource_ids numerically
increasing from 101.
Args:
original_resources: A dict of original resource ids to resource names.
ordered_resource_ids: An array of ordered resource ids.
Returns:
A dict of resource ids to resource names.
"""
output_resource_map = {}
# 101 is used as the starting value since other parts of GRIT require it to be
# the minimum (e.g. rc_header.py) based on Windows resource numbering.
next_id = 101
for original_id in ordered_resource_ids:
resource_name = original_resources[original_id]
output_resource_map[next_id] = resource_name
next_id += 1
return output_resource_map
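# Illustrative example (values chosen arbitrarily):
#   original_resources   = {200: 'IDS_FOO', 300: 'IDS_BAR'}
#   ordered_resource_ids = [300, 200]
#   GenerateResourceMapping(original_resources, ordered_resource_ids)
#     -> {101: 'IDS_BAR', 102: 'IDS_FOO'}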
def ReadResourceIdsFromFile(file, original_resources):
"""Reads resource ids from a GRIT-produced header file.
Args:
file: File to a GRIT-produced header file to read from.
original_resources: Dict of resource ids to resource names to add to.
"""
for resource_name, resource_id in _GetResourceNameIdPairsIter(file.read()):
original_resources[int(resource_id)] = resource_name
def _ReadOriginalResourceIds(out_dir):
"""Reads resource ids from GRIT header files in the specified directory.
Args:
out_dir: A Chrome build output directory (e.g. out/gn) to scan.
Returns:
A dict of resource ids to resource names.
"""
original_resources = {}
for root, dirnames, filenames in os.walk(out_dir + '/gen'):
for filename in filenames:
if filename.endswith(('_resources.h', '_settings.h', '_strings.h')):
with open(os.path.join(root, filename), "r") as f:
ReadResourceIdsFromFile(f, original_resources)
return original_resources
def _GeneratePredeterminedIdsFile(ordered_resources_file, out_dir):
"""Generates a predetermined ids file.
Args:
ordered_resources_file: File path to read ordered resource ids from.
out_dir: A Chrome build output directory (e.g. out/gn) to scan.
  The mapping is printed to stdout, one "<resource_name> <new_id>" pair per line.
"""
original_resources = _ReadOriginalResourceIds(out_dir)
ordered_resource_ids = _ReadOrderedResourceIds(ordered_resources_file)
output_resource_map = GenerateResourceMapping(original_resources,
ordered_resource_ids)
for res_id in sorted(output_resource_map.keys()):
print(output_resource_map[res_id], res_id)
def main(argv):
if len(argv) != 2:
print("usage: gen_predetermined_ids.py <ordered_resources_file> <out_dir>")
sys.exit(1)
ordered_resources_file, out_dir = argv[0], argv[1]
_GeneratePredeterminedIdsFile(ordered_resources_file, out_dir)
if '__main__' == __name__:
main(sys.argv[1:])
|
dev/scripts/clear_testing.py
|
rubik-ai/koku
| 157 |
72140
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Clear out our local testing directories."""
import argparse
import os
import shutil
TESTING_DIRS = [
"local_providers/aws_local",
"local_providers/aws_local_0",
"local_providers/aws_local_1",
"local_providers/aws_local_2",
"local_providers/aws_local_3",
"local_providers/aws_local_4",
"local_providers/aws_local_5",
"local_providers/azure_local",
"local_providers/gcp_local",
"local_providers/gcp_local_0",
"local_providers/gcp_local_1",
"local_providers/gcp_local_2",
"local_providers/gcp_local_3",
"local_providers/insights_local",
"pvc_dir/insights_local",
"pvc_dir/processing",
"parquet_data",
]
def main(*args, **kwargs):
testing_path = kwargs["testing_path"]
paths_to_clear = [f"{testing_path}/{directory}" for directory in TESTING_DIRS]
for path in paths_to_clear:
try:
print(f"Checking {path}")
dirs_to_remove = [f.path for f in os.scandir(path) if f.is_dir()]
for directory in dirs_to_remove:
print(f"Removing {directory}")
shutil.rmtree(directory)
except FileNotFoundError as err:
print(err)
if __name__ == "__main__":
PARSER = argparse.ArgumentParser()
PARSER.add_argument("-p", "--path", dest="testing_path", help="The path to the testing directory", required=True)
ARGS = vars(PARSER.parse_args())
main(**ARGS)
|
moose/tshark_to_raw.py
|
H1d3r/malware-research
| 322 |
72142
|
#!/usr/bin/env python3
#
# Code related to ESET's Linux/Moose research
# For feedback or questions contact us at: <EMAIL>
# https://github.com/eset/malware-research/
#
# This code is provided to the community under the two-clause BSD license as
# follows:
#
# Copyright (C) 2015 ESET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# <NAME> <<EMAIL>>
#
# Processes output of pcap-extract-traffic.sh and dumps raw binary contained
# in the traffic for further processing.
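#
# Expected input: one TCP segment per line (read from stdin or from the files
# given on the command line), tab-separated as
#   <tcp stream id>\t<timestamp>\t<hex-encoded payload>
# Each reassembled stream is written out as tcpstream-NNNNNNNNN.raw.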
import fileinput
class TcpParser(object):
"""Parse tshark output and reassemble TCP data of same stream ids"""
def __init__(self):
# initialized to false because stream_id of 0 is valid in wireshark
self.stream_id = False
self.data = bytearray()
self.ts = 0
def parse_line(self, line):
stream_id, timestamp, data = line.split("\t")
data = data[:-1]
# first run: initialize
if self.stream_id == False:
self.stream_id = stream_id
self.ts = timestamp
            self.data = bytearray.fromhex(data)
            # nothing to emit yet; keep accumulating this stream
            return False
# stream finished: return previous data and start storing new data
elif stream_id != self.stream_id:
tcpData = TcpStreamData(self.ts, self.stream_id, self.data)
self.stream_id = stream_id
self.ts = timestamp
self.data = bytearray.fromhex(data)
return tcpData
# still in stream append the data
else:
self.data.extend(bytearray.fromhex(data))
return False
def finalize(self):
"""kind of a hack to get last stream"""
tcpData = TcpStreamData(self.ts, self.stream_id, self.data)
self.__init__()
return tcpData
class TcpStreamData(object):
"""Simple data container for TCP reassembled data"""
def __init__(self, ts, stream_id, data):
self.timestamp = ts
self._id = stream_id
self.data = data
t = TcpParser()
for line in fileinput.input():
tcp_stream = t.parse_line(line)
if tcp_stream != False:
fn = 'tcpstream-{:09d}.raw'.format(int(tcp_stream._id))
with open(fn, 'wb') as f:
f.write(tcp_stream.data)
# last stream
tcp_stream = t.finalize()
fn = 'tcpstream-{:09d}.raw'.format(int(tcp_stream._id))
with open(fn, 'wb') as f:
f.write(tcp_stream.data)
|
django/account/models.py
|
acid-chicken/tweet-generator
| 141 |
72157
|
<reponame>acid-chicken/tweet-generator
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.contrib.auth.hashers import make_password
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class CustomUserManager(UserManager):
use_in_migrations = True
def _create_user(self, screen_name, twitter_id, is_protected, password, **extra_fields):
"""
        Create and save a user with the given screen name, Twitter id and password.
"""
if not screen_name:
raise ValueError('The given username must be set')
user = self.model(screen_name=screen_name, **extra_fields)
user.password = <PASSWORD>(password)
user.twitter_id = twitter_id
user.is_protected = is_protected
user.save(using=self._db)
return user
def create_user(self, screen_name, twitter_id=0, is_protected=False, password=<PASSWORD>, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(screen_name, twitter_id, is_protected, password, **extra_fields)
def create_superuser(self, screen_name, twitter_id=0, is_protected=False, password=<PASSWORD>, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(screen_name, twitter_id, is_protected, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
screen_name = models.CharField(
'screen name',
unique=True,
max_length=50,
)
twitter_id = models.CharField(
'Twitter id',
unique=True,
max_length=32,
)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
is_protected = models.BooleanField(
default=False
)
access_token = models.CharField(
'Access token',
max_length=100
)
access_token_secret = models.CharField(
'Access token secret',
max_length=100
)
objects = CustomUserManager()
USERNAME_FIELD = 'screen_name'
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def clean(self):
super().clean()
|
tests/test_motd.py
|
Varriount/sanic
| 1,883 |
72160
|
import logging
import platform
from unittest.mock import Mock
from sanic import __version__
from sanic.application.logo import BASE_LOGO
from sanic.application.motd import MOTDTTY
def test_logo_base(app, run_startup):
logs = run_startup(app)
assert logs[0][1] == logging.DEBUG
assert logs[0][2] == BASE_LOGO
def test_logo_false(app, run_startup):
app.config.LOGO = False
logs = run_startup(app)
banner, port = logs[1][2].rsplit(":", 1)
assert logs[0][1] == logging.INFO
assert banner == "Goin' Fast @ http://127.0.0.1"
assert int(port) > 0
def test_logo_true(app, run_startup):
app.config.LOGO = True
logs = run_startup(app)
assert logs[0][1] == logging.DEBUG
assert logs[0][2] == BASE_LOGO
def test_logo_custom(app, run_startup):
app.config.LOGO = "My Custom Logo"
logs = run_startup(app)
assert logs[0][1] == logging.DEBUG
assert logs[0][2] == "My Custom Logo"
def test_motd_with_expected_info(app, run_startup):
logs = run_startup(app)
assert logs[1][2] == f"Sanic v{__version__}"
assert logs[3][2] == "mode: debug, single worker"
assert logs[4][2] == "server: sanic"
assert logs[5][2] == f"python: {platform.python_version()}"
assert logs[6][2] == f"platform: {platform.platform()}"
def test_motd_init():
_orig = MOTDTTY.set_variables
MOTDTTY.set_variables = Mock()
motd = MOTDTTY(None, "", {}, {})
motd.set_variables.assert_called_once()
MOTDTTY.set_variables = _orig
def test_motd_display(caplog):
motd = MOTDTTY(" foobar ", "", {"one": "1"}, {"two": "2"})
with caplog.at_level(logging.INFO):
motd.display()
version_line = f"Sanic v{__version__}".center(motd.centering_length)
assert (
"".join(caplog.messages)
== f"""
┌────────────────────────────────┐
│ {version_line} │
│ │
├───────────────────────┬────────┤
│ foobar │ one: 1 │
| ├────────┤
│ │ two: 2 │
└───────────────────────┴────────┘
"""
)
|
jupytext/cell_metadata.py
|
st--/jupytext
| 5,378 |
72184
|
"""
Convert between text notebook metadata and jupyter cell metadata.
Standard cell metadata are documented here:
https://ipython.org/ipython-doc/3/notebook/nbformat.html#cell-metadata
"""
import ast
import re
from json import dumps, loads
try:
from json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from .languages import _JUPYTER_LANGUAGES
# Map R Markdown's "echo", "results" and "include" to "hide_input" and "hide_output", which are understood by the
# `runtools` extension for Jupyter notebook, and by nbconvert (use the `hide_input_output.tpl` template).
# See http://jupyter-contrib-nbextensions.readthedocs.io/en/latest/nbextensions/runtools/readme.html
_RMARKDOWN_TO_RUNTOOLS_OPTION_MAP = [
(("include", "FALSE"), [("hide_input", True), ("hide_output", True)]),
(("echo", "FALSE"), [("hide_input", True)]),
(("results", "'hide'"), [("hide_output", True)]),
(("results", '"hide"'), [("hide_output", True)]),
]
# Alternatively, Jupytext can also map the Jupyter Book options to R Markdown
_RMARKDOWN_TO_JUPYTER_BOOK_MAP = [
(("include", "FALSE"), "remove_cell"),
(("echo", "FALSE"), "remove_input"),
(("results", "'hide'"), "remove_output"),
(("results", '"hide"'), "remove_output"),
]
_JUPYTEXT_CELL_METADATA = [
# Pre-jupytext metadata
"skipline",
"noskipline",
# Jupytext metadata
"cell_marker",
"lines_to_next_cell",
"lines_to_end_of_cell_marker",
]
_IGNORE_CELL_METADATA = ",".join(
"-{}".format(name)
for name in [
# Frequent cell metadata that should not enter the text representation
# (these metadata are preserved in the paired Jupyter notebook).
"autoscroll",
"collapsed",
"scrolled",
"trusted",
"execution",
"ExecuteTime",
]
+ _JUPYTEXT_CELL_METADATA
)
_IDENTIFIER_RE = re.compile(r"^[a-zA-Z_\.]+[a-zA-Z0-9_\.]*$")
class RLogicalValueError(Exception):
"""Incorrect value for R boolean"""
class RMarkdownOptionParsingError(Exception):
"""Error when parsing Rmd cell options"""
def _py_logical_values(rbool):
if rbool in ["TRUE", "T"]:
return True
if rbool in ["FALSE", "F"]:
return False
raise RLogicalValueError
def metadata_to_rmd_options(language, metadata, use_runtools=False):
"""Convert language and metadata information to their rmd representation"""
options = (language or "R").lower()
if "name" in metadata:
options += " " + metadata["name"] + ","
del metadata["name"]
if use_runtools:
for rmd_option, jupyter_options in _RMARKDOWN_TO_RUNTOOLS_OPTION_MAP:
if all(
[
metadata.get(opt_name) == opt_value
for opt_name, opt_value in jupyter_options
]
):
options += " {}={},".format(
rmd_option[0], "FALSE" if rmd_option[1] is False else rmd_option[1]
)
for opt_name, _ in jupyter_options:
metadata.pop(opt_name)
else:
for rmd_option, tag in _RMARKDOWN_TO_JUPYTER_BOOK_MAP:
if tag in metadata.get("tags", []):
options += " {}={},".format(
rmd_option[0], "FALSE" if rmd_option[1] is False else rmd_option[1]
)
metadata["tags"] = [i for i in metadata["tags"] if i != tag]
if not metadata["tags"]:
metadata.pop("tags")
for opt_name in metadata:
opt_value = metadata[opt_name]
opt_name = opt_name.strip()
if opt_name == "active":
options += ' {}="{}",'.format(opt_name, str(opt_value))
elif isinstance(opt_value, bool):
options += " {}={},".format(opt_name, "TRUE" if opt_value else "FALSE")
elif isinstance(opt_value, list):
options += " {}={},".format(
opt_name,
"c({})".format(", ".join(['"{}"'.format(str(v)) for v in opt_value])),
)
elif isinstance(opt_value, str):
if opt_value.startswith("#R_CODE#"):
options += " {}={},".format(opt_name, opt_value[8:])
elif '"' not in opt_value:
options += ' {}="{}",'.format(opt_name, opt_value)
else:
options += " {}='{}',".format(opt_name, opt_value)
else:
options += " {}={},".format(opt_name, str(opt_value))
if not language:
options = options[2:]
return options.strip(",").strip()
def update_metadata_from_rmd_options(name, value, metadata, use_runtools=False):
"""Map the R Markdown cell visibility options to the Jupyter ones"""
if use_runtools:
for rmd_option, jupyter_options in _RMARKDOWN_TO_RUNTOOLS_OPTION_MAP:
if name == rmd_option[0] and value == rmd_option[1]:
for opt_name, opt_value in jupyter_options:
metadata[opt_name] = opt_value
return True
else:
for rmd_option, tag in _RMARKDOWN_TO_JUPYTER_BOOK_MAP:
if name == rmd_option[0] and value == rmd_option[1]:
metadata.setdefault("tags", []).append(tag)
return True
return False
class ParsingContext:
"""
Class for determining where to split rmd options
"""
parenthesis_count = 0
curly_bracket_count = 0
square_bracket_count = 0
in_single_quote = False
in_double_quote = False
def __init__(self, line):
self.line = line
def in_global_expression(self):
"""Currently inside an expression"""
return (
self.parenthesis_count == 0
and self.curly_bracket_count == 0
and self.square_bracket_count == 0
and not self.in_single_quote
and not self.in_double_quote
)
def count_special_chars(self, char, prev_char):
"""Update parenthesis counters"""
if char == "(":
self.parenthesis_count += 1
elif char == ")":
self.parenthesis_count -= 1
if self.parenthesis_count < 0:
raise RMarkdownOptionParsingError(
'Option line "{}" has too many '
"closing parentheses".format(self.line)
)
elif char == "{":
self.curly_bracket_count += 1
elif char == "}":
self.curly_bracket_count -= 1
if self.curly_bracket_count < 0:
raise RMarkdownOptionParsingError(
'Option line "{}" has too many '
"closing curly brackets".format(self.line)
)
elif char == "[":
self.square_bracket_count += 1
elif char == "]":
self.square_bracket_count -= 1
if self.square_bracket_count < 0:
raise RMarkdownOptionParsingError(
'Option line "{}" has too many '
"closing square brackets".format(self.line)
)
elif char == "'" and prev_char != "\\":
self.in_single_quote = not self.in_single_quote
elif char == '"' and prev_char != "\\":
self.in_double_quote = not self.in_double_quote
def parse_rmd_options(line):
"""
    Given an R Markdown option line, returns a list of (name, value) pairs
:param line:
:return:
"""
parsing_context = ParsingContext(line)
result = []
prev_char = ""
name = ""
value = ""
for char in "," + line + ",":
if parsing_context.in_global_expression():
if char == ",":
if name != "" or value != "":
if result and name == "":
raise RMarkdownOptionParsingError(
'Option line "{}" has no name for '
"option value {}".format(line, value)
)
result.append((name.strip(), value.strip()))
name = ""
value = ""
elif char == "=":
if name == "":
name = value
value = ""
else:
value += char
else:
parsing_context.count_special_chars(char, prev_char)
value += char
else:
parsing_context.count_special_chars(char, prev_char)
value += char
prev_char = char
if not parsing_context.in_global_expression():
raise RMarkdownOptionParsingError(
'Option line "{}" is not properly terminated'.format(line)
)
return result
def rmd_options_to_metadata(options, use_runtools=False):
"""Parse rmd options and return a metadata dictionary"""
options = re.split(r"\s|,", options, 1)
if len(options) == 1:
language = options[0]
chunk_options = []
else:
language, others = options
language = language.rstrip(" ,")
others = others.lstrip(" ,")
chunk_options = parse_rmd_options(others)
language = "R" if language == "r" else language
metadata = {}
for i, opt in enumerate(chunk_options):
name, value = opt
if i == 0 and name == "":
metadata["name"] = value
continue
if update_metadata_from_rmd_options(
name, value, metadata, use_runtools=use_runtools
):
continue
try:
metadata[name] = _py_logical_values(value)
continue
except RLogicalValueError:
metadata[name] = value
for name in metadata:
try_eval_metadata(metadata, name)
if "eval" in metadata and not is_active(".Rmd", metadata):
del metadata["eval"]
return metadata.get("language") or language, metadata
def try_eval_metadata(metadata, name):
"""Evaluate the metadata to a python object, if possible"""
value = metadata[name]
if not isinstance(value, str):
return
if (value.startswith('"') and value.endswith('"')) or (
value.startswith("'") and value.endswith("'")
):
metadata[name] = value[1:-1]
return
if value.startswith("c(") and value.endswith(")"):
value = "[" + value[2:-1] + "]"
elif value.startswith("list(") and value.endswith(")"):
value = "[" + value[5:-1] + "]"
try:
metadata[name] = ast.literal_eval(value)
except (SyntaxError, ValueError):
if name != "name":
metadata[name] = "#R_CODE#" + value
return
def is_active(ext, metadata, default=True):
"""Is the cell active for the given file extension?"""
if metadata.get("run_control", {}).get("frozen") is True:
return ext == ".ipynb"
for tag in metadata.get("tags", []):
if tag.startswith("active-"):
return ext.replace(".", "") in tag.split("-")
if "active" not in metadata:
return default
return ext.replace(".", "") in re.split(r"\.|,", metadata["active"])
def metadata_to_double_percent_options(metadata, plain_json):
"""Metadata to double percent lines"""
text = []
if "title" in metadata:
text.append(metadata.pop("title"))
if "cell_depth" in metadata:
text.insert(0, "%" * metadata.pop("cell_depth"))
if "cell_type" in metadata:
text.append(
"[{}]".format(metadata.pop("region_name", metadata.pop("cell_type")))
)
return metadata_to_text(" ".join(text), metadata, plain_json=plain_json)
def incorrectly_encoded_metadata(text):
"""Encode a text that Jupytext cannot parse as a cell metadata"""
return {"incorrectly_encoded_metadata": text}
def isidentifier(text):
"""Can this text be a proper key?"""
return _IDENTIFIER_RE.match(text)
def is_jupyter_language(language):
"""Is this a jupyter language?"""
for lang in _JUPYTER_LANGUAGES:
if language.lower() == lang.lower():
return True
return False
def parse_key_equal_value(text):
"""Parse a string of the form 'key1=value1 key2=value2'"""
# Empty metadata?
text = text.strip()
if not text:
return {}
last_space_pos = text.rfind(" ")
# Just an identifier?
if not text.startswith("--") and isidentifier(text[last_space_pos + 1 :]):
key = text[last_space_pos + 1 :]
value = None
result = {key: value}
if last_space_pos > 0:
result.update(parse_key_equal_value(text[:last_space_pos]))
return result
# Iterate on the '=' signs, starting from the right
equal_sign_pos = None
while True:
equal_sign_pos = text.rfind("=", None, equal_sign_pos)
if equal_sign_pos < 0:
return incorrectly_encoded_metadata(text)
# Do we have an identifier on the left of the equal sign?
prev_whitespace = text[:equal_sign_pos].rstrip().rfind(" ")
key = text[prev_whitespace + 1 : equal_sign_pos].strip()
if not isidentifier(key.replace(".", "")):
continue
try:
value = relax_json_loads(text[equal_sign_pos + 1 :])
except (ValueError, SyntaxError):
# try with a longer expression
continue
# Combine with remaining metadata
metadata = (
parse_key_equal_value(text[:prev_whitespace]) if prev_whitespace > 0 else {}
)
# Append our value
metadata[key] = value
# And return
return metadata
def relax_json_loads(text, catch=False):
"""Parse a JSON string or similar"""
text = text.strip()
try:
return loads(text)
except JSONDecodeError:
pass
if not catch:
return ast.literal_eval(text)
try:
return ast.literal_eval(text)
except (ValueError, SyntaxError):
pass
return incorrectly_encoded_metadata(text)
def is_json_metadata(text):
"""Is this a JSON metadata?"""
first_curly_bracket = text.find("{")
if first_curly_bracket < 0:
return False
first_equal_sign = text.find("=")
if first_equal_sign < 0:
return True
return first_curly_bracket < first_equal_sign
def text_to_metadata(text, allow_title=False):
"""Parse the language/cell title and associated metadata"""
# Parse the language or cell title = everything before the last blank space before { or =
text = text.strip()
first_curly_bracket = text.find("{")
first_equal_sign = text.find("=")
if first_curly_bracket < 0 or (0 <= first_equal_sign < first_curly_bracket):
# this is a key=value metadata line
# case one = the options may be preceded with a language
if not allow_title:
if is_jupyter_language(text):
return text, {}
if " " not in text:
return "", parse_key_equal_value(text)
language, options = text.split(" ", 1)
if is_jupyter_language(language):
return language, parse_key_equal_value(options)
return "", parse_key_equal_value(text)
# case two = a title may be before the options
# we split the title into words
if first_equal_sign >= 0:
words = text[:first_equal_sign].split(" ")
# last word is the key before the equal sign!
while words and not words[-1]:
words.pop()
if words:
words.pop()
else:
words = text.split(" ")
# and we remove words on the right that are attributes (they start with '.')
while words and (not words[-1].strip() or words[-1].startswith(".")):
words.pop()
title = " ".join(words)
return title, parse_key_equal_value(text[len(title) :])
# json metadata line
return (
text[:first_curly_bracket].strip(),
relax_json_loads(text[first_curly_bracket:], catch=True),
)
def metadata_to_text(language_or_title, metadata=None, plain_json=False):
"""Write the cell metadata in the format key=value"""
# Was metadata the first argument?
if metadata is None:
metadata, language_or_title = language_or_title, metadata
metadata = {
key: metadata[key] for key in metadata if key not in _JUPYTEXT_CELL_METADATA
}
text = [language_or_title] if language_or_title else []
if language_or_title is None:
if (
"title" in metadata
and "{" not in metadata["title"]
and "=" not in metadata["title"]
):
text.append(metadata.pop("title"))
if plain_json:
if metadata:
text.append(dumps(metadata))
else:
for key in metadata:
if key == "incorrectly_encoded_metadata":
text.append(metadata[key])
elif metadata[key] is None:
text.append(key)
else:
text.append("{}={}".format(key, dumps(metadata[key])))
return " ".join(text)
|
asciimol/app/renderer.py
|
whitead/asciiMol
| 284 |
72192
|
import numpy as np
class Renderer:
def __init__(self, height, width, config):
self.height = height
self.width = width
self.content = None
self.zbuffer = None
self.m = None
self.f = 1.0
self.resize(height, width)
self.colors = config.colors
self.bonds = config.bonds
self.btoggle = len(self.bonds) > 0
self.pos, self.sym = np.array(config.coordinates), config.symbols
self.ztoggle = True
self.zoom = 1.0
self.rot = np.identity(3)
self.rotcounter = [0, 0, 0]
self.draw_scene()
def draw_scene(self):
"""
A super simple rasterizer. For now, just draw single character atom symbols at their rounded x and y
positions.
:return: True if nothing bad happened.
"""
mx, my = self.m
rot = np.matmul(self.pos, self.rot)
self.clear()
# Draw bonds
for bond in self.bonds:
i, j = bond
# if bond is (i, j) with i == j, just draw the label (no bonds)
if i == j:
x, y, z = rot[i]
xp, yp = round(float(x) * self.f * self.zoom + mx), round(float(y) * self.zoom + my)
if 1 < xp < self.width - 2 and 1 < yp < self.height - 3 and float(z) < self.zbuffer[yp][xp]:
self.zbuffer[yp][xp] = float(z)
self.content[yp][xp] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
# else draw the bond with the labels at the end points
else:
# Draw the two labels at the end points
xa, ya, za = rot[i]
xa = float(xa) * self.f * self.zoom + mx
ya = float(ya) * self.zoom + my
xb, yb, zb = rot[j]
xb = float(xb) * self.f * self.zoom + mx
yb = float(yb) * self.zoom + my
xap, yap = round(xa), round(ya)
xbp, ybp = round(xb), round(yb)
if 1 < xap < self.width - 2 and 1 < yap < self.height - 3 and float(za) < self.zbuffer[yap][xap]:
self.zbuffer[yap][xap] = float(za)
self.content[yap][xap] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
if 1 < xbp < self.width - 2 and 1 < ybp < self.height - 3 and float(zb) < self.zbuffer[ybp][xbp]:
self.zbuffer[ybp][xbp] = float(zb)
self.content[ybp][xbp] = self.sym[j][0].upper() + "," + self.colors[self.sym[j].upper()]
if not self.btoggle:
continue
# Then start at xap+1 and go to xbp-1, drawing line segments
sy = -1 if ya > yb else 1
sx = -1 if xa > xb else 1
sz = -1 if za > zb else 1
dx = float((xb - xa) / (yb - ya)) if abs(yb - ya) > 0 else 0
dy = float((yb - ya) / (xb - xa)) if abs(xb - xa) > 0 else 0
dz = float((zb - za) / (xb - xa)) if abs(xb - xa) > 0 else 0
if abs(dy) <= 1:
for k in range(1, abs(xap - xbp)):
xk = xap + sx * k
yk = round(float(ya) + sx * k * dy)
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(xap - xbp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
self.content[yk][xk] = "·,%s" % col
else:
for k in range(1, abs(yap - ybp)):
xk = round((float(xa) + sy * k * dx))
yk = yap + sy * k
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(yap - ybp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
self.content[yk][xk] = "·,%s" % col
return True
def rotate(self, direction):
"""
Set an internal rotation matrix that is applied to the coordinates before every render.
:param direction: 1 and -1 are x and -x, 2 is either z/y, depending on whether the ztoggle is active or not
"""
if direction == 1:
self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, -0.0872], [0.0, 0.0872, 0.9962]])
if self.rotcounter[0] + 5 > 360:
self.rotcounter[0] = 0
self.rotcounter[0] += 5
elif direction == -1:
self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, 0.0872], [0.0, -0.0872, 0.9962]])
if self.rotcounter[0] - 5 < 0:
self.rotcounter[0] = 360
self.rotcounter[0] -= 5
elif direction == 2 and self.ztoggle:
self.rot = np.matmul(self.rot, [[0.9962, -0.0872, 0.0], [0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]])
if self.rotcounter[2] + 5 > 360:
self.rotcounter[2] = 0
else:
self.rotcounter[2] += 5
elif direction == -2 and self.ztoggle:
self.rot = np.matmul(self.rot, [[0.9962, 0.0872, 0.0], [-0.0872, 0.9962, 0.0], [0.0, 0.0, 1.0]])
if self.rotcounter[2] - 5 < 0:
self.rotcounter[2] = 360
else:
self.rotcounter[2] -= 5
elif direction == 2:
self.rot = np.matmul(self.rot, [[0.9962, 0.0, 0.0872], [0.0, 1.0, 0.0], [-0.0872, 0.0, 0.9962]])
if self.rotcounter[1] + 5 > 360:
self.rotcounter[1] = 0
else:
self.rotcounter[1] += 5
elif direction == -2:
self.rot = np.matmul(self.rot, [[0.9962, 0.0, -0.0872], [0.0, 1.0, 0.0], [0.0872, 0.0, 0.9962]])
if self.rotcounter[1] - 5 < 0:
self.rotcounter[1] = 360
else:
self.rotcounter[1] -= 5
def reset_view(self):
"""
Reset the view to the starting values.
"""
self.zoom = 1.0
self.rotcounter = [0, 0, 0]
self.rot = np.identity(3)
self.m = round(self.width / 2), round(self.height / 2)
def resize(self, height, width):
"""
Resize the screen. Known issue: crashes if the resize is faster than the framerate.
"""
self.height = height
self.width = width
self.content = [[" ,0"] * self.width for n in range(self.height - 2)]
self.zbuffer = [[10000.0] * self.width for n in range(self.height - 2)]
self.m = round(self.width / 2), round(self.height / 2)
# Since terminal characters are higher than wide, I correct for this by multiplying the x by f
# so that it appears wider. 2.25 is what looks good on my terminals, but might be
# nice to have a general way of determining the optimal value
self.f = 2
def clear(self):
"""
Clear the canvas and redraw the border.
"""
for i in range(self.height - 2):
for j in range(self.width):
self.zbuffer[i][j] = 10000.0
for i in range(self.height - 2):
for j in range(self.width):
if i == 0 and j == 0:
self.content[i][j] = "┌,0"
elif (i == 0 or i == self.height - 3) and 0 < j < self.width - 1:
self.content[i][j] = "─,0"
elif i == 0 and j == self.width - 1:
self.content[i][j] = "┐,0"
elif i < self.height - 3 and (j == 0 or j == self.width - 1):
self.content[i][j] = "│,0"
elif i == self.height - 3 and j == 0:
self.content[i][j] = "└,0"
elif i == self.height - 3 and j == self.width - 1:
self.content[i][j] = "┘,0"
else:
self.content[i][j] = " ,0"
|
tensorflow_toolkit/image_retrieval/image_retrieval/dataset.py
|
morkovka1337/openvino_training_extensions
| 256 |
72199
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import json
import random
import cv2
import numpy as np
import tensorflow as tf
from image_retrieval.common import preproces_image, depreprocess_image, fit_to_max_size, from_list
def blur(image):
kernel = np.ones((3, 3), np.float32) / 9
image = cv2.filter2D(image, -1, kernel)
return image
def gray_noise(image):
if np.mean(image) > 100:
gray = np.random.uniform(0.0, 100.0, image.shape[0:2])
gray3 = np.array([gray, gray, gray])
gray3 = np.transpose(gray3, (1, 2, 0))
gray3 = cv2.blur(gray3, ksize=(7, 7))
image -= gray3
image = np.clip(image, 0.0, 255.0)
return image
@tf.function
def tf_random_crop_and_resize(image, input_size):
min_size = tf.minimum(tf.shape(image)[0], tf.shape(image)[1])
crop_size = tf.random.uniform((), min_size // 2, min_size, dtype=tf.int32)
crop = tf.image.random_crop(image, (crop_size, crop_size, 3))
var_thr = 100
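    # Re-sample the crop up to 10 times if it is nearly uniform (total per-channel
    # variance below var_thr), which usually means an empty background region.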
for _ in tf.range(10):
moments = tf.nn.moments(tf.reshape(crop, (-1, 3)), axes=0)
if tf.less(tf.reduce_sum(moments[1]), tf.constant(var_thr, dtype=tf.float32)):
crop = tf.image.random_crop(image, (crop_size, crop_size, 3))
else:
break
moments = tf.nn.moments(tf.reshape(crop, (-1, 3)), axes=0)
if tf.less(tf.reduce_sum(moments[1]), tf.constant(var_thr, dtype=tf.float32)):
crop = tf.image.random_crop(image, (tf.shape(image)[0], tf.shape(image)[1], 3))
crop = tf.cast(tf.expand_dims(crop, axis=0), tf.float32)
crop = tf.image.resize(crop, (input_size, input_size))
crop = tf.squeeze(crop, axis=0)
return crop
@tf.function
def tf_distort_color(image):
""" Distorts color. """
image = image / 255.0
image = image[:, :, ::-1]
brightness_max_delta = 16. / 255.
color_ordering = tf.random.uniform([], maxval=5, dtype=tf.int32)
if tf.equal(color_ordering, 0):
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.1)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif tf.equal(color_ordering, 1):
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.1)
elif tf.equal(color_ordering, 2):
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.1)
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif tf.equal(color_ordering, 3):
image = tf.image.random_hue(image, max_delta=0.1)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
image = tf.clip_by_value(image, 0.0, 1.0)
image = image * 255
image = image[:, :, ::-1]
return image
class Dataset:
def __init__(self, images_paths, labels, is_real, input_size, batch_size, params,
return_original=False):
self.images_paths = images_paths
self.input_size = input_size
self.batch_size = batch_size
self.params = params
self.return_original = return_original
self.loaded_images = []
self.labels = Dataset.reassign_labels(labels)
self.is_real = is_real
if self.params['preload']:
self.preload()
if self.params['pretile']:
self.pretile()
self.images_indexes_per_class = collections.defaultdict(list)
for index, label in enumerate(self.labels):
self.images_indexes_per_class[label].append(index)
if self.params['weighted_sampling']:
self.calc_sampling_probs()
def calc_sampling_probs(self):
        ''' Counts the number of images per class and returns a probability distribution
        so that the distribution of image classes becomes uniform.
        '''
frequency = {l: self.labels.count(l) for l in set(self.labels)}
probs = np.empty((len(self.labels)), dtype=np.float32)
for idx, l in enumerate(self.labels):
probs[idx] = 1.0 / frequency[l]
self.probs = probs / np.sum(probs)
def preload(self):
''' Pre-loads images in RAM. '''
for image_path in self.images_paths:
self.loaded_images.append(cv2.imread(image_path))
def pretile(self):
''' Pre-tiles images in RAM. Makes training faster but requires huge amount of RAM. '''
tiled_labels = []
tiled_is_real = []
tiled_loaded_images = []
for read_image, label, real in zip(self.loaded_images, self.labels, self.is_real):
if not real:
for n in range(2, self.params['max_tiling'] + 1):
image = self.tile(read_image, n)
tiled_labels.append(label)
tiled_is_real.append(real)
tiled_loaded_images.append(image)
self.labels.extend(tiled_labels)
self.is_real.extend(tiled_is_real)
self.loaded_images.extend(tiled_loaded_images)
def tile(self, image, n):
''' Tiles images taking their aspect ratios into account. '''
aspect_ratio = image.shape[1] / image.shape[0]
if aspect_ratio < 1:
w_repeats = n
h_repeats = max(1 if n != self.params['max_tiling'] else 2, int(n * aspect_ratio))
else:
h_repeats = n
w_repeats = max(1 if n != self.params['max_tiling'] else 2, int(n / aspect_ratio))
image = np.tile(image, (h_repeats, w_repeats, 1))
fit_size = self.input_size * 3
if image.shape[0] > fit_size or image.shape[1] > fit_size:
image = fit_to_max_size(image, self.input_size * 3)
return image
def sample_index(self):
''' Samples indexes. '''
choices = list(range(len(self.labels)))
if self.params['weighted_sampling']:
choices = np.random.choice(choices, len(self.labels), p=self.probs)
elif self.params['shuffle']:
np.random.shuffle(choices)
# duplication is required for triplet loss at least.
duplicated_choices = []
for choice in choices:
for _ in range(self.params['duplicate_n_times']):
duplicated_choices.append(int(
np.random.choice(
self.images_indexes_per_class[self.labels[choice]],
1)))
for choice in duplicated_choices:
yield [choice]
def read(self, index):
''' Reads an image from RAM or disk and returns it with corresponding class label. '''
if self.params['preload']:
image = self.loaded_images[index[0]].astype(np.float32)
else:
image = cv2.imread(self.images_paths[index[0]]).astype(np.float32)
if not self.params['pretile'] and not self.is_real[index[0]]:
n = random.randint(1, self.params['max_tiling'])
image = self.tile(image, n)
return image, self.labels[index[0]]
def cv2_rotate(self, image):
        ''' Rotates images by a random angle using OpenCV. '''
c_xy = image.shape[1] / 2, image.shape[0] / 2
angle = random.uniform(-self.params['add_rot_angle'],
self.params['add_rot_angle']) * 57.2958
if self.params['rot90']:
angle += random.randint(0, 3) * 180
rotation_matrix = cv2.getRotationMatrix2D(c_xy, angle, 1)
img_rotation = cv2.warpAffine(image, rotation_matrix, (image.shape[1], image.shape[0]))
return img_rotation
def cv2_noise_and_blur(self, image):
        ''' Adds noise (darkening the image) and blur. '''
image = image.astype(np.float32)
if self.params['apply_gray_noise'] and np.random.choice([True, False]):
image = gray_noise(image)
if self.params['blur'] and np.random.choice([True, False]):
image = blur(image)
return image
def train_preprocess(self, choice):
''' Applies training preprocessing. '''
original, label = tf.numpy_function(self.read, [choice], [tf.float32, tf.int64])
image = tf_random_crop_and_resize(original, self.input_size)
image, = tf.numpy_function(self.cv2_noise_and_blur, [image], [tf.float32])
if self.params['horizontal_flip']:
image = tf.image.random_flip_left_right(image)
if self.params['vertical_flip']:
image = tf.image.random_flip_up_down(image)
if self.params['add_rot_angle'] > 0 or self.params['rot90']:
image, = tf.numpy_function(self.cv2_rotate, [image], [tf.float32])
image = tf_distort_color(image)
image = preproces_image(image)
if self.return_original:
return image, label, original
return image, label
def __call__(self, *args, **kwargs):
''' Returns tf.data.Dataset instance as well as number of classes in training set. '''
dataset = tf.data.Dataset.from_generator(self.sample_index, (tf.int32),
(tf.TensorShape([1])))
dataset = dataset.map(self.train_preprocess,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if not self.return_original:
dataset = dataset.batch(self.batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.repeat()
return dataset, len(set(self.labels))
@staticmethod
def create_from_list(path, input_size, batch_size, params, return_original=False):
        ''' Creates a Dataset instance from a path to an images list.
        The images list has the following format:
<relative_path_to_image> <class_label>
'''
impaths, labels, is_real, _ = from_list(path)
return Dataset(impaths, labels, is_real, input_size, batch_size, params, return_original)()
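    # Example images list contents, per the format described above (paths and
    # labels are illustrative):
    #   shelf/0001.jpg 0
    #   shelf/0002.jpg 0
    #   table/0001.jpg 1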
@staticmethod
def reassign_labels(labels):
        ''' Re-assign class labels so that they start at 0 and end at (num_classes - 1). '''
unique_labels = list(set(labels))
return [unique_labels.index(l) for l in labels]
def main():
import argparse
import time
args = argparse.ArgumentParser()
args.add_argument('--gallery', required=True)
args.add_argument('--input_size', default=224, type=int)
args.add_argument('--augmentation_config', required=True)
args = args.parse_args()
with open(args.augmentation_config) as f:
augmentation_config = json.load(f)
dataset, _ = Dataset.create_from_list(args.gallery, args.input_size, 1,
augmentation_config, True)
t = time.time()
for preprocessed, label, original in dataset.take(1000):
cv2.imshow('preprocessed', depreprocess_image(preprocessed.numpy()))
cv2.imshow('original', original.numpy().astype(np.uint8))
print(label)
if cv2.waitKey(0) == 27:
break
print(time.time() - t)
if __name__ == '__main__':
main()
|
tests/test_basics.py
|
roguextech/OpenRocket-pyjnius
| 908 |
72215
|
<reponame>roguextech/OpenRocket-pyjnius
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import unittest
from jnius.reflect import autoclass
try:
long
except NameError:
# Python 3
long = int
def py2_encode(uni):
if sys.version_info < (3, 0):
uni = uni.encode('utf-8')
return uni
class BasicsTest(unittest.TestCase):
def test_static_methods(self):
Test = autoclass('org.jnius.BasicsTest')
self.assertEqual(Test.methodStaticZ(), True)
self.assertEqual(Test.methodStaticB(), 127)
self.assertEqual(Test.methodStaticC(), 'k')
self.assertEqual(Test.methodStaticS(), 32767)
self.assertEqual(Test.methodStaticI(), 2147483467)
self.assertEqual(Test.methodStaticJ(), 9223372036854775807)
self.assertAlmostEqual(Test.methodStaticF(), 1.23456789)
self.assertEqual(Test.methodStaticD(), 1.23456789)
self.assertEqual(Test.methodStaticString(), py2_encode(u'hello \U0001F30E!'))
def test_static_fields(self):
Test = autoclass('org.jnius.BasicsTest')
self.assertEqual(Test.fieldStaticZ, True)
self.assertEqual(Test.fieldStaticB, 127)
self.assertEqual(Test.fieldStaticC, 'k')
self.assertEqual(Test.fieldStaticS, 32767)
self.assertEqual(Test.fieldStaticI, 2147483467)
self.assertEqual(Test.fieldStaticJ, 9223372036854775807)
self.assertAlmostEqual(Test.fieldStaticF, 1.23456789)
self.assertEqual(Test.fieldStaticD, 1.23456789)
self.assertEqual(Test.fieldStaticString, py2_encode(u'hello \U0001F30E!'))
def test_instance_methods(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodZ(), True)
self.assertEqual(test.methodB(), 127)
self.assertEqual(test.methodC(), 'k')
self.assertEqual(test.methodS(), 32767)
self.assertEqual(test.methodI(), 2147483467)
self.assertEqual(test.methodJ(), 9223372036854775807)
self.assertAlmostEqual(test.methodF(), 1.23456789)
self.assertEqual(test.methodD(), 1.23456789)
self.assertEqual(test.methodString(), py2_encode(u'hello \U0001F30E!'))
def test_instance_fields(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.fieldZ, True)
self.assertEqual(test.fieldB, 127)
self.assertEqual(test.fieldC, 'k')
self.assertEqual(test.fieldS, 32767)
self.assertEqual(test.fieldI, 2147483467)
self.assertEqual(test.fieldJ, 9223372036854775807)
self.assertAlmostEqual(test.fieldF, 1.23456789)
self.assertEqual(test.fieldD, 1.23456789)
self.assertEqual(test.fieldString, py2_encode(u'hello \U0001F30E!'))
test2 = autoclass('org.jnius.BasicsTest')(10)
self.assertEqual(test2.fieldB, 10)
self.assertEqual(test.fieldB, 127)
self.assertEqual(test2.fieldB, 10)
def test_instance_getter_naming(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.disabled, True)
self.assertEqual(test.enabled, False)
def test_instance_set_fields(self):
test = autoclass('org.jnius.BasicsTest')()
test.fieldSetZ = True
test.fieldSetB = 127
test.fieldSetC = ord('k')
test.fieldSetS = 32767
test.fieldSetI = 2147483467
test.fieldSetJ = 9223372036854775807
test.fieldSetF = 1.23456789
test.fieldSetD = 1.23456789
self.assertTrue(test.testFieldSetZ())
self.assertTrue(test.testFieldSetB())
self.assertTrue(test.testFieldSetC())
self.assertTrue(test.testFieldSetS())
self.assertTrue(test.testFieldSetI())
self.assertTrue(test.testFieldSetJ())
self.assertTrue(test.testFieldSetF())
self.assertTrue(test.testFieldSetD())
def test_instances_methods_array(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodArrayZ(), [True] * 3)
self.assertEqual(test.methodArrayB()[0], 127)
if sys.version_info >= (3, 0):
self.assertEqual(test.methodArrayB(), [127] * 3)
self.assertEqual(test.methodArrayC(), ['k'] * 3)
self.assertEqual(test.methodArrayS(), [32767] * 3)
self.assertEqual(test.methodArrayI(), [2147483467] * 3)
self.assertEqual(test.methodArrayJ(), [9223372036854775807] * 3)
ret = test.methodArrayF()
ref = [1.23456789] * 3
self.assertAlmostEqual(ret[0], ref[0])
self.assertAlmostEqual(ret[1], ref[1])
self.assertAlmostEqual(ret[2], ref[2])
self.assertEqual(test.methodArrayD(), [1.23456789] * 3)
self.assertEqual(test.methodArrayString(), [py2_encode(u'hello \U0001F30E!')] * 3)
def test_instances_methods_params(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodParamsZBCSIJFD(
True, 127, 'k', 32767, 2147483467, 9223372036854775807, 1.23456789, 1.23456789), True)
self.assertEqual(test.methodParamsZBCSIJFD(
True, long(127), 'k', long(32767), long(2147483467), 9223372036854775807, 1.23456789, 1.23456789), True)
self.assertEqual(test.methodParamsString(py2_encode(u'hello \U0001F30E!')), True)
self.assertEqual(test.methodParamsArrayI([1, 2, 3]), True)
self.assertEqual(test.methodParamsArrayString([
py2_encode(u'hello'), py2_encode(u'\U0001F30E')]), True)
def test_instances_methods_params_object_list_str(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodParamsObject([
'hello', 'world']), True)
def test_instances_methods_params_object_list_int(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodParamsObject([1, 2]), True)
def test_instances_methods_params_object_list_float(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodParamsObject([3.14, 1.61]), True)
def test_instances_methods_params_object_list_long(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodParamsObject([1, 2]), True)
def test_instances_methods_params_array_byte(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodParamsArrayByte([127, 127, 127]), True)
ret = test.methodArrayB()
self.assertEqual(test.methodParamsArrayByte(ret), True)
def test_return_array_as_object_array_of_strings(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodReturnStrings(), [py2_encode(u'Hello'),
py2_encode(u'\U0001F30E')])
def test_return_array_as_object_of_integers(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEqual(test.methodReturnIntegers(), [1, 2])
|
rpc/RPyC/tutorials/services/registry_discovery/service01.py
|
2581676612/python
| 112 |
72267
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-8-15 1:35 PM
# @Author : Tom.Lee
# @CopyRight : 2016-2017 OpenBridge by yihecloud
# @File : service.py
# @Product : PyCharm
# @Docs :
# @Source :
import rpyc
from rpyc.utils.server import ThreadedServer
class MyService(rpyc.Service):
def on_connect(self):
pass
def on_disconnect(self):
pass
@classmethod
def exposed_get_answer(cls):
return 66
@classmethod
def get_question(cls):
return "what is the airspeed velocity of an unladen swallow?"
if __name__ == "__main__":
t = ThreadedServer(MyService, port=18861)
print """
service start ok! port {port}
""".format(port=18861)
t.start()
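# Client-side sketch (assumes the server above is running locally):
#   import rpyc
#   conn = rpyc.connect("localhost", 18861)
#   print(conn.root.get_answer())  # calls exposed_get_answer -> 66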
|