max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
util/debug.py | RaphaelWag/YOLOS | 486 | 73807 |
<gh_stars>100-1000
import torch
import numpy
import cv2
import copy
def get_img_array(imgtensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
"""imgtensor: ([C,H,W],device=cuda)
"""
denormimg = imgtensor.cpu().permute(1,2,0).mul_(torch.tensor(std)).add_(torch.tensor(mean))
imgarray = denormimg.numpy()
imgarray = imgarray * 255
imgarray = imgarray.astype('uint8')
imgarray = cv2.cvtColor(imgarray, cv2.COLOR_RGB2BGR)
return imgarray
def draw_rec_in_img(img, target):
tl = 3 # line thickness
tf = max(tl-1,1) # font thickness
color = [0,0,255] # box color (BGR)
tempimg = copy.deepcopy(img)
h, w = target['size']
labels = target['labels'].cpu()
xyxyboxes = target['xyxyboxes'].cpu()
denorm_xyxyboxes = xyxyboxes * torch.tensor([w,h,w,h])
for box,label in zip(denorm_xyxyboxes, labels):
c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
cv2.rectangle(tempimg,c1,c2,color,thickness=tl, lineType=cv2.LINE_AA)
label = str(int(label))
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(tempimg, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(tempimg, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
return tempimg
def draw_patch_in_img(img, tgt_patch, inputs_size):
tl = 1 # line thickness
tf = max(tl-1,1) # font thickness
color = [0,255,0] # box color (BGR)
point_size = 4
point_color = (255, 0, 0) # BGR
point_thickness = 4 # can be 0, 4, or 8
tempimg = copy.deepcopy(img)
h, w = inputs_size
labels = tgt_patch['labels'].cpu()
patch_indexs = tgt_patch['patch_index'].cpu()
centers = tgt_patch['centers'].cpu()
w_num = w//16
for patch_index, label, center in zip(patch_indexs, labels, centers):
point = (int(center[0]), int(center[1]))
cv2.circle(tempimg, point, point_size, point_color, point_thickness)
y_start_index = patch_index // w_num
x_start_index = patch_index - y_start_index*w_num
x_start = x_start_index * 16
y_start = y_start_index * 16
x_end = x_start + 16
y_end = y_start + 16
c1, c2 = (int(x_start), int(y_start)), (int(x_end), int(y_end))
cv2.rectangle(tempimg,c1,c2,color,thickness=tl, lineType=cv2.LINE_AA)
label = str(int(label))
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(tempimg, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(tempimg, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
return tempimg
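A minimal usage sketch for the helpers above (the tensor, boxes and labels below are made-up placeholders; the default ImageNet mean/std are assumed):
import torch
img_t = torch.rand(3, 480, 640)                        # stand-in for a normalized [C,H,W] tensor
target = {
    'size': (480, 640),                                # (h, w)
    'labels': torch.tensor([1]),
    'xyxyboxes': torch.tensor([[0.1, 0.1, 0.5, 0.6]]), # normalized xyxy box
}
bgr = get_img_array(img_t)                             # HxWx3 uint8, BGR order for cv2
vis = draw_rec_in_img(bgr, target)
cv2.imwrite('debug_vis.jpg', vis)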
| tool_validate_packages.py | amerkel2/azure-storage-python | 348 | 73813 |
<reponame>amerkel2/azure-storage-python<filename>tool_validate_packages.py
#!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import argparse
import os
from subprocess import check_call
import glob
DEFAULT_DESTINATION_FOLDER = "./dist"
CURRENT_DIR = os.path.curdir
# build the wheels for all packages
def create_storage_package():
check_call(['python3', 'tool_build_packages.py', 'all'])
# install dependencies required for testing into the virtual environment
def install_dependency_packages(executable_location):
check_call([executable_location, 'install', '-r', 'requirements.txt'])
check_call([executable_location, 'install', 'pytest'])
# install the storage packages into the virtual environment
def install_storage_package(executable_location, environment):
if environment == 'test':
check_call([executable_location, 'install', 'azure-storage-nspkg'])
check_call([executable_location, 'install', 'azure-storage-common', '-i',
'https://testpypi.python.org/pypi', '--no-deps'])
check_call([executable_location, 'install', 'azure-storage-blob', '-i',
'https://testpypi.python.org/pypi', '--no-deps'])
check_call([executable_location, 'install', 'azure-storage-file', '-i',
'https://testpypi.python.org/pypi', '--no-deps'])
check_call([executable_location, 'install', 'azure-storage-queue', '-i',
'https://testpypi.python.org/pypi', '--no-deps'])
elif environment == 'prod':
check_call([executable_location, 'install', 'azure-storage-blob', '--no-cache-dir'])
check_call([executable_location, 'install', 'azure-storage-file', '--no-cache-dir'])
check_call([executable_location, 'install', 'azure-storage-queue', '--no-cache-dir'])
else:
# install the namespace package first
nspkg_wheel = glob.glob("dist/*nspkg*.whl")
check_call([executable_location, 'install', os.path.abspath(nspkg_wheel[0])])
# install the common package
common_wheel = glob.glob("dist/*common*.whl")
check_call([executable_location, 'install', os.path.abspath(common_wheel[0])])
# install all the other packages
# this simply skips the common and namespace package since they are already installed
storage_wheels = glob.glob("dist/*.whl")
for wheel in storage_wheels:
check_call([executable_location, 'install', os.path.abspath(wheel)])
# clean up the test directory containing the virtual environment
def delete_directory_if_exists(dir_name):
if os.path.exists(CURRENT_DIR + '/' + dir_name):
check_call(['rm', '-r', dir_name])
# create virtual environment for python 2
def create_py2_venv(environment):
dir_name = 'py2test-' + environment
pip_location = dir_name + '/bin/pip'
delete_directory_if_exists(dir_name)
os.system('virtualenv ' + dir_name) # this creates the virtual environment
install_dependency_packages(pip_location)
install_storage_package(pip_location, environment)
return dir_name + '/bin/python'
# create virtual environment for python 3
def create_py3_venv(environment):
dir_name = 'py3test-' + environment
pip_location = dir_name + '/bin/pip'
delete_directory_if_exists(dir_name)
os.system('python3 -m venv ' + dir_name) # this creates the virtual environment
install_dependency_packages(pip_location)
install_storage_package(pip_location, environment)
return dir_name + '/bin/python'
# kicks off the entire test suite
def run_unit_tests(executable_location):
check_call([executable_location, '-m', 'pytest'])
# this script requires Bash, which exists on all major platforms (including Win10)
# assumption: python 2 is invoked with 'python', python 3 is invoked with 'python3'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Validate Azure Storage packages.')
parser.add_argument('--python-version', '-p', help='The desired python version', default='3')
parser.add_argument('--create-package', '-c', help='Whether new packages need to be generated', default='y')
parser.add_argument('--run-tests', '-t', help='Whether the unit tests should run', default='y')
parser.add_argument('--environment', '-e', help='Choose from [local, test, prod]', default='local')
# step 1: parse the command line arguments
args = parser.parse_args()
print("Starting package validation: python_version={0}, create_package={1}, run_tests={2}, environment={3}"
.format(args.python_version, args.create_package, args.run_tests, args.environment))
# step 2: generate wheels if necessary
if args.create_package in ('yes', 'true', 'y', 't'):
create_storage_package()
# step 3: create the virtual environment for the specified python version
if args.python_version == '2':
virtual_py = create_py2_venv(args.environment)
else:
virtual_py = create_py3_venv(args.environment)
# step 4: run unit test suite if necessary
if args.run_tests in ('yes', 'true', 'y', 't'):
run_unit_tests(virtual_py)
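A hypothetical end-to-end run of the validator above, driven from Python rather than the shell (equivalent to `python3 tool_validate_packages.py -p 3 -c y -t y -e local`; assumes the script is in the current directory and that Bash, virtualenv and python3 are available as the comments state):
import subprocess
subprocess.check_call([
    'python3', 'tool_validate_packages.py',
    '--python-version', '3', '--create-package', 'y',
    '--run-tests', 'y', '--environment', 'local',
])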
| cpu_ver/hypergrad/slurm_job_watcher.py | bigaidream-projects/drmad | 119 | 73823 |
import time
from glob import glob
import subprocess
import os
from odyssey import run_signal_stem, slurm_fname, temp_dir, jobdir
if __name__ == "__main__":
print "Monitoring slurm jobs in {0}".format(os.getcwd())
while True:
for fname in glob(run_signal_stem + "*"):
jobname = fname[len(run_signal_stem):]
print "Launching job {0}".format(jobname)
with temp_dir(jobdir(jobname)):
subprocess.call(["sbatch", slurm_fname])
os.remove(fname)
time.sleep(2)
| deepsleep/model.py | learning310/deepsleepnet | 266 | 73826 |
import tensorflow as tf
from deepsleep.nn import *
class DeepFeatureNet(object):
def __init__(
self,
batch_size,
input_dims,
n_classes,
is_train,
reuse_params,
use_dropout,
name="deepfeaturenet"
):
self.batch_size = batch_size
self.input_dims = input_dims
self.n_classes = n_classes
self.is_train = is_train
self.reuse_params = reuse_params
self.use_dropout = use_dropout
self.name = name
self.activations = []
self.layer_idx = 1
self.monitor_vars = []
def _build_placeholder(self):
# Input
name = "x_train" if self.is_train else "x_valid"
self.input_var = tf.compat.v1.placeholder(
tf.float32,
shape=[self.batch_size, self.input_dims, 1, 1],
name=name + "_inputs"
)
# Target
self.target_var = tf.compat.v1.placeholder(
tf.int32,
shape=[self.batch_size, ],
name=name + "_targets"
)
def _conv1d_layer(self, input_var, filter_size, n_filters, stride, wd=0):
input_shape = input_var.get_shape()
n_batches = input_shape[0].value
input_dims = input_shape[1].value
n_in_filters = input_shape[3].value
name = "l{}_conv".format(self.layer_idx)
with tf.compat.v1.variable_scope(name) as scope:
output = conv_1d(name="conv1d", input_var=input_var, filter_shape=[filter_size, 1, n_in_filters, n_filters], stride=stride, bias=None, wd=wd)
# # MONITORING
# self.monitor_vars.append(("{}_before_bn".format(name), output))
output = batch_norm_new(name="bn", input_var=output, is_train=self.is_train)
# # MONITORING
# self.monitor_vars.append(("{}_after_bn".format(name), output))
# output = leaky_relu(name="leaky_relu", input_var=output)
output = tf.nn.relu(output, name="relu")
self.activations.append((name, output))
self.layer_idx += 1
return output
def build_model(self, input_var):
# List to store the output of each CNNs
output_conns = []
######### CNNs with small filter size at the first layer #########
# Convolution
# network = self._conv1d_layer(input_var=input_var, filter_size=128, n_filters=64, stride=16, wd=1e-3)
network = self._conv1d_layer(input_var=input_var, filter_size=50, n_filters=64, stride=6, wd=1e-3)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=8, stride=8)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout:
name = "l{}_dropout".format(self.layer_idx)
if self.is_train:
network = tf.nn.dropout(network, keep_prob=0.5, name=name)
else:
network = tf.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Convolution
network = self._conv1d_layer(input_var=network, filter_size=8, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=8, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=8, n_filters=128, stride=1)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=4, stride=4)
self.activations.append((name, network))
self.layer_idx += 1
# Flatten
name = "l{}_flat".format(self.layer_idx)
network = flatten(name=name, input_var=network)
self.activations.append((name, network))
self.layer_idx += 1
output_conns.append(network)
######### CNNs with large filter size at the first layer #########
# Convolution
# network = self._conv1d_layer(input_var=input_var, filter_size=1024, n_filters=64, stride=128)
network = self._conv1d_layer(input_var=input_var, filter_size=400, n_filters=64, stride=50)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=4, stride=4)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout:
name = "l{}_dropout".format(self.layer_idx)
if self.is_train:
network = tf.nn.dropout(network, keep_prob=0.5, name=name)
else:
network = tf.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Convolution
network = self._conv1d_layer(input_var=network, filter_size=6, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=6, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=6, n_filters=128, stride=1)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=2, stride=2)
self.activations.append((name, network))
self.layer_idx += 1
# Flatten
name = "l{}_flat".format(self.layer_idx)
network = flatten(name=name, input_var=network)
self.activations.append((name, network))
self.layer_idx += 1
output_conns.append(network)
######### Aggregate and link two CNNs #########
# Concat
name = "l{}_concat".format(self.layer_idx)
network = tf.concat(axis=1, values=output_conns, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout:
name = "l{}_dropout".format(self.layer_idx)
if self.is_train:
network = tf.nn.dropout(network, keep_prob=0.5, name=name)
else:
network = tf.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
return network
def init_ops(self):
self._build_placeholder()
# Get loss and prediction operations
with tf.compat.v1.variable_scope(self.name) as scope:
# Reuse variables for validation
if self.reuse_params:
scope.reuse_variables()
# Build model
network = self.build_model(input_var=self.input_var)
# Softmax linear
name = "l{}_softmax_linear".format(self.layer_idx)
network = fc(name=name, input_var=network, n_hiddens=self.n_classes, bias=0.0, wd=0)
self.activations.append((name, network))
self.layer_idx += 1
# Outputs of softmax linear are logits
self.logits = network
######### Compute loss #########
# Cross-entropy loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits,
labels=self.target_var,
name="sparse_softmax_cross_entropy_with_logits"
)
loss = tf.reduce_mean(loss, name="cross_entropy")
# Regularization loss
regular_loss = tf.add_n(
tf.compat.v1.get_collection("losses", scope=scope.name + "\/"),
name="regular_loss"
)
# print " "
# print "Params to compute regularization loss:"
# for p in tf.compat.v1.get_collection("losses", scope=scope.name + "\/"):
# print p.name
# print " "
# Total loss
self.loss_op = tf.add(loss, regular_loss)
# Predictions
self.pred_op = tf.argmax(self.logits, 1)
class DeepSleepNet(DeepFeatureNet):
def __init__(
self,
batch_size,
input_dims,
n_classes,
seq_length,
n_rnn_layers,
return_last,
is_train,
reuse_params,
use_dropout_feature,
use_dropout_sequence,
name="deepsleepnet"
):
super(self.__class__, self).__init__(
batch_size=batch_size,
input_dims=input_dims,
n_classes=n_classes,
is_train=is_train,
reuse_params=reuse_params,
use_dropout=use_dropout_feature,
name=name
)
self.seq_length = seq_length
self.n_rnn_layers = n_rnn_layers
self.return_last = return_last
self.use_dropout_sequence = use_dropout_sequence
def _build_placeholder(self):
# Input
name = "x_train" if self.is_train else "x_valid"
self.input_var = tf.compat.v1.placeholder(
tf.float32,
shape=[self.batch_size*self.seq_length, self.input_dims, 1, 1],
name=name + "_inputs"
)
# Target
self.target_var = tf.compat.v1.placeholder(
tf.int32,
shape=[self.batch_size*self.seq_length, ],
name=name + "_targets"
)
def build_model(self, input_var):
# Create a network with superclass method
network = super(self.__class__, self).build_model(
input_var=self.input_var
)
# Residual (or shortcut) connection
output_conns = []
# Fully-connected to select some part of the output to add with the output from bi-directional LSTM
name = "l{}_fc".format(self.layer_idx)
with tf.compat.v1.variable_scope(name) as scope:
output_tmp = fc(name="fc", input_var=network, n_hiddens=1024, bias=None, wd=0)
output_tmp = batch_norm_new(name="bn", input_var=output_tmp, is_train=self.is_train)
# output_tmp = leaky_relu(name="leaky_relu", input_var=output_tmp)
output_tmp = tf.nn.relu(output_tmp, name="relu")
self.activations.append((name, output_tmp))
self.layer_idx += 1
output_conns.append(output_tmp)
######################################################################
# Reshape the input from (batch_size * seq_length, input_dim) to
# (batch_size, seq_length, input_dim)
name = "l{}_reshape_seq".format(self.layer_idx)
input_dim = network.get_shape()[-1].value
seq_input = tf.reshape(network,
shape=[-1, self.seq_length, input_dim],
name=name)
assert self.batch_size == seq_input.get_shape()[0].value
self.activations.append((name, seq_input))
self.layer_idx += 1
# Bidirectional LSTM network
name = "l{}_bi_lstm".format(self.layer_idx)
hidden_size = 512 # will output 1024 (512 forward, 512 backward)
with tf.compat.v1.variable_scope(name) as scope:
def lstm_cell():
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=True,
state_is_tuple=True,
reuse=tf.compat.v1.get_variable_scope().reuse)
if self.use_dropout_sequence:
keep_prob = 0.5 if self.is_train else 1.0
cell = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
cell,
output_keep_prob=keep_prob
)
return cell
fw_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(self.n_rnn_layers)], state_is_tuple = True)
bw_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(self.n_rnn_layers)], state_is_tuple = True)
# Initial state of RNN
self.fw_initial_state = fw_cell.zero_state(self.batch_size, tf.float32)
self.bw_initial_state = bw_cell.zero_state(self.batch_size, tf.float32)
# Feedforward to MultiRNNCell
list_rnn_inputs = tf.unstack(seq_input, axis=1)
#outputs, fw_state, bw_state = tf.nn.bidirectional_rnn(
outputs, fw_state, bw_state = tf.compat.v1.nn.static_bidirectional_rnn(
cell_fw=fw_cell,
cell_bw=bw_cell,
inputs=list_rnn_inputs,
initial_state_fw=self.fw_initial_state,
initial_state_bw=self.bw_initial_state
)
if self.return_last:
network = outputs[-1]
else:
network = tf.reshape(tf.concat(axis=1, values=outputs), [-1, hidden_size*2],
name=name)
self.activations.append((name, network))
self.layer_idx +=1
self.fw_final_state = fw_state
self.bw_final_state = bw_state
# Append output
output_conns.append(network)
######################################################################
# Add
name = "l{}_add".format(self.layer_idx)
network = tf.add_n(output_conns, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout_sequence:
name = "l{}_dropout".format(self.layer_idx)
if self.is_train:
network = tf.nn.dropout(network, keep_prob=0.5, name=name)
else:
network = tf.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
return network
def init_ops(self):
self._build_placeholder()
# Get loss and prediction operations
with tf.compat.v1.variable_scope(self.name) as scope:
# Reuse variables for validation
if self.reuse_params:
scope.reuse_variables()
# Build model
network = self.build_model(input_var=self.input_var)
# Softmax linear
name = "l{}_softmax_linear".format(self.layer_idx)
network = fc(name=name, input_var=network, n_hiddens=self.n_classes, bias=0.0, wd=0)
self.activations.append((name, network))
self.layer_idx += 1
# Outputs of softmax linear are logits
self.logits = network
######### Compute loss #########
# Weighted cross-entropy loss for a sequence of logits (per example)
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[self.logits],
[self.target_var],
[tf.ones([self.batch_size * self.seq_length])],
name="sequence_loss_by_example"
)
loss = tf.reduce_sum(loss) / self.batch_size
# Regularization loss
regular_loss = tf.add_n(
tf.compat.v1.get_collection("losses", scope=scope.name + "\/"),
name="regular_loss"
)
# print " "
# print "Params to compute regularization loss:"
# for p in tf.compat.v1.get_collection("losses", scope=scope.name + "\/"):
# print p.name
# print " "
# Total loss
self.loss_op = tf.add(loss, regular_loss)
# Predictions
self.pred_op = tf.argmax(self.logits, 1)
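A construction sketch for the two models above (graph-mode TF1 via tf.compat.v1 is assumed; all sizes below are illustrative placeholders, not the repository's exact settings):
feature_net = DeepFeatureNet(
    batch_size=20, input_dims=3000, n_classes=5,
    is_train=True, reuse_params=False, use_dropout=True,
)
feature_net.init_ops()   # builds placeholders, logits, loss_op and pred_op

seq_net = DeepSleepNet(
    batch_size=10, input_dims=3000, n_classes=5, seq_length=25,
    n_rnn_layers=2, return_last=False, is_train=True,
    reuse_params=False, use_dropout_feature=True, use_dropout_sequence=True,
)
seq_net.init_ops()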
| gitlint-core/gitlint/tests/config/test_config_builder.py | carlsmedstad/gitlint | 559 | 73831 |
# -*- coding: utf-8 -*-
import copy
from gitlint.tests.base import BaseTestCase
from gitlint.config import LintConfig, LintConfigBuilder, LintConfigError
from gitlint import rules
class LintConfigBuilderTests(BaseTestCase):
def test_set_option(self):
config_builder = LintConfigBuilder()
config = config_builder.build()
# assert some defaults
self.assertEqual(config.get_rule_option('title-max-length', 'line-length'), 72)
self.assertEqual(config.get_rule_option('body-max-line-length', 'line-length'), 80)
self.assertListEqual(config.get_rule_option('title-must-not-contain-word', 'words'), ["WIP"])
self.assertEqual(config.verbosity, 3)
# Make some changes and check blueprint
config_builder.set_option('title-max-length', 'line-length', 100)
config_builder.set_option('general', 'verbosity', 2)
config_builder.set_option('title-must-not-contain-word', 'words', ["foo", "bar"])
expected_blueprint = {'title-must-not-contain-word': {'words': ['foo', 'bar']},
'title-max-length': {'line-length': 100}, 'general': {'verbosity': 2}}
self.assertDictEqual(config_builder._config_blueprint, expected_blueprint)
# Build config and verify that the changes have occurred and no other changes
config = config_builder.build()
self.assertEqual(config.get_rule_option('title-max-length', 'line-length'), 100)
self.assertEqual(config.get_rule_option('body-max-line-length', 'line-length'), 80) # should be unchanged
self.assertListEqual(config.get_rule_option('title-must-not-contain-word', 'words'), ["foo", "bar"])
self.assertEqual(config.verbosity, 2)
def test_set_from_commit_ignore_all(self):
config = LintConfig()
original_rules = config.rules
original_rule_ids = [rule.id for rule in original_rules]
config_builder = LintConfigBuilder()
# nothing gitlint
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint\nfoo"))
config = config_builder.build()
self.assertSequenceEqual(config.rules, original_rules)
self.assertListEqual(config.ignore, [])
# ignore all rules
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore: all\nfoo"))
config = config_builder.build()
self.assertEqual(config.ignore, original_rule_ids)
# ignore all rules, no space
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore:all\nfoo"))
config = config_builder.build()
self.assertEqual(config.ignore, original_rule_ids)
# ignore all rules, more spacing
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore: \t all\nfoo"))
config = config_builder.build()
self.assertEqual(config.ignore, original_rule_ids)
def test_set_from_commit_ignore_specific(self):
# ignore specific rules
config_builder = LintConfigBuilder()
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore: T1, body-hard-tab"))
config = config_builder.build()
self.assertEqual(config.ignore, ["T1", "body-hard-tab"])
def test_set_from_config_file(self):
# regular config file load, no problems
config_builder = LintConfigBuilder()
config_builder.set_from_config_file(self.get_sample_path("config/gitlintconfig"))
config = config_builder.build()
# Do some assertions on the config
self.assertEqual(config.verbosity, 1)
self.assertFalse(config.debug)
self.assertFalse(config.ignore_merge_commits)
self.assertIsNone(config.extra_path)
self.assertEqual(config.ignore, ["title-trailing-whitespace", "B2"])
self.assertEqual(config.get_rule_option('title-max-length', 'line-length'), 20)
self.assertEqual(config.get_rule_option('body-max-line-length', 'line-length'), 30)
def test_set_from_config_file_negative(self):
config_builder = LintConfigBuilder()
# bad config file load
foo_path = self.get_sample_path("föo")
expected_error_msg = f"Invalid file path: {foo_path}"
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config_builder.set_from_config_file(foo_path)
# error during file parsing
path = self.get_sample_path("config/no-sections")
expected_error_msg = "File contains no section headers."
# We only match the start of the message here, since the exact message can vary depending on platform
with self.assertRaisesRegex(LintConfigError, expected_error_msg):
config_builder.set_from_config_file(path)
# non-existing rule
path = self.get_sample_path("config/nonexisting-rule")
config_builder = LintConfigBuilder()
config_builder.set_from_config_file(path)
expected_error_msg = "No such rule 'föobar'"
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config_builder.build()
# non-existing general option
path = self.get_sample_path("config/nonexisting-general-option")
config_builder = LintConfigBuilder()
config_builder.set_from_config_file(path)
expected_error_msg = "'foo' is not a valid gitlint option"
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config_builder.build()
# non-existing option
path = self.get_sample_path("config/nonexisting-option")
config_builder = LintConfigBuilder()
config_builder.set_from_config_file(path)
expected_error_msg = "Rule 'title-max-length' has no option 'föobar'"
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config_builder.build()
# invalid option value
path = self.get_sample_path("config/invalid-option-value")
config_builder = LintConfigBuilder()
config_builder.set_from_config_file(path)
expected_error_msg = "'föo' is not a valid value for option 'title-max-length.line-length'. " + \
"Option 'line-length' must be a positive integer (current value: 'föo')."
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config_builder.build()
def test_set_config_from_string_list(self):
config = LintConfig()
# change and assert changes
config_builder = LintConfigBuilder()
config_builder.set_config_from_string_list(['general.verbosity=1', 'title-max-length.line-length=60',
'body-max-line-length.line-length=120',
"title-must-not-contain-word.words=håha"])
config = config_builder.build()
self.assertEqual(config.get_rule_option('title-max-length', 'line-length'), 60)
self.assertEqual(config.get_rule_option('body-max-line-length', 'line-length'), 120)
self.assertListEqual(config.get_rule_option('title-must-not-contain-word', 'words'), ["håha"])
self.assertEqual(config.verbosity, 1)
def test_set_config_from_string_list_negative(self):
config_builder = LintConfigBuilder()
# assert error on incorrect rule - this happens at build time
config_builder.set_config_from_string_list(["föo.bar=1"])
with self.assertRaisesMessage(LintConfigError, "No such rule 'föo'"):
config_builder.build()
# no equal sign
expected_msg = "'föo.bar' is an invalid configuration option. Use '<rule>.<option>=<value>'"
with self.assertRaisesMessage(LintConfigError, expected_msg):
config_builder.set_config_from_string_list(["föo.bar"])
# missing value
expected_msg = "'föo.bar=' is an invalid configuration option. Use '<rule>.<option>=<value>'"
with self.assertRaisesMessage(LintConfigError, expected_msg):
config_builder.set_config_from_string_list(["föo.bar="])
# space instead of equal sign
expected_msg = "'föo.bar 1' is an invalid configuration option. Use '<rule>.<option>=<value>'"
with self.assertRaisesMessage(LintConfigError, expected_msg):
config_builder.set_config_from_string_list(["föo.bar 1"])
# no period between rule and option names
expected_msg = "'föobar=1' is an invalid configuration option. Use '<rule>.<option>=<value>'"
with self.assertRaisesMessage(LintConfigError, expected_msg):
config_builder.set_config_from_string_list([u'föobar=1'])
def test_rebuild_config(self):
# normal config build
config_builder = LintConfigBuilder()
config_builder.set_option('general', 'verbosity', 3)
lint_config = config_builder.build()
self.assertEqual(lint_config.verbosity, 3)
# check that existing config gets overwritten when we pass it to a configbuilder with different options
existing_lintconfig = LintConfig()
existing_lintconfig.verbosity = 2
lint_config = config_builder.build(existing_lintconfig)
self.assertEqual(lint_config.verbosity, 3)
self.assertEqual(existing_lintconfig.verbosity, 3)
def test_clone(self):
config_builder = LintConfigBuilder()
config_builder.set_option('general', 'verbosity', 2)
config_builder.set_option('title-max-length', 'line-length', 100)
expected = {'title-max-length': {'line-length': 100}, 'general': {'verbosity': 2}}
self.assertDictEqual(config_builder._config_blueprint, expected)
# Clone and verify that the blueprint is the same as the original
cloned_builder = config_builder.clone()
self.assertDictEqual(cloned_builder._config_blueprint, expected)
# Modify the original and make sure we're not modifying the clone (i.e. check that the copy is a deep copy)
config_builder.set_option('title-max-length', 'line-length', 120)
self.assertDictEqual(cloned_builder._config_blueprint, expected)
def test_named_rules(self):
# Store a copy of the default rules from the config, so we can reference it later
config_builder = LintConfigBuilder()
config = config_builder.build()
default_rules = copy.deepcopy(config.rules)
self.assertEqual(default_rules, config.rules) # deepcopy should be equal
# Add a named rule by setting an option in the config builder that follows the named rule pattern
# Assert that whitespace in the rule name is stripped
rule_qualifiers = [u'T7:my-extra-rüle', u' T7 : my-extra-rüle ', u'\tT7:\tmy-extra-rüle\t',
u'T7:\t\n \tmy-extra-rüle\t\n\n', "title-match-regex:my-extra-rüle"]
for rule_qualifier in rule_qualifiers:
config_builder = LintConfigBuilder()
config_builder.set_option(rule_qualifier, 'regex', "föo")
expected_rules = copy.deepcopy(default_rules)
my_rule = rules.TitleRegexMatches({'regex': "föo"})
my_rule.id = rules.TitleRegexMatches.id + ":my-extra-rüle"
my_rule.name = rules.TitleRegexMatches.name + ":my-extra-rüle"
expected_rules._rules[u'T7:my-extra-rüle'] = my_rule
self.assertEqual(config_builder.build().rules, expected_rules)
# assert that changing an option on the newly added rule is passed correctly to the RuleCollection
# we try this with all different rule qualifiers to ensure they all are normalized and map
# to the same rule
for other_rule_qualifier in rule_qualifiers:
cb = config_builder.clone()
cb.set_option(other_rule_qualifier, 'regex', other_rule_qualifier + "bōr")
# before setting the expected rule option value correctly, the RuleCollection should be different
self.assertNotEqual(cb.build().rules, expected_rules)
# after setting the option on the expected rule, it should be equal
my_rule.options['regex'].set(other_rule_qualifier + "bōr")
self.assertEqual(cb.build().rules, expected_rules)
my_rule.options['regex'].set("wrong")
def test_named_rules_negative(self):
# T7 = title-match-regex
# Invalid rule name
for invalid_name in ["", " ", " ", "\t", "\n", "å b", "å:b", "åb:", ":åb"]:
config_builder = LintConfigBuilder()
config_builder.set_option(f"T7:{invalid_name}", 'regex', "tëst")
expected_msg = f"The rule-name part in 'T7:{invalid_name}' cannot contain whitespace, colons or be empty"
with self.assertRaisesMessage(LintConfigError, expected_msg):
config_builder.build()
# Invalid parent rule name
config_builder = LintConfigBuilder()
config_builder.set_option("Ž123:foöbar", "fåke-option", "fåke-value")
with self.assertRaisesMessage(LintConfigError, "No such rule 'Ž123' (named rule: 'Ž123:foöbar')"):
config_builder.build()
# Invalid option name (this is the same as with regular rules)
config_builder = LintConfigBuilder()
config_builder.set_option("T7:foöbar", "blå", "my-rëgex")
with self.assertRaisesMessage(LintConfigError, "Rule 'T7:foöbar' has no option 'blå'"):
config_builder.build()
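A small illustration of the named-rule mechanism exercised by the tests above (the rule name and regex are invented for the example):
builder = LintConfigBuilder()
builder.set_option("T7:jira-reference", "regex", "^JIRA-[0-9]+")
config = builder.build()
# config.rules now contains an extra TitleRegexMatches instance with id "T7:jira-reference"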
| autoimpute/imputations/series/norm_unit_variance_imputation.py | gjdv/autoimpute | 191 | 73837 |
<filename>autoimpute/imputations/series/norm_unit_variance_imputation.py
"""This module implements normal imputation with constant unit variance single imputation
via the NormUnitVarianceImputer.
The NormUnitVarianceImputer imputes missing data assuming that the
single column is normally distributed with a-priori known constant unit
variance. Use SingleImputer or MultipleImputer with strategy=`norm_const_variance`
to broadcast the strategy across all the columns in a dataframe,
or specify this strategy for a given column.
"""
from scipy import stats
import pandas as pd
import numpy as np
from sklearn.utils.validation import check_is_fitted
from autoimpute.imputations import method_names
from autoimpute.imputations.errors import _not_num_series
from .base import ISeriesImputer
methods = method_names
# pylint:disable=attribute-defined-outside-init
# pylint:disable=unnecessary-pass
class NormUnitVarianceImputer(ISeriesImputer):
"""Impute missing values assuming normally distributed
data with unknown mean and *known* variance.
"""
# class variables
strategy = methods.NORM_UNIT_VARIANCE
def __init__(self):
"""Create an instance of the NormUnitVarianceImputer class."""
pass
def fit(self, X, y):
"""Fit the Imputer to the dataset and calculate the mean.
Args:
X (pd.Series): Dataset to fit the imputer.
y (None): ignored, None to meet requirements of base class
Returns:
self. Instance of the class.
"""
_not_num_series(self.strategy, X)
mu = X.mean() # mean of observed data
self.statistics_ = {"param": mu, "strategy": self.strategy}
return self
def impute(self, X):
"""Perform imputations using the statistics generated from fit.
The impute method handles the actual imputation. Missing values
in a given dataset are replaced with the respective mean from fit.
Args:
X (pd.Series): Dataset to impute missing data from fit.
Returns:
np.array -- imputed dataset.
"""
# check if fitted then impute with mean
check_is_fitted(self, "statistics_")
_not_num_series(self.strategy, X)
omu = self.statistics_["param"] # mean of observed data
idx = X.isnull() # missing data
nO = sum(~idx) # number of observed
m = sum(idx) # number to impute
muhatk = stats.norm(omu,np.sqrt(1/nO))
# imputed draws are *not* uncorrelated: they share the sampled mean estimate
Ymi=stats.multivariate_normal(np.ones(m)*muhatk.rvs(),
np.ones((m,m))/nO+np.eye(m)).rvs()
out = X.copy()
out[idx] = Ymi
return out
def fit_impute(self, X, y=None):
"""Convenience method to perform fit and imputation in one go."""
return self.fit(X, y).impute(X)
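For reference, the sampling scheme implemented in impute() above can be written as follows, where \bar{Y}_O is the observed mean, n_O the number of observed values, m the number of missing values, \mathbf{1}_m the all-ones vector and J_m the all-ones matrix:

\hat{\mu} \mid Y_O \sim \mathcal{N}\!\left(\bar{Y}_O, \tfrac{1}{n_O}\right), \qquad
Y_M \mid \hat{\mu} \sim \mathcal{N}_m\!\left(\hat{\mu}\,\mathbf{1}_m,\; I_m + \tfrac{1}{n_O} J_m\right)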
if __name__ == '__main__':
from autoimpute.imputations import SingleImputer
si=SingleImputer('normal unit variance')
Yo=stats.norm(0,1).rvs(100)
df = pd.DataFrame(columns=['Yo'],index=range(200),dtype=float)
df.loc[range(100),'Yo'] = Yo
si.fit_transform(df)
| lib/carbon/tests/util.py | hessu/carbon | 961 | 73846 |
<reponame>hessu/carbon<filename>lib/carbon/tests/util.py<gh_stars>100-1000
from carbon.conf import Settings
class TestSettings(Settings):
def readFrom(*args, **kwargs):
pass
| testcases/ch2o_tests/syntax/Range.py | vermashresth/chainer-compiler | 116 | 73850 |
# coding: utf-8
import chainer
class Range(chainer.Chain):
def forward(self, x):
return range(x)
class RangeStop(chainer.Chain):
def forward(self, x, y):
return range(x, y)
class RangeStep(chainer.Chain):
def forward(self, x, y, z):
return range(x, y, z)
class RangeListComp(chainer.Chain):
def forward(self, xs, ps, p):
y1 = [xs[x, x+2] for x in range(p)]
y2 = [xs[ps[x], ps[x]+3] for x in range(p)]
return y1, y2
# ======================================
from chainer_compiler import ch2o
import numpy as np
if __name__ == '__main__':
ch2o.generate_testcase(Range, [5])
ch2o.generate_testcase(RangeStop(), [5, 8], subname='stop')
ch2o.generate_testcase(RangeStep(), [5, 19, 2], subname='step')
wn = 5
v = np.random.rand(10, 20).astype(np.float32)
w = np.random.randint(0, 5, size=wn)
p = np.int64(wn)
ch2o.generate_testcase(RangeListComp, [v, w, p], subname='list_comp')
| solo/helpers.py | stevenwdv/solo-python | 156 | 73875 |
# -*- coding: utf-8 -*-
#
# Copyright 2019 SoloKeys Developers
#
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
from numbers import Number
from threading import Event, Timer
def to_websafe(data):
data = data.replace("+", "-")
data = data.replace("/", "_")
data = data.replace("=", "")
return data
def from_websafe(data):
data = data.replace("-", "+")
data = data.replace("_", "/")
return data + "=="[: (3 * len(data)) % 4]
class Timeout(object):
"""Utility class for adding a timeout to an event.
:param time_or_event: A number, in seconds, or a threading.Event object.
:ivar event: The Event associated with the Timeout.
:ivar timer: The Timer associated with the Timeout, if any.
"""
def __init__(self, time_or_event):
if isinstance(time_or_event, Number):
self.event = Event()
self.timer = Timer(time_or_event, self.event.set)
else:
self.event = time_or_event
self.timer = None
def __enter__(self):
if self.timer:
self.timer.start()
return self.event
def __exit__(self, exc_type, exc_val, exc_tb):
if self.timer:
self.timer.cancel()
self.timer.join()
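Usage sketch for Timeout (the loop body is a stand-in for real polling):
import time
with Timeout(0.5) as done:
    while not done.is_set():
        time.sleep(0.1)   # poll a device, check a queue, etc.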
| tests/task/nlp/test_text_classification.py | mariomeissner/lightning-transformers | 451 | 73910 |
import sys
from unittest.mock import MagicMock
import pytest
from lightning_transformers.core.nlp import HFBackboneConfig, HFTransformerDataConfig
from lightning_transformers.task.nlp.text_classification import (
TextClassificationDataModule,
TextClassificationTransformer,
)
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_train_e2e(script_runner):
script_runner.hf_train(task="text_classification", dataset="emotion", model="prajjwal1/bert-tiny")
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_train_default_dataset(script_runner):
script_runner.hf_train(
task="text_classification", model="prajjwal1/bert-tiny", cmd_args=['dataset.cfg.dataset_name="emotion"']
)
def test_smoke_predict_e2e(script_runner):
y = script_runner.hf_predict(['+x="Lightning rocks!"'], task="text_classification", model="prajjwal1/bert-tiny")
assert len(y) == 1
assert isinstance(y[0]["score"], float)
def test_predict_from_ckpt_path(script_runner, tmpdir):
script_runner.hf_train(
task="text_classification",
dataset="emotion",
model="prajjwal1/bert-tiny",
cmd_args=[f"trainer.default_root_dir={tmpdir}"],
fast_dev_run=0,
)
ckpt_path = tmpdir / "checkpoints" / "epoch=0-step=0.ckpt"
assert ckpt_path.exists()
y = script_runner.hf_predict(
['+x="Lightning rocks!"', f'+checkpoint_path="{ckpt_path}"'],
task="text_classification",
model="prajjwal1/bert-tiny",
)
assert len(y) == 1
assert isinstance(y[0]["score"], float)
def test_model_has_correct_cfg():
model = TextClassificationTransformer(HFBackboneConfig(pretrained_model_name_or_path="bert-base-cased"))
assert model.hparams.downstream_model_type == "transformers.AutoModelForSequenceClassification"
def test_datamodule_has_correct_cfg():
tokenizer = MagicMock()
dm = TextClassificationDataModule(tokenizer)
assert type(dm.cfg) is HFTransformerDataConfig
assert dm.tokenizer is tokenizer
| Algo and DSA/LeetCode-Solutions-master/Python/lowest-common-ancestor-of-a-binary-search-tree.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 73934 |
# Time: O(n)
# Space: O(1)
class Solution(object):
# @param {TreeNode} root
# @param {TreeNode} p
# @param {TreeNode} q
# @return {TreeNode}
def lowestCommonAncestor(self, root, p, q):
s, b = sorted([p.val, q.val])
while not s <= root.val <= b:
# Keep searching since root is outside of [s, b].
root = root.left if s <= root.val else root.right
# s <= root.val <= b.
return root
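Tiny harness for the solution above (TreeNode is the usual LeetCode-style node, defined here only for the example):
class TreeNode(object):
    def __init__(self, val):
        self.val, self.left, self.right = val, None, None

root = TreeNode(6)
root.left, root.right = TreeNode(2), TreeNode(8)
root.left.left, root.left.right = TreeNode(0), TreeNode(4)
print(Solution().lowestCommonAncestor(root, root.left.left, root.left.right).val)  # -> 2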
| corehq/blobs/migrate_metadata.py | akashkj/commcare-hq | 471 | 73949 |
from functools import partial
from itertools import groupby
from couchdbkit import ResourceNotFound
from corehq.apps.domain import SHARED_DOMAIN, UNKNOWN_DOMAIN
from corehq.blobs import CODES
from corehq.blobs.mixin import BlobHelper, BlobMetaRef
from corehq.blobs.models import BlobMigrationState, BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import ReindexAccessor
from corehq.util.doc_processor.sql import SqlDocumentProvider
import corehq.apps.accounting.models as acct
import corehq.apps.app_manager.models as apps
import corehq.apps.hqmedia.models as hqmedia
from corehq.apps.builds.models import CommCareBuild
from corehq.apps.case_importer.tracking.models import CaseUploadFileMeta, CaseUploadRecord
from corehq.apps.domain.models import Domain
from corehq.apps.export import models as exports
from corehq.apps.ota.models import DemoUserRestore
from corehq.apps.users.models import CommCareUser
import casexml.apps.case.models as cases
import couchforms.models as xform
class MultiDbMigrator(object):
def __init__(self, slug, couch_types, sql_reindexers):
self.slug = slug
self.couch_types = couch_types
self.sql_reindexers = sql_reindexers
def iter_migrators(self):
from . import migrate as mod
NoStateMigrator, SqlMigrator, BlobMetaMigrator = make_migrators(mod)
couch_migrator = partial(BlobMetaMigrator, blob_helper=couch_blob_helper)
def db_key(doc_type):
if isinstance(doc_type, tuple):
doc_type = doc_type[1]
return doc_type.get_db().dbname
for key, types in groupby(sorted(self.couch_types, key=db_key), key=db_key):
slug = "%s-%s" % (self.slug, key)
yield NoStateMigrator(slug, list(types), couch_migrator)
for rex in self.sql_reindexers:
slug = "%s-%s" % (self.slug, rex.model_class.__name__)
yield SqlMigrator(slug, rex(), BlobMetaMigrator)
def migrate(self, filename, *args, **kw):
def filen(n):
return None if filename is None else "{}.{}".format(filename, n)
migrated = 0
skipped = 0
for n, item in enumerate(self.iter_migrators()):
one_migrated, one_skipped = item.migrate(filen(n), *args, **kw)
migrated += one_migrated
skipped += one_skipped
print("\n")
if not skipped:
BlobMigrationState.objects.get_or_create(slug=self.slug)[0].save()
return migrated, skipped
def make_migrators(mod):
# defer class definitions to work around circular import
class BlobMetaMigrator(mod.BaseDocMigrator):
"""Migrate blob metadata to BlobMeta model"""
def __init__(self, *args, **kw):
super(BlobMetaMigrator, self).__init__(*args, **kw)
self.total_blobs = 0
def migrate(self, doc):
if not doc.get("external_blobs"):
return True
type_code = self.get_type_code(doc)
obj = self.blob_helper(doc, self.couchdb, type_code)
domain = obj.domain
if domain is None:
self.error(obj, {
"error": "unknown-domain",
"doc_type": obj.doc_type,
"doc_id": obj._id,
})
domain = UNKNOWN_DOMAIN
if getattr(obj, "_attachments", None):
self.error(obj, {
"error": "ignored-couch-attachments",
"doc_type": obj.doc_type,
"doc_id": obj._id,
"domain": obj.domain,
"attachments": obj._attachments,
})
with BlobMeta.get_cursor_for_partition_value(doc['_id']) as cursor:
for name, meta in obj.external_blobs.items():
if meta.blobmeta_id is not None:
# blobmeta already saved
continue
cursor.execute("""
INSERT INTO blobs_blobmeta (
domain,
type_code,
parent_id,
name,
key,
content_type,
content_length,
created_on
) VALUES (%s, %s, %s, %s, %s, %s, %s, CLOCK_TIMESTAMP())
ON CONFLICT (key) DO NOTHING
""", params=[
domain,
type_code,
doc["_id"],
name,
meta.key,
meta.content_type,
meta.content_length or 0,
])
self.total_blobs += 1
return True
def error(self, obj, doc):
print("Error: %s %r" % (doc["error"], obj))
super(BlobMetaMigrator, self).write_backup(doc)
class NoStateMigrator(mod.Migrator):
def write_migration_completed_state(self):
pass
class SqlMigrator(NoStateMigrator):
def __init__(self, slug, reindexer, doc_migrator_class):
types = [reindexer.model_class]
def doc_migrator(*args, **kw):
kw["blob_helper"] = reindexer.blob_helper
kw["get_type_code"] = reindexer.get_type_code
return doc_migrator_class(*args, **kw)
super(SqlMigrator, self).__init__(slug, types, doc_migrator)
self.reindexer = reindexer
def get_document_provider(self):
return SqlDocumentProvider(self.iteration_key, self.reindexer)
return NoStateMigrator, SqlMigrator, BlobMetaMigrator
class SqlBlobHelper(object):
"""Adapt a SQL model object to look like a BlobHelper
This is currently built on the assumption that the SQL model only
references a single blob, and the blob name is not used.
"""
def __init__(self, obj, key, domain, reindexer):
self.obj = obj
self.domain = domain
self.blobs = {"": BlobMetaRef(key=key, **reindexer.blob_kwargs(obj))}
self.external_blobs = self.blobs
def __repr__(self):
return "<%s %s domain=%s id=%s>" % (
type(self).__name__,
self.doc_type,
self.domain,
self._id,
)
@property
def _id(self):
# NOTE unlike couch documents, this is different from `doc["_id"]`,
# the value used to set `BlobMeta.parent_id`. This value should
only be used to identify the record in case of error.
return self.obj.id
@property
def doc_type(self):
return type(self.obj).__name__
def sql_blob_helper(key_attr):
def blob_helper(self, doc, *ignored):
"""This has the same signature as BlobHelper
:returns: Object having parts of BlobHelper interface needed
for blob migrations (currently only used by BlobMetaMigrator).
"""
obj = doc["_obj_not_json"]
domain = self.get_domain(obj)
return SqlBlobHelper(obj, getattr(obj, key_attr), domain, self)
return blob_helper
class PkReindexAccessor(ReindexAccessor):
@property
def id_field(self):
return 'id'
def get_doc(self, *args, **kw):
# only used for retries; BlobMetaMigrator doesn't retry
raise NotImplementedError
def doc_to_json(self, obj, id):
return {"_id": str(id), "_obj_not_json": obj, "external_blobs": True}
class CaseUploadFileMetaReindexAccessor(PkReindexAccessor):
model_class = CaseUploadFileMeta
blob_helper = sql_blob_helper("identifier")
def doc_to_json(self, obj):
return PkReindexAccessor.doc_to_json(self, obj, self.get_domain(obj))
@staticmethod
def get_type_code(doc):
return CODES.data_import
def get_domain(self, obj):
try:
return CaseUploadRecord.objects.get(upload_file_meta_id=obj.id).domain
except CaseUploadRecord.DoesNotExist:
return None
def blob_kwargs(self, obj):
return {"content_length": obj.length}
class DemoUserRestoreReindexAccessor(PkReindexAccessor):
model_class = DemoUserRestore
blob_helper = sql_blob_helper("restore_blob_id")
def doc_to_json(self, obj):
return PkReindexAccessor.doc_to_json(
self, obj, obj.demo_user_id or "DemoUserRestore")
@staticmethod
def get_type_code(doc):
return CODES.demo_user_restore
def get_domain(self, obj):
try:
return CommCareUser.get(obj.demo_user_id).domain
except ResourceNotFound:
return None
def blob_kwargs(self, obj):
return {"content_length": obj.content_length, "content_type": "text/xml"}
def couch_blob_helper(doc, *args, **kw):
obj = BlobHelper(doc, *args, **kw)
get_domain = DOMAIN_MAP.get(obj.doc_type)
if get_domain is not None:
assert not hasattr(obj, "domain"), obj
obj.domain = get_domain(doc)
elif not hasattr(obj, "domain"):
obj.domain = None # will trigger "unknown-domain" error
return obj
def get_shared_domain(doc):
return SHARED_DOMAIN
def get_invoice_domain(doc):
if doc.get("is_wire"):
try:
return acct.WireInvoice.objects.get(id=int(doc["invoice_id"])).domain
except acct.WireInvoice.DoesNotExist:
return None # trigger "unknown-domain" error
# customer invoice has no domain
return UNKNOWN_DOMAIN
DOMAIN_MAP = {
"InvoicePdf": get_invoice_domain,
"CommCareBuild": get_shared_domain,
"CommCareAudio": get_shared_domain,
"CommCareImage": get_shared_domain,
"CommCareVideo": get_shared_domain,
"CommCareMultimedia": get_shared_domain,
}
migrate_metadata = lambda: MultiDbMigrator("migrate_metadata",
couch_types=[
apps.Application,
apps.LinkedApplication,
apps.RemoteApp,
("Application-Deleted", apps.Application),
("RemoteApp-Deleted", apps.RemoteApp),
apps.SavedAppBuild,
CommCareBuild,
Domain,
acct.InvoicePdf,
hqmedia.CommCareAudio,
hqmedia.CommCareImage,
hqmedia.CommCareVideo,
hqmedia.CommCareMultimedia,
cases.CommCareCase,
('CommCareCase-deleted', cases.CommCareCase),
('CommCareCase-Deleted', cases.CommCareCase),
('CommCareCase-Deleted-Deleted', cases.CommCareCase),
exports.CaseExportInstance,
exports.FormExportInstance,
exports.SMSExportInstance,
],
sql_reindexers=[
CaseUploadFileMetaReindexAccessor,
DemoUserRestoreReindexAccessor,
],
)
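Sketch of how the factory above is driven (the log filename is illustrative and a configured CommCare HQ environment is assumed; additional migrate() arguments may be required depending on the base Migrator):
migrator = migrate_metadata()
migrated, skipped = migrator.migrate("blobmeta-migration-errors.log")
print("migrated=%s skipped=%s" % (migrated, skipped))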
| legacy/backends/orchestrator/aws/orchestrator_aws_backend.py | ParikhKadam/zenml | 1,275 | 73969 |
<reponame>ParikhKadam/zenml
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Orchestrator for simple AWS VM backend"""
import os
import time
from typing import Text, Dict, Any
from zenml.backends.orchestrator.aws import utils
from zenml.backends.orchestrator import OrchestratorBaseBackend
from zenml.repo import Repository
from zenml.standards import standard_keys as keys
from zenml.utils import path_utils
from zenml.constants import ZENML_BASE_IMAGE_NAME
from zenml.logger import get_logger
logger = get_logger(__name__)
EXTRACTED_TAR_DIR_NAME = 'zenml_working'
STAGING_AREA = 'staging'
TAR_PATH_ARG = 'tar_path'
class OrchestratorAWSBackend(OrchestratorBaseBackend):
"""
Orchestrates a pipeline on an AWS EC2 instance
"""
def __init__(self,
iam_role: Text,
instance_type: Text = 't2.micro',
instance_image: Text = 'ami-02e9f4e447e4cda79',
zenml_image: Text = None,
region: Text = None,
key_name: Text = None,
security_group: Text = None,
min_count: int = 1,
max_count: int = 1,
**kwargs):
"""
Base class for the orchestrator backend on AWS
:param iam_role: the name of the role created in AWS IAM
:param instance_type: the type of the EC2 instance, defaults to
t2.micro
:param instance_image: the image for the EC2 instance, defaults to the
public image: Deep Learning AMI (Amazon Linux 2) Version 39.0
:param zenml_image: refers to the image with ZenML
:param region: the AWS region to operate in
:param key_name: the name of the key to be used whilst creating the
instance on EC2
:param security_group: the name of a selected security group
:param min_count: the minimum number of instances, defaults to 1
:param max_count: the maximum number of instances, defaults to 1
"""
self.session = utils.setup_session()
self.region = utils.setup_region(region)
self.ec2_client = self.session.client('ec2')
self.ec2_resource = self.session.resource('ec2')
self.instance_type = instance_type
self.instance_image = instance_image
self.zenml_image = zenml_image
self.key_name = key_name
self.min_count = min_count
self.max_count = max_count
if security_group is not None:
self.security_group = [security_group]
else:
self.security_group = security_group
self.iam_role = {'Name': iam_role}
if zenml_image is None:
self.zenml_image = ZENML_BASE_IMAGE_NAME
else:
self.zenml_image = zenml_image
super(OrchestratorAWSBackend, self).__init__(
instance_type=self.instance_type,
instance_image=self.instance_image,
zenml_image=self.zenml_image,
region=self.region,
key_name=self.key_name,
min_count=self.min_count,
max_count=self.max_count,
security_group=self.security_group,
iam_role=self.iam_role,
**kwargs,
)
@staticmethod
def make_unique_name(name):
return f'{name}-{time.asctime()}'
def launch_instance(self, config):
startup = utils.get_startup_script(config,
self.region,
self.zenml_image)
args = {'ImageId': self.instance_image,
'InstanceType': self.instance_type,
'IamInstanceProfile': self.iam_role,
'MaxCount': self.max_count,
'MinCount': self.min_count,
'UserData': startup}
if self.security_group:
args['SecurityGroups'] = self.security_group
if self.key_name:
args['KeyName'] = self.key_name
return self.ec2_resource.create_instances(**args)
def run(self, config: Dict[Text, Any]):
# Extract the paths to create the tar
logger.info('Orchestrating pipeline on AWS..')
repo: Repository = Repository.get_instance()
repo_path = repo.path
config_dir = repo.zenml_config.config_dir
tar_file_name = \
f'{EXTRACTED_TAR_DIR_NAME}_{str(int(time.time()))}.tar.gz'
path_to_tar = os.path.join(config_dir, tar_file_name)
# Create tarfile but exclude .zenml folder if exists
path_utils.create_tarfile(repo_path, path_to_tar)
logger.info(f'Created tar of current repository at: {path_to_tar}')
# Upload tar to artifact store
store_path = config[keys.GlobalKeys.ARTIFACT_STORE]
store_staging_area = os.path.join(store_path, STAGING_AREA)
store_path_to_tar = os.path.join(store_staging_area, tar_file_name)
path_utils.copy(path_to_tar, store_path_to_tar)
logger.info(f'Copied tar to artifact store at: {store_path_to_tar}')
# Remove tar
path_utils.rm_dir(path_to_tar)
logger.info(f'Removed tar at: {path_to_tar}')
# Append path of tar in config orchestrator utils
config[keys.GlobalKeys.BACKEND][keys.BackendKeys.ARGS][
TAR_PATH_ARG] = store_path_to_tar
# Launch the instance
self.launch_instance(config)
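A construction sketch for the backend above (values are placeholders; note that __init__ immediately opens a boto3 session, so AWS credentials must be resolvable):
backend = OrchestratorAWSBackend(
    iam_role="zenml-ec2-role",          # hypothetical IAM role name
    instance_type="t2.micro",
    region="eu-central-1",
    key_name="my-keypair",              # optional SSH key pair
)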
| tests/utils.py | yezz123/authx | 141 | 74001 |
import contextlib
from datetime import datetime, timedelta
from typing import Iterable, Optional, Tuple, Union
import jwt
with open("tests/key/private_key", "rb") as f:
private_key = f.read()
with open("tests/key/public_key", "rb") as f:
public_key = f.read()
ACCESS_COOKIE_NAME = "access"
REFRESH_COOKIE_NAME = "refresh"
class User:
"""
Set up a user object with the given id, username and admin status.
"""
def __init__(self, id: int, username: str, admin: bool):
self.id = id
self.username = username
self.is_admin = admin
self.data = {"id": id, "username": username}
def mock_get_authenticated_user():
"""
Mock the get_authenticated_user function to return a user object.
Returns:
User: A user object.
"""
class User:
def __init__(self):
"""
Set up a user object with the given id, username and admin status.
"""
self.id = 2
self.username = "user"
self.is_admin = False
self.data = {"id": self.id, "username": self.username}
return User()
class MockDatabaseBackend:
"""
Mock database backend backed by an in-memory list of users.
"""
def __init__(self, database_name):
self._incr = 5
self._users = [
{
"id": 1,
"email": "<EMAIL>",
"username": "admin",
"password": "<PASSWORD>",
"active": True,
"confirmed": True,
"permissions": ["admin"],
},
{
"id": 2,
"email": "<EMAIL>",
"username": "user",
"password": "<PASSWORD>",
"active": True,
"confirmed": True,
"permissions": [],
},
{
"id": 3,
"email": "<EMAIL>",
"username": "anotheruser",
"password": "<PASSWORD>",
"active": True,
"confirmed": False,
"permissions": [],
},
{
"id": 4,
"email": "<EMAIL>",
"username": "inactiveuser",
"password": "<PASSWORD>",
"active": False,
"confirmed": True,
"permissions": [],
},
{
"id": 5,
"email": "<EMAIL>",
"username": "socialuser",
"provider": "google",
"sid": "8888",
"active": False,
"confirmed": True,
"permissions": [],
},
]
self._email_confirmations = []
def _increment_id(self) -> int:
self._incr += 1
return self._incr
def _get(self, field: str, value) -> Optional[dict]:
return next((item for item in self._users if item.get(field) == value), None)
async def get(self, id: int) -> Optional[dict]:
return self._get("id", id)
async def get_by_email(self, email: str) -> Optional[dict]:
return self._get("email", email)
async def get_by_username(self, username: str) -> Optional[dict]:
return self._get("username", username)
async def get_by_social(self, provider: str, sid: str) -> Optional[dict]:
return next(
(
item
for item in self._users
if item.get("provider") == provider and item.get("sid") == sid
),
None,
) # pragma: no cover
async def create(self, obj: dict) -> int:
id = self._increment_id()
obj["id"] = id
self._users.append(obj)
return id
async def update(self, id: int, obj: dict) -> bool:
for i, item in enumerate(self._users):
if item.get("id") == id:
self._users[i].update(obj)
return True
return False # pragma: no cover
async def delete(self, id: int) -> bool:
"""
Delete a user.
Args:
id (int): The user id to delete.
Returns:
bool: True if the user was deleted, False otherwise.
"""
for i, item in enumerate(self._users): # pragma: no cover
if item.get("id") == id: # pragma: no cover
del self._users[i] # pragma: no cover
return True # pragma: no cover
return False # pragma: no cover
async def count(self, query) -> int:
return 42 # pragma: no cover
async def request_email_confirmation(self, email: str, token_hash: str) -> None:
"""
Add a new email confirmation to the list.
Args:
email (str): The email address to confirm.
token_hash (str): The token hash to confirm.
Returns:
None
"""
for i, item in enumerate(self._email_confirmations):
if item.get("email") == email: # pragma: no cover
self._email_confirmations[i].update(
{"token": token_hash}
) # pragma: no cover
return None # pragma: no cover
self._email_confirmations.append({"email": email, "token": token_hash})
async def confirm_email(self, token_hash: str) -> bool:
"""
Confirm an email address.
Args:
token_hash (str): The token hash to confirm.
Returns:
bool: True if the email was confirmed, False otherwise.
"""
for item in self._email_confirmations:
if item.get("token") == token_hash:
user = self._get("email", item.get("email"))
await self.update(user.get("id"), {"confirmed": True})
return True
return False
async def get_blacklist(self) -> Iterable[dict]:
"""
Get the blacklist.
Returns:
Iterable[dict]: The blacklist.
"""
return [
item for item in self._users if not item.get("active")
] # pragma: no cover
async def search(self) -> Tuple[dict, int]:
return self._users, 1 # pragma: no cover
class MockCacheBackend:
"""
    In-memory mock of the cache backend.
"""
def __init__(self) -> None:
self._db = {}
async def get(self, key: str) -> Optional[str]:
return self._db.get(key)
async def delete(self, key: str) -> None:
with contextlib.suppress(KeyError):
self._db.pop(key)
async def keys(self, match: str) -> Iterable[str]:
return {} # pragma: no cover
async def set(self, key: str, value: Union[str, bytes, int], expire: int) -> None:
self._db[key] = value
async def setnx(self, key: str, value: Union[str, bytes, int], expire: int) -> None:
v = self._db.get(key) # pragma: no cover
if v is None: # pragma: no cover
self._db[key] = value # pragma: no cover
async def incr(self, key: str) -> str:
v = self._db.get(key)
if v is not None:
self._db[key] = int(v) + 1
async def dispatch_action(self, channel: str, action: str, payload: str) -> None:
print("Dispatching action") # pragma: no cover
print(action) # pragma: no cover
print(payload) # pragma: no cover
class MockAuthBackend:
@classmethod
def create(
cls,
jwt_algorithm: str,
private_key: bytes,
public_key: bytes,
access_expiration: int,
refresh_expiration: int,
) -> None:
pass # pragma: no cover
def __init__(
self,
jwt_algorithm: str,
private_key: bytes,
public_key: bytes,
access_expiration: int = 60 * 5,
refresh_expiration: int = 60 * 10,
):
self._jwt_algorithm = jwt_algorithm
self._private_key = private_key
self._public_key = public_key
self._access_expiration = access_expiration
self._refresh_expiration = refresh_expiration
self._private_key = private_key
self._public_key = public_key
async def decode_token(self, token: str, leeway: int = 0) -> Optional[dict]:
"""
Decode a JWT token.
Args:
token (str): The JWT token to decode.
leeway (int, optional): The leeway to use when decoding the token. Defaults to 0.
Returns:
Optional[dict]: The decoded token.
"""
if token:
            return jwt.decode(token, key=self._public_key, algorithms=["RS256"])
return None # pragma: no cover
def _create_token(
self, payload: dict, token_type: str, expiration_delta: Optional[int] = None
) -> str:
iat = datetime.utcnow()
if expiration_delta:
exp = datetime.utcnow() + timedelta(seconds=expiration_delta)
else:
exp = datetime.utcnow() + timedelta(seconds=60) # pragma: no cover
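        # Note: the dict "|=" merge below requires Python 3.9+; on older versions use payload.update(...).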
payload |= {"iat": iat, "exp": exp, "type": token_type}
token = jwt.encode(payload, self._private_key, algorithm=self._jwt_algorithm)
if isinstance(token, bytes):
# For PyJWT <= 1.7.1
return token.decode("utf-8") # pragma: no cover
# For PyJWT >= 2.0.0a1
return token
def create_access_token(self, payload: dict) -> str:
return self._create_token(payload, "access", 60 * 5)
def create_refresh_token(self, payload: dict) -> str:
return self._create_token(payload, "refresh", 60 * 10)
def create_tokens(self, payload: dict) -> dict:
access = self.create_access_token(payload)
refresh = self.create_refresh_token(payload)
return {"access": access, "refresh": refresh}
class MockEmailClient:
def __init__(self, *args):
pass
async def send_confirmation_email(self, *args):
"""
Send a confirmation email.
"""
async def send_forgot_password_email(self, *args):
"""
Send a forgot password email.
"""
def mock_verify_password(password: str, db_password: str) -> bool:
    return password == db_password
def mock_admin_required():
pass
|
indra/assemblers/cx/hub_layout.py
|
johnbachman/belpy
| 136 |
74002
|
"""This module allows adding a semantic hub layout to NDEx CX networkx. This
is useful when a network is centered around a single hub node. The
layout generated here allocates different classes of nodes into segments
around the hub and then gives them random coordinates within that segment."""
import json
import math
import random
import networkx
from collections import defaultdict
def get_aspect(cx, aspect_name):
"""Return an aspect given the name of the aspect"""
if isinstance(cx, dict):
return cx.get(aspect_name)
for entry in cx:
if list(entry.keys())[0] == aspect_name:
return entry[aspect_name]
def edge_type_to_class(edge_type):
"""Return the edge class for layout purposes based on the edge type"""
edge_type = edge_type.lower()
if 'amount' in edge_type:
return 'amount'
if edge_type in ('activation', 'inhibition'):
return 'activity'
if edge_type == 'complex':
return 'complex'
else:
return 'modification'
def classify_nodes(graph, hub: int):
"""Classify each node based on its type and relationship to the hub."""
node_stats = defaultdict(lambda: defaultdict(list))
for u, v, data in graph.edges(data=True):
# This means the node is downstream of the hub
if hub == u:
h, o = u, v
if data['i'] != 'complex':
node_stats[o]['up'].append(-1)
else:
node_stats[o]['up'].append(0)
# This means the node is upstream of the hub
elif hub == v:
h, o = v, u
if data['i'] != 'complex':
node_stats[o]['up'].append(1)
else:
node_stats[o]['up'].append(0)
else:
continue
node_stats[o]['interaction'].append(edge_type_to_class(data['i']))
node_classes = {}
for node_id, stats in node_stats.items():
up = max(set(stats['up']), key=stats['up'].count)
# Special case: if up is not 0 then we should exclude complexes
# from the edge_type states so that we don't end up with
# (-1, complex, ...) or (1, complex, ...) as the node class
interactions = [i for i in stats['interaction'] if
not (up != 0 and i == 'complex')]
edge_type = max(set(interactions), key=interactions.count)
node_type = graph.nodes[node_id]['type']
node_classes[node_id] = (up, edge_type, node_type)
return node_classes
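# Worked example (hypothetical values): a protein node that is upstream of the hub via an
# 'activation' edge would be classified as (1, 'activity', 'protein'), which maps to
# segment 4 in get_quadrant_from_class below; a node connected only by 'complex' edges
# gets up == 0 and is placed in segment 0 or 7 at random.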
def get_attributes(aspect, id):
"""Return the attributes pointing to a given ID in a given aspect."""
attributes = {}
for entry in aspect:
if entry['po'] == id:
attributes[entry['n']] = entry['v']
return attributes
def cx_to_networkx(cx):
"""Return a MultiDiGraph representation of a CX network."""
graph = networkx.MultiDiGraph()
for node_entry in get_aspect(cx, 'nodes'):
id = node_entry['@id']
attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id)
attrs['n'] = node_entry['n']
graph.add_node(id, **attrs)
for edge_entry in get_aspect(cx, 'edges'):
id = edge_entry['@id']
attrs = get_attributes(get_aspect(cx, 'edgeAttributes'), id)
attrs['i'] = edge_entry['i']
graph.add_edge(edge_entry['s'], edge_entry['t'], key=id, **attrs)
return graph
def get_quadrant_from_class(node_class):
"""Return the ID of the segment of the plane corresponding to a class."""
up, edge_type, _ = node_class
if up == 0:
return 0 if random.random() < 0.5 else 7
mappings = {(-1, 'modification'): 1,
(-1, 'amount'): 2,
(-1, 'activity'): 3,
(1, 'activity'): 4,
(1, 'amount'): 5,
(1, 'modification'): 6}
return mappings[(up, edge_type)]
def get_coordinates(node_class):
"""Generate coordinates for a node in a given class."""
quadrant_size = (2 * math.pi / 8.0)
quadrant = get_quadrant_from_class(node_class)
begin_angle = quadrant_size * quadrant
r = 200 + 800*random.random()
alpha = begin_angle + random.random() * quadrant_size
x = r * math.cos(alpha)
y = r * math.sin(alpha)
return x, y
def get_layout_aspect(hub, node_classes):
"""Get the full layout aspect with coordinates for each node."""
aspect = [{'node': hub, 'x': 0.0, 'y': 0.0}]
for node, node_class in node_classes.items():
if node == hub:
continue
x, y = get_coordinates(node_class)
aspect.append({'node': node, 'x': x, 'y': y})
return aspect
def get_node_by_name(graph, name):
"""Return a node ID given its name."""
for id, attrs in graph.nodes(data=True):
if attrs['n'] == name:
return id
def add_semantic_hub_layout(cx, hub: str):
"""Attach a layout aspect to a CX network given a hub node."""
graph = cx_to_networkx(cx)
hub_node = get_node_by_name(graph, hub)
node_classes = classify_nodes(graph, hub_node)
layout_aspect = get_layout_aspect(hub_node, node_classes)
cx['cartesianLayout'] = layout_aspect
if __name__ == '__main__':
with open('CDK13.cx', 'r') as fh:
cx = json.load(fh)
add_semantic_hub_layout(cx, 'CDK13')
|
setup.py
|
saurabh1002/vdbfusion
| 119 |
74005
|
<reponame>saurabh1002/vdbfusion
# -*- coding: utf-8 -*-
import ctypes
import multiprocessing
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
cfg = "Debug" if debug else "Release"
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
# Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
# EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
# from Python.
cmake_args = [
f"-DBUILD_PYTHON_BINDINGS=ON",
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}",
f"-DPYTHON_EXECUTABLE={sys.executable}",
f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm
]
build_args = []
# Adding CMake arguments set as environment variable
# (needed e.g. to build for ARM OSx on conda-forge)
if "CMAKE_ARGS" in os.environ:
cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
# Single config generators are handled "normally"
single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"]
build_args += ["--config", cfg]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# passing --global-option="build_ext" --global-option="-j8" to pip seems not to work,
# and launches 2 time the entire build process. Therefore if nothing has specified the
# parallel jobs so far, we are going to hack it here. Not the best design, but pip just
# doesn't seem to care about this flag, CMake 3.12+ only.
self.parallel = multiprocessing.cpu_count() if not self.parallel else self.parallel
build_args += [f"-j{self.parallel}"]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp)
subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=self.build_temp)
setup(
packages=find_packages("src"),
package_dir={"": "src"},
ext_modules=[CMakeExtension("vdbfusion.pybind.vdbfusion_pybind")],
cmdclass={"build_ext": CMakeBuild},
)
|
lib/device.py
|
keke185321/webcam-pulse-detector
| 1,411 |
74046
|
import cv2, time
#TODO: fix ipcam
#import urllib2, base64
import numpy as np
class ipCamera(object):
def __init__(self,url, user = None, password = None):
self.url = url
auth_encoded = base64.encodestring('%s:%s' % (user, password))[:-1]
self.req = urllib2.Request(self.url)
self.req.add_header('Authorization', 'Basic %s' % auth_encoded)
def get_frame(self):
response = urllib2.urlopen(self.req)
img_array = np.asarray(bytearray(response.read()), dtype=np.uint8)
frame = cv2.imdecode(img_array, 1)
return frame
class Camera(object):
def __init__(self, camera = 0):
self.cam = cv2.VideoCapture(camera)
self.valid = False
try:
resp = self.cam.read()
self.shape = resp[1].shape
self.valid = True
        except Exception:
            self.shape = None
def get_frame(self):
if self.valid:
_,frame = self.cam.read()
else:
frame = np.ones((480,640,3), dtype=np.uint8)
col = (0,256,256)
cv2.putText(frame, "(Error: Camera not accessible)",
(65,220), cv2.FONT_HERSHEY_PLAIN, 2, col)
return frame
def release(self):
self.cam.release()
|
12.一键导出微信读书的书籍和笔记/pyqt_gui.py
|
shengqiangzhang/examples-of-web-crawlers
| 12,023 |
74052
|
<gh_stars>1000+
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@project: PyCharm
@file: pyqt_gui.py
@author: <NAME>
@time: 2020/4/11 21:14
@mail: <EMAIL>
"""
from wereader import *
from excel_func import *
import sys
import os
import time
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineProfile
# Request headers
HEADERS = {
'Host': 'i.weread.qq.com',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
}
# WeRead user id
USER_VID = 0
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.DomainCookies = {}
        self.setWindowTitle('微信读书助手')  # set the window title
        self.resize(900, 600)  # set the window size
        self.setWindowFlags(Qt.WindowMinimizeButtonHint)  # disable the maximize button
        self.setFixedSize(self.width(), self.height())  # disable window resizing
        url = 'https://weread.qq.com/#login'  # target login page
        self.browser = QWebEngineView()  # instantiate the embedded browser
        self.profile = QWebEngineProfile.defaultProfile()
        self.profile.cookieStore().deleteAllCookies()  # delete all cookies on first run
        self.profile.cookieStore().cookieAdded.connect(self.onCookieAdd)  # call self.onCookieAdd() whenever a cookie is added
        self.browser.loadFinished.connect(self.onLoadFinished)  # call self.onLoadFinished() when the page finishes loading
        self.browser.load(QUrl(url))  # load the page
        self.setCentralWidget(self.browser)  # set the central widget
    # Page-load-finished handler
def onLoadFinished(self):
global USER_VID
global HEADERS
        # Collect the cookies gathered so far
cookies = ['{}={};'.format(key, value) for key,value in self.DomainCookies.items()]
cookies = ' '.join(cookies)
        # Add the Cookie header
HEADERS.update(Cookie=cookies)
        # Check whether the WeRead login succeeded
if login_success(HEADERS):
print('登录微信读书成功!')
            # Get the user's user_vid
if 'wr_vid' in self.DomainCookies.keys():
USER_VID = self.DomainCookies['wr_vid']
print('用户id:{}'.format(USER_VID))
                # Inject a JavaScript snippet to interact with the page
self.browser.page().runJavaScript('alert("登录成功!")')
                # Close the whole Qt window
self.close()
else:
print('请扫描二维码登录微信读书...')
    # Cookie-added handler
def onCookieAdd(self, cookie):
if 'weread.qq.com' in cookie.domain():
name = cookie.name().data().decode('utf-8')
value = cookie.value().data().decode('utf-8')
if name not in self.DomainCookies:
self.DomainCookies.update({name: value})
    # Window-close handler
def closeEvent(self, event):
"""
重写closeEvent方法,实现窗体关闭时执行一些代码
:param event: close()触发的事件
:return: None
"""
self.setWindowTitle('退出中……') # 设置窗口标题
# 关闭软件软件之前删除所有cookies
# 此代码不可删除,否则下次打开软件会自动加载浏览器中旧的cookies
self.profile.cookieStore().deleteAllCookies()
if __name__=='__main__':
    app = QApplication(sys.argv)  # create the application
    window = MainWindow()  # create the main window
    window.show()  # show the window
    app.exec_()  # run the application and process events
    # Create the output directories
data_dir = './导出资料/'
note_dir = data_dir + '我的笔记/'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(note_dir):
os.makedirs(note_dir)
    books = get_bookshelf(USER_VID, HEADERS)  # fetch the books on the bookshelf
books_finish_read = books['finishReadBooks']
books_finish_read = [[book.bookId, book.title, book.author, book.cover, book.intro, book.category] for book in books_finish_read]
books_recent_read = books['recentBooks']
books_recent_read = [[book.bookId, book.title, book.author, book.cover, book.intro, book.category] for book in books_recent_read]
books_all = books['allBooks']
books_all = [[book.bookId, book.title, book.author, book.cover, book.intro, book.category] for book in books_all]
    write_excel_xls(data_dir + '我的书架.xls', ['已读完的书籍', '最近阅读的书籍', '所有的书籍'], [["ID", "标题", "作者", "封面", "简介", "所属目录"], ])  # write the excel file
    write_excel_xls_append(data_dir + '我的书架.xls', '已读完的书籍', books_finish_read)  # append to the excel file
    write_excel_xls_append(data_dir + '我的书架.xls', '最近阅读的书籍', books_recent_read)  # append to the excel file
    write_excel_xls_append(data_dir + '我的书架.xls', '所有的书籍', books_all)  # append to the excel file
    # Fetch the notes of every book on the bookshelf
for index, book in enumerate(books_finish_read):
book_id = book[0]
book_name = book[1]
notes = get_bookmarklist(book[0], HEADERS)
with open(note_dir + book_name + '.txt', 'w', encoding='utf-8') as f:
f.write(notes)
print('导出笔记 {} ({}/{})'.format(note_dir + book_name + '.txt', index+1, len(books_finish_read)))
|
alipay/aop/api/response/AlipayFundTransGroupfundsFundbillsQueryResponse.py
|
snowxmas/alipay-sdk-python-all
| 213 |
74061
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.GroupFundBill import GroupFundBill
class AlipayFundTransGroupfundsFundbillsQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayFundTransGroupfundsFundbillsQueryResponse, self).__init__()
self._batch_status = None
self._current_fund_bill = None
self._fund_bills = None
self._timeout = None
@property
def batch_status(self):
return self._batch_status
@batch_status.setter
def batch_status(self, value):
self._batch_status = value
@property
def current_fund_bill(self):
return self._current_fund_bill
@current_fund_bill.setter
def current_fund_bill(self, value):
if isinstance(value, GroupFundBill):
self._current_fund_bill = value
else:
self._current_fund_bill = GroupFundBill.from_alipay_dict(value)
@property
def fund_bills(self):
return self._fund_bills
@fund_bills.setter
def fund_bills(self, value):
if isinstance(value, list):
self._fund_bills = list()
for i in value:
if isinstance(i, GroupFundBill):
self._fund_bills.append(i)
else:
self._fund_bills.append(GroupFundBill.from_alipay_dict(i))
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
def parse_response_content(self, response_content):
response = super(AlipayFundTransGroupfundsFundbillsQueryResponse, self).parse_response_content(response_content)
if 'batch_status' in response:
self.batch_status = response['batch_status']
if 'current_fund_bill' in response:
self.current_fund_bill = response['current_fund_bill']
if 'fund_bills' in response:
self.fund_bills = response['fund_bills']
if 'timeout' in response:
self.timeout = response['timeout']
|
insomniac/globals.py
|
shifenis/Insomniac
| 533 |
74075
|
# These constants can be set by the external UI-layer process, don't change them manually
is_ui_process = False
execution_id = ''
task_id = ''
executable_name = 'insomniac'
do_location_permission_dialog_checks = True # no need in these checks if location permission is denied beforehand
def callback(profile_name):
pass
hardban_detected_callback = callback
softban_detected_callback = callback
def is_insomniac():
return execution_id == ''
|
examples/shapes_from_glsl/defaults.py
|
szabolcsdombi/zengl
| 116 |
74080
|
defaults = '''
const vec4 light = vec4(4.0, 3.0, 10.0, 0.0);
const vec4 eye = vec4(4.0, 3.0, 2.0, 0.0);
const mat4 mvp = mat4(
-0.8147971034049988, -0.7172931432723999, -0.7429299354553223, -0.7427813410758972,
1.0863960981369019, -0.5379698276519775, -0.5571974515914917, -0.5570859909057617,
0.0, 2.2415409088134766, -0.37146496772766113, -0.3713906705379486,
0.0, 0.0, 5.186222076416016, 5.385164737701416
);
'''
|
scripts/new/math_util.py
|
TensorSwarm/TensorSwarm
| 116 |
74083
|
# Copyright (c) 2017 OpenAI (http://openai.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import scipy.signal
def discount(x, gamma):
"""
computes discounted sums along 0th dimension of x.
inputs
------
x: ndarray
gamma: float
outputs
-------
y: ndarray with same shape as x, satisfying
y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
where k = len(x) - t - 1
"""
assert x.ndim >= 1
return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
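# Illustrative example (not part of the original module):
#   discount(np.array([1.0, 2.0, 3.0]), 0.5)
#   -> [1 + 0.5*2 + 0.25*3, 2 + 0.5*3, 3] == [2.75, 3.5, 3.0]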
def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary
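# Quick sanity check (illustrative): explained_variance(y, y) == 1.0, while predicting the
# constant mean of y gives 0.0, matching the interpretation described in the docstring.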
def explained_variance_2d(ypred, y):
assert y.ndim == 2 and ypred.ndim == 2
vary = np.var(y, axis=0)
    out = 1 - np.var(y - ypred, axis=0) / vary
out[vary < 1e-10] = 0
return out
def ncc(ypred, y):
return np.corrcoef(ypred, y)[1,0]
def flatten_arrays(arrs):
return np.concatenate([arr.flat for arr in arrs])
def unflatten_vector(vec, shapes):
i=0
arrs = []
for shape in shapes:
size = np.prod(shape)
arr = vec[i:i+size].reshape(shape)
arrs.append(arr)
i += size
return arrs
def discount_with_boundaries(X, New, gamma):
"""
X: 2d array of floats, time x features
New: 2d array of bools, indicating when a new episode has started
"""
Y = np.zeros_like(X)
T = X.shape[0]
Y[T-1] = X[T-1]
for t in range(T-2, -1, -1):
Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
return Y
def test_discount_with_boundaries():
gamma=0.9
x = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
starts = [1.0, 0.0, 0.0, 1.0]
y = discount_with_boundaries(x, starts, gamma)
assert np.allclose(y, [
1 + gamma * 2 + gamma**2 * 3,
2 + gamma * 3,
3,
4
])
|
locations/spiders/bayshore_healthcare.py
|
davidchiles/alltheplaces
| 297 |
74100
|
<filename>locations/spiders/bayshore_healthcare.py
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class BayshoreHealthcareSpider(scrapy.Spider):
name = "bayshore_healthcare"
item_attributes = {'brand': "Bayshore Healthcare"}
allowed_domains = ['bayshore.ca']
def start_requests(self):
url = 'https://www.bayshore.ca/wp-admin/admin-ajax.php?action=location_finder&language=en'
headers = {
'origin': 'https://www.bayshore.ca',
'Referer': 'https://www.bayshore.ca/locations/'
}
formdata = {
'search_type': 'location',
}
yield scrapy.http.FormRequest(
url,
self.parse,
method='POST',
headers=headers,
formdata=formdata
)
def parse(self, response):
stores = json.loads(response.body)
for store in stores["result"]["entries"]:
full_addr = store["address"]
addr = re.search(r'^(.*?)<', full_addr).groups()[0]
city = re.search(r'>(.*?),', full_addr).groups()[0]
state = re.search(r',\s([A-Z]{2})\s', full_addr).groups()[0]
postal = re.search(r',\s[A-Z]{2}\s(.*)$', full_addr).groups()[0]
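            # The regexes above assume the address string looks roughly like
            # "123 Main St<br>Toronto, ON M5V 1A1" (street before the first tag, then
            # "City, PROVINCE POSTAL"); this format is inferred from the patterns, not documented.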
coords = store["latlng"].split(",")
lat = coords[0]
lng = coords[1]
properties = {
'ref': store["id"],
'name': store["name"],
'addr_full': addr,
'city': city,
'state': state,
'postcode': postal,
'country': "CA",
'lat': lat,
'lon': lng,
'phone': store["local_telephone"],
'website': "https://www.bayshore.ca" + store["url"]
}
yield GeojsonPointItem(**properties)
|
mne/datasets/hf_sef/__init__.py
|
fmamashli/mne-python
| 1,953 |
74116
|
"""HF-SEF dataset."""
from .hf_sef import data_path
|
contrib/dash_app/chat_res.py
|
wakafengfan/CDial-GPT
| 906 |
74149
|
from interact import *
def eva_model():
parser = ArgumentParser()
parser.add_argument('--gpt2', action='store_true', help="use gpt2")
parser.add_argument("--model_checkpoint", type=str, default="./models/", help="Path, url or short name of the model")
parser.add_argument("--max_history", type=int, default=2, help="Number of previous utterances to keep in history")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=30, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
if args.model_checkpoint == "":
logging.error("Checkpoint needed!")
return
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
tokenizer_class = BertTokenizer
model_class = OpenAIGPTLMHeadModel if not args.gpt2 else GPT2LMHeadModel
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint, do_lower_case=True)
model = model_class.from_pretrained(args.model_checkpoint)
model.to(args.device)
model.eval()
return model,tokenizer,args
history = []
model,tokenizer,args = eva_model()
def chat_response(raw_text):
global history
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
raw_text = " ".join(list(raw_text.replace(" ", "")))
history.append(tokenize(raw_text))
with torch.no_grad():
out_ids = sample_sequence(history, tokenizer, model, args)
history.append(out_ids)
history = history[-(2 * args.max_history + 1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
#print(out_text)
return out_text
print(0)
|
tests/models/DeepFM_test.py
|
HCMY/DeepCTR
| 6,192 |
74154
|
<reponame>HCMY/DeepCTR
import pytest
import tensorflow as tf
from deepctr.estimator import DeepFMEstimator
from deepctr.models import DeepFM
from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \
Estimator_TEST_TF1
@pytest.mark.parametrize(
'hidden_size,sparse_feature_num',
[((2,), 1), #
((3,), 2)
] # (True, (32,), 3), (False, (32,), 1)
)
def test_DeepFM(hidden_size, sparse_feature_num):
model_name = "DeepFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DeepFM(feature_columns, feature_columns, dnn_hidden_units=hidden_size, dnn_dropout=0.5)
check_model(model, model_name, x, y)
@pytest.mark.parametrize(
'hidden_size,sparse_feature_num',
[
((3,), 2)
] # (True, (32,), 3), (False, (32,), 1)
)
def test_DeepFMEstimator(hidden_size, sparse_feature_num):
if not Estimator_TEST_TF1 and tf.__version__ < "2.2.0":
return
sample_size = SAMPLE_SIZE
linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size,
sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num,
classification=False)
model = DeepFMEstimator(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=hidden_size, dnn_dropout=0.5,
task="regression")
check_estimator(model, input_fn)
if __name__ == "__main__":
pass
|
venv/lib/python3.9/site-packages/pendulum/interval.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
| 224 |
74164
|
<filename>venv/lib/python3.9/site-packages/pendulum/interval.py
# -*- coding: utf-8 -*-
from datetime import timedelta
from .mixins.interval import (
WordableIntervalMixin
)
from .constants import (
SECONDS_PER_DAY, SECONDS_PER_HOUR,
SECONDS_PER_MINUTE
)
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
# Based on the reference implementation for divmod_near
# in Objects/longobject.c.
q, r = divmod(a, b)
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
# positive, 2 * r < b if b negative.
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
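# Examples of the round-half-to-even behaviour (illustrative):
#   _divide_and_round(7, 2) == 4   # 3.5 rounds to the even integer 4
#   _divide_and_round(5, 2) == 2   # 2.5 rounds to the even integer 2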
class BaseInterval(timedelta):
"""
Base class for all inherited interval classes.
"""
_y = None
_m = None
_w = None
_d = None
_h = None
_i = None
_s = None
_invert = None
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
self = timedelta.__new__(
cls, days, seconds, microseconds,
milliseconds, minutes, hours, weeks
)
# Intuitive normalization
total = self.total_seconds()
m = 1
if total < 0:
m = -1
self._microseconds = round(total % m * 1e6)
self._seconds = abs(int(total)) % SECONDS_PER_DAY * m
self._days = abs(int(total)) // SECONDS_PER_DAY * m
return self
def total_minutes(self):
return self.total_seconds() / SECONDS_PER_MINUTE
def total_hours(self):
return self.total_seconds() / SECONDS_PER_HOUR
def total_days(self):
return self.total_seconds() / SECONDS_PER_DAY
def total_weeks(self):
return self.total_days() / 7
@property
def weeks(self):
return abs(self.days) // 7 * self._sign(self._days)
@property
def days(self):
return self._days
@property
def remaining_days(self):
return abs(self._days) % 7 * self._sign(self._days)
@property
def hours(self):
if self._h is None:
seconds = self._seconds
self._h = 0
if abs(seconds) >= 3600:
self._h = (abs(seconds) // 3600 % 24) * self._sign(seconds)
return self._h
@property
def minutes(self):
if self._i is None:
seconds = self._seconds
self._i = 0
if abs(seconds) >= 60:
self._i = (abs(seconds) // 60 % 60) * self._sign(seconds)
return self._i
@property
def seconds(self):
return self._seconds
@property
def remaining_seconds(self):
if self._s is None:
self._s = self._seconds
self._s = abs(self._s) % 60 * self._sign(self._s)
return self._s
@property
def microseconds(self):
return self._microseconds
@property
def invert(self):
if self._invert is None:
self._invert = self.total_seconds() < 0
return self._invert
def in_weeks(self):
return int(self.total_weeks())
def in_days(self):
return int(self.total_days())
def in_hours(self):
return int(self.total_hours())
def in_minutes(self):
return int(self.total_minutes())
def in_seconds(self):
return int(self.total_seconds())
def _sign(self, value):
if value < 0:
return -1
return 1
def as_timedelta(self):
"""
Return the interval as a native timedelta.
:rtype: timedelta
"""
return timedelta(seconds=self.total_seconds())
class Interval(WordableIntervalMixin, BaseInterval):
"""
Replacement for the standard timedelta class.
Provides several improvements over the base class.
"""
@classmethod
def instance(cls, delta):
"""
        Creates an Interval from a timedelta
:type delta: timedelta
:rtype: Interval
"""
return cls(days=delta.days, seconds=delta.seconds, microseconds=delta.microseconds)
def __add__(self, other):
if isinstance(other, timedelta):
return self.__class__(seconds=self.total_seconds() + other.total_seconds())
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
return self.__class__(seconds=self.total_seconds() - other.total_seconds())
return NotImplemented
def __neg__(self):
return self.__class__(seconds=-self.total_seconds())
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __mul__(self, other):
if isinstance(other, int):
return self.__class__(seconds=self.total_seconds() * other)
if isinstance(other, float):
usec = self._to_microseconds()
a, b = other.as_integer_ratio()
return self.__class__(0, 0, _divide_and_round(usec * a, b))
return NotImplemented
__rmul__ = __mul__
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return self.__class__(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return self.__class__(0, 0, _divide_and_round(usec, other))
if isinstance(other, float):
a, b = other.as_integer_ratio()
return self.__class__(0, 0, _divide_and_round(b * usec, a))
__div__ = __floordiv__
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return self.__class__(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, self.__class__(0, 0, r)
return NotImplemented
Interval.min = Interval(-999999999)
Interval.max = Interval(days=999999999, hours=23,
minutes=59, seconds=59,
microseconds=999999)
Interval.resolution = Interval(microseconds=1)
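# Illustrative usage (not part of the original module):
#   it = Interval(minutes=90)
#   it.hours, it.minutes, it.in_minutes()   # -> (1, 30, 90)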
class AbsoluteInterval(Interval):
"""
Interval that expresses a time difference in absolute values.
"""
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
self = timedelta.__new__(
cls, days, seconds, microseconds,
milliseconds, minutes, hours, weeks
)
# We need to compute the total_seconds() value
# on a native timedelta object
delta = timedelta(
days, seconds, microseconds,
milliseconds, minutes, hours, weeks
)
# Intuitive normalization
self._total = delta.total_seconds()
total = abs(self._total)
self._microseconds = round(total % 1 * 1e6)
self._seconds = int(total) % SECONDS_PER_DAY
self._days = int(total) // SECONDS_PER_DAY
return self
def total_seconds(self):
return abs(self._total)
@property
def invert(self):
if self._invert is None:
self._invert = self._total < 0
return self._invert
|
setup.py
|
IntheGrass/citeomatic_learning
| 162 |
74167
|
<filename>setup.py
#!/usr/bin/python
import setuptools
setuptools.setup(
name='citeomatic',
version='0.01',
url='http://github.com/allenai/s2-research',
packages=setuptools.find_packages(),
install_requires=[
],
tests_require=[
],
zip_safe=False,
test_suite='py.test',
entry_points='',
pyrobuf_modules=['citeomatic.proto'],
)
|
tests/python/test_mpm88.py
|
mzmzm/taichi
| 11,699 |
74173
|
import os
import pytest
import taichi as ti
from taichi import approx
def run_mpm88_test():
dim = 2
N = 64
n_particles = N * N
n_grid = 128
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 2.0e-4
p_vol = (dx * 0.5)**2
p_rho = 1
p_mass = p_vol * p_rho
E = 400
x = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles)
v = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles)
C = ti.Matrix.field(dim, dim, dtype=ti.f32, shape=n_particles)
J = ti.field(dtype=ti.f32, shape=n_particles)
grid_v = ti.Vector.field(dim, dtype=ti.f32, shape=(n_grid, n_grid))
grid_m = ti.field(dtype=ti.f32, shape=(n_grid, n_grid))
@ti.kernel
def substep():
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
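            # Quadratic B-spline interpolation weights (the standard MPM kernel)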
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E
affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
offset = ti.Vector([i, j])
dpos = (offset.cast(float) - fx) * dx
weight = w[i][0] * w[j][1]
grid_v[base + offset].atomic_add(
weight * (p_mass * v[p] + affine @ dpos))
grid_m[base + offset].atomic_add(weight * p_mass)
for i, j in grid_m:
if grid_m[i, j] > 0:
bound = 3
inv_m = 1 / grid_m[i, j]
grid_v[i, j] = inv_m * grid_v[i, j]
grid_v[i, j][1] -= dt * 9.8
if i < bound and grid_v[i, j][0] < 0:
grid_v[i, j][0] = 0
if i > n_grid - bound and grid_v[i, j][0] > 0:
grid_v[i, j][0] = 0
if j < bound and grid_v[i, j][1] < 0:
grid_v[i, j][1] = 0
if j > n_grid - bound and grid_v[i, j][1] > 0:
grid_v[i, j][1] = 0
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
w = [
0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2
]
new_v = ti.Vector.zero(ti.f32, 2)
new_C = ti.Matrix.zero(ti.f32, 2, 2)
for i in ti.static(range(3)):
for j in ti.static(range(3)):
dpos = ti.Vector([i, j]).cast(float) - fx
g_v = grid_v[base + ti.Vector([i, j])]
weight = w[i][0] * w[j][1]
new_v += weight * g_v
new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
v[p] = new_v
x[p] += dt * v[p]
J[p] *= 1 + dt * new_C.trace()
C[p] = new_C
# gui = ti._lib.core.GUI("MPM88", ti.core_veci(512, 512))
# canvas = gui.get_canvas()
for i in range(n_particles):
x[i] = [i % N / N * 0.4 + 0.2, i / N / N * 0.4 + 0.05]
v[i] = [0, -3]
J[i] = 1
for frame in range(10):
for s in range(50):
grid_v.fill([0, 0])
grid_m.fill(0)
substep()
pos = x.to_numpy()
pos[:, 1] *= 2
regression = [
0.31722742,
0.15826741,
0.10224003,
0.07810827,
]
for i in range(4):
assert (pos**(i + 1)).mean() == approx(regression[i], rel=1e-2)
@ti.test()
def test_mpm88():
run_mpm88_test()
def _is_appveyor():
# AppVeyor adds `APPVEYOR=True` ('true' on Ubuntu)
# https://www.appveyor.com/docs/environment-variables/
return os.getenv('APPVEYOR', '').lower() == 'true'
#TODO: Remove exclude of ti.metal
@pytest.mark.skipif(_is_appveyor(), reason='Stuck on Appveyor.')
@ti.test(require=ti.extension.async_mode, exclude=[ti.metal], async_mode=True)
def test_mpm88_async():
# It seems that all async tests on Appveyor run super slow. For example,
# on Appveyor, 10+ tests have passed during the execution of
# test_fuse_dense_x2y2z. Maybe thread synchronizations are expensive?
run_mpm88_test()
@ti.test(arch=[ti.cpu, ti.cuda, ti.opengl])
def test_mpm88_numpy_and_ndarray():
import numpy as np
dim = 2
N = 64
n_particles = N * N
n_grid = 128
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 2.0e-4
p_vol = (dx * 0.5)**2
p_rho = 1
p_mass = p_vol * p_rho
E = 400
@ti.kernel
def substep(x: ti.any_arr(element_dim=1), v: ti.any_arr(element_dim=1),
C: ti.any_arr(element_dim=2), J: ti.any_arr(),
grid_v: ti.any_arr(element_dim=1), grid_m: ti.any_arr()):
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E
affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
offset = ti.Vector([i, j])
dpos = (offset.cast(float) - fx) * dx
weight = w[i][0] * w[j][1]
grid_v[base + offset].atomic_add(
weight * (p_mass * v[p] + affine @ dpos))
grid_m[base + offset].atomic_add(weight * p_mass)
for i, j in grid_m:
if grid_m[i, j] > 0:
bound = 3
inv_m = 1 / grid_m[i, j]
grid_v[i, j] = inv_m * grid_v[i, j]
grid_v[i, j][1] -= dt * 9.8
if i < bound and grid_v[i, j][0] < 0:
grid_v[i, j][0] = 0
if i > n_grid - bound and grid_v[i, j][0] > 0:
grid_v[i, j][0] = 0
if j < bound and grid_v[i, j][1] < 0:
grid_v[i, j][1] = 0
if j > n_grid - bound and grid_v[i, j][1] > 0:
grid_v[i, j][1] = 0
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
w = [
0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2
]
new_v = ti.Vector.zero(ti.f32, 2)
new_C = ti.Matrix.zero(ti.f32, 2, 2)
for i in ti.static(range(3)):
for j in ti.static(range(3)):
dpos = ti.Vector([i, j]).cast(float) - fx
g_v = grid_v[base + ti.Vector([i, j])]
weight = w[i][0] * w[j][1]
new_v += weight * g_v
new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
v[p] = new_v
x[p] += dt * v[p]
J[p] *= 1 + dt * new_C.trace()
C[p] = new_C
def run_test(x, v, C, J, grid_v, grid_m):
for i in range(n_particles):
x[i] = [i % N / N * 0.4 + 0.2, i / N / N * 0.4 + 0.05]
v[i] = [0, -3]
J[i] = 1
for frame in range(10):
for s in range(50):
grid_v.fill(0)
grid_m.fill(0)
substep(x, v, C, J, grid_v, grid_m)
pos = x if isinstance(x, np.ndarray) else x.to_numpy()
pos[:, 1] *= 2
regression = [
0.31722742,
0.15826741,
0.10224003,
0.07810827,
]
for i in range(4):
assert (pos**(i + 1)).mean() == approx(regression[i], rel=1e-2)
def test_numpy():
x = np.zeros((n_particles, dim), dtype=np.float32)
v = np.zeros((n_particles, dim), dtype=np.float32)
C = np.zeros((n_particles, dim, dim), dtype=np.float32)
J = np.zeros(n_particles, dtype=np.float32)
grid_v = np.zeros((n_grid, n_grid, dim), dtype=np.float32)
grid_m = np.zeros((n_grid, n_grid), dtype=np.float32)
run_test(x, v, C, J, grid_v, grid_m)
def test_ndarray():
x = ti.Vector.ndarray(dim, ti.f32, n_particles)
v = ti.Vector.ndarray(dim, ti.f32, n_particles)
C = ti.Matrix.ndarray(dim, dim, ti.f32, n_particles)
J = ti.ndarray(ti.f32, n_particles)
grid_v = ti.Vector.ndarray(dim, ti.f32, (n_grid, n_grid))
grid_m = ti.ndarray(ti.f32, (n_grid, n_grid))
run_test(x, v, C, J, grid_v, grid_m)
test_numpy()
test_ndarray()
|
tensorflow/python/debug/lib/debug_events_writer_test.py
|
EricRemmerswaal/tensorflow
| 190,993 |
74182
|
<reponame>EricRemmerswaal/tensorflow<filename>tensorflow/python/debug/lib/debug_events_writer_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
import glob
import json as json_lib
import os
import re
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
class DebugEventsWriterTest(dumping_callback_test_lib.DumpingCallbackTestBase,
parameterized.TestCase):
def testMultiThreadedConstructorCallWorks(self):
def init_writer():
debug_events_writer.DebugEventsWriter(self.dump_root, self.tfdbg_run_id)
num_threads = 4
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=init_writer)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify that there is only one debug event file of each type.
metadata_paths = glob.glob(os.path.join(self.dump_root, "*.metadata"))
self.assertLen(metadata_paths, 1)
source_files_paths = glob.glob(
os.path.join(self.dump_root, "*.source_files"))
self.assertLen(source_files_paths, 1)
stack_frames_paths = glob.glob(
os.path.join(self.dump_root, "*.stack_frames"))
self.assertLen(stack_frames_paths, 1)
graphs_paths = glob.glob(os.path.join(self.dump_root, "*.graphs"))
self.assertLen(graphs_paths, 1)
self._readAndCheckMetadataFile()
def testWriteSourceFilesAndStackFrames(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_protos = 10
for i in range(num_protos):
source_file = debug_event_pb2.SourceFile()
source_file.file_path = "/home/tf2user/main.py"
source_file.host_name = "machine.cluster"
source_file.lines.append("print(%d)" % i)
writer.WriteSourceFile(source_file)
stack_frame = debug_event_pb2.StackFrameWithId()
stack_frame.id = "stack_%d" % i
stack_frame.file_line_col.file_index = i * 10
writer.WriteStackFrameWithId(stack_frame)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.source_file
for item in reader.source_files_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].file_path, "/home/tf2user/main.py")
self.assertEqual(actuals[i].host_name, "machine.cluster")
self.assertEqual(actuals[i].lines, ["print(%d)" % i])
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].id, "stack_%d" % i)
self.assertEqual(actuals[i].file_line_col.file_index, i * 10)
def testWriteGraphOpCreationAndDebuggedGraphs(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_op_creations = 10
for i in range(num_op_creations):
graph_op_creation = debug_event_pb2.GraphOpCreation()
graph_op_creation.op_type = "Conv2D"
graph_op_creation.op_name = "Conv2D_%d" % i
writer.WriteGraphOpCreation(graph_op_creation)
debugged_graph = debug_event_pb2.DebuggedGraph()
debugged_graph.graph_id = "deadbeaf"
debugged_graph.graph_name = "MyGraph1"
writer.WriteDebuggedGraph(debugged_graph)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugEventsReader(self.dump_root)
actuals = list(item.debug_event for item in reader.graphs_iterator())
self.assertLen(actuals, num_op_creations + 1)
for i in range(num_op_creations):
self.assertEqual(actuals[i].graph_op_creation.op_type, "Conv2D")
self.assertEqual(actuals[i].graph_op_creation.op_name, "Conv2D_%d" % i)
self.assertEqual(actuals[num_op_creations].debugged_graph.graph_id,
"deadbeaf")
def testConcurrentWritesToNonExecutionFilesWorks(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
source_file_state = {"counter": 0, "lock": threading.Lock()}
def writer_source_file():
source_file = debug_event_pb2.SourceFile()
with source_file_state["lock"]:
source_file.file_path = "/home/tf2user/file_%d.py" % source_file_state[
"counter"]
source_file_state["counter"] += 1
writer.WriteSourceFile(source_file)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
stack_frame_state = {"counter": 0, "lock": threading.Lock()}
def write_stack_frame():
stack_frame = debug_event_pb2.StackFrameWithId()
with stack_frame_state["lock"]:
stack_frame.id = "stack_frame_%d" % stack_frame_state["counter"]
stack_frame_state["counter"] += 1
writer.WriteStackFrameWithId(stack_frame)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
graph_op_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_op_creation():
graph_op_creation = debug_event_pb2.GraphOpCreation()
with graph_op_state["lock"]:
graph_op_creation.op_name = "Op%d" % graph_op_state["counter"]
graph_op_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
num_threads = 9
threads = []
for i in range(num_threads):
if i % 3 == 0:
target = writer_source_file
elif i % 3 == 1:
target = write_stack_frame
else:
target = write_graph_op_creation
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify the content of the .source_files file.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
actuals = list(item.debug_event.source_file for item in source_files_iter)
file_paths = sorted([actual.file_path for actual in actuals])
self.assertEqual(file_paths, [
"/home/tf2user/file_0.py", "/home/tf2user/file_1.py",
"/home/tf2user/file_2.py"
])
# Verify the content of the .stack_frames file.
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
stack_frame_ids = sorted([actual.id for actual in actuals])
self.assertEqual(stack_frame_ids,
["stack_frame_0", "stack_frame_1", "stack_frame_2"])
# Verify the content of the .graphs file.
actuals = list(item.debug_event.graph_op_creation
for item in reader.graphs_iterator())
graph_op_names = sorted([actual.op_name for actual in actuals])
self.assertEqual(graph_op_names, ["Op0", "Op1", "Op2"])
def testWriteAndReadMetadata(self):
t0 = time.time()
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
self.assertIsInstance(reader.starting_wall_time(), float)
self.assertGreaterEqual(reader.starting_wall_time(), t0)
self.assertEqual(reader.tensorflow_version(), versions.__version__)
self.assertTrue(reader.tfdbg_run_id())
def testWriteExecutionEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
reader.update()
self.assertFalse(reader.executions())
writer.FlushExecutionFiles()
reader.update()
executions = reader.executions()
for i, execution in enumerate(executions):
self.assertEqual(
execution.op_type,
"OpType%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteExecutionEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, num_execution_events)
for i, execution in enumerate(executions):
self.assertEqual(execution.op_type, "OpType%d" % i)
def testWriteGraphExecutionTraceEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(reader.graph_execution_traces_iterators()[0])
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
self.assertEmpty(actuals)
writer.FlushExecutionFiles()
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterators()[0])
self.assertLen(actuals, debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE)
for i in range(debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE):
self.assertEqual(
actuals[i].op_name,
"Op%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteGraphExecutionTraceEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterators()[0])
self.assertLen(actuals, num_execution_events)
for i in range(num_execution_events):
self.assertEqual(actuals[i].op_name, "Op%d" % i)
def testConcurrentWritesToExecutionFiles(self):
circular_buffer_size = 5
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
execution_state = {"counter": 0, "lock": threading.Lock()}
def write_execution():
execution = debug_event_pb2.Execution()
with execution_state["lock"]:
execution.op_type = "OpType%d" % execution_state["counter"]
execution_state["counter"] += 1
writer.WriteExecution(execution)
graph_execution_trace_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_execution_trace():
with graph_execution_trace_state["lock"]:
op_name = "Op%d" % graph_execution_trace_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
graph_execution_trace_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
writer.WriteGraphExecutionTrace(trace)
threads = []
for i in range(circular_buffer_size * 4):
if i % 2 == 0:
target = write_execution
else:
target = write_graph_execution_trace
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
# Verify the content of the .execution file.
executions = reader.executions()
executed_op_types = [execution.op_type for execution in executions]
self.assertLen(executed_op_types, circular_buffer_size)
self.assertLen(executed_op_types, len(set(executed_op_types)))
# Verify the content of the .graph_execution_traces file.
op_names = [trace.op_name for trace in reader.graph_execution_traces()]
self.assertLen(op_names, circular_buffer_size)
self.assertLen(op_names, len(set(op_names)))
def testConcurrentSourceFileRandomReads(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id)
for i in range(100):
source_file = debug_event_pb2.SourceFile(
host_name="localhost", file_path="/tmp/file_%d.py" % i)
source_file.lines.append("# File %d" % i)
writer.WriteSourceFile(source_file)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
lines = [None] * 100
def read_job_1():
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
def read_job_2():
for i in range(99, 49, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(lines[i], ["# File %d" % i])
def testConcurrentExecutionUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % writer_state["counter"]
writer_state["counter"] += 1
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new Execution protos.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
exec_digests = reader.executions(digest=True)
if exec_digests:
exec_0 = reader.read_execution(exec_digests[0])
self.assertEqual(exec_0.op_type, "OpType0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentExecutionRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
for i in range(100):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
executions = [None] * 100
def read_job_1():
execution_digests = reader.executions(digest=True)
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
def read_job_2():
execution_digests = reader.executions(digest=True)
for i in range(99, 49, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(executions[i].op_type, "OpType%d" % i)
def testConcurrentGraphExecutionTraceUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
op_name = "Op%d" % writer_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer_state["counter"] += 1
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new GraphExecutionTraces.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
digests = reader.graph_execution_traces(digest=True)
if digests:
trace_0 = reader.read_graph_execution_trace(digests[0])
self.assertEqual(trace_0.op_name, "Op0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentGraphExecutionTraceRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
self.tfdbg_run_id,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(100):
op_name = "Op%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
traces = [None] * 100
def read_job_1():
digests = reader.graph_execution_traces(digest=True)
for i in range(49, -1, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
def read_job_2():
digests = reader.graph_execution_traces(digest=True)
for i in range(99, 49, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(traces[i].op_name, "Op%d" % i)
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingExecutions(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)
for i in range(5):
execution = debug_event_pb2.Execution(op_type="OpType%d" % i)
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions(begin=begin, end=end)
self.assertLen(executions, expected_end - expected_begin)
self.assertEqual(executions[0].op_type, "OpType%d" % expected_begin)
self.assertEqual(executions[-1].op_type, "OpType%d" % (expected_end - 1))
@parameterized.named_parameters(
("Begin1End3", 1, 3, 1, 3),
("Begin0End3", 0, 3, 0, 3),
("Begin0EndNeg1", 0, -1, 0, 4),
("BeginNoneEnd3", None, 3, 0, 3),
("Begin2EndNone", 2, None, 2, 5),
("BeginNoneEndNone", None, None, 0, 5),
)
def testRangeReadingGraphExecutionTraces(self, begin, end, expected_begin,
expected_end):
writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, circular_buffer_size=-1)
debugged_graph = debug_event_pb2.DebuggedGraph(
graph_id="graph1", graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(5):
op_name = "Op_%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
traces = reader.graph_execution_traces(begin=begin, end=end)
self.assertLen(traces, expected_end - expected_begin)
self.assertEqual(traces[0].op_name, "Op_%d" % expected_begin)
self.assertEqual(traces[-1].op_name, "Op_%d" % (expected_end - 1))
class MultiSetReaderTest(dumping_callback_test_lib.DumpingCallbackTestBase):
"""Test for DebugDataReader for multiple file sets under a dump root."""
def testReadingTwoFileSetsWithTheSameDumpRootSucceeds(self):
# To simulate a multi-host data dump, we first generate file sets in two
# different directories, with the same tfdbg_run_id, and then combine them.
tfdbg_run_id = "foo"
for i in range(2):
writer = debug_events_writer.DebugEventsWriter(
os.path.join(self.dump_root, str(i)),
tfdbg_run_id,
circular_buffer_size=-1)
if i == 0:
debugged_graph = debug_event_pb2.DebuggedGraph(
graph_id="graph1", graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
op_name = "Op_0"
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
op_name = "Op_1"
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
for _ in range(10):
trace = debug_event_pb2.GraphExecutionTrace(
op_name="Op_%d" % i, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# Move all files from the subdirectory /1 to subdirectory /0.
dump_root_0 = os.path.join(self.dump_root, "0")
src_paths = glob.glob(os.path.join(self.dump_root, "1", "*"))
for src_path in src_paths:
dst_path = os.path.join(
dump_root_0,
# Rename the file set to avoid file name collision.
re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path)))
os.rename(src_path, dst_path)
with debug_events_reader.DebugDataReader(dump_root_0) as reader:
reader.update()
# Verify the content of the .graph_execution_traces file.
trace_digests = reader.graph_execution_traces(digest=True)
self.assertLen(trace_digests, 20)
      for i in range(10):
        trace = reader.read_graph_execution_trace(trace_digests[i])
        self.assertEqual(trace.op_name, "Op_0")
      for i in range(10):
        trace = reader.read_graph_execution_trace(trace_digests[i + 10])
        self.assertEqual(trace.op_name, "Op_1")
  def testReadingTwoFileSetsWithDifferentRunIdsLeadsToError(self):
# To simulate a multi-host data dump, we first generate file sets in two
# different directories, with different tfdbg_run_ids, and then combine
# them.
for i in range(2):
writer = debug_events_writer.DebugEventsWriter(
os.path.join(self.dump_root, str(i)),
"run_id_%d" % i,
circular_buffer_size=-1)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# Move all files from the subdirectory /1 to subdirectory /0.
dump_root_0 = os.path.join(self.dump_root, "0")
src_paths = glob.glob(os.path.join(self.dump_root, "1", "*"))
for src_path in src_paths:
dst_path = os.path.join(
dump_root_0,
# Rename the file set to avoid file name collision.
re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path)))
os.rename(src_path, dst_path)
with self.assertRaisesRegex(ValueError,
r"Found multiple \(2\) tfdbg2 runs"):
debug_events_reader.DebugDataReader(dump_root_0)
class DataObjectsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def jsonRoundTripCheck(self, obj):
self.assertEqual(
json_lib.dumps(json_lib.loads(json_lib.dumps(obj)), sort_keys=True),
json_lib.dumps(obj, sort_keys=True))
def testExecutionDigestWithNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=None)
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], None)
def testExecutionDigestWithTwoOutputsToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357, 2468])
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357, 2468))
def testExecutionNoGraphNoInputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=None,
output_tensor_ids=[2468],
debug_tensor_values=([1, 0],))
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertIsNone(json["graph_id"])
self.assertIsNone(json["input_tensor_ids"])
self.assertEqual(json["output_tensor_ids"], (2468,))
self.assertEqual(json["debug_tensor_values"], ([1, 0],))
  def testExecutionWithGraphAndInputsButNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=None,
debug_tensor_values=None)
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.FULL_HEALTH)
self.assertEqual(json["graph_id"], "abcd")
self.assertEqual(json["input_tensor_ids"], (13, 37))
self.assertIsNone(json["output_tensor_ids"])
self.assertIsNone(json["debug_tensor_values"])
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
def testExecutionWithNoOutputTensorsReturnsZeroForNumOutputs(
self, output_tensor_ids):
execution = debug_events_reader.Execution(
debug_events_reader.ExecutionDigest(1234, 5678, "FooOp"),
"localhost", ("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=output_tensor_ids,
debug_tensor_values=None)
self.assertEqual(execution.num_outputs, 0)
  def testDebuggedDeviceToJson(self):
debugged_device = debug_events_reader.DebuggedDevice("/TPU:3", 4)
self.assertEqual(debugged_device.to_json(), {
"device_name": "/TPU:3",
"device_id": 4,
})
  def testDebuggedGraphToJsonWithoutNameAndInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
None,
"b1c2",
outer_graph_id=None,
)
self.assertEqual(
debugged_graph.to_json(), {
"name": None,
"graph_id": "b1c2",
"outer_graph_id": None,
"inner_graph_ids": [],
})
  def testDebuggedGraphToJsonWithNameAndInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
"loss_function",
"b1c2",
outer_graph_id="a0b1",
)
debugged_graph.add_inner_graph_id("c2d3")
debugged_graph.add_inner_graph_id("c2d3e4")
self.assertEqual(
debugged_graph.to_json(), {
"name": "loss_function",
"graph_id": "b1c2",
"outer_graph_id": "a0b1",
"inner_graph_ids": ["c2d3", "c2d3e4"],
})
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
  def testGraphOpDigestWithNoOutputsReturnsNumOutputsZero(
self, output_tensor_ids):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2",
output_tensor_ids,
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
self.assertEqual(op_creation_digest.num_outputs, 0)
def testGraphOpCreationDigestNoInputNoDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertIsNone(json["input_names"])
self.assertIsNone(json["device_name"])
def testGraphOpCreationDigestWithInputsAndDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=["Bar_1", "Qux_2"],
device_name="/device:GPU:0")
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertEqual(json["input_names"], ("Bar_1", "Qux_2"))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceDigestToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
json = trace_digest.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
def testGraphExecutionTraceWithTensorDebugValueAndDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[3, 1], device_name="/device:GPU:0")
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertEqual(json["debug_tensor_value"], (3, 1))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceNoTensorDebugValueNoDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.NO_TENSOR,
debug_tensor_value=None, device_name=None)
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.NO_TENSOR)
self.assertIsNone(json["debug_tensor_value"])
self.assertIsNone(json["device_name"])
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
tests/hooks/test_itl.py
|
KevinMusgrave/pytorch-adapt
| 131 |
74196
|
<gh_stars>100-1000
import unittest
import torch
from pytorch_adapt.hooks import ISTLossHook
from pytorch_adapt.layers import ISTLoss
from .utils import assertRequiresGrad, get_models_and_data
class TestITL(unittest.TestCase):
def test_ist_loss_hook(self):
torch.manual_seed(334)
h = ISTLossHook()
(
G,
_,
_,
src_imgs,
_,
target_imgs,
src_domain,
target_domain,
) = get_models_and_data()
outputs, losses = h(locals())
self.assertTrue(G.count == 2)
assertRequiresGrad(self, outputs)
outputs, losses2 = h({**locals(), **outputs})
assertRequiresGrad(self, outputs)
self.assertTrue(G.count == 2)
self.assertTrue(losses == losses2)
src_features = G(src_imgs)
target_features = G(target_imgs)
loss_fn = ISTLoss()
self.assertTrue(
losses["ist_loss"]
== loss_fn(
torch.cat([src_features, target_features], dim=0),
torch.cat([src_domain, target_domain], dim=0),
)
)
|
scripts/deployment/deploy_multisig_keyholders.py
|
JohnAllerdyce/Sovryn-smart-contracts
| 108 |
74200
|
from brownie import *
import json
def main():
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
# configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet" or thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
else:
raise Exception("network not supported")
if thisNetwork == "rsk-mainnet":
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
elif thisNetwork == "testnet":
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
contracts = json.load(configFile)
timelockOwnerAddress = contracts['timelockOwner']
    multiSigKeyHolders = acct.deploy(MultiSigKeyHolders)
multiSigKeyHolders.transferOwnership(timelockOwnerAddress)
|
djangosaml2/cache.py
|
chander/djangosaml2
| 5,079 |
74202
|
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saml2.cache import Cache
class DjangoSessionCacheAdapter(dict):
"""A cache of things that are stored in the Django Session"""
key_prefix = '_saml2'
def __init__(self, django_session, key_suffix):
self.session = django_session
self.key = self.key_prefix + key_suffix
super(DjangoSessionCacheAdapter, self).__init__(self._get_objects())
def _get_objects(self):
return self.session.get(self.key, {})
def _set_objects(self, objects):
self.session[self.key] = objects
def sync(self):
# Changes in inner objects do not cause session invalidation
# https://docs.djangoproject.com/en/1.9/topics/http/sessions/#when-sessions-are-saved
#add objects to session
self._set_objects(dict(self))
#invalidate session
self.session.modified = True
class OutstandingQueriesCache(object):
"""Handles the queries that have been sent to the IdP and have not
been replied yet.
"""
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session,
'_outstanding_queries')
def outstanding_queries(self):
return self._db._get_objects()
def set(self, saml2_session_id, came_from):
self._db[saml2_session_id] = came_from
self._db.sync()
def delete(self, saml2_session_id):
if saml2_session_id in self._db:
del self._db[saml2_session_id]
self._db.sync()
class IdentityCache(Cache):
"""Handles information about the users that have been succesfully
logged in.
This information is useful because when the user logs out we must
know where does he come from in order to notify such IdP/AA.
The current implementation stores this information in the Django session.
"""
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session, '_identities')
self._sync = True
class StateCache(DjangoSessionCacheAdapter):
"""Store state information that is needed to associate a logout
request with its response.
"""
def __init__(self, django_session):
super(StateCache, self).__init__(django_session, '_state')
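# A minimal sketch with a stand-in session object (not a real Django session; the
# module-level saml2 import still has to resolve) showing how OutstandingQueriesCache
# round-trips data through the session-backed adapter defined above.
if __name__ == '__main__':
    class _FakeSession(dict):
        """Dict plus the `modified` flag, i.e. the only session API used here."""
        modified = False
    demo_session = _FakeSession()
    queries = OutstandingQueriesCache(demo_session)
    queries.set('id-1234', '/next/')
    assert demo_session['_saml2_outstanding_queries'] == {'id-1234': '/next/'}
    queries.delete('id-1234')
    assert queries.outstanding_queries() == {}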
|
datasets/gutenberg_time/gutenberg_time.py
|
WojciechKusa/datasets
| 10,608 |
74205
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recognizing the flow of time in a story is a crucial aspect of understanding it. Prior work related to time has primarily focused on identifying temporal expressions or relative sequencing of events, but here we propose computationally annotating each line of a book with wall clock times, even in the absence of explicit time-descriptive phrases. To do so, we construct a data set of hourly time phrases from 52,183 fictional books."""
import csv
import os
import datasets
_CITATION = """\
@misc{kim2020time,
title={What time is it? Temporal Analysis of Novels},
author={<NAME> and <NAME> and <NAME>},
year={2020},
eprint={2011.04124},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
A clean data resource containing all explicit time references in a dataset of 52,183 novels whose full text is available via Project Gutenberg.
"""
_HOMEPAGE = "https://github.com/allenkim/what-time-is-it"
_LICENSE = "[More Information needed]"
# The HuggingFace dataset library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {
"gutenberg": "https://github.com/TevenLeScao/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true",
}
class GutenbergTime(datasets.GeneratorBasedBuilder):
"""Novel extracts with time-of-the-day information"""
VERSION = datasets.Version("1.1.3")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="gutenberg", description="Data pulled from the Gutenberg project"),
]
def _info(self):
features = datasets.Features(
{
"guten_id": datasets.Value("string"),
"hour_reference": datasets.Value("string"),
"time_phrase": datasets.Value("string"),
"is_ambiguous": datasets.Value("bool_"),
"time_pos_start": datasets.Value("int64"),
"time_pos_end": datasets.Value("int64"),
"tok_context": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
my_urls = _URLs[self.config.name]
data = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data, "gutenberg_time_phrases.csv"),
"split": "train",
},
)
]
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf8") as f:
data = csv.reader(f)
next(data)
for id_, row in enumerate(data):
yield id_, {
"guten_id": row[0],
"hour_reference": row[1],
"time_phrase": row[2],
"is_ambiguous": row[3],
"time_pos_start": row[4],
"time_pos_end": row[5],
"tok_context": row[6],
}
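# A hedged usage sketch: once this builder script is registered (the dataset name
# "gutenberg_time" and the config "gutenberg" are taken from the definitions above),
# it can be loaded with the standard `datasets` API. Guarded so that importing the
# builder stays side-effect free; the first run needs network access for the download.
if __name__ == "__main__":
    demo_split = datasets.load_dataset("gutenberg_time", "gutenberg", split="train")
    print(demo_split[0])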
|
tensorflow_io/python/experimental/numpy_dataset_ops.py
|
lgeiger/io
| 558 |
74208
|
<reponame>lgeiger/io
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NumpyIODataset"""
import numpy as np
import tensorflow as tf
from tensorflow_io.python.ops import core_ops
class NumpyIODataset(tf.data.Dataset):
"""NumpyIODataset"""
def __init__(self, a, internal=True):
"""NumpyIODataset."""
with tf.name_scope("NumpyIODataset"):
assert internal
entries = a
def p(entry):
address, _ = entry.__array_interface__["data"]
shape = entry.shape
dtype = tf.as_dtype(entry.dtype)
return address, "", "", shape, dtype
flatten = tf.nest.flatten(entries)
assert all([entry.shape[0] == flatten[0].shape[0] for entry in flatten])
params = [p(entry) for entry in flatten]
def f(start, stop):
return tf.nest.pack_sequence_as(
entries,
[
core_ops.io_numpy_read(
address=address,
filename=filename,
array=array,
shape=shape,
start=start,
stop=stop,
dtype=dtype,
)
for address, filename, array, shape, dtype in params
],
)
step = 1024
total = tf.constant(flatten[0].shape[0], tf.int64)
indices_start = tf.data.Dataset.range(0, total, step)
indices_stop = indices_start.skip(1).concatenate(
tf.data.Dataset.from_tensor_slices([total])
)
dataset = tf.data.Dataset.zip((indices_start, indices_stop))
dataset = dataset.map(f)
dataset = dataset.unbatch()
self._dataset = dataset
self._holder = [np.array(entry, copy=False) for entry in flatten]
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
class NumpyFileIODataset(tf.data.Dataset):
"""NumpyFileIODataset"""
def __init__(self, filename, spec=None, internal=True):
"""NumpyFileIODataset."""
with tf.name_scope("NumpyFileIODataset"):
assert internal
if tf.executing_eagerly():
arrays, shapes, dtypes = core_ops.io_numpy_info(filename=filename)
arrays = tf.unstack(arrays)
shapes = tf.unstack(shapes)
dtypes = tf.unstack(dtypes)
dtypes = [tf.as_dtype(dtype.numpy()) for dtype in dtypes]
entries = list(zip(shapes, dtypes, arrays))
entries = [
tf.TensorSpec(shape, dtype, array)
for (shape, dtype, array) in entries
]
indices = None
if all([e.numpy().decode().startswith("arr_") for e in arrays]):
try:
indices = [int(e.numpy()[4:]) for e in arrays]
except ValueError:
pass
if indices is not None:
values = list(indices)
values.sort()
if not all([k == v for k, v in enumerate(values)]):
indices = None
                # if indices are contiguous (0..n-1), construct a tuple; otherwise a dict.
if indices is not None:
entries = dict(zip(indices, entries))
entries = tuple([entries[index] for index in sorted(indices)])
else:
indices = [index.numpy().decode() for index in tf.unstack(arrays)]
entries = dict(zip(indices, entries))
flatten = tf.nest.flatten(entries)
shapes = [entry.shape for entry in flatten]
assert all([shape[0] == shapes[0][0] for shape in shapes])
else:
assert spec is not None
if isinstance(spec, tuple):
entries = tuple(
[
tf.TensorSpec(
None,
(v if isinstance(v, tf.dtypes.DType) else v.dtype),
"arr_{}".format(i),
)
for i, v in enumerate(spec)
]
)
else:
entries = {
k: tf.TensorSpec(
None, (v if isinstance(v, tf.dtypes.DType) else v.dtype), k
)
for k, v in spec.items()
}
flatten = tf.nest.flatten(entries)
def shape_f(entry):
shape, _ = core_ops.io_numpy_spec(
filename=filename, array=entry.name
)
return shape
shapes = [shape_f(entry) for entry in flatten]
def p(entry, shape):
return 0, filename, entry.name, shape, entry.dtype
params = [p(entry, shape) for entry, shape in zip(flatten, shapes)]
def f(start, stop):
return tf.nest.pack_sequence_as(
entries,
[
core_ops.io_numpy_read(
address=address,
filename=filename,
array=array,
shape=shape,
start=start,
stop=stop,
dtype=dtype,
)
for address, filename, array, shape, dtype in params
],
)
step = 1024
total = tf.cast(shapes[0][0], tf.int64)
indices_start = tf.data.Dataset.range(0, total, step)
indices_stop = indices_start.skip(1).concatenate(
tf.data.Dataset.from_tensor_slices([total])
)
dataset = tf.data.Dataset.zip((indices_start, indices_stop))
dataset = dataset.map(f)
dataset = dataset.unbatch()
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
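# A hedged sketch (plain tf.data only, no tensorflow_io kernels) of the windowing
# pattern shared by both datasets above: emit [start, stop) index pairs of width
# `step`, map each pair to one batch, then unbatch back to single elements.
# `_demo_chunked_pattern` is illustrative only and not part of the public API.
def _demo_chunked_pattern(array, step=4):
    total = int(array.shape[0])
    starts = tf.data.Dataset.range(0, total, step)
    stops = starts.skip(1).concatenate(
        tf.data.Dataset.from_tensor_slices(tf.constant([total], tf.int64))
    )
    windows = tf.data.Dataset.zip((starts, stops))
    # Stand-in for core_ops.io_numpy_read: slice the in-memory array per window.
    tensor = tf.convert_to_tensor(array)
    dataset = windows.map(lambda start, stop: tensor[start:stop])
    return dataset.unbatch()
# Example: list(_demo_chunked_pattern(np.arange(10))) yields the scalars 0..9.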
|
www/test/0.9.2/test/commands/fetch/server.py
|
reedspool/_hyperscript
| 684 |
74230
|
<filename>www/test/0.9.2/test/commands/fetch/server.py<gh_stars>100-1000
#!/usr/bin/env python3
from flask import Flask, request, make_response
from time import sleep
app = Flask(__name__)
@app.route('/respond')
def hello_world():
time_to_sleep = int(request.args.get('time')) / 1000
sleep(time_to_sleep)
resp = make_response('Response from Flask')
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
# main driver function
if __name__ == '__main__':
app.run()
|
5_RemoteDesktop/server.py
|
fuliyuan/kivy-
| 301 |
74236
|
#!/usr/bin/env python
import ctypes
from flask import Flask, request, send_file
from PIL import ImageGrab
from StringIO import StringIO
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms646260%28v=vs.85%29.aspx
MOUSEEVENTF_LEFTDOWN = 2
MOUSEEVENTF_LEFTUP = 4
app = Flask(__name__)
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/desktop.jpeg')
def desktop():
screen = ImageGrab.grab()
buf = StringIO()
screen.save(buf, 'JPEG', quality=75)
buf.seek(0)
return send_file(buf, mimetype='image/jpeg')
@app.route('/click')
def click():
try:
x = int(request.args.get('x'))
y = int(request.args.get('y'))
except:
return 'error'
user32 = ctypes.windll.user32
user32.SetCursorPos(x, y)
user32.mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
user32.mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
return 'done'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=7080, debug=True)
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/viewport_array.py
|
ShujaKhalid/deep-rl
| 210 |
74238
|
<filename>deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/viewport_array.py
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_viewport_array'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_viewport_array',error_checker=_errors._error_checker)
GL_DEPTH_RANGE=_C('GL_DEPTH_RANGE',0x0B70)
GL_FIRST_VERTEX_CONVENTION=_C('GL_FIRST_VERTEX_CONVENTION',0x8E4D)
GL_LAST_VERTEX_CONVENTION=_C('GL_LAST_VERTEX_CONVENTION',0x8E4E)
GL_LAYER_PROVOKING_VERTEX=_C('GL_LAYER_PROVOKING_VERTEX',0x825E)
GL_MAX_VIEWPORTS=_C('GL_MAX_VIEWPORTS',0x825B)
GL_PROVOKING_VERTEX=_C('GL_PROVOKING_VERTEX',0x8E4F)
GL_SCISSOR_BOX=_C('GL_SCISSOR_BOX',0x0C10)
GL_SCISSOR_TEST=_C('GL_SCISSOR_TEST',0x0C11)
GL_UNDEFINED_VERTEX=_C('GL_UNDEFINED_VERTEX',0x8260)
GL_VIEWPORT=_C('GL_VIEWPORT',0x0BA2)
GL_VIEWPORT_BOUNDS_RANGE=_C('GL_VIEWPORT_BOUNDS_RANGE',0x825D)
GL_VIEWPORT_INDEX_PROVOKING_VERTEX=_C('GL_VIEWPORT_INDEX_PROVOKING_VERTEX',0x825F)
GL_VIEWPORT_SUBPIXEL_BITS=_C('GL_VIEWPORT_SUBPIXEL_BITS',0x825C)
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLdoubleArray)
def glDepthRangeArrayv(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLdouble,_cs.GLdouble)
def glDepthRangeIndexed(index,n,f):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLdoubleArray)
def glGetDoublei_v(target,index,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLfloatArray)
def glGetFloati_v(target,index,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLintArray)
def glScissorArrayv(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glScissorIndexed(index,left,bottom,width,height):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLintArray)
def glScissorIndexedv(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLfloatArray)
def glViewportArrayv(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glViewportIndexedf(index,x,y,w,h):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glViewportIndexedfv(index,v):pass
|
RecoTracker/SiTrackerMRHTools/python/SiTrackerMultiRecHitUpdator_cfi.py
|
ckamtsikis/cmssw
| 852 |
74261
|
import FWCore.ParameterSet.Config as cms
siTrackerMultiRecHitUpdator = cms.ESProducer("SiTrackerMultiRecHitUpdatorESProducer",
ComponentName = cms.string('SiTrackerMultiRecHitUpdator'),
TTRHBuilder = cms.string('WithAngleAndTemplate'),
HitPropagator = cms.string('trackingRecHitPropagator'),
#AnnealingProgram = cms.vdouble(80.0, 9.0, 4.0, 1.0, 1.0, 1.0),
AnnealingProgram = cms.vdouble(30.0, 18.0, 14.0, 11.0, 6.0, 4.0, 2.0, 1.0),
ChiSquareCut1D = cms.double(10.8276),
ChiSquareCut2D = cms.double(13.8155),
Debug = cms.bool(False)
)
|
topicnet/cooking_machine/models/blei_lafferty_score.py
|
bt2901/TopicNet
| 123 |
74268
|
import numpy as np
from typing import Callable
from .base_score import BaseScore
class BleiLaffertyScore(BaseScore):
"""
    This score implements the method described in the 2009 paper
    Blei, <NAME>., and <NAME>erty. "Topic models." Text Mining.
    Chapman and Hall/CRC, 2009. 101-124.
    At the core, this score helps to discover the tokens that are most likely
    to describe a given topic. Summing up the score helps to estimate how
    well the model distinguishes between topics. The higher this score, the better.
"""
def __init__(
self,
name: str = None,
num_top_tokens: int = 30,
should_compute: Callable[[int], bool] = None):
"""
Parameters
----------
name:
name of the score
num_top_tokens : int
            how many top tokens to consider for each topic
"""
super().__init__(name=name, should_compute=should_compute)
self.num_top_tokens = num_top_tokens
def __repr__(self):
return f'{self.__class__.__name__}(num_top_tokens={self.num_top_tokens})'
def _compute_blei_scores(self, phi):
"""
Computes Blei score
phi[wt] * [log(phi[wt]) - 1/T sum_k log(phi[wk])]
Parameters
----------
        phi : pd.DataFrame
phi matrix of the model
Returns
-------
        score : pd.DataFrame
            weighted phi matrix
""" # noqa: W291
topic_number = phi.shape[1]
blei_eps = 1e-42
log_phi = np.log(phi + blei_eps)
numerator = np.sum(log_phi, axis=1)
numerator = numerator[:, np.newaxis]
if hasattr(log_phi, "values"):
multiplier = log_phi.values - numerator / topic_number
else:
multiplier = log_phi - numerator / topic_number
scores = phi * multiplier
return scores
def call(self, model, **kwargs):
modalities = list(model.class_ids.keys())
score = 0
for modality in modalities:
phi = model.get_phi(class_ids=modality)
modality_scores = np.sort(self._compute_blei_scores(phi).values)
score += np.sum(modality_scores[-self.num_top_tokens:, :])
        if not modalities:  # fall back to the full phi matrix when no modalities are defined
phi = model.get_phi()
modality_scores = np.sort(self._compute_blei_scores(phi).values)
score = np.sum(modality_scores[-self.num_top_tokens:, :])
return score
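# A minimal, self-contained sketch (toy numbers, not from the paper) of the weighting
# defined above: phi[w, t] * (log phi[w, t] - (1/T) * sum_k log phi[w, k]).
# `_blei_score_demo` is illustrative only; it mirrors _compute_blei_scores for a plain
# numpy array, and the informative tokens (rows 0 and 1) receive the largest weights.
def _blei_score_demo():
    phi = np.array([[0.8, 0.1],   # token peaked on topic 0
                    [0.1, 0.8],   # token peaked on topic 1
                    [0.1, 0.1]])  # token uninformative for both topics
    log_phi = np.log(phi + 1e-42)
    centered = log_phi - log_phi.mean(axis=1, keepdims=True)
    return phi * centered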
|
es/ch15/cortadora.py
|
MohammedMajidKhadim/GeneticAlgorithmsWithPython
| 1,008 |
74295
|
<reponame>MohammedMajidKhadim/GeneticAlgorithmsWithPython<filename>es/ch15/cortadora.py
# File: cortadora.py
# From chapter 15 of _Algoritmos Genéticos con Python_
#
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from enum import Enum
class ContenidoDelCampo(Enum):
Hierba = ' #'
Cortado = ' .'
Cortador = 'C'
def __str__(self):
return self.value
class Dirección:
def __init__(self, índice, xOffset, yOffset, símbolo):
self.Índice = índice
self.XOffset = xOffset
self.YOffset = yOffset
self.Símbolo = símbolo
def mover_de(self, ubicación, distancia=1):
return Ubicación(ubicación.X + distancia * self.XOffset,
ubicación.Y + distancia * self.YOffset)
class Direcciones(Enum):
Norte = Dirección(0, 0, -1, '^')
Este = Dirección(1, 1, 0, '>')
Sur = Dirección(2, 0, 1, 'v')
Oeste = Dirección(3, -1, 0, '<')
@staticmethod
def obtener_dirección_después_de_girar_a_la_izquierda_90_grados(dirección):
nuevoÍndice = dirección.Índice - 1 \
if dirección.Índice > 0 \
else len(Direcciones) - 1
nuevaDirección = next(i for i in Direcciones
if i.value.Índice == nuevoÍndice)
return nuevaDirección.value
@staticmethod
def obtener_dirección_después_de_girar_a_la_derecha_90_grados(dirección):
nuevoÍndice = dirección.Índice + 1 \
if dirección.Índice < len(Direcciones) - 1 \
else 0
nuevaDirección = next(i for i in Direcciones
if i.value.Índice == nuevoÍndice)
return nuevaDirección.value
class Ubicación:
def __init__(self, x, y):
self.X, self.Y = x, y
def mover(self, xOffset, yOffset):
return Ubicación(self.X + xOffset, self.Y + yOffset)
class Cortadora:
def __init__(self, ubicación, dirección):
self.Ubicación = ubicación
self.Dirección = dirección
self.CuentaDePasos = 0
def girar_a_la_izquierda(self):
self.CuentaDePasos += 1
self.Dirección = Direcciones \
.obtener_dirección_después_de_girar_a_la_izquierda_90_grados(
self.Dirección)
def corta(self, campo):
nuevaUbicación = self.Dirección.mover_de(self.Ubicación)
nuevaUbicación, esVálida = campo.arreglar_ubicación(nuevaUbicación)
if esVálida:
self.Ubicación = nuevaUbicación
self.CuentaDePasos += 1
campo.ajuste(self.Ubicación,
self.CuentaDePasos if self.CuentaDePasos > 9
else " {}".format(self.CuentaDePasos))
def salta(self, campo, adelante, derecha):
nuevaUbicación = self.Dirección.mover_de(self.Ubicación, adelante)
derechaDirección = Direcciones \
.obtener_dirección_después_de_girar_a_la_derecha_90_grados(
self.Dirección)
nuevaUbicación = derechaDirección.mover_de(nuevaUbicación, derecha)
nuevaUbicación, esVálida = campo.arreglar_ubicación(nuevaUbicación)
if esVálida:
self.Ubicación = nuevaUbicación
self.CuentaDePasos += 1
campo.ajuste(self.Ubicación, self.CuentaDePasos
if self.CuentaDePasos > 9
else " {}".format(self.CuentaDePasos))
class Campo:
def __init__(self, anchura, altura, contenidoInicial):
self.Campo = [[contenidoInicial] * anchura for _ in range(altura)]
self.Anchura = anchura
self.Altura = altura
def ajuste(self, ubicación, símbolo):
self.Campo[ubicación.Y][ubicación.X] = símbolo
def cuente_cortada(self):
return sum(1 for fila in range(self.Altura)
for columna in range(self.Anchura)
if self.Campo[fila][columna] != ContenidoDelCampo.Hierba)
def mostrar(self, cortadora):
for índiceDeFilas in range(self.Altura):
if índiceDeFilas != cortadora.Ubicación.Y:
fila = ' '.join(map(str, self.Campo[índiceDeFilas]))
else:
r = self.Campo[índiceDeFilas][:]
r[cortadora.Ubicación.X] = "{}{}".format(
ContenidoDelCampo.Cortador, cortadora.Dirección.Símbolo)
fila = ' '.join(map(str, r))
print(fila)
class CampoValidando(Campo):
def __init__(self, anchura, altura, contenidoInicial):
super().__init__(anchura, altura, contenidoInicial)
def arreglar_ubicación(self, ubicación):
if ubicación.X >= self.Anchura or \
ubicación.X < 0 or \
ubicación.Y >= self.Altura or \
ubicación.Y < 0:
return None, False
return ubicación, True
class CampoToroidal(Campo):
def __init__(self, anchura, altura, contenidoInicial):
super().__init__(anchura, altura, contenidoInicial)
def arreglar_ubicación(self, ubicación):
nuevaUbicación = Ubicación(ubicación.X, ubicación.Y)
if nuevaUbicación.X < 0:
nuevaUbicación.X += self.Anchura
elif nuevaUbicación.X >= self.Anchura:
nuevaUbicación.X %= self.Anchura
if nuevaUbicación.Y < 0:
nuevaUbicación.Y += self.Altura
elif nuevaUbicación.Y >= self.Altura:
nuevaUbicación.Y %= self.Altura
return nuevaUbicación, True
|
indra/tests/test_obo_clients/test_efo_client.py
|
zebulon2/indra
| 136 |
74300
|
<filename>indra/tests/test_obo_clients/test_efo_client.py<gh_stars>100-1000
from indra.databases import efo_client
from indra.databases.efo_client import _client as client
def test_efo_client_loaded():
assert 'efo' == client.prefix
assert client.entries
assert client.name_to_id
def test_efo_id_to_name():
assert 'muscle measurement' == \
efo_client.get_efo_name_from_efo_id('0004515')
def test_efo_name_to_id():
assert '0004515' == \
efo_client.get_efo_id_from_efo_name('muscle measurement')
|
ClemBot.Bot/bot/cogs/define_cog.py
|
Iapetus-11/ClemBot
| 121 |
74304
|
# This contribution was made by: <NAME>
# Date: 12/15/2020
import logging
import re
import aiohttp
import discord
import discord.ext.commands as commands
import bot.bot_secrets as bot_secrets
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
log = logging.getLogger(__name__)
API_URL = 'https://www.dictionaryapi.com/api/v3/references/collegiate/json/'
class defineCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
def getPageData(self, jsonData, word):
pages = []
# If the word is found, the JSON will return a dictionary of information.
if (isinstance(jsonData[0], dict)):
# For words with several definitions, it will return several dictionaries.
for wordData in jsonData:
# Stems of the given word (Past Tense, Future Tense, Perfect Tense, etc.)
wordStems = wordData.get('meta', {}).get('stems', [])
# Syllables of the given word
syllableData = wordData.get('hwi', {}).get('hw', '')
# Pronunciation of the given word (With those weird letters)
pronunc = []
prsData = wordData.get('hwi', {}).get('prs', [])
for soundData in prsData:
pronunc.append(soundData.get('mw', ''))
# Type of the given word (Noun, Verb, Adjective, etc.)
wordType = wordData.get('fl', '')
# Definitions of the given word
definitions = []
defData = wordData.get('shortdef', [])
for defin in defData:
definitions.append(defin)
# Turn data into one long string (represents a page)
template = 'Tenses: '
for s in enumerate(wordStems):
template += s[1]
if s[0] != len(wordStems) - 1:
template += ', '
template += '\n'
template += f'Syllables: {syllableData}\n'
template += 'Pronunciation: '
for s in enumerate(pronunc):
template += s[1]
if s[0] != len(pronunc) - 1:
template += ', '
template += '\n'
template += f'Word Type: {wordType}\n'
template += '\n'
for s in enumerate(definitions):
page = f'{template}Definition: {s[1]}'
page = page.replace('*', ' | ')
pages.append(page)
# If the word cannot be found, the JSON returns a list of other possible suggestions.
elif isinstance(jsonData[0], str):
template = f'Word not found, see also: '
for s in enumerate(jsonData):
template = f'{template} {s[1]}'
if s[0] != len(jsonData) - 1:
template = f'{template}, '
pages = [template]
return pages
@ext.command()
@ext.long_help(
'Gets the dictionary defintion of any given word'
)
@ext.short_help('Gets a words definition')
@ext.example('define hello')
async def define(self, ctx, word):
"""
Given a word, find its definition and any other relevant information
USE: define <word>
EXAMPLE: define schadenfreude
For phrases, use underscores
EXAMPLE: define computer_science
Letters, numbers, and special characters (_, &, and -) are supported
"""
self.api_key = bot_secrets.secrets.merriam_key
# Remove any characters besides &, _, or - that are not in ranges a-z, A-Z, or 0-9
# per the ASCII Table https://www.asciitable.com
word = re.sub("[^a-zA-Z0-9 &_-]+", "", word)
actualWord = word.replace('_', ' ')
word = word.replace('_', '%20').lower()
url = f'{API_URL}{word}?key={self.api_key}'
wordPages = []
# Try Except for catching errors that could give away the API key
try:
async with aiohttp.request('get', url) as response:
if response.status == 200:
jsonData = await response.json()
wordPages = self.getPageData(jsonData, word)
else:
embed = discord.Embed(title='Merriam_Webster Dictionary', color=Colors.Error)
ErrMsg = f'Oh No! There appears to be an issue! Yell at one of the developers with the following code.\nError Code: {response.status}'
embed.add_field(name='Error with API', value=ErrMsg, inline=False)
await ctx.send(embed=embed)
return
await self.bot.messenger.publish(Events.on_set_pageable_text,
embed_name='Merriam-Webster Dictionary',
field_title=f'Word: {actualWord}',
pages=wordPages,
author=ctx.author,
channel=ctx.channel)
except Exception as err:
err_str = str(err)
            err_str = re.sub(re.escape(self.api_key), "CLASSIFIED", err_str)
raise Exception(err_str).with_traceback(err.__traceback__)
def setup(bot):
bot.add_cog(defineCog(bot))
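# A minimal offline sketch: a fabricated payload shaped like the fields getPageData
# reads above (meta.stems, hwi.hw, hwi.prs[].mw, fl, shortdef). It is NOT real
# Merriam-Webster output; it only illustrates how one entry becomes a page. Running
# it directly assumes the ClemBot package context so the module-level imports resolve.
if __name__ == '__main__':
    _fake_entry = [{
        'meta': {'stems': ['hello', 'hellos']},
        'hwi': {'hw': 'hel*lo', 'prs': [{'mw': 'hə-ˈlō'}]},
        'fl': 'noun',
        'shortdef': ['an expression of greeting'],
    }]
    for _page in defineCog.getPageData(None, _fake_entry, 'hello'):
        print(_page)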
|
py_v2/WeiboSession.py
|
SykieChen/WeiboBlackList
| 394 |
74306
|
<reponame>SykieChen/WeiboBlackList
import requests
class WeiboSession(requests.Session):
def __init__(self, username, password):
super(WeiboSession, self).__init__()
self.__username = username
self.__password = password
def __del__(self):
self.close()
def login(self):
loginURL = "http://passport.weibo.cn/sso/login"
data = {
"username": self.__username,
"password": self.__password,
"savestate": "1",
"r": "http://m.weibo.cn/",
"ec": "0",
"entry": "mweibo",
"mainpageflag": "1",
}
self.headers.update({
"Referer": "http://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F&sudaref=passport.weibo.cn&retcode=6102",
})
retJson = self.post(loginURL, data=data).json()
if retJson["retcode"] == 20000000:
for tmpURL in retJson["data"]["crossdomainlist"].values():
self.get(tmpURL)
myURL = "http://weibo.cn/"
self.get(myURL)
if __name__ == "__main__":
weibo = WeiboSession("", "")
|
vilya/libs/mail.py
|
mubashshirjamal/code
| 1,582 |
74313
|
# coding: utf-8
import smtplib
from vilya.config import SMTP_SERVER
def send_mail(msg):
fromaddr = msg["From"]
toaddrs = []
if msg['To']:
toaddrs += [addr.strip() for addr in msg["To"].split(',')]
if msg["Cc"]:
toaddrs += [addr.strip() for addr in msg["Cc"].split(',')]
smtp = smtplib.SMTP(SMTP_SERVER)
smtp.sendmail(fromaddr, toaddrs, msg.as_string())
smtp.quit()
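# A minimal usage sketch (hypothetical addresses; assumes SMTP_SERVER in the config
# points at a reachable relay): build a standard MIME message and hand it to send_mail.
if __name__ == '__main__':
    from email.mime.text import MIMEText
    demo_msg = MIMEText('hello from vilya', 'plain', 'utf-8')
    demo_msg['From'] = 'noreply@example.com'
    demo_msg['To'] = 'alice@example.com, bob@example.com'
    demo_msg['Subject'] = 'send_mail demo'
    send_mail(demo_msg)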
|
leetcode_problems.py
|
kld123509945/iScript
| 5,267 |
74318
|
<reponame>kld123509945/iScript
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import sys
import re
import os
import argparse
import requests
from lxml import html as lxml_html
try:
import html
except ImportError:
import HTMLParser
html = HTMLParser.HTMLParser()
try:
import cPickle as pk
except ImportError:
import pickle as pk
class LeetcodeProblems(object):
def get_problems_info(self):
leetcode_url = 'https://leetcode.com/problemset/algorithms'
res = requests.get(leetcode_url)
if not res.ok:
print('request error')
sys.exit()
cm = res.text
cmt = cm.split('tbody>')[-2]
indexs = re.findall(r'<td>(\d+)</td>', cmt)
problem_urls = ['https://leetcode.com' + url \
for url in re.findall(
r'<a href="(/problems/.+?)"', cmt)]
levels = re.findall(r"<td value='\d*'>(.+?)</td>", cmt)
tinfos = zip(indexs, levels, problem_urls)
assert (len(indexs) == len(problem_urls) == len(levels))
infos = []
for info in tinfos:
res = requests.get(info[-1])
if not res.ok:
print('request error')
sys.exit()
tree = lxml_html.fromstring(res.text)
title = tree.xpath('//meta[@property="og:title"]/@content')[0]
description = tree.xpath('//meta[@property="description"]/@content')
if not description:
description = tree.xpath('//meta[@property="og:description"]/@content')[0]
else:
description = description[0]
description = html.unescape(description.strip())
tags = tree.xpath('//div[@id="tags"]/following::a[@class="btn btn-xs btn-primary"]/text()')
infos.append(
{
'title': title,
'level': info[1],
'index': int(info[0]),
'description': description,
'tags': tags
}
)
with open('leecode_problems.pk', 'wb') as g:
pk.dump(infos, g)
return infos
def to_text(self, pm_infos):
if self.args.index:
key = 'index'
elif self.args.title:
key = 'title'
elif self.args.tag:
key = 'tags'
elif self.args.level:
key = 'level'
else:
key = 'index'
infos = sorted(pm_infos, key=lambda i: i[key])
text_template = '## {index} - {title}\n' \
'~{level}~ {tags}\n' \
'{description}\n' + '\n' * self.args.line
text = ''
for info in infos:
if self.args.rm_blank:
info['description'] = re.sub(r'[\n\r]+', r'\n', info['description'])
text += text_template.format(**info)
with open('leecode problems.txt', 'w') as g:
g.write(text)
def run(self):
if os.path.exists('leecode_problems.pk') and not self.args.redownload:
with open('leecode_problems.pk', 'rb') as f:
pm_infos = pk.load(f)
else:
pm_infos = self.get_problems_info()
        print('found %s problems.' % len(pm_infos))
self.to_text(pm_infos)
def handle_args(argv):
    p = argparse.ArgumentParser(description='extract all leetcode problems to a local file')
p.add_argument('--index', action='store_true', help='sort by index')
p.add_argument('--level', action='store_true', help='sort by level')
p.add_argument('--tag', action='store_true', help='sort by tag')
p.add_argument('--title', action='store_true', help='sort by title')
p.add_argument('--rm_blank', action='store_true', help='remove blank')
p.add_argument('--line', action='store', type=int, default=10, help='blank of two problems')
p.add_argument('-r', '--redownload', action='store_true', help='redownload data')
args = p.parse_args(argv[1:])
return args
def main(argv):
args = handle_args(argv)
x = LeetcodeProblems()
x.args = args
x.run()
if __name__ == '__main__':
argv = sys.argv
main(argv)
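# Example invocations (network access is required to scrape the problem list):
#   python leetcode_problems.py --level --rm_blank
#   python leetcode_problems.py -r --tag --line 5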
|
2019/08/08/Flask REST API Example With Pluggable Views and MethodView/api_demo/app.py
|
kenjitagawa/youtube_video_code
| 492 |
74319
|
from flask import Flask, jsonify, request
from flask.views import MethodView
app = Flask(__name__)
languages = [{'name' : 'JavaScript'}, {'name' : 'Python'}, {'name' : 'Ruby'}]
def get_language(name):
return [language for language in languages if language['name'] == name][0]
class Language(MethodView):
def get(self, language_name):
if language_name:
return jsonify({'language' : get_language(language_name)})
else:
return jsonify({'languages': languages})
def post(self):
new_language_name = request.json['name']
language = {'name' : new_language_name}
languages.append(language)
return jsonify({'language' : get_language(new_language_name)}), 201
def put(self, language_name):
language = get_language(language_name)
new_language_name = request.json['name']
language['name'] = new_language_name
return jsonify({'language' : get_language(new_language_name)})
def delete(self, language_name):
language = get_language(language_name)
languages.remove(language)
return '', 204
language_view = Language.as_view('language_api')
app.add_url_rule('/language', methods=['POST'], view_func=language_view)
app.add_url_rule('/language', methods=['GET'], defaults={'language_name' : None}, view_func=language_view)
app.add_url_rule('/language/<language_name>', methods=['GET', 'PUT', 'DELETE'], view_func=language_view)
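# A minimal sketch exercising the MethodView routes above with Flask's built-in test
# client (no running server needed); the language name "Go" is just example data.
if __name__ == '__main__':
    with app.test_client() as client:
        print(client.get('/language').get_json())                        # list all
        print(client.post('/language', json={'name': 'Go'}).get_json())  # create
        print(client.get('/language/Go').get_json())                     # fetch one
        print(client.delete('/language/Go').status_code)                 # 204 on success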
|
scripts/TreeLSTM_IM/data_iterator.py
|
jabalazs/nli
| 299 |
74329
|
import cPickle as pkl
import gzip
import os
import re
import sys
import numpy
import math
import random
from binary_tree import BinaryTree
def convert_ptb_to_tree(line):
index = 0
tree = None
line = line.rstrip()
stack = []
parts = line.split()
for p_i, p in enumerate(parts):
# opening of a bracket, create a new node, take parent from top of stack
if p == '(':
if tree is None:
tree = BinaryTree(index)
else:
add_descendant(tree, index, stack[-1])
# add the newly created node to the stack and increment the index
stack.append(index)
index += 1
# close of a bracket, pop node on top of the stack
elif p == ')':
stack.pop(-1)
# otherwise, create a new node, take parent from top of stack, and set word
else:
add_descendant(tree, index, stack[-1])
tree.set_word(index, p)
index += 1
return tree
def add_descendant(tree, index, parent_index):
# add to the left first if possible, then to the right
if tree.has_left_descendant_at_node(parent_index):
if tree.has_right_descendant_at_node(parent_index):
sys.exit("Node " + str(parent_index) + " already has two children")
else:
tree.add_right_descendant(index, parent_index)
else:
tree.add_left_descendant(index, parent_index)
def fopen(filename, mode='r'):
if filename.endswith('.gz'):
return gzip.open(filename, mode)
return open(filename, mode)
class TextIterator:
"""Simple Bitext iterator."""
def __init__(self, source, target, label,
dict,
batch_size=128,
n_words=-1,
maxlen=500,
shuffle=True):
self.source = fopen(source, 'r')
self.target = fopen(target, 'r')
self.label = fopen(label, 'r')
with open(dict, 'rb') as f:
self.dict = pkl.load(f)
self.batch_size = batch_size
self.n_words = n_words
self.maxlen = maxlen
self.shuffle = shuffle
self.end_of_data = False
self.source_buffer = []
self.target_buffer = []
self.label_buffer = []
self.k = batch_size * 20
def __iter__(self):
return self
def reset(self):
self.source.seek(0)
self.target.seek(0)
self.label.seek(0)
def next(self):
if self.end_of_data:
self.end_of_data = False
self.reset()
raise StopIteration
source = []
target = []
label = []
# fill buffer, if it's empty
assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch!'
assert len(self.source_buffer) == len(self.label_buffer), 'Buffer size mismatch!'
if len(self.source_buffer) == 0:
for k_ in xrange(self.k):
ss = self.source.readline()
if ss == "":
break
tt = self.target.readline()
if tt == "":
break
ll = self.label.readline()
if ll == "":
break
ss = convert_ptb_to_tree(ss)
words_ss, left_mask_ss, right_mask_ss = ss.convert_to_sequence_and_masks(ss.root)
words_ss = [self.dict[w] if w in self.dict else 1
for w in words_ss]
if self.n_words > 0:
words_ss = [w if w < self.n_words else 1 for w in words_ss]
ss = (words_ss, left_mask_ss, right_mask_ss)
tt = convert_ptb_to_tree(tt)
words_tt, left_mask_tt, right_mask_tt = tt.convert_to_sequence_and_masks(tt.root)
words_tt = [self.dict[w] if w in self.dict else 1
for w in words_tt]
if self.n_words > 0:
words_tt = [w if w < self.n_words else 1 for w in words_tt]
tt = (words_tt, left_mask_tt, right_mask_tt)
if len(words_ss) > self.maxlen or len(words_tt) > self.maxlen:
continue
self.source_buffer.append(ss)
self.target_buffer.append(tt)
self.label_buffer.append(ll.strip())
if self.shuffle:
# sort by target buffer
tlen = numpy.array([len(t[0]) for t in self.target_buffer])
tidx = tlen.argsort()
# shuffle mini-batch
tindex = []
small_index = range(int(math.ceil(len(tidx)*1./self.batch_size)))
random.shuffle(small_index)
for i in small_index:
if (i+1)*self.batch_size > len(tidx):
tindex.extend(tidx[i*self.batch_size:])
else:
tindex.extend(tidx[i*self.batch_size:(i+1)*self.batch_size])
tidx = tindex
_sbuf = [self.source_buffer[i] for i in tidx]
_tbuf = [self.target_buffer[i] for i in tidx]
_lbuf = [self.label_buffer[i] for i in tidx]
self.source_buffer = _sbuf
self.target_buffer = _tbuf
self.label_buffer = _lbuf
if len(self.source_buffer) == 0 or len(self.target_buffer) == 0 or len(self.label_buffer) == 0:
self.end_of_data = False
self.reset()
raise StopIteration
try:
# actual work here
while True:
# read from source file and map to word index
try:
ss = self.source_buffer.pop(0)
tt = self.target_buffer.pop(0)
ll = self.label_buffer.pop(0)
except IndexError:
break
source.append(ss)
target.append(tt)
label.append(ll)
if len(source) >= self.batch_size or \
len(target) >= self.batch_size or \
len(label) >= self.batch_size:
break
except IOError:
self.end_of_data = True
if len(source) <= 0 or len(target) <= 0 or len(label) <= 0:
self.end_of_data = False
self.reset()
raise StopIteration
return source, target, label
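# Minimal usage sketch (illustrative only; the file paths and vocabulary pickle below
# are hypothetical, not part of this module):
#
#     train_iter = TextIterator('train.src.trees', 'train.trg.trees', 'train.labels',
#                               dict='vocab.pkl', batch_size=64, n_words=30000, maxlen=500)
#     for source, target, label in train_iter:
#         # each source/target element is a (word_ids, left_mask, right_mask) tuple
#         # produced by convert_ptb_to_tree(...).convert_to_sequence_and_masks(...)
#         pass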
|
src/mcedit2/worldview/viewaction.py
|
elcarrion06/mcedit2
| 673 |
74331
|
<filename>src/mcedit2/worldview/viewaction.py<gh_stars>100-1000
"""
viewaction
"""
from __future__ import absolute_import, division, print_function
import logging
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from mceditlib.util.lazyprop import weakrefprop
from mcedit2.util.settings import Settings
log = logging.getLogger(__name__)
class ViewAction(QtCore.QObject):
button = Qt.NoButton
modifiers = Qt.NoModifier
key = 0
labelText = "Unknown Action"
hidden = False # Hide from configuration
settingsKey = NotImplemented
acceptsMouseWheel = False
WHEEL_UP = 0x100
WHEEL_DOWN = 0x200
_buttonNames = None
def __init__(self):
"""
An action that can be bound to a keypress or mouse button click, drag, or
movement with the bound key or button held.
"""
super(ViewAction, self).__init__()
if self.settingsKey is not None:
settings = Settings()
prefix = "keybindings/"
try:
modifiers = int(settings.value(prefix + self.settingsKey + "/modifiers", self.modifiers))
button = int(settings.value(prefix + self.settingsKey + "/button", self.button))
key = int(settings.value(prefix + self.settingsKey + "/key", self.key))
except Exception as e:
log.error("Error while reading key binding:")
else:
self.modifiers = modifiers
self.button = button
self.key = key
def __repr__(self):
return "%s(button=%s, key=%s, modifiers=%s)" % (self.__class__.__name__, self.button, self.key, self.modifiers)
def setBinding(self, button, key, modifiers):
self.button = button
self.key = key
self.modifiers = modifiers
if self.settingsKey is not None:
settings = Settings()
prefix = "keybindings/"
settings.setValue(prefix + self.settingsKey + "/button", self.button)
settings.setValue(prefix + self.settingsKey + "/key", self.key)
settings.setValue(prefix + self.settingsKey + "/modifiers", int(self.modifiers))
def matchKeyEvent(self, event):
key = event.key()
modifiers = event.modifiers()
if key in (Qt.Key_Shift, Qt.Key_Control, Qt.Key_Alt, Qt.Key_Meta):
modifiers = self.modifiers # pressing modifier key by itself has modifiers set, but releasing modifiers does not
matched = self.key == key
if event.type() == QtCore.QEvent.KeyPress:
# Only match modifiers on key press, ignore modifiers on release to handle
# input sequences like: S down, Shift down, S up, Shift up
matched &= (self.modifiers == modifiers)
return matched
def matchModifiers(self, event):
return (self.modifiers is None or
self.modifiers == event.modifiers())
def mouseMoveEvent(self, event):
"""
Called when the mouse moves while the bound keys or buttons are pressed.
:type event: QtGui.QMouseEvent
"""
def mousePressEvent(self, event):
"""
Called when the bound mouse button is pressed. By default, calls buttonPressEvent.
:type event: QtGui.QMouseEvent
"""
self.buttonPressEvent(event)
def mouseReleaseEvent(self, event):
"""
Called when the bound mouse button is released. By default, calls buttonReleaseEvent
:type event: QtGui.QMouseEvent
"""
self.buttonReleaseEvent(event)
def keyPressEvent(self, event):
"""
Called when the bound key is pressed. By default, calls buttonPressEvent.
:type event: QtGui.QKeyEvent
"""
self.buttonPressEvent(event)
def keyReleaseEvent(self, event):
"""
Called when the bound key is released. By default, calls buttonReleaseEvent
:type event: QtGui.QKeyEvent
"""
self.buttonReleaseEvent(event)
def buttonPressEvent(self, event):
"""
Called by mousePressEvent and keyPressEvent.
Implement this to handle button-press events if it doesn't matter whether the action is bound to a key or
mouse button.
:type event: QtGui.QEvent
"""
def buttonReleaseEvent(self, event):
"""
Called by mouseReleaseEvent and keyReleaseEvent.
Implement this to handle button-release events if it doesn't matter whether the action is bound to a key or
mouse button.
:type event: QtGui.QEvent
"""
def buttonName(self, buttons):
if ViewAction._buttonNames is None:
ViewAction._buttonNames = [
(Qt.LeftButton, self.tr("Left Button")),
(Qt.RightButton, self.tr("Right Button")),
(Qt.MiddleButton, self.tr("Middle Button")),
(ViewAction.WHEEL_UP, self.tr("Mousewheel Up")),
(ViewAction.WHEEL_DOWN, self.tr("Mousewheel Down")),
]
parts = [name for mask, name in self._buttonNames if buttons & mask]
return "+".join(parts)
def describeKeys(self):
modifierKeyNames = {
Qt.Key_Shift: self.tr("Shift"),
Qt.Key_Control: self.tr("Control"),
Qt.Key_Alt: self.tr("Alt"),
Qt.Key_Meta: self.tr("Meta"),
}
s = modifierKeyNames.get(self.key) # QKeySequence returns weird strings when only a modifier is pressed
if s is None:
try:
s = QtGui.QKeySequence(self.key | self.modifiers).toString()
except TypeError:
log.error("KEY: %r MOD: %r", self.key, self.modifiers)
raise
if self.key == 0:
s = s[:-2]
if self.button != Qt.NoButton:
if len(s):
s += "+"
s += self.buttonName(self.button)
return s
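# Example (hypothetical, not part of mcedit2): a minimal ViewAction subclass that binds
# a key and persists its binding via settingsKey might look like this:
#
#     class ReloadChunksAction(ViewAction):
#         key = Qt.Key_F5
#         labelText = "Reload Chunks"
#         settingsKey = "worldview/general/reloadChunks"
#
#         def buttonPressEvent(self, event):
#             log.debug("Reload requested at %s, %s", event.x(), event.y())
#             event.view.update()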
class UseToolMouseAction(ViewAction):
button = Qt.LeftButton
labelText = "Use Tool (Don't change!)"
hidden = True
settingsKey = None
modifiers = None # really?
editorTab = weakrefprop()
def __init__(self, editorTab):
super(UseToolMouseAction, self).__init__()
self.editorTab = editorTab
def mousePressEvent(self, event):
self.editorTab.editorSession.viewMousePress(event)
event.view.update()
def mouseMoveEvent(self, event):
self.editorTab.editorSession.viewMouseDrag(event)
event.view.update()
def mouseReleaseEvent(self, event):
self.editorTab.editorSession.viewMouseRelease(event)
event.view.update()
class TrackingMouseAction(ViewAction):
button = Qt.NoButton
hidden = True
modifiers = None
labelText = "Mouse Tracking (Don't change!)"
settingsKey = None
editorTab = weakrefprop()
def __init__(self, editorTab):
super(TrackingMouseAction, self).__init__()
self.editorTab = editorTab
def mouseMoveEvent(self, event):
self.editorTab.editorSession.viewMouseMove(event)
class MoveViewMouseAction(ViewAction):
button = Qt.RightButton
labelText = "Pan View"
settingsKey = "worldview/general/holdToMove"
def buttonPressEvent(self, event):
x, y = event.x(), event.y()
self.dragStart = event.view.unprojectAtHeight(x, y, 0)
self.startOffset = event.view.centerPoint
log.debug("Drag start %s", self.dragStart)
event.view.update()
def mouseMoveEvent(self, event):
x = event.x()
y = event.y()
log.debug("mouseMoveEvent %s", (x, y))
if self.dragStart:
d = event.view.unprojectAtHeight(x, y, 0) - self.dragStart
event.view.centerPoint -= d
log.debug("Drag continue delta %s", d)
event.view.update()
def buttonReleaseEvent(self, event):
x, y = event.x(), event.y()
self.dragStart = None
log.debug("Drag end")
event.view.update()
def ZoomWheelActions():
maxScale = 16.
minScale = 1. / 64
zooms = []
_i = minScale
while _i < maxScale:
zooms.append(_i)
_i *= 2.0
def zoom(view, scale, (mx, my)):
# Get mouse position in world coordinates
worldPos = view.unprojectAtHeight(mx, my, 0)
if scale != view.scale:
view.scale = scale
# Get the new position under the mouse, find its distance from the old position,
# and shift the centerPoint by that amount.
newWorldPos = view.unprojectAtHeight(mx, my, 0)
delta = newWorldPos - worldPos
view.centerPoint = view.centerPoint - delta
log.debug("zoom offset %s, pos %s, delta %s, scale %s", view.centerPoint, (mx, my), delta, view.scale)
class ZoomInAction(ViewAction):
settingsKey = "worldview.general.zoom_in"
button = ViewAction.WHEEL_UP
acceptsMouseWheel = True
labelText = "Zoom In"
def buttonPressEvent(self, event):
log.debug(self.labelText)
mousePos = (event.x(), event.y())
i = zooms.index(event.view.scale)
if i > 0:
zoom(event.view, zooms[i - 1], mousePos)
class ZoomOutAction(ViewAction):
settingsKey = "worldview.general.zoom_out"
button = ViewAction.WHEEL_DOWN
acceptsMouseWheel = True
labelText = "Zoom Out"
def buttonPressEvent(self, event):
log.debug(self.labelText)
mousePos = (event.x(), event.y())
i = zooms.index(event.view.scale)
if i < len(zooms) - 1:
zoom(event.view, zooms[i + 1], mousePos)
return [ZoomInAction(), ZoomOutAction()]
|
train_quantizedTF.py
|
aalikadic/transformer-location-prediction
| 214 |
74343
|
import argparse
import baselineUtils
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import os
import time
from transformer.batch import subsequent_mask
from torch.optim import Adam,SGD,RMSprop,Adagrad
from transformer.noam_opt import NoamOpt
import numpy as np
import scipy.io
import scipy.spatial.distance  # needed for scipy.spatial.distance.cdist used below
import json
import pickle
from torch.utils.tensorboard import SummaryWriter
def main():
parser=argparse.ArgumentParser(description='Train the individual Transformer model')
parser.add_argument('--dataset_folder',type=str,default='datasets')
parser.add_argument('--dataset_name',type=str,default='zara1')
parser.add_argument('--obs',type=int,default=8)
parser.add_argument('--preds',type=int,default=12)
parser.add_argument('--emb_size',type=int,default=512)
parser.add_argument('--heads',type=int, default=8)
parser.add_argument('--layers',type=int,default=6)
parser.add_argument('--dropout',type=float,default=0.1)
parser.add_argument('--cpu',action='store_true')
parser.add_argument('--output_folder',type=str,default='Output')
parser.add_argument('--val_size',type=int, default=0)
parser.add_argument('--gpu_device',type=str, default="0")
parser.add_argument('--verbose',action='store_true')
parser.add_argument('--max_epoch',type=int, default=100)
parser.add_argument('--batch_size',type=int,default=100)
parser.add_argument('--validation_epoch_start', type=int, default=30)
parser.add_argument('--resume_train',action='store_true')
parser.add_argument('--delim',type=str,default='\t')
parser.add_argument('--name', type=str, default="zara1")
parser.add_argument('--factor', type=float, default=1.)
parser.add_argument('--evaluate',type=bool,default=True)
parser.add_argument('--save_step', type=int, default=1)
args=parser.parse_args()
model_name=args.name
os.makedirs('models', exist_ok=True)
os.makedirs('output', exist_ok=True)
os.makedirs('output/QuantizedTF', exist_ok=True)
os.makedirs(f'models/QuantizedTF', exist_ok=True)
os.makedirs(f'output/QuantizedTF/{args.name}', exist_ok=True)
os.makedirs(f'models/QuantizedTF/{args.name}', exist_ok=True)
log=SummaryWriter('logs/%s'%model_name)
#os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device
device=torch.device("cuda")
if args.cpu or not torch.cuda.is_available():
device=torch.device("cpu")
args.verbose=True
## creation of the dataloaders for train and validation
if args.val_size==0:
train_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=True,verbose=args.verbose)
val_dataset, _ = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, 0, args.obs,
args.preds, delim=args.delim, train=False,
verbose=args.verbose)
else:
train_dataset, val_dataset = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, args.val_size, args.obs,
args.preds, delim=args.delim, train=True,
verbose=args.verbose)
test_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)
mat = scipy.io.loadmat(os.path.join(args.dataset_folder, args.dataset_name, "clusters.mat"))
clusters=mat['centroids']
import quantized_TF
model=quantized_TF.QuantizedTF(clusters.shape[0], clusters.shape[0]+1, clusters.shape[0], N=args.layers,
d_model=args.emb_size, d_ff=1024, h=args.heads, dropout=args.dropout).to(device)
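# The training loop below turns continuous (dx, dy) speeds into discrete tokens by
# assigning each time step to its nearest cluster centroid. A minimal sketch of that
# step (illustrative only, shapes refer to a single batch):
#
#     speeds = batch['src'][:, 1:, 2:4].reshape(-1, 2)            # (N*T, 2) velocities
#     dists = scipy.spatial.distance.cdist(speeds, clusters)      # (N*T, n_clusters)
#     token_ids = dists.argmin(axis=1).reshape(n_in_batch, -1)    # nearest centroid per step
#     # token_ids is the discrete input sequence consumed by the transformer.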
tr_dl=torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
val_dl = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)
#optim = SGD(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01)
#sched=torch.optim.lr_scheduler.StepLR(optim,0.0005)
optim = NoamOpt(args.emb_size, args.factor, len(tr_dl)*5,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
#optim=Adagrad(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01,lr_decay=0.001)
epoch=0
while epoch<args.max_epoch:
epoch_loss=0
model.train()
for id_b,batch in enumerate(tr_dl):
optim.optimizer.zero_grad()
scale=np.random.uniform(0.5,4)
#rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch=batch['src'].shape[0]
speeds_inp=batch['src'][:,1:,2:4]*scale
inp=torch.tensor(scipy.spatial.distance.cdist(speeds_inp.reshape(-1,2),clusters).argmin(axis=1).reshape(n_in_batch,-1)).to(device)
speeds_trg = batch['trg'][:,:,2:4]*scale
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch, -1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att=subsequent_mask(target.shape[1]).repeat(n_in_batch,1,1).to(device)
start_of_seq=torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp=torch.cat((start_of_seq,target[:,:-1]),1)
out=model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.view(-1,out.shape[-1]),target.view(-1),reduction='mean')
loss.backward()
optim.step()
print("epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (epoch, args.max_epoch, id_b, len(tr_dl), loss.item()))
epoch_loss += loss.item()
#sched.step()
log.add_scalar('Loss/train', epoch_loss / len(tr_dl), epoch)
with torch.no_grad():
model.eval()
gt=[]
pr=[]
val_loss=0
step=0
for batch in val_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
speeds_trg = batch['trg'][:, :, 2:4]
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = torch.cat((start_of_seq, target[:, :-1]), 1)
out = model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.contiguous().view(-1, out.shape[-1]), target.contiguous().view(-1), reduction='mean')
print("val epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (
epoch, args.max_epoch, step, len(val_dl), loss.item()))
val_loss+=loss.item()
step+=1
log.add_scalar('validation/loss', val_loss / len(val_dl), epoch)
if args.evaluate:
# DETERMINISTIC MODE
model.eval()
gt = []
pr = []
inp_ = []
peds = []
frames = []
dt = []
for batch in test_dl:
inp_.append(batch['src'][:,:,0:2])
gt.append(batch['trg'][:, :, 0:2])
frames.append(batch['frames'])
peds.append(batch['peds'])
dt.append(batch['dataset'])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
# trg_att is rebuilt inside the autoregressive decoding loop below
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model(inp, dec_inp, src_att, trg_att)
dec_inp=torch.cat((dec_inp,out[:,-1:].argmax(dim=2)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr.append(preds_tr_b)
peds = np.concatenate(peds, 0)
frames = np.concatenate(frames, 0)
dt = np.concatenate(dt, 0)
gt = np.concatenate(gt, 0)
dt_names = test_dataset.data['dataset_name']
pr = np.concatenate(pr, 0)
mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/DET_mad', mad, epoch)
log.add_scalar('eval/DET_fad', fad, epoch)
scipy.io.savemat(f"output/QuantizedTF/{args.name}/{epoch:05d}.mat",
{'input': np.concatenate(inp_, 0), 'gt': gt, 'pr': pr, 'peds': peds, 'frames': frames, 'dt': dt,
'dt_names': dt_names})
# MULTI MODALITY
if False:
num_samples=20
model.eval()
gt=[]
pr_all={}
for sam in range(num_samples):
pr_all[sam]=[]
for batch in test_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
gt.append(gt_b)
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
for sam in range(num_samples):
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model.predict(inp, dec_inp, src_att, trg_att)
h=out[:,-1]
dec_inp=torch.cat((dec_inp,torch.multinomial(h,1)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr_all[sam].append(preds_tr_b)
gt=np.concatenate(gt,0)
#pr=np.concatenate(pr,0)
samp = {}
for k in pr_all.keys():
samp[k] = {}
samp[k]['pr'] = np.concatenate(pr_all[k], 0)
samp[k]['mad'], samp[k]['fad'], samp[k]['err'] = baselineUtils.distance_metrics(gt, samp[k]['pr'])
ev = [samp[i]['err'] for i in range(num_samples)]
e20 = np.stack(ev, -1)
mad_samp=e20.mean(1).min(-1).mean()
fad_samp=e20[:,-1].min(-1).mean()
#mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/MM_mad', mad_samp, epoch)
log.add_scalar('eval/MM_fad', fad_samp, epoch)
if epoch % args.save_step == 0:
torch.save(model.state_dict(), f'models/QuantizedTF/{args.name}/{epoch:05d}.pth')
epoch+=1
ab=1
if __name__=='__main__':
main()
|
tests/test_screener.py
|
ericwbzhang/yahooquery
| 417 |
74361
|
import pytest
from yahooquery import Screener
def test_screener():
s = Screener()
assert s.get_screeners("most_actives") is not None
def test_available_screeners():
s = Screener()
assert s.available_screeners is not None
def test_bad_screener():
with pytest.raises(ValueError):
s = Screener()
assert s.get_screeners("most_active")
|
src/probnum/diffeq/odefilter/approx_strategies/_approx_strategy.py
|
fxbriol/probnum
| 226 |
74378
|
"""Approximate information operators."""
import abc
from probnum.diffeq.odefilter import information_operators
class ApproximationStrategy(abc.ABC):
"""Interface for approximation strategies.
Turn an information operator into an approximate information operator that converts
into a :class:`ODEFilter` compatible :class:`Transition`.
"""
def __call__(
self, information_operator: information_operators.InformationOperator
) -> information_operators.ApproximateInformationOperator:
"""Derive a tractable approximation of an information operator."""
raise NotImplementedError
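# Sketch of a concrete strategy (hypothetical, not part of probnum): a subclass only
# needs to implement __call__ and return an approximate information operator, e.g.
#
#     class IdentityApproximation(ApproximationStrategy):
#         def __call__(self, information_operator):
#             # 'SomeApproximateOperator' is a placeholder for a concrete
#             # information_operators.ApproximateInformationOperator subclass.
#             return SomeApproximateOperator(information_operator)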
|
tcod/tileset.py
|
hirnimeshrampuresoftware/python-tcod
| 231 |
74392
|
<reponame>hirnimeshrampuresoftware/python-tcod
"""Tileset and font related functions.
Tilesets can be loaded as a whole from tile-sheets or True-Type fonts, or they
can be put together from multiple tile images by loading them separately
using :any:`Tileset.set_tile`.
A major restriction with libtcod is that all tiles must be the same size and
tiles can't overlap when rendered. For sprite-based rendering it can be
useful to use `an alternative library for graphics rendering
<https://wiki.python.org/moin/PythonGameLibraries>`_ while continuing to use
python-tcod's pathfinding and field-of-view algorithms.
"""
from __future__ import annotations
import itertools
import os
from pathlib import Path
from typing import Any, Iterable, Optional, Tuple, Union
import numpy as np
from numpy.typing import ArrayLike, NDArray
import tcod.console
from tcod._internal import _check, _console, _raise_tcod_error, deprecate
from tcod.loader import ffi, lib
class Tileset:
"""A collection of graphical tiles.
This class is provisional, the API may change in the future.
"""
def __init__(self, tile_width: int, tile_height: int) -> None:
self._tileset_p = ffi.gc(
lib.TCOD_tileset_new(tile_width, tile_height),
lib.TCOD_tileset_delete,
)
@classmethod
def _claim(cls, cdata: Any) -> Tileset:
"""Return a new Tileset that owns the provided TCOD_Tileset* object."""
self: Tileset = object.__new__(cls)
if cdata == ffi.NULL:
raise RuntimeError("Tileset initialized with nullptr.")
self._tileset_p = ffi.gc(cdata, lib.TCOD_tileset_delete)
return self
@property
def tile_width(self) -> int:
"""The width of the tile in pixels."""
return int(lib.TCOD_tileset_get_tile_width_(self._tileset_p))
@property
def tile_height(self) -> int:
"""The height of the tile in pixels."""
return int(lib.TCOD_tileset_get_tile_height_(self._tileset_p))
@property
def tile_shape(self) -> Tuple[int, int]:
"""The shape (height, width) of the tile in pixels."""
return self.tile_height, self.tile_width
def __contains__(self, codepoint: int) -> bool:
"""Test if a tileset has a codepoint with ``n in tileset``."""
return bool(lib.TCOD_tileset_get_tile_(self._tileset_p, codepoint, ffi.NULL) == 0)
def get_tile(self, codepoint: int) -> NDArray[np.uint8]:
"""Return a copy of a tile for the given codepoint.
If the tile does not exist yet then a blank array will be returned.
The tile will have a shape of (height, width, rgba) and a dtype of
uint8. Note that most grey-scale tiles will only use the alpha
channel and will usually have a solid white color channel.
"""
tile = np.zeros(self.tile_shape + (4,), dtype=np.uint8)
lib.TCOD_tileset_get_tile_(
self._tileset_p,
codepoint,
ffi.from_buffer("struct TCOD_ColorRGBA*", tile),
)
return tile
def set_tile(self, codepoint: int, tile: Union[ArrayLike, NDArray[np.uint8]]) -> None:
"""Upload a tile into this array.
Args:
codepoint (int): The Unicode codepoint you are assigning to.
If the tile is a sprite rather than a common glyph then consider assigning it to a
`Private Use Area <https://en.wikipedia.org/wiki/Private_Use_Areas>`_.
tile (Union[ArrayLike, NDArray[np.uint8]]):
The pixels to use for this tile in row-major order and must be in the same shape as :any:`tile_shape`.
`tile` can be an RGBA array with the shape of ``(height, width, rgba)``, or a grey-scale array with the
shape ``(height, width)``.
The `tile` array will be converted to a dtype of ``np.uint8``.
An RGB array as input is too ambiguous, so an alpha channel must be added; for example, if an image has a key
color, then the key-color pixels must have their alpha channel set to zero.
This data may be immediately sent to VRAM, which can be a slow operation.
Example::
# Examples use imageio for image loading, see https://imageio.readthedocs.io
tileset: tcod.tileset.Tileset # This example assumes you are modifying an existing tileset.
# Normal usage when a tile already has its own alpha channel.
# The loaded tile must be the correct shape for the tileset you assign it to.
# The tile is assigned to a private use area and will not conflict with any existing codepoint.
tileset.set_tile(0x100000, imageio.imread("rgba_tile.png"))
# Load a greyscale tile.
tileset.set_tile(0x100001, imageio.imread("greyscale_tile.png", pilmode="L"))
# If you are stuck with an RGB array then you can use the red channel as the input: `rgb[:, :, 0]`
# Loads an RGB sprite without a background.
tileset.set_tile(0x100002, imageio.imread("rgb_no_background.png", pilmode="RGBA"))
# If you're stuck with an RGB array then you can pad the channel axis with an alpha of 255:
# rgba = np.pad(rgb, pad_width=((0, 0), (0, 0), (0, 1)), constant_values=255)
# Loads an RGB sprite with a key color background.
KEY_COLOR = np.asarray((255, 0, 255), dtype=np.uint8)
sprite_rgb = imageio.imread("rgb_tile.png")
# Compare the RGB colors to KEY_COLOR; a pixel is background only if all channels match the key color.
sprite_mask = (sprite_rgb != KEY_COLOR).any(axis=2)
# Generate the alpha array, with 255 as the foreground and 0 as the background.
sprite_alpha = sprite_mask.astype(np.uint8) * 255
# Combine the RGB and alpha arrays into an RGBA array.
sprite_rgba = np.append(sprite_rgb, sprite_alpha[:, :, np.newaxis], axis=2)
tileset.set_tile(0x100003, sprite_rgba)
"""
tile = np.ascontiguousarray(tile, dtype=np.uint8)
if tile.shape == self.tile_shape:
full_tile = np.empty(self.tile_shape + (4,), dtype=np.uint8)
full_tile[:, :, :3] = 255
full_tile[:, :, 3] = tile
return self.set_tile(codepoint, full_tile)
required = self.tile_shape + (4,)
if tile.shape != required:
note = ""
if len(tile.shape) == 3 and tile.shape[2] == 3:
note = (
"\nNote: An RGB array is too ambiguous,"
" an alpha channel must be added to this array to divide the background/foreground areas."
)
raise ValueError(f"Tile shape must be {required} or {self.tile_shape}, got {tile.shape}.{note}")
lib.TCOD_tileset_set_tile_(
self._tileset_p,
codepoint,
ffi.from_buffer("struct TCOD_ColorRGBA*", tile),
)
def render(self, console: tcod.console.Console) -> NDArray[np.uint8]:
"""Render an RGBA array, using console with this tileset.
`console` is the Console object to render, this can not be the root
console.
The output array will be a np.uint8 array with the shape of:
``(con_height * tile_height, con_width * tile_width, 4)``.
.. versionadded:: 11.9
"""
if not console:
raise ValueError("'console' must not be the root console.")
width = console.width * self.tile_width
height = console.height * self.tile_height
out = np.empty((height, width, 4), np.uint8)
out[:] = 9
surface_p = ffi.gc(
lib.SDL_CreateRGBSurfaceWithFormatFrom(
ffi.from_buffer("void*", out),
width,
height,
32,
out.strides[0],
lib.SDL_PIXELFORMAT_RGBA32,
),
lib.SDL_FreeSurface,
)
with surface_p:
with ffi.new("SDL_Surface**", surface_p) as surface_p_p:
_check(
lib.TCOD_tileset_render_to_surface(
self._tileset_p,
_console(console),
ffi.NULL,
surface_p_p,
)
)
return out
def remap(self, codepoint: int, x: int, y: int = 0) -> None:
"""Reassign a codepoint to a character in this tileset.
`codepoint` is the Unicode codepoint to assign.
`x` and `y` is the position of the tilesheet to assign to `codepoint`.
This is the tile position itself, not the pixel position of the tile.
Large values of `x` will wrap to the next row, so using `x` by itself
is equivalent to `Tile Index` in the :any:`charmap-reference`.
This is normally used on loaded tilesheets. Other methods of Tileset
creation won't have reliable tile indexes.
.. versionadded:: 11.12
"""
tile_i = x + y * self._tileset_p.virtual_columns
if not (0 <= tile_i < self._tileset_p.tiles_count):
raise IndexError(
"Tile %i is non-existent and can't be assigned."
" (Tileset has %i tiles.)" % (tile_i, self._tileset_p.tiles_count)
)
_check(
lib.TCOD_tileset_assign_tile(
self._tileset_p,
tile_i,
codepoint,
)
)
@deprecate("Using the default tileset is deprecated.")
def get_default() -> Tileset:
"""Return a reference to the default Tileset.
.. versionadded:: 11.10
.. deprecated:: 11.13
The default tileset is deprecated.
With contexts this is no longer needed.
"""
return Tileset._claim(lib.TCOD_get_default_tileset())
@deprecate("Using the default tileset is deprecated.")
def set_default(tileset: Tileset) -> None:
"""Set the default tileset.
The display will use this new tileset immediately.
.. versionadded:: 11.10
.. deprecated:: 11.13
The default tileset is deprecated.
With contexts this is no longer needed.
"""
lib.TCOD_set_default_tileset(tileset._tileset_p)
def load_truetype_font(path: Union[str, Path], tile_width: int, tile_height: int) -> Tileset:
"""Return a new Tileset from a `.ttf` or `.otf` file.
Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead.
You can send this Tileset to :any:`set_default`.
This function is provisional. The API may change.
"""
if not os.path.exists(path):
raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),))
cdata = lib.TCOD_load_truetype_font_(str(path).encode(), tile_width, tile_height)
if not cdata:
raise RuntimeError(ffi.string(lib.TCOD_get_error()))
return Tileset._claim(cdata)
@deprecate("Accessing the default tileset is deprecated.")
def set_truetype_font(path: Union[str, Path], tile_width: int, tile_height: int) -> None:
"""Set the default tileset from a `.ttf` or `.otf` file.
`path` is the file path for the font file.
`tile_width` and `tile_height` are the desired size of the tiles in the new
tileset. The font will be scaled to fit the given `tile_height` and
`tile_width`.
This function must be called before :any:`tcod.console_init_root`. Once
the root console is setup you may call this function again to change the
font. The tileset can be changed but the window will not be resized
automatically.
.. versionadded:: 9.2
.. deprecated:: 11.13
This function does not support contexts.
Use :any:`load_truetype_font` instead.
"""
if not os.path.exists(path):
raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),))
if lib.TCOD_tileset_load_truetype_(str(path).encode(), tile_width, tile_height):
raise RuntimeError(ffi.string(lib.TCOD_get_error()))
def load_bdf(path: Union[str, Path]) -> Tileset:
"""Return a new Tileset from a `.bdf` file.
For the best results the font should be monospace, cell-based, and
single-width. As an example, a good set of fonts would be the
`Unicode fonts and tools for X11 <https://www.cl.cam.ac.uk/~mgk25/ucs-fonts.html>`_
package.
Pass the returned Tileset to :any:`tcod.tileset.set_default` and it will
take effect when `tcod.console_init_root` is called.
.. versionadded:: 11.10
""" # noqa: E501
if not os.path.exists(path):
raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),))
cdata = lib.TCOD_load_bdf(str(path).encode())
if not cdata:
raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())
return Tileset._claim(cdata)
def load_tilesheet(path: Union[str, Path], columns: int, rows: int, charmap: Optional[Iterable[int]]) -> Tileset:
"""Return a new Tileset from a simple tilesheet image.
`path` is the file path to a PNG file with the tileset.
`columns` and `rows` is the shape of the tileset. Tiles are assumed to
take up the entire space of the image.
`charmap` is a sequence of codepoints to map the tilesheet to in row-major order.
This is a list or generator of codepoints which map the tiles like this: ``charmap[tile_index] = codepoint``.
For common tilesets `charmap` should be :any:`tcod.tileset.CHARMAP_CP437`.
Generators will be sliced so :any:`itertools.count` can be used which will
give all tiles the same codepoint as their index, but this will not map
tiles onto proper Unicode.
If `None` is used then no tiles will be mapped, you will need to use
:any:`Tileset.remap` to assign codepoints to this Tileset.
.. versionadded:: 11.12
"""
if not os.path.exists(path):
raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),))
mapping = []
if charmap is not None:
mapping = list(itertools.islice(charmap, columns * rows))
cdata = lib.TCOD_tileset_load(str(path).encode(), columns, rows, len(mapping), mapping)
if not cdata:
_raise_tcod_error()
return Tileset._claim(cdata)
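# Typical usage (illustrative only; the PNG filename is hypothetical and must be a
# 16x16 tilesheet laid out in Code Page 437 order for CHARMAP_CP437 to be correct):
#
#     tileset = load_tilesheet(
#         "terminal8x8_gs_ro.png", columns=16, rows=16, charmap=CHARMAP_CP437
#     )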
def procedural_block_elements(*, tileset: Tileset) -> None:
"""Overwrites the block element codepoints in `tileset` with prodecually generated glyphs.
Args:
tileset (Tileset): A :any:`Tileset` with tiles of any shape.
This will overwrite all of the codepoints `listed here <https://en.wikipedia.org/wiki/Block_Elements>`_
except for the shade glyphs.
This function is useful for other functions such as :any:`Console.draw_semigraphics` which use more types of block
elements than are found in Code Page 437.
.. versionadded:: 13.1
Example::
>>> tileset = tcod.tileset.Tileset(8, 8)
>>> tcod.tileset.procedural_block_elements(tileset=tileset)
>>> tileset.get_tile(0x259E)[:, :, 3] # "▞" Quadrant upper right and lower left.
array([[ 0, 0, 0, 0, 255, 255, 255, 255],
[ 0, 0, 0, 0, 255, 255, 255, 255],
[ 0, 0, 0, 0, 255, 255, 255, 255],
[ 0, 0, 0, 0, 255, 255, 255, 255],
[255, 255, 255, 255, 0, 0, 0, 0],
[255, 255, 255, 255, 0, 0, 0, 0],
[255, 255, 255, 255, 0, 0, 0, 0],
[255, 255, 255, 255, 0, 0, 0, 0]], dtype=uint8)
>>> tileset.get_tile(0x2581)[:, :, 3] # "▁" Lower one eighth block.
array([[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0],
[255, 255, 255, 255, 255, 255, 255, 255]], dtype=uint8)
>>> tileset.get_tile(0x258D)[:, :, 3] # "▍" Left three eighths block.
array([[255, 255, 255, 0, 0, 0, 0, 0],
[255, 255, 255, 0, 0, 0, 0, 0],
[255, 255, 255, 0, 0, 0, 0, 0],
[255, 255, 255, 0, 0, 0, 0, 0],
[255, 255, 255, 0, 0, 0, 0, 0],
[255, 255, 255, 0, 0, 0, 0, 0],
[255, 255, 255, 0, 0, 0, 0, 0],
[255, 255, 255, 0, 0, 0, 0, 0]], dtype=uint8)
"""
quadrants: NDArray[np.uint8] = np.zeros(tileset.tile_shape, dtype=np.uint8)
half_height = tileset.tile_height // 2
half_width = tileset.tile_width // 2
quadrants[:half_height, :half_width] = 0b1000 # Top-left.
quadrants[:half_height, half_width:] = 0b0100 # Top-right.
quadrants[half_height:, :half_width] = 0b0010 # Bottom-left.
quadrants[half_height:, half_width:] = 0b0001 # Bottom-right.
for codepoint, quad_mask in (
(0x2580, 0b1100), # "▀" Upper half block.
(0x2584, 0b0011), # "▄" Lower half block.
(0x2588, 0b1111), # "█" Full block.
(0x258C, 0b1010), # "▌" Left half block.
(0x2590, 0b0101), # "▐" Right half block.
(0x2596, 0b0010), # "▖" Quadrant lower left.
(0x2597, 0b0001), # "▗" Quadrant lower right.
(0x2598, 0b1000), # "▘" Quadrant upper left.
(0x2599, 0b1011), # "▙" Quadrant upper left and lower left and lower right.
(0x259A, 0b1001), # "▚" Quadrant upper left and lower right.
(0x259B, 0b1110), # "▛" Quadrant upper left and upper right and lower left.
(0x259C, 0b1101), # "▜" Quadrant upper left and upper right and lower right.
(0x259D, 0b0100), # "▝" Quadrant upper right.
(0x259E, 0b0110), # "▞" Quadrant upper right and lower left.
(0x259F, 0b0111), # "▟" Quadrant upper right and lower left and lower right.
):
alpha: NDArray[np.uint8] = np.asarray((quadrants & quad_mask) != 0, dtype=np.uint8) * 255
tileset.set_tile(codepoint, alpha)
for codepoint, axis, fraction, negative in (
(0x2581, 0, 7, True), # "▁" Lower one eighth block.
(0x2582, 0, 6, True), # "▂" Lower one quarter block.
(0x2583, 0, 5, True), # "▃" Lower three eighths block.
(0x2585, 0, 3, True), # "▅" Lower five eighths block.
(0x2586, 0, 2, True), # "▆" Lower three quarters block.
(0x2587, 0, 1, True), # "▇" Lower seven eighths block.
(0x2589, 1, 7, False), # "▉" Left seven eighths block.
(0x258A, 1, 6, False), # "▊" Left three quarters block.
(0x258B, 1, 5, False), # "▋" Left five eighths block.
(0x258D, 1, 3, False), # "▍" Left three eighths block.
(0x258E, 1, 2, False), # "▎" Left one quarter block.
(0x258F, 1, 1, False), # "▏" Left one eighth block.
(0x2594, 0, 1, False), # "▔" Upper one eighth block.
(0x2595, 1, 7, True), # "▕" Right one eighth block .
):
indexes = [slice(None), slice(None)]
divide = tileset.tile_shape[axis] * fraction // 8
# If negative then shade from the far corner, otherwise shade from the near corner.
indexes[axis] = slice(divide, None) if negative else slice(None, divide)
alpha = np.zeros(tileset.tile_shape, dtype=np.uint8)
alpha[tuple(indexes)] = 255
tileset.set_tile(codepoint, alpha)
CHARMAP_CP437 = [
0x0000,
0x263A,
0x263B,
0x2665,
0x2666,
0x2663,
0x2660,
0x2022,
0x25D8,
0x25CB,
0x25D9,
0x2642,
0x2640,
0x266A,
0x266B,
0x263C,
0x25BA,
0x25C4,
0x2195,
0x203C,
0x00B6,
0x00A7,
0x25AC,
0x21A8,
0x2191,
0x2193,
0x2192,
0x2190,
0x221F,
0x2194,
0x25B2,
0x25BC,
0x0020,
0x0021,
0x0022,
0x0023,
0x0024,
0x0025,
0x0026,
0x0027,
0x0028,
0x0029,
0x002A,
0x002B,
0x002C,
0x002D,
0x002E,
0x002F,
0x0030,
0x0031,
0x0032,
0x0033,
0x0034,
0x0035,
0x0036,
0x0037,
0x0038,
0x0039,
0x003A,
0x003B,
0x003C,
0x003D,
0x003E,
0x003F,
0x0040,
0x0041,
0x0042,
0x0043,
0x0044,
0x0045,
0x0046,
0x0047,
0x0048,
0x0049,
0x004A,
0x004B,
0x004C,
0x004D,
0x004E,
0x004F,
0x0050,
0x0051,
0x0052,
0x0053,
0x0054,
0x0055,
0x0056,
0x0057,
0x0058,
0x0059,
0x005A,
0x005B,
0x005C,
0x005D,
0x005E,
0x005F,
0x0060,
0x0061,
0x0062,
0x0063,
0x0064,
0x0065,
0x0066,
0x0067,
0x0068,
0x0069,
0x006A,
0x006B,
0x006C,
0x006D,
0x006E,
0x006F,
0x0070,
0x0071,
0x0072,
0x0073,
0x0074,
0x0075,
0x0076,
0x0077,
0x0078,
0x0079,
0x007A,
0x007B,
0x007C,
0x007D,
0x007E,
0x007F,
0x00C7,
0x00FC,
0x00E9,
0x00E2,
0x00E4,
0x00E0,
0x00E5,
0x00E7,
0x00EA,
0x00EB,
0x00E8,
0x00EF,
0x00EE,
0x00EC,
0x00C4,
0x00C5,
0x00C9,
0x00E6,
0x00C6,
0x00F4,
0x00F6,
0x00F2,
0x00FB,
0x00F9,
0x00FF,
0x00D6,
0x00DC,
0x00A2,
0x00A3,
0x00A5,
0x20A7,
0x0192,
0x00E1,
0x00ED,
0x00F3,
0x00FA,
0x00F1,
0x00D1,
0x00AA,
0x00BA,
0x00BF,
0x2310,
0x00AC,
0x00BD,
0x00BC,
0x00A1,
0x00AB,
0x00BB,
0x2591,
0x2592,
0x2593,
0x2502,
0x2524,
0x2561,
0x2562,
0x2556,
0x2555,
0x2563,
0x2551,
0x2557,
0x255D,
0x255C,
0x255B,
0x2510,
0x2514,
0x2534,
0x252C,
0x251C,
0x2500,
0x253C,
0x255E,
0x255F,
0x255A,
0x2554,
0x2569,
0x2566,
0x2560,
0x2550,
0x256C,
0x2567,
0x2568,
0x2564,
0x2565,
0x2559,
0x2558,
0x2552,
0x2553,
0x256B,
0x256A,
0x2518,
0x250C,
0x2588,
0x2584,
0x258C,
0x2590,
0x2580,
0x03B1,
0x00DF,
0x0393,
0x03C0,
0x03A3,
0x03C3,
0x00B5,
0x03C4,
0x03A6,
0x0398,
0x03A9,
0x03B4,
0x221E,
0x03C6,
0x03B5,
0x2229,
0x2261,
0x00B1,
0x2265,
0x2264,
0x2320,
0x2321,
0x00F7,
0x2248,
0x00B0,
0x2219,
0x00B7,
0x221A,
0x207F,
0x00B2,
0x25A0,
0x00A0,
]
"""A code page 437 character mapping.
See :ref:`code-page-437` for more info and a table of glyphs.
.. versionadded:: 11.12
"""
CHARMAP_TCOD = [
0x20,
0x21,
0x22,
0x23,
0x24,
0x25,
0x26,
0x27,
0x28,
0x29,
0x2A,
0x2B,
0x2C,
0x2D,
0x2E,
0x2F,
0x30,
0x31,
0x32,
0x33,
0x34,
0x35,
0x36,
0x37,
0x38,
0x39,
0x3A,
0x3B,
0x3C,
0x3D,
0x3E,
0x3F,
0x40,
0x5B,
0x5C,
0x5D,
0x5E,
0x5F,
0x60,
0x7B,
0x7C,
0x7D,
0x7E,
0x2591,
0x2592,
0x2593,
0x2502,
0x2500,
0x253C,
0x2524,
0x2534,
0x251C,
0x252C,
0x2514,
0x250C,
0x2510,
0x2518,
0x2598,
0x259D,
0x2580,
0x2596,
0x259A,
0x2590,
0x2597,
0x2191,
0x2193,
0x2190,
0x2192,
0x25B2,
0x25BC,
0x25C4,
0x25BA,
0x2195,
0x2194,
0x2610,
0x2611,
0x25CB,
0x25C9,
0x2551,
0x2550,
0x256C,
0x2563,
0x2569,
0x2560,
0x2566,
0x255A,
0x2554,
0x2557,
0x255D,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x41,
0x42,
0x43,
0x44,
0x45,
0x46,
0x47,
0x48,
0x49,
0x4A,
0x4B,
0x4C,
0x4D,
0x4E,
0x4F,
0x50,
0x51,
0x52,
0x53,
0x54,
0x55,
0x56,
0x57,
0x58,
0x59,
0x5A,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x61,
0x62,
0x63,
0x64,
0x65,
0x66,
0x67,
0x68,
0x69,
0x6A,
0x6B,
0x6C,
0x6D,
0x6E,
0x6F,
0x70,
0x71,
0x72,
0x73,
0x74,
0x75,
0x76,
0x77,
0x78,
0x79,
0x7A,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
]
"""The layout used by older libtcod fonts, in Unicode.
This layout is non-standard, and it's not recommend to make a font for it, but
you might need it to load an existing font made for libtcod.
This character map is in Unicode, so old code using the non-Unicode
`tcod.CHAR_*` constants will need to be updated.
See :ref:`deprecated-tcod-layout` for a table of glyphs used in this character
map.
.. versionadded:: 11.12
"""
|
tests/experimental_tests/links_tests/model_tests/pspnet_tests/test_convolution_crop.py
|
souravsingh/chainercv
| 1,600 |
74399
|
import numpy as np
import unittest
from chainer import testing
from chainercv.experimental.links.model.pspnet import convolution_crop
class TestConvolutionCrop(unittest.TestCase):
def test_convolution_crop(self):
size = (8, 6)
stride = (8, 6)
n_channel = 3
img = np.random.uniform(size=(n_channel, 16, 12)).astype(np.float32)
crop_imgs, param = convolution_crop(
img, size, stride, return_param=True)
self.assertEqual(crop_imgs.shape, (4, n_channel) + size)
self.assertEqual(crop_imgs.dtype, np.float32)
for y in range(2):
for x in range(2):
self.assertEqual(param['y_slices'][2 * y + x].start, 8 * y)
self.assertEqual(
param['y_slices'][2 * y + x].stop, 8 * (y + 1))
self.assertEqual(param['x_slices'][2 * y + x].start, 6 * x)
self.assertEqual(
param['x_slices'][2 * y + x].stop, 6 * (x + 1))
for i in range(4):
self.assertEqual(param['crop_y_slices'][i].start, 0)
self.assertEqual(param['crop_y_slices'][i].stop, 8)
self.assertEqual(param['crop_x_slices'][i].start, 0)
self.assertEqual(param['crop_x_slices'][i].stop, 6)
testing.run_module(__name__, __file__)
|
xonsh/diff_history.py
|
danielballan/xonsh
| 7,986 |
74417
|
<reponame>danielballan/xonsh<filename>xonsh/diff_history.py
# -*- coding: utf-8 -*-
"""Tools for diff'ing two xonsh history files in a meaningful fashion."""
import difflib
import datetime
import itertools
import argparse
from xonsh.lazyjson import LazyJSON
from xonsh.tools import print_color
NO_COLOR_S = "{NO_COLOR}"
RED_S = "{RED}"
GREEN_S = "{GREEN}"
BOLD_RED_S = "{BOLD_RED}"
BOLD_GREEN_S = "{BOLD_GREEN}"
# intern some strings
REPLACE_S = "replace"
DELETE_S = "delete"
INSERT_S = "insert"
EQUAL_S = "equal"
def bold_str_diff(a, b, sm=None):
if sm is None:
sm = difflib.SequenceMatcher()
aline = RED_S + "- "
bline = GREEN_S + "+ "
sm.set_seqs(a, b)
for tag, i1, i2, j1, j2 in sm.get_opcodes():
if tag == REPLACE_S:
aline += BOLD_RED_S + a[i1:i2] + RED_S
bline += BOLD_GREEN_S + b[j1:j2] + GREEN_S
elif tag == DELETE_S:
aline += BOLD_RED_S + a[i1:i2] + RED_S
elif tag == INSERT_S:
bline += BOLD_GREEN_S + b[j1:j2] + GREEN_S
elif tag == EQUAL_S:
aline += a[i1:i2]
bline += b[j1:j2]
else:
raise RuntimeError("tag not understood")
return aline + NO_COLOR_S + "\n" + bline + NO_COLOR_S + "\n"
def redline(line):
return "{red}- {line}{no_color}\n".format(red=RED_S, line=line, no_color=NO_COLOR_S)
def greenline(line):
return "{green}+ {line}{no_color}\n".format(
green=GREEN_S, line=line, no_color=NO_COLOR_S
)
def highlighted_ndiff(a, b):
"""Returns a highlighted string, with bold characters where different."""
s = ""
sm = difflib.SequenceMatcher()
sm.set_seqs(a, b)
linesm = difflib.SequenceMatcher()
for tag, i1, i2, j1, j2 in sm.get_opcodes():
if tag == REPLACE_S:
for aline, bline in itertools.zip_longest(a[i1:i2], b[j1:j2]):
if bline is None:
s += redline(aline)
elif aline is None:
s += greenline(bline)
else:
s += bold_str_diff(aline, bline, sm=linesm)
elif tag == DELETE_S:
for aline in a[i1:i2]:
s += redline(aline)
elif tag == INSERT_S:
for bline in b[j1:j2]:
s += greenline(bline)
elif tag == EQUAL_S:
for aline in a[i1:i2]:
s += " " + aline + "\n"
else:
raise RuntimeError("tag not understood")
return s
class HistoryDiffer(object):
"""This class helps diff two xonsh history files."""
def __init__(self, afile, bfile, reopen=False, verbose=False):
"""
Parameters
----------
afile : file handle or str
The first file to diff
bfile : file handle or str
The second file to diff
reopen : bool, optional
Whether or not to reopen the file handles each time. The default here is
opposite from the LazyJSON default because we know that we will be doing
a lot of reading so it is best to keep the handles open.
verbose : bool, optional
Whether to print a verbose amount of information.
"""
self.a = LazyJSON(afile, reopen=reopen)
self.b = LazyJSON(bfile, reopen=reopen)
self.verbose = verbose
self.sm = difflib.SequenceMatcher(autojunk=False)
def __del__(self):
self.a.close()
self.b.close()
def __str__(self):
return self.format()
def _header_line(self, lj):
s = lj._f.name if hasattr(lj._f, "name") else ""
s += " (" + lj["sessionid"] + ")"
s += " [locked]" if lj["locked"] else " [unlocked]"
ts = lj["ts"].load()
ts0 = datetime.datetime.fromtimestamp(ts[0])
s += " started: " + ts0.isoformat(" ")
if ts[1] is not None:
ts1 = datetime.datetime.fromtimestamp(ts[1])
s += " stopped: " + ts1.isoformat(" ") + " runtime: " + str(ts1 - ts0)
return s
def header(self):
"""Computes a header string difference."""
s = "{red}--- {aline}{no_color}\n" "{green}+++ {bline}{no_color}"
s = s.format(
aline=self._header_line(self.a),
bline=self._header_line(self.b),
red=RED_S,
green=GREEN_S,
no_color=NO_COLOR_S,
)
return s
def _env_both_diff(self, in_both, aenv, benv):
sm = self.sm
s = ""
for key in sorted(in_both):
aval = aenv[key]
bval = benv[key]
if aval == bval:
continue
s += "{0!r} is in both, but differs\n".format(key)
s += bold_str_diff(aval, bval, sm=sm) + "\n"
return s
def _env_in_one_diff(self, x, y, color, xid, xenv):
only_x = sorted(x - y)
if len(only_x) == 0:
return ""
if self.verbose:
xstr = ",\n".join(
[" {0!r}: {1!r}".format(key, xenv[key]) for key in only_x]
)
xstr = "\n" + xstr
else:
xstr = ", ".join(["{0!r}".format(key) for key in only_x])
in_x = "These vars are only in {color}{xid}{no_color}: {{{xstr}}}\n\n"
return in_x.format(xid=xid, color=color, no_color=NO_COLOR_S, xstr=xstr)
def envdiff(self):
"""Computes the difference between the environments."""
aenv = self.a["env"].load()
benv = self.b["env"].load()
akeys = frozenset(aenv)
bkeys = frozenset(benv)
in_both = akeys & bkeys
if len(in_both) == len(akeys) == len(bkeys):
keydiff = self._env_both_diff(in_both, aenv, benv)
if len(keydiff) == 0:
return ""
in_a = in_b = ""
else:
keydiff = self._env_both_diff(in_both, aenv, benv)
in_a = self._env_in_one_diff(akeys, bkeys, RED_S, self.a["sessionid"], aenv)
in_b = self._env_in_one_diff(
bkeys, akeys, GREEN_S, self.b["sessionid"], benv
)
s = "Environment\n-----------\n" + in_a + keydiff + in_b
return s
def _cmd_in_one_diff(self, inp, i, xlj, xid, color):
s = "cmd #{i} only in {color}{xid}{no_color}:\n"
s = s.format(i=i, color=color, xid=xid, no_color=NO_COLOR_S)
lines = inp.splitlines()
lt = "{color}{pre}{no_color} {line}\n"
s += lt.format(color=color, no_color=NO_COLOR_S, line=lines[0], pre=">>>")
for line in lines[1:]:
s += lt.format(color=color, no_color=NO_COLOR_S, line=line, pre="...")
if not self.verbose:
return s + "\n"
out = xlj["cmds"][0].get("out", "Note: no output stored")
s += out.rstrip() + "\n\n"
return s
def _cmd_out_and_rtn_diff(self, i, j):
s = ""
aout = self.a["cmds"][i].get("out", None)
bout = self.b["cmds"][j].get("out", None)
if aout is None and bout is None:
# s += 'Note: neither output stored\n'
pass
elif bout is None:
aid = self.a["sessionid"]
s += "Note: only {red}{aid}{no_color} output stored\n".format(
red=RED_S, aid=aid, no_color=NO_COLOR_S
)
elif aout is None:
bid = self.b["sessionid"]
s += "Note: only {green}{bid}{no_color} output stored\n".format(
green=GREEN_S, bid=bid, no_color=NO_COLOR_S
)
elif aout != bout:
s += "Outputs differ\n"
s += highlighted_ndiff(aout.splitlines(), bout.splitlines())
else:
pass
artn = self.a["cmds"][i]["rtn"]
brtn = self.b["cmds"][j]["rtn"]
if artn != brtn:
s += (
"Return vals {red}{artn}{no_color} & {green}{brtn}{no_color} differ\n"
).format(
red=RED_S, green=GREEN_S, no_color=NO_COLOR_S, artn=artn, brtn=brtn
)
return s
def _cmd_replace_diff(self, i, ainp, aid, j, binp, bid):
s = (
"cmd #{i} in {red}{aid}{no_color} is replaced by \n"
"cmd #{j} in {green}{bid}{no_color}:\n"
)
s = s.format(
i=i, aid=aid, j=j, bid=bid, red=RED_S, green=GREEN_S, no_color=NO_COLOR_S
)
s += highlighted_ndiff(ainp.splitlines(), binp.splitlines())
if not self.verbose:
return s + "\n"
s += self._cmd_out_and_rtn_diff(i, j)
return s + "\n"
def cmdsdiff(self):
"""Computes the difference of the commands themselves."""
aid = self.a["sessionid"]
bid = self.b["sessionid"]
ainps = [c["inp"] for c in self.a["cmds"]]
binps = [c["inp"] for c in self.b["cmds"]]
sm = self.sm
sm.set_seqs(ainps, binps)
s = ""
for tag, i1, i2, j1, j2 in sm.get_opcodes():
if tag == REPLACE_S:
zipper = itertools.zip_longest
for i, ainp, j, binp in zipper(
range(i1, i2), ainps[i1:i2], range(j1, j2), binps[j1:j2]
):
if j is None:
s += self._cmd_in_one_diff(ainp, i, self.a, aid, RED_S)
elif i is None:
s += self._cmd_in_one_diff(binp, j, self.b, bid, GREEN_S)
else:
s += self._cmd_replace_diff(i, ainp, aid, j, binp, bid)
elif tag == DELETE_S:
for i, inp in enumerate(ainps[i1:i2], i1):
s += self._cmd_in_one_diff(inp, i, self.a, aid, RED_S)
elif tag == INSERT_S:
for j, inp in enumerate(binps[j1:j2], j1):
s += self._cmd_in_one_diff(inp, j, self.b, bid, GREEN_S)
elif tag == EQUAL_S:
for i, j in zip(range(i1, i2), range(j1, j2)):
odiff = self._cmd_out_and_rtn_diff(i, j)
if len(odiff) > 0:
h = (
"cmd #{i} in {red}{aid}{no_color} input is the same as \n"
"cmd #{j} in {green}{bid}{no_color}, but output differs:\n"
)
s += h.format(
i=i,
aid=aid,
j=j,
bid=bid,
red=RED_S,
green=GREEN_S,
no_color=NO_COLOR_S,
)
s += odiff + "\n"
else:
raise RuntimeError("tag not understood")
if len(s) == 0:
return s
return "Commands\n--------\n" + s
def format(self):
"""Formats the difference between the two history files."""
s = self.header()
ed = self.envdiff()
if len(ed) > 0:
s += "\n\n" + ed
cd = self.cmdsdiff()
if len(cd) > 0:
s += "\n\n" + cd
return s.rstrip()
_HD_PARSER = None
def dh_create_parser(p=None):
global _HD_PARSER
p_was_none = p is None
if _HD_PARSER is not None and p_was_none:
return _HD_PARSER
if p_was_none:
p = argparse.ArgumentParser(
"diff-history", description="diffs two xonsh history files"
)
p.add_argument(
"--reopen",
dest="reopen",
default=False,
action="store_true",
help="make lazy file loading reopen files each time",
)
p.add_argument(
"-v",
"--verbose",
dest="verbose",
default=False,
action="store_true",
help="whether to print even more information",
)
p.add_argument("a", help="first file in diff")
p.add_argument("b", help="second file in diff")
if p_was_none:
_HD_PARSER = p
return p
def dh_main_action(ns, hist=None, stdout=None, stderr=None):
hd = HistoryDiffer(ns.a, ns.b, reopen=ns.reopen, verbose=ns.verbose)
print_color(hd.format(), file=stdout)
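# Example usage (illustrative only; the history file names are hypothetical):
#
#     parser = dh_create_parser()
#     ns = parser.parse_args(["xonsh-aaa.json", "xonsh-bbb.json", "--verbose"])
#     dh_main_action(ns)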
|
baselines/EMNLP2019/uri_features.py
|
ParikhKadam/knowledge-net
| 240 |
74422
|
import itertools
import numpy as np
import networkx as nx
import vocab
def coref_score(instance, property_id):
return [ instance.subject_entity["coref_score"], instance.object_entity["coref_score"] ]
def el_score(instance, property_id):
return [ instance.subject_entity["el_score"], instance.object_entity["el_score"] ]
def _entity_linker_types_from_mention(entity):
arr = np.zeros(len(vocab.types), np.float32)
for i, t in enumerate(vocab.types):
if t in entity["types"]:
arr[i] = 1.0
return arr
def entity_linker_types(instance, property_id):
return np.concatenate([
_entity_linker_types_from_mention(instance.subject_entity),
_entity_linker_types_from_mention(instance.object_entity)
])
def wikidata_predicates(instance, property_id):
return None
def text_score(instance, property_id):
return [ instance.text_instance.scores[property_id] ]
|
tests/conftest.py
|
wsgfz/zmail
| 317 |
74428
|
import json
import os
from typing import List, Optional, Tuple
import pytest
@pytest.fixture(scope='module')
def here():
return os.path.abspath(os.path.dirname(__file__))
@pytest.fixture(scope='module')
def accounts(here) -> Optional[List[Tuple]]:
"""Return account list"""
accounts_path = os.path.join(here, 'config')
if not os.path.exists(accounts_path):
return None
with open(accounts_path, 'r') as f:
raw = f.read()
return json.loads(raw)
|
beagle/datasources/win_evtx.py
|
limkokhian/beagle
| 1,139 |
74429
|
import datetime
from typing import TYPE_CHECKING, Generator
import Evtx.Evtx as evtx
from lxml import etree
from beagle.common.logging import logger
from beagle.datasources.base_datasource import DataSource
from beagle.transformers.evtx_transformer import WinEVTXTransformer
if TYPE_CHECKING:
from beagle.transformer.base_transformer import Transformer
from typing import List
class WinEVTX(DataSource):
"""Parses Windows .evtx files. Yields events one by one using the `python-evtx` library.
Parameters
----------
evtx_log_file : str
The path to the windows evtx file to parse.
"""
name = "Windows EVTX File"
transformers = [WinEVTXTransformer] # type: List[Transformer]
category = "Windows Event Logs"
def __init__(self, evtx_log_file: str) -> None:
self.file_path = evtx_log_file
logger.info(f"Setting up WinEVTX for {self.file_path}")
def events(self) -> Generator[dict, None, None]:
with evtx.Evtx(self.file_path) as log:
for record in log.records():
# Get the lxml object
yield self.parse_record(record.lxml())
def metadata(self) -> dict:
"""Get the hostname by inspecting the first record.
Returns
-------
dict
>>> {"hostname": str}
"""
with evtx.Evtx(self.file_path) as log:
for record in log.records():
# Get the lxml object
event = self.parse_record(record.lxml())
break
return {"hostname": event["computer"]}
def parse_record(self, record: etree.ElementTree, name="") -> dict:
"""Recursivly converts a etree.ElementTree record to a JSON dictionary
with one level.
Parameters
----------
record : etree.ElementTree
Current record to parse
name : str, optional
Name of the current key we are at.
Returns
-------
dict
JSON representation of the event
"""
data = {}
for node in record:
next_name = node.tag.split("}")[-1]
# Recurse
data.update(self.parse_record(node, next_name))
if record.attrib and record.text:
key = f"{name}_{record.keys()[0]}".lower()
# Use attributes if we're in EventData
if "EventData" in record.getparent().tag:
key += f"_{record.values()[0]}".lower()
data[key] = record.text
elif record.attrib:
for k, val in record.attrib.items():
key = f"{name}_{k}".lower()
data[key] = val
else:
curr_name = record.tag.split("}")[-1]
key = f"{curr_name}".lower()
data[key] = record.text
if key == "timecreated_systemtime":
time = datetime.datetime.strptime(
data["timecreated_systemtime"], "%Y-%m-%d %H:%M:%S.%f"
)
epoch = int(time.strftime("%s"))
data["timecreated_systemtime"] = epoch
return data
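# Example usage (illustrative only; the .evtx path is hypothetical):
#
#     datasource = WinEVTX("Security.evtx")
#     print(datasource.metadata())           # {"hostname": ...}
#     for event in datasource.events():      # one flattened dict per record
#         pass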
|
python/dgl/_dataloading/__init__.py
|
ketyi/dgl
| 9,516 |
74438
|
"""The ``dgl.dataloading`` package contains:
* Data loader classes for iterating over a set of nodes or edges in a graph and generating
computation dependencies via neighborhood sampling methods.
* Various sampler classes that perform neighborhood sampling for multi-layer GNNs.
* Negative samplers for link prediction.
For a holistic explanation of how the different components work together,
read the user guide :ref:`guide-minibatch`.
.. note::
This package is experimental and the interfaces may be subject
to changes in future releases. It currently only has implementations in PyTorch.
"""
from .neighbor import *
from .dataloader import *
from .cluster_gcn import *
from .shadow import *
from . import negative_sampler
from .async_transferer import AsyncTransferer
from .. import backend as F
if F.get_preferred_backend() == 'pytorch':
from .pytorch import *
|
plenum/test/script/test_bootstrap_test_node.py
|
jandayanan/indy-plenum
| 148 |
74449
|
<filename>plenum/test/script/test_bootstrap_test_node.py
import os
from argparse import ArgumentTypeError
import pytest
from common.serializers.json_serializer import JsonSerializer
from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file
from plenum.bls.bls_key_manager_file import BlsKeyManagerFile
from plenum.common.constants import NYM, VERKEY, ROLE, TARGET_NYM, ALIAS, NODE, \
DATA, CLIENT_IP, CLIENT_PORT, NODE_IP, \
NODE_PORT, SERVICES, BLS_KEY, VALIDATOR, TRUSTEE, STEWARD, BLS_KEY_PROOF, CURRENT_TXN_PAYLOAD_VERSIONS
from plenum.common.test_network_setup import TestNetworkSetup
from plenum.common.txn_util import getTxnOrderedFields, get_seq_no, get_txn_id, get_payload_data, get_type, get_version, \
get_protocol_version
from plenum.common.util import randomString
from storage import store_utils
from stp_zmq.zstack import ZStack
portsStart = 9600
NODE_COUNT = 4
CLIENT_COUNT = 8
TRUSTEE_COUNT = 1
@pytest.fixture()
def params(tconf):
steward_defs, node_defs = TestNetworkSetup.gen_defs(
ips=None, nodeCount=NODE_COUNT, starting_port=portsStart)
client_defs = TestNetworkSetup.gen_client_defs(clientCount=CLIENT_COUNT)
trustee_def = TestNetworkSetup.gen_trustee_def(1)
nodeParamsFile = randomString()
return steward_defs, node_defs, client_defs, trustee_def, nodeParamsFile
@pytest.fixture()
def bootstrap(params, tdir, tconf):
steward_defs, node_defs, client_defs, trustee_def, nodeParamsFile = params
TestNetworkSetup.bootstrapTestNodesCore(
config=tconf, network="test", appendToLedgers=False,
domainTxnFieldOrder=getTxnOrderedFields(),
trustee_def=trustee_def, steward_defs=steward_defs,
node_defs=node_defs, client_defs=client_defs, localNodes=1,
nodeParamsFileName=nodeParamsFile, chroot=tdir)
@pytest.fixture()
def config_helper(config_helper_class, tdir, tconf):
return config_helper_class(tconf, chroot=tdir)
@pytest.fixture()
def genesis_dir(config_helper):
return config_helper.genesis_dir
@pytest.fixture()
def keys_dir(config_helper):
return config_helper.keys_dir
@pytest.fixture()
def domain_genesis_file(genesis_dir, config_helper):
return os.path.join(genesis_dir,
genesis_txn_file(TestNetworkSetup.domain_ledger_file_name(config_helper.config)))
@pytest.fixture()
def pool_genesis_file(genesis_dir, config_helper):
return os.path.join(genesis_dir,
genesis_txn_file(TestNetworkSetup.pool_ledger_file_name(config_helper.config)))
def test_bootstrap_test_node_creates_genesis_files(bootstrap,
genesis_dir,
domain_genesis_file,
pool_genesis_file):
assert os.path.exists(genesis_dir)
assert os.path.exists(domain_genesis_file)
assert os.path.exists(pool_genesis_file)
def test_bootstrap_test_node_creates_keys(bootstrap,
keys_dir,
params):
assert os.path.exists(keys_dir)
_, node_defs, _, _, _ = params
# only Node1 is local, that is has keys generated
node_name = node_defs[0].name
node_keys_folder = os.path.join(keys_dir, node_name)
assert os.path.exists(node_keys_folder)
assert os.path.exists(os.path.join(node_keys_folder, ZStack.PublicKeyDirName))
assert os.path.exists(os.path.join(node_keys_folder, ZStack.PrivateKeyDirName))
assert os.path.exists(os.path.join(node_keys_folder, ZStack.VerifKeyDirName))
assert os.path.exists(os.path.join(node_keys_folder, ZStack.SigKeyDirName))
assert os.path.exists(os.path.join(node_keys_folder, BlsKeyManagerFile.BLS_KEYS_DIR_NAME))
def test_domain_genesis_txns(bootstrap, domain_genesis_file):
serializer = JsonSerializer()
with open(domain_genesis_file) as f:
i = 0
for line in store_utils.cleanLines(f.readlines()):
txn = serializer.deserialize(line)
assert get_seq_no(txn)
assert get_payload_data(txn)
assert get_type(txn) == NYM
assert get_version(txn) == "1"
assert get_protocol_version(txn) is None
assert get_payload_data(txn)[VERKEY]
assert get_payload_data(txn)[TARGET_NYM]
assert ALIAS not in get_payload_data(txn)
# expect Trustees, then Stewards, then Clients
if 0 <= i < TRUSTEE_COUNT:
expected_role = TRUSTEE
elif TRUSTEE_COUNT <= i < TRUSTEE_COUNT + NODE_COUNT:
expected_role = STEWARD
else:
expected_role = None
assert get_payload_data(txn).get(ROLE) == expected_role
i += 1
def test_pool_genesis_txns(bootstrap, pool_genesis_file):
serializer = JsonSerializer()
with open(pool_genesis_file) as f:
for line in store_utils.cleanLines(f.readlines()):
txn = serializer.deserialize(line)
assert get_seq_no(txn)
assert get_txn_id(txn)
assert get_payload_data(txn)
assert get_type(txn) == NODE
assert get_version(txn) == "1"
assert get_protocol_version(txn) is None
assert get_payload_data(txn)[TARGET_NYM]
data = get_payload_data(txn).get(DATA)
assert data
assert data[ALIAS]
assert data[CLIENT_IP]
assert data[CLIENT_PORT]
assert data[NODE_IP]
assert data[NODE_PORT]
assert data[SERVICES] == [VALIDATOR]
assert data[BLS_KEY]
assert data[BLS_KEY_PROOF]
def test_check_valid_ip_host(params, tdir, tconf):
_, _, client_defs, trustee_def, nodeParamsFile = params
valid = [
'192.168.3.11,172.16.17.32',
'ec2-54-173-9-185.compute-1.amazonaws.com,ec2-52-38-24-189.compute-1.amazonaws.com',
'ec2-54-173-9-185.compute-1.amazonaws.com,172.16.17.32,192.168.3.11',
'172.16.17.32,ec2-54-173-9-185.compute-1.amazonaws.com,192.168.3.11',
'ledger.net,ledger.net'
]
invalid = [
'34.200.79()3.65,172.16.17.32',
'172.16.17.32,ec2-54-173$-9-185.compute-1.amazonaws.com,192.168.3.11',
'172.16.17.32,ec2-54-173-9-185.com$pute-1.amazonaws.com,192.168.3.11',
'172.16.17.32,ec2-54-173-9-185.com&pute-1.amazonaws.com,192.168.3.11',
'172.16.17.32,ec2-54-173-9-185.com*pute-1.amazonaws.com,192.168.3.11',
]
for v in valid:
assert v.split(',') == TestNetworkSetup._bootstrap_args_type_ips_hosts(v)
steward_defs, node_defs = TestNetworkSetup.gen_defs(
ips=None, nodeCount=2, starting_port=portsStart)
TestNetworkSetup.bootstrapTestNodesCore(
config=tconf, network="test", appendToLedgers=False,
domainTxnFieldOrder=getTxnOrderedFields(),
trustee_def=trustee_def, steward_defs=steward_defs,
node_defs=node_defs, client_defs=client_defs, localNodes=1,
nodeParamsFileName=nodeParamsFile, chroot=tdir)
for v in invalid:
with pytest.raises(ArgumentTypeError):
TestNetworkSetup._bootstrap_args_type_ips_hosts(v)
|
texar/tf/losses/adv_losses.py
|
dyoshioka-555/texar
| 2,325 |
74485
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adversarial losses.
"""
import tensorflow as tf
def binary_adversarial_losses(real_data,
fake_data,
discriminator_fn,
mode="max_real"):
"""Computes adversarial losses of real/fake binary discrimination game.
.. role:: python(code)
:language: python
Args:
real_data (Tensor or array): Real data of shape
`[num_real_examples, ...]`.
fake_data (Tensor or array): Fake data of shape
`[num_fake_examples, ...]`. `num_real_examples` does not
necessarily equal `num_fake_examples`.
discriminator_fn: A callable takes data (e.g., :attr:`real_data` and
:attr:`fake_data`) and returns the logits of being real. The
signature of `discriminator_fn` must be:
:python:`logits, ... = discriminator_fn(data)`.
The return value of `discriminator_fn` can be the logits, or
a tuple where the logits are the first element.
mode (str): Mode of the generator loss. Either "max_real" or "min_fake".
- **"max_real"** (default): minimizing the generator loss is to\
maximize the probability of fake data being classified as real.
- **"min_fake"**: minimizing the generator loss is to minimize the\
probability of fake data being classified as fake.
Returns:
A tuple `(generator_loss, discriminator_loss)` each of which is
a scalar Tensor, loss to be minimized.
"""
real_logits = discriminator_fn(real_data)
if isinstance(real_logits, (list, tuple)):
real_logits = real_logits[0]
real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=real_logits, labels=tf.ones_like(real_logits)))
fake_logits = discriminator_fn(fake_data)
if isinstance(fake_logits, (list, tuple)):
fake_logits = fake_logits[0]
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=fake_logits, labels=tf.zeros_like(fake_logits)))
d_loss = real_loss + fake_loss
if mode == "min_fake":
g_loss = - fake_loss
elif mode == "max_real":
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=fake_logits, labels=tf.ones_like(fake_logits)))
else:
raise ValueError("Unknown mode: %s. Only 'min_fake' and 'max_real' "
"are allowed.")
return g_loss, d_loss
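# Hypothetical usage sketch (not part of the original file): wiring the two
# losses into a TF1-style GAN training step; ``real_batch``, ``fake_batch``,
# ``discriminator`` (a callable returning logits), ``d_vars`` and ``g_vars``
# are assumed to exist elsewhere.
#
#     g_loss, d_loss = binary_adversarial_losses(
#         real_batch, fake_batch, discriminator, mode="max_real")
#     d_op = tf.train.AdamOptimizer(1e-4).minimize(d_loss, var_list=d_vars)
#     g_op = tf.train.AdamOptimizer(1e-4).minimize(g_loss, var_list=g_vars)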
|
samples/rtl/pipeline.py
|
jnice-81/dace
| 227 |
74494
|
<filename>samples/rtl/pipeline.py<gh_stars>100-1000
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
#
# This sample shows a DEPTH deep pipeline, where each stage adds 1 to the
# integer input stream.
#
# It is intended for running hardware_emulation or hardware xilinx targets.
import dace
import numpy as np
# add symbols
N = dace.symbol('N')
# add sdfg
sdfg = dace.SDFG('pipeline')
# add state
state = sdfg.add_state('device_state')
# add constants
depth = 10
sdfg.add_constant('DEPTH', depth)
# add arrays
sdfg.add_array('A', [N], dtype=dace.int32, storage=dace.StorageType.CPU_Heap)
sdfg.add_array('B', [N], dtype=dace.int32, storage=dace.StorageType.CPU_Heap)
sdfg.add_array('fpga_A', [N],
dtype=dace.int32,
transient=True,
storage=dace.StorageType.FPGA_Global)
sdfg.add_array('fpga_B', [N],
dtype=dace.int32,
transient=True,
storage=dace.StorageType.FPGA_Global)
# add streams
sdfg.add_stream('A_stream',
dtype=dace.int32,
transient=True,
storage=dace.StorageType.FPGA_Local)
sdfg.add_stream('B_stream',
dtype=dace.int32,
transient=True,
storage=dace.StorageType.FPGA_Local)
# add custom rtl tasklet
rtl_tasklet = state.add_tasklet(name='rtl_tasklet',
inputs={'a'},
outputs={'b'},
code='''
/*
Convention:
|--------------------------------------------------------|
| |
-->| ap_aclk (clock input) |
-->| ap_areset (reset input, rst on high) |
-->| ap_start (start pulse from host) |
<--| ap_done (tells the host that the kernel is done) |
| |
| For each input: For each output: |
| |
-->| s_axis_{input}_tvalid reg m_axis_{output}_tvalid |-->
-->| s_axis_{input}_tdata reg m_axis_{output}_tdata |-->
<--| reg s_axis_{input}_tready m_axis_{output}_tready |<--
-->| s_axis_{input}_tkeep reg m_axis_{output}_tkeep |-->
-->| s_axis_{input}_tlast reg m_axis_{output}_tlast |-->
| |
|--------------------------------------------------------|
*/
assign ap_done = 1; // free-running kernel
reg [DEPTH-1:0] tvalids;
reg [31:0] tdatas [DEPTH-1:0];
reg [DEPTH-1:0] treadys;
integer i;
always @(posedge ap_aclk) begin
if (ap_areset) begin
for (i = 0; i < DEPTH; i = i + 1) begin
tvalids[i] = 0;
tdatas[i] = 0;
treadys[i] = 1;
end
s_axis_a_tready = 1;
m_axis_b_tvalid = 0;
m_axis_b_tdata = 0;
end else begin
// Handle m_axis
if (!m_axis_b_tvalid || (m_axis_b_tvalid && m_axis_b_tready)) begin
m_axis_b_tvalid = tvalids[DEPTH-1];
m_axis_b_tdata = tdatas[DEPTH-1];
tvalids[DEPTH-1] = 0;
            tdatas[DEPTH-1] = 0;
end
treadys[DEPTH-1] = !m_axis_b_tvalid;
// Handle intermediates
for (i = DEPTH-1; i > 0; i = i - 1) begin
if (tvalids[i-1] && treadys[i-1]) begin
tvalids[i] = tvalids[i-1];
tdatas[i] = tdatas[i-1] + 1;
tvalids[i-1] = 0;
tdatas[i-1] = 0;
end
treadys[i-1] = !tvalids[i];
end
// Handle s_axis
if (s_axis_a_tvalid && s_axis_a_tready) begin
tvalids[0] = s_axis_a_tvalid;
tdatas[0] = s_axis_a_tdata + 1;
end
s_axis_a_tready = !tvalids[0];
end
end
''',
language=dace.Language.SystemVerilog)
# add read and write tasklets
read_a = state.add_tasklet('read_a', {'inp'}, {'out'}, 'out = inp')
write_b = state.add_tasklet('write_b', {'inp'}, {'out'}, 'out = inp')
# add read and write maps
read_a_entry, read_a_exit = state.add_map(
'read_a_map', dict(i='0:N'), schedule=dace.ScheduleType.FPGA_Device)
write_b_entry, write_b_exit = state.add_map(
'write_b_map', dict(i='0:N'), schedule=dace.ScheduleType.FPGA_Device)
# add read_a memlets and access nodes
read_a_inp = state.add_read('fpga_A')
read_a_out = state.add_write('A_stream')
state.add_memlet_path(read_a_inp,
read_a_entry,
read_a,
dst_conn='inp',
memlet=dace.Memlet('fpga_A[i]'))
state.add_memlet_path(read_a,
read_a_exit,
read_a_out,
src_conn='out',
memlet=dace.Memlet('A_stream[0]'))
# add tasklet memlets
A = state.add_read('A_stream')
B = state.add_write('B_stream')
state.add_memlet_path(A,
rtl_tasklet,
dst_conn='a',
memlet=dace.Memlet('A_stream[0]'))
state.add_memlet_path(rtl_tasklet,
B,
src_conn='b',
memlet=dace.Memlet('B_stream[0]'))
# add write_b memlets and access nodes
write_b_inp = state.add_read('B_stream')
write_b_out = state.add_write('fpga_B')
state.add_memlet_path(write_b_inp,
write_b_entry,
write_b,
dst_conn='inp',
memlet=dace.Memlet('B_stream[0]'))
state.add_memlet_path(write_b,
write_b_exit,
write_b_out,
src_conn='out',
memlet=dace.Memlet('fpga_B[i]'))
# add copy to device state
copy_to_device = sdfg.add_state('copy_to_device')
cpu_a = copy_to_device.add_read('A')
dev_a = copy_to_device.add_write('fpga_A')
copy_to_device.add_memlet_path(cpu_a,
dev_a,
memlet=dace.Memlet('A[0:N]'))
sdfg.add_edge(copy_to_device, state, dace.InterstateEdge())
# add copy to host state
copy_to_host = sdfg.add_state('copy_to_host')
dev_b = copy_to_host.add_read('fpga_B')
cpu_b = copy_to_host.add_write('B')
copy_to_host.add_memlet_path(dev_b,
cpu_b,
memlet=dace.Memlet('B[0:N]'))
sdfg.add_edge(state, copy_to_host, dace.InterstateEdge())
# validate sdfg
sdfg.validate()
######################################################################
if __name__ == '__main__':
# init data structures
N.set(8192)
a = np.random.randint(0, 100, N.get()).astype(np.int32)
b = np.zeros((N.get(), )).astype(np.int32)
# show initial values
print("a={}, b={}".format(a, b))
# call program
sdfg(A=a, B=b, N=N)
# show result
print("a={}, b={}".format(a, b))
# check result
for i in range(N.get()):
assert b[i] == a[i] + depth
|
DeepBrainSeg/tumor/models/__init__.py
|
JasperHG90/DeepBrainSeg
| 130 |
74521
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__license__ = 'MIT'
__maintainer__ = ['<NAME>']
__email__ = ['<EMAIL>']
|
summarize_from_feedback/eval_rm.py
|
lumaway/summarize-from-feedback
| 365 |
74527
|
<gh_stars>100-1000
import json
import os
from dataclasses import dataclass, field
import blobfile as bf
import numpy as np
import torch
from summarize_from_feedback.datasets import jsonl_encoding
from summarize_from_feedback.query_response_model import ModelSpec
from summarize_from_feedback.reward_model import RewardModel
from summarize_from_feedback.task_data import make_jsonl_samples_iter
from summarize_from_feedback.tasks import TaskHParams
from summarize_from_feedback.utils import Timer, hyperparams
from summarize_from_feedback.utils.assertions import assert_shape_eq, assert_eq
from summarize_from_feedback.utils.logging_utils import setup_logging_with_pacific_tz
from summarize_from_feedback.utils.torch_utils import to_numpy
"""
Evaluates a reward model on a set of query-responses examples. The output will contain the same
json data as the input along with an extra key containing the predicted reward.
"""
@dataclass
class HParams(hyperparams.HParams):
reward_model_spec: ModelSpec = field(default_factory=ModelSpec)
task: TaskHParams = field(default_factory=TaskHParams)
input_path: str = None # Should contain files samples.0.jsonl, samples.1.jsonl, ...
fp16_activations: bool = True
output_key: str = "predicted_reward"
def main(H: HParams):
layout = H.reward_model_spec.run_params.all_gpu_layout()
reward_model = RewardModel(task_hparams=H.task, spec=H.reward_model_spec, layout=layout)
setup_logging_with_pacific_tz()
act_dtype = torch.float16 if H.fp16_activations else torch.float32
results_dir = bf.join(
os.environ.get("OUTPUT_DIR", os.path.join("/tmp/jobs", os.getenv("JOB_NAME"))), "results"
)
bf.makedirs(results_dir)
if layout.is_logging_rank:
with open(bf.join(results_dir, "task_hparams.json"), "w") as f:
json.dump(H.task.to_json(), f)
with open(bf.join(results_dir, "hparams.json"), "w") as f:
json.dump(H.to_json(), f)
# Creates files for printing. Only the replica root prints the files
output_file_name = os.devnull
if layout.is_replica_root:
fname = f"samples.{layout.replica_idx}.jsonl"
output_file_name = bf.join(results_dir, fname)
print(f"Outputs will be written to {output_file_name}")
input_iter = make_jsonl_samples_iter(H.input_path, layout=layout)
replica_rewards = []
with open(output_file_name, "a") as out_f:
input_idx = 0
for input in input_iter:
with Timer() as timer:
query_tokens = torch.tensor(input["context_tokens"])
assert_shape_eq(
query_tokens, (H.task.query.length,), "Context tokens shape mismatch"
)
response_tokens = torch.tensor(input["sample_tokens"])
assert_eq(response_tokens.dim(), 2)
n_responses = response_tokens.size(0)
results = reward_model.reward(
query_tokens=query_tokens.unsqueeze(0),
response_tokens=response_tokens.unsqueeze(0),
act_dtype=act_dtype,
)
rewards = to_numpy(results["reward"].reshape((n_responses,)))
if layout.is_replica_root:
replica_rewards.append(rewards)
output = {**input, H.output_key: rewards}
out_f.write((json.dumps(jsonl_encoding.encode_example(output)) + "\n"))
input_idx += 1
if layout.is_replica_root:
print(f"Batch {input_idx}. Took {timer.interval} seconds")
if layout.is_replica_root:
print(f"Wrote {input_idx} batches to {output_file_name}")
replica_rewards = np.stack(replica_rewards, axis=0)
all_rewards = reward_model.dp_comm.mpi_all_gather(replica_rewards, "rewards")
if layout.replica_idx == 0:
all_rewards = np.concatenate(all_rewards, axis=0)
print(f"Mean reward: {all_rewards.mean():.3f}")
if all_rewards.shape[1] > 1:
print(f"Stddev within a query: {all_rewards.std(axis=1, ddof=1).mean():.3}")
print(f"Stddev across queries: {all_rewards.std(axis=0, ddof=1).mean():.3}")
return dict(output_path=results_dir)
|
third_party/websockify/tests/load.py
|
albertobarri/idk
| 6,541 |
74530
|
#!/usr/bin/env python
'''
WebSocket server-side load test program. Sends and receives traffic
that has a random payload (length and content) that is checksummed and
given a sequence number. Any errors are reported and counted.
'''
import sys, os, select, random, time, optparse, logging
sys.path.insert(0,os.path.join(os.path.dirname(__file__), ".."))
from websockify.websocket import WebSocketServer, WebSocketRequestHandler
class WebSocketLoadServer(WebSocketServer):
recv_cnt = 0
send_cnt = 0
def __init__(self, *args, **kwargs):
self.delay = kwargs.pop('delay')
WebSocketServer.__init__(self, *args, **kwargs)
class WebSocketLoad(WebSocketRequestHandler):
max_packet_size = 10000
def new_websocket_client(self):
print "Prepopulating random array"
self.rand_array = []
for i in range(0, self.max_packet_size):
self.rand_array.append(random.randint(0, 9))
self.errors = 0
self.send_cnt = 0
self.recv_cnt = 0
try:
self.responder(self.request)
except:
print "accumulated errors:", self.errors
self.errors = 0
raise
def responder(self, client):
c_pend = 0
cqueue = []
cpartial = ""
socks = [client]
last_send = time.time() * 1000
while True:
ins, outs, excepts = select.select(socks, socks, socks, 1)
if excepts: raise Exception("Socket exception")
if client in ins:
frames, closed = self.recv_frames()
err = self.check(frames)
if err:
self.errors = self.errors + 1
print err
if closed:
self.send_close()
now = time.time() * 1000
if client in outs:
if c_pend:
last_send = now
c_pend = self.send_frames()
elif now > (last_send + self.server.delay):
last_send = now
c_pend = self.send_frames([self.generate()])
def generate(self):
length = random.randint(10, self.max_packet_size)
numlist = self.rand_array[self.max_packet_size-length:]
# Error in length
#numlist.append(5)
chksum = sum(numlist)
# Error in checksum
#numlist[0] = 5
nums = "".join( [str(n) for n in numlist] )
data = "^%d:%d:%d:%s$" % (self.send_cnt, length, chksum, nums)
self.send_cnt += 1
return data
def check(self, frames):
err = ""
for data in frames:
if data.count('$') > 1:
raise Exception("Multiple parts within single packet")
if len(data) == 0:
self.traffic("_")
continue
if data[0] != "^":
err += "buf did not start with '^'\n"
continue
try:
cnt, length, chksum, nums = data[1:-1].split(':')
cnt = int(cnt)
length = int(length)
chksum = int(chksum)
except:
print "\n<BOF>" + repr(data) + "<EOF>"
err += "Invalid data format\n"
continue
if self.recv_cnt != cnt:
err += "Expected count %d but got %d\n" % (self.recv_cnt, cnt)
self.recv_cnt = cnt + 1
continue
self.recv_cnt += 1
if len(nums) != length:
err += "Expected length %d but got %d\n" % (length, len(nums))
continue
inv = nums.translate(None, "0123456789")
if inv:
err += "Invalid characters found: %s\n" % inv
continue
real_chksum = 0
for num in nums:
real_chksum += int(num)
if real_chksum != chksum:
err += "Expected checksum %d but real chksum is %d\n" % (chksum, real_chksum)
return err
if __name__ == '__main__':
parser = optparse.OptionParser(usage="%prog [options] listen_port")
parser.add_option("--verbose", "-v", action="store_true",
help="verbose messages and per frame traffic")
parser.add_option("--cert", default="self.pem",
help="SSL certificate file")
parser.add_option("--key", default=None,
help="SSL key file (if separate from cert)")
parser.add_option("--ssl-only", action="store_true",
help="disallow non-encrypted connections")
(opts, args) = parser.parse_args()
try:
        if len(args) not in [1,2]: raise
        opts.listen_port = int(args[0])
if len(args) == 2:
opts.delay = int(args[1])
else:
opts.delay = 10
except:
parser.error("Invalid arguments")
logging.basicConfig(level=logging.INFO)
opts.web = "."
server = WebSocketLoadServer(WebSocketLoad, **opts.__dict__)
server.start_server()
|
pretrain/data_preprocess/megadepth/undistort_reconstructions.py
|
kudo1026/Pri3D
| 103 |
74576
|
import argparse
import imagesize
import os
import subprocess
parser = argparse.ArgumentParser(description='MegaDepth Undistortion')
parser.add_argument(
'--colmap_path', type=str, required=True,
help='path to colmap executable'
)
parser.add_argument(
'--base_path', type=str, required=True,
help='path to MegaDepth'
)
args = parser.parse_args()
sfm_path = os.path.join(
args.base_path, 'MegaDepth_v1_SfM'
)
base_depth_path = os.path.join(
args.base_path, 'MegaDepth_v1'
)
output_path = os.path.join(
args.base_path, 'Undistorted_SfM'
)
os.mkdir(output_path)
for scene_name in os.listdir(base_depth_path):
current_output_path = os.path.join(output_path, scene_name)
os.mkdir(current_output_path)
image_path = os.path.join(
base_depth_path, scene_name, 'dense0', 'imgs'
)
if not os.path.exists(image_path):
continue
# Find the maximum image size in scene.
max_image_size = 0
for image_name in os.listdir(image_path):
max_image_size = max(
max_image_size,
max(imagesize.get(os.path.join(image_path, image_name)))
)
# Undistort the images and update the reconstruction.
subprocess.call([
os.path.join(args.colmap_path, 'colmap'), 'image_undistorter',
'--image_path', os.path.join(sfm_path, scene_name, 'images'),
'--input_path', os.path.join(sfm_path, scene_name, 'sparse', 'manhattan', '0'),
'--output_path', current_output_path,
'--max_image_size', str(max_image_size)
])
# Transform the reconstruction to raw text format.
sparse_txt_path = os.path.join(current_output_path, 'sparse-txt')
os.mkdir(sparse_txt_path)
subprocess.call([
os.path.join(args.colmap_path, 'colmap'), 'model_converter',
'--input_path', os.path.join(current_output_path, 'sparse'),
'--output_path', sparse_txt_path,
'--output_type', 'TXT'
])
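# Hypothetical invocation sketch (not part of the original script); both paths
# below are placeholders:
#
#     python undistort_reconstructions.py \
#         --colmap_path /usr/local/bin --base_path /data/MegaDepth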
|
tests/r/test_cholera.py
|
hajime9652/observations
| 199 |
74584
|
<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.cholera import cholera
def test_cholera():
"""Test module cholera.py by downloading
cholera.csv and testing shape of
extracted data has 38 rows and 15 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = cholera(test_path)
try:
assert x_train.shape == (38, 15)
except:
shutil.rmtree(test_path)
    raise
|
lightning/types/decorators.py
|
lightning-viz/lightning-python
| 176 |
74613
|
from lightning import Lightning
from lightning.types.base import Base
from functools import wraps
import inspect
def viztype(VizType):
# wrapper that passes inputs to cleaning function and creates viz
@wraps(VizType.clean)
def plotter(self, *args, **kwargs):
if kwargs['height'] is None and kwargs['width'] is None:
if self.size != 'full':
kwargs['width'] = SIZES[self.size]
if self.local_enabled:
if hasattr(VizType, '_local') and VizType._local == False:
                name = VizType._func if hasattr(VizType, '_func') else VizType._name
print("Plots of type '%s' not yet supported in local mode" % name)
else:
viz = VizType._baseplot_local(VizType._name, *args, **kwargs)
return viz
else:
if not hasattr(self, 'session'):
self.create_session()
if VizType._name == 'plot':
if 'type' not in kwargs:
raise ValueError("Must specify a type for custom plots")
else:
type = kwargs['type']
del kwargs['type']
viz = VizType._baseplot(self.session, type, *args, **kwargs)
else:
viz = VizType._baseplot(self.session, VizType._name, *args, **kwargs)
self.session.visualizations.append(viz)
return viz
# get desired function name if different than plot type
if hasattr(VizType, '_func'):
func = VizType._func
else:
func = VizType._name
# crazy hack to give the dynamically generated function the correct signature
# based on: http://emptysqua.re/blog/copying-a-python-functions-signature/
# NOTE currently only handles functions with keyword arguments with defaults of None
options = {}
if hasattr(VizType, '_options'):
options = VizType._options
def parse(val):
if isinstance(val, str):
return "'" + val + "'"
else:
return val
formatted_options = ', '.join(['%s=%s' % (key, parse(value.get('default'))) for (key, value) in options.items()])
argspec = inspect.getargspec(VizType.clean)
formatted_args = inspect.formatargspec(*argspec)
fndef = 'lambda self, %s, %s: plotter(self,%s, %s)' \
% (formatted_args.lstrip('(').rstrip(')'),
formatted_options, formatted_args[1:].replace('=None', '').rstrip(')'),
', '.join('%s=%s' % (key, key) for key in options.keys()))
fake_fn = eval(fndef, {'plotter': plotter})
plotter = wraps(VizType.clean)(fake_fn)
# manually assign a plot-specific name (instead of 'clean')
plotter.__name__ = func
if plotter.__doc__:
plotter.__doc__ += Base._doc
# add plotter to class
setattr(Lightning, func, plotter)
return VizType
SIZES = {
'small': 400,
'medium': 600,
'large': 800,
}
|
ciphey/basemods/Checkers/format.py
|
AlexandruValeanu/Ciphey
| 9,908 |
74659
|
<gh_stars>1000+
import json
from typing import Dict, Optional
import logging
from rich.logging import RichHandler
from ciphey.iface import Checker, Config, ParamSpec, T, registry
@registry.register
class JsonChecker(Checker[str]):
"""
    This object checks whether the candidate plaintext parses as valid JSON
"""
def check(self, text: T) -> Optional[str]:
logging.debug("Trying json checker")
# https://github.com/Ciphey/Ciphey/issues/389
if text.isdigit():
return None
try:
json.loads(text)
return ""
except ValueError:
return None
def getExpectedRuntime(self, text: T) -> float:
# TODO: actually bench this
return 1e-7 * len(text) # From benchmarks I found online
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
pass
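# Hypothetical behaviour sketch (not part of the original file), assuming a
# Ciphey ``Config`` instance named ``config``:
#
#     checker = JsonChecker(config)
#     checker.check('{"key": "value"}')  # -> "" (parses as JSON, check passes)
#     checker.check("12345")             # -> None (pure digits rejected up front)
#     checker.check("not json")          # -> None (ValueError is swallowed)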
|
virtual/lib/python3.6/site-packages/rest_framework/utils/html.py
|
Ruterana/clone_instagram
| 17,395 |
74688
|
"""
Helpers for dealing with HTML input.
"""
import re
from django.utils.datastructures import MultiValueDict
def is_html_input(dictionary):
# MultiDict type datastructures are used to represent HTML form input,
# which may have more than one value for each key.
return hasattr(dictionary, 'getlist')
def parse_html_list(dictionary, prefix='', default=None):
"""
Used to support list values in HTML forms.
Supports lists of primitives and/or dictionaries.
* List of primitives.
{
'[0]': 'abc',
'[1]': 'def',
'[2]': 'hij'
}
-->
[
'abc',
'def',
'hij'
]
* List of dictionaries.
{
'[0]foo': 'abc',
'[0]bar': 'def',
'[1]foo': 'hij',
'[1]bar': 'klm',
}
-->
[
{'foo': 'abc', 'bar': 'def'},
{'foo': 'hij', 'bar': 'klm'}
]
:returns a list of objects, or the value specified in ``default`` if the list is empty
"""
ret = {}
regex = re.compile(r'^%s\[([0-9]+)\](.*)$' % re.escape(prefix))
for field, value in dictionary.items():
match = regex.match(field)
if not match:
continue
index, key = match.groups()
index = int(index)
if not key:
ret[index] = value
elif isinstance(ret.get(index), dict):
ret[index][key] = value
else:
ret[index] = MultiValueDict({key: [value]})
# return the items of the ``ret`` dict, sorted by key, or ``default`` if the dict is empty
return [ret[item] for item in sorted(ret)] if ret else default
def parse_html_dict(dictionary, prefix=''):
"""
Used to support dictionary values in HTML forms.
{
'profile.username': 'example',
'profile.email': '<EMAIL>',
}
-->
{
'profile': {
'username': 'example',
'email': '<EMAIL>'
}
}
"""
ret = MultiValueDict()
regex = re.compile(r'^%s\.(.+)$' % re.escape(prefix))
for field in dictionary:
match = regex.match(field)
if not match:
continue
key = match.groups()[0]
value = dictionary.getlist(field)
ret.setlist(key, value)
return ret
|
litex/tools/litex_json2renode.py
|
osterwood/litex
| 1,501 |
74692
|
#!/usr/bin/env python3
"""
Copyright (c) 2019-2021 Antmicro <www.antmicro.com>
Renode platform definition (repl) and script (resc) generator for LiteX SoC.
This script parses the LiteX 'csr.json' file and generates the Renode scripts
necessary to emulate the given configuration of the LiteX SoC.
"""
import os
import sys
import json
import pprint
import zlib
import argparse
# those memory regions are handled in a special way
# and should not be generated automatically
non_generated_mem_regions = ['ethmac', 'csr']
def get_descriptor(csr, name, size=None):
res = { 'base': csr['csr_bases'][name], 'constants': {} }
for c in csr['constants']:
if c.startswith('{}_'.format(name)):
res['constants'][c[len(name) + 1:]] = csr['constants'][c]
if size:
res['size'] = size
return res
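# Hypothetical output sketch (not part of the original script): for a csr.json
# where csr_bases['timer0'] == 0x82002800 and the constant 'timer0_interrupt'
# equals 1, get_descriptor(csr, 'timer0', 0x100) returns
# {'base': 0x82002800, 'constants': {'interrupt': 1}, 'size': 0x100}.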
def generate_sysbus_registration(descriptor,
skip_braces=False, region=None, skip_size=False):
""" Generates system bus registration information
consisting of a base address and an optional shadow
address.
Args:
descriptor (dict): dictionary containing 'address',
'shadowed_address' (might be None) and
optionally 'size' fields
skip_braces (bool): determines if the registration info should
be put in braces
region (str or None): name of the region, if None the default
one is assumed
        skip_size (bool): if set to True, do not include the size
Returns:
string: registration information
"""
def generate_registration_entry(address, size=None, name=None):
if name:
if not size:
raise Exception('Size must be provided when registering non-default region')
return 'sysbus new Bus.BusMultiRegistration {{ address: {}; size: {}; region: "{}" }}'.format(hex(address), hex(size), name)
if size:
return "sysbus <{}, +{}>".format(hex(address), hex(size))
return "sysbus {}".format(hex(address))
address = descriptor['base']
size = descriptor['size'] if 'size' in descriptor and not skip_size else None
if 'shadowed_address' in descriptor:
result = "{}; {}".format(
generate_registration_entry(address, size, region),
generate_registration_entry(descriptor['shadowed_address'], size, region))
else:
result = generate_registration_entry(address, size, region)
if not skip_braces:
result = "{{ {} }}".format(result)
return result
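# Hypothetical output sketch (not part of the original script):
#     generate_sysbus_registration({'base': 0x82000000, 'size': 0x800})
# returns "{ sysbus <0x82000000, +0x800> }"; passing region="phy" instead
# selects the Bus.BusMultiRegistration form used for named regions.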
def generate_ethmac(csr, name, **kwargs):
""" Generates definition of 'ethmac' peripheral.
Args:
csr (dict): LiteX configuration
name (string): name of the peripheral
kwargs (dict): additional parameters, including 'buffer'
Returns:
string: repl definition of the peripheral
"""
buf = csr['memories']['ethmac']
phy = get_descriptor(csr, 'ethphy', 0x800)
peripheral = get_descriptor(csr, name)
result = """
ethmac: Network.LiteX_Ethernet{} @ {{
{};
{};
{}
}}
""".format('_CSR32' if csr['constants']['config_csr_data_width'] == 32 else '',
generate_sysbus_registration(peripheral,
skip_braces=True),
generate_sysbus_registration(buf,
skip_braces=True, region='buffer'),
generate_sysbus_registration(phy,
skip_braces=True, region='phy'))
interrupt_name = '{}_interrupt'.format(name)
if interrupt_name in csr['constants']:
result += ' -> cpu@{}\n'.format(
csr['constants'][interrupt_name])
result += """
ethphy: Network.EthernetPhysicalLayer @ ethmac 0
VendorSpecific1: 0x4400 // MDIO status: 100Mbps + link up
"""
return result
def generate_memory_region(region_descriptor):
""" Generates definition of memory region.
Args:
region_descriptor (dict): memory region description
Returns:
string: repl definition of the memory region
"""
result = ""
if 'original_address' in region_descriptor:
result += """
This memory region's base address has been
realigned so that it can be simulated -
Renode currently supports memory regions
with base address aligned to 0x1000.
The original base address of this memory region
was {}.
""".format(hex(region_descriptor['original_address']))
if 'original_size' in region_descriptor:
result += """
This memory region's size has been
extended to allow to simulate it -
Renode currently supports memory regions
of size being a multiple of 0x1000.
The original size of this memory region
was {} bytes.
""".format(hex(region_descriptor['original_size']))
if result != "":
result = """
/* WARNING:
{}
*/""".format(result)
result += """
{}: Memory.MappedMemory @ {}
size: {}
""".format(region_descriptor['name'],
generate_sysbus_registration(region_descriptor, skip_size=True),
hex(region_descriptor['size']))
return result
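# Hypothetical output sketch (not part of the original script): a region such as
# {'name': 'sram', 'base': 0x10000000, 'size': 0x2000, 'type': 'cached'} renders
# as an "sram: Memory.MappedMemory" repl entry registered at 0x10000000 with
# "size: 0x2000"; the optional WARNING comment is only emitted for re-aligned
# regions.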
def generate_silencer(csr, name, **kwargs):
""" Silences access to a memory region.
Args:
csr (dict): LiteX configuration
name (string): name of the peripheral
kwargs (dict): additional parameters, not used
Returns:
string: repl definition of the silencer
"""
return """
sysbus:
init add:
SilenceRange <{} 0x200> # {}
""".format(csr['csr_bases'][name], name)
def get_cpu_type(csr):
kind = None
variant = None
config_cpu_type = next((k for k in csr['constants'].keys() if k.startswith('config_cpu_type_')), None)
if config_cpu_type:
kind = config_cpu_type[len('config_cpu_type_'):]
config_cpu_variant = next((k for k in csr['constants'].keys() if k.startswith('config_cpu_variant_')), None)
if config_cpu_variant:
variant = config_cpu_variant[len('config_cpu_variant_'):]
return (kind, variant)
def generate_cpu(csr, time_provider):
""" Generates definition of a CPU.
Returns:
string: repl definition of the CPU
"""
kind, variant = get_cpu_type(csr)
if kind == 'vexriscv' or kind == 'vexriscv_smp':
result = """
cpu: CPU.VexRiscv @ sysbus
"""
if variant == 'linux':
result += """
cpuType: "rv32ima"
privilegeArchitecture: PrivilegeArchitecture.Priv1_10
"""
elif variant in ["i", "im", "ima", "imac"]:
result += """
cpuType: "rv32{}"
""".format(variant)
else:
result += """
cpuType: "rv32im"
"""
if time_provider:
result += """
timeProvider: {}
""".format(time_provider)
if kind == 'vexriscv_smp':
result += """
builtInIrqController: false
"""
return result
elif kind == 'picorv32':
return """
cpu: CPU.PicoRV32 @ sysbus
cpuType: "rv32imc"
"""
elif kind == 'ibex':
return """
cpu: CPU.RiscV32 @ sysbus
cpuType: "rv32imc"
timeProvider: empty
interruptMode: InterruptMode.Vectored
"""
else:
raise Exception('Unsupported cpu type: {}'.format(kind))
def generate_peripheral(csr, name, **kwargs):
""" Generates definition of a peripheral.
Args:
csr (dict): LiteX configuration
name (string): name of the peripheral
        kwargs (dict): additional parameters, including
'model' and 'properties'
Returns:
string: repl definition of the peripheral
"""
peripheral = get_descriptor(csr, name)
model = kwargs['model']
if csr['constants']['config_csr_data_width'] == 32 and 'model_CSR32' in kwargs:
model = kwargs['model_CSR32']
result = '\n{}: {} @ {}\n'.format(
kwargs['name'] if 'name' in kwargs else name,
model,
generate_sysbus_registration(peripheral))
for constant, val in peripheral['constants'].items():
if 'ignored_constants' not in kwargs or constant not in kwargs['ignored_constants']:
if constant == 'interrupt':
result += ' -> cpu@{}\n'.format(val)
else:
result += ' {}: {}\n'.format(constant, val)
if 'properties' in kwargs:
for prop, val in kwargs['properties'].items():
result += ' {}: {}\n'.format(prop, val(csr))
if 'interrupts' in kwargs:
for prop, val in kwargs['interrupts'].items():
result += ' {} -> {}\n'.format(prop, val())
return result
def generate_spiflash(csr, name, **kwargs):
""" Generates definition of an SPI controller with attached flash memory.
Args:
csr (dict): LiteX configuration
name (string): name of the peripheral
        kwargs (dict): additional parameters, including
'model' and 'properties'
Returns:
string: repl definition of the peripheral
"""
peripheral = get_descriptor(csr, name)
result = """
spi_flash: SPI.LiteX_SPI_Flash @ {{
{}
}}
mt25q: SPI.Micron_MT25Q @ spi_flash
underlyingMemory: spiflash
""".format(
generate_sysbus_registration(peripheral, skip_braces=True))
return result
def generate_cas(csr, name, **kwargs):
result = generate_peripheral(csr, name, model='GPIOPort.LiteX_ControlAndStatus', ignored_constants=['leds_count', 'switches_count', 'buttons_count'])
peripheral = get_descriptor(csr, name)
leds_count = int(peripheral['constants']['leds_count'])
switches_count = int(peripheral['constants']['switches_count'])
buttons_count = int(peripheral['constants']['buttons_count'])
for i in range(leds_count):
result += """
{} -> led{}@0
""".format(i, i)
for i in range(leds_count):
result += """
led{}: Miscellaneous.LED @ cas {}
""".format(i, i)
for i in range(switches_count):
result += """
switch{}: Miscellaneous.Button @ cas {}
-> cas@{}
""".format(i, i + 32, i + 32)
for i in range(buttons_count):
result += """
button{}: Miscellaneous.Button @ cas {}
-> cas@{}
""".format(i, i + 64, i + 64)
return result
def generate_mmc(csr, name, **kwargs):
""" Generates definition of 'mmc' peripheral.
Args:
csr (dict): LiteX configuration
name (string): name of the peripheral
kwargs (dict): additional parameters, including 'core', 'reader' and 'writer'
Returns:
string: repl definition of the peripheral
"""
# FIXME: Get litex to generate CSR region size into output information
# currently only a base address is present
peripheral = get_descriptor(csr, name)
core = get_descriptor(csr, 'sdcore', 0x100)
reader = get_descriptor(csr, 'sdblock2mem', 0x100)
writer = get_descriptor(csr, 'sdmem2block', 0x100)
result = """
mmc_controller: SD.LiteSDCard{} @ {{
{}; // phy
{};
{};
{}
}}
""".format('_CSR32' if csr['constants']['config_csr_data_width'] == 32 else '',
generate_sysbus_registration(peripheral,
skip_braces=True),
generate_sysbus_registration(core,
skip_braces=True, region='core'),
generate_sysbus_registration(reader,
skip_braces=True, region='reader'),
generate_sysbus_registration(writer,
skip_braces=True, region='writer'))
return result
def generate_clint(clint, frequency):
# TODO: this is configuration for VexRiscv - add support for other CPU types
result = """
clint: IRQControllers.CoreLevelInterruptor @ {}
frequency: {}
[0, 1] -> cpu@[101, 100]
""".format(generate_sysbus_registration(clint,
skip_braces=True,
skip_size=True),
frequency)
return result
def generate_plic(plic):
# TODO: this is configuration for VexRiscv - add support for other CPU types
result = """
plic: IRQControllers.PlatformLevelInterruptController @ {}
[0-3] -> cpu@[8-11]
numberOfSources: 31
numberOfTargets: 2
prioritiesEnabled: false
""".format(generate_sysbus_registration(plic,
skip_braces=True,
skip_size=True))
return result
def get_clock_frequency(csr):
"""
Args:
csr (dict): LiteX configuration
Returns:
int: system clock frequency
"""
# in different LiteX versions this property
# has different names
return csr['constants']['config_clock_frequency' if 'config_clock_frequency' in csr['constants'] else 'system_clock_frequency']
peripherals_handlers = {
'uart': {
'handler': generate_peripheral,
'model': 'UART.LiteX_UART',
'ignored_constants': ['polling']
},
'timer0': {
'handler': generate_peripheral,
'model': 'Timers.LiteX_Timer',
'model_CSR32': 'Timers.LiteX_Timer_CSR32',
'properties': {
'frequency':
lambda c: get_clock_frequency(c)
}
},
'ethmac': {
'handler': generate_ethmac,
},
'cas': {
'handler': generate_cas,
},
'cpu': {
'name': 'cpu_timer',
'handler': generate_peripheral,
'model': 'Timers.LiteX_CPUTimer',
'properties': {
'frequency':
lambda c: get_clock_frequency(c)
},
'interrupts': {
# IRQ #100 in Renode's VexRiscv model is mapped to Machine Timer Interrupt
'IRQ': lambda: 'cpu@100'
}
},
'ddrphy': {
'handler': generate_silencer
},
'sdram': {
'handler': generate_silencer
},
'spiflash': {
'handler': generate_spiflash
},
'spi': {
'handler': generate_peripheral,
'model': 'SPI.LiteX_SPI',
'ignored_constants': ['interrupt'] # model in Renode currently doesn't support interrupts
},
'ctrl': {
'handler': generate_peripheral,
'model': 'Miscellaneous.LiteX_SoC_Controller',
'model_CSR32': 'Miscellaneous.LiteX_SoC_Controller_CSR32'
},
'i2c0': {
'handler': generate_peripheral,
'model': 'I2C.LiteX_I2C'
},
'sdphy': {
'handler': generate_mmc,
},
'spisdcard': {
'handler': generate_peripheral,
'model': 'SPI.LiteX_SPI',
'ignored_constants': ['interrupt'] # model in Renode currently doesn't support interrupts
},
}
def generate_etherbone_bridge(name, address, port):
# FIXME: for now the width is fixed to 0x800
return """
{}: EtherboneBridge @ sysbus <{}, +0x800>
port: {}
""".format(name, hex(address), port)
def generate_repl(csr, etherbone_peripherals, autoalign):
""" Generates platform definition.
Args:
csr (dict): LiteX configuration
etherbone_peripherals (dict): collection of peripherals
that should not be simulated directly in Renode,
but connected to it over an etherbone bridge on
a provided port number
autoalign (list): list of memory regions names that
should be automatically re-aligned
Returns:
        string: platform definition containing all supported
peripherals and memory regions
"""
result = ""
# RISC-V CPU in Renode requires memory region size
# to be a multiple of 4KB - this is a known limitation
# (not a bug) and there are no plans to handle smaller
# memory regions for now
memories = []
for m in csr['memories']:
x = dict(csr['memories'][m])
x['name'] = m
memories.append(x)
for mem_region in filter_memory_regions(memories, alignment=0x1000, autoalign=autoalign):
result += generate_memory_region(mem_region)
time_provider = None
if 'clint' in csr['memories']:
result += generate_clint(csr['memories']['clint'], csr['constants']['config_clock_frequency'])
time_provider = 'clint'
if 'plic' in csr['memories']:
result += generate_plic(csr['memories']['plic'])
if not time_provider and 'cpu' in csr['csr_bases']:
time_provider = 'cpu_timer'
result += generate_cpu(csr, time_provider)
for name, address in csr['csr_bases'].items():
if name not in peripherals_handlers:
print('Skipping unsupported peripheral `{}` at {}'
.format(name, hex(address)))
continue
if name in etherbone_peripherals:
# generate an etherbone bridge for the peripheral
port = etherbone_peripherals[name]
            result += generate_etherbone_bridge(name, address, port)
else:
# generate an actual model of the peripheral
h = peripherals_handlers[name]
result += h['handler'](csr, name, **h)
return result
def filter_memory_regions(raw_regions, alignment=None, autoalign=[]):
""" Filters memory regions skipping those of linker type
and those from `non_generated_mem_regions` list
and verifying if they have proper size and do not overlap.
Args:
raw_regions (list): list of memory regions parsed from
the configuration file
alignment (int or None): memory size boundary
autoalign (list): list of memory regions names that
should be automatically re-aligned
    Yields:
        dict: memory regions to be generated in a repl file,
            sorted by base address
"""
previous_region = None
raw_regions.sort(key=lambda x: x['base'])
for r in raw_regions:
if 'linker' in r['type']:
print('Skipping linker region: {}'.format(r['name']))
continue
if 'io' in r['type']:
print('Skipping io region: {}'.format(r['name']))
continue
if r['name'] in non_generated_mem_regions:
print('Skipping pre-defined memory region: {}'.format(r['name']))
continue
if alignment is not None:
size_mismatch = r['size'] % alignment
address_mismatch = r['base'] % alignment
if address_mismatch != 0:
if r['name'] in autoalign:
r['original_address'] = r['base']
r['base'] -= address_mismatch
print('Re-aligning `{}` memory region base address from {} to {} due to limitations in Renode'.format(r['name'], hex(r['original_address']), hex(r['base'])))
else:
                    print('Error: `{}` memory region base address ({}) is not aligned to {}. This configuration cannot be currently simulated in Renode'.format(r['name'], hex(r['base']), hex(alignment)))
sys.exit(1)
if size_mismatch != 0:
if r['name'] in autoalign:
r['original_size'] = r['size']
r['size'] += alignment - size_mismatch
print('Extending `{}` memory region size from {} to {} due to limitations in Renode'.format(r['name'], hex(r['original_size']), hex(r['size'])))
else:
print('Error: `{}` memory region size ({}) is not aligned to {}. This configuration cannot be currently simulated in Renode'.format(r['name'], hex(r['size']), hex(alignment)))
sys.exit(1)
        if previous_region is not None and (previous_region['base'] + previous_region['size']) > r['base']:
            print("Error: detected overlapping memory regions: `{}` and `{}`".format(r['name'], previous_region['name']))
sys.exit(1)
previous_region = r
yield r
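# Hypothetical usage sketch (not part of the original script): the function is a
# generator and is consumed lazily, e.g.
#     regions = list(filter_memory_regions(memories, alignment=0x1000,
#                                          autoalign=['main_ram']))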
def generate_resc(csr, args, flash_binaries={}, tftp_binaries={}):
""" Generates platform definition.
Args:
csr (dict): LiteX configuration
args (object): configuration
flash_binaries (dict): dictionary with paths and offsets of files
to load into flash
tftp_binaries (dict): dictionary with paths and names of files
to serve with the built-in TFTP server
Returns:
        string: Renode script (resc) that creates the machine,
        loads the platform definition and the configured binaries
"""
cpu_type, _ = get_cpu_type(csr)
result = """
using sysbus
mach create "litex-{}"
machine LoadPlatformDescription @{}
machine StartGdbServer 10001
showAnalyzer sysbus.uart
showAnalyzer sysbus.uart Antmicro.Renode.Analyzers.LoggingUartAnalyzer
""".format(cpu_type, args.repl)
rom_base = csr['memories']['rom']['base'] if 'rom' in csr['memories'] else None
if rom_base is not None and args.bios_binary:
# load LiteX BIOS to ROM
result += """
sysbus LoadBinary @{} {}
cpu PC {}
""".format(args.bios_binary, rom_base, rom_base)
if args.tftp_ip:
result += """
emulation CreateNetworkServer "server" "{}"
server StartTFTP {}
""".format(args.tftp_ip, args.tftp_port)
for name, path in tftp_binaries.items():
result += """
server.tftp ServeFile @{} "{}" """.format(path, name)
result += """
emulation CreateSwitch "switch"
connector Connect ethmac switch
connector Connect server switch
"""
elif args.configure_network:
# configure network to allow netboot
result += """
emulation CreateSwitch "switch"
emulation CreateTap "{}" "tap"
connector Connect ethmac switch
connector Connect host.tap switch
""".format(args.configure_network)
elif flash_binaries:
if 'flash_boot_address' not in csr['constants']:
print('Warning! There is no flash memory to load binaries to')
else:
# load binaries to spiflash to boot from there
for offset in flash_binaries:
path = flash_binaries[offset]
flash_boot_address = int(csr['constants']['flash_boot_address'], 0) + offset
firmware_data = open(path, 'rb').read()
crc32 = zlib.crc32(firmware_data)
result += 'sysbus WriteDoubleWord {} {}\n'.format(hex(flash_boot_address), hex(len(firmware_data)))
result += 'sysbus WriteDoubleWord {} {}\n'.format(hex(flash_boot_address + 4), hex(crc32))
result += 'sysbus LoadBinary @{} {}\n'.format(path, hex(flash_boot_address + 8))
return result
def print_or_save(filepath, lines):
""" Prints given string on standard output or to the file.
Args:
filepath (string): path to the file lines should be written to
or '-' to write to a standard output
lines (string): content to be printed/written
"""
if filepath == '-':
print(lines)
else:
with open(filepath, 'w') as f:
f.write(lines)
def parse_flash_binaries(csr, args):
flash_binaries = {}
if args.firmware_binary:
flash_binaries[0] = args.firmware_binary
if args.flash_binaries_args:
for entry in args.flash_binaries_args:
path, separator, offset_or_label = entry.rpartition(':')
if separator == '':
print("Flash binary '{}' is in a wrong format. It should be 'path:offset'".format(entry))
sys.exit(1)
# offset can be either a number or one of the constants from the configuration
try:
# try a number first...
offset = int(offset_or_label, 0)
except ValueError:
# ... if it didn't work, check constants
if offset_or_label in csr['constants']:
offset = int(csr['constants'][offset_or_label], 0)
else:
print("Offset is in a wrong format. It should be either a number or one of the constants from the configuration file:")
print("\n".join("\t{}".format(c) for c in csr['constants'].keys()))
sys.exit(1)
flash_binaries[offset] = path
return flash_binaries
def check_tftp_binaries(args):
"""
Expected format is:
* path_to_the_binary
* path_to_the_binary:alternative_name
"""
if args.tftp_ip is None and len(args.tftp_binaries_args) > 0:
        print('The TFTP server IP address must be provided')
sys.exit(1)
tftp_binaries = {}
for entry in args.tftp_binaries_args:
path, separator, name = entry.rpartition(':')
if separator == '':
# this means that no alternative name is provided, so we use the original one
name = os.path.basename(entry)
path = entry
if name in tftp_binaries:
            print('File with name {} specified more than once - please check your configuration.'.format(name))
sys.exit(1)
tftp_binaries[name] = path
return tftp_binaries
def check_etherbone_peripherals(peripherals):
result = {}
for p in peripherals:
name, separator, port = p.rpartition(':')
if separator == '':
print("Etherbone peripheral `{}` is in a wrong format. It should be in 'name:port'".format(p))
sys.exit(1)
if name not in peripherals_handlers:
print("Unsupported peripheral '{}'. Available ones:\n".format(name))
print("\n".join("\t{}".format(c) for c in peripherals_handlers.keys()))
sys.exit(1)
if name == 'cpu':
print("CPU must be simulated in Renode")
sys.exit(1)
result[name] = port
return result
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('conf_file',
help='JSON configuration generated by LiteX')
parser.add_argument('--resc', action='store',
help='Output script file')
parser.add_argument('--repl', action='store',
help='Output platform definition file')
parser.add_argument('--configure-network', action='store',
help='Generate virtual network and connect it to host')
parser.add_argument('--bios-binary', action='store',
help='Path to the BIOS binary')
parser.add_argument('--firmware-binary', action='store',
help='Path to the binary to load into boot flash')
parser.add_argument('--flash-binary', action='append', dest='flash_binaries_args',
help='Path and an address of the binary to load into boot flash')
parser.add_argument('--etherbone', action='append', dest='etherbone_peripherals',
default=[],
help='Peripheral to connect over etherbone bridge')
parser.add_argument('--auto-align', action='append', dest='autoalign_memor_regions',
default=[],
help='List of memory regions to align automatically (necessary due to limitations in Renode)')
parser.add_argument('--tftp-binary', action='append', dest='tftp_binaries_args', default=[],
help='Path and an optional alternative name of the binary to serve by the TFTP server')
parser.add_argument('--tftp-server-ip', action='store', dest='tftp_ip',
help='The IP address of the TFTP server')
parser.add_argument('--tftp-server-port', action='store', default=69, type=int, dest='tftp_port',
help='The port number of the TFTP server')
args = parser.parse_args()
return args
def main():
args = parse_args()
with open(args.conf_file) as f:
csr = json.load(f)
etherbone_peripherals = check_etherbone_peripherals(args.etherbone_peripherals)
if args.repl:
print_or_save(args.repl, generate_repl(csr, etherbone_peripherals, args.autoalign_memor_regions))
if args.resc:
if not args.repl:
print("REPL is needed when generating RESC file")
sys.exit(1)
else:
flash_binaries = parse_flash_binaries(csr, args)
tftp_binaries = check_tftp_binaries(args)
print_or_save(args.resc, generate_resc(csr, args,
flash_binaries,
tftp_binaries))
if __name__ == '__main__':
main()
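# Hypothetical invocation sketch (not part of the original script); file names
# are placeholders:
#
#     ./litex_json2renode.py csr.json --repl litex.repl --resc litex.resc \
#         --bios-binary bios.bin --auto-align main_ram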
|
topaz/modules/ffi/variadic_invoker.py
|
ruby-compiler-survey/topaz
| 241 |
74696
|
from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
from topaz.modules.ffi.function import W_FFIFunctionObject
from rpython.rlib import jit
class W_VariadicInvokerObject(W_Object):
classdef = ClassDef('VariadicInvoker', W_Object.classdef)
def __init__(self, space):
W_Object.__init__(self, space)
self.w_info = None
self.w_handle = None
@classdef.singleton_method('allocate')
def singleton_method_allocate(self, space, args_w):
return W_VariadicInvokerObject(space)
@classdef.method('initialize')
def method_initialize(self, space, w_handle, w_arg_types,
w_ret_type, w_options=None):
self.w_ret_type = w_ret_type
self.w_options = w_options
self.w_handle = w_handle
if w_options is None:
w_type_map = space.newhash()
else:
w_key = space.newsymbol('type_map')
w_type_map = space.send(w_options, '[]', [w_key])
space.send(self, 'init', [w_arg_types, w_type_map])
@classdef.method('invoke', arg_values_w='array')
def method_invoke(self, space, w_arg_types, arg_values_w):
w_func_cls = space.getclassfor(W_FFIFunctionObject)
w_func = space.send(
w_func_cls, 'new',
[self.w_ret_type, w_arg_types, self.w_handle, self.w_options])
return self._dli_call(space, w_func, arg_values_w)
@jit.dont_look_inside
def _dli_call(self, space, w_func, arg_values_w):
# XXX we are missing argument promotion for the variadic arguments here
# see
# http://stackoverflow.com/questions/1255775/default-argument-promotions-in-c-function-calls
return space.send(w_func, 'call', arg_values_w)
|
tests/model_setup/test__models__.py
|
wankata/tortoise-orm
| 2,847 |
74721
|
<reponame>wankata/tortoise-orm
"""
Tests for __models__
"""
import re
from asynctest.mock import CoroutineMock, patch
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import ConfigurationError
from tortoise.utils import get_schema_sql
class TestGenerateSchema(test.SimpleTestCase):
async def setUp(self):
try:
Tortoise.apps = {}
Tortoise._connections = {}
Tortoise._inited = False
except ConfigurationError:
pass
Tortoise._inited = False
self.sqls = ""
self.post_sqls = ""
self.engine = test.getDBConfig(app_label="models", modules=[])["connections"]["models"][
"engine"
]
async def tearDown(self):
Tortoise._connections = {}
await Tortoise._reset_apps()
async def init_for(self, module: str, safe=False) -> None:
if self.engine != "tortoise.backends.sqlite":
raise test.SkipTest("sqlite only")
with patch(
"tortoise.backends.sqlite.client.SqliteClient.create_connection", new=CoroutineMock()
):
await Tortoise.init(
{
"connections": {
"default": {
"engine": "tortoise.backends.sqlite",
"credentials": {"file_path": ":memory:"},
}
},
"apps": {"models": {"models": [module], "default_connection": "default"}},
}
)
self.sqls = get_schema_sql(Tortoise._connections["default"], safe).split(";\n")
def get_sql(self, text: str) -> str:
return str(re.sub(r"[ \t\n\r]+", " ", [sql for sql in self.sqls if text in sql][0]))
async def test_good(self):
await self.init_for("tests.model_setup.models__models__good")
self.assertIn("goodtournament", "; ".join(self.sqls))
self.assertIn("inaclasstournament", "; ".join(self.sqls))
self.assertNotIn("badtournament", "; ".join(self.sqls))
async def test_bad(self):
await self.init_for("tests.model_setup.models__models__bad")
self.assertNotIn("goodtournament", "; ".join(self.sqls))
self.assertNotIn("inaclasstournament", "; ".join(self.sqls))
self.assertIn("badtournament", "; ".join(self.sqls))
|
src/stepfunctions/workflow/widgets/events_table.py
|
ParidelPooya/aws-step-functions-data-science-sdk-python
| 211 |
74728
|
<reponame>ParidelPooya/aws-step-functions-data-science-sdk-python
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
import json
from ast import literal_eval
from string import Template
from stepfunctions.workflow.widgets.utils import (
format_time,
get_elapsed_ms,
AWS_TABLE_CSS,
sagemaker_console_link
)
LAMBDA_SERVICE_NAME = "lambda"
LAMBDA_FUNCTION_RESOURCE_TYPE = "function"
LAMBDA_ARN_SEGMENT_LENGTH = 7
SAGEMAKER_JOB_NAME_MAP = {
'createTrainingJob': 'Sagemaker training job',
'createTrainingJob.sync': 'Sagemaker training job',
'createTransformJob': 'Sagemaker transform job',
'createTransformJob.sync': 'Sagemaker transform job',
'createModel': 'Sagemaker model',
'createModel.sync': 'Sagemaker model',
'createEndpointConfig': 'Sagemaker endpoint configuration',
'createEndpointConfig.sync': 'Sagemaker endpoint configuration',
'createEndpoint': 'Sagemaker endpoint',
'createEndpoint.sync': 'Sagemaker endpoint'
}
TABLE_TEMPLATE = """
<style>
$aws_table_css
$custom_css
</style>
<table class="table-widget">
<thead>
<tr>
<th style="width: 60px">ID</th>
<th>Type</th>
<th>Step</th>
<th>Resource</th>
<th>Elapsed Time (ms)</th>
<th>Timestamp</th>
</tr>
</thead>
<tbody>
{table_rows}
</tbody>
</table>
<script type="text/javascript">
$js
</script>
"""
TABLE_ROW_TEMPLATE = """
<tr class="awsui-table-row">
<td class="awsui-util-pl-xs clickable-cell">
<div class="toggle-icon"></div>
<span>$event_id</span>
</td>
<td>$event_type</td>
<td>$step</td>
<td><a $resource_url target="_blank">$resource</a></td>
<td>$elapsed_time</td>
<td>$timestamp</td>
</tr>
<tr class="hide">
<td class="execution-event-detail" colspan="6">
<pre>$event_detail</pre>
</td>
</tr>
"""
JS_TEMPLATE = """
var clickableCells = document.getElementsByClassName("clickable-cell");
for (var cell of clickableCells) {
cell.addEventListener("click", function(e) {
var currentRow = e.srcElement.closest("tr");
var toggleRow = currentRow.nextElementSibling;
var toggleArrow = currentRow.getElementsByClassName("toggle-icon")[0];
toggleRow.classList.toggle("hide");
toggleArrow.classList.toggle("open");
});
}
"""
CSS_TEMPLATE = """
.table-widget .clickable-cell {
padding-left: 0.1em;
cursor: pointer;
}
.toggle-icon {
display: inline-block;
width: 0;
height: 0;
border-top: 5px solid transparent;
border-left: 8px solid #545b64;
border-bottom: 5px solid transparent;
margin-right: 5px;
}
.toggle-icon.open {
-webkit-transform: rotate(90deg);
-ms-transform: rotate(90deg);
transform: rotate(90deg);
}
"""
class EventsTableWidget(object):
def __init__(self, events):
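        # Maps event id -> lambda ARN so that later lambda lifecycle events
        # (which only carry previousEventId) can resolve the ARN they belong
        # to; see _get_lambda_arn below.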
self.eventIdToLambdaArnMap = {}
self.previous_step_name = ""
self.previous_job_name = ""
start_datetime = None
if len(events) > 0:
start_datetime = events[0].get("timestamp")
table_rows = [Template(TABLE_ROW_TEMPLATE).substitute(
event_id=str(event.get("id")),
event_type=event.get("type"),
step=self._get_step(event),
resource=self._get_resource(event, True),
resource_url=self._get_resource_url(event),
elapsed_time=get_elapsed_ms(start_datetime, event.get("timestamp")),
timestamp=format_time(event.get("timestamp")),
event_detail=self._format_event_detail(event)
) for event in events]
self.template = Template(TABLE_TEMPLATE.format(table_rows='\n'.join(table_rows)))
def show(self):
return self.template.safe_substitute({
'aws_table_css': AWS_TABLE_CSS,
'custom_css': CSS_TEMPLATE,
'js': JS_TEMPLATE
})
def _get_step_detail(self, event):
switcher = {
"ChoiceStateEntered": event.get("stateEnteredEventDetails", {}),
"ChoiceStateExited": event.get("stateExitedEventDetails", {}),
"FailStateEntered": event.get("stateEnteredEventDetails", {}),
"MapStateEntered": event.get("stateEnteredEventDetails", {}),
"MapStateExited": event.get("stateExitedEventDetails", {}),
"ParallelStateEntered": event.get("stateEnteredEventDetails", {}),
"ParallelStateExited": event.get("stateExitedEventDetails", {}),
"PassStateEntered": event.get("stateEnteredEventDetails", {}),
"PassStateExited": event.get("stateExitedEventDetails", {}),
"SucceedStateEntered": event.get("stateEnteredEventDetails", {}),
"SucceedStateExited": event.get("stateExitedEventDetails", {}),
"TaskStateEntered": event.get("stateEnteredEventDetails", {}),
"TaskStateExited": event.get("stateExitedEventDetails", {}),
"WaitStateEntered": event.get("stateEnteredEventDetails", {}),
"WaitStateExited": event.get("stateExitedEventDetails", {}),
"MapIterationAborted": event.get("mapIterationAbortedEventDetails", {}),
"MapIterationFailed": event.get("mapIterationFailedEventDetails", {}),
"MapIterationStarted": event.get("mapIterationStartedEventDetails", {}),
"MapIterationSucceeded": event.get("mapIterationSucceededEventDetails", {}),
"ExecutionFailed": event.get("executionFailedEventDetails", {}),
"ExecutionStarted": event.get("executionStartedEventDetails", {}),
"ExecutionSucceeded": event.get("executionSucceededEventDetails", {}),
"ExecutionAborted": event.get("executionAbortedEventDetails", {}),
"ExecutionTimedOut": event.get("executionTimedOutEventDetails", {}),
"LambdaFunctionScheduled": event.get("lambdaFunctionScheduledEventDetails", {}),
"LambdaFunctionScheduleFailed": event.get("lambdaFunctionScheduleFailedEventDetails", {}),
"LambdaFunctionStartFailed": event.get("lambdaFunctionStartFailedEventDetails", {}),
"LambdaFunctionSucceeded": event.get("lambdaFunctionSucceededEventDetails", {}),
"LambdaFunctionFailed": event.get("lambdaFunctionFailedEventDetails", {}),
"LambdaFunctionTimedOut": event.get("lambdaFunctionTimedOutEventDetails", {}),
"TaskStarted": event.get("taskStartedEventDetails", {}),
"TaskSubmitted": event.get("taskSubmittedEventDetails", {}),
"TaskScheduled": event.get("taskScheduledEventDetails", {}),
"TaskSucceeded": event.get("taskSucceededEventDetails", {}),
"TaskFailed": event.get("taskFailedEventDetails", {})
}
return switcher.get(event.get("type"), {})
    # Tries to get the step name; if it cannot be found, returns the previous step's name
def _get_step(self, event):
if event.get("type") in (
"ExecutionFailed",
"ExecutionStarted",
"ExecutionSucceeded",
"ExecutionAborted",
"ExecutionTimedOut"
):
step_name = ""
self.previous_step_name = ""
else:
step_name = self._get_step_detail(event).get("name")
if not step_name:
step_name = self.previous_step_name
else:
self.previous_step_name = step_name
return step_name
def _get_resource(self, event, mapped_value=False):
# check that it is a lambda, sagemaker or just a regular execution
if self._is_correct_lambda_arn_sequence(self._get_lambda_arn(event)):
return "Lambda"
# check if it has a resource
elif self._has_resource(event):
# check if it is a sagemaker resource
step_details = self._get_step_detail(event)
if step_details.get("resourceType") == "sagemaker":
sagemaker_resource = step_details.get("resource")
if mapped_value:
return SAGEMAKER_JOB_NAME_MAP[sagemaker_resource]
return sagemaker_resource
return "Step Functions execution"
# if not a resource, return -
return "-"
def _get_resource_url(self, event):
resource = self._get_resource(event)
if "createTrainingJob" in resource:
job_name = self._get_sagemaker_resource_job_name(event, "TrainingJobName")
return 'href="{}"'.format(sagemaker_console_link('jobs', job_name))
if "createTransformJob" in resource:
job_name = self._get_sagemaker_resource_job_name(event, "TransformJobName")
return 'href="{}"'.format(sagemaker_console_link('transformJobs', job_name))
if "createModel" in resource:
job_name = self._get_sagemaker_resource_job_name(event, "ModelName")
return 'href="{}"'.format(sagemaker_console_link('models', job_name))
if "createEndpointConfig" in resource:
job_name = self._get_sagemaker_resource_job_name(event, "EndpointConfigName")
return 'href="{}"'.format(sagemaker_console_link('endpointConfig', job_name))
if "createEndpoint" in resource:
job_name = self._get_sagemaker_resource_job_name(event, "EndpointName")
return 'href="{}"'.format(sagemaker_console_link('endpoints', job_name))
self.previous_job_name = ""
return "class='disabled'"
def _get_sagemaker_resource_job_name(self, event, job_name_key):
step_details = self._get_step_detail(event)
job_name = literal_eval(step_details.get("parameters", "{}")).get(job_name_key, "")
if job_name == "":
job_name = self.previous_job_name
else:
self.previous_job_name = job_name
return job_name
def _has_resource(self, event):
return event.get("type") in (
"TaskSucceeded",
"TaskSubmitted",
"TaskScheduled",
"TaskStarted"
)
def _get_lambda_arn(self, event):
resource_arn = "-"
event_type = event.get("type")
if event_type == "LambdaFunctionScheduled":
resource_arn = event.get("lambdaFunctionScheduledEventDetails").get("resource")
elif event_type in {
"LambdaFunctionScheduleFailed",
"LambdaFunctionFailed",
"LambdaFunctionStartFailed",
"LambdaFunctionStarted",
"LambdaFunctionSucceeded",
"LambdaFunctionTimedOut"
}:
resource_arn = self.eventIdToLambdaArnMap[event.get("previousEventId")]
self.eventIdToLambdaArnMap[event.get("id")] = resource_arn
return resource_arn
def _is_correct_lambda_arn_sequence(self, lambda_arn):
lambda_arn_segments = lambda_arn.split(":")
return (len(lambda_arn_segments) == LAMBDA_ARN_SEGMENT_LENGTH and
lambda_arn_segments[2] == LAMBDA_SERVICE_NAME and
lambda_arn_segments[5] == LAMBDA_FUNCTION_RESOURCE_TYPE)
def _format_event_detail(self, event):
event_details = self._get_step_detail(event)
self._unpack_to_proper_dict(event_details)
return json.dumps(event_details, indent=4)
def _unpack_to_proper_dict(self, dictionary):
for k, v in dictionary.items():
if isinstance(v, dict):
self._unpack_to_proper_dict(v)
else:
dictionary[k] = self._load_json(v)
def _load_json(self, value):
try:
return json.loads(value)
except ValueError as e:
return value
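# Editor's usage sketch (not part of the upstream module). Assumes `events`
# mirror Step Functions `get_execution_history` entries, with integer ids,
# event type strings and datetime timestamps as produced by boto3; the event
# names below are purely illustrative.
if __name__ == "__main__":
    from datetime import datetime

    sample_events = [
        {"id": 1, "type": "ExecutionStarted",
         "timestamp": datetime(2020, 1, 1, 12, 0, 0),
         "executionStartedEventDetails": {"input": "{}"}},
        {"id": 2, "type": "TaskStateEntered",
         "timestamp": datetime(2020, 1, 1, 12, 0, 1),
         "stateEnteredEventDetails": {"name": "TrainStep", "input": "{}"}},
    ]
    widget = EventsTableWidget(sample_events)
    html = widget.show()  # HTML string; render with IPython.display.HTML(html)
    print(html[:80])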
|
RawArchiver/TimedTriggers/TriggerManage.py
|
fake-name/ReadableWebProxy
| 193 |
74736
|
<gh_stars>100-1000
import logging
import abc
import datetime
import traceback
import urllib.parse
import sqlalchemy.exc
import common.database as db
# import RawArchiver.TimedTriggers.RawRollingRewalkTrigger
# def exposed_raw_rewalk_old():
# '''
# Trigger the rewalking system on the rawarchiver
# '''
# run = RawArchiver.TimedTriggers.RawRollingRewalkTrigger.RollingRawRewalkTrigger()
# run.go()
|
applications/ParticleMechanicsApplication/tests/test_generate_mpm_particle_condition.py
|
lkusch/Kratos
| 778 |
74757
|
<filename>applications/ParticleMechanicsApplication/tests/test_generate_mpm_particle_condition.py
from __future__ import print_function, absolute_import, division
import KratosMultiphysics
import KratosMultiphysics.ParticleMechanicsApplication as KratosParticle
import KratosMultiphysics.KratosUnittest as KratosUnittest
class TestGenerateMPMParticleCondition(KratosUnittest.TestCase):
def _generate_particle_condition_and_check(self, current_model, dimension, geometry_element, num_particle, expected_num_particle):
KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)
# Initialize model part
## Material model part definition
material_point_model_part = current_model.CreateModelPart("dummy_name")
material_point_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dimension)
## Initial material model part definition
initial_mesh_model_part = current_model.CreateModelPart("Initial_dummy_name")
initial_mesh_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dimension)
## Grid model part definition
grid_model_part = current_model.CreateModelPart("Background_Grid")
grid_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dimension)
# Create element and nodes for background grids
sub_background = grid_model_part.CreateSubModelPart("test_background")
self._create_nodes(sub_background, dimension, geometry_element)
self._create_elements(sub_background,dimension, geometry_element)
self._create_condition(sub_background,dimension, geometry_element)
for condition in grid_model_part.Conditions:
condition.SetValue(KratosParticle.PARTICLES_PER_CONDITION, num_particle)
condition.SetValue(KratosParticle.MPC_BOUNDARY_CONDITION_TYPE, 1)
# Create element and nodes for initial meshes
sub_mp = initial_mesh_model_part.CreateSubModelPart("test")
sub_mp.GetProperties()[1].SetValue(KratosParticle.PARTICLES_PER_ELEMENT, 4)
# Generate MP Conditions
KratosParticle.GenerateMaterialPointCondition(grid_model_part, initial_mesh_model_part, material_point_model_part)
# Check total number of element
particle_counter = material_point_model_part.NumberOfConditions()
self.assertEqual(expected_num_particle,particle_counter)
def _create_nodes(self, initial_mp, dimension, geometry_element):
initial_mp.CreateNewNode(1, -0.5, -0.5, 0.0)
initial_mp.CreateNewNode(2, 0.5, -0.5, 0.0)
initial_mp.CreateNewNode(3, 0.5, 0.5, 0.0)
initial_mp.CreateNewNode(4, -0.5, 0.5, 0.0)
if (dimension == 3):
initial_mp.CreateNewNode(5, -0.5, -0.5, 1.0)
initial_mp.CreateNewNode(6, 0.5, -0.5, 1.0)
initial_mp.CreateNewNode(7, 0.5, 0.5, 1.0)
initial_mp.CreateNewNode(8, -0.5, 0.5, 1.0)
def _create_elements(self, initial_mp, dimension, geometry_element):
if (dimension == 2):
initial_mp.CreateNewElement("UpdatedLagrangian2D4N", 1, [1,2,3,4], initial_mp.GetProperties()[1])
else:
initial_mp.CreateNewElement("UpdatedLagrangian3D8N", 1, [1,2,3,4,5,6,7,8], initial_mp.GetProperties()[1])
KratosMultiphysics.VariableUtils().SetFlag(KratosMultiphysics.ACTIVE, True, initial_mp.Elements)
def _create_condition(self, initial_mp, dimension, geometry_element):
if (dimension == 2):
if (geometry_element == "Point"):
initial_mp.CreateNewCondition("PointCondition2D1N", 1, [1], initial_mp.GetProperties()[1])
elif (geometry_element == "Line"):
initial_mp.CreateNewCondition("LineCondition2D2N", 1, [1,2], initial_mp.GetProperties()[1])
else:
if (geometry_element == "Point"):
initial_mp.CreateNewCondition("PointCondition3D1N", 1, [1], initial_mp.GetProperties()[1])
elif (geometry_element == "Line"):
initial_mp.CreateNewCondition("LineCondition3D2N", 1, [1,2], initial_mp.GetProperties()[1])
elif (geometry_element == "Triangle"):
initial_mp.CreateNewCondition("SurfaceCondition3D3N", 1, [1,6,8], initial_mp.GetProperties()[1])
elif (geometry_element == "Quadrilateral"):
initial_mp.CreateNewCondition("SurfaceCondition3D4N", 1, [2,4,8,6], initial_mp.GetProperties()[1])
KratosMultiphysics.VariableUtils().SetFlag(KratosMultiphysics.BOUNDARY, True, initial_mp.Conditions)
## Point2D - automatic, 1, and default
def test_GenerateMPMParticleConditionPoint2DAutomatic(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Point", num_particle=0, expected_num_particle=1)
def test_GenerateMPMParticleConditionPoint2D1P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Point", num_particle=1, expected_num_particle=1)
def test_GenerateMPMParticleConditionPoint2DDefault(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Point", num_particle=50, expected_num_particle=1)
## Line2D - automatic and 2, 3, 4, 5, and default
def test_GenerateMPMParticleConditionLine2DAutomatic(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Line", num_particle=0, expected_num_particle=1)
def test_GenerateMPMParticleConditionLine2D1P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Line", num_particle=1, expected_num_particle=1)
def test_GenerateMPMParticleConditionLine2D2P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Line", num_particle=2, expected_num_particle=2)
def test_GenerateMPMParticleConditionLine2D3P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Line", num_particle=3, expected_num_particle=3)
def test_GenerateMPMParticleConditionLine2D4P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Line", num_particle=4, expected_num_particle=4)
def test_GenerateMPMParticleConditionLine2D5P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Line", num_particle=5, expected_num_particle=5)
def test_GenerateMPMParticleConditionLine2DDefault(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=2, geometry_element="Line", num_particle=50, expected_num_particle=1)
## Point3D - automatic, 1, and default
def test_GenerateMPMParticleConditionPoint3DAutomatic(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Point", num_particle=0, expected_num_particle=1)
def test_GenerateMPMParticleConditionPoint3D1P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Point", num_particle=1, expected_num_particle=1)
def test_GenerateMPMParticleConditionPoint3DDefault(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Point", num_particle=50, expected_num_particle=1)
## Line3D - automatic and 2, 3, 4, 5, and default
def test_GenerateMPMParticleConditionLine3DAutomatic(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Line", num_particle=0, expected_num_particle=1)
def test_GenerateMPMParticleConditionLine3D1P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Line", num_particle=1, expected_num_particle=1)
def test_GenerateMPMParticleConditionLine3D2P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Line", num_particle=2, expected_num_particle=2)
def test_GenerateMPMParticleConditionLine3D3P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Line", num_particle=3, expected_num_particle=3)
def test_GenerateMPMParticleConditionLine3D4P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Line", num_particle=4, expected_num_particle=4)
def test_GenerateMPMParticleConditionLine3D5P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Line", num_particle=5, expected_num_particle=5)
def test_GenerateMPMParticleConditionLine3DDefault(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Line", num_particle=50, expected_num_particle=1)
## Triangle3D - automatic, 1, 3, 6, 12, and default
def test_GenerateMPMParticleConditionTriangle3DAutomatic(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Triangle", num_particle=0, expected_num_particle=1)
def test_GenerateMPMParticleConditionTriangle3D1P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Triangle", num_particle=1, expected_num_particle=1)
def test_GenerateMPMParticleConditionTriangle3D3P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Triangle", num_particle=3, expected_num_particle=3)
def test_GenerateMPMParticleConditionTriangle3D6P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Triangle", num_particle=6, expected_num_particle=6)
def test_GenerateMPMParticleConditionTriangle3D12P(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Triangle", num_particle=12, expected_num_particle=12)
def test_GenerateMPMParticleConditionTriangle3DDefault(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Triangle", num_particle=50, expected_num_particle=1)
    ## Quadrilateral3D - automatic, 1, 4, 9, 16, and default
def test_GenerateMPMParticleConditionQuadrilateral3DAutomatic(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Quadrilateral", num_particle=0, expected_num_particle=1)
def test_GenerateMPMParticleConditionQuadrilateral3D4N(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Quadrilateral", num_particle=4, expected_num_particle=4)
def test_GenerateMPMParticleConditionQuadrilateral3D9N(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Quadrilateral", num_particle=9, expected_num_particle=9)
def test_GenerateMPMParticleConditionQuadrilateral3D16N(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Quadrilateral", num_particle=16, expected_num_particle=16)
def test_GenerateMPMParticleConditionQuadrilateral3DDefault(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_condition_and_check(current_model, dimension=3, geometry_element="Quadrilateral", num_particle=50, expected_num_particle=1)
if __name__ == '__main__':
KratosUnittest.main()
|
libcloud/compute/drivers/hostvirtual.py
|
Matir/libcloud
| 1,435 |
74801
|
<reponame>Matir/libcloud
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud driver for the Host Virtual Inc. (VR) API
Home page https://www.hostvirtual.com/
"""
import time
import re
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.hostvirtual import HostVirtualResponse
from libcloud.common.hostvirtual import HostVirtualConnection
from libcloud.common.hostvirtual import HostVirtualException
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword
API_ROOT = ''
NODE_STATE_MAP = {
'BUILDING': NodeState.PENDING,
'PENDING': NodeState.PENDING,
'RUNNING': NodeState.RUNNING, # server is powered up
'STOPPING': NodeState.REBOOTING,
'REBOOTING': NodeState.REBOOTING,
'STARTING': NodeState.REBOOTING,
'TERMINATED': NodeState.TERMINATED, # server is powered down
'STOPPED': NodeState.STOPPED
}
DEFAULT_NODE_LOCATION_ID = 21
class HostVirtualComputeResponse(HostVirtualResponse):
pass
class HostVirtualComputeConnection(HostVirtualConnection):
responseCls = HostVirtualComputeResponse
class HostVirtualNodeDriver(NodeDriver):
type = Provider.HOSTVIRTUAL
name = 'HostVirtual'
website = 'http://www.hostvirtual.com'
connectionCls = HostVirtualComputeConnection
features = {'create_node': ['ssh_key', 'password']}
def __init__(self, key, secure=True, host=None, port=None):
self.location = None
super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure,
host=host, port=port)
def list_nodes(self):
try:
result = self.connection.request(
API_ROOT + '/cloud/servers/').object
except HostVirtualException:
return []
nodes = []
for value in result:
node = self._to_node(value)
nodes.append(node)
return nodes
def list_locations(self):
result = self.connection.request(API_ROOT + '/cloud/locations/').object
locations = []
for k in result:
dc = result[k]
locations.append(NodeLocation(
dc["id"],
dc["name"],
dc["name"].split(',')[1].replace(" ", ""), # country
self))
return sorted(locations, key=lambda x: int(x.id))
def list_sizes(self, location=None):
params = {}
if location is not None:
params = {'location': location.id}
result = self.connection.request(
API_ROOT + '/cloud/sizes/',
params=params).object
sizes = []
for size in result:
n = NodeSize(id=size['plan_id'],
name=size['plan'],
ram=size['ram'],
disk=size['disk'],
bandwidth=size['transfer'],
price=size['price'],
driver=self.connection.driver)
sizes.append(n)
return sizes
def list_images(self):
result = self.connection.request(API_ROOT + '/cloud/images/').object
images = []
for image in result:
i = NodeImage(id=image["id"],
name=image["os"],
driver=self.connection.driver,
extra=image)
del i.extra['id']
del i.extra['os']
images.append(i)
return images
def create_node(self, name, image, size, location=None, auth=None):
"""
Creates a node
Example of node creation with ssh key deployed:
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> key = open('/home/user/.ssh/id_rsa.pub').read()
>>> auth = NodeAuthSSHKey(pubkey=key)
>>> from libcloud.compute.providers import get_driver
>>> driver = get_driver('hostvirtual')
>>> conn = driver('API_KEY')
>>> image = conn.list_images()[1]
>>> size = conn.list_sizes()[0]
>>> location = conn.list_locations()[1]
>>> name = 'markos-dev'
>>> node = conn.create_node(name, image, size, auth=auth,
>>> location=location)
"""
dc = None
auth = self._get_and_check_auth(auth)
if not self._is_valid_fqdn(name):
raise HostVirtualException(
500, "Name should be a valid FQDN (e.g, hostname.example.com)")
# simply order a package first
pkg = self.ex_order_package(size)
if location:
dc = location.id
else:
dc = DEFAULT_NODE_LOCATION_ID
# create a stub node
stub_node = self._to_node({
'mbpkgid': pkg['id'],
'status': 'PENDING',
'fqdn': name,
'plan_id': size.id,
'os_id': image.id,
'location_id': dc
})
# provisioning a server using the stub node
self.ex_provision_node(node=stub_node, auth=auth)
node = self._wait_for_node(stub_node.id)
if getattr(auth, 'generated', False):
node.extra['password'] = <PASSWORD>
return node
def reboot_node(self, node):
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/reboot',
data=json.dumps(params),
method='POST').object
return bool(result)
def destroy_node(self, node):
params = {
'mbpkgid': node.id,
# 'reason': 'Submitted through Libcloud API'
}
result = self.connection.request(
API_ROOT + '/cloud/cancel', data=json.dumps(params),
method='POST').object
return bool(result)
def ex_list_packages(self):
"""
List the server packages.
"""
try:
result = self.connection.request(
API_ROOT + '/cloud/packages/').object
except HostVirtualException:
return []
pkgs = []
for value in result:
pkgs.append(value)
return pkgs
def ex_order_package(self, size):
"""
Order a server package.
        :param size: the package size (plan) to order
        :type size: :class:`NodeSize`
        :rtype: ``dict``
"""
params = {'plan': size.name}
pkg = self.connection.request(API_ROOT + '/cloud/buy/',
data=json.dumps(params),
method='POST').object
return pkg
def ex_cancel_package(self, node):
"""
Cancel a server package.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``str``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(API_ROOT + '/cloud/cancel/',
data=json.dumps(params),
method='POST').object
return result
def ex_unlink_package(self, node):
"""
Unlink a server package from location.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``str``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(API_ROOT + '/cloud/unlink/',
data=json.dumps(params),
method='POST').object
return result
def ex_get_node(self, node_id):
"""
Get a single node.
:param node_id: id of the node that we need the node object for
:type node_id: ``str``
:rtype: :class:`Node`
"""
params = {'mbpkgid': node_id}
result = self.connection.request(
API_ROOT + '/cloud/server', params=params).object
node = self._to_node(result)
return node
def start_node(self, node):
"""
Start a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/start',
data=json.dumps(params),
method='POST').object
return bool(result)
def stop_node(self, node):
"""
Stop a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/shutdown',
data=json.dumps(params),
method='POST').object
return bool(result)
def ex_start_node(self, node):
# NOTE: This method is here for backward compatibility reasons after
# this method was promoted to be part of the standard compute API in
# Libcloud v2.7.0
return self.start_node(node=node)
def ex_stop_node(self, node):
# NOTE: This method is here for backward compatibility reasons after
# this method was promoted to be part of the standard compute API in
# Libcloud v2.7.0
return self.stop_node(node=node)
def ex_provision_node(self, **kwargs):
"""
Provision a server on a VR package and get it booted
:keyword node: node which should be used
:type node: :class:`Node`
:keyword image: The distribution to deploy on your server (mandatory)
:type image: :class:`NodeImage`
:keyword auth: an SSH key or root password (mandatory)
:type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`
:keyword location: which datacenter to create the server in
:type location: :class:`NodeLocation`
:return: Node representing the newly built server
:rtype: :class:`Node`
"""
node = kwargs['node']
if 'image' in kwargs:
image = kwargs['image']
else:
image = node.extra['image']
params = {
'mbpkgid': node.id,
'image': image,
'fqdn': node.name,
'location': node.extra['location'],
}
auth = kwargs['auth']
ssh_key = None
password = None
if isinstance(auth, NodeAuthSSHKey):
ssh_key = auth.pubkey
params['ssh_key'] = ssh_key
elif isinstance(auth, NodeAuthPassword):
password = auth.password
params['password'] = password
if not ssh_key and not password:
raise HostVirtualException(
500, "SSH key or Root password is required")
try:
result = self.connection.request(API_ROOT + '/cloud/server/build',
data=json.dumps(params),
method='POST').object
return bool(result)
except HostVirtualException:
self.ex_cancel_package(node)
def ex_delete_node(self, node):
"""
Delete a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/delete', data=json.dumps(params),
method='POST').object
return bool(result)
def _to_node(self, data):
state = NODE_STATE_MAP[data['status']]
public_ips = []
private_ips = []
extra = {}
if 'plan_id' in data:
extra['size'] = data['plan_id']
if 'os_id' in data:
extra['image'] = data['os_id']
if 'fqdn' in data:
extra['fqdn'] = data['fqdn']
if 'location_id' in data:
extra['location'] = data['location_id']
if 'ip' in data:
public_ips.append(data['ip'])
node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
return node
def _wait_for_node(self, node_id, timeout=30, interval=5.0):
"""
:param node_id: ID of the node to wait for.
:type node_id: ``int``
:param timeout: Timeout (in seconds).
:type timeout: ``int``
:param interval: How long to wait (in seconds) between each attempt.
:type interval: ``float``
:return: Node representing the newly built server
:rtype: :class:`Node`
"""
# poll until we get a node
for i in range(0, timeout, int(interval)):
try:
node = self.ex_get_node(node_id)
return node
except HostVirtualException:
time.sleep(interval)
raise HostVirtualException(412, 'Timeout on getting node details')
def _is_valid_fqdn(self, fqdn):
if len(fqdn) > 255:
return False
if fqdn[-1] == ".":
fqdn = fqdn[:-1]
valid = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
if len(fqdn.split(".")) > 1:
return all(valid.match(x) for x in fqdn.split("."))
else:
return False
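# Editor's usage sketch (not part of the upstream driver). 'HOSTVIRTUAL_API_KEY'
# is a placeholder credential; this mirrors the pattern shown in the
# create_node docstring above and would issue real API calls if run.
if __name__ == "__main__":
    from libcloud.compute.providers import get_driver

    driver_cls = get_driver('hostvirtual')
    conn = driver_cls('HOSTVIRTUAL_API_KEY')
    for location in conn.list_locations():
        print(location.id, location.name)
    for size in conn.list_sizes():
        print(size.id, size.name, size.ram)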
|
overholt/users/__init__.py
|
prdonahue/overholt
| 1,152 |
74826
|
# -*- coding: utf-8 -*-
"""
overholt.users
~~~~~~~~~~~~~~
overholt users package
"""
from ..core import Service
from .models import User
class UsersService(Service):
__model__ = User
|
parsl/monitoring/visualization/plots/default/task_plots.py
|
cylondata/parsl
| 323 |
74828
|
<reponame>cylondata/parsl<filename>parsl/monitoring/visualization/plots/default/task_plots.py
import plotly.graph_objs as go
from plotly.offline import plot
def time_series_cpu_per_task_plot(df_resources, resource_type, label):
if resource_type == "psutil_process_cpu_percent":
yaxis = dict(title="CPU utilization")
else:
yaxis = dict(title='Accumulated CPU user time (seconds)')
fig = go.Figure(data=[go.Scatter(x=df_resources['timestamp'],
y=df_resources[resource_type])],
layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S',
autorange=True,
title='Time'),
yaxis=yaxis,
title=label))
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
def time_series_memory_per_task_plot(df_resources, resource_type, label):
if resource_type == "psutil_process_memory_percent":
yaxis = dict(title="Memory utilization")
data = [go.Scatter(x=df_resources['timestamp'],
y=df_resources[resource_type])]
else:
yaxis = dict(title='Memory usage (GB)')
data = [go.Scatter(x=df_resources['timestamp'],
y=[num / 1000000000 for num in df_resources[resource_type].astype(float)])]
fig = go.Figure(data=data,
layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S',
autorange=True,
title='Time'),
yaxis=yaxis,
title=label))
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
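# Editor's usage sketch (not part of the upstream module). Builds a tiny
# resource DataFrame by hand instead of reading the parsl monitoring database;
# the column names follow those referenced in the functions above.
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame({
        'timestamp': pd.to_datetime(['2021-01-01 00:00:00', '2021-01-01 00:00:05']),
        'psutil_process_cpu_percent': [12.5, 80.0],
    })
    div = time_series_cpu_per_task_plot(df, 'psutil_process_cpu_percent', 'task 0 CPU')
    print(div[:60])  # an HTML <div> fragment to embed in a report page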
|
tests/oracle_test.py
|
sh0nk/simple-db-migrate
| 120 |
74833
|
<reponame>sh0nk/simple-db-migrate
#-*- coding:utf-8 -*-
import unittest
import sys
import simple_db_migrate.core
from mock import patch, Mock, MagicMock, call, sentinel
from simple_db_migrate.oracle import Oracle
from tests import BaseTest
class OracleTest(BaseTest):
def setUp(self):
super(OracleTest, self).setUp()
self.execute_returns = {}
self.fetchone_returns = {'select count(*) from db_version': [0]}
self.close_returns = {}
        self.last_execute_command = ''
        self.last_execute_commands = []
self.config_dict = {'database_script_encoding': 'utf8',
'database_encoding': 'American_America.UTF8',
'database_host': 'somehost',
'database_user': 'root',
'database_password': '<PASSWORD>',
'database_name': 'SID',
'database_version_table': 'db_version',
'drop_db_first': False
}
self.config_mock = MagicMock(spec_set=dict, wraps=self.config_dict)
self.cursor_mock = Mock(**{"execute": Mock(side_effect=self.execute_side_effect),
"close": Mock(side_effect=self.close_side_effect),
"fetchone": Mock(side_effect=self.fetchone_side_effect),
"setinputsizes": Mock(return_value = None),
"rowcount": 0})
self.db_mock = Mock(**{"cursor.return_value": self.cursor_mock})
self.db_driver_mock = Mock(**{"connect.return_value": self.db_mock, "CLOB": "CLOB"})
self.stdin_mock = Mock(**{"readline.return_value":"dba_user"})
self.getpass_mock = Mock(return_value = "dba_password")
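    # Editor's note: the side-effect helpers wired into the mocks above
    # (execute_side_effect, fetchone_side_effect, close_side_effect,
    # makedsn_side_effect) are defined elsewhere in the upstream test module.
    # Roughly, they record the SQL passed to cursor.execute and then look the
    # statement up in self.execute_returns / self.fetchone_returns, raising a
    # stored Exception or returning the stored value. A hypothetical sketch:
    #
    #   def execute_side_effect(self, *args, **kwargs):
    #       self.last_execute_command = args[0]
    #       self.last_execute_commands.append(args[0])
    #       result = self.execute_returns.get(args[0])
    #       if isinstance(result, Exception):
    #           raise result
    #       return result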
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_cx_Oracle_as_driver(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
Oracle(self.config_mock)
self.assertNotEqual(0, sys.modules['cx_Oracle'].connect.call_count)
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_default_port(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
sys.modules['cx_Oracle'].makedsn.side_effect = self.makedsn_side_effect
Oracle(self.config_mock)
self.assertEqual(call(dsn="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=somehost)(PORT=1521)))(CONNECT_DATA=(SID=SID)))", password='<PASSWORD>', user='root'), sys.modules['cx_Oracle'].connect.call_args)
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_given_configuration(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
sys.modules['cx_Oracle'].makedsn.side_effect = self.makedsn_side_effect
self.config_dict['database_port'] = 9876
Oracle(self.config_mock)
self.assertEqual(call(dsn="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=somehost)(PORT=9876)))(CONNECT_DATA=(SID=SID)))", password='<PASSWORD>', user='root'), sys.modules['cx_Oracle'].connect.call_args)
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_database_name_as_dsn_when_database_host_is_not_set(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
self.config_dict['database_host'] = None
Oracle(self.config_mock)
self.assertEqual(call(dsn='SID', password='<PASSWORD>', user='root'), sys.modules['cx_Oracle'].connect.call_args)
def test_it_should_stop_process_when_an_error_occur_during_connect_database(self):
self.db_driver_mock.connect.side_effect = Exception("error when connecting")
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("could not connect to database: error when connecting", str(e))
self.assertEqual(0, self.db_mock.commit.call_count)
self.assertEqual(0, self.db_mock.close.call_count)
self.assertEqual(0, self.cursor_mock.execute.call_count)
self.assertEqual(0, self.cursor_mock.close.call_count)
def test_it_should_create_database_and_version_table_on_init_if_not_exists(self):
self.first_return = Exception("could not connect to database: ORA-01017 invalid user/password")
def connect_side_effect(*args, **kwargs):
ret = sentinel.DEFAULT
if (kwargs['user'] == 'root') and self.first_return:
ret = self.first_return
self.first_return = None
raise ret
return ret
self.db_driver_mock.connect.side_effect = connect_side_effect
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(8, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root'),
call('grant drop public synonym to root'),
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(7, self.cursor_mock.close.call_count)
    def test_it_should_ignore_errors_while_dropping_the_sequence_during_the_create_database_process(self):
self.first_return = Exception("could not connect to database: ORA-01017 invalid user/password")
def connect_side_effect(*args, **kwargs):
ret = sentinel.DEFAULT
if (kwargs['user'] == 'root') and self.first_return:
ret = self.first_return
self.first_return = None
raise ret
return ret
self.db_driver_mock.connect.side_effect = connect_side_effect
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
self.execute_returns["drop sequence db_version_seq"] = Exception("Sequence doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(2, self.db_mock.rollback.call_count)
self.assertEqual(8, self.db_driver_mock.connect.call_count)
self.assertEqual(3, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root'),
call('grant drop public synonym to root'),
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(7, self.cursor_mock.close.call_count)
def test_it_should_create_version_table_on_init_if_not_exists(self):
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(7, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(6, self.cursor_mock.close.call_count)
def test_it_should_drop_database_on_init_if_its_asked(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.fetchone_returns[select_elements_to_drop_sql] = [("DELETE TABLE DB_VERSION CASCADE CONSTRAINTS;",)]
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(9, self.db_driver_mock.connect.call_count)
self.assertEqual(5, self.db_mock.commit.call_count)
self.assertEqual(9, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('DELETE TABLE DB_VERSION CASCADE CONSTRAINTS'),
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(8, self.cursor_mock.close.call_count)
def test_it_should_create_user_when_it_does_not_exists_during_drop_database_selecting_elements_to_drop(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.execute_returns[select_elements_to_drop_sql] = Exception("could not connect to database: ORA-01017 invalid user/password")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root'),
call('grant drop public synonym to root'),
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_create_user(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.execute_returns[select_elements_to_drop_sql] = Exception("could not connect to database: ORA-01017 invalid user/password")
self.execute_returns['grant create public synonym to root'] = Exception("error when granting")
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("check error: error when granting", str(e))
self.assertEqual(2, self.db_driver_mock.connect.call_count)
self.assertEqual(0, self.db_mock.commit.call_count)
self.assertEqual(2, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(2, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_drop_database_selecting_elements_to_drop(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.execute_returns[select_elements_to_drop_sql] = Exception("error when dropping")
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("error when dropping", str(e))
self.assertEqual(0, self.db_mock.commit.call_count)
self.assertEqual(1, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql)
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(1, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_drop_elements_from_database_and_user_asked_to_stop(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.fetchone_returns[select_elements_to_drop_sql] = [("DELETE TABLE DB_VERSION CASCADE CONSTRAINTS;",),("DELETE TABLE AUX CASCADE CONSTRAINTS;",)]
self.execute_returns["DELETE TABLE DB_VERSION CASCADE CONSTRAINTS"] = Exception("error dropping table")
self.stdin_mock.readline.return_value = "n"
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("can't drop database objects for user 'root'", str(e))
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(1, self.db_mock.commit.call_count)
self.assertEqual(3, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('DELETE TABLE DB_VERSION CASCADE CONSTRAINTS'),
call('DELETE TABLE AUX CASCADE CONSTRAINTS')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(3, self.cursor_mock.close.call_count)
def test_it_should_not_stop_process_when_an_error_occur_during_drop_elements_from_database_and_user_asked_to_continue(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.fetchone_returns[select_elements_to_drop_sql] = [("DELETE TABLE DB_VERSION CASCADE CONSTRAINTS;",),("DELETE TABLE AUX CASCADE CONSTRAINTS;",)]
self.execute_returns["DELETE TABLE DB_VERSION CASCADE CONSTRAINTS"] = Exception("error dropping table")
self.stdin_mock.readline.return_value = "y"
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(3, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('DELETE TABLE DB_VERSION CASCADE CONSTRAINTS'),
call('DELETE TABLE AUX CASCADE CONSTRAINTS'),
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(6, self.cursor_mock.close.call_count)
def test_it_should_execute_migration_up_and_update_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;")
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)', {'label': None, 'sql_up': 'create table spam();', 'version': '20090212112104', 'sql_down': 'drop table spam;', 'migration_file_name': '20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_execute_migration_down_and_update_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("drop table spam;", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", False)
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('drop table spam'),
call('delete from db_version where version = :version', {'version': '20090212112104'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_use_label_version_when_updating_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)', {'label': "label", 'sql_up': 'create table spam();', 'version': '20090212112104', 'sql_down': 'drop table spam;', 'migration_file_name': '20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_enforce_sql_up_and_sql_down_type_size_when_updating_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
self.assertEqual([call(sql_down='CLOB', sql_up='CLOB')], self.cursor_mock.setinputsizes.mock_calls)
    def test_it_should_raise_when_migration_sql_has_a_syntax_error(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertRaisesWithMessage(Exception, "error executing migration: invalid sql syntax 'create table foo(); create table spam());'", oracle.change,
"create table foo(); create table spam());", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam());", "drop table spam;", label_version="label")
    def test_it_should_raise_when_migration_sql_has_a_syntax_error_sql_with_codec_error(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
expected_raised_message = u"error executing migration: invalid sql syntax 'create table foo(); create table spam()); -- ônibus'"
if (sys.version_info < (3, 0)):
expected_raised_message = expected_raised_message.encode("utf-8")
self.assertRaisesWithMessage(Exception, expected_raised_message, oracle.change,
u"create table foo(); create table spam()); -- ônibus", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table foo(); create table spam());", "drop table spam;", label_version="label")
def test_it_should_stop_process_when_an_error_occur_during_database_change(self):
self.execute_returns["insert into spam"] = Exception("invalid sql")
try:
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam(); insert into spam", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
except Exception as e:
self.assertEqual("error executing migration: invalid sql\n\n[ERROR DETAILS] SQL command was:\ninsert into spam", str(e))
self.assertTrue(isinstance(e, simple_db_migrate.core.exceptions.MigrationException))
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into spam')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_log_schema_version(self):
self.execute_returns['insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)'] = Exception("invalid sql")
try:
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
except Exception as e:
self.assertEqual('error logging migration: invalid sql\n\n[ERROR DETAILS] SQL command was:\n20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration', str(e))
self.assertTrue(isinstance(e, simple_db_migrate.core.exceptions.MigrationException))
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(3, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)', {'label': 'label', 'sql_up': 'create table spam();', 'version': '20090212112104', 'sql_down': 'drop table spam;', 'migration_file_name': '20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_log_execution_when_a_function_is_given_when_updating_schema_version(self):
execution_log_mock = Mock()
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", execution_log=execution_log_mock)
expected_execution_log_calls = [
call('create table spam()\n-- 0 row(s) affected\n'),
call('migration 20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration registered\n')
]
self.assertEqual(expected_execution_log_calls, execution_log_mock.mock_calls)
def test_it_should_get_current_schema_version(self):
self.fetchone_returns = {'select count(*) from db_version': [0], 'select version from db_version order by id desc': ["0"]}
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual("0", oracle.get_current_schema_version())
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('select version from db_version order by id desc')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_all_schema_versions(self):
expected_versions = []
expected_versions.append("0")
expected_versions.append("20090211120001")
expected_versions.append("20090211120002")
expected_versions.append("20090211120003")
self.fetchone_returns["select version from db_version order by id"] = list(zip(expected_versions))
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
schema_versions = oracle.get_all_schema_versions()
self.assertEqual(len(expected_versions), len(schema_versions))
for version in schema_versions:
self.assertTrue(version in expected_versions)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('select version from db_version order by id')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_all_schema_migrations(self):
expected_versions = []
expected_versions.append([1, "0", None, None, None, None])
expected_versions.append([2, "20090211120001", "label", "20090211120001_name", Mock(**{"read.return_value":"sql_up"}), Mock(**{"read.return_value":"sql_down"})])
self.fetchone_returns["select id, version, label, name, sql_up, sql_down from db_version order by id"] = list(expected_versions)
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
schema_migrations = oracle.get_all_schema_migrations()
self.assertEqual(len(expected_versions), len(schema_migrations))
for index, migration in enumerate(schema_migrations):
self.assertEqual(migration.id, expected_versions[index][0])
self.assertEqual(migration.version, expected_versions[index][1])
self.assertEqual(migration.label, expected_versions[index][2])
self.assertEqual(migration.file_name, expected_versions[index][3])
self.assertEqual(migration.sql_up, expected_versions[index][4] and expected_versions[index][4].read() or "")
self.assertEqual(migration.sql_down, expected_versions[index][5] and expected_versions[index][5].read() or "")
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('select id, version, label, name, sql_up, sql_down from db_version order by id')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_parse_sql_statements(self):
#TODO include other types of sql
sql = "create table eggs; drop table spam; ; ;\
CREATE OR REPLACE FUNCTION simple \n\
RETURN VARCHAR2 IS \n\
BEGIN \n\
RETURN 'Simple Function'; \n\
END simple; \n\
/ \n\
drop table eggs; \n\
create or replace procedure proc_db_migrate(dias_fim_mes out number) \n\
as v number; \n\
begin \n\
SELECT LAST_DAY(SYSDATE) - SYSDATE \"Days Left\" \n\
into v \n\
FROM DUAL; \n\
dias_fim_mes := v; \n\
end; \n\
\t/ \n\
create OR RePLaCe TRIGGER \"FOLDER_TR\" \n\
BEFORE INSERT ON \"FOLDER\" \n\
FOR EACH ROW WHEN \n\
(\n\
new.\"FOLDER_ID\" IS NULL \n\
)\n\
BEGIN\n\
SELECT \"FOLDER_SQ\".nextval\n\
INTO :new.\"FOLDER_ID\"\n\
FROM dual;\n\
EnD;\n\
/\n\
CREATE OR REPLACE\t PACKAGE pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER;\n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello',\n\
i_str2 VARCHAR2 := 'world', \n\
i_end VARCHAR2 := '!');\n\
END;\n\
/ \n\
CREATE OR REPLACE\n PACKAGE BODY pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER \n\
IS \n\
v_pi NUMBER := 3.14; \n\
BEGIN \n\
RETURN v_pi * (i_rad ** 2); \n\
END; \n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello', i_str2 VARCHAR2 := 'world', i_end VARCHAR2 := '!') \n\
IS \n\
BEGIN \n\
DBMS_OUTPUT.put_line (i_str1 || ',' || i_str2 || i_end); \n\
END; \n\
END; \n\
/ \n\
DECLARE\n\
counter NUMBER(10,8) := 2; \r\n\
pi NUMBER(8,7) := 3.1415926; \n\
test NUMBER(10,8) NOT NULL := 10;\n\
BEGIN \n\
counter := pi/counter; \n\
pi := pi/3; \n\
dbms_output.put_line(counter); \n\
dbms_output.put_line(pi); \n\
END; \n\
/ \n\
BEGIN \n\
dbms_output.put_line('teste de bloco anonimo'); \n\
dbms_output.put_line(select 1 from dual); \n\
END; \n\
/ "
statements = Oracle._parse_sql_statements(sql)
self.assertEqual(10, len(statements))
self.assertEqual('create table eggs', statements[0])
self.assertEqual('drop table spam', statements[1])
self.assertEqual("CREATE OR REPLACE FUNCTION simple \n\
RETURN VARCHAR2 IS \n\
BEGIN \n\
RETURN 'Simple Function'; \n\
END simple;", statements[2])
self.assertEqual('drop table eggs', statements[3])
self.assertEqual('create or replace procedure proc_db_migrate(dias_fim_mes out number) \n\
as v number; \n\
begin \n\
SELECT LAST_DAY(SYSDATE) - SYSDATE \"Days Left\" \n\
into v \n\
FROM DUAL; \n\
dias_fim_mes := v; \n\
end;', statements[4])
self.assertEqual('create OR RePLaCe TRIGGER \"FOLDER_TR\" \n\
BEFORE INSERT ON \"FOLDER\" \n\
FOR EACH ROW WHEN \n\
(\n\
new.\"FOLDER_ID\" IS NULL \n\
)\n\
BEGIN\n\
SELECT \"FOLDER_SQ\".nextval\n\
INTO :new.\"FOLDER_ID\"\n\
FROM dual;\n\
EnD;', statements[5])
self.assertEqual("CREATE OR REPLACE\t PACKAGE pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER;\n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello',\n\
i_str2 VARCHAR2 := 'world', \n\
i_end VARCHAR2 := '!');\n\
END;", statements[6])
self.assertEqual("CREATE OR REPLACE\n PACKAGE BODY pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER \n\
IS \n\
v_pi NUMBER := 3.14; \n\
BEGIN \n\
RETURN v_pi * (i_rad ** 2); \n\
END; \n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello', i_str2 VARCHAR2 := 'world', i_end VARCHAR2 := '!') \n\
IS \n\
BEGIN \n\
DBMS_OUTPUT.put_line (i_str1 || ',' || i_str2 || i_end); \n\
END; \n\
END;", statements[7])
self.assertEqual("DECLARE\n\
counter NUMBER(10,8) := 2; \r\n\
pi NUMBER(8,7) := 3.1415926; \n\
test NUMBER(10,8) NOT NULL := 10;\n\
BEGIN \n\
counter := pi/counter; \n\
pi := pi/3; \n\
dbms_output.put_line(counter); \n\
dbms_output.put_line(pi); \n\
END;", statements[8])
self.assertEqual("BEGIN \n\
dbms_output.put_line('teste de bloco anonimo'); \n\
dbms_output.put_line(select 1 from dual); \n\
END;", statements[9])
def test_it_should_parse_sql_statements_with_html_inside(self):
sql = u"""
create table eggs;
INSERT INTO widget_parameter_domain (widget_parameter_id, label, value)
VALUES ((SELECT MAX(widget_parameter_id)
FROM widget_parameter), "Carros", '<div class="box-zap-geral">
<div class="box-zap box-zap-autos">
<a class="logo" target="_blank" title="ZAP" href="http://www.zap.com.br/Parceiros/g1/RedirG1.aspx?CodParceriaLink=42&URL=http://www.zap.com.br">');
drop table spam;
"""
statements = Oracle._parse_sql_statements(sql)
expected_sql_with_html = """INSERT INTO widget_parameter_domain (widget_parameter_id, label, value)
VALUES ((SELECT MAX(widget_parameter_id)
FROM widget_parameter), "Carros", '<div class="box-zap-geral">
<div class="box-zap box-zap-autos">
<a class="logo" target="_blank" title="ZAP" href="http://www.zap.com.br/Parceiros/g1/RedirG1.aspx?CodParceriaLink=42&URL=http://www.zap.com.br">')"""
self.assertEqual(3, len(statements))
self.assertEqual('create table eggs', statements[0])
self.assertEqual(expected_sql_with_html, statements[1])
self.assertEqual('drop table spam', statements[2])
def test_it_should_get_none_for_a_non_existent_version_in_database(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
ret = oracle.get_version_id_from_version_number('xxx')
self.assertEqual(None, ret)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call("select id from db_version where version = 'xxx' order by id desc")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_most_recent_version_for_a_existent_label_in_database(self):
self.fetchone_returns["select version from db_version where label = 'xxx' order by id desc"] = ["vesion", "version2", "version3"]
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
ret = oracle.get_version_number_from_label('xxx')
self.assertEqual("vesion", ret)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call("select version from db_version where label = 'xxx' order by id desc")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_none_for_a_non_existent_label_in_database(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
ret = oracle.get_version_number_from_label('xxx')
self.assertEqual(None, ret)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call("select version from db_version where label = 'xxx' order by id desc")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def side_effect(self, returns, default_value):
commands = len(self.last_execute_commands)
if commands > 0:
self.last_execute_command = self.last_execute_commands[commands - 1]
value = result = returns.pop(self.last_execute_command, default_value)
if isinstance(result, Exception):
if commands > 0:
self.last_execute_commands.pop()
raise result
if isinstance(result, list) and len(result) > 0 and (isinstance(result[0], tuple) or isinstance(result[0], list)):
returns[self.last_execute_command] = result
value = result.pop(0)
elif isinstance(result, list) and len(result) == 0:
value = None
if commands > 0 and \
self.execute_returns.get(self.last_execute_command, None) is None and \
self.fetchone_returns.get(self.last_execute_command, None) is None and \
self.close_returns.get(self.last_execute_command, None) is None:
self.last_execute_commands.pop()
return value
def execute_side_effect(self, *args):
self.last_execute_commands.append(args[0])
return self.side_effect(self.execute_returns, 0)
def fetchone_side_effect(self, *args):
return self.side_effect(self.fetchone_returns, None)
def close_side_effect(self, *args):
return self.side_effect(self.close_returns, None)
def makedsn_side_effect(self, host, port, sid):
return "(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%s)))(CONNECT_DATA=(SID=%s)))" % (host, port, sid)
if __name__ == "__main__":
unittest.main()
|
f5/bigip/tm/transaction/__init__.py
|
nghia-tran/f5-common-python
| 272 |
74839
|
# coding=utf-8
#
# Copyright 2014 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® system dns module
REST URI
``http://localhost/mgmt/tm/transaction``
REST Kind
``tm:transaction*``
"""
from f5.bigip.resource import Collection
from f5.bigip.resource import Resource
class Transactions(Collection):
"""This class is a context manager for iControl transactions.
Upon successful exit of the with statement, the transaction will be
submitted, otherwise it will be rolled back.
NOTE: This feature was added to BIGIP in version 11.0.0.
Example:
> bigip = BigIP(<args>)
> tx = bigip.transactions.transaction
> with TransactionContextManager(tx) as api:
> api.net.pools.pool.create(name="foo")
> api.sys.dbs.db.update(name="setup.run", value="false")
> <perform actions inside a transaction>
>
> # transaction is committed when you exit the "with" statement.
"""
def __init__(self, api):
super(Transactions, self).__init__(api)
self._meta_data['allowed_lazy_attributes'] = [Transaction]
self._meta_data['attribute_registry'] = \
{'tm:transactionstate': Transaction}
class Transaction(Resource):
def __init__(self, transactions):
super(Transaction, self).__init__(transactions)
self._meta_data['required_json_kind'] = 'tm:transactionstate'
self._meta_data['required_creation_parameters'] = set()
|
genetic-algorithm-tutorial/implementation.py
|
fimoziq/tutorials
| 670 |
74850
|
<filename>genetic-algorithm-tutorial/implementation.py
# -*- coding: utf-8 -*-
"""genetic-algorithm-python-tutorial.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/161ijkvn8wG_seVtQexm-p3fW3r5p8s_x
# Genetic Algorithm Implementation with Python
* Tutorial: https://towardsai.net/p/computer-science/genetic-algorithm-ga-introduction-with-example-code-e59f9bc58eaf
* Github: https://github.com/towardsai/tutorials/tree/master/genetic-algorithm-tutorial
The Genetic Algorithm is a class of evolutionary algorithms broadly inspired by biological evolution: parents are selected, they reproduce, and their offspring are mutated. The aim is to produce offspring that are fitter than their parents. The genetic algorithm is based on natural selection and tries to simulate the process of evolution.
"""
import numpy as np
import matplotlib.pyplot as plt
import copy
# cost function
def sphere(x):
    ''' This is the problem we will be
    optimizing: each individual's chromosome has a cost,
    which is calculated by this cost function'''
return sum(x**2)
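# Worked example (added for illustration): sphere(np.array([1.0, 2.0])) returns
# 1.0**2 + 2.0**2 == 5.0, so chromosomes closer to the origin have a lower cost
# and are considered fitter.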
def roulette_wheel_selection(p):
    ''' Roulette Wheel Selection is a method of parent
    selection for breeding. We take the cumulative sum of probabilities
    and select the first parent whose cumulative sum is greater than or
    equal to a random number'''
c = np.cumsum(p)
r = sum(p) * np.random.rand()
ind = np.argwhere(r <= c)
return ind[0][0]
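# Worked example (added for illustration): for p = [0.5, 0.3, 0.2] the cumulative
# sum is c = [0.5, 0.8, 1.0]; if the random number r happens to be 0.6 then
# r <= c gives [False, True, True] and the function returns index 1.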
def crossover(p1, p2):
    ''' Performing uniform crossover. Alpha is the flag
    that determines which gene of each chromosome is chosen
    to be inherited by the offspring. Multiply the alpha value
    with each gene of every chromosome of both the parents and
    then add the resultant values to get the child chromosome'''
c1 = copy.deepcopy(p1)
c2 = copy.deepcopy(p2)
# Uniform crossover
alpha = np.random.uniform(0, 1, *(c1['position'].shape))
c1['position'] = alpha*p1['position'] + (1-alpha)*p2['position']
c2['position'] = alpha*p2['position'] + (1-alpha)*p1['position']
return c1, c2
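# Worked example (added for illustration): for a single gene with alpha = 0.3,
# a p1 gene of 2.0 and a p2 gene of 4.0 give c1 = 0.3*2.0 + 0.7*4.0 = 3.4 and
# c2 = 0.3*4.0 + 0.7*2.0 = 2.6, i.e. both children lie between their parents.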
def mutate(c, mu, sigma):
'''
c: child chromosome
mu: mutation rate. % of gene to be modified
sigma: step size of mutation'''
y = copy.deepcopy(c)
    flag = np.random.rand(*(c['position'].shape)) <= mu # array of True and False, indicating at which positions to perform mutation
ind = np.argwhere(flag)
y['position'][ind] += sigma * np.random.randn(*ind.shape)
return y
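# Worked example (added for illustration): with mu = 0.2 and a 5-gene chromosome
# (the settings used further below), each gene has a 20% chance of being flagged,
# so on average one gene per child is shifted by sigma * N(0, 1), a small
# Gaussian step of scale sigma.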
def bounds(c, varmin, varmax):
''' Defines the upper and lower bound of gene value'''
c['position'] = np.maximum(c['position'], varmin)
c['position'] = np.minimum(c['position'], varmax)
def sort(arr):
    ''' Bubble sorting the population + offspring
    in every iteration to get the best-fit individuals at the top'''
n = len(arr)
for i in range(n-1):
for j in range(0, n-i-1):
if arr[j]['cost'] > arr[j+1]['cost'] :
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr
def ga(costfunc, num_var, varmin, varmax, maxit, npop, num_children, mu, sigma, beta):
# Placeholder for each individual
population = {}
    for i in range(npop): # each individual has a position (chromosome) and a cost,
        population[i] = {'position': None, 'cost': None} # create as many individuals as the population size (npop)
# Best solution found
bestsol = copy.deepcopy(population)
bestsol_cost = np.inf # initial best cost is infinity
# Initialize population - 1st Gen
for i in range(npop):
population[i]['position'] = np.random.uniform(varmin, varmax, num_var) # randomly initialize the chromosomes and cost
population[i]['cost'] = costfunc(population[i]['position'])
        if population[i]['cost'] < bestsol_cost: # if cost of an individual is less(best) than best cost,
            bestsol = copy.deepcopy(population[i]) # replace the best solution with that individual
            bestsol_cost = population[i]['cost'] # and remember its cost as the best cost found so far
# Best cost of each generation/iteration
bestcost = np.empty(maxit)
# Main loop
for it in range(maxit):
# Calculating probability for roulette wheel selection
costs = []
for i in range(len(population)):
costs.append(population[i]['cost']) # list of all the population cost
costs = np.array(costs)
avg_cost = np.mean(costs) # taking average of the costs
if avg_cost != 0:
costs = costs/avg_cost
        probs = np.exp(-beta*costs) # probability is the exponential of negative beta times the cost, so lower cost means higher selection probability
for _ in range(num_children//2): # we will be having two off springs for each crossover
# hence divide number of children by 2
'''
-> choosing two parents randomly for mating
-> we are shuffling all the 20 parent individuals and
-> choosing first two of the shuffled array as our parents for mating
            Randomly selecting parents by shuffling them.
            But we will be using roulette wheel selection
for our algorithm
q = np.random.permutation(npop)
p1 = population[q[0]]
p2 = population[q[1]]
'''
# Roulette wheel selection
p1 = population[roulette_wheel_selection(probs)]
p2 = population[roulette_wheel_selection(probs)]
# crossover two parents
c1, c2 = crossover(p1, p2)
# Perform mutation
c1 = mutate(c1, mu, sigma)
c2 = mutate(c2, mu, sigma)
# Apply bounds
bounds(c1, varmin, varmax)
bounds(c2, varmin, varmax)
# Evaluate first off spring
c1['cost'] = costfunc(c1['position']) # calculate cost function of child 1
            if c1['cost'] < bestsol_cost: # replacing best solution in every generation/iteration
                bestsol = copy.deepcopy(c1)
                bestsol_cost = c1['cost']
# Evaluate second off spring
            if c2['cost'] < bestsol_cost: # replacing best solution in every generation/iteration
                bestsol = copy.deepcopy(c2)
                bestsol_cost = c2['cost']
# Merge, Sort and Select
population[len(population)] = c1
population[len(population)] = c2
population = sort(population)
# Store best cost
        bestcost[it] = bestsol_cost
# Show generation information
        print('Iteration {}: Best Cost = {}'.format(it, bestcost[it]))
    out = population
    return (out, bestsol, bestcost)
# Problem definition
costfunc = sphere
num_var = 5 # number of decision variables
varmin = -10 # lower bound
varmax = 10 # upper bound
# GA Parameters
maxit = 501 # number of iterations
npop = 20 # initial population size
beta = 1
prop_children = 1 # proportion of children to population
num_children = int(np.round(prop_children * npop/2)*2) # making sure it is always an even number
mu = 0.2 # mutation rate 20%; 20% of 5 is 1, mutating 1 gene
sigma = 0.1 # step size of mutation
# Run GA
out = ga(costfunc, num_var, varmin, varmax, maxit, npop, num_children, mu, sigma, beta)
# Results
#(out, Bestsol, bestcost)
plt.plot(out[2])
plt.xlim(0, maxit)
plt.xlabel('Generations')
plt.ylabel('Best Cost')
plt.title('Genetic Algorithm')
plt.grid(True)
plt.show()
|
autobahntestsuite/autobahntestsuite/echo.py
|
rishabh-bector/autobahn-testsuite
| 595 |
74854
|
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['startClient', 'startServer']
from twisted.internet import ssl
from autobahn.twisted.websocket import connectWS, \
listenWS, \
WebSocketClientFactory, \
WebSocketClientProtocol, \
WebSocketServerFactory, \
WebSocketServerProtocol
class EchoServerProtocol(WebSocketServerProtocol):
def onMessage(self, payload, isBinary):
self.sendMessage(payload, isBinary)
class EchoServerFactory(WebSocketServerFactory):
protocol = EchoServerProtocol
def __init__(self, url, debug = False):
WebSocketServerFactory.__init__(self, url, debug = debug, debugCodePaths = debug)
class EchoClientProtocol(WebSocketClientProtocol):
def onMessage(self, payload, isBinary):
self.sendMessage(payload, isBinary)
class EchoClientFactory(WebSocketClientFactory):
protocol = EchoClientProtocol
def __init__(self, url, debug = False):
WebSocketClientFactory.__init__(self, url, debug = debug, debugCodePaths = debug)
def startClient(wsuri, debug = False):
factory = EchoClientFactory(wsuri, debug)
connectWS(factory)
return True
def startServer(wsuri, sslKey = None, sslCert = None, debug = False):
factory = EchoServerFactory(wsuri, debug)
if sslKey and sslCert:
sslContext = ssl.DefaultOpenSSLContextFactory(sslKey, sslCert)
else:
sslContext = None
listenWS(factory, sslContext)
return True
|
test/core/end2end/fuzzers/generate_client_examples_of_bad_closing_streams.py
|
samotarnik/grpc
| 2,151 |
74855
|
<filename>test/core/end2end/fuzzers/generate_client_examples_of_bad_closing_streams.py
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.chdir(os.path.dirname(sys.argv[0]))
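# Note added for clarity: each group of 9 integers below is one HTTP/2 frame
# header (no payload): a 3-byte length, a 1-byte type (0x4 = SETTINGS,
# 0x1 = HEADERS), a 1-byte flags field (0x4 = END_HEADERS,
# 0x5 = END_STREAM | END_HEADERS) and a 4-byte stream identifier.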
streams = {
'server_hanging_response_1_header': (
[0,0,0,4,0,0,0,0,0] + # settings frame
[0,0,0,1,5,0,0,0,1] # trailers
),
'server_hanging_response_2_header2': (
[0,0,0,4,0,0,0,0,0] + # settings frame
[0,0,0,1,4,0,0,0,1] + # headers
[0,0,0,1,5,0,0,0,1] # trailers
),
}
for name, stream in streams.items():
open('client_fuzzer_corpus/%s' % name, 'w').write(bytearray(stream))
|
test/execution/gen_random_tpl.py
|
weijietong/noisepage
| 971 |
74890
|
# http://pastie.org/pastes/10943132/text
# Copyright (c) 2016 1wd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import random
import sys
class Expr(object):
def __init__(self):
pass
class ConstExpr(Expr):
def __init__(self, val):
self.val = val
class VarExpr(Expr):
def __init__(self, name):
self.name = name
class BinOp(Expr):
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
class FunCallExpr(Expr):
def __init__(self, name):
self.name = name
class Statement(object):
def __init__(self):
pass
class Assignment(Statement):
def __init__(self, lval, expr):
self.lval = lval
self.expr = expr
class VarDecl(Statement):
def __init__(self, name, expr):
self.name = name
self.expr = expr
self.mut = False
self.used = False
class Return(Statement):
def __init__(self, expr, context):
self.expr = expr
self.context = context
class Print(Statement):
def __init__(self, expr):
self.expr = expr
class FunDecl(object):
def __init__(self, name, statements, return_type):
self.name = name
self.statements = statements
self.return_type = return_type
self.used = False
class Program(object):
def __init__(self, main, functions):
self.main = main
self.functions = functions
#----------------------------------------------------------
class Context(object):
def __init__(self, parent=None, decl_name=None):
self.env = {}
self.id = 0
self.parent = parent
self.decl_name = decl_name
def name(self, base, i):
return "%s%s" % (base, i)
def new_name(self, base):
self.id += 1
return self.name(base, self.id)
def random_name(self, base):
biased_min = random.randint(1, self.id)
i = random.randint(biased_min, self.id)
return self.name(base, i)
def random_expr(self):
return random.choice([
self.random_const_expr,
self.random_var_expr,
self.random_binary_op,
self.random_fun_call,
])()
def find_unused(self):
for decl in self.env.values():
if not decl.used:
return decl
return None
def force_use_expr(self):
expr = self.random_const_expr()
decl = self.find_unused()
while decl is not None:
left = self.forced_var_expr(decl.name)
expr = self.forced_random_binary_op(left, expr)
decl = self.find_unused()
decl = self.parent.find_unused()
while decl is not None:
left = self.forced_fun_call(decl.name)
expr = self.forced_random_binary_op(left, expr)
decl = self.parent.find_unused()
return expr
def random_const_expr(self):
return ConstExpr(str(random.randint(1, 1000)))
def forced_var_expr(self, name):
decl = self.env[name]
decl.used = True
return VarExpr(name)
def random_var_expr(self):
if self.id == 0:
return self.random_const_expr()
name = self.random_name('x')
return self.forced_var_expr(name)
def forced_random_binary_op(self, left, right):
#op = random.choice(["+", "-", "*", "|", "&", "^"])
op = random.choice(["|", "&", "^"])
return BinOp(op, left, right)
def random_binary_op(self):
left = self.random_expr()
right = self.random_expr()
return self.forced_random_binary_op(left, right)
def forced_fun_call(self, name):
decl = self.parent.env[name]
decl.used = True
return FunCallExpr(name)
def random_fun_call(self):
if self.parent.id == 0:
return self.random_const_expr()
name = self.parent.random_name('f')
return self.forced_fun_call(name)
def random_statement(self):
return random.choice([
self.random_assignment,
self.random_var_decl,
])()
def random_assignment(self):
name = self.random_name('x')
decl = self.env[name]
expr = self.random_expr()
if not decl.used:
left = self.forced_var_expr(name)
expr = self.forced_random_binary_op(left, expr)
decl.used = False
decl.mut = True
return Assignment(name, expr)
def random_return_statement(self):
return Return(self.force_use_expr(), self)
def random_print_statement(self):
return Print(self.force_use_expr())
def random_var_decl(self):
expr = self.random_expr()
name = self.new_name('x')
decl = VarDecl(name, expr)
self.env[name] = decl
return decl
def random_fun_decl(self, num_statements, return_type):
local = Context(self)
statements = []
statements.append(local.random_var_decl())
for i in range(num_statements):
statements.append(local.random_statement())
if return_type is not None:
statements.append(local.random_return_statement())
else:
statements.append(local.random_print_statement())
name = self.new_name('f')
decl = FunDecl(name, statements, return_type)
local.decl = decl
self.env[name] = decl
return decl
def random_program(self, num_funs, max_statements_per_fun):
functions = []
for i in range(num_funs):
num_statements = random.randint(1, max_statements_per_fun)
fun_decl = self.random_fun_decl(num_statements, 'int')
functions.append(fun_decl)
num_statements = random.randint(1, max_statements_per_fun)
main = self.random_fun_decl(num_statements, None)
return Program(main, functions)
#----------------------------------------------------------
class Lang(object):
operators = {
'&': '&',
'|': '|',
'^': '^',
}
def __init__(self):
self.indent = 0
def write_indent(self, f):
f.write(' ' * 4 * self.indent)
def write_statement(self, f, statement):
handlers = {
VarDecl: self.write_var_decl,
Assignment: self.write_assignment,
Return: self.write_return,
Print: self.write_print,
}
handler = handlers.get(type(statement))
if handler is not None:
handler(f, statement)
else:
raise Exception("Unknown kind of statement")
def write_lval(self, f, lval):
f.write(lval)
def write_expr(self, f, expr, needs_parens=False):
handlers = {
ConstExpr: self.write_const_expr,
VarExpr: self.write_var_expr,
BinOp: self.write_bin_op,
FunCallExpr: self.write_fun_call,
}
handler = handlers.get(type(expr))
if handler is not None:
handler(f, expr, needs_parens)
else:
raise Exception("Unknown kind of expr")
def write_const_expr(self, f, expr, needs_parens):
f.write(expr.val)
def write_var_expr(self, f, expr, needs_parens):
f.write(expr.name)
def write_bin_op(self, f, expr, needs_parens):
if needs_parens:
f.write("(")
self.write_expr(f, expr.left, needs_parens=True)
f.write(" %s " % self.operators[expr.op])
self.write_expr(f, expr.right, needs_parens=True)
if needs_parens:
f.write(")")
def write_fun_call(self, f, expr, needs_parens):
f.write("%s()" % expr.name)
class CppLang(Lang):
ext = 'cpp'
type_names = {
'int': 'int',
}
def write_program(self, f, program):
f.write('#include <cstdio>\n\n')
for fun_decl in program.functions:
self.write_fun_decl(f, fun_decl)
f.write('\n')
self.write_fun_decl(f, program.main, main=True)
def write_fun_decl(self, f, fun_decl, main=False):
if fun_decl.return_type is None:
optional_result = 'int '
else:
type_name = self.type_names[fun_decl.return_type]
optional_result = '%s ' % type_name
fun_name = 'main' if main else fun_decl.name
f.write('%s %s() {\n' % (optional_result, fun_name))
self.indent += 1
for statement in fun_decl.statements:
self.write_statement(f, statement)
self.indent -= 1
f.write('}\n')
def write_var_decl(self, f, var_decl):
self.write_indent(f)
f.write('int ')
self.write_lval(f, var_decl.name)
f.write(' = ')
self.write_expr(f, var_decl.expr)
f.write(';\n')
def write_assignment(self, f, assignment):
self.write_indent(f)
self.write_lval(f, assignment.lval)
f.write(' = ')
self.write_expr(f, assignment.expr)
f.write(';\n')
def write_return(self, f, statement):
self.write_indent(f)
f.write('return ')
self.write_expr(f, statement.expr)
f.write(';\n')
def write_print(self, f, statement):
self.write_indent(f)
f.write('printf("%i\\n", ')
self.write_expr(f, statement.expr)
f.write(');\n')
class CLang(CppLang):
ext = 'c'
def write_program(self, f, program):
f.write('#include <stdio.h>\n\n')
for fun_decl in program.functions:
self.write_fun_decl(f, fun_decl)
f.write('\n')
self.write_fun_decl(f, program.main, main=True)
class DLang(Lang):
ext = 'd'
type_names = {
'int': 'int',
}
def write_program(self, f, program):
f.write('import std.stdio;\n\n')
for fun_decl in program.functions:
self.write_fun_decl(f, fun_decl)
f.write('\n')
self.write_fun_decl(f, program.main, main=True)
def write_fun_decl(self, f, fun_decl, main=False):
if fun_decl.return_type is None:
optional_result = 'void '
else:
type_name = self.type_names[fun_decl.return_type]
optional_result = '%s ' % type_name
fun_name = 'main' if main else fun_decl.name
f.write('%s %s() {\n' % (optional_result, fun_name))
self.indent += 1
for statement in fun_decl.statements:
self.write_statement(f, statement)
self.indent -= 1
f.write('}\n')
def write_var_decl(self, f, var_decl):
self.write_indent(f)
f.write('int ')
self.write_lval(f, var_decl.name)
f.write(' = ')
self.write_expr(f, var_decl.expr)
f.write(';\n')
def write_assignment(self, f, assignment):
self.write_indent(f)
self.write_lval(f, assignment.lval)
f.write(' = ')
self.write_expr(f, assignment.expr)
f.write(';\n')
def write_return(self, f, statement):
self.write_indent(f)
f.write('return ')
self.write_expr(f, statement.expr)
f.write(';\n')
def write_print(self, f, statement):
self.write_indent(f)
f.write('writefln("%d", ')
self.write_expr(f, statement.expr)
f.write(');\n')
class GoLang(Lang):
ext = 'go'
type_names = {
'int': 'int',
}
def write_program(self, f, program):
f.write('package main\n\n')
f.write('import "fmt"\n\n')
for fun_decl in program.functions:
self.write_fun_decl(f, fun_decl)
f.write('\n')
self.write_fun_decl(f, program.main, main=True)
def write_fun_decl(self, f, fun_decl, main=False):
if fun_decl.return_type is None:
optional_result = ''
else:
type_name = self.type_names[fun_decl.return_type]
optional_result = ' %s' % type_name
fun_name = 'main' if main else fun_decl.name
f.write('func %s()%s {\n' % (fun_name, optional_result))
self.indent += 1
for statement in fun_decl.statements:
self.write_statement(f, statement)
self.indent -= 1
f.write('}\n')
def write_var_decl(self, f, var_decl):
self.write_indent(f)
self.write_lval(f, var_decl.name)
f.write(' := ')
self.write_expr(f, var_decl.expr)
f.write('\n')
def write_assignment(self, f, assignment):
self.write_indent(f)
self.write_lval(f, assignment.lval)
f.write(' = ')
self.write_expr(f, assignment.expr)
f.write('\n')
def write_return(self, f, statement):
self.write_indent(f)
f.write('return ')
self.write_expr(f, statement.expr)
f.write('\n')
def write_print(self, f, statement):
self.write_indent(f)
f.write('fmt.Printf("%d\\n", ')
self.write_expr(f, statement.expr)
f.write(')\n')
class PascalLang(Lang):
ext = 'pas'
type_names = {
'int': 'integer',
}
operators = {
'&': 'and',
'|': 'or',
'^': 'xor',
}
def write_program(self, f, program):
f.write('program main;\n\n')
for fun_decl in program.functions:
self.write_fun_decl(f, fun_decl)
f.write('\n')
self.write_fun_decl(f, program.main, main=True)
def write_fun_decl(self, f, fun_decl, main=False):
if not main:
fun_name = fun_decl.name
type_name = self.type_names[fun_decl.return_type]
f.write('function %s() : %s;\n' % (fun_name, type_name))
vars = [s for s in fun_decl.statements if isinstance(s, VarDecl)]
if vars:
f.write('var\n')
for v in vars:
type_name = self.type_names['int']
f.write(' %s : %s;\n' % (v.name, type_name))
f.write('begin\n')
self.indent += 1
for statement in fun_decl.statements:
self.write_statement(f, statement)
self.indent -= 1
f.write('end%s\n' % ('.' if main else ';'))
def write_var_decl(self, f, var_decl):
self.write_indent(f)
self.write_lval(f, var_decl.name)
f.write(' := ')
self.write_expr(f, var_decl.expr)
f.write(';\n')
def write_assignment(self, f, assignment):
self.write_indent(f)
self.write_lval(f, assignment.lval)
f.write(' := ')
self.write_expr(f, assignment.expr)
f.write(';\n')
def write_return(self, f, statement):
self.write_indent(f)
self.write_lval(f, statement.context.decl.name)
f.write(' := ')
self.write_expr(f, statement.expr)
f.write(';\n')
def write_print(self, f, statement):
self.write_indent(f)
f.write('writeln(')
self.write_expr(f, statement.expr)
f.write(');\n')
class RustLang(Lang):
ext = 'rs'
type_names = {
'int': 'i32',
}
def write_program(self, f, program):
for fun_decl in program.functions:
self.write_fun_decl(f, fun_decl)
f.write('\n')
self.write_fun_decl(f, program.main, main=True)
def write_fun_decl(self, f, fun_decl, main=False):
if fun_decl.return_type is None:
optional_result = ''
else:
type_name = self.type_names[fun_decl.return_type]
optional_result = ' -> %s' % type_name
fun_name = 'main' if main else fun_decl.name
f.write('fn %s()%s {\n' % (fun_name, optional_result))
self.indent += 1
for statement in fun_decl.statements:
self.write_statement(f, statement)
self.indent -= 1
f.write('}\n')
def write_var_decl(self, f, var_decl):
self.write_indent(f)
f.write('let ')
if var_decl.mut:
f.write('mut ')
self.write_lval(f, var_decl.name)
f.write(': i32')
f.write(' = ')
self.write_expr(f, var_decl.expr)
f.write(';\n')
def write_const_expr(self, f, expr, needs_parens):
f.write(expr.val + 'i32')
def write_assignment(self, f, assignment):
self.write_indent(f)
self.write_lval(f, assignment.lval)
f.write(' = ')
self.write_expr(f, assignment.expr)
f.write(';\n')
def write_return(self, f, statement):
self.write_indent(f)
self.write_expr(f, statement.expr)
f.write('\n')
def write_print(self, f, statement):
self.write_indent(f)
f.write('println!("{}", ')
self.write_expr(f, statement.expr)
f.write(')\n')
class TplLang(Lang):
ext = 'tpl'
type_names = {
'int': 'int32',
}
def write_program(self, f, program):
for fun_decl in program.functions:
self.write_fun_decl(f, fun_decl)
f.write('\n')
self.write_fun_decl(f, program.main, main=True)
def write_fun_decl(self, f, fun_decl, main=False):
if fun_decl.return_type is None:
optional_result = ' -> int'
else:
type_name = self.type_names[fun_decl.return_type]
optional_result = ' -> %s' % type_name
fun_name = 'main' if main else fun_decl.name
f.write('fun %s()%s {\n' % (fun_name, optional_result))
self.indent += 1
for statement in fun_decl.statements:
self.write_statement(f, statement)
self.indent -= 1
f.write('}\n')
def write_var_decl(self, f, var_decl):
self.write_indent(f)
f.write('var ')
self.write_lval(f, var_decl.name)
f.write(': int32')
f.write(' = ')
self.write_expr(f, var_decl.expr)
f.write('\n')
def write_assignment(self, f, assignment):
self.write_indent(f)
self.write_lval(f, assignment.lval)
f.write(' = ')
self.write_expr(f, assignment.expr)
f.write('\n')
def write_return(self, f, statement):
self.write_indent(f)
f.write('return ')
self.write_expr(f, statement.expr)
f.write('\n')
def write_print(self, f, statement):
self.write_indent(f)
f.write('println("{}", ')
self.write_expr(f, statement.expr)
f.write(')\n')
#----------------------------------------------------------
seed = sys.argv[1]
num_funs = sys.argv[2]
random.seed(seed)
c = Context()
p = c.random_program(
num_funs=int(num_funs),
max_statements_per_fun=20)
langs = [
#CppLang(),
#CLang(),
#DLang(),
#GoLang(),
#PascalLang(),
#RustLang(),
TplLang(),
]
for lang in langs:
filename = 'test_%s_s%s_n%s.%s' % (lang.ext, seed, num_funs, lang.ext)
with open(filename, 'w') as f:
lang.write_program(f, p)
|
calvin/runtime/south/storage/twistedimpl/securedht/service_discovery_ssdp.py
|
gabrielcercel/calvin-base
| 334 |
74905
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import traceback
import platform
import random
import socket
import netifaces
from calvin.utilities import calvinlogger
from calvin.runtime.south.storage.twistedimpl.securedht.service_discovery import ServiceDiscoveryBase
#from calvin.runtime.south.storage.twistedimpl.securedht.security_discovery_exchange import Ca
from twisted.internet.protocol import DatagramProtocol
from twisted.web.http import datetimeToString
from twisted.internet import reactor, defer
_log = calvinlogger.get_logger(__name__)
SSDP_ADDR = '172.16.31.10'
SSDP_PORT = 1900
__version_info__ = (0, 6, 7)
__version__ = '.'.join(map(str, __version_info__))
SERVER_ID = ','.join([platform.system(),
platform.release(),
'UPnP/1.0,Calvin UPnP framework',
__version__])
SERVICE_UUID = '1693326a-abb9-11e4-8dfb-9cb654a16426'
CA_SERVICE_UUID = '58532fde-e793-11e5-965d-7cd1c3da1305'
MS_BOOTSTRAP = ('M-SEARCH * HTTP/1.1\r\nHOST: %s:%d\r\nMAN: "ssdp:discover"\r\n' +
'MX: 2\r\nST: uuid:%s\r\n\r\n') %\
(SSDP_ADDR, SSDP_PORT, SERVICE_UUID)
MS_CA = ('M-SEARCH * HTTP/1.1\r\nHOST: %s:%d\r\nMAN: "ssdp:discover"\r\n' +
'MX: 2\r\nST: uuid:%s\r\n\r\n') %\
(SSDP_ADDR, SSDP_PORT, CA_SERVICE_UUID)
MS = {SERVICE_UUID: MS_BOOTSTRAP, CA_SERVICE_UUID: MS_CA}
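# For illustration (added note): with the constants above, MS_BOOTSTRAP renders
# to the following SSDP search request (lines separated by \r\n):
#   M-SEARCH * HTTP/1.1
#   HOST: 172.16.31.10:1900
#   MAN: "ssdp:discover"
#   MX: 2
#   ST: uuid:1693326a-abb9-11e4-8dfb-9cb654a16426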
MS_BOOTSTRAP_RESP = 'HTTP/1.1 200 OK\r\n' + \
'USN: %s::upnp:rootdevice\r\n' % SERVICE_UUID + \
'SERVER: %s\r\n' + \
'last-seen: %s\r\n' + \
'EXT: \r\n' + \
'SERVICE: %s\r\n' + \
'LOCATION: %s\r\n' + \
'CACHE-CONTROL: max-age=1800\r\n' + \
'ST: uuid:%s\r\n' % SERVICE_UUID + \
'DATE: %s\r\n'
MS_CA_RESP = 'HTTP/1.1 200 OK\r\n' + \
'USN: %s::upnp:rootdevice\r\n' % CA_SERVICE_UUID + \
'SERVER: %s\r\n' + \
'last-seen: %s\r\n' + \
'EXT: \r\n' + \
'LOCATION: %s\r\n' + \
'CACHE-CONTROL: max-age=1800\r\n' + \
'ST: uuid:%s\r\n' % CA_SERVICE_UUID + \
'DATE: %s\r\n'
MS_RESP = {SERVICE_UUID: MS_BOOTSTRAP_RESP, CA_SERVICE_UUID: MS_CA_RESP}
def parse_http_response(data):
""" don't try to get the body, there are reponses without """
header = data.split('\r\n\r\n')[0]
lines = header.split('\r\n')
cmd = lines[0].split(' ')
lines = map(lambda x: x.replace(': ', ':', 1), lines[1:])
lines = filter(lambda x: len(x) > 0, lines)
headers = [x.split(':', 1) for x in lines]
headers = dict(map(lambda x: (x[0].lower(), x[1]), headers))
return cmd, headers
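# Worked example (added for illustration): for the datagram
# 'HTTP/1.1 200 OK\r\nST: uuid:1234\r\nMX: 2\r\n\r\n' the function returns
# cmd = ['HTTP/1.1', '200', 'OK'] and headers = {'st': 'uuid:1234', 'mx': '2'}.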
class ServerBase(DatagramProtocol):
def __init__(self, node_id, control_uri, ips, d=None):
_log.info("Serverbase::_init_: \n\tnode_id={}\n\tcontrol_uri={}\n\tips={}\n\tdserver={}".format(node_id, control_uri, ips, d))
self._services = {}
self._dstarted = d
self.ignore_list = []
self.ips = ips
self._msearches_resp = {sid: {} for sid in MS.keys()}
self._node_id = node_id
self._control_uri = control_uri
def startProtocol(self):
if self._dstarted:
reactor.callLater(0, self._dstarted.callback, True)
def datagramReceived(self, datagram, address):
# Broadcast
try:
cmd, headers = parse_http_response(datagram)
_log.debug("ServerBase::Received %s, %s from %r" % (cmd, headers, address, ))
if cmd[0] == 'M-SEARCH' and cmd[1] == '*':
_log.debug("Ignore list %s ignore %s" % (self.ignore_list, address not in self.ignore_list))
# Only reply to our requests
if SERVICE_UUID in headers['st'] and address not in self.ignore_list:
for k, addrs in self._services.items():
for addr in addrs:
# Only tell local about local
if addr[0] == "127.0.0.1" and address[0] != "127.0.0.1":
continue
response = MS_RESP[SERVICE_UUID] % ('%s:%d' % addr, str(time.time()),
k, self._control_uri + "/node/" + self._node_id, datetimeToString())
if "cert" in self._msearches_resp[SERVICE_UUID].keys():
response += "CERTIFICATE: {}\r\n\r\n".format(self._msearches_resp[SERVICE_UUID]["cert"])
_log.debug("ServerBase::Sending response: %s" % repr(response))
delay = random.randint(0, min(5, int(headers['mx'])))
reactor.callLater(delay, self.send_it,
response, address)
elif CA_SERVICE_UUID in headers['st'] and address not in self.ignore_list\
and self._msearches_resp[CA_SERVICE_UUID]["sign"]:
for k, addrs in self._services.items():
for addr in addrs:
# Only tell local about local
if addr[0] == "127.0.0.1" and address[0] != "127.0.0.1":
continue
try:
response = MS_RESP[CA_SERVICE_UUID] % (str(addr),
str(time.time()),
self._control_uri + "/node/" + self._node_id,
datetimeToString())
except Exception as err:
_log.error("Failed to create response, err={}".format(err))
raise
_log.debug("ServerBase::Sending response: %s" % repr(response))
delay = random.randint(0, min(5, int(headers['mx'])))
reactor.callLater(delay, self.send_it,
response, address)
except Exception as err:
_log.exception("SSDP search received, but failed handling, err={}".format(err))
def update_params(self, service_uuid, **kwargs):
self._msearches_resp[service_uuid].update(kwargs)
def add_service(self, service, ip, port):
# Service on all interfaces
if ip in ["0.0.0.0", ""]:
self._services[service] = []
for a in self.ips:
_log.debug("Add service %s, %s:%s" % (service, a, port))
self._services[service].append((a, port))
else:
_log.debug("Add service %s, %s:%s" % (service, ip, port))
self._services[service] = [(ip, port)]
def remove_service(self, service):
if service in self._services:
del self._services[service]
def set_ignore_list(self, list_):
self.ignore_list = list_
def send_it(self, response, destination):
try:
if self.transport:
self.transport.write(response, destination)
else:
_log.debug("No transport yet!")
except (AttributeError, socket.error), msg:
_log.exception("Error in send %s" % repr(msg))
def stop(self):
pass
class ClientBase(DatagramProtocol):
def __init__(self, dclient=None):
self._dstarted = dclient
self._service = None
self._msearches = {sid: {'cb': None, 'stopped': False, 'stop': False} for sid in MS.keys()}
def startProtocol(self):
if self._dstarted:
reactor.callLater(0, self._dstarted.callback, True)
def datagramReceived(self, datagram, address):
# Broadcast
cmd, headers = parse_http_response(datagram)
_log.debug("ClientBase::Received %s, %s from %r" % (cmd, headers, address, ))
if cmd[0].startswith('HTTP/1.') and cmd[1] == '200':
if SERVICE_UUID in headers['st']:
c_address = headers['server'].split(':')
c_address[1] = int(c_address[1])
try:
cert = headers['certificate'].split(':')
c_address.extend(cert)
except KeyError:
pass
# Filter on service calvin networks
if self._service is None or self._service == headers['service']:
_log.debug("ClientBase::Received service %s from %s" %
(headers['service'], c_address, ))
if c_address:
if self._msearches[SERVICE_UUID]['cb']:
self._msearches[SERVICE_UUID]['cb']([tuple(c_address)])
if self._msearches[SERVICE_UUID]['stop']:
self.stop(SERVICE_UUID)
elif CA_SERVICE_UUID in headers['st']:
_log.error("Deprecated")
# c_address = headers['server'].split(':')
# c_address[1] = int(c_address[1])
# try:
# cert = headers['certificate']
# c_address.append(cert)
# except KeyError:
# pass
# # FIXME do we need service filtering for signed certificates
# if c_address and not self.is_stopped(CA_SERVICE_UUID):
# _log.debug("Signed Cert %s" % c_address)
# _log.debug("CA search data: %s" % self._msearches[CA_SERVICE_UUID])
# if self._msearches[CA_SERVICE_UUID]['cb']:
# self._msearches[CA_SERVICE_UUID]['cb'](tuple(c_address))
# if self._msearches[CA_SERVICE_UUID]['stop']:
# self.stop(CA_SERVICE_UUID)
def set_callback(self, service_uuid, callback):
self._msearches[service_uuid]['cb'] = callback
def set_service(self, service):
self._service = service
def is_stopped(self, service_uuid):
return self._msearches[service_uuid]['stopped']
def set_autostop(self, service_uuid, stop=True):
self._msearches[service_uuid]['stop'] = stop
def stop(self, service_uuid):
self._msearches[service_uuid]['stopped'] = True
class SSDPServiceDiscovery(ServiceDiscoveryBase):
def __init__(self, node_id, control_uri, iface='', ignore_self=True):
super(SSDPServiceDiscovery, self).__init__()
self.ignore_self = ignore_self
self.iface = '' #iface
self.ssdp = None
self.port = None
self.searches = {}
self.iface_send_list = []
self._node_id = node_id
self._control_uri = control_uri
if self.iface in ["0.0.0.0", ""]:
for a in netifaces.interfaces():
addrs = netifaces.ifaddresses(a)
# Ipv4 for now
if netifaces.AF_INET in addrs:
for a in addrs[netifaces.AF_INET]:
self.iface_send_list.append(a['addr'])
else:
self.iface_send_list.append(iface)
def start(self):
dserver = defer.Deferred()
dclient = defer.Deferred()
try:
self.ssdp = reactor.listenMulticast(SSDP_PORT,
ServerBase(self._node_id,
self._control_uri,
self.iface_send_list,
d=dserver
),
listenMultiple=True)
self.ssdp.setTTL(5)
for iface_ in self.iface_send_list:
d = self.ssdp.joinGroup(SSDP_ADDR, interface=iface_)
d.addErrback(lambda x: _log.error("Failed to join multicast group %s:%s, %s", iface_, SSDP_PORT, x))
d.addCallback(lambda x: _log.debug("Joined multicast group %s:%s, %s", iface_, SSDP_PORT, x))
except:
_log.exception("Multicast listen join failed!!")
        # Don't start the server if someone is already running locally
# TODO: Do we need this ?
self.port = reactor.listenMulticast(0, ClientBase(dclient=dclient), interface=self.iface)
_log.debug("SSDP Host: %s" % repr(self.port.getHost()))
# Set ignore port and ips
if self.ssdp and self.ignore_self:
self.ssdp.protocol.set_ignore_list([(x, self.port.getHost().port) for x in self.iface_send_list])
return dserver, dclient
def update_server_params(self, service_uuid, **kwargs):
self.ssdp.protocol.update_params(service_uuid, **kwargs)
def start_search(self, service_uuid, **kwargs):
callback = kwargs.pop('callback', None)
stop = kwargs.pop('stop', False)
# Restart backoff
self.searches.setdefault(service_uuid, {})["backoff"] = .2
def local_start_msearch():
self.port.protocol.set_callback(service_uuid, callback)
self.port.protocol.set_autostop(service_uuid, stop)
self._send_msearch(service_uuid, once=False, kwargs=kwargs)
reactor.callLater(0, local_start_msearch)
def stop_all_search(self):
for service_uuid in MS.keys():
self.port.protocol.set_callback(service_uuid, None)
self.port.protocol.stop(service_uuid)
def stop_search(self, service_uuid):
_log.debug("Stop search of %s" % service_uuid)
self.port.protocol.set_callback(service_uuid, None)
self.port.protocol.stop(service_uuid)
def set_client_filter(self, service):
self.port.protocol.set_service(service)
def register_service(self, service, ip, port):
self.ssdp.protocol.add_service(service, ip, port)
def unregister_service(self, service):
self.ssdp.protocol.remove_service(service)
def _send_msearch(self, service_uuid, once=True, kwargs=None):
if kwargs is None:
kwargs={}
if self.port and not self.port.protocol.is_stopped(service_uuid):
for src_ip in self.iface_send_list:
self.port.protocol.transport.setOutgoingInterface(src_ip)
_log.debug("Sending M-SEARCH... on %s\n%s" % (src_ip, MS[service_uuid].format(**kwargs)))
self.port.write(MS[service_uuid].format(**kwargs), (SSDP_ADDR, SSDP_PORT))
if not once and not self.port.protocol.is_stopped(service_uuid):
reactor.callLater(self.searches[service_uuid]["backoff"], self._send_msearch,
service_uuid, once=False, kwargs=kwargs)
_log.debug("Next M-SEARCH in %s seconds" % self.searches[service_uuid]["backoff"])
self.searches[service_uuid]["backoff"] = min(600, self.searches[service_uuid]["backoff"] * 1.5)
def search(self, service_uuid, callback, **kwargs):
self.port.protocol.set_callback(service_uuid, callback)
self._send_msearch(service_uuid, once=True, kwargs=kwargs)
def stop(self):
dlist = []
if self.ssdp:
dlist.append(self.ssdp.leaveGroup(SSDP_ADDR, interface=self.iface))
dlist.append(self.ssdp.stopListening())
self.ssdp = None
if self.port:
self.stop_all_search()
dlist.append(self.port.stopListening())
self.port = None
return defer.DeferredList(dlist)
|
third_party/WebKit/Source/modules/bluetooth/testing/clusterfuzz/fuzz_integration_test.py
|
google-ar/chromium
| 2,151 |
74910
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test that the fuzzer works the way ClusterFuzz invokes it."""
import glob
import os
import shutil
import sys
import tempfile
import unittest
import setup
class WebBluetoothFuzzerTest(unittest.TestCase):
def setUp(self):
self._output_dir = tempfile.mkdtemp()
self._resources_path = setup.RetrieveResources()
def tearDown(self):
shutil.rmtree(self._output_dir)
shutil.rmtree(self._resources_path)
def testCanGenerate100Files(self):
sys.argv = ['fuzz_main_run.py', '--no_of_files=100',
'--input_dir={}'.format(self._output_dir),
'--output_dir={}'.format(self._output_dir)]
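# Import only after sys.argv has been replaced so fuzz_main_run sees the
# fake command line set up above.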
import fuzz_main_run
fuzz_main_run.main()
written_files = glob.glob(os.path.join(self._output_dir, '*.html'))
self.assertEqual(100, len(written_files), 'Should have written 100 '
'test files.')
for test_case in written_files:
self.assertFalse('TRANSFORM' in open(test_case).read())
if __name__ == '__main__':
unittest.main()
|
terrascript/data/cloudsmith_io/cloudsmith.py
|
mjuenema/python-terrascript
| 507 |
74930
|
# terrascript/data/cloudsmith-io/cloudsmith.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:14:19 UTC)
import terrascript
class cloudsmith_namespace(terrascript.Data):
pass
class cloudsmith_package_list(terrascript.Data):
pass
class cloudsmith_repository(terrascript.Data):
pass
__all__ = [
"cloudsmith_namespace",
"cloudsmith_package_list",
"cloudsmith_repository",
]
|
cloudbaseinit/tests/utils/windows/test_netlbfo.py
|
aleskxyz/cloudbase-init
| 160 |
74932
|
# Copyright (c) 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import exception
from cloudbaseinit.models import network as network_model
MODPATH = "cloudbaseinit.utils.windows.netlbfo"
class NetLBFOTest(unittest.TestCase):
def setUp(self):
self._wmi_mock = mock.MagicMock()
self._mi_mock = mock.MagicMock()
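# Patch the Windows-only 'wmi' and 'mi' modules into sys.modules with mocks
# so that the module under test can be imported on any platform.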
self._module_patcher = mock.patch.dict(
'sys.modules', {
'wmi': self._wmi_mock,
'mi': self._mi_mock})
self._module_patcher.start()
self._netlbfo = importlib.import_module(MODPATH)
def tearDown(self):
self._module_patcher.stop()
@mock.patch('time.sleep')
@mock.patch(MODPATH + '.NetLBFOTeamManager._get_primary_adapter_name')
@mock.patch(MODPATH + '.NetLBFOTeamManager._create_team')
@mock.patch(MODPATH + '.NetLBFOTeamManager._add_team_member')
@mock.patch(MODPATH + '.NetLBFOTeamManager._set_primary_nic_vlan_id')
@mock.patch(MODPATH + '.NetLBFOTeamManager._wait_for_nic')
@mock.patch(MODPATH + '.NetLBFOTeamManager.delete_team')
def _test_create_team(self, mock_delete_team, mock_wait_for_nic,
mock_set_primary_nic_vlan_id, mock_add_team_member,
mock_create_team, mock_primary_adapter_name,
mock_time_sleep, mode_not_found=False,
lb_algo_not_found=False,
add_team_member_fail=False):
mock_primary_adapter_name.return_value = mock.sentinel.pri_nic_name
mock_create_team.return_value = None
lacp_timer = network_model.BOND_LACP_RATE_FAST
members = [mock.sentinel.pri_nic_name, mock.sentinel.other_member]
conn = self._wmi_mock.WMI.return_value
mock_team = mock.Mock()
conn.MSFT_NetLbfoTeam.new.return_value = mock_team
mock_team_nic = mock.Mock()
mock_team_nic.Name = mock.Mock()
conn.MSFT_NetLbfoTeamNic.return_value = [mock_team_nic]
if mode_not_found:
mode = "fake mode"
else:
mode = network_model.BOND_TYPE_8023AD
if lb_algo_not_found:
lb_algo = "fake lb algo"
else:
lb_algo = network_model.BOND_LB_ALGO_L2
if add_team_member_fail:
ex = exception.CloudbaseInitException
mock_add_team_member.side_effect = ex
if mode_not_found or lb_algo_not_found:
self.assertRaises(
exception.ItemNotFoundException,
self._netlbfo.NetLBFOTeamManager().create_team,
mock.sentinel.team_name, mode, lb_algo, members,
mock.sentinel.mac, mock.sentinel.pri_nic_name,
mock.sentinel.vlan_id, lacp_timer)
return
elif add_team_member_fail:
self.assertRaises(
exception.CloudbaseInitException,
self._netlbfo.NetLBFOTeamManager().create_team,
mock.sentinel.team_name, mode, lb_algo, members,
mock.sentinel.mac, mock.sentinel.pri_nic_name,
mock.sentinel.vlan_id, lacp_timer)
else:
self._netlbfo.NetLBFOTeamManager().create_team(
mock.sentinel.team_name, mode, lb_algo, members,
mock.sentinel.mac, mock.sentinel.pri_nic_name,
mock.sentinel.vlan_id, lacp_timer)
if not add_team_member_fail:
mock_set_primary_nic_vlan_id.assert_called_once_with(
conn, mock.sentinel.team_name, mock.sentinel.vlan_id)
mock_create_team.assert_called_once_with(
conn, mock.sentinel.team_name, mock.sentinel.pri_nic_name,
2, 3, mock.sentinel.pri_nic_name, 1)
mock_wait_for_nic.assert_called_once_with(
mock_team_nic.Name)
mock_add_team_member.assert_called_once_with(
conn, mock.sentinel.team_name, mock.sentinel.other_member)
else:
mock_add_team_member.assert_called_with(
conn, mock.sentinel.team_name, mock.sentinel.other_member)
mock_delete_team.assert_called_with(mock.sentinel.team_name)
self.assertEqual(mock_add_team_member.call_count, 6)
self.assertEqual(mock_delete_team.call_count, 6)
def test_create_team(self):
self._test_create_team()
def test_create_team_mode_not_found(self):
self._test_create_team(mode_not_found=True)
def test_create_team_mode_lb_algo_not_found(self):
self._test_create_team(lb_algo_not_found=True)
def test_create_team_add_team_member_fail(self):
self._test_create_team(add_team_member_fail=True)
def test_delete_team(self):
conn = self._wmi_mock.WMI.return_value
mock_team = mock.Mock()
conn.MSFT_NetLbfoTeam.return_value = [mock_team]
self._netlbfo.NetLBFOTeamManager().delete_team(mock.sentinel.team_name)
conn.MSFT_NetLbfoTeam.assert_called_once_with(
name=mock.sentinel.team_name)
mock_team.Delete_.assert_called_once_with()
def test_create_team_private(self):
conn = self._wmi_mock.WMI.return_value
mock_team = mock.Mock()
conn.MSFT_NetLbfoTeam.new.return_value = mock_team
teaming_mode = 1
lb_algo = 2
lacp_timer = 1
custom_options = [
{
u'name': u'TeamMembers',
u'value_type':
self._mi_mock.MI_ARRAY | self._mi_mock.MI_STRING,
u'value': [mock.sentinel.private_nic_team]
},
{
u'name': u'TeamNicName',
u'value_type': self._mi_mock.MI_STRING,
u'value': mock.sentinel.team_nic_name
}
]
operation_options = {u'custom_options': custom_options}
self._netlbfo.NetLBFOTeamManager()._create_team(
conn, mock.sentinel.team_name, mock.sentinel.team_nic_name,
teaming_mode, lb_algo, mock.sentinel.private_nic_team,
lacp_timer)
self.assertEqual(mock.sentinel.team_name, mock_team.Name)
self.assertEqual(teaming_mode, mock_team.TeamingMode)
self.assertEqual(lb_algo, mock_team.LoadBalancingAlgorithm)
self.assertEqual(lacp_timer, mock_team.LacpTimer)
mock_team.put.assert_called_once_with(
operation_options=operation_options)
@mock.patch(MODPATH + '.NetLBFOTeamManager._wait_for_nic')
def test_add_team_nic(self, mock_wait_for_nic):
conn = self._wmi_mock.WMI.return_value
mock_team_nic = mock.Mock()
conn.MSFT_NetLbfoTeamNIC.new.return_value = mock_team_nic
self._netlbfo.NetLBFOTeamManager().add_team_nic(
mock.sentinel.team_name, mock.sentinel.nic_name,
mock.sentinel.vlan_id)
self.assertEqual(mock.sentinel.team_name, mock_team_nic.Team)
self.assertEqual(mock.sentinel.nic_name, mock_team_nic.Name)
self.assertEqual(mock.sentinel.vlan_id, mock_team_nic.VlanID)
mock_team_nic.put.assert_called_once_with()
mock_wait_for_nic.assert_called_once_with(mock_team_nic.Name)
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def test_is_available(self, mock_get_os_utils):
os_utils = mock_get_os_utils.return_value
os_utils.check_os_version.return_value = True
os_utils.is_client_os.return_value = False
with mock.patch('sys.platform', 'win32'):
self.assertEqual(
True, self._netlbfo.NetLBFOTeamManager.is_available())
@mock.patch('time.sleep')
def test_wait_for_nic(self, mock_sleep):
conn = self._wmi_mock.WMI.return_value
conn.Win32_NetworkAdapter.side_effect = [
[], [mock.sentinel.net_adapter]]
self._netlbfo.NetLBFOTeamManager()._wait_for_nic(
mock.sentinel.nic_name)
conn.Win32_NetworkAdapter.assert_has_calls([
mock.call(NetConnectionID=mock.sentinel.nic_name),
mock.call(NetConnectionID=mock.sentinel.nic_name)])
mock_sleep.assert_called_once_with(1)
def test_set_primary_nic_vlan_id(self):
conn = mock.Mock()
mock_team_nic = mock.Mock()
conn.MSFT_NetLbfoTeamNIC.return_value = [mock_team_nic]
self._netlbfo.NetLBFOTeamManager()._set_primary_nic_vlan_id(
conn, mock.sentinel.team_name, mock.sentinel.vlan_id)
custom_options = [{
u'name': u'VlanID',
u'value_type': self._mi_mock.MI_UINT32,
u'value': mock.sentinel.vlan_id
}]
operation_options = {u'custom_options': custom_options}
mock_team_nic.put.assert_called_once_with(
operation_options=operation_options)
def test_add_team_member(self):
conn = mock.Mock()
mock_team_member = mock.Mock()
conn.MSFT_NetLbfoTeamMember.new.return_value = mock_team_member
self._netlbfo.NetLBFOTeamManager()._add_team_member(
conn, mock.sentinel.team_name, mock.sentinel.team_member)
custom_options = [{
u'name': u'Name',
u'value_type': self._mi_mock.MI_STRING,
u'value': mock.sentinel.team_member
}]
operation_options = {u'custom_options': custom_options}
mock_team_member.put.assert_called_once_with(
operation_options=operation_options)
self.assertEqual(mock.sentinel.team_name, mock_team_member.Team)
def _test_get_primary_adapter_name(self, mac_not_found=False,
member_not_found=False):
mock_members = [mock.sentinel.team_member]
conn = self._wmi_mock.WMI.return_value
if mac_not_found:
conn.Win32_NetworkAdapter.return_value = []
else:
conn.Win32_NetworkAdapter.return_value = [
mock.sentinel.net_adapter]
if member_not_found:
net_conn_id = mock.sentinel.something_else
else:
net_conn_id = mock.sentinel.team_member
mock.sentinel.net_adapter.NetConnectionID = net_conn_id
if mac_not_found or member_not_found:
self.assertRaises(
exception.ItemNotFoundException,
self._netlbfo.NetLBFOTeamManager()._get_primary_adapter_name,
mock_members, mock.sentinel.mac)
else:
self.assertEqual(
mock.sentinel.team_member,
self._netlbfo.NetLBFOTeamManager()._get_primary_adapter_name(
mock_members, mock.sentinel.mac))
conn.Win32_NetworkAdapter.assert_called_once_with(
MACAddress=mock.sentinel.mac)
def test_get_primary_adapter_name(self):
self._test_get_primary_adapter_name()
def test_get_primary_adapter_name_mac_not_found(self):
self._test_get_primary_adapter_name(mac_not_found=True)
def test_get_primary_adapter_name_member_not_found(self):
self._test_get_primary_adapter_name(member_not_found=True)
|
test/integration/epacems_test.py
|
catalyst-cooperative/pudl
| 285 |
74945
|
"""tests for pudl/output/epacems.py loading functions."""
from pathlib import Path
import dask.dataframe as dd
import pytest
from pudl.output.epacems import epacems
@pytest.fixture(scope='module')
def epacems_year_and_state(etl_params):
"""Find the year and state defined in pudl/package_data/settings/etl_*.yml."""
# the etl_params data structure alternates dicts and lists so indexing is a pain.
epacems = [item for item in etl_params['datapkg_bundle_settings']
[0]['datasets'] if 'epacems' in item.keys()]
epacems = epacems[0]['epacems']
return {'years': epacems['epacems_years'], 'states': epacems['epacems_states']}
@pytest.fixture(scope='session')
def epacems_parquet_path(
pudl_settings_fixture,
pudl_engine, # implicit dependency; ensures .parquet files exist
):
"""Get path to the directory of EPA CEMS .parquet data."""
out_dir = Path(pudl_settings_fixture['parquet_dir'], 'epacems')
return out_dir
def test_epacems_subset(epacems_year_and_state, epacems_parquet_path):
"""Minimal integration test of epacems(). Check if it returns a DataFrame."""
path = epacems_parquet_path
years = epacems_year_and_state['years']
# Use only Idaho if multiple states are given
states = epacems_year_and_state['states'] if len(
epacems_year_and_state['states']) == 1 else ['ID']
actual = epacems(columns=["gross_load_mw"],
epacems_path=path,
years=years,
states=states)
assert isinstance(actual, dd.DataFrame)
assert actual.shape[0].compute() > 0 # n rows
def test_epacems_subset_input_validation(epacems_year_and_state, epacems_parquet_path):
"""Check if invalid inputs raise exceptions."""
path = epacems_parquet_path
valid_year = epacems_year_and_state['years'][-1]
valid_state = epacems_year_and_state['states'][-1]
valid_column = "gross_load_mw"
invalid_state = 'confederacy'
invalid_year = 1775
invalid_column = 'clean_coal'
combos = [
dict(
years=[valid_year],
states=[valid_state],
columns=[invalid_column],
),
dict(
years=[valid_year],
states=[invalid_state],
columns=[valid_column],
),
dict(
years=[invalid_year],
states=[valid_state],
columns=[valid_column],
),
]
for combo in combos:
with pytest.raises(ValueError):
epacems(epacems_path=path, **combo)
|
examples/pxScene2d/external/libnode-v0.12.7/deps/v8/tools/ll_prof.py
|
madanagopaltcomcast/pxCore
| 2,494 |
74950
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bisect
import collections
import ctypes
import disasm
import mmap
import optparse
import os
import re
import subprocess
import sys
import time
USAGE="""usage: %prog [OPTION]...
Analyses V8 and perf logs to produce profiles.
Perf logs can be collected using a command like:
$ perf record -R -e cycles -c 10000 -f -i ./d8 bench.js --ll-prof
# -R: collect all data
# -e cycles: use cpu-cycles event (run "perf list" for details)
# -c 10000: write a sample after each 10000 events
# -f: force output file overwrite
# -i: limit profiling to our process and the kernel
# --ll-prof shell flag enables the right V8 logs
This will produce a binary trace file (perf.data) that %prog can analyse.
IMPORTANT:
The kernel has an internal maximum for events per second; it is 100K by
default. That's not enough for "-c 10000". Set it to some higher value:
$ echo 10000000 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate
You can also make the warning about kernel address maps go away:
$ echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
We have a convenience script that handles all of the above for you:
$ tools/run-llprof.sh ./d8 bench.js
Examples:
# Print flat profile with annotated disassembly for the 10 top
# symbols. Use default log names and include the snapshot log.
$ %prog --snapshot --disasm-top=10
# Print flat profile with annotated disassembly for all used symbols.
# Use default log names and include kernel symbols into analysis.
$ %prog --disasm-all --kernel
# Print flat profile. Use custom log names.
$ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
"""
JS_ORIGIN = "js"
JS_SNAPSHOT_ORIGIN = "js-snapshot"
class Code(object):
"""Code object."""
_id = 0
UNKNOWN = 0
V8INTERNAL = 1
FULL_CODEGEN = 2
OPTIMIZED = 3
def __init__(self, name, start_address, end_address, origin, origin_offset):
self.id = Code._id
Code._id += 1
self.name = name
self.other_names = None
self.start_address = start_address
self.end_address = end_address
self.origin = origin
self.origin_offset = origin_offset
self.self_ticks = 0
self.self_ticks_map = None
self.callee_ticks = None
if name.startswith("LazyCompile:*"):
self.codetype = Code.OPTIMIZED
elif name.startswith("LazyCompile:"):
self.codetype = Code.FULL_CODEGEN
elif name.startswith("v8::internal::"):
self.codetype = Code.V8INTERNAL
else:
self.codetype = Code.UNKNOWN
def AddName(self, name):
assert self.name != name
if self.other_names is None:
self.other_names = [name]
return
if not name in self.other_names:
self.other_names.append(name)
def FullName(self):
if self.other_names is None:
return self.name
self.other_names.sort()
return "%s (aka %s)" % (self.name, ", ".join(self.other_names))
def IsUsed(self):
return self.self_ticks > 0 or self.callee_ticks is not None
def Tick(self, pc):
self.self_ticks += 1
if self.self_ticks_map is None:
self.self_ticks_map = collections.defaultdict(lambda: 0)
offset = pc - self.start_address
self.self_ticks_map[offset] += 1
def CalleeTick(self, callee):
if self.callee_ticks is None:
self.callee_ticks = collections.defaultdict(lambda: 0)
self.callee_ticks[callee] += 1
def PrintAnnotated(self, arch, options):
if self.self_ticks_map is None:
ticks_map = []
else:
ticks_map = self.self_ticks_map.items()
# Convert the ticks map to offsets and counts arrays so that later
# we can do binary search in the offsets array.
ticks_map.sort(key=lambda t: t[0])
ticks_offsets = [t[0] for t in ticks_map]
ticks_counts = [t[1] for t in ticks_map]
# Get a list of disassembled lines and their addresses.
lines = self._GetDisasmLines(arch, options)
if len(lines) == 0:
return
# Print annotated lines.
address = lines[0][0]
total_count = 0
for i in xrange(len(lines)):
start_offset = lines[i][0] - address
if i == len(lines) - 1:
end_offset = self.end_address - self.start_address
else:
end_offset = lines[i + 1][0] - address
# Ticks (reported pc values) are not always precise, i.e. not
# necessarily point at instruction starts. So we have to search
# for ticks that touch the current instruction line.
j = bisect.bisect_left(ticks_offsets, end_offset)
count = 0
for offset, cnt in reversed(zip(ticks_offsets[:j], ticks_counts[:j])):
if offset < start_offset:
break
count += cnt
total_count += count
count = 100.0 * count / self.self_ticks
if count >= 0.01:
print "%15.2f %x: %s" % (count, lines[i][0], lines[i][1])
else:
print "%s %x: %s" % (" " * 15, lines[i][0], lines[i][1])
print
assert total_count == self.self_ticks, \
"Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
def __str__(self):
return "%s [0x%x, 0x%x) size: %d origin: %s" % (
self.name,
self.start_address,
self.end_address,
self.end_address - self.start_address,
self.origin)
def _GetDisasmLines(self, arch, options):
if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
inplace = False
filename = options.log + ".ll"
else:
inplace = True
filename = self.origin
return disasm.GetDisasmLines(filename,
self.origin_offset,
self.end_address - self.start_address,
arch,
inplace)
class CodePage(object):
"""Group of adjacent code objects."""
SHIFT = 20 # 1M pages
SIZE = (1 << SHIFT)
MASK = ~(SIZE - 1)
@staticmethod
def PageAddress(address):
return address & CodePage.MASK
@staticmethod
def PageId(address):
return address >> CodePage.SHIFT
@staticmethod
def PageAddressFromId(id):
return id << CodePage.SHIFT
def __init__(self, address):
self.address = address
self.code_objects = []
def Add(self, code):
self.code_objects.append(code)
def Remove(self, code):
self.code_objects.remove(code)
def Find(self, pc):
code_objects = self.code_objects
for i, code in enumerate(code_objects):
if code.start_address <= pc < code.end_address:
code_objects[0], code_objects[i] = code, code_objects[0]
return code
return None
def __iter__(self):
return self.code_objects.__iter__()
class CodeMap(object):
"""Code object map."""
def __init__(self):
self.pages = {}
self.min_address = 1 << 64
self.max_address = -1
def Add(self, code, max_pages=-1):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
pages = 0
while page_id < limit_id:
if max_pages >= 0 and pages > max_pages:
print >>sys.stderr, \
"Warning: page limit (%d) reached for %s [%s]" % (
max_pages, code.name, code.origin)
break
if page_id in self.pages:
page = self.pages[page_id]
else:
page = CodePage(CodePage.PageAddressFromId(page_id))
self.pages[page_id] = page
page.Add(code)
page_id += 1
pages += 1
self.min_address = min(self.min_address, code.start_address)
self.max_address = max(self.max_address, code.end_address)
def Remove(self, code):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
removed = False
while page_id < limit_id:
if page_id not in self.pages:
page_id += 1
continue
page = self.pages[page_id]
page.Remove(code)
removed = True
page_id += 1
return removed
def AllCode(self):
for page in self.pages.itervalues():
for code in page:
if CodePage.PageAddress(code.start_address) == page.address:
yield code
def UsedCode(self):
for code in self.AllCode():
if code.IsUsed():
yield code
def Print(self):
for code in self.AllCode():
print code
def Find(self, pc):
if pc < self.min_address or pc >= self.max_address:
return None
page_id = CodePage.PageId(pc)
if page_id not in self.pages:
return None
return self.pages[page_id].Find(pc)
class CodeInfo(object):
"""Generic info about generated code objects."""
def __init__(self, arch, header_size):
self.arch = arch
self.header_size = header_size
class SnapshotLogReader(object):
"""V8 snapshot log reader."""
_SNAPSHOT_CODE_NAME_RE = re.compile(
r"snapshot-code-name,(\d+),\"(.*)\"")
def __init__(self, log_name):
self.log_name = log_name
def ReadNameMap(self):
log = open(self.log_name, "r")
try:
snapshot_pos_to_name = {}
for line in log:
match = SnapshotLogReader._SNAPSHOT_CODE_NAME_RE.match(line)
if match:
pos = int(match.group(1))
name = match.group(2)
snapshot_pos_to_name[pos] = name
finally:
log.close()
return snapshot_pos_to_name
class LogReader(object):
"""V8 low-level (binary) log reader."""
_ARCH_TO_POINTER_TYPE_MAP = {
"ia32": ctypes.c_uint32,
"arm": ctypes.c_uint32,
"mips": ctypes.c_uint32,
"x64": ctypes.c_uint64,
"arm64": ctypes.c_uint64
}
_CODE_CREATE_TAG = "C"
_CODE_MOVE_TAG = "M"
_CODE_DELETE_TAG = "D"
_SNAPSHOT_POSITION_TAG = "P"
_CODE_MOVING_GC_TAG = "G"
def __init__(self, log_name, code_map, snapshot_pos_to_name):
self.log_file = open(log_name, "r")
self.log = mmap.mmap(self.log_file.fileno(), 0, mmap.MAP_PRIVATE)
self.log_pos = 0
self.code_map = code_map
self.snapshot_pos_to_name = snapshot_pos_to_name
self.address_to_snapshot_name = {}
self.arch = self.log[:self.log.find("\0")]
self.log_pos += len(self.arch) + 1
assert self.arch in LogReader._ARCH_TO_POINTER_TYPE_MAP, \
"Unsupported architecture %s" % self.arch
pointer_type = LogReader._ARCH_TO_POINTER_TYPE_MAP[self.arch]
self.code_create_struct = LogReader._DefineStruct([
("name_size", ctypes.c_int32),
("code_address", pointer_type),
("code_size", ctypes.c_int32)])
self.code_move_struct = LogReader._DefineStruct([
("from_address", pointer_type),
("to_address", pointer_type)])
self.code_delete_struct = LogReader._DefineStruct([
("address", pointer_type)])
self.snapshot_position_struct = LogReader._DefineStruct([
("address", pointer_type),
("position", ctypes.c_int32)])
def ReadUpToGC(self):
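# Replay code create/move/delete and snapshot-position events into the code
# map, stopping (and clearing the snapshot-name cache) at a code-moving GC tag.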
while self.log_pos < self.log.size():
tag = self.log[self.log_pos]
self.log_pos += 1
if tag == LogReader._CODE_MOVING_GC_TAG:
self.address_to_snapshot_name.clear()
return
if tag == LogReader._CODE_CREATE_TAG:
event = self.code_create_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
start_address = event.code_address
end_address = start_address + event.code_size
if start_address in self.address_to_snapshot_name:
name = self.address_to_snapshot_name[start_address]
origin = JS_SNAPSHOT_ORIGIN
else:
name = self.log[self.log_pos:self.log_pos + event.name_size]
origin = JS_ORIGIN
self.log_pos += event.name_size
origin_offset = self.log_pos
self.log_pos += event.code_size
code = Code(name, start_address, end_address, origin, origin_offset)
conflicting_code = self.code_map.Find(start_address)
if conflicting_code:
if not (conflicting_code.start_address == code.start_address and
conflicting_code.end_address == code.end_address):
self.code_map.Remove(conflicting_code)
else:
LogReader._HandleCodeConflict(conflicting_code, code)
# TODO(vitalyr): this warning is too noisy because of our
# attempts to reconstruct code log from the snapshot.
# print >>sys.stderr, \
# "Warning: Skipping duplicate code log entry %s" % code
continue
self.code_map.Add(code)
continue
if tag == LogReader._CODE_MOVE_TAG:
event = self.code_move_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
old_start_address = event.from_address
new_start_address = event.to_address
if old_start_address == new_start_address:
# Skip useless code move entries.
continue
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact move address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
size = code.end_address - code.start_address
code.start_address = new_start_address
code.end_address = new_start_address + size
self.code_map.Add(code)
continue
if tag == LogReader._CODE_DELETE_TAG:
event = self.code_delete_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
old_start_address = event.address
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact delete address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
continue
if tag == LogReader._SNAPSHOT_POSITION_TAG:
event = self.snapshot_position_struct.from_buffer(self.log,
self.log_pos)
self.log_pos += ctypes.sizeof(event)
start_address = event.address
snapshot_pos = event.position
if snapshot_pos in self.snapshot_pos_to_name:
self.address_to_snapshot_name[start_address] = \
self.snapshot_pos_to_name[snapshot_pos]
continue
assert False, "Unknown tag %s" % tag
def Dispose(self):
self.log.close()
self.log_file.close()
@staticmethod
def _DefineStruct(fields):
class Struct(ctypes.Structure):
_fields_ = fields
return Struct
@staticmethod
def _HandleCodeConflict(old_code, new_code):
assert (old_code.start_address == new_code.start_address and
old_code.end_address == new_code.end_address), \
"Conficting code log entries %s and %s" % (old_code, new_code)
if old_code.name == new_code.name:
return
# Code object may be shared by a few functions. Collect the full
# set of names.
old_code.AddName(new_code.name)
class Descriptor(object):
"""Descriptor of a structure in the binary trace log."""
CTYPE_MAP = {
"u16": ctypes.c_uint16,
"u32": ctypes.c_uint32,
"u64": ctypes.c_uint64
}
def __init__(self, fields):
class TraceItem(ctypes.Structure):
_fields_ = Descriptor.CtypesFields(fields)
def __str__(self):
return ", ".join("%s: %s" % (field, self.__getattribute__(field))
for field, _ in TraceItem._fields_)
self.ctype = TraceItem
def Read(self, trace, offset):
return self.ctype.from_buffer(trace, offset)
@staticmethod
def CtypesFields(fields):
return [(field, Descriptor.CTYPE_MAP[format]) for (field, format) in fields]
# Please see http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=tree;f=tools/perf
# for the gory details.
# Reference: struct perf_file_header in kernel/tools/perf/util/header.h
TRACE_HEADER_DESC = Descriptor([
("magic", "u64"),
("size", "u64"),
("attr_size", "u64"),
("attrs_offset", "u64"),
("attrs_size", "u64"),
("data_offset", "u64"),
("data_size", "u64"),
("event_types_offset", "u64"),
("event_types_size", "u64")
])
# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_ATTR_DESC = Descriptor([
("type", "u32"),
("size", "u32"),
("config", "u64"),
("sample_period_or_freq", "u64"),
("sample_type", "u64"),
("read_format", "u64"),
("flags", "u64"),
("wakeup_events_or_watermark", "u32"),
("bp_type", "u32"),
("bp_addr", "u64"),
("bp_len", "u64")
])
# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_HEADER_DESC = Descriptor([
("type", "u32"),
("misc", "u16"),
("size", "u16")
])
# Reference: kernel/events/core.c
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
("addr", "u64"),
("len", "u64"),
("pgoff", "u64")
])
# perf_event_attr.sample_type bits control the set of
# perf_sample_event fields.
PERF_SAMPLE_IP = 1 << 0
PERF_SAMPLE_TID = 1 << 1
PERF_SAMPLE_TIME = 1 << 2
PERF_SAMPLE_ADDR = 1 << 3
PERF_SAMPLE_READ = 1 << 4
PERF_SAMPLE_CALLCHAIN = 1 << 5
PERF_SAMPLE_ID = 1 << 6
PERF_SAMPLE_CPU = 1 << 7
PERF_SAMPLE_PERIOD = 1 << 8
PERF_SAMPLE_STREAM_ID = 1 << 9
PERF_SAMPLE_RAW = 1 << 10
# Reference: /usr/include/perf_event.h, the comment for PERF_RECORD_SAMPLE.
PERF_SAMPLE_EVENT_BODY_FIELDS = [
("ip", "u64", PERF_SAMPLE_IP),
("pid", "u32", PERF_SAMPLE_TID),
("tid", "u32", PERF_SAMPLE_TID),
("time", "u64", PERF_SAMPLE_TIME),
("addr", "u64", PERF_SAMPLE_ADDR),
("id", "u64", PERF_SAMPLE_ID),
("stream_id", "u64", PERF_SAMPLE_STREAM_ID),
("cpu", "u32", PERF_SAMPLE_CPU),
("res", "u32", PERF_SAMPLE_CPU),
("period", "u64", PERF_SAMPLE_PERIOD),
# Don't want to handle read format that comes after the period and
# before the callchain and has variable size.
("nr", "u64", PERF_SAMPLE_CALLCHAIN)
# Raw data follows the callchain and is ignored.
]
PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
PERF_RECORD_MMAP = 1
PERF_RECORD_SAMPLE = 9
class TraceReader(object):
"""Perf (linux-2.6/tools/perf) trace file reader."""
_TRACE_HEADER_MAGIC = 4993446653023372624
def __init__(self, trace_name):
self.trace_file = open(trace_name, "r")
self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
print >>sys.stderr, "Warning: unsupported trace header magic"
self.offset = self.trace_header.data_offset
self.limit = self.trace_header.data_offset + self.trace_header.data_size
assert self.limit <= self.trace.size(), \
"Trace data limit exceeds trace file size"
self.header_size = ctypes.sizeof(PERF_EVENT_HEADER_DESC.ctype)
assert self.trace_header.attrs_size != 0, \
"No perf event attributes found in the trace"
perf_event_attr = PERF_EVENT_ATTR_DESC.Read(self.trace,
self.trace_header.attrs_offset)
self.sample_event_body_desc = self._SampleEventBodyDesc(
perf_event_attr.sample_type)
self.callchain_supported = \
(perf_event_attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0
if self.callchain_supported:
self.ip_struct = Descriptor.CTYPE_MAP[PERF_SAMPLE_EVENT_IP_FORMAT]
self.ip_size = ctypes.sizeof(self.ip_struct)
def ReadEventHeader(self):
if self.offset >= self.limit:
return None, 0
offset = self.offset
header = PERF_EVENT_HEADER_DESC.Read(self.trace, self.offset)
self.offset += header.size
return header, offset
def ReadMmap(self, header, offset):
mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
offset + self.header_size)
# Read null-terminated filename.
filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
offset + header.size]
mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
return mmap_info
def ReadSample(self, header, offset):
sample = self.sample_event_body_desc.Read(self.trace,
offset + self.header_size)
if not self.callchain_supported:
return sample
sample.ips = []
offset += self.header_size + ctypes.sizeof(sample)
for _ in xrange(sample.nr):
sample.ips.append(
self.ip_struct.from_buffer(self.trace, offset).value)
offset += self.ip_size
return sample
def Dispose(self):
self.trace.close()
self.trace_file.close()
def _SampleEventBodyDesc(self, sample_type):
assert (sample_type & PERF_SAMPLE_READ) == 0, \
"Can't hande read format in samples"
fields = [(field, format)
for (field, format, bit) in PERF_SAMPLE_EVENT_BODY_FIELDS
if (bit & sample_type) != 0]
return Descriptor(fields)
OBJDUMP_SECTION_HEADER_RE = re.compile(
r"^\s*\d+\s(\.\S+)\s+[a-f0-9]")
OBJDUMP_SYMBOL_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
r"^DYNAMIC SYMBOL TABLE")
OBJDUMP_SKIP_RE = re.compile(
r"^.*ld\.so\.cache$")
KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
PERF_KERNEL_ALLSYMS_RE = re.compile(
r".*kallsyms.*")
KERNEL_ALLSYMS_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(?:t|T)\s(\S+)$")
class LibraryRepo(object):
def __init__(self):
self.infos = []
self.names = set()
self.ticks = {}
def Load(self, mmap_info, code_map, options):
# Skip kernel mmaps (identified by tid == 0) unless --kernel was
# requested.
if mmap_info.tid == 0 and not options.kernel:
return True
if OBJDUMP_SKIP_RE.match(mmap_info.filename):
return True
if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
return self._LoadKernelSymbols(code_map)
self.infos.append(mmap_info)
mmap_info.ticks = 0
mmap_info.unique_name = self._UniqueMmapName(mmap_info)
if not os.path.exists(mmap_info.filename):
return True
# Request section headers (-h), symbols (-t), and dynamic symbols
# (-T) from objdump.
# Unfortunately, section headers span two lines, so we have to
# keep the just seen section name (from the first line in each
# section header) in the after_section variable.
if mmap_info.filename.endswith(".ko"):
dynamic_symbols = ""
else:
dynamic_symbols = "-T"
process = subprocess.Popen(
"%s -h -t %s -C %s" % (OBJDUMP_BIN, dynamic_symbols, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pipe = process.stdout
after_section = None
code_sections = set()
reloc_sections = set()
dynamic = False
try:
for line in pipe:
if after_section:
if line.find("CODE") != -1:
code_sections.add(after_section)
if line.find("RELOC") != -1:
reloc_sections.add(after_section)
after_section = None
continue
match = OBJDUMP_SECTION_HEADER_RE.match(line)
if match:
after_section = match.group(1)
continue
if OBJDUMP_DYNAMIC_SYMBOLS_START_RE.match(line):
dynamic = True
continue
match = OBJDUMP_SYMBOL_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
origin_offset = start_address
flags = match.group(2)
section = match.group(3)
if section in code_sections:
if dynamic or section in reloc_sections:
start_address += mmap_info.addr
size = int(match.group(4), 16)
name = match.group(5)
origin = mmap_info.filename
code_map.Add(Code(name, start_address, start_address + size,
origin, origin_offset))
finally:
pipe.close()
assert process.wait() == 0, "Failed to objdump %s" % mmap_info.filename
def Tick(self, pc):
for i, mmap_info in enumerate(self.infos):
if mmap_info.addr <= pc < (mmap_info.addr + mmap_info.len):
mmap_info.ticks += 1
self.infos[0], self.infos[i] = mmap_info, self.infos[0]
return True
return False
def _UniqueMmapName(self, mmap_info):
name = mmap_info.filename
index = 1
while name in self.names:
name = "%s-%d" % (mmap_info.filename, index)
index += 1
self.names.add(name)
return name
def _LoadKernelSymbols(self, code_map):
if not os.path.exists(KERNEL_ALLSYMS_FILE):
print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
return False
kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
code = None
for line in kallsyms:
match = KERNEL_ALLSYMS_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
end_address = start_address
name = match.group(2)
if code:
code.end_address = start_address
code_map.Add(code, 16)
code = Code(name, start_address, end_address, "kernel", 0)
return True
def PrintReport(code_map, library_repo, arch, ticks, options):
print "Ticks per symbol:"
used_code = [code for code in code_map.UsedCode()]
used_code.sort(key=lambda x: x.self_ticks, reverse=True)
for i, code in enumerate(used_code):
code_ticks = code.self_ticks
print "%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
code.FullName(), code.origin)
if options.disasm_all or i < options.disasm_top:
code.PrintAnnotated(arch, options)
print
print "Ticks per library:"
mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
mmap_ticks = mmap_info.ticks
print "%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
mmap_info.unique_name)
def PrintDot(code_map, options):
print "digraph G {"
for code in code_map.UsedCode():
if code.self_ticks < 10:
continue
print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
if code.callee_ticks:
for callee, ticks in code.callee_ticks.iteritems():
print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
print "}"
if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
parser.add_option("--snapshot-log",
default="obj/release/snapshot.log",
help="V8 snapshot log file name [default: %default]")
parser.add_option("--log",
default="v8.log",
help="V8 log file name [default: %default]")
parser.add_option("--snapshot",
default=False,
action="store_true",
help="process V8 snapshot log [default: %default]")
parser.add_option("--trace",
default="perf.data",
help="perf trace file name [default: %default]")
parser.add_option("--kernel",
default=False,
action="store_true",
help="process kernel entries [default: %default]")
parser.add_option("--disasm-top",
default=0,
type="int",
help=("number of top symbols to disassemble and annotate "
"[default: %default]"))
parser.add_option("--disasm-all",
default=False,
action="store_true",
help=("disassemble and annotate all used symbols "
"[default: %default]"))
parser.add_option("--dot",
default=False,
action="store_true",
help="produce dot output (WIP) [default: %default]")
parser.add_option("--quiet", "-q",
default=False,
action="store_true",
help="no auxiliary messages [default: %default]")
parser.add_option("--gc-fake-mmap",
default="/tmp/__v8_gc__",
help="gc fake mmap file [default: %default]")
parser.add_option("--objdump",
default="/usr/bin/objdump",
help="objdump tool to use [default: %default]")
parser.add_option("--host-root",
default="",
help="Path to the host root [default: %default]")
options, args = parser.parse_args()
if not options.quiet:
if options.snapshot:
print "V8 logs: %s, %s, %s.ll" % (options.snapshot_log,
options.log,
options.log)
else:
print "V8 log: %s, %s.ll (no snapshot)" % (options.log, options.log)
print "Perf trace file: %s" % options.trace
V8_GC_FAKE_MMAP = options.gc_fake_mmap
HOST_ROOT = options.host_root
if os.path.exists(options.objdump):
disasm.OBJDUMP_BIN = options.objdump
OBJDUMP_BIN = options.objdump
else:
print "Cannot find %s, falling back to default objdump" % options.objdump
# Stats.
events = 0
ticks = 0
missed_ticks = 0
really_missed_ticks = 0
optimized_ticks = 0
generated_ticks = 0
v8_internal_ticks = 0
mmap_time = 0
sample_time = 0
# Process the snapshot log to fill the snapshot name map.
snapshot_name_map = {}
if options.snapshot:
snapshot_log_reader = SnapshotLogReader(log_name=options.snapshot_log)
snapshot_name_map = snapshot_log_reader.ReadNameMap()
# Initialize the log reader.
code_map = CodeMap()
log_reader = LogReader(log_name=options.log + ".ll",
code_map=code_map,
snapshot_pos_to_name=snapshot_name_map)
if not options.quiet:
print "Generated code architecture: %s" % log_reader.arch
print
sys.stdout.flush()
# Process the code and trace logs.
library_repo = LibraryRepo()
log_reader.ReadUpToGC()
trace_reader = TraceReader(options.trace)
while True:
header, offset = trace_reader.ReadEventHeader()
if not header:
break
events += 1
if header.type == PERF_RECORD_MMAP:
start = time.time()
mmap_info = trace_reader.ReadMmap(header, offset)
if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
log_reader.ReadUpToGC()
else:
library_repo.Load(mmap_info, code_map, options)
mmap_time += time.time() - start
elif header.type == PERF_RECORD_SAMPLE:
ticks += 1
start = time.time()
sample = trace_reader.ReadSample(header, offset)
code = code_map.Find(sample.ip)
if code:
code.Tick(sample.ip)
if code.codetype == Code.OPTIMIZED:
optimized_ticks += 1
elif code.codetype == Code.FULL_CODEGEN:
generated_ticks += 1
elif code.codetype == Code.V8INTERNAL:
v8_internal_ticks += 1
else:
missed_ticks += 1
if not library_repo.Tick(sample.ip) and not code:
really_missed_ticks += 1
if trace_reader.callchain_supported:
for ip in sample.ips:
caller_code = code_map.Find(ip)
if caller_code:
if code:
caller_code.CalleeTick(code)
code = caller_code
sample_time += time.time() - start
if options.dot:
PrintDot(code_map, options)
else:
PrintReport(code_map, library_repo, log_reader.arch, ticks, options)
if not options.quiet:
def PrintTicks(number, total, description):
print("%10d %5.1f%% ticks in %s" %
(number, 100.0*number/total, description))
print
print "Stats:"
print "%10d total trace events" % events
print "%10d total ticks" % ticks
print "%10d ticks not in symbols" % missed_ticks
unaccounted = "unaccounted ticks"
if really_missed_ticks > 0:
unaccounted += " (probably in the kernel, try --kernel)"
PrintTicks(really_missed_ticks, ticks, unaccounted)
PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
print "%10d total symbols" % len([c for c in code_map.AllCode()])
print "%10d used symbols" % len([c for c in code_map.UsedCode()])
print "%9.2fs library processing time" % mmap_time
print "%9.2fs tick processing time" % sample_time
log_reader.Dispose()
trace_reader.Dispose()
|
data_collection/gazette/spiders/sc_curitibanos.py
|
kaiocp/querido-diario
| 454 |
74951
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScCuritibanosSpider(FecamGazetteSpider):
name = "sc_curitibanos"
FECAM_QUERY = "cod_entidade:82"
TERRITORY_ID = "4204806"
|
desktop/core/ext-py/nose-1.3.7/functional_tests/support/package3/src/b.py
|
kokosing/hue
| 5,079 |
74964
|
def b():
pass
|
ple/games/pong.py
|
ArnthorDadi/FlappyBirdAI
| 959 |
74978
|
import math
import sys
import pygame
from pygame.constants import K_w, K_s
from ple.games.utils.vec2d import vec2d
from ple.games.utils import percent_round_int
#import base
from ple.games.base.pygamewrapper import PyGameWrapper
class Ball(pygame.sprite.Sprite):
def __init__(self, radius, speed, rng,
pos_init, SCREEN_WIDTH, SCREEN_HEIGHT):
pygame.sprite.Sprite.__init__(self)
self.rng = rng
self.radius = radius
self.speed = speed
self.pos = vec2d(pos_init)
self.pos_before = vec2d(pos_init)
self.vel = vec2d((speed, -1.0 * speed))
self.SCREEN_HEIGHT = SCREEN_HEIGHT
self.SCREEN_WIDTH = SCREEN_WIDTH
image = pygame.Surface((radius * 2, radius * 2))
image.fill((0, 0, 0, 0))
image.set_colorkey((0, 0, 0))
pygame.draw.circle(
image,
(255, 255, 255),
(radius, radius),
radius,
0
)
self.image = image
self.rect = self.image.get_rect()
self.rect.center = pos_init
def line_intersection(self, p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):
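# Standard parametric segment-segment intersection test: solve for the
# parameters s and t along the two segments; they intersect iff both lie in
# [0, 1]. Note that exactly parallel segments would make the denominator zero.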
s1_x = p1_x - p0_x
s1_y = p1_y - p0_y
s2_x = p3_x - p2_x
s2_y = p3_y - p2_y
s = (-s1_y * (p0_x - p2_x) + s1_x * (p0_y - p2_y)) / (-s2_x * s1_y + s1_x * s2_y)
t = (s2_x * (p0_y - p2_y) - s2_y * (p0_x - p2_x)) / (-s2_x * s1_y + s1_x * s2_y)
return (s >= 0 and s <= 1 and t >= 0 and t <= 1)
def update(self, agentPlayer, cpuPlayer, dt):
self.pos.x += self.vel.x * dt
self.pos.y += self.vel.y * dt
is_pad_hit = False
if self.pos.x <= agentPlayer.pos.x + agentPlayer.rect_width:
if self.line_intersection(self.pos_before.x, self.pos_before.y, self.pos.x, self.pos.y, agentPlayer.pos.x + agentPlayer.rect_width / 2, agentPlayer.pos.y - agentPlayer.rect_height / 2, agentPlayer.pos.x + agentPlayer.rect_width / 2, agentPlayer.pos.y + agentPlayer.rect_height / 2):
self.pos.x = max(0, self.pos.x)
self.vel.x = -1 * (self.vel.x + self.speed * 0.05)
self.vel.y += agentPlayer.vel.y * 2.0
self.pos.x += self.radius
is_pad_hit = True
if self.pos.x >= cpuPlayer.pos.x - cpuPlayer.rect_width:
if self.line_intersection(self.pos_before.x, self.pos_before.y, self.pos.x, self.pos.y, cpuPlayer.pos.x - cpuPlayer.rect_width / 2, cpuPlayer.pos.y - cpuPlayer.rect_height / 2, cpuPlayer.pos.x - cpuPlayer.rect_width / 2, cpuPlayer.pos.y + cpuPlayer.rect_height / 2):
self.pos.x = min(self.SCREEN_WIDTH, self.pos.x)
self.vel.x = -1 * (self.vel.x + self.speed * 0.05)
self.vel.y += cpuPlayer.vel.y * 0.006
self.pos.x -= self.radius
is_pad_hit = True
# A little randomness so the ball doesn't get stuck in a static loop
if is_pad_hit:
self.vel.y += self.rng.random_sample() * 0.001 - 0.0005
if self.pos.y - self.radius <= 0:
self.vel.y *= -0.99
self.pos.y += 1.0
if self.pos.y + self.radius >= self.SCREEN_HEIGHT:
self.vel.y *= -0.99
self.pos.y -= 1.0
self.pos_before.x = self.pos.x
self.pos_before.y = self.pos.y
self.rect.center = (self.pos.x, self.pos.y)
class Player(pygame.sprite.Sprite):
def __init__(self, speed, rect_width, rect_height,
pos_init, SCREEN_WIDTH, SCREEN_HEIGHT):
pygame.sprite.Sprite.__init__(self)
self.speed = speed
self.pos = vec2d(pos_init)
self.vel = vec2d((0, 0))
self.rect_height = rect_height
self.rect_width = rect_width
self.SCREEN_HEIGHT = SCREEN_HEIGHT
self.SCREEN_WIDTH = SCREEN_WIDTH
image = pygame.Surface((rect_width, rect_height))
image.fill((0, 0, 0, 0))
image.set_colorkey((0, 0, 0))
pygame.draw.rect(
image,
(255, 255, 255),
(0, 0, rect_width, rect_height),
0
)
self.image = image
self.rect = self.image.get_rect()
self.rect.center = pos_init
def update(self, dy, dt):
self.vel.y += dy * dt
self.vel.y *= 0.9
self.pos.y += self.vel.y
if self.pos.y - self.rect_height / 2 <= 0:
self.pos.y = self.rect_height / 2
self.vel.y = 0.0
if self.pos.y + self.rect_height / 2 >= self.SCREEN_HEIGHT:
self.pos.y = self.SCREEN_HEIGHT - self.rect_height / 2
self.vel.y = 0.0
self.rect.center = (self.pos.x, self.pos.y)
def updateCpu(self, ball, dt):
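# Simple CPU heuristic: when the ball is moving toward the CPU and is already
# in its half, chase the ball at full speed; otherwise drift back toward the
# vertical center of the screen at a quarter of the speed.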
dy = 0.0
if ball.vel.x >= 0 and ball.pos.x >= self.SCREEN_WIDTH / 2:
dy = self.speed
if self.pos.y > ball.pos.y:
dy = -1.0 * dy
else:
dy = 1.0 * self.speed / 4.0
if self.pos.y > self.SCREEN_HEIGHT / 2.0:
dy = -1.0 * self.speed / 4.0
if self.pos.y - self.rect_height / 2 <= 0:
self.pos.y = self.rect_height / 2
self.vel.y = 0.0
if self.pos.y + self.rect_height / 2 >= self.SCREEN_HEIGHT:
self.pos.y = self.SCREEN_HEIGHT - self.rect_height / 2
self.vel.y = 0.0
self.pos.y += dy * dt
self.rect.center = (self.pos.x, self.pos.y)
class Pong(PyGameWrapper):
"""
Loosely based on code from marti1125's `pong game`_.
.. _pong game: https://github.com/marti1125/pong/
Parameters
----------
width : int
Screen width.
height : int
Screen height, recommended to be same dimension as width.
MAX_SCORE : int (default: 11)
The max number of points the agent or cpu need to score to cause a terminal state.
cpu_speed_ratio: float (default: 0.5)
Speed of opponent (useful for curriculum learning)
players_speed_ratio: float (default: 0.25)
Speed of player (useful for curriculum learning)
ball_speed_ratio: float (default: 0.75)
Speed of ball (useful for curriculum learning)
"""
def __init__(self, width=64, height=48, cpu_speed_ratio=0.6, players_speed_ratio = 0.4, ball_speed_ratio=0.75, MAX_SCORE=11):
actions = {
"up": K_w,
"down": K_s
}
PyGameWrapper.__init__(self, width, height, actions=actions)
# The percentages come from the original values; we want to keep the same
# ratios when the resolution is increased.
self.ball_radius = percent_round_int(height, 0.03)
self.cpu_speed_ratio = cpu_speed_ratio
self.ball_speed_ratio = ball_speed_ratio
self.players_speed_ratio = players_speed_ratio
self.paddle_width = percent_round_int(width, 0.023)
self.paddle_height = percent_round_int(height, 0.15)
self.paddle_dist_to_wall = percent_round_int(width, 0.0625)
self.MAX_SCORE = MAX_SCORE
self.dy = 0.0
self.score_sum = 0.0 # need to deal with 11 on either side winning
self.score_counts = {
"agent": 0.0,
"cpu": 0.0
}
def _handle_player_events(self):
self.dy = 0
if __name__ == "__main__":
# for debugging mode
pygame.event.get()
keys = pygame.key.get_pressed()
if keys[self.actions['up']]:
self.dy = -self.agentPlayer.speed
elif keys[self.actions['down']]:
self.dy = self.agentPlayer.speed
if keys[pygame.QUIT]:
pygame.quit()
sys.exit()
pygame.event.pump()
else:
# consume events from act
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
key = event.key
if key == self.actions['up']:
self.dy = -self.agentPlayer.speed
if key == self.actions['down']:
self.dy = self.agentPlayer.speed
def getGameState(self):
"""
Gets a non-visual state representation of the game.
Returns
-------
dict
* player y position.
* players velocity.
* cpu y position.
* ball x position.
* ball y position.
* ball x velocity.
* ball y velocity.
See code for structure.
"""
state = {
"player_y": self.agentPlayer.pos.y,
"player_velocity": self.agentPlayer.vel.y,
"cpu_y": self.cpuPlayer.pos.y,
"ball_x": self.ball.pos.x,
"ball_y": self.ball.pos.y,
"ball_velocity_x": self.ball.vel.x,
"ball_velocity_y": self.ball.vel.y
}
return state
def getScore(self):
return self.score_sum
def game_over(self):
# pong used 11 as max score
return (self.score_counts['agent'] == self.MAX_SCORE) or (
self.score_counts['cpu'] == self.MAX_SCORE)
def init(self):
self.score_counts = {
"agent": 0.0,
"cpu": 0.0
}
self.score_sum = 0.0
self.ball = Ball(
self.ball_radius,
self.ball_speed_ratio * self.height,
self.rng,
(self.width / 2, self.height / 2),
self.width,
self.height
)
self.agentPlayer = Player(
self.players_speed_ratio * self.height,
self.paddle_width,
self.paddle_height,
(self.paddle_dist_to_wall, self.height / 2),
self.width,
self.height)
self.cpuPlayer = Player(
self.cpu_speed_ratio * self.height,
self.paddle_width,
self.paddle_height,
(self.width - self.paddle_dist_to_wall, self.height / 2),
self.width,
self.height)
self.players_group = pygame.sprite.Group()
self.players_group.add(self.agentPlayer)
self.players_group.add(self.cpuPlayer)
self.ball_group = pygame.sprite.Group()
self.ball_group.add(self.ball)
def reset(self):
self.init()
# After game over, give the ball a random direction; otherwise it would always be the same.
self._reset_ball(1 if self.rng.random_sample() > 0.5 else -1)
def _reset_ball(self, direction):
self.ball.pos.x = self.width / 2 # move it to the center
# we go in the same direction that they lost in but at starting vel.
self.ball.vel.x = self.ball.speed * direction
self.ball.vel.y = (self.rng.random_sample() *
self.ball.speed) - self.ball.speed * 0.5
def step(self, dt):
dt /= 1000.0
self.screen.fill((0, 0, 0))
self.agentPlayer.speed = self.players_speed_ratio * self.height
self.cpuPlayer.speed = self.cpu_speed_ratio * self.height
self.ball.speed = self.ball_speed_ratio * self.height
self._handle_player_events()
# doesn't make sense to have this, but include it if needed.
self.score_sum += self.rewards["tick"]
self.ball.update(self.agentPlayer, self.cpuPlayer, dt)
is_terminal_state = False
# logic
if self.ball.pos.x <= 0:
self.score_sum += self.rewards["negative"]
self.score_counts["cpu"] += 1.0
self._reset_ball(-1)
is_terminal_state = True
if self.ball.pos.x >= self.width:
self.score_sum += self.rewards["positive"]
self.score_counts["agent"] += 1.0
self._reset_ball(1)
is_terminal_state = True
if is_terminal_state:
# winning
if self.score_counts['agent'] == self.MAX_SCORE:
self.score_sum += self.rewards["win"]
# losing
if self.score_counts['cpu'] == self.MAX_SCORE:
self.score_sum += self.rewards["loss"]
else:
self.agentPlayer.update(self.dy, dt)
self.cpuPlayer.updateCpu(self.ball, dt)
self.players_group.draw(self.screen)
self.ball_group.draw(self.screen)
if __name__ == "__main__":
import numpy as np
pygame.init()
game = Pong(width=256, height=200)
game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
game.clock = pygame.time.Clock()
game.rng = np.random.RandomState(24)
game.init()
while True:
dt = game.clock.tick_busy_loop(60)
game.step(dt)
pygame.display.update()
|
exercises/pt/test_03_14_03.py
|
Jette16/spacy-course
| 2,085 |
75022
|
def test():
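# Note: the feedback strings below are Portuguese (this is the pt locale of
# the course). Roughly: "Are you using nlp.pipe wrapped in a list?" and
# "Good job! Let's now move on to a practical example that uses nlp.pipe to
# process documents with additional metadata."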
assert (
"patterns = list(nlp.pipe(people))" in __solution__
), "Você está usando nlp.pipe envolvido em uma lista (list)?"
__msg__.good(
"Bom trabalho! Vamos seguir agora com um exemplo prático que "
"usa nlp.pipe para processar documentos com metadados adicionais."
)
|
tests/test_opq.py
|
matsui528/nanopq
| 217 |
75025
|
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
import unittest
import nanopq
import numpy as np
class TestSuite(unittest.TestCase):
def setUp(self):
np.random.seed(123)
def test_property(self):
opq = nanopq.OPQ(M=4, Ks=256)
self.assertEqual(
(opq.M, opq.Ks, opq.verbose, opq.code_dtype),
(opq.pq.M, opq.pq.Ks, opq.pq.verbose, opq.pq.code_dtype),
)
def test_fit(self):
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq = nanopq.OPQ(M=M, Ks=Ks)
opq.fit(X)
self.assertEqual(opq.Ds, D / M)
self.assertEqual(opq.codewords.shape, (M, Ks, D / M))
self.assertEqual(opq.R.shape, (D, D))
opq2 = nanopq.OPQ(M=M, Ks=Ks).fit(X) # Can be called as a chain
self.assertTrue(np.allclose(opq.codewords, opq2.codewords))
def test_eq(self):
import copy
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq1 = nanopq.OPQ(M=M, Ks=Ks)
opq2 = nanopq.OPQ(M=M, Ks=Ks)
opq3 = copy.deepcopy(opq1)
opq4 = nanopq.OPQ(M=M, Ks=2 * Ks)
self.assertTrue(opq1 == opq1)
self.assertTrue(opq1 == opq2)
self.assertTrue(opq1 == opq3)
self.assertTrue(opq1 != opq4)
opq1.fit(X)
opq2.fit(X)
opq3 = copy.deepcopy(opq1)
opq4.fit(X)
self.assertTrue(opq1 == opq1)
self.assertTrue(opq1 == opq2)
self.assertTrue(opq1 == opq3)
self.assertTrue(opq1 != opq4)
def test_rotate(self):
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq = nanopq.OPQ(M=M, Ks=Ks)
opq.fit(X)
rotated_vec = opq.rotate(X[0])
rotated_vecs = opq.rotate(X[:3])
self.assertEqual(rotated_vec.shape, (D,))
self.assertEqual(rotated_vecs.shape, (3, D))
# Because R is a rotation matrix (R^t * R = I), R^t should be R^(-1)
self.assertAlmostEqual(
np.linalg.norm(opq.R.T - np.linalg.inv(opq.R)), 0.0, places=3
)
if __name__ == "__main__":
unittest.main()
|
backend/op/add_followers_op.py
|
sleepingAnt/viewfinder
| 645 |
75064
|
# Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Viewfinder AddFollowersOperation.
This operation adds a set of contacts to an existing viewpoint as followers of that viewpoint.
If a contact is not yet a Viewfinder user, we create a prospective user and link the contact
to that.
"""
__authors__ = ['<EMAIL> (<NAME>)']
import json
from tornado import gen
from viewfinder.backend.base.exceptions import LimitExceededError, PermissionError
from viewfinder.backend.db.accounting import AccountingAccumulator
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.followed import Followed
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.lock import Lock
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.db.user import User
from viewfinder.backend.db.viewpoint import Viewpoint
from viewfinder.backend.op.notification_manager import NotificationManager
from viewfinder.backend.op.viewfinder_op import ViewfinderOperation
class AddFollowersOperation(ViewfinderOperation):
"""The AddFollowers operation follows the four phase pattern described in the header of
operation_map.py.
"""
def __init__(self, client, act_dict, user_id, viewpoint_id, contact_dicts):
super(AddFollowersOperation, self).__init__(client)
self._act_dict = act_dict
self._user_id = user_id
self._viewpoint_id = viewpoint_id
self._contact_dicts = contact_dicts
@classmethod
@gen.coroutine
def Execute(cls, client, activity, user_id, viewpoint_id, contacts):
"""Entry point called by the operation framework."""
yield AddFollowersOperation(client, activity, user_id, viewpoint_id, contacts)._AddFollowers()
@gen.coroutine
def _AddFollowers(self):
"""Orchestrates the add followers operation by executing each of the phases in turn."""
# Lock the viewpoint while adding followers.
lock = yield gen.Task(Viewpoint.AcquireLock, self._client, self._viewpoint_id)
try:
yield self._Check()
self._client.CheckDBNotModified()
yield self._Update()
yield self._Account()
yield Operation.TriggerFailpoint(self._client)
yield self._Notify()
finally:
yield gen.Task(Viewpoint.ReleaseLock, self._client, self._viewpoint_id, lock)
@gen.coroutine
def _Check(self):
"""Gathers pre-mutation information:
1. Queries for existing followers and viewpoint.
2. Checkpoints list of followers that need to be revived.
3. Checkpoints list of contacts that need to be made prospective users.
4. Checkpoints list of contacts that are already following the viewpoint.
Validates the following:
1. Max follower limit.
2. Permission to add followers.
"""
# Get the viewpoint to be modified, along with the follower that is adding the additional users.
# This state will not be changed by add followers, and so doesn't need to be part of the checkpoint.
self._viewpoint, self._follower = yield gen.Task(Viewpoint.QueryWithFollower,
self._client,
self._user_id,
self._viewpoint_id)
# Checks permission to add followers.
if self._follower is None or not self._follower.CanContribute():
raise PermissionError('User %d does not have permission to add followers to viewpoint "%s".' %
(self._user_id, self._viewpoint_id))
# Start populating the checkpoint if this is the first time the operation has been run.
if self._op.checkpoint is None:
# Get all existing followers.
self._existing_followers, _ = yield gen.Task(Viewpoint.QueryFollowers,
self._client,
self._viewpoint_id,
limit=Viewpoint.MAX_FOLLOWERS)
# Get list of followers which have removed themselves from the viewpoint and will need to be revived.
self._revive_follower_ids = self._GetRevivableFollowers(self._existing_followers)
# Get a tuple for each contact: (user_exists?, user_id, webapp_dev_id).
self._contact_ids = yield self._ResolveContactIds(self._contact_dicts)
# Set checkpoint.
# Existing followers, followers to revive, and list of contacts need to be check-pointed
# because these sets are changed in the UPDATE phase. If we fail after UPDATE, but before
# NOTIFY, we would not send correct notifications on retry.
checkpoint = {'existing': [follower.user_id for follower in self._existing_followers],
'revive': self._revive_follower_ids,
'contacts': self._contact_ids}
yield self._op.SetCheckpoint(self._client, checkpoint)
else:
# Restore state from checkpoint.
follower_keys = [DBKey(follower_id, self._viewpoint_id) for follower_id in self._op.checkpoint['existing']]
self._existing_followers = yield gen.Task(Follower.BatchQuery, self._client, follower_keys, None)
self._revive_follower_ids = self._op.checkpoint['revive']
self._contact_ids = self._op.checkpoint['contacts']
self._contact_user_ids = [user_id for user_exists, user_id, webapp_dev_id in self._contact_ids]
# Check if we're about to exceed follower limit on this viewpoint.
if len(self._existing_followers) + len(self._contact_dicts) > Viewpoint.MAX_FOLLOWERS:
raise LimitExceededError(
'User %d attempted to exceed follower limit on viewpoint "%s" by adding %d followers.' %
(self._user_id, self._viewpoint_id, len(self._contact_dicts)))
@gen.coroutine
def _Update(self):
"""Updates the database:
1. Revives any followers that have removed the viewpoint.
2. Creates prospective users.
3. Adds the followers to the viewpoint.
"""
# Create any prospective users (may create nested CreateProspective operations).
yield self._ResolveContacts(self._contact_dicts, self._contact_ids, reason='add_follower=%d' % self._user_id)
# Revive any REMOVED followers.
yield gen.Task(Follower.ReviveRemovedFollowers, self._client, self._existing_followers)
# Figure out which users need to be added as followers. Note that new followers exclude followers
# from the request that are already following the viewpoint (assuming they're not removed).
existing_follower_ids = set(follower.user_id for follower in self._existing_followers
if not follower.IsRemoved())
self._new_follower_ids = [user_id for user_id in set(self._contact_user_ids)
if user_id not in existing_follower_ids]
# Now actually add the followers.
self._new_followers = yield self._viewpoint.AddFollowers(self._client,
self._user_id,
list(existing_follower_ids),
self._new_follower_ids,
self._op.timestamp)
@gen.coroutine
def _Account(self):
"""Makes accounting changes:
1. For revived followers.
2. For new followers.
"""
acc_accum = AccountingAccumulator()
# Make accounting changes for any revived followers.
yield acc_accum.ReviveFollowers(self._client, self._viewpoint_id, self._revive_follower_ids)
# Make accounting changes for the new followers.
yield acc_accum.AddFollowers(self._client, self._viewpoint_id, self._new_follower_ids)
yield acc_accum.Apply(self._client)
@gen.coroutine
def _Notify(self):
"""Creates notifications:
1. Notifies removed followers that conversation has new activity.
2. Notifies users with contacts that have become prospective users.
3. Notifies existing followers of the viewpoint that new followers have been added.
4. Notifies new followers that they have been added to a viewpoint.
"""
# Creates notifications for any new prospective users.
identity_keys = [contact_dict['identity']
for contact_dict, (user_exists, user_id, webapp_dev_id) in zip(self._contact_dicts,
self._contact_ids)
if not user_exists]
yield NotificationManager.NotifyCreateProspective(self._client,
identity_keys,
self._op.timestamp)
# Creates notifications for any revived followers.
yield NotificationManager.NotifyReviveFollowers(self._client,
self._viewpoint_id,
self._revive_follower_ids,
self._op.timestamp)
# Creates notification of new viewpoint for each new follower.
yield NotificationManager.NotifyAddFollowers(self._client,
self._viewpoint_id,
self._existing_followers,
self._new_followers,
self._contact_user_ids,
self._act_dict,
self._op.timestamp)
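# Illustrative sketch (not part of the original module): the shape of the checkpoint that
# _Check writes on the first run and restores on retry. The values below are hypothetical;
# 'existing' holds user ids of current followers, 'revive' holds ids of removed followers
# to restore, and 'contacts' holds the (user_exists?, user_id, webapp_dev_id) tuples
# resolved from the request's contact dicts.
_EXAMPLE_ADD_FOLLOWERS_CHECKPOINT = {
    'existing': [101, 102, 103],
    'revive': [103],
    'contacts': [(True, 104, 9001), (False, 105, 9002)],
}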
|
src/python/pants/backend/shell/lint/shfmt/rules_integration_test.py
|
yoav-orca/pants
| 1,806 |
75077
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.shell.lint.shfmt.rules import ShfmtFieldSet, ShfmtRequest
from pants.backend.shell.lint.shfmt.rules import rules as shfmt_rules
from pants.backend.shell.target_types import ShellSourcesGeneratorTarget
from pants.backend.shell.target_types import rules as target_types_rules
from pants.core.goals.fmt import FmtResult
from pants.core.goals.lint import LintResult, LintResults
from pants.core.util_rules import config_files, external_tool, source_files
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.fs import CreateDigest, Digest, FileContent
from pants.engine.target import Target
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*shfmt_rules(),
*config_files.rules(),
*external_tool.rules(),
*source_files.rules(),
*target_types_rules(),
QueryRule(LintResults, [ShfmtRequest]),
QueryRule(FmtResult, [ShfmtRequest]),
QueryRule(SourceFiles, [SourceFilesRequest]),
],
target_types=[ShellSourcesGeneratorTarget],
)
GOOD_FILE = "! foo bar >a &\n"
# The "bad" file differs from the good one only by extra whitespace, which shfmt collapses.
BAD_FILE = "!    foo bar     >a  &\n"
# If config is loaded correctly, shfmt will indent the case statements.
NEEDS_CONFIG_FILE = dedent(
"""\
case foo in
PATTERN_1)
\tbar
\t;;
*)
\tbaz
\t;;
esac
"""
)
FIXED_NEEDS_CONFIG_FILE = dedent(
"""\
case foo in
\tPATTERN_1)
\t\tbar
\t\t;;
\t*)
\t\tbaz
\t\t;;
esac
"""
)
def run_shfmt(
rule_runner: RuleRunner,
targets: list[Target],
*,
extra_args: list[str] | None = None,
) -> tuple[tuple[LintResult, ...], FmtResult]:
rule_runner.set_options(
["--backend-packages=pants.backend.shell.lint.shfmt", *(extra_args or ())],
env_inherit={"PATH"},
)
field_sets = [ShfmtFieldSet.create(tgt) for tgt in targets]
lint_results = rule_runner.request(LintResults, [ShfmtRequest(field_sets)])
input_sources = rule_runner.request(
SourceFiles,
[
SourceFilesRequest(field_set.sources for field_set in field_sets),
],
)
fmt_result = rule_runner.request(
FmtResult,
[
ShfmtRequest(field_sets, prior_formatter_result=input_sources.snapshot),
],
)
return lint_results.results, fmt_result
def get_digest(rule_runner: RuleRunner, source_files: dict[str, str]) -> Digest:
files = [FileContent(path, content.encode()) for path, content in source_files.items()]
return rule_runner.request(Digest, [CreateDigest(files)])
def test_passing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": GOOD_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 0
assert lint_results[0].stderr == ""
assert fmt_result.stdout == ""
assert fmt_result.output == get_digest(rule_runner, {"f.sh": GOOD_FILE})
assert fmt_result.did_change is False
def test_failing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": BAD_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "f.sh.orig" in lint_results[0].stdout
assert fmt_result.stdout == "f.sh\n"
assert fmt_result.output == get_digest(rule_runner, {"f.sh": GOOD_FILE})
assert fmt_result.did_change is True
def test_multiple_targets(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{"good.sh": GOOD_FILE, "bad.sh": BAD_FILE, "BUILD": "shell_sources(name='t')"}
)
tgts = [
rule_runner.get_target(Address("", target_name="t", relative_file_path="good.sh")),
rule_runner.get_target(Address("", target_name="t", relative_file_path="bad.sh")),
]
lint_results, fmt_result = run_shfmt(rule_runner, tgts)
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "bad.sh.orig" in lint_results[0].stdout
assert "good.sh" not in lint_results[0].stdout
assert "bad.sh\n" == fmt_result.stdout
assert fmt_result.output == get_digest(rule_runner, {"good.sh": GOOD_FILE, "bad.sh": GOOD_FILE})
assert fmt_result.did_change is True
def test_config_files(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"a/f.sh": NEEDS_CONFIG_FILE,
"a/BUILD": "shell_sources()",
"a/.editorconfig": "[*.sh]\nswitch_case_indent = true\n",
"b/f.sh": NEEDS_CONFIG_FILE,
"b/BUILD": "shell_sources()",
}
)
tgts = [
rule_runner.get_target(Address("a", relative_file_path="f.sh")),
rule_runner.get_target(Address("b", relative_file_path="f.sh")),
]
lint_results, fmt_result = run_shfmt(rule_runner, tgts)
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "a/f.sh.orig" in lint_results[0].stdout
assert "b/f.sh.orig" not in lint_results[0].stdout
assert fmt_result.stdout == "a/f.sh\n"
assert fmt_result.output == get_digest(
rule_runner, {"a/f.sh": FIXED_NEEDS_CONFIG_FILE, "b/f.sh": NEEDS_CONFIG_FILE}
)
assert fmt_result.did_change is True
def test_passthrough_args(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": NEEDS_CONFIG_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt], extra_args=["--shfmt-args=-ci"])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 1
assert "f.sh.orig" in lint_results[0].stdout
assert fmt_result.stdout == "f.sh\n"
assert fmt_result.output == get_digest(rule_runner, {"f.sh": FIXED_NEEDS_CONFIG_FILE})
assert fmt_result.did_change is True
def test_skip(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.sh": BAD_FILE, "BUILD": "shell_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
lint_results, fmt_result = run_shfmt(rule_runner, [tgt], extra_args=["--shfmt-skip"])
assert not lint_results
assert fmt_result.skipped is True
assert fmt_result.did_change is False
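# Illustrative sketch (not part of the original tests): the .editorconfig entry used in
# test_config_files ("switch_case_indent = true") and the flag passed through in
# test_passthrough_args ("-ci") request the same shfmt behaviour. This helper is never
# invoked by the suite and assumes a shfmt binary is available on PATH.
def _demo_shfmt_ci_equivalence() -> None:
    import subprocess
    fixed = subprocess.run(
        ["shfmt", "-ci"], input=NEEDS_CONFIG_FILE, capture_output=True, text=True
    ).stdout
    assert fixed == FIXED_NEEDS_CONFIG_FILE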
|
equip/bytecode/decl.py
|
neuroo/equip
| 102 |
75085
|
<reponame>neuroo/equip<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
equip.bytecode.decl
~~~~~~~~~~~~~~~~~~~
Structured representation of Module, Types, Method, Imports.
:copyright: (c) 2014 by <NAME> (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import dis
from operator import attrgetter, \
methodcaller
from ..utils.log import logger
from ..visitors.bytecode import BytecodeVisitor
from .utils import update_nested_code_object
class Declaration(object):
"""
Base class for the declaration types of object.
"""
MODULE = 1
TYPE = 2
METHOD = 3
FIELD = 4
IMPORT = 5
def __init__(self, kind, _code_object):
self._kind = kind
self._code_object = _code_object
self._parent = None
self._children = []
self._lines = None
self._bytecode = []
self._bytecode_object = None
self._has_changes = False
@property
def lines(self):
"""
A tuple of start/end line numbers that encapsulates this declaration.
"""
return self._lines
@lines.setter
def lines(self, value):
self._lines = value
@property
def start_lineno(self):
"""
Returns the start line number of the declaration.
"""
return self._lines[0] if self._lines else -1
def get_start_lineno(self):
return self.start_lineno
@property
def end_lineno(self):
"""
Returns the end line number of the declaration.
"""
return self._lines[1] if self._lines else -1
@property
def parent(self):
"""
Returns the parent of this declaration or ``None`` if there is
no parent (e.g., for a ``ModuleDeclaration``).
"""
return self._parent
@parent.setter
def parent(self, value):
# logger.debug("Set parent. %s", value)
self._parent = value
self._parent.add_child(self)
@property
def children(self):
"""
Returns the children of this declaration.
"""
return self._children
def add_child(self, child):
"""
Adds a child to this declaration.
:param child: A ``Declaration`` that is a child of the current declaration.
"""
self._children.append(child)
# logger.debug("add_child:: Children: %s", self.children)
# Keep sorting by line number
self._children = sorted(self._children, key=methodcaller('get_start_lineno'))
@property
def parent_module(self):
"""
Returns the parent module (a ``ModuleDeclaration``) for this declaration.
"""
return self.__get_parent_kind(ModuleDeclaration)
@property
def parent_class(self):
"""
Returns the parent class (a ``TypeDeclaration``) for this declaration.
"""
return self.__get_parent_kind(TypeDeclaration)
@property
def parent_method(self):
"""
Returns the parent method (a ``MethodDeclaration``) for this declaration.
"""
return self.__get_parent_kind(MethodDeclaration)
def __get_parent_kind(self, kind):
p = self.parent
while p is not None:
if isinstance(p, kind):
return p
p = p.parent
return None
@property
def bytecode(self):
"""
Returns the bytecode associated with this declaration.
"""
return self._bytecode
@bytecode.setter
def bytecode(self, value):
self._bytecode = value
@property
def code_object(self):
return self._code_object
@code_object.setter
def code_object(self, value):
self._code_object = value
def update_nested_code_object(self, original_co, new_co):
self._code_object = update_nested_code_object(self._code_object,
original_co,
new_co)
self._has_changes = True
@property
def has_changes(self):
return self._has_changes
@has_changes.setter
def has_changes(self, value):
self._has_changes = value
# Mostly reserved
@property
def bytecode_object(self):
return self._bytecode_object
@bytecode_object.setter
def bytecode_object(self, value):
self._bytecode_object = value
def accept(self, visitor):
if isinstance(visitor, BytecodeVisitor):
for i in xrange(len(self._bytecode)):
index, lineno, op, arg, cflow_in, _ = self._bytecode[i]
visitor.visit(index, op, arg=arg, lineno=lineno, cflow_in=cflow_in)
is_module = lambda self: self.kind == Declaration.MODULE
is_type = lambda self: self.kind == Declaration.TYPE
is_method = lambda self: self.kind == Declaration.METHOD
is_field = lambda self: self.kind == Declaration.FIELD
is_import = lambda self: self.kind == Declaration.IMPORT
@property
def kind(self):
return self._kind
class ImportDeclaration(Declaration):
"""
Models an import statement. It handles relatives/absolute
imports, as well as aliases.
"""
def __init__(self, code_object):
Declaration.__init__(self, Declaration.IMPORT, code_object)
self._root = None
self._aliases = None
self._live_names = None
self._dots = -1
self._star = False
@property
def star(self):
return self._star
@star.setter
def star(self, value):
self._star = value
@property
def aliases(self):
return self._aliases
@aliases.setter
def aliases(self, value):
self._aliases = value
@property
def live_names(self):
if self._live_names is None:
self._live_names = set()
for (name, alias) in self.aliases:
if alias is None:
if '.' not in name:
self._live_names.add(name)
else:
live_name = name[:name.rfind('.')]
self._live_names.add(live_name)
else:
self._live_names.add(alias)
return self._live_names
@property
def dots(self):
return self._dots
@dots.setter
def dots(self, value):
self._dots = value
@property
def root(self):
return self._root
@root.setter
def root(self, value):
self._root = value
def __eq__(self, obj):
return self.root == obj.root and self.aliases == obj.aliases and self.dots == obj.dots
def __repr__(self):
skip_import_root = False
import_buffer = ''
if self.dots > 0:
import_buffer += 'from ' + '.' * self.dots
if self.root:
import_buffer += self.root
skip_import_root = True
import_buffer += ' import '
elif self.root:
import_buffer += 'from '
else:
import_buffer += 'import '
if self.root and not skip_import_root:
import_buffer += self.root + ' import '
if self.star:
import_buffer += '*'
import_list = []
for aliased_name in self.aliases:
local_import = aliased_name[0]
if aliased_name[1]:
local_import += ' as ' + aliased_name[1]
import_list.append(local_import)
if import_list:
import_buffer += ', '.join(import_list)
return 'Import(%s)' % import_buffer
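# Illustrative sketch (not part of the original module): how live_names is derived from
# aliases by the property above. Building the declaration by hand like this is purely for
# demonstration -- in real use these objects are populated by the bytecode visitors.
def _demo_live_names():
    decl = ImportDeclaration(code_object=None)
    decl.aliases = [('os.path', None), ('json', None), ('collections', 'coll')]
    # 'os.path' without an alias only binds the top-level name 'os'; an alias always wins.
    return decl.live_names  # the set {'os', 'json', 'coll'}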
class ModuleDeclaration(Declaration):
"""
The module is the object that captures everything under one pyc file.
It contains nested classes and functions, as well as import statements.
"""
def __init__(self, module_path, code_object):
Declaration.__init__(self, Declaration.MODULE, code_object)
self._module_path = module_path
self._imports = []
self._classes = None
self._functions = None
def add_import(self, importDecl):
if importDecl not in self._imports:
self._imports.append(importDecl)
@property
def imports(self):
return self._imports
@property
def module_path(self):
return self._module_path
@property
def classes(self):
if self._classes is None:
self._classes = [ c for c in self.children if c.is_type() ]
return self._classes
@property
def functions(self):
if self._functions is None:
self._functions = [ f for f in self.children if f.is_method() ]
return self._functions
def __repr__(self):
return 'ModuleDeclaration(path=%s, co=%s)' % (self.module_path, self.code_object)
class TypeDeclaration(Declaration):
"""
Represent a class declaration. It has a name, as well as a hierarchy
(superclass). The type contains several methods and fields, and can
have nested types.
"""
def __init__(self, type_name, code_object):
Declaration.__init__(self, Declaration.TYPE, code_object)
self._type_name = type_name
self._superclasses = set()
self._methods = None
self._fields = None
self._nested_types = None
@property
def type_name(self):
"""
Returns the name of the type.
"""
return self._type_name
@property
def superclasses(self):
return self._superclasses
def add_superclass(self, type_name):
self._superclasses.add(type_name)
@property
def methods(self):
"""
Returns a list of ``MethodDeclaration`` that belong to this type.
"""
if self._methods is None:
self._methods = [ f for f in self.children if f.is_method() ]
return self._methods
@property
def fields(self):
return self._fields
@property
def nested_types(self):
"""
Returns a list of ``TypeDeclaration`` that belong to this type.
"""
if self._nested_types is None:
self._nested_types = [ c for c in self.children if c.is_type() ]
return self._nested_types
def __repr__(self):
return 'TypeDeclaration#%d(name=%s, co=%s, super=%s)' \
% (self.start_lineno, self.type_name, self.code_object, self.superclasses)
class MethodDeclaration(Declaration):
"""
The declaration of a method or a function.
"""
def __init__(self, method_name, code_object):
Declaration.__init__(self, Declaration.METHOD, code_object)
self._method_name = method_name
self._formal_parameters = []
self._body = None
self._labels = dis.findlabels(code_object.co_code)
self._nested_types = []
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = value
@property
def labels(self):
return self._labels
@property
def is_lambda(self):
return self.method_name == '<lambda>'
@property
def formal_parameters(self):
return self._formal_parameters
@formal_parameters.setter
def formal_parameters(self, value):
self._formal_parameters = value
@property
def method_name(self):
return self._method_name
@property
def nested_types(self):
return self._nested_types
def __repr__(self):
return 'MethodDeclaration#%d(name=%s, args=%s, co=%s)' \
% (self.start_lineno, self.method_name, self.formal_parameters, self.code_object)
class FieldDeclaration(Declaration):
def __init__(self, field_name, code_object):
Declaration.__init__(self, Declaration.FIELD, code_object)
self._field_name = field_name
@property
def field_name(self):
return self._field_name
|
eventsourcing/tests/test_system.py
|
johnbywater/eventsourcing
| 972 |
75105
|
from typing import List
from unittest.case import TestCase
from uuid import uuid4
from eventsourcing.application import Application
from eventsourcing.persistence import Notification
from eventsourcing.system import (
AlwaysPull,
Follower,
Leader,
NeverPull,
ProcessApplication,
Promptable,
PullGaps,
System,
)
from eventsourcing.tests.test_application_with_popo import BankAccounts
from eventsourcing.tests.test_processapplication import EmailProcess
from eventsourcing.utils import get_topic
class TestSystem(TestCase):
def test_graph(self):
system = System(
pipes=[
[
BankAccounts,
EmailProcess,
],
[Application],
]
)
self.assertEqual(len(system.nodes), 3)
self.assertEqual(system.nodes["BankAccounts"], get_topic(BankAccounts))
self.assertEqual(system.nodes["EmailProcess"], get_topic(EmailProcess))
self.assertEqual(system.nodes["Application"], get_topic(Application))
self.assertEqual(system.leaders, ["BankAccounts"])
self.assertEqual(system.followers, ["EmailProcess"])
self.assertEqual(system.singles, ["Application"])
self.assertEqual(len(system.edges), 1)
self.assertIn(
(
"BankAccounts",
"EmailProcess",
),
system.edges,
)
self.assertEqual(len(system.singles), 1)
def test_raises_type_error_not_a_follower(self):
with self.assertRaises(TypeError) as cm:
System(
pipes=[
[
BankAccounts,
Leader,
],
]
)
exception = cm.exception
self.assertEqual(
exception.args[0],
"Not a follower class: <class 'eventsourcing.system.Leader'>",
)
def test_raises_type_error_not_a_processor(self):
with self.assertRaises(TypeError) as cm:
System(
pipes=[
[
BankAccounts,
Follower,
EmailProcess,
],
]
)
exception = cm.exception
self.assertEqual(
exception.args[0],
"Not a process application class: <class 'eventsourcing.system.Follower'>",
)
def test_is_leaders_only(self):
system = System(
pipes=[
[
Leader,
ProcessApplication,
ProcessApplication,
],
]
)
self.assertEqual(list(system.leaders_only), ["Leader"])
def test_leader_class(self):
system = System(
pipes=[
[
Application,
ProcessApplication,
ProcessApplication,
],
]
)
self.assertTrue(issubclass(system.leader_cls("Application"), Leader))
self.assertTrue(issubclass(system.leader_cls("ProcessApplication"), Leader))
class TestLeader(TestCase):
def test(self):
# Define fixture that receives prompts.
class FollowerFixture(Promptable):
def __init__(self):
self.num_prompts = 0
def receive_notifications(
self, leader_name: str, notifications: List[Notification]
) -> None:
self.num_prompts += 1
# Test fixture is working.
follower = FollowerFixture()
follower.receive_notifications("", [])
self.assertEqual(follower.num_prompts, 1)
# Construct leader.
leader = Leader()
leader.lead(follower)
# Check follower receives a prompt when there are new events.
leader.notify(
[
Notification(
id=1,
originator_id=uuid4(),
originator_version=0,
topic="topic1",
state=b"",
)
]
)
self.assertEqual(follower.num_prompts, 2)
# Check follower doesn't receive prompt when no new events.
leader.save()
self.assertEqual(follower.num_prompts, 2)
class TestPullMode(TestCase):
def test_always_pull(self):
mode = AlwaysPull()
self.assertTrue(mode.chose_to_pull(1, 1))
self.assertTrue(mode.chose_to_pull(2, 1))
def test_never_pull(self):
mode = NeverPull()
self.assertFalse(mode.chose_to_pull(1, 1))
self.assertFalse(mode.chose_to_pull(2, 1))
def test_pull_gaps(self):
mode = PullGaps()
self.assertFalse(mode.chose_to_pull(1, 1))
self.assertTrue(mode.chose_to_pull(2, 1))
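# Illustrative sketch (not part of the original tests): running a System like the ones
# constructed above with a runner. SingleThreadedRunner is assumed to be importable from
# eventsourcing.system, as in the library's documentation; everything else reuses classes
# already imported in this module. The helper is not invoked by the test suite.
def _demo_run_system():
    from eventsourcing.system import SingleThreadedRunner
    system = System(pipes=[[BankAccounts, EmailProcess]])
    runner = SingleThreadedRunner(system)
    runner.start()
    try:
        accounts = runner.get(BankAccounts)  # leader application instance
        emails = runner.get(EmailProcess)    # follower processing the leader's events
        return accounts, emails
    finally:
        runner.stop()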
|