content | type
---|---|
import io
import nextcord
async def send_code_block_maybe_as_file(ctx, text):
"""
Sends a code block to the current context.
If it's too long to fit in a single message, it will
instead be sent as a file.
"""
    # Discord caps messages at 2000 characters; the surrounding code fence adds 6 more.
    if len(text) + 6 > 2000:
        file = io.StringIO(text)
        await ctx.send(file=nextcord.File(file, filename="agenda.md"))
else:
await ctx.send(f"```{text}```")
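# --- Usage sketch (not part of the original snippet): one way the helper above might be
# wired into a nextcord command. The bot setup and command name are assumptions made
# purely for illustration.
from nextcord.ext import commands

bot = commands.Bot(command_prefix="!")

@bot.command(name="agenda")
async def agenda(ctx: commands.Context):
    # Long content falls back to a file upload; short content is sent inline.
    text = "\n".join(f"- agenda item {i}" for i in range(300))
    await send_code_block_maybe_as_file(ctx, text)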
|
python
|
import itertools
import binascii
def detect_ecb(s,klen):
blocks = [s[i:i+klen] for i in range(0,len(s),klen)]
pairs = itertools.combinations(blocks,2)
score = 0
for p in pairs:
if p[0] == p[1]:
score += 1
return score > 0
def main():
    # Read the hex-encoded ciphertext candidates, one per line.
    with open('8.txt', 'r') as f:
        lines = f.read().splitlines()
for i,l in enumerate(lines):
if detect_ecb(binascii.unhexlify(l), 16):
print("Possible AES ECB mode on line: " + str(i))
main()
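# --- Minimal self-check (illustrative, not part of the challenge solution): a buffer with
# a repeated 16-byte block should be flagged, while one made of distinct blocks should not.
# The byte strings below are synthetic examples.
assert detect_ecb(b'A' * 16 + b'B' * 16 + b'A' * 16, 16)            # blocks 0 and 2 repeat
assert not detect_ecb(bytes(range(16)) + bytes(range(16, 32)), 16)  # all blocks unique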
|
python
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A minimal interface mlp module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import util
import tensorflow as tf
class MLP(base.AbstractModule, base.Transposable):
"""A Multi-Layer perceptron module."""
def __init__(self,
output_sizes,
activation=tf.nn.relu,
activate_final=False,
initializers=None,
partitioners=None,
regularizers=None,
use_bias=True,
name="mlp"):
"""Constructs an MLP module.
Args:
output_sizes: An iterable of output dimensionalities as defined in
`basic.Linear`. Output size can be defined either as number or via a
callable. In the latter case, since the function invocation is deferred
to graph construction time, the user must only ensure that entries can
be called when build is called. Each entry in the iterable defines
properties in the corresponding linear layer.
activation: An activation op. The activation is applied to intermediate
layers, and optionally to the output of the final layer.
activate_final: Boolean determining if the activation is applied to
the output of the final layer. Default `False`.
initializers: Optional dict containing ops to initialize the linear
layers' weights (with key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition the
linear layers' weights (with key 'w') or biases (with key 'b').
regularizers: Optional dict containing regularizers for the linear layers'
weights (with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
use_bias: Whether to include bias parameters in the linear layers.
Default `True`.
name: Name of the module.
Raises:
KeyError: If initializers contains any keys other than 'w' or 'b'.
KeyError: If regularizers contains any keys other than 'w' or 'b'.
ValueError: If output_sizes is empty.
TypeError: If `activation` is not callable; or if `output_sizes` is not
iterable.
"""
super(MLP, self).__init__(name=name)
if not isinstance(output_sizes, collections.Iterable):
raise TypeError("output_sizes must be iterable")
output_sizes = tuple(output_sizes)
if not output_sizes:
raise ValueError("output_sizes must not be empty")
self._output_sizes = output_sizes
self._num_layers = len(self._output_sizes)
self._input_shape = None
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
if not callable(activation):
raise TypeError("Input 'activation' must be callable")
self._activation = activation
self._activate_final = activate_final
self._use_bias = use_bias
self._instantiate_layers()
def _instantiate_layers(self):
"""Instantiates all the linear modules used in the network.
Layers are instantiated in the constructor, as opposed to the build
function, because MLP implements the Transposable interface, and the
transpose function can be called before the module is actually connected
to the graph and build is called.
Notice that this is safe since layers in the transposed module are
instantiated using a lambda returning input_size of the mlp layers, and
this doesn't have to return sensible values until the original module is
connected to the graph.
"""
with self._enter_variable_scope():
self._layers = [basic.Linear(self._output_sizes[i],
name="linear_{}".format(i),
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
use_bias=self.use_bias)
for i in xrange(self._num_layers)]
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return basic.Linear.get_possible_initializer_keys(use_bias=use_bias)
def _build(self, inputs):
"""Assembles the `MLP` and connects it to the graph.
Args:
inputs: A 2D Tensor of size `[batch_size, input_size]`.
Returns:
A 2D Tensor of size `[batch_size, output_sizes[-1]]`.
"""
self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = self._num_layers - 1
for layer_id in xrange(self._num_layers):
net = self._layers[layer_id](net)
if final_index != layer_id or self._activate_final:
net = self._activation(net)
return net
@property
def layers(self):
"""Returns a tuple containing the linear layers of the `MLP`."""
return self._layers
@property
def output_sizes(self):
return tuple([l() if callable(l) else l for l in self._output_sizes])
@property
def use_bias(self):
return self._use_bias
@property
def initializers(self):
"""Returns the intializers dictionary."""
return self._initializers
@property
def partitioners(self):
"""Returns the partitioners dictionary."""
return self._partitioners
@property
def regularizers(self):
"""Returns the regularizers dictionary."""
return self._regularizers
@property
def activation(self):
return self._activation
@property
def activate_final(self):
return self._activate_final
# Implements Transposable interface
@property
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface
def transpose(self, name=None, activate_final=None):
"""Returns transposed `MLP`.
Args:
      name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
Returns:
Matching transposed `MLP` module.
"""
if name is None:
name = self.module_name + "_transpose"
if activate_final is None:
activate_final = self.activate_final
output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
output_sizes.reverse()
return MLP(name=name,
output_sizes=output_sizes,
activation=self.activation,
activate_final=activate_final,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
use_bias=self.use_bias)
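# --- Usage sketch (not part of the Sonnet source): connecting an MLP and its transpose in
# a TF1-style graph, which is the setting this module targets. The layer sizes and the
# placeholder shape below are arbitrary assumptions.
def _example_autoencoder():
  inputs = tf.placeholder(tf.float32, shape=[None, 64])
  encoder = MLP(output_sizes=[128, 32], activate_final=True)
  latent = encoder(inputs)          # connects the module; latent has shape [None, 32]
  decoder = encoder.transpose()     # reversed input sizes -> output_sizes [128, 64]
  return decoder(latent)            # reconstruction with shape [None, 64]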
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import Counter, defaultdict
def load_fish(file):
with open(file) as f:
fish = f.read().strip()
fish = fish.split(",")
fish = [int(i) for i in fish]
return fish
def get_num_fish(fish, days):
counts = Counter(fish)
while days > 0:
        new_counts = defaultdict(int)
for k, v in counts.items():
if k == 0:
new_counts[6] += v
new_counts[8] += v
else:
new_counts[k - 1] += v
counts = new_counts
days -= 1
num_fish = sum(counts.values())
return num_fish
if __name__ == "__main__":
fish = load_fish("../data/06_input.txt")
part_one = get_num_fish(fish, 80)
print(part_one)
part_two = get_num_fish(fish, 256)
print(part_two)
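    # Illustrative check (not in the original solution): the well-known sample school
    # [3, 4, 3, 1, 2] grows to 26 fish after 18 days and 5934 after 80 days.
    sample = [3, 4, 3, 1, 2]
    assert get_num_fish(sample, 18) == 26
    assert get_num_fish(sample, 80) == 5934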
|
python
|
import chainer
import chainer.functions as F
import chainer.links as L
import inspect
import ast, gast
import itertools
from contextlib import ExitStack
from chainer_compiler.elichika.parser import config
from chainer_compiler.elichika.parser import nodes
from chainer_compiler.elichika.parser import values
from chainer_compiler.elichika.parser import functions
from chainer_compiler.elichika.parser import utils
from chainer_compiler.elichika.parser.graphs import Graph
from chainer_compiler.elichika.parser import veval_bin
from chainer_compiler.elichika.parser import veval_unary
from chainer_compiler.elichika.parser import veval_multiary
from chainer_compiler.elichika.parser import veval_aug_assign
import numpy as np
def get_ast_name_forcibly(ast):
if isinstance(ast, gast.gast.Name):
return ast.id
if isinstance(ast, gast.gast.Attribute):
return ast.attr
if isinstance(ast, str):
return ast
return ''
def return_value_or_obj(obj : 'values.Object'):
if isinstance(obj.get_value(), values.NumberValue):
return values.Object(obj.get_value())
if isinstance(obj.get_value(), values.StrValue):
return values.Object(obj.get_value())
if isinstance(obj.get_value(), values.BoolValue):
return values.Object(obj.get_value())
if isinstance(obj.get_value(), values.NoneValue):
return values.Object(obj.get_value())
if isinstance(obj.get_value(), values.TupleValue):
return values.Object(obj.get_value())
return obj
class AstContext:
def __init__(self, nast, lineno_offset : 'int', filename : 'str' = '' ):
self.nast = nast
self.lineno_offset = lineno_offset
self.lineno = self.lineno_offset
self.filename = filename
if hasattr(self.nast, 'lineno'):
self.lineno = self.nast.lineno + self.lineno_offset
def c(self, value) -> 'AstContext':
"""
get AstContext including value
"""
return AstContext(value, self.lineno_offset, filename=self.filename)
def veval_ast_attribute(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None) -> 'Attribute':
assert(isinstance(astc.nast, gast.gast.Attribute))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
from_module = True
if context is not None and context._eval_as_written_target:
from_module = False
value = veval_ast(astc.c(astc.nast.value), local_field, graph, context)
value_ref = utils.try_get_obj(value, 'attribute', lineprop)
if(value_ref is None):
utils.print_warning('Unknown or disabled attribute "{}" is accessed'.format(get_ast_name_forcibly(astc.nast.value)), lineprop)
return None
attr = value_ref.get_field().get_attribute(astc.nast.attr, graph.root_graph, False)
# property(getter)
if attr.has_obj() and isinstance(attr.get_obj().get_value(), values.FuncValue) and attr.get_obj().get_value().func.is_property:
func_value = attr.get_obj().get_value()
ret = func_value.func.vcall(func_value.module, graph, func_value.obj, functions.FunctionArgInput(), context, lineprop)
return ret
if attr.has_obj():
return attr
# if attr is not found
gotten_obj = value_ref.try_get_and_store_obj(astc.nast.attr, graph.root_graph)
if gotten_obj is not None:
return value_ref.get_field().get_attribute(astc.nast.attr, graph.root_graph, from_module)
if context is not None and context._eval_as_written_target:
return attr
# value is unknown
if value is None:
        utils.print_warning('Assigned value {} was not found'.format(get_ast_name_forcibly(astc.nast.value)), lineprop)
    else:
        utils.print_warning('Assigned value {} was not found'.format(get_ast_name_forcibly(astc.nast.attr)), lineprop)
return None
def veval_ast_assign(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.Assign))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
value = veval_ast(astc.c(astc.nast.value), local_field, graph, context)
value_obj = utils.try_get_obj(value, 'assign', lineprop)
if value is None:
if config.show_warnings:
            print('It is possible that the assigned value is invalid in L.{}'.format(astc.lineno))
return None
with context.eval_as_written_target():
targets = veval_ast(astc.c(astc.nast.targets[0]), local_field, graph, context)
if isinstance(targets, list):
# ex. a,b = (1,2)
if not isinstance(value_obj.get_value(), values.TupleValue):
# TODO fix it
assert(False) # not supported
for i in range(len(targets)):
assert(value_obj.get_value().get_constant_value() is not None)
node_assign = nodes.NodeAssign(targets[i], value_obj.get_value().get_constant_value()[i], astc.lineno)
targets[i].revise(utils.try_get_obj(value_obj.get_value().get_constant_value()[i],'assign', lineprop))
graph.add_node(node_assign)
else:
assigned_obj = return_value_or_obj(value_obj)
node_assign = nodes.NodeAssign(targets, assigned_obj, astc.lineno)
targets.revise(assigned_obj)
graph.add_node(node_assign)
def veval_ast_name(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None) -> 'Attribute':
assert(isinstance(astc.nast, gast.gast.Name))
from_module = True
if context is not None and context._eval_as_written_target:
from_module = False
ret = local_field.get_attribute(astc.nast.id, graph.root_graph, from_module=from_module)
return ret
def veval_ast_call(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None) -> 'Attribute':
assert(isinstance(astc.nast, gast.gast.Call))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
func = veval_ast(astc.c(astc.nast.func), local_field, graph, context)
if func == None or not func.has_obj():
utils.print_warning('Unknown function "{}" is called'.format(get_ast_name_forcibly(astc.nast.func)), lineprop)
return None
func_obj = utils.try_get_obj(func, 'call', lineprop)
func_value = utils.try_get_value(func, 'call', lineprop)
finput = functions.FunctionArgInput()
for arg in astc.nast.args:
arg_ = veval_ast(astc.c(arg), local_field, graph, context)
finput.inputs.append(utils.try_get_obj(arg_, 'call', lineprop))
for keyword in astc.nast.keywords:
arg_ = veval_ast(astc.c(keyword.value), local_field, graph, context)
finput.keywords[keyword.arg] = utils.try_get_obj(arg_, 'call', lineprop)
lineprop = utils.LineProperty(astc.lineno, astc.filename)
# check arguments
for o in finput.inputs:
if o is None:
            utils.print_warning('Invalid arguments exist in "{}"'.format(get_ast_name_forcibly(astc.nast.func)), lineprop)
return None
ret = None
if isinstance(func_value, values.FuncValue):
ret = func_value.func.vcall(func_value.module, graph, func_value.obj, finput, context, lineprop)
return ret
elif isinstance(func_value, values.Instance):
# __call__
call_func_ref = func_obj.try_get_and_store_obj('__call__', graph.root_graph)
if call_func_ref is not None:
func_value = call_func_ref.get_value()
ret = func_value.func.vcall(func_value.module, graph, func_obj, finput, context, lineprop)
return ret
if config.show_warnings:
print('Unknown function is called in L.{}'.format(astc.lineno))
return None
def veval_ast_return(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None) -> 'None':
assert(isinstance(astc.nast, gast.gast.Return))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
value = veval_ast(astc.c(astc.nast.value), local_field, graph, context)
value_obj = utils.try_get_obj(value, 'return', lineprop)
value_value = utils.try_get_value(value, 'return', lineprop)
if value_value is None:
if config.show_warnings:
            print('Returned value was not found in L.{}'.format(astc.lineno))
return None
node = nodes.NodeReturn(value_value,astc.lineno)
graph.add_node(node)
return value
def veval_ast_if(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.If))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
# if condition
test = veval_ast(astc.c(astc.nast.test), local_field, graph, context)
test_value = utils.try_get_value(test, 'if', lineprop)
id_str = str(utils.get_guid())
if_id = 'if_' + id_str
true_id = 'true_' + id_str
false_id = 'false_' + id_str
# True
values.push_history(true_id)
true_graph = Graph()
true_graph.root_graph = graph.root_graph
true_graph.name = 'True'
# Skip true body when the test_value is decidably False.
if test_value.has_constant_value() and test_value.internal_value == False:
true_body = []
else:
true_body = veval_ast(astc.c(astc.nast.body), local_field, true_graph, context)
true_value_inputs = values.get_inputs()
true_value_outputs = values.get_outputs()
values.pop_history()
# False
values.push_history(false_id)
false_graph = Graph()
false_graph.root_graph = graph.root_graph
false_graph.name = 'False'
# Skip false body when the test_value is decidably True.
if test_value.has_constant_value() and test_value.internal_value == True:
false_body = []
else:
false_body = veval_ast(astc.c(astc.nast.orelse), local_field, false_graph, context)
false_value_inputs = values.get_inputs()
false_value_outputs = values.get_outputs()
values.pop_history()
# generate pairs
value_pairs = {}
for v in true_value_inputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['true_input_value'] = v.input_value
value_pairs[key]['true_input_body_value'] = v.value
value_pairs[key]['true_input_obj'] = v.obj
for v in true_value_outputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['true_output_body_value'] = v.value
value_pairs[key]['true_output_obj'] = v.obj
for v in false_value_inputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['false_input_value'] = v.input_value
value_pairs[key]['false_input_body_value'] = v.value
value_pairs[key]['false_input_obj'] = v.obj
for v in false_value_outputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['false_output_body_value'] = v.value
value_pairs[key]['false_output_obj'] = v.obj
inputs = []
outputs = []
def get_input_value(v) -> "values.Value":
if 'true_input_value' in v:
return v['true_input_value']
elif 'false_input_value' in v:
return v['false_input_value']
else:
return None
def get_body_input_value(v, input_value) -> "values.Value":
if v is None:
return (None, None)
true_input_body_value = None
false_input_body_value = None
if 'true_input_body_value' in v:
true_input_body_value = v['true_input_body_value']
else:
true_input_body_value = functions.generate_value_with_same_type(input_value)
if 'false_input_body_value' in v:
false_input_body_value = v['false_input_body_value']
else:
false_input_body_value = functions.generate_value_with_same_type(input_value)
return (true_input_body_value, false_input_body_value)
# collect inputs
input_2_body_inputs = {}
for k, v in value_pairs.items():
input_value = get_input_value(v)
if input_value is None:
continue
if not (input_value in input_2_body_inputs.keys()):
body_input_value = get_body_input_value(v, input_value)
input_2_body_inputs[input_value] = body_input_value
for k, v in input_2_body_inputs.items():
inputs.append(k)
true_graph.add_input_value(v[0])
false_graph.add_input_value(v[1])
for k, v in value_pairs.items():
name = v['name']
field = v['field']
input_value = get_input_value(v)
true_input_body_value = None
false_input_body_value = None
if input_value in input_2_body_inputs.keys():
true_input_body_value = input_2_body_inputs[input_value][0]
false_input_body_value = input_2_body_inputs[input_value][1]
true_output_body_value = None
false_output_body_value = None
output_value = None
# search output value
if 'true_output_body_value' in v:
true_output_body_value = v['true_output_body_value']
if 'false_output_body_value' in v:
false_output_body_value = v['false_output_body_value']
if true_output_body_value is not None or false_output_body_value is not None:
if true_output_body_value is None:
if true_input_body_value is not None:
# e.x. not changed
true_output_body_value = true_input_body_value
else:
# e.x. make a value in false statement
true_output_body_value = functions.generate_value_with_same_type(false_output_body_value, is_dummy_value=True)
if false_output_body_value is None:
if false_input_body_value is not None:
# e.x. not changed
false_output_body_value = false_input_body_value
else:
# e.x. make a value in true statement
false_output_body_value = functions.generate_value_with_same_type(true_output_body_value, is_dummy_value=True)
# check types between true and false
true_output_body_value_type = None
false_output_body_value_type = None
if true_output_body_value is not None and true_output_body_value.is_not_none_or_any_value():
true_output_body_value_type = true_output_body_value
if false_output_body_value is not None and false_output_body_value.is_not_none_or_any_value():
false_output_body_value_type = false_output_body_value
if true_output_body_value_type is not None and false_output_body_value_type is not None and type(true_output_body_value_type) != type(false_output_body_value_type):
            utils.print_warning('Values with different types were generated for {} between the true and false branches'.format(k), lineprop)
if true_output_body_value_type != None:
output_value = functions.generate_value_with_same_type(true_output_body_value_type)
elif false_output_body_value_type != None:
output_value = functions.generate_value_with_same_type(false_output_body_value_type)
elif true_output_body_value is not None:
output_value = functions.generate_value_with_same_type(true_output_body_value)
elif false_output_body_value is not None:
output_value = functions.generate_value_with_same_type(false_output_body_value)
if output_value is not None:
outputs.append(output_value)
true_graph.add_output_value(true_output_body_value)
false_graph.add_output_value(false_output_body_value)
if 'true_output_obj' in v and not 'false_output_obj' in v:
obj = v['true_output_obj']
elif not 'true_output_obj' in v and 'false_output_obj' in v:
obj = v['false_output_obj']
elif 'true_output_obj' in v and 'false_output_obj' in v:
obj = None
else:
assert(False)
if obj is not None:
obj.revise(output_value)
field.get_attribute(name).revise(obj)
elif field.get_attribute(name).has_obj():
field.get_attribute(name).get_obj().revise(output_value)
else:
field.get_attribute(name).revise(values.Object(output_value))
node = nodes.NodeIf(test_value, inputs, true_graph, false_graph, astc.lineno)
node.set_outputs(outputs)
graph.add_node(node)
return None
def veval_ast_aug_assign(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.AugAssign))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
target = veval_ast(astc.c(astc.nast.target), local_field, graph, context)
value = veval_ast(astc.c(astc.nast.value), local_field, graph, context)
target_value = utils.try_get_value(target, 'aug_assign', lineprop)
value_value = utils.try_get_value(value, 'aug_assign', lineprop)
binop = nodes.BinOpType.Unknown
if isinstance(astc.nast.op, gast.Add):
binop = nodes.BinOpType.Add
elif isinstance(astc.nast.op, gast.Sub):
binop = nodes.BinOpType.Sub
elif isinstance(astc.nast.op, gast.Mult):
binop = nodes.BinOpType.Mul
elif isinstance(astc.nast.op, gast.Div):
binop = nodes.BinOpType.Div
elif isinstance(astc.nast.op, gast.FloorDiv):
binop = nodes.BinOpType.FloorDiv
else:
utils.print_warning('Unknown binary operator {}'.format(astc.nast.op), lineprop)
return None
node_aug_assign = nodes.NodeAugAssign(target_value, value_value, binop, astc.lineno)
graph.add_node(node_aug_assign)
new_value = veval_aug_assign.veval(binop, target_value, value_value, lineprop)
node_aug_assign.set_outputs([new_value])
utils.try_get_obj(target, 'aug_assign', lineprop).revise(new_value)
def veval_ast_expr(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
    Call a function without assigning the result.
Ex. b.x()
'''
assert(isinstance(astc.nast, gast.gast.Expr))
return veval_ast(astc.c(astc.nast.value), local_field, graph, context)
def veval_ast_subscript(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
Ex. x[1], x[y,z]
'''
assert(isinstance(astc.nast, gast.gast.Subscript))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
def veval_with_default(nast, default_value):
if nast is None:
ret = values.NumberValue(default_value)
ret.name = '@SliceDefault'
return ret
obj = veval_ast(astc.c(nast), local_field, graph, context)
return utils.try_get_value(obj, 'subscript', lineprop)
def get_slice_indices(slice):
if slice.lower is None and slice.upper is None and slice.step is None:
return []
indices = [veval_with_default(slice.lower, 0),
veval_with_default(slice.upper, utils.slice_int_max)]
if slice.step is not None:
indices.append(veval_with_default(slice.step, 1))
return indices
value = veval_ast(astc.c(astc.nast.value), local_field, graph, context)
value_value = utils.try_get_value(value, 'subscript', lineprop)
if isinstance(value_value, values.DictValue):
if isinstance(astc.nast.slice, gast.gast.Index):
slice_ = veval_ast(astc.c(astc.nast.slice.value), local_field, graph, context)
slice_value = utils.try_get_value(slice_, 'subscript', lineprop)
value_value.internal_keys[slice_value.encode()] = slice_
ret = value_value.internal_values.get_attribute(slice_value.encode())
return ret
elif isinstance(value_value, values.Instance):
if isinstance(astc.nast.slice, gast.gast.Index):
slice_ = veval_ast(astc.c(astc.nast.slice.value), local_field, graph, context)
finput = functions.FunctionArgInput()
finput.inputs.append(slice_)
value_ref = utils.try_get_obj(value, 'subscript', lineprop)
getitem_func = value_ref.get_field().get_attribute('__getitem__', graph.root_graph, False)
getitem_func_value = getitem_func.get_obj().get_value()
ret = getitem_func_value.func.vcall(getitem_func_value.module, graph, getitem_func_value.obj, finput, context, lineprop)
return ret
elif isinstance(value_value, (values.ListValue, values.TupleValue, values.TensorValue)):
if isinstance(astc.nast.slice, gast.gast.Index):
slice_ = veval_ast(astc.c(astc.nast.slice.value), local_field, graph, context)
slice_value = utils.try_get_value(slice_, 'subscript', lineprop)
if isinstance(slice_value, values.TupleValue):
# ex. x[1,2]
if slice_value.has_constant_value():
values_ = [utils.try_get_value(x, 'subscript', lineprop) for x in slice_value.get_constant_value()]
node = nodes.NodeGetItem(value_value, values_, line=lineprop)
else:
if config.show_warnings:
                        print('This subscript is not supported in L.{}'.format(astc.lineno))
node = nodes.NodeInvalid(line=lineprop)
else:
# ex. x[1]
node = nodes.NodeGetItem(value_value, [slice_value])
if isinstance(value_value, values.TensorValue):
ret_value = values.TensorValue()
else:
if value_value.vtype != None and issubclass(value_value.vtype, values.Instance):
assert value_value.has_constant_value()
assert slice_value.has_constant_value()
return value_value.internal_value[slice_value.internal_value]
elif value_value.vtype != None:
ret_value = value_value.vtype(None)
ret_value.dtype = value_value.dtype
else:
utils.print_warning("Unable to determine element type of {}. Using TensorValue as default.".format(value_value), lineprop)
ret_value = values.TensorValue()
node.set_outputs([ret_value])
graph.add_node(node)
if isinstance(value, values.Attribute):
ret_attr = value.make_subscript_attribute(slice_, graph)
ret_attr.revise(values.Object(ret_value), update_parent=False)
return ret_attr
else:
return values.Object(ret_value)
elif isinstance(astc.nast.slice, gast.gast.Slice):
indices = get_slice_indices(astc.nast.slice)
node = nodes.NodeSlice(value_value, indices, [len(indices)])
ret_value = functions.generate_value_with_same_type(value_value)
# for constant propagation, populate ret_value when possible
if value_value.has_constant_value():
if all([value.has_constant_value() for value in indices]):
start, end = (indice.internal_value for indice in indices[:2])
step = indices[2].internal_value if len(indices) == 3 else None
ret_value.internal_value = value_value.internal_value[start:end:step]
node.set_outputs([ret_value])
graph.add_node(node)
return values.Object(ret_value)
elif isinstance(astc.nast.slice, gast.gast.ExtSlice):
indices = []
slice_specs = []
for dim in astc.nast.slice.dims:
if isinstance(dim, gast.gast.Index):
indices.append(utils.try_get_value(veval_ast(astc.c(dim.value), local_field, graph, context), 'subscript', lineprop))
slice_specs.append(1)
elif isinstance(dim, gast.gast.Slice):
ni = get_slice_indices(dim)
indices.extend(ni)
slice_specs.append(len(ni))
else:
                assert False, 'Unknown slice: %s in %s' % (dim, astc.nast.slice)
node = nodes.NodeSlice(value_value, indices, slice_specs)
ret_value = functions.generate_value_with_same_type(value_value)
node.set_outputs([ret_value])
graph.add_node(node)
return values.Object(ret_value)
else:
utils.print_warning("Subscript not possible for type {}".format(type(value_value)))
return None
def veval_ast_listcomp(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
Ex. [x for x in xx]
[elt for target in iter]
'''
assert(isinstance(astc.nast, gast.gast.ListComp))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
listcomp_guid = str(utils.get_guid())
listcomp_id = 'listcomp_' + listcomp_guid
body_id = 'listcomp_body_' + listcomp_guid
internal_counter_id = '@internal/listcomp_counter_' + listcomp_guid
internal_list_id = '@internal/listcomp_list_' + listcomp_guid
internal_cond_id = '@internal/listcomp_cond_' + listcomp_guid
generator = astc.nast.generators[0]
iter_value = utils.try_get_value(veval_ast(astc.c(generator.iter), local_field, graph, context), 'generator', lineprop)
list_value = values.ListValue()
list_obj = values.Object(list_value)
node_generate_list = nodes.NodeGenerate('List', [], lineprop)
node_generate_list.set_outputs([list_value])
graph.add_node(node_generate_list)
# body
target_name = ''
if isinstance(generator.target, gast.gast.Name):
target_name = generator.target.id
else:
if config.show_warnings:
            print('This for loop is not supported in L.{}'.format(astc.lineno))
return None
counter_value = values.NumberValue(None)
counter_value.dtype = np.array(0).dtype
counter_value.name = internal_counter_id
cond_value = values.BoolValue(None)
cond_value.name = internal_cond_id
# set values with internal name
local_field.get_attribute(internal_list_id).revise(list_obj)
values.push_history(listcomp_id)
body_graph = Graph()
body_graph.root_graph = graph.root_graph
body_graph.name = 'Body_' + listcomp_guid
node_forgen = nodes.NodeForGenerator(counter_value, iter_value)
target_obj = iter_value.get_iterator()
if target_obj is None:
target_obj = values.Object(values.UnknownValue())
if config.show_warnings:
            print('unknown iterable type in L.{}'.format(lineprop))
target_value = target_obj.get_value()
node_forgen.set_outputs([target_obj.get_value()])
local_field.get_attribute(target_name).revise(target_obj)
body_graph.add_node(node_forgen)
elt = veval_ast(astc.c(astc.nast.elt), local_field, body_graph, context)
elt_obj = utils.try_get_obj(elt, 'listcomp', lineprop)
finput = functions.FunctionArgInput()
finput.inputs.append(elt_obj)
append_value = local_field.get_attribute(internal_list_id).get_obj().get_field().get_attribute('append').get_obj().get_value()
append_value.func.vcall(None, body_graph, local_field.get_attribute(internal_list_id).get_obj(), finput, context, lineprop)
value_inputs = values.get_inputs()
value_outputs = values.get_outputs()
values.pop_history()
inputs = []
outputs = []
# default input for subgraph's input
body_graph.add_input_value(counter_value)
body_graph.add_input_value(cond_value)
body_graph.add_input_value(iter_value)
# default output for subgraph's output
body_graph.add_output_value(cond_value)
body_graph.add_output_value(iter_value)
# default output
outputs.append(functions.generate_value_with_same_type(iter_value))
# generate pairs
value_pairs = {}
for v in value_inputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['input_value'] = v.input_value
value_pairs[key]['input_body_value'] = v.value
for v in value_outputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['output_body_value'] = v.value
value_pairs[key]['output_obj'] = v.obj
# remove iterator
removed_name = str(local_field.id) + '_' + target_value.name
del value_pairs[removed_name]
for k, v in value_pairs.items():
name = v['name']
field = v['field']
if 'input_body_value' in v:
inputs.append(v['input_value'])
body_graph.add_input_value(v['input_body_value'])
else:
temp_value1 = functions.generate_value_with_same_type(v['output_body_value'])
temp_value2 = functions.generate_value_with_same_type(v['output_body_value'])
inputs.append(temp_value1)
body_graph.add_input_value(temp_value2)
if 'output_body_value' in v:
body_graph.add_output_value(v['output_body_value'])
output_value = functions.generate_value_with_same_type(v['output_body_value'])
outputs.append(output_value)
if 'output_obj' in v:
obj = v['output_obj']
obj.revise(output_value)
field.get_attribute(name).revise(obj)
elif field.get_attribute(name).has_obj():
field.get_attribute(name).get_obj().revise(output_value)
else:
field.get_attribute(name).revise(values.Object(output_value))
else:
temp_value1 = v['input_body_value']
temp_value2 = functions.generate_value_with_same_type(v['input_body_value'])
body_graph.add_output_value(temp_value1)
outputs.append(temp_value2)
node = nodes.NodeListcomp(iter_value, inputs, body_graph, astc.lineno)
node.set_outputs(outputs)
graph.add_node(node)
return local_field.get_attribute(internal_list_id).get_obj()
def veval_ast_bin_op(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
"""
eval binary operation.
Ex. a + b, b // c, etc
"""
assert(isinstance(astc.nast, gast.gast.BinOp))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
left = veval_ast(astc.c(astc.nast.left), local_field, graph, context)
right = veval_ast(astc.c(astc.nast.right), local_field, graph, context)
left_value = utils.try_get_value(left, 'compare', lineprop)
right_value = utils.try_get_value(right, 'compare', lineprop)
binop = nodes.BinOpType.Unknown
if isinstance(astc.nast.op, gast.Add):
binop = nodes.BinOpType.Add
elif isinstance(astc.nast.op, gast.Sub):
binop = nodes.BinOpType.Sub
elif isinstance(astc.nast.op, gast.Mult):
binop = nodes.BinOpType.Mul
elif isinstance(astc.nast.op, gast.Div):
binop = nodes.BinOpType.Div
elif isinstance(astc.nast.op, gast.FloorDiv):
binop = nodes.BinOpType.FloorDiv
elif isinstance(astc.nast.op, gast.Mod):
binop = nodes.BinOpType.Mod
else:
utils.print_warning('Unknown binary operator {}'.format(astc.nast.op), lineprop)
return None
node_bin_op = nodes.NodeBinOp(left_value, right_value, binop, astc.lineno)
ret_value = veval_bin.veval(binop, left_value, right_value, lineprop)
node_bin_op.set_outputs([ret_value])
graph.add_node(node_bin_op)
return values.Object(ret_value)
def veval_ast_bool_op(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
"""
eval bool operations.
Ex. x and y
"""
assert(isinstance(astc.nast, gast.gast.BoolOp))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
multiaryop = nodes.MultiaryOpType.Unknown
if isinstance(astc.nast.op, gast.And):
multiaryop = nodes.MultiaryOpType.And
if isinstance(astc.nast.op, gast.Or):
multiaryop = nodes.MultiaryOpType.Or
values_list = [veval_ast(astc.c(value_), local_field, graph, context) for value_ in astc.nast.values]
values_list_value = [utils.try_get_value(value_, 'multiary', lineprop) for value_ in values_list]
node = nodes.NodeMultiaryOp(values_list_value, multiaryop)
ret_value = veval_multiary.veval(multiaryop, values_list_value)
node.set_outputs([ret_value])
graph.add_node(node)
return values.Object(ret_value)
def veval_ast_unary_op(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
"""
eval unary operation.
Ex. -xx
"""
assert(isinstance(astc.nast, gast.gast.UnaryOp))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
unaryop = nodes.UnaryOpType.Unknown
if isinstance(astc.nast.op, gast.UAdd):
unaryop = nodes.UnaryOpType.UAdd
if isinstance(astc.nast.op, gast.USub):
unaryop = nodes.UnaryOpType.USub
if isinstance(astc.nast.op, gast.Not):
unaryop = nodes.UnaryOpType.Not
operand = veval_ast(astc.c(astc.nast.operand), local_field, graph, context)
operand_value = utils.try_get_value(operand, 'unary', lineprop)
node = nodes.NodeUnaryOp(operand_value, unaryop)
ret_value = veval_unary.veval(unaryop, operand_value)
node.set_outputs([ret_value])
graph.add_node(node)
return values.Object(ret_value)
def veval_ast_compare(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
"""
eval Compare.
Ex. a >= b, a != b, a is b, etc
"""
assert(isinstance(astc.nast, gast.gast.Compare))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
left = veval_ast(astc.c(astc.nast.left), local_field, graph, context)
right = veval_ast(astc.c(astc.nast.comparators[0]), local_field, graph, context)
left_value = utils.try_get_value(left, 'compare', lineprop)
right_value = utils.try_get_value(right, 'compare', lineprop)
compare = nodes.CompareType.unknown
if isinstance(astc.nast.ops[0], gast.Eq):
compare = nodes.CompareType.Eq
if isinstance(astc.nast.ops[0], gast.NotEq):
compare = nodes.CompareType.NotEq
if isinstance(astc.nast.ops[0], gast.Is):
compare = nodes.CompareType.Is
if isinstance(astc.nast.ops[0], gast.IsNot):
compare = nodes.CompareType.IsNot
if isinstance(astc.nast.ops[0], gast.Gt):
compare = nodes.CompareType.Gt
if isinstance(astc.nast.ops[0], gast.GtE):
compare = nodes.CompareType.GtE
if isinstance(astc.nast.ops[0], gast.Lt):
compare = nodes.CompareType.Lt
if isinstance(astc.nast.ops[0], gast.LtE):
compare = nodes.CompareType.LtE
if isinstance(astc.nast.ops[0], gast.In):
compare = nodes.CompareType.In
if isinstance(astc.nast.ops[0], gast.NotIn):
compare = nodes.CompareType.NotIn
node_compare = nodes.NodeCompare(left_value, right_value, compare, astc.lineno)
# constant propagation when possible
default_value = None
if left_value.has_constant_value() and right_value.has_constant_value():
if isinstance(astc.nast.ops[0], gast.Eq):
default_value = left_value.internal_value == right_value.internal_value
if isinstance(astc.nast.ops[0], gast.NotEq):
default_value = left_value.internal_value != right_value.internal_value
if isinstance(astc.nast.ops[0], gast.Is):
default_value = left_value.internal_value is right_value.internal_value
if isinstance(astc.nast.ops[0], gast.IsNot):
default_value = left_value.internal_value is not right_value.internal_value
if isinstance(astc.nast.ops[0], gast.Gt):
default_value = left_value.internal_value > right_value.internal_value
if isinstance(astc.nast.ops[0], gast.GtE):
default_value = left_value.internal_value >= right_value.internal_value
if isinstance(astc.nast.ops[0], gast.Lt):
default_value = left_value.internal_value < right_value.internal_value
if isinstance(astc.nast.ops[0], gast.LtE):
default_value = left_value.internal_value <= right_value.internal_value
if isinstance(astc.nast.ops[0], gast.In):
default_value = left_value.internal_value in map(lambda ref: ref.get_value().internal_value, right_value.internal_value)
if isinstance(astc.nast.ops[0], gast.NotIn):
default_value = left_value.internal_value not in map(lambda ref: ref.get_value().internal_value, right_value.internal_value)
ret_value = values.BoolValue(default_value)
ret_value.name = '@{}'.format(lineprop)
node_compare.set_outputs([ret_value])
graph.add_node(node_compare)
return values.Object(ret_value)
def veval_ast_num(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
Ex. 1, 2, ...
'''
assert(isinstance(astc.nast, gast.gast.Num))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
value = values.NumberValue(astc.nast.n)
ret = values.Object(value)
name = values.create_ref_value_name_with_constant(ret)
ret.name = name
ret.get_value().name = name
return ret
def veval_ast_str(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
Ex. "str"
'''
assert(isinstance(astc.nast, gast.gast.Str))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
value = values.StrValue(astc.nast.s)
ret = values.Object(value)
name = values.create_ref_value_name_with_constant(ret)
ret.name = name
ret.get_value().name = name
return ret
def veval_ast_name_constant(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
Ex. True
'''
assert(isinstance(astc.nast, gast.gast.NameConstant))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
ret = None
if astc.nast.value == True:
ret = values.Object(values.BoolValue(True))
if astc.nast.value == False:
ret = values.Object(values.BoolValue(False))
if astc.nast.value is None:
ret = values.Object(values.NoneValue())
name = values.create_ref_value_name_with_constant(ret)
ret.name = name
ret.get_value().name = name
return ret
def veval_ast_tuple(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.Tuple))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
if context is not None and context._eval_as_written_target:
vs = []
for v in astc.nast.elts:
a_ = veval_ast(astc.c(v), local_field, graph, context=context)
vs.append(a_)
return vs
else:
vs_ref = []
vs = []
for v in astc.nast.elts:
a_ = veval_ast(astc.c(v), local_field, graph, context=context)
v_ = utils.try_get_obj(a_, 'tuple', lineprop)
if v_ is None:
utils.print_warning('Unknown tuple element {}'.format(v), lineprop)
return None
vs_ref.append(v_)
vs.append(v_.get_value())
v_.in_container = True
tuple_value = values.TupleValue(vs_ref)
node = nodes.NodeGenerate('Tuple', vs, line=lineprop)
node.set_outputs([tuple_value])
graph.add_node(node)
return values.Object(tuple_value)
def veval_ast_list(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.List))
'''
Ex. [],[x,y,z]
TODO : Initializer
'''
lineprop = utils.LineProperty(astc.lineno, astc.filename)
elts = []
for elt in astc.nast.elts:
elt_ = veval_ast(astc.c(elt), local_field, graph, context)
elt_obj = utils.try_get_obj(elt_,'list', lineprop)
elts.append(elt_obj)
node = nodes.NodeGenerate('List', [elt.get_value() for elt in elts], lineprop)
graph.add_node(node)
value = values.ListValue(elts)
node.set_outputs([value])
return values.Object(value)
def veval_ast_dict(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.Dict))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
keys = []
elts = []
for key, elt in zip(astc.nast.keys, astc.nast.values):
key_ = veval_ast(astc.c(key), local_field, graph, context)
elt_ = veval_ast(astc.c(elt), local_field, graph, context)
key_obj = utils.try_get_obj(key_, 'dict', lineprop)
elt_obj = utils.try_get_obj(elt_,'dict', lineprop)
keys.append(key_obj)
elts.append(return_value_or_obj(elt_obj))
value = values.DictValue(keys, elts)
return values.Object(value)
def veval_ast_for_unroll(astc : 'AstContext', target_name, iter_ : 'values.ListValue', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
for target in iter:
...
with unroll
'''
assert(isinstance(astc.nast, gast.gast.For))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
for element in iter_.get_constant_value():
local_field.get_attribute(target_name).revise(element)
veval_ast(astc.c(astc.nast.body), local_field, graph, context)
return None
def veval_ast_for(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
for target in iter:
...
'''
assert(isinstance(astc.nast, gast.gast.For))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
# for target in iter:
iter_ = veval_ast(astc.c(astc.nast.iter), local_field, graph, context)
input_iter_value = utils.try_get_value(iter_, 'for', lineprop)
body_iter_value = functions.generate_value_with_same_type(input_iter_value, suffix_type=functions.SuffixType.Input)
# get target name
target_name = ''
if isinstance(astc.nast.target, gast.gast.Name):
target_name = astc.nast.target.id
else:
if config.show_warnings:
            print('This for loop is not supported in L.{}'.format(astc.lineno))
return None
# unroll?
if isinstance(input_iter_value, values.ListValue) and input_iter_value.has_constant_value() and input_iter_value.dtype is None:
return veval_ast_for_unroll(astc, target_name, input_iter_value, local_field, graph, context)
for_guid = utils.get_guid()
for_id = 'for_' + str(for_guid)
body_id = 'body_' + str(for_guid)
values.push_history(for_id)
# body
body_graph = Graph()
body_graph.root_graph = graph.root_graph
body_graph.name = 'Body_' + str(for_guid)
# generate a node for input
node_input = nodes.NodeInput('input')
body_graph.add_node(node_input)
body_counter_value = values.NumberValue(None)
body_counter_value.dtype = np.array(0).dtype
body_counter_value.name = 'for_counter_' + str(for_guid)
body_cond_value = values.BoolValue(None)
body_cond_value.name = 'for_cond_' + str(for_guid)
# create a node to lookup a value from sequence
node_forgen = nodes.NodeForGenerator(body_counter_value, body_iter_value)
# generate iterator
target_obj = input_iter_value.get_iterator()
if target_obj is None:
target_obj = values.Object(values.UnknownValue())
if config.show_warnings:
            print('unknown iterable type in L.{}'.format(astc.lineno))
target_value = target_obj.get_value()
node_forgen.set_outputs([target_obj.get_value()])
target_attribute = local_field.get_attribute(target_name)
target_attribute.revise(target_obj)
body_graph.add_node(node_forgen)
# veval body
body = veval_ast(astc.c(astc.nast.body), local_field, body_graph, context)
value_inputs = values.get_inputs()
value_outputs = values.get_outputs()
break_attribute = local_field.get_attribute('#keepgoing')
if break_attribute.has_obj():
break_attribute_ref = break_attribute.get_obj()
break_attribute_value = break_attribute_ref.get_value()
else:
break_attribute_value = body_cond_value
values.pop_history()
inputs = []
outputs = []
node_input_outputs = []
# default input for subgraph's input
body_graph.add_input_value(body_counter_value)
body_graph.add_input_value(body_cond_value)
body_graph.add_input_value(body_iter_value)
# default output for subgraph's output
body_graph.add_output_value(break_attribute_value)
body_graph.add_output_value(body_iter_value)
# default output
outputs.append(functions.generate_value_with_same_type(input_iter_value))
# generate pairs
value_pairs = {}
for v in value_inputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['input_value'] = v.input_value
value_pairs[key]['input_body_value'] = v.value
for v in value_outputs:
key = str(v.field.id) + '_' + v.name
if not (key in value_pairs.keys()):
value_pairs[key] = {}
value_pairs[key]['field'] = v.field
value_pairs[key]['name'] = v.name
value_pairs[key]['output_body_value'] = v.value
value_pairs[key]['output_obj'] = v.obj
for k, v in value_pairs.items():
name = v['name']
field = v['field']
if 'input_body_value' in v:
inputs.append(v['input_value'])
body_graph.add_input_value(v['input_body_value'])
else:
temp_value1 = functions.generate_value_with_same_type(v['output_body_value'], is_dummy_value=True, suffix_type=functions.SuffixType.Dummy)
temp_value2 = functions.generate_value_with_same_type(v['output_body_value'], suffix_type=functions.SuffixType.Dummy)
inputs.append(temp_value1)
body_graph.add_input_value(temp_value2)
node_input_outputs.append(temp_value2)
if 'output_body_value' in v:
body_graph.add_output_value(v['output_body_value'])
output_value = functions.generate_value_with_same_type(v['output_body_value'])
outputs.append(output_value)
if 'output_obj' in v:
obj = v['output_obj']
obj.revise(output_value)
field.get_attribute(name).revise(obj)
elif field.get_attribute(name).has_obj():
field.get_attribute(name).get_obj().revise(output_value)
else:
field.get_attribute(name).revise(values.Object(output_value))
else:
temp_value1 = v['input_body_value']
temp_value2 = functions.generate_value_with_same_type(v['input_body_value'])
body_graph.add_output_value(temp_value1)
outputs.append(temp_value2)
node = nodes.NodeFor(input_iter_value, inputs, body_graph, body_cond_value, astc.lineno)
node.set_outputs(outputs)
node_input.set_outputs(node_input_outputs)
graph.add_node(node)
return None
def veval_ast_continue(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.Continue))
return None
def veval_ast_break(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.Break))
return None
def veval_ast_with(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.With))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
from_module = True
if context is not None and context._eval_as_written_target:
from_module = False
context.flags_cache.clear()
exit_attrs = []
for item in astc.nast.items:
item_ref = veval_ast(astc.c(item), local_field, graph, context)
exit_attr = item_ref.get_field().get_attribute('__exit__', graph.root_graph, from_module)
exit_attrs.append(exit_attr)
with ExitStack() as stack:
managers = [stack.enter_context(getattr(context, flag)(*args)) for flag, args in context.flags_cache]
if not context._ignore_branch:
veval_ast(astc.c(astc.nast.body), local_field, graph, context)
for attr in exit_attrs:
if attr.has_obj() and isinstance(attr.get_obj().get_value(), values.FuncValue):
func_value = attr.get_obj().get_value()
finput = functions.FunctionArgInput()
# Adding exception_type, exception_value & traceback dummy arguments (None)
finput.inputs.extend([values.Object(values.NoneValue())] * 3)
func_value.func.vcall(func_value.module, graph, func_value.obj, finput, context, lineprop)
def veval_ast_withitem(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.withitem))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
from_module = True
if context is not None and context._eval_as_written_target:
from_module = False
value = veval_ast(astc.c(astc.nast.context_expr), local_field, graph, context)
value_obj = utils.try_get_obj(value, 'withitem', lineprop)
enter_attr = value_obj.get_field().get_attribute('__enter__', graph.root_graph, from_module)
if enter_attr.has_obj() and isinstance(enter_attr.get_obj().get_value(), values.FuncValue):
func_value = enter_attr.get_obj().get_value()
value_obj = func_value.func.vcall(func_value.module, graph, func_value.obj, functions.FunctionArgInput(), context, lineprop)
value_obj = utils.try_get_obj(value_obj, 'withitem', lineprop)
if value is None:
if config.show_warnings:
            print('It is possible that one of the with items is invalid in L.{}'.format(astc.lineno))
return None
value_obj = return_value_or_obj(value_obj)
if astc.nast.optional_vars is not None:
with context.eval_as_written_target():
optional_vars = veval_ast(astc.c(astc.nast.optional_vars), local_field, graph, context)
node_assign = nodes.NodeAssign(optional_vars, value_obj, astc.lineno)
optional_vars.revise(value_obj)
graph.add_node(node_assign)
return value_obj
def veval_ast_lambda(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
'''
lambda x, y, z=2: ...
Note: kwonly_args are not supported
'''
assert(isinstance(astc.nast, gast.gast.Lambda))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
lambda_id = 'lambda_' + str(utils.get_guid())
values.push_history(lambda_id)
args = veval_ast(astc.c(astc.nast.args), local_field, graph, context)
func = functions.UserDefinedFunctionFromAst(astc, args, local_field)
values.pop_history()
return values.Object(values.FuncValue(func, None))
def veval_ast_arguments(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
assert(isinstance(astc.nast, gast.gast.arguments))
lineprop = utils.LineProperty(astc.lineno, astc.filename)
ret = functions.FunctionArgCollection()
argspec = inspect.FullArgSpec(astc.nast.args, astc.nast.vararg, astc.nast.kwarg,
astc.nast.defaults, astc.nast.kwonlyargs, astc.nast.kw_defaults, None)
assert not argspec.kwonlyargs, "Keyword only args are not supported"
    assert not argspec.varargs, "Variable arguments *args is not supported"
assert not argspec.varkw, "Variable keywords **kwargs is not supported"
defaults = [veval_ast(astc.c(default), local_field, graph, context) for default in argspec.defaults]
arg_list = []
for k, v in itertools.zip_longest(reversed(argspec.args), defaults):
arg_list.append((k.id, v))
# reverse the list
for k, v in reversed(arg_list):
ret.add_arg(k, v)
return ret
def veval_ast(astc : 'AstContext', local_field : 'values.Field', graph : 'Graph', context : 'functions.VEvalContext' = None):
if context is None:
context = functions.VEvalContext()
if isinstance(astc.nast, list):
ret = None
for nast_ in astc.nast:
ret = veval_ast(AstContext(nast_, astc.lineno_offset, filename=astc.filename), local_field, graph, context)
if ret is not None:
break
return ret
elif isinstance(astc.nast, gast.gast.Assign):
veval_ast_assign(astc, local_field, graph, context)
return None
elif isinstance(astc.nast, gast.gast.Attribute):
ret = veval_ast_attribute(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Call):
ret = veval_ast_call(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.BinOp):
ret = veval_ast_bin_op(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.UnaryOp):
ret = veval_ast_unary_op(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Compare):
ret = veval_ast_compare(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Return):
ret = veval_ast_return(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Name):
ret = veval_ast_name(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.AugAssign):
veval_ast_aug_assign(astc, local_field, graph, context)
elif isinstance(astc.nast, gast.gast.Expr):
veval_ast_expr(astc, local_field, graph, context)
elif isinstance(astc.nast, gast.gast.Subscript):
return veval_ast_subscript(astc, local_field, graph, context)
elif isinstance(astc.nast, gast.gast.ListComp):
return veval_ast_listcomp(astc, local_field, graph, context)
elif isinstance(astc.nast, gast.gast.If):
veval_ast_if(astc, local_field, graph, context)
return None
elif isinstance(astc.nast, gast.gast.Num):
ret = veval_ast_num(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Str):
ret = veval_ast_str(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.NameConstant):
ret = veval_ast_name_constant(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Tuple):
ret = veval_ast_tuple(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.List):
ret = veval_ast_list(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.For):
veval_ast_for(astc, local_field, graph, context)
return None
elif isinstance(astc.nast, gast.gast.Continue):
veval_ast_continue(astc, local_field, graph, context)
return None
elif isinstance(astc.nast, gast.gast.Break):
veval_ast_break(astc, local_field, graph, context)
return None
elif isinstance(astc.nast, gast.gast.BoolOp):
ret = veval_ast_bool_op(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.With):
veval_ast_with(astc, local_field, graph, context)
return None
elif isinstance(astc.nast, gast.gast.withitem):
ret = veval_ast_withitem(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Dict):
ret = veval_ast_dict(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.Lambda):
ret = veval_ast_lambda(astc, local_field, graph, context)
return ret
elif isinstance(astc.nast, gast.gast.arguments):
ret = veval_ast_arguments(astc, local_field, graph, context)
return ret
else:
if config.show_warnings:
print('Unknown AST node found: {} at L.{}'.format(type(astc.nast), astc.lineno))
|
python
|
import insightconnect_plugin_runtime
from .schema import GetAlertsInput, GetAlertsOutput, Input, Output, Component
# Custom imports below
class GetAlerts(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='get_alerts',
description=Component.DESCRIPTION,
input=GetAlertsInput(),
output=GetAlertsOutput())
def run(self, params={}):
alerts_response = self.connection.client.get_alerts(since=params.get(Input.FROM_DATE))
alerts = alerts_response.get("items", [])
for i in range(999):
if not alerts_response.get("has_more"):
break
alerts_response = self.connection.client.get_alerts(key=alerts_response.get("pages", {}).get("nextKey"))
alerts.extend(alerts_response.get("items"))
for alert in alerts:
alert['severity'] = alert['severity'].upper()
return {
Output.ALERTS: alerts
}
|
python
|
from pydantic import BaseModel
from typing import Optional
import typing as T
class Member(BaseModel):
user_id: int
nickname: str
card: T.Optional[str]
sex: str
age: int
area: str
level: str
role: T.Optional[str]
title: T.Optional[str]
# 以下是 getGroupMemberInfo 返回的更多结果
group_id: Optional[int]
join_time: Optional[int]
last_sent_time: Optional[int]
unfriendly: Optional[bool]
title_expire_time: Optional[int]
card_changeable: Optional[bool]
shut_up_timestamp: Optional[int]
class Anonymous(BaseModel):
id: int
name: str
flag: str
class Group(BaseModel):
group_id: int
group_name: str
group_memo: str
group_create_time: int
group_level: int
member_count: int
max_member_count: int
class HonorListNode(BaseModel):
user_id: int
nickname: str
avatar: str
description: Optional[str]
day_count: Optional[int]
class Honor(BaseModel):
group_id: int
current_talkative: Optional[HonorListNode]
talkative_list: Optional[T.List[HonorListNode]]
performer_list: Optional[T.List[HonorListNode]]
legend_list: Optional[T.List[HonorListNode]]
strong_newbie_list: Optional[T.List[HonorListNode]]
emotion_list: Optional[T.List[HonorListNode]]
class AtAllRemain(BaseModel):
can_at_all: bool
remain_at_all_count_for_group: int
remain_at_all_count_for_uin: int
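# Minimal usage sketch (assumes pydantic v1 behaviour, matching the imports
# above; the field values below are made-up sample data):
if __name__ == '__main__':
    member = Member(user_id=10001, nickname='alice', sex='unknown', age=20,
                    area='', level='1')
    print(member.card)    # Optional fields without a value default to None
    print(member.dict())  # serialise back to a plain dict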
|
python
|
import config as config
import utils.log as log
# import test cases
import test_api_config
import test_api_crush_map
import test_api_crush_node
import test_api_crush_rule_set
import test_api_crush_rule
import test_api_crush_type
import test_api_logs
import test_api_mon
import test_api_pool
import test_api_request
import test_api_saltkey
import test_api_server_withinCluster
import test_api_sync
import test_api_event
import test_api_osd
import test_api_cli
import logout
if __name__ == '__main__':
config_data = config.get_config()
if not config_data['auth']:
log.error('auth failed')
else:
# call test_cases
# test_api_cli.exec_test(config_data) # test_id:0
test_api_config.exec_test(config_data) # test_id:1
test_api_crush_map.exec_test(config_data) # test_id:2
test_api_crush_node.exec_test(config_data) # test_id:3
test_api_crush_rule_set.exec_test(config_data) # test_id:4
test_api_crush_rule.exec_test(config_data) # test_id:5
test_api_crush_type.exec_test(config_data) # test_id:6
test_api_logs.exec_test(config_data) # test_id:7
test_api_mon.exec_test(config_data) # test_id:8
test_api_pool.exec_test(config_data) # test_id:9
test_api_request.exec_test(config_data) # test_id:10
test_api_saltkey.exec_test(config_data) # test_id:11
test_api_server_withinCluster.exec_test(config_data) # test_id:12
test_api_sync.exec_test(config_data) # test_id:13
test_api_event.exec_test(config_data) # test_id:14
test_api_osd.exec_test(config_data) # test_id:15
logout.exec_test(config_data)
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2022 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
EPC QR Codes.
Test against issue <https://github.com/heuer/segno/issues/55>.
"""
from __future__ import absolute_import, unicode_literals
import decimal
import pytest
from segno.helpers import make_epc_qr, _make_epc_qr_data as make_epc_qr_data
@pytest.mark.parametrize('amount', [12.3,
12.30,
decimal.Decimal('12.3'),
decimal.Decimal('12.30'),
'12.3',
'12.30'])
def test_text_002(amount):
name = "François D'Alsace S.A."
iban = 'FR1420041010050500013M02606'
text = 'Client:Marie Louise La Lune'
kw = dict(name=name, iban=iban, text=text, amount=amount)
data = make_epc_qr_data(**kw)
# See. EPC069-12 Version 2.1 dtd. 9 February 2012 example 2
assert len(data) == 103
encoding = 'iso-8859-1'
d = [x.decode(encoding) for x in data.split(b'\n')]
assert 11 == len(d)
assert 'BCD' == d[0]
assert '002' == d[1]
assert '2' == d[2]
assert 'SCT' == d[3]
assert name == d[5]
assert iban == d[6]
assert 'EUR12.3' == d[7]
assert '' == d[8]
assert '' == d[9]
assert text == d[10]
qr = make_epc_qr(**kw)
assert qr
assert not qr.is_micro
assert qr.version <= 13
assert 'M' == qr.error
@pytest.mark.parametrize('expected_amount, amount', [('EUR1000', 1000),
('EUR1000', 1000.0),
('EUR2000', decimal.Decimal('2000'))])
def test_trailing_zeros(expected_amount, amount):
name = "François D'Alsace S.A."
iban = 'FR1420041010050500013M02606'
text = 'Client:Marie Louise La Lune'
kw = dict(name=name, iban=iban, text=text, amount=amount)
data = make_epc_qr_data(**kw)
assert len(data) == 103 # See. EPC069-12 Version 2.1 dtd. 9 February 2012 example 2
encoding = 'iso-8859-1'
d = [x.decode(encoding) for x in data.split(b'\n')]
assert expected_amount == d[7]
@pytest.mark.parametrize('amount', [5.0, 5, '5.00', decimal.Decimal('5.00000')])
def test_remove_dot(amount):
kw = _make_valid_kw()
kw['amount'] = amount
d = make_epc_qr_data(**kw).split(b'\n')
assert b'EUR5' == d[7]
@pytest.mark.parametrize('amount', [12.3,
12.30,
decimal.Decimal('12.3'),
decimal.Decimal('12.30'),
'12.3',
'12.30'])
def test_reference_002(amount):
name = 'Franz Mustermänn'
iban = 'DE71110220330123456789'
reference = 'RF18539007547034'
purpose = 'GDDS'
bic = 'BHBLDEHHXXX'
kw = dict(name=name,
iban=iban,
reference=reference,
bic=bic,
purpose=purpose,
amount=amount,
encoding=1)
data = make_epc_qr_data(**kw)
assert len(data) == 96 # See. EPC069-12 Version 2.1 dtd. 9 February 2012 example 1
encoding = 'utf-8'
d = [x.decode(encoding) for x in data.split(b'\n')]
assert 10 == len(d)
assert 'BCD' == d[0]
assert '002' == d[1]
assert '1' == d[2]
assert 'SCT' == d[3]
assert name == d[5]
assert iban == d[6]
assert 'EUR12.3' == d[7]
assert purpose == d[8]
assert reference == d[9]
qr = make_epc_qr(**kw)
assert qr
assert not qr.is_micro
assert qr.version <= 13
assert 'M' == qr.error
def _make_valid_kw():
return dict(name="François D'Alsace S.A.",
iban='FR1420041010050500013M02606',
text='Client:Marie Louise La Lune',
amount=12.3)
@pytest.mark.parametrize('amount', [0,
0.004,
'0.001',
'999999999.999',
9999999990.99])
def test_invalid_amount(amount):
kw = _make_valid_kw()
kw['amount'] = amount
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'amount' in str(ex.value)
with pytest.raises(ValueError) as ex:
make_epc_qr(**kw)
assert 'amount' in str(ex.value)
@pytest.mark.parametrize('bic', ['BHBLDE', # Too short
'BHBLDEHHXXXX', # Too long
'BHBLDEHHXX', # Invalid length (10): a BIC must be exactly 8 or 11 characters, nothing in between
'BHBLDEH ', # Too short after removing trailing WS
])
def test_invalid_bic(bic):
kw = _make_valid_kw()
kw['bic'] = bic
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'BIC' in str(ex.value)
with pytest.raises(ValueError) as ex:
make_epc_qr(**kw)
assert 'BIC' in str(ex.value)
def test_utf8_required():
kw = _make_valid_kw()
kw['name'] = 'Funny 😃 name'
d = make_epc_qr_data(**kw).split(b'\n')
assert b'1' == d[2]
def test_utf8_explicit():
kw = _make_valid_kw()
kw['encoding'] = 'utf-8'
kw['name'] = 'Funny 😃 name'
d = make_epc_qr_data(**kw).split(b'\n')
assert b'1' == d[2]
def test_utf8_explicit2():
kw = _make_valid_kw()
kw['encoding'] = 1
kw['name'] = 'Funny 😃 name'
d = make_epc_qr_data(**kw).split(b'\n')
assert b'1' == d[2]
@pytest.mark.parametrize('encoding', range(1, 9))
def test_valid_encoding(encoding):
kw = _make_valid_kw()
kw['name'] = 'Simple name'
kw['encoding'] = encoding
d = make_epc_qr_data(**kw).split(b'\n')
assert str(encoding).encode() == d[2]
qr = make_epc_qr(**kw)
assert qr
@pytest.mark.parametrize('encoding', [0, 9, '1', b'8', 1.0, 'shift-jis'])
def test_illegal_encoding(encoding):
kw = _make_valid_kw()
kw['encoding'] = encoding
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'encoding' in str(ex.value)
with pytest.raises(ValueError) as ex:
make_epc_qr(**kw)
assert 'encoding' in str(ex.value)
@pytest.mark.parametrize('text,reference', [('', ''), (' ', ' '),
('', None), (None, None),
(None, ' '),
])
def test_no_text_no_reference(text, reference):
kw = _make_valid_kw()
kw['text'] = text
kw['reference'] = reference
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'reference' in str(ex.value)
with pytest.raises(ValueError) as ex:
make_epc_qr(**kw)
assert 'reference' in str(ex.value)
@pytest.mark.parametrize('iban', ['DE1' + '1' * 34,
'',
None])
def test_illegal_iban(iban):
kw = _make_valid_kw()
kw['iban'] = iban
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'IBAN' in str(ex.value)
with pytest.raises(ValueError) as ex:
make_epc_qr(**kw)
assert 'IBAN' in str(ex.value)
@pytest.mark.parametrize('purpose', ['DE1', 'x', 'CDCBC'])
def test_illegal_purpose(purpose):
kw = _make_valid_kw()
kw['purpose'] = purpose
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'purpose' in str(ex.value)
with pytest.raises(ValueError) as ex:
make_epc_qr(**kw)
assert 'purpose' in str(ex.value)
@pytest.mark.parametrize('name', [None, '',
'a' * 71, # too long
])
def test_illegal_name(name):
kw = _make_valid_kw()
kw['name'] = name
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'name' in str(ex.value)
with pytest.raises(ValueError) as ex:
make_epc_qr(**kw)
assert 'name' in str(ex.value)
def test_text_too_long():
kw = _make_valid_kw()
kw['text'] = 'a' * 141
kw['reference'] = None
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'text' in str(ex.value)
def test_reference_too_long():
kw = _make_valid_kw()
kw['text'] = None
kw['reference'] = 'r' * 36
with pytest.raises(ValueError) as ex:
make_epc_qr_data(**kw)
assert 'reference' in str(ex.value)
if __name__ == '__main__':
pytest.main([__file__])
|
python
|
import rope.base.builtins
import rope.base.codeanalyze
import rope.base.pynames
from rope.base import ast, exceptions, utils
from rope.refactor import patchedast
class Scope(object):
def __init__(self, pycore, pyobject, parent_scope):
self.pycore = pycore
self.pyobject = pyobject
self.parent = parent_scope
def get_names(self):
"""Return the names defined or imported in this scope"""
return self.pyobject.get_attributes()
def get_defined_names(self):
"""Return the names defined in this scope"""
return self.pyobject._get_structural_attributes()
def get_name(self, name):
"""Return name `PyName` defined in this scope"""
if name not in self.get_names():
raise exceptions.NameNotFoundError("name %s not found" % name)
return self.get_names()[name]
def __getitem__(self, key):
"""The same as ``get_name(key)``"""
return self.get_name(key)
def __contains__(self, key):
"""The same as ``key in self.get_names()``"""
return key in self.get_names()
@utils.saveit
def get_scopes(self):
"""Return the subscopes of this scope
The returned scopes should be sorted by the order they appear.
"""
return self._create_scopes()
def lookup(self, name):
if name in self.get_names():
return self.get_names()[name]
if self.parent is not None:
return self.parent._propagated_lookup(name)
return None
def get_propagated_names(self):
"""Return the visible names of this scope
Return the names defined in this scope that are visible from
scopes containing this scope. This method returns the same
dictionary returned by `get_names()` except for `ClassScope`
which returns an empty dict.
"""
return self.get_names()
def _propagated_lookup(self, name):
if name in self.get_propagated_names():
return self.get_propagated_names()[name]
if self.parent is not None:
return self.parent._propagated_lookup(name)
return None
def _create_scopes(self):
return [
pydefined.get_scope() for pydefined in self.pyobject._get_defined_objects()
]
def _get_global_scope(self):
current = self
while current.parent is not None:
current = current.parent
return current
def get_start(self):
return self.pyobject.get_ast().lineno
def get_body_start(self):
body = self.pyobject.get_ast().body
if body:
return body[0].lineno
return self.get_start()
def get_end(self):
pymodule = self._get_global_scope().pyobject
return pymodule.logical_lines.logical_line_in(self.logical_end)[1]
@utils.saveit
def get_logical_end(self):
global_scope = self._get_global_scope()
return global_scope._scope_finder.find_scope_end(self)
start = property(get_start)
end = property(get_end)
logical_end = property(get_logical_end)
def get_kind(self):
pass
def get_region(self):
self._calculate_scope_regions_for_module()
node = self.pyobject.get_ast()
region = patchedast.node_region(node)
return region
def _calculate_scope_regions_for_module(self):
self._get_global_scope()._calculate_scope_regions()
def in_region(self, offset):
"""Checks if offset is in scope region"""
region = self.get_region()
return region[0] < offset < region[1]
class GlobalScope(Scope):
def __init__(self, pycore, module):
super(GlobalScope, self).__init__(pycore, module, None)
self.names = module._get_concluded_data()
def get_start(self):
return 1
def get_kind(self):
return "Module"
def get_name(self, name):
try:
return self.pyobject[name]
except exceptions.AttributeNotFoundError:
if name in self.builtin_names:
return self.builtin_names[name]
raise exceptions.NameNotFoundError("name %s not found" % name)
@utils.saveit
def _calculate_scope_regions(self):
source = self._get_source()
patchedast.patch_ast(self.pyobject.get_ast(), source)
def _get_source(self):
return self.pyobject.source_code
def get_names(self):
if self.names.get() is None:
result = dict(self.builtin_names)
result.update(super(GlobalScope, self).get_names())
self.names.set(result)
return self.names.get()
def get_inner_scope_for_line(self, lineno, indents=None):
return self._scope_finder.get_holding_scope(self, lineno, indents)
def get_inner_scope_for_offset(self, offset):
return self._scope_finder.get_holding_scope_for_offset(self, offset)
@property
@utils.saveit
def _scope_finder(self):
return _HoldingScopeFinder(self.pyobject)
@property
def builtin_names(self):
return rope.base.builtins.builtins.get_attributes()
class ComprehensionScope(Scope):
def __init__(self, pycore, pyobject, visitor):
super(ComprehensionScope, self).__init__(
pycore, pyobject, pyobject.parent.get_scope()
)
self.names = None
self.returned_asts = None
self.defineds = None
self.visitor = visitor
def _get_names(self):
if self.names is None:
self._visit_comprehension()
return self.names
def get_names(self):
return self._get_names()
def _visit_comprehension(self):
if self.names is None:
new_visitor = self.visitor(self.pycore, self.pyobject)
for node in ast.get_child_nodes(self.pyobject.get_ast()):
ast.walk(node, new_visitor)
self.names = dict(self.parent.get_names())
self.names.update(new_visitor.names)
self.defineds = new_visitor.defineds
def get_logical_end(self):
return self.get_start()
logical_end = property(get_logical_end)
def get_body_start(self):
return self.get_start()
class FunctionScope(Scope):
def __init__(self, pycore, pyobject, visitor):
super(FunctionScope, self).__init__(
pycore, pyobject, pyobject.parent.get_scope()
)
self.names = None
self.returned_asts = None
self.is_generator = None
self.defineds = None
self.visitor = visitor
def _get_names(self):
if self.names is None:
self._visit_function()
return self.names
def _visit_function(self):
if self.names is None:
new_visitor = self.visitor(self.pycore, self.pyobject)
for n in ast.get_child_nodes(self.pyobject.get_ast()):
ast.walk(n, new_visitor)
self.names = new_visitor.names
self.names.update(self.pyobject.get_parameters())
self.returned_asts = new_visitor.returned_asts
self.is_generator = new_visitor.generator
self.defineds = new_visitor.defineds
def _get_returned_asts(self):
if self.names is None:
self._visit_function()
return self.returned_asts
def _is_generator(self):
if self.is_generator is None:
self._get_returned_asts()
return self.is_generator
def get_names(self):
return self._get_names()
def _create_scopes(self):
if self.defineds is None:
self._visit_function()
return [pydefined.get_scope() for pydefined in self.defineds]
def get_kind(self):
return "Function"
def invalidate_data(self):
for pyname in self.get_names().values():
if isinstance(
pyname,
(rope.base.pynames.AssignedName, rope.base.pynames.EvaluatedName),
):
pyname.invalidate()
class ClassScope(Scope):
def __init__(self, pycore, pyobject):
super(ClassScope, self).__init__(pycore, pyobject, pyobject.parent.get_scope())
def get_kind(self):
return "Class"
def get_propagated_names(self):
return {}
class _HoldingScopeFinder(object):
def __init__(self, pymodule):
self.pymodule = pymodule
def get_indents(self, lineno):
return rope.base.codeanalyze.count_line_indents(self.lines.get_line(lineno))
def _get_scope_indents(self, scope):
return self.get_indents(scope.get_start())
def get_holding_scope(self, module_scope, lineno, line_indents=None):
if line_indents is None:
line_indents = self.get_indents(lineno)
current_scope = module_scope
new_scope = current_scope
while new_scope is not None and (
new_scope.get_kind() == "Module"
or self._get_scope_indents(new_scope) <= line_indents
):
current_scope = new_scope
if (
current_scope.get_start() == lineno
and current_scope.get_kind() != "Module"
):
return current_scope
new_scope = None
for scope in current_scope.get_scopes():
if scope.get_start() <= lineno:
if lineno <= scope.get_end():
new_scope = scope
break
else:
break
return current_scope
def _is_empty_line(self, lineno):
line = self.lines.get_line(lineno)
return line.strip() == "" or line.lstrip().startswith("#")
def _get_body_indents(self, scope):
return self.get_indents(scope.get_body_start())
@staticmethod
def get_holding_scope_for_offset(scope, offset):
for inner_scope in scope.get_scopes():
if inner_scope.in_region(offset):
return _HoldingScopeFinder.get_holding_scope_for_offset(
inner_scope, offset
)
return scope
def find_scope_end(self, scope):
if not scope.parent:
return self.lines.length()
end = scope.pyobject.get_ast().body[-1].lineno
scope_start = self.pymodule.logical_lines.logical_line_in(scope.start)
if scope_start[1] >= end:
# handling one-liners
body_indents = self._get_scope_indents(scope) + 4
else:
body_indents = self._get_body_indents(scope)
for l in self.logical_lines.generate_starts(
min(end + 1, self.lines.length()), self.lines.length() + 1
):
if not self._is_empty_line(l):
if self.get_indents(l) < body_indents:
return end
else:
end = l
return end
@property
def lines(self):
return self.pymodule.lines
@property
def code(self):
return self.pymodule.source_code
@property
def logical_lines(self):
return self.pymodule.logical_lines
class TemporaryScope(Scope):
"""Currently used for list comprehensions and generator expressions
These scopes do not appear in the `get_scopes()` method of their
parent scopes.
"""
def __init__(self, pycore, parent_scope, names):
super(TemporaryScope, self).__init__(
pycore, parent_scope.pyobject, parent_scope
)
self.names = names
def get_names(self):
return self.names
def get_defined_names(self):
return self.names
def _create_scopes(self):
return []
def get_kind(self):
return "Temporary"
|
python
|
import websocket
import json
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # set board mode to Broadcom
GPIO.setup(17, GPIO.OUT) # set up pin 17 TV
GPIO.setup(18, GPIO.OUT) # set up pin 18 Lights
GPIO.setup(22, GPIO.OUT) # set up pin 22 A/C
GPIO.setup(27, GPIO.OUT) # set up pin 27 Alarm
GPIO.output(17, 0) # turn off pin 17
GPIO.output(18, 0) # turn off pin 18
GPIO.output(22, 0) # turn off pin 22
GPIO.output(27, 0) # turn off pin 27
class Payload(object):
def __init__(self, j):
self.__dict__ = json.loads(j)
def ws_uri():
return "wss://smarthouseintern.azurewebsites.net/ws"
def ws_on_message(ws, msg):
message = str(Payload(msg).data).lower()
if message != "":
if "tv" in message or "television" in message:
if "open" in message or "on" in message or "opened" in message:
print("TV Opened!!")
GPIO.output(17, 1) # turn on pin 17
if "close" in message or "off" in message or "closed" in message:
print("TV Closed!!")
GPIO.output(17, 0) # turn off pin 17
if "light" in message or "lights" in message:
if "open" in message or "on" in message or "opened" in message:
print("Lights Opened!!")
GPIO.output(18, 1) # turn on pin 18
if "close" in message or "off" in message or "closed" in message:
print("Lights Closed!!")
GPIO.output(18, 0) # turn off pin 18
if "ac" in message or "air" in message or "condition" in message or "conditioner" in message:
if "open" in message or "on" in message or "opened" in message:
print("ac Opened!!")
GPIO.output(22, 1) # turn on pin 22
if "close" in message or "off" in message or "closed" in message:
print("ac Closed!!")
GPIO.output(22, 0) # turn off pin 22
if "alarm" in message or "alarms" in message:
if "open" in message or "on" in message or "opened" in message:
print("alarm Opened!!")
GPIO.output(27, 1) # turn on pin 27
if "close" in message or "off" in message or "closed" in message:
print("alarm Closed!!")
GPIO.output(27, 0) # turn off pin 27
if "all" in message or "whole" in message:
if "open" in message or "on" in message or "opened" in message:
print("All Opened!!")
GPIO.output(17, 1) # turn on pin 17
GPIO.output(18, 1) # turn on pin 18
GPIO.output(22, 1) # turn on pin 22
GPIO.output(27, 1) # turn on pin 27
if "close" in message or "off" in message or "closed" in message:
print("All Closed!!")
GPIO.output(17, 0) # turn off pin 17
GPIO.output(18, 0) # turn off pin 18
GPIO.output(22, 0) # turn off pin 22
GPIO.output(27, 0) # turn off pin 27
def ws_on_error(ws, err):
print(err)
def ws_on_open(ws):
print("### WebSocket Opened ###")
def ws_on_close(ws):
print("### WebSocket Closed ###")
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp(ws_uri(),
on_message = ws_on_message,
on_close = ws_on_close,
on_error = ws_on_error)
ws.on_open = ws_on_open
ws.run_forever()
|
python
|
from replays_fetching.replay_fetcher import ReplayFetcher
replay_fetcher = ReplayFetcher()
replays = replay_fetcher.get_replays()
for index in range(len(replays)):
print('Replay #{0}: '.format(index) + str(replays[index]))
|
python
|
# 2.3.5 Example: Range Class
class Range:
"""A class that mimic's the built-in range class."""
def __init__(self,start,stop=None,step=1):
"""Initialize a Range instance.
Semantics is similar to built-in range class.
"""
if step == 0:
raise ValueError('step cannot be 0')
if stop is None: # special case of range(n)
start,stop = 0,start # should be treated as if range(0,n)
# calculate the effective length once
self._length = max(0,(stop - start + step - 1)//step)
# need knowledge of start and step (but not stop) to support __getitem__
self._start = start
self._step = step
def __len__(self):
"""Return number of entries in the range."""
return self._length
def __getitem__(self,k):
"""Return entry at index k (using standard interpretation
if negative).
"""
if k < 0:
k += len(self) # attempt to convert negative index
if not 0 <= k < self._length:
raise IndexError('index out of range')
return self._start + k * self._step
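# Worked example of the length formula above (illustration only):
# Range(1, 10, 2) gives _length = max(0, (10 - 1 + 2 - 1) // 2) = 5,
# so the entries are 1, 3, 5, 7, 9 -- the same as built-in range(1, 10, 2).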
#----------------------------- my main function -----------------------------
import numpy as np
rg = Range(2,3.1,0.1)
print('0: length of rg is',rg.__len__())
for i in np.arange(0,1.1,0.1):
print(' ',rg.__getitem__(i))
|
python
|
# Copyright 2018 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tf_euler
class DenseLogits(object):
def __init__(self, logits_dim):
self.out_fc = tf.layers.Dense(logits_dim, use_bias=False)
def __call__(self, inputs, **kwargs):
return self.out_fc(inputs)
class PosNegLogits(object):
def __call__(self, emb, pos_emb, neg_emb):
logit = tf.matmul(emb, pos_emb, transpose_b=True)
neg_logit = tf.matmul(emb, neg_emb, transpose_b=True)
return logit, neg_logit
class CosineLogits(object):
def __call__(self, target_emb, context_emb):
normalized_x = tf.nn.l2_normalize(target_emb, axis=-1)
normalized_y = tf.nn.l2_normalize(context_emb, axis=-1)
logits = tf.reduce_sum(normalized_x * normalized_y, -1, True)
logits = logits * 5.0
return logits
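# Minimal usage sketch (TF1-style graph code, matching tf.layers above;
# the embedding shapes are made-up examples):
#   target = tf.random.normal([8, 16])
#   context = tf.random.normal([8, 16])
#   cos_logits = CosineLogits()(target, context)  # shape [8, 1], values in [-5, 5]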
|
python
|
#
# Generated with RIFLEXDynamicCalculationParametersBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class RIFLEXDynamicCalculationParametersBlueprint(MOAOBlueprint):
""""""
def __init__(self, name="RIFLEXDynamicCalculationParameters", package_path="sima/riflex", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("irregularTimeSeries","sima/riflex/IrregularTimeSeriesParameters","",True))
self.attributes.append(BlueprintAttribute("irregularResponseAnalysis","sima/riflex/IrregularResponseAnalysis","",True))
self.attributes.append(BlueprintAttribute("timeDomainProcedure","sima/riflex/TimeDomainProcedure","",True))
self.attributes.append(BlueprintAttribute("envelopeCurveSpecification","sima/riflex/EnvelopeCurveSpecification","",True))
self.attributes.append(BlueprintAttribute("displacementResponseStorage","sima/riflex/DisplacementResponseStorage","",True))
self.attributes.append(BlueprintAttribute("forceResponseStorage","sima/riflex/ForceResponseStorage","",True))
self.attributes.append(BlueprintAttribute("sumForceResponseStorage","sima/riflex/SumForceResponseStorage","",True))
self.attributes.append(BlueprintAttribute("curvatureResponseStorage","sima/riflex/CurvatureResponseStorage","",True))
self.attributes.append(BlueprintAttribute("stressStorage","sima/riflex/StressStorage","",True))
self.attributes.append(BlueprintAttribute("turbineResponseStorage","sima/riflex/TurbineResponseStorage","",True))
self.attributes.append(BlueprintAttribute("turbineBladeResponseStorage","sima/riflex/TurbineBladeResponseStorage","",True))
self.attributes.append(BlueprintAttribute("supportVesselForceStorage","sima/riflex/SupportVesselForceStorage","",True))
self.attributes.append(BlueprintAttribute("bodyForceStorage","sima/riflex/BodyForceStorage","",True))
self.attributes.append(BlueprintAttribute("hlaElementForces","sima/riflex/HLAElementForce","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("hlaImportedBodies","sima/riflex/ImportVesselItem","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("segmentLengthVariations","sima/riflex/SegmentLengthVariationItem","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("temperatureVariations","sima/riflex/DynamicTemperatureVariationItem","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("pressureVariations","sima/riflex/DynamicPressureVariationItem","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("winchVariations","sima/riflex/DynamicWinchVariationItem","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("dynamicWindChange","sima/riflex/DynamicWindChange","",True))
self.attributes.append(BlueprintAttribute("windTurbineShutdown","sima/riflex/WindTurbineShutdown","",True))
self.attributes.append(BlueprintAttribute("bladePitchFault","sima/riflex/BladePitchFault","",True))
self.attributes.append(BlueprintAttribute("boundaryChangeGroups","sima/riflex/BoundaryChangeGroup","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("visualisationResponses","sima/riflex/DynmodVisualisationResponses","",True))
self.attributes.append(BlueprintAttribute("regularWaveAnalysis","sima/riflex/RegularWaveAnalaysis","",True))
self.attributes.append(BlueprintAttribute("regularWaveLoading","sima/riflex/RegularWaveLoading","",True))
self.attributes.append(BlueprintAttribute("regularVesselMotions","sima/riflex/RegularVesselMotion","",True,Dimension("*")))
self.attributes.append(Attribute("volumeForcesScaling","number","Scaling of volume forces.",default=1.0))
self.attributes.append(Attribute("specifiedForcesScaling","number","Scaling of specified (nodal) forces.",default=1.0))
self.attributes.append(Attribute("currentVelocitiesScaling","number","Scaling of current velocities.",default=1.0))
self.attributes.append(Attribute("changeStaticLoads","boolean","Change applied static loads at the start of the dynamic analysis",default=False))
self.attributes.append(BlueprintAttribute("dynamicLoads","sima/riflex/DynamicLoads","",True))
|
python
|
# -*- coding: utf-8 -*-
from permutive.exceptions import PermutiveApiException
from .util import none_default_namedtuple
from .base import Resource
User = none_default_namedtuple('User', 'id, custom_id, properties, updated')
class UserResource(Resource):
def create(self):
"""
Creates a new user in remote
:return: User object consisting of only an id
"""
result = self.client.request('POST', '/users')
return User(**result)
def identify(self, user_id, custom_id, **properties):
"""
Associate a user to a known custom_id. This custom id can then be used to fetch a user using self.get
:param user_id: string Permutive user_id (eg: from a object returned from self.create)
:param custom_id: string or stringifyable value.
:param properties: user properties
:return: User
"""
result = self.client.request('POST', '/identify', data={
'id': user_id,
'custom_id': str(custom_id),
'properties': properties
})
return User(result.get('id'), custom_id, properties)
def create_and_identify(self, custom_id, **properties):
"""
Convenience method that calls self.create and self.identify sequentially
:param custom_id: string or stringifyable value.
:param properties: user properties
:return: User
"""
user = self.create()
if not user.id:
raise ValueError('Id was not present in the response from Permutive API')
return self.identify(user.id, custom_id, **properties)
def get(self, custom_id):
"""
Fetch a user from remote using a custom id
:param custom_id: string or stringifyable value.
:return: User|None
"""
try:
result = self.client.request('GET', '/identities/{}'.format(custom_id))
result['id'] = result.pop('user_id') # smh
result['custom_id'] = custom_id
return User(**result)
except PermutiveApiException as e:
if e.status_code == 404:
return None
else:
raise e
def update(self, custom_id, **properties):
"""
Performs a partial update of a User object on remote.
NOTE: This method overwrites existing properties
:param custom_id: string or stringifyable value.
:param properties: user properties
:return: User|None
"""
result = self.client.request('PATCH', '/identities/{}'.format(custom_id), {
'properties': properties
})
if result is True:
return User(None, custom_id, properties)
return None
def delete(self, custom_id):
"""
Deletes a User object from remote using a custom_id
:param custom_id: string or stringifyable value
:return: Boolean
"""
return self.client.request('DELETE', '/identities/{}'.format(custom_id))
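# Hypothetical usage sketch (the wiring of `Resource`/`client` is defined in
# .base, which is not shown here; the names below are illustrative only):
#   users = UserResource(client)
#   user = users.create_and_identify('customer-42', plan='premium')
#   same_user = users.get('customer-42')
#   users.delete('customer-42')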
|
python
|
# control.applications - Controller for comodit Applications entities.
# coding: utf-8
#
# Copyright 2010 Guardis SPRL, Liège, Belgium.
# Authors: Laurent Eschenauer <[email protected]>
#
# This software cannot be used and/or distributed without prior
# authorization from Guardis.
from __future__ import absolute_import
from comodit_client.api.exporter import Export
from comodit_client.api.importer import Import
from comodit_client.control.doc import ActionDoc
from comodit_client.control.exceptions import ArgumentException
from comodit_client.control.files import ApplicationFilesController
from comodit_client.control.organization_entity import OrganizationEntityController
from comodit_client.control.parameters import ApplicationParametersController
from comodit_client.control.store_helper import StoreHelper
from comodit_client.control.sync import AppSyncController
from . import completions
from comodit_client.util import prompt
from .rpmmodules import RpmModuleController
class ApplicationsController(OrganizationEntityController):
_template = "application.json"
def __init__(self):
super(ApplicationsController, self).__init__()
# sub-controllers
self._register_subcontroller(["files"], ApplicationFilesController())
self._register_subcontroller(["parameters"], ApplicationParametersController())
self._register_subcontroller(["rpm-module"], RpmModuleController())
self._register_subcontroller(["sync"], AppSyncController())
self._register(["lock"], self._lock, self._print_entity_completions)
self._register(["unlock"], self._unlock, self._print_entity_completions)
self._doc = "Applications handling."
# actions
self._register(["import"], self._import, self._print_import_completions)
self._register(["export"], self._export, self._print_export_completions)
helper = StoreHelper(self, "app")
self._register(["publish"], helper._publish, self._print_entity_completions)
self._register(["unpublish"], helper._unpublish, self._print_entity_completions)
self._register(["push"], helper._push, self._print_entity_completions)
self._register(["pull"], helper._pull, self._print_entity_completions)
self._register(["update-authorized"], helper._update_authorized, self._print_entity_completions)
self._register_action_doc(self._export_doc())
self._register_action_doc(self._import_doc())
self._register_action_doc(helper._publish_doc())
self._register_action_doc(helper._unpublish_doc())
self._register_action_doc(helper._push_doc())
self._register_action_doc(helper._pull_doc())
self._register_action_doc(self._lock_doc())
self._register_action_doc(self._unlock_doc())
self._register_action_doc(helper._update_authorized_doc())
def _get_collection(self, org_name):
return self._client.applications(org_name)
def _lock_doc(self):
return ActionDoc("lock"," <org_name> <app_name>", """
Lock disable update.""")
def _unlock_doc(self):
return ActionDoc("unlock", "<org_name> <app_name> [--force]", """
Unlock enable update.""")
def _prune_json_update(self, json_wrapper):
super(ApplicationsController, self)._prune_json_update(json_wrapper)
json_wrapper._del_field("organization")
json_wrapper._del_field("files")
json_wrapper._del_field("parameters")
# Export
def _print_export_completions(self, param_num, argv):
if param_num < 2:
self._print_entity_completions(param_num, argv)
elif param_num == 2:
completions.print_dir_completions()
def _export(self, argv):
self._options = self._config.options
app = self._get_entity(argv)
root_folder = app.name
if len(argv) > 2:
root_folder = argv[2]
export = Export(self._config.options.force)
export.export_application(app, root_folder)
def _export_doc(self):
return ActionDoc("export", "<org_name> <app_name> [<output_folder>] [--force]", """
Export application onto disk. --force option causes existing files to
be overwritten.""")
# Import
def _print_import_completions(self, param_num, argv):
if param_num < 1:
self._print_collection_completions(param_num, argv)
elif param_num == 1:
completions.print_dir_completions()
def _import(self, argv):
if len(argv) != 2:
raise ArgumentException("Wrong number of arguments")
org = self._client.get_organization(argv[0])
imp = Import(update_existing=self._config.options.update_existing)
imp.import_application(org, argv[1])
def _import_doc(self):
return ActionDoc("import", "<org_name> <src_folder> [--update-existing]", """
Import application from disk. --update-existing option causes existing entities
on server to be updated.""")
def _lock(self, argv):
app = self._get_entity(argv)
app.lock()
def _unlock(self, argv):
app = self._get_entity(argv)
if not app.locked:
print("application not locked")
elif self._config.options.force or prompt.confirm(prompt="Unlock " + app.name + " ?", resp=False):
app.unlock()
|
python
|
foods = ('yu','wa','fan','cai','tang')
for foods2 in foods:
print(foods2)
foods = ('yu','wa','fan','cai','tang')
print(foods)
foods = ('yu','wa','fan','cai','tang','xia')
print(foods)
# 4
|
python
|
import dojo.dojo as d
def test():
print(dir(d))
assert(d.test_function() is True)
|
python
|
"""
Unittests for staros plugin
Uses the mock_device.py script to test the plugin.
"""
__author__ = "dwapstra"
import unittest
from unicon import Connection
from unicon.core.errors import SubCommandFailure
class TestStarosPluginConnect(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.c = Connection(hostname='host_name',
start=['mock_device_cli --os staros --state staros_connect'],
os='staros',
username='cisco',
tacacs_password='cisco')
cls.c.connect()
def test_execute(self):
r = self.c.execute('')
self.assertEqual(r, '')
r = self.c.execute([''])
self.assertEqual(r, '')
r = self.c.execute(['']*2)
self.assertEqual(r, ['', ''])
def test_configure(self):
r = self.c.configure('test\ntest123')
self.assertEqual(r, {'test': '123', 'test123': 'abc'})
def test_truncation_add_state_pattern(self):
sm = self.c.state_machine.get_state('config')
sm.add_state_pattern(r'^(.*?)(newpattern)*#\s?$')
r = self.c.configure('test_command')
self.assertEqual(r, 'executing test command')
if __name__ == "__main__":
unittest.main()
|
python
|
# -------------------------------------------------------------------------------
# Copyright (c) 2017, Battelle Memorial Institute All rights reserved.
# Battelle Memorial Institute (hereinafter Battelle) hereby grants permission to any person or entity
# lawfully obtaining a copy of this software and associated documentation files (hereinafter the
# Software) to redistribute and use the Software in source and binary forms, with or without modification.
# Such person or entity may use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and may permit others to do so, subject to the following conditions:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimers.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
# Other than as used herein, neither the name Battelle Memorial Institute or Battelle may be used in any
# form whatsoever without the express written consent of Battelle.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# General disclaimer for use with OSS licenses
#
# This material was prepared as an account of work sponsored by an agency of the United States Government.
# Neither the United States Government nor the United States Department of Energy, nor Battelle, nor any
# of their employees, nor any jurisdiction or organization that has cooperated in the development of these
# materials, makes any warranty, express or implied, or assumes any legal liability or responsibility for
# the accuracy, completeness, or usefulness or any information, apparatus, product, software, or process
# disclosed, or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer,
# or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United
# States Government or any agency thereof, or Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by BATTELLE for the
# UNITED STATES DEPARTMENT OF ENERGY under Contract DE-AC05-76RL01830
# -------------------------------------------------------------------------------
"""
Created on Sept 22, 2020
@author: Shiva Poudel
"""""
#from shared.sparql import SPARQLManager
#from shared.glm import GLMManager
import networkx as nx
import pandas as pd
import math
import argparse
import json
import sys
import os
import importlib
import numpy as np
import time
from tabulate import tabulate
from gridappsd import GridAPPSD, topics
from gridappsd.topics import simulation_output_topic, simulation_log_topic
global G, undirected_graph, loadbreaksw, exit_flag, measid_lbs, sw_status
global logfile
def on_message(headers, message):
global exit_flag
print('\nTOPOLOGY_VALIDATOR microservice response: ' + str(message), flush=True)
print('\nTOPOLOGY_VALIDATOR microservice response: ' + str(message), file=logfile)
exit_flag = True
def start(log_file, feeder_mrid, model_api_topic):
global logfile
logfile = log_file
global G, measid_lbs, loadbreaksw, undirected_graph
print("\nTOPOLOGY_VALIDATOR starting!!!------------------------------------------------------------")
print("\nTOPOLOGY_VALIDATOR starting!!!------------------------------------------------------------", file=logfile)
gapps = GridAPPSD()
# NOTE: Use of modelType as STATIC or OPERATIONAL will change the response
message = {"modelId": feeder_mrid,
"requestType": "LOOPS",
"modelType": "OPERATIONAL",
"resultFormat": "JSON"}
out_topic = "/topic/goss.gridappsd.model-validator.topology.out"
gapps.subscribe(out_topic, on_message)
in_topic = "/topic/goss.gridappsd.model-validator.topology.in"
gapps.send(in_topic, message)
print("TOPOLOGY_VALIDATOR sent request to microservice; waiting for response\n", flush=True)
print("TOPOLOGY_VALIDATOR sent request to microservice; waiting for response\n", file=logfile)
global exit_flag
exit_flag = False
while not exit_flag:
time.sleep(0.1)
def _main():
# for loading modules
if (os.path.isdir('shared')):
sys.path.append('.')
elif (os.path.isdir('../shared')):
sys.path.append('..')
parser = argparse.ArgumentParser()
parser.add_argument("--request", help="Simulation Request")
opts = parser.parse_args()
sim_request = json.loads(opts.request.replace("\'",""))
feeder_mrid = sim_request["power_system_config"]["Line_name"]
model_api_topic = "goss.gridappsd.process.request.data.powergridmodel"
log_file = open('topology_validator.log', 'w')
start(log_file, feeder_mrid, model_api_topic)
if __name__ == "__main__":
_main()
|
python
|
import binascii
from web3.auto import w3
with open("/home/koshik/.ethereum/rinkeby/keystore/UTC--2018-06-10T05-43-22.134895238Z--9e63c0d223d9232a4f3076947ad7cff353cc1a28") as keyfile:
encrypted_key = keyfile.read()
private_key = w3.eth.account.decrypt(encrypted_key, 'koshik93')
print(binascii.b2a_hex(private_key))
|
python
|
# Copyright 2021 Ian Eborn
# A sub-class of the "SimpleThirdPersonCamera" class, providing one implementaton of
# the collision-related elements of the camera-system.
#
# Specifically, this class primarily implements the "setupCollision" and "getNearestCollision" methods,
# using Panda3D's built-in collision system.
# Panda3D importations
from panda3d.core import CollisionNode, CollisionTraverser, CollisionHandlerQueue, CollisionSegment
# Import the base-class
from SimpleThirdPersonCamera import *
# The class that implements our camera-controller
class SimpleThirdPersonCameraPandaCollision(SimpleThirdPersonCamera):
def __init__(self, tilt, intendedDistance, shoulderSideDistance, height,
adjustmentSpeed, sideSwitchSpeed,
initialShoulderSide,
ownerNodePath,
camera,
colliderRadius = 1):
# This should be set before initialising the super-class, as
# it will be used in "setupCollision" (below), which is called
# by the super-class's constructor-method.
self.colliderRadius = colliderRadius
SimpleThirdPersonCamera.__init__(self, tilt, intendedDistance, shoulderSideDistance, height,
adjustmentSpeed, sideSwitchSpeed,
initialShoulderSide,
ownerNodePath,
camera)
# Build the collision-related elements that inform the camera's behaviour
#
# This implementation uses Panda's built-in collision-system
def setupCollision(self):
# A traverser, which enacts the actual collision-detection
self.traverser = CollisionTraverser()
# We'll use a queue, since we only want the nearest collision in a given update
self.collisionQueue = CollisionHandlerQueue()
# Our collision-objects: four segments, extending backwards for the "intended distance".
self.colliderNode = CollisionNode("camera collider")
self.colliderNode.addSolid(CollisionSegment(-self.colliderRadius, -self.colliderRadius, 0,
-self.colliderRadius, -self.intendedDistance, 0))
self.colliderNode.addSolid(CollisionSegment(self.colliderRadius, -self.colliderRadius, 0,
self.colliderRadius, -self.intendedDistance, 0))
self.colliderNode.addSolid(CollisionSegment(0, -self.colliderRadius, -self.colliderRadius,
0, -self.intendedDistance, -self.colliderRadius))
self.colliderNode.addSolid(CollisionSegment(0, -self.colliderRadius, self.colliderRadius,
0, -self.intendedDistance, self.colliderRadius))
self.colliderNode.setIntoCollideMask(0)
self.colliderNode.setFromCollideMask(1)
self.collider = self.cameraBase.attachNewNode(self.colliderNode)
# Add our collision -objects and -handler to our traverser
self.traverser.addCollider(self.collider, self.collisionQueue)
# Check for a collision relevant to the camera
#
# This implementation uses Panda's built-in collision-system
def getNearestCollision(self, sceneRoot):
# Ask the traverser to check for collisions
self.traverser.traverse(sceneRoot)
# If there have been any collisions...
if self.collisionQueue.getNumEntries() > 0:
# Sort the collision-entries, which orders them from
# nearest to furthest, I believe.
self.collisionQueue.sortEntries()
# Then get the first--i.e. nearest--of them.
entry = self.collisionQueue.getEntry(0)
# Now, use the collision-position to determine how far away the
# collision occurred from the camera's base-position, and return that.
pos = entry.getSurfacePoint(sceneRoot)
diff = self.cameraBase.getPos(sceneRoot) - pos
return diff.length()
# If there were no collisions, just return the "intended distance"
return self.intendedDistance
# A method to clean up the controller's collision elements
def cleanupCollision(self):
if self.collider is not None:
self.traverser.removeCollider(self.collider)
self.collider.removeNode()
self.collider = None
self.colliderNode = None
self.traverser = None
self.collisionQueue = None
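# Minimal usage sketch (assumptions: a running Panda3D application with a
# player NodePath `playerNP`, the default `base.camera` and `render`; the
# numeric values are placeholders):
#   camController = SimpleThirdPersonCameraPandaCollision(
#       tilt=10, intendedDistance=20, shoulderSideDistance=2, height=5,
#       adjustmentSpeed=7, sideSwitchSpeed=5,
#       initialShoulderSide=1,
#       ownerNodePath=playerNP,
#       camera=base.camera)
#   # Each frame, ask how far back the camera may sit before hitting geometry:
#   distance = camController.getNearestCollision(render)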
|
python
|
from hms_workflow_platform.core.queries.base.base_query import *
class EncounterQuery(BaseQuery):
def __init__(self, site):
super().__init__()
self.adapter = self.get_adapter(site)
self.query = self.adapter.query
self._site = site
def encounter_create(self, date_obj):
date = date_obj.strftime('%Y-%m-%d')
query = ("select format_an(an) en, (modify_date || 'T' || modify_time) mdate "
"from visit "
f"where fix_visit_type_id = '1' and modify_date >= '{date}' "
"union "
"select format_vn(vn) en, (modify_date || 'T' || modify_time) mdate "
"from visit "
f"where fix_visit_type_id <> '1' and modify_date >= '{date}' "
"order by mdate")
result = self.query(query)
return result if result else None
def encounter_discharge(self, date_obj):
date = date_obj.strftime('%Y-%m-%d')
query = (
"select format_vn(vn) en, (visit.financial_discharge_date || 'T' || visit.financial_discharge_time) mdate "
"from visit "
f"where visit.financial_discharge_date >= '{date}' and visit.fix_visit_type_id != '1' "
"union "
"select format_an(an) en, (visit.financial_discharge_date || 'T' || visit.financial_discharge_time) mdate "
"from visit "
f"where visit.financial_discharge_date >= '{date}' and visit.fix_visit_type_id = '1' "
"union "
"select format_vn(vn) en, (visit.doctor_discharge_date || 'T' || visit.doctor_discharge_time) mdate "
"from visit "
f"where visit.doctor_discharge_date >= '{date}' and visit.fix_visit_type_id != '1' "
"union "
"select format_an(an) en, (visit.doctor_discharge_date || 'T' || visit.doctor_discharge_time) mdate "
"from visit "
f"where visit.doctor_discharge_date >= '{date}' and visit.fix_visit_type_id = '1' "
"union "
"select format_an(an) en, (admit.ipd_discharge_date || 'T' || admit.ipd_discharge_time) mdate "
"from admit "
f"where admit.ipd_discharge_date >= '{date}'")
result = self.query(query)
return result if result else None
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Email: [email protected]
# DateTime: 2019-08-06 22:08:14
__author__ = 'chenwx'
import json
import requests
app_url = "http://127.0.0.1:9002"
req_url = app_url + "/api/v2/local"
json_headers = {"content-type": "application/json"}
class ShowLocal(object):
"""docstring for ShowLocal"""
def __init__(self):
super(ShowLocal, self).__init__()
def post(self, content):
mess = {
"key": "c1c2",
"obj": "local",
"content": content
}
r = requests.post(req_url, data=json.dumps(mess), headers=json_headers)
print("http status--------->> %s" % r.status_code)
print(r.text)
def cmd(self, body):
content = {
"task": "cmd",
"arg": body
}
self.post(content)
def unit(self, body):
content = {
"task": "unit",
"arg": body
}
self.post(content)
def script(self, file):
content = {
"task": "script",
"arg": file
}
self.post(content)
task = ShowLocal()
task.cmd('ls /tmp')
task.cmd('uptime')
task.cmd('df -h')
task.unit('disk')
task.unit('disk_dict')
task.unit('uptime')
task.unit('uptime_dict')
task.unit('cpu')
task.unit('mem_dict')
task.script('/home/wait/code/f1.sh')
|
python
|
#42) Coded triangle numbers
#The nth term of the sequence of triangle numbers is given by, tn = (1/2)*n*(n+1); so the first ten triangle numbers are:
#1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word.
#Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?
#%% Solution
def triangle_nums(x):
n = 1
while int(1/2*n*(n+1)) <= x:
yield int(1/2*n*(n+1))
n += 1
with open("p042_words.txt", mode='r') as doc:
list_words = doc.read().replace('"', '').split(',')
list_values = [sum([ord(x)-64 for x in word]) for word in list_words]
list_triangle = [x for x in list_values if x in triangle_nums(max(list_values))]
len(list_triangle)
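#%% Sanity check (illustration): the word value of "SKY" should be
# 19 + 11 + 25 = 55, which is t10, so "SKY" must count as a triangle word.
assert sum(ord(c) - 64 for c in 'SKY') == 55
assert 55 in triangle_nums(55)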
|
python
|
# Authors: Jose C. Garcia Alanis <[email protected]>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
# -- WIP -- {
# NOTE: `channel_corrs`, `n_channels` and `channels` are expected to be
# provided by the (still to be written) preprocessing code for this script.
fig, ax = plt.subplots()
im = ax.imshow(np.median(channel_corrs, axis=0), cmap="YlGn", )
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax)
cbar.ax.set_ylabel('Channels correlation', rotation=-90, va="bottom")
# Show all ticks and label them with the respective list entries
ax.set_xticks(np.arange(n_channels), labels=channels, rotation=-90)
ax.set_yticks(np.arange(n_channels), labels=channels)
# }
|
python
|
import os
import shutil
# NOTE: Aws_Cli is assumed to be importable from elsewhere in this project;
# its module path is not shown in this snippet.
class Aws_Utils:
@staticmethod
def run_code_in_lambda(code):
file_Path = 'temp_code/code.py'
temp_Dir = 'temp_code'
zip_file = 'temp_code.zip'
def create_temp_files():
if not os.path.exists(temp_Dir):
os.mkdir(temp_Dir)
with open(file_Path, "w+") as f:
f.write(code)
def delete_temp_files():
os.remove(file_Path)
os.remove(zip_file)
os.rmdir(temp_Dir)
create_temp_files()
name = 'dynamic_code'
role = 'arn:aws:iam::244560807427:role/lambda_basic_execution'
handler = 'code.dynamic_method'
s3_bucket = 'gs-lambda-tests'
s3_key = 'dinis/lambda-using-dynamic-code.zip'
aws = Aws_Cli()
aws.lambda_delete_function(name)
aws.s3_upload_folder (temp_Dir, s3_bucket, s3_key)
aws.lambda_create_function(name, role, handler, s3_bucket, s3_key)
(result, response) = aws.lambda_invoke_function(name, {})
aws.lambda_delete_function(name)
delete_temp_files()
return result
@staticmethod
def zip_folder(root_dir):
return shutil.make_archive(root_dir, "zip", root_dir)
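# Hypothetical usage sketch: the code string passed in must define a function
# called `dynamic_method` (see the `handler` value above), for example:
#   result = Aws_Utils.run_code_in_lambda(
#       "def dynamic_method(event, context):\n"
#       "    return 'hello from lambda'\n")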
|
python
|
#
# resolution(KB, q): Given a propositional knowledge base and query, return
# whether the query can be inferred from the knowledgebase using resolution.
# The implementation is more efficient than pl_resolution in the AIMA code.
# KnowledgeBasedAgent: An abstract class that makes decisions to navigate
# through a world based on its knowledge.
#
# Compiled against Python 2.7
# Author: Stephen Bahr ([email protected])
import collections
import logic
RESULT_DEATH = 0
RESULT_GIVE_UP = 1
RESULT_WIN = 2
class GameOver(Exception):
"""A class representing the event of the game ending."""
def __init__(self, result):
"""Result is one of the RESULT constants above."""
self.result = result
# Utility functions
def normalize(clause):
return frozenset(map(str, logic.disjuncts(clause)))
def negate(literal):
if literal[0] == '~': return literal[1:]
else: return '~' + literal
def resolution(KB, alpha):
"""Apply the resolution algorithm to determine if alpha can be inferred from KB.
Args:
KB: an instance of logic.PropKB
alpha: an instance of logic.Expr
Return True if KB |- alpha
"""
# We do not want to waste effort resolving clauses of the KB against
# one another directly, we only want to resolve clauses that contain
# information derived from alpha. tainted_clauses will be the set
# we grow.
tainted_clauses = set(normalize(clause)
for clause in logic.conjuncts(logic.to_cnf(~alpha)))
KB_clauses = [normalize(clause) for clause in KB.clauses]
new = set()
while True:
# clausesWith is a map from literals to clauses containing that literal.
clausesWith = collections.defaultdict(list)
for clause in list(tainted_clauses) + KB_clauses:
for literal in clause:
clausesWith[literal].append(clause)
# For each tainted clause, add a pair of that clause and any
# tainted or KB clause that matches it (i.e. opposes on one literal).
pairs = []
for clause0 in tainted_clauses:
for literal in clause0:
for clause1 in clausesWith[negate(literal)]:
pairs.append((literal, clause0, clause1))
# Resolve all the pairs found above. If any result in None, the
# resolution is a bust (provides no new information).
# If any result in False (empty set), we have reached a contradiction
# and proven our goal.
for literal, clause0, clause1 in pairs:
result = resolve(clause0, clause1, literal)
if result is not None:
if result == set(): return True
else: new.add(frozenset(result))
# We now survey all the new clauses. In order to want to keep them,
# they must not be a superset of any already-known clause (since that
# would provide no new information).
added = False
for clause in new:
if not any(old_clause.issubset(clause)
for old_clause in list(tainted_clauses) + KB_clauses):
tainted_clauses.add(clause)
added = True
# If we have not found any new information, we've reached the end
# and cannot prove our goal (it may be True, it may be False, but we
# can't definitively say either way).
if not added: return False
def resolve(clause0, clause1, literal):
"""Resolve two clauses.
Each input clause is represented as a sequence of strings, each string being
one literal. The two clauses must be resolvable, one containing literal,
the other the negation of literal.
Args:
clause0: An arbitrary clause, containing literal.
clause1: An arbitrary clause, containing the negation of literal.
literal: A string.
Returns:
None if the two clauses also match on a different literal, because
in that case, all the resolved clauses would be equivalent to True
The empty set if the two clauses are exactly literal and not-literal,
i.e. they resolve to False
Otherwise, a frozenset of literals, the resolved clause.
"""
clause0 = set(clause0)
clause1 = set(clause1)
clause0.remove(literal)
clause1.remove(negate(literal))
if any(negate(other) in clause1 for other in clause0): return None
return clause0.union(clause1)
class KnowledgeBasedAgent:
def __init__(self):
self.KB = logic.PropKB()
def safe(self):
"""Return the set of safe locations to move to."""
raise NotImplementedError()
def not_unsafe(self):
"""Return the set of locations that can't be proven unsafe to move to."""
raise NotImplementedError()
def unvisited(self):
"""Return the set of locations that haven't yet been visited."""
raise NotImplementedError()
def choose_location(self):
"""Return the next location to explore in the search for gold."""
unvisited_locations = self.unvisited()
safe_moves = self.safe().intersection(unvisited_locations)
if safe_moves:
location = min(safe_moves)
print 'Moving to safe location', location
else:
not_unsafe_moves = self.not_unsafe().intersection(unvisited_locations)
if not_unsafe_moves:
location = min(not_unsafe_moves)
print 'Taking a risk; moving to a not-unsafe location', location
else:
print 'Nowhere left to go'
raise GameOver(RESULT_GIVE_UP)
return location
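# Hedged usage sketch of resolve() (not part of the original module); it only
# exercises the pure clause-resolution helper, using the set-of-string-literals
# clause representation defined above.
def _resolve_demo():
    # resolving (~P v Q) against (P) on literal '~P' yields (Q)
    assert resolve(frozenset(['~P', 'Q']), frozenset(['P']), '~P') == set(['Q'])
    # resolving (P) against (~P) yields the empty clause, i.e. a contradiction
    assert resolve(frozenset(['P']), frozenset(['~P']), 'P') == set()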
|
python
|
from django.db import models
from ...apps import UFDLCoreAppConfig
class LicenceQuerySet(models.QuerySet):
"""
A query-set of data-set licences.
"""
pass
class Licence(models.Model):
"""
The licence for a data-set.
"""
# The name for the licence
name = models.CharField(max_length=100)
# The URL to the licences homepage
url = models.URLField()
# The permissions of the licence
permissions = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Permission",
related_name="+")
    # The limitations of the licence
limitations = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Limitation",
related_name="+")
    # The conditions of the licence
conditions = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Condition",
related_name="+")
# The domains of the licence
domains = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Domain",
related_name="+")
objects = LicenceQuerySet.as_manager()
class Meta:
constraints = [
# Ensure that each licence has a unique name
models.UniqueConstraint(name="unique_licence_names",
fields=["name"])
]
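# Hedged usage sketch (assumes Django is configured and migrations applied;
# the licence name and URL below are illustrative):
#
#   licence, _ = Licence.objects.get_or_create(
#       name="Apache 2.0",
#       defaults={"url": "https://www.apache.org/licenses/LICENSE-2.0"},
#   )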
|
python
|
"""
This script contains all the functions related to the model
"""
import tensorflow as tf
import numpy as np
import random
from math import ceil
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, LeakyReLU, Dropout
from game import MOVES_POSSIBLE, ALL_BLOCK_POSSIBLE, GRID_SIZE_X, GRID_SIZE_Y
from tensorflow.keras.utils import to_categorical
from copy import deepcopy
EPS: float = 0.4 # probability of playing a random move
# list of all actions possible for the model
LIST_ACTIONS: [int] = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]
MUTATION_RATE: float = 0.0001
MIN_RANGE_MUTATION: float = -5.0
MAX_RANGE_MUTATION: float = 5.0
# "parent" is the rate of parents kept in the new generation
# "children" is the rate of children in the new generation
# the children rate must be less than or equal to the parent rate
# "new" is the rate of new random models in the new generation
# the sum of "parent", "children" and "new" must be equal to 1
GENERATION_PRESET: dict = {"parent": 0.4, "children": 0.4, "new": 0.2} # this preset is a model for a new generation
# model
class Model2048(Sequential):
"""
Create the main model for 2048
"""
def __init__(self):
super().__init__()
"""
# create the model
self.add(
Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
(1, 2),
padding="same",
input_shape=(GRID_SIZE_Y, GRID_SIZE_X, ALL_BLOCK_POSSIBLE),
)
)
self.add(LeakyReLU())
self.add(
Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
(2, 1),
padding="same",
)
)
self.add(LeakyReLU())
self.add(
Conv2D(ALL_BLOCK_POSSIBLE, # * MOVES_POSSIBLE
(1, 1),
padding="same",
)
)
self.add(LeakyReLU())
self.add(Flatten())
self.add(Dropout(0.2))
self.add(Dense(256))
self.add(LeakyReLU())
self.add(Dropout(0.2))
self.add(Dense(4, activation="softmax"))
self.compile(optimizer="adam", loss="huber_loss")
"""
self.add(
Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
(1, 2),
padding="same",
input_shape=(GRID_SIZE_Y, GRID_SIZE_X, ALL_BLOCK_POSSIBLE),
)
)
self.add(LeakyReLU())
self.add(
Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
(2, 1),
padding="same",
)
)
self.add(LeakyReLU())
self.add(
Conv2D(ALL_BLOCK_POSSIBLE, # * MOVES_POSSIBLE
(1, 1),
padding="same",
)
)
self.add(LeakyReLU())
self.add(Flatten())
self.add(Dropout(0.2))
self.add(Dense(256))
self.add(LeakyReLU())
self.add(Dropout(0.2))
self.add(Dense(4, activation="softmax"))
self.compile(optimizer="RMSprop", loss="huber_loss")
"""
self.add(
Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
(1, 2),
padding="same",
input_shape=(GRID_SIZE_Y, GRID_SIZE_X, 1),
)
)
self.add(LeakyReLU())
self.add(
Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
(2, 1),
padding="same",
)
)
self.add(LeakyReLU())
self.add(
Conv2D(ALL_BLOCK_POSSIBLE, # * MOVES_POSSIBLE
(1, 1),
padding="same",
)
)
self.add(LeakyReLU())
self.add(Flatten())
self.add(Dropout(0.2))
self.add(Dense(256))
self.add(LeakyReLU())
self.add(Dropout(0.2))
self.add(Dense(4, activation="softmax"))
self.compile(optimizer="adam", loss="huber_loss")
"""
def save_model(self, path: str) -> None:
"""
        This function saves the model as an h5 file
:param path: the path where to save the model
:return: None
"""
self.save(path)
def load_performance(self, path) -> None:
"""
        This function will load the weights of a saved model;
        loading only the weights is lighter than replacing this instance via tf.keras.load_model
:param path: the path of the model to load
:return: None
"""
        _m = tf.keras.models.load_model(path)
        _w = _m.get_weights()
self.set_weights(_w)
# @tf.function
def model_action(self, grid):
"""
        This function returns the model's output for a given grid
        :param grid: a 2048 grid
        :return: the output (action probabilities) of the model
"""
return self(np.array([grid_to_input(grid)], dtype=np.float32), training=False).numpy()[0]
def take_action(self, grid, eps: float = EPS):
"""
        This function will sometimes take a random action and sometimes a model action
        :param grid: a 2048 grid
        :param eps: probability of playing a random move
        :return: the action taken
"""
if random.random() < eps:
# take random action
action = deepcopy(random.choice(LIST_ACTIONS))
else:
            # let the model choose an action
action = self.model_action(grid)
"""
returned_list = [0, 0, 0, 0]
returned_list[np.argmax(action)] = 1
return returned_list"""
return action
def normalization(x, min_range, max_range):
"""
Normalization function
:param x: List of value to normalize
:param min_range: Minimum range for norm
:param max_range: Maximum range for norm
:return: array normalize
"""
x_max = max(x.flatten().tolist())
x_min = min(x.flatten().tolist())
norm = min_range + ((x - x_min) * (max_range - min_range)) / (x_max - x_min)
return norm
def grid_to_input(grid):
"""
This function transform the grid to a model input
:param grid: a 2048 grid
:return: the input for the model
"""
# MULTI LAYER PERCEPTION
# transform to categorical
grid = to_categorical(np.log2(grid + 1) - 1, 18).tolist()
# remove 0
for y in range(4):
for x in range(4):
del grid[y][x][-1]
return np.array(grid)
"""
# ONE LAYER PERCEPTION
grid = grid * 2
grid[grid == 0] = 2
grid = np.log2(grid)
grid -= 1
grid = normalization(grid, 0, 1)
grid = np.reshape(grid, grid.shape + (1, ))
return grid
"""
# genetic algorithm
def model_crossover(parent1_weight: list, parent2_weight: list) -> list:
"""
    This function makes a crossover of two models
    :param parent1_weight: the weights of the first model
    :param parent2_weight: the weights of the second model
    :return: new weights from a crossover of the two parents
"""
new_weight: list = []
    # get the shapes of the weights
shapes: [tuple] = [a.shape for a in parent1_weight]
# flatten weight
genes1: np.array = np.concatenate([a.flatten() for a in parent1_weight])
genes2: np.array = np.concatenate([a.flatten() for a in parent2_weight])
# create the split coordinate
split = random.randint(0, len(genes1) - 1)
# make the crossover from the two parents
child1_genes = np.array(genes1[0:split].tolist() + genes2[split:].tolist())
# give the good shape to the weight of the child
index = 0
for shape in shapes:
size = np.product(shape)
new_weight.append(child1_genes[index: index + size].reshape(shape))
index += size
return new_weight
def model_mutation(model_weight: list,
mutation_rate: float = MUTATION_RATE,
min_range_mutation: float = MIN_RANGE_MUTATION,
max_range_mutation: float = MAX_RANGE_MUTATION) -> list:
"""
    This function adds some mutations to the model weights
    :param model_weight: model weights where mutations will be added
    :param mutation_rate: the probability of a mutation (1 = 100%)
    :param min_range_mutation: the minimum value of a random mutation
    :param max_range_mutation: the maximum value of a random mutation
    :return: the model weights with mutations
    """
    # get the shapes of the weights
shapes: [tuple] = [a.shape for a in model_weight]
# flatten weight
genes: np.array = np.concatenate([a.flatten() for a in model_weight])
# create mutation
for i in range(len(genes)):
if random.uniform(0, 1) < mutation_rate:
genes[i] = random.uniform(min_range_mutation, max_range_mutation)
new_weight: list = []
# give the good shape to the muted weight
index = 0
for shape in shapes:
size = np.product(shape)
        new_weight.append(genes[index: index + size].reshape(shape))
        index += size
return new_weight
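# Hedged sketch of model_crossover / model_mutation on dummy weights (pure
# numpy, no Keras model required); the layer shapes below are illustrative.
def _genetic_ops_demo():
    parent1 = [np.ones((2, 3)), np.zeros(3)]
    parent2 = [np.full((2, 3), 2.0), np.ones(3)]
    # the child keeps the layer shapes of its parents
    child = model_crossover(parent1, parent2)
    assert [w.shape for w in child] == [(2, 3), (3,)]
    # mutation also preserves shapes while randomly perturbing some genes
    mutated = model_mutation(child, mutation_rate=0.5)
    assert [w.shape for w in mutated] == [(2, 3), (3,)]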
def new_generation(all_gen_weight: list,
all_gen_score: [int],
generation_preset: dict = None) -> list:
"""
    this function returns a new generation built from an older generation
    :param all_gen_weight: a list that contains all models' weights (should be a list of lists of arrays);
                           you must get the weights of all models
    :param all_gen_score: a list that contains the score of each model (should be a list of int)
                          warning: indices of all_gen_weight must correspond to indices of all_gen_score
    :param generation_preset: the preset for the generation
    :return: a new generation built from the older generation
    """
    # set the generation preset to default if the parameter is None
if generation_preset is None:
generation_preset = GENERATION_PRESET
    # sort the scores from the biggest to the smallest
best_all_gen_score = sorted(all_gen_score, reverse=True)
# create a list that store best model
best_models: list = []
# select best model
for i in range(ceil(len(all_gen_weight) * generation_preset["parent"])):
# get the index of the maximum score in the list
index_best: int = all_gen_score.index(best_all_gen_score[i])
# add the best model to the list of best model
best_models.append(all_gen_weight[index_best])
# create children
children_models: list = []
for i in range(ceil(len(all_gen_weight) * generation_preset["children"])):
children_models.append(
model_crossover(best_models[i], best_models[i - 1])
)
# create mutation
parent_children_list: list = best_models + children_models
for i in range(len(parent_children_list)):
parent_children_list[i] = model_mutation(parent_children_list[i])
# add random model
random_models: list = []
for i in range(ceil(len(all_gen_weight) * generation_preset["new"])):
_temp_m = Model2048()
_temp_w = _temp_m.get_weights()
random_models.append(_temp_w)
# create the full new gen
new_gen: list = parent_children_list + random_models
return new_gen
if __name__ == '__main__':
m = Model2048()
m.summary()
|
python
|
from Templates.make_excel import make2exel
import Main.Tenderplan.participants2excel as p2ex
import openpyxl
excel_name = r'E:\Лиды_экспорт.xlsx'
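# The Excel path above is 'Лиды_экспорт.xlsx' ('Leads export'); the column
# headers below translate as: 'Название компании' = 'Company name',
# 'ИНН' = 'Taxpayer ID (INN)', 'Список выигранных тендеров' = 'List of won
# tenders', 'Список остальных тендеров c участием' = 'List of other tenders
# participated in'.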
make2exel(
['Название компании', 'ИНН', 'Список выигранных тендеров', 'Список остальных тендеров c участием'], excel_name)
wb = openpyxl.load_workbook(excel_name)
sheet = wb.active
participants_dict_list = p2ex.get_participants()
curr_row = 1
for x in range(len(participants_dict_list)):
curr_row += 1
print(participants_dict_list[x]['company'])
sheet.cell(row=curr_row, column=1).value = participants_dict_list[x]['company']
part_row = win_row = curr_row
for t in range(len(participants_dict_list[x]['tender_name'])):
if list(participants_dict_list[x]['tender_name'][t].values())[0] == 'par':
sheet.cell(row=part_row, column=4).value = (list(participants_dict_list[x]['tender_name'][t].keys())[0])
part_row += 1
else:
sheet.cell(row=win_row, column=3).value = (list(participants_dict_list[x]['tender_name'][t].keys())[0])
win_row += 1
curr_row += 1
# if t > len(participants_dict_list[x]['tender_name']):
# curr_row += 1
wb.save(excel_name)
|
python
|
#!/usr/bin/env python
from HTMLParser import HTMLParser
import re
import os
import sys
import string
class Html2MarkdownParser(HTMLParser):
def __init__(self):
self._markdown = ''
self._tag_stack = []
self._tag_attr_data = {}
self._handled_tag_body_data = ''
self._convertible_tags = ['a',
'b', 'blockquote',
'em',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr',
'ol',
'p', 'pre',
'strong',
'ul']
# FIXME: special characters
HTMLParser.__init__(self)
def _append_to_markdown(self, new_markdown):
if len(self._markdown) > 1:
if re.match('\s', self._markdown[-1:]):
self._markdown += new_markdown
else:
self._markdown += ' ' + new_markdown
else:
self._markdown += new_markdown
# <a />
def handle_start_a(self, attrs):
self._tag_attr_data = dict(attrs)
def handle_end_a(self):
a_tag = ''
a_tag += '[' + self._handled_tag_body_data + ']'
a_tag += '(' + self._tag_attr_data.get('href')
title = self._tag_attr_data.get('title')
if title:
a_tag += ' "' + title + '") '
else:
a_tag += ') '
self._append_to_markdown(a_tag)
# <b />
def handle_end_b(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('*' + self._handled_tag_body_data + '*')
# <blockquote />
def handle_end_blockquote(self):
blockquote_body = self._handled_tag_body_data.split(os.linesep)
for blockquote_line in blockquote_body:
blockquote_line = blockquote_line.strip()
self._append_to_markdown('> ' + blockquote_line + os.linesep)
# <em />
def handle_end_em(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('*' + self._handled_tag_body_data + '*')
# <h1 />
def handle_end_h1(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('# ' + self._handled_tag_body_data + ' #' + os.linesep)
# <h2 />
def handle_end_h2(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('## ' + self._handled_tag_body_data + ' ##' + os.linesep)
# <h3 />
def handle_end_h3(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('### ' + self._handled_tag_body_data + ' ###' + os.linesep)
# <h4 />
def handle_end_h4(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('#### ' + self._handled_tag_body_data + ' ####' + os.linesep)
# <h5 />
def handle_end_h5(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('##### ' + self._handled_tag_body_data + ' #####' + os.linesep)
# <h6 />
def handle_end_h6(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('###### ' + self._handled_tag_body_data + ' ######' + os.linesep)
# <hr />
def handle_start_hr(self, attrs):
self._append_to_markdown('* * *' + os.linesep)
# <li />
def handle_end_li(self):
if len(self._tag_stack):
if self._tag_stack[-1] == 'ol':
self._append_to_markdown('1. ' + self._handled_tag_body_data + os.linesep)
elif self._tag_stack[-1] == 'ul':
self._append_to_markdown('* ' + self._handled_tag_body_data + os.linesep)
# <p />
def handle_start_p(self, attrs):
if len(self._markdown) > 1:
if self._markdown[-2:] == '%s%s' % (os.linesep, os.linesep):
pass
elif self._markdown[-1:] == os.linesep:
self._markdown += os.linesep
else:
self._markdown += os.linesep + os.linesep
def handle_end_p(self):
self._markdown += '%s%s' % (os.linesep, os.linesep)
# <pre />
def handle_end_pre(self):
code_lines = self._handled_tag_body_data.split(os.linesep)
for code_line in code_lines:
code_line = code_line.strip()
self._append_to_markdown(' ' + code_line + os.linesep)
# <strong />
def handle_end_strong(self):
self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
self._append_to_markdown('**' + self._handled_tag_body_data + '**')
## ###
def handle_starttag(self, tag, attrs):
self._tag_stack.append(tag)
try:
eval('self.handle_start_' + tag + '(attrs)')
except AttributeError, e:
pass
def handle_endtag(self, tag):
self._tag_stack.pop()
try:
eval('self.handle_end_' + tag + '()')
# Collapse three successive CRs into two before moving on
while len(self._markdown) > 2 and \
self._markdown[-3:] == '%s%s%s' % (os.linesep, os.linesep, os.linesep):
self._markdown = self._markdown[:-3] + '%s%s' % (os.linesep, os.linesep)
except AttributeError, e:
pass
self._tag_attr_data = {}
self._handled_tag_body_data = ''
def handle_data(self, data):
data = os.linesep.join(map(string.strip, data.strip().split(os.linesep)))
if len(self._tag_stack) and self._tag_stack[-1] not in ['p']:
self._handled_tag_body_data += data
else:
self._append_to_markdown(data)
def get_markdown(self):
return self._markdown.rstrip() + '\n'
def main():
p = Html2MarkdownParser()
buf = sys.stdin.read()
p.feed(buf)
p.close()
print p.get_markdown()
if __name__ == "__main__":
sys.exit(main())
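# Hedged usage sketch (Python 2, matching the HTMLParser import above); the
# HTML snippet is illustrative:
#
#   p = Html2MarkdownParser()
#   p.feed('<h1>Title</h1><p>Some <strong>bold</strong> text.</p>')
#   p.close()
#   markdown = p.get_markdown()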
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
def pe5(n=20):
"""
What is the smallest number divisible by each of the numbers 1 to 20?
>>> pe5()
232792560
"""
if n < 2:
return 1
p = 2
m = [2]
for x in range(3, n + 1):
for y in m:
if not x % y:
x //= y
if x > 1:
m.append(x)
p *= x
return p
if __name__ == "__main__":
import doctest
doctest.testmod()
try:
while True:
s = input('> ')
n = int(s)
print(pe5(n))
except (SyntaxError, EOFError, KeyboardInterrupt, NameError):
pass
|
python
|
import aiohttp
import argparse
import asyncio
import ssl
from urllib.parse import urlsplit
from bs4 import BeautifulSoup
from sitemap.utils import write_text_sitemap, clean_link
# Have these at global scope so they remain shared.
urls = []
results = []
def sitemap(url, verbose=False):
""" Main mapping function.
Clears old results, adds the starting url to the pool of urls,
creates and runs an event loop, writes out if necessary.
"""
if len(results) > 0:
del results[:]
urls.append(url)
loop = asyncio.get_event_loop()
if loop.is_closed():
loop = asyncio.new_event_loop()
loop.run_until_complete(asyncio.ensure_future(crawler(urls, results, verbose)))
return results
async def crawler(urls, results, verbose):
""" Crawls urls that aren't already in the results list """
while len(urls) > 0:
await asyncio.gather(*(asyncio.ensure_future(crawl(url, verbose)) for url in urls if url not in results))
async def crawl(url, verbose):
""" Moves current url from urls pool to results,
gets, cleans & parses html content for new urls,
appends new urls to urls pool.
"""
results.append(url)
try:
urls.remove(url)
except ValueError:
pass
try:
async with aiohttp.ClientSession() as session:
async with session.request(url=url, method='GET') as response:
if response.content_type == 'text/html':
content = await response.read()
clean_content(content, url, verbose)
except ssl.SSLError as e:
pass
except aiohttp.ClientError as e:
pass
def clean_content(content, url, verbose):
""" Parse a webpage for links """
soup = BeautifulSoup(content, 'html.parser')
domain = "{0.scheme}://{0.netloc}".format(urlsplit(url))
for link in [h.get('href') for h in soup.find_all('a')]:
link = clean_link(link, domain)
if link is not None:
if link not in urls and link not in results:
urls.append(link)
if verbose:
print(link)
def main():
parser = argparse.ArgumentParser() # pragma: no cover
parser.add_argument( # pragma: no cover
"-u", "--u", # pragma: no cover
help="Base url of the site to be mapped", # pragma: no cover
dest="url" # pragma: no cover
) # pragma: no cover
parser.add_argument( # pragma: no cover
"--w", # pragma: no cover
help="Write output to file", # pragma: no cover
dest="output" # pragma: no cover
) # pragma: no cover
args = parser.parse_args() # pragma: no cover
if args.output: # pragma: no cover
out = sitemap(url=args.url) # pragma: no cover
write_text_sitemap(out, args.output)
elif args.url: # pragma: no cover
sitemap(url=args.url, verbose=True) # pragma: no cover
else: # pragma: no cover
parser.print_help() # pragma: no cover
if __name__ == '__main__':
main()
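# Hedged usage sketch (requires network access; the URL is illustrative):
#
#   pages = sitemap('https://example.com', verbose=True)
#   write_text_sitemap(pages, 'sitemap.txt')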
|
python
|
import list_wifi_distances
import requests
def report(rogue_mac):
pi_id = 8
distance = list_wifi_distances.get_network(rogue_mac.upper())
print(distance)
requests.post("http://10.10.10.93:8000/report", data={'id':pi_id, 'dist': distance})
|
python
|
import logging
from cellfinder.figures import heatmap
def run(args, atlas, downsampled_shape):
logging.info("Generating heatmap")
heatmap.run(
args.paths.downsampled_points,
atlas,
downsampled_shape,
args.brainreg_paths.registered_atlas,
args.paths.heatmap,
smoothing=args.heatmap_smooth,
mask=args.mask_figures,
)
|
python
|
# Generated by Django 3.1.2 on 2021-04-08 02:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bpmn', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Diagram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=255)),
('xml', models.TextField(default='', max_length=255)),
('svg', models.TextField(default='', max_length=255)),
('process', models.OneToOneField(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='bpmn.process')),
],
),
]
|
python
|
"""Aramaic Bible Module tool"""
from pathlib import Path
import click
from abm_tools.sedra.bible import parse_sedra3_bible_db_file
from abm_tools.sedra.db import from_transliteration, parse_sedra3_words_db_file, sedra4_db_word_json
@click.group()
def tool():
"""Tools for generating Aramaic bible software modules"""
@tool.command()
@click.argument("word_id", type=int)
def lookup(word_id: int):
"""Lookup a word in the SEDRA 4 DataBase"""
print(sedra4_db_word_json(word_id))
@tool.command()
@click.argument("file_name", type=click.Path(exists=True))
def gen(file_name: Path) -> int:
"""Create Aramaic Sword modules"""
words = parse_sedra3_words_db_file()
for book, chapter, verse, word_num, word_id in parse_sedra3_bible_db_file(
file_name=str(file_name)
):
#word = from_transliteration(words.loc[word_id]["strVocalised"])
word = sedra4_db_word_json(word_id)["western"]
print(
book,
chapter,
verse,
word_num,
word_id,
word,
)
return 0
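# Hedged CLI usage sketch, assuming the `tool` group above is registered as a
# console script (the script name and file path below are illustrative):
#
#   $ abm-tools lookup 12345
#   $ abm-tools gen bible_db.txt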
|
python
|
#!/usr/bin/env python
# Exercise (part c): create a script that connects to both routers (pynet-rtr1
# and pynet-rtr2) and prints out both the MIB2 sysName and sysDescr.
import snmp_helper
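# Hedged sketch for the exercise above. It assumes snmp_helper exposes
# snmp_get_oid(device, oid=...) and snmp_extract(result), with device being an
# (ip, community_string, port) tuple as in the pynet course materials; the
# router addresses and community string below are placeholders.
def print_sys_info(ip, community='public', port=161):
    device = (ip, community, port)
    for oid in ('1.3.6.1.2.1.1.5.0', '1.3.6.1.2.1.1.1.0'):  # sysName, sysDescr
        print(snmp_helper.snmp_extract(snmp_helper.snmp_get_oid(device, oid=oid)))
# for router_ip in ('10.10.10.1', '10.10.10.2'):  # pynet-rtr1, pynet-rtr2 (placeholders)
#     print_sys_info(router_ip)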
|
python
|
##
##
##
import queue
class BundGrammarQueue:
def __init__(self):
self.q = {}
self.default_queue_name = "__root__"
self.createLIFO("__root__")
def queue(self, name, auto_create=True):
if name not in self.q:
if auto_create:
self.default_queue_name = name
return self.createQueue(name)
else:
return None
else:
return self.q[name]
def lifo(self, name, auto_create=True):
if name not in self.q:
if auto_create:
self.default_queue_name = name
return self.createLIFO(name)
else:
return None
else:
return self.q[name]
def push(self, data):
if self.default_queue_name in self.q:
self.q[self.default_queue_name].put_nowait(data)
else:
return False
return True
def pull(self):
if self.default_queue_name in self.q:
try:
data = self.q[self.default_queue_name].get_nowait()
except queue.Empty:
return None
return data
return None
def createQueue(self, name):
if name in self.q:
return False
self.q[name] = queue.Queue()
return self.q[name]
def createLIFO(self, name):
if name in self.q:
return False
self.q[name] = queue.LifoQueue()
return self.q[name]
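# Hedged usage sketch (queue names are illustrative); demonstrates the default
# "__root__" LIFO, switching to a named LIFO, and LIFO pull order.
def _bund_queue_demo():
    q = BundGrammarQueue()
    q.push("root item")      # goes to the default "__root__" LIFO
    q.lifo("tokens")         # creates and switches to a "tokens" LIFO
    q.push("a")
    q.push("b")
    assert q.pull() == "b"   # LIFO order
    assert q.pull() == "a"
    assert q.pull() is None  # an empty queue returns None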
|
python
|
import vcr
from umm.cli.client import umm_request
from umm.server.utils import setup_folder
@vcr.use_cassette()
def test_umm_request():
setup_folder()
resp = umm_request([])
assert resp == {"commands": []}
|
python
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
"""Fabric commands for packaging our external software.
External meaning non-python software.
"""
from fabric.api import cd, env, run
from fabric.contrib.files import exists
from fabric.operations import get, put
def package_freeswitch(fs_version='1.6.16~33~e6d643b-1~jessie+1'):
"""Builds freeswitch with our patches.
This will build the package based on what is currently checked out in the
    local freeswitch repo. Be sure that the tag that is checked out matches the
version string that is needed. The tag we have used in the past is v1.6.9 and
the FS repo itself is at https://stash.freeswitch.org/scm/fs/freeswitch.git
git clone --branch v1.6.9 https://stash.freeswitch.org/scm/fs/freeswitch.git
"""
path = '/home/vagrant/freeswitch'
if not exists(path):
print('path %s does not exist on the VM, cannot package' % path)
return
with cd(path):
run('cp ../client/packaging/py3.h src/mod/languages/mod_python')
run('./build/set-fs-version.sh %s' % fs_version)
run('dch -b -m -v "%s" --force-distribution -D unstable "Endaga build."' % fs_version)
run('./bootstrap.sh', warn_only=True)
get(remote_path='modules.conf', local_path='/tmp/modules.conf')
o = open('/tmp/modules.conf', 'a')
o.write("event_handlers/mod_smpp\n")
o.write("languages/mod_python\n")
o.write("applications/mod_esl\n")
o.close()
with cd('debian/'):
put(remote_path='modules.conf', local_path='/tmp/modules.conf')
run('./bootstrap.sh -c jessie')
run('./configure --with-python=`which python3`')
run('sudo mk-build-deps -i -t "apt-get -y --no-install-recommends" debian/control')
run('dpkg-buildpackage -b -nc -us')
run('mkdir -p ~/endaga-packages')
run('mv ../*.deb ~/endaga-packages/')
def package_sipauthserve(make_clean='no'):
"""Create a deb for sipauthserve (subscriberRegistry).
The subscriberRegistry repo has its own build script.
"""
_package_external('/home/vagrant/subscriberRegistry', 'sipauthserve-public', make_clean)
def package_smqueue(make_clean='no'):
"""Create a deb for smqueue.
The smqueue repo has its own build script which itself calls FPM.
"""
_package_external('/home/vagrant/smqueue', 'smqueue-public', make_clean)
def package_openbts(make_clean='no'):
"""Create a deb for openbts-public."""
_package_external('/home/vagrant/openbts', 'openbts-public', make_clean)
def package_liba53(make_clean='no'):
"""Create a deb for liba53."""
_package_external('/home/vagrant/liba53', 'liba53', make_clean)
def _package_external(directory, package_name, make_clean):
"""Builds packages with mk-build-deps and dpkg-buildpackage.
Args:
directory: the path to a repo synced on the VM via vagrant
package_name: the name of the debian package that will be created
"""
if env.pkgfmt != "deb":
print("External packages only support deb, not building.")
return
if not exists(directory):
print('path %s does not exist, cannot package' % directory)
return
print('packaging %s as %s' % (directory, package_name))
run('mkdir -p ~/endaga-packages')
with cd('/home/vagrant/'):
with cd(directory):
run('echo y | sudo mk-build-deps')
run('sudo gdebi --n %s-build-deps*.deb' % package_name)
run('rm -f %s-build-deps*.deb' % package_name)
clean_arg = '' if make_clean == 'yes' else '-nc'
run('dpkg-buildpackage -b -uc -us %s' % clean_arg)
run('mv %s_*.deb ~/endaga-packages/.' % package_name)
run('rm %s_*' % package_name)
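# Hedged usage sketch (Fabric 1.x task invocation, matching the fabric.api
# import above; host configuration is assumed to be set up elsewhere):
#
#   $ fab package_openbts
#   $ fab package_sipauthserve:make_clean=yes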
|
python
|
def fun(x: int) -> str:
return str(x)
class SomeClass:
def meth(self, x: int) -> str:
return str(x)
|
python
|
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
NamedTuple,
Optional,
Set,
)
from contextlib import contextmanager
import pendulum
import prefect
from prefect.core import Edge, Flow, Task
from prefect.engine.result import Result
from prefect.engine.results import ConstantResult
from prefect.engine.runner import ENDRUN, Runner, call_state_handlers
from prefect.engine.state import (
Failed,
Mapped,
Pending,
Running,
Scheduled,
State,
Success,
)
from prefect.utilities import executors
from prefect.utilities.collections import flatten_seq
FlowRunnerInitializeResult = NamedTuple(
"FlowRunnerInitializeResult",
[
("state", State),
("task_states", Dict[Task, State]),
("context", Dict[str, Any]),
("task_contexts", Dict[Task, Dict[str, Any]]),
],
)
class FlowRunner(Runner):
"""
FlowRunners handle the execution of Flows and determine the State of a Flow
before, during and after the Flow is run.
In particular, through the FlowRunner you can specify which tasks should be
the first tasks to run, which tasks should be returned after the Flow is finished,
and what states each task should be initialized with.
Args:
- flow (Flow): the `Flow` to be run
- task_runner_cls (TaskRunner, optional): The class used for running
individual Tasks. Defaults to [TaskRunner](task_runner.html)
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the flow changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the flow runner instance, the old (prior) state, and the new
(current) state, with the following signature:
`state_handler(fr: FlowRunner, old_state: State, new_state: State) -> Optional[State]`
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
Note: new FlowRunners are initialized within the call to `Flow.run()` and in general,
this is the endpoint through which FlowRunners will be interacted with most frequently.
Example:
```python
@task
def say_hello():
print('hello')
with Flow("My Flow") as f:
say_hello()
fr = FlowRunner(flow=f)
flow_state = fr.run()
```
"""
def __init__(
self,
flow: Flow,
task_runner_cls: type = None,
state_handlers: Iterable[Callable] = None,
):
self.flow = flow
if task_runner_cls is None:
task_runner_cls = prefect.engine.get_default_task_runner_class()
self.task_runner_cls = task_runner_cls
super().__init__(state_handlers=state_handlers)
def __repr__(self) -> str:
return "<{}: {}>".format(type(self).__name__, self.flow.name)
def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
"""
A special state handler that the FlowRunner uses to call its flow's state handlers.
This method is called as part of the base Runner's `handle_state_change()` method.
Args:
- old_state (State): the old (previous) state
- new_state (State): the new (current) state
Returns:
- State: the new state
"""
self.logger.debug(
"Flow '{name}': Handling state change from {old} to {new}".format(
name=self.flow.name,
old=type(old_state).__name__,
new=type(new_state).__name__,
)
)
for handler in self.flow.state_handlers:
new_state = handler(self.flow, old_state, new_state) or new_state
return new_state
def initialize_run( # type: ignore
self,
state: Optional[State],
task_states: Dict[Task, State],
context: Dict[str, Any],
task_contexts: Dict[Task, Dict[str, Any]],
parameters: Dict[str, Any],
) -> FlowRunnerInitializeResult:
"""
Initializes the Task run by initializing state and context appropriately.
If the provided state is a Submitted state, the state it wraps is extracted.
Args:
- state (Optional[State]): the initial state of the run
- task_states (Dict[Task, State]): a dictionary of any initial task states
- context (Dict[str, Any], optional): prefect.Context to use for execution
to use for each Task run
- task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
provided to each task
- parameters(dict): the parameter values for the run
Returns:
- NamedTuple: a tuple of initialized objects:
`(state, task_states, context, task_contexts)`
"""
# overwrite context parameters one-by-one
context_params = context.setdefault("parameters", {})
for p in self.flow.parameters():
if not p.required:
context_params.setdefault(p.name, p.default)
for param, value in (parameters or {}).items():
context_params[param] = value
context.update(flow_name=self.flow.name)
context.setdefault("scheduled_start_time", pendulum.now("utc"))
# add various formatted dates to context
now = pendulum.now("utc")
dates = {
"date": now,
"today": now.strftime("%Y-%m-%d"),
"yesterday": now.add(days=-1).strftime("%Y-%m-%d"),
"tomorrow": now.add(days=1).strftime("%Y-%m-%d"),
"today_nodash": now.strftime("%Y%m%d"),
"yesterday_nodash": now.add(days=-1).strftime("%Y%m%d"),
"tomorrow_nodash": now.add(days=1).strftime("%Y%m%d"),
}
for key, val in dates.items():
context.setdefault(key, val)
for task in self.flow.tasks:
task_contexts.setdefault(task, {}).update(
task_name=task.name, task_slug=self.flow.slugs[task],
)
state, context = super().initialize_run(state=state, context=context)
return FlowRunnerInitializeResult(
state=state,
task_states=task_states,
context=context,
task_contexts=task_contexts,
)
def run(
self,
state: State = None,
task_states: Dict[Task, State] = None,
return_tasks: Iterable[Task] = None,
parameters: Dict[str, Any] = None,
task_runner_state_handlers: Iterable[Callable] = None,
executor: "prefect.engine.executors.Executor" = None,
context: Dict[str, Any] = None,
task_contexts: Dict[Task, Dict[str, Any]] = None,
) -> State:
"""
The main endpoint for FlowRunners. Calling this method will perform all
computations contained within the Flow and return the final state of the Flow.
Args:
- state (State, optional): starting state for the Flow. Defaults to
`Pending`
- task_states (dict, optional): dictionary of task states to begin
computation with, with keys being Tasks and values their corresponding state
- return_tasks ([Task], optional): list of Tasks to include in the
final returned Flow state. Defaults to `None`
- parameters (dict, optional): dictionary of any needed Parameter
values, with keys being strings representing Parameter names and values being
their corresponding values
- task_runner_state_handlers (Iterable[Callable], optional): A list of state change
handlers that will be provided to the task_runner, and called whenever a task
changes state.
- executor (Executor, optional): executor to use when performing
computation; defaults to the executor specified in your prefect configuration
- context (Dict[str, Any], optional): prefect.Context to use for execution
to use for each Task run
- task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
provided to each task
Returns:
- State: `State` representing the final post-run state of the `Flow`.
"""
self.logger.info("Beginning Flow run for '{}'".format(self.flow.name))
# make copies to avoid modifying user inputs
task_states = dict(task_states or {})
context = dict(context or {})
task_contexts = dict(task_contexts or {})
parameters = dict(parameters or {})
if executor is None:
executor = prefect.engine.get_default_executor_class()()
try:
state, task_states, context, task_contexts = self.initialize_run(
state=state,
task_states=task_states,
context=context,
task_contexts=task_contexts,
parameters=parameters,
)
with prefect.context(context):
state = self.check_flow_is_pending_or_running(state)
state = self.check_flow_reached_start_time(state)
state = self.set_flow_to_running(state)
state = self.get_flow_run_state(
state,
task_states=task_states,
task_contexts=task_contexts,
return_tasks=return_tasks,
task_runner_state_handlers=task_runner_state_handlers,
executor=executor,
)
except ENDRUN as exc:
state = exc.state
# All other exceptions are trapped and turned into Failed states
except Exception as exc:
self.logger.exception(
"Unexpected error while running flow: {}".format(repr(exc))
)
if prefect.context.get("raise_on_exception"):
raise exc
new_state = Failed(
message="Unexpected error while running flow: {}".format(repr(exc)),
result=exc,
)
state = self.handle_state_change(state or Pending(), new_state)
return state
@contextmanager
def check_for_cancellation(self) -> Iterator:
"""Contextmanager used to wrap a cancellable section of a flow run.
No-op for the default `FlowRunner` class.
"""
yield
@call_state_handlers
def check_flow_reached_start_time(self, state: State) -> State:
"""
Checks if the Flow is in a Scheduled state and, if it is, ensures that the scheduled
time has been reached.
Args:
- state (State): the current state of this Flow
Returns:
- State: the state of the flow after performing the check
Raises:
- ENDRUN: if the flow is Scheduled with a future scheduled time
"""
if isinstance(state, Scheduled):
if state.start_time and state.start_time > pendulum.now("utc"):
self.logger.debug(
"Flow '{name}': start_time has not been reached; ending run.".format(
name=self.flow.name
)
)
raise ENDRUN(state)
return state
@call_state_handlers
def check_flow_is_pending_or_running(self, state: State) -> State:
"""
Checks if the flow is in either a Pending state or Running state. Either are valid
starting points (because we allow simultaneous runs of the same flow run).
Args:
- state (State): the current state of this flow
Returns:
- State: the state of the flow after running the check
Raises:
- ENDRUN: if the flow is not pending or running
"""
# the flow run is already finished
if state.is_finished() is True:
self.logger.info("Flow run has already finished.")
raise ENDRUN(state)
# the flow run must be either pending or running (possibly redundant with above)
elif not (state.is_pending() or state.is_running()):
self.logger.info("Flow is not ready to run.")
raise ENDRUN(state)
return state
@call_state_handlers
def set_flow_to_running(self, state: State) -> State:
"""
Puts Pending flows in a Running state; leaves Running flows Running.
Args:
- state (State): the current state of this flow
Returns:
- State: the state of the flow after running the check
Raises:
- ENDRUN: if the flow is not pending or running
"""
if state.is_pending():
self.logger.info("Starting flow run.")
return Running(message="Running flow.")
elif state.is_running():
return state
else:
raise ENDRUN(state)
@executors.run_with_heartbeat
@call_state_handlers
def get_flow_run_state(
self,
state: State,
task_states: Dict[Task, State],
task_contexts: Dict[Task, Dict[str, Any]],
return_tasks: Set[Task],
task_runner_state_handlers: Iterable[Callable],
executor: "prefect.engine.executors.base.Executor",
) -> State:
"""
Runs the flow.
Args:
- state (State): starting state for the Flow. Defaults to
`Pending`
- task_states (dict): dictionary of task states to begin
computation with, with keys being Tasks and values their corresponding state
- task_contexts (Dict[Task, Dict[str, Any]]): contexts that will be provided to
each task
- return_tasks ([Task], optional): list of Tasks to include in the
final returned Flow state. Defaults to `None`
- task_runner_state_handlers (Iterable[Callable]): A list of state change handlers
that will be provided to the task_runner, and called whenever a task changes
state.
- executor (Executor): executor to use when performing computation; defaults to the
executor provided in your prefect configuration
Returns:
- State: `State` representing the final post-run state of the `Flow`.
"""
# this dictionary is used for tracking the states of "children" mapped tasks;
# when running on Dask, we want to avoid serializing futures, so instead
# of storing child task states in the `map_states` attribute we instead store
# in this dictionary and only after they are resolved do we attach them to the Mapped state
mapped_children = dict() # type: Dict[Task, list]
if not state.is_running():
self.logger.info("Flow is not in a Running state.")
raise ENDRUN(state)
if return_tasks is None:
return_tasks = set()
if set(return_tasks).difference(self.flow.tasks):
raise ValueError("Some tasks in return_tasks were not found in the flow.")
def extra_context(task: Task, task_index: int = None) -> dict:
return {
"task_name": task.name,
"task_tags": task.tags,
"task_index": task_index,
}
# -- process each task in order
with self.check_for_cancellation(), executor.start():
for task in self.flow.sorted_tasks():
task_state = task_states.get(task)
# if a task is a constant task, we already know its return value
# no need to use up resources by running it through a task runner
if task_state is None and isinstance(
task, prefect.tasks.core.constants.Constant
):
task_states[task] = task_state = Success(result=task.value)
# if the state is finished, don't run the task, just use the provided state if
# the state is cached / mapped, we still want to run the task runner pipeline
# steps to either ensure the cache is still valid / or to recreate the mapped
# pipeline for possible retries
if (
isinstance(task_state, State)
and task_state.is_finished()
and not task_state.is_cached()
and not task_state.is_mapped()
):
continue
upstream_states = {} # type: Dict[Edge, State]
# this dictionary is used exclusively for "reduce" tasks in particular we store
# the states / futures corresponding to the upstream children, and if running
# on Dask, let Dask resolve them at the appropriate time.
# Note: this is an optimization that allows Dask to resolve the mapped
# dependencies by "elevating" them to a function argument.
upstream_mapped_states = {} # type: Dict[Edge, list]
# -- process each edge to the task
for edge in self.flow.edges_to(task):
# load the upstream task states (supplying Pending as a default)
upstream_states[edge] = task_states.get(
edge.upstream_task, Pending(message="Task state not available.")
)
# if the edge is flattened and not the result of a map, then we
# preprocess the upstream states. If it IS the result of a
# map, it will be handled in `prepare_upstream_states_for_mapping`
if edge.flattened:
if not isinstance(upstream_states[edge], Mapped):
upstream_states[edge] = executor.submit(
executors.flatten_upstream_state, upstream_states[edge]
)
# this checks whether the task is a "reduce" task for a mapped pipeline
# and if so, collects the appropriate upstream children
if not edge.mapped and isinstance(upstream_states[edge], Mapped):
children = mapped_children.get(edge.upstream_task, [])
# if the edge is flattened, then we need to wait for the mapped children
# to complete and then flatten them
if edge.flattened:
children = executors.flatten_mapped_children(
mapped_children=children, executor=executor,
)
upstream_mapped_states[edge] = children
# augment edges with upstream constants
for key, val in self.flow.constants[task].items():
edge = Edge(
upstream_task=prefect.tasks.core.constants.Constant(val),
downstream_task=task,
key=key,
)
upstream_states[edge] = Success(
"Auto-generated constant value",
result=ConstantResult(value=val),
)
# handle mapped tasks
if any([edge.mapped for edge in upstream_states.keys()]):
# wait on upstream states to determine the width of the pipeline
# this is the key to depth-first execution
upstream_states = executor.wait(
{e: state for e, state in upstream_states.items()}
)
# we submit the task to the task runner to determine if
# we can proceed with mapping - if the new task state is not a Mapped
# state then we don't proceed
task_states[task] = executor.wait(
executor.submit(
run_task,
task=task,
state=task_state, # original state
upstream_states=upstream_states,
context=dict(
prefect.context, **task_contexts.get(task, {})
),
flow_result=self.flow.result,
task_runner_cls=self.task_runner_cls,
task_runner_state_handlers=task_runner_state_handlers,
upstream_mapped_states=upstream_mapped_states,
is_mapped_parent=True,
extra_context=extra_context(task),
)
)
# either way, we should now have enough resolved states to restructure
# the upstream states into a list of upstream state dictionaries to iterate over
list_of_upstream_states = executors.prepare_upstream_states_for_mapping(
task_states[task],
upstream_states,
mapped_children,
executor=executor,
)
submitted_states = []
for idx, states in enumerate(list_of_upstream_states):
# if we are on a future rerun of a partially complete flow run,
# there might be mapped children in a retrying state; this check
# looks into the current task state's map_states for such info
if (
isinstance(task_state, Mapped)
and len(task_state.map_states) >= idx + 1
):
current_state = task_state.map_states[
idx
] # type: Optional[State]
elif isinstance(task_state, Mapped):
current_state = None
else:
current_state = task_state
# this is where each child is submitted for actual work
submitted_states.append(
executor.submit(
run_task,
task=task,
state=current_state,
upstream_states=states,
context=dict(
prefect.context,
**task_contexts.get(task, {}),
map_index=idx,
),
flow_result=self.flow.result,
task_runner_cls=self.task_runner_cls,
task_runner_state_handlers=task_runner_state_handlers,
upstream_mapped_states=upstream_mapped_states,
extra_context=extra_context(task, task_index=idx),
)
)
if isinstance(task_states.get(task), Mapped):
mapped_children[task] = submitted_states # type: ignore
else:
task_states[task] = executor.submit(
run_task,
task=task,
state=task_state,
upstream_states=upstream_states,
context=dict(prefect.context, **task_contexts.get(task, {})),
flow_result=self.flow.result,
task_runner_cls=self.task_runner_cls,
task_runner_state_handlers=task_runner_state_handlers,
upstream_mapped_states=upstream_mapped_states,
extra_context=extra_context(task),
)
# ---------------------------------------------
# Collect results
# ---------------------------------------------
# terminal tasks determine if the flow is finished
terminal_tasks = self.flow.terminal_tasks()
# reference tasks determine flow state
reference_tasks = self.flow.reference_tasks()
# wait until all terminal tasks are finished
final_tasks = terminal_tasks.union(reference_tasks).union(return_tasks)
final_states = executor.wait(
{
t: task_states.get(t, Pending("Task not evaluated by FlowRunner."))
for t in final_tasks
}
)
# also wait for any children of Mapped tasks to finish, and add them
# to the dictionary to determine flow state
all_final_states = final_states.copy()
for t, s in list(final_states.items()):
if s.is_mapped():
# ensure we wait for any mapped children to complete
if t in mapped_children:
s.map_states = executor.wait(mapped_children[t])
s.result = [ms.result for ms in s.map_states]
all_final_states[t] = s.map_states
assert isinstance(final_states, dict)
key_states = set(flatten_seq([all_final_states[t] for t in reference_tasks]))
terminal_states = set(
flatten_seq([all_final_states[t] for t in terminal_tasks])
)
return_states = {t: final_states[t] for t in return_tasks}
state = self.determine_final_state(
state=state,
key_states=key_states,
return_states=return_states,
terminal_states=terminal_states,
)
return state
def determine_final_state(
self,
state: State,
key_states: Set[State],
return_states: Dict[Task, State],
terminal_states: Set[State],
) -> State:
"""
Implements the logic for determining the final state of the flow run.
Args:
- state (State): the current state of the Flow
- key_states (Set[State]): the states which will determine the success / failure of
the flow run
- return_states (Dict[Task, State]): states to return as results
- terminal_states (Set[State]): the states of the terminal tasks for this flow
Returns:
- State: the final state of the flow run
"""
# check that the flow is finished
if not all(s.is_finished() for s in terminal_states):
self.logger.info("Flow run RUNNING: terminal tasks are incomplete.")
state.result = return_states
# check if any key task failed
elif any(s.is_failed() for s in key_states):
self.logger.info("Flow run FAILED: some reference tasks failed.")
state = Failed(message="Some reference tasks failed.", result=return_states)
# check if all reference tasks succeeded
elif all(s.is_successful() for s in key_states):
self.logger.info("Flow run SUCCESS: all reference tasks succeeded")
state = Success(
message="All reference tasks succeeded.", result=return_states
)
# check for any unanticipated state that is finished but neither success nor failed
else:
self.logger.info("Flow run SUCCESS: no reference tasks failed")
state = Success(message="No reference tasks failed.", result=return_states)
return state
def run_task(
task: Task,
state: State,
upstream_states: Dict[Edge, State],
context: Dict[str, Any],
flow_result: Result,
task_runner_cls: Callable,
task_runner_state_handlers: Iterable[Callable],
upstream_mapped_states: Dict[Edge, list],
is_mapped_parent: bool = False,
) -> State:
"""
Runs a specific task. This method is intended to be called by submitting it to
an executor.
Args:
- task (Task): the task to run
- state (State): starting state for the Flow. Defaults to `Pending`
- task_runner_cls (Callable): the `TaskRunner` class to use
- upstream_states (Dict[Edge, State]): dictionary of upstream states
- context (Dict[str, Any]): a context dictionary for the task run
- flow_result (Result): the `Result` associated with the flow (if any)
- task_runner_state_handlers (Iterable[Callable]): A list of state change
handlers that will be provided to the task_runner, and called
whenever a task changes state.
- upstream_mapped_states (Dict[Edge, list]): dictionary of upstream states
corresponding to mapped children dependencies
- is_mapped_parent (bool): a boolean indicating whether this task run is the
run of a parent mapped task
Returns:
- State: `State` representing the final post-run state of the `Flow`.
"""
with prefect.context(context):
# Update upstream_states with info from upstream_mapped_states
for edge, upstream_state in upstream_states.items():
if not edge.mapped and upstream_state.is_mapped():
assert isinstance(upstream_state, Mapped) # mypy assert
upstream_state.map_states = upstream_mapped_states.get(
edge, upstream_state.map_states
)
upstream_state.result = [s.result for s in upstream_state.map_states]
task_runner = task_runner_cls(
task=task,
state_handlers=task_runner_state_handlers,
flow_result=flow_result,
)
return task_runner.run(
state=state,
upstream_states=upstream_states,
is_mapped_parent=is_mapped_parent,
context=context,
)
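# Hedged usage sketch of FlowRunner.run (Prefect-style API as imported above;
# the flow, task and parameter names are illustrative):
#
#   from prefect import task, Flow, Parameter
#
#   @task
#   def add(x, y):
#       return x + y
#
#   with Flow("demo") as flow:
#       n = Parameter("n")
#       total = add(n, 1)
#
#   state = FlowRunner(flow=flow).run(parameters={"n": 41}, return_tasks=[total])
#   assert state.result[total].result == 42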
|
python
|
''' Menu Module
Module to deal with menus and buttons. Used initially for start menu.
Can be extended if required to create pause and other menues throughout
the game.
@author: Robert (unless stated otherwise)
'''
import pygame
from classes.text import Text
from classes.sfxbox import SFXBox
button_location = 'graphics/menu/button.png'
SFX = SFXBox()
class Menu:
''' Menu
Class which generates and contains menu functionality
'''
def __init__(self, screen, title_obj, background_location, *buttons):
''' Menu
Unpacks buttons passed into menu
'''
self.screen = screen
self.title_obj = title_obj
self.unpackButtons(buttons)
self.background_location = background_location
if background_location != False:
self.background = pygame.image.load(background_location)
self.background_rect = pygame.Rect((0, 0, 1, 1))
# Quitting Bool to determine whether to quit screen
self.playing = True
def display(self):
'''
Displays all buttons on the screen
'''
if self.background_location != False:
self.screen.blit(self.background, self.background_rect)
self.title_obj.display()
# self.play_obj.display()
# self.help_obj.display()
for button in self.buttons:
button.display()
def do(self, event):
''' do function
Actions whatever is input by user. Receives events from game
loop and if applicable actions them.
The buttons have a record of whether they have been
        'button-downed' yet. If they have been, and they are then
        'button-upped', their function will be called.
        If a button is pressed, do() will also return any output that the
        button's function may give. This allows the menu to be used in
any scenario such as the pause button, where we want it to
return a string, to tell the pause screen what to do.
'''
if event.type == pygame.QUIT:
# Detecting user pressing quit button, if X pressed,
# break loop and quit screen.
self.playing = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.buttons:
if self.checkPress(button, event.pos):
button.mouse_down = True
if event.type == pygame.MOUSEBUTTONUP:
for button in self.buttons:
if self.checkPress(button, event.pos) and button.mouse_down:
SFX.click()
any_output = button.press()
button.mouse_down = False
return any_output
button.mouse_down = False
def checkPress(self, button, pos):
'''
Checks whether a position hits any of the buttons on the menu
'''
x0, x1, y0, y1 = button.coords
if (x0 < pos[0] < x1) and (y0 < pos[1] < y1):
return True
return False
def unpackButtons(self, buttons):
        ''' Unpacks buttons from tuple to list
'''
self.buttons = []
for button in buttons:
self.buttons.append(button)
class Button:
''' Button Class
Creates an automatically highlighted button using the Text class
from the text module.
    The button's display function checks whether the cursor is covering it.
If the button is being covered, it highlights the text (yellow by
default) and if clicked it calls the given function.
I have used property decorators to deal with button position, as in
the Text class so that the button can easily be moved on screen if
required. For this reason, __position is private, so that it cannot
be edited from outside the function.
'''
def __init__(self, screen, text, position, func,
font_size = 35, size = (128, 64),
text_colour = 'white', highlight = 'yellow'):
# Storing attributes
self.screen = screen
self.text = text
self.__position = position
self.func = func
self.size = size
self.text_colour = text_colour
self.highlight = highlight
self.font_size = font_size
self.highlighted = False
self.mouse_down = False
# Make edges attributes
self.setEdgesAttributes()
# Making text and images
self.makeText()
self.makeImage()
def press(self):
''' Call button function when pressed, and return any output
'''
return self.func()
def makeText(self):
''' Create text object
'''
self.text = Text(self.screen, self.position, self.font_size,
self.text, self.text_colour)
def makeImage(self):
''' Make image object from image to be loaded
'''
self.image = pygame.transform.scale(
pygame.image.load(button_location),
self.size
)
self.rect = self.image.get_rect()
self.rect.center = self.position
def update(self):
''' Updates highlighting if cursor hovering over button
'''
pos_x, pos_y = pygame.mouse.get_pos()
over_button = (self.left < pos_x < self.right) \
and (self.top < pos_y < self.bottom)
if over_button:
self.highlighted = True
self.text.colour = self.highlight
elif self.highlighted:
self.text.colour = self.text_colour
self.highlighted = False
def display(self):
''' Displays all button components on screen
'''
self.update()
self.screen.blit(self.image, self.rect)
self.text.display()
def setEdgesAttributes(self):
''' Sets left/right/top/bottom attributes from position
'''
self.left = self.position[0] - (self.size[0] // 2)
self.right = self.position[0] + (self.size[0] // 2)
self.top = self.position[1] - (self.size[1] // 2)
self.bottom = self.position[1] + (self.size[1] // 2)
# The following decorated functions deal with position and
# coordinates of our button. The position gives the centre
# position, x and y give the corresponding components of the centre,
# and coords give the corner positions. These are all updated by
# updating the position, and the position setter cascades the
# changes to all attributes.
@property
def position(self):
return self.__position
@position.setter
def position(self, new_pos):
self.__position = new_pos
self.setEdgesAttributes()
self.text.position = self.position
self.rect.center = self.position
@property
def x(self):
return self.__position[0]
@x.setter
def x(self, new_x):
self.position = [new_x, self.position[1]]
@property
def y(self):
return self.__position[1]
@y.setter
def y(self, new_y):
self.position = [self.position[0], new_y]
@property
def coords(self):
return (self.left, self.right, self.top, self.bottom)
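# --- usage sketch (not part of the original module) ---
# A minimal, hedged illustration of how a Button might be created and driven
# from a pygame loop. It is kept as comments because it relies on names defined
# elsewhere in this project (the Text class, SFX, button_location) and on a
# hypothetical `start_game` callback:
#
#   screen = pygame.display.set_mode((640, 480))
#   play_button = Button(screen, 'Play', [320, 240], func=start_game)
#   while True:
#       for event in pygame.event.get():
#           ...                       # forward events to the surrounding Menu
#       play_button.display()         # highlights the text when hovered
#       pygame.display.flip()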
|
python
|
from loguru import logger
import cv2
import os
import pickle
import typing
import numpy as np
from sklearn.svm import LinearSVC
from stagesepx.classifier.base import BaseModelClassifier
from stagesepx import toolbox
from stagesepx.video import VideoFrame
from stagesepx import constants
class SVMClassifier(BaseModelClassifier):
FEATURE_DICT = {
"hog": toolbox.turn_hog_desc,
"lbp": toolbox.turn_lbp_desc,
# do not use feature transform
"raw": lambda x: x,
}
UNKNOWN_STAGE_NAME = constants.UNKNOWN_STAGE_FLAG
def __init__(
self, feature_type: str = None, score_threshold: float = None, *args, **kwargs
):
"""
init classifier
:param feature_type:
before training, the classifier converts pictures into features for better classification.
e.g. 'hog', 'lbp' or 'raw'
:param score_threshold:
float, 0 - 1.0; if the max score is below this value, the label becomes UNKNOWN_STAGE_NAME
default value is 0 (None)
"""
super().__init__(*args, **kwargs)
# feature settings
if not feature_type:
feature_type = "hog"
if feature_type not in self.FEATURE_DICT:
raise AttributeError(f"no feature func named {feature_type}")
self.feature_func: typing.Callable = self.FEATURE_DICT[feature_type]
logger.debug(f"feature function: {feature_type}")
# model settings
self._model: typing.Optional[LinearSVC] = None
self.score_threshold: float = score_threshold or 0.0
logger.debug(f"score threshold: {self.score_threshold}")
def clean_model(self):
self._model = None
def save_model(self, model_path: str, overwrite: bool = None):
"""
save trained model
:param model_path:
:param overwrite:
:return:
"""
logger.debug(f"save model to {model_path}")
# assert model file
if os.path.isfile(model_path) and not overwrite:
raise FileExistsError(
f"model file {model_path} already existed, you can set `overwrite` True to cover it"
)
# assert model data is not empty
assert self._model, "model is empty"
with open(model_path, "wb") as f:
pickle.dump(self._model, f)
def load_model(self, model_path: str, overwrite: bool = None):
"""
load trained model
:param model_path:
:param overwrite:
:return:
"""
logger.debug(f"load model from {model_path}")
# assert model file
assert os.path.isfile(model_path), f"model file {model_path} does not exist"
# assert model data is empty
if self._model and not overwrite:
raise RuntimeError(
f"model is not empty, you can set `overwrite` True to cover it"
)
# joblib raises an error here (root cause unknown), so use pickle instead
with open(model_path, "rb") as f:
self._model = pickle.load(f)
def train(self):
"""
Train the classifier with data. Must be called before prediction.
:return:
"""
if not self._model:
logger.debug("no model can be used. build a new one.")
self._model = LinearSVC()
else:
logger.debug("already have a trained model. train on this model.")
train_data = list()
train_label = list()
for each_label, each_label_pic_list in self.read():
for each_pic_object in each_label_pic_list:
logger.debug(f"training label: {each_label}")
# apply hook
each_pic_object = self._apply_hook(
VideoFrame(-1, -1.0, each_pic_object)
)
each_pic_object = each_pic_object.data
each_pic_object = self.feature_func(each_pic_object).flatten()
train_data.append(each_pic_object)
train_label.append(each_label)
logger.debug("data ready")
assert (
len(train_label) > 1
), f"seems only one class in the training dataset, at least two classes are required: {train_label}"
self._model.fit(train_data, train_label)
logger.debug("train finished")
def predict(self, pic_path: str) -> str:
"""
predict a single picture
:param pic_path:
:return:
"""
pic_object = toolbox.imread(pic_path)
return self.predict_with_object(pic_object)
def predict_with_object(self, frame: np.ndarray) -> str:
"""
predict a single object
:param frame:
:return:
"""
pic_object = self.feature_func(frame)
pic_object = pic_object.reshape(1, -1)
# scores for each stage
# IMPORTANT:
# these scores are not always precise
# most of the time a tiny training data set is used,
# which may cause 'liblinear failed to converge';
# the classifier can still tell which class is the target,
# but the calculated values may look weird
scores = self._model.decision_function(pic_object)[0]
logger.debug(f"scores: {scores}")
# in the binary case, the return type is different (a single float),
# presumably for efficiency
if len(self._model.classes_) == 2:
# scores is a float
# confidence score for self.classes_[1] where >0 means this
# class would be predicted
return self._model.classes_[1 if scores > 0 else 0]
# unknown
if max(scores) < self.score_threshold:
logger.warning(
f"max score is lower than {self.score_threshold}, unknown class"
)
return self.UNKNOWN_STAGE_NAME
return self._model.classes_[np.argmax(scores)]
def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:
return self.predict_with_object(frame.data)
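# --- usage sketch (not part of the original module) ---
# A minimal, hedged illustration of the public API above; `cl.load(...)` is
# assumed to come from BaseModelClassifier and feeds the `self.read()` call
# used in `train()`, and the paths are placeholders:
#
#   cl = SVMClassifier(feature_type="hog", score_threshold=0.9)
#   cl.load("./train_data")                  # one sub-directory per stage label (assumption)
#   cl.train()
#   cl.save_model("model.pkl", overwrite=True)
#   print(cl.predict("./some_frame.png"))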
|
python
|
from fastapi import APIRouter
from client.api.api_v1.endpoints import twitter, disk_space
router = APIRouter()
router.include_router(disk_space.router, prefix="/diskspace", tags=["diskspace"])
router.include_router(twitter.router, prefix="/twitter", tags=["twitter"])
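# --- usage sketch (not part of the original module) ---
# How this aggregate router is typically mounted on an application; the
# "/api/v1" prefix is an assumption, not taken from the original project:
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/api/v1")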
|
python
|
import socket
import imagezmq
import cv2
import time
sender = imagezmq.ImageSender(connect_to='tcp://localhost:5555')
sender_name = socket.gethostname() # send your hostname with each image
# image = open("C:/Users/H S/PycharmProjects/Kivy/Untitled.png", 'rb')
image = cv2.imread("C:/Users/H S/PycharmProjects/Kivy/Untitled.png")
print(image)
print(sender_name)
s = time.time()
sender.send_image(sender_name, image)
e = time.time()
print('it took- ', e-s, ' sec')
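# --- receiver sketch (not part of the original script) ---
# The matching receive side for the sender above, using imagezmq's default
# REQ/REP pattern (run it before the sender so send_image() does not block):
#
#   import imagezmq
#
#   hub = imagezmq.ImageHub()              # binds tcp://*:5555 by default
#   name, received = hub.recv_image()      # returns (sender_name, OpenCV image)
#   hub.send_reply(b'OK')                  # unblocks the sender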
|
python
|
import zerorpc
client = zerorpc.Client()
client.connect("tcp://127.0.0.1:4242")
num = 7
result = client.double(num)
print("Double", num, "is", result)
|
python
|
from Components.Converter.Converter import Converter
from Components.config import config
from Components.Element import cached
from Poll import Poll
from enigma import eDVBVolumecontrol
class ArcticVolume(Poll, Converter):
def __init__(self, val):
Converter.__init__(self, val)
Poll.__init__(self)
self.poll_interval = 500
self.poll_enabled = True
self.volctrl = eDVBVolumecontrol.getInstance()
#print "ArcticVolume start Converter"
def doSuspend(self, suspended):
if suspended:
self.poll_enabled = False
else:
self.downstream_elements.changed((self.CHANGED_POLL,))
self.poll_enabled = True
@cached
def getText(self):
#print "ArcticVolume: " + str(self.volctrl.getVolume())
return str(self.volctrl.getVolume())
@cached
def getValue(self):
#print "ArcticVolume: " + str(self.volctrl.getVolume())
return str(self.volctrl.getVolume())
text = property(getText)
value = property(getValue)
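# --- usage note (not part of the original module) ---
# Converters like this are referenced from an Enigma2 skin XML, roughly:
#   <widget source="session.CurrentService" render="Label">
#       <convert type="ArcticVolume" />
#   </widget>
# The exact source attribute is an assumption; this converter ignores the
# source and polls eDVBVolumecontrol directly every 500 ms.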
|
python
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
###############################################################################
"""
Data model for electrolyte database.
Usage to get configuration for IDAES::
base = <query database for Base config of interest>
c_list = <get Components from database>
# add all the components to the base
for c in c_list:
base.add(c)
# get the merged configuration for IDAES functions
config = base.idaes_config
Class diagram::
┌────────────────────────────────┐
│ ConfigGenerator <<abstract>> │
uses ├────────────────────────────────┤
┌─────►│+ConfigGenerator(data) │
│ │ │
│ ├────────────────────────────────┤
│ │+config │
│ │_transform(data) │
│ └────────────┬───────────────────┘
│ │
│ ├───────────┬───────────────────────┐
│ │ │ │
│ ┌──────────────┴┐ ┌──┴──────────┐ ┌─────┴─────┐
│ │ ReactionConfig│ │ ThermoConfig│ │ BaseConfig│
│ └─────▲─────────┘ └─▲───────────┘ └───────▲───┘
│ │ │ │
│ │ │ │
│ │ │ │
│ │uses │uses │uses
│ │ │ │
│ │ │ │
│ ┌───────┼───────────────────┼──────────────────────────┼────────────┐
│ │ │ │ │ │
│ │ ┌────┴─────┐ ┌─────────┴───┐ ┌─────────────────┴─────────┐ │
│ │ │ Reaction │ │ Component │ │ Base │ │
│ │ └─────┬────┘ └──────┬──────┘ │ │ │
│ │ │ │ │ +add(item:DataWrapper) │ │
│ │ │ │ └─────────┬─────────────────┘ │
│ │ │ │ │ │
│ │ │ │ │ │
│ │ ├───────────────┴─────────────────────┘ │
│ │ │ │
│ │ │ │
│ └────────┼──────────────────────────────────────────────────┬───────┘
│ │ │
│ │ │
│ │ ┌────────┴─────────────┐
│ │ subclass │ │
│ ┌───────▼────────────────────────────┐ │ Public interface to │
│ │DataWrapper <<abstract>> │ │ the rest of │
│ ├────────────────────────────────────┤ │ WaterTAP │
│ │+DataWrapper(data, config_gen_class)│ │ │
└───┼────────────────────────────────────┤ └──────────────────────┘
│+idaes_config: dict │
│+merge_keys: tuple[str] │
└────────────────────────────────────┘
"""
__author__ = "Dan Gunter"
# stdlib
import collections
from contextlib import contextmanager
import copy
from fnmatch import fnmatchcase
import logging
from pprint import pformat
import re
from typing import Dict, Type, List, Union, Tuple
# 3rd party
from pyomo.environ import units as pyunits
# IDAES methods and constants
from idaes.core import AqueousPhase, LiquidPhase, SolidPhase, VaporPhase
from idaes.core.base.phases import PhaseType
from idaes.core import Component as IComponent
from idaes.models.properties.modular_properties.eos.ideal import Ideal
from idaes.models.properties.modular_properties.base.generic_reaction import (
ConcentrationForm,
)
from idaes.models.properties.modular_properties.phase_equil.forms import fugacity
from idaes.models.properties.modular_properties.pure import Perrys
from idaes.models.properties.modular_properties.pure.ConstantProperties import Constant
from idaes.models.properties.modular_properties.pure.NIST import NIST
from idaes.models.properties.modular_properties.reactions.dh_rxn import constant_dh_rxn
from idaes.models.properties.modular_properties.pure.electrolyte import (
relative_permittivity_constant,
)
from idaes.models.properties.modular_properties.reactions.equilibrium_constant import (
van_t_hoff,
)
from idaes.models.properties.modular_properties.reactions.equilibrium_forms import (
power_law_equil,
log_power_law_equil,
solubility_product,
log_solubility_product,
)
from idaes.models.properties.modular_properties.state_definitions import FTPx, FpcTP
from idaes.core.base.components import Solvent, Solute, Cation, Anion
from idaes.models.properties.modular_properties.phase_equil import SmoothVLE
from idaes.models.properties.modular_properties.phase_equil.bubble_dew import (
IdealBubbleDew,
)
from .error import ConfigGeneratorError, BadConfiguration
_log = logging.getLogger(__name__)
@contextmanager
def field(f):
"""Clean way to use a field in block (see code below for lots of examples)."""
yield f
class ConfigGenerator:
"""Interface for getting an IDAES 'idaes_config' dict."""
merge_keys = ()
substitute_values = {}
SUBST_UNITS = "units"
def __init__(self, data: Dict, name=None):
"""Constructor.
Args:
data: Input data
name: Name of the component, e.g. "H2O"
"""
data_copy = copy.deepcopy(data)
_log.info(f"transform to IDAES config.start: name={name}")
self._transform(data_copy)
_log.info(f"transform to IDAES config.end: name={name}")
self.config = data_copy
@classmethod
def _transform(cls, data):
pass # subclasses should implement
@staticmethod
def _build_units(x: str = None):
if not x:
_log.info("setting dimensionless unit")
x = "dimensionless"
s = re.sub(r"([A-Za-z]+)", r"U.\1", x).replace("U.None", "U.dimensionless")
try:
units = eval(s, {"U": pyunits})
# Syntax/NameError are just general badness, AttributeError is an unknown unit
except (SyntaxError, NameError, AttributeError) as err:
_log.error(f"while evaluating unit {s}: {err}")
raise
return units
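# Example (added comment): _build_units("kg/m**3") rewrites the string to
# "U.kg/U.m**3" and evaluates it against pyomo units, yielding
# pyunits.kg / pyunits.m**3; an empty input falls back to dimensionless.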
# shared
@classmethod
def _transform_parameter_data(cls, comp):
debugging, comp_name = _log.isEnabledFor(logging.DEBUG), comp.get("name", "?")
params = comp.get(DataWrapperNames.param, None)
if not params:
_log.warning(f"No parameter data found in data name={comp_name}")
return
for param_key in params:
val = params[param_key]
if param_key == Reaction.NAMES.reaction_order:
# change form of reaction_order, just like stoichiometry
reaction_order_table = {}
for phase in val:
for species, num in val[phase].items():
reaction_order_table[(phase, species)] = num
params[param_key] = reaction_order_table
elif len(val) > 1:
# List of objects with 'v', 'u', and maybe 'i' keys
# -> transform into dict of tuples with key `i` and
# value (<value>, built(<units>))
coeff_table = {}
if debugging:
_log.debug(f"start: transform parameter list key={param_key}")
for item in val:
try:
index = item.get("i", 0)
built_units = cls._build_units(item["u"])
except (AttributeError, TypeError, ValueError) as err:
raise ConfigGeneratorError(
f"Cannot extract parameter. name='{comp_name}', "
f"item='{item}': {err}"
)
coeff_table[index] = (item["v"], built_units)
params[param_key] = coeff_table
if debugging:
_log.debug(f"done: transform parameter list key={param_key}")
else:
# Single object with 'v', 'u' keys
# -> transform into single tuple (<value>, built(<units>))
if debugging:
_log.debug(f"start: transform single parameter key={param_key}")
item = val[0]
built_units = cls._build_units(item["u"])
params[param_key] = (item["v"], built_units)
if debugging:
_log.debug(f"done: transform single parameter key={param_key}")
@staticmethod
def _iterate_dict_or_list(value):
# if value is a dict, use dict keys as indexes, so really just do `.items()`
if hasattr(value, "keys"):
return value.items()
# otherwise number from 1..N
elif hasattr(value, "append"):
num = 1
for item in value:
yield str(num), item
@classmethod
def _wrap_section(cls, section: str, data: Dict):
"""Put all `data` inside {<section>: <name>: { /data/ }}.
The `<name>` is taken from `data["name"]`.
Also removes keys 'name' and special keys starting with underscore
like _id from the `data`.
Changes input argument.
Section will be, e.g., "components" or "equilibrium_reactions"
"""
comp_name = data["name"]
# create new location for component data
if section not in data:
data[section] = {}
assert comp_name not in data[section], "trying to add existing component"
data[section][comp_name] = {}
# copy existing to new location
to_delete = set() # cannot delete while iterating, so store keys to delete here
for key, value in data.items():
# if this is not a special field, add it to the component
if key not in (
"name",
"base_units",
"reaction_type",
"components",
"reactant_elements",
section,
"_id",
):
data[section][comp_name][key] = value
# mark field for deletion, if not top-level field
if key not in ("base_units", section):
to_delete.add(key)
# remove copied fields from old location
for key in to_delete:
del data[key]
# remove special
cls._remove_special(data)
@classmethod
def _remove_special(cls, data):
"""Remove 'special' keys starting with an underscore (e.g. _id) as well as 'name'."""
for key in list(data.keys()):
if key.startswith("_") or key == "name":
del data[key]
@classmethod
def _substitute(cls, data):
debugging = _log.isEnabledFor(logging.DEBUG)
def dicty(d):
return hasattr(d, "keys")
def substitute_value(d, subst, key):
"""Find string value(s) at 'd[key]' in mapping 'subst' and substitute mapped value.
Return True if found, False otherwise.
"""
if debugging:
_log.debug(f"substitute value: d={d} subst={subst} key={key}")
# make a scalar into a list of length 1, but remember whether
# it's a list or not
if (
isinstance(d[key], str)
or isinstance(d[key], int)
or isinstance(d[key], float)
):
str_values = [d[key]]
is_list = False
else:
try:
str_values = list(d[key])
except TypeError:
str_values = [str(d[key])]
is_list = True
# substitute all values in the list, with the result in `new_list`
num_subst, new_list = 0, []
for str_value in str_values:
new_value = None
if dicty(subst):
if str_value in subst:
new_value = subst[str_value]
# add case-insensitivity
elif str_value.lower() in subst:
new_value = subst[str_value.lower()]
elif subst == cls.SUBST_UNITS:
if isinstance(
str_value, str
): # make sure it's not already evaluated
_log.debug(
f"Substituting units: set {{'{key}': units('{str_value}')}} in {d}"
)
new_value = cls._build_units(str_value)
if new_value is None:
new_list.append(str_value) # unsubstituted value
else:
new_list.append(new_value)
num_subst += 1
# change input to substituted list (or single value)
d[key] = new_list if is_list else new_list[0]
# return True only if all values were substituted
return num_subst == len(new_list)
def stringish(x):
"""String or list/tuple of strings?"""
if isinstance(x, str):
return True
if isinstance(x, list) or isinstance(x, tuple):
for item in x:
if not isinstance(item, str):
return False
return True
return False
sv = cls.substitute_values
for sv_section in sv:
if debugging:
_log.debug(f"start: substitute section {sv_section}")
# get parent dict at dotted path given by 'sv_section'
key_list = sv_section.split(".")
data_section = data
# walk down the dotted path to the terminal dict
while dicty(data_section) and len(key_list) > 1:
subsection = key_list.pop(0)
if subsection in data_section:
data_section = data_section[subsection]
else:
data_section = None # not present
# if found, perform substitution(s)
if dicty(data_section):
sv_key = key_list.pop()
_log.debug(
f"perform substitutions in data={data_section} for key='{sv_key}'"
)
# if it is a wildcard, allow multiple substitutions
if "*" in sv_key:
matches = [k for k in data_section if fnmatchcase(k, sv_key)]
for match_key in matches:
if not stringish(data_section[match_key]):
continue # don't try to substitute non strings/string-lists
did_subst = substitute_value(
data_section, sv[sv_section], match_key
)
if not did_subst:
_log.warning(
f"Could not find substitution: section={sv_section} match={match_key} "
f"value={data_section[match_key]}"
)
# if not a wildcard, do zero or one substitutions
elif sv_key in data_section:
did_subst = substitute_value(data_section, sv[sv_section], sv_key)
if not did_subst:
_log.warning(
f"Could not find substitution: section={sv_section} "
f"value={data_section[sv_key]}"
)
if debugging:
_log.debug(f"done: substitute section {sv_section}")
class ThermoConfig(ConfigGenerator):
substitute_values = {
"valid_phase_types": {
"pt.liquidphase": PhaseType.liquidPhase,
"pt.solidphase": PhaseType.solidPhase,
"pt.vaporphase": PhaseType.vaporPhase,
"pt.aqueousphase": PhaseType.aqueousPhase,
},
"*_comp": {
"perrys": Perrys,
"constant": Constant,
"nist": NIST,
"relative_permittivity_constant": relative_permittivity_constant,
},
"phase_equilibrium_form.*": {
"fugacity": fugacity,
},
"type": {
"solvent": Solvent,
"solute": Solute,
"cation": Cation,
"anion": Anion,
"component": IComponent,
},
}
def __init__(self, data, name="unknown", validation=True):
"""Constructor.
Args:
data: Input data
name: Name of the component, e.g. "H2O"
validation: If True, perform schema validation against input.
Raises:
ValidationError: If the input is bad.
"""
super().__init__(data, name=name)
if validation:
from .validate import validate # put here to avoid circular import
if _log.isEnabledFor(logging.DEBUG):
_log.debug(f"Validating Component:\n{pformat(data)}")
validate(data, obj_type="component")
@classmethod
def _transform(cls, data):
cls._transform_parameter_data(data)
cls._substitute(data)
with field("valid_phase_types") as fld:
if isinstance(data.get(fld, None), (list, tuple)) and len(data[fld]) == 1:
data[fld] = data[fld][0]
del data["elements"]
cls._wrap_section("components", data)
for name in data["components"]:
cls._key_to_tuple(data["components"][name], "phase_equilibrium_form")
@classmethod
def _key_to_tuple(cls, data, section):
"""Change all key values separated by '-' in the given section to tuples of those values."""
if section not in data:
return
temp = {}
for key in data[section]:
item_list = key.split("-")
if len(item_list) != 2:
raise BadConfiguration(
"ThermoConfig._key_to_tuple",
data,
missing=None,
why="\n" + section + " tuple key must be only 2 items\n",
)
temp[tuple(item_list)] = data[section][key]
data[section] = temp
class ReactionConfig(ConfigGenerator):
substitute_values = {
"heat_of_reaction": {"constant_dh_rxn": constant_dh_rxn},
"*_form": {
"log_power_law_equil": log_power_law_equil,
"power_law_equil": power_law_equil,
"log_solubility_product": log_solubility_product,
"solubility_product": solubility_product,
"concentrationform.molarity": ConcentrationForm.molarity,
"concentrationform.molefraction": ConcentrationForm.moleFraction,
"concentrationform.activity": ConcentrationForm.activity,
},
"*_constant": {
"van_t_hoff": van_t_hoff,
},
}
def __init__(self, data, name="unknown", validation=True):
"""Constructor.
Args:
data: Input data
name: Name of the component, e.g. "H2O"
validation: If True, perform schema validation against input.
Raises:
ValidationError: If the input is bad.
"""
super().__init__(data, name=name)
if validation:
from .validate import validate # put here to avoid circular import
if _log.isEnabledFor(logging.DEBUG):
_log.debug(f"Validating Reaction:\n{pformat(data)}")
validate(data, obj_type="reaction")
@classmethod
def _transform(cls, data):
"""In-place data transformation from standard storage format to
format expected by IDAES idaes_config methods
"""
cls._transform_parameter_data(data)
for key, value in data.items():
# reformat stoichiometry to have tuple keys
if key == "stoichiometry":
stoich = value
stoich_table = {}
for phase in stoich:
for component_name, num in stoich[phase].items():
skey = (phase, component_name)
stoich_table[skey] = num
data[key] = stoich_table
cls._substitute(data)
reaction_type = data["type"]
reaction_section = f"{reaction_type}_reactions"
# The section should match a merge-key for the Reaction class
if reaction_section not in Reaction.merge_keys:
raise RuntimeError(
f"Unexpected reaction type while generating config: "
f"type={reaction_type} data={data}"
)
del data["type"] # remove from output
cls._wrap_section(reaction_section, data)
class BaseConfig(ConfigGenerator):
substitute_values = {
"state_definition": {"FTPx": FTPx, "FpcTP": FpcTP},
"phases.Liq.type": {"LiquidPhase": LiquidPhase, "AqueousPhase": AqueousPhase},
"phases.Sol.type": {"SolidPhase": SolidPhase},
"phases.Vap.type": {"VaporPhase": VaporPhase},
"phases.Liq.equation_of_state": {"Ideal": Ideal},
"phases.Sol.equation_of_state": {"Ideal": Ideal},
"phases.Vap.equation_of_state": {"Ideal": Ideal},
"bubble_dew_method": {"IdealBubbleDew": IdealBubbleDew},
"phase_equilibrium_state.*": {
"SmoothVLE": SmoothVLE,
},
"base_units.*": ConfigGenerator.SUBST_UNITS,
}
@classmethod
def _transform(cls, data):
cls._substitute(data)
cls._remove_special(data)
cls._list_to_tuple(data, "state_bounds")
cls._list_of_lists_to_tuple(data, "phases_in_equilibrium")
cls._key_to_tuple(data, "phase_equilibrium_state")
@classmethod
def _list_to_tuple(cls, data, section):
"""Change all list values in the given section to tuples."""
if section not in data:
return
for key in data[section]:
if isinstance(data[section][key], list):
data[section][key] = tuple(data[section][key])
@classmethod
def _list_of_lists_to_tuple(cls, data, section):
"""Change all list of list values in the given section to tuples."""
if section not in data:
return
temp = []
for item in data[section]:
if isinstance(item, list):
temp.append(tuple(item))
data[section] = temp
@classmethod
def _key_to_tuple(cls, data, section):
"""Change all key values separated by '-' in the given section to tuples of those values."""
if section not in data:
return
temp = {}
for key in data[section]:
item_list = key.split("-")
if len(item_list) != 2:
raise BadConfiguration(
"BaseConfig._key_to_tuple",
data,
missing=None,
why="\n" + section + " tuple key must be only 2 items\n",
)
temp[tuple(item_list)] = data[section][key]
data[section] = temp
class DataWrapperNames:
param = "parameter_data"
reaction_order = "reaction_order"
class DataWrapper:
"""Interface to wrap data from DB in convenient ways for consumption by the rest of the library.
Do not use this class directly.
Derived classes will feed the data (from the database) and the appropriate subclass of GenerateConfig to the
constructor. Then the IDAES config will be available from the `idaes_config` attribute.
Note that no conversion work is done before the first access, and the converted result is cached to
avoid extra work on repeated accesses.
"""
#: Subclasses should set this to the list of top-level keys that should be added,
# i.e. merged, into the result when an instance is added to the base data wrapper.
merge_keys = ()
NAMES = DataWrapperNames
def __init__(
self,
data: Dict,
config_gen_class: Type[ConfigGenerator] = None,
validate_as_type=None,
):
"""Ctor.
Args:
data: Data from the DB
config_gen_class: Used to transform DB data to IDAES idaes_config
"""
self._data, self._config_gen, self._config = data, config_gen_class, None
self.name = self._data.get("name", "")
if "_id" in self._data:
del self._data["_id"]
self._preprocess() # additional subclass-specific preprocessing
if validate_as_type:
from .validate import validate
validate(self._data, obj_type=validate_as_type)
def remove(self, key):
if key in self.data:
del self.data[key]
def remove_parameter(self, key):
param = self.NAMES.param # alias
if param in self.data and key in self.data[param]:
del self.data[param][key]
self._config = None
def set_parameter(self, key: str, value, units: str = "dimensionless", index=0):
"""Add to existing parameters or create a new parameter value.
Args:
key: Name of parameter
value: New value
units: Units for value
index: If the parameter is a list of values, index to set. Otherwise,
a list of length 1 will be created with an index of 0.
Returns:
None
Raises:
KeyError: If the data structure doesn't have a spot for parameters.
This is likely a more basic problem with the current instance.
"""
param = self.NAMES.param # alias
if param not in self.data:
raise KeyError(f"Missing section {param}, so cannot set a parameter")
entry = {"v": value, "u": units, "i": index}
# check if there are already value(s)
if key in self.data[param]:
# if existing values, replace matching index or add new
new_param, replaced = [], False
for item in self.data[param][key]:
if item["i"] == index:
# replace entry at this index with the new entry
new_param.append(entry)
replaced = True
else:
# keep current entry for this index
new_param.append(item)
if not replaced:
new_param.append(entry)
else:
# if no existing param, create new list of size 1
new_param = [entry]
self.data[param][key] = new_param
self._config = None # force regeneration
def _preprocess(self):
pass # define in subclasses
@property
def data(self):
return self._data
@property
def idaes_config(self) -> Dict:
""" "Get the data as an IDAES config dict.
Returns:
Python dict that can be passed to the IDAES as a config.
"""
if self._config is None:
# the config_gen() call will copy its input, so get the result from
# the .config attr
self._config = self._config_gen(self._data, name=self.name).config
return self._config
@property
def json_data(self) -> Dict:
"""Get the data in its "natural" form as a dict that can be serialized to JSON."""
copy = self._data.copy() # shallow copy is fine
if "_id" in copy:
del copy["_id"]
return copy
@classmethod
def from_idaes_config(cls, config: Dict) -> List["DataWrapper"]:
"""The inverse of the `idaes_config` property, this method constructs a new
instance of the wrapped data from the IDAES config information.
Args:
config: Valid IDAES configuration dictionary
Raises:
BadConfiguration: If the configuration can't be transformed into the EDB form due
to missing/invalid fields.
"""
pass # subclasses need to define this, using helper functions in this class
@classmethod
def _method_to_str(
cls, fld, src, tgt, subst, required=False, default=None, caller: str = None
):
"""Convert a method object to a string representation.
Raises:
BadConfiguration: if field is missing and required, or unrecognized without a default
"""
if fld in src:
value = src[fld]
try:
str_value = subst[value]
except KeyError:
if default is not None:
str_value = default
else:
raise BadConfiguration(
caller, config=src, why=f"Unknown value for {fld}"
)
tgt[fld] = str_value
elif required:
raise BadConfiguration(caller, config=src, missing=fld)
@classmethod
def _convert_parameter_data(cls, src, tgt, caller="unknown"):
if cls.NAMES.param not in src:
raise BadConfiguration(caller, src, missing=cls.NAMES.param)
pd, data = src[cls.NAMES.param], {}
for param, value in pd.items():
if isinstance(value, tuple):
data[param] = [{"v": value[0], "u": str(value[1])}]
elif isinstance(value, dict) and len(value) > 0:
key0 = list(value.keys())[0]
if isinstance(key0, tuple):
# process dict with tuple keys
if param == "reaction_order":
# skip, not something we need to store in EDB
_log.debug(f"skip 'reaction_order' in parameters from {caller}")
else:
pass # not implemented -- no other known values
else:
# process dict with scalar keys
param_list = []
for i, value2 in value.items():
try:
i = int(i)
except ValueError:
pass
except TypeError as err:
raise BadConfiguration(
caller,
src,
why=f"Unexpected key type in parameter_data: "
f"key='{i}' param={value}",
)
param_list.append({"i": i, "v": value2[0], "u": str(value2[1])})
data[param] = param_list
else:
raise BadConfiguration(
caller,
src,
why=f"Unexpected value type for '{cls.NAMES.param}': key='{param}', "
f"value='{value}'",
)
tgt[cls.NAMES.param] = data
class ComponentNames(DataWrapperNames):
pass
class Component(DataWrapper):
merge_keys = ("components",)
NAMES = ComponentNames
def __init__(self, data: Dict, validation=True):
"""Constructor.
Args:
data: Data from the DB
validation: If true, do schema validation of input
"""
vtype = "component" if validation else None
super().__init__(data, ThermoConfig, validate_as_type=vtype)
def _preprocess(self):
# set "type" field
if "type" in self._data:
return # already present
name, elements = None, None
try:
name = self._data["name"]
elements = self._data["elements"]
except KeyError:
missing = "name" if name is None else "elements"
raise BadConfiguration("Component._preprocess", self._data, missing=missing)
if name.endswith("-"): # negatively charged
component_type = "anion"
match = re.match(r".*(\d+)-$", name)
charge = -1 if match is None else -int(match.group(1))
self._data["charge"] = charge
elif name.endswith("+"): # positively charged
component_type = "cation"
match = re.match(r".*(\d+)\+$", name)
charge = 1 if match is None else int(match.group(1))
self._data["charge"] = charge
elif name == "H2O": # water is always "H2O"
component_type = "solvent"
else: # anything else neutral
component_type = "solute"
self._data["type"] = component_type
@classmethod
def from_idaes_config(cls, config: Dict) -> List["Component"]:
"""See documentation on parent class."""
whoami = "Component.from_idaes_config"
# get inverse mapping of strings and values from ThermoConfig.substitute_values, used
# for calls to _method_to_str()
subst_strings = {}
for _, mapping in ThermoConfig.substitute_values.items():
for k, v in mapping.items():
subst_strings[v] = k
if "components" not in config:
raise BadConfiguration(config=config, whoami=whoami, missing="components")
result = []
for name, c in config["components"].items():
d = {"name": name}
with field("type") as fld:
if fld not in c:
raise BadConfiguration(whoami, config, missing=fld)
possible = {Solvent, Solute, Cation, Anion, IComponent}
if c[fld] not in possible:
possible_list = ", ".join([str(t) for t in possible])
raise BadConfiguration(
whoami,
config,
why=f"Bad value for '{fld}': expected one of: {possible_list}; "
f"got='{c[fld]}'",
)
cls._method_to_str("valid_phase_types", c, d, subst_strings, caller=whoami)
for fld in c:
if fld.endswith("_comp"):
cls._method_to_str(fld, c, d, subst_strings, caller=whoami)
with field("phase_equilibrium_form") as fld:
if fld in c:
d[fld] = {}
for key, value in c[fld].items():
break
cls._method_to_str(fld, c[fld], d, subst_strings, caller=whoami)
# extract elements from name
d["elements"] = re.findall(r"[A-Z][a-z]?", name)
cls._convert_parameter_data(c, d)
result.append(Component(d))
return result
class ReactionNames(DataWrapperNames):
stoich = "stoichiometry"
hor = "heat_of_reaction"
eq_const = "equilibrium_constant"
eq_form = "equilibrium_form"
conc_form = "concentration_form"
class Reaction(DataWrapper):
merge_keys = ("equilibrium_reactions", "rate_reactions", "inherent_reactions")
NAMES = ReactionNames
PHASES = ("Liq", "Vap", "Sol")
def __init__(self, data: Dict, validation=True):
"""Constructor.
Args:
data: Data from the DB
validation: If true, do schema validation of input
"""
vtype = "reaction" if validation else None
super().__init__(data, ReactionConfig, validate_as_type=vtype)
@property
def reaction_type(self):
return self.data.get("type", "")
def set_reaction_order(
self,
phase: str,
order: Union[List[Tuple[str, float]], Dict[str, float]],
require_all: bool = False,
) -> None:
"""Set the reaction order for the given phase.
Args:
phase: a value from self.PHASES
order: Either a dict or list of (element, value) pairs
require_all: If True, require that all components in the reaction be
given an order. If False, it is OK if some components are missing.
Returns:
None. Reaction order is modified in place.
Raises:
KeyError: something is missing in the data structure, or unknown
component provided
ValueError: Wrong or incomplete components provided
"""
if bool(order) is False:
raise ValueError("No components provided for reaction order")
# schema validation should guarantee this structure
# If 'reaction_order' key does not exist, then create one as a copy of stoich
if self.NAMES.reaction_order in self.data[self.NAMES.param]:
ro = self.data[self.NAMES.param][self.NAMES.reaction_order]
else:
self.data[self.NAMES.param][self.NAMES.reaction_order] = self.data[
self.NAMES.stoich
].copy()
ro = self.data[self.NAMES.param][self.NAMES.reaction_order]
if phase not in self.PHASES:
raise ValueError(
f"Invalid phase '{phase}'. Valid values: " f"{', '.join(self.PHASES)}"
)
if phase not in ro:
raise KeyError(f"Phase '{phase}' not found")
ro = ro[phase]
# normalize input to dict form
if not hasattr(order, "keys"):
order = dict(order)
# additional checks for 'require_all' flag
if require_all:
if len(order) != len(ro):
why = "not enough" if len(order) < len(ro) else "too many"
raise ValueError(
f"{why.title()} components provided for new reaction "
f"order, with 'require_all' flag set to True"
)
if set(order.keys()) != set(ro.keys()):
raise ValueError(
"Components in new reaction order do not match "
"components in reaction, with 'require_all' flag "
"set to True"
)
# Replace one component at a time, raising a KeyError if unknown component
# Ensure that the instance is not modified if there are any errors.
ro_tmp = ro.copy()
for key, value in order.items():
if key not in ro:
raise KeyError(f"Component '{key}' not found in reaction")
ro_tmp[key] = value
# Update reaction order in this object
self.data[self.NAMES.param][self.NAMES.reaction_order][phase] = ro_tmp
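# Example (added comment, hedged): for a reaction whose stoichiometry contains
# a "Liq" phase with components "H2O" and "H_+", the order could be overridden
# with something like:
#   rxn.set_reaction_order("Liq", {"H2O": 0, "H_+": 1})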
@classmethod
def from_idaes_config(cls, config: Dict) -> List["Reaction"]:
"""See documentation on parent class."""
whoami = "Reaction.from_idaes_config" # for logging
# get inverse mapping of strings and values from
# ReactionConfig.substitute_values, used for calls to _method_to_str()
subst_strings = {}
for _, mapping in ReactionConfig.substitute_values.items():
for k, v in mapping.items():
subst_strings[v] = k
result = []
# XXX: base units?
for reaction_type in (
k for k in cls.merge_keys if (k.endswith("_reactions") and k in config)
):
for name, r in config[reaction_type].items():
d = {"name": name, "type": reaction_type.split("_reactions")[0]}
# convert all non-dictionary-valued fields into equivalent string values
for fld, val in r.items():
if isinstance(val, str): # leave string values as-is
d[fld] = val
elif not isinstance(val, dict): # convert all other non-dict values
cls._method_to_str(fld, r, d, subst_strings, caller=whoami)
cls._convert_parameter_data(r, d)
with field("stoichiometry") as fld:
if fld in r:
cls._convert_stoichiometry(r[fld], d)
result.append(Reaction(d))
return result
@classmethod
def _convert_stoichiometry(cls, src, tgt):
data, component_list = {}, []
for key, value in src.items():
phase, species = key
if phase in data:
data[phase][species] = value # set species & quantity
else:
data[phase] = {species: value} # create new dictionary
component_list.append(species)
tgt["stoichiometry"] = data
tgt["components"] = component_list
class Base(DataWrapper):
"""Wrapper for 'base' information to which a component or reaction is added."""
def __init__(self, data: Dict):
super().__init__(data, BaseConfig)
self._to_merge = []
self._component_names = set()
self._dirty = True
self._idaes_config = None
def add(self, item: DataWrapper):
"""Add wrapped data to this base object."""
self._to_merge.append(item)
if isinstance(item, Component):
self._component_names.add(item.name)
self._dirty = True
@property
def component_names(self):
return list(self._component_names)
@property
def idaes_config(self):
# if there is no change, return previously merged value
if not self._dirty:
return self._idaes_config
# if the base config has not yet been created, do that now
if self._idaes_config is None:
self._idaes_config = super().idaes_config
# merge in items that were added with the `add()` method
for item in self._to_merge:
self._merge(self._idaes_config, item)
# reset for more calls to `add()` or this method
self._dirty, self._to_merge = False, []
# return merged value
return self._idaes_config
@staticmethod
def _merge(dst, src: DataWrapper) -> Dict:
"""Merge on defined configuration keys."""
src_config = src.idaes_config
for key in src.merge_keys:
if key not in src_config:
continue
if key in dst:
dst[key].update(src_config[key])
else:
dst[key] = src_config[key]
return dst
class Result:
"""Encapsulate one or more JSON objects in the appropriate :class:`DataWrapper` subclass.
Users won't need to instantiate this directly, just iterate over it to retrieve the result of
a database query or other operation that returns EDB data objects.
For example::
result = db.get_reactions(..search-params...)
for reaction_obj in result:
# ..work with instance of class Reaction..
print(reaction_obj.name)
"""
def __init__(self, iterator=None, item_class=None):
if iterator is not None:
assert issubclass(item_class, DataWrapper)
self._it = iterator
self._it_class = item_class
def __iter__(self):
return self
def __next__(self):
datum = next(self._it)
obj = self._it_class(datum)
return obj
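# --- usage sketch (not part of the original module) ---
# A hedged illustration of tweaking a wrapped Component before generating an
# IDAES config, following the docstrings above; the input dict and the "mw"
# parameter name are assumptions, not taken from a real EDB record:
#
#   comp = Component(
#       {"name": "NaCl", "elements": ["Na", "Cl"], "parameter_data": {}},
#       validation=False,
#   )
#   comp.set_parameter("mw", 58.44e-3, units="kg/mol")
#   config = comp.idaes_config       # or merge it into a Base via Base.add(comp)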
|
python
|
import os
import csv
import timeit
from datetime import datetime
import numpy
import logging
import coloredlogs
import numpy as np
import argparse
import copy
import json
import re
import sys
import subprocess
import onnx
import onnxruntime
from onnx import numpy_helper
from perf_utils import *
import pprint
import time
from float16 import *
# import torch
debug = False
sys.path.append('.')
logger = logging.getLogger('')
ep_to_provider_list = {
"CPUExecutionProvider": ["CPUExecutionProvider"],
"CUDAExecutionProvider": ["CUDAExecutionProvider"],
"CUDAExecutionProvider_fp16": ["CUDAExecutionProvider"],
"TensorrtExecutionProvider": ["TensorrtExecutionProvider", "CUDAExecutionProvider"],
"TensorrtExecutionProvider_fp16": ["TensorrtExecutionProvider", "CUDAExecutionProvider"],
}
def run_trt_standalone(trtexec, model_path, ort_inputs, all_inputs_shape, fp16):
model_path = "--onnx=" + model_path
input_shape = []
print(all_inputs_shape)
for i in range(len(ort_inputs)):
name = ort_inputs[i].name
shape = []
for j in all_inputs_shape[i]:
shape.append(str(j))
shape = "x".join(shape)
shape = name + ':' + shape
input_shape.append(shape)
shapes_arg = '--optShapes=' + ','.join(input_shape)
print(shapes_arg)
result = {}
try:
if fp16:
p1 = subprocess.Popen([trtexec, model_path, "--fp16", "--percentile=90", "--explicitBatch", shapes_arg], stdout=subprocess.PIPE)
else:
p1 = subprocess.Popen([trtexec, model_path, "--percentile=90", "--explicitBatch", shapes_arg], stdout=subprocess.PIPE)
stdout, sterr = p1.communicate()
print(stdout)
stdout = stdout.decode("ascii").strip()
tmp = stdout.split("\n")
target_list = []
for t in tmp:
if 'mean:' in t:
target_list.append(t)
if 'percentile:' in t:
target_list.append(t)
target = target_list[2]
start = target.find('mean:') + 6
end = target.find('ms')
result["average_latency_ms"] = target[start:end]
target = target_list[3]
start = target.find('percentile:') + 12
end = target.find('ms')
result["latency_90_percentile"] = target[start:end]
print(result)
return result
except Exception as e:
logger.info("trtexec fails...")
return None
def get_latency_result(runtimes, batch_size):
latency_ms = sum(runtimes) / float(len(runtimes)) * 1000.0
latency_variance = numpy.var(runtimes, dtype=numpy.float64) * 1000.0
throughput = batch_size * (1000.0 / latency_ms)
return {
"test_times": len(runtimes),
"latency_variance": "{:.2f}".format(latency_variance),
"latency_90_percentile": "{:.2f}".format(numpy.percentile(runtimes, 90) * 1000.0),
"latency_95_percentile": "{:.2f}".format(numpy.percentile(runtimes, 95) * 1000.0),
"latency_99_percentile": "{:.2f}".format(numpy.percentile(runtimes, 99) * 1000.0),
"average_latency_ms": "{:.2f}".format(latency_ms),
"QPS": "{:.2f}".format(throughput),
}
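# Worked example (added comment, not original): for runtimes = [0.010, 0.020] s
# and batch_size = 1, the values above come out as
#   average_latency_ms    = (0.010 + 0.020) / 2 * 1000 = 15.00
#   QPS                   = 1 * (1000 / 15.00)         ≈ 66.67
#   latency_90_percentile ≈ 19.00 ms (numpy linear interpolation)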
def get_ort_session_inputs_and_outptus(name, session, ort_input):
sess_inputs = {}
sess_outputs = None
if name == 'BERT-Squad':
unique_ids_raw_output = ort_input[0]
input_ids = ort_input[1]
input_mask = ort_input[2]
segment_ids = ort_input[3]
sess_inputs = {
"unique_ids_raw_output___9:0": unique_ids_raw_output,
"input_ids:0": input_ids[0:1],
"input_mask:0": input_mask[0:1],
"segment_ids:0": segment_ids[0:1]}
sess_outputs = ["unique_ids:0", "unstack:0", "unstack:1"]
elif name == 'BiDAF':
sess_inputs = {
"context_word": ort_input[0],
"context_char": ort_input[2],
"query_word": ort_input[1],
"query_char": ort_input[3]}
sess_outputs = ["start_pos","end_pos"]
elif name == 'Yolov4':
sess_inputs[session.get_inputs()[0].name] = ort_input[0]
sess_outputs = ['Identity:0']
elif name == 'Shufflenet-v2':
sess_inputs[session.get_inputs()[0].name] = ort_input
else:
sess_inputs = {}
for i in range(len(session.get_inputs())):
sess_inputs[session.get_inputs()[i].name] = ort_input[i]
return (sess_inputs, sess_outputs)
def inference_ort(args, name, session, ep, ort_inputs, result_template, repeat_times, batch_size):
runtimes = []
for ort_input in ort_inputs:
sess_inputs, sess_outputs = get_ort_session_inputs_and_outptus(name, session, ort_input)
print("sess_inputs:")
print(sess_inputs)
print("sess_outputs:")
print(sess_outputs)
try:
if args.input_data == "random":
repeat_times = 1 # warm-up run is included in ort_inputs
else:
repeat_times += 1 # add warm-up run
runtime = timeit.repeat(lambda: session.run(sess_outputs, sess_inputs), number=1, repeat=repeat_times)
runtimes += runtime
except Exception as e:
logger.error(e)
return None
print(runtimes)
runtimes[:] = runtimes[1:]
print(runtimes)
result = {}
result.update(result_template)
result.update({"io_binding": False})
result.update(get_latency_result(runtimes, batch_size))
return result
def inference_ort_and_get_prediction(name, session, ort_inputs):
ort_outputs = []
for ort_input in ort_inputs:
sess_inputs, sess_outputs = get_ort_session_inputs_and_outptus(name, session, ort_input)
print("sess_inputs:")
print(sess_inputs)
print("sess_outputs:")
print(sess_outputs)
try:
result = session.run(sess_outputs, sess_inputs)
# handle shape of output differently
if name == 'BERT-Squad':
ort_outputs.append([result])
elif name == 'Shufflenet-v2':
ort_outputs.append(result[0])
else:
ort_outputs.append(result)
except Exception as e:
logger.error(e)
return None
return ort_outputs
# not used by this script yet
def inference_ort_with_io_binding(model, ort_inputs, result_template, repeat_times, batch_size, device='cuda'):
runtimes = []
session = model.get_session()
# Bind inputs and outputs to onnxruntime session
io_binding = session.io_binding()
for ort_input in ort_inputs:
# Bind inputs to device
if model.get_model_name() == 'BERT-Squad':
name = session.get_inputs()[0].name
print(name)
np_input = torch.from_numpy(ort_input[0]).to(device)
io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
name = session.get_inputs()[1].name
print(name)
np_input = torch.from_numpy(ort_input[1][0:1]).to(device)
io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
name = session.get_inputs()[2].name
print(name)
np_input = torch.from_numpy(ort_input[2][0:1]).to(device)
io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
name = session.get_inputs()[3].name
print(name)
np_input = torch.from_numpy(ort_input[3][0:1]).to(device)
io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
else:
name = session.get_inputs()[0].name
print(ort_input[0])
np_input = torch.from_numpy(ort_input[0]).to(device)
io_binding.bind_input(name, np_input.device.type, 0, numpy.float32, np_input.shape, np_input.data_ptr())
name_o = session.get_outputs()[0].name
io_binding.bind_output(name_o)
# name = session.get_inputs()[0].name
# np_input = torch.from_numpy(numpy.asarray(ort_inputs[0][0])).to(device)
# io_binding.bind_input(name, np_input.device.type, 0, numpy.float32, np_input.shape, np_input.data_ptr())
# name_o = session.get_outputs()[0].name
# io_binding.bind_output(name_o, 'cpu', 0, numpy.float32, session.get_outputs()[0].shape, None)
try:
runtimes = runtimes + timeit.repeat(lambda: session.run_with_iobinding(io_binding), number=1, repeat=repeat_times)
except Exception as e:
logger.error(e)
return None
print(runtimes)
result = {}
result.update(result_template)
result.update({"io_binding": True})
result.update(get_latency_result(runtimes, batch_size))
return result
def get_cuda_version():
from pathlib import Path
home = str(Path.home())
p1 = subprocess.Popen(["find", home+"/.local/lib/", "-name", "onnxruntime_pybind11_state.so"], stdout=subprocess.PIPE)
stdout, sterr = p1.communicate()
stdout = stdout.decode("ascii").strip()
p1 = subprocess.Popen(["ldd", stdout], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "libcudart.so"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
return stdout
def get_trt_version():
from pathlib import Path
home = str(Path.home())
p1 = subprocess.Popen(["find", home+"/.local/lib/", "-name", "onnxruntime_pybind11_state.so"], stdout=subprocess.PIPE)
stdout, sterr = p1.communicate()
stdout = stdout.decode("ascii").strip()
p1 = subprocess.Popen(["ldd", stdout], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "libnvinfer.so"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
if stdout == "":
p1 = subprocess.Popen(["find", home+"/.local/lib/", "-name", "libonnxruntime_providers_tensorrt.so"], stdout=subprocess.PIPE)
stdout, sterr = p1.communicate()
stdout = stdout.decode("ascii").strip()
p1 = subprocess.Popen(["ldd", stdout], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "libnvinfer.so"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
return stdout
# not used by this script for now
def tmp_get_trt_version():
p1 = subprocess.Popen(["dpkg", "-l"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "TensorRT runtime libraries"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
if stdout != "":
stdout = re.sub(r'\s+', ' ', stdout)
return stdout
if os.path.exists("/usr/lib/x86_64-linux-gnu/libnvinfer.so"):
p1 = subprocess.Popen(["readelf", "-s", "/usr/lib/x86_64-linux-gnu/libnvinfer.so"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "version"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
stdout = stdout.split(" ")[-1]
return stdout
elif os.path.exists("/usr/lib/aarch64-linux-gnu/libnvinfer.so"):
p1 = subprocess.Popen(["readelf", "-s", "/usr/lib/aarch64-linux-gnu/libnvinfer.so"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "version"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
stdout = stdout.split(" ")[-1]
return stdout
return ""
#
# The following two lists will be generated.
#
# inputs: [[test_data_0_input_0.pb, test_data_0_input_1.pb ...], [test_data_1_input_0.pb, test_data_1_input_1.pb ...] ...]
# outputs: [[test_data_0_output_0.pb, test_data_0_output_1.pb ...], [test_data_1_output_0.pb, test_data_1_output_1.pb ...] ...]
#
def load_onnx_model_zoo_test_data(path, all_inputs_shape, data_type="fp32"):
print("Parsing test data in {} ...".format(path))
# p1 = subprocess.Popen(["find", path, "-name", "test_data_set*", "-type", "d"], stdout=subprocess.PIPE)
p1 = subprocess.Popen(["find", path, "-name", "test_data*", "-type", "d"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["sort"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
test_data_set_dir = stdout.split("\n")
print(test_data_set_dir)
inputs = []
outputs = []
shape_flag = False
# if not empty means input shape has been parsed before.
if len(all_inputs_shape) > 0:
shape_flag = True
# find test data path
for test_data_dir in test_data_set_dir:
pwd = os.getcwd()
os.chdir(test_data_dir)
# load inputs
p1 = subprocess.Popen(["find", ".", "-name", "input*"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["sort"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
input_data = stdout.split("\n")
print(input_data)
input_data_pb = []
for data in input_data:
tensor = onnx.TensorProto()
with open(data, 'rb') as f:
tensor.ParseFromString(f.read())
tensor_to_array = numpy_helper.to_array(tensor)
if data_type == "fp16" and tensor_to_array.dtype == np.dtype(np.float32):
tensor_to_array = tensor_to_array.astype(np.float16)
input_data_pb.append(tensor_to_array)
# print(np.array(input_data_pb[-1]).shape)
if not shape_flag:
all_inputs_shape.append(input_data_pb[-1].shape)
print(all_inputs_shape[-1])
inputs.append(input_data_pb)
print('Loaded {} inputs successfully.'.format(len(inputs)))
# load outputs
p1 = subprocess.Popen(["find", ".", "-name", "output*"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["sort"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
output_data = stdout.split("\n")
print(output_data)
if len(output_data) > 0 and output_data[0] != '':
output_data_pb = []
for data in output_data:
tensor = onnx.TensorProto()
with open(data, 'rb') as f:
tensor.ParseFromString(f.read())
tensor_to_array = numpy_helper.to_array(tensor)
if data_type == "fp16" and tensor_to_array.dtype == np.dtype(np.float32):
tensor_to_array = tensor_to_array.astype(np.float16)
output_data_pb.append(tensor_to_array)
print(np.array(output_data_pb[-1]).shape)
outputs.append(output_data_pb)
print('Loaded {} outputs successfully.'.format(len(outputs)))
os.chdir(pwd)
return inputs, outputs
def generate_onnx_model_random_input(test_times, ref_input):
inputs = []
for i in range(test_times):
input_data = []
for tensor in ref_input:
shape = tensor.shape
dtype = tensor.dtype
if dtype == np.int8 or \
dtype == np.uint8 or \
dtype == np.int16 or \
dtype == np.uint16 or \
dtype == np.int32 or \
dtype == np.uint32 or \
dtype == np.int64 or \
dtype == np.uint64:
new_tensor = np.random.randint(0, np.max(tensor)+1, shape, dtype)
else:
new_tensor = np.random.random_sample(shape).astype(dtype)
print("original tensor:")
print(tensor)
print("new random tensor:")
print(new_tensor)
print("\n")
input_data.append(new_tensor)
inputs.append(input_data)
return inputs
def validate(all_ref_outputs, all_outputs, decimal):
print('Reference {} results.'.format(len(all_ref_outputs)))
print('Predicted {} results.'.format(len(all_outputs)))
print('decimal {}'.format(decimal))
# print(np.array(all_ref_outputs).shape)
# print(np.array(all_outputs).shape)
try:
for i in range(len(all_outputs)):
ref_outputs = all_ref_outputs[i]
outputs = all_outputs[i]
for j in range(len(outputs)):
ref_output = ref_outputs[j]
output = outputs[j]
# print(ref_output)
# print(output)
# Compare the results with reference outputs up to x decimal places
for ref_o, o in zip(ref_output, output):
# abs(desired-actual) < 1.5 * 10**(-decimal)
np.testing.assert_almost_equal(ref_o, o, decimal)
except Exception as e:
logger.error(e)
return False, e
print('ONNX Runtime outputs are similar to reference outputs!')
return True, None
# not used by this script
def cleanup_files():
files = []
p = subprocess.Popen(["find", ".", "-name", "test_data_set*", "-type", "d"], stdout=subprocess.PIPE)
stdout, sterr = p.communicate()
stdout = stdout.decode("ascii").strip()
files = files + stdout.split("\n")
p = subprocess.Popen(["find", ".", "-name", "*.onnx"], stdout=subprocess.PIPE)
stdout, sterr = p.communicate()
stdout = stdout.decode("ascii").strip()
files = files + stdout.split("\n")
p = subprocess.Popen(["find", ".", "-name", "*.gz"], stdout=subprocess.PIPE)
stdout, sterr = p.communicate()
stdout = stdout.decode("ascii").strip()
files = files + stdout.split("\n")
for f in files:
if "custom_test_data" in f:
print(f)
continue
subprocess.Popen(["rm","-rf", f], stdout=subprocess.PIPE)
def remove_profiling_files(path):
files = []
p = subprocess.Popen(["find", path, "-name", "onnxruntime_profile*"], stdout=subprocess.PIPE)
stdout, sterr = p.communicate()
stdout = stdout.decode("ascii").strip()
files = files + stdout.split("\n")
for f in files:
if "custom_test_data" in f:
continue
subprocess.Popen(["rm","-rf", f], stdout=subprocess.PIPE)
def update_fail_report(fail_results, args, model, ep, e_type, e):
result = {}
result["model"] = model
result["ep"] = ep
result["error type"] = e_type
result["error message"] = re.sub('^\n', '', str(e))
fail_results.append(result)
def update_fail_model(model_ep_fail_map, fail_results, args, model_name, ep, e_type, e):
if not model_name in model_ep_fail_map:
model_ep_fail_map[model_name] = [ep]
else:
if ep not in model_ep_fail_map[model_name]:
model_ep_fail_map[model_name].append(ep)
update_fail_report(fail_results, args, model_name, ep, e_type, e)
# If TRT fails, TRT FP16 should fail as well
if ep == 'TensorrtExecutionProvider':
ep_ = "TensorrtExecutionProvider_fp16"
e_ = "Not benchmarking TRT FP16 since TRT failed already."
update_fail_report(fail_results, args, model_name, ep_, e_type, e_)
model_ep_fail_map[model_name].append(ep_)
def skip_ep(model_name, ep, model_ep_fail_map):
if model_name == 'vision-yolov3' and "fp16" in ep:
return True
if model_name == 'speech' and "fp16" in ep:
return True
if model_name not in model_ep_fail_map:
return False
ep_fail_list = model_ep_fail_map[model_name]
if ep in ep_fail_list:
return True
return False
def read_model_ep_fail_map_from_file(map_file):
with open(map_file) as f:
try:
data = json.load(f)
except Exception as e:
return None
return data
def write_model_ep_fail_map_to_file(model_ep_fail_map):
with open('.model_ep_fail_map.json', 'w') as file:
file.write(json.dumps(model_ep_fail_map)) # use `json.loads` to do the reverse
def get_system_info(info):
info["cuda"] = get_cuda_version()
info["trt"] = get_trt_version()
p = subprocess.Popen(["cat", "/etc/os-release"], stdout=subprocess.PIPE)
stdout, sterr = p.communicate()
stdout = stdout.decode("ascii").strip()
stdout = stdout.split("\n")[:2]
infos = []
for row in stdout:
row = re.sub('=', ': ', row)
row = re.sub('"', '', row)
infos.append(row)
info["linux_distro"] = infos
p = subprocess.Popen(["lscpu"], stdout=subprocess.PIPE)
stdout, sterr = p.communicate()
stdout = stdout.decode("ascii").strip()
stdout = stdout.split("\n")
infos = []
for row in stdout:
if "mode" in row or "Arch" in row or "name" in row:
# row = row.replace(":\s+", ": ")
row = re.sub(': +', ': ', row)
infos.append(row)
info["cpu_info"] = infos
p1 = subprocess.Popen(["lspci", "-v"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "NVIDIA"], stdin=p1.stdout, stdout=subprocess.PIPE)
stdout, sterr = p2.communicate()
stdout = stdout.decode("ascii").strip()
stdout = stdout.split("\n")
infos = []
for row in stdout:
row = re.sub('.*:', '', row)
infos.append(row)
info["gpu_info"] = infos
p = subprocess.Popen(["cat", "/proc/meminfo"], stdout=subprocess.PIPE)
stdout, sterr = p.communicate()
stdout = stdout.decode("ascii").strip()
stdout = stdout.split("\n")
infos = []
for row in stdout:
if "Mem" in row:
row = re.sub(': +', ': ', row)
infos.append(row)
info["memory"] = infos
def parse_models_info(path):
    models = {}
    with open(path) as f:
        data = json.load(f)
        for row in data:
            if 'model_name' in row:
                models[row['model_name']] = {}
            else:
                logger.error('Model name must be provided in models_info.json')
                raise ValueError('Model name must be provided in models_info.json')
            model = models[row['model_name']]
            if 'working_directory' in row:
                model['working_directory'] = row['working_directory']
            else:
                logger.error('Model working directory must be provided in models_info.json')
                raise ValueError('Model working directory must be provided in models_info.json')
            if 'model_path' in row:
                model['model_path'] = row['model_path']
            else:
                logger.error('Model path must be provided in models_info.json')
                raise ValueError('Model path must be provided in models_info.json')
            if 'test_data_path' in row:
                model['test_data_path'] = row['test_data_path']
            else:
                logger.error('Test data path must be provided in models_info.json')
                raise ValueError('Test data path must be provided in models_info.json')
    return models
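# Illustrative shape of the model list JSON consumed by parse_models_info()
# (a sketch inferred from the keys read above; the names and paths below are
# placeholders, not values taken from this repository):
# [
#     {
#         "model_name": "some-model",
#         "working_directory": "some-model",
#         "model_path": "model.onnx",
#         "test_data_path": "."
#     }
# ]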
def convert_model_from_float_to_float16(model_path):
# from onnxmltools.utils.float16_converter import convert_float_to_float16
from onnxmltools.utils import load_model, save_model
from float16 import convert_float_to_float16
onnx_model = load_model(model_path)
new_onnx_model = convert_float_to_float16(onnx_model)
save_model(new_onnx_model, 'new_fp16_model.onnx')
return os.path.join(os.getcwd(), "new_fp16_model.onnx")
def create_session(model_path, providers, session_options):
logger.info(model_path)
try:
session = onnxruntime.InferenceSession(model_path, providers=providers, sess_options=session_options)
return session
    except Exception:
logger.info("Use symbolic_shape_infer.py")
try:
new_model_path = model_path[:].replace(".onnx", "_new.onnx")
if not os.path.exists(new_model_path):
subprocess.run("python3 ../symbolic_shape_infer.py --input " + model_path + " --output " + new_model_path + " --auto_merge", shell=True, check=True)
session = onnxruntime.InferenceSession(new_model_path, providers=providers, sess_options=session_options)
return session
except Exception as e:
print(e)
raise
def run_onnxruntime(args, models):
success_results = []
fail_results = []
latency_comparison_map = {} # model -> CUDA/TRT latency
profile_metrics_map = {} # model -> metrics from profiling file
model_ep_fail_map = {} # model -> failing ep
# read failing ep information if file exists
if args.running_mode == 'benchmark':
if os.path.exists('.model_ep_fail_map.json'):
model_ep_fail_map = read_model_ep_fail_map_from_file('.model_ep_fail_map.json')
if args.fp16:
ep_list = ["CUDAExecutionProvider", "TensorrtExecutionProvider", "CUDAExecutionProvider_fp16", "TensorrtExecutionProvider_fp16"]
else:
ep_list = ["CUDAExecutionProvider", "TensorrtExecutionProvider"]
validation_exemption = ["TensorrtExecutionProvider_fp16"]
#######################
# iterate model
#######################
for name, info in models.items():
latency_result = {}
path = info["working_directory"]
pwd = os.getcwd()
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
path = os.getcwd()
# cleanup files before running a new inference
if args.running_mode == "validate":
remove_profiling_files(path)
inputs = []
ref_outputs = []
inputs_fp32 = []
ref_outputs_fp32 = []
inputs_fp16 = []
ref_outputs_fp16 = []
all_inputs_shape = [] # use for standalone trt
ep_to_ep_op_map = {} # ep -> { ep -> operator }
profile_already_parsed = set()
#######################
# iterate ep
#######################
for ep in ep_list:
if skip_ep(name, ep, model_ep_fail_map):
continue
ep_ = ep_to_provider_list[ep][0]
if (ep_ not in onnxruntime.get_available_providers()):
logger.error("No {} support".format(ep_))
continue
model_path = info["model_path"]
if "fp16" in ep:
fp16 = True
os.environ["ORT_TENSORRT_FP16_ENABLE"] = "1"
if ep == "CUDAExecutionProvider_fp16":
model_path = convert_model_from_float_to_float16(model_path)
logger.info("\nInitializing {} with float16 enabled to run on {} ...".format(name, ep))
else:
fp16 = False
os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
logger.info("\nInitializing {} to run on {} ...".format(name, ep))
test_data_dir = info["test_data_path"]
# read input/output of test data
if fp16 and ep == "CUDAExecutionProvider_fp16":
if not inputs_fp16 or not ref_outputs_fp16:
inputs_fp16, ref_outputs_fp16 = load_onnx_model_zoo_test_data(test_data_dir, all_inputs_shape, "fp16")
inputs = inputs_fp16
ref_outputs = ref_outputs_fp16
else:
if not inputs_fp32 or not ref_outputs_fp32:
inputs_fp32, ref_outputs_fp32 = load_onnx_model_zoo_test_data(test_data_dir, all_inputs_shape)
inputs = inputs_fp32
ref_outputs = ref_outputs_fp32
if args.input_data == "random":
inputs = generate_onnx_model_random_input(args.test_times+1, inputs[0])
#######################################
# benchmark or validation
#######################################
if args.running_mode == 'benchmark':
logger.info("===========================")
logger.info("======== benchmark ========")
logger.info("===========================")
options = onnxruntime.SessionOptions()
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
# create onnxruntime inference session
try:
sess = create_session(model_path, ep_to_provider_list[ep], options)
except Exception as e:
logger.error(e)
# update_fail_model(model_ep_fail_map, fail_results, args, name, ep, e)
continue
logger.info("[start] Begin to inference {} with {} ...".format(name, ep))
logger.info(sess.get_providers())
if sess:
logger.info("Model inputs nodes:")
for input_meta in sess.get_inputs():
logger.info(input_meta)
logger.info("Model outputs nodes:")
for output_meta in sess.get_outputs():
logger.info(output_meta)
batch_size = 1
result_template = {
"engine": "onnxruntime",
"version": onnxruntime.__version__,
"device": ep,
"fp16": fp16,
"io_binding": False,
"model_name": name,
"inputs": len(sess.get_inputs()),
"batch_size": batch_size,
"sequence_length": 1,
"datetime": str(datetime.now()),}
result = inference_ort(args, name, sess, ep, inputs, result_template, args.test_times, batch_size)
if result:
success_results.append(result)
logger.info(result)
latency_result[ep] = {}
latency_result[ep]["average_latency_ms"] = result["average_latency_ms"]
latency_result[ep]["latency_90_percentile"] = result["latency_90_percentile"]
# get standalone TensorRT perf
if "TensorrtExecutionProvider" in ep and args.trtexec:
result = run_trt_standalone(args.trtexec, model_path, sess.get_inputs(), all_inputs_shape, fp16)
if result and len(result) > 0:
if fp16:
latency_result["Standalone_TRT_fp16"] = result
else:
latency_result["Standalone_TRT"] = result
latency_comparison_map[name] = copy.deepcopy(latency_result)
elif args.running_mode == 'validate':
logger.info("==========================")
logger.info("======== validate ========")
logger.info("==========================")
# enable profiling to generate profiling file for analysis
options = onnxruntime.SessionOptions()
options.enable_profiling = True
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
time.sleep(1) # avoid to generate same profile file name
# create onnxruntime inference session
try:
sess = create_session(model_path, ep_to_provider_list[ep], options)
except Exception as e:
logger.error(e)
update_fail_model(model_ep_fail_map, fail_results, args, name, ep, 'runtime error', e)
continue
sess.disable_fallback()
logger.info("Start to inference {} with {} ...".format(name, ep))
logger.info(sess.get_providers())
if sess:
logger.info("Model inputs nodes:")
for input_meta in sess.get_inputs():
logger.info(input_meta)
logger.info("Model outputs nodes:")
for output_meta in sess.get_outputs():
logger.info(output_meta)
# run inference and validate the result
#
# currently skip TensorRT float16 validation intentionally
if ep not in validation_exemption:
try:
ort_outputs = inference_ort_and_get_prediction(name, sess, inputs)
decimal = 0
status = validate(ref_outputs, ort_outputs, decimal)
if not status[0]:
update_fail_model(model_ep_fail_map, fail_results, args, name, ep, 'result accuracy issue', status[1])
continue
except Exception as e:
logger.error(e)
update_fail_model(model_ep_fail_map, fail_results, args, name, ep, 'runtime error', e)
continue
# Run inference again. the reason is that some ep like tensorrt
# it takes much longer time to generate graph on first run and
# we need to skip the perf result of that expensive run.
inference_ort_and_get_prediction(name, sess, inputs)
else:
inference_ort_and_get_prediction(name, sess, inputs)
inference_ort_and_get_prediction(name, sess, inputs)
sess.end_profiling()
# get metrics from profiling file
metrics = get_profile_metrics(path, profile_already_parsed)
if metrics:
print(ep)
ep_to_ep_op_map[ep] = metrics
####################
# end of iterate ep
####################
# get percentage of execution time and operators in TRT
if len(ep_to_ep_op_map) > 0:
trt_op_map = None
trt_fp16_op_map = None
cuda_op_map = None
cuda_fp16_op_map = None
for ep, op_map in ep_to_ep_op_map.items():
if ep == "CUDAExecutionProvider":
cuda_op_map = op_map
elif ep == "CUDAExecutionProvider_fp16":
cuda_fp16_op_map = op_map
elif ep == "TensorrtExecutionProvider":
trt_op_map = op_map
elif ep == "TensorrtExecutionProvider_fp16":
trt_fp16_op_map = op_map
profile_metrics_map[name] = {}
if cuda_op_map:
profile_metrics_map[name]['ratio_of_ops_in_cuda_not_fallback_cpu'] = calculate_cuda_op_percentage(cuda_op_map)
if trt_op_map:
total_trt_execution_time, total_execution_time, ratio_of_execution_time_in_trt = calculate_trt_latency_percentage(trt_op_map)
profile_metrics_map[name]['total_trt_execution_time'] = total_trt_execution_time
profile_metrics_map[name]['total_execution_time'] = total_execution_time
profile_metrics_map[name]['ratio_of_execution_time_in_trt'] = ratio_of_execution_time_in_trt
if cuda_op_map:
total_ops_in_trt, total_ops, ratio_of_ops_in_trt = calculate_trt_op_percentage(trt_op_map, cuda_op_map)
profile_metrics_map[name]['total_ops_in_trt'] = total_ops_in_trt
profile_metrics_map[name]['total_ops'] = total_ops
profile_metrics_map[name]['ratio_of_ops_in_trt'] = ratio_of_ops_in_trt
if trt_fp16_op_map:
total_trt_execution_time, total_execution_time, ratio_of_execution_time_in_trt = calculate_trt_latency_percentage(trt_fp16_op_map)
name_ = name + " (FP16)"
profile_metrics_map[name_] = {}
profile_metrics_map[name_]['total_trt_execution_time'] = total_trt_execution_time
profile_metrics_map[name_]['total_execution_time'] = total_execution_time
profile_metrics_map[name_]['ratio_of_execution_time_in_trt'] = ratio_of_execution_time_in_trt
if cuda_fp16_op_map:
total_ops_in_trt, total_ops, ratio_of_ops_in_trt = calculate_trt_op_percentage(trt_fp16_op_map, cuda_op_map)
profile_metrics_map[name_]['total_ops_in_trt'] = total_ops_in_trt
profile_metrics_map[name_]['total_ops'] = total_ops
profile_metrics_map[name_]['ratio_of_ops_in_trt'] = ratio_of_ops_in_trt
if debug:
pp = pprint.PrettyPrinter(indent=4)
print('CUDA operator map:')
pp.pprint(cuda_op_map)
print('TRT operator map:')
pp.pprint(trt_op_map)
print('CUDA FP16 operator map:')
pp.pprint(cuda_fp16_op_map)
print('TRT FP16 operator map:')
pp.pprint(trt_fp16_op_map)
# cleanup_files()
os.chdir(pwd)
# end of model
return success_results, fail_results, latency_comparison_map, model_ep_fail_map, profile_metrics_map
def add_improvement_information(latency_comparison_map):
for key, value in latency_comparison_map.items():
if not ('TensorrtExecutionProvider' in value and 'CUDAExecutionProvider' in value):
continue
trt_latency = float(value['TensorrtExecutionProvider']['average_latency_ms'])
cuda_latency = float(value['CUDAExecutionProvider']['average_latency_ms'])
gain = (cuda_latency - trt_latency)*100/cuda_latency
value["Tensorrt_gain(%)"] = "{:.2f} %".format(gain)
if "TensorrtExecutionProvider_fp16" in value and "CUDAExecutionProvider_fp16" in value:
trt_fp16_latency = float(value['TensorrtExecutionProvider_fp16']['average_latency_ms'])
cuda_fp16_latency = float(value['CUDAExecutionProvider_fp16']['average_latency_ms'])
gain = (cuda_fp16_latency - trt_fp16_latency)*100/cuda_fp16_latency
value["Tensorrt_fp16_gain(%)"] = "{:.2f} %".format(gain)
def output_details(results, csv_filename):
with open(csv_filename, mode="a", newline='') as csv_file:
column_names = [
"engine", "version", "device", "fp16", "io_binding", "model_name", "inputs", "batch_size",
"sequence_length", "datetime", "test_times", "QPS", "average_latency_ms", "latency_variance",
"latency_90_percentile", "latency_95_percentile", "latency_99_percentile"
]
csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
csv_writer.writeheader()
for result in results:
csv_writer.writerow(result)
logger.info(f"Detail results are saved to csv file: {csv_filename}")
def output_fail(results, csv_filename):
with open(csv_filename, mode="a", newline='') as csv_file:
column_names = [
"model", "ep", "error type", "error message"
]
csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
csv_writer.writeheader()
for result in results:
csv_writer.writerow(result)
logger.info(f"Failing results are saved to csv file: {csv_filename}")
def output_latency(results, csv_filename):
with open(csv_filename, mode="a", newline='') as csv_file:
column_names = ["Model",
"CUDA \nmean (ms)",
"CUDA \n90th percentile (ms)",
"TRT EP \nmean (ms)",
"TRT EP \n90th percentile (ms)",
"Standalone TRT \nmean (ms)",
"Standalone TRT \n90th percentile (ms)",
"CUDA fp16 \nmean (ms)",
"CUDA fp16 \n90th percentile (ms)",
"TRT EP fp16 \nmean (ms)",
"TRT EP fp16 \n90 percentile (ms)",
"Standalone TRT fp16 \nmean (ms)",
"Standalone TRT fp16 \n90th percentile (ms)",
"TRT EP \ngain (mean) (%)",
"TRT EP fp16 \ngain (mean) (%)"]
csv_writer = csv.writer(csv_file)
csv_writer.writerow(column_names)
for key, value in results.items():
cuda_average = ""
if 'CUDAExecutionProvider' in value and 'average_latency_ms' in value['CUDAExecutionProvider']:
cuda_average = value['CUDAExecutionProvider']['average_latency_ms']
cuda_99_percentile = ""
if 'CUDAExecutionProvider' in value and 'latency_90_percentile' in value['CUDAExecutionProvider']:
cuda_99_percentile = value['CUDAExecutionProvider']['latency_90_percentile']
trt_average = ""
if 'TensorrtExecutionProvider' in value and 'average_latency_ms' in value['TensorrtExecutionProvider']:
trt_average = value['TensorrtExecutionProvider']['average_latency_ms']
trt_99_percentile = ""
if 'TensorrtExecutionProvider' in value and 'latency_90_percentile' in value['TensorrtExecutionProvider']:
trt_99_percentile = value['TensorrtExecutionProvider']['latency_90_percentile']
standalone_trt_average = ""
if 'Standalone_TRT' in value and 'average_latency_ms' in value['Standalone_TRT']:
standalone_trt_average = value['Standalone_TRT']['average_latency_ms']
standalone_trt_99_percentile = ""
if 'Standalone_TRT' in value and 'latency_90_percentile' in value['Standalone_TRT']:
standalone_trt_99_percentile = value['Standalone_TRT']['latency_90_percentile']
cuda_fp16_average = ""
if 'CUDAExecutionProvider_fp16' in value and 'average_latency_ms' in value['CUDAExecutionProvider_fp16']:
cuda_fp16_average = value['CUDAExecutionProvider_fp16']['average_latency_ms']
cuda_fp16_99_percentile = ""
if 'CUDAExecutionProvider_fp16' in value and 'latency_90_percentile' in value['CUDAExecutionProvider_fp16']:
cuda_fp16_99_percentile = value['CUDAExecutionProvider_fp16']['latency_90_percentile']
trt_fp16_average = ""
if 'TensorrtExecutionProvider_fp16' in value and 'average_latency_ms' in value['TensorrtExecutionProvider_fp16']:
trt_fp16_average = value['TensorrtExecutionProvider_fp16']['average_latency_ms']
trt_fp16_99_percentile = ""
if 'TensorrtExecutionProvider_fp16' in value and 'latency_90_percentile' in value['TensorrtExecutionProvider_fp16']:
trt_fp16_99_percentile = value['TensorrtExecutionProvider_fp16']['latency_90_percentile']
standalone_trt_fp16_average = ""
if 'Standalone_TRT_fp16' in value and 'average_latency_ms' in value['Standalone_TRT_fp16']:
standalone_trt_fp16_average = value['Standalone_TRT_fp16']['average_latency_ms']
standalone_trt_fp16_99_percentile = ""
if 'Standalone_TRT_fp16' in value and 'latency_90_percentile' in value['Standalone_TRT_fp16']:
standalone_trt_fp16_99_percentile = value['Standalone_TRT_fp16']['latency_90_percentile']
row = [key,
cuda_average,
cuda_99_percentile,
trt_average,
trt_99_percentile,
standalone_trt_average,
standalone_trt_99_percentile,
cuda_fp16_average,
cuda_fp16_99_percentile,
trt_fp16_average,
trt_fp16_99_percentile,
standalone_trt_fp16_average,
standalone_trt_fp16_99_percentile,
value['Tensorrt_gain(%)'] if 'Tensorrt_gain(%)' in value else " ",
value['Tensorrt_fp16_gain(%)'] if 'Tensorrt_fp16_gain(%)' in value else " "
]
csv_writer.writerow(row)
logger.info(f"CUDA/TRT latency comparison are saved to csv file: {csv_filename}")
def output_ratio(results, csv_filename):
with open(csv_filename, mode="a", newline='') as csv_file:
column_names = ["Model",
"% CUDA operators (not fall back to CPU)",
"Total TRT operators",
"Total operators",
"% TRT operator",
"Total TRT execution time",
"Total execution time",
"% TRT execution time"]
csv_writer = csv.writer(csv_file)
csv_writer.writerow(column_names)
for key, value in results.items():
row = [key,
value['ratio_of_ops_in_cuda_not_fallback_cpu'] if 'ratio_of_ops_in_cuda_not_fallback_cpu' in value else " ",
value['total_ops_in_trt'] if 'total_ops_in_trt' in value else " ",
value['total_ops'] if 'total_ops' in value else " ",
value['ratio_of_ops_in_trt'] if 'ratio_of_ops_in_trt' in value else " ",
value['total_trt_execution_time'] if 'total_trt_execution_time' in value else " ",
value['total_execution_time'] if 'total_execution_time' in value else " ",
value['ratio_of_execution_time_in_trt'] if 'ratio_of_execution_time_in_trt' in value else " ",
]
csv_writer.writerow(row)
logger.info(f"Tensorrt ratio metrics are saved to csv file: {csv_filename}")
def output_system_info(result, csv_filename):
with open(csv_filename, mode="a", newline='') as csv_file:
column_names = [
"cpu_info", "cuda", "gpu_info", "linux_distro", "memory", "trt"
]
csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
csv_writer.writeheader()
csv_writer.writerow(result)
logger.info(f"System information are saved to csv file: {csv_filename}")
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model_list_file", required=False, default="model_list.json", help="Model list file.")
parser.add_argument("-r", "--running_mode", required=False, default="benchmark", choices=["validate", "benchmark"], help="Testing mode.")
parser.add_argument("-i", "--input_data", required=False, default="zoo", choices=["zoo", "random"], help="source of input data.")
parser.add_argument("--fp16", required=False, default=True, action="store_true", help="Inlcude Float16 into benchmarking.")
parser.add_argument("--trtexec", required=False, default=None, help="trtexec executable path.")
parser.add_argument("-t",
"--test_times",
required=False,
default=1,
type=int,
help="Number of repeat times to get average inference latency.")
args = parser.parse_args()
return args
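# Example invocations (illustrative only; the script file name "benchmark.py" and
# the trtexec path are assumptions, not defined by this file):
#   python3 benchmark.py -m model_list.json -r validate -i zoo
#   python3 benchmark.py -m model_list.json -r benchmark -t 10 --trtexec /usr/src/tensorrt/bin/trtexec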
def setup_logger(verbose):
if verbose:
coloredlogs.install(level='DEBUG', fmt='[%(filename)s:%(lineno)s - %(funcName)20s()] %(message)s')
else:
coloredlogs.install(fmt='%(message)s')
logging.getLogger("transformers").setLevel(logging.WARNING)
def main():
args = parse_arguments()
setup_logger(False)
pp = pprint.PrettyPrinter(indent=4)
models = parse_models_info(args.model_list_file)
perf_start_time = datetime.now()
success_results, fail_results, latency_comparison_map, failing_models, profile_metrics_map = run_onnxruntime(args, models)
perf_end_time = datetime.now()
logger.info("\nTotal time for running/profiling all models: {}".format(perf_end_time - perf_start_time))
logger.info(list(models.keys()))
logger.info("\nTotal models: {}".format(len(models)))
logger.info("Fail models: {}".format(len(failing_models)))
logger.info("Models FAIL/SUCCESS: {}/{}".format(len(failing_models), len(models) - len(failing_models)))
path = "result"
if not os.path.exists(path):
os.mkdir(path)
path = os.path.join(os.getcwd(), path)
if not os.path.exists(path):
os.mkdir(path)
time_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
if len(failing_models) > 0:
logger.info("\n============================================")
logger.info("========== Failing Models/EPs ==============")
logger.info("============================================")
logger.info(failing_models)
write_model_ep_fail_map_to_file(failing_models)
if latency_comparison_map:
logger.info("\n=========================================")
logger.info("=========== CUDA/TRT latency ===========")
logger.info("=========================================")
add_improvement_information(latency_comparison_map)
pp.pprint(latency_comparison_map)
csv_filename = f"benchmark_latency_{time_stamp}.csv"
csv_filename = os.path.join(path, csv_filename)
output_latency(latency_comparison_map, csv_filename)
if len(profile_metrics_map) > 0:
logger.info("\n========================================")
logger.info("========== TRT detail metrics ==========")
logger.info("========================================")
pp.pprint(profile_metrics_map)
csv_filename = f"benchmark_ratio_{time_stamp}.csv"
csv_filename = os.path.join(path, csv_filename)
output_ratio(profile_metrics_map, csv_filename)
logger.info("\n===========================================")
logger.info("=========== System information ===========")
logger.info("===========================================")
info = {}
get_system_info(info)
pp.pprint(info)
csv_filename = os.path.join(path, f"system_info_{time_stamp}.csv")
output_system_info(info, csv_filename)
if fail_results:
csv_filename = f"benchmark_fail_{time_stamp}.csv"
csv_filename = os.path.join(path, csv_filename)
output_fail(fail_results, csv_filename)
if success_results:
csv_filename = f"benchmark_success_{time_stamp}.csv"
csv_filename = os.path.join(path, csv_filename)
output_details(success_results, csv_filename)
if __name__ == "__main__":
main()
|
python
|
from django.db import models
# Create your models here.
class Person(models.Model):
name = models.CharField(max_length=255)
surname = models.CharField(max_length=255)
image = models.ImageField(upload_to='person_images')
|
python
|
import os
import click
from flask import current_app
from sqlalchemy import text
from app import db
def register(app):
@app.cli.group()
def translate():
"""Translation and localization commands."""
pass
@translate.command()
@click.argument('lang')
def init(lang):
"""Initialize a new language."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
if os.system(
'pybabel init -i messages.pot -d app/translations -l ' + lang):
raise RuntimeError('init command failed')
os.remove('messages.pot')
@translate.command()
def update():
"""Update all languages."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
if os.system('pybabel update -i messages.pot -d app/translations'):
raise RuntimeError('update command failed')
os.remove('messages.pot')
@translate.command()
def compile():
"""Compile all languages."""
if os.system('pybabel compile -d app/translations'):
raise RuntimeError('compile command failed')
@app.cli.group()
def sqlite():
"""Run SQLite commands."""
pass
@sqlite.command()
def create():
"""Create the initial database."""
db.drop_all()
db.create_all()
scripts = [
'./docs/scripts/country.sql',
'./docs/scripts/folder.sql',
'./docs/scripts/preference.sql',
'./docs/scripts/profile.sql'
]
for script in scripts:
with open(script) as f:
script_file = f.read()
                for statement in script_file.split(';'):
                    if statement.strip():
                        db.session.execute(text(statement))
@app.cli.group()
def test():
"""Unit testing framework commands."""
pass
@test.command()
def run():
"""Run unit testing framework."""
if os.system('coverage run -m unittest discover'):
            raise RuntimeError('coverage run command failed')
@test.command()
def report():
"""Report unit testing framework."""
if os.system('coverage report -m'):
            raise RuntimeError('coverage report command failed')
@app.cli.group()
def doc():
"""Build documentation."""
pass
@doc.command()
def generate():
"Generate entity relationship diagram."
if os.system('./schemaspy/schemaspy'):
            raise RuntimeError('schemaspy command failed')
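# Example CLI usage once register(app) has been called (illustrative only; the
# language code "es" is a placeholder and FLASK_APP is assumed to be configured):
#   flask translate init es
#   flask translate update && flask translate compile
#   flask sqlite create
#   flask test run && flask test report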
|
python
|
from __future__ import annotations
import attr
__all__ = ("AllowedMentions",)
@attr.s(kw_only=True)
class AllowedMentions:
"""Represents an allowed mentions object.
    This is used to control which mentions are allowed in messages sent by the client's user.
Parameters
----------
everyone: :class:`bool`
If mentioning everyone is allowed. By default True.
roles: :class:`bool` | :class:`list`
Either a list of role IDs, or a boolean value. Determines the allowed roles to be mentioned.
users: :class:`bool` | :class:`list`
        Either a list of user IDs, or a boolean value. Determines the allowed users to be mentioned.
replied_user: :class:`bool`
If mentioning the replied user to the message is allowed.
"""
everyone: bool = attr.field(default=True)
roles: bool | list[int] = attr.field(default=True)
users: bool | list[int] = attr.field(default=True)
replied_user: bool = attr.field(default=True)
@classmethod
def none(cls: type[AllowedMentions]) -> AllowedMentions:
"""Creates a :class:`.AllowedMentions` instance that has no
allowed mentions set.
Returns
-------
:class:`.AllowedMentions`
The created instance.
"""
return cls(everyone=False, roles=False, users=False, replied_user=False)
def to_dict(self) -> dict[str, bool | list[int] | list[str]]:
"""Turns the AllowedMentions instance into a usable dict.
Returns
-------
:class:`dict`
The created dict from the AllowedMentions instance.
"""
payload: dict[str, bool | list[int] | list[str]] = {
"everyone": self.everyone,
"replied_user": self.replied_user,
}
parse: list[str] = []
if self.roles is True:
parse.append("roles")
if isinstance(self.roles, list):
payload["roles"] = self.roles
if self.users is True:
parse.append("users")
if isinstance(self.users, list):
payload["users"] = self.users
payload["parse"] = parse
return payload
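# Minimal usage sketch (illustrative only; the role ID below is a placeholder):
#   AllowedMentions.none().to_dict()
#   -> {"everyone": False, "replied_user": False, "parse": []}
#   AllowedMentions(roles=[123], users=True).to_dict()
#   -> {"everyone": True, "replied_user": True, "roles": [123], "parse": ["users"]}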
|
python
|
import csv
class PlayerThumbnails:
def getThumbnailsID():
ThumbnailsID = []
with open('Logic/Files/assets/csv_logic/player_thumbnails.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0 or line_count == 1:
line_count += 1
else:
if row[8].lower() == 'true':
ThumbnailsID.append(line_count - 2)
if row[0] != "":
line_count += 1
return ThumbnailsID
|
python
|
"""
This file contains all the HTTP routes for basic pages (usually HTML)
"""
from flask import Blueprint, render_template, request
import _config as config
pages = Blueprint('controller', __name__)
@pages.route('/')
def index():
"""
A basic landing page for this web service
:return: HTTP Response (HTML page only)
"""
return render_template(
'page_index.html',
api_endpoint=config.API_ENDPOINT,
request=request
)
@pages.route('/about')
def about():
return render_template(
'page_about.html'
)
|
python
|
"""The Snooty documentation writer's tool."""
__version__ = "0.9.6.dev"
|
python
|
#!/usr/bin/env python
from subprocess import Popen
from subprocess import PIPE
class ChromeHtmlToPdf():
def __init__(self, url, output_path=None, verbose=False):
'''
Initialize class with google chrome parameters
Params:
Return:
'''
# Base command
self.command = 'google-chrome --headless --disable-gpu'
# Set output path
self.command += ' --print-to-pdf'
if output_path:
self.command += '=' + output_path
# Set url
self.command += ' ' + url
if verbose:
            print(self.command)
def render(self):
''' Actually render html to pdf '''
try:
p = Popen(self.command, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode == 0:
# call was successful
return
elif retcode < 0:
raise Exception("Terminated by signal: ", -retcode)
else:
raise Exception(stderr)
        except OSError as exc:
raise exc
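# Minimal usage sketch (illustrative; the URL and output path are placeholders,
# and google-chrome is assumed to be available on PATH):
if __name__ == '__main__':
    renderer = ChromeHtmlToPdf('https://example.com', output_path='example.pdf', verbose=True)
    renderer.render()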
|
python
|
from streaming.app import app
from streaming.config import config
from streaming.phishtank.api import Reported
# Topics
reports_topic = app.topic('phishtank-reports')
# Tables
states_table = app.Table('phishtank-state', default=str)
@app.agent(reports_topic)
async def get_phishtank_reports(states):
async for state in states:
try:
phishtank_reports = await Reported('API_KEY').get(states_table['size'], state['size'])
for report in phishtank_reports:
print(report)
# Do things
await update_etag.send(state)
except Exception as err:
print(err)
pass
@app.agent()
async def update_etag(states):
async for state in states:
states_table['etag'] = state['etag']
states_table['size'] = state['size']
@app.task
async def hallo():
phishtank_state = await Reported('API_KEY').latest()
if not states_table['etag'] or states_table['etag'] != phishtank_state['etag']:
await get_phishtank_reports.send(value=phishtank_state)
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*
from common.common_time import get_system_datetime
from db.base import DbBase
from db.connection_pool import MysqlConn
import copy
import datetime
from utils.status_code import response_code
from config import configuration
import traceback
import json
import os
from config import config
import logging
logger = logging.getLogger("main." + __name__)
config_name = os.getenv('FLASK_CONFIG') or 'default'
Config = config[config_name]
class DbOrgMgr(DbBase):
"""
User related db operation
"""
'''
0. Default there is an admin account
1. Use default admin account for first login
2. Setup the org_name in the UI portal
if org_name is empty:
then go to org setup
-- fill in the org info
-- setup ldap login
-- setup smtp and airflow url
'''
def __delete_admin(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "ID=%s and ACCOUNT_NAME =%s" % ('1', 'TorroAdmin')
delete_table_sql = self.create_delete_sql(db_name, "userTable", condition)
self.delete_exec(conn, delete_table_sql)
return response_code.SUCCESS
except Exception as e:
logger.error("FN:__delete_admin error:{}".format(traceback.format_exc()))
return response_code.DELETE_DATA_FAIL
finally:
conn.close()
def __set_ldap(self, ldap_info):
conn = MysqlConn()
try:
host = ldap_info['host']
port = ldap_info['port']
cer_path = ldap_info['cer_path']
use_ssl = ldap_info['use_ssl']
admin = ldap_info['admin_dn']
admin_pwd = ldap_info['admin_pwd']
user_search_base = ldap_info['user_search_base']
user_search_filter = ldap_info['user_search_filter']
display_name_attribute = ldap_info['display_name_attribute']
email_address_attribute = ldap_info['email_address_attribute']
adgroup_attribute = ldap_info['adgroup_attribute']
group_search_base = ldap_info['group_search_base']
group_search_filter = ldap_info['group_search_filter']
group_member_attribute = ldap_info['group_member_attribute']
email_suffix = ldap_info['email_suffix']
create_time = ldap_info['create_time']
time_modify = ldap_info['time_modify']
db_name = configuration.get_database_name()
# insert form
fields = ('HOST', 'PORT', 'CER_PATH', 'USE_SSL', 'ADMIN_DN', 'ADMIN_PWD',
'USER_SEARCH_BASE', 'USER_SERACH_FILTER', 'DISPLAY_NAME_LDAP_ATTRIBUTE', 'EMAIL_ADDRESS_LDAP_ATTRIBUTE', 'USER_ADGROUP_ATTRIBUTE',
'GROUP_SEARCH_BASE', 'GROUP_SERACH_FILTER', 'GROUP_MEMBER_ATTRIBUTE', 'GROUP_EMAIL_SUFFIX',
'CREATE_TIME', 'TIME_MODIFY')
values = (host, port, cer_path, use_ssl, admin, admin_pwd,
user_search_base, user_search_filter, display_name_attribute, email_address_attribute, adgroup_attribute,
group_search_base, group_search_filter, group_member_attribute, email_suffix,
create_time, time_modify)
sql = self.create_insert_sql(db_name, 'ldapTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_ldap ldapTable_sql:{}'.format(sql))
ldap_id = self.insert_exec(conn, sql, return_insert_id=True)
ldap_info['id'] = ldap_id
data = response_code.SUCCESS
data['data'] = ldap_info
return data
except Exception as e:
logger.error("FN:__set_ldap error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
def __set_smtp(self, smtp_info):
conn = MysqlConn()
try:
smtp_host = smtp_info['smtp_host']
smtp_account = smtp_info['smtp_account']
smtp_mail_box = smtp_info['Smtp_mail_box']
smtp_pwd = smtp_info['smtp_pwd']
smtp_port = smtp_info['smtp_port']
smtp_tls = smtp_info['smtp_tls']
create_time = smtp_info['create_time']
db_name = configuration.get_database_name()
# insert form
fields = ('MAIL_HOST', 'MAIL_USER', 'MAIL_BOX', 'MAIL_PASS', 'PORT', 'USE_TLS', 'CREATE_TIME',
'TIME_MODIFY')
values = (smtp_host, smtp_account, smtp_mail_box, smtp_pwd, smtp_port, smtp_tls, create_time, create_time)
sql = self.create_insert_sql(db_name, 'smtpTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_smtp smtpTable_sql:{}'.format(sql))
smtp_id = self.insert_exec(conn, sql, return_insert_id=True)
smtp_info['id'] = smtp_id
data = response_code.SUCCESS
data['data'] = smtp_info
return data
except Exception as e:
logger.error("FN:__set_smtp error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
def __delete_ldap(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "1=1"
delete_table_sql = self.create_delete_sql(db_name, "ldapTable", condition)
            logger.debug('FN:__delete_ldap delete_ldapTable_sql:{}'.format(delete_table_sql))
self.delete_exec(conn, delete_table_sql)
return response_code.SUCCESS
except Exception as e:
logger.error("FN:__delete_ldap error:{}".format(traceback.format_exc()))
return response_code.DELETE_DATA_FAIL
finally:
conn.close()
def __delete_smtp(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "1=1"
delete_table_sql = self.create_delete_sql(db_name, "smtpTable", condition)
            logger.debug('FN:__delete_smtp delete_smtpTable_sql:{}'.format(delete_table_sql))
self.delete_exec(conn, delete_table_sql)
return response_code.SUCCESS
except Exception as e:
logger.error("FN:__delete_smtp error:{}".format(traceback.format_exc()))
return response_code.DELETE_DATA_FAIL
finally:
conn.close()
def __set_org(self, org_info):
conn = MysqlConn()
try:
admin_group = org_info['admin_group']
visitor_group = org_info['base_group']
org_name = org_info['org_name']
airflow_url = org_info['airflow_url']
create_time = org_info['create_time']
des = org_info['des']
db_name = configuration.get_database_name()
# insert org
fields = ('ORG_NAME', 'AIRFLOW_URL', 'CREATE_TIME', 'DES', 'PROJECT_NAME')
values = (org_name, airflow_url, create_time, des, Config.DEFAULT_PROJECT)
sql = self.create_insert_sql(db_name, 'orgTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org orgTable_sql:{}'.format(sql))
org_id = self.insert_exec(conn, sql, return_insert_id=True)
select_condition = "GROUP_MAIL='%s' " % admin_group
select_table_sql = self.create_select_sql(db_name, "adgroupTable", "*", select_condition)
ad_group_info = self.execute_fetch_one(conn, select_table_sql)
if ad_group_info:
admin_group_id = ad_group_info['ID']
else:
# insert admin group
fields = ('GROUP_MAIL', 'CREATE_TIME', 'DES')
values = (admin_group, create_time, des)
sql = self.create_insert_sql(db_name, 'adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org adgroupTable_sql:{}'.format(sql))
admin_group_id = self.insert_exec(conn, sql, return_insert_id=True)
# insert org_to_adgroupTable
fields = ('ORG_ID', 'AD_GROUP_ID', 'ROLE_LIST')
values = (org_id, admin_group_id, json.dumps(['admin']).replace('\\', '\\\\'))
sql = self.create_insert_sql(db_name, 'org_to_adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org org_to_adgroupTable_sql:{}'.format(sql))
self.insert_exec(conn, sql, return_insert_id=True)
select_condition = "GROUP_MAIL='%s' " % visitor_group
select_table_sql = self.create_select_sql(db_name, "adgroupTable", "*", select_condition)
            logger.debug('FN:__set_org adgroupTable_sql:{}'.format(select_table_sql))
ad_group_info = self.execute_fetch_one(conn, select_table_sql)
if ad_group_info:
visitor_group_id = ad_group_info['ID']
# insert visitor group
else:
fields = ('GROUP_MAIL', 'CREATE_TIME', 'DES')
values = (visitor_group, create_time, des)
sql = self.create_insert_sql(db_name, 'adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org adgroupTable_sql:{}'.format(sql))
visitor_group_id = self.insert_exec(conn, sql, return_insert_id=True)
# insert org_to_adgroupTable
fields = ('ORG_ID', 'AD_GROUP_ID', 'ROLE_LIST')
values = (org_id, visitor_group_id, json.dumps(['viewer']).replace('\\', '\\\\'))
sql = self.create_insert_sql(db_name, 'org_to_adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org org_to_adgroupTable_sql:{}'.format(sql))
self.insert_exec(conn, sql, return_insert_id=True)
org_info['org_id'] = org_id
org_info['admin_id'] = admin_group_id
org_info['visitor_id'] = visitor_group_id
data = response_code.SUCCESS
data['data'] = org_info
return data
except Exception as e:
logger.error("FN:__set_org error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
def __delete_org(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "1=1"
delete_table_sql = self.create_delete_sql(db_name, "orgTable", condition)
logger.debug('FN:__delete_org delete_orgTable_sql:{}'.format(delete_table_sql))
self.delete_exec(conn, delete_table_sql)
return response_code.SUCCESS
except Exception as e:
logger.error("FN:__delete_org error:{}".format(traceback.format_exc()))
return response_code.DELETE_DATA_FAIL
finally:
conn.close()
def __delete_adgroup_to_org(self, org_id=None):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
if not org_id:
select_table_sql = self.create_select_sql(db_name, "orgTable", "*")
org_id = self.execute_fetch_one(conn, select_table_sql)['ID']
# select_condition = "ORG_ID=%s" % org_id
# select_table_sql = self.create_select_sql(db_name, "org_to_adgroupTable", "*", select_condition)
# ad_group_infos = self.execute_fetch_all(conn, select_table_sql)
# for ad_group_info in ad_group_infos:
# ad_group_id = ad_group_info['AD_GROUP_ID']
# ad_condition = "ID=%s" % ad_group_id
# delete_table_sql = self.create_delete_sql(db_name, "adgroupTable", ad_condition)
# # print('delete_table_sql ', delete_table_sql)
# self.delete_exec(conn, delete_table_sql)
delete_condition = "1=1"
delete_table_sql = self.create_delete_sql(db_name, "org_to_adgroupTable", delete_condition)
logger.debug('FN:__delete_adgroup_to_org delete_org_to_adgroupTable_sql:{}'.format(delete_table_sql))
self.delete_exec(conn, delete_table_sql)
return response_code.SUCCESS
except Exception as e:
logger.error("FN:__delete_adgroup_to_org error:{}".format(traceback.format_exc()))
return response_code.DELETE_DATA_FAIL
finally:
conn.close()
def add_new_org_setting(self, org):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
org_info = {}
org_info['admin_group'] = org['admin_group']
org_info['base_group'] = org['base_group']
org_info['org_name'] = org['org_name']
org_info['airflow_url'] = org['airflow_url']
create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
org_info['create_time'] = create_time
org_info['des'] = org['des']
ldap_info = {}
ldap_info['host'] = org['host']
ldap_info['port'] = org['port']
ldap_info['cer_path'] = org['cer_path']
ldap_info['use_ssl'] = org['use_ssl']
ldap_info['admin_dn'] = org['admin_dn']
ldap_info['admin_pwd'] = org['admin_pwd']
ldap_info['user_search_base'] = org['user_search_base']
ldap_info['user_search_filter'] = org['user_search_filter']
ldap_info['display_name_attribute'] = org['display_name_attribute']
ldap_info['email_address_attribute'] = org['email_address_attribute']
ldap_info['adgroup_attribute'] = org['adgroup_attribute']
ldap_info['group_search_base'] = org['group_search_base']
ldap_info['group_search_filter'] = org['group_search_filter']
ldap_info['group_member_attribute'] = org['group_member_attribute']
ldap_info['email_suffix'] = org['email_suffix']
ldap_info['create_time'] = create_time
ldap_info['time_modify'] = create_time
smtp_info = {}
smtp_info['smtp_host'] = org['smtp_host']
smtp_info['smtp_account'] = org['smtp_account']
smtp_info['Smtp_mail_box'] = org['Smtp_mail_box']
smtp_info['smtp_pwd'] = org['smtp_pwd']
smtp_info['smtp_port'] = org['smtp_port']
smtp_info['smtp_tls'] = org['smtp_tls']
smtp_info['create_time'] = create_time
sql = self.create_select_sql(db_name, 'ldapTable', '*')
ldap_infos = self.execute_fetch_all(conn, sql)
if ldap_infos:
self.__delete_adgroup_to_org()
self.__delete_ldap()
self.__delete_org()
self.__delete_smtp()
# data = response_code.ADD_DATA_FAIL
# return data
# sql = self.create_select_sql(db_name, 'orgTable', '*')
# org_infos = self.execute_fetch_all(conn, sql)
# if org_infos:
# data = response_code.ADD_DATA_FAIL
# return data
org_insert = self.__set_org(org_info)
ldap_insert = self.__set_ldap(ldap_info)
smtp_insert = self.__set_smtp(smtp_info)
data = response_code.SUCCESS
# self.__delete_admin()
org['org_id'] = org_insert['data']['org_id']
org['ldap_id'] = ldap_insert['data']['id']
org['smtp_id'] = smtp_insert['data']['id']
data['data'] = org
return data
except Exception as e:
logger.error("FN:add_new_org_setting error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
def get_ldap_info(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'ldapTable', '*')
ldap_info = self.execute_fetch_one(conn, sql)
if ldap_info:
data = response_code.SUCCESS
data['data'] = ldap_info
else:
data = response_code.GET_DATA_FAIL
return data
except Exception as e:
logger.error("FN:get_ldap_info error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
# get org info
def get_org_info(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'orgTable', '*')
logger.debug('FN:get_org_info orgTable_sql:{}'.format(sql))
org_info = self.execute_fetch_one(conn, sql)
if org_info:
org_id = org_info['ID']
db_name = configuration.get_database_name()
condition = "ORG_ID=%s " % (org_id)
relations = [{"table_name": "adgroupTable", "join_condition": "adgroupTable.ID=org_to_adgroupTable.AD_GROUP_ID"}]
sql = self.create_get_relation_sql(db_name, 'org_to_adgroupTable', '*', relations, condition)
ad_group_info = self.execute_fetch_all(conn, sql)
org_info['ad_group_list'] = ad_group_info
data = response_code.SUCCESS
data['data'] = org_info
else:
data = response_code.GET_DATA_FAIL
return data
except Exception as e:
logger.error("FN:get_org_info error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
# get org info
def get_org_info_by_id(self, id):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "ID=%s " % (id)
sql = self.create_select_sql(db_name, 'orgTable', '*', condition)
logger.debug('FN:get_org_info_by_id orgTable_sql:{}'.format(sql))
org_info = self.execute_fetch_one(conn, sql)
if org_info:
data = response_code.SUCCESS
data['data'] = org_info
else:
data = response_code.GET_DATA_FAIL
return data
except Exception as e:
logger.error("FN:get_org_info_by_id error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
# modify org info
def update_org_info(self, org):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
data = self.get_org_info_by_id(org['id'])
if data['code'] != 200:
return response_code.UPDATE_DATA_FAIL
logger.debug("FN:update_org_info data".format(data))
self.__delete_adgroup_to_org()
self.__delete_ldap()
self.__delete_org()
self.__delete_smtp()
org_info = {}
org_info['admin_group'] = org['admin_group']
org_info['base_group'] = org['base_group']
org_info['org_name'] = org['org_name']
org_info['airflow_url'] = org['airflow_url']
create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
org_info['create_time'] = create_time
org_info['des'] = org['des']
ldap_info = {}
ldap_info['host'] = org['host']
ldap_info['port'] = org['port']
ldap_info['cer_path'] = org['cer_path']
ldap_info['use_ssl'] = org['use_ssl']
ldap_info['admin_dn'] = org['admin_dn']
ldap_info['admin_pwd'] = org['admin_pwd']
ldap_info['user_search_base'] = org['user_search_base']
ldap_info['user_search_filter'] = org['user_search_filter']
ldap_info['display_name_attribute'] = org['display_name_attribute']
ldap_info['email_address_attribute'] = org['email_address_attribute']
ldap_info['adgroup_attribute'] = org['adgroup_attribute']
ldap_info['group_search_base'] = org['group_search_base']
ldap_info['group_search_filter'] = org['group_search_filter']
ldap_info['group_member_attribute'] = org['group_member_attribute']
ldap_info['email_suffix'] = org['email_suffix']
ldap_info['create_time'] = create_time
ldap_info['time_modify'] = create_time
smtp_info = {}
            smtp_info['smtp_host'] = org['smtp_host']
            smtp_info['smtp_account'] = org['smtp_account']
            smtp_info['Smtp_mail_box'] = org['Smtp_mail_box']
            smtp_info['smtp_pwd'] = org['smtp_pwd']
smtp_info['smtp_port'] = org['smtp_port']
smtp_info['smtp_tls'] = org['smtp_tls']
smtp_info['create_time'] = create_time
sql = self.create_select_sql(db_name, 'ldapTable', '*')
logger.debug('FN:update_org_info ldapTable_sql:{}'.format(sql))
ldap_infos = self.execute_fetch_all(conn, sql)
if ldap_infos:
data = response_code.ADD_DATA_FAIL
return data
sql = self.create_select_sql(db_name, 'orgTable', '*')
logger.debug('FN:update_org_info orgTable_sql:{}'.format(sql))
org_infos = self.execute_fetch_all(conn, sql)
if org_infos:
data = response_code.ADD_DATA_FAIL
return data
org_insert = self.__set_org(org_info)
ldap_insert = self.__set_ldap(ldap_info)
smtp_insert = self.__set_smtp(smtp_info)
data = response_code.SUCCESS
self.__delete_admin()
org['org_id'] = org_insert['data']['org_id']
org['ldap_id'] = ldap_insert['data']['id']
org['smtp_id'] = smtp_insert['data']['id']
data['data'] = org
return data
except Exception as e:
logger.error("FN:update_org_info error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
def get_roles_info(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'roleTable', '*')
logger.debug('FN:get_roles_info roleTable_sql:{}'.format(sql))
org_info = self.execute_fetch_all(conn, sql)
if org_info:
data = response_code.SUCCESS
data['data'] = org_info
else:
data = response_code.GET_DATA_FAIL
return data
except Exception as e:
logger.error("FN:get_roles_info error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
def get_smtp(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'smtpTable', '*')
logger.debug('FN:get_smtp smtpTable_sql:{}'.format(sql))
smtp_info = self.execute_fetch_one(conn, sql)
if not smtp_info:
return None, None, None, None, None, None
else:
mail_host = smtp_info['MAIL_HOST']
mail_user = smtp_info['MAIL_USER']
mail_box = smtp_info['MAIL_BOX']
mail_pass = smtp_info['MAIL_PASS']
port = smtp_info['PORT']
is_tls = smtp_info['USE_TLS']
return mail_host, mail_user, mail_box, mail_pass, port, is_tls
except Exception as e:
logger.error("FN:get_smtp error:{}".format(traceback.format_exc()))
return None, None, None, None, None, None
finally:
conn.close()
def offline_ad_group(self, account_id):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
# db_name2 = configuration.get_database_name('DB')
condition = 'ACCOUNT_ID="%s"' % (account_id)
user_fields = '*'
sql = self.create_select_sql(db_name, 'userTable', user_fields, condition=condition)
logger.debug('FN:offline_ad_group userTable_sql:{}'.format(sql))
user_info = self.execute_fetch_one(conn, sql)
ad_group_list = json.loads(user_info.get('GROUP_LIST', "[]"), strict=False)
logger.debug('FN:offline_ad_group ad_group_list:{}'.format(ad_group_list))
return ad_group_list
except Exception as e:
logger.error("FN:offline_ad_group error:{}".format(traceback.format_exc()))
return None, None
finally:
conn.close()
def get_user_cn(self, account_id):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
# db_name2 = configuration.get_database_name('DB')
condition = 'ACCOUNT_ID="%s"' % (account_id)
user_fields = '*'
sql = self.create_select_sql(db_name, 'userTable', user_fields, condition=condition)
logger.debug('FN:get_user_cn userTable_sql:{}'.format(sql))
user_info = self.execute_fetch_one(conn, sql)
account_cn = user_info.get('ACCOUNT_CN', None)
            logger.debug('FN:get_user_cn ACCOUNT_CN:{}'.format(account_cn))
return account_cn
except Exception as e:
logger.error("FN:get_user_cn error:{}".format(traceback.format_exc()))
return None, None
finally:
conn.close()
# get airflow info
def get_airflow_url(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'orgTable', 'AIRFLOW_URL')
logger.debug('FN:get_airflow_url orgTable_sql:{}'.format(sql))
org_info = self.execute_fetch_one(conn, sql)
if org_info:
return org_info['AIRFLOW_URL']
else:
return ''
except Exception as e:
logger.error("FN:get_airflow_url error:{}".format(traceback.format_exc()))
return ''
finally:
conn.close()
def insert_notification(self, emails, input_form_id, history_id, notify_msg):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
notify_id_list = []
create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
emails = list(set(emails))
            logger.debug('FN:insert_notification emails:{} notify_msg:{}'.format(emails, notify_msg))
for email in emails:
values = (email, input_form_id, history_id, notify_msg, 0, create_time)
fields = ('account_id', 'input_form_id', 'history_id', 'comment', 'is_read', 'create_time')
sql = self.create_insert_sql(db_name, 'inputNotifyTable', '({})'.format(', '.join(fields)), values)
notify_id = self.insert_exec(conn, sql, return_insert_id=True)
notify_id_list.append(str(notify_id))
return notify_id_list
except Exception as e:
logger.error("FN:insert_notification error:{}".format(traceback.format_exc()))
return []
finally:
conn.close()
org_mgr = DbOrgMgr()
|
python
|
from .api import process_large_corpus, process_small_corpus, \
process_belscript, process_pybel_graph, process_json_file, \
process_pybel_neighborhood, process_cbn_jgif_file, \
process_bel_stmt
|
python
|
from setuptools import setup
version = "1.2"
setup(
name='chainee',
packages=['chainee'],
version=version,
license='MIT',
description='Chain your predicates, easy way.',
author='Yaroslav Pankovych',
author_email='[email protected]',
url='https://github.com/ypankovych/chainee',
download_url=f'https://github.com/ypankovych/chainee/archive/refs/tags/{version}.tar.gz',
keywords=['chain', 'easy', 'predicate'],
long_description=__doc__,
Install_requires=["anytree"],
classifiers=[
'Topic :: Utilities',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: CPython'
],
)
|
python
|
# The idea here is to store all the different things you need for Code in one module
# Code can then import the module and call whatever functions are needed
# R. Sheehan 27 - 10 - 2020
MOD_NAME_STR = "Measurement"
# import the necessary modules
import board
import time
import digitalio
from analogio import AnalogOut
from analogio import AnalogIn
import supervisor # for listening to serial ports
# Define the names of the pins being written to and listened to
Vout = AnalogOut(board.A0)
Vin1 = AnalogIn(board.A1)
Vin2 = AnalogIn(board.A2)
Vin3 = AnalogIn(board.A3)
Vin4 = AnalogIn(board.A4)
Vin5 = AnalogIn(board.A5)
# Define the names of the read / write commands
readCmdStr = 'r'  # read data command string for reading max AC input
writeCmdStr = 'w'  # write data command string for writing frequency values
writeAngStrA = 'a'  # write analog output from DCPINA
writeAngStrB = 'b'  # write analog output from DCPINB
readAngStr = 'l'  # read analog input
# Define the constants
Vmax = 3.3 # max AO/AI value
bit_scale = (64*1024) # 2^16 = 65536, full-scale count of CircuitPython's 16-bit analog values
# Need the following functions to convert voltages to the 16-bit counts
# that CircuitPython uses for AnalogIn/AnalogOut values
def dac_value(volts):
    # convert a voltage to a 16-bit DAC count
FUNC_NAME = ".dac_value()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
if Vmax > 0.0 and bit_scale > 0:
return int((volts / Vmax)*bit_scale)
else:
ERR_STATEMENT = ERR_STATEMENT + "\nvolt, bit scale factors not defined"
raise Exception
except Exception as e:
print(ERR_STATEMENT)
print(e)
def get_voltage(pin, offset = 0.0):
# convert pin reading to voltage value
    # correct voltage by subtracting offset
FUNC_NAME = ".get_voltage()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
if Vmax > 0.0 and bit_scale > 0:
ret_val = ((pin.value*Vmax)/bit_scale)
return ret_val - offset if offset > 0.0 else ret_val
else:
ERR_STATEMENT = ERR_STATEMENT + "\nvolt, bit scale factors not defined"
raise Exception
except Exception as e:
print(ERR_STATEMENT)
print(e)
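# Worked example of the two conversions above (arithmetic sketch only):
#   dac_value(1.65)  -> int((1.65 / 3.3) * 65536) = 32768, i.e. half of full scale
#   get_voltage(pin) -> (pin.value * 3.3) / 65536, so a raw reading of 16384 corresponds to ~0.825 V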
# Determine the zero offset using A0 and A1
def get_zero_offset():
# Determine the zero offset using A0 and A1
# Ensure that Vout (A0) is set to zero
# There is a bit of an offset in voltage between the Read and the Write,
# presumably because the pins are floating.
Vout.value = dac_value(0)
time.sleep(0.5)
    deltaV = get_voltage(Vin1, 0.0) # offset in the voltage reading at A1, O(10 mV)
# print("deltaV Reading at A1: ", deltaV)
    return deltaV
def Blink():
# The first script that is run using CircuitPy
# Use this to check that everything is operational
# The led near the re-set button should flash count_limit times and switch off
# If this doesn't work something is wrong
# R. Sheehan 19 - 10 - 2020
FUNC_NAME = ".Voltage_Divider_Test()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
led = digitalio.DigitalInOut(board.D13)
led.direction = digitalio.Direction.OUTPUT
count = 0
count_limit = 20
while count < count_limit:
led.value = True
time.sleep(0.5)
led.value = False
time.sleep(0.5)
count = count + 1
except Exception as e:
print(ERR_STATEMENT)
print(e)
def Voltage_Divider_Test():
# Check the operation of the voltage-dividers and buffer amplifiers
# that are attached to the various inputs of the board
# R. Sheehan 23 - 10 - 2020
FUNC_NAME = ".Voltage_Divider_Test()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
# determine the zero offset
deltaV = get_zero_offset()
# define the voltage-divider scaling
Vscale = (5.0/3.0)
# Read the values here
# Determine the readings at pins A2, A3, A4, A5
Vin1val = get_voltage(Vin1, deltaV)
Vin2val = get_voltage(Vin2, deltaV)
Vin3val = get_voltage(Vin3, deltaV)
Vin4val = get_voltage(Vin4, deltaV)
Vin5val = get_voltage(Vin5, deltaV)
print("deltaV Reading at A1: ", deltaV)
print("Reading at A2: ", Vin2val, ", Real Reading at A2: ", Vin2val*Vscale)
print("Reading at A2: ", Vin3val, ", Real Reading at A2: ", Vin3val*Vscale)
print("Reading at A2: ", Vin4val, ", Real Reading at A2: ", Vin4val*Vscale)
print("Reading at A2: ", Vin5val, ", Real Reading at A2: ", Vin5val*Vscale)
except Exception as e:
print(ERR_STATEMENT)
print(e)
def Current_Source_Measurement():
# this method performs the current-source measurements
# R. Sheehan 23 - 10 - 2020
FUNC_NAME = ".Current_Source_Measurement()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
deltaV = get_zero_offset()
# define the voltage-divider scaling
Vscale = (5.0/3.0)
# Set the output value here
Vset = 0.0
R1 = (54.9/1000.0) # units of kOhm
R2 = (10.3/1000.0) # units of kOhm
R3 = (4.8/1000.0) # units of kOhm
ratio = R2 / (R1*R3)
Rload = (10.0/1000.0) # unit of kOhm
Vout.value = dac_value(Vset)
# Read the values here
# Determine the readings at pins A2, A3, A4, A5
Vin1val = get_voltage(Vin1, deltaV)
Vin2val = get_voltage(Vin2, deltaV)
Vin3val = get_voltage(Vin3, deltaV)
Vin4val = get_voltage(Vin4, deltaV)
Vin5val = get_voltage(Vin5, deltaV)
time.sleep(1.0) # give the board time to power everything
# print the real readings
print("\nVset: ",Vin1val)
print("Vctrl: ",Vin2val*Vscale)
print("VR3: ",Vin3val*Vscale - Vin4val*Vscale)
print("IR3 Measured: ",(Vin3val*Vscale - Vin4val*Vscale)/R3)
print("Iload predicted: ",Vin1val * ratio)
print("Vload predicted: ",Vin1val * ratio * Rload)
print("Vload: ", Vin5val*Vscale)
except Exception as e:
print(ERR_STATEMENT)
print(e)
def Cuffe_Iface():
# method that listens for input from LabVIEW and responds appropriately
# John Cuffe 10 - 10 - 2020
# Edited R. Sheehan 27 - 10 - 2020
FUNC_NAME = ".Cuffe_Iface()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
while True:
if supervisor.runtime.serial_bytes_available: # Listens for a serial command
command = input()
if command.startswith(writeAngStrA): # If the command starts with writeAngStrA it knows it is an output (Write)
try: # In case user inputs NAN somehow
SetVoltage = float(command[1:]) # Everything after the writeAngStrA is the voltage
if SetVoltage >= 0.0 and SetVoltage < 3.3: # Sets limits on the Output voltage to board specs
Vout.value = dac_value(SetVoltage) # Set the voltage
else:
Vout.value = dac_value(0.0) # Set the voltage to zero in the event of SetVoltage range error
except ValueError:
ERR_STATEMENT = ERR_STATEMENT + '\nVin must be a float'
raise Exception
elif command.startswith(readAngStr): # If the command starts with readAngStr it knows user is looking for Vin. (Read)
# in the scheme I have set up
# A1 measures Ground, A2 measures Vctrl-high, A3 measures Vr3-high, A4 measures Vr3-low, A5 measures Vrl-high
                    # Measurement at ground can be subtracted off where required
print(get_voltage(Vin1), get_voltage(Vin2), get_voltage(Vin3), get_voltage(Vin4), get_voltage(Vin5)) # Prints to serial to be read by LabView
#print(get_voltage(Vin2))
#print(Vin1.value)
else:
print(get_voltage(Vin1), get_voltage(Vin2), get_voltage(Vin3), get_voltage(Vin4), get_voltage(Vin5)) # Prints to serial to be read by LabView
#print(get_voltage(Vin2))
#print(Vin1.value)
except Exception as e:
print(ERR_STATEMENT)
print(e)
def Ser_Test():
# method that prints a data reading continuously
# Trying to get Python to read data from port
# R. Sheehan 30 - 11 - 2020
FUNC_NAME = ".Ser_Test()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
print("Test String")
except Exception as e:
print(ERR_STATEMENT)
print(e)
def AC_Read():
# The idea is to investigate exactly what the sample rate of the IBM4 is
# CircuitPython is a ReadOnly filesystem, this means that it cannot create files on its drive
# It can only write info to the console / buffer
# This buffer can be read by LabVIEW
# The aim here is to get the IBM4 to read an AC signal continuously and then write the data being read
# To the console and then read this console data into LabVIEW
# To speed up the process I will not perform any voltage conversions here
# This can be done quite easily in LabVIEW
# I want to be able to read AC signals on at least 2 channels.
    # This works to some extent, but the IBM4 is not able to sample at a high enough frequency
# R. Sheehan 30 - 1 - 2020
FUNC_NAME = "AC_Read.()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
while True:
if supervisor.runtime.serial_bytes_available: # Listens for a serial command
command = input()
if command.startswith(readCmdStr): # If the command starts with readCmdStr it knows user is looking for Vin. (Read)
count = 0
count_lim = 500
#count_lim = 3e+4 # i think this is close to the upper limit
bit_readings_1 = []
#bit_readings_2 = []
start_time = time.time() # start the clock
while count < count_lim:
#bit_readings_1.append(Vin1.value) # no voltage conversions here
bit_readings_1.append(Vin2.value) # no voltage conversions here
count = count + 1
elapsed_time = (time.time() - start_time)
delta_T = float(elapsed_time / count_lim)
# output the data to the buffer
count = 0
print("Elapsed Time: %(v1)0.15f"%{"v1":elapsed_time})
print("Time-step: %(v1)0.15f"%{"v1":delta_T})
print("Start")
for i in range(0, count_lim, 1):
print(bit_readings_1[i])
print("End")
del bit_readings_1
else:
raise Exception
except Exception as e:
print(ERR_STATEMENT)
print(e)
def AC_Max():
# The IBM4 is not able to accurately sample a sine wave
# The aim now is to see if IBM4 can find the largest value of a sine wave
# in a given reading request period
# R. Sheehan 3 - 11 - 2020
FUNC_NAME = "AC_Max.()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
while True:
if supervisor.runtime.serial_bytes_available: # Listens for a serial command
command = input()
if command.startswith(readCmdStr): # If the command starts with readCmdStr it knows user is looking for Vin. (Read)
#print(get_voltage(Vin1), get_voltage(Vin2), get_voltage(Vin3), get_voltage(Vin4), get_voltage(Vin5)) # Prints to serial to be read by LabView
max_val = 0.0
count = 0
count_lim = 500
while count < count_lim:
t1 = get_voltage(Vin2) # read the voltage from the pin
if t1 > max_val: max_val = t1
count = count + 1
time.sleep(0.001)
print(max_val)
else:
raise Exception
except Exception as e:
print(ERR_STATEMENT)
print(e)
def IO_Simple():
# Check to ensure that ports A0 and A1 are working correctly
# R. Sheehan 27 - 10 - 2020
FUNC_NAME = ".IO_Simple()" # use this in exception handling messages
ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
try:
deltaV = get_zero_offset()
Vset = 2.315
Vout.value = dac_value(Vset) # tell A0 to output Vset Volts
time.sleep(0.1) # pause for 100 ms
print("Reading A1: ",get_voltage(Vin1, 0.0)) # Read the value that is input into A1
except Exception as e:
print(ERR_STATEMENT)
print(e)
|
python
|
import pytest
import main
from time import time
import hmac
import hashlib
from unittest.mock import Mock
from unittest.mock import MagicMock
def test_valid_signature():
timestamp = int(time())
slack_signing_secret = 'abcdefg'
main.slack_signing_secret = slack_signing_secret
req_body = 'abcdefgabcdefgabcdefgabcdefg'
signature = create_signature(timestamp,req_body,slack_signing_secret)
headers = {
'X-Slack-Signature': signature,
'X-Slack-Request-Timestamp': str(timestamp)
}
decode = Mock(decode=Mock(return_value=req_body))
req = Mock(get_data=Mock(return_value=decode), headers=headers)
assert main.verify_slack_signature(req) == True
def test_invalid_signature_secret_invalid():
timestamp = int(time())
slack_signing_secret = 'abcdefg'
main.slack_signing_secret = 'qwerty'
req_body = 'abcdefgabcdefgabcdefgabcdefg'
signature = create_signature(timestamp,req_body,slack_signing_secret)
headers = {
'X-Slack-Signature': signature,
'X-Slack-Request-Timestamp': str(timestamp)
}
decode = Mock(decode=Mock(return_value=req_body))
req = Mock(get_data=Mock(return_value=decode), headers=headers)
assert main.verify_slack_signature(req) == False
def test_invalid_signature_old_timestamp():
timestamp = int(time()) - 86400
slack_signing_secret = 'abcdefg'
main.slack_signing_secret = slack_signing_secret
req_body = 'abcdefgabcdefgabcdefgabcdefg'
signature = create_signature(timestamp,req_body,slack_signing_secret)
headers = {
'X-Slack-Signature': signature,
'X-Slack-Request-Timestamp': str(timestamp)
}
decode = Mock(decode=Mock(return_value=req_body))
req = Mock(get_data=Mock(return_value=decode), headers=headers)
assert main.verify_slack_signature(req) == False
def test_invalid_signature_signature_missing():
timestamp = int(time())
req_body = 'abcdefgabcdefgabcdefgabcdefg'
headers = {
'X-Slack-Request-Timestamp': str(timestamp)
}
decode = Mock(decode=Mock(return_value=req_body))
req = Mock(get_data=Mock(return_value=decode), headers=headers)
assert main.verify_slack_signature(req) == False
def test_invalid_signature_timestamp_missing():
req_body = 'abcdefgabcdefgabcdefgabcdefg'
headers = {
'X-Slack-Signature': 'dadsdasadsads'
}
decode = Mock(decode=Mock(return_value=req_body))
req = Mock(get_data=Mock(return_value=decode), headers=headers)
assert main.verify_slack_signature(req) == False
def create_signature(timestamp,req_body,slack_signing_secret):
signature_string = str.encode('v0:' + str(timestamp) + ':' + req_body)
signature = 'v0=' + hmac.new(str.encode(slack_signing_secret),
signature_string,
hashlib.sha256).hexdigest()
return signature
|
python
|
from matchbook.apiclient import APIClient
from matchbook.exceptions import MBError
__title__ = 'matchbook'
__version__ = '0.0.9'
|
python
|
# -*- coding: utf-8 -*-
"""
Test Generic Map
"""
import os
import pytest
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import sunpy
import sunpy.map
import sunpy.coordinates
import sunpy.data.test
from sunpy.tests.helpers import figure_test
testpath = sunpy.data.test.rootdir
@pytest.fixture
def aia171_test_map():
return sunpy.map.Map(os.path.join(testpath, 'aia_171_level1.fits'))
@pytest.fixture
def heliographic_test_map():
return sunpy.map.Map(os.path.join(testpath, 'heliographic_phase_map.fits.gz'))
@pytest.fixture
def aia171_test_map_with_mask(aia171_test_map):
shape = aia171_test_map.data.shape
mask = np.zeros_like(aia171_test_map.data, dtype=bool)
mask[0:shape[0] // 2, 0:shape[1] // 2] = True
return sunpy.map.Map(np.ma.array(
aia171_test_map.data, mask=mask),
aia171_test_map.meta)
@figure_test
def test_plot_aia171(aia171_test_map):
aia171_test_map.plot()
@figure_test
def test_plot_aia171_clip(aia171_test_map):
aia171_test_map.plot(clip_interval=(5., 99.)*u.percent)
@figure_test
def test_peek_aia171(aia171_test_map):
aia171_test_map.peek()
@figure_test
def test_peek_grid_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=True)
@figure_test
def test_peek_grid_spacing_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=(5, 5) * u.deg)
@figure_test
def test_peek_limb_aia171(aia171_test_map):
aia171_test_map.peek(draw_limb=True)
@figure_test
def test_draw_grid_aia171(aia171_test_map):
aia171_test_map.plot()
aia171_test_map.draw_grid(grid_spacing=(30, 40) * u.deg)
@figure_test
def test_peek_grid_limb_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=True, draw_limb=True)
@figure_test
def test_plot_aia171_nowcsaxes(aia171_test_map):
ax = plt.gca()
aia171_test_map.plot(axes=ax)
@figure_test
def test_rectangle_aia171(aia171_test_map):
aia171_test_map.plot()
bottom_left = SkyCoord(
0 * u.arcsec, 0 * u.arcsec, frame=aia171_test_map.coordinate_frame)
w = 100 * u.arcsec
h = 100 * u.arcsec
aia171_test_map.draw_rectangle(bottom_left, w, h)
@figure_test
def test_plot_masked_aia171(aia171_test_map_with_mask):
aia171_test_map_with_mask.plot()
@figure_test
def test_plot_masked_aia171_nowcsaxes(aia171_test_map_with_mask):
ax = plt.gca()
aia171_test_map_with_mask.plot(axes=ax)
@figure_test
def test_plot_aia171_superpixel(aia171_test_map):
aia171_test_map.superpixel((9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_aia171_superpixel_nowcsaxes(aia171_test_map):
ax = plt.gca()
aia171_test_map.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_plot_masked_aia171_superpixel(aia171_test_map_with_mask):
aia171_test_map_with_mask.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_masked_aia171_superpixel_nowcsaxes(aia171_test_map_with_mask):
ax = plt.gca()
aia171_test_map_with_mask.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_draw_contours_aia(aia171_test_map):
aia171_test_map.plot()
aia171_test_map.draw_contours(u.Quantity(np.arange(1, 100, 10), 'percent'))
@figure_test
def test_heliographic_peek(heliographic_test_map):
heliographic_test_map.peek()
@figure_test
def test_heliographic_rectangle(heliographic_test_map):
heliographic_test_map.plot()
bottom = SkyCoord(
60 * u.deg, 50 * u.deg, frame=heliographic_test_map.coordinate_frame)
w = 13 * u.deg
h = 13 * u.deg
heliographic_test_map.draw_rectangle(bottom, w, h, color='cyan')
@figure_test
def test_heliographic_grid_annotations(heliographic_test_map):
heliographic_test_map.plot()
heliographic_test_map.draw_grid(annotate=False)
|
python
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# this should be fine, but failed with pychecker 0.8.18 on python 2.6
def func():
d = { 'a': 1, 'b': 2}
print d.keys()
|
python
|
import torch
import torch.nn as nn
import torch.nn.init as init
import numpy as np
import math
import torchvision.utils as tvu
from torch.autograd import Variable
import matplotlib.pyplot as plt
def generate_images(generator, centers, num_clusters, alpha, z_dim, device):
idx_centers = torch.from_numpy(np.random.choice(np.arange(num_clusters), 16))
eps = torch.FloatTensor(16, z_dim).uniform_(-alpha, alpha).to(device)
noise = centers[idx_centers] + eps
num_images = noise.shape[0]
rows = int(math.sqrt(num_images))
images = generator(noise).cpu().detach()
grid_img = tvu.make_grid(images, nrow=rows)
return grid_img
def reconstruct_images(model, dataloader, device):
model.eval()
(x, _) = next(iter(dataloader))
x = x.to(device)
x_pre_vq = model._pre_vq_conv(model._encoder(x))
_, x_quantize, _, _ = model._vq_vae(x_pre_vq)
x_hat = model._decoder(x_quantize).cpu().detach()
#grid_img = tvu.make_grid(x_hat, nrow=rows)
x = x[:10].cpu().view(10 * 32, 32)
x_hat = x_hat[:10].cpu().view(10 * 32, 32)
comparison = torch.cat((x, x_hat), 1).view(10 * 32, 2 * 32)
return comparison
def type_tdouble(use_cuda=False):
return torch.cuda.DoubleTensor if use_cuda else torch.DoubleTensor
def init_weights(module):
for m in module.modules():
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight.data)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias, 0.0)
elif isinstance(m, nn.Sequential):
for sub_mod in m:
init_weights(sub_mod)
def one_hot(labels, n_class, use_cuda=False):
# Ensure labels are [N x 1]
if len(list(labels.size())) == 1:
labels = labels.unsqueeze(1)
mask = type_tdouble(use_cuda)(labels.size(0), n_class).fill_(0)
# scatter dimension, position indices, fill_value
return mask.scatter_(1, labels, 1)
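# Illustrative use of one_hot (assuming use_cuda=False):
#   one_hot(torch.tensor([0, 2]), n_class=3) -> a 2x3 DoubleTensor
#   [[1., 0., 0.],
#    [0., 0., 1.]]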
def to_cuda(tensor):
if isinstance(tensor, torch.Tensor):
tensor = tensor.cuda()
return tensor
def conv_size(H_in, k_size, stride, padd, dil=1):
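    # Standard convolution output-size formula:
    #   H_out = floor((H_in + 2*padd - dil*(k_size - 1) - 1)/stride + 1)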
H_out = np.floor((H_in + 2 * padd - dil * (k_size - 1) - 1) / stride + 1)
    return int(H_out)  # np.int is deprecated; use the builtin int
def shuffle(X):
np.take(X, np.random.permutation(X.shape[0]), axis=0, out=X)
def numpy2torch(x):
return torch.from_numpy(x)
def extract_batch(data, it, batch_size):
x = numpy2torch(data[it * batch_size:(it + 1) * batch_size, :, :]) / 255.0
#x.sub_(0.5).div_(0.5)
return Variable(x)
def plot_scatter_outliers(mse_score_inlier, discriminator_score_inlier, mse_score_outlier, discriminator_score_outlier, epoch):
plt.scatter(mse_score_inlier, discriminator_score_inlier)
plt.scatter(mse_score_outlier, discriminator_score_outlier)
plt.xlabel('MSE_distance')
plt.ylabel('Discriminator_distance')
#plt.legend()
plt.grid(True)
plt.savefig('results/inlier_vs_outlier_{}.png'.format(epoch))
plt.close()
def get_mse_score(model, x, device):
N = x.size(0)
x = x.to(device)
_, x_hat, _ = model(x)
x = x.squeeze().cpu().detach().numpy()
x_hat = x_hat.squeeze().cpu().detach().numpy()
mse_score= []
for i in range(N):
distance = np.sum(np.power(x_hat[i].flatten() - x[i].flatten(), 2.0))
mse_score.append(distance)
return mse_score
def plot_mse_outliers(mse_score_inlier, mse_score_outlier, filename):
plt.hist(mse_score_inlier, 10, density=1, facecolor='g', alpha=0.75)
plt.hist(mse_score_outlier, 10, density=1, facecolor='r', alpha=0.75)
plt.xlabel('MSE_distance')
plt.ylabel('Histogram')
#plt.legend()
plt.grid(True)
plt.savefig(filename)
plt.close()
def save_checkpoint(state, filename):
torch.save(state, filename)
def save_img(img, filename):
npimg = img.numpy()
fig = plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.savefig(filename)
plt.close()
|
python
|
import os
import numpy as np
def _download_and_extract(url, path, filename):
import shutil, zipfile
import requests
fn = os.path.join(path, filename)
while True:
try:
with zipfile.ZipFile(fn) as zf:
zf.extractall(path)
print('Unzip finished.')
break
except Exception:
os.makedirs(path, exist_ok=True)
f_remote = requests.get(url, stream=True)
sz = f_remote.headers.get('content-length')
assert f_remote.status_code == 200, 'fail to open {}'.format(url)
with open(fn, 'wb') as writer:
for chunk in f_remote.iter_content(chunk_size=1024*1024):
writer.write(chunk)
print('Download finished. Unzipping the file...')
class KGDataset1:
'''Load a knowledge graph with format 1
In this format, the folder with a knowledge graph has five files:
* entities.dict stores the mapping between entity Id and entity name.
* relations.dict stores the mapping between relation Id and relation name.
* train.txt stores the triples in the training set.
* valid.txt stores the triples in the validation set.
* test.txt stores the triples in the test set.
The mapping between entity (relation) Id and entity (relation) name is stored as 'id\tname'.
The triples are stored as 'head_name\trelation_name\ttail_name'.
'''
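    # Illustrative file contents (hypothetical entity/relation names, shown only to
    # make the format above concrete; real ids and names come from the dataset):
    #   entities.dict : "0\t/m/alice",  "1\t/m/bob"
    #   relations.dict: "0\tknows"
    #   train.txt     : "/m/alice\tknows\t/m/bob"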
def __init__(self, path, name, read_triple=True, only_train=False):
url = 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/{}.zip'.format(name)
if not os.path.exists(os.path.join(path, name)):
print('File not found. Downloading from', url)
_download_and_extract(url, path, name + '.zip')
path = os.path.join(path, name)
with open(os.path.join(path, 'entities.dict')) as f:
entity2id = {}
for line in f:
eid, entity = line.strip().split('\t')
entity2id[entity] = int(eid)
self.entity2id = entity2id
with open(os.path.join(path, 'relations.dict')) as f:
relation2id = {}
for line in f:
rid, relation = line.strip().split('\t')
relation2id[relation] = int(rid)
self.relation2id = relation2id
        # TODO: deal with the Countries dataset.
self.n_entities = len(self.entity2id)
self.n_relations = len(self.relation2id)
if read_triple == True:
self.train = self.read_triple(path, 'train')
if only_train == False:
self.valid = self.read_triple(path, 'valid')
self.test = self.read_triple(path, 'test')
def read_triple(self, path, mode):
# mode: train/valid/test
heads = []
tails = []
rels = []
with open(os.path.join(path, '{}.txt'.format(mode))) as f:
for line in f:
h, r, t = line.strip().split('\t')
heads.append(self.entity2id[h])
rels.append(self.relation2id[r])
tails.append(self.entity2id[t])
heads = np.array(heads, dtype=np.int64)
tails = np.array(tails, dtype=np.int64)
rels = np.array(rels, dtype=np.int64)
return (heads, rels, tails)
class KGDataset2:
'''Load a knowledge graph with format 2
In this format, the folder with a knowledge graph has five files:
* entity2id.txt stores the mapping between entity name and entity Id.
    * relation2id.txt stores the mapping between relation name and relation Id.
* train.txt stores the triples in the training set.
* valid.txt stores the triples in the validation set.
* test.txt stores the triples in the test set.
The mapping between entity (relation) name and entity (relation) Id is stored as 'name\tid'.
    The triples are stored as 'head_nid\ttail_nid\trelation_id'.
'''
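    # Illustrative line in train.txt (hypothetical integer ids): "0\t1\t2",
    # i.e. head_nid, tail_nid, relation_id, matching the unpacking in read_triple below.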
def __init__(self, path, name, read_triple=True, only_train=False):
url = 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/{}.zip'.format(name)
if not os.path.exists(os.path.join(path, name)):
print('File not found. Downloading from', url)
_download_and_extract(url, path, '{}.zip'.format(name))
self.path = os.path.join(path, name)
f_rel2id = os.path.join(self.path, 'relation2id.txt')
with open(f_rel2id) as f_rel:
self.n_relations = int(f_rel.readline()[:-1])
if only_train == True:
f_ent2id = os.path.join(self.path, 'local_to_global.txt')
with open(f_ent2id) as f_ent:
self.n_entities = len(f_ent.readlines())
else:
f_ent2id = os.path.join(self.path, 'entity2id.txt')
with open(f_ent2id) as f_ent:
self.n_entities = int(f_ent.readline()[:-1])
if read_triple == True:
self.train = self.read_triple(self.path, 'train')
if only_train == False:
self.valid = self.read_triple(self.path, 'valid')
self.test = self.read_triple(self.path, 'test')
def read_triple(self, path, mode, skip_first_line=False):
heads = []
tails = []
rels = []
print('Reading {} triples....'.format(mode))
with open(os.path.join(path, '{}.txt'.format(mode))) as f:
if skip_first_line:
_ = f.readline()
for line in f:
h, t, r = line.strip().split('\t')
heads.append(int(h))
tails.append(int(t))
rels.append(int(r))
heads = np.array(heads, dtype=np.int64)
tails = np.array(tails, dtype=np.int64)
rels = np.array(rels, dtype=np.int64)
print('Finished. Read {} {} triples.'.format(len(heads), mode))
return (heads, rels, tails)
def get_dataset(data_path, data_name, format_str):
if data_name == 'Freebase':
dataset = KGDataset2(data_path, data_name)
elif format_str == '1':
dataset = KGDataset1(data_path, data_name)
else:
dataset = KGDataset2(data_path, data_name)
return dataset
def get_partition_dataset(data_path, data_name, format_str, part_id):
part_name = os.path.join(data_name, 'part_'+str(part_id))
if data_name == 'Freebase':
dataset = KGDataset2(data_path, part_name, read_triple=True, only_train=True)
elif format_str == '1':
dataset = KGDataset1(data_path, part_name, read_triple=True, only_train=True)
else:
dataset = KGDataset2(data_path, part_name, read_triple=True, only_train=True)
path = os.path.join(data_path, part_name)
partition_book = []
with open(os.path.join(path, 'partition_book.txt')) as f:
for line in f:
partition_book.append(int(line))
local_to_global = []
with open(os.path.join(path, 'local_to_global.txt')) as f:
for line in f:
local_to_global.append(int(line))
return dataset, partition_book, local_to_global
def get_server_partition_dataset(data_path, data_name, format_str, part_id):
part_name = os.path.join(data_name, 'part_'+str(part_id))
if data_name == 'Freebase':
dataset = KGDataset2(data_path, part_name, read_triple=False, only_train=True)
elif format_str == '1':
dataset = KGDataset1(data_path, part_name, read_triple=False, only_train=True)
else:
dataset = KGDataset2(data_path, part_name, read_triple=False, only_train=True)
path = os.path.join(data_path, part_name)
n_entities = len(open(os.path.join(path, 'partition_book.txt')).readlines())
local_to_global = []
with open(os.path.join(path, 'local_to_global.txt')) as f:
for line in f:
local_to_global.append(int(line))
global_to_local = [0] * n_entities
for i in range(len(local_to_global)):
global_id = local_to_global[i]
global_to_local[global_id] = i
local_to_global = None
return global_to_local, dataset
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'Chao Wu'
r'''
python C:\Users\cwu\Desktop\Software\Papers\pH_effect\plot_ph_effect_contour\plot_contour.py
'''
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
EPCS_FILE = r'path\to\epcs.xlsx' # raw strings so the backslashes are not treated as escape sequences
DGPS_FILE = r'path\to\dgps.xlsx'
OUT_DIR = r'path\to\output'
ENZYME_CAT = {'glycolysis': ['pgi', 'pfk', 'fbp', 'fba', 'tpi', 'gap', 'pgk', 'gpm', 'eno', 'pyk', 'pps', 'pdh'],
'pentose phosphate pathway': ['zwf', 'pgl', 'gnd', 'rpi', 'rpe', 'tkt1', 'tal', 'tkt2'],
'TCA cycle': ['cs', 'acn1', 'acn2', 'icd', 'kgd', 'suc', 'sdh', 'fum', 'mdh', 'icl', 'mals'],
'glutamate metabolism': ['gs', 'gdh', 'gls', 'gogat'],
'pyruvate metabolism': ['aldh', 'adh', 'pta', 'ak', 'ldh', 'pfl'],
'anaplerotic reactions': ['me1', 'me2', 'ppc', 'ppck'],
'ATP metabolism': ['atps4r', 'atpm']}
PATHWAYS = ['glycolysis', 'pentose phosphate pathway', 'TCA cycle', 'glutamate metabolism', 'pyruvate metabolism', 'anaplerotic reactions']
NPOINTS = 20
def plot_contour(out_dir, filename, data_file, cmap, constant_color):
dataInfo = pd.read_excel(data_file, header = 0, index_col = 0)
X_phin = dataInfo['ph_in'].values.reshape(NPOINTS, NPOINTS).T
Y_phout = dataInfo['ph_out'].values.reshape(NPOINTS, NPOINTS).T
data = dataInfo.iloc[:, 2:].copy()
    data = (data.fillna(method = 'bfill') + data.fillna(method = 'ffill'))/2 # impute missing values with the mean of the nearest valid neighbours
if filename == 'epcs':
for i, row in data.iterrows():
if (row < 0).any() or (row >= 1).any() or row.sum() >= 1:
data.loc[i, :] = 0
#data.to_excel(r'C:\Users\cwu\Desktop\all.xlsx')#!!!
if filename == 'epcs':
ndigits = 5
elif filename == 'dgps':
ndigits = 2
# plot per enzyme
for pathway in PATHWAYS:
enzymes = ENZYME_CAT[pathway]
ncols = 3
nrows = int(np.ceil((len(enzymes)+1)/ncols))
fig, axes = plt.subplots(nrows = nrows, ncols = ncols, figsize = (12, nrows*3), sharex = 'all', sharey = 'all')
for i, enz in enumerate(enzymes+['sum']):
if enz == 'sum':
Z = data[enzymes].sum(axis = 1).values.reshape(NPOINTS, NPOINTS).T
else:
Z = data[enz].values.reshape(NPOINTS, NPOINTS).T
if axes.ndim == 2:
indexer = (i//ncols, i%ncols)
elif axes.ndim == 1:
indexer = i
vmin = Z.min().min()
if vmin == 0:
vmin = 0.00001
vmax = Z.max().max()
levels = np.linspace(vmin, vmax, NPOINTS)
if vmax - vmin > 0.0001:
ctf = axes[indexer].contourf(X_phin, Y_phout, Z, vmin = vmin, vmax = vmax, levels = levels,
cmap = plt.cm.get_cmap(cmap).reversed())
cbar = fig.colorbar(mappable = ctf, ax = axes[indexer])
cbarTicks = cbar.get_ticks()
cbarTicksNew = np.linspace(cbarTicks.min(), cbarTicks.max(), 4)
cbar.set_ticks(cbarTicksNew)
cbar.ax.set_yticklabels(cbarTicksNew.round(ndigits))
cbar.ax.tick_params(labelsize = 13)
else:
Z = np.full_like(Z, (vmax + vmin)/2)
ctf = axes[indexer].contourf(X_phin, Y_phout, Z, NPOINTS, colors = constant_color)
cbar = fig.colorbar(mappable = ctf, ax = axes[indexer])
cbar.set_ticks([])
cbar.ax.set_yticklabels([])
cbar.set_label(round((vmax + vmin)/2, ndigits), horizontalalignment = 'left', rotation = 360,
labelpad = 5, fontsize = 13)
axes[indexer].locator_params(axis = 'x', nbins = 3)
axes[indexer].locator_params(axis = 'y', nbins = 4)
axes[indexer].tick_params(labelsize = 15)
axes[indexer].set_xlabel(enz, fontsize = 25)
ax_label = fig.add_subplot(111, frameon = False)
ax_label.tick_params(labelcolor = 'none', top = False, bottom = False, left = False, right = False)
ax_label.set_xlabel('Cytoplasmic pH', labelpad = 50, fontsize = 35)
ax_label.set_ylabel('Periplasmic pH', labelpad = 30, fontsize = 35)
for i in range(len(enzymes)+1, ncols*nrows):
if axes.ndim == 2:
indexer = (i//ncols, i%ncols)
elif axes.ndim == 1:
indexer = i
fig.delaxes(ax = axes[indexer])
os.makedirs(out_dir, exist_ok = True)
#plt.tight_layout()
fig.subplots_adjust(wspace = 0.4, hspace = 0.3)
plt.savefig('%s/%s_%s.jpg' % (out_dir, pathway, filename), dpi = 300, bbox_inches = 'tight')
def main():
plot_contour(OUT_DIR, 'epcs', EPCS_FILE, 'viridis', '#3C528B')
plot_contour(OUT_DIR, 'dgps', DGPS_FILE, 'plasma', '#D8556C')
if __name__ == '__main__':
main()
|
python
|
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.patches import Polygon
import numpy as np
import stader
d = stader.load_aircraft('b747_flight_condition2')
ac = stader.Aircraft(d)
msec = 15
dt = msec/1000
show_state = False
if show_state:
fig = plt.figure(figsize=(16,8))
ax = plt.subplot2grid((6,2), (0,0), rowspan=4)
ax1 = plt.subplot2grid((6,2), (0,1))
ax2 = plt.subplot2grid((6,2), (1,1))
ax3 = plt.subplot2grid((6,2), (2,1))
ax4 = plt.subplot2grid((6,2), (3,1))
ax5 = plt.subplot2grid((6,2), (4,1))
ax6 = plt.subplot2grid((6,2), (5,1))
for ax_ in [ax1, ax2, ax3]:
ax_.set_ylim([-180, 180])
for ax_ in [ax1, ax2, ax3, ax4, ax5, ax6]:
ax_.set_xlim([-60, 0])
else:
fig, ax = plt.subplots()
ax.set_yticklabels([], visible=False)
ax.set_yticks([])
ax.set_xticklabels([], visible=False)
ax.set_xticks([])
    ax.set_facecolor((30.0/255, 144.0/255, 1.0, 1))  # set_axis_bgcolor was removed in newer matplotlib
ax.plot(0,0, marker='o', markersize=10, color='y')
ax.plot([-.5, -.25], [0, 0], marker=None, linestyle='-', linewidth=5, color='y')
ax.plot([.5, .25], [0, 0], marker=None, linestyle='-', linewidth=5, color='y')
ax.set_ylim(-1, 1)
ax.set_xlim(-1, 1)
gnd = Polygon(((-10,0), (10,0), (10,-10), (-10, -10)), closed=True,
facecolor=(139.0/255, 69.0/255, 19.0/255),
edgecolor='white')
gnd_xy = gnd.get_xy()
ax.add_artist(gnd)
markers = []
orig_xys = []
for tick in range(-50, 51, 5):
x = 0.1 if tick % 10 else 0.25
c = 'k' if tick > 0 else 'w'
if tick == 0:
continue
markers.append(ax.plot([-x, x], [tick/35, tick/35], marker=None,
linestyle='-', linewidth=1, color=c)[0])
orig_xys.append(markers[-1].get_xydata())
inputs = {'elevator':0, 'aileron':0}
tracking = False
def press(event):
global inputs
global tracking
if event.key == 'up':
inputs['elevator'] -= np.deg2rad(1)
if event.key == 'down':
inputs['elevator'] += np.deg2rad(1)
if event.key == 'left':
inputs['aileron'] -= np.deg2rad(1)
if event.key == 'right':
inputs['aileron'] += np.deg2rad(1)
if event.key == 't':
        tracking = not tracking
print(inputs)
frame = 0
track_a = np.array([.5 if i < 6 else .05 for i in range(12)])
track_k = np.array([7, 11, 16, 25, 38, 61, 103, 131, 151, 181, 313, 523])
track_w = np.array([0.18, 0.28, 0.42, 0.65, 0.99, 1.60, 2.70, 3.43, 3.95, 4.74, 8.19, 13.69])
track_p = np.array([-0.29, -1.03, -3.13, 3.08, -0.84, 0.46, -2.74, -2.18, -1.78, -2.26, -1.82, 0.46])
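# The tracking disturbance appears to be a multi-sine signal over a 240 s period:
# amplitudes track_a, integer frequency indices track_k and phases track_p.
# tracker() below evaluates its time derivative,
#   sum(track_a * 2*pi*track_k/240 * cos(2*pi*track_k*t/240 + track_p)).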
def tracker(self):
global frame
frame += 1
if tracking:
t = frame*dt
track = np.sum(track_a*2*np.pi*track_k/240.0*np.cos(2*np.pi*track_k*t/240.0 + track_p))
track *= 0.025
print(t, (track))
else:
track = 0
ac.update(dt, inputs)
gnd_center = (np.rad2deg(ac.pitch-track))/35
R = np.array(((np.cos(ac.roll), np.sin(ac.roll), 0),
(-np.sin(ac.roll), np.cos(ac.roll), gnd_center),
(0,0,1)))
xy = np.hstack((gnd_xy, np.ones((gnd_xy.shape[0], 1))))
new_xy = R.dot(xy.T).T[:,:2]
gnd.set_xy(new_xy)
for orig_xy, marker in zip(orig_xys, markers):
xy = np.hstack((orig_xy, np.ones((orig_xy.shape[0], 1))))
new_xy = R.dot(xy.T)[:2,:]
marker.set_data(new_xy)
anim = FuncAnimation(fig, tracker, interval=msec, blit=False, repeat=False)
fig.canvas.mpl_connect('key_press_event', press)
plt.show()
|
python
|
import os.path
from lsst.meas.base import CircularApertureFluxAlgorithm
config.measurement.load(os.path.join(os.path.dirname(__file__), "apertures.py"))
config.measurement.load(os.path.join(os.path.dirname(__file__), "kron.py"))
config.measurement.load(os.path.join(os.path.dirname(__file__), "convolvedFluxes.py"))
config.load(os.path.join(os.path.dirname(__file__), "cmodel.py"))
|
python
|
import pyglet
from pyglet.gl import *
blueDot = pyglet.resource.image('blue-dot.png')
redDot = pyglet.resource.image('red-dot.png')
class Window(pyglet.window.Window):
"""docstring for Window"""
def __init__(self):
print("Init Window")
super(Window, self).__init__(500, 400, vsync=False)
self.fps_display = pyglet.clock.ClockDisplay()
self.dict_objects = {}
redDot.width, redDot.height = 10, 10
        blueDot.width, blueDot.height = 10, 10
def setLogic(self, logic):
self.logic = logic
self.dict_objects = self.logic.getDictObjects()
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.ESCAPE:
super(Window, self).close()
else:
print("Key pressed: " + str(symbol))
def on_mouse_press(self, x, y, button, modifiers):
# detect where the click goes (tower -> upgrade menu, building place, etc...)
        # maybe an (x,y) grid might be useful instead of the list so an iteration
# over the list is unnecessary
x = int(x / 50) * 50
y = int(y / 50) * 50
if button == pyglet.window.mouse.LEFT:
print("Left click at (" + str(x) + "," + str(y) + ")")
self.logic.placeTower(x, y)
elif button == pyglet.window.mouse.RIGHT:
print("Right click at (" + str(x) + "," + str(y) + ")")
self.logic.placeMob(x, y)
def redraw(self):
super(Window, self).clear()
self.fps_display.draw()
        # later each Tower or Mob could carry its own texture and its own draw method?
batch = pyglet.graphics.Batch()
pyglet.gl.glPointSize(3)
for mob in self.dict_objects['mobs']:
"""
pyglet.text.Label("M", font_size=30,
x=(mob[0] + 25), y=(mob[1] + 25),
anchor_x='center', anchor_y='center').draw()
"""
#print("Mob x=%d y=%d" %(mob[0], mob[1]))
#redDot.blit((mob[0] + 25), (mob[1] + 25))
vertex_list = batch.add(1, pyglet.gl.GL_POINTS,None,
('v2i', (mob[0]+25, mob[1]+25)),
('c3B', (0, 255, 0)))
for tower in self.dict_objects['towers']:
"""
pyglet.text.Label("T", font_size=30,
x=(tower[0] + 25), y=(tower[1] + 25),
anchor_x='center', anchor_y='center').draw()
"""
#print("Tower x=%d %s y=%d %s"
# %(tower[0], type(tower[0]), tower[1], type(tower[1])))
#blueDot.blit(tower[0], tower[1])
#redDot.blit(350,200)
vertex_list = batch.add(1, pyglet.gl.GL_POINTS,None,
('v2i', (tower[0]+25, tower[1]+25)),
('c3B', (0, 0, 255)))
batch.draw()
|
python
|
# -*- coding:utf-8 -*-
import logging
from flask import request
from flask_restx import Namespace
from app.spider.csdn.csdn import CSDN
from app.spider.toutiao.toutiao_hotnews import ToutiaoNews
from app.api.util.base_resource import BaseResource
from app.api.util.web_response import WebResponse
from app.api.util.web_responsecode import WebResponseCode
log = logging.getLogger(__name__)
nsnews = Namespace('news', description='News and information API')
@nsnews.route("/toutiao")
class NewsController(BaseResource):
def get(self):
'''
        Fetch Toutiao trending news
        refresh: 1,0,true,false
        last: the refresh index value returned by the previous request
        :return:
'''
response = WebResponse()
refresh = request.values.get('refresh') in [1, '1', 'true', 'True', True]
last = request.values.get('last') if request.values.get('last') is not None else 0
news = ToutiaoNews().hotnews(refresh, last_max_behot_time=last)
if news:
response.data = {
'result': news.get('data'),
'has_more': news.get('has_more'),
'next': news.get('next')
}
else:
response.code = WebResponseCode.FAILED
return response.tojson()
@nsnews.route("/csdn")
class CSDNController(BaseResource):
def get(self):
'''
        Fetch CSDN trending tech news
:return:
'''
response = WebResponse()
last = request.values.get('last') if request.values.get('last') is not None else ''
news = CSDN().getHotNews(last)
if news:
response.data = news
else:
response.code = WebResponseCode.FAILED
return response.tojson()
|
python
|
"""Singleton Class"""
# standard library
import threading
class Singleton(type):
"""A singleton Metaclass"""
_instances = {}
_lock = threading.Lock()
def __call__(cls, *args, **kwargs):
"""Evoke call method."""
with cls._lock:
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
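# A minimal usage sketch (illustrative only; `AppConfig` is a hypothetical class):
# any class that declares `metaclass=Singleton` always yields the same instance.
if __name__ == "__main__":
    class AppConfig(metaclass=Singleton):
        def __init__(self, value=None):
            self.value = value

    first = AppConfig(value=1)
    second = AppConfig(value=2)
    assert first is second      # the lock-protected cache returns the same object
    assert second.value == 1    # __init__ is not re-run for the cached instance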
|
python
|
def print_two(*args):  # TODO: *args packs positional arguments for functions; argv is for command-line inputs
arg1, arg2 = args
print('arg1: %r, arg2: %r' % (arg1, arg2))
def print_two_again(arg1, arg2):
print('arg1: %r, arg2: %r' % (arg1, arg2))
print_two("zdr", "zdr")
print_two_again("zdr", "zdr")
|
python
|
import os
import sys
import time
import requests
from py2neo import Graph, Node, Relationship
graph = Graph()
graph.run("CREATE CONSTRAINT ON (u:User) ASSERT u.username IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (t:Tweet) ASSERT t.id IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (h:Hashtag) ASSERT h.name IS UNIQUE")
TWITTER_BEARER = os.environ["TWITTER_BEARER"]
headers = dict(accept="application/json", Authorization="Bearer " + TWITTER_BEARER)
payload = dict(
count=100,
result_type="recent",
lang="en",
q=sys.argv[1]
)
base_url = "https://api.twitter.com/1.1/search/tweets.json?"
def find_tweets(since_id):
payload["since_id"] = since_id
url = base_url + "q={q}&count={count}&result_type={result_type}&lang={lang}&since_id={since_id}".format(**payload)
r = requests.get(url, headers=headers)
tweets = r.json()["statuses"]
return tweets
def upload_tweets(tweets):
for t in tweets:
u = t["user"]
e = t["entities"]
tweet = Node("Tweet", id=t["id"])
graph.merge(tweet)
tweet["text"] = t["text"]
tweet.push()
user = Node("User", username=u["screen_name"])
graph.merge(user)
graph.merge(Relationship(user, "POSTS", tweet))
for h in e.get("hashtags", []):
hashtag = Node("Hashtag", name=h["text"].lower())
graph.merge(hashtag)
graph.merge(Relationship(hashtag, "TAGS", tweet))
for m in e.get('user_mentions', []):
mention = Node("User", username=m["screen_name"])
graph.merge(mention)
graph.merge(Relationship(tweet, "MENTIONS", mention))
reply = t.get("in_reply_to_status_id")
if reply:
reply_tweet = Node("Tweet", id=reply)
graph.merge(reply_tweet)
graph.merge(Relationship(tweet, "REPLY_TO", reply_tweet))
ret = t.get("retweeted_status", {}).get("id")
if ret:
retweet = Node("Tweet", id=ret)
graph.merge(retweet)
graph.merge(Relationship(tweet, "RETWEETS", retweet))
since_id = -1
while True:
try:
tweets = find_tweets(since_id=since_id)
if not tweets:
print("No tweets found.")
time.sleep(60)
continue
since_id = tweets[0].get("id")
upload_tweets(tweets)
print("{} tweets uploaded!".format(len(tweets)))
time.sleep(60)
except Exception as e:
print(e)
time.sleep(60)
continue
|
python
|
import os
import shutil
def create_analysis_folder(folder_name):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
shutil.copy('ffield', folder_name)
shutil.copy('parameters', folder_name)
|
python
|
from collections import defaultdict
import boto3
import click
from halo import Halo
from termcolor import colored, cprint
from ..app import app
from ..utils import formatted_time_ago
def task_id(task_detail: dict) -> str:
tags = {t["key"]: t["value"] for t in task_detail["tags"]}
try:
return tags["paaws:buildNumber"]
except KeyError:
return task_detail["taskArn"].split("/")[-1]
@click.command()
def ps():
"""Show running containers"""
ecs = boto3.client("ecs")
with Halo(text="fetching container information", spinner="dots"):
tasks = app.get_tasks()
tasks_by_group = defaultdict(list)
task_definitions = {}
for t in tasks:
tasks_by_group[t["group"]].append(t)
if t["taskDefinitionArn"] not in task_definitions:
task_definitions[t["taskDefinitionArn"]] = ecs.describe_task_definition(
taskDefinition=t["taskDefinitionArn"]
)["taskDefinition"]
for group in sorted(tasks_by_group.keys()):
tasks = tasks_by_group[group]
defn = task_definitions[tasks[0]["taskDefinitionArn"]]
print(colored("===", attrs=["dark"]), colored(group, "green"))
for t in tasks:
task_line = [
task_id(t),
" ",
colored("(", "white"),
colored(
"cpu:{cpu} mem:{memory}".format(
cpu=int(t["cpu"]) / 1024, memory=t["memory"]
),
"blue",
attrs=["dark", "bold"],
),
colored(")", "white"),
": ",
t["lastStatus"].lower(),
" ",
]
if "startedAt" in t:
task_line.append(formatted_time_ago(t["startedAt"]))
print("".join(task_line))
for c in t["containers"]:
try:
command = [
o["command"]
for o in t["overrides"]["containerOverrides"]
if o["name"] == c["name"]
][0]
except (KeyError, IndexError):
command = [
cd.get("command", ["[container default cmd]"])
for cd in defn["containerDefinitions"]
if cd["name"] == c["name"]
][0]
print_name = f" {c['name']}:"
indent = len(print_name) + 1
print(print_name, colored(" ".join(command), "white"))
container_line2 = [
" " * indent,
"{image} {status}".format(
image=c["image"].split("/")[-1], status=c["lastStatus"].lower()
),
]
cprint("".join(container_line2), attrs=["dark"])
print("")
|
python
|
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setup(
name="sku",
version="0.2",
description="scikit-learn Utilities",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mythologic/sku",
author="Max Humber",
author_email="[email protected]",
license="MIT",
packages=["sku"],
python_requires=">=3.6",
setup_requires=["setuptools>=38.6.0"],
)
|
python
|
# sim_core/views.py
#################
#### imports ####
#################
from app import app
from flask import render_template, Blueprint
from logger import logger
from flask import current_app
################
#### config ####
################
sim_core_blueprint = Blueprint('sim_core', __name__, static_folder='../shared/static/dist', template_folder='./static/dist', url_prefix='/sim', static_url_path="")
################
#### routes ####
################
@sim_core_blueprint.route('/')
def index():
return render_template('sim.html')
|
python
|
from unittest import TestCase
from demands.pagination import PaginatedResults, PaginationType
class PaginationTestsMixin(object):
args = (1, 2, 3)
kwargs = {'one': 1, 'two': 2}
def get(self, start, end, *args, **kwargs):
self.assertEqual(args, self.args)
self.assertEqual(kwargs, self.kwargs)
return self.responses[start:end]
def test_iterate_one_undersized_page(self):
self.responses = list(range(5))
r = list(self.psc)
self.assertEqual(r, self.responses)
def test_iterate_multiple_full_pages(self):
self.responses = list(range(20))
r = list(self.psc)
self.assertEqual(r, self.responses)
def test_iterate_multiple_pages(self):
self.responses = list(range(25))
r = list(self.psc)
self.assertEqual(r, self.responses)
class PagePaginationTest(TestCase, PaginationTestsMixin):
def get(self, *args, **kwargs):
page = kwargs.pop('page')
page_size = kwargs.pop('page_size')
start = (page - 1) * page_size
end = start + page_size
return super(PagePaginationTest, self).get(start, end, *args, **kwargs)
def setUp(self):
self.psc = PaginatedResults(
self.get, args=self.args, kwargs=self.kwargs, page_size=10,
results_key=None)
class PagePaginationTestWithNestedResults(PagePaginationTest):
def get(self, *args, **kwargs):
results = super(PagePaginationTestWithNestedResults, self).get(
*args, **kwargs)
return {'results': results}
def setUp(self):
self.psc = PaginatedResults(
self.get, args=self.args, kwargs=self.kwargs, page_size=10)
class ItemPaginationTest(TestCase, PaginationTestsMixin):
def get(self, *args, **kwargs):
start = kwargs.pop('offset')
end = start + kwargs.pop('limit')
return super(ItemPaginationTest, self).get(start, end, *args, **kwargs)
def setUp(self):
self.psc = PaginatedResults(
self.get, args=self.args, kwargs=self.kwargs, page_size=10,
page_param='offset', page_size_param='limit',
pagination_type=PaginationType.ITEM, results_key=None)
class ItemPaginationTestWithNestedResults(ItemPaginationTest):
def get(self, *args, **kwargs):
results = super(ItemPaginationTestWithNestedResults, self).get(
*args, **kwargs)
return {'results': results}
def setUp(self):
self.psc = PaginatedResults(
self.get, args=self.args, kwargs=self.kwargs, page_size=10,
page_param='offset', page_size_param='limit',
pagination_type=PaginationType.ITEM)
class ItemPaginationTestWithNestedResultsAndNextLink(TestCase):
def setUp(self):
self.psc = PaginatedResults(
self.get, page_size=10,
page_param='offset', page_size_param='limit',
pagination_type=PaginationType.ITEM, next_key='next_page')
def get(self, *args, **kwargs):
# Emulate 5 full pages (offset 0-4), then emulate error.
offset = kwargs['offset']
if offset > 4 * 10:
raise ValueError('No Data')
next = 'next_url' if offset < 4 * 10 else None
return {'results': list(
range(offset, offset + kwargs['limit'])), 'next_page': next}
def test_iteration_stops_on_empty_next(self):
self.assertEqual(list(self.psc), list(range(0, 50)))
|
python
|
N, arr = int(input()), input().split()
print(all([int(i) > 0 for i in arr]) and any([i == i[::-1] for i in arr]))
|
python
|
import tkinter as tk
import tkinter.filedialog as fd
import src.helper.gui as hg
from src.image.extractor import Extractor
from src.helper.file import File
class ImageExtractForm(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
self.initialize()
hg.insert_header(self, 'Steganografi Extract Image')
self.render_file_frame()
self.render_key_frame()
self.render_output_frame()
self.render_execute_frame()
def initialize(self):
self.TITLE_ROW = 0
self.FILE_ROW = 1
self.KEY_ROW = 2
self.OUTPUT_ROW = 3
self.EXECUTE_ROW = 4
self.DEFAULT_OUT_FILENAME = 'extract_result'
self.image_dir = tk.StringVar()
self.image_dir.set('')
self.output_filename = tk.StringVar()
self.output_filename.set(self.DEFAULT_OUT_FILENAME)
def render_file_frame(self):
file_frame = hg.create_frame(self, self.FILE_ROW + 1)
hg.create_label(file_frame, 'Image', 0, 0)
hg.create_label(file_frame, self.image_dir, 0, 1, fix_text=False)
hg.create_button(file_frame, 'Choose',
lambda: self.load_image_file(), 1, 0)
def render_key_frame(self):
key_frame = hg.create_frame(self, self.KEY_ROW + 1)
hg.create_label(key_frame, 'Stegano Key:', 0, 0)
self.key_entry = hg.create_entry(key_frame, "", 1, 0)
def render_output_frame(self):
output_frame = hg.create_frame(self, self.OUTPUT_ROW + 1)
hg.create_label(output_frame, 'Output file\'s name:', 0, 0)
self.output_name = hg.create_entry(
output_frame, self.DEFAULT_OUT_FILENAME, 1, 0)
def render_execute_frame(self):
execute_frame = hg.create_frame(self, self.EXECUTE_ROW + 1)
hg.create_button(execute_frame, 'Execute',
lambda: self.execute(), 0, 0)
hg.create_button(execute_frame, 'Back',
lambda: self.controller.show_frame("StartPage"), 0, 1)
def load_image_file(self):
dialog = fd.askopenfilename(
filetypes=(("Image File", ('.bmp', '.png')),)
)
self.image_dir.set(dialog)
def execute(self):
print('Extract Started!')
print('> Image dir:', self.image_dir.get())
print('> Key:', self.key_entry.get())
file_dir = self.image_dir.get()
key = self.key_entry.get()
output_filename = self.output_name.get()
if file_dir == '' or key == '' or output_filename == '':
return
extract = Extractor(file_dir, key)
extract.extract_messages()
extract.parse_message()
file_name = "output/" + output_filename + "." + extract.extension
output_file = File(file_name)
byte = extract.write_secret_message()
output_file.write_files(byte)
print('Extraction Finished!')
title = "Finish Extract Secret Message from Image"
self.controller.show_end_frame(title, "None", file_name, 0)
|
python
|
import copy
import operator
from functools import cached_property, reduce
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.functional import mse_loss
from torch.optim import Adam
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import hard_update, soft_update
from ai_traineree.buffers import ReplayBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.bodies import ActorBody, CriticBody
from ai_traineree.noise import GaussianNoise
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
class DDPGAgent(AgentBase):
"""
Deep Deterministic Policy Gradients (DDPG).
Instead of popular Ornstein-Uhlenbeck (OU) process for noise this agent uses Gaussian noise.
This agent is intended for continuous tasks.
"""
model = "DDPG"
def __init__(
self, obs_space: DataSpace, action_space: DataSpace, noise_scale: float=0.2, noise_sigma: float=0.1, **kwargs
):
"""
Parameters:
obs_space (DataSpace): Dataspace describing the input.
action_space (DataSpace): Dataspace describing the output.
noise_scale (float): Added noise amplitude. Default: 0.2.
noise_sigma (float): Added noise variance. Default: 0.1.
Keyword parameters:
hidden_layers (tuple of ints): Shape of the hidden layers in fully connected network. Default: (64, 64).
gamma (float): Discount value. Default: 0.99.
tau (float): Soft-copy factor. Default: 0.002.
actor_lr (float): Learning rate for the actor (policy). Default: 0.0003.
critic_lr (float): Learning rate for the critic (value function). Default: 0.0003.
max_grad_norm_actor (float) Maximum norm value for actor gradient. Default: 10.
max_grad_norm_critic (float): Maximum norm value for critic gradient. Default: 10.
batch_size (int): Number of samples used in learning. Default: 64.
buffer_size (int): Maximum number of samples to store. Default: 1e6.
warm_up (int): Number of samples to observe before starting any learning step. Default: 0.
update_freq (int): Number of steps between each learning step. Default 1.
number_updates (int): How many times to use learning step in the learning phase. Default: 1.
"""
super().__init__(**kwargs)
self.device = self._register_param(kwargs, "device", DEVICE)
self.obs_space = obs_space
self.action_space = action_space
self._config['obs_space'] = self.obs_space
self._config['action_space'] = self.action_space
action_shape = action_space.to_feature()
action_size = reduce(operator.mul, action_shape)
# Reason sequence initiation.
hidden_layers = to_numbers_seq(self._register_param(kwargs, 'hidden_layers', (64, 64)))
self.actor = ActorBody(
obs_space.shape, action_shape, hidden_layers=hidden_layers, gate_out=torch.tanh).to(self.device)
self.critic = CriticBody(
obs_space.shape, action_size, hidden_layers=hidden_layers).to(self.device)
self.target_actor = ActorBody(
obs_space.shape, action_shape, hidden_layers=hidden_layers, gate_out=torch.tanh).to(self.device)
self.target_critic = CriticBody(
obs_space.shape, action_size, hidden_layers=hidden_layers).to(self.device)
# Noise sequence initiation
self.noise = GaussianNoise(
shape=action_shape, mu=1e-8, sigma=noise_sigma, scale=noise_scale, device=self.device)
# Target sequence initiation
hard_update(self.target_actor, self.actor)
hard_update(self.target_critic, self.critic)
# Optimization sequence initiation.
self.actor_lr = float(self._register_param(kwargs, 'actor_lr', 3e-4))
self.critic_lr = float(self._register_param(kwargs, 'critic_lr', 3e-4))
self.actor_optimizer = Adam(self.actor.parameters(), lr=self.actor_lr)
self.critic_optimizer = Adam(self.critic.parameters(), lr=self.critic_lr)
self.max_grad_norm_actor = float(self._register_param(kwargs, "max_grad_norm_actor", 10.0))
self.max_grad_norm_critic = float(self._register_param(kwargs, "max_grad_norm_critic", 10.0))
self.gamma = float(self._register_param(kwargs, 'gamma', 0.99))
self.tau = float(self._register_param(kwargs, 'tau', 0.02))
self.batch_size = int(self._register_param(kwargs, 'batch_size', 64))
self.buffer_size = int(self._register_param(kwargs, 'buffer_size', int(1e6)))
self.buffer = ReplayBuffer(self.batch_size, self.buffer_size)
self.warm_up = int(self._register_param(kwargs, 'warm_up', 0))
self.update_freq = int(self._register_param(kwargs, 'update_freq', 1))
self.number_updates = int(self._register_param(kwargs, 'number_updates', 1))
# Breath, my child.
self.reset_agent()
self.iteration = 0
self._loss_actor = 0.
self._loss_critic = 0.
def reset_agent(self) -> None:
self.actor.reset_parameters()
self.critic.reset_parameters()
self.target_actor.reset_parameters()
self.target_critic.reset_parameters()
@property
def loss(self) -> Dict[str, float]:
return {'actor': self._loss_actor, 'critic': self._loss_critic}
@loss.setter
def loss(self, value):
if isinstance(value, dict):
self._loss_actor = value['actor']
self._loss_critic = value['critic']
else:
self._loss_actor = value
self._loss_critic = value
def __eq__(self, o: object) -> bool:
return super().__eq__(o) \
and isinstance(o, type(self)) \
and self._config == o._config \
and self.buffer == o.buffer \
and self.get_network_state() == o.get_network_state()
@cached_property
def action_min(self):
return to_tensor(self.action_space.low)
@cached_property
def action_max(self):
return to_tensor(self.action_space.high)
@torch.no_grad()
def act(self, obs: ObsType, noise: float=0.0) -> List[float]:
"""Acting on the observations. Returns action.
Parameters:
obs (array_like): current state
            noise (optional float): Scale factor applied to the sampled Gaussian noise. Default: 0.
Returns:
action: (list float) Action values.
"""
t_obs = to_tensor(obs).float().to(self.device)
action = self.actor(t_obs)
action += noise*self.noise.sample()
action = torch.clamp(action, self.action_min, self.action_max)
return action.cpu().numpy().tolist()
def step(self, obs: ObsType, action: ActionType, reward: RewardType, next_obs: ObsType, done: DoneType) -> None:
self.iteration += 1
self.buffer.add(state=obs, action=action, reward=reward, next_state=next_obs, done=done)
if self.iteration < self.warm_up:
return
if len(self.buffer) > self.batch_size and (self.iteration % self.update_freq) == 0:
for _ in range(self.number_updates):
self.learn(self.buffer.sample())
def compute_value_loss(self, states, actions, next_states, rewards, dones):
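        # TD(0) target for the critic: Q_target = r + gamma * Q'(s', mu'(s')) * (1 - done),
        # with mu' and Q' the target actor/critic; the value loss is the MSE against it.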
next_actions = self.target_actor.act(next_states)
assert next_actions.shape == actions.shape, f"{next_actions.shape} != {actions.shape}"
Q_target_next = self.target_critic.act(next_states, next_actions)
Q_target = rewards + self.gamma * Q_target_next * (1 - dones)
Q_expected = self.critic(states, actions)
assert Q_expected.shape == Q_target.shape == Q_target_next.shape
return mse_loss(Q_expected, Q_target)
def compute_policy_loss(self, states) -> Tensor:
"""Compute Policy loss based on provided states.
Loss = Mean(-Q(s, _a) ),
where _a is actor's estimate based on state, _a = Actor(s).
"""
pred_actions = self.actor(states)
return -self.critic(states, pred_actions).mean()
def learn(self, experiences) -> None:
"""Update critics and actors"""
rewards = to_tensor(experiences['reward']).float().to(self.device).unsqueeze(1)
dones = to_tensor(experiences['done']).type(torch.int).to(self.device).unsqueeze(1)
states = to_tensor(experiences['state']).float().to(self.device)
actions = to_tensor(experiences['action']).float().to(self.device).view((-1,) + self.action_space.shape)
next_states = to_tensor(experiences['next_state']).float().to(self.device)
assert rewards.shape == dones.shape == (self.batch_size, 1), f"R.shape={rewards.shape}, D.shap={dones.shape}"
assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape, f"states.shape: {states.shape}"
assert actions.shape == (self.batch_size,) + self.action_space.shape, f"actions.shape: {actions.shape}" # type: ignore
# Value (critic) optimization
loss_critic = self.compute_value_loss(states, actions, next_states, rewards, dones)
self.critic_optimizer.zero_grad()
loss_critic.backward()
nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm_critic)
self.critic_optimizer.step()
self._loss_critic = float(loss_critic.item())
# Policy (actor) optimization
loss_actor = self.compute_policy_loss(states)
self.actor_optimizer.zero_grad()
loss_actor.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), self.max_grad_norm_actor)
self.actor_optimizer.step()
self._loss_actor = loss_actor.item()
# Soft update target weights
soft_update(self.target_actor, self.actor, self.tau)
soft_update(self.target_critic, self.critic, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Describes agent's networks.
Returns:
state: (dict) Provides actors and critics states.
"""
return {
"actor": self.actor.state_dict(),
"target_actor": self.target_actor.state_dict(),
"critic": self.critic.state_dict(),
"target_critic": self.target_critic.state_dict()
}
def log_metrics(self, data_logger: DataLogger, step: int, full_log: bool=False):
data_logger.log_value("loss/actor", self._loss_actor, step)
data_logger.log_value("loss/critic", self._loss_critic, step)
if full_log:
for idx, layer in enumerate(self.actor.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"actor/layer_weights_{idx}", layer.weight, step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"actor/layer_bias_{idx}", layer.bias, step)
for idx, layer in enumerate(self.critic.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"critic/layer_weights_{idx}", layer.weight, step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"critic/layer_bias_{idx}", layer.bias, step)
def get_state(self) -> AgentState:
return AgentState(
model=self.model,
obs_space=self.obs_space,
action_space=self.action_space,
config=self._config,
buffer=copy.deepcopy(self.buffer.get_state()),
network=copy.deepcopy(self.get_network_state()),
)
def get_network_state(self) -> NetworkState:
net = dict(
actor=self.actor.state_dict(),
target_actor=self.target_actor.state_dict(),
critic=self.critic.state_dict(),
target_critic=self.target_critic.state_dict(),
)
return NetworkState(net=net)
@staticmethod
def from_state(state: AgentState) -> AgentBase:
config = copy.copy(state.config)
config.update({'obs_space': state.obs_space, 'action_space': state.action_space})
agent = DDPGAgent(**config)
if state.network is not None:
agent.set_network(state.network)
if state.buffer is not None:
agent.set_buffer(state.buffer)
return agent
def set_buffer(self, buffer_state: BufferState) -> None:
self.buffer = BufferFactory.from_state(buffer_state)
def set_network(self, network_state: NetworkState) -> None:
self.actor.load_state_dict(copy.deepcopy(network_state.net['actor']))
self.target_actor.load_state_dict(network_state.net['target_actor'])
self.critic.load_state_dict(network_state.net['critic'])
self.target_critic.load_state_dict(network_state.net['target_critic'])
def save_state(self, path: str) -> None:
agent_state = self.get_state()
torch.save(agent_state, path)
def load_state(self, *, path: Optional[str]=None, agent_state: Optional[dict]=None):
        if path is None and agent_state is None:
raise ValueError("Either `path` or `agent_state` must be provided to load agent's state.")
if path is not None and agent_state is None:
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.actor.load_state_dict(agent_state['actor'])
self.critic.load_state_dict(agent_state['critic'])
self.target_actor.load_state_dict(agent_state['target_actor'])
self.target_critic.load_state_dict(agent_state['target_critic'])
|
python
|
# -*- coding: utf-8 -*-
"""
flask_jsonschema
~~~~~~~~~~~~~~~~
flask_jsonschema
"""
import os
from functools import wraps
try:
import simplejson as json
except ImportError:
import json
from flask import current_app, request
from jsonschema import ValidationError, validate
class _JsonSchema(object):
def __init__(self, schemas):
self._schemas = schemas
def get_schema(self, path):
rv = self._schemas[path[0]]
for p in path[1:]:
rv = rv[p]
return rv
class JsonSchema(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self._state = self.init_app(app)
def init_app(self, app):
default_dir = os.path.join(app.root_path, 'jsonschema')
schema_dir = app.config.get('JSONSCHEMA_DIR', default_dir)
schemas = {}
for fn in os.listdir(schema_dir):
key = fn.split('.')[0]
fn = os.path.join(schema_dir, fn)
if os.path.isdir(fn) or not fn.endswith('.json'):
continue
with open(fn) as f:
schemas[key] = json.load(f)
state = _JsonSchema(schemas)
app.extensions['jsonschema'] = state
return state
def validate(self, *path):
def wrapper(fn):
@wraps(fn)
def decorated(*args, **kwargs):
schema = current_app.extensions['jsonschema'].get_schema(path)
validate(request.json, schema)
return fn(*args, **kwargs)
return decorated
return wrapper
def __getattr__(self, name):
return getattr(self._state, name, None)
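# Minimal usage sketch (the schema directory, file name and route below are hypothetical
# and only illustrate how the `validate` decorator is applied):
#
#   from flask import Flask, jsonify, request
#
#   app = Flask(__name__)
#   app.config['JSONSCHEMA_DIR'] = 'schemas'    # holds e.g. schemas/users.json
#   jsonschema = JsonSchema(app)
#
#   @app.route('/users', methods=['POST'])
#   @jsonschema.validate('users', 'create')     # resolves schemas['users']['create']
#   def create_user():
#       return jsonify(request.json), 201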
|
python
|
from tortoise.contrib.pydantic import pydantic_model_creator
from typing import Optional
from pydantic import BaseModel
from db.models import Meals
MealsInSchema = pydantic_model_creator(
Meals, name="MealIn", exclude_readonly=True
)
MealsOutSchema = pydantic_model_creator(
Meals, name="MealOut", exclude=["created_on"]
)
MealsDatabaseSchema = pydantic_model_creator(
Meals, name="Meal", exclude=["created_on"]
)
class UpdateMeal(BaseModel):
name: Optional[str]
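# Hypothetical FastAPI usage sketch (the router, path and Meals fields are assumptions,
# not part of this module):
#
#   from fastapi import APIRouter
#   router = APIRouter()
#
#   @router.post('/meals', response_model=MealsOutSchema)
#   async def create_meal(meal: MealsInSchema):
#       obj = await Meals.create(**meal.dict(exclude_unset=True))
#       return await MealsOutSchema.from_tortoise_orm(obj)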
|
python
|
"""
Host Guest Complex
==================
"""
from __future__ import annotations
import typing
from collections import abc
from ...molecules import BuildingBlock
from ...reactions import GenericReactionFactory
from ..topology_graph import (
ConstructionState,
NullOptimizer,
Optimizer,
TopologyGraph,
Vertex,
)
from .vertices import GuestVertex, HostVertex
class Guest:
"""
Holds the data defining the placement of a guest molecule.
"""
def __init__(
self,
building_block: BuildingBlock,
start_vector: tuple[float, float, float] = (1., 0., 0.),
end_vector: tuple[float, float, float] = (1., 0., 0.),
displacement: tuple[float, float, float] = (1., 0., 0.),
) -> None:
"""
Initialize a :class:`.Guest` instance.
Parameters:
building_block: The guest molecule.
start_vector: A direction vector which gets aligned with
`end_vector`.
end_vector: A direction vector which determines the
rotation applied to the `building_block`. A rotation
such that `start_vector` is transformed into
`end_vector` is applied.
displacement: The translational offset of the guest.
"""
self._building_block = building_block
self._start_vector = start_vector
self._end_vector = end_vector
self._displacement = displacement
def get_building_block(self) -> BuildingBlock:
"""
Return the building block.
Returns:
The building block.
"""
return self._building_block
def get_start_vector(self) -> tuple[float, float, float]:
"""
Return the start vector.
Returns:
The start vector.
"""
return self._start_vector
def get_end_vector(self) -> tuple[float, float, float]:
"""
Return the end vector.
Returns:
The end vector.
"""
return self._end_vector
def get_displacement(self) -> tuple[float, float, float]:
"""
Return the displacement.
Returns:
The displacement.
"""
return self._displacement
def __repr__(self) -> str:
return (
f'{self.__class__.__name__}('
f'{self._building_block!r}, '
f'start_vector={self._start_vector!r}, '
f'end_vector={self._end_vector!r}, '
f'displacement={self._displacement!r})'
)
class Complex(TopologyGraph):
"""
Represents a host-guest complex topology graph.
Host and guest building blocks do not require functional groups.
Examples:
*Construction*
You can use :class:`.ConstructedMolecule` instances as the
host, but you should turn them into a :class:`.BuildingBlock`
first
.. testcode:: construction
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=stk.host_guest.Guest(
building_block=stk.BuildingBlock('[Br][Br]'),
),
),
)
.. moldoc::
import moldoc.molecule as molecule
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=stk.host_guest.Guest(
building_block=stk.BuildingBlock('[Br][Br]'),
),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in complex.get_bonds()
),
)
You can also generate complexes with multiple guests.
.. testcode:: multi-guest-construction
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
),
)
.. moldoc::
import moldoc.molecule as molecule
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in complex.get_bonds()
),
)
*Suggested Optimization*
For :class:`.Complex` topologies, it is recommended to use the
:class:`.Spinner` optimizer. It is also recommended that the
building blocks are already optimized prior to construction.
This optimizer will work on multi-guest systems.
.. testcode:: suggested-optimization
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
optimizer=stk.Spinner(),
),
)
.. moldoc::
import moldoc.molecule as molecule
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
optimizer=stk.Spinner(),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in complex.get_bonds()
),
)
*Changing the Position of the Guest*
You can change the position and orientation of the guest, as
well as its displacement
.. testcode:: changing-the-position-of-the-guest
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles='BrCC(Br)CBr',
functional_groups=[stk.BromoFactory()],
),
),
),
)
guest_building_block = stk.BuildingBlock('[Br][Br]')
guest = stk.host_guest.Guest(
building_block=guest_building_block,
# Apply a rotation onto the guest molecule such that
# the vector returned by get_direction() has the same
# direction as [1, 1, 1].
start_vector=guest_building_block.get_direction(),
end_vector=[1, 1, 1],
# Change the displacement of the guest.
displacement=[5.3, 2.1, 7.1],
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=guest,
),
)
"""
def __init__(
self,
host: BuildingBlock,
guests: typing.Union[Guest, typing.Iterable[Guest]],
num_processes: int = 1,
optimizer: Optimizer = NullOptimizer(),
) -> None:
"""
Initialize an instance of :class:`.Complex`.
Parameters:
host: The host molecule.
guests: The guest molecules. Can be a single
:class:`.Guest` instance if only one guest is being
used.
num_processes: The number of parallel processes to create
during :meth:`construct`.
optimizer: Used to optimize the structure of the
constructed molecule.
"""
building_block_vertices = self._get_vertices_from_guests(
host=host,
guests=guests,
)
super().__init__(
building_block_vertices=building_block_vertices,
edges=(),
reaction_factory=GenericReactionFactory(),
construction_stages=(),
num_processes=num_processes,
optimizer=optimizer,
edge_groups=(),
)
def _get_vertices_from_guests(
self,
host: BuildingBlock,
guests: typing.Union[Guest, typing.Iterable[Guest]],
) -> dict[BuildingBlock, abc.Sequence[Vertex]]:
if isinstance(guests, Guest):
guests = (guests, )
building_block_vertices: dict[
BuildingBlock, abc.Sequence[Vertex]
]
building_block_vertices = {
host: (HostVertex(0, (0., 0., 0.)), )
}
guest_vertices = {
guest.get_building_block(): (GuestVertex(
id=i+1,
position=guest.get_displacement(),
start=guest.get_start_vector(),
target=guest.get_end_vector(),
), )
for i, guest in enumerate(guests)
}
building_block_vertices.update(guest_vertices)
return building_block_vertices
def clone(self) -> Complex:
return self._clone()
def _run_reactions(
self,
state: ConstructionState,
) -> ConstructionState:
return state
def _get_scale(
self,
building_block_vertices: dict[
BuildingBlock, abc.Sequence[Vertex]
],
) -> float:
return 1.
def __repr__(self) -> str:
return 'host_guest.Complex()'
|
python
|
import bs4
import json
import requests
import time
from utils import (get_content, get_soup, save_json, load_json)
MANGA_SEARCH_URL = 'https://myanimelist.net/manga.php?type=1&q='
# load series information
all_series = load_json("data.json")
for series in all_series:
# search on MyAnimeList
query_soup = get_soup(get_content(MANGA_SEARCH_URL + series['name']))
time.sleep(15) # rate limiting
table_row_tag = query_soup.find('div', class_='js-categories-seasonal').tr.next_sibling
link_tag = table_row_tag.find('a', class_='hoverinfo_trigger fw-b')
# series name in english
name_en = link_tag.strong.text
print(f'{series["name"]} | {name_en}')
# parse series page
info_url = link_tag['href']
info_soup = get_soup(get_content(info_url))
time.sleep(15) # rate limiting
container = info_soup.find('div', class_='js-scrollfix-bottom')
# author
author_tags = container.find('span', string='Authors:').parent.find_all('a')
author = ''
for tag in author_tags:
author_name = tag['href'].rsplit('/', 1)[1].replace('_', ' ')
author_work = tag.next_sibling # story, art or both
author += author_name + author_work
# update series information
series['name'] = name_en
series['author'] = author
# save updated series information
save_json("data.json", all_series)
|
python
|
from random import randint
from game_map.direction import Direction
from game_map.rect import Rect
class Room(Rect):
"""
A Room is just a Rect that can tell you where its walls are
"""
def __init__(self, x, y, width, height):
super(Room, self).__init__(x, y, width, height)
def get_wall(self, direction):
"""
Find the first wall in a given direction
:param Direction direction: direction to look
:return int, int, int, int: x1,y1, x2,y2 defining the wall
"""
if direction == Direction.UP:
return self.x1, self.y1 - 1, self.x2, self.y1 - 1
elif direction == Direction.RIGHT:
return self.x2 + 1, self.y1, self.x2 + 1, self.y2
elif direction == Direction.DOWN:
return self.x1, self.y2 + 1, self.x2, self.y2 + 1
elif direction == Direction.LEFT:
return self.x1 - 1, self.y1, self.x1 - 1, self.y2
def get_wall_point(self, direction=None):
"""
Returns a random point from the wall in the indicated direction
:param Direction direction:
:return int, int: x, y point along wall
"""
if direction is None:
direction = Direction.random_direction()
x1, y1, x2, y2 = self.get_wall(direction)
x = randint(x1, x2)
y = randint(y1, y2)
return x, y
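# Illustrative sketch (assumes Rect exposes x1, y1, x2, y2 as the wall math above implies):
#
#   room = Room(10, 5, 6, 4)
#   print(room.get_wall(Direction.UP))   # wall strip one tile above the top edge
#   print(room.get_wall_point())         # random point on a randomly chosen wall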
|
python
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import deque, defaultdict
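# `pay` computes the tree diameter with the classic double-BFS technique: a BFS from an
# arbitrary vertex (vertex 0) finds the farthest vertex, which for a tree with
# non-negative edge weights is always an endpoint of a diameter; a second BFS from that
# endpoint then yields the diameter itself as the largest accumulated distance.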
def pay(adj, ln):
visited = [False] * ln
distances = [0] * ln
max_distance = 0
farthest_vertex = 0
visited[0] = True
queue = deque([0])
while queue:
u = queue.popleft()
for v, w in adj[u]:
if not visited[v]:
visited[v] = True
queue.append(v)
distances[v] = distances[u] + w
if distances[v] >= max_distance:
max_distance = distances[v]
farthest_vertex = v
visited = [False] * ln
distances = [0] * ln
max_distance = 0
visited[farthest_vertex] = True
queue.append(farthest_vertex)
while queue:
u = queue.popleft()
for v, w in adj[u]:
if not visited[v]:
visited[v] = True
queue.append(v)
distances[v] = distances[u] + w
if distances[v] > max_distance:
max_distance = distances[v]
if max_distance > 10000:
cost = 10000
elif max_distance > 1000:
cost = 1000
elif max_distance > 100:
cost = 100
else:
cost = 0
return cost, max_distance
t = int(input())
for _ in range(t):
n = int(input())
adjacency = defaultdict(list)
for _ in range(n - 1):
a, b, weight = map(int, input().strip().split())
a -= 1
b -= 1
adjacency[a].append((b, weight))
adjacency[b].append((a, weight))
print(*pay(adjacency, n))
|
python
|
import unittest
from mock import Mock
from foundations_events.producers.jobs.run_job import RunJob
class TestProducerRunJob(unittest.TestCase):
def setUp(self):
from foundations_internal.foundations_job import FoundationsJob
self.route_name = None
self.message = None
self._foundations_job = FoundationsJob()
self._foundations_job.job_id = 'some_project'
self._router = Mock()
self._router.push_message.side_effect = self._push_message
self._producer = RunJob(self._router, self._foundations_job)
def test_push_message_sends_run_job_message_to_correct_channel(self):
self._producer.push_message()
self.assertEqual('run_job', self.route_name)
def test_push_message_sends_run_job_message_with_job_id(self):
self._foundations_job.job_id = 'my fantastic job'
self._foundations_job.project_name = 'this project'
self._producer.push_message()
self.assertEqual({'job_id': 'my fantastic job',
'project_name': 'this project',
'monitor_name': 'None'}, self.message)
def test_push_message_sends_run_job_message_with_job_id_different_job_different_project(self):
self._foundations_job.job_id = 'neural nets in space!'
self._foundations_job.project_name = 'that project'
self._producer.push_message()
self.assertEqual({'job_id': 'neural nets in space!',
'project_name': 'that project',
'monitor_name': 'None'}, self.message)
def _push_message(self, route_name, message):
self.route_name = route_name
self.message = message
|
python
|
import os
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
|
python
|
# Copyright (c) 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import errno
from yardstick.tests import STL_MOCKS
from yardstick.common import exceptions as y_exceptions
from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqGen
from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqVNF
from yardstick.benchmark.contexts import base as ctx_base
SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
stl_patch.start()
if stl_patch:
from yardstick.network_services.vnf_generic.vnf import prox_vnf
from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
VNF_NAME = "vnf__1"
class TestProxIrqVNF(unittest.TestCase):
SCENARIO_CFG = {
'task_path': "",
'nodes': {
'tg__1': 'trafficgen_1.yardstick',
'vnf__1': 'vnf.yardstick'},
'runner': {
'duration': 600, 'type': 'Duration'},
'topology': 'prox-tg-topology-2.yaml',
'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
'type': 'NSPerf',
'options': {
'tg__1': {'prox_args': {'-e': '',
'-t': ''},
'prox_config': 'configs/l3-gen-2.cfg',
'prox_path':
'/root/dppd-PROX-v035/build/prox'},
'vnf__1': {
'prox_args': {'-t': ''},
'prox_config': 'configs/l3-swap-2.cfg',
'prox_path': '/root/dppd-PROX-v035/build/prox'}}}
VNFD_0 = {
'short-name': 'VpeVnf',
'vdu': [
{
'routing_table': [
{
'network': '152.16.100.20',
'netmask': '255.255.255.0',
'gateway': '152.16.100.20',
'if': 'xe0'
},
{
'network': '152.16.40.20',
'netmask': '255.255.255.0',
'gateway': '152.16.40.20',
'if': 'xe1'
},
],
'description': 'VPE approximation using DPDK',
'name': 'vpevnf-baremetal',
'nd_route_tbl': [
{
'network': '0064:ff9b:0:0:0:0:9810:6414',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:6414',
'if': 'xe0'
},
{
'network': '0064:ff9b:0:0:0:0:9810:2814',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:2814',
'if': 'xe1'
},
],
'id': 'vpevnf-baremetal',
'external-interface': [
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:03',
'vpci': '0000:05:00.0',
'local_ip': '152.16.100.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 0,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.100.20',
'local_mac': '00:00:00:00:00:01'
},
'vnfd-connection-point-ref': 'xe0',
'name': 'xe0'
},
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:04',
'vpci': '0000:05:00.1',
'local_ip': '152.16.40.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 1,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.40.20',
'local_mac': '00:00:00:00:00:02'
},
'vnfd-connection-point-ref': 'xe1',
'name': 'xe1'
},
],
},
],
'description': 'Vpe approximation using DPDK',
'mgmt-interface': {
'vdu-id': 'vpevnf-baremetal',
'host': '1.1.1.1',
'password': 'r00t',
'user': 'root',
'ip': '1.1.1.1'
},
'benchmark': {
'kpi': [
'packets_in',
'packets_fwd',
'packets_dropped',
],
},
'connection-point': [
{
'type': 'VPORT',
'name': 'xe0',
},
{
'type': 'VPORT',
'name': 'xe1',
},
],
'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
}
VNFD = {
'vnfd:vnfd-catalog': {
'vnfd': [
VNFD_0,
]
}
}
TRAFFIC_PROFILE = {
"schema": "isb:traffic_profile:0.1",
"name": "fixed",
"description": "Fixed traffic profile to run UDP traffic",
"traffic_profile": {
"traffic_type": "FixedTraffic",
"frame_rate": 100, # pps
"flow_number": 10,
"frame_size": 64,
},
}
CONTEXT_CFG = {
'nodes': {
'tg__2': {
'member-vnf-index': '3',
'role': 'TrafficGen',
'name': 'trafficgen_2.yardstick',
'vnfd-id-ref': 'tg__2',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens513f0',
'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.20',
'dst_mac': '00:00:00:00:00:01',
'local_mac': '00:00:00:00:00:03',
'dst_ip': '152.16.40.19',
'driver': 'ixgbe',
'vpci': '0000:02:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens513f1',
'netmask': '255.255.255.0',
'network': '202.16.100.0',
'local_ip': '202.16.100.20',
'local_mac': '00:1e:67:d0:60:5d',
'driver': 'ixgbe',
'vpci': '0000:02:00.1',
'dpdk_port_num': 1,
},
},
'password': 'r00t',
'VNF model': 'l3fwd_vnf.yaml',
'user': 'root',
},
'tg__1': {
'member-vnf-index': '1',
'role': 'TrafficGen',
'name': 'trafficgen_1.yardstick',
'vnfd-id-ref': 'tg__1',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens785f0',
'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.20',
'dst_mac': '00:00:00:00:00:02',
'local_mac': '00:00:00:00:00:04',
'dst_ip': '152.16.100.19',
'driver': 'i40e',
'vpci': '0000:05:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens785f1',
'netmask': '255.255.255.0',
'local_ip': '152.16.100.21',
'local_mac': '00:00:00:00:00:01',
'driver': 'i40e',
'vpci': '0000:05:00.1',
'dpdk_port_num': 1,
},
},
'password': 'r00t',
'VNF model': 'tg_rfc2544_tpl.yaml',
'user': 'root',
},
'vnf__1': {
'name': 'vnf.yardstick',
'vnfd-id-ref': 'vnf__1',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens786f0',
'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.19',
'dst_mac': '00:00:00:00:00:04',
'local_mac': '00:00:00:00:00:02',
'dst_ip': '152.16.100.20',
'driver': 'i40e',
'vpci': '0000:05:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens786f1',
'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.19',
'dst_mac': '00:00:00:00:00:03',
'local_mac': '00:00:00:00:00:01',
'dst_ip': '152.16.40.20',
'driver': 'i40e',
'vpci': '0000:05:00.1',
'dpdk_port_num': 1,
},
},
'routing_table': [
{
'netmask': '255.255.255.0',
'gateway': '152.16.100.20',
'network': '152.16.100.20',
'if': 'xe0',
},
{
'netmask': '255.255.255.0',
'gateway': '152.16.40.20',
'network': '152.16.40.20',
'if': 'xe1',
},
],
'member-vnf-index': '2',
'host': '1.2.1.1',
'role': 'vnf',
'user': 'root',
'nd_route_tbl': [
{
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:6414',
'network': '0064:ff9b:0:0:0:0:9810:6414',
'if': 'xe0',
},
{
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:2814',
'network': '0064:ff9b:0:0:0:0:9810:2814',
'if': 'xe1',
},
],
'password': 'r00t',
'VNF model': 'prox_vnf.yaml',
},
},
}
def test___init__(self):
prox_irq_vnf = ProxIrqVNF('vnf1', self.VNFD_0)
self.assertEqual(prox_irq_vnf.name, 'vnf1')
self.assertDictEqual(prox_irq_vnf.vnfd_helper, self.VNFD_0)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
@mock.patch(SSH_HELPER)
def test_collect_kpi(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
resource_helper = mock.MagicMock()
core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10,
'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12,
'overflow': 10}
core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0,
'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12,
'overflow': 10}
irq_data = {'core_1': core_1, 'core_2': core_2}
resource_helper.execute.return_value = (irq_data)
build_config_file = mock.MagicMock()
build_config_file.return_value = None
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
startup = ["global", [["eal", "-4"]]]
master_0 = ["core 0", [["mode", "master"]]]
core_1 = ["core 1", [["mode", "irq"]]]
core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]]
prox_irq_vnf.setup_helper._prox_config_data = \
[startup, master_0, core_1, core_2]
prox_irq_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
prox_irq_vnf.resource_helper = resource_helper
prox_irq_vnf.setup_helper.build_config_file = build_config_file
result = prox_irq_vnf.collect_kpi()
self.assertDictEqual(result["collect_stats"], {})
result = prox_irq_vnf.collect_kpi()
self.assertFalse('bucket_10' in result["collect_stats"]['core_2'])
self.assertFalse('bucket_11' in result["collect_stats"]['core_2'])
self.assertFalse('bucket_12' in result["collect_stats"]['core_2'])
self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12)
@mock.patch(SSH_HELPER)
def test_vnf_execute_oserror(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
prox_irq_vnf.resource_helper = resource_helper = mock.Mock()
resource_helper.execute.side_effect = OSError(errno.EPIPE, "")
prox_irq_vnf.vnf_execute("", _ignore_errors=True)
resource_helper.execute.side_effect = OSError(errno.ESHUTDOWN, "")
prox_irq_vnf.vnf_execute("", _ignore_errors=True)
resource_helper.execute.side_effect = OSError(errno.EADDRINUSE, "")
with self.assertRaises(OSError):
prox_irq_vnf.vnf_execute("", _ignore_errors=True)
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
@mock.patch(SSH_HELPER)
def test_terminate(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
mock_ssh(ssh, exec_result=(1, "", ""))
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
prox_irq_vnf._terminated = mock.MagicMock()
prox_irq_vnf._traffic_process = mock.MagicMock()
prox_irq_vnf._traffic_process.terminate = mock.Mock()
prox_irq_vnf.ssh_helper = mock.MagicMock()
prox_irq_vnf.setup_helper = mock.MagicMock()
prox_irq_vnf.resource_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False})
prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock()
prox_irq_vnf._run_prox = mock.Mock(return_value=0)
prox_irq_vnf.q_in = mock.Mock()
prox_irq_vnf.q_out = mock.Mock()
self.assertIsNone(prox_irq_vnf.terminate())
@mock.patch(SSH_HELPER)
def test_wait_for_instantiate_panic(self, ssh, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
mock_ssh(ssh, exec_result=(1, "", ""))
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
prox_irq_vnf._terminated = mock.MagicMock()
prox_irq_vnf._traffic_process = mock.MagicMock()
prox_irq_vnf._traffic_process.terminate = mock.Mock()
prox_irq_vnf.ssh_helper = mock.MagicMock()
prox_irq_vnf.setup_helper = mock.MagicMock()
prox_irq_vnf.resource_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False})
prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock()
prox_irq_vnf._run_prox = mock.Mock(return_value=0)
prox_irq_vnf.q_in = mock.Mock()
prox_irq_vnf.q_out = mock.Mock()
prox_irq_vnf.WAIT_TIME = 0
with self.assertRaises(RuntimeError):
prox_irq_vnf.wait_for_instantiate()
class TestProxIrqGen(unittest.TestCase):
SCENARIO_CFG = {
'task_path': "",
'nodes': {
'tg__1': 'trafficgen_1.yardstick',
'vnf__1': 'vnf.yardstick'},
'runner': {
'duration': 600, 'type': 'Duration'},
'topology': 'prox-tg-topology-2.yaml',
'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
'type': 'NSPerf',
'options': {
'tg__1': {'prox_args': {'-e': '',
'-t': ''},
'prox_config': 'configs/l3-gen-2.cfg',
'prox_path':
'/root/dppd-PROX-v035/build/prox'},
'vnf__1': {
'prox_args': {'-t': ''},
'prox_config': 'configs/l3-swap-2.cfg',
'prox_path': '/root/dppd-PROX-v035/build/prox'}}}
VNFD_0 = {
'short-name': 'VpeVnf',
'vdu': [
{
'routing_table': [
{
'network': '152.16.100.20',
'netmask': '255.255.255.0',
'gateway': '152.16.100.20',
'if': 'xe0'
},
{
'network': '152.16.40.20',
'netmask': '255.255.255.0',
'gateway': '152.16.40.20',
'if': 'xe1'
},
],
'description': 'VPE approximation using DPDK',
'name': 'vpevnf-baremetal',
'nd_route_tbl': [
{
'network': '0064:ff9b:0:0:0:0:9810:6414',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:6414',
'if': 'xe0'
},
{
'network': '0064:ff9b:0:0:0:0:9810:2814',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:2814',
'if': 'xe1'
},
],
'id': 'vpevnf-baremetal',
'external-interface': [
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:03',
'vpci': '0000:05:00.0',
'driver': 'i40e',
'local_ip': '152.16.100.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 0,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.100.20',
'local_mac': '00:00:00:00:00:01'
},
'vnfd-connection-point-ref': 'xe0',
'name': 'xe0'
},
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:04',
'vpci': '0000:05:00.1',
'driver': 'ixgbe',
'local_ip': '152.16.40.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 1,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.40.20',
'local_mac': '00:00:00:00:00:02'
},
'vnfd-connection-point-ref': 'xe1',
'name': 'xe1'
},
],
},
],
'description': 'Vpe approximation using DPDK',
'mgmt-interface': {
'vdu-id': 'vpevnf-baremetal',
'host': '1.1.1.1',
'password': 'r00t',
'user': 'root',
'ip': '1.1.1.1'
},
'benchmark': {
'kpi': [
'packets_in',
'packets_fwd',
'packets_dropped',
],
},
'connection-point': [
{
'type': 'VPORT',
'name': 'xe0',
},
{
'type': 'VPORT',
'name': 'xe1',
},
],
'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
}
VNFD = {
'vnfd:vnfd-catalog': {
'vnfd': [
VNFD_0,
],
},
}
TRAFFIC_PROFILE = {
"schema": "isb:traffic_profile:0.1",
"name": "fixed",
"description": "Fixed traffic profile to run UDP traffic",
"traffic_profile": {
"traffic_type": "FixedTraffic",
"frame_rate": 100, # pps
"flow_number": 10,
"frame_size": 64,
},
}
CONTEXT_CFG = {
'nodes': {
'tg__2': {
'member-vnf-index': '3',
'role': 'TrafficGen',
'name': 'trafficgen_2.yardstick',
'vnfd-id-ref': 'tg__2',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens513f0',
'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.20',
'dst_mac': '00:00:00:00:00:01',
'local_mac': '00:00:00:00:00:03',
'dst_ip': '152.16.40.19',
'driver': 'ixgbe',
'vpci': '0000:02:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens513f1',
'netmask': '255.255.255.0',
'network': '202.16.100.0',
'local_ip': '202.16.100.20',
'local_mac': '00:1e:67:d0:60:5d',
'driver': 'ixgbe',
'vpci': '0000:02:00.1',
'dpdk_port_num': 1,
},
},
'password': 'r00t',
'VNF model': 'l3fwd_vnf.yaml',
'user': 'root',
},
'tg__1': {
'member-vnf-index': '1',
'role': 'TrafficGen',
'name': 'trafficgen_1.yardstick',
'vnfd-id-ref': 'tg__1',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens785f0',
'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.20',
'dst_mac': '00:00:00:00:00:02',
'local_mac': '00:00:00:00:00:04',
'dst_ip': '152.16.100.19',
'driver': 'i40e',
'vpci': '0000:05:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens785f1',
'netmask': '255.255.255.0',
'local_ip': '152.16.100.21',
'local_mac': '00:00:00:00:00:01',
'driver': 'i40e',
'vpci': '0000:05:00.1',
'dpdk_port_num': 1,
},
},
'password': 'r00t',
'VNF model': 'tg_rfc2544_tpl.yaml',
'user': 'root',
},
'vnf__1': {
'name': 'vnf.yardstick',
'vnfd-id-ref': 'vnf__1',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens786f0',
'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.19',
'dst_mac': '00:00:00:00:00:04',
'local_mac': '00:00:00:00:00:02',
'dst_ip': '152.16.100.20',
'driver': 'i40e',
'vpci': '0000:05:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens786f1',
'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.19',
'dst_mac': '00:00:00:00:00:03',
'local_mac': '00:00:00:00:00:01',
'dst_ip': '152.16.40.20',
'driver': 'i40e',
'vpci': '0000:05:00.1',
'dpdk_port_num': 1,
},
},
'routing_table': [
{
'netmask': '255.255.255.0',
'gateway': '152.16.100.20',
'network': '152.16.100.20',
'if': 'xe0',
},
{
'netmask': '255.255.255.0',
'gateway': '152.16.40.20',
'network': '152.16.40.20',
'if': 'xe1',
},
],
'member-vnf-index': '2',
'host': '1.2.1.1',
'role': 'vnf',
'user': 'root',
'nd_route_tbl': [
{
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:6414',
'network': '0064:ff9b:0:0:0:0:9810:6414',
'if': 'xe0',
},
{
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:2814',
'network': '0064:ff9b:0:0:0:0:9810:2814',
'if': 'xe1',
},
],
'password': 'r00t',
'VNF model': 'prox_vnf.yaml',
},
},
}
def test__check_status(self):
prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
with self.assertRaises(NotImplementedError):
prox_irq_gen._check_status()
def test_listen_traffic(self):
prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
prox_irq_gen.listen_traffic(mock.Mock())
def test_verify_traffic(self):
prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
prox_irq_gen.verify_traffic(mock.Mock())
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
@mock.patch(SSH_HELPER)
def test_terminate(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
prox_traffic_gen = ProxIrqGen(VNF_NAME, vnfd)
prox_traffic_gen._terminated = mock.MagicMock()
prox_traffic_gen._traffic_process = mock.MagicMock()
prox_traffic_gen._traffic_process.terminate = mock.Mock()
prox_traffic_gen.ssh_helper = mock.MagicMock()
prox_traffic_gen.setup_helper = mock.MagicMock()
prox_traffic_gen.resource_helper = mock.MagicMock()
prox_traffic_gen._vnf_wrapper.setup_helper = mock.MagicMock()
prox_traffic_gen._vnf_wrapper._vnf_process = mock.MagicMock()
prox_traffic_gen._vnf_wrapper.resource_helper = mock.MagicMock()
self.assertIsNone(prox_traffic_gen.terminate())
def test__wait_for_process(self):
prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
with mock.patch.object(prox_irq_gen, '_check_status',
return_value=0) as mock_status, \
mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc:
mock_proc.is_alive.return_value = True
mock_proc.exitcode = 234
self.assertEqual(prox_irq_gen._wait_for_process(), 234)
mock_proc.is_alive.assert_called_once()
mock_status.assert_called_once()
def test__wait_for_process_not_alive(self):
prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
with mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc:
mock_proc.is_alive.return_value = False
self.assertRaises(RuntimeError, prox_irq_gen._wait_for_process)
mock_proc.is_alive.assert_called_once()
def test__wait_for_process_delayed(self):
prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
with mock.patch.object(prox_irq_gen, '_check_status',
side_effect=[1, 0]) as mock_status, \
mock.patch.object(prox_irq_gen,
'_tg_process') as mock_proc:
mock_proc.is_alive.return_value = True
mock_proc.exitcode = 234
self.assertEqual(prox_irq_gen._wait_for_process(), 234)
mock_proc.is_alive.assert_has_calls([mock.call(), mock.call()])
mock_status.assert_has_calls([mock.call(), mock.call()])
def test_scale(self):
prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
self.assertRaises(y_exceptions.FunctionNotImplemented,
prox_irq_gen.scale)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
@mock.patch(SSH_HELPER)
def test_collect_kpi(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
resource_helper = mock.MagicMock()
core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10,
'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12,
'overflow': 10}
core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0,
'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12,
'overflow': 10}
irq_data = {'core_1': core_1, 'core_2': core_2}
resource_helper.sut.irq_core_stats.return_value = (irq_data)
build_config_file = mock.MagicMock()
build_config_file.return_value = None
prox_irq_gen = ProxIrqGen(VNF_NAME, vnfd)
startup = ["global", [["eal", "-4"]]]
master_0 = ["core 0", [["mode", "master"]]]
core_1 = ["core 1", [["mode", "irq"]]]
core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]]
prox_irq_gen.setup_helper._prox_config_data = \
[startup, master_0, core_1, core_2]
prox_irq_gen.scenario_helper.scenario_cfg = self.SCENARIO_CFG
prox_irq_gen.resource_helper = resource_helper
prox_irq_gen.setup_helper.build_config_file = build_config_file
result = prox_irq_gen.collect_kpi()
self.assertDictEqual(result["collect_stats"], {})
result = prox_irq_gen.collect_kpi()
self.assertFalse('bucket_10' in result["collect_stats"]['core_2'])
self.assertFalse('bucket_11' in result["collect_stats"]['core_2'])
self.assertFalse('bucket_12' in result["collect_stats"]['core_2'])
self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12)
|
python
|
import logging
from logger_config import configure_logging
logger_name = 'root_logger'
configure_logging(logger_name, log_dir='logs')
logger = logging.getLogger(logger_name)
logger.warning('This is warning')
logger.error('This is exception')
logger.info('This is info message')
logger.debug('This is debug message')
|
python
|
# All edits to original document Copyright 2016 Vincent Berthiaume.
#
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.python.framework import dtypes
import collections
#sms-tools stuff
import sys, os, os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../sms-tools/software/models/'))
import utilFunctions as UF
import stft as STFT
from scipy.signal import get_window
from scipy.fftpack import fft, ifft
#ffmpeg + audio stuff
import subprocess as sp
import scikits.audiolab
import bisect
#general stuff?
import numpy as np
import matplotlib.pyplot as plt
from six.moves import cPickle as pickle
import math
# we have 7 music genres
NUM_CLASSES = 7
s_iTrainSize = 8 * NUM_CLASSES # 56 training songs
s_iValid_size = 6 * NUM_CLASSES # 42 validation songs
s_iTestSize = 6 * NUM_CLASSES # 42 test songs
SAMPLE_COUNT = 1 * 44100 # first second of audio at 44.1 kHz
exponent = math.log(SAMPLE_COUNT, 2)+1
TOTAL_INPUTS = 2 ** int(exponent)
FORCE_PICKLING = False
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
overall_song_id = 0
ONE_HOT = False
# LIBRARY_PATH = '/media/kxstudio/LUSSIER/music/'
# LIBRARY_PATH = '/media/sf_stuff_for_virtual_machines/music/'
# LIBRARY_PATH = '/Volumes/Untitled/music/'
#nicolai -- not installed at this point
#LIBRARY_PATH = '/Users/nicolai/Music/vblandr'
#hesse
LIBRARY_PATH = '/mnt/c/Users/barth/Documents/vblandr/'
#gris
#LIBRARY_PATH = '/home/gris/Music/vblandr/'
def write_test_wav(cur_song_samples, str_id = ""):
filename = LIBRARY_PATH +'test'+ str_id +'.wav'
print ("writing", filename)
scikits.audiolab.wavwrite(cur_song_samples, filename, fs=44100, enc='pcm16')
def getAllDataSets(train_dir, dtype=np.float32):
pickle_file = getAllDataPickle(FORCE_PICKLING)
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['wholeTrainDataset']
train_labels = save['wholeTrainLabels']
valid_dataset = save['wholeValidDataset']
valid_labels = save['wholeValidLabels']
test_dataset = save['wholeTestDataset']
test_labels = save['wholeTestLabels']
del save # hint to help gc free up memory
#print('after pickling, Training set', train_dataset.shape, train_labels.shape)
#print('after pickling, Validation set', valid_dataset.shape, valid_labels.shape)
#print('after pickling, Test set', test_dataset.shape, test_labels.shape)
train = DataSet(train_dataset, train_labels, dtype=dtype)
validation = DataSet(valid_dataset, valid_labels, dtype=dtype)
test = DataSet(test_dataset, test_labels, dtype=dtype)
return Datasets(train=train, validation=validation, test=test)
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
class DataSet(object):
def __init__(self, songs, labels, dtype=np.float32):
#global overall_song_id
"""Construct a DataSet. `dtype` can be either `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into `[0, 1]`."""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
#check that we have the same number of songs and labels
assert songs.shape[0] == labels.shape[0], ('songs.shape: %s labels.shape: %s' % (songs.shape, labels.shape))
self._num_examples = songs.shape[0]
#======================= DATA CONVERSION AND SHIT ===============================
#the original range for int16 is [-32768, 32767]
#if dtype == dtypes.float32:
# songs = songs.astype(np.float32) #cast the array into float32
# songs = np.multiply(songs, 1.0 / 65536) #convert int16 range into [-.5, .5]
# songs = np.add(songs, .5) #convert int16 [-.5, .5] range into [0,1.0]
# original code for pixels; #Convert from [0, 255] -> [0.0, 1.0].
#songs = np.multiply(songs, 1.0 / 255.0)
#check that song files are valid
for cur_song, cur_song_dft in enumerate(songs):
if cur_song == 0:
print ("-----DATASET CONSTRUCTOR--------")
print ("max: ", np.amax(cur_song_dft))
print ("min: ", np.amin(cur_song_dft))
print ("mean: ", np.mean(cur_song_dft))
#overall_song_id += 1
#check labels
#use this for issue #3
#labels = dense_to_one_hot(labels, NUM_CLASSES)
#================================================================================
self._songs = songs
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def songs(self):
return self._songs
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._songs = self._songs[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._songs[start:end], self._labels[start:end]
# ENDOF DataSet
def getAllDataPickle(p_bForce=False):
#get relevant paths
trainGenreNames, trainGenrePaths = getAllGenrePaths(LIBRARY_PATH + 'train_small/')
testGenreNames, testGenrePaths = getAllGenrePaths(LIBRARY_PATH + 'test_small/')
pickle_file = LIBRARY_PATH + 'allData.pickle'
#obtain data for each genre in their individual pickle file
allPickledTrainFilenames = getIndividualGenrePickles(trainGenrePaths, p_bForce)
allPickledTestFilenames = getIndividualGenrePickles(testGenrePaths, p_bForce)
#merge and randomize data from all genres into wholedatasets for training, validation, and test
wholeValidDataset, wholeValidLabels, wholeTrainDataset, wholeTrainLabels = getWholeDataFromIndividualGenrePickles(allPickledTrainFilenames, s_iTrainSize, s_iValid_size)
_, _, wholeTestDataset, wholeTestLabels = getWholeDataFromIndividualGenrePickles(allPickledTestFilenames, s_iTestSize)
wholeTrainDataset, wholeTrainLabels = randomize(wholeTrainDataset, wholeTrainLabels)
wholeTestDataset, wholeTestLabels = randomize(wholeTestDataset, wholeTestLabels)
wholeValidDataset, wholeValidLabels = randomize(wholeValidDataset, wholeValidLabels)
#save the data for later reuse:
try:
f = open(pickle_file, 'wb')
save = {'wholeTrainDataset': wholeTrainDataset,
'wholeTrainLabels': wholeTrainLabels,
'wholeValidDataset': wholeValidDataset,
'wholeValidLabels': wholeValidLabels,
'wholeTestDataset': wholeTestDataset,
'wholeTestLabels': wholeTestLabels}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print ('\n================== DATASETS BUILT ================')
return pickle_file
# ENDOF BUILDDATASETS
def getAllGenrePaths(music_dir):
"""return a list of all music genres, e.g., 'audiobook', and their complete path"""
dirs = os.listdir(music_dir)
allAudioGenrePaths = []
allAudioGenres = []
for cur_dir in dirs:
if not cur_dir.startswith('.') and not cur_dir.endswith('pickle') :
allAudioGenrePaths.append(music_dir+cur_dir)
allAudioGenres.append(cur_dir)
return allAudioGenres, allAudioGenrePaths
def getIndividualGenrePickles(p_strDataFolderNames, p_bForce=False):
"""serialize list of data folders in their own pickle files, and return list of pickle filenames"""
all_pickle_filenames = []
for strCurFolderName in p_strDataFolderNames:
cur_pickle_filename = strCurFolderName + '.pickle'
all_pickle_filenames.append(cur_pickle_filename)
if os.path.exists(cur_pickle_filename) and not p_bForce:
print('%s already present - Skipping pickling.' % cur_pickle_filename)
else:
print '\nPickling',
print cur_pickle_filename,
dataset_cur_genre = getDataForGenre(strCurFolderName)
try:
#and try to pickle it
with open(cur_pickle_filename, 'wb') as f:
pickle.dump(dataset_cur_genre, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', cur_pickle_filename, ':', e)
return all_pickle_filenames
def removeInitialSilence(cur_song_pcm):
#using absolute value
env = abs(cur_song_pcm)
env = env.astype(np.float32) #cast the array into float32
max = np.max(env)
env = np.multiply(env, 1.0 / max) #normalize so that max value is == 1.0
#convolving as a way to do a fast moving average
N = 100
env = np.convolve(env, np.ones((N,))/N)[(N-1):]
#detect first non-silent sample
threshold = .01
endOfSilence = bisect.bisect(env,threshold)
print "\nend of silence: ", endOfSilence
return cur_song_pcm[endOfSilence:]
# load data for each genre
def getDataForGenre(genre_folder):
"""figure out the path to all the genre's song files, and how many songs we have"""
global overall_song_id
all_song_paths = []
for path, dirs, files in os.walk(genre_folder):
#insert file in correct label id
for file in files:
if not file.startswith('.') and (file.endswith('.wav') or file.endswith('.mp3')):
all_song_paths.append(path+"/"+file)
#data for cur genre will have shape all_song_paths x TOTAL_INPUTS
#dataset_cur_genre = np.ndarray(shape=(len(all_song_paths), TOTAL_INPUTS), dtype=np.int16)
dataset_cur_genre = np.ndarray(shape=(len(all_song_paths), TOTAL_INPUTS), dtype=np.float32)
songId = 0
#for each song in the current genre
for cur_song_file in all_song_paths:
try:
# convert current song to np.int16 array.
print cur_song_file
cur_song_pcm = songFile2pcm(cur_song_file)
cleaned_cur_song_pcm = removeInitialSilence(cur_song_pcm)
write_test_wav(cur_song_pcm, str(overall_song_id))
overall_song_id = overall_song_id +1
# only keep the first 2x TOTAL_INPUTS samples; since the FFT of a real signal is symmetric, the first half of the spectrum carries all the information
short_cur_song_pcm = cleaned_cur_song_pcm[:2*TOTAL_INPUTS]
#do the fft, keeping only the real numbers, ie the magnitude. mX has same len as cur_song_pcm, but is np.float64
mX = fft(short_cur_song_pcm).real
#only keep the first half since symmetrical, and we know len(mX) is multiple of 2
mX = mX[:len(mX)/2]
#PLOT THE THING
#if songId == 0:
# fft_buffer = np.insert(mX, np.zeros(len(mX)), 0)
# for i in np.arange (len(fft_buffer)/2):
# fft_buffer[i] = fft_buffer[len(fft_buffer)-i-1]
# plt.plot(fft_buffer)
# plt.show()
#need to convert to range 0,1 for tensorflow learning.
max = np.amax(mX)
min = np.amin(mX)
range = max - min
mX = mX - min
mX = mX/range
#and put it in the dataset_cur_genre
dataset_cur_genre[songId, :] = mX
songId += 1
except IOError as e:
print('skipping ', cur_song_file, ':', e)
#in case we skipped some songs, only keep the first songId songs in dataset_cur_genre
dataset_cur_genre = dataset_cur_genre[0:songId, :]
# print('Full dataset_cur_genre tensor:', dataset_cur_genre.shape)
# print('Mean:', np.mean(dataset_cur_genre))
# print('Standard deviation:', np.std(dataset_cur_genre))
return dataset_cur_genre
#END LOAD GENRE
def songFile2pcm(song_path):
song_path2 = song_path + '.wav'
command = [ 'ffmpeg',
'-i', song_path,
'-f', 's16le',
'-acodec', 'pcm_s16le',
'-ar', '44100', # sms tools wavread can only read 44100 Hz
'-ac', '1', # mono file
'-loglevel', 'quiet',
'-'] # instead of writing to an output file, '-' sends the decoded PCM to stdout so it can be read from the pipe below
#run the command
#print(song_path)
print ".",
sys.stdout.flush()
pipe = sp.Popen(command, stdout=sp.PIPE)
#read the output into a numpy array
stdoutdata = pipe.stdout.read()
audio_array = np.fromstring(stdoutdata, dtype=np.int16)
# size = len(audio_array)
# print ("size: ", size)
#export this to a wav file, to test it
# write_test_wav(audio_array)
return audio_array
#END SONGFILE2PCM
# Merge individual genre datasets. Tune s_iTrainSize as needed to be able to fit all data in memory.
# Also create a validation dataset_cur_genre for hyperparameter tuning.
def getWholeDataFromIndividualGenrePickles(p_allPickledFilenames, p_iTrainSize, p_iValidSize=0):
iNum_classes = len(p_allPickledFilenames)
#make empty arrays for validation and training sets and labels
whole_valid_dataset, valid_labels = make_arrays(p_iValidSize, TOTAL_INPUTS, ONE_HOT)
whole_train_dataset, train_labels = make_arrays(p_iTrainSize, TOTAL_INPUTS, ONE_HOT)
#number of items per class. // is integer division in both Python 2 and 3
iNbrOfValidItemsPerClass = p_iValidSize // iNum_classes
iNbrOfTrainItemPerClass = p_iTrainSize // iNum_classes
#figure out useful indexes for the loop
iStartValidId, iStartTrainId = 0, 0
iEndValidId, iEndTrainId = iNbrOfValidItemsPerClass, iNbrOfTrainItemPerClass
iEndListId = iNbrOfValidItemsPerClass+iNbrOfTrainItemPerClass
#for each file in p_allPickledFilenames
for iPickleFileId, strPickleFilename in enumerate(p_allPickledFilenames):
try:
with open(strPickleFilename, 'rb') as f:
cur_genre_dataset = pickle.load(f)
# let's shuffle the items to have random validation and training set. np.random.shuffle suffles only first dimension
np.random.shuffle(cur_genre_dataset)
#if we asked for a validation set, use the first items for it
if whole_valid_dataset is not None:
#the first iNbrOfValidItemsPerClass items in letter_set are used for the validation set
whole_valid_dataset[iStartValidId:iEndValidId, :] = cur_genre_dataset[:iNbrOfValidItemsPerClass, :]
#label all images with the current file id
valid_labels[iStartValidId:iEndValidId] = iPickleFileId
#update ids for the train set
iStartValidId += iNbrOfValidItemsPerClass
iEndValidId += iNbrOfValidItemsPerClass
#the rest of the items are used for the training set
whole_train_dataset[iStartTrainId:iEndTrainId, :] = cur_genre_dataset[iNbrOfValidItemsPerClass:iEndListId, :]
train_labels[iStartTrainId:iEndTrainId] = iPickleFileId
iStartTrainId += iNbrOfTrainItemPerClass
iEndTrainId += iNbrOfTrainItemPerClass
except Exception as e:
print('Unable to process data from', strPickleFilename, ':', e)
raise
return whole_valid_dataset, valid_labels, whole_train_dataset, train_labels
#END OF getWholeDataFromIndividualGenrePickles
def make_arrays(p_iNb_rows, p_iNb_cols, one_hot):
if p_iNb_rows:
#dataset_cur_genre = np.ndarray((p_iNb_rows, p_iNb_cols), dtype=np.int16)
dataset_cur_genre = np.ndarray((p_iNb_rows, p_iNb_cols), dtype=np.float32)
if one_hot:
labels = np.ndarray((p_iNb_rows, NUM_CLASSES), dtype=np.int32)
else:
labels = np.ndarray(p_iNb_rows, dtype=np.int32)
else:
dataset_cur_genre, labels = None, None
return dataset_cur_genre, labels
# Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
def randomize(p_3ddataset_cur_genre, p_vLabels):
#with int x as parameter, np.random.permutation returns a random permutation of np.arange(x)
vPermutation = np.random.permutation(p_vLabels.shape[0])
threeDShuffleddataset_cur_genre = p_3ddataset_cur_genre[vPermutation,:]
threeDShuffledLabels = p_vLabels [vPermutation]
return threeDShuffleddataset_cur_genre, threeDShuffledLabels
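# Hypothetical usage sketch (the batch size is an assumption):
#
#   datasets = getAllDataSets(LIBRARY_PATH)
#   batch_songs, batch_labels = datasets.train.next_batch(16)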
|
python
|
__version__ = '3.0.8'
__buildinfo__ = {'branch': 'BRANCH_NOT_SET', 'last_commit': 'COMMIT_NOT_SET'}
|
python
|
from dash import dcc
import dash_bootstrap_components as dbc # pip install dash-bootstrap-components
from dash import Input, Output, State, html
from app import app
# Lotties: Emil at https://github.com/thedirtyfew/dash-extensions
url_sunlight = "https://assets8.lottiefiles.com/packages/lf20_bknKi1.json"
url_earth = "https://assets10.lottiefiles.com/datafiles/xjh641xEDuQg4qg/data.json"
url5 = "https://assets8.lottiefiles.com/packages/lf20_q6y5ptrh.json"
url6 = "https://assets4.lottiefiles.com/packages/lf20_tN5Ofx.json"
options = dict(loop=True, autoplay=True, rendererSettings=dict(preserveAspectRatio='xMidYMid slice'))
learn_card_1 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"What is KlimaDAO?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_1', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What is KlimaDAO?')),
dbc.ModalBody(
dcc.Markdown(
'''
Klima DAO is a Decentralized Autonomous Organization to drive climate action,
via our carbon-backed, algorithmic currency, the KLIMA token.
As the protocol grows, Klima DAO will solve the critical problems of the carbon markets:
- **Illiquidity**: Carbon Credits come in many different varieties; carbon brokers and
middlemen are used by buyers and sellers, fragmenting the total liquidity of the market.
- **Opacity**: Trades often occur behind closed doors, allowing buyers to underbuy the market.
- **Inefficiency**: buying and retiring carbon credits comes with friction and barriers;
by utilizing the Polygon ecosystem, this friction is removed for all users.
In delivery of its objectives, Klima DAO will become the single biggest disruptor of the
carbon markets and set a precedent for a new monetary system backed by carbon.
Klima DAO will serve the web3 ecosystem by offering accountability for those that
contribute, rewards for stakeholders, and a stake in governance for those that participate.
Klima DAO was inspired by Olympus DAO. It was conceptualized and built by a
distributed pseudo-anonymous team.
Klima is DAO-governed by its community. All decisions are formed by community members on
the forum and made by KLIMA holders through snapshot voting.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_1',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_1",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_2 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"Why KlimaDAO?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_2', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What is the point of KlimaDAO?')),
dbc.ModalBody(
dcc.Markdown(
'''
                            1. Driven Climate Action:
                            Klima DAO incentivizes new supply of Base Carbon Tonnes (BCT) on the blockchain
                            through the KLIMA token. By driving demand into BCT, it incentivizes carbon offset
                            producers to produce more carbon credits, assisting the adoption of new carbon-mitigating
                            or sequestering technology, and disincentivizes companies from offsetting their carbon
                            footprint with carbon credits alone, pushing them instead toward environmentally friendly actions.
                            KLIMA is the first building block for unlocking the carbon economy: an economy where more
                            economic activity leads to an acceleration in planetary regeneration rather than more
                            damage to our planet. Until now, monetary incentives and environmental incentives have not
                            typically been aligned.
                            2. Become a Carbon-Based Reserve Currency:
                            The KLIMA ecosystem and monetary policy are managed by the Klima DAO.
                            This way we guarantee transparent decision making and long-term stability.
                            In the long term, we can use this system to optimize for stability and to transition to a global
                            unit of account and medium of exchange. In the short term, we're focused on
                            growth and wealth creation, to incentivize users to join the new wave of carbon currency.
                            3. Facilitate the Climate Market:
                            The current carbon (and broader climate) markets are illiquid, fragmented,
                            inefficient, and opaque. Because of this, we feel that carbon tonnage is heavily
                            undervalued and its price is forced down by these issues. By eliminating these issues,
                            the true price can be achieved.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_2',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_2",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_3 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"What is Klima?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_4', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What is Klima?')),
dbc.ModalBody(
dcc.Markdown(
'''
                            KLIMA is an algorithmic, carbon-backed currency,
                            inspired by [Olympus DAO](https://www.olympusdao.finance/) and its token mechanics.
                            KlimaDAO incentivizes new supply of Base Carbon Tonnes (BCT) on the blockchain through
                            bonding with the Protocol. Each KLIMA token is backed at a 1:1 ratio by a BCT in the
                            treasury.
                            KlimaDAO leverages the [Toucan Protocol's](https://docs.toucan.earth/protocol/)
                            Carbon Bridge to retire real-world Verified Carbon Units (VCUs) and convert them to a
                            tokenized form on the blockchain; VCUs can be verified from reputable carbon markets in a
                            transparent and traceable manner. The credits are then absorbed through the protocol's
                            bonding mechanism, building a treasury of verified tokenized carbon reductions.
                            This increases the amount of carbon assets locked within the treasury, thereby
                            reducing supply on the open market and leading to price appreciation within the
                            Voluntary Carbon Markets.
                            In summary, KLIMA serves two main purposes:
                            1. It serves as a floating currency and a form of money backed at a 1:1 ratio by voluntary
                            carbon credits.
                            2. It is used to govern the protocol and confer voting power to influence decisions on
                            various policies, including supply expansion mechanics.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_4',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_4",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_4 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"How do I participate?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_3', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('How do I participate in KlimaDAO?')),
dbc.ModalBody(
dcc.Markdown(
'''
1. Klima DAO development:
Join the Discord to become a Klimate and hear about Protocol developments.
Those who wish to be involved in Protocol Governance should also join the Discord
to be onboarded by a member of the team.
2. Participation in the carbon economy:
BCTs are the underlying asset within the KlimaDAO treasury and their flow into the treasury
underpins protocol growth. BCTs can be created from real-world Verified Carbon Units (VCUs)
via the Toucan Protocol. Bonders provide BCT LP or BCT tokens in exchange for discounted
KLIMA tokens after a fixed vesting period. Once KLIMA tokens are held, stakers stake
their KLIMA tokens in return for more KLIMA tokens.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_3',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_3",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_5 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"What is Staking?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_5', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What is Staking?')),
dbc.ModalBody(
dcc.Markdown(
'''
                            - Staking is the primary profit distribution mechanism of the protocol. It is designed
                            to be the primary mechanism of value accrual for the majority of users.
                            For most, the best thing to do is to simply stake and compound the KLIMA acquired.
                            - Whenever the protocol has an excess of reserves per token, it mints
                            and distributes tokens to the stakers. The amount minted and distributed is controlled
                            by a variable called the reward rate:
                            the percentage of the supply that is rebased.
                            For a step-by-step guide on how to stake KLIMA, see the
                            [Community guide](https://klima-dao.notion.site/I-m-new-to-KLIMA-How-do-I-participate-bcf8881862e941a5b5550d1179e123f9)
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_5',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_5",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_6 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"What is Bonding?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_6', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What is Bonding?')),
dbc.ModalBody(
dcc.Markdown(
'''
                            Bonding is the process of trading assets to the protocol for KLIMA. The protocol will quote you an amount of KLIMA
                            for your asset, and the vesting period for the trade. Today, the protocol takes in:
                            1. Reserve Assets: BCT (Base Carbon Tonnes)
                            2. Liquidity Assets: KLIMA/BCT and BCT/USDC SushiSwap LP pairs.
                            Bonding allows you to buy KLIMA at a lower cost basis. Because the protocol can sell at a discount to the market
                            price (as it can mint KLIMA at intrinsic value, IV), you are able to buy KLIMA more cheaply.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_6',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_6",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_7 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"What is Rebasing?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_7', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What is Rebasing?')),
dbc.ModalBody(
dcc.Markdown(
'''
KLIMA is an algorithmic carbon-backed currency,
inspired by [Olympus DAO](https://www.olympusdao.finance/) and their token mechanics.
KlimaDAO incentivises new supply of Base Carbon Tonnes (BCT) on the blockchain through
bonding with the Protocol. Each KLIMA token is backed at a 1:1 ratio with a BCT in the
treasury.
KlimaDAO leverages the [Toucan Protocol's](https://docs.toucan.earth/protocol/)
Carbon Bridge to retire real world Verified Carbon Units (VCUs) and convert them to a
tokenized form on the blockchain, VCUs can be verified from reputable carbon markets in a
transparent and traceable manner. The credits are then absorbed through the protocols'
bonding mechanism, building a treasury of verified tokenized carbon reductions.
This increases the amount of carbon assets locked within the treasury, thereby
reducing supply on the open market and leading to price appreciation within the
Voluntary Carbon Markets.
In summary, Klima serves two main purposes:
1. It serves as a floating currency and a form of money backed at a 1:1 ratio by voluntary
carbon credits.
2. It is used to govern the protocol and confer voting power to influence decisions on
various policies including supply expansion mechanics.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_7',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_7",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_8 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"Participant Goals?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_8', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('Participant Goals?')),
dbc.ModalBody(
dcc.Markdown(
'''
Stakers care primarily about their KLIMA balance. While price is important in valuing their KLIMA and indicating
the market's perception of Klima DAO's utility and impact, it is not the main goal in the shorter-term.
**KLIMA is a long-term play, and maximizing holdings is the objective of stakers.**
A higher price of carbon will be achieved by increasing the quality of carbon removal projects, and creating a system
for producing carbon offsets at scale. A robust system will see a higher BCT price and a higher KLIMA price.
A smart staker cares about the long-term price exploration of BCT tokens and the quality of the TCO2s flowing into the
ecosystem.
Bonders care primarily about the On-chain Carbon Tonne supply and their KLIMA balance. Bonders have their KLIMA and
carbon assets locked in for a period of time, but can redeem KLIMA at a better rate than a staker by relinquishing
their BCTs to the treasury to lock it away indefinitely. Their carbon impact and KLIMA returns from bonding are
proportional to the amount bonded.
In the case where demand is greater than supply, purchasing BCTs and bonding them for new KLIMA will be cheaper
than purchasing KLIMA on the free market.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_8',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_8",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_9 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"What are Carbon Markets?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_9', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What are Carbon Markets?')),
dbc.ModalBody(
dcc.Markdown(
'''
                            Carbon markets are a greenhouse gas trading system implemented to reduce CO2 and other greenhouse gas emissions by
                            putting a price on releasing carbon in the form of carbon offsets, sometimes called carbon credits.
                            Carbon markets are “Cap and Trade” markets. In this system the number of carbon offsets is capped for a particular
                            entity: a company, a government, etc. This allows the entity to release a set amount of emissions.
                            If the entity wants to exceed its set emission level, it needs to trade carbon offsets with other
                            entities that are not using their carbon offsets, or face a fine.
                            Extra credits can be created if participants voluntarily reduce their emissions by using cleaner energy sources or
                            other pollution controls. Over time the cap on emissions is slowly lowered, making carbon offsets scarcer
                            and more expensive, and creating an economic incentive for entities to voluntarily reduce their emissions.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_9',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_9",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_10 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"What is a Carbon Offset?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_10', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('What is a Carbon Offset?')),
dbc.ModalBody(
dcc.Markdown(
'''
                            A carbon offset represents the removal of one tonne of carbon dioxide equivalent from the atmosphere, or the avoidance
                            of one tonne of emissions. The term “carbon dioxide equivalent” is used because there are multiple greenhouse gases,
                            each with a different Global Warming Potential (GWP), which measures the warming impact of a gas relative to CO2.
                            For instance, methane has a GWP 28 times that of CO2. This means a company would need 28 carbon offsets to
                            emit 1 tonne of methane.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_10',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_10",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
learn_card_11 = dbc.Card(
[
dbc.CardBody(
[
dbc.Row(
dbc.Label(
"How are carbon offsets and renewable energy different?", className='emission_card_topic',
),
),
dbc.Row(
dbc.Button(
'Click to learn', id='open_learn_card_11', n_clicks=0,
className='align-self-center'
),
),
dbc.Modal([
dbc.ModalHeader(dbc.ModalTitle('How are carbon offsets and renewable energy different?')),
dbc.ModalBody(
dcc.Markdown(
'''
                            Renewable energy sources produce energy from natural sources, like wind or solar, with little to no carbon emissions.
                            Carbon offsets create a way to reduce the acceptable levels of current emissions over time, provide an economic
                            incentive to reduce emissions voluntarily, and help fund sources of renewable energy.
'''
),
),
dbc.ModalFooter(
dbc.Button(
'close',
id='close_learn_card_11',
className='ms-auto',
n_clicks=0,
)
)
],
id="body_learn_card_11",
scrollable=True,
is_open=False,
),
]
),
],
color="success", # https://bootswatch.com/default/ for more card colors
inverse=True, # change color of text (black or white)
outline=False, # True = remove the block colors from the background and header
className='emission_card_style',
)
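# The eleven cards above share the same structure and differ only in their topic,
# modal title, body text, and index. A hedged refactoring sketch (not used by the
# layout below) showing how a small factory could build an identical card:
def make_learn_card(index, topic, modal_title, body_md):
    """Build a learn card with the same structure as the hand-written ones above."""
    return dbc.Card(
        [
            dbc.CardBody(
                [
                    dbc.Row(dbc.Label(topic, className='emission_card_topic')),
                    dbc.Row(
                        dbc.Button('Click to learn', id=f'open_learn_card_{index}',
                                   n_clicks=0, className='align-self-center')
                    ),
                    dbc.Modal(
                        [
                            dbc.ModalHeader(dbc.ModalTitle(modal_title)),
                            dbc.ModalBody(dcc.Markdown(body_md)),
                            dbc.ModalFooter(
                                dbc.Button('close', id=f'close_learn_card_{index}',
                                           className='ms-auto', n_clicks=0)
                            ),
                        ],
                        id=f'body_learn_card_{index}',
                        scrollable=True,
                        is_open=False,
                    ),
                ]
            ),
        ],
        color="success",
        inverse=True,
        outline=False,
        className='emission_card_style',
    )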
@app.callback(
Output('body_learn_card_1', 'is_open'),
[
Input('open_learn_card_1', 'n_clicks'),
Input('close_learn_card_1', 'n_clicks'),
],
[State('body_learn_card_1', 'is_open')],
)
def toggle_modal1(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_2', 'is_open'),
[
Input('open_learn_card_2', 'n_clicks'),
Input('close_learn_card_2', 'n_clicks'),
],
[State('body_learn_card_2', 'is_open')],
)
def toggle_modal2(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_3', 'is_open'),
[
Input('open_learn_card_3', 'n_clicks'),
Input('close_learn_card_3', 'n_clicks'),
],
[State('body_learn_card_3', 'is_open')],
)
def toggle_modal3(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_4', 'is_open'),
[
Input('open_learn_card_4', 'n_clicks'),
Input('close_learn_card_4', 'n_clicks'),
],
[State('body_learn_card_4', 'is_open')],
)
def toggle_modal4(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_5', 'is_open'),
[
Input('open_learn_card_5', 'n_clicks'),
Input('close_learn_card_5', 'n_clicks'),
],
[State('body_learn_card_5', 'is_open')],
)
def toggle_modal5(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_6', 'is_open'),
[
Input('open_learn_card_6', 'n_clicks'),
Input('close_learn_card_6', 'n_clicks'),
],
[State('body_learn_card_6', 'is_open')],
)
def toggle_modal6(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_7', 'is_open'),
[
Input('open_learn_card_7', 'n_clicks'),
Input('close_learn_card_7', 'n_clicks'),
],
[State('body_learn_card_7', 'is_open')],
)
def toggle_modal7(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_8', 'is_open'),
[
Input('open_learn_card_8', 'n_clicks'),
Input('close_learn_card_8', 'n_clicks'),
],
[State('body_learn_card_8', 'is_open')],
)
def toggle_modal8(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_9', 'is_open'),
[
Input('open_learn_card_9', 'n_clicks'),
Input('close_learn_card_9', 'n_clicks'),
],
[State('body_learn_card_9', 'is_open')],
)
def toggle_modal9(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_10', 'is_open'),
[
Input('open_learn_card_10', 'n_clicks'),
Input('close_learn_card_10', 'n_clicks'),
],
[State('body_learn_card_10', 'is_open')],
)
def toggle_modal10(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
Output('body_learn_card_11', 'is_open'),
[
Input('open_learn_card_11', 'n_clicks'),
Input('close_learn_card_11', 'n_clicks'),
],
[State('body_learn_card_11', 'is_open')],
)
def toggle_modal11(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
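# The eleven callbacks above are identical except for the card index. A hedged
# alternative sketch: if the components were given pattern-matching dict ids
# (e.g. id={'type': 'open_learn_card', 'index': 1}) instead of the string ids
# used above, a single callback could toggle every modal. Registering it here is
# harmless but it only takes effect once the ids are switched to dict form.
from dash import MATCH


@app.callback(
    Output({'type': 'body_learn_card', 'index': MATCH}, 'is_open'),
    Input({'type': 'open_learn_card', 'index': MATCH}, 'n_clicks'),
    Input({'type': 'close_learn_card', 'index': MATCH}, 'n_clicks'),
    State({'type': 'body_learn_card', 'index': MATCH}, 'is_open'),
)
def toggle_any_learn_card(n_open, n_close, is_open):
    # Flip the modal whenever either button has been clicked.
    if n_open or n_close:
        return not is_open
    return is_open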
layout = html.Div([
dbc.Row([
dbc.Col(dbc.Label('Foundations',
className="page_section_topic"))
]),
dbc.Row([
dbc.Col(learn_card_1, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_2, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_3, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_4, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'})
]),
dbc.Row([
dbc.Col(dbc.Label('Protocol Mechanics',
className="page_section_topic"))
]),
dbc.Row([
dbc.Col(learn_card_5, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_6, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_7, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_8, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'})
]),
dbc.Row([
dbc.Col(dbc.Label('Carbon Markets',
className="page_section_topic"))
]),
dbc.Row([
dbc.Col(learn_card_9, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_10, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
dbc.Col(learn_card_11, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        # learn_card_8 already appears in the Protocol Mechanics row above; reusing the
        # same component here would duplicate its ids in the layout and break the callbacks.
]),
])
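# How this module-level `layout` is typically consumed (an assumption about the
# surrounding multi-page setup, inferred from `from app import app`; module and
# path names below are hypothetical): an index file swaps page layouts on URL
# changes, roughly like this.
#
# from dash import dcc, html, Input, Output
# from app import app
# import learn  # hypothetical name of this module
#
# app.layout = html.Div([dcc.Location(id='url'), html.Div(id='page-content')])
#
# @app.callback(Output('page-content', 'children'), Input('url', 'pathname'))
# def display_page(pathname):
#     if pathname == '/learn':
#         return learn.layout
#     return html.Div('404')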