#!/usr/local/bin/python3
import sys
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
def getMNISTData():
print("Preparing data ...")
num_classes = 10
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = (x_train[:1000], y_train[:1000]), (
x_test[:100],
y_test[:100],
)
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples\n")
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return (x_train[:1000], y_train[:1000]), (x_test[:100], y_test[:100])
def getModel():
model = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(16, kernel_size=(3, 3), padding="same", activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), padding="same", activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(128, kernel_size=(3, 3), padding="same", activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dense(32, activation="relu"),
layers.Dense(10, activation="softmax"),
]
)
print("")
model.summary()
print("")
return model
def run(model, train_data, test_data, batch_size, n_epochs):
print(f"Start training ...\n")
x_train, y_train = train_data
x_test, y_test = test_data
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.fit(
x_train, y_train, batch_size=batch_size, epochs=n_epochs, validation_split=0.1
)
print(f"\nStart testing ...")
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
if __name__ == "__main__":
batch_size = 128
n_epochs = 10
train_data, test_data = getMNISTData()
model = getModel()
run(model, train_data, test_data, batch_size, n_epochs)
|
python
|
# Collection of supporting functions for wrapper functions
__author__ = 'AndrewAnnex'
from ctypes import c_char_p, c_bool, c_int, c_double, c_char, c_void_p, sizeof, \
POINTER, pointer, Array, create_string_buffer, create_unicode_buffer, cast, Structure, \
CFUNCTYPE, string_at
import numpy
from numpy import ctypeslib as numpc
import six
errorformat = """
================================================================================
Toolkit version: {tkvsn}
{short} --
{explain}
{long}
{traceback}
================================================================================\
"""
class SpiceyError(Exception):
"""
SpiceyError wraps CSPICE errors.
:type value: str
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def toDoubleVector(x):
return DoubleArray.from_param(param=x)
def toDoubleMatrix(x):
return DoubleMatrix.from_param(param=x)
def toIntVector(x):
return IntArray.from_param(param=x)
def toBoolVector(x):
return BoolArray.from_param(param=x)
def toPythonString(inString):
if six.PY2:
if isinstance(inString, c_char_p):
return toPythonString(inString.value)
return string_at(inString)
elif six.PY3:
if isinstance(inString, c_char_p):
return toPythonString(inString.value)
return bytes.decode(string_at(inString))
def listtocharvector(x):
assert (isinstance(x, list))
return (c_char_p * len(x))(*[stringToCharP(y) for y in x])
def charvector(ndim=1, lenvals=10):
return ((c_char * lenvals) * ndim)()
def listtodoublematrix(data, x=3, y=3):
matrix = ((c_double * x) * y)()
for i, row in enumerate(data):
matrix[i] = tuple(row)
return matrix
def emptyCharArray(xLen=None, yLen=None):
if not yLen:
yLen = 1
if not xLen:
xLen = 1
if isinstance(xLen, c_int):
xLen = xLen.value
if isinstance(yLen, c_int):
yLen = yLen.value
return ((c_char * xLen) * yLen)()
def emptyDoubleMatrix(x=3, y=3):
return ((c_double * x) * y)()
def emptyDoubleVector(n):
if isinstance(n, c_int):
n = n.value
assert(isinstance(n, int))
return (c_double * n)()
def emptyIntVector(n):
if isinstance(n, c_int):
n = n.value
assert (isinstance(n, int))
return (c_int * n)()
def vectorToList(x):
if isinstance(x[0], bool):
return numpy.fromiter(x, numpy.bool, count=len(x))
elif isinstance(x[0], int):
return numpy.fromiter(x, numpy.int_, count=len(x))
elif isinstance(x[0], float):
return numpy.fromiter(x, numpy.float64, count=len(x))
elif isinstance(x[0].value, bytes):
return [toPythonString(y) for y in x]
def matrixToList(x):
return numpc.as_array(x)
def stringToCharP(inobject, inlen=None):
"""
:param inobject: input string, or an int to create a blank string of that length
:param inlen: optional length of the output string buffer
:return:
"""
if inlen and isinstance(inobject, str):
return create_string_buffer(inobject.encode(encoding='UTF-8'), inlen)
if isinstance(inobject, bytes):
return inobject
if isinstance(inobject, c_int):
return stringToCharP(" " * inobject.value)
if isinstance(inobject, int):
return stringToCharP(" " * inobject)
return c_char_p(inobject.encode(encoding='UTF-8'))
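# Illustrative usage of stringToCharP (added sketch, not part of the original
# module). The returned ctypes object depends on the input type:
#   stringToCharP("naif0012.tls")        -> c_char_p(b"naif0012.tls")
#   stringToCharP(32)                    -> c_char_p holding 32 spaces (blank string)
#   stringToCharP("earth", inlen=64)     -> 64-byte create_string_buffer for output arguments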
def listToCharArray(inList, xLen=None, yLen=None):
assert (isinstance(inList, list))
if not yLen:
yLen = len(inList)
if not xLen:
xLen = max(len(s) for s in inList) + 1
if isinstance(xLen, c_int):
xLen = xLen.value
if isinstance(yLen, c_int):
yLen = yLen.value
return ((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList])
def listToCharArrayPtr(inList, xLen=None, yLen=None):
assert (isinstance(inList, list))
if not yLen:
yLen = len(inList)
if not xLen:
xLen = max(len(s) for s in inList) + 1
if isinstance(xLen, c_int):
xLen = xLen.value
if isinstance(yLen, c_int):
yLen = yLen.value
return cast(((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList]), c_char_p)
class DoubleArrayType:
# Class type that will handle all double vectors, inspiration from python cookbook 3rd edition
def from_param(self, param):
typename = type(param).__name__
if hasattr(self, 'from_' + typename):
return getattr(self, 'from_' + typename)(param)
elif isinstance(param, Array):
return param
else:
raise TypeError("Can't convert %s" % typename)
# Cast from lists/tuples
def from_list(self, param):
val = ((c_double) * len(param))(*param)
return val
# Cast from Tuple
def from_tuple(self, param):
val = ((c_double) * len(param))(*param)
return val
# Cast from a numpy array,
def from_ndarray(self, param):
# return param.data_as(POINTER(c_double))
# the above older method does not work with functions which take vectors of known size
return numpy.ctypeslib.as_ctypes(param)
# Cast from array.array objects
def from_array(self, param):
if param.typecode != 'd':
raise TypeError('must be an array of doubles')
ptr, _ = param.buffer_info()
return cast(ptr, POINTER(c_double))
class DoubleMatrixType:
# Class type that will handle all double matrices, inspiration from python cookbook 3rd edition
def from_param(self, param):
typename = type(param).__name__
if hasattr(self, 'from_' + typename):
return getattr(self, 'from_' + typename)(param)
elif isinstance(param, Array):
return param
else:
raise TypeError("Can't convert %s" % typename)
# Cast from lists/tuples
def from_list(self, param):
val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])
return val
# Cast from Tuple
def from_tuple(self, param):
val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])
return val
# Cast from a numpy array
def from_ndarray(self, param):
#return param.data_as(POINTER(c_double))
return numpy.ctypeslib.as_ctypes(param)
# Cast from a numpy matrix
def from_matrix(self, param):
#return param.data_as(POINTER(c_double))
return numpy.ctypeslib.as_ctypes(param)
class IntArrayType:
# Class type that will handle all int vectors, inspiration from python cookbook 3rd edition
def from_param(self, param):
typename = type(param).__name__
if hasattr(self, 'from_' + typename):
return getattr(self, 'from_' + typename)(param)
elif isinstance(param, Array):
return param
else:
raise TypeError("Can't convert %s" % typename)
# Cast from lists/tuples
def from_list(self, param):
val = ((c_int) * len(param))(*param)
return val
# Cast from Tuple
def from_tuple(self, param):
val = ((c_int) * len(param))(*param)
return val
# Cast from a numpy array
def from_ndarray(self, param):
#return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be..
#return numpy.ctypeslib.as_ctypes(param)
return self.from_param(param.tolist())
# Cast from array.array objects
def from_array(self, param):
if param.typecode != 'i':
raise TypeError('must be an array of ints')
ptr, _ = param.buffer_info()
return cast(ptr, POINTER(c_int))
class BoolArrayType:
# Class type that will handle all bool vectors, inspiration from python cookbook 3rd edition
def from_param(self, param):
typename = type(param).__name__
if hasattr(self, 'from_' + typename):
return getattr(self, 'from_' + typename)(param)
elif isinstance(param, Array):
return param
else:
raise TypeError("Can't convert %s" % typename)
# Cast from lists/tuples
def from_list(self, param):
val = ((c_bool) * len(param))(*param)
return val
# Cast from Tuple
def from_tuple(self, param):
val = ((c_bool) * len(param))(*param)
return val
# Cast from a numpy array
def from_ndarray(self, param):
#return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be..
#return numpy.ctypeslib.as_ctypes(param)
return self.from_param(param.tolist())
DoubleArray = DoubleArrayType()
IntArray = IntArrayType()
BoolArray = BoolArrayType()
DoubleMatrix = DoubleMatrixType()
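# Illustrative usage of the converter singletons defined above (added sketch, not
# part of the original module). The to*Vector helpers accept lists, tuples,
# numpy arrays or array.array objects and return ctypes arrays ready to be
# passed into CSPICE:
#   vec = toDoubleVector([1.0, 2.0, 3.0])        # (c_double * 3) array
#   ivec = toIntVector((4, 5, 6))                # (c_int * 3) array
#   mat = toDoubleMatrix([[1.0, 0.0, 0.0],
#                         [0.0, 1.0, 0.0],
#                         [0.0, 0.0, 1.0]])      # (c_double * 3) * 3 array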
class Plane(Structure):
_fields_ = [
('_normal', c_double * 3),
('_constant', c_double)
]
@property
def normal(self):
return vectorToList(self._normal)
@property
def constant(self):
return self._constant
def __str__(self):
return '<Plane: normal=%s; constant=%s>' % (', '.join([str(x) for x in self._normal]), self._constant)
class Ellipse(Structure):
_fields_ = [
('_center', c_double * 3),
('_semi_major', c_double * 3),
('_semi_minor', c_double * 3)
]
@property
def center(self):
return vectorToList(self._center)
@property
def semi_major(self):
return vectorToList(self._semi_major)
@property
def semi_minor(self):
return vectorToList(self._semi_minor)
def __str__(self):
return '<SpiceEllipse: center = %s, semi_major = %s, semi_minor = %s>' % \
(self.center, self.semi_major, self.semi_minor)
class DataType(object):
SPICE_CHR = 0
SPICE_DP = 1
SPICE_INT = 2
SPICE_TIME = 3
SPICE_BOOL = 4
CHR = 0
DP = 1
INT = 2
TIME = 3
BOOL = 4
def __init__(self):
pass
class SpiceEKDataType(c_int):
_fields_ = [
('SPICE_CHR', c_int(0)),
('SPICE_DP', c_int(1)),
('SPICE_INT', c_int(2)),
('SPICE_TIME', c_int(3)),
('SPICE_BOOL', c_int(4)),
]
class SpiceEKExprClass(c_int):
_fields_ = [
('SPICE_EK_EXP_COL', c_int(0)),
('SPICE_EK_EXP_FUNC', c_int(1)),
('SPICE_EK_EXP_EXPR', c_int(2))
]
class SpiceEKAttDsc(Structure):
_fields_ = [
('_cclass', c_int),
('_dtype', SpiceEKDataType),
('_strlen', c_int),
('_size', c_int),
('_indexd', c_bool),
('_nullok', c_bool)
]
@property
def cclass(self):
return self._cclass
@property
def dtype(self):
return self._dtype.value
@property
def strlen(self):
return self._strlen
@property
def size(self):
return self._size
@property
def indexd(self):
return self._indexd
@property
def nullok(self):
return self._nullok
def __str__(self):
return '<SpiceEKAttDsc cclass = %s, dtype = %s, strlen = %s, size = %s, indexd = %s, nullok = %s >' % \
(self.cclass, self.dtype, self.strlen, self.size, self.indexd, self.nullok)
class SpiceEKSegSum(Structure):
_fields_ = [
('_tabnam', c_char * 65),
('_nrows', c_int),
('_ncols', c_int),
('_cnames', (c_char * 100) * 33),
('_cdescrs', SpiceEKAttDsc * 100)
]
@property
def tabnam(self):
return toPythonString(self._tabnam)
@property
def nrows(self):
return self._nrows
@property
def ncols(self):
return self._ncols
@property
def cnames(self):
return vectorToList(self._cnames)[0:self.ncols]
@property
def cdescrs(self):
return self._cdescrs[0:self.ncols]
def __str__(self):
return '<SpiceEKSegSum tabnam = %s, nrows = %s, ncols = %s, cnames = %s, cdescrs = %s >' % (self.tabnam, self.nrows, self.ncols, self.cnames, self.cdescrs)
#SpiceCell implementation below is in part from github.com/DaRasch/spiceminer/
# and modified as needed by this author, maybe we should work together?
### helper classes/functions ###
BITSIZE = {'char': sizeof(c_char), 'int': sizeof(c_int), 'double': sizeof(c_double)}
def _char_getter(data_p, index, length):
return toPythonString((c_char * length).from_address(data_p + index * length * BITSIZE['char']))
def _double_getter(data_p, index, length):
return c_double.from_address(data_p + index * BITSIZE['double']).value
def _int_getter(data_p, index, length):
return c_int.from_address(data_p + index * BITSIZE['int']).value
def SPICEDOUBLE_CELL(size):
return SpiceCell.double(size)
def SPICEINT_CELL(size):
return SpiceCell.integer(size)
def SPICECHAR_CELL(size, length):
return SpiceCell.character(size, length)
class SpiceCell(Structure):
#Most written by DaRasch
DATATYPES_ENUM = {'char': 0, 'double': 1, 'int': 2, 'time': 3, 'bool': 4}
DATATYPES_GET = [_char_getter, _double_getter] + [_int_getter] * 3
baseSize = 6
minCharLen = 6
CTRLBLOCK = 6
_fields_ = [
('dtype', c_int),
('length', c_int),
('size', c_int),
('card', c_int),
('isSet', c_int),
('adjust', c_int),
('init', c_int),
('base', c_void_p),
('data', c_void_p)
]
def __init__(self, dtype=None, length=None, size=None, card=None, isSet=None, base=None, data=None):
super(SpiceCell, self).__init__()
self.dtype = dtype
self.length = length
self.size = size
self.card = card
self.isSet = isSet
self.adjust = 0 # Always False, because not implemented
self.init = 0 # Always False, because this is the constructor
self.base = base # void pointer
self.data = data
def __str__(self):
return '<SpiceCell dtype = %s, length = %s, size = %s, card = %s, isSet = %s, adjust = %s, init = %s, base = %s, data = %s>' % (self.dtype, self.length, self.size, self.card, self.isSet, self.adjust, self.init, self.base, self.data)
def is_int(self):
return self.dtype == 2
def is_double(self):
return self.dtype == 1
def is_char(self):
return self.dtype == 0
def is_time(self):
return self.dtype == 3
def is_bool(self):
return self.dtype == 4
def is_set(self):
return self.isSet == 1
@classmethod
def character(cls, size, length):
base = (c_char * ((cls.CTRLBLOCK + size) * length))()
data = (c_char * (size * length)).from_buffer(
base, cls.CTRLBLOCK * BITSIZE['char'] * length)
instance = cls(cls.DATATYPES_ENUM['char'], length, size, 0, 1,
cast(base, c_void_p),
cast(data, c_void_p))
return instance
@classmethod
def integer(cls, size):
base = (c_int * (cls.CTRLBLOCK + size))()
data = (c_int * size).from_buffer(
base, cls.CTRLBLOCK * BITSIZE['int'])
instance = cls(cls.DATATYPES_ENUM['int'], 0, size, 0, 1,
cast(base, c_void_p),
cast(data, c_void_p))
return instance
@classmethod
def double(cls, size):
base = (c_double * (cls.CTRLBLOCK + size))()
data = (c_double * size).from_buffer(
base, cls.CTRLBLOCK * BITSIZE['double'])
instance = cls(cls.DATATYPES_ENUM['double'], 0, size, 0, 1,
cast(base, c_void_p),
cast(data, c_void_p))
return instance
def __len__(self):
return self.card
def __iter__(self):
getter = SpiceCell.DATATYPES_GET[self.dtype]
length, card, data = self.length, self.card, self.data
for i in range(card):
yield (getter(data, i, length))
def __contains__(self, key):
return key in self.__iter__()
def __getitem__(self, key):
getter = SpiceCell.DATATYPES_GET[self.dtype]
length, card, data = self.length, self.card, self.data
if isinstance(key, slice):
start, stop, step = key.start or 0, key.stop or -1, key.step or 1
#TODO Typechecking
if card == 0:
return []
else:
return list(getter(data, i, length)
for i in range(start % card, stop % card + 1, step))
if key in range(-card, card):
return getter(data, key, length)
elif not isinstance(key, int):
msg = 'SpiceCell indices must be integers, not {}'.format(type(key))
raise TypeError(msg)
else:
raise IndexError('SpiceCell index out of range')
def reset(self):
self.card = 0
self.init = 0
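# Illustrative construction of SPICE cells via the factories above (added sketch,
# not part of the original module). Cells are usually created empty and filled by
# CSPICE routines (e.g. coverage or window functions); only the Python-side
# protocol is shown here:
#   cover = SPICEDOUBLE_CELL(2000)    # double cell with room for 2000 values
#   ids = SPICEINT_CELL(1000)         # integer cell
#   names = SPICECHAR_CELL(100, 32)   # 100 strings of up to 32 characters each
#   len(cover), list(cover), 5.0 in cover   # length, iteration and membership use the cell's card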
|
python
|
#!/bin/python
if __name__ == "__main__":
from picomc import main
main()
|
python
|
import datetime
import logging
import json
import os
import socket
from config import Configuration, CannotLoadConfiguration
from io import StringIO
from loggly.handlers import HTTPSHandler as LogglyHandler
class JSONFormatter(logging.Formatter):
hostname = socket.gethostname()
fqdn = socket.getfqdn()
if len(fqdn) > len(hostname):
hostname = fqdn
def format(self, record):
message = record.msg
if record.args:
try:
message = record.msg % record.args
except TypeError as e:
raise e
data = dict(
host=self.hostname,
app="simplified",
name=record.name,
level=record.levelname,
filename=record.filename,
message=message,
timestamp=datetime.datetime.utcnow().isoformat()
)
if record.exc_info:
data['traceback'] = self.formatException(record.exc_info)
return json.dumps(data)
class StringFormatter(logging.Formatter):
"""Encode all output as a string.
In Python 2, this means a UTF-8 bytestring. In Python 3, it means a
Unicode string.
"""
def format(self, record):
data = super(StringFormatter, self).format(record)
return str(data)
class LogConfiguration(object):
"""Configures the active Python logging handlers based on logging
configuration from the database.
"""
DEFAULT_MESSAGE_TEMPLATE = "%(asctime)s:%(name)s:%(levelname)s:%(filename)s:%(message)s"
DEFAULT_LOGGLY_URL = "https://logs-01.loggly.com/inputs/%(token)s/tag/python/"
DEBUG = "DEBUG"
INFO = "INFO"
WARN = "WARN"
ERROR = "ERROR"
JSON_LOG_FORMAT = 'json'
TEXT_LOG_FORMAT = 'text'
# Settings for the integration with protocol=INTERNAL_LOGGING
LOG_LEVEL = 'log_level'
LOG_FORMAT = 'log_format'
DATABASE_LOG_LEVEL = 'database_log_level'
LOG_MESSAGE_TEMPLATE = 'message_template'
@classmethod
def initialize(cls, _db, testing=False):
"""Make the logging handlers reflect the current logging rules
as configured in the database.
:param _db: A database connection. If this is None, the default logging
configuration will be used.
:param testing: True if unit tests are currently running; otherwise
False.
"""
log_level, database_log_level, new_handlers = (
cls.from_configuration(_db, testing)
)
# Replace the set of handlers associated with the root logger.
logger = logging.getLogger()
logger.setLevel(log_level)
old_handlers = list(logger.handlers)
for handler in new_handlers:
logger.addHandler(handler)
for handler in old_handlers:
logger.removeHandler(handler)
# Set the loggers for various verbose libraries to the database
# log level, which is probably higher than the normal log level.
for logger in (
'sqlalchemy.engine', 'elasticsearch',
'requests.packages.urllib3.connectionpool',
):
logging.getLogger(logger).setLevel(database_log_level)
# These loggers can cause infinite loops if they're set to
# DEBUG, because their log is triggered during the process of
# logging something to Loggly. These loggers will never have their
# log level set lower than WARN.
if database_log_level == cls.ERROR:
loop_prevention_log_level = cls.ERROR
else:
loop_prevention_log_level = cls.WARN
for logger in ['urllib3.connectionpool']:
logging.getLogger(logger).setLevel(loop_prevention_log_level)
return log_level
@classmethod
def from_configuration(cls, _db, testing=False):
"""Return the logging policy as configured in the database.
:param _db: A database connection. If None, the default
logging policy will be used.
:param testing: A boolean indicating whether a unit test is
happening right now. If True, the database configuration will
be ignored in favor of a known test-friendly policy. (It's
okay to pass in False during a test *of this method*.)
:return: A 3-tuple (internal_log_level, database_log_level,
handlers). `internal_log_level` is the log level to be used
for most log messages. `database_log_level` is the log level
to be applied to the loggers for the database connector and
other verbose third-party libraries. `handlers` is a list of
Handler objects that will be associated with the top-level
logger.
"""
# Establish defaults, in case the database is not initialized or
# it is initialized but logging is not configured.
(internal_log_level, internal_log_format, database_log_level,
message_template) = cls._defaults(testing)
handlers = []
from model import ExternalIntegration
if _db and not testing:
goal = ExternalIntegration.LOGGING_GOAL
internal = ExternalIntegration.lookup(
_db, ExternalIntegration.INTERNAL_LOGGING, goal
)
loggly = ExternalIntegration.lookup(
_db, ExternalIntegration.LOGGLY, goal
)
if internal:
internal_log_level = internal.setting(cls.LOG_LEVEL).setdefault(
internal_log_level
)
internal_log_format = internal.setting(cls.LOG_FORMAT).setdefault(
internal_log_format
)
database_log_level = internal.setting(cls.DATABASE_LOG_LEVEL).setdefault(
database_log_level
)
message_template = internal.setting(cls.LOG_MESSAGE_TEMPLATE).setdefault(
message_template
)
if loggly:
handlers.append(cls.loggly_handler(loggly))
# handlers is either empty or it contains a loggly handler.
# Let's also add a handler that logs to standard error.
handlers.append(logging.StreamHandler())
for handler in handlers:
cls.set_formatter(
handler, internal_log_format, message_template
)
return internal_log_level, database_log_level, handlers
@classmethod
def _defaults(cls, testing=False):
"""Return default log configuration values."""
if testing:
internal_log_level = 'DEBUG'
internal_log_format = cls.TEXT_LOG_FORMAT
else:
internal_log_level = 'INFO'
internal_log_format = cls.JSON_LOG_FORMAT
database_log_level = 'WARN'
message_template = cls.DEFAULT_MESSAGE_TEMPLATE
return (internal_log_level, internal_log_format, database_log_level,
message_template)
@classmethod
def set_formatter(cls, handler, log_format, message_template):
"""Tell the given `handler` to format its log messages in a
certain way.
"""
if (log_format==cls.JSON_LOG_FORMAT
or isinstance(handler, LogglyHandler)):
formatter = JSONFormatter()
else:
formatter = StringFormatter(message_template)
handler.setFormatter(formatter)
@classmethod
def loggly_handler(cls, externalintegration):
"""Turn a Loggly ExternalIntegration into a log handler.
"""
token = externalintegration.password
url = externalintegration.url or cls.DEFAULT_LOGGLY_URL
if not url:
raise CannotLoadConfiguration(
"Loggly integration configured but no URL provided."
)
try:
url = cls._interpolate_loggly_url(url, token)
except (TypeError, KeyError) as e:
raise CannotLoadConfiguration(
"Cannot interpolate token %s into loggly URL %s" % (
token, url,
)
)
return LogglyHandler(url)
@classmethod
def _interpolate_loggly_url(cls, url, token):
if '%s' in url:
return url % token
if '%(' in url:
return url % dict(token=token)
# Assume the token is already in the URL.
return url
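# Minimal illustration of the formatters above (added sketch, not part of the
# original module). Outside of LogConfiguration.initialize(), a JSONFormatter can
# be attached to any handler by hand:
#   handler = logging.StreamHandler()
#   handler.setFormatter(JSONFormatter())
#   logging.getLogger().addHandler(handler)
# Each record is then emitted as one JSON object with host, app, name, level,
# filename, message, timestamp and, when exc_info is set, a traceback field.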
|
python
|
from genericpath import exists
import os, sys
from os.path import join
import tempfile
import shutil
import json
try:
curr_path = os.path.dirname(os.path.abspath(__file__))
teedoc_project_path = os.path.abspath(os.path.join(curr_path, "..", "..", ".."))
if os.path.basename(teedoc_project_path) == "teedoc":
sys.path.insert(0, teedoc_project_path)
except Exception:
pass
from teedoc import Plugin_Base
from teedoc import Fake_Logger
__version__ = "1.1.2"
class Plugin(Plugin_Base):
name = "teedoc-plugin-assets"
desc = "add assets(css js) support for teedoc"
defautl_config = {
"header_items": [],
"footer_items": [],
"env":{}
}
def on_init(self, config, doc_src_path, site_config, logger = None, multiprocess = True, **kw_args):
'''
@config a dict object
@logger teedoc.logger.Logger object
'''
self.logger = Fake_Logger() if not logger else logger
self.doc_src_path = doc_src_path
self.site_config = site_config
self.config = Plugin.defautl_config
# check config
for key in self.config["env"]:
if not key in config["env"]:
self.logger.e('you MUST set env var "{}" for assets plugin in site_config'.format(key))
self.config.update(config)
self.logger.i("-- plugin <{}> init".format(self.name))
self.logger.i("-- plugin <{}> config: {}".format(self.name, self.config))
self.files_to_copy = {}
self.html_header_items = []
self.html_footer_items = []
for item in self.config["header_items"]:
if item.startswith("/"):
path = os.path.join(self.doc_src_path, item[1:])
if os.path.exists(path):
if path.endswith(".js"):
self.html_header_items.append(f'<script src="{item}"></script>')
self.files_to_copy[item] = path
elif path.endswith(".css"):
self.html_header_items.append(f'<link rel="stylesheet" href="{item}" type="text/css"/>')
self.files_to_copy[item] = path
else:
self.logger.e(f"config: url {item} not support! you can use html tag instead")
else:
self.logger.e(f"config: url {item} wrong, file {path} no found ")
else:
self.html_header_items.append(item)
for item in self.config["footer_items"]:
if item.startswith("/"):
path = os.path.join(self.doc_src_path, item[1:])
if os.path.exists(path):
if path.endswith(".js"):
self.html_footer_items.append(f'<script src="{item}"></script>')
self.files_to_copy[item] = path
elif path.endswith(".css"):
self.html_footer_items.append(f'<link rel="stylesheet" href="{item}" type="text/css"/>')
self.files_to_copy[item] = path
else:
self.logger.e(f"config: url {item} not support! you can use html tag instead")
else:
self.logger.e(f"config: url {item} wrong, file {path} no found ")
elif item.startswith("http"):
if item.endswith(".js"):
self.html_footer_items.append(f'<script src="{item}"></script>')
elif item.endswith(".css"):
self.html_footer_items.append(f'<link rel="stylesheet" href="{item}" type="text/css"/>')
else:
self.logger.e(f"config: url {item} not support! you can use html tag instead")
else:
self.html_footer_items.append(item)
self.temp_dir = os.path.join(tempfile.gettempdir(), "teedoc_plugin_assets")
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
os.makedirs(self.temp_dir)
self.files_to_copy = self._update_file_var(self.files_to_copy, self.config["env"], self.temp_dir)
def on_add_html_header_items(self, type_name):
return self.html_header_items
def on_add_html_footer_js_items(self, type_name):
return self.html_footer_items
def on_copy_files(self):
res = self.files_to_copy
self.files_to_copy = {}
return res
def on_del(self):
if os.path.exists(self.temp_dir):
try:
shutil.rmtree(self.temp_dir)
except Exception:
pass
def _update_file_var(self, files, vars, temp_dir):
for url, path in files.items():
with open(path, encoding='utf-8') as f:
content = f.read()
for k, v in vars.items():
content = content.replace("${}{}{}".format("{", k.strip(), "}"), str(v))
temp_path = os.path.join(temp_dir, os.path.basename(path))
with open(temp_path, "w", encoding='utf-8') as fw:
fw.write(content)
files[url] = temp_path
return files
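# Worked example of the substitution in _update_file_var above (illustrative, not
# part of the original plugin): with config["env"] = {"version": "1.0.0"}, a copied
# asset containing the text 'src="/static/app-${version}.js"' is rewritten in its
# temp-dir copy as 'src="/static/app-1.0.0.js"' before being published.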
if __name__ == "__main__":
config = {
}
plug = Plugin(config=config)
|
python
|
from .base_settings import *
import os
ALLOWED_HOSTS = ['*']
INSTALLED_APPS += [
'django_prometheus',
'django.contrib.humanize',
'django_user_agents',
'supporttools',
'rc_django',
]
TEMPLATES[0]['OPTIONS']['context_processors'].extend([
'supporttools.context_processors.supportools_globals'
])
if os.getenv('ENV') == 'localdev':
DEBUG = True
RESTCLIENTS_DAO_CACHE_CLASS = None
|
python
|
'''
This script loads an already trained CNN and prepares the Qm.f notation for each layer. Weights and activations are considered.
This distribution is used by net_descriptor to build a new prototxt file to fine-tune the quantized weights and activations.
List of functions; for further details see below:
- forward_pass
- get_qmf
- activation
- weights
Author: Moritz Milde
Date: 03.11.2016
E-Mail: [email protected]
'''
import numpy as np
import caffe
class distribute_bits():
def __init__(self):
self.caffe_root = '/home/moritz/Repositories/caffe_lp/'
self.model_dir = 'examples/low_precision/imagenet/models/'
self.weight_dir = '/media/moritz/Data/ILSVRC2015/pre_trained/'
self.n_bits = 16
def forward_pass(self):
'''
This function performs the forward pass to extract activations from the network.
The network is stored on self to prevent multiple forward passes, which usually end in a kernel crash.
Input:
- self
.net_prototxt: holds the path to the prototxt file (type: string)
.net_weights: holds the path to the caffemodel file (type: string)
Output:
- self.net: caffe instance of the network after the forward pass
net is used later to extract activations and propose the Qm.f notation
'''
self.net = caffe.Net(self.net_prototxt, self.net_weights, caffe.TEST)
self.net.forward()
def get_qmf(self, x, key=None, debug=False):
'''
This function estimates the minimum number of integer bits (m) to represent the largest number
in either activation or weights.
Input:
- x: current blob flattened, e.g. blob.data.flatten() for activation or
net.params[key][1].data.flatten() for weights (type: caffe blob)
- key: Identification key of the current layer. Only used for debugging (type: string)
- debug: Flag to turn printing of helpful information on and off (type: bool)
Output:
- m: Number of bits needed to represent integer part of maximum weight/activation value (type: int)
- f: Number of bits available to represent fractional part after m was estimated (type: int)
'''
m = 0
while np.max(x) > 2 ** m:
if m > self.n_bits - 1:
break
m += 1
f = self.n_bits - m
if debug:
print 'Layer ' + str(key) + ': ' 'Max: ' + str(np.max(x))
print 'Layer ' + str(key) + ': ' 'Min: ' + str(np.min(x[np.nonzero(x)]))
return m, f
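# Worked example for get_qmf (added for illustration): with n_bits = 16 and a
# blob whose largest value is 5.3, the loop stops at m = 3 (the smallest m with
# 5.3 <= 2**m) and f = 16 - 3 = 13, i.e. a Q3.13 format before the callers below
# reserve one extra bit for the sign.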
def activation(self, net_name, n_bits=None, load_mode='high_precision', threshold=0.1,
caffe_root=None, model_dir=None,
weight_dir=None, debug=False):
'''
This function distributes a given number of bits optimally between the integer and fractional parts of a fixed-point number,
based on (I) the minimum number of bits required to represent the largest activation value, i.e. the integer part, and
on (II) the percentage of values we would lose with a given m.
Input:
- net_name: A string which refers to the network, e.g. VGG16 or GoogleNet (type: string)
- n_bits: Number of available bits, e.g. 16. Default is 16 (type: int)
- load_mode: A flag to select the right layers. The keys differ between high and low precision;
can either be 'high_precision' or 'low_precision'. Default is 'high_precision'.
low_precision should only be used if weights/activations of a network trained in low precision
should be quantized further to a smaller number of bits (type: string)
- threshold: Threshold regulating how many parameters we allow to be dropped (0.1 == 10 %)
with a given number of integer bits, before we fix the Qm.f
- caffe_root: Path to your caffe_lp folder (type: string)
- model_dir: Relative path from caffe_root to the model directory (where .prototxt files are located). This is usually
examples/low_precision/imagenet/models/
Please change accordingly! (type: string)
- weight_dir: Path where you want to save the .caffemodel files, e.g. on your HDD (type: string)
- debug: Flag to turn printing of helpful information on and off (type: bool)
'''
if model_dir is not None:
self.model_dir = model_dir
if weight_dir is not None:
self.weight_dir = weight_dir
if caffe_root is not None:
self.caffe_root = caffe_root
if n_bits is not None:
self.n_bits = n_bits
self.net_prototxt = self.caffe_root + self.model_dir + net_name + '_deploy.prototxt'
# try:
# self.net_weights = self.weight_dir + net_name + '.caffemodel.h5'
# except RuntimeError:
self.net_weights = self.weight_dir + net_name + '/' + net_name + '_original.caffemodel'
if debug:
print 'Checking if network was already simulated... '
# if 'self.net' not in locals() or 'self.net' not in globals():
if not hasattr(self, 'net'):
if debug:
print 'No. Doing forward pass'
distribute_bits.forward_pass(self)
if debug:
print 'Forward pass done'
else:
if debug:
print 'Yes'
i = 0
if load_mode == 'high_precision':
select_key1 = 'conv'
select_key2 = 'fc'
# We have to subtract 2 since we have to ignore split layers
bit_distribution = np.zeros((2, len(filter(lambda x: select_key1 in x, self.net.blobs.keys())) +
len(filter(lambda x: select_key2 in x, self.net.blobs.keys()))))
if debug:
print 'Bit distribution activation: {}'.format(np.shape(bit_distribution))
else:
select_key = 'act'
bit_distribution = np.zeros((2, len(filter(lambda x: select_key in x, self.net.blobs.keys()))))
if debug:
print 'Starting extracting activation distribution layer-wise'
print '-------------------'
for key, blob in self.net.blobs.items():
if load_mode == 'high_precision':
if select_key2 in key:
select_key = select_key2
else:
select_key = select_key1
if 'split' in key:
continue
if select_key in key: # VERIFY FOR HIGH PRECISION VGG16!!
# do all l's in layers have an activation?
# only act and pooling
# check indices low prec. should be index 1
# Calculate number of bits (Qm.f)
m, f = distribute_bits.get_qmf(self, blob.data.flatten(), key, debug)
assert (m + f) <= self.n_bits, 'Too many bits assigned!'
if debug:
print key
print 'Before optimization:\nNumber of integer bits: {} \nNumber of fractional bits: {}'.format(m, f)
# If we already cover the entire dynamic range
# distribute the remaining bits randomly between m & f
while (m + f < self.n_bits):
coin_flip = np.random.rand()
if coin_flip > 0.5:
m += 1
else:
f += 1
cut = 0
while cut < threshold:
cut = np.sum(blob.data.flatten() > 2**m - 1) / float(len(blob.data.flatten()))
if m < 2:
break
m -= 1
if debug:
print 'During optimization:\nNumber of integer bits: {} \nFraction of ignored parameters: {}'.format(m, cut)
# Account for sign bit!!!
m += 1
assert m > 0, 'No sign bit reserved!'
f = self.n_bits - m
if debug:
print 'After optimization:\nNumber of integer bits: {} \nNumber of fractional bits: {}'.format(m, f)
bit_distribution[0, i] = m
bit_distribution[1, i] = f
i += 1
if debug:
print 'Done: ' + str(key)
print '-------------------'
return bit_distribution, self.net
def weights(self, net_name, n_bits=None, load_mode='high_precision', threshold=0.1,
caffe_root=None, model_dir=None,
weight_dir=None, debug=False):
'''
This function distributes a given number of bits optimally between the integer and fractional parts of a fixed-point number,
based on (I) the minimum number of bits required to represent the largest weight value, i.e. the integer part, and
on (II) the percentage of values we would lose with a given m.
Input:
- net_name: A string which refers to the network, e.g. VGG16 or GoogleNet (type: string)
- n_bits: Number of available bits, e.g. 16. Default is 16 (type: int)
- load_mode: A flag to select the right layers. The keys differ between high and low precision;
can either be 'high_precision' or 'low_precision'. Default is 'high_precision'.
low_precision should only be used if weights/activations of a network trained in low precision
should be quantized further to a smaller number of bits (type: string)
- threshold: Threshold regulating how many parameters we allow to be dropped (0.1 == 10 %)
with a given number of integer bits, before we fix the Qm.f
- caffe_root: Path to your caffe_lp folder (type: string)
- model_dir: Relative path from caffe_root to the model directory (where .prototxt files are located). This is usually
examples/low_precision/imagenet/models/
Please change accordingly! (type: string)
- weight_dir: Path where you want to save the .caffemodel files, e.g. on your HDD (type: string)
- debug: Flag to turn printing of helpful information on and off (type: bool)
'''
if model_dir is not None:
self.model_dir = model_dir
if weight_dir is not None:
self.weight_dir = weight_dir
if caffe_root is not None:
self.caffe_root = caffe_root
if n_bits is not None:
self.n_bits = n_bits
self.net_prototxt = self.caffe_root + self.model_dir + net_name + '_deploy.prototxt'
# check if h5 or not??
self.net_weights = self.weight_dir + net_name + '/' + net_name + '_original.caffemodel'
if debug:
print 'Checking if network was already simulated... '
# if 'self.net' not in locals() or 'self.net' not in globals():
if not hasattr(self, 'net'):
if debug:
print 'No. Doing forward pass'
distribute_bits.forward_pass(self)
if debug:
print 'Forward pass done'
else:
if debug:
print 'Yes!'
# Specify which images are loaded in one batch?
if load_mode == 'high_precision':
select_key1 = 'conv'
select_key2 = 'fc'
else:
select_key1 = 'conv_lp'
select_key2 = 'fc_lp'
i = 0
if debug:
print 'Starting extracting weight distribution layer-wise'
print '-------------------'
print self.net.blobs.keys()
bit_distribution = np.zeros((2, len(filter(lambda x: select_key1 in x, self.net.blobs.keys())) +
len(filter(lambda x: select_key2 in x, self.net.blobs.keys()))))
if debug:
print np.shape(bit_distribution)
# we have to subtract 2 since normally the last fc layer splits into two accuracy layers
for key in self.net.blobs.keys():
if select_key1 in key or select_key2 in key: # VERIFY FOR HIGH PRECISION VGG16!!
# Caffe introduces a split layer from the 1000-way classifier to the Accuracy layer and the Softmax layer, for example.
# To not use these layers, since they also contain the key, we have to explicitly skip them
if 'split' in key:
continue
# 0 HP Weights, 1 LP Weights, 2 HP Biases, 3 LP Biases
# Calculate number of bits (Qm.f)
m, f = distribute_bits.get_qmf(self, self.net.params[key][1].data.flatten(), key, debug)
assert (m + f) <= self.n_bits, 'Too many bits assigned!'
if debug:
print key
print 'Before optimization:\nNumber of integer bits: {} \nNumber of fractional bits: {}'.format(m, f)
# If we already cover the entire dynamic range
# distribute the remaining bits randomly between m & f
while (m + f < self.n_bits):
coin_flip = np.random.rand()
if coin_flip > 0.5:
m += 1
else:
f += 1
cut = 0
while cut < threshold:
cut = np.sum(self.net.params[key][1].data.flatten() > 2**m - 1) / float(len(self.net.params[key][1].data.flatten()))
if m < 2:
break
m -= 1
if debug:
print 'During optimization:\nNumber of integer bits: {} \nFraction of ignored parameters: {}'.format(m, cut)
m += 1
assert m > 0, 'No sign bit reserved!'
f = self.n_bits - m
if debug:
print 'After optimization:\nNumber of integer bits: {} \nNumber of fractional bits: {}'.format(m, f)
bit_distribution[0, i] = m
bit_distribution[1, i] = f
i += 1
if debug:
print 'Done: ' + str(key)
print '-------------------'
return bit_distribution, self.net
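# Illustrative call sequence (added sketch, not part of the original script; the
# network name and directories are placeholders that must match your setup):
#   db = distribute_bits()
#   act_bits, net = db.activation('VGG16', n_bits=16, debug=True)
#   w_bits, net = db.weights('VGG16', n_bits=16, debug=True)
# Each call returns a 2 x n_layers array whose first row holds the integer bits (m)
# and whose second row holds the fractional bits (f) per layer, ready to be
# consumed by net_descriptor.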
|
python
|
from timemachines.skatertools.evaluation.evaluators import chunk_to_end
def test_chunk_from_end():
ys = [1,2,3,4,5,6,7,8]
chunks = chunk_to_end(ys,5)
assert len(chunks[0])==5
assert len(chunks)==1
assert chunks[0][0]==4
|
python
|
from typing import NamedTuple, Optional
class ConnectionProperties(NamedTuple):
origin: str
port: Optional[int]
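# Illustrative usage (added sketch): the port is optional, e.g.
#   ConnectionProperties(origin="https://example.com", port=8080)
#   ConnectionProperties(origin="https://example.com", port=None)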
|
python
|
"""cli.py - Command line interface related routines"""
import logging
import os
import pathlib
from itertools import chain
import click
import click_logging
from ocdsextensionregistry import ProfileBuilder
from ocdskit.util import detect_format
from spoonbill import FileAnalyzer, FileFlattener
from spoonbill.common import COMBINED_TABLES, ROOT_TABLES, TABLE_THRESHOLD
from spoonbill.flatten import FlattenOptions
from spoonbill.i18n import LOCALE, _
from spoonbill.utils import read_lines, resolve_file_uri
LOGGER = logging.getLogger("spoonbill")
click_logging.basic_config(LOGGER)
CURRENT_SCHEMA_TAG = "1__1__5"
ANALYZED_LABEL = _(" Processed {} objects")
FLATTENED_LABEL = _(" Flattened {} objects")
class CommaSeparated(click.ParamType):
"""Click option type to convert comma separated string into list"""
name = "comma"
def convert(self, value, param, ctx): # noqa
if not value:
return []
return [v.lower() for v in value.split(",")]
def read_option_file(option, option_file):
if option_file:
option = read_lines(option_file)
return option
def get_selected_tables(base, selection):
for name in selection:
if name not in base:
msg = _("Wrong selection, table '{}' does not exist").format(name)
raise click.BadParameter(msg)
return {name: tab for name, tab in base.items() if name in selection}
# TODO: we could provide two commands: flatten and analyze
# TODO: generated state-file + schema how to validate
@click.command(help=_("CLI tool to flatten OCDS datasets"))
@click.option("--schema", help=_("Schema file uri"), type=str)
@click.option("--selection", help=_("List of tables to extract"), type=CommaSeparated())
@click.option(
"--split",
help=_("List of tables to split into multiple sheets"),
type=CommaSeparated(),
default="",
)
@click.option(
"--threshold",
help=_("Maximum number of elements in array before its spitted into table"),
type=int,
default=TABLE_THRESHOLD,
)
@click.option(
"--state-file",
help=_("Uri to previously generated state file"),
type=click.Path(exists=True),
)
@click.option("--xlsx", help=_("Path to result xlsx file"), type=click.Path(), default="result.xlsx")
@click.option("--csv", help=_("Path to directory for output csv files"), type=click.Path(), required=False)
@click.option("--combine", help=_("Combine same objects to single table"), type=CommaSeparated())
@click.option(
"--unnest",
help=_("Extract columns form child tables to parent table"),
type=CommaSeparated(),
default="",
)
@click.option(
"--unnest-file",
help=_("Same as --unnest, but read columns from a file"),
type=click.Path(exists=True),
required=False,
)
@click.option("--only", help=_("Specify which fields to output"), type=CommaSeparated(), default="")
@click.option(
"--only-file",
help=_("Same as --only, but read columns from a file"),
type=click.Path(exists=True),
required=False,
)
@click.option(
"--repeat",
help=_("Repeat a column from a parent sheet onto child tables"),
type=CommaSeparated(),
default="",
)
@click.option(
"--repeat-file",
help=_("Same as --repeat, but read columns from a file"),
type=click.Path(exists=True),
required=False,
)
@click.option(
"--count", help=_("For each array field, add a count column to the parent table"), is_flag=True, default=False
)
@click.option(
"--human",
help=_("Use the schema's title properties for column headings"),
is_flag=True,
)
@click.option(
"--language",
help=_("Language for headings"),
default=LOCALE.split("_")[0],
type=click.Choice(["en", "es"]),
)
@click_logging.simple_verbosity_option(LOGGER)
@click.argument("filename", type=click.Path(exists=True))
def cli(
filename,
schema,
selection,
split,
threshold,
state_file,
xlsx,
csv,
combine,
unnest,
unnest_file,
only,
only_file,
repeat,
repeat_file,
count,
human,
language,
):
"""Spoonbill cli entry point"""
click.echo(_("Detecting input file format"))
# TODO: handle line separated json
# TODO: handle single release/record
(
input_format,
_is_concatenated,
_is_array,
) = detect_format(filename)
if csv:
csv = pathlib.Path(csv).resolve()
if not csv.exists():
raise click.BadParameter(_("Desired location {} does not exists").format(csv))
if xlsx:
xlsx = pathlib.Path(xlsx).resolve()
if not xlsx.parent.exists():
raise click.BadParameter(_("Desired location {} does not exists").format(xlsx.parent))
click.echo(_("Input file is {}").format(click.style(input_format, fg="green")))
is_package = "package" in input_format
combine_choice = combine if combine else ""
if not is_package:
# TODO: fix this
click.echo("Single releases are not supported by now")
return
if schema:
schema = resolve_file_uri(schema)
if "release" in input_format:
root_key = "releases"
if not schema:
click.echo(_("No schema provided, using version {}").format(click.style(CURRENT_SCHEMA_TAG, fg="cyan")))
profile = ProfileBuilder(CURRENT_SCHEMA_TAG, {})
schema = profile.release_package_schema()
else:
root_key = "records"
if not schema:
click.echo(_("No schema provided, using version {}").format(click.style(CURRENT_SCHEMA_TAG, fg="cyan")))
profile = ProfileBuilder(CURRENT_SCHEMA_TAG, {})
schema = profile.record_package_schema()
title = schema.get("title", "").lower()
if not title:
raise ValueError(_("Incomplete schema, please make sure your data is correct"))
if "package" in title:
# TODO: is this a good way to get the release/record schema?
schema = schema["properties"][root_key]["items"]
path = pathlib.Path(filename)
workdir = path.parent
filename = path.name
selection = selection or ROOT_TABLES.keys()
combine = combine or COMBINED_TABLES.keys()
root_tables = get_selected_tables(ROOT_TABLES, selection)
combined_tables = get_selected_tables(COMBINED_TABLES, combine)
if state_file:
click.secho(_("Restoring from provided state file"), bold=True)
analyzer = FileAnalyzer(workdir, state_file=state_file)
else:
click.secho(_("State file not supplied, going to analyze input file first"), bold=True)
analyzer = FileAnalyzer(
workdir,
schema=schema,
root_key=root_key,
root_tables=root_tables,
combined_tables=combined_tables,
language=language,
table_threshold=threshold,
)
click.echo(_("Analyze options:"))
click.echo(_(" - table threshold => {}").format(click.style(str(threshold), fg="cyan")))
click.echo(_(" - language => {}").format(click.style(language, fg="cyan")))
click.echo(_("Processing file: {}").format(click.style(str(path), fg="cyan")))
total = path.stat().st_size
progress = 0
# Progress bar not showing with small files
# https://github.com/pallets/click/pull/1296/files
with click.progressbar(width=0, show_percent=True, show_pos=True, length=total) as bar:
for read, number in analyzer.analyze_file(filename, with_preview=True):
bar.label = ANALYZED_LABEL.format(click.style(str(number), fg="cyan"))
bar.update(read - progress)
progress = read
click.secho(
_("Done processing. Analyzed objects: {}").format(click.style(str(number + 1), fg="red")), fg="green"
)
state_file = pathlib.Path(f"{filename}.state")
state_file_path = workdir / state_file
click.echo(_("Dumping analyzed data to '{}'").format(click.style(str(state_file_path.absolute()), fg="cyan")))
analyzer.dump_to_file(state_file)
click.echo(_("Flattening file: {}").format(click.style(str(path), fg="cyan")))
if unnest and unnest_file:
raise click.UsageError(_("Conflicting options: unnest and unnest-file"))
if repeat and repeat_file:
raise click.UsageError(_("Conflicting options: repeat and repeat-file"))
if only and only_file:
raise click.UsageError(_("Conflicting options: only and only-file"))
options = {"selection": {}, "count": count}
unnest = read_option_file(unnest, unnest_file)
repeat = read_option_file(repeat, repeat_file)
only = read_option_file(only, only_file)
for name in selection:
table = analyzer.spec[name]
if table.total_rows == 0:
click.echo(_("Ignoring empty table {}").format(click.style(name, fg="red")))
continue
unnest = [col for col in unnest if col in table.combined_columns]
if unnest:
click.echo(
_("Unnesting columns {} for table {}").format(
click.style(",".join(unnest), fg="cyan"), click.style(name, fg="cyan")
)
)
only = [col for col in only if col in table]
if only:
click.echo(
_("Using only columns {} for table {}").format(
click.style(",".join(only), fg="cyan"), click.style(name, fg="cyan")
)
)
repeat = [col for col in repeat if col in table]
if repeat:
click.echo(
_("Repeating columns {} in all child table of {}").format(
click.style(",".join(repeat), fg="cyan"), click.style(name, fg="cyan")
)
)
options["selection"][name] = {
"split": split or analyzer.spec[name].should_split,
"pretty_headers": human,
"unnest": unnest,
"only": only,
"repeat": repeat,
}
options = FlattenOptions(**options)
flattener = FileFlattener(
workdir,
options,
analyzer.spec.tables,
root_key=root_key,
csv=csv,
xlsx=xlsx,
language=language,
)
all_tables = chain([table for table in flattener.flattener.tables.keys()], combine_choice)
click.echo(_("Going to export tables: {}").format(click.style(",".join(all_tables), fg="magenta")))
click.echo(_("Processed tables:"))
for table in flattener.flattener.tables.keys():
message = _("{}: {} rows").format(table, flattener.flattener.tables[table].total_rows)
if not flattener.flattener.tables[table].is_root:
message = "└-----" + message
click.echo(message)
else:
click.echo(message)
click.echo(_("Flattening input file"))
with click.progressbar(
flattener.flatten_file(filename),
length=analyzer.spec.total_items + 1,
width=0,
show_percent=True,
show_pos=True,
) as bar:
for count in bar:
bar.label = FLATTENED_LABEL.format(click.style(str(count + 1), fg="cyan"))
click.secho(_("Done flattening. Flattened objects: {}").format(click.style(str(count + 1), fg="red")), fg="green")
|
python
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add Attestor public key command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.container.binauthz import apis
from googlecloudsdk.api_lib.container.binauthz import attestors
from googlecloudsdk.api_lib.container.binauthz import kms
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container.binauthz import exceptions
from googlecloudsdk.command_lib.container.binauthz import flags
from googlecloudsdk.command_lib.container.binauthz import pkix
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class AddBeta(base.Command):
"""Add a public key to an Attestor."""
@classmethod
def Args(cls, parser):
flags.AddConcepts(
parser,
flags.GetAttestorPresentationSpec(
required=True,
positional=False,
group_help=(
'The attestor to which the public key should be added.'),
),
)
pgp_group = parser.add_mutually_exclusive_group(required=True)
pgp_group.add_argument(
'--public-key-file',
action=actions.DeprecationAction(
'public-key-file',
warn='This flag is deprecated. Use --pgp-public-key-file instead.'),
type=arg_parsers.FileContents(),
help='The path to the file containing the '
'ASCII-armored PGP public key to add.')
pgp_group.add_argument(
'--pgp-public-key-file',
type=arg_parsers.FileContents(),
help='The path to the file containing the '
'ASCII-armored PGP public key to add.')
parser.add_argument(
'--comment', help='The comment describing the public key.')
def Run(self, args):
api_version = apis.GetApiVersion(self.ReleaseTrack())
attestors_client = attestors.Client(api_version)
attestor_ref = args.CONCEPTS.attestor.Parse()
# TODO(b/71700164): Validate the contents of the public key file.
return attestors_client.AddPgpKey(
attestor_ref,
pgp_pubkey_content=args.public_key_file or args.pgp_public_key_file,
comment=args.comment)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AddAlpha(base.Command):
"""Add a public key to an Attestor."""
@classmethod
def Args(cls, parser):
flags.AddConcepts(
parser,
flags.GetAttestorPresentationSpec(
required=True,
positional=False,
group_help=(
'The attestor to which the public key should be added.'),
),
)
parser.add_argument(
'--comment', help='The comment describing the public key.')
key_group = parser.add_group(mutex=True, required=True)
pgp_group = key_group.add_group()
pgp_group.add_argument(
'--pgp-public-key-file',
type=arg_parsers.FileContents(),
help='The path to the file containing the '
'ASCII-armored PGP public key to add.')
kms_group = key_group.add_group()
flags.AddConcepts(
kms_group,
flags.GetCryptoKeyVersionPresentationSpec(
base_name='keyversion',
required=True,
positional=False,
use_global_project_flag=False,
group_help=textwrap.dedent("""\
The Cloud KMS (Key Management Service) CryptoKeyVersion whose
public key will be added to the attestor.""")),
)
pkix_group = key_group.add_group()
pkix_group.add_argument(
'--pkix-public-key-file',
required=True,
type=arg_parsers.FileContents(),
help='The path to the file containing the PKIX public key to add.')
pkix_group.add_argument(
'--pkix-public-key-algorithm',
choices=pkix.GetAlgorithmMapper().choices,
required=True,
help=textwrap.dedent("""\
The signing algorithm of the associated key. This will be used to
verify the signatures associated with this key."""))
parser.add_argument(
'--public-key-id-override',
type=str,
help=textwrap.dedent("""\
If provided, the ID to replace the default API-generated one. All IDs
must be valid URIs as defined by RFC 3986
(https://tools.ietf.org/html/rfc3986).
When creating Attestations to be verified by this key, one must always
provide this custom ID as the public key ID."""))
def Run(self, args):
api_version = apis.GetApiVersion(self.ReleaseTrack())
attestors_client = attestors.Client(api_version)
attestor_ref = args.CONCEPTS.attestor.Parse()
if args.pgp_public_key_file and args.public_key_id_override:
raise exceptions.InvalidArgumentError(
'--public-key-id-override may not be used with old-style PGP keys')
if args.keyversion:
key_resource = args.CONCEPTS.keyversion.Parse()
public_key = kms.Client().GetPublicKey(key_resource.RelativeName())
return attestors_client.AddPkixKey(
attestor_ref,
pkix_pubkey_content=public_key.pem,
pkix_sig_algorithm=attestors_client.ConvertFromKmsSignatureAlgorithm(
public_key.algorithm),
id_override=(args.public_key_id_override or
kms.GetKeyUri(key_resource)),
comment=args.comment)
elif args.pkix_public_key_file:
alg_mapper = pkix.GetAlgorithmMapper(api_version)
return attestors_client.AddPkixKey(
attestor_ref,
pkix_pubkey_content=args.pkix_public_key_file,
pkix_sig_algorithm=alg_mapper.GetEnumForChoice(
args.pkix_public_key_algorithm),
id_override=args.public_key_id_override,
comment=args.comment)
else:
# TODO(b/71700164): Validate the contents of the public key file.
return attestors_client.AddPgpKey(
attestor_ref,
pgp_pubkey_content=args.pgp_public_key_file,
comment=args.comment)
|
python
|
# CSC486 - Spring 2022
# Author: Dr. Patrick Shepherd
# NOTE: This file contains several functions, some of which already do something
# when run, even before you start writing code. For your convenience, in the
# main function, you may want to comment out the functions you are not currently
# using so they are not running each time you modify the code.
import networkx as nx
import matplotlib.pyplot as plt
# A convenient function to create an undirected scale free graph.
def undirected_scale_free_graph(n):
H = nx.scale_free_graph(n)
G = nx.Graph()
for (u, v) in H.edges():
G.add_edge(u, v)
del H
return G
def task1():
# Task 1: Examine some Erdos Renyi random graphs (named G1)
# to see how the parameter 'p' affects them.
n = 100
# Modify this parameter and run the code again
p = .05
G1 = nx.erdos_renyi_graph(n, p)
nx.draw_networkx(G1)
plt.show()
def task2():
# Task 2: Create a small world graph named G2
# The function you will call is nx.watts_strogatz_graph
# Call the function with parameters n, k, and p, in that order.
n = 100
k = 10
p = .3
# Create the variable G2 here, then plot the network as above.
def task3():
# Task 3: Create a scale free network named G3.
# The function you will call is nx.scale_free_graph
# The function only takes the parameter n.
n = 100
# Create the variable G3 here, then plot the network as above.
def task4():
# Task 4: Fill in the for loop below.
# Inside the loop, create a new random network, collect graph metric values,
# and plot them.
n = 100
for i in range(21):
# Set the current iteration's value of p
p = i*.05
# Create a new random network here
# Gather metric values here
# The x-coordinate list is already made for you.
# Pass it as the first argument to plt.scatter().
x = [p for j in range(n)]
# Plot the current set of points here
# Show the network
plt.show()
def task5and6():
# Task 5, 6: Fill in the for loop below.
# Inside the loop, create a new small world network, collect graph metric values,
# and plot them.
n = 100
for i in range(21):
# Task 6: after completing task 5, modify this parameter (k) and observe how the metric values change.
k = 3
# Set the current iteration's value of p
p = i*.05
# Create a new small world network here
# Gather metric values here
# The x-coordinate list is already made for you.
# Pass it as the first argument to plt.scatter().
x = [p for j in range(n)]
# Plot the current set of points here
# Show the network
plt.show()
def worked_example():
###############################################################
# WORKED EXAMPLE #
###############################################################
n = 100
p = .2
k = 4
# First, we create one of each network, using most of the parameters above.
G4 = nx.erdos_renyi_graph(n, p)
G5 = nx.watts_strogatz_graph(n, k, p)
G6 = undirected_scale_free_graph(n)
# Then, we collect the closeness centrality scores for all vertices in
# each network.
# These are dictionaries, in which the keys are vertex indices (0, 1, 2, ...)
# and values are the corresponding centrality scores for each vertex.
close4 = nx.closeness_centrality(G4)
close5 = nx.closeness_centrality(G5)
close6 = nx.closeness_centrality(G6)
# A handy way to get the values from a dictionary as a 1D list.
# NOTE: This is all we need to do here, as we don't need to know which
# score corresponds to which vertex in this case. We are just plotting
# all the scores from each network as a group.
    y4 = list(close4.values())
    y5 = list(close5.values())
    y6 = list(close6.values())
# We will plot the scores out in such a way that all vertex scores from the
# random graph are on the left, all small world scores are in the middle,
# and all scale free scores are on the right. These lists are just meant
# to hold the x-values of each score so that we can plot them together.
# This way, all random network scores will be displayed vertically above
# x=1, all small world scores above x=2, and all scale free scores above
# x=3.
x4 = [1 for i in range(n)]
x5 = [2 for i in range(n)]
x6 = [3 for i in range(n)]
# Finally, we can use the function plt.scatter(x, y), where x and y are
# either numbers or lists of numbers, and are the coordinates of the points
# to plot. In other words, to plot three points, one at (1, 3), one at
# (2, 4), and one at (6, 5), you would call
# plt.scatter( [1, 2, 6], [3, 4, 5] )
# You can call plt.scatter as many times as you like before displaying the
# plot, and each call will place dots on the screen of a different color.
# Since there are three calls made below, the dots on the plot show up in
# three differently colored groups.
plt.scatter(x4, y4)
plt.scatter(x5, y5)
plt.scatter(x6, y6)
# Once you have plotted all your points, call plt.show() to display the plot.
plt.show()
def main():
task1()
task2()
task3()
worked_example()
task5and6()
if __name__ == '__main__':
main()
|
python
|
import os
from PyQt5.QtCore import QThread, pyqtSignal
from lib.ffmpegRunner import Runner
from lib.logger import Log
class Controller(QThread):
output = pyqtSignal(str)
def __init__(self,):
QThread.__init__(self, parent=None)
self.__runner = None
self.__outputFile = None
def setOutputFile(self, outputFile):
self.__outputFile = outputFile
self.start()
def run(self):
if not self.__check():
Log.e("Input file does not exists")
self.__showErrorDialog("File path does not exists")
return
self.__runner = Runner(self.__outputFile)
try:
Log.i("Started recording")
self.__runner.runCommand()
except ChildProcessError:
self.__showErrorDialog("Error running FFmpeg")
def terminate(self):
if self.__runner is not None:
self.__runner.terminateCommand()
self.output.emit("Recording saved.")
else:
self.output.emit("Recording has not yet started.")
def __showErrorDialog(self, message):
self.output.emit(message)
def __check(self):
return os.path.isdir(os.path.split(self.__outputFile)[0])
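# Illustrative wiring sketch (not part of the original module). Assumes a
# running QApplication and a slot `onOutput(str)` defined elsewhere:
#
#   controller = Controller()
#   controller.output.connect(onOutput)
#   controller.setOutputFile("/tmp/recording.mkv")  # hypothetical path; starts the thread
#   ...
#   controller.terminate()                          # stops ffmpeg and emits a status message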
|
python
|
# https://arcade.academy/examples/array_backed_grid_sprites_1.html#array-backed-grid-sprites-1
"""
Array Backed Grid Shown By Sprites
Show how to use a two-dimensional list/array to back the display of a
grid on-screen.
This version syncs the grid to the sprite list in one go using resync_grid_with_sprites.
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.array_backed_grid_sprites_1
"""
import arcade
from typing import Tuple, Optional
from random import choice, randint
class GridCell:
"""Grid cell "content" without margin.
    This exposes the geometric properties of the grid cell and allows
    modifying the value in the structure storing all values of the grid.
    """
def __init__(self, position_lower_left_corner: Tuple[float, float], length: float, flat_index: int, row: int, column: int, value_container):
self.x_min = position_lower_left_corner[0]
self.y_min = position_lower_left_corner[1]
self.length = length
self.x_max = self.x_min + length
self.y_max = self.y_min + length
self.x_center = self.x_min + length / 2
self.y_center = self.y_min + length / 2
self.flat_index = flat_index
self.row = row
self.column = column
self._value_container = value_container
@property
def value(self):
return self._value_container[self.flat_index]
@value.setter
def value(self, value):
self._value_container[self.flat_index] = value
class GridOfSquares:
def __init__(self, row_count: int, column_count: int, grid_length: float, margin_width: float, initial_value=None):
self.row_count = row_count
self.column_count = column_count
self.grid_length = grid_length
self.margin_width = margin_width
self.width = grid_length * column_count + margin_width * (column_count + 1)
self.height = grid_length * row_count + margin_width * (row_count + 1)
self.data = self.row_count * self.column_count * [initial_value]
def _index_from(self, row: int, column: int) -> int:
if row >= self.row_count:
raise IndexError(f"Grid has only {self.row_count} rows, row {row} was requested.")
if column >= self.column_count:
raise IndexError(f"Grid has only {self.column_count} column, column {column} was requested.")
index = row * self.column_count + column
return index
def _row_column_from(self, index: int) -> Tuple[int, int]:
column = index % self.column_count
row = index // self.column_count
return row, column
def __getitem__(self, key) -> GridCell:
if isinstance(key, int):
index = key
row, column = self._row_column_from(index)
elif len(key) == 2:
row, column = key
index = self._index_from(row, column)
else:
raise KeyError(f"Unable to handle index type {type(key)}.")
if index >= len(self.data):
raise IndexError
else:
x = self.margin_width + column * (self.grid_length + self.margin_width)
y = self.margin_width + row * (self.grid_length + self.margin_width)
return GridCell((x, y), self.grid_length, index, row, column, self.data)
def __setitem__(self, key, value):
if isinstance(key, int):
self.data[key] = value
elif len(key) == 2:
index = self._index_from(key[0], key[1])
self.data[index] = value
else:
raise KeyError
def cell_at(self, position: Tuple[float, float]) -> Optional[GridCell]:
x, y = position
column = int(x // (self.grid_length + self.margin_width))
row = int(y // (self.grid_length + self.margin_width))
if 0 <= row < self.row_count and 0 <= column < self.column_count:
return self[row, column]
else:
return None
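# Quick illustration (not part of the game logic) of how GridOfSquares is
# indexed: cells can be addressed by flat index or by (row, column), and
# writing GridCell.value stores into the shared `data` list.
#
#   grid = GridOfSquares(row_count=2, column_count=3, grid_length=30,
#                        margin_width=5, initial_value="unknown")
#   grid[1, 2].value = "goal"   # same storage slot as grid.data[5]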
class Schatzsuche(arcade.Window):
"""
Main application class.
"""
def __init__(self, row_count: int, column_count: int, grid_length_px: int, margin_width_px: int, title: str):
"""
Set up the application.
"""
# We can store/access the data in this grid using index [row, column].
self.grid = GridOfSquares(row_count, column_count, grid_length_px, margin_width_px, "unknown")
super().__init__(self.grid.width, self.grid.height, title)
self.goal_row = randint(1, row_count-2)
self.goal_column = randint(1, column_count-2)
self.number_of_search_operations = 0
arcade.set_background_color(arcade.color.BLACK)
# We use the sprites for drawing the grid cells.
self.grid_sprite_list = arcade.SpriteList()
for cell in self.grid:
sprite = self._make_sprite("unknown", cell)
# show goal for debugging/learning
# if cell.row == self.goal_row and cell.column == self.goal_column:
# sprite.color = arcade.color.GREEN
self.grid_sprite_list.append(sprite)
def _make_sprite(self, direction: str, cell: GridCell) -> arcade.Sprite:
if direction in ["right", "up", "down"]:
resource = ":resources:images/tiles/signRight.png"
elif direction == "left":
resource = ":resources:images/tiles/signLeft.png"
elif direction == "goal":
resource = ":resources:images/items/gold_1.png"
elif direction == "unknown":
resource = ":resources:images/tiles/sandCenter.png"
else:
raise ValueError(f"Unknown direction {direction}")
new_sprite = arcade.Sprite(
resource,
center_x=cell.x_center,
center_y=cell.y_center,
)
if direction == "up":
new_sprite.angle = 90
elif direction == "down":
new_sprite.angle = 270
new_sprite.width = cell.length
new_sprite.height = cell.length
return new_sprite
def resync_grid_with_sprites(self):
for cell in self.grid:
if cell.value == "unknown":
continue
else:
new_sprite = self._make_sprite(cell.value, cell)
self.grid_sprite_list[cell.flat_index] = new_sprite
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
self.grid_sprite_list.draw()
def on_key_press(self, key, modifiers):
if key == arcade.key.Q:
raise SystemExit()
def on_mouse_press(self, x, y, button, modifiers):
"""
Called when the user presses a mouse button.
"""
cell = self.grid.cell_at((x, y))
if cell is not None:
if cell.value == "unknown":
if self.goal_row == cell.row and self.goal_column == cell.column:
cell.value = "goal"
print(f"Yay -- you found it with {self.number_of_search_operations} tries.")
else:
self.number_of_search_operations += 1
possible_directions = []
if cell.row < self.goal_row:
possible_directions.append("up")
elif cell.row > self.goal_row:
possible_directions.append("down")
if cell.column < self.goal_column:
possible_directions.append("right")
elif cell.column > self.goal_column:
possible_directions.append("left")
cell.value = choice(possible_directions)
self.resync_grid_with_sprites()
def main():
game = Schatzsuche(25, 35, 30, 5, "Schatzsuche")
arcade.run()
if __name__ == "__main__":
main()
|
python
|
from datetime import date
def get_count_of_day(year,month,day):
"""
Function that returns the count of day (since the beginning of the year)
for a given year, month and day
Positional argument:
year -- type = integer
month -- type = integer
day -- type = integer
Example:
get_count_of_day(2015,7,18)
returns: 199
"""
d0 = date(year, 1, 1)
d1 = date(year, month, day)
delta = d1 - d0
count_of_day = delta.days + 1
return count_of_day
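# Minimal usage check (illustrative; mirrors the example in the docstring):
if __name__ == "__main__":
    assert get_count_of_day(2015, 7, 18) == 199
    print(get_count_of_day(2015, 7, 18))  # -> 199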
|
python
|
#!/usr/bin/env python3
from gi.repository import Meson
if __name__ == "__main__":
s = Meson.Sample.new("Hello, meson/py!")
s.print_message()
|
python
|
import os
import sys
import unittest
import numpy as np
from QGrain.algorithms import *
NORMAL_PARAM_COUNT = 2
WEIBULL_PARAM_COUNT = 2
GENERAL_WEIBULL_PARAM_COUNT = 3
# the component number must be a positive int value
class TestCheckComponentNumber(unittest.TestCase):
# valid cases
def test_1_to_100(self):
for i in range(1, 101):
check_component_number(i)
# invalid cases
def test_str(self):
with self.assertRaises(TypeError):
check_component_number("1")
def test_float(self):
with self.assertRaises(TypeError):
check_component_number(1.4)
def test_list(self):
with self.assertRaises(TypeError):
check_component_number([1])
def test_zero(self):
with self.assertRaises(ValueError):
check_component_number(0)
def test_positive(self):
with self.assertRaises(ValueError):
check_component_number(-1)
class TestGetParamCount(unittest.TestCase):
def test_normal(self):
self.assertEqual(get_param_count(DistributionType.Normal), NORMAL_PARAM_COUNT)
def test_weibull(self):
self.assertEqual(get_param_count(DistributionType.Weibull), WEIBULL_PARAM_COUNT)
def test_gen_weibull(self):
self.assertEqual(get_param_count(DistributionType.GeneralWeibull), GENERAL_WEIBULL_PARAM_COUNT)
# the number of parameter names must correspond to the param count
class TestGetParamNames(unittest.TestCase):
def test_normal(self):
self.assertEqual(len(get_param_names(DistributionType.Normal)), NORMAL_PARAM_COUNT)
def test_weibull(self):
self.assertEqual(len(get_param_names(DistributionType.Weibull)), WEIBULL_PARAM_COUNT)
def test_gen_weibull(self):
self.assertEqual(len(get_param_names(DistributionType.GeneralWeibull)), GENERAL_WEIBULL_PARAM_COUNT)
# 1. make sure it has the func with that name
# 2. the count of params it accepts is consistent with func `get_param_count`
class TestGetBaseFuncName(unittest.TestCase):
@staticmethod
def has_func(name):
return name in globals().keys()
@staticmethod
def get_func(name):
return globals()[name]
def test_has_normal(self):
func_name = get_base_func_name(DistributionType.Normal)
self.assertTrue(self.has_func(func_name))
def test_has_weibull(self):
func_name = get_base_func_name(DistributionType.Weibull)
self.assertTrue(self.has_func(func_name))
def test_has_gen_weibull(self):
func_name = get_base_func_name(DistributionType.GeneralWeibull)
self.assertTrue(self.has_func(func_name))
def test_normal_use_suitable_params(self):
func_name = get_base_func_name(DistributionType.Normal)
func = self.get_func(func_name)
# the first param is x
func(np.linspace(1, 11, 1001), *[i+1 for i in range(NORMAL_PARAM_COUNT)])
    def test_weibull_use_suitable_params(self):
func_name = get_base_func_name(DistributionType.Weibull)
func = self.get_func(func_name)
# the first param is x
func(np.linspace(1, 11, 1001), *[i+1 for i in range(WEIBULL_PARAM_COUNT)])
def test_gen_weibull_use_suitable_params(self):
func_name = get_base_func_name(DistributionType.GeneralWeibull)
func = self.get_func(func_name)
# the first param is x
func(np.linspace(1, 11, 1001), *[i+1 for i in range(GENERAL_WEIBULL_PARAM_COUNT)])
class TestGetParamBounds(unittest.TestCase):
    # 1. each bound must have left and right values
    # 2. values must be real numbers or `None`
    #    `None` means no limit
    def check_bound(self, bound):
        self.assertEqual(len(bound), 2)
        self.assertTrue(bound[0] is None or np.isreal(bound[0]))
        self.assertTrue(bound[1] is None or np.isreal(bound[1]))
def test_normal(self):
bounds = get_param_bounds(DistributionType.Normal)
self.assertEqual(len(bounds), NORMAL_PARAM_COUNT)
for bound in bounds:
self.check_bound(bound)
def test_weibull(self):
bounds = get_param_bounds(DistributionType.Weibull)
self.assertEqual(len(bounds), WEIBULL_PARAM_COUNT)
for bound in bounds:
self.check_bound(bound)
def test_gen_weibull(self):
bounds = get_param_bounds(DistributionType.GeneralWeibull)
self.assertEqual(len(bounds), GENERAL_WEIBULL_PARAM_COUNT)
for bound in bounds:
self.check_bound(bound)
# the length of each component's defaults must be equal to the param count
class TestGetParamDefaults(unittest.TestCase):
    def test_normal(self):
        for component_number in range(1, 101):
            defaults = get_param_defaults(DistributionType.Normal, component_number)
            self.assertEqual(len(defaults), component_number)
            for defaults_of_component in defaults:
                self.assertEqual(len(defaults_of_component), NORMAL_PARAM_COUNT)
    def test_weibull(self):
        for component_number in range(1, 101):
            defaults = get_param_defaults(DistributionType.Weibull, component_number)
            self.assertEqual(len(defaults), component_number)
            for defaults_of_component in defaults:
                self.assertEqual(len(defaults_of_component), WEIBULL_PARAM_COUNT)
    def test_gen_weibull(self):
        for component_number in range(1, 101):
            defaults = get_param_defaults(DistributionType.GeneralWeibull, component_number)
            self.assertEqual(len(defaults), component_number)
            for defaults_of_component in defaults:
                self.assertEqual(len(defaults_of_component), GENERAL_WEIBULL_PARAM_COUNT)
# 1. if COMPONENT_NUMBER equals 1, length must be PARAM_COUNT,
# else, length must be equal to (PARAM_COUNT+1) * COMPONENT_COUNT - 1
# (the additional param is the fraction of each component)
# 2. params have already been sorted by `location` key
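# For example (an illustration inferred from the checks below, not from QGrain's
# docs): the Normal distribution has 2 params per component, so with 3 components
# the flattened parameter list holds (2+1)*3 - 1 = 8 values -- two shape params
# for each component plus two free fraction params (the last fraction being
# implied because the fractions sum to 1).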
class TestGetParams(unittest.TestCase):
def check_sorted(self, params):
for location, param in enumerate(params):
self.assertEqual(param[LOCATION_KEY], location)
def test_normal(self):
for component_number in range(1, 101):
params = get_params(DistributionType.Normal, component_number)
if component_number == 1:
self.assertEqual(len(params), NORMAL_PARAM_COUNT)
else:
self.assertEqual(len(params), (NORMAL_PARAM_COUNT+1) * component_number - 1)
self.check_sorted(params)
def test_weibull(self):
for component_number in range(1, 101):
params = get_params(DistributionType.Weibull, component_number)
if component_number == 1:
self.assertEqual(len(params), WEIBULL_PARAM_COUNT)
else:
self.assertEqual(len(params), (WEIBULL_PARAM_COUNT+1) * component_number - 1)
self.check_sorted(params)
def test_gen_weibull(self):
for component_number in range(1, 101):
params = get_params(DistributionType.GeneralWeibull, component_number)
if component_number == 1:
self.assertEqual(len(params), GENERAL_WEIBULL_PARAM_COUNT)
else:
self.assertEqual(len(params), (GENERAL_WEIBULL_PARAM_COUNT+1) * component_number - 1)
self.check_sorted(params)
# these funcs are hard to test in isolation and are exercised by other funcs, so just call them here
class TestMISC(unittest.TestCase):
def setUp(self):
self.normal_params = get_params(DistributionType.Normal, 10)
self.weibull_params = get_params(DistributionType.Weibull, 10)
self.gen_weibull_params = get_params(DistributionType.GeneralWeibull, 10)
def tearDown(self):
self.normal_params = None
self.weibull_params = None
self.gen_weibull_params = None
def test_sort(self):
sort_params_by_location_in_place(self.normal_params)
sort_params_by_location_in_place(self.weibull_params)
sort_params_by_location_in_place(self.gen_weibull_params)
def test_get_bounds(self):
get_bounds(self.normal_params)
get_bounds(self.weibull_params)
get_bounds(self.gen_weibull_params)
def test_get_constrains(self):
for i in range(1, 101):
get_constrains(i)
def test_get_defaults(self):
get_defaults(self.normal_params)
get_defaults(self.weibull_params)
get_defaults(self.gen_weibull_params)
# use `exec` to check if it has syntax or other errors
class TestGetLambdaStr(unittest.TestCase):
def test_normal(self):
for i in range(1, 101):
lambda_str = get_lambda_str(DistributionType.Normal, i)
exec(lambda_str)
def test_weibull(self):
for i in range(1, 101):
lambda_str = get_lambda_str(DistributionType.Weibull, i)
exec(lambda_str)
def test_gen_weibull(self):
for i in range(1, 101):
lambda_str = get_lambda_str(DistributionType.GeneralWeibull, i)
exec(lambda_str)
# the processed params must be in the following form:
# 1. a tuple whose length equals the component number
# 2. each element consists of one sub-tuple and the fraction
# 3. the sub-tuple holds the params of the single func (excluding x),
#    so its length is equal to PARAM_COUNT
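# For example (illustrative shape only, parameter names assumed): for the Normal
# distribution with 2 components, process_params returns something shaped like
# (((mu1, sigma1), fraction1), ((mu2, sigma2), fraction2)).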
class TestProcessParams(unittest.TestCase):
def test_normal(self):
for i in range(1, 101):
if i == 1:
count = NORMAL_PARAM_COUNT
else:
count = (NORMAL_PARAM_COUNT+1)*i - 1
fake_params = np.ones((count,))
processed = process_params(DistributionType.Normal, i, fake_params)
self.assertEqual(len(processed), i)
for params, fraction in processed:
self.assertEqual(len(params), NORMAL_PARAM_COUNT)
def test_weibull(self):
for i in range(1, 101):
if i == 1:
count = WEIBULL_PARAM_COUNT
else:
count = (WEIBULL_PARAM_COUNT+1)*i - 1
fake_params = np.ones((count,))
processed = process_params(DistributionType.Weibull, i, fake_params)
self.assertEqual(len(processed), i)
for params, fraction in processed:
self.assertEqual(len(params), WEIBULL_PARAM_COUNT)
def test_gen_weibull(self):
for i in range(1, 101):
if i == 1:
count = GENERAL_WEIBULL_PARAM_COUNT
else:
count = (GENERAL_WEIBULL_PARAM_COUNT+1)*i - 1
fake_params = np.ones((count,))
processed = process_params(DistributionType.GeneralWeibull, i, fake_params)
self.assertEqual(len(processed), i)
for params, fraction in processed:
self.assertEqual(len(params), GENERAL_WEIBULL_PARAM_COUNT)
# 1. the PDF func returns 0.0 when a param is invalid
# 2. other funcs return NaN when a param is invalid
# 3. the values from the func generated by the lambda string and the
#    hand-written (manuscript) func must be equal
class TestNormalMathFuncs(unittest.TestCase):
@staticmethod
def get_func(lambda_str):
exec("func = "+lambda_str)
return locals()["func"]
# get zero while param is invalid
def test_sigma_invalid(self):
x = np.linspace(-10, 10, 1001)
res = np.equal(normal(x, 0, -1), np.zeros_like(x))
self.assertTrue(np.all(res))
def test_mean_invalid(self):
self.assertTrue(np.isnan(normal_mean(0, -1)))
def test_median_invalid(self):
self.assertTrue(np.isnan(normal_median(0, -1)))
def test_mode_invalid(self):
self.assertTrue(np.isnan(normal_mode(0, -1)))
def test_standard_deviation_invalid(self):
self.assertTrue(np.isnan(normal_standard_deviation(0, -1)))
def test_variance_invalid(self):
self.assertTrue(np.isnan(normal_variance(0, -1)))
def test_skewness_invalid(self):
self.assertTrue(np.isnan(normal_skewness(0, -1)))
def test_kurtosis_invalid(self):
self.assertTrue(np.isnan(normal_kurtosis(0, -1)))
def test_single(self):
lambda_str = get_lambda_str(DistributionType.Normal, 1)
generated_func = self.get_func(lambda_str)
manuscript_func = normal
x = np.linspace(-10, 10, 1001)
res = np.equal(generated_func(x, 0.7, 2.1), manuscript_func(x, 0.7, 2.1))
self.assertTrue(np.all(res))
def test_double(self):
lambda_str = get_lambda_str(DistributionType.Normal, 2)
generated_func = self.get_func(lambda_str)
manuscript_func = double_normal
x = np.linspace(-10, 10, 1001)
res = np.equal(generated_func(x, 0.71, 2.41, 5.3, 12.1, 0.34), \
manuscript_func(x, 0.71, 2.41, 5.3, 12.1, 0.34))
self.assertTrue(np.all(res))
def test_triple(self):
lambda_str = get_lambda_str(DistributionType.Normal, 3)
generated_func = self.get_func(lambda_str)
manuscript_func = triple_normal
x = np.linspace(-10, 10, 1001)
res = np.equal(generated_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.42), \
manuscript_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.42))
self.assertTrue(np.all(res))
def test_quadruple(self):
lambda_str = get_lambda_str(DistributionType.Normal, 4)
generated_func = self.get_func(lambda_str)
manuscript_func = quadruple_normal
x = np.linspace(-10, 10, 1001)
res = np.equal(generated_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.46, 0.21, 0.42, 0.08), \
manuscript_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.46, 0.21, 0.42, 0.08))
self.assertTrue(np.all(res))
class TestWeibullMathFuncs(unittest.TestCase):
@staticmethod
def get_func(lambda_str):
exec("func = "+lambda_str)
return locals()["func"]
def test_beta_invalid(self):
x = np.linspace(1, 11, 1001)
self.assertTrue(np.all(np.equal(weibull(x, 0, 1), np.zeros_like(x))))
self.assertTrue(np.all(np.equal(weibull(x, 0.0, 1), np.zeros_like(x))))
self.assertTrue(np.all(np.equal(weibull(x, -1, 1), np.zeros_like(x))))
self.assertTrue(np.all(np.equal(weibull(x, -2.7, 1), np.zeros_like(x))))
def test_eta_invalid(self):
x = np.linspace(1, 11, 1001)
self.assertTrue(np.all(np.equal(weibull(x, 2, 0), np.zeros_like(x))))
self.assertTrue(np.all(np.equal(weibull(x, 2, 0.0), np.zeros_like(x))))
self.assertTrue(np.all(np.equal(weibull(x, 2, -2), np.zeros_like(x))))
self.assertTrue(np.all(np.equal(weibull(x, 2, -3.1), np.zeros_like(x))))
def test_x_invalid(self):
x = np.linspace(-2, 2, 401)
y = weibull(x, 2, 2)
res = np.equal(np.less_equal(x, 0.0), np.equal(y, 0.0))
self.assertTrue(np.all(res))
def test_mean_invalid(self):
self.assertTrue(np.isnan(weibull_mean(-1, 1)))
self.assertTrue(np.isnan(weibull_mean(1, -1)))
def test_median_invalid(self):
self.assertTrue(np.isnan(weibull_median(-1, 1)))
self.assertTrue(np.isnan(weibull_median(1, -1)))
def test_mode_invalid(self):
self.assertTrue(np.isnan(weibull_mode(-1, 1)))
self.assertTrue(np.isnan(weibull_mode(1, -1)))
def test_standard_deviation_invalid(self):
self.assertTrue(np.isnan(weibull_standard_deviation(-1, 1)))
self.assertTrue(np.isnan(weibull_standard_deviation(1, -1)))
def test_variance_invalid(self):
self.assertTrue(np.isnan(weibull_variance(-1, 1)))
self.assertTrue(np.isnan(weibull_variance(1, -1)))
def test_skewness_invalid(self):
self.assertTrue(np.isnan(weibull_skewness(-1, 1)))
self.assertTrue(np.isnan(weibull_skewness(1, -1)))
def test_kurtosis_invalid(self):
self.assertTrue(np.isnan(weibull_kurtosis(-1, 1)))
self.assertTrue(np.isnan(weibull_kurtosis(1, -1)))
def test_single(self):
lambda_str = get_lambda_str(DistributionType.Weibull, 1)
generated_func = self.get_func(lambda_str)
manuscript_func = weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.7, 2.1), manuscript_func(x, 0.7, 2.1))
self.assertTrue(np.all(res))
def test_double(self):
lambda_str = get_lambda_str(DistributionType.Weibull, 2)
generated_func = self.get_func(lambda_str)
manuscript_func = double_weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.71, 2.41, 5.3, 12.1, 0.34), \
manuscript_func(x, 0.71, 2.41, 5.3, 12.1, 0.34))
self.assertTrue(np.all(res))
def test_triple(self):
lambda_str = get_lambda_str(DistributionType.Weibull, 3)
generated_func = self.get_func(lambda_str)
manuscript_func = triple_weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.42), \
manuscript_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.42))
self.assertTrue(np.all(res))
def test_quadruple(self):
lambda_str = get_lambda_str(DistributionType.Weibull, 4)
generated_func = self.get_func(lambda_str)
manuscript_func = quadruple_weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.46, 0.21, 0.42, 0.08), \
manuscript_func(x, 0.52, 1.42, 2.3, 11.2, 4.2, 12.4, 0.21, 0.46, 0.21, 0.42, 0.08))
self.assertTrue(np.all(res))
class TestGeneralWeibullMathFuncs(unittest.TestCase):
@staticmethod
def get_func(lambda_str):
exec("func = "+lambda_str)
return locals()["func"]
def test_base(self):
x1 = np.linspace(1, 11, 1001)
x_offset = 2.458
x2 = x1 - x_offset
res = np.equal(weibull(x2, 2.786, 5.267), gen_weibull(x1, x_offset, 2.786, 5.267))
self.assertTrue(np.all(res))
def test_mean(self):
self.assertEqual(weibull_mean(2.144, 2.455), gen_weibull_mean(0, 2.144, 2.455))
def test_median(self):
self.assertEqual(weibull_median(2.144, 2.455), gen_weibull_median(0, 2.144, 2.455))
def test_mode(self):
self.assertEqual(weibull_mode(2.144, 2.455), gen_weibull_mode(0, 2.144, 2.455))
def test_standard_deviation(self):
self.assertEqual(weibull_standard_deviation(2.144, 2.455), gen_weibull_standard_deviation(0, 2.144, 2.455))
def test_variance(self):
self.assertEqual(weibull_variance(2.144, 2.455), gen_weibull_variance(0, 2.144, 2.455))
def test_skewness(self):
self.assertEqual(weibull_skewness(2.144, 2.455), gen_weibull_skewness(0, 2.144, 2.455))
def test_kurtosis(self):
self.assertEqual(weibull_kurtosis(2.144, 2.455), gen_weibull_kurtosis(0, 2.144, 2.455))
def test_single(self):
lambda_str = get_lambda_str(DistributionType.GeneralWeibull, 1)
generated_func = self.get_func(lambda_str)
manuscript_func = gen_weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.2, 0.7, 2.1), manuscript_func(x, 0.2, 0.7, 2.1))
self.assertTrue(np.all(res))
def test_double(self):
lambda_str = get_lambda_str(DistributionType.GeneralWeibull, 2)
generated_func = self.get_func(lambda_str)
manuscript_func = double_gen_weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.37, 0.71, 2.41, 0.45, 5.3, 12.1, 0.34), \
manuscript_func(x, 0.37, 0.71, 2.41, 0.45, 5.3, 12.1, 0.34))
self.assertTrue(np.all(res))
def test_triple(self):
lambda_str = get_lambda_str(DistributionType.GeneralWeibull, 3)
generated_func = self.get_func(lambda_str)
manuscript_func = triple_gen_weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.76, 0.52, 1.42, 0.65, 2.3, 11.2, 0.54, 4.2, 12.4, 0.21, 0.42), \
manuscript_func(x, 0.76, 0.52, 1.42, 0.65, 2.3, 11.2, 0.54, 4.2, 12.4, 0.21, 0.42))
self.assertTrue(np.all(res))
def test_quadruple(self):
lambda_str = get_lambda_str(DistributionType.GeneralWeibull, 4)
generated_func = self.get_func(lambda_str)
manuscript_func = quadruple_gen_weibull
x = np.linspace(1, 10, 1001)
res = np.equal(generated_func(x, 0.80, 0.52, 1.42, 0.21, 2.3, 11.2, 0.43, 4.2, 12.4, 0.76, 0.21, 0.46, 0.21, 0.42, 0.08), \
manuscript_func(x, 0.80, 0.52, 1.42, 0.21, 2.3, 11.2, 0.43, 4.2, 12.4, 0.76, 0.21, 0.46, 0.21, 0.42, 0.08))
self.assertTrue(np.all(res))
# the func must be corresponding to the name
class TestGetSingleFunc(unittest.TestCase):
@staticmethod
def get_func(distribution_type: DistributionType):
name = get_base_func_name(distribution_type)
return globals()[name]
def test_normal(self):
actual_func = get_single_func(DistributionType.Normal)
expected_func = self.get_func(DistributionType.Normal)
self.assertIs(actual_func, expected_func)
def test_weibull(self):
actual_func = get_single_func(DistributionType.Weibull)
expected_func = self.get_func(DistributionType.Weibull)
self.assertIs(actual_func, expected_func)
def test_gen_weibull(self):
actual_func = get_single_func(DistributionType.GeneralWeibull)
expected_func = self.get_func(DistributionType.GeneralWeibull)
self.assertIs(actual_func, expected_func)
# the length of the return values must be the same as that of `get_defaults`
class TestGetParamByMean(unittest.TestCase):
def test_normal(self):
for i in range(1, 101):
params = get_param_by_mean(DistributionType.Normal, i, np.linspace(1, 10, i))
            if i == 1:
self.assertEqual(len(params), NORMAL_PARAM_COUNT)
else:
self.assertEqual(len(params), (NORMAL_PARAM_COUNT+1)*i - 1)
def test_weibull(self):
for i in range(1, 101):
params = get_param_by_mean(DistributionType.Weibull, i, np.linspace(1, 10, i))
            if i == 1:
self.assertEqual(len(params), WEIBULL_PARAM_COUNT)
else:
self.assertEqual(len(params), (WEIBULL_PARAM_COUNT+1)*i - 1)
def test_gen_weibull(self):
for i in range(1, 101):
params = get_param_by_mean(DistributionType.GeneralWeibull, i, np.linspace(1, 10, i))
            if i == 1:
self.assertEqual(len(params), GENERAL_WEIBULL_PARAM_COUNT)
else:
self.assertEqual(len(params), (GENERAL_WEIBULL_PARAM_COUNT+1)*i - 1)
class TestAlgorithmData(unittest.TestCase):
def test_ctor(self):
for i in range(1, 101):
n = AlgorithmData(DistributionType.Normal, i)
w = AlgorithmData(DistributionType.Weibull, i)
g = AlgorithmData(DistributionType.GeneralWeibull, i)
def setUp(self):
self.normal_data = AlgorithmData(DistributionType.Normal, 10)
self.weibull_data = AlgorithmData(DistributionType.Weibull, 10)
self.gen_weibull_data = AlgorithmData(DistributionType.GeneralWeibull, 10)
def tearDown(self):
self.normal_data = None
self.weibull_data = None
self.gen_weibull_data = None
# these attrs will be used in other files
def test_has_attrs(self):
for data in [self.normal_data, self.weibull_data, self.gen_weibull_data]:
data.distribution_type
data.component_number
data.param_count
data.param_names
data.single_func
data.mixed_func
data.bounds
data.defaults
data.constrains
data.mean
data.median
data.mode
data.variance
data.standard_deviation
data.skewness
data.kurtosis
def test_read_only(self):
for data in [self.normal_data, self.weibull_data, self.gen_weibull_data]:
with self.assertRaises(AttributeError):
data.distribution_type = None
with self.assertRaises(AttributeError):
data.component_number = None
with self.assertRaises(AttributeError):
data.param_count = None
with self.assertRaises(AttributeError):
data.param_names = None
with self.assertRaises(AttributeError):
data.single_func = None
with self.assertRaises(AttributeError):
data.mixed_func = None
with self.assertRaises(AttributeError):
data.bounds = None
with self.assertRaises(AttributeError):
data.defaults = None
with self.assertRaises(AttributeError):
data.constrains = None
with self.assertRaises(AttributeError):
data.mean = None
with self.assertRaises(AttributeError):
data.median = None
with self.assertRaises(AttributeError):
data.mode = None
with self.assertRaises(AttributeError):
data.variance = None
with self.assertRaises(AttributeError):
data.standard_deviation = None
with self.assertRaises(AttributeError):
data.skewness = None
with self.assertRaises(AttributeError):
data.kurtosis = None
def test_process_params(self):
for data in [self.normal_data, self.weibull_data, self.gen_weibull_data]:
func_params = get_params(data.distribution_type, data.component_number)
fake_params = get_defaults(func_params)
actual_1 = data.process_params(fake_params, 0.0)
actual_2 = data.process_params(fake_params, 3.1)
expected = process_params(data.distribution_type, data.component_number, fake_params)
self.assertEqual(actual_1, expected)
if data.distribution_type == DistributionType.Normal or \
data.distribution_type == DistributionType.GeneralWeibull:
self.assertNotEqual(actual_2, expected)
def test_get_param_by_mean(self):
for data in [self.normal_data, self.weibull_data, self.gen_weibull_data]:
data.get_param_by_mean(np.linspace(1, 10, data.component_number))
if __name__ == "__main__":
unittest.main()
|
python
|
# Copyright (C) 2020 University of Oxford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pandas as pd
import os
import sys
__all__ = ('EU_ZH_Fetcher',)
from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher
logger = logging.getLogger(__name__)
"""
site-location: https://github.com/covid19-eu-zh/covid19-eu-data
COVID19 data for European countries created and maintained by covid19-eu-zh
Data originally from
Austria's Sozial Ministerium https://www.sozialministerium.at/Informationen-zum-Coronavirus/Neuartiges-Coronavirus-(2019-nCov).html
Czech Ministry of Health https://onemocneni-aktualne.mzcr.cz/covid-19
Germany's Robert Koch Institute https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html
Hungary's Office of the Prime Minister https://koronavirus.gov.hu/
Ireland's Health Protection Surveillance Centre https://www.hpsc.ie/a-z/respiratory/coronavirus/novelcoronavirus/casesinireland/
Poland - Government https://www.gov.pl/web/koronawirus/wykaz-zarazen-koronawirusem-sars-cov-2
Sweden's Public Health Authority https://www.folkhalsomyndigheten.se/smittskydd-beredskap/utbrott/aktuella-utbrott/covid-19/aktuellt-epidemiologiskt-lage/
Slovenia's Government Communications Office https://www.gov.si/en/topics/coronavirus-disease-covid-19/
Belgian institute for health: https://epistat.wiv-isp.be/Covid/
"""
class EU_ZH_Fetcher(BaseEpidemiologyFetcher):
LOAD_PLUGIN = True
SOURCE = 'EU_ZH'
def fetch(self, url):
return pd.read_csv(url)
# Certain regions have excess characters in some source files
def clean_string(self, input):
if isinstance(input, str):
return input.replace('', '')
else:
return input
def parse_int(self, data):
if pd.isna(data):
return None
if isinstance(data, str):
data = data.replace('*', '')
return int(data)
def country_fetcher(self, region, country, code_3, code_2):
logger.info("Processing number of cases in " + country)
if code_3 == 'NOR':
logger.warning("These GIDs not entirely accurate due to change in Norway's county boundaries, 2020.")
if code_3 == 'BEL':
logger.warning("These GIDs has MISSING region due to unknown data resourses, 2020.")
url = 'https://github.com/covid19-eu-zh/covid19-eu-data/raw/master/dataset/covid-19-' + code_2 + '.csv'
df = self.fetch(url)
for index, record in df.iterrows():
# date Y-m-d or Y-m-dTH:M:S
date = record['datetime'].split('T')[0]
adm_area_2 = None
# If no region is reported then all data is national
if not hasattr(record, region):
adm_area_1 = None
gid = [code_3]
# Ignore two known corrupted lines in the Polish data
elif str(record[region])[:4] == 'http':
continue
elif pd.isna(record[region]) and code_3 == 'POL':
continue
# Austria's national data is reported with a blank region
elif pd.isna(record[region]) and code_3 == 'AUT':
adm_area_1 = None
gid = [code_3]
elif region == 'nuts_2' and code_3 == 'BEL':
if self.clean_string(record['nuts_1']) == 'MISSING' or pd.isna(record[region]):
continue
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=self.clean_string(record['nuts_1']),
input_adm_area_2=self.clean_string(record[region]),
return_original_if_failure=True,
suppress_exception=True
)
# If the region appears cleanly, then we can translate to obtain GID
elif region == 'nuts_1' and code_3 == 'BEL':
if pd.notna(record['nuts_2']):
continue
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=self.clean_string(record[region]),
return_original_if_failure=True,
suppress_exception=True
)
else:
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=self.clean_string(record[region]),
return_original_if_failure=True,
suppress_exception=True
)
upsert_obj = {
'source': self.SOURCE,
'date': date,
'country': country,
'countrycode': code_3,
'adm_area_1': adm_area_1,
'adm_area_2': adm_area_2,
'adm_area_3': None,
'gid': gid
}
# add the epidemiological properties to the object if they exist
if hasattr(record, 'tests'):
upsert_obj['tested'] = self.parse_int(record['tests'])
if hasattr(record, 'cases'):
upsert_obj['confirmed'] = self.parse_int(record['cases'])
if hasattr(record, 'tests_positive'):
upsert_obj['confirmed'] = self.parse_int(record['tests_positive'])
if hasattr(record, 'recovered'):
upsert_obj['recovered'] = self.parse_int(record['recovered'])
if hasattr(record, 'deaths'):
upsert_obj['dead'] = self.parse_int(record['deaths'])
if hasattr(record, 'hospitalized'):
upsert_obj['hospitalised'] = self.parse_int(record['hospitalized'])
if hasattr(record, 'intensive_care'):
upsert_obj['hospitalised_icu'] = self.parse_int(record['intensive_care'])
if hasattr(record, 'quarantine'):
upsert_obj['quarantined'] = self.parse_int(record['quarantine'])
self.upsert_data(**upsert_obj)
# read the list of countries from a csv file in order to fetch each one
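    # Illustrative layout for that CSV (column order inferred from `colnames`
    # below; the file's own header row is read and then renamed, and code_2 is
    # the short code that appears in the source file name -- example values only):
    #   country,code_3,code_2,region
    #   Belgium,BEL,be,nuts_2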
def load_countries_to_fetch(self):
input_csv_fname = getattr(self.__class__, 'INPUT_CSV', "input.csv")
path = os.path.dirname(sys.modules[self.__class__.__module__].__file__)
csv_fname = os.path.join(path, input_csv_fname)
if not os.path.exists(csv_fname):
return None
colnames = ['country', 'code_3', 'code_2', 'region']
input_pd = pd.read_csv(csv_fname)
input_pd.columns = colnames
input_pd = input_pd.where((pd.notnull(input_pd)), None)
return input_pd
def run(self):
countries = self.load_countries_to_fetch()
for index, record in countries.iterrows():
self.country_fetcher(record['region'], record['country'], record['code_3'], record['code_2'])
|
python
|
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# NOTE: Make sure you've created your secrets.py file before running this example
# https://learn.adafruit.com/adafruit-pyportal/internet-connect#whats-a-secrets-file-17-2
import board
from adafruit_pyportal import PyPortal
# Set a data source URL
TEXT_URL = "http://wifitest.adafruit.com/testwifi/index.html"
# Create the PyPortal object
pyportal = PyPortal(url=TEXT_URL, status_neopixel=board.NEOPIXEL)
# Set display to show REPL
board.DISPLAY.show(None)
# Go get that data
print("Fetching text from", TEXT_URL)
data = pyportal.fetch()
# Print out what we got
print("-" * 40)
print(data)
print("-" * 40)
|
python
|
"""
weasyprint.css.descriptors
--------------------------
Validate descriptors used for some at-rules.
"""
import tinycss2
from ...logger import LOGGER
from ..utils import (
InvalidValues, comma_separated_list, get_custom_ident, get_keyword,
get_single_keyword, get_url, remove_whitespace, single_keyword,
single_token, split_on_comma)
from . import properties
DESCRIPTORS = {
'font-face': {},
'counter-style': {},
}
class NoneFakeToken:
type = 'ident'
lower_value = 'none'
class NormalFakeToken:
type = 'ident'
lower_value = 'normal'
def preprocess_descriptors(rule, base_url, descriptors):
"""Filter unsupported names and values for descriptors.
Log a warning for every ignored descriptor.
    Return an iterable of ``(name, value)`` tuples.
"""
for descriptor in descriptors:
if descriptor.type != 'declaration' or descriptor.important:
continue
tokens = remove_whitespace(descriptor.value)
try:
# Use list() to consume generators now and catch any error.
if descriptor.name not in DESCRIPTORS[rule]:
raise InvalidValues('descriptor not supported')
function = DESCRIPTORS[rule][descriptor.name]
if function.wants_base_url:
value = function(tokens, base_url)
else:
value = function(tokens)
if value is None:
raise InvalidValues
result = ((descriptor.name, value),)
except InvalidValues as exc:
LOGGER.warning(
'Ignored `%s:%s` at %d:%d, %s.',
descriptor.name, tinycss2.serialize(descriptor.value),
descriptor.source_line, descriptor.source_column,
exc.args[0] if exc.args and exc.args[0] else 'invalid value')
continue
for long_name, value in result:
yield long_name.replace('-', '_'), value
def descriptor(rule, descriptor_name=None, wants_base_url=False):
"""Decorator adding a function to the ``DESCRIPTORS``.
The name of the descriptor covered by the decorated function is set to
``descriptor_name`` if given, or is inferred from the function name
(replacing underscores by hyphens).
:param wants_base_url:
The function takes the stylesheet’s base URL as an additional
parameter.
"""
def decorator(function):
"""Add ``function`` to the ``DESCRIPTORS``."""
if descriptor_name is None:
name = function.__name__.replace('_', '-')
else:
name = descriptor_name
assert name not in DESCRIPTORS[rule], name
function.wants_base_url = wants_base_url
DESCRIPTORS[rule][name] = function
return function
return decorator
def expand_font_variant(tokens):
keyword = get_single_keyword(tokens)
if keyword in ('normal', 'none'):
for suffix in (
'-alternates', '-caps', '-east-asian', '-numeric',
'-position'):
yield suffix, [NormalFakeToken]
token = NormalFakeToken if keyword == 'normal' else NoneFakeToken
yield '-ligatures', [token]
else:
features = {
'alternates': [],
'caps': [],
'east-asian': [],
'ligatures': [],
'numeric': [],
'position': []}
for token in tokens:
keyword = get_keyword(token)
if keyword == 'normal':
# We don't allow 'normal', only the specific values
raise InvalidValues
for feature in features:
function_name = f'font_variant_{feature.replace("-", "_")}'
if getattr(properties, function_name)([token]):
features[feature].append(token)
break
else:
raise InvalidValues
for feature, tokens in features.items():
if tokens:
yield (f'-{feature}', tokens)
@descriptor('font-face')
def font_family(tokens, allow_spaces=False):
"""``font-family`` descriptor validation."""
allowed_types = ['ident']
if allow_spaces:
allowed_types.append('whitespace')
if len(tokens) == 1 and tokens[0].type == 'string':
return tokens[0].value
if tokens and all(token.type in allowed_types for token in tokens):
return ' '.join(
token.value for token in tokens if token.type == 'ident')
@descriptor('font-face', wants_base_url=True)
@comma_separated_list
def src(tokens, base_url):
"""``src`` descriptor validation."""
if len(tokens) <= 2:
tokens, token = tokens[:-1], tokens[-1]
if token.type == 'function' and token.lower_name == 'format':
tokens, token = tokens[:-1], tokens[-1]
if token.type == 'function' and token.lower_name == 'local':
return 'local', font_family(token.arguments, allow_spaces=True)
url = get_url(token, base_url)
if url is not None and url[0] == 'url':
return url[1]
@descriptor('font-face')
@single_keyword
def font_style(keyword):
"""``font-style`` descriptor validation."""
return keyword in ('normal', 'italic', 'oblique')
@descriptor('font-face')
@single_token
def font_weight(token):
"""``font-weight`` descriptor validation."""
keyword = get_keyword(token)
if keyword in ('normal', 'bold'):
return keyword
if token.type == 'number' and token.int_value is not None:
if token.int_value in [100, 200, 300, 400, 500, 600, 700, 800, 900]:
return token.int_value
@descriptor('font-face')
@single_keyword
def font_stretch(keyword):
"""``font-stretch`` descriptor validation."""
return keyword in (
'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed',
'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded')
@descriptor('font-face')
def font_feature_settings(tokens):
"""``font-feature-settings`` descriptor validation."""
return properties.font_feature_settings(tokens)
@descriptor('font-face')
def font_variant(tokens):
"""``font-variant`` descriptor validation."""
if len(tokens) == 1:
keyword = get_keyword(tokens[0])
if keyword in ('normal', 'none', 'inherit'):
return []
values = []
for name, sub_tokens in expand_font_variant(tokens):
try:
values.append(properties.validate_non_shorthand(
None, 'font-variant' + name, sub_tokens, required=True))
except InvalidValues:
return None
return values
@descriptor('counter-style')
def system(tokens):
"""``system`` descriptor validation."""
if len(tokens) > 2:
return
keyword = get_keyword(tokens[0])
if keyword == 'extends':
if len(tokens) == 2:
second_keyword = get_keyword(tokens[1])
if second_keyword:
return (keyword, second_keyword, None)
elif keyword == 'fixed':
if len(tokens) == 1:
return (None, 'fixed', 1)
elif tokens[1].type == 'number' and tokens[1].is_integer:
return (None, 'fixed', tokens[1].int_value)
elif len(tokens) == 1 and keyword in (
'cyclic', 'numeric', 'alphabetic', 'symbolic', 'additive'):
return (None, keyword, None)
@descriptor('counter-style', wants_base_url=True)
def negative(tokens, base_url):
"""``negative`` descriptor validation."""
if len(tokens) > 2:
return
values = []
tokens = list(tokens)
while tokens:
token = tokens.pop(0)
if token.type in ('string', 'ident'):
values.append(('string', token.value))
continue
url = get_url(token, base_url)
if url is not None and url[0] == 'url':
values.append(('url', url[1]))
if len(values) == 1:
values.append(('string', ''))
if len(values) == 2:
return values
@descriptor('counter-style', 'prefix', wants_base_url=True)
@descriptor('counter-style', 'suffix', wants_base_url=True)
def prefix_suffix(tokens, base_url):
"""``prefix`` and ``suffix`` descriptors validation."""
if len(tokens) != 1:
return
token, = tokens
if token.type in ('string', 'ident'):
return ('string', token.value)
url = get_url(token, base_url)
if url is not None and url[0] == 'url':
return ('url', url[1])
@descriptor('counter-style')
@comma_separated_list
def range(tokens):
"""``range`` descriptor validation."""
if len(tokens) == 1:
keyword = get_single_keyword(tokens)
if keyword == 'auto':
return 'auto'
elif len(tokens) == 2:
values = []
for i, token in enumerate(tokens):
if token.type == 'ident' and token.value == 'infinite':
values.append(float('inf') if i else -float('inf'))
elif token.type == 'number' and token.is_integer:
values.append(token.int_value)
if len(values) == 2 and values[0] <= values[1]:
return tuple(values)
@descriptor('counter-style', wants_base_url=True)
def pad(tokens, base_url):
"""``pad`` descriptor validation."""
if len(tokens) == 2:
values = [None, None]
for token in tokens:
if token.type == 'number':
if token.is_integer and token.value >= 0 and values[0] is None:
values[0] = token.int_value
elif token.type in ('string', 'ident'):
values[1] = ('string', token.value)
url = get_url(token, base_url)
if url is not None and url[0] == 'url':
values[1] = ('url', url[1])
if None not in values:
return tuple(values)
@descriptor('counter-style')
@single_token
def fallback(token):
"""``fallback`` descriptor validation."""
ident = get_custom_ident(token)
if ident != 'none':
return ident
@descriptor('counter-style', wants_base_url=True)
def symbols(tokens, base_url):
"""``symbols`` descriptor validation."""
values = []
for token in tokens:
if token.type in ('string', 'ident'):
values.append(('string', token.value))
continue
url = get_url(token, base_url)
if url is not None and url[0] == 'url':
values.append(('url', url[1]))
continue
return
return tuple(values)
@descriptor('counter-style', wants_base_url=True)
def additive_symbols(tokens, base_url):
"""``additive-symbols`` descriptor validation."""
results = []
for part in split_on_comma(tokens):
result = pad(remove_whitespace(part), base_url)
if result is None:
return
if results and results[-1][0] <= result[0]:
return
results.append(result)
return tuple(results)
|
python
|
import numpy as np
import pandas as pd
import os
import h5py
from scipy import io
##########################################################
# author: tecwang
# email: [email protected]
# Inspired by https://blog.csdn.net/zebralxr/article/details/78254192
# detail: Transfer mat file into csv file by python
##########################################################
rootDir = input("Please input the root dir!\n")
print("================================================================")
files = os.listdir(rootDir)
for f in files:
    if not os.path.isdir(f) and os.path.splitext(f)[1] == ".mat":
# prepare file path
        filename = os.path.join(rootDir, f)
        csv_prefix_name = os.path.basename(f).replace(".", "_")
        csv_prefix_name = os.path.join(rootDir, csv_prefix_name)
# print current processing file name
print(filename)
# read mat file
features_struct = io.loadmat(filename)
print(features_struct.keys()) # print keys in mat file
temp_arr = [] # store (1, 1) small data
for key in features_struct.keys():
item = features_struct[key]
try:
print(item.shape)
if(item.shape[0] == 1 and item.shape[1] == 1):
# if shape == (1, 1), then combine them and output it as a single csv file
temp_arr.append(item)
else:
# if shape != (1, 1), then output it as a single csv file
item = pd.DataFrame(item)
item.to_csv(csv_prefix_name + "_" + key + ".csv",index=False, header=None)
# output combined small data
temp_arr = pd.DataFrame(temp_arr)
temp_arr.to_csv(csv_prefix_name + "_temp_arr.csv",index=False, header=None)
except Exception:
                # skip the header information such as __header__, __version__, __globals__, ...
pass
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 23 14:40:10 2017
@author: pudutta
"""
from __future__ import absolute_import, division, print_function
import codecs
import glob
import multiprocessing
import os
import pprint
import re
import nltk
import gensim.models.word2vec as w2v
import math
import sklearn.manifold
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from itertools import chain
from scipy import spatial
from collections import Counter
from operator import itemgetter
from xlwt import Workbook
import xlsxwriter
from openpyxl import load_workbook
from openpyxl.compat import range
from openpyxl.utils import get_column_letter
from openpyxl import Workbook
threshold=94
get=0
na=0
workbook = load_workbook(filename='C:/Users/pudutta/Desktop/trainquora2.xlsx')
first_sheet ="Mysheet"
ws = workbook.get_sheet_by_name(first_sheet)
for row in range(2, 10000):
if(ws.cell(row=row, column=4).value=="NA"):
na=na+1
else:
if((ws.cell(row=row, column=5).value>threshold and ws.cell(row=row, column=3).value==1) or (ws.cell(row=row, column=5).value<threshold and ws.cell(row=row, column=3).value==0) ):
print((ws.cell(row=row, column=1).value))
print((ws.cell(row=row, column=2).value))
print(row)
get=get+1
print(get/(9999-na))
|
python
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@Author: xiaodong
@Email: [email protected]
@DateTime: 2015-11-08 19:06:43
@Description: Extract information from Word documents and convert it to Excel.
Required fields: name, position, gender, age, education, phone number, email,
household registration, resume update date, work experience, project experience.
'''
from win32com.client import Dispatch, constants
import os,sys
import re
import chardet
from openpyxl import Workbook
from openpyxl.styles import Alignment, Font
reload(sys)
sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
def get_doc(rootDir="."):
list_dirs = os.walk(rootDir)
filelist = []
    # walk the directory tree looking for the target Word files
for root, dirs, files in list_dirs:
for d in files:
# if keyFile in d:
if d.endswith('.doc') or d.endswith('.docx'):
filelist.append(os.path.join(root, d))
if filelist:
return filelist
else:
        sys.exit('No Word files found, please check that the working directory is correct.')
def get_encode(s):
'''
    Detect the encoding of a byte string.
'''
encoding = chardet.detect(s)['encoding']
if encoding == "GB2312":
encoding = "gbk"
return encoding
def Filter(content,keyWordList=''):
'''
    Filter for: name, position, gender, age, education, phone number, email,
    household registration, resume update date, work experience, project experience.
'''
encoding = get_encode(content)
content = content.decode(encoding)
pattern_config = [
{ 'header':u'姓名', 'flied' : 'name', 'pattern' : ur'(?ms)(.*姓名)[::]*?\s+(?P<name>.*?)\s','result' : '' },
{ 'header':u'职位', 'flied' : 'job', 'pattern' : ur'(?ms)(.*职位.*?)[::]*?\s+(?P<job>.*?)\s','result' : '' },
{ 'header':u'性别', 'flied' : 'sex', 'pattern' : ur'(?ms).*(?P<sex>[男女]).*','result' : '' },
{ 'header':u'年龄', 'flied' : 'age', 'pattern' : ur'(?s)(.*年龄.*?)[::]\s+?(?P<age>\d{2}).*','result' : '' },
{ 'header':u'教育程度', 'flied': 'education', 'pattern' : ur'(?ms).*(?P<education>本科|大专|硕士|博士).*','result' : '' },
{ 'header':u'电话号码', 'flied' : 'phoneNumber','pattern' : ur'(?ms).*?(?P<phoneNumber>1\d{10}).*','result' : '' },
{ 'header':u'邮箱', 'flied' : 'mail', 'pattern' : ur'(?s).*\s(?P<mail>\S+?@\S+)','result' : '' },
{ 'header':u'户籍', 'flied' : 'homeTown', 'pattern' : ur'(?ms).*(户口|籍)[::]\s+(?P<homeTown>\S+)\s','result' : '' },
{ 'header':u'更新时间', 'flied' : 'updateTime','pattern' : ur'(?ms).*?(?P<updateTime>\d{4}-\d{2}-\d{2}\s+\d{2}[::]+\d+)','result' : '' },
{ 'header':u'工作经历', 'flied' : 'workExperience','pattern' : ur'(?ms).*?\s(工作(经历|经验))[:: ]*(?P<workExperience>.*)(项目经历)','result' : '' },
{ 'header':u'项目经历', 'flied' : 'projectExperience','pattern' : ur'(?ms).*?(项目经历)[:: ]*\s(?P<projectExperience>.*)','result' : '' },
]
for num in range(len(pattern_config)):
pattern = pattern_config[num]['pattern']
p = re.compile(pattern)
result = p.match(content)
# print pattern
if result:
_result = result.group(pattern_config[num]['flied'])
pattern_config[num]['result'] = _result
            print 'Matched %s info --------------- %s ' % (pattern_config[num]['header'], _result)
else:
            print 'No match for %s info' % pattern_config[num]['header']
return pattern_config
def toExcel(pattern_result,saveFileExcel):
title = [ t['header'] for t in pattern_result ]
content = [ t['result'] for t in pattern_result]
wb = Workbook()
ws = wb.active
ws.title = pattern_result[0]['header']
ws.sheet_properties.tabColor = "1072BA"
ws.append(title)
ws.append(content)
wb.save(saveFileExcel)
def docFilter(fileList):
'''
    Convert Word documents to txt, extract the information, then delete the txt files.
'''
error_file = []
for f in fileList:
# fileName = f.split('\\')[-1].split('.')[-2].decode('gbk')
# filePath = ''.join(f.split('\\')[:-1]).decode('gbk')
f = os.path.realpath(f)
fileName = f.split('\\')[-1].split('.')[0]
print fileName.decode('gbk') + ' start ..'
print '-------------------------------------'
word = Dispatch("Word.Application")
        # run Word silently in the background
word.Visible = 0
word.DisplayAlerts = 0
try:
doc = word.Documents.Open(f,0,ReadOnly = 1)
saveFileTxt = re.sub('.doc','.txt',f).decode('gbk')
            # save as txt format
doc.SaveAs(u'%s' % saveFileTxt ,7)
content = open(saveFileTxt,'r').read()
            # start filtering
pattern_result = Filter(content)
except Exception, E:
print E
error_file.append(f)
continue
finally:
doc.Close()
os.remove(saveFileTxt)
        # write the results to Excel
saveFileExcel = re.sub('.doc','.xlsx',f).decode('gbk')
# saveFileExcel = u'猎聘网.xlsx'
toExcel(pattern_result,saveFileExcel)
# if os.path.exists(u'test.txt'):os.remove(u'test.txt')
word.Quit()
if error_file:
        print 'Failed files:'
print '\n'.join(error_file).decode('gbk')
if __name__ == '__main__':
filelist = get_doc()
docFilter(filelist)
|
python
|
def my_zip(first, second):
    first_it = iter(first)
    second_it = iter(second)
    while True:
        try:
            yield (next(first_it), next(second_it))
        except StopIteration:
            return
a=['s','gff x','c']
b=range(15)
m= my_zip(a,b)
for pair in my_zip(a,b):
print(pair)
a,b
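# Note: for these inputs the built-in zip() yields the same pairs;
# like zip(), my_zip stops as soon as the shorter iterable is exhausted.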
|
python
|
import sqlite3
def changeDb():
databaseFile = "app/database/alunos.db"
escolha = 0
while escolha != 4:
print("""
1 = Numero para contato
2 = Situação escolar
3 = Documentações pendentes
4 = Sair do menu
""")
escolha = int(input("Escolha sua opção: "))
if escolha == 1:
conn = sqlite3.connect(databaseFile)
cursor = conn.cursor()
            id_numpasta = input(
                'Enter the folder number to update the record: ')
            novo_fone = input("Enter a new contact phone number: ")
cursor.execute("""
UPDATE alunos
SET fone = ?
WHERE id_numpasta = ?
""", (novo_fone, id_numpasta,))
conn.commit()
            print('Data updated successfully')
elif escolha == 2:
conn = sqlite3.connect(databaseFile)
cursor = conn.cursor()
            id_numpasta = input(
                'Enter the folder number to update the record: ')
            novo_situacao = input("Enter the current school enrollment status: ")
cursor.execute("""
UPDATE alunos
SET situacao = ?
WHERE id_numpasta = ?
""", (novo_situacao, id_numpasta,))
conn.commit()
            print('Data updated successfully')
elif escolha == 3:
conn = sqlite3.connect(databaseFile)
cursor = conn.cursor()
            id_numpasta = input(
                'Enter the folder number to update the record: ')
            novo_pedencias = input('Enter the updated pending items: ')
cursor.execute("""
UPDATE alunos
SET pedencias = ?
WHERE id_numpasta = ?
""", (novo_pedencias, id_numpasta,))
conn.commit()
            print('Data updated successfully')
elif escolha == 4:
conn = sqlite3.connect(databaseFile)
cursor = conn.cursor()
            print('Exiting...')
conn.close()
else:
            print('Invalid option, please try again!!!')
print('-----------' * 10)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime
import backtrader as bt
from backtrader.utils.py3 import range
class MetaChainer(bt.DataBase.__class__):
def __init__(cls, name, bases, dct):
'''Class has already been created ... register'''
# Initialize the class
super(MetaChainer, cls).__init__(name, bases, dct)
def donew(cls, *args, **kwargs):
'''Intercept const. to copy timeframe/compression from 1st data'''
# Create the object and set the params in place
_obj, args, kwargs = super(MetaChainer, cls).donew(*args, **kwargs)
if args:
_obj.p.timeframe = args[0]._timeframe
_obj.p.compression = args[0]._compression
return _obj, args, kwargs
class Chainer(bt.with_metaclass(MetaChainer, bt.DataBase)):
    '''Class that chains data feeds, delivering one after the other'''
def islive(self):
'''Returns ``True`` to notify ``Cerebro`` that preloading and runonce
should be deactivated'''
return True
def __init__(self, *args):
self._args = args
def start(self):
super(Chainer, self).start()
for d in self._args:
d.setenvironment(self._env)
d._start()
# put the references in a separate list to have pops
self._ds = list(self._args)
self._d = self._ds.pop(0) if self._ds else None
self._lastdt = datetime.min
def stop(self):
super(Chainer, self).stop()
for d in self._args:
d.stop()
def get_notifications(self):
return [] if self._d is None else self._d.get_notifications()
def _gettz(self):
'''To be overriden by subclasses which may auto-calculate the
timezone'''
if self._args:
return self._args[0]._gettz()
return bt.utils.date.Localizer(self.p.tz)
def _load(self):
while self._d is not None:
if not self._d.next(): # no values from current data source
self._d = self._ds.pop(0) if self._ds else None
continue
            # Cannot deliver a date equal to or earlier than one already delivered
dt = self._d.datetime.datetime()
if dt <= self._lastdt:
continue
self._lastdt = dt
for i in range(self._d.size()):
self.lines[i][0] = self._d.lines[i][0]
return True
# Out of the loop -> self._d is None, no data feed to return from
return False
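# A minimal usage sketch (not part of the original module): chain two CSV feeds so
# the second continues where the first ends. The file names and date ranges are
# placeholders assumed to exist only for illustration.
def _example_chain_two_feeds():
    cerebro = bt.Cerebro(stdstats=False)
    d0 = bt.feeds.YahooFinanceCSVData(dataname='feed-2005-2006.csv',
                                      fromdate=datetime(2005, 1, 1),
                                      todate=datetime(2006, 12, 31))
    d1 = bt.feeds.YahooFinanceCSVData(dataname='feed-2007-2008.csv',
                                      fromdate=datetime(2007, 1, 1),
                                      todate=datetime(2008, 12, 31))
    # Chainer delivers d0 until it is exhausted, then continues with d1,
    # skipping any timestamp already delivered by the previous feed.
    cerebro.adddata(Chainer(d0, d1))
    cerebro.run()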
|
python
|
from confluent_kafka import Producer
import rest
import json
class BusProducer:
def __init__(self):
# Pre-shared credentials
self.credentials = json.load(open('bus_credentials.json'))
# Construct required configuration
self.configuration = {
'client.id': 'bus_producer_api',
'group.id': 'bus_producer_api_group',
'bootstrap.servers': ','.join(self.credentials['kafka_brokers_sasl']),
'security.protocol': 'SASL_SSL',
'ssl.ca.location': '/etc/ssl/certs',
'sasl.mechanisms': 'PLAIN',
'sasl.username': self.credentials['api_key'][0:16],
'sasl.password': self.credentials['api_key'][16:48],
'api.version.request': True
}
self.producer = Producer(self.configuration)
def send(self, topic, message):
# Check if topic exists and create it if not
if not self.handle_topic(topic):
return False
        # Produce and flush the message to the bus
try:
self.producer.produce(topic, message.encode('utf-8'), 'key', -1, self.on_delivery)
# self.producer.poll(0)
self.producer.flush()
except Exception as err:
print('Sending data failed')
print(err)
return False
return True
def handle_topic(self, topic_name):
# Create rest client to handle topics
try:
rest_client = rest.MessageHubRest(self.credentials['kafka_admin_url'], self.credentials['api_key'])
except Exception as e:
print(e)
return False
# List all topics
try:
response = rest_client.list_topics()
topics = json.loads(response.text)
except Exception as e:
print(e)
return False
# Check if desired topic exists in topic list
topic_exists = False
for topic in topics:
if topic['name'] == topic_name:
topic_exists = True
# If topic does not exist
if not topic_exists:
# Create topic
try:
response = rest_client.create_topic(topic_name, 1, 1)
print(response.text)
except Exception as e:
print(e)
return False
return True
def on_delivery(self, err, msg):
if err:
# print('Delivery report: Failed sending message {0}'.format(msg.value()))
print(err)
# We could retry sending the message
else:
print('Message produced, offset: {0}'.format(msg.offset()))
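# A minimal usage sketch (not part of the original module). It assumes a valid
# bus_credentials.json is present and that 'example_topic' is an acceptable topic
# name for the Message Hub REST API; both are placeholders.
def _example_send():
    producer = BusProducer()
    payload = json.dumps({'sensor': 'demo', 'value': 42})
    if producer.send('example_topic', payload):
        print('Message delivered (see on_delivery for the reported offset)')
    else:
        print('Message could not be delivered')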
|
python
|
"""ApacheParser is a member object of the ApacheConfigurator class."""
import fnmatch
import itertools
import logging
import os
import re
import subprocess
from letsencrypt import errors
logger = logging.getLogger(__name__)
class ApacheParser(object):
"""Class handles the fine details of parsing the Apache Configuration.
.. todo:: Make parsing general... remove sites-available etc...
    :ivar str root: Normalized absolute path to the server root
        directory. Without trailing slash.
:ivar set modules: All module names that are currently enabled.
:ivar dict loc: Location to place directives, root - configuration origin,
default - user config file, name - NameVirtualHost,
"""
arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
fnmatch_chars = set(["*", "?", "\\", "[", "]"])
def __init__(self, aug, root, ctl):
# Note: Order is important here.
# This uses the binary, so it can be done first.
# https://httpd.apache.org/docs/2.4/mod/core.html#define
# https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
# This only handles invocation parameters and Define directives!
self.variables = {}
self.update_runtime_variables(ctl)
self.aug = aug
# Find configuration root and make sure augeas can parse it.
self.root = os.path.abspath(root)
self.loc = {"root": self._find_config_root()}
self._parse_file(self.loc["root"])
# This problem has been fixed in Augeas 1.0
self.standardize_excl()
# Temporarily set modules to be empty, so that find_dirs can work
# https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
# This needs to come before locations are set.
self.modules = set()
self.init_modules()
# Set up rest of locations
self.loc.update(self._set_locations())
# Must also attempt to parse sites-available or equivalent
# Sites-available is not included naturally in configuration
self._parse_file(os.path.join(self.root, "sites-available") + "/*")
def init_modules(self):
"""Iterates on the configuration until no new modules are loaded.
        .. todo:: This should be attempted to be done with a binary to avoid
            the iteration issue. Else... parse and enable mods at same time.
"""
# Since modules are being initiated... clear existing set.
self.modules = set()
matches = self.find_dir("LoadModule")
iterator = iter(matches)
# Make sure prev_size != cur_size for do: while: iteration
prev_size = -1
while len(self.modules) != prev_size:
prev_size = len(self.modules)
for match_name, match_filename in itertools.izip(
iterator, iterator):
self.modules.add(self.get_arg(match_name))
self.modules.add(
os.path.basename(self.get_arg(match_filename))[:-2] + "c")
def update_runtime_variables(self, ctl):
""""
.. note:: Compile time variables (apache2ctl -V) are not used within the
dynamic configuration files. These should not be parsed or
interpreted.
.. todo:: Create separate compile time variables... simply for arg_get()
"""
stdout = self._get_runtime_cfg(ctl)
variables = dict()
matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
try:
matches.remove("DUMP_RUN_CFG")
except ValueError:
raise errors.PluginError("Unable to parse runtime variables")
for match in matches:
if match.count("=") > 1:
logger.error("Unexpected number of equal signs in "
"apache2ctl -D DUMP_RUN_CFG")
raise errors.PluginError(
"Error parsing Apache runtime variables")
parts = match.partition("=")
variables[parts[0]] = parts[2]
self.variables = variables
def _get_runtime_cfg(self, ctl): # pylint: disable=no-self-use
"""Get runtime configuration info.
:returns: stdout from DUMP_RUN_CFG
"""
try:
proc = subprocess.Popen(
[ctl, "-D", "DUMP_RUN_CFG"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.error(
"Error accessing %s for runtime parameters!%s", ctl, os.linesep)
            raise errors.MisconfigurationError(
                "Error accessing loaded Apache parameters: %s" % ctl)
# Small errors that do not impede
if proc.returncode != 0:
logger.warn("Error in checking parameter list: %s", stderr)
raise errors.MisconfigurationError(
"Apache is unable to check whether or not the module is "
"loaded because Apache is misconfigured.")
return stdout
def filter_args_num(self, matches, args): # pylint: disable=no-self-use
"""Filter out directives with specific number of arguments.
This function makes the assumption that all related arguments are given
in order. Thus /files/apache/directive[5]/arg[2] must come immediately
after /files/apache/directive[5]/arg[1]. Runs in 1 linear pass.
:param string matches: Matches of all directives with arg nodes
:param int args: Number of args you would like to filter
:returns: List of directives that contain # of arguments.
(arg is stripped off)
"""
filtered = []
if args == 1:
for i in range(len(matches)):
if matches[i].endswith("/arg"):
filtered.append(matches[i][:-4])
else:
for i in range(len(matches)):
if matches[i].endswith("/arg[%d]" % args):
# Make sure we don't cause an IndexError (end of list)
# Check to make sure arg + 1 doesn't exist
if (i == (len(matches) - 1) or
not matches[i + 1].endswith("/arg[%d]" % (args + 1))):
filtered.append(matches[i][:-len("/arg[%d]" % args)])
return filtered
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
"""Adds directive and value to IfMod ssl block.
Adds given directive and value along configuration path within
an IfMod mod_ssl.c block. If the IfMod block does not exist in
the file, it is created.
:param str aug_conf_path: Desired Augeas config path to add directive
:param str directive: Directive you would like to add, e.g. Listen
:param args: Values of the directive; str "443" or list of str
:type args: list
"""
# TODO: Add error checking code... does the path given even exist?
# Does it throw exceptions?
if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
# IfModule can have only one valid argument, so append after
self.aug.insert(if_mod_path + "arg", "directive", False)
nvh_path = if_mod_path + "directive[1]"
self.aug.set(nvh_path, directive)
if len(args) == 1:
self.aug.set(nvh_path + "/arg", args[0])
else:
for i, arg in enumerate(args):
self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)
def _get_ifmod(self, aug_conf_path, mod):
"""Returns the path to <IfMod mod> and creates one if it doesn't exist.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
"""
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
if len(if_mods) == 0:
self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
# Strip off "arg" at end of first ifmod path
return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, args):
"""Appends directive to the end fo the file given by aug_conf_path.
.. note:: Not added to AugeasConfigurator because it may depend
on the lens
:param str aug_conf_path: Augeas configuration path to add directive
:param str directive: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(
"%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
else:
self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
def find_dir(self, directive, arg=None, start=None, exclude=True):
"""Finds directive in the configuration.
Recursively searches through config files to find directives
Directives should be in the form of a case insensitive regex currently
.. todo:: arg should probably be a list
.. todo:: arg search currently only supports direct matching. It does
not handle the case of variables or quoted arguments. This should
be adapted to use a generic search for the directive and then do a
case-insensitive self.get_arg filter
Note: Augeas is inherently case sensitive while Apache is case
insensitive. Augeas 1.0 allows case insensitive regexes like
regexp(/Listen/, "i"), however the version currently supported
by Ubuntu 0.10 does not. Thus I have included my own case insensitive
transformation by calling case_i() on everything to maintain
compatibility.
:param str directive: Directive to look for
:param arg: Specific value directive must have, None if all should
be considered
:type arg: str or None
:param str start: Beginning Augeas path to begin looking
:param bool exclude: Whether or not to exclude directives based on
variables and enabled modules
"""
# Cannot place member variable in the definition of the function so...
if not start:
start = get_aug_path(self.loc["root"])
# No regexp code
# if arg is None:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive + "']/arg")
# else:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive +
# "']/* [self::arg='" + arg + "']")
# includes = self.aug.match(start +
# "//* [self::directive='Include']/* [label()='arg']")
regex = "(%s)|(%s)|(%s)" % (case_i(directive),
case_i("Include"),
case_i("IncludeOptional"))
matches = self.aug.match(
"%s//*[self::directive=~regexp('%s')]" % (start, regex))
if exclude:
matches = self._exclude_dirs(matches)
if arg is None:
arg_suffix = "/arg"
else:
arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)
ordered_matches = []
# TODO: Wildcards should be included in alphabetical order
# https://httpd.apache.org/docs/2.4/mod/core.html#include
for match in matches:
dir_ = self.aug.get(match).lower()
if dir_ == "include" or dir_ == "includeoptional":
# start[6:] to strip off /files
#print self._get_include_path(self.get_arg(match +"/arg")), directive, arg
ordered_matches.extend(self.find_dir(
directive, arg,
self._get_include_path(self.get_arg(match + "/arg")),
exclude))
# This additionally allows Include
if dir_ == directive.lower():
ordered_matches.extend(self.aug.match(match + arg_suffix))
return ordered_matches
def get_arg(self, match):
"""Uses augeas.get to get argument value and interprets result.
This also converts all variables and parameters appropriately.
"""
value = self.aug.get(match)
# No need to strip quotes for variables, as apache2ctl already does this
# but we do need to strip quotes for all normal arguments.
# Note: normal argument may be a quoted variable
# e.g. strip now, not later
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
def _exclude_dirs(self, matches):
"""Exclude directives that are not loaded into the configuration."""
filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
else:
valid_matches.append(match)
return valid_matches
def _pass_filter(self, match, filter_):
"""Determine if directive passes a filter.
:param str match: Augeas path
:param list filter: list of tuples of form
[("lowercase if directive", set of relevant parameters)]
"""
match_l = match.lower()
last_match_idx = match_l.find(filter_[0])
while last_match_idx != -1:
# Check args
end_of_if = match_l.find("/", last_match_idx)
# This should be aug.get (vars are not used e.g. parser.aug_get)
expression = self.aug.get(match[:end_of_if] + "/arg")
if expression.startswith("!"):
# Strip off "!"
if expression[1:] in filter_[1]:
return False
else:
if expression not in filter_[1]:
return False
last_match_idx = match_l.find(filter_[0], end_of_if)
return True
def _get_include_path(self, arg):
"""Converts an Apache Include directive into Augeas path.
Converts an Apache Include directive argument into an Augeas
searchable path
.. todo:: convert to use os.path.join()
:param str arg: Argument of Include directive
:returns: Augeas path string
:rtype: str
"""
# Check to make sure only expected characters are used <- maybe remove
# validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
# matchObj = validChars.match(arg)
# if matchObj.group() != arg:
# logger.error("Error: Invalid regexp characters in %s", arg)
# return []
# Remove beginning and ending quotes
arg = arg.strip("'\"")
# Standardize the include argument based on server root
if not arg.startswith("/"):
# Normpath will condense ../
arg = os.path.normpath(os.path.join(self.root, arg))
else:
arg = os.path.normpath(arg)
# Attempts to add a transform to the file if one does not already exist
if os.path.isdir(arg):
self._parse_file(os.path.join(arg, "*"))
else:
self._parse_file(arg)
# Argument represents an fnmatch regular expression, convert it
# Split up the path and convert each into an Augeas accepted regex
# then reassemble
split_arg = arg.split("/")
for idx, split in enumerate(split_arg):
if any(char in ApacheParser.fnmatch_chars for char in split):
# Turn it into a augeas regex
# TODO: Can this instead be an augeas glob instead of regex
split_arg[idx] = ("* [label()=~regexp('%s')]" %
self.fnmatch_to_re(split))
# Reassemble the argument
# Note: This also normalizes the argument /serverroot/ -> /serverroot
arg = "/".join(split_arg)
return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match): # pylint: disable=no-self-use
"""Method converts Apache's basic fnmatch to regular expression.
Assumption - Configs are assumed to be well-formed and only writable by
privileged users.
https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html
:param str clean_fn_match: Apache style filename match, similar to globs
:returns: regex suitable for augeas
:rtype: str
"""
# This strips off final /Z(?ms)
return fnmatch.translate(clean_fn_match)[:-7]
def _parse_file(self, filepath):
"""Parse file with Augeas
Checks to see if file_path is parsed by Augeas
If filepath isn't parsed, the file is added and Augeas is reloaded
:param str filepath: Apache config file path
"""
# Test if augeas included file for Httpd.lens
# Note: This works for augeas globs, ie. *.conf
inc_test = self.aug.match(
"/augeas/load/Httpd/incl [. ='%s']" % filepath)
if not inc_test:
# Load up files
# This doesn't seem to work on TravisCI
# self.aug.add_transform("Httpd.lns", [filepath])
self._add_httpd_transform(filepath)
self.aug.load()
def _add_httpd_transform(self, incl):
"""Add a transform to Augeas.
This function will correctly add a transform to augeas
The existing augeas.add_transform in python doesn't seem to work for
Travis CI as it loads in libaugeas.so.0.10.0
:param str incl: filepath to include for transform
"""
last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
if last_include:
# Insert a new node immediately after the last incl
self.aug.insert(last_include[0], "incl", False)
self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
# On first use... must load lens and add file to incl
else:
# Augeas uses base 1 indexing... insert at beginning...
self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
self.aug.set("/augeas/load/Httpd/incl", incl)
def standardize_excl(self):
"""Standardize the excl arguments for the Httpd lens in Augeas.
Note: Hack!
Standardize the excl arguments for the Httpd lens in Augeas
Servers sometimes give incorrect defaults
Note: This problem should be fixed in Augeas 1.0. Unfortunately,
Augeas 0.10 appears to be the most popular version currently.
"""
# attempt to protect against augeas error in 0.10.0 - ubuntu
# *.augsave -> /*.augsave upon augeas.load()
# Try to avoid bad httpd files
# There has to be a better way... but after a day and a half of testing
# I had no luck
# This is a hack... work around... submit to augeas if still not fixed
excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
"*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
"*~",
self.root + "/*.augsave",
self.root + "/*~",
self.root + "/*/*augsave",
self.root + "/*/*~",
self.root + "/*/*/*.augsave",
self.root + "/*/*/*~"]
for i, excluded in enumerate(excl, 1):
self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)
self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self._set_user_config_file()
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def _set_user_config_file(self):
"""Set the appropriate user configuration file
.. todo:: This will have to be updated for other distros versions
:param str root: pathname which contains the user config
"""
# Basic check to see if httpd.conf exists and
# in hierarchy via direct include
# httpd.conf was very common as a user file in Apache 2.2
if (os.path.isfile(os.path.join(self.root, "httpd.conf")) and
self.find_dir("Include", "httpd.conf", self.loc["root"])):
return os.path.join(self.root, "httpd.conf")
else:
return os.path.join(self.root, "apache2.conf")
def case_i(string):
"""Returns case insensitive regex.
Returns a sloppy, but necessary version of a case insensitive regex.
Any string should be able to be submitted and the string is
escaped and then made case insensitive.
May be replaced by a more proper /i once augeas 1.0 is widely
supported.
:param str string: string to make case i regex
"""
return "".join(["[" + c.upper() + c.lower() + "]"
if c.isalpha() else c for c in re.escape(string)])
def get_aug_path(file_path):
"""Return augeas path for full filepath.
:param str file_path: Full filepath
"""
return "/files%s" % file_path
|
python
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
__author__ = 'ipetrash'
"""Пример отсылки письма, содержащего обычный текст и html, "самому себе"."""
# http://www.tutorialspoint.com/python/python_sending_email.htm
# https://docs.python.org/3.4/library/email-examples.html
if __name__ == '__main__':
mail_sender = 'USERNAME@DOMAIN' # Например: [email protected]
    mail_passwd = 'PASSWORD' # Password for the mail account
    smtp_server = 'YOUR.MAIL.SERVER' # For example: smtp.mail.ru
port = 587
mail_subject = 'Здарова чувак! Hello!!!'
mail_from = mail_sender
mail_to = [
mail_sender
# , '*****@mail.com',
# ...
]
# mail_cc = [
# # '*****@mail.com',
# # '*****@gmail.com',
# ...
# ]
# Create a text/plain message
msg = MIMEMultipart()
msg['From'] = mail_from
msg['To'] = ', '.join(mail_to)
    # msg['Cc'] = ', '.join(mail_cc)  # CC recipients (copy of the message)
msg['Subject'] = mail_subject
# Create the body of the message (a plain-text and an HTML version).
text = "Hi!\nHow are you?\nHere is the link you wanted:\nhttps://www.python.org"
html = """
<html>
<head></head>
<body>
<p>Hi!<br>
How are you?<br>
Here is the <a href="https://www.python.org">link</a> you wanted.
</p>
</body>
</html>
"""
    msg.attach(MIMEText(text))
    msg.attach(MIMEText(html, _subtype='html'))
try:
# Send the message on SMTP server.
with smtplib.SMTP(smtp_server, port) as s:
s.starttls()
s.login(mail_sender, mail_passwd)
s.send_message(msg)
print('Email sent')
except Exception as e:
print('Error sending mail: ' + str(e))
|
python
|
#!/usr/bin/env python
import os
import pyfwk
from pyfi.entity.entity.db import EntityDB
# -----------------------------EXCHANGE-MODEL-----------------------------#
class TypeModel(pyfwk.Model):
model = None
dbase = None
table = None
columns = None
@staticmethod
def instance():
if not TypeModel.model:
TypeModel.model = TypeModel()
return TypeModel.model
def __init__(self):
self.dbase = EntityDB.instance()
self.table = 'type'
id = pyfwk.DBCol('id', 'INTEGER PRIMARY KEY')
symbol = pyfwk.DBCol('symbol', 'TEXT')
name = pyfwk.DBCol('name', 'TEXT')
self.columns = [id, symbol, name]
self.validate()
# ----------------------------------MAIN----------------------------------#
def main():
fm = pyfwk.FileManager.instance()
fm.set_root(os.path.dirname(os.path.dirname(__file__)))
tm = TypeModel.instance()
if __name__ == '__main__':
main()
|
python
|
"""Sensitive Field Type"""
# standard library
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union
# first-party
from tcex.input.field_types.exception import InvalidEmptyValue, InvalidLengthValue, InvalidType
if TYPE_CHECKING: # pragma: no cover
# third-party
from pydantic.fields import ModelField
# first-party
from tcex.input.input import StringVariable
# get tcex logger
logger = logging.getLogger('tcex')
class Sensitive:
"""Sensitive Field Type"""
allow_empty: bool = True
min_length: Optional[int] = None
max_length: Optional[int] = None
def __init__(self, value: Union[str, 'Sensitive']):
"""Initialize the Sensitive object."""
if isinstance(value, Sensitive):
self._sensitive_value = value.value
else:
self._sensitive_value = value
@classmethod
def __get_validators__(cls) -> Callable:
"""Define one or more validators for Pydantic custom type."""
yield cls.validate_type
yield cls.validate_allow_empty
yield cls.validate_max_length
yield cls.validate_min_length
yield cls.wrap_type
def __len__(self) -> int:
"""Return the length of the sensitive value."""
return len(self._sensitive_value)
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
"""Modify the field schema."""
def _update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:
mapping.update({k: v for k, v in update.items() if v is not None})
_update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
def __repr__(self) -> str:
"""."""
return f'''Sensitive('{self}')'''
def __str__(self) -> str:
"""Return the value masked.
If App is running in > DEBUG logging level and the sensitive data is greater
than X, then show the first and last character of the value. This is very
helpful in debugging App where the incorrect credential could have been passed.
"""
if self._sensitive_value and logger.getEffectiveLevel() <= 10: # DEBUG or TRACE
if isinstance(self.value, str) and len(self.value) >= 10:
return f'''{self.value[:1]}{'*' * 4}{self.value[-1:]}'''
return '**********'
@classmethod
def validate_allow_empty(cls, value: Union[str, 'StringVariable'], field: 'ModelField') -> str:
"""Raise exception if value is empty and allow_empty is False."""
if cls.allow_empty is False:
if isinstance(value, str) and value.replace(' ', '') == '':
raise InvalidEmptyValue(field_name=field.name)
return value
@classmethod
def validate_max_length(cls, value: Union[str, 'StringVariable'], field: 'ModelField') -> str:
"""Raise exception if value does not match pattern."""
if cls.max_length is not None and len(value) > cls.max_length:
raise InvalidLengthValue(
field_name=field.name, constraint=cls.max_length, operation='max'
)
return value
@classmethod
def validate_min_length(cls, value: Union[str, 'StringVariable'], field: 'ModelField') -> str:
"""Raise exception if value does not match pattern."""
if cls.min_length is not None and len(value) < cls.min_length:
raise InvalidLengthValue(
field_name=field.name, constraint=cls.min_length, operation='min'
)
return value
@classmethod
def validate_type(cls, value: Union[str, 'StringVariable'], field: 'ModelField') -> str:
"""Raise exception if value is not a String type."""
if not isinstance(value, (bytes, str, Sensitive)):
raise InvalidType(
field_name=field.name, expected_types='(bytes, str)', provided_type=type(value)
)
return value
@property
def value(self) -> str:
"""Return the actual value."""
return self._sensitive_value
@classmethod
def wrap_type(cls, value: Union[str, 'StringVariable']) -> str:
"""Raise exception if value is not a String type."""
return cls(value)
def sensitive(
allow_empty: bool = True,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
) -> type:
"""Return configured instance of String."""
namespace = dict(
allow_empty=allow_empty,
max_length=max_length,
min_length=min_length,
)
return type('ConstrainedSensitive', (Sensitive,), namespace)
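# A minimal usage sketch, assuming pydantic v1 style models (which is what the
# ModelField import above targets); the Credentials model and its field name are
# illustrative only and not part of this module.
def _example_sensitive_field():
    # third-party
    from pydantic import BaseModel
    class Credentials(BaseModel):
        api_token: sensitive(min_length=8)  # type: ignore[valid-type]
    creds = Credentials(api_token='super-secret-token')
    # With default (non-DEBUG) logging the value is fully masked by __str__ ...
    assert str(creds.api_token) == '**********'
    # ... while .value still returns the real secret for use in requests.
    assert creds.api_token.value == 'super-secret-token'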
|
python
|
"""
License
-------
Copyright (C) 2021 - David Fernández Castellanos
You can use this software, redistribute it, and/or modify it under the
terms of the Creative Commons Attribution 4.0 International Public License.
Explanation
---------
This module contains the statistical model of the COVID-19 vaccination campaign
described in assets/model_explanation.html. Moreover, it also includes functions
to sample the model's parameter space.
"""
import numpy as np
import pandas as pd
import time
import datetime
import functools
from collections import defaultdict
import argparse
from argparse import RawTextHelpFormatter
from plot import plot_model_results
def run_single_realization(
p_pro, p_anti, pressure, tau, nv_0, nv_max, max_day_number, N
):
"""
Run a single realization of the statistical model of vaccination campaigns.
This single run corresponds to simulating the evolution of the vaccination campaign
as a function of time. See the assets/model_explanation.html for details on the model.
Parameters
----------
p_pro : float
The probability that a certain person belongs to the pro-vaccines group
p_anti : float
The probability that a specific person belongs to the anti-vaccines group
pressure : float
        Strength of the social pressure effect
tau : float
Duplication time of the weekly arriving vaccines
nv_0 : float
Initial stock of vaccines, measured as a fraction over the population size
    nv_max : float
Maximum weekly delivery capacity, measured as a fraction over the population size
max_day_number : int
Number of days that are going to be simulated
N : int
The population size
Returns
-------
Dictionary (key:string, value:list)
Dictionary with different data collected as a function of the day number
"""
assert p_pro + p_anti <= 1.0
p_agnostics = 1 - (p_pro + p_anti)
n_pro = int(p_pro * N)
n_agnostics = int(p_agnostics * N)
F = lambda t: min(nv_0 * np.exp(np.log(2) * t / (tau * 7)), nv_max) * N
day_number = 0
vaccines_stock = 0
cum_number_vac_received = 0
n_vaccinated = 0
n_waiting = n_pro - n_vaccinated
people_vaccinated_per_hundred = list()
daily_vaccinations_per_million = list()
cum_number_vac_received_per_hundred = list()
vaccines_in_stock_per_hundred = list()
while day_number < max_day_number:
# ------ add arriving vaccines to the stock ------
if day_number % 7 == 0.0:
nv_arriving = int(F(day_number))
else:
nv_arriving = 0
assert nv_arriving >= 0
vaccines_stock += nv_arriving
cum_number_vac_received += nv_arriving
# ------ apply vaccines ------
        # The prob. of having a vaccine available takes into account the whole population,
        # not only the people waiting. For example, if the population is big, the vaccines
        # will be more spread out and less likely to reach any specific person. However, if
        # we used the people waiting instead, we would assume the vaccines are distributed
        # specifically among them. Moreover, this is the prob. of having it available on a
        # specific day. Since we work in cycles of 7 days and only ~2 days a week it is
        # possible to get it, we multiply by ~2/7 to get an effective prob. per day.
proc_vac_available = (2.0 / 7.0) * vaccines_stock / N
delta_n_vacc = np.random.poisson(n_waiting * proc_vac_available)
# don't apply more vaccines than available
delta_n_vacc = min(delta_n_vacc, vaccines_stock)
# don't apply more vaccines than people waiting for it
delta_n_vacc = min(delta_n_vacc, n_waiting)
n_vaccinated += delta_n_vacc
n_waiting -= delta_n_vacc
vaccines_stock -= delta_n_vacc
fract_pop_vaccinated = n_vaccinated / N
# ------ convert agnostics ------
prob_change_mind = fract_pop_vaccinated * pressure
delta_n_agnos = np.random.poisson(n_agnostics * prob_change_mind)
# don't convert more agnostics than agnostics available
delta_n_agnos = min(delta_n_agnos, n_agnostics)
n_agnostics -= delta_n_agnos
n_waiting += delta_n_agnos
day_number += 1
people_vaccinated_per_hundred.append(fract_pop_vaccinated * 100)
daily_vaccinations_per_million.append(delta_n_vacc * 1e6 / N)
cum_number_vac_received_per_hundred.append(cum_number_vac_received * 100 / N)
vaccines_in_stock_per_hundred.append(vaccines_stock * 100 / N)
data = {
"people_vaccinated_per_hundred": people_vaccinated_per_hundred,
"daily_vaccinations_per_million": daily_vaccinations_per_million,
"cum_number_vac_received_per_hundred": cum_number_vac_received_per_hundred,
"vaccines_in_stock_per_hundred": vaccines_in_stock_per_hundred,
}
return data
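# A small, self-contained example of a single model run, kept in a function so it
# does not execute on import (the parameter values are illustrative only and are
# not fitted to any real vaccination campaign):
def _example_single_run():
    data = run_single_realization(
        p_pro=0.35, p_anti=0.2, pressure=0.02, tau=3.5,
        nv_0=0.002, nv_max=0.1, max_day_number=180, N=50000,
    )
    # fraction of the population vaccinated on the last simulated day
    print(data["people_vaccinated_per_hundred"][-1])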
@functools.lru_cache(maxsize=10)
def run_sampling(params, start_date, end_date, CI, N, max_running_time=None):
"""
Sample the model's parameter space. For that, the model is run for
each input combination of parameters.
Parameters
----------
params : tuple of tuples
        Each of the tuples contains a combination of model parameters
        (p_pro, p_anti, pressure, tau, nv_0, nv_max).
See run_single_realization for details.
start_date : datetime.datetime
Starting date
end_date : datetime.datetime
The last date at which the model run stops
CI : float
Value of the quantile used for establishing the confidence intervals
N : int
The population size
Returns
-------
Dictionary of dictionaries
Each dictionary key corresponds to the different quantities returned by run_single_realization.
Each of the values is another dictionary of lists that contains the mean of the quantity, its upper
and lower confidence intervals, and the dates associated with each list index.
"""
starting_time = time.time()
dates = pd.date_range(start_date, end_date, freq="1d")
max_days = len(dates)
data = defaultdict(list)
number_finished_samples = 0
for p_pro, p_anti, pressure, tau, nv_0, nv_max in params:
data_ = run_single_realization(
p_pro, p_anti, pressure, tau, nv_0, nv_max, max_days, N
)
# merge a dict into a dict of lists
for k, v in data_.items():
data[k].append(v)
number_finished_samples += 1
elapsed_time = time.time() - starting_time
if max_running_time is not None and elapsed_time > max_running_time:
break
    # we work with numpy arrays since Dash Store cannot handle DataFrames
data = {k: {"dates": dates, "samples": np.vstack(v)} for k, v in data.items()}
# Note: the average is over a time window, but samples are not mixed here
for k in ["daily_vaccinations_per_million"]:
v = data[k]["samples"]
df = pd.DataFrame(np.vstack(v).T, index=dates)
        # The model simulates the dynamics of the application of a single dose, but in reality
        # (most of) those who got a first dose will get a second one ~30 days later. Since such
        # second doses are included in the daily_vaccinations_per_million from the real-world
        # data, we must also include them in the model results. For that, we shift the original
        # applied doses by 30 days and concatenate the DataFrames.
        # The fact that all the second doses are appended after all the first ones
        # doesn't matter since afterward we will reindex to compute a moving average.
shifted_df = pd.DataFrame(
np.vstack(v).T, index=dates + datetime.timedelta(days=30)
)
df = df.add(shifted_df, fill_value=0.0)
# compute averages over windows of 7 days, as in the real-world data
df = df.reindex(pd.date_range(start=start_date, end=end_date, freq="7d"))
# do not call df.index.values, because that transforms Timestamps to numpy.datetime, and plotly seems to prefer Timestamps
data[k]["dates"] = df.index
data[k]["samples"] = df.values.T
    # get confidence intervals for each date, computed across samples
data_CI = defaultdict(dict)
for k in data.keys():
samples = data[k]["samples"]
quantiles = np.quantile(samples, [(1 - CI)/2., (1 + CI)/2.], axis=0)
data_CI[k]["upper"] = quantiles[1]
data_CI[k]["lower"] = quantiles[0]
data_CI[k]["mean"] = samples.mean(axis=0)
data_CI[k]["dates"] = data[k]["dates"]
data_CI["number_finished_samples"] = number_finished_samples
return data_CI
def sample_param_combinations(
p_pro_bounds,
p_anti_bounds,
pressure_bounds,
tau_bounds,
nv_0_bounds,
nv_max_bounds,
n_rep,
):
"""
Create a sample of parameter combinations. Each parameter
combination is created by drawing values from uniform distributions
with bounds defined by the function's arguments.
Parameters
----------
p_pro_bounds : 2D-tuple of floats
Lower and upper bound for the probability that a certain person belongs to the pro-vaccines group
p_anti_bounds : 2D-tuple of floats
Lower and upper bound for the probability that a specific person belongs to the anti-vaccines group
pressure_bounds : 2D-tuple of floats
Lower and upper bound for the strength of the social pressure effect
tau_bounds : 2D-tuple of floats
Lower and upper bound for the duplication time of the weekly arriving vaccines
nv_0_bounds : 2D-tuple of floats
Lower and upper bound for the initial stock of vaccines measured as a fraction over the population size
nv_max_bounds : 2D-tuple of floats
Lower and upper bound for the maximum weekly delivery capacity measured as a fraction over the population size
n_rep : int
Number of parameter combination, i.e., number of random parameter samples drawn
Returns
-------
Tuple of tuples
        Each of the tuples contains a combination of model parameters
        (p_pro, p_anti, pressure, tau, nv_0, nv_max).
Tuple
The probability that a person belongs to the agnostics group
"""
params_combinations = list()
p_soft_no_values = list()
n = 0
while len(params_combinations) < n_rep:
p_pro = np.random.uniform(p_pro_bounds[0], p_pro_bounds[1])
p_anti = np.random.uniform(p_anti_bounds[0], p_anti_bounds[1])
# use rejection sampling to ensure that p_anti + p_pro < 1
if p_pro + p_anti > 1.0:
# rejection
n += 1
if n > n_rep * 10:
                # if the amount of rejections gets too high, it means that the
                # given upper and lower bounds of p_anti and p_pro are
                # mutually incompatible. Thus, we abort the parameter sampling
return None, None
else:
continue
else:
pressure = np.random.uniform(pressure_bounds[0], pressure_bounds[1])
tau = np.random.uniform(tau_bounds[0], tau_bounds[1])
nv_0 = np.random.uniform(nv_0_bounds[0], nv_0_bounds[1])
nv_max = np.random.uniform(nv_max_bounds[0], nv_max_bounds[1])
            # work with tuples so that we can later use @functools.lru_cache, since it needs
            # hashable types
params_combinations.append(
tuple([p_pro, p_anti, pressure, tau, nv_0, nv_max])
)
p_soft_no_values.append(1 - (p_pro + p_anti))
return tuple(params_combinations), tuple(p_soft_no_values)
def run_model(
    # population parameters
p_pro_bounds,
p_anti_bounds,
pressure_bounds,
# vaccinations parameters
tau_bounds,
nv_0_bounds,
nv_max_bounds,
    # sampling
CI,
n_rep,
N,
date_range,
max_running_time=None,
):
# default output messages
    msg_agnostics_pct = "Agnostics: "
msg_error = ""
# some sliders use values 0-100
params_combinations, p_soft_no_values = sample_param_combinations(
np.array(p_pro_bounds) / 100,
np.array(p_anti_bounds) / 100,
np.array(pressure_bounds),
np.array(tau_bounds),
np.array(nv_0_bounds) / 100,
np.array(nv_max_bounds) / 100,
n_rep,
)
if params_combinations is not None:
# evaluate the agnostics population from the pro and anti vaccines samples
p_soft_no_values = 100 * np.array(p_soft_no_values)
a = max(np.mean(p_soft_no_values) - np.std(p_soft_no_values), 0)
b = np.mean(p_soft_no_values) + np.std(p_soft_no_values)
a_str = "{0:.0f}".format(a)
b_str = "{0:.0f}".format(b)
# if the uncertainty interval is smaller than 1%, report one value instead of the interval
if abs(a - b) < 1:
msg_agnostics_pct += a_str + "%"
else:
msg_agnostics_pct += a_str + " - " + b_str + "%"
else:
msg_error = "ERROR: The pertentages of pro- and anti-vaccines are simultaneously too high. Please reduce them."
return None, msg_error, msg_agnostics_pct
model_results = run_sampling(
params_combinations,
date_range["start_date"],
date_range["end_date"],
CI / 100,
N,
max_running_time,
)
if max_running_time is not None:
number_finished_samples = model_results["number_finished_samples"]
if number_finished_samples < len(params_combinations):
msg_error = f"ERROR: Maximum computation time of {max_running_time}s exceeded. Only {number_finished_samples} of the desired {len(params_combinations)} Monte Carlo runs were performed."
return model_results, msg_error, msg_agnostics_pct
class SplitArgsStr(argparse.Action):
def __call__(self, parser, namespace, values_str, option_string=None):
values = values_str.split(",")
# If ',' is not in the string, the input corresponds to a single value.
# Create list of two values with it.
if len(values) == 1:
values += values
setattr(namespace, self.dest, values)
class SplitArgsFloat(argparse.Action):
def __call__(self, parser, namespace, values_str, option_string=None):
values = [float(x) for x in values_str.split(",")]
# If ',' is not in the string, the input corresponds to a single value.
# Create list of two values with it.
if len(values) == 1:
values += values
setattr(namespace, self.dest, values)
def main():
description = """
This program performs a Monte Carlo sampling of a statistical model of the
COVID-19 vaccination campaign (you can find a detailed explanation of
the model in assets/model_explanation.html).
In each Monte Carlo run, the value of each parameter is drawn from a uniform
probability distribution. The bounds of each distribution are defined in the
command line call as comma-separated strings for each parameter. If instead
of a comma-separated string, a single value is given, that parameter will
assume in every Monte Carlo run exactly that specific value.
When the sampling is complete, the results are automatically rendered as an
interactive plot in your default internet browser.
Example call:
'python model.py --pro=30,40 --anti=17,40 --pressure=0.02,0.025 --dupl_time=3,4 --init_stock=0.2,0.24 --max_delivery=10,10 --date_range=2020-12-30,2021-12-1'
Author: David Fernández Castellanos.
Related links:
- The author's website: https://www.davidfcastellanos.com
- The source code: https://github.com/kastellane/COVID19-Vaccination-Model
- An interactive web app version: https://covid19-vaccination-app.davidfcastellanos.com
- An associated blog post: https://www.davidfcastellanos.com/covid-19-vaccination-model
"""
parser = argparse.ArgumentParser(
description=description, formatter_class=RawTextHelpFormatter
)
parser.add_argument(
"--pro",
type=str,
help="comma-separated upper and lower bounds for the probability that a certain person belongs to the pro-vaccines group",
default="30,40",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--anti",
type=str,
help="comma-separated upper and lower bounds for the probability that a specific person belongs to the anti-vaccines group",
default="30,40",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--pressure",
type=str,
help="comma-separated upper and lower bounds for the strenght of the social pressure effect",
default="0.02,0.025",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--dupl_time",
type=str,
help="comma-separated upper and lower bounds for the duplication time of the weekly arriving vaccines",
default="3,4",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--init_stock",
type=str,
help="comma-separated upper and lower bounds for the initial stock of vaccines, measured as a percentege of the population size",
default="0.2,0.2",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--max_delivery",
type=str,
help="comma-separated upper and lower bounds for the maximum weekly delivery capacity, measured as a percentage over the population size",
default="10,10",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--mc_samples",
type=int,
help="number of Monte Carlo samples (optional)",
default="100",
)
parser.add_argument(
"--date_range",
type=str,
help="comma-separated starting and ending dates (optional)",
default="2020-12-30,2021-12-1",
action=SplitArgsStr,
required=True,
)
parser.add_argument(
"--CI",
type=float,
help="value of the quantile used for establishing the confidence intervals",
default="0.95",
)
args = vars(parser.parse_args())
    # population parameters
p_pro_bounds = args["pro"]
p_anti_bounds = args["anti"]
pressure_bounds = args["pressure"]
# vaccinations parameters
tau_bounds = args["dupl_time"]
nv_0_bounds = args["init_stock"]
nv_max_bounds = args["max_delivery"]
    # sampling
n_rep = args["mc_samples"]
N = 50000
start_date = args["date_range"][0]
end_date = args["date_range"][1]
CI = args["CI"]
date_range = dict(start_date=start_date, end_date=end_date)
model_results, msg_error, msg_agnostics_pct = run_model(
        # population parameters
p_pro_bounds,
p_anti_bounds,
pressure_bounds,
# vaccinations parameters
tau_bounds,
nv_0_bounds,
nv_max_bounds,
        # sampling
CI,
n_rep,
N,
date_range,
)
if msg_error != "":
print(msg_error)
else:
fig = plot_model_results(model_results, CI)
# plot_country_data(fig, selected_countries, country_data)
fig.show(renderer="browser")
return
if __name__ == "__main__":
main()
|
python
|
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
import torch.nn as nn
from torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec
class TestSizeConverter(AccTestCase):
def test_size(self):
class Size(nn.Module):
def forward(self, x):
bs = x.size(0)
return x.view(bs, -1)
inputs = [torch.randn(1, 2, 3, 4)]
self.run_test(Size(), inputs, expected_ops={acc_ops.size})
def test_size_dynamic_shape(self):
class Size(nn.Module):
def forward(self, x):
bs = x.size(0)
return x.view(bs, -1)
input_specs = [
InputTensorSpec(
shape=(-1, 12, 32),
dtype=torch.float32,
shape_ranges=[((1, 12, 32), (3, 12, 32), (100, 12, 32))],
),
]
self.run_test_with_dynamic_shape(
Size(), input_specs, expected_ops={acc_ops.size}
)
|
python
|
from authlib.oauth2.rfc8414 import AuthorizationServerMetadata
|
python
|
# db_util.py
from datetime import datetime
import sqlite3
import os
import inspect
import uuid #uuid4() --> cookie
from utils import err, done,log
try:
# log(os.environ['GATEWAY_INTERFACE'])
if 'cgi' in os.environ['GATEWAY_INTERFACE'].lower():
DATABASE = 'csci4140.db'
else:
DATABASE = "csci4140.db"
except:
DATABASE = "csci4140.db"
# Should not modify USER_TABLE and IMAGE_TABLE!!!!!!!
USER_TABLE = "user"
IMAGE_TABLE = "image_link"
"""
@ DatabaseInstance has these object:
- conn | connection
- curs | cursor
csci4140.db
@@@ admin @@@
| username: TEXT
| password: TEXT
@@@ user @@@
| username: TEXT
| password: TEXT
| cookie: TEXT
@@@ image_link @@@
| owner: TEXT
| private: INTEGER (1/ 0)
| image_url: TEXT
| timestamp: DATETIME
"""
# Singleton
class DatabaseInstance(object):
@staticmethod
def init_db(database):
try:
conn = sqlite3.connect(database)
curs = conn.cursor()
curs.execute('''CREATE TABLE IF NOT EXISTS user (
username TEXT PRIMARY KEY,
password TEXT,
cookie TEXT
)''')
curs.execute('''CREATE TABLE IF NOT EXISTS admin (
username TEXT PRIMARY KEY,
password TEXT
)''')
curs.execute('''CREATE TABLE IF NOT EXISTS image_link (
owner TEXT,
private INTEGER,
image_url TEXT,
Timestamp TEXT
)''')
conn.commit()
return conn, curs
except Exception as error:
err(str(error))
return None, None
DatabaseInstance.conn, DatabaseInstance.curs = DatabaseInstance.init_db(DATABASE)
# DEBUG
def debug():
clean_table(USER_TABLE)
remove_table(USER_TABLE)
    clean_table(IMAGE_TABLE)
    remove_table(IMAGE_TABLE)
reset_conn()
DatabaseInstance.conn, DatabaseInstance.curs = DatabaseInstance.init_db(DATABASE)
create_user("bengood362","123456")
create_user("benben123","123456")
create_user("benben123","456789")
create_user("ben123","456789")
print list_table(USER_TABLE)
update_user("bengood362","234567","987654")
print list_table(USER_TABLE)
update_user("bengood362","123456","987654")
print list_table(USER_TABLE)
print login_user("bengood362", "987654")
print table_len(USER_TABLE)
reset_conn()
print login_user("bengood362", "987654")
print list_table(USER_TABLE)
### Image method
# Timestamp will be automatically generated by SQLite3
def create_image(username, visibility, image_link):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
if visibility == "public":
private = 0
else:
private = 1
timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
# Image name statistically won't collide because there is uuid.uuid4().hex[8:] appended
curs.execute("INSERT INTO image_link VALUES('{0}',{1},'{2}','{3}')".format(username, private, image_link, timestamp))
conn.commit()
done(username+":create image success")
return (True, "create_image_success")
except Exception as error:
err(error)
return (False,str(error))
def read_public_image(username, page_number, limit=8):
try:
page_number = int(page_number)
limit = int(limit)
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
res = curs.execute("SELECT image_url, Timestamp FROM image_link WHERE private=0 ORDER BY Timestamp DESC").fetchall()
index_from = min(len(res),(page_number-1)*limit)
index_to = min(len(res),(page_number)*limit)
photo_links = res[index_from:index_to]
done("successfully fetched public images")
total_page = max(1, ((len(res)-1)//8)+1)
return (True, photo_links, total_page)
except Exception as error:
err(error)
return (False,str(error), 0)
def read_logged_image(username, page_number, limit=8):
try:
page_number = int(page_number)
limit = int(limit)
username = str(username)
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
res = curs.execute("SELECT image_url, Timestamp FROM image_link WHERE ((owner='{0}' and private=1) or (private=0)) ORDER BY Timestamp DESC".format(username)).fetchall()
index_from = min(len(res),(page_number-1)*limit)
index_to = min(len(res),(page_number)*limit)
photo_links = res[index_from:index_to]
done("successfully fetched public images")
total_page = max(1, ((len(res)-1)//8)+1)
return (True, photo_links, total_page)
except Exception as error:
err(error)
return (False,str(error), 0)
### User method
# TESTED! check if user exists -> create
def admin_exist():
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
length = len(curs.execute("SELECT rowid FROM admin").fetchall())
if length == 0:
return False
else:
return True
except Exception as error:
err(error)
return True
def create_admin(password):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
curs.execute("INSERT INTO admin VALUES('{0}','{1}');".format("Admin", password))
conn.commit()
done("create admin success")
return (True, "change admin password success")
except Exception as error:
err(str(error))
return (False, str(error))
def login_admin(password):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
rowid = curs.execute("SELECT rowid FROM admin WHERE username='{0}' and password='{1}'".format("Admin", password)).fetchall()
if rowid != 0:
return (True,"login success")
else:
return (False,"wrong password")
except Exception as error:
err(error)
        return (False, str(error))
def create_user(username, password):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
user_not_exist = not entry_exist(USER_TABLE, "username", username)
if user_not_exist:
curs.execute("INSERT INTO user VALUES('{0}','{1}', NULL);".format(username, password))
conn.commit()
done(username+":create user success")
return (True, "register_success")
else:
err(username+":user exists")
return (False, "user_exists")
except Exception as error:
err(username+str(error))
return (False, str(error))
def get_username(cookie):
try:
curs = DatabaseInstance.curs
usernames = curs.execute("SELECT username FROM user WHERE cookie='{0}'".format(cookie)).fetchall()
if len(usernames) != 0:
username = usernames[0][0]
if username == None:
err("Cannot find username with such user cookie, please login")
return (False,'Cannot find username with such user cookie, please login')
else:
done("Username is retrieved by this cookie {0}".format(cookie))
return (True,username)
else:
err("Cannot find any user with such cookie")
return (False,'Cannot find any user with such cookie')
except Exception as error:
err(error)
return (False,error)
#For find matching cookie with username
def get_cookie(username):
try:
curs = DatabaseInstance.curs
cookies = curs.execute("SELECT cookie FROM user WHERE username='{0}'".format(username)).fetchall()
if len(cookies) != 0:
cookies = cookies[0][0]
if cookies == None:
err("Cannot find cookie with such user name, please login")
return (False,"Cannot find cookie with such user name, please login")
else:
done("Cookie retrieved for {0}".format(username))
return (True,cookies)
else:
return (False,"Cannot find any user with such user name")
except Exception as error:
err(error)
return (False,error)
#NOTE: duplicate of get_username(cookie), but I don't have the heart to delete it...
def resume_session(cookie):
try:
curs = DatabaseInstance.curs
users = curs.execute("SELECT username FROM user WHERE cookie='{0}'".format(cookie)).fetchall()
if len(users) == 0:
err("Cookie invalid, please try to clean cookie from browser")
return False
return True
except Exception as error:
err(error)
return False
# clean cookies
def logout(username):
try:
curs = DatabaseInstance.curs
conn = DatabaseInstance.conn
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
curs.execute("UPDATE user SET cookie=NULL WHERE username='{0}';".format(username))
conn.commit()
done(username+":logout")
return (True,'')
except Exception as error:
err(username+str(error))
return (False, str(error))
# TESTED!
def login_user(username, password):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
success = len(curs.execute("SELECT * from user WHERE username='{0}' AND password='{1}';".format(username, password)).fetchall()) >= 1
if success:
cookie=uuid.uuid4().hex
p=curs.execute("UPDATE user SET cookie='{0}' WHERE username='{1}';".format(cookie, username))
conn.commit()
done(username+":login success, {0}".format(cookie))
return (True,'login success')
else:
err(username+":wrong password")
return (False,"wrong_password")
except Exception as error:
err(username+str(error))
return (False,str(error))
# TESTED! check if old_pass correct --> update
def update_user(username, old_pass, password):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
res = curs.execute("SELECT password FROM user WHERE username='{0}'".format(username)).fetchone()
if res == None:
err(username+":password cannot be fetched for this user")
return (False,username+":pasword cannot be fetched for this user")
saved_pass = res[0]
if saved_pass != old_pass:
err(username+":old password is incorrect")
return (False,"old password does not match")
else:
curs.execute("UPDATE user SET password='{0}' WHERE username='{1}';".format(password, username))
conn.commit()
done(username+":password changed")
return (True,'password has changed')
except Exception as error:
err(username+str(error))
return (False,str(error))
# common method
# TESTED!
def entry_exist(tablename, column_key, column_val):
try:
curs = DatabaseInstance.curs
stmt = "SELECT EXISTS(SELECT * FROM {0} WHERE {1}='{2}')".format(tablename, column_key, column_val)
res = curs.execute(stmt).fetchone()[0]
# done("entry_exist checked with {0},{1},{2},output {3}".format(tablename, column_key, column_val, res!=0))
return res != 0
except Exception as error:
err(error)
return True
# TESTED!
def list_table(tablename):
try:
curs = DatabaseInstance.curs
return curs.execute("SELECT * FROM {0}".format(tablename)).fetchall()
except Exception as error:
err(error)
return False
# TESTED!
def table_len(tablename):
try:
curs = DatabaseInstance.curs
        return len(curs.execute("SELECT * from {0};".format(tablename)).fetchall())
except Exception as error:
err(error)
return False
# TESTED!
def clean_table(tablename):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
curs.execute("DELETE FROM {0};".format(tablename))
conn.commit()
done("table cleaned for {0}".format(tablename))
return (True,'clean successfully')
except Exception as error:
err(error)
return (False,str(error))
# TESTED!
def remove_table(tablename):
try:
conn = DatabaseInstance.conn
curs = DatabaseInstance.curs
curs.execute("DROP TABLE {0};".format(tablename))
conn.commit()
done("table removed for {0}".format(tablename))
return (True,"Success")
except Exception as error:
err(error)
return (False,str(error))
# TESTED!
def reset_conn():
try:
close_conn()
DatabaseInstance.conn, DatabaseInstance.curs = DatabaseInstance.init_db(DATABASE)
return True
except Exception as error:
err(error)
return False
# TESTED!
def close_conn():
try:
conn = DatabaseInstance.conn
conn.close()
DatabaseInstance.conn = None
DatabaseInstance.curs = None
return True
except Exception as error:
err(error)
return False
if __name__ == "__main__":
pass
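    # A minimal usage sketch (kept commented out; not part of the original module).
    # The username/password below are illustrative only.
    #   ok, msg = login_user("alice", "secret")
    #   print(entry_exist("user", "username", "alice"))
    #   logout("alice")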
|
python
|
from django.db.models import Choices
class ValueTypes(Choices):
PERCENTAGE = 'percentage'
FIXED_AMOUNT = 'fixed amount'
FREE_SHIPPING = 'free shipping'
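# A minimal usage sketch, not part of the original module: Choices subclasses
# expose .choices/.values/.labels, which is what a model field's `choices=`
# argument is usually given, e.g. models.CharField(choices=ValueTypes.choices).
if __name__ == "__main__":
    print(ValueTypes.choices)  # [('percentage', 'Percentage'), ('fixed amount', 'Fixed Amount'), ('free shipping', 'Free Shipping')]
    print(ValueTypes.values)   # ['percentage', 'fixed amount', 'free shipping']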
|
python
|
import logging
from .builder import TargetBuilder
from .config import YAMLParser
from .device import RunnerFactory
from .testcase import TestCaseFactory
class TestsRunner:
"""Class responsible for loading, building and running tests"""
def __init__(self, targets, test_paths, build=True):
self.targets = targets
self.test_configs = []
self.test_paths = test_paths
self.tests_per_target = {k: [] for k in targets}
self.build = build
self.runners = {target: RunnerFactory.create(target) for target in self.targets}
self.passed = 0
self.failed = 0
self.skipped = 0
def search_for_tests(self):
paths = []
for path in self.test_paths:
candidate = list(path.rglob('test*.yaml'))
if not candidate:
                raise ValueError(f'Test path {path} does not contain a .yaml test configuration')
paths.extend(candidate)
self.test_paths = paths
def parse_tests(self):
self.test_configs = []
for path in self.test_paths:
parser = YAMLParser(path, self.targets)
testcase_config = parser.parse_test_config()
self.test_configs.extend(testcase_config)
logging.debug(f"File {path} parsed successfuly\n")
def run(self):
self.search_for_tests()
self.parse_tests()
for test_config in self.test_configs:
test = TestCaseFactory.create(test_config)
self.tests_per_target[test.target].append(test)
if self.build:
for target in self.targets:
TargetBuilder(target).build()
for target, tests in self.tests_per_target.items():
for test_case in tests:
test_case.log_test_started()
self.runners[target].run(test_case)
test_case.log_test_status()
for target, tests in self.tests_per_target.items():
# Convert bools to int
for test in tests:
self.passed += int(test.passed())
self.failed += int(test.failed())
self.skipped += int(test.skipped())
return self.passed, self.failed, self.skipped
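# A minimal usage sketch, not part of the original module: the target name and
# test directory below are hypothetical, and RunnerFactory/TargetBuilder are
# assumed to know how to handle them.
if __name__ == "__main__":
    from pathlib import Path
    runner = TestsRunner(targets=["host-pc"], test_paths=[Path("tests")], build=False)
    passed, failed, skipped = runner.run()
    logging.info(f"passed={passed} failed={failed} skipped={skipped}")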
|
python
|
from base import PMLM
from models import EDMM, MLMM, MEMM, DCSMM, DEDMM, DKLMM
import utils
|
python
|
from JumpScale import j
import sys
import fcntl
import os
import time
class Empty():
pass
class AAProcessManagerCmds():
ORDER = 100
def __init__(self, daemon=None):
self._name = "pm"
self.daemon = daemon
self._reloadtime = time.time()
if daemon is not None:
self.daemon._adminAuth = self._adminAuth
def stop(self, session=None):
print("STOP PROCESS MANAGER\n\n\n\n\n")
if session is not None:
self._adminAuth(session.user, session.passwd)
# raise RuntimeError("STOP APPLICATION 112299")
args = sys.argv[:]
args.insert(0, sys.executable)
max_fd = 1024
for fd in range(3, max_fd):
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
os.chdir("%s/apps/processmanager/" % j.dirs.baseDir)
os.execv(sys.executable, args)
def reloadjumpscripts(self, session=None):
if self._reloadtime + 5 > time.time():
print("Not reloading")
return
print("RELOAD JUMPSCRIPTS\n\n\n\n\n")
if session is not None:
self._adminAuth(session.user, session.passwd)
s = self.daemon.cmdsInterfaces["jumpscripts"]
s.loadJumpscripts(None)
def _init(self):
self.childrenPidsFound = {} # children already found, to not double count
# make sure the empty queues no longer needed
def getMonitorObject(self, name, id, monobject=None, lastcheck=0, session=None):
if session is not None:
self._adminAuth(session.user, session.passwd)
if name not in j.core.processmanager.monObjects.__dict__:
raise RuntimeError("Could not find factory for monitoring object:%s" % name)
if lastcheck == 0:
lastcheck = time.time()
val = j.core.processmanager.monObjects.__dict__[name].get(id, monobject=monobject, lastcheck=lastcheck)
if session is not None:
return val.__dict__
else:
return val
def exit(self, session=None):
if session is not None:
self._adminAuth(session.user, session.passwd)
j.application.stop()
def _adminAuth(self, user, passwd):
return True
|
python
|
import pandas as pd
import datetime
import pandas_datareader.data as web
import matplotlib.pyplot as plt
from matplotlib import style
start = datetime.datetime(2015, 1, 1)  # Date for the desired stock price
end = datetime.datetime.now()
df = web.DataReader("ORK.OL", "yahoo", start, end)  # Value of the Orkla share from 2015 until today.
style.use('fivethirtyeight')
print(df.head())
df['High'].plot()  # uses matplotlib for graphical visualization.
plt.legend()
plt.show()
|
python
|
"""
Implementation of geo query language
"""
|
python
|
# Chapter 3: Regular expressions
import json
import gzip
def extract(title):
with gzip.open('jawiki-country.json.gz', 'rt', encoding='utf8') as fin:
for line in fin:
jsd = json.loads(line)
if jsd['title'] == title:
return jsd['text']
return ''
def main():
article = extract('イギリス')
with open('england-article.txt', 'w', encoding='utf8') as fout:
fout.write(article)
if __name__ == '__main__':
main()
|
python
|
t = int(input())
for i in range(t):
    words = input().split()
    if 'not' in words:
        print('Real Fancy')
    else:
        print('regularly fancy')
|
python
|
#!/usr/bin/python3
import pytest
from brownie import network, Contract, Wei, chain, reverts
@pytest.fixture(scope="module")
def requireMainnetFork():
assert (network.show_active() == "mainnet-fork" or network.show_active() == "mainnet-fork-alchemy")
@pytest.fixture(scope="module")
def iUSDC(accounts, LoanTokenLogicStandard):
iUSDC = loadContractFromAbi(
"0x32E4c68B3A4a813b710595AebA7f6B7604Ab9c15", "iUSDC", LoanTokenLogicStandard.abi)
return iUSDC
@pytest.fixture(scope="module")
def BZX(accounts, LoanTokenLogicStandard, interface):
BZX = loadContractFromAbi(
"0xD8Ee69652E4e4838f2531732a46d1f7F584F0b7f", "BZX", abi=interface.IBZx.abi)
return BZX
@pytest.fixture(scope="module")
def BZRX(accounts, TestToken):
return Contract.from_abi("BZRX", address="0x56d811088235F11C8920698a204A5010a788f4b3", abi=TestToken.abi)
@pytest.fixture(scope="module")
def GOVERNANCE_DELEGATOR(accounts, GovernorBravoDelegator, STAKING, TIMELOCK, GovernorBravoDelegate, BZRX):
# ADMIN = accounts[0]
# MIN_VOTINGPEROD = 5760
# MIN_VOTING_DELAY = 1
# MIN_PROPOSAL_THRESHOLD = 5150000e18
# impl = accounts[0].deploy(GovernorBravoDelegate)
# governorBravoDelegator = accounts.at(STAKING.owner()).deploy(GovernorBravoDelegator, TIMELOCK, STAKING, ADMIN, impl, MIN_VOTINGPEROD, MIN_VOTING_DELAY, MIN_PROPOSAL_THRESHOLD)
gov = Contract.from_abi("governorBravoDelegator", address="0x9da41f7810c2548572f4Fa414D06eD9772cA9e6E", abi=GovernorBravoDelegate.abi)
# init timelock below
calldata = TIMELOCK.setPendingAdmin.encode_input(gov.address)
eta = chain.time()+TIMELOCK.delay() + 10
bzxOwner = STAKING.owner()
TIMELOCK.queueTransaction(TIMELOCK, 0, b"", calldata, eta, {'from': bzxOwner})
chain.sleep(eta-chain.time())
chain.mine()
TIMELOCK.executeTransaction(TIMELOCK, 0, b"", calldata, eta, {'from': bzxOwner})
TIMELOCK.acceptAdmin({'from': gov})
gov.__setPendingLocalAdmin(TIMELOCK, {'from': bzxOwner})
gov.__acceptLocalAdmin({'from': TIMELOCK})
# at this point gov and timelock owns each other.
assert gov.admin() == TIMELOCK
assert TIMELOCK.admin() == gov
BZRX.transferFrom("0xBE0eB53F46cd790Cd13851d5EFf43D12404d33E8", bzxOwner, 50*1e6*1e18, {'from': "0xBE0eB53F46cd790Cd13851d5EFf43D12404d33E8"})
STAKING.stake([BZRX], [BZRX.balanceOf(bzxOwner)], {'from': bzxOwner})
STAKING.setGovernor(gov, {"from": bzxOwner})
return gov
@pytest.fixture(scope="module")
def STAKING(StakingV1_1, accounts, StakingProxy):
bzxOwner = "0xB7F72028D9b502Dc871C444363a7aC5A52546608"
stakingAddress = "0xe95Ebce2B02Ee07dEF5Ed6B53289801F7Fc137A4"
proxy = Contract.from_abi("staking", address=stakingAddress,abi=StakingProxy.abi)
impl = accounts[0].deploy(StakingV1_1)
proxy.replaceImplementation(impl, {"from": bzxOwner})
return Contract.from_abi("staking", address=stakingAddress,abi=StakingV1_1.abi)
@pytest.fixture(scope="module")
def TIMELOCK(Timelock, accounts):
# hours12 = 12*60*60
# days2 = 2*24*60*60
# timelock = accounts[0].deploy(Timelock, accounts[0], days2)
# return timelock
timelock = Contract.from_abi("TIMELOCK", address="0xfedC4dD5247B93feb41e899A09C44cFaBec29Cbc", abi=Timelock.abi, owner=accounts[0])
return timelock
@pytest.fixture(scope="module")
def iUSDC(LoanTokenLogicStandard):
iUSDC = loadContractFromAbi(
"0x32E4c68B3A4a813b710595AebA7f6B7604Ab9c15", "iUSDC", LoanTokenLogicStandard.abi)
return iUSDC
@pytest.fixture(scope="module")
def TOKEN_SETTINGS(LoanTokenSettings):
return Contract.from_abi(
"loanToken", address="0x11ba2b39bc80464c14b7eea54d2ec93d8f60e7b8", abi=LoanTokenSettings.abi)
@pytest.fixture(scope="function", autouse=True)
def isolate(fn_isolation):
pass
def loadContractFromAbi(address, alias, abi):
try:
return Contract(alias)
except ValueError:
contract = Contract.from_abi(alias, address=address, abi=abi)
return contract
def testGovernance(requireMainnetFork, GOVERNANCE_DELEGATOR, TIMELOCK, STAKING, TOKEN_SETTINGS, iUSDC, accounts):
proposer = "0x95BeeC2457838108089fcD0E059659A4E60B091A"
bzxOwner = "0xB7F72028D9b502Dc871C444363a7aC5A52546608"
# make a proposal to change iUSDC name
newName = iUSDC.name() + "1"
calldata = TOKEN_SETTINGS.initialize.encode_input(iUSDC.loanTokenAddress(), newName, iUSDC.symbol())
calldata2 = iUSDC.updateSettings.encode_input(TOKEN_SETTINGS, calldata)
tx = GOVERNANCE_DELEGATOR.propose([iUSDC.address],[0],[""],[calldata2],"asdf", {"from": proposer})
proposalCount = GOVERNANCE_DELEGATOR.proposalCount()
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
id = proposal[0]
eta = proposal[2]
startBlock = proposal[3]
endBlock = proposal[4]
forVotes = proposal[5]
againstVotes = proposal[6]
abstainVotes = proposal[7]
canceled = proposal[8]
assert GOVERNANCE_DELEGATOR.state.call(id) == 0
chain.mine()
chain.mine(startBlock - chain.height)
# after first vote state is active
tx = GOVERNANCE_DELEGATOR.castVote(id,1, {"from" : bzxOwner})
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine(endBlock - chain.height)
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine()
assert GOVERNANCE_DELEGATOR.state.call(id) == 4
GOVERNANCE_DELEGATOR.queue(id, {"from" : bzxOwner})
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
eta = proposal[2]
chain.sleep(eta - chain.time())
chain.mine()
GOVERNANCE_DELEGATOR.execute(id, {"from" : bzxOwner})
assert True
def testGovernanceProposeCancel(requireMainnetFork, GOVERNANCE_DELEGATOR, TIMELOCK, STAKING, TOKEN_SETTINGS, iUSDC, accounts):
proposer = "0x95BeeC2457838108089fcD0E059659A4E60B091A"
bzxOwner = "0xB7F72028D9b502Dc871C444363a7aC5A52546608"
# make a proposal to change iUSDC name
newName = iUSDC.name() + "1"
calldata = TOKEN_SETTINGS.initialize.encode_input(iUSDC.loanTokenAddress(), newName, iUSDC.symbol())
calldata2 = iUSDC.updateSettings.encode_input(TOKEN_SETTINGS, calldata)
tx = GOVERNANCE_DELEGATOR.propose([iUSDC.address],[0],[""],[calldata2],"asdf", {"from": proposer})
proposalCount = GOVERNANCE_DELEGATOR.proposalCount()
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
id = proposal[0]
eta = proposal[2]
startBlock = proposal[3]
endBlock = proposal[4]
forVotes = proposal[5]
againstVotes = proposal[6]
abstainVotes = proposal[7]
canceled = proposal[8]
tx = GOVERNANCE_DELEGATOR.cancel(id, {"from": bzxOwner})
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
canceled = proposal[8]
assert canceled == True
def testGovernanceProposeVotingActiveCancel(requireMainnetFork, GOVERNANCE_DELEGATOR, TIMELOCK, STAKING, TOKEN_SETTINGS, iUSDC, accounts):
proposer = "0x95BeeC2457838108089fcD0E059659A4E60B091A"
bzxOwner = "0xB7F72028D9b502Dc871C444363a7aC5A52546608"
# make a proposal to change iUSDC name
newName = iUSDC.name() + "1"
calldata = TOKEN_SETTINGS.initialize.encode_input(iUSDC.loanTokenAddress(), newName, iUSDC.symbol())
calldata2 = iUSDC.updateSettings.encode_input(TOKEN_SETTINGS, calldata)
tx = GOVERNANCE_DELEGATOR.propose([iUSDC.address],[0],[""],[calldata2],"asdf", {"from": proposer})
proposalCount = GOVERNANCE_DELEGATOR.proposalCount()
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
id = proposal[0]
eta = proposal[2]
startBlock = proposal[3]
endBlock = proposal[4]
forVotes = proposal[5]
againstVotes = proposal[6]
abstainVotes = proposal[7]
canceled = proposal[8]
chain.mine()
chain.mine(startBlock - chain.height)
tx = GOVERNANCE_DELEGATOR.castVote(id,1, {"from" : bzxOwner})
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
tx = GOVERNANCE_DELEGATOR.cancel(id, {"from": bzxOwner})
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
canceled = proposal[8]
assert canceled == True
def testGovernanceProposeVotingActiveVotingEndsDefeated(requireMainnetFork, GOVERNANCE_DELEGATOR, TIMELOCK, STAKING, TOKEN_SETTINGS, iUSDC, accounts):
proposer = "0x95BeeC2457838108089fcD0E059659A4E60B091A"
bzxOwner = "0xB7F72028D9b502Dc871C444363a7aC5A52546608"
# make a proposal to change iUSDC name
newName = iUSDC.name() + "1"
calldata = TOKEN_SETTINGS.initialize.encode_input(iUSDC.loanTokenAddress(), newName, iUSDC.symbol())
calldata2 = iUSDC.updateSettings.encode_input(TOKEN_SETTINGS, calldata)
tx = GOVERNANCE_DELEGATOR.propose([iUSDC.address],[0],[""],[calldata2],"asdf", {"from": proposer})
proposalCount = GOVERNANCE_DELEGATOR.proposalCount()
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
id = proposal[0]
eta = proposal[2]
startBlock = proposal[3]
endBlock = proposal[4]
forVotes = proposal[5]
againstVotes = proposal[6]
abstainVotes = proposal[7]
canceled = proposal[8]
chain.mine()
chain.mine(startBlock - chain.height)
tx = GOVERNANCE_DELEGATOR.castVote(id,0, {"from" : bzxOwner})
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine(endBlock - chain.height)
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine()
assert GOVERNANCE_DELEGATOR.state.call(id) == 3
with reverts("GovernorBravo::queue: proposal can only be queued if it is succeeded"):
GOVERNANCE_DELEGATOR.queue(id, {"from" : bzxOwner})
tx = GOVERNANCE_DELEGATOR.cancel(id, {"from": bzxOwner})
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
canceled = proposal[8]
assert canceled == True
def testGovernanceReallyComplexTXToSetITokens(requireMainnetFork, GOVERNANCE_DELEGATOR, TIMELOCK, STAKING, TOKEN_SETTINGS, iUSDC, accounts,TestToken, LoanTokenLogicStandard, TokenRegistry, LoanToken, LoanTokenSettings, interface, PriceFeeds, ProtocolSettings, LoanTokenSettingsLowerAdmin, BZRX):
proposer = "0x95BeeC2457838108089fcD0E059659A4E60B091A"
bzxOwner = accounts.at("0xB7F72028D9b502Dc871C444363a7aC5A52546608", force=True)
    # beginning of building calldata arrays
# calldataArray = getTransactionListToDeployITokens(accounts)
calldataArray = []
targets = []
underlyingSymbol = "ABC"
iTokenSymbol = "i{}".format(underlyingSymbol)
iTokenName = "Fulcrum {} iToken ({})".format(underlyingSymbol, iTokenSymbol)
loanTokenAddress = bzxOwner.deploy(TestToken, underlyingSymbol, underlyingSymbol, 18, 1e50).address
loanTokenLogicStandard = bzxOwner.deploy(LoanTokenLogicStandard, bzxOwner).address
bzxRegistry = Contract.from_abi("bzxRegistry", address="0xf0E474592B455579Fe580D610b846BdBb529C6F7", abi=TokenRegistry.abi)
bzx = Contract.from_abi("bzx", address="0xD8Ee69652E4e4838f2531732a46d1f7F584F0b7f", abi=interface.IBZx.abi, owner=bzxOwner)
priceFeed = Contract.from_abi("pricefeed", bzx.priceFeeds(), abi=PriceFeeds.abi, owner=bzxOwner)
iTokenProxy = bzxOwner.deploy(LoanToken, bzxOwner, loanTokenLogicStandard)
loanTokenSettings = bzxOwner.deploy(LoanTokenSettings)
calldata = loanTokenSettings.initialize.encode_input(
loanTokenAddress, iTokenName, iTokenSymbol)
iToken = Contract.from_abi("loanTokenLogicStandard",
iTokenProxy, LoanTokenLogicStandard.abi, bzxOwner)
iToken.transferOwnership(TIMELOCK, {"from": bzxOwner})
calldata = iToken.updateSettings.encode_input(loanTokenSettings, calldata)
calldataArray.append(calldata)
targets.append(iToken.address)
# Setting price Feed
bzx.transferOwnership(TIMELOCK, {"from": bzxOwner})
priceFeed.transferOwnership(TIMELOCK, {"from": bzxOwner})
priceFeedAddress = "0xA9F9F897dD367C416e350c33a92fC12e53e1Cee5" # FAKE price feed
calldata = priceFeed.setPriceFeed.encode_input([loanTokenAddress], [priceFeedAddress])
calldataArray.append(calldata)
targets.append(priceFeed.address)
# calldata = bzx.setLoanPool.encode_input([iToken], [loanTokenAddress])
# calldataArray.append(calldata)
# targets.append(bzx.address)
# protocolSettings = ProtocolSettings.deploy({'from': TIMELOCK})
# bzx.replaceContract(protocolSettings, {"from": TIMELOCK})
# calldata = bzx.setSupportedTokens.encode_input([loanTokenAddress], [True], False)
# calldataArray.append(calldata)
# targets.append(bzx.address)
# base_data = [
# b"0x0", # id
# False, # active
# str(TIMELOCK), # owner
# "0x0000000000000000000000000000000000000001", # loanToken
# "0x0000000000000000000000000000000000000002", # collateralToken
# Wei("20 ether"), # minInitialMargin
# Wei("15 ether"), # maintenanceMargin
# 0 # fixedLoanTerm
# ]
# params = []
# supportedTokenAssetsPairs = bzxRegistry.getTokens(0, 100) # TODO move this into a loop for permissionless to support more than 100
# loanTokensArr = []
# collateralTokensArr = []
# amountsArr =[]
# for tokenAssetPair in supportedTokenAssetsPairs:
# if tokenAssetPair[0] == iToken.address:
# continue
# # below is to allow different collateral for new iToken
# base_data_copy = base_data.copy()
# base_data_copy[3] = loanTokenAddress
# base_data_copy[4] = tokenAssetPair[1] # pair is iToken, Underlying
# print(base_data_copy)
# params.append(base_data_copy)
# loanTokensArr.append(loanTokenAddress)
# collateralTokensArr.append(tokenAssetPair[1])
# amountsArr.append(7*10**18)
# loanTokenSettingsLowerAdmin = LoanTokenSettingsLowerAdmin.deploy({'from': TIMELOCK}) # TODO use Tom addr
# calldata = loanTokenSettingsLowerAdmin.setupLoanParams.encode_input(params, True)
# calldata = iToken.updateSettings.encode_input(loanTokenSettingsLowerAdmin.address, calldata)
# calldataArray.append(calldata)
# targets.append(iToken.address)
# calldata = loanTokenSettingsLowerAdmin.setupLoanParams.encode_input(params, False)
# iToken.updateSettings.encode_input(loanTokenSettingsLowerAdmin.address, calldata)
# calldataArray.append(calldata)
# targets.append(iToken.address)
# calldata = loanTokenSettingsLowerAdmin.setDemandCurve.encode_input(0, 23.75*10**18, 0, 0, 80*10**18, 80*10**18, 120*10**18)
# iToken.updateSettings.encode_input(loanTokenSettingsLowerAdmin.address, calldata)
# calldataArray.append(calldata)
# targets.append(iToken.address)
# params.clear()
# for tokenAssetPair in supportedTokenAssetsPairs:
# # below is to allow new iToken.loanTokenAddress in other existing iTokens
# existingIToken = Contract.from_abi("existingIToken", address=tokenAssetPair[0], abi=LoanTokenLogicStandard.abi, owner=bzxOwner)
# base_data_copy = base_data.copy()
# existingITokenLoanTokenAddress = existingIToken.loanTokenAddress()
# base_data_copy[3] = existingITokenLoanTokenAddress
# base_data_copy[4] = loanTokenAddress # pair is iToken, Underlying
# print(base_data_copy)
# params.append(base_data_copy)
# calldata = loanTokenSettingsLowerAdmin.setupLoanParams.encode_input(params, True)
# calldata = existingIToken.updateSettings.encode_input(loanTokenSettingsLowerAdmin.address, calldata)
# calldataArray.append(calldata)
# targets.append(existingIToken.address)
# calldata = loanTokenSettingsLowerAdmin.setupLoanParams.encode_input(params, False)
# calldata = existingIToken.updateSettings.encode_input(loanTokenSettingsLowerAdmin.address, calldata)
# calldataArray.append(calldata)
# targets.append(existingIToken.address)
# loanTokensArr.append(loanTokenAddress)
# collateralTokensArr.append(existingITokenLoanTokenAddress)
# amountsArr.append(7*10**18)
# params.clear()
# calldata = bzx.setLiquidationIncentivePercent.encode_input(loanTokensArr, collateralTokensArr, amountsArr)
# calldataArray.append(calldata)
# targets.append(bzx.address)
# end of building calldata arrays
tx = GOVERNANCE_DELEGATOR.propose(
targets,
[0] * len(calldataArray),
[""] * len(calldataArray),
calldataArray,
"asdf",
{"from": proposer})
proposalCount = GOVERNANCE_DELEGATOR.proposalCount()
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
id = proposal[0]
eta = proposal[2]
startBlock = proposal[3]
endBlock = proposal[4]
forVotes = proposal[5]
againstVotes = proposal[6]
abstainVotes = proposal[7]
canceled = proposal[8]
assert GOVERNANCE_DELEGATOR.state.call(id) == 0
chain.mine()
chain.mine(startBlock - chain.height)
# after first vote state is active
tx = GOVERNANCE_DELEGATOR.castVote(id,1, {"from" : bzxOwner})
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine(endBlock - chain.height)
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine()
assert GOVERNANCE_DELEGATOR.state.call(id) == 4
GOVERNANCE_DELEGATOR.queue(id, {"from" : bzxOwner})
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
eta = proposal[2]
chain.sleep(eta - chain.time())
chain.mine()
GOVERNANCE_DELEGATOR.execute(id, {"from" : bzxOwner})
assert True
def testGovernanceConsecutiveProposal(requireMainnetFork, GOVERNANCE_DELEGATOR, TIMELOCK, STAKING, TOKEN_SETTINGS, iUSDC, accounts,TestToken, LoanTokenLogicStandard, TokenRegistry, LoanToken, LoanTokenSettings, interface, PriceFeeds, ProtocolSettings, LoanTokenSettingsLowerAdmin, BZRX):
proposer = "0x95BeeC2457838108089fcD0E059659A4E60B091A"
bzxOwner = accounts.at("0xB7F72028D9b502Dc871C444363a7aC5A52546608", force=True)
# FIRST
    # beginning of building calldata arrays
# calldataArray = getTransactionListToDeployITokens(accounts)
calldataArray = []
targets = []
underlyingSymbol = "ABC"
iTokenSymbol = "i{}".format(underlyingSymbol)
iTokenName = "Fulcrum {} iToken ({})".format(underlyingSymbol, iTokenSymbol)
loanTokenAddress = bzxOwner.deploy(TestToken, underlyingSymbol, underlyingSymbol, 18, 1e50).address
loanTokenLogicStandard = bzxOwner.deploy(LoanTokenLogicStandard, bzxOwner).address
bzxRegistry = Contract.from_abi("bzxRegistry", address="0xf0E474592B455579Fe580D610b846BdBb529C6F7", abi=TokenRegistry.abi)
bzx = Contract.from_abi("bzx", address="0xD8Ee69652E4e4838f2531732a46d1f7F584F0b7f", abi=interface.IBZx.abi, owner=bzxOwner)
priceFeed = Contract.from_abi("pricefeed", bzx.priceFeeds(), abi=PriceFeeds.abi, owner=bzxOwner)
iTokenProxy = bzxOwner.deploy(LoanToken, bzxOwner, loanTokenLogicStandard)
loanTokenSettings = bzxOwner.deploy(LoanTokenSettings)
calldata = loanTokenSettings.initialize.encode_input(
loanTokenAddress, iTokenName, iTokenSymbol)
iToken = Contract.from_abi("loanTokenLogicStandard",
iTokenProxy, LoanTokenLogicStandard.abi, bzxOwner)
iToken.transferOwnership(TIMELOCK, {"from": bzxOwner})
calldata = iToken.updateSettings.encode_input(loanTokenSettings, calldata)
calldataArray.append(calldata)
targets.append(iToken.address)
tx = GOVERNANCE_DELEGATOR.propose(
targets,
[0] * len(calldataArray),
[""] * len(calldataArray),
calldataArray,
"asdf",
{"from": proposer})
proposalCount = GOVERNANCE_DELEGATOR.proposalCount()
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
id = proposal[0]
eta = proposal[2]
startBlock = proposal[3]
endBlock = proposal[4]
forVotes = proposal[5]
againstVotes = proposal[6]
abstainVotes = proposal[7]
canceled = proposal[8]
assert GOVERNANCE_DELEGATOR.state.call(id) == 0
chain.mine(startBlock - chain.height)
chain.mine()
# after first vote state is active
tx = GOVERNANCE_DELEGATOR.castVote(id,1, {"from" : bzxOwner})
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine(endBlock - chain.height)
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine()
assert GOVERNANCE_DELEGATOR.state.call(id) == 4
GOVERNANCE_DELEGATOR.queue(id, {'from': accounts[0]})
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
eta = proposal[2]
chain.sleep(eta - chain.time())
chain.mine()
GOVERNANCE_DELEGATOR.execute(id, {'from': accounts[0]})
# SECOND
    # beginning of building calldata arrays
# calldataArray = getTransactionListToDeployITokens(accounts)
calldataArray = []
targets = []
underlyingSymbol = "DEF"
iTokenSymbol = "i{}".format(underlyingSymbol)
iTokenName = "Fulcrum {} iToken ({})".format(underlyingSymbol, iTokenSymbol)
loanTokenAddress = bzxOwner.deploy(TestToken, underlyingSymbol, underlyingSymbol, 18, 1e50).address
loanTokenLogicStandard = bzxOwner.deploy(LoanTokenLogicStandard, bzxOwner).address
bzxRegistry = Contract.from_abi("bzxRegistry", address="0xf0E474592B455579Fe580D610b846BdBb529C6F7", abi=TokenRegistry.abi)
bzx = Contract.from_abi("bzx", address="0xD8Ee69652E4e4838f2531732a46d1f7F584F0b7f", abi=interface.IBZx.abi, owner=bzxOwner)
priceFeed = Contract.from_abi("pricefeed", bzx.priceFeeds(), abi=PriceFeeds.abi, owner=bzxOwner)
iTokenProxy = bzxOwner.deploy(LoanToken, bzxOwner, loanTokenLogicStandard)
loanTokenSettings = bzxOwner.deploy(LoanTokenSettings)
calldata = loanTokenSettings.initialize.encode_input(
loanTokenAddress, iTokenName, iTokenSymbol)
iToken = Contract.from_abi("loanTokenLogicStandard",
iTokenProxy, LoanTokenLogicStandard.abi, bzxOwner)
iToken.transferOwnership(TIMELOCK, {"from": bzxOwner})
calldata = iToken.updateSettings.encode_input(loanTokenSettings, calldata)
calldataArray.append(calldata)
targets.append(iToken.address)
tx = GOVERNANCE_DELEGATOR.propose(
targets,
[0] * len(calldataArray),
[""] * len(calldataArray),
calldataArray,
"asdf",
{"from": proposer})
proposalCount = GOVERNANCE_DELEGATOR.proposalCount()
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
id = proposal[0]
eta = proposal[2]
startBlock = proposal[3]
endBlock = proposal[4]
forVotes = proposal[5]
againstVotes = proposal[6]
abstainVotes = proposal[7]
canceled = proposal[8]
assert GOVERNANCE_DELEGATOR.state.call(id) == 0
chain.mine()
chain.mine(startBlock - chain.height)
# after first vote state is active
tx = GOVERNANCE_DELEGATOR.castVote(id, 1, {"from" : bzxOwner})
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine(endBlock - chain.height)
assert GOVERNANCE_DELEGATOR.state.call(id) == 1
chain.mine()
assert GOVERNANCE_DELEGATOR.state.call(id) == 4
GOVERNANCE_DELEGATOR.queue(id, {"from" : bzxOwner})
proposal = GOVERNANCE_DELEGATOR.proposals(proposalCount)
eta = proposal[2]
chain.sleep(eta - chain.time())
chain.mine()
GOVERNANCE_DELEGATOR.execute(id, {"from" : bzxOwner})
assert True
def testGovernanceAcceptLocalAdmin(requireMainnetFork, GOVERNANCE_DELEGATOR, TIMELOCK, STAKING, TOKEN_SETTINGS, iUSDC, accounts,TestToken, LoanTokenLogicStandard, TokenRegistry, LoanToken, LoanTokenSettings, interface, PriceFeeds, ProtocolSettings, LoanTokenSettingsLowerAdmin, BZRX):
proposer = "0x95BeeC2457838108089fcD0E059659A4E60B091A"
bzxOwner = accounts.at("0xB7F72028D9b502Dc871C444363a7aC5A52546608", force=True)
# FIRST
    # beginning of building calldata arrays
# calldataArray = getTransactionListToDeployITokens(accounts)
calldataArray = []
targets = []
calldata = GOVERNANCE_DELEGATOR.__acceptLocalAdmin.encode_input()
calldataArray.append(calldata)
targets.append(GOVERNANCE_DELEGATOR.address)
tx = GOVERNANCE_DELEGATOR.propose(
targets,
[0] * len(calldataArray),
[""] * len(calldataArray),
calldataArray,
"DAO.__acceptLocalAdmin()",
{"from": proposer})
|
python
|
from .database import *
def password_is_valid(password):
if len(password) < 5:
return False
return True
class Login():
def __init__(self, data_set):
self.db = self.db_create(data_set)
def db_create(self, data_set):
db = Database(data_set)
db.up_database()
return db
def login_user(self, username, password):
if self.db.get_user(username):
if self.db.compare_password(username, password):
return "Successfully logged in!"
return "Password is incorrect, try another one!"
return "This user does not exists!"
def create_user(self, username, password):
if self.db.user_exists(username) is False:
if password_is_valid(password):
self.db.add_user(username, password)
return "User created with success!"
return "The password must have at list 5 characters"
return "This user already exists!"
|
python
|
ls = []
for i in range(100):
if i%3==0:
ls.append(i)
print(ls)
ls = [i for i in range(100) if i%3==0] # list comprehension
print(ls)
dict1 = {i:f"item{i}" for i in range(1, 12) if i%2==0} # dictionary comprehension
print(dict1)
dict1 = {value:key for key,value in dict1.items()} # can change key:value to value:key
print(dict1)
dresses = {dress for dress in ["dress1", "dress2", "dress3", "dress2"]} # set comprehension cannot give repeated values
print(type(dresses))
print(dresses)
dresses = [dress for dress in ["dress1", "dress2", "dress3", "dress 2"]] # list comprehension can give repeated values
print(type(dresses))
print(dresses)
evens = (i for i in range(100) if i%10==0) # generator comprehension uses parentheses
print(type(evens))
print(evens)
print(evens.__next__())
print(evens.__next__())
for items in evens:
print(items)
print("how much values do you want in a list")
limit = int(input())
print("enter values of list")
list = []
for i in range(limit):
list.append(input())
print("press 1 for list comprehension\npress 2 for set comprehension\npress 3 for generator comprehension")
ans = int(input())
if ans == 1:
ls = [i for i in list]
print(ls)
elif ans == 2:
dresses = {i for i in list}
print(type(dresses))
print(dresses)
elif ans == 3:
dict1 = (i for i in list)
print(type(dict1))
for item in dict1:
print(item)
|
python
|
# Copyright 2017 Eun Woo Song
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import sqlite3
from pyquibase.pyquibase import Pyquibase
class TestPyquibase(unittest.TestCase):
def tearDown(self):
"""
Delete the database
"""
os.remove("testdb.sqlite")
def test_liquibase_update_xml(self):
self.pyquibase = Pyquibase.sqlite(
'testdb.sqlite',
os.path.join(os.path.dirname(__file__), 'db-changelog-1.xml')
)
# run liquibase update
self.pyquibase.update()
# verify that liquibase has been executed properly by
# executing sql queries
con = sqlite3.connect('testdb.sqlite')
con.execute("INSERT INTO test VALUES (1, 'test')")
actual = con.execute('SELECT * FROM test').fetchall()
expected = [(1, 'test')]
self.assertListEqual(actual, expected)
def test_liquibase_update_yml(self):
self.pyquibase = Pyquibase.sqlite(
'testdb.sqlite',
os.path.join(os.path.dirname(__file__), 'db-changelog-1.yaml')
)
# run liquibase update
self.pyquibase.update()
# verify that liquibase has been executed properly by
# executing sql queries
con = sqlite3.connect('testdb.sqlite')
con.execute("INSERT INTO test VALUES (1, 'test')")
actual = con.execute('SELECT * FROM test').fetchall()
expected = [(1, 'test')]
self.assertListEqual(actual, expected)
|
python
|
import json
import os
import hashlib
import getpass
def main():
FileStructureSetUp().set_up_files()
class ScheduleTable:
# Describes a schedule table
class Row:
def __init__(self, block_type: str = "NA", day: str = "NA", place: str = "NA", time: str = "NA"):
"""
Describes a row in the schedule table
:param block_type: String, meeting type label
:param day: String, name of the day of the week of the meeting
:param place: String, name of the place of the meeting
            :param time: String, times available (e.g. 5:30-6:30)
"""
self.__type = block_type
self.__day = day
self.__place = place
self.__time = time
def to_dict(self) -> dict:
"""
Converts a row into a dictionary to be converted to json
:return: dict that represents the contents of the row
"""
row_dict = {
"type": self.__type,
"day": self.__day,
"place": self.__place,
"time": self.__time
}
return row_dict
def __init__(self, title: str = ""):
"""
        Makes a schedule table
:param title: String, title of the table
"""
if title == "":
title = "Schedule"
self.__title = title
self.__rows = []
def add_row(self, block_type: str, day: str, place: str, time: str):
"""
Creates a new row in the table
:param block_type: String, meeting type label
:param day: String, name of the day of the week of the meeting
:param place: String, name of the place of the meeting
        :param time: String, times available (e.g. 5:30-6:30)
"""
self.__rows.append(self.Row(block_type, day, place, time).to_dict())
def add_many_rows(self):
"""Repeatedly asks the user to add information for new rows untill they are done"""
print('\nSchedule Table Rows')
add_rows = 'y'
while add_rows == 'y' or add_rows == 'Y' or str(add_rows).lower() == 'yes':
block_type = input("Enter block type (Usually 'SI Session' or 'Office Hours'): ")
day = input("Enter day of this block: ")
place = input("Enter place where the block occurs: ")
time = input("Enter time when the block occurs (ex, 5:30-6:20): ")
self.add_row(block_type, day, place, time)
add_rows = input('Add another row? (y/n): ')
print()
def to_dict(self) -> dict:
"""
Converts table into a dict to be made into a json
:return: dictionary representation of the table
"""
schedule_dict = {
"title": self.__title,
"rows": self.__rows
}
return schedule_dict
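# A minimal usage sketch (kept commented out; not part of the original module):
# it builds a small table programmatically instead of via the add_many_rows()
# prompts, and the row values are illustrative only.
#   table = ScheduleTable("SI Schedule")
#   table.add_row("SI Session", "Monday", "Library 204", "5:30-6:20")
#   table.add_row("Office Hours", "Wednesday", "CS Lounge", "2:00-3:00")
#   print(json.dumps(table.to_dict(), indent=2))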
class MessageToStudents:
def __init__(self, title: str = "", message: str = "", author: str = ""):
self.title = title
self.message = message
self.author = author
def write_message(self):
self.title = input("Enter an additional message title: ")
self.message = input("Enter an additional message: ")
self.author = input("Enter an author name: ")
return self
def to_dict(self):
message_dict = {
"title" : self.title,
"message" : self.message,
"author" : self.author
}
return message_dict
class FileStructureSetUp():
def __init__(self):
pass
def set_up_files(self):
"""Sets up the file structure for the website"""
self.create_missing_dirs()
self.set_admin()
self.design_site()
def create_missing_dirs(self):
"""Creates any missing dirs"""
        self.attempt_to_make_dir("./static/res/share/")
        self.attempt_to_make_dir("./static/json")
    def attempt_to_make_dir(self, path: str):
        """Check that a dir exists; if it doesn't, attempt to create it"""
if not os.path.exists(path):
try:
os.mkdir(path)
except Exception:
print("Something went wrong creating" + path)
def set_admin(self):
"""Creates and writes out the admin json"""
admin_json = {}
admin_json['path'] = self.set_path()
(admin_json['username'], admin_json['password']) = self.set_credentials()
with open('./static/json/admin.json', 'w') as admin:
json.dump(admin_json, admin, indent=2)
def set_path(self) -> str:
"""
        Asks the user to confirm that the path provided is where the website is run from.
        If not, they can provide their own path. The path is then returned as a string
:return: String, path to current working dir
"""
newPath = os.getcwd()
print("Your current working directory is:")
print(newPath + "\n")
choice = input("Would you like to use this path (y/n): ")
if choice != 'y' and choice != 'Y' and choice != 'yes':
newPath = input('Please enter your new path:\n')
return newPath
def set_credentials(self):
"""Gets and hashes the credentials"""
username = str(hashlib.sha256(input('Please enter an admin username: ').encode()).hexdigest())
password = getpass.getpass('Please enter an admin password: ')
while len(password) < 8:
print("Password should be 8 or more characters long")
password = getpass.getpass('Please enter an admin password: ')
password = str(hashlib.sha256(password.encode()).hexdigest())
        password_re_enter = str(hashlib.sha256(getpass.getpass('Please re-enter the password: ').encode()).hexdigest())
        if password == password_re_enter:
print("Username and password have been updated")
return (username, password)
else:
print("Passwords do not match, changes are being ignored")
exit()
def design_site(self):
"""Creates a json that describes the general layout of the website"""
design_json = {}
design_json['title'] = input("Please enter a title for your site: ")
schedule_table = ScheduleTable(input("Please enter a schedule title: "))
schedule_table.add_many_rows()
design_json['schedule_table'] = schedule_table.to_dict()
design_json['message_to_students'] = MessageToStudents().write_message().to_dict()
with open('./static/json/design.json', 'w') as design:
json.dump(design_json, design, indent=2)
if __name__ == "__main__":
main()
|
python
|
import pytest
import python_jsonschema_objects as pjo
@pytest.fixture
def arrayClass():
schema = {
"title": "ArrayVal",
"type": "object",
"properties": {
"min": {
"type": "array",
"items": {"type": "string"},
"default": [],
"minItems": 1,
},
"max": {
"type": "array",
"items": {"type": "string"},
"default": [],
"maxItems": 1,
},
"both": {
"type": "array",
"items": {"type": "string"},
"default": [],
"maxItems": 2,
"minItems": 1,
},
"unique": {
"type": "array",
"items": {"type": "string"},
"default": [],
"uniqueItems": True,
},
"reffed": {
"type": "array",
"items": {"$ref": "#/definitions/myref"},
"minItems": 1,
},
},
"definitions": {"myref": {"type": "string"}},
}
ns = pjo.ObjectBuilder(schema).build_classes()
return ns["Arrayval"](min=["1"], both=["1"])
def test_validators_work_with_reference(arrayClass):
arrayClass.reffed = ["foo"]
with pytest.raises(pjo.ValidationError):
arrayClass.reffed = []
def test_array_length_validates(markdown_examples):
builder = pjo.ObjectBuilder(
markdown_examples["Example Schema"], resolved=markdown_examples
)
ns = builder.build_classes()
with pytest.raises(pjo.ValidationError):
ns.ExampleSchema(
firstName="Fred",
lastName="Huckstable",
dogs=["Fido", "Spot", "Jasper", "Lady", "Tramp"],
)
def test_minitems(arrayClass):
arrayClass.min = ["1"]
arrayClass.min.append("2")
with pytest.raises(pjo.ValidationError):
arrayClass.min = []
def test_maxitems(arrayClass):
arrayClass.max = []
arrayClass.max.append("2")
assert arrayClass.max == ["2"]
with pytest.raises(pjo.ValidationError):
arrayClass.max.append("3")
# You have to explicitly validate with append
arrayClass.validate()
with pytest.raises(pjo.ValidationError):
arrayClass.max = ["45", "42"]
def test_unique(arrayClass):
arrayClass.unique = ["hi", "there"]
with pytest.raises(pjo.ValidationError):
arrayClass.unique.append("hi")
# You have to explicitly validate with append
arrayClass.validate()
with pytest.raises(pjo.ValidationError):
arrayClass.unique = ["Fred", "Fred"]
|
python
|
import pytesseract
import settings
def img_to_text():
pytesseract.pytesseract.tesseract_cmd = settings.WORKSPACE_DIR + r'/etc/Tesseract-OCR/tesseract.exe'
text = pytesseract.image_to_string(settings.WORKSPACE_DIR + r'/screenshots_temp/screenshot.png', lang=settings.get_language())
text = text.replace("\n", " ")
return text
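# A minimal usage sketch, not part of the original module: it assumes a screenshot
# has already been saved to <WORKSPACE_DIR>/screenshots_temp/screenshot.png and
# that settings.get_language() returns a language Tesseract has data for.
if __name__ == "__main__":
    print(img_to_text())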
|
python
|
import multiprocessing
import numpy as np
from basetrainer import BaseTrainer
from solvers import LeastSquares
from scorers import MeanAbsolute
from parallelworker import Worker, Task
# Feature selection + linear regression training and validation toolkit
class ParallelTrainer(BaseTrainer):
def __init__(self, x, y, solver=LeastSquares, scorer=MeanAbsolute, number_of_folds=5):
BaseTrainer.__init__(self, x=x, y=y, solver=solver, scorer=scorer, number_of_folds=number_of_folds)
# defaults
self.task_queue = None
self.result_queue = None
self.number_of_processes = None
def _start_workers(self):
# establish communication queue
self.task_queue = multiprocessing.JoinableQueue()
self.result_queue = multiprocessing.Queue()
# number of processes
if self.number_of_processes is None:
self.number_of_processes = multiprocessing.cpu_count()
# make and start workers
workers = [Worker(self.task_queue, self.result_queue, self.x, self.y, self.folds, self.solver,
self.scorer) for i in xrange(self.number_of_processes)]
for w in workers:
w.start()
def _end_workers(self):
# add a poison pill for each workers
for i in xrange(self.number_of_processes):
self.task_queue.put(None)
# finish sending poison pill
self.task_queue.join()
# close queues
self.task_queue.close()
self.result_queue.close()
# clean up
self.task_queue = None
self.result_queue = None
def _run_feature_selection(self, forward=True, backward=False):
# spin up workers
self._start_workers()
# actually run the feature selection
BaseTrainer._run_feature_selection(self, forward, backward)
# end workers
self._end_workers()
def _do_forward_selection(self, col_indices_for_inputs, best_score):
# column indices
l_indices = np.shape(self.x)[1]
# no more columns to add
if len(col_indices_for_inputs) == l_indices:
return None, best_score
# count number of pending tasks
tasks = 0
# distribute all potential feature sets to processes
for potential_index in xrange(l_indices):
# already in linear regression
if potential_index in col_indices_for_inputs:
continue
# add index to copy of list
potential_col_indices = col_indices_for_inputs + [potential_index]
# add potential task
self.task_queue.put(Task(potential_col_indices))
# increment number of pending tasks
tasks += 1
# let processes score each feature set
#self.task_queue.join()
# new columns
new_col_indices = None
# collect responses
while True:
# get next result
result = self.result_queue.get()
# process result
if self._is_better_score(best_score, result.score):
best_score = result.score
new_col_indices = result.column_indices
# decrement number of pending tasks
tasks -= 1
# no more tasks
if tasks == 0:
break
return new_col_indices, best_score
def _do_backward_selection(self, col_indices_for_inputs, best_score):
# no more columns to remove
if len(col_indices_for_inputs) == 1:
return None, best_score
# count number of pending tasks
tasks = 0
# distribute all potential feature sets to processes
for potential_index in col_indices_for_inputs:
# remove index from a copy of the list
potential_col_indices = [x for x in col_indices_for_inputs if x != potential_index]
# add potential task
self.task_queue.put(Task(potential_col_indices))
# increment number of pending tasks
tasks += 1
# let processes score each feature set
#self.task_queue.join()
# new columns
new_col_indices = None
# collect responses
while True:
# get next result
result = self.result_queue.get()
# process result
if self._is_better_score(best_score, result.score):
best_score = result.score
new_col_indices = result.column_indices
# decrement number of pending tasks
tasks -= 1
# no more tasks
if tasks == 0:
break
return new_col_indices, best_score
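# A minimal usage sketch, not part of the original module: x and y are random
# placeholder data, the solver/scorer defaults come from the imports above, and
# _run_feature_selection is assumed to be the intended entry point (as in
# BaseTrainer).
if __name__ == "__main__":
    x = np.random.rand(200, 8)
    y = np.random.rand(200)
    trainer = ParallelTrainer(x, y, number_of_folds=5)
    trainer._run_feature_selection(forward=True, backward=False)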
|
python
|
from stack_and_queue.stack_and_queue import Node, Stack, PseudoQueue
import pytest
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from pyup.updates import Update, RequirementUpdate, InitialUpdate, SequentialUpdate, ScheduledUpdate
from unittest import TestCase
from pyup.requirements import RequirementFile
from pyup.errors import UnsupportedScheduleError
from pyup.config import Config, RequirementConfig
from mock import Mock, patch
from datetime import datetime
class UpdateBaseTest(TestCase):
def setUp(self):
self.config = Mock()
self.config.pin_file.return_value = True
class ShouldUpdateTest(TestCase):
def setUp(self):
self.config = Config()
self.req1 = Mock()
self.req1.key = "foo"
self.req1.latest_version_within_specs = "0.2"
self.req1.needs_update = True
self.req1.is_pinned = True
self.req1.is_insecure = False
self.req2 = Mock()
self.req2.key = "bar"
self.req2.latest_version_within_specs = "0.2"
self.req2.needs_update = True
self.req2.is_pinned = True
self.req2.is_insecure = False
self.req_file = Mock()
self.req_file.requirements = [self.req1, self.req2]
self.req_file.path = "requirements.txt"
self.update = Update(
requirement_files=[self.req_file],
config=self.config
)
def test_default_yes(self):
self.assertTrue(self.update.should_update(self.req1, self.req_file))
def test_update_all_restricted_in_file(self):
self.assertTrue(self.update.should_update(self.req1, self.req_file))
self.config.requirements = [RequirementConfig(path="requirements.txt", update="insecure")]
self.assertFalse(self.update.should_update(self.req1, self.req_file))
def test_update_insecure(self):
self.config.update = "insecure"
self.assertFalse(self.update.should_update(self.req1, self.req_file))
self.req1.is_insecure = True
self.assertTrue(self.update.should_update(self.req1, self.req_file))
def test_update_unpinned(self):
self.config.update = "all"
self.config.pin = False
self.req1.is_pinned = False
self.assertFalse(self.update.should_update(self.req1, self.req_file))
self.req1.is_pinned = True
self.assertTrue(self.update.should_update(self.req1, self.req_file))
self.req1.is_pinned = False
self.config.pin = True
self.assertTrue(self.update.should_update(self.req1, self.req_file))
class UpdateCreateUpdateKeyTest(UpdateBaseTest):
def test_unpinned_requirement(self):
req = Mock()
req.key = "django"
req.is_pinned = False
self.assertEqual(Update.create_update_key(req), "django-pin")
def test_latest_version_within_specs(self):
req = Mock()
req.key = "django"
req.is_pinned = True
req.latest_version_within_specs = "1.10"
self.assertEqual(Update.create_update_key(req), "django-1.10")
class UpdateGetCommitMessageTest(UpdateBaseTest):
def test_unpinned_requirement(self):
req = Mock()
req.key = "django"
req.is_pinned = False
req.latest_version_within_specs = "1.10"
self.assertEqual(Update.get_commit_message(req), "Pin django to latest version 1.10")
def test_pinned_requirement(self):
req = Mock()
req.key = "django"
req.is_pinned = True
req.latest_version_within_specs = "1.10"
req.version = "1.0"
self.assertEqual(Update.get_commit_message(req), "Update django from 1.0 to 1.10")
class UpdateInitTestCase(UpdateBaseTest):
def test_init_empty(self):
update = Update([], self.config)
self.assertEqual(update, dict())
def test_init_with_reqs(self):
with patch("pyup.requirements.Requirement") as req:
req.needs_update = True
req_files = [RequirementFile("req.txt", "django")]
update = Update(req_files, self.config)
self.assertEqual(len(update.keys()), 1)
class UpdateAddTest(UpdateBaseTest):
def test_add_with_empty(self):
update = Update([], self.config)
req_file = Mock()
req = Mock()
req.key = "django"
req.is_pinned = False
req.latest_version_within_specs = "1.10"
update.add(req, req_file)
self.assertEqual("django-pin" in update, True)
self.assertEqual(len(update["django-pin"]), 1)
def test_add_with_match(self):
update = Update([], self.config)
req_file = Mock()
req = Mock()
req.key = "django"
req.is_pinned = False
req.latest_version_within_specs = "1.10"
update.add(req, req_file)
self.assertEqual("django-pin" in update, True)
self.assertEqual(len(update["django-pin"]), 1)
update.add(req, req_file)
self.assertEqual(len(update["django-pin"]), 2)
class UpdateGetRequirementUpdateClassTest(UpdateBaseTest):
def test_class(self):
update = Update([], self.config)
self.assertEqual(RequirementUpdate, update.get_requirement_update_class())
class InitialUpdateTestBody(UpdateBaseTest):
def test_body(self):
self.assertTrue("updated so far." in InitialUpdate.get_body([]))
class SequentialUpdateTestBody(UpdateBaseTest):
def test_body(self):
self.assertTrue("is not pinned" in SequentialUpdate.get_body([]))
class SequentialUpdateTestTitle(UpdateBaseTest):
def test_get_title(self):
req = Mock()
req.key = "foo"
req.latest_version_within_specs = "bar"
self.assertEqual(SequentialUpdate.get_title(req), "Update foo to bar")
class SequentialUpdateTestBranch(UpdateBaseTest):
def test_requirement_pinned(self):
req = Mock()
req.key = "django"
req.is_pinned = True
req.latest_version_within_specs = "1.10"
req.version = "1.0"
self.assertEqual(SequentialUpdate.get_branch(req), "update-django-1.0-to-1.10")
def test_requirement_not_pinned(self):
req = Mock()
req.key = "django"
req.is_pinned = False
req.latest_version_within_specs = "1.10"
self.assertEqual(SequentialUpdate.get_branch(req), "pin-django-1.10")
class SequentialUpdateTestGetUpdates(UpdateBaseTest):
def test_get_updates_empty(self):
update = SequentialUpdate([], self.config)
self.assertEqual(len([u for u in update.get_updates()]), 0)
def test_get_updates(self):
update = SequentialUpdate([], config=self.config)
req_file = Mock()
req = Mock()
req.key = "django"
req.is_pinned = False
req.latest_version_within_specs = "1.10"
req.changelog = {"1.10": "foo"}
update.add(req, req_file)
self.assertEqual("django-pin" in update, True)
self.assertEqual(len(update["django-pin"]), 1)
update.add(req, req_file)
self.assertEqual(len(update["django-pin"]), 2)
updates = [u for u in update.get_updates()]
self.assertEqual(len(updates), 1)
class InitialUpdateTestGetUpdates(UpdateBaseTest):
def test_get_updates_empty(self):
update = InitialUpdate([], self.config)
self.assertEqual(len([u for u in update.get_updates()]), 0)
def test_get_updates(self):
update = InitialUpdate([], config=self.config)
req_file = Mock()
req = Mock()
req.key = "django"
req.is_pinned = False
req.latest_version_within_specs = "1.10"
req.changelog = {"1.10": "foo"}
update.add(req, req_file)
self.assertEqual("django-pin" in update, True)
self.assertEqual(len(update["django-pin"]), 1)
update.add(req, req_file)
self.assertEqual(len(update["django-pin"]), 2)
updates = [u for u in update.get_updates()]
self.assertEqual(len(updates), 1)
class ScheduledUpdateBaseTest(UpdateBaseTest):
def setUp(self):
super(ScheduledUpdateBaseTest, self).setUp()
self.config.is_valid_schedule = True
self.config.schedule = "every day on monday"
self.update = ScheduledUpdate([], self.config)
class ScheduledUpdateTest(ScheduledUpdateBaseTest):
@patch("pyup.updates.datetime")
def test_title_every_day(self, dt):
dt.now.return_value = datetime(2016, 9, 13, 9, 21, 42, 702067)
self.config.schedule = "every day"
self.assertEquals(
self.update.get_title(),
"Scheduled daily dependency update on tuesday"
)
@patch("pyup.updates.datetime")
def test_title_every_week(self, dt):
dt.now.return_value = datetime(2016, 9, 16, 9, 21, 42, 702067)
self.config.schedule = "every week on wednesday"
self.assertEquals(
self.update.get_title(),
"Scheduled weekly dependency update for week 37"
)
@patch("pyup.updates.datetime")
def test_title_every_two_weeks(self, dt):
dt.now.return_value = datetime(2016, 9, 18, 9, 21, 42, 702067)
self.config.schedule = "every two weeks on sunday"
self.assertEquals(
self.update.get_title(),
"Scheduled biweekly dependency update for week 38"
)
@patch("pyup.updates.datetime")
def test_title_every_month(self, dt):
dt.now.return_value = datetime(2016, 12, 13, 9, 21, 42, 702067)
self.config.schedule = "every month"
self.assertEquals(
self.update.get_title(),
"Scheduled monthly dependency update for December"
)
def test_title_unsupported_schedule(self):
with self.assertRaises(UnsupportedScheduleError):
self.config.schedule = "uhm, what?"
self.update.get_title()
@patch("pyup.updates.datetime")
def test_get_branch(self, dt):
dt.now.return_value = datetime(2016, 12, 13, 9, 21, 42, 702067)
self.assertEquals(
self.update.get_branch(),
"scheduled-update-12-13-2016"
)
def test_get_body(self):
self.assertTrue("updated so far" in self.update.get_body([]))
|
python
|
"""
Generates JSON file that composes the game interactions
"""
import json, os, errno, random
from queue import *
from collections import defaultdict
from generation.world import ATTRIBUTES, OBJECT_TYPES, ITEMS, ROOMS, OUTSIDE, LOWER_FLOORS
from generation.notes import Notes
from generation.event import Event
def write_attributes():
"""
Writes item attributes to ./content/attributes.json
"""
with open_w('./content/attributes.json') as f:
json.dump(ATTRIBUTES, f)
def generate_items(n):
"""Generates n items and writes to ./content/items.json
Items contains list of attributes that describe their properties.
Args:
n (int): Number of unique items to generate
Returns:
Dictionary of generated items
"""
items = defaultdict(dict)
# Fill n items with correctly mapped attributes and store in items dictionary
for i in range(n):
item, properties = random.choice(list(ITEMS.items()))
# print(item, properties)
attributes = []
for p in properties:
# TODO: Add random contextual attribute
if p in OBJECT_TYPES.keys():
attributes.extend(OBJECT_TYPES[p])
elif p in ATTRIBUTES:
attributes.append(p)
items[item] = attributes
# Add notes as items
path = './content/notes.json'
notes = Notes.from_json(path)
for i in notes.keys():
if i == 'note 2' or i == 'note 4':
items[i] = ['event', 'portable', 'readable']
else:
items[i] = ['portable', 'readable']
# Generate events for all items that have an event attribute
path = './content/events.json'
Event.setup(path)
for i in list(items.keys()):
if 'event' in items[i]:
generate_event(i, path)
# Write items to a json file
with open_w('./content/items.json') as f:
json.dump(items, f)
return items
def generate_event(item, path):
"""
Generates events for items that have the event attribute. Writes these events to a json file.
"""
text, goal, reaction, exit_trigger = None, None, None, None
if item == 'note 2':
text = 'Monologue: I wonder what he has figured out. Seems interesting though'
elif item == 'note 4':
text = 'Monologue: What have I gotten myself into..'
goal = 'Solve the murder'
reaction = 'move key to inventory'
elif item == 'mirror':
text = 'Am I the murderer?'
reaction = 'end game'
event = Event(item, text, goal, reaction, exit_trigger)
event.to_json(path)
def generate_rooms(n, f, items):
"""Generates n rooms and writes to /content/rooms.json
    Rooms describe what items are in them and what other rooms (directions) it is possible to navigate to. Directions are
ordered in north, east, south, west or up, right, down, left directions.
Args:
n (int): Number of unique rooms to generate
f (int): Number of floors
items (dict): Dictionary of possible unique items in the world
"""
rooms = defaultdict(dict)
# Choose n rooms and fill 2D list of rooms by floor
# TODO: Basements
# possible_rooms = []
# for i in range(f - 1):
# # n rooms left, need to reserve f - i rooms for stairways in floors left,
# # need to reserve (f - i - 1) rooms so that no floor is just a stairway
# r_num = random.randint(1, n - (f - i) - (f - i - 1))
# possible_rooms += [random.sample(ROOMS, r_num)]
# possible_rooms[i].append("stairway")
# n -= r_num + 1
# # Shuffle chosen rooms
# random.shuffle(possible_rooms[i])
# possible_rooms += [random.sample(ROOMS, n - 1)]
# possible_rooms[-1].append("stairway")
# random.shuffle(possible_rooms[-1])
# print(possible_rooms)
# Choose n unique rooms
possible_rooms = random.sample(ROOMS, n)
# Shuffle chosen rooms
random.shuffle(possible_rooms)
# BFS generation
# TODO: Make sure start room is not locked
start = possible_rooms.pop()
frontier = Queue()
frontier.put(start)
came_from = {}
came_from[start] = None
print('start', start)
locked = {}
while not frontier.empty():
current = frontier.get()
if current == "stairway":
pass
# Reserve one travel direction for room that current came from
dec = 1 if came_from[current] else 0
# Adjacent rooms (which rooms you can travel to from current room)
lower_bound = 0 if not (frontier.empty() and len(possible_rooms)) else 1
upper_bound = len(possible_rooms) if len(possible_rooms) < 4 - dec else 4 - dec
adj_rooms = [possible_rooms.pop() for i in range(random.randint(lower_bound, upper_bound))]
# Pad adj_rooms with empty strings if no room to travel in that direction
while len(adj_rooms) < 4 - bool(came_from[current]):
adj_rooms.insert(random.randint(0, 4), '')
        # Insert as an adj_room the room the player came from so that the graph is bidirectional
if came_from[current]:
i, cf_room = came_from[current]
# Determine opposite direction: 0 -> 2, 1 -> 3, 2 -> 0, 3 -> 1
i = i + 2 if i < 2 else i - 2
adj_rooms.insert(i, cf_room)
for idx, next in enumerate(adj_rooms):
if next == "": continue
if next not in came_from:
frontier.put(next)
# idx corresponds to what direction coming from, 0 is north, 1 is east, 2 is south, 3 is west
came_from[next] = (idx, current)
# TODO: Place items in current room based on context
rooms[current] = { "directions": adj_rooms, "items": [] }
# Randomly place rest of items
items = list(items.keys())
random.shuffle(items)
for i in items:
rand_room = random.choice(list(rooms.keys()))
rooms[rand_room]['items'].append(i)
# A* to determine path to key. This is to make sure key (note 4) is reachable
    # Randomly choose locked rooms that are not in the path of the key
with open_w('./content/rooms.json') as f:
json.dump(rooms, f)
def generate_world(i=len(ITEMS), r=len(ROOMS), f=4):
"""Generates the world environment (items, rooms)
Args:
i (int): Number of unique items to generate
        r (int): Number of unique rooms to generate
        f (int): Number of floors
    """
if f > r:
raise ValueError("Number of floors cannot be less than the number of rooms to generate")
items = generate_items(i)
generate_rooms(r, f, items)
def generate_notes():
"""
Generates the notes placed in the world used to progress the plot line
"""
# TODO: Add variations of core notes, and generate from list
path = './content/notes.json'
Notes.setup(path)
text = ["I’ve finally arrived at the mansion. I can’t wait to restart my research once again. They just didn’t understand my work. Stupid … stupid. I’ll show them, and this time I’m so close. I think I may finally have everything I need. Only time will tell…",
"I heard a knock on the front door today. The only thing was that when I went to check, no one was there. It’s probably just my imagination. Also things are looking good, and the experiment seems to be successful. I just need time to examine the results and confirm my hypothesis.",
"It’s been a sad three days. Experiment 4 that I thought proved to be a success ended up slowly decaying, and now I have to find somewhere to dispose of it’s carcass. Maybe all of this is not worth it. The academy was right.",
"AHHHHHHHHHHHHHHHHHHH!!!! AHHHHHHHHHHH!", "I miss her so much. Sometimes at night, I hallucinate and see her standing outside my window and just watching me. But I know that’s not possible. It’s already been five years since the accident. I just miss her touch so much. She always understood me and my work. Unlike those filthy pigs at the academy.",
"I somehow lost Experiment 13. I’m sure last night I strapped it back in, double checking that it was restrained. However, when I went to check today, it wasn’t there anymore. Very strange. What’s even more confusing is it seems that the chains were cleanly cut, but that’s not possible.",
"I’ve made a breakthrough! I was just going about it the wrong way. Hehehe, and they said it couldn’t be done. I’m finally so close. *Some fancy formulas that you don’t understand are also written below*",
"Happy birthday to me. Happy birthday to me. Happy birthday to meeeeee!", "Someone must have remembered my birthday! I went outside today for the first time in weeks and found Experiment 13’s collar in the mailbox. Maybe it was the milkman.",
"I heard another knock on the door today. And once again when I went to check who it was, there wasn’t anybody there. I looked around and checked around the mansion, but still found nothing. I think I’ve been spending too much time in the lab. Doctor did say to get Vitamin D … or was it C? Which one gave you scurvy? I can’t remember.",
"Molly appeared in my dream. She was so vivid and realistic, and she came so close to me I could almost count each individual mole on her neck that formed a star. Her silky smooth blonde hair seems just as soft as I remember. If only I could feel it against my skin again, oh there’s nothing that I wouldn’t do.",
"Molly appeared in my dream once again. It’s two nights in a row, and this time there were two of her. I was so ecstatic I tried running up to both of her, in the dream of course, and I ended up hitting my toe on the nightstand. What confuses me is that I woke up today with a big welt on the same toe I stubbed in my dream. My sleepwalking must be getting worse.",
"The doctor prescribed me some medication for my toe. For some reason, every night since that dream, when I wake up in the morning the welt seems to get bigger and hurt even more. So I went to the doctor and he gave me some percocets. That dream made me miss Molly even more. If only I could have an infinite number of Mollys to be with me and understand me. What was I talking about? Oh yeah, percocets. But Molly... percocets.",
"In recent days, I have made more and more breakthroughs, but I feel that someone has been watching me. Whenever I’m in the lab and working on an experiment, I get a feeling that someone is looking over my shoulder. I’m worried it’s those dirty pigs from the academy. They must be after my research especially now that I’m so close to succeeding. But I’ll never let them get a hold of this. Not on my life.",
"Someone is after me. I am writing this note in desperation. If you find it, I am likely dead. Whoever reads this, protect my research. That’s all that I care abou- *The writing looks rushed and there is blood on the note. This seems to be the last note.*"]
titles = ['Starting my research', 'A knock', 'Failure', '[Untitled]', 'Molly', 'Experiment 13', 'Breakthrough!', 'Happy day', 'A present', 'Another one', 'A dream', 'Some pains', 'Medication', 'Someone is watching…', 'Someone is out to get me…']
days = [1, 7, 13, 30, 35, 57, 92, 100, 101, 118, 119, 120, 125, 141, 169]
for i in range(len(days)):
note = Notes(titles[i], days[i], text[i])
note.to_json(path)
def mkdir(path):
"""
Makes directory at given path if not already existing
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def open_w(path):
"""
Open "path" for writing, creating any parent directories as needed.
"""
mkdir(os.path.dirname(path))
return open(path, 'w')
if __name__ == "__main__":
mkdir('./content')
generate_notes()
write_attributes()
generate_world()
|
python
|
# -*- coding: utf-8 -*-
"""An HTTP server as training storage browser."""
from .cli import *
from .server import *
from .snapshot import *
|
python
|
from .base import BaseComponent
class KojiBuilder(BaseComponent):
componentName = "koji_builder"
deploymentConfigName = "koji-builder"
def create_build(self):
self.state.apply_object_from_template(
"general/imagestream.yml",
imagename="koji-builder",
)
self.state.apply_object_from_template(
"koji_builder/buildconfig.yml",
)
return "koji-builder"
def create(self):
if not self.state.config.get('builder', 'built_in'):
# Nothing to do yet
# TODO: Maybe control certificates for external builders
return
self.state.koji_hub.ensure_builder_user(
"koji-builder-built-in-1",
"x86_64",
)
self.state.ca.create_client_cert(
"koji-builder-built-in-1",
)
self.state.apply_object_from_template(
"koji_builder/configmap.yml",
maxjobs=5,
vendor="MBox",
)
self.state.apply_object_from_template(
"koji_builder/deploymentconfig.yml",
)
|
python
|
from django.db import models
class Page(models.Model):
title = models.CharField(max_length=255)
path = models.SlugField(unique=True)
body = models.TextField()
|
python
|
# coding=utf8
"""
@author: Yantong Lai
@date: 09/26/2019
@code description: It is a Python3 file to implement cosine similarity with TF-IDF and Word Embedding methods.
"""
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
glove_path = "../../data/glove.6B.50d.txt"
punctuation_list = ['.', ',', '!', '?', '@', '#', '$', '%', '^', '&', '*', '(', ')']
def tf_idf(tfidf_vectorizer, corpus):
"""
It is a function to use TF-IDF to vectorize corpus.
:param corpus: corpus to fit
:return: vector
"""
X = tfidf_vectorizer.fit_transform(corpus)
return X.toarray()
def calculate_cosine_similarity(vec, *args):
"""
It is a function to calculate cosine similarity.
:param vec: vector
:return: cosine similarity result
"""
return cosine_similarity(vec, args)
def load_GloVe_model(path):
"""
It is a function to load GloVe model
:param path: model path
:return: model array
"""
print("Load GloVe Model.")
with open(path, 'r') as f:
content = f.readlines()
model = {}
for line in content:
splitLine = line.split()
word = splitLine[0]
embedding = np.array((splitLine[1:]))
model[word] = embedding
print("Done.", len(model), " words loaded!\n")
return model
def process(sentence, lemmatizer):
"""
It is a function to
:param sentence:
:return:
"""
res = []
# 1. Split
splitSentence = sentence.split()
# 2. To lower case
for word in splitSentence:
word = word.lower()
# 3. Lemmatize
word = lemmatizer.lemmatize(word)
# 4. Remove stop words
stopword_set = set(stopwords.words("english"))
if word in stopword_set:
continue
# 5. Remove punctuation
if word in punctuation_list:
continue
res.append(word)
return res
def get_glove_vec(sentence, lemmatizer, glove_model):
"""
It is a function to get glove vector for cosine similarity calculation.
:param process_sen: processed sentence
:param glove_model: GloVe model
:return: np.mean(process_sen)
"""
res = []
for word in process(sentence, lemmatizer):
try:
vec = glove_model.get(word).astype(float)
res.append(np.mean(vec))
except Exception:
continue
return res
def calculate_glove_cosine_similarity(s1, s2, lemmatizer, glove_model):
"""
It is a function to calculate GloVe embedding cosine similarity.
:param glove_model: GloVe model
:return: GloVe cosine similarity
"""
# 1. Get GloVe Vector
s1_vec = get_glove_vec(s1, lemmatizer, glove_model) # <List> object
s2_vec = get_glove_vec(s2, lemmatizer, glove_model)
# 2. Measure the length of vector
try:
if len(s1_vec) == len(s2_vec):
s1_array = np.array((s1_vec)).reshape(1, -1)
s2_array = np.array((s2_vec)).reshape(1, -1)
elif len(s1_vec) > len(s2_vec):
s1_array = np.array((s1_vec)).reshape(1, -1)
s2_array = np.zeros(shape=(1, len(s1_vec)))
s2_array[0, :len(s2_vec)] = s2_vec
else:
s2_array = np.array((s2_vec)).reshape(1, -1)
s1_array = np.zeros(shape=(1, len(s2_vec)))
s1_array[0, :len(s1_vec)] = s1_vec
assert s1_array.shape == s2_array.shape
s1_mean = np.mean(s1_array, axis=0).reshape(1, -1)
s2_mean = np.mean(s2_array, axis=0).reshape(1, -1)
return cosine_similarity(s1_mean, s2_mean)[0][0]
except Exception as e:
print(e)
def main():
corpus = ['The president greets the press in Chicago',
'Obama speaks to the media in Illinois']
s1 = 'The president greets the press in Chicago'
s2 = 'Obama speaks to the media in Illinois'
s3 = 'I love you'
s4 = 'We went to Starbucks to buy hazelnut lattee yesterday'
s5 = 'We often go to Starbucks to buy coffee and chat with each other.!!!!!!'
############## 1. TF-IDF ###############
tf_idf_vectorizer = TfidfVectorizer()
tf_idf_vec = tf_idf(tfidf_vectorizer=tf_idf_vectorizer,
corpus=corpus)
print("tf_idf_vec = ", tf_idf_vec)
print("tf_idf_vec.shape = ", tf_idf_vec.shape)
print("tf_idf_vectorizer.get_feature_names() = ", tf_idf_vectorizer.get_feature_names())
##### 2. TF-IDF Cosine Similarity ######
tfidf_cosine_res = cosine_similarity(tf_idf_vec)[0][1]
print("tfidf_cosine_res = ", tfidf_cosine_res)
print("\n")
########### 3. Lemmatization ###########
lemmatizer = WordNetLemmatizer()
########## 4. Load GloVe Model #########
glove_model = load_GloVe_model(glove_path) # len(glove_model) = 400000
###### 5. GloVe Cosine Similarity ######
res = calculate_glove_cosine_similarity(s1, s3, lemmatizer, glove_model)
print("res = ", res)
res1 = calculate_glove_cosine_similarity(s1, s2, lemmatizer, glove_model)
print("res1 = ", res1)
res2 = calculate_glove_cosine_similarity(s2, s3, lemmatizer, glove_model)
print("res2 = ", res2)
res3 = calculate_glove_cosine_similarity(s5, s4, lemmatizer, glove_model)
print("res3 = ", res3)
if __name__ == '__main__':
main()
|
python
|
import datetime
import os
if __name__ == '__main__':
# List the files and directories in the current directory
files = [file for file in os.listdir('.')]
print(files)
# Operating system name
print(os.name)
# print(os.uname().__str__())
# Environment variables
path=os.environ.get('path')
print('path is: ',path)
defaultEnv=os.environ.get('test','test')
print('test environment is: ',defaultEnv)
## Working with files and directories
abspath=os.path.abspath(".")
print('absolute path is: ',abspath)
joinpath=os.path.join('.','test.py')
print('join path is: ',joinpath)
os.mkdir('d:/testdir')
print('directory created: ',os.path.exists(os.path.abspath('d:/testdir')))
os.rmdir('d:/testdir')
print('directory exists after removal: ',os.path.exists(os.path.abspath("d:/testdir")))
# Get the file extension
filenameList=os.path.splitext(os.path.abspath('os-fun.py'))
print('split extension is: '+str(filenameList))
# List all Python files
files=[f for f in os.listdir('.') if os.path.isfile(f) and os.path.splitext(f)[1]=='.py']
for f in files:
print('file is: ',f, 'size: ',os.path.getsize(f),',create time: ',datetime.datetime.fromtimestamp( os.path.getctime(f)), ',modify time: ',os.path.getmtime(f))
|
python
|
'''Expcontrol functionality that depends on psychopy.'''
import collections
import numpy
import psychopy.core
import psychopy.visual
import psychopy.logging
import psychopy.event
from psychopy.hardware.emulator import SyncGenerator
class Clock(object):
'''
Time-keeping functionality for expcontrol by wrapping Psychopy's
core.Clock instance.'''
def __init__(self):
'''Initialise a clock instance.'''
self.ppclock = psychopy.core.Clock()
super(Clock, self).__init__()
psychopy.logging.setDefaultClock(self.ppclock)
return
def __call__(self):
'''Return the current time stamp from ppclock.getTime'''
return self.ppclock.getTime()
def start(self):
'''Reset the clock to 0.'''
self.ppclock.reset()
return self()
def wait(self, time):
'''wait for time duration (s).'''
psychopy.core.wait(time)
return
def waituntil(self, time):
'''wait until the clock reaches time.'''
self.wait(time-self())
return
class PulseClock(Clock):
'''
Time-keeping with tracking of pulses (e.g. from a scanner trigger)
through a keyboard button at some interval. Note that time is
still tracked in seconds, not pulses. So on its own, using this class
will ensure that you synchronise your experiment to the first pulse
(see start method), but everything afterwards still runs in seconds as
with the standard Clock class.
The only further refinement is that the clock will attempt to measure
the pulse period empirically whenever given a chance (i.e. whenever
self.waituntil is called with enough remaining time that a pulse is
expected during the wait). These estimates are stored in self.periodhistory.
'''
def __init__(self, key, period, pulsedur=0.01, tolerance=.1, timeout=20., \
verbose=False, ndummies=0):
self.period = period
self.pulsedur = pulsedur
self.tolerance = tolerance
self.periodhistory = [period]
self.timeout = timeout
self.verbose = verbose
assert ndummies >= 0, 'ndummies must be 0 or greater'
self.ndummies = ndummies
super(PulseClock, self).__init__()
self.keyhand = KeyboardResponse(key, self.ppclock)
return
def waitpulse(self):
'''wait until a pulse is received. An exception is raised if the wait
exceeds self.timeout.'''
key, keytime = self.keyhand.waitkey(self.timeout)
assert key, 'exceeded %.0fs timeout without receiving pulse' % \
self.timeout
# first time of response if we got multiple
keytime = keytime[0]
return keytime
def start(self):
'''reset the clock and return once the correct pulse has been received
(one for each of self.ndummies+1).'''
# need to first reset the second clock to make the timeout counter
# in waitpulse work properly
super(PulseClock, self).start()
# nb +1 so we always wait for a pulse. dummies are in ADDITION to this
for dummy in range(self.ndummies+1):
if self.verbose:
print 'waiting for pulse %d' % dummy
# but this means that the starttime recorded here is off
starttime = self.waitpulse()
# so we adjust the clock to compensate for starttime (not quite the
# same as zeroing the clock - if time has passed since the pulse
# was received this operation will produce a current clock time >0
self.ppclock.add(starttime)
# return current time after all this
return self()
def waituntil(self, time):
'''wait until time, catching any pulses along the way.'''
# current time
now = self()
nowpulse = now / self.period
timepulse = time / self.period
npulseleft = numpy.floor(timepulse)-numpy.floor(nowpulse)
if npulseleft < 1:
# less than a self.period left, so wait it out using standard
# second clock
super(PulseClock, self).waituntil(time)
return
# if we make it here, there must be pulses to catch
actualtime = self.waitpulse()
# the pulse number we expect next
predictpulse = numpy.ceil(now / self.period)
# now we can update our estimate of period like so...
newpulse = actualtime / predictpulse
if numpy.abs(newpulse-self.period) > self.tolerance:
raise Exception('pulse period beyond tolerance: ' +
'expected=%.4f, estimated=%.4f' % (self.period,
newpulse))
self.period = newpulse
if self.verbose:
print 'Pulse at %.2f. tr=%.3f' % (actualtime, newpulse)
self.periodhistory.append(newpulse)
# avoid catching the same pulse twice
if (time-self()) > self.pulsedur:
self.wait(self.pulsedur)
# we recurse with a depth of npulseleft. This is important to
# handle cases where you are waiting n pulses + a bit extra
self.waituntil(time)
return
class Window(object):
'''
Display control functionality for expcontrol by wrapping
Psychopy's visual.Window.
'''
def __init__(self, *args, **kwargs):
'''
Initialise a window instance. All input arguments are piped to
psychopy.visual.Window.
'''
self.winhand = psychopy.visual.Window(*args, **kwargs)
# flip a few times because it is thought this helps stabilise
# timings
[self() for flip in range(50)]
return
def __call__(self):
'''flip the screen and return an exact time stamp of when the flip
occurred.'''
return self.winhand.flip()
def close(self):
'''close the screen.'''
self.winhand.close()
return
class KeyboardResponse(object):
'''
Psychopy-based keyboard response checking.
'''
esckey = 'escape'
def __init__(self, keylist, clock):
'''
Initialise a KeyboardResponse instance. keylist is a list of valid keys
(all other inputs are ignored). clock is a handle to a current Psychopy
clock instance.
'''
if not isinstance(keylist, collections.Iterable):
keylist = [keylist]
self.keylist = keylist + [self.esckey]
self.ppclock = clock
return
def __call__(self):
'''Check for responses.'''
ktup = psychopy.event.getKeys(keyList=self.keylist,
timeStamped=self.ppclock)
return self.parsekey(ktup)
def waitkey(self, dur=float('inf')):
'''wait for a key press for a set duration (default inf).'''
ktup = psychopy.event.waitKeys(maxWait=dur, keyList=self.keylist,
timeStamped=self.ppclock)
return self.parsekey(ktup)
def parsekey(self, ktup):
'''Convert timestamped key presses to separate key and time stamp
arrays. Used internally to support __call__ and waitkey.'''
keys = []
timestamps = []
if ktup:
keys, timestamps = zip(*ktup)
if self.esckey in keys:
raise Exception('user pressed escape')
return numpy.array(keys), numpy.array(timestamps)
class PulseEmulator(object):
'''
Simulate pulses at some period. Just a convenience wrapper for
psychopy.hardware.emulator.SyncGenerator.
'''
def __init__(self, *args, **kwargs):
'''Initialise a PulseEmulator instance. All arguments are passed to
SyncGenerator.'''
self.pulsehand = SyncGenerator(*args, **kwargs)
return
def start(self):
'''Start sending pulses.'''
self.pulsehand.start()
psychopy.core.runningThreads.append(self.pulsehand)
return
def stop(self):
'''Stop sending pulses.'''
self.pulsehand.stop()
return
|
python
|
class CEPlayground():
pass
def main():
return 0
|
python
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
outfile):
import StringIO
import arrayio
from genomicode import arrayplatformlib
from genomicode import parallel
from genomicode import filelib
from genomicode import AnnotationMatrix
from Betsy import module_utils as mlib
M = arrayio.read(in_data.identifier)
metadata = {}
# Add GENE_ID, GENE_SYMBOL, and DESCRIPTION. Figure out which
# platforms provide each one of this.
CATEGORIES = [
arrayplatformlib.GENE_ID,
arrayplatformlib.GENE_SYMBOL,
# biomaRt doesn't convert description. So just ignore it
# for now.
# TODO: implement DESCRIPTION.
#arrayplatformlib.DESCRIPTION,
]
#all_platforms = arrayplatformlib.identify_all_platforms_of_matrix(M)
#assert all_platforms, "Unknown platform: %s" % in_data.identifier
#header, platform_name = all_platforms[0]
scores = arrayplatformlib.score_matrix(M)
scores = [x for x in scores if x.max_score >= 0.75]
assert scores, "I could not identify any platforms."
# Find all the platforms not in the matrix.
platforms = [
arrayplatformlib.find_platform_by_name(x.platform_name) for
x in scores]
categories = [x.category for x in platforms]
missing = [x for x in CATEGORIES if x not in categories]
score = scores[0]
platform = platforms[0]
to_add = [] # list of platform names
for category in missing:
x = arrayplatformlib.PLATFORMS
x = [x for x in x if x.category == category]
x = [x for x in x if x.bm_organism == platform.bm_organism]
x = [x for x in x if x.name != score.platform_name]
# Take the first one, if any.
if x:
to_add.append(x[0].name)
if to_add:
annotate = mlib.get_config(
"annotate_matrix", which_assert_file=True)
sq = parallel.quote
cmd = [
"python",
sq(annotate),
"--no_na",
"--header", sq(score.header),
]
for x in to_add:
x = ["--platform", sq(x)]
cmd.extend(x)
cmd.append(in_data.identifier)
cmd = " ".join(cmd)
data = parallel.sshell(cmd)
metadata["commands"] = [cmd]
assert data.find("Traceback") < 0, data
else:
data = open(in_data.identifier).read()
# Clean up the headers.
platform2pretty = {
"Entrez_ID_human" : "Gene ID",
"Entrez_Symbol_human" : "Gene Symbol",
"Entrez_ID_mouse" : "Gene ID",
"Entrez_Symbol_mouse" : "Gene Symbol",
}
handle = open(outfile, 'w')
header_written = False
for cols in filelib.read_cols(StringIO.StringIO(data)):
if not header_written:
cols = [platform2pretty.get(x, x) for x in cols]
cols = AnnotationMatrix.uniquify_headers(cols)
header_written = True
print >>handle, "\t".join(cols)
return metadata
def name_outfile(self, antecedents, user_options):
return "signal_annot.tdf"
#from Betsy import module_utils
#original_file = module_utils.get_inputid(antecedents.identifier)
#filename = 'signal_annot_' + original_file + '.tdf'
#return filename
|
python
|
import pathlib
import setuptools
# The directory containing this file
TOPLEVEL_DIR = pathlib.Path(__file__).parent.absolute()
ABOUT_FILE = TOPLEVEL_DIR / "pokejdr" / "_version.py"
README = TOPLEVEL_DIR / "README.md"
# Information on the omc3 package
ABOUT_POKEJDR: dict = {}
with ABOUT_FILE.open("r") as f:
exec(f.read(), ABOUT_POKEJDR)
with README.open("r") as docs:
long_description = docs.read()
# Dependencies for the package itself
DEPENDENCIES = [
"numpy>=1.19.0",
"pandas>=1.0",
"loguru>=0.5.3",
"pydantic>=1.7",
]
# Extra dependencies
EXTRA_DEPENDENCIES = {
"test": [
"pytest>=5.2",
"pytest-cov>=2.7",
],
}
EXTRA_DEPENDENCIES.update(
{"all": [elem for list_ in EXTRA_DEPENDENCIES.values() for elem in list_]}
)
setuptools.setup(
name=ABOUT_POKEJDR["__title__"],
version=ABOUT_POKEJDR["__version__"],
description=ABOUT_POKEJDR["__description__"],
long_description=long_description,
long_description_content_type="text/markdown",
author=ABOUT_POKEJDR["__author__"],
author_email=ABOUT_POKEJDR["__author_email__"],
url=ABOUT_POKEJDR["__url__"],
packages=setuptools.find_packages(),
include_package_data=True,
package_data={
"pokejdr": ["data/*"],
}, # Include all files found in the "data" subdirectory
python_requires=">=3.6",
license=ABOUT_POKEJDR["__license__"],
classifiers=[
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
install_requires=DEPENDENCIES,
tests_require=EXTRA_DEPENDENCIES["test"],
extras_require=EXTRA_DEPENDENCIES,
)
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# https://stackoverflow.com/a/11236372/1576803
import datetime
import pytz
def get_hora_feliz_dia():
tz = pytz.timezone("America/Argentina/Buenos_Aires")
now = datetime.datetime.now(tz).date()
midnight = tz.localize(datetime.datetime.combine(now, datetime.time(0, 0, 3)), is_dst=None)
return midnight.astimezone(pytz.utc).time()
def get_hora_update_groups():
tz = pytz.timezone("America/Argentina/Buenos_Aires")
now = datetime.datetime.now(tz).date()
midnight = tz.localize(datetime.datetime.combine(now, datetime.time(0, 1, 3)), is_dst=None)
return midnight.astimezone(pytz.utc).time()
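# Illustrative note (not part of the original module): America/Argentina/Buenos_Aires
# is currently UTC-3 with no DST, so the local times above map to UTC as follows
# (assuming that offset holds on the date in question):
#   get_hora_feliz_dia()      # -> datetime.time(3, 0, 3)   (00:00:03 local)
#   get_hora_update_groups()  # -> datetime.time(3, 1, 3)   (00:01:03 local)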
|
python
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
disable highlight focused widget
Tested environment:
Mac OS X 10.6.8
http://stackoverflow.com/questions/1987546/qt4-stylesheets-and-focus-rect
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class Demo(QtGui.QWidget):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
# highlight
tv = QtGui.QTreeView(self)
tv.setGeometry(10, 10, 100, 100)
# disable highlight
tv2 = QtGui.QTreeView(self)
tv2.setGeometry(10, 110, 100, 100)
tv2.setFrameShape(QtGui.QFrame.NoFrame)
tv2.setFrameShadow(QtGui.QFrame.Plain)
tv2.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
sys.exit(app.exec_())
|
python
|
#!/usr/bin/env python
import mysql.connector
from mysql.connector import errorcode
from ConfigParser import SafeConfigParser
__author__ = 'catalyst256'
__copyright__ = 'Copyright 2014, Honeymalt Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'catalyst256'
__email__ = '[email protected]'
__status__ = 'Development'
def db_connect(host):
conf = SafeConfigParser()
conf.read('HoneyMalt.conf')
database = conf.get('kippodb', 'database').strip('\'')
username = conf.get('kippodb', 'username').strip('\'')
password = conf.get('kippodb', 'password').strip('\'')
config = {
'user': username,
'password': password,
'host': host,
'database': database,
'raise_on_warnings': True,
}
try:
cnx = mysql.connector.connect(**config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
return "Something is wrong with your user name or password"
elif err.errno == errorcode.ER_BAD_DB_ERROR:
return "Database does not exists"
else:
return err
else:
return cnx
|
python
|
#!/usr/bin/env python3
import sys,os,io,base64
import pandas as pd
import psycopg2,psycopg2.extras
DBHOST = "localhost"
DBPORT = 5432
DBNAME = "refmet"
DBUSR = "www"
DBPW = "foobar"
dsn = (f"host='{DBHOST}' port='{DBPORT}' dbname='{DBNAME}' user='{DBUSR}' password='{DBPW}'")
dbcon = psycopg2.connect(dsn)
#dbcon.cursor_factory = psycopg2.extras.DictCursor
rowcount = pd.read_sql("SELECT COUNT(*) FROM main", dbcon)
rowcount.to_csv(sys.stdout, "\t", index=False)
|
python
|
"""
Reading and Writing of meteorological data
"""
def __clean_HDF5_PLUGIN_PATH():
"""
if the libraries from hdf5plugin are in HDF5_PLUGIN_PATH, then remove them
"""
import os
import logging
if "HDF5_PLUGIN_PATH" in os.environ:
paths = os.environ["HDF5_PLUGIN_PATH"].split(":")
keep = []
for one_path in paths:
if len(one_path) == 0:
continue
if 'h5z-sz' not in one_path:
logging.info(f"removed {one_path} from HDF5_PLUGIN_PATH")
continue
keep.append(one_path)
if len(keep) > 0:
os.environ["HDF5_PLUGIN_PATH"] = ":".join(keep)
else:
del os.environ["HDF5_PLUGIN_PATH"]
# TODO: figure out why this is needed and remove it!
__clean_HDF5_PLUGIN_PATH()
from .file_type import get_file_type
from .reader import read
from .writer import write
from .dataset import drop_unused
from .compressor import compress
from .analyzer import analyze
from .cli import main as cli
from .evaluator import evaluate
|
python
|
from enum import Enum
from aoc2019.shared.intcode import IntCode
class Direction(Enum):
NORTH = 1
SOUTH = 2
WEST = 3
EAST = 4
class MoveResult(Enum):
WALL = 0
SUCCESS = 1
OXYGEN_SYSTEM = 2
def calculateMinutesToFullOxygen(oxygenSystemLocation, shipMap):
currentMinute = 0
oxygenLocations = set([oxygenSystemLocation])
visitedLocations = set()
while len(oxygenLocations) != len(shipMap):
currentOxygenLocations = oxygenLocations.copy()
for location in currentOxygenLocations:
if location not in visitedLocations:
northLocation = (location[0], location[1] + 1)
southLocation = (location[0], location[1] - 1)
westLocation = (location[0] - 1, location[1])
eastLocation = (location[0] + 1, location[1])
newLocations = [northLocation, southLocation,
westLocation, eastLocation]
for newLocation in newLocations:
if newLocation in shipMap and newLocation not in oxygenLocations:
oxygenLocations.add(newLocation)
visitedLocations.add(location)
currentMinute = currentMinute + 1
return currentMinute
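# Minimal illustrative check (hypothetical map, not puzzle data): a corridor of
# three open cells with the oxygen system at one end fills in two minutes, since
# oxygen spreads to one orthogonal neighbour per minute.
#   calculateMinutesToFullOxygen((0, 0), {(0, 0), (1, 0), (2, 0)})  # -> 2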
class RepairDroid:
def __init__(self, program):
self.minStepsToOxygenSystem = None
self.oxygenSystemLocation = None
self.visitedPositions = set()
self.__computer = IntCode(program)
self.__currentPosition = (0, 0)
self.__totalSteps = 0
def findOxygenSystem(self):
self.__searchInDirection(Direction.NORTH)
self.__searchInDirection(Direction.SOUTH)
self.__searchInDirection(Direction.WEST)
self.__searchInDirection(Direction.EAST)
def __getReverseDirection(self, direction):
if direction is Direction.NORTH:
return Direction.SOUTH
elif direction is Direction.SOUTH:
return Direction.NORTH
elif direction is Direction.WEST:
return Direction.EAST
elif direction is Direction.EAST:
return Direction.WEST
def __searchInDirection(self, direction):
self.__computer.inputs.append(direction.value)
self.__computer.run()
output = self.__computer.output[-1]
if output == MoveResult.WALL.value:
return
# Move successful, update position
self.__updatePosition(direction)
# If returned to origin, this branch search is done
if self.__currentPosition == (0, 0):
return
# Position has not been visited, increment steps. If a backtrack, decrement and return
if self.__currentPosition not in self.visitedPositions:
self.__totalSteps = self.__totalSteps + 1
self.visitedPositions.add(self.__currentPosition)
else:
self.__totalSteps = self.__totalSteps - 1
return
if output == MoveResult.OXYGEN_SYSTEM.value:
if self.oxygenSystemLocation is None:
self.oxygenSystemLocation = self.__currentPosition
if self.minStepsToOxygenSystem is None or self.__totalSteps < self.minStepsToOxygenSystem:
self.minStepsToOxygenSystem = self.__totalSteps
# Search in all new directions
reverseDirection = self.__getReverseDirection(direction)
for newDirection in Direction:
if newDirection is not reverseDirection:
self.__searchInDirection(newDirection)
# Time to back track
self.__searchInDirection(reverseDirection)
def __updatePosition(self, direction):
if direction is Direction.NORTH:
self.__currentPosition = (
self.__currentPosition[0], self.__currentPosition[1] + 1)
elif direction is Direction.SOUTH:
self.__currentPosition = (
self.__currentPosition[0], self.__currentPosition[1] - 1)
elif direction is Direction.WEST:
self.__currentPosition = (
self.__currentPosition[0] - 1, self.__currentPosition[1])
elif direction is Direction.EAST:
self.__currentPosition = (
self.__currentPosition[0] + 1, self.__currentPosition[1])
|
python
|
'''
Created on 19 okt. 2013
@author: Juice
'''
from constraints.constraint import Constraint
class NonogramConstraint(Constraint):
def __init__(self, group, initialValues):
super(NonogramConstraint, self).__init__(group, initialValues)
# def notify(self, cell):
# pass
def setAdjacencies(self, adjacencies):
# a list of adjacencies
self.adjacencies = list(adjacencies)
def applyConstraint(self):
# for now, assume only one color
cells = self.group.getCells()
if len(cells) == sum(self.adjacencies) + len(self.adjacencies)-1:
print "test", self
index = 0
for adjacency in self.adjacencies:
for i in range(adjacency):
cell = cells[index + i]
cell.setValue(1)
index += i + 1
if index < len(cells):
cell = cells[index]
cell.setValue(0)
pass
def notify(self, cell):
pass
def searchForNakedSet(self):
pass
def getType(self):
return "Nonogram constraint"
# def getAllowedValuesForValueList(self, allowedValues, usedValues):
# return allowedValues
def __str__(self):
return "nono["+",".join([str(x) for x in self.adjacencies]) + "]"
|
python
|
from typing import Dict, Union
import pyomo.environ as pyo
from oogeso import dto
from oogeso.core.devices.base import Device
class HeatPump(Device):
"""
Heat pump or electric heater (el to heat)
"""
carrier_in = ["el"]
carrier_out = ["heat"]
serial = []
def __init__(
self,
dev_data: dto.DeviceHeatPumpData, # Fixme: Correct?
carrier_data_dict: Dict[str, dto.CarrierElData], # Fixme: Correct?
):
super().__init__(dev_data=dev_data, carrier_data_dict=carrier_data_dict)
self.dev_data = dev_data
self.id = dev_data.id
self.carrier_data = carrier_data_dict
def _rules(self, pyomo_model: pyo.Model, t: int) -> Union[pyo.Expression, pyo.Constraint.Skip]:
dev = self.id
# heat out = el in * efficiency
lhs = pyomo_model.varDeviceFlow[dev, "heat", "out", t]
rhs = pyomo_model.varDeviceFlow[dev, "el", "in", t] * pyomo_model.paramDevice[dev]["eta"]
return lhs == rhs
def define_constraints(self, pyomo_model: pyo.Model):
"""Specifies the list of constraints for the device"""
list_to_reconstruct = super().define_constraints(pyomo_model)
constr = pyo.Constraint(pyomo_model.setHorizon, rule=self._rules)
# add constraint to model:
setattr(pyomo_model, "constr_{}_{}".format(self.id, "misc"), constr)
return list_to_reconstruct
def get_flow_var(self, pyomo_model: pyo.Model, t: int):
return pyomo_model.varDeviceFlow[self.id, "el", "in", t]
|
python
|
__all__ = ["Assembly", "Collection", "Extraction", "GetSubs", "Jelly", "QFix", "Setup", "Stages", "Support"]
|
python
|
from dataclasses import dataclass
from collections import Counter
from math import ceil
from queue import Queue
from typing import Dict
from aocd import data
@dataclass
class Recipe:
output_reagent: str
output_amount: int
inputs: Counter
def __repr__(self):
return f"{self.inputs} => {self.output_amount} {self.output_reagent}"
@classmethod
def from_string(cls, s):
inputs = Counter()
all_inputs_string, output_string = s.split('=>')
input_strings = all_inputs_string.split(',')
for input_string in input_strings:
amount_string, reagent_string = input_string.strip().split(' ')
inputs[reagent_string.strip()] = int(amount_string)
output_amount_string, output_reagent_string = output_string.strip().split(
' ')
return cls(output_reagent_string.strip(), int(output_amount_string),
inputs)
def reactions_from_data(data):
# print(data)
out = {}
for row in data.split('\n'):
recipe = Recipe.from_string(row)
out[recipe.output_reagent] = recipe
return out
def ore_needed_for_fuel(fuel_amount, recipes: Dict[str, Recipe]):
reserves = Counter()
orders = Queue()
orders.put({"ingredient": "FUEL", "amount": fuel_amount})
ore_needed = 0
while not orders.empty():
order = orders.get()
ingredient = order["ingredient"]
amount_needed = order["amount"]
if ingredient == 'ORE':
ore_needed += amount_needed
elif amount_needed <= reserves[ingredient]:
reserves -= Counter({ingredient: amount_needed})
else:
amount_needed -= reserves[ingredient]
recipe = recipes[ingredient]
batches = ceil(amount_needed / recipe.output_amount)
for input_ in recipe.inputs:
new_order = {"ingredient": input_,
"amount": recipe.inputs[input_] * batches}
orders.put(new_order)
leftover_amount = batches * recipe.output_amount - amount_needed
reserves[ingredient] = leftover_amount
return ore_needed
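# Worked example with made-up recipes (not the puzzle input): producing 1 FUEL
# needs 3 A, which forces 2 batches of "9 ORE => 2 A" (one A left over), i.e. 18 ORE.
#   example = reactions_from_data("9 ORE => 2 A\n3 A => 1 FUEL")
#   ore_needed_for_fuel(1, example)  # -> 18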
def bsearch(f, target, low, high):
""" Given a function f(x), return the value x for which f(x) is closest to
but does not exceed target. x is between low and high."""
if abs(low - high) <= 1:
if f(high) == target:
return high
return low
low_value = f(low)
high_value = f(high)
middle = (low + high) // 2
#print(f"f({low}) == {low_value}, f({high}) == {high_value}")
if abs(low_value - target) < abs(high_value - target):
return bsearch(f, target, low, middle)
else:
return bsearch(f, target, middle, high)
def main():
reactions = reactions_from_data(data)
print(ore_needed_for_fuel(1, reactions))
f = lambda x: ore_needed_for_fuel(x, reactions)
print(bsearch(f, 1000000000000, 1, 1000000000))
if __name__ == '__main__':
main()
|
python
|
#!/usr/bin/env python
from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashivault_argspec
from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashivault_auth_client
from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashivault_init
from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['deprecated'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_audit_enable
version_added: "2.2.0"
short_description: Hashicorp Vault audit enable module
description:
- Module to enable audit backends in Hashicorp Vault. Use hashivault_audit instead.
options:
name:
description:
- name of auditor
description:
description:
- description of auditor
options:
description:
- options for auditor
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_audit_enable:
name: "syslog"
'''
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str')
argspec['description'] = dict(required=False, type='str')
argspec['options'] = dict(required=False, type='dict')
module = hashivault_init(argspec)
result = hashivault_audit_enable(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_audit_enable(params):
client = hashivault_auth_client(params)
name = params.get('name')
description = params.get('description')
options = params.get('options')
backends = client.sys.list_enabled_audit_devices()
backends = backends.get('data', backends)
path = name + "/"
if path in backends and backends[path]["options"] == options:
return {'changed': False}
client.sys.enable_audit_device(name, description=description, options=options)
return {'changed': True}
if __name__ == '__main__':
main()
|
python
|
import os
import gzip
import pickle
import h5py
import numpy as np
import theano
from utils.misc import get_file_names_in_dir
from utils.vocab import UNK
class Loader(object):
def __init__(self, argv):
self.argv = argv
def load(self, **kwargs):
raise NotImplementedError
@staticmethod
def load_data(fn):
with gzip.open(fn, 'rb') as gf:
return pickle.load(gf)
@staticmethod
def load_key_value_format(fn):
data = []
with open(fn, 'r') as f:
for line in f:
key, value = line.rstrip().split()
data.append((key, int(value)))
return data
@staticmethod
def load_hdf5(path):
return h5py.File(path, 'r')
def load_txt_from_dir(self, dir_path, file_prefix):
file_names = get_file_names_in_dir(dir_path + '/*')
file_names = [fn for fn in file_names
if os.path.basename(fn).startswith(file_prefix)
and fn.endswith('txt')]
return [self.load(path=fn) for fn in file_names]
def load_hdf5_from_dir(self, dir_path, file_prefix):
file_names = get_file_names_in_dir(dir_path + '/*')
file_names = [fn for fn in file_names
if os.path.basename(fn).startswith(file_prefix)
and fn.endswith('hdf5')]
return [self.load_hdf5(fn) for fn in file_names]
class Conll05Loader(Loader):
def load(self, path, data_size=1000000, is_test=False):
if path is None:
return []
corpus = []
sent = []
with open(path) as f:
for line in f:
elem = [l for l in line.rstrip().split()]
if len(elem) > 0:
if is_test:
sent.append(elem[:6])
else:
sent.append(elem)
else:
corpus.append(sent)
sent = []
if len(corpus) >= data_size:
break
return corpus
class Conll12Loader(Loader):
def load(self, path, data_size=1000000, is_test=False):
if path is None:
return []
corpus = []
sent = []
with open(path) as f:
for line in f:
elem = [l for l in line.rstrip().split()]
if len(elem) > 10:
if is_test:
sent.append(elem[:11])
else:
sent.append(elem)
elif len(elem) == 0:
corpus.append(sent)
sent = []
if len(corpus) >= data_size:
break
return corpus
def load_emb(path):
word_list = []
emb = []
with open(path) as f:
for line in f:
line = line.rstrip().split()
word_list.append(line[0])
emb.append(line[1:])
emb = np.asarray(emb, dtype=theano.config.floatX)
if UNK not in word_list:
word_list = [UNK] + word_list
unk_vector = np.mean(emb, axis=0)
emb = np.vstack((unk_vector, emb))
return word_list, emb
def load_pickle(fn):
with gzip.open(fn, 'rb') as gf:
return pickle.load(gf)
def load_key_value_format(fn):
data = []
with open(fn, 'r') as f:
for line in f:
key, value = line.rstrip().split()
data.append((key, int(value)))
return data
|
python
|
from unittest2 import TestCase
from os import chdir, pardir, environ
from os.path import join, dirname, exists
from shutil import rmtree, copyfile
from subprocess import check_call, PIPE, STDOUT, CalledProcessError #, check_output
import sys
from tempfile import mkdtemp
from .venvtest import VirtualenvTestCase
class TestVirtualenvTaskSpecification(VirtualenvTestCase):
def test_running_task_in_specified_virtualenv(self):
with open(join(self.site_packages_path, "some_venv_module.py"), "w"):
pass
subpavement = """
from paver import tasks
from paver.virtual import virtualenv
@tasks.task
@virtualenv(dir="%s")
def t1():
import some_venv_module
""" % join(self.basedir, "virtualenv")
pavement_dir = mkdtemp(prefix="unrelated_pavement_module_")
try:
with open(join(pavement_dir, "pavement.py"), "w") as f:
f.write(subpavement)
chdir(pavement_dir)
nonvenv_paver_bin = join(dirname(__file__), pardir, 'distutils_scripts', 'paver')
check_call([sys.executable, nonvenv_paver_bin, "t1"],
env={
'PYTHONPATH' : join(dirname(__file__), pardir),
'PATH': environ['PATH']
})
finally:
rmtree(pavement_dir)
|
python
|
from django.db import transaction
from django.db.models.functions import Lower
from record_label.models import Band, BandLabel, MusicFestival, RecordLabel
from record_label.serializers import RecordLabelSerializer
def restructure_data(festivals_api_data):
"""A helper function to restructure the data from the festivals API
into the RecordLabel structure output by our API.
:args:
- `festivals_api_data`: a list of Python objects
:returns:
- `out`: Serialized output (JSON) with the following schema:
[
{"label": <str>,
"bands": [
{"name": <str>,
"festivals: [{"name": <str>},]
}
]
},
]
:note:
- this function creates (and then destroys) DB objects.
- this is to take advantage of Django ORM for relationship mapping, and
Django ModelSerializers for JSON serialization.
- Writing to the DB may not be necessary.
- However I note that in production, we would likely persist the API
data on our DB to reduce unnecessary repeated compute.
"""
# Ensure atomic transactions (for speed and security so concurrent requests have independent state)
with transaction.atomic():
for festival in festivals_api_data:
# Create MusicFestival object in ORM
festival_obj, _ = MusicFestival.objects.get_or_create(
name=festival.get("name")
)
bands = festival.get("bands", [])
for band in bands:
band_obj, _ = Band.objects.get_or_create(name=band.get("name"))
label_obj, _ = RecordLabel.objects.get_or_create(
name=band.get("recordLabel")
)
bandlabel_obj, _ = BandLabel.objects.get_or_create(
band=band_obj, recordLabel=label_obj
)
festival_obj.bands.add(bandlabel_obj)
out = RecordLabelSerializer(
RecordLabel.objects.all().order_by(Lower("name")), many=True
).data
# Cleanup DB
MusicFestival.objects.all().delete()
BandLabel.objects.all().delete()
Band.objects.all().delete()
RecordLabel.objects.all().delete()
return out
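# Hypothetical usage sketch (the festival and label names are illustrative, not
# from the real festivals API); the output follows the schema documented above:
#   payload = [
#       {"name": "Omega Fest", "bands": [
#           {"name": "The Black Dashes", "recordLabel": "Fourth Woman Records"},
#       ]},
#   ]
#   restructure_data(payload)
#   # -> [{"label": "Fourth Woman Records",
#   #      "bands": [{"name": "The Black Dashes",
#   #                 "festivals": [{"name": "Omega Fest"}]}]}]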
|
python
|
# visualize the networkx DiGraph using a Dash dashboard
# General warning: Note that with this dashboard, the edge arrows drawn are in fact symmetrical and angled correctly,
# and are all the same distance/size; they just don't always look that way because the scaling of the x-axis
# isn't the same as the scaling of the y-axis all the time (depending on how the user draws the box to zoom and the default aspect ratios).
# must run process_ontology_OWL_file.py before running visualize.py !
import pandas as pd
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import networkx as nx
import pygraphviz
import math
import numpy as np
import io
import json
import matplotlib.pyplot as plt
from scipy.special import binom
import argparse
def Bernstein(n, k):
"""
Bernstein polynomial.
"""
coeff = binom(n, k)
def _bpoly(x):
return coeff * x ** k * (1 - x) ** (n - k)
return _bpoly
def Bezier(points, num=200):
"""
Build Bezier curve from points.
"""
N = len(points)
t = np.linspace(0, 1, num=num)
curve = np.zeros((num, 2))
for ii in range(N):
curve += np.outer(Bernstein(N - 1, ii)(t), points[ii])
return curve
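# Quick illustrative check of the Bezier helper (not part of the dashboard itself):
# a quadratic curve through control points (0,0), (1,2), (2,0) starts and ends on
# the outer control points and passes through (1, 1) at t = 0.5.
#   curve = Bezier([[0, 0], [1, 2], [2, 0]], num=5)
#   curve[0], curve[2], curve[-1]  # -> [0. 0.], [1. 1.], [2. 0.]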
def get_figure(
N_node_details,
N_edge_details,
N,
G,
edge_type=None,
node_class=None,
node_property=None,
extra_edge_type=None,
):
the_nodes_to_display, the_edges_to_display = get_filtered_data(
N_node_details, N_edge_details, N, G, edge_type
)
# blank figure object
fig = go.Figure()
# Add node traces as ovals to the figure object
# Note how 72 is the conversion of graphviz point scale to inches scale
for node in N_node_details:
node_name = node.get("name")
# Do not show the nodes not in the_nodes_to_display
if node_name not in the_nodes_to_display:
continue
fillcolor = None
textcolor = "black"
line_color = "black"
if node_class:
node_class_list = eval(N.get_node(node_name).attr.get("all classes"))
if node_class in node_class_list:
fillcolor = "#aed9f6"
textcolor = "#0D3BF6"
if node.get("non_default_edge_type"):
line_color = "orange"
fillcolor = "orange"
if node_property:
if G.nodes.get(node_name).get("properties").get(node_property):
line_color = "yellow"
fillcolor = "yellow"
fig.add_shape(
type="circle",
fillcolor=fillcolor,
layer="below",
line_color=line_color,
x0=node.get("position").get("x") - 0.5 * node.get("width") * 72,
y0=node.get("position").get("y") - 0.5 * node.get("height") * 72,
x1=node.get("position").get("x") + 0.5 * node.get("width") * 72,
y1=node.get("position").get("y") + 0.5 * node.get("height") * 72,
)
# add scatter trace of text labels to the figure object
fig.add_trace(
go.Scatter(
x=[node.get("position").get("x")],
y=[node.get("position").get("y")],
# https://plotly.com/python/hover-text-and-formatting/#customizing-hover-text-with-a-hovertemplate
hovertemplate=node.get("node_hovertext"),
text=node_name,
mode="text",
textfont=dict(
color=textcolor,
size=8.5,
family="sans-serif",
),
)
)
# adding edges (and arrows and tees to edges)
for edge in N_edge_details:
edge_position = edge.get("positions")
# # Do not show the edges not in edges_to_display
o_edge = N.get_edge(edge["node1"], edge["node2"])
if o_edge not in the_edges_to_display:
continue
start = edge_position[0]
end = edge_position[1]
backwards = edge_position[2:][::-1]
edge_fix = (
[start] + backwards + [end]
) # graphviz has weird edge coordinate format that doesn't have coordinates in correct order
# approximate the B spline curve
# see the following websites to better understand:
# http://graphviz.996277.n3.nabble.com/how-to-draw-b-spline-td1328.html
# https://stackoverflow.com/questions/28279060/splines-with-python-using-control-knots-and-endpoints
# https://stackoverflow.com/questions/53934876/how-to-draw-a-graphviz-spline-in-d3
# https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-837-computer-graphics-fall-2012/lecture-notes/MIT6_837F12_Lec01.pdf
# https://github.com/kawache/Python-B-spline-examples
# https://stackoverflow.com/questions/12643079/b%C3%A9zier-curve-fitting-with-scipy
# https://nurbs-python.readthedocs.io/en/latest/module_bspline.html
blocks = divide_into_4s(edge_fix)
path = [] # path to draw
path.append(start)
for chunk in blocks:
curve = Bezier(chunk, 200)
path = path + curve.tolist()
edge_color = "black"
# add arrow adornment using linear algebra
if edge.get("edge_type") == "causes_or_promotes":
# A,B = [path[-20],path[-1]]
A, B = [path[20], path[0]]
A = np.array(A)
B = np.array(B)
height = 5 * math.sqrt(3)
theta = 45
width = height * math.tan(theta / 2)
U = (B - A) / np.linalg.norm(B - A)
V = np.array((-1 * U[1], U[0]))
v1 = B - height * U + width * V
v2 = B - height * U - width * V
adornment_to_add = [v1.tolist()] + [B] + [v2.tolist()]
xpoint = [coordinate[0] for coordinate in adornment_to_add]
ypoint = [coordinate[1] for coordinate in adornment_to_add]
edge_color = "blue"
fig.add_trace(
go.Scatter(
x=xpoint,
y=ypoint,
line_shape="linear",
mode="lines",
line=dict(color=edge_color),
)
)
# add tee adornment using linear algebra
if edge.get("edge_type") == "is_inhibited_or_prevented_or_blocked_or_slowed_by":
# B,A = [path[0],path[1]]
B, A = [path[-1], path[2]]
A = np.array(A)
B = np.array(B)
height = 0
width = 10
U = (B - A) / np.linalg.norm(B - A)
V = np.array((-1 * U[1], U[0]))
v1 = B - height * U + width * V
v2 = B - height * U - width * V
adornment_to_add = [v1.tolist()] + [B] + [v2.tolist()]
xpoint = [coordinate[0] for coordinate in adornment_to_add]
ypoint = [coordinate[1] for coordinate in adornment_to_add]
edge_color = "red"
fig.add_trace(
go.Scatter(
x=xpoint,
y=ypoint,
line_shape="linear",
mode="lines",
line=dict(color=edge_color),
)
)
# add edge spline trace to the figure object
xp = [coordinate[0] for coordinate in path]
yp = [coordinate[1] for coordinate in path]
fig.add_trace(
go.Scatter(
x=xp,
y=yp,
marker=dict(color=edge_color),
line_shape="spline",
hovertemplate=edge.get("edge_hovertext"),
)
)
# change the x and y axis ranges to be the values found in the 'header' of the graphviz graph layout string
fig.update_xaxes(range=[0, 8395.7])
fig.update_yaxes(range=[0, 1404])
fig.update_layout(
showlegend=False,
plot_bgcolor="rgba(0,0,0,0)",
height=700,
xaxis={"showgrid": False},
yaxis={"showgrid": False},
)
print("get_figure! FIG IS RETURNING")
return fig
# divide graphviz edge curve coordinates into groups of coordinates to help draw edges as correct spline curves (cubic B splines)
def divide_into_4s(input):
size = 4
step = 3
output = [input[i : i + size] for i in range(1, len(input) - 2, step)]
return output
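# Illustrative example (hypothetical coordinates): with 8 points p0..p7 the helper
# skips the first point (the caller prepends it to the path separately) and yields
# overlapping chunks of four control points, one chunk per cubic Bezier segment,
# consecutive chunks sharing an endpoint:
#   divide_into_4s(["p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7"])
#   # -> [["p1", "p2", "p3", "p4"], ["p4", "p5", "p6", "p7"]]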
# unit vector to help with edge geometry (specifically drawing arrows)
def unit_vector(v):
return v / np.linalg.norm(v)
def get_filtered_data(N_node_details, N_edge_details, N, G, edge_type=None):
if edge_type is None:
# By default display everything
nodes_to_display = [n.get("name") for n in N_node_details]
edges_to_display = [N.get_edge(e["node1"], e["node2"]) for e in N_edge_details]
else:
nodes_to_display = []
edges_to_display = []
for edge in G.edges:
if G.edges.get(edge).get("type") == edge_type:
node1, node2 = [edge[0], edge[1]]
edges_to_display.append(edge)
nodes_to_display.append(node1)
nodes_to_display.append(node2)
return nodes_to_display, edges_to_display
def visualize(gpickle_file_path):
"""
Main function to run the dashboard to visualize the ontology.
input: gpickle_file_path = path to gpickle of the networkx graph of the ontology
output: app = Dash app object
"""
# load in networkx graph to access graph information
G = nx.read_gpickle(gpickle_file_path)
print(nx.info(G))
# pos = nx.nx_agraph.graphviz_layout(G, prog='dot')
# convert the network x graph to a graphviz graph
N = nx.nx_agraph.to_agraph(G)
# Class filter to go under the graph
# Get all nodes classes
allclasses = set()
for node in N.nodes():
nodeclasslist = eval(node.attr.get("all classes"))
if isinstance(nodeclasslist, list) or isinstance(nodeclasslist, set):
allclasses.update([e for e in nodeclasslist])
# build the filter items for the layout
allclasses_filter_radioitems = [{"value": ee, "label": ee} for ee in allclasses]
allclasses_filter_radioitems.append({"label": "None", "value": "none"})
# Node Property filter to go under the graph
# Get all nodes properties
allnodeproperties = set()
for node in G.nodes():
props = G.nodes.get(node).get("properties")
allnodeproperties.update(props.keys())
allnodeproperties_filter_radioitems = [
{"value": ee, "label": ee} for ee in allnodeproperties
]
allnodeproperties_filter_radioitems.append({"label": "None", "value": "none"})
# change the graphviz graph settings to make the graph layout of edges and nodes as we want
N.edge_attr.update(splines="curved", directed=True)
N.layout(prog="dot")
# output the graphviz graph layout details as a string file to parse and vizualize using native python plotly and dash
f = (
N.string()
) # this string contains the coordinates for the edges so they aren't just straight lines but curve to avoid going through other nodes
# use python's in-memory text stream so string is the same across systems
# ... so universal newline decoding is performed when reading the string
s = io.StringIO(f, newline=None)
# option to save graphviz graph file if desired. Not necessary though.
# N.write('edges_spline_layout_coordinates.txt') #this file also has the coordinates for the splines for the edges that curve around the nodes instead of going through the nodes
# parse the graphviz graph string for the layout information we need
data = s.getvalue().split(";\n")
# remove header and footer content
header = data[0:3]
content = data[3 : len(data) - 1]
# close the in memory file
s.close()
# go through each item in 'content', and separate into either node or edge object
N_nodes = []
N_edges = []
for item in content:
if " -> " in item:
N_edges.append(item)
else:
N_nodes.append(item)
default_edge_type = [
"is_inhibited_or_prevented_or_blocked_or_slowed_by",
"causes_or_promotes",
]
# populate node graph layout details from graphviz
N_node_details = []
for N_node in N_nodes:
name = N_node.split("\t")[1].strip('"')
node_attrs = N.get_node(name).attr
height = node_attrs.get("height", 0)
width = node_attrs.get("width", 0)
position = node_attrs.get("pos", []).split(",")
node_properties = G.nodes.get(name).get("properties")
node_classes = G.nodes.get(name).get("all classes")
node_classes_hovertext = "<br>-".join([f"<b>{cla}</b>" for cla in node_classes])
node_properties_hovertext = "<br>-".join(
[f"<b>{key}</b>: {val}" for (key, val) in node_properties.items()]
)
n_details = {
"name": name,
"position": {"x": float(position[0]), "y": float(position[1])},
"height": float(height),
"width": float(width),
"node_hovertext": f"<b>Node classes:</b><br>{node_classes_hovertext}<br><br><b>Nodes properties:</b><br>{node_properties_hovertext}",
}
for edg in G.edges(name, data=True):
edg_type = edg[2].get("type")
if edg_type not in default_edge_type:
n_details["non_default_edge_type"] = edg_type
N_node_details.append(n_details)
# populate edge graph layout details from graphviz
N_edge_details = []
for edge in N_edges:
node1, node2 = edge.split("\t")[1].split(" -> ")
node1 = node1.strip('"')
node2 = node2.strip('"')
edge_attrs = N.get_edge(node1, node2).attr
positions = (
edge_attrs.get("pos")
.replace("e,", "")
.replace("\\", "")
.replace("r", "")
.replace("n", "")
)
positions = [
[float(x), float(y)]
for (x, y) in [cp.split(",") for cp in positions.split(" ")]
]
edge_type = edge_attrs.get("type")
edge_properties = G.edges.get((node1, node2)).get("properties")
if edge_properties:
edge_properties_hovertext = "<br>-".join(
[f"<b>{key}</b>: {val}" for (key, val) in edge_properties.items()]
)
else:
edge_properties_hovertext = "None"
edge_details = {
"node1": node1,
"node2": node2,
"positions": positions,
"edge_type": edge_type,
"edge_hovertext": f"<b>Edge properties:</b><br>{edge_properties_hovertext}",
}
N_edge_details.append(edge_details)
# divide the x and y coordinates into separate lists
node_x_list = []
node_y_list = []
for node in N_node_details:
node_x_list.append(node.get("position").get("x"))
node_y_list.append(node.get("position").get("y"))
# links to help undertand dash better if needed
# https://plotly.com/python/line-charts/
# https://plotly.com/python/shapes/
# radio icons and dropdown menus
# https://www.datacamp.com/community/tutorials/learn-build-dash-python
################### START OF DASH APP ###################
app = dash.Dash()
# NEED TO ADD HTML formating and maybe CSS
app.layout = html.Div(
children=[
html.H1(children="Climate Mind DiGraph"),
dcc.Graph(
id="graph",
figure=get_figure(N_node_details, N_edge_details, N, G),
config=dict({"scrollZoom": True}),
),
html.Div(
children=[
html.Label("Display Nodes with following edges:"),
dcc.RadioItems(
id="edge-type-filter",
options=[
{
"label": "causes_or_promotes",
"value": "causes_or_promotes",
},
{
"label": "is_inhibited_or_prevented_or_blocked_or_slowed_by",
"value": "is_inhibited_or_prevented_or_blocked_or_slowed_by",
},
{"label": "All", "value": "all"},
],
value="all",
),
]
),
html.Div(
children=[
html.Label("Higlight Nodes with the specific class:"),
dcc.Dropdown(
id="node-class-filter",
options=allclasses_filter_radioitems,
value="none",
),
]
),
html.Div(
children=[
html.Label("Higlight Nodes with the following non empty property:"),
dcc.Dropdown(
id="node-property-filter",
options=allnodeproperties_filter_radioitems,
value="none",
),
]
),
html.Div(
children=[
html.Label("Higlight Nodes"),
dcc.Checklist(
id="node-extra-edge-type-filter",
options=[
{
"label": "If they contain additional edge type(s) "
"(beyond 'causes_or_promotes' or "
"'is_inhibited_or_prevented_or_blocked_or_slowed_by')",
"value": "yes",
}
],
value="",
),
]
),
html.Div(
children=[
html.Label("Node Data (on click):"),
html.Pre(id="click-data"),
],
),
]
)
@app.callback(
dash.dependencies.Output("click-data", "children"),
[dash.dependencies.Input("graph", "clickData")],
)
def display_click_data(clickData):
return json.dumps(clickData, indent=2)
@app.callback(
dash.dependencies.Output("graph", "figure"),
[
dash.dependencies.Input("edge-type-filter", "value"),
dash.dependencies.Input("node-class-filter", "value"),
dash.dependencies.Input("node-property-filter", "value"),
dash.dependencies.Input("node-extra-edge-type-filter", "value"),
],
)
def display_click_data(edge_type, node_class, node_property, extra_edge_type):
print("display_click_data!")
if (
not edge_type
and not node_class
and not node_property
and not extra_edge_type
):
# Nothing has to happen.
# otherwise the callback is called in some load/init cases
raise dash.exceptions.PreventUpdate
if edge_type == "all":
edge_type = None
if node_class == "none":
node_class = None
if node_property == "none":
node_property = None
if extra_edge_type != "yes":
extra_edge_type = None
print(f"display_click_data! edge_type={edge_type}, node_class={node_class}")
return get_figure(
N_node_details,
N_edge_details,
N,
G,
edge_type,
node_class,
node_property,
extra_edge_type,
)
return app
def main(args):
"""
Main function to run the dashboard to visualize the ontology.
input: args = args from the argument parser for the function (gpickle_file_path)
example: python3 visualize.py "Climate_Mind_DiGraph.gpickle"
"""
# load arguments
gpickle_file_path = args.gpickle_file_path
app = visualize(gpickle_file_path=gpickle_file_path)
return app
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="get networkx graph object from ontology after running make_network.py"
)
parser.add_argument(
"gpickle_file_path", type=str, help="path to reference networkx gpickle object"
)
args = parser.parse_args()
app = main(args)
app.run_server(debug=False, host="0.0.0.0", port=8050)
|
python
|
#!/usr/bin/python
import sys
from socket import socket, AF_INET, SOCK_STREAM
import pymouse
import time
import numpy as np
import matplotlib.pyplot as plt
def main():
if len(sys.argv) != 3:
print "\nusage : python tcp_client.py #host #port\n"
else:
host = sys.argv[1]
port = int(sys.argv[2])
s = socket(AF_INET, SOCK_STREAM)
try:
plt.ion() #set interactive
plt.show()
s.connect((host, port))
(ax_list, ay_list, az_list) = ([],[],[])
(ax_factor, ay_factor, az_factor) = (20.0,20.0,20.0)
t_step = 0.3
t = 0
t_list = []
fig=plt.figure(num=None, figsize=(13, 9), dpi=75, facecolor='w', edgecolor='k')
(thresh_lower, thresh_upper) = (0,0)
x_min = 0
x_max = t_step*5
y_min = -2
y_max = 2
plt.axis([x_min,x_max,y_min,y_max])
while True: #poll
data = s.recv(1024).split(',')
if len(data) > 0:
try:
(ax, ay) = (float(data[3])*ax_factor, float(data[4])*ay_factor)
except ValueError:
(ax,ay) = (0,0) #fix headings transfer
ax_list.append(ax)
#ay_list.append(ay)
#az_list.append(float(data[5])*az_factor)
t_list.append(t)
plt.plot(t_list, ax_list, color="red", linewidth=1.0, linestyle="-", label="Ax")
#plt.plot(t_list, ay_list, color="green", linewidth=1.0, linestyle="-", label="Ay")
#plt.plot(t_list, az_list, color="blue", linewidth=1.0, linestyle="-", label="Az")
t+=t_step
if t > x_max:
#move axis forward
x_min += t_step
x_max += t_step
plt.axis([x_min,x_max,y_min,y_max])
if ax < thresh_lower:
plt.axhline(y=thresh_lower, c='w', ls='-')
thresh_lower = ax
plt.axhline(y=thresh_lower, c='k', ls='--')
if ax > thresh_upper:
plt.axhline(y=thresh_upper, c='w', ls='-')
thresh_upper = ax
plt.axhline(y=thresh_upper, c='k', ls='--')
plt.draw()
print t,data[3] #dump on terminal
except Exception as inst:
print sys.exc_info()[0]
print data
raise
if __name__ == "__main__":
main()
|
python
|
"""Test the particle swarm optimisation class"""
import copy
import pytest
import numpy as np
from pracopt.optimiser import ParticleSwarm
from pracopt.objective import Shubert, ObjectiveTest
from pracopt.utils import evaluate
# PSO with test objective functions
@pytest.fixture
def new_test_pso():
"""Return a new instance of the simulated annealing class
with 1D test function.
Don't run optimise on this as objective is constant - will hang."""
obj = ObjectiveTest()
pso = ParticleSwarm(obj)
pso.max_evaluations = 10
return pso
# PSO classes with 5D Shubert objective
@pytest.fixture
def new_pso5():
"""Return a new instance of the simulated annealing class
with 2D Shubert objective function."""
obj = Shubert(5)
return ParticleSwarm(obj)
# Test functions
def test_pso_init(new_test_pso):
"""Test init of pso."""
assert new_test_pso.dimension == 1
assert new_test_pso._n_particles == 25
assert new_test_pso._particle_x.shape == (25, 1)
assert new_test_pso._particle_v.shape == (25, 1)
assert new_test_pso._particle_best_x.shape == (25, 1)
assert new_test_pso._particle_best_f.shape == (25, 1)
assert new_test_pso._global_best_x.shape == (1,)
assert new_test_pso._global_best_f == np.Inf
def test_pso_rand(new_pso5, new_test_pso):
"""Test random number generator."""
# Fix seed for reproducible results
np.random.seed(seed=1)
with pytest.raises(ValueError):
# low bigger than high.
new_pso5._uniform_random(1,-1)
for _ in range(10):
sample = new_pso5._uniform_random(-1,1)
assert sample >= -1
assert sample <= 1
samps = 200
samples = np.zeros((samps,1))
for i in range (samps):
samples[i] = new_test_pso._uniform_random(-2,2)
assert round(np.mean(samples)*100)/100 == 0.02
assert round(np.std(samples)*100)/100 == 1.22
# Test positive sample mean
samples = samples[samples > 0]
assert round(np.mean(samples)*100)/100 == 1
def test_pso_particle_init(new_pso5):
"""Test particle initialisation for PSO."""
new_pso5._initialise_particles()
for i in range(new_pso5._n_particles):
pos = new_pso5._particle_x[i,:]
# Best value should be f(current position)
assert new_pso5._particle_best_f[i] == new_pso5.objective.f(pos)
# Global best value
assert new_pso5._global_best_f <= new_pso5._particle_best_f[i]
# Best position should be current position
assert all([a == b for a, b in zip(pos, new_pso5._particle_best_x[i,:])])
# Velocity should be in range [-4,4]
assert all([a <= b for a, b in zip(new_pso5._particle_v[i,:],
4*np.ones((new_pso5.dimension,1)))])
def test_velocity_update(new_pso5):
"""Test updating the velocity of a particle."""
# Index outside range will fail
with pytest.raises(AssertionError):
new_pso5._update_velocity(-1)
with pytest.raises(AssertionError):
new_pso5._update_velocity(26)
# Check an updated velocity value
i = 10
new_pso5._initialise_particles()
current_v = copy.deepcopy(new_pso5._particle_v[i,:])
new_pso5._update_velocity(i)
new_v = new_pso5._particle_v[i,:]
for i in range(new_pso5.dimension):
assert current_v[i] != new_v[i]
def test_run_reset(new_pso5):
"""Test running the optimisation."""
np.random.seed(seed=1)
new_pso5.max_evaluations = 200
new_pso5.run()
assert new_pso5._global_best_f == pytest.approx(-40.81, abs=0.01)
new_pso5.reset()
assert new_pso5._global_best_f == np.Inf
assert new_pso5.objective.evaluations == 0
|
python
|
import untangle
import os
from os.path import join
class DroneCommandParser:
def __init__(self):
# cache the command tuples as they are looked up so you don't have to parse the XML each time
self.command_tuple_cache = dict()
# parse the command files from XML (so we don't have to store ids and can use names
# for readability and portability!)
# grab module path per http://www.karoltomala.com/blog/?p=622
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
self.common_commands = untangle.parse(join(dir_path, 'common.xml'))
self.minidrone_commands = untangle.parse(join(dir_path, 'minidrone.xml'))
self.ardrone3_commands = untangle.parse(join(dir_path, 'ardrone3.xml'))
def get_command_tuple(self, project, myclass, cmd):
"""
Parses the command XML for the specified class name and command name
:param myclass: class name (renamed to myclass to avoid reserved name) in the xml file
:param cmd: command to execute (from XML file)
:return:
"""
# only search if it isn't already in the cache
if (myclass, cmd) in self.command_tuple_cache:
return self.command_tuple_cache[(myclass, cmd)]
# pick the right command file to draw from
if (project == "ardrone3"):
my_file = self.ardrone3_commands
elif (project == "minidrone"):
my_file = self.minidrone_commands
else:
my_file = self.common_commands
# search the selected command file for the matching class and command ids
project_id = int(my_file.project['id'])
for child in my_file.project.myclass:
if child['name'] == myclass:
class_id = int(child['id'])
#print child['name']
for subchild in child.cmd:
#print subchild
if subchild['name'] == cmd:
#print subchild['name']
cmd_id = int(subchild['id'])
# cache the result
self.command_tuple_cache[(myclass, cmd)] = (project_id, class_id, cmd_id)
return (project_id, class_id, cmd_id)
def get_command_tuple_with_enum(self, project, myclass, cmd, enum_name):
"""
Parses the command XML for the specified class name and command name and checks for enum_name
:param myclass: class name (renamed to myclass to avoid reserved name) in the xml file
:param cmd: command to execute (from XML file)
:return:
"""
# only search if it isn't already in the cache
if (myclass, cmd, enum_name) in self.command_tuple_cache:
#print("using the cache")
#print(self.command_tuple_cache[(myclass, cmd, enum_name)])
return self.command_tuple_cache[(myclass, cmd, enum_name)]
# pick the right command file to draw from
if (project == "ardrone3"):
my_file = self.ardrone3_commands
elif (project == "minidrone"):
my_file = self.minidrone_commands
else:
my_file = self.common_commands
# search the selected command file for the matching class, command, and enum ids
project_id = int(my_file.project['id'])
for child in my_file.project.myclass:
if child['name'] == myclass:
class_id = int(child['id'])
#print child['name']
for subchild in child.cmd:
#print subchild
if subchild['name'] == cmd:
#print subchild['name']
cmd_id = int(subchild['id'])
for arg_child in subchild.arg:
if arg_child['type'] == "enum":
for e_idx, echild in enumerate(arg_child.enum):
if echild['name'] == enum_name:
enum_id = e_idx
# cache the result
self.command_tuple_cache[(myclass, cmd, enum_name)] = ((project_id, class_id, cmd_id), enum_id)
#print ((project_id, class_id, cmd_id), enum_id)
return ((project_id, class_id, cmd_id), enum_id)
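# A minimal usage sketch (the project/class/command names below are illustrative
# assumptions; the real names and ids come from the bundled common.xml,
# minidrone.xml and ardrone3.xml files):
#
#     parser = DroneCommandParser()
#     # -> (project_id, class_id, cmd_id) looked up from minidrone.xml
#     cmd_tuple = parser.get_command_tuple("minidrone", "Piloting", "TakeOff")
#     # -> ((project_id, class_id, cmd_id), enum_index) for an enum-valued argument
#     cmd_tuple, enum_id = parser.get_command_tuple_with_enum(
#         "minidrone", "Animations", "Flip", "front")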
|
python
|
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
batch_size = 128
nb_classes = 10
nb_epoch = 20
nb_data = 28*28
log_filepath = '/tmp/keras_log'
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1]*X_test.shape[2])
# rescale
X_train = X_train.astype(np.float32)
X_train /= 255
X_test = X_test.astype(np.float32)
X_test /= 255
# convert class vectors to binary class matrices (one hot vectors)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
old_session = KTF.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
# build model
model = Sequential()
model.add(Dense(512, input_shape=(nb_data,), init='normal',name='dense1'))
model.add(Activation('relu', name='relu1'))
model.add(Dropout(0.2, name='dropout1'))
model.add(Dense(512, init='normal', name='dense2'))
model.add(Activation('relu', name='relu2'))
model.add(Dropout(0.2, name='dropout2'))
model.add(Dense(10, init='normal', name='dense3'))
model.add(Activation('softmax', name='softmax1'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, histogram_freq=1)
cbks = [tb_cb]
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch = nb_epoch, verbose=1, callbacks=cbks)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
KTF.set_session(old_session)
|
python
|
'''
Created on Aug 9, 2018
@author: fan
Generate mean, and variance covariance of key state variables from data
'''
import logging
import numpy as np
logger = logging.getLogger(__name__)
def gen_mean(df, mean_var_list,
short_mean_var_list=None,
group_by_var_list=None,
conditioning=None):
'''
Generate the means of a number of variables
'''
if short_mean_var_list is None:
short_mean_var_list = mean_var_list
if (conditioning is not None):
df_subset = df[conditioning]
else:
df_subset = df
if (group_by_var_list is not None):
means = df_subset.groupby(group_by_var_list)[mean_var_list].mean()
elif (group_by_var_list is None):
means = df_subset[mean_var_list].mean()
means.info = {}
means.info['obs'] = len(df_subset)
means.info['means'] = {var_short: means[var] for var, var_short in zip(mean_var_list, short_mean_var_list)}
return means
def gen_varcov(df, varcov_var_list,
short_varcov_var_list=None,
group_by_var_list=None,
conditioning=None):
if (short_varcov_var_list is None):
short_varcov_var_list = varcov_var_list
if (conditioning is not None):
df_subset = df[conditioning]
else:
df_subset = df
if (group_by_var_list is not None):
varcov = df_subset.groupby(group_by_var_list)[varcov_var_list].cov()
else:
subset = df_subset[varcov_var_list]
varcov = subset.cov()
varcov.info = {}
varcov.info['obs'] = len(df_subset)
varcov.info['variance'] = {var_short: varcov[var][var] for var, var_short in
zip(varcov_var_list, short_varcov_var_list)}
varcov.info['sd'] = {var_short: np.sqrt(varcov[var][var]) for var, var_short in
zip(varcov_var_list, short_varcov_var_list)}
return varcov
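# A minimal usage sketch with made-up data (the column names are illustrative assumptions):
#
#     import pandas as pd
#     df = pd.DataFrame({'wage': [10.0, 12.5, 9.0, 11.0],
#                        'hours': [40, 35, 38, 42],
#                        'group': ['a', 'a', 'b', 'b']})
#     means = gen_mean(df, ['wage', 'hours'], short_mean_var_list=['w', 'h'])
#     varcov = gen_varcov(df, ['wage', 'hours'], conditioning=(df['hours'] > 36))
#     print(means.info['means'], varcov.info['sd'])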
|
python
|
# Example module - finance.py
__all__ = ['tax1', 'tax2'] #defines the names to import when '*' is used
tax1 = 5
tax2 = 10
def cost(): return 'cost'
# Imported into code using
from finance import *
print(tax1)
print(tax2)
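# Note: cost is not listed in __all__, so the star import above does not bind it;
# calling cost() here would raise a NameError. It can still be imported explicitly,
# e.g. 'from finance import cost'.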
|
python
|
class Image:
def __init__(self,
source_ref,
image_shape,
class_map=None,
instance_nb=0,
scene_type=None,
weather_condition=None,
distractor=None):
"""
:param source_ref:
:param image_shape: default is (height, width, depth)
:param class_map: a dictionary with {label_id: label_text}
:param instance_nb: number of instance inside the image
:param scene_type: main idea of an image
:param weather_condition:
weather-condition=0 indicates "no weather degradation"
weather-condition=1 indicates "fog/haze"
weather-condition=2 indicates "rain"
weather-condition=3 indicates "snow"
:param distractor:
distractor=0 indicates "not a distractor"
distractor=1 indicates "distractor"
"""
self.source_ref = source_ref
self.image_shape = image_shape
self.class_map = class_map
self.scene_type = scene_type
self.weather_condition = weather_condition
self.distractor = distractor
self.instance_nb = instance_nb
@property
def height(self):
return self.image_shape[0]
@property
def width(self):
return self.image_shape[1]
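# A minimal construction sketch (the source path and label map are illustrative assumptions):
#
#     img = Image(source_ref="s3://bucket/frames/000123.png",
#                 image_shape=(720, 1280, 3),
#                 class_map={0: "car", 1: "pedestrian"},
#                 instance_nb=4,
#                 scene_type="urban intersection",
#                 weather_condition=2,   # rain
#                 distractor=0)          # not a distractor
#     print(img.height, img.width)       # -> 720 1280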
|
python
|
# This program launches Google in the browser using text from the cmd or clipboard
# 1. Figure out the URL
# The website I want to go on is formatted like this https://google.com/search?q=
# 2. Handle the command line arguments
import webbrowser, sys
import pyperclip
if len(sys.argv) > 1:
# Get address from command line
query = ' '.join(sys.argv[1:])
# 3. Handle the clipboard content and launch the browser
else:
query = pyperclip.paste()
webbrowser.open('https://www.google.com/search?q=' + query)
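# Example invocations (the script filename 'searchgoogle.py' is an assumption):
#   python searchgoogle.py graph algorithms   -> searches Google for "graph algorithms"
#   python searchgoogle.py                    -> searches for whatever text is on the clipboard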
|
python
|
from django.db import models
from django.utils import simplejson as json
from jsonate.utils import jsonate
from jsonate.widgets import JsonateWidget
from jsonate.form_fields import JsonateFormField
class JsonateField(models.TextField):
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if value == "":
return None
try:
if isinstance(value, basestring):
return json.loads(value)
except ValueError:
pass
return value
def get_db_prep_save(self, value, *args, **kwargs):
if value == "":
return None
value = jsonate(value)
return super(JsonateField, self).get_db_prep_save(value, *args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': JsonateFormField,
'widget': JsonateWidget
}
defaults.update(kwargs)
return super(JsonateField, self).formfield(**defaults)
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^jsonate\.fields\.JsonateField"])
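# A minimal usage sketch in a project's models.py (the Report model is an illustrative
# example, not part of jsonate):
#
#     from django.db import models
#     from jsonate.fields import JsonateField
#
#     class Report(models.Model):
#         payload = JsonateField(blank=True)
#
#     # Report(payload={"scores": [1, 2, 3]}).payload is serialized to JSON on save
#     # and loaded back into Python objects by to_python().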
|
python
|
import re
def is_valid_name(name):
regex = re.compile(r"^(?!_$)(?![-])(?!.*[_-]{2})[a-z0-9_-]+(?<![-])$", re.X)
if not re.match(regex, name):
return False
return True
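# A few illustrative calls:
#   is_valid_name("my-app")    # True  - lowercase letters, digits, single '-' or '_' allowed
#   is_valid_name("_private")  # True  - may start with '_' (but not be '_' alone)
#   is_valid_name("-leading")  # False - must not start with '-'
#   is_valid_name("trailing-") # False - must not end with '-'
#   is_valid_name("a--b")      # False - no consecutive '-' or '_'
#   is_valid_name("MyApp")     # False - uppercase characters are not allowed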
|
python
|
# -*- coding: utf-8 -*-
"""
Setup file for automatic installation and distribution of AMfe.
Run: 'python setup.py sdist' for Source Distribution
Run: 'python setup.py install' for Installation
Run: 'python setup.py bdist_wheel' for Building Binary-Wheel
(recommended for windows-distributions)
Attention: For every python-minor-version an extra wheel has to be built
Use environments and install different python versions by using
conda create -n python34 python=3.4 anaconda
Run: 'pip install wheelfile.whl' for Installing Binary-Wheel
Run: 'python setup.py bdist --format=<format>' for a binary distribution:
<format>=gztar|ztar|tar|zip|rpm|pkgtool|sdux|wininst|msi
Recommended: tar|zip|wininst (possibly msi)
Run: 'python setup.py bdist --help-formats' to find out which distribution
formats are available
"""
# Uncomment next line for debugging
# DISTUTILS_DEBUG='DEBUG'
import sys
def query_yes_no(question, default="yes"):
'''
Ask a yes/no question and return their answer.
Parameters
----------
question: String
The question to be asked
default: String "yes" or "no"
The default answer
Returns:
--------
answer: Boolean
Answer: True if yes, False if no.
'''
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no'.\n")
no_fortran_str = '''
###############################################################################
############### Compilation of Fortran sources is disabled! ##################
###############################################################################
'''
no_extension_str = '''
###############################################################################
############### Fortran-Extensions cannot be installed! ##################
############### Install Numpy before installing AMfe ##################
###############################################################################
'''
if __name__ == '__main__':
from setuptools import setup, find_packages
config = {
'name': 'amfe',
'version': '0.2',
'description': 'Nonlinear Finite Element Code with simplicity in mind.',
'long_description': 'Nonlinear Finite Element Code for \
Structural Dynamics. \
This code includes Model Order Reduction Techniques for nonlinear Systems \
such as DEIM or ECSW.',
'author': 'Johannes Rutzmoser',
'url': 'https://github.com/tum-am/amfe',
'download_url': 'Where to download it.',
'author_email': '[email protected]',
'maintainer': 'Christian Meyer',
'maintainer_email': '[email protected]',
'install_requires': ['numpy>=1.10', 'scipy>=0.17', 'pandas',
'h5py', 'matplotlib'],
'tests_require': ['nose', 'sphinx==1.3.1', 'sphinx_rtd_theme'],
#'packages': ['amfe'],
'packages': find_packages(),
'scripts': [],
'entry_points': {},
'provides': 'amfe',
'platforms': 'Linux, Windows',
'license': 'BSD3-License'
}
if 'no_fortran' in sys.argv:
sys.argv.remove('no_fortran')
print(no_fortran_str)
setup(**config)
else:
try:
from numpy.distutils.core import Extension, setup
ext_assembly = Extension(name='amfe.f90_assembly',
sources=['amfe/fortran/assembly.f90'],
language='f90',)
ext_element = Extension(name='amfe.f90_element',
sources=['amfe/fortran/element.pyf',
'amfe/fortran/element.f90'],
language='f90',)
ext_material = Extension(name='amfe.f90_material',
sources=['amfe/fortran/material.f90'],
language='f90',)
ext_modules = [ext_assembly, ext_element, ext_material]
setup(ext_modules=ext_modules, **config)
except ImportError:
# from distutils.core import setup
from setuptools import setup
print(no_extension_str)
answer = query_yes_no('Fortran files cannot be installed. \
It is recommended to abort installation and \
first install numpy. Then retry \
installation of AMfe. \
Do you want to continue installation?', 'no')
if answer:
setup(**config)
|
python
|
"""
Cli
Copyright (C) 2021 Netpro Project RepoSync
"""
import logging
import socket
from pathlib import Path
import click
from .client import Client
from .core_op import apply as core_apply
from .core_op import fop
from .core_op import show as core_show
from .server import Server
logging.basicConfig()
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.INFO)
@click.command()
@click.option('--address', '-a', prompt=True,
help="Client address to synchronize.", required=True)
@click.option('--file', '-f', 'file_path', type=click.Path(exists=True), prompt=True,
help="File path to synchronize.", required=True)
@click.option('--port', '-p', prompt=True, help="Use port", required=True, type=int)
def sync_cli(address: str, file_path: str, port: int) -> None:
"""
RepoSync cli
Args:
address (str): client address to synchronize.
file_path (str): file path to synchronize.
port (int): connect port.
"""
_LOG.info("address: %s", address)
_LOG.info("port: %d", port)
with Client(port, address) as client:
client.connect(Path(file_path), Path('.cache'))
@click.command()
@click.option('--port', '-p', prompt=True, help="Use port", required=True, type=int)
@click.option('--address', '-a', prompt=True,
help="Self address", required=False, default=socket.gethostname())
def server_cli(port: int, address: str):
"""
Server
Args:
port (int): use port.
address (str): self address.
"""
with Server(port, address) as server:
server.connect()
@click.group()
def git_cli() -> None:
"""
File update and show log.
"""
@git_cli.command()
@click.option('--file', '-f', 'file_path', type=click.Path(exists=True), prompt=True,
help="File path to update.", required=True)
def init(file_path: str):
"""
Run fop
Args:
file_path (str): Target file path.
"""
fop(file_path, '.cache')
@git_cli.command()
@click.option('--pager', '-p', 'use_pager', help="use pager", default=True)
def show(use_pager: bool) -> None:
"""
show logs.
Args:
use_pager (bool): use pager
"""
core_show(".cache", use_pager)
@git_cli.command()
@click.option('--hash', '-h', 'hash_value', prompt=True, help="A hash of the history to apply.", required=True)
@click.option('--file', '-f', 'file_path', type=click.Path(exists=True), prompt=True,
help="File path to update.", required=True)
def apply(hash_value: str, file_path: str) -> None:
"""
Specify a hash to undo history changes.
Args:
hash_value (str): A hash of the history to apply.
file_path (str): File path to update.
"""
_LOG.info("Apply hash of: %s", hash_value)
core_apply(hash_value, '.cache', file_path)
_LOG.info("Update file: %s", file_path)
|
python
|
# Generated by Django 2.0.4 on 2019-03-27 12:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('protein', '0005_proteingproteinpair_g_protein_subunit'),
]
operations = [
migrations.RemoveField(
model_name='proteinconformation',
name='template_structure',
),
]
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
"""
import pylons
def is_ldap_user():
"""
Helper function for determining if the current user is an LDAP user
@return: boolean
"""
return 'ckanext-ldap-user' in pylons.session
|
python
|
import os
LOG_FILE_PATH = os.path.join(os.getcwd(), "log.log")
DATABASE_PATH = os.path.join(os.getcwd(), "database.db")
ICONS_PATH = os.path.join(os.getcwd(), "icons")
CHAT_SETTINGS_PATH = os.path.join(os.getcwd(), "chat_settings")
BOTS_TXT_PATH = os.path.join(os.getcwd(), "bots.txt")
DEVICES_PATH = os.path.join(os.getcwd(), "src", "devices.txt")
CHOICE_ACTION_TEXT = "Выберите действие: "
CATEGORY_NAMES = [
"Основной аккаунт",
"Управление ботами",
"Модерация чата",
"Рейды"
]
MAIN_MENU = [
["0", "Добавить аккаунты в базу данных (bots.txt -> database.db)"],
["1", CATEGORY_NAMES[0]],
["2", CATEGORY_NAMES[1]],
["3", CATEGORY_NAMES[2]],
["4", CATEGORY_NAMES[3]]
]
MAIN_ACCOUNT_MENU = [
["b", "Назад"],
["1", "Пройти викторину"],
["2", "Лайкать последние записи (+ комментарии при наличии)"],
["3", "Подписаться на всех"],
["4", "Очистить подписки"],
["5", "Список пользователей которые добавили вас в чс"],
["6", "Отправить монеты"],
["7", "Отметится во всех сообществах"],
["8", "Оставить комментарий на стенке у онлайн участников"],
["9", "Оставить комментарий на стенке у новых участников"]
]
BOTS_MANAGEMENT_MENU = [
["b", "Назад"],
["s", "Показать список ботов в базе данных"],
["d", "Удалить аккаунт бота с базы данных"],
["1", "Сыграть в лотерею"],
["2", "Отправить монеты"],
["3", "Лайкнуть пост"],
["4", "Закинуть ботов в чат"],
["5", "Удалить ботов из чата"],
["6", "Закинуть ботов в сообщество"],
["7", "Отправить сообщение в чат"],
["8", "Подписать ботов на пользователя"],
["9", "Отписать ботов от пользователя"],
["10", "Изменить никнейм"],
["11", "Оставить комментарий на стенке"],
["12", "Изменить аватарку"],
["13", "Начать чат с пользователем"]
]
CHAT_MODERATION_MENU = [
["b", "Назад"],
["1", "Очистить сообщения в чате"],
["2", "Сохранить настройки чата в текстовом документе"],
["3", "Установить режим просмотра"],
["4", "Установить режим просмотра (с таймером)"],
["5", "Анализ сообщений чата"]
]
BADASS_MENU = [
["b", "Назад"],
["1", "Отправить системное сообщение"],
["2", "Спамить системными сообщениями"],
["3", "Пригласить всех активных участников сообщества в чат"],
["4", "Спам по всем общим чатам"],
["5", "Спам сис. сообщениями по всем общим чатам"]
]
|
python
|
from .enemy import Enemy
from .gnat import Gnat
from .laser import Laser
from .player import Player
from .patroller_gnat import PatrollerGnat
|
python
|
# Copyright (C) 2009-2012 - Curtis Hovey <sinzui.is at verizon.net>
# This software is licensed under the GNU General Public License version 2
# (see the file COPYING).
"""Text formatting features for the edit menu."""
__all__ = [
'FormatPlugin',
]
from gettext import gettext as _
from gi.repository import GObject
from gdpbase import (
GDPPluginMixin,
Gedit,
)
from gdp import config
from gdp.format import Formatter
class FormatPlugin(GDPPluginMixin, GObject.Object, Gedit.WindowActivatable):
"""Plugin for formatting code."""
__gtype_name__ = "GDPFormatPlugin"
window = GObject.property(type=Gedit.Window)
CONTROLLER_CLASS = Formatter
ACTION_GROUP_NAME = 'GDPFormatActions'
MENU_XML = """
<ui>
<menubar name="MenuBar">
<menu name='EditMenu' action='Edit'>
<placeholder name="EditOps_3">
<separator />
<menu action="GDPFormatMenu">
<menuitem action="RewrapText"/>
<menuitem action="FixLineEnding"/>
<menuitem action="TabsToSpaces"/>
<menuitem action="QuoteLines"/>
<menuitem action="SortImports"/>
<menuitem action="SingleLine"/>
<menuitem action="REReplace"/>
</menu>
<separator />
</placeholder>
</menu>
<menu name='ToolsMenu' action='Tools'>
<placeholder name="ToolsOps_2">
<separator />
<menuitem action="CheckProblems"/>
<menuitem action="CheckAllProblems"/>
<menuitem action="ShowSyntaxErrorsOnly"/>
<menuitem action="ReformatDoctest"/>
<menuitem action="ReformatCSS"/>
<separator />
</placeholder>
</menu>
</menubar>
</ui>
"""
def actions(self, formatter):
"""See `GDPPluginMixin`"""
return [
('GDPFormatMenu', None, _('_Format'), None, None, None),
('RewrapText', None, _("Rewrap _text"), None,
_("Rewrap the text to 78 characters."),
formatter.rewrap_text),
('FixLineEnding', None, _("Fix _line endings"), None,
_('Remove trailing whitespace and use newline endings.'),
formatter.newline_ending),
('TabsToSpaces', None, _("Convert t_abs to spaces"), None,
_('Convert tabs to spaces using the preferred tab size.'),
formatter.tabs_to_spaces),
('QuoteLines', None, _("_Quote lines"), '<Alt>Q',
_("Format the text as a quoted email."),
formatter.quote_lines),
('SortImports', None, _("Sort _imports"), None,
_('Sort and wrap imports.'),
formatter.sort_imports),
('SingleLine', None, _("_Single line"), None,
_("Format the text as a single line."),
formatter.single_line),
('REReplace', None, _("Regular _expression line replace"), None,
_("Reformat each line using a regular expression."),
formatter.re_replace),
('ReformatDoctest', None, _("Reformat _doctest"), None,
_("Reformat the doctest."),
formatter.reformat_doctest),
('ReformatCSS', None, _("Reformat _CSS"), None,
_("Reformat the CSS file or selection."),
formatter.reformat_css),
('CheckProblems', None, _("C_heck syntax and style"), 'F3',
_("Check syntax and style problems."),
formatter.check_style),
('CheckAllProblems', None,
_("Check syntax and style of all files"), None,
_("Check syntax and style problems in all open documents."),
formatter.check_all_style),
('ShowSyntaxErrorsOnly', None,
_("Show syntax errors only"), None,
_("Check syntax and style ignore info and warnings."),
formatter.on_show_syntax_errors_only_toggled,
config.getboolean('formatter', 'report_only_errors')),
]
def __init__(self):
"""Initialize the plugin the whole Gedit application."""
GObject.Object.__init__(self)
self.controller = None
def do_activate(self):
"""Activate the plugin in the current top-level window.
Add 'Format' to the edit menu and create a Formatter.
"""
self.activate()
self.connect_signal(
self.window, 'tab-added', self.on_tab_added_or_changed)
self.connect_signal(
self.window, 'active-tab-changed', self.on_tab_added_or_changed)
def do_deactivate(self):
"""Deactivate the plugin in the current top-level window."""
self.deactivate()
def do_update_state(self):
"""Toggle the plugin's sensativity in the top-level window.
This plugin is always active.
"""
pass
def on_tab_added_or_changed(self, window, tab):
"""Callback method for `tab-added` window signal.
Connects `document-saved` signal.
"""
document = tab.get_document()
if self.controller is None:
self.activate()
self.controller.correct_language(document)
language = document.get_language()
if language is None:
language_id = None
else:
language_id = language.get_id()
manager = self.window.get_ui_manager()
format_doctest_item = '/MenuBar/ToolsMenu/ToolsOps_2/ReformatDoctest'
manager.get_action(format_doctest_item).props.sensitive = (
language_id == 'doctest')
format_css_item = '/MenuBar/ToolsMenu/ToolsOps_2/ReformatCSS'
manager.get_action(format_css_item).props.sensitive = (
language_id == 'css')
self.connect_signal_after(
document, 'syntax-error-python', self.controller.check_style)
self.connect_signal_after(
document, 'saved', self.controller.check_style_background)
|
python
|
from django.urls import path
from survey import views
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns=[
path('',views.index,name="index"),
path('constructor/',views.Constructor,name="constructor"),
path('egresados/',views.Egresados,name="egresados"),
path('encuesta/<int:id>/',views.GetEncuesta,name="encuesta"),
path('respuestas/<int:id>/',views.Respuestas,name="respuestas"),
path('responder/',views.Responder,name="responder"),
path('survey/',views.Survey.as_view(),name="survey"),
path('saveQuestion/',views.SaveQuestion.as_view(),name="saveQuestion"),
path('saveAnswer/',views.SaveAnswer.as_view(),name="saveAnswer"),
path('deleteQuestion/',views.DeleteQuestion.as_view(),name="deleteQuestion"),
path('getAlumnos/',views.GetAlumnos.as_view(),name="getAlumnos"),
path('guardarRespuesta/',views.GuardarRespuesta.as_view(),name="guardarRespuesta"),
path('guardarCarrera/',views.GuardarCarrera.as_view(),name="guardarCarrera"),
path('enviar/',views.MandarCorreo.as_view(),name="enviar"),
]
|
python
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .pickleable_gpt2bpe_encoder import (
PickleableGPT2BPEEncoder,
)
__all__ = [
"PickleableGPT2BPEEncoder",
]
|
python
|
import pytest
import os
from click.testing import CliRunner
@pytest.mark.usefixtures("session")
@pytest.fixture()
def cli_runner(pushd):
# this will make sure we are back at `cwd`
# after this test is finished
pushd(os.getcwd())
yield CliRunner(mix_stderr=False)
|
python
|
#Copyright (c) 2021 Kason Suchow
import pygame
import imageManager
import data
class BaseCard(object):
def __init__(self):
self.x = -500
self.y = -500
self.pos = (self.x, self.y)
self.color = ''
self.name = ''
self.description = ''
self.attack = 0
self.defense = 0
self.cost = 0
self.type = ''
self.face = None
self.back = imageManager.cardBack
self.faceUp = False
self.hovered = False
self.clicked = False
self.selected = False
self.move = ''
self.tapped = False
self.image = self.back
def draw(self, surface):
if self.tapped:
surface.blit(pygame.transform.rotate(self.image, 90), self.pos)
else:
surface.blit(self.image, self.pos)
if self.selected:
self.drawOutline(surface)
def drawOutline(self, surface):
if self.tapped:
surface.blit(pygame.transform.rotate(imageManager.highlightCard, 90), (self.x - 2, self.y - 2))
else:
surface.blit(imageManager.highlightCard, (self.x - 2, self.y - 2))
def update(self):
self.pos = (self.x, self.y)
if self.faceUp:
self.image = self.face
else:
self.image = self.back
self.checkHovered()
if self.selected:
self.checkInput()
def checkHovered(self):
mousePos = pygame.mouse.get_pos()
if self.x <= mousePos[0] <= self.x + 80:
if self.y <= mousePos[1] <= self.y + 112:
self.hovered = True
else:
self.hovered = False
else:
self.hovered = False
def checkClicked(self, player):
for event in data.events:
if self.hovered and event.type == pygame.MOUSEBUTTONDOWN:
self.clicked = True
self.doTask(player)
else:
self.clicked = False
def checkSelected(self):
for event in data.events:
if self.hovered and event.type == pygame.MOUSEBUTTONDOWN:
self.selected = True
elif event.type == pygame.MOUSEBUTTONDOWN:
self.selected = False
def checkInput(self):
for event in data.events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_k:
self.selected = False
self.move = 'graveyard'
if event.key == pygame.K_t:
if self.tapped:
self.tapped = False
else:
self.tapped = True
if event.key == pygame.K_b:
self.move = 'hand'
class RedMana(BaseCard):
def __init__(self):
super().__init__()
self.color = 'red'
self.name = 'Red Mana'
self.description = 'Adds 3 Red Mana'
self.attack = 0
self.defense = 0
self.cost = 1
self.type = 'mana'
self.face = imageManager.cardRedMana
def doTask(self, player):
if player.meditationPoints >= self.cost:
player.redMana += 3
player.meditationPoints -= 1
self.move = 'graveyard'
class BlueMana(BaseCard):
def __init__(self):
super().__init__()
self.color = 'blue'
self.name = 'Blue Mana'
self.description = 'Adds 3 Blue Mana'
self.attack = 0
self.defense = 0
self.cost = 1
self.type = 'mana'
self.face = imageManager.cardBlueMana
def doTask(self, player):
if player.meditationPoints >= self.cost:
player.blueMana += 3
player.meditationPoints -= 1
self.move = 'graveyard'
class GreenMana(BaseCard):
def __init__(self):
super().__init__()
self.color = 'green'
self.name = 'Green Mana'
self.description = 'Adds 3 Green Mana'
self.attack = 0
self.defense = 0
self.cost = 1
self.type = 'mana'
self.face = imageManager.cardGreenMana
def doTask(self, player):
if player.meditationPoints >= self.cost:
player.greenMana += 3
player.meditationPoints -= 1
self.move = 'graveyard'
class Turtle(BaseCard):
def __init__(self):
super().__init__()
self.color = 'green'
self.name = 'Turtle'
self.description = 'Beefy Turtle defender boi'
self.attack = 1
self.defense = 4
self.cost = 2
self.type = 'defender'
self.face = imageManager.cardTurtle
def doTask(self, player):
if player.greenMana >= self.cost:
player.greenMana -= self.cost
self.move = 'defenders'
class SeaWall(BaseCard):
def __init__(self):
super().__init__()
self.color = 'green'
self.name = 'Sea Wall'
self.description = 'An old stone wall on the shore'
self.attack = 0
self.defense = 3
self.cost = 1
self.type = 'defender'
self.face = imageManager.cardSeaWall
def doTask(self, player):
if player.greenMana >= self.cost:
player.greenMana -= self.cost
self.move = 'defenders'
class Souls(BaseCard):
def __init__(self):
super().__init__()
self.color = 'red'
self.name = 'Souls'
self.description = 'Soul Sand?'
self.attack = 2
self.defense = 1
self.cost = 1
self.type = 'attacker'
self.face = imageManager.cardSouls
def doTask(self, player):
if player.redMana >= self.cost:
player.redMana -= self.cost
self.move = 'attackers'
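# A minimal usage sketch inside a game loop (player and surface come from the surrounding
# game code and are assumptions here):
#
#     card = Turtle()
#     card.x, card.y = 120, 300
#     card.faceUp = True
#     card.update()
#     card.checkSelected()
#     card.checkClicked(player)   # plays the card to 'defenders' if the player can afford it
#     card.draw(surface)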
|
python
|
"""Tests for reviewboard.diffviewer.managers.DiffCommitManager."""
from __future__ import unicode_literals
from dateutil.parser import parse as parse_date
from kgb import SpyAgency
from reviewboard.diffviewer.models import DiffCommit, DiffSet
from reviewboard.testing.testcase import TestCase
class DiffCommitManagerTests(SpyAgency, TestCase):
"""Unit tests for DiffCommitManager."""
fixtures = ['test_scmtools']
def test_create_from_data(self):
"""Testing DiffCommitManager.create_from_data"""
diff = (
b'diff --git a/README b/README\n'
b'index d6613f5..5b50866 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -1,1 +1,1 @@\n'
b'-blah..\n'
b'+blah blah\n'
)
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
diffset = DiffSet.objects.create_empty(
repository=repository,
basedir='',
revision=1)
raw_date = '2000-01-01 00:00:00-0600'
parsed_date = parse_date(raw_date)
commit = DiffCommit.objects.create_from_data(
repository=repository,
diff_file_name='diff',
diff_file_contents=diff,
parent_diff_file_name=None,
parent_diff_file_contents=b'',
request=None,
commit_id='r1',
parent_id='r0',
author_name='Author',
author_email='[email protected]',
author_date=parsed_date,
committer_name='Committer',
committer_email='[email protected]',
committer_date=parsed_date,
commit_message='Description',
diffset=diffset,
validation_info={})
self.assertEqual(commit.files.count(), 1)
self.assertEqual(diffset.files.count(), commit.files.count())
self.assertEqual(diffset.commit_count, 1)
# We have to compare regular equality and equality after applying
# ``strftime`` because two datetimes with different timezone info
# may be equal
self.assertEqual(parsed_date, commit.author_date)
self.assertEqual(parsed_date, commit.committer_date)
self.assertEqual(
raw_date,
commit.author_date.strftime(DiffCommit.ISO_DATE_FORMAT))
self.assertEqual(
raw_date,
commit.committer_date.strftime(DiffCommit.ISO_DATE_FORMAT))
|
python
|