# Generated by Django 2.2.7 on 2019-12-18 14:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0003_favourite'),
]
operations = [
migrations.AlterField(
model_name='recipe',
name='edit_date',
field=models.DateTimeField(auto_now=True),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pub_date', models.DateTimeField(auto_now_add=True)),
('text', models.CharField(max_length=250)),
('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='catalog.Recipe')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Comment',
'verbose_name_plural': 'Comments',
'ordering': ['-pub_date'],
},
),
]
|
python
|
# always write to disk
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.TemporaryFileUploadHandler'
]
STATIC_URL = '/static/'
STATIC_ROOT = '/app/public'
MEDIA_ROOT = '/data'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# compressor
'compressor.finders.CompressorFinder',
)
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_CSS_FILTERS = (
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.yuglify.YUglifyCSSFilter',
)
|
python
|
import os
import keras
import random as rn
import numpy as np
import tensorflow as tf
from keras.layers import Dense, Activation, Embedding
from keras.layers import Input, Flatten, dot, concatenate, Dropout
from keras import backend as K
from keras.models import Model
from keras.engine.topology import Layer
from keras import initializers
from TemporalPositionEncoding import PositionalEncoding
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config = config)
K.tensorflow_backend.set_session(sess)
class SurroundingSlots(Layer):
def __init__(self, window_length, max_range, trainable=True, name=None, **kwargs):
super(SurroundingSlots, self).__init__(name=name, trainable=trainable, **kwargs)
self.window_length = window_length
self.max_range = max_range
def build(self, inshape):
pass
def call(self, x):
surr = K.cast(x, dtype=tf.int32) + K.arange(start=-self.window_length, stop=self.window_length + 1, step=1)
surrUnderflow = K.cast(surr < 0, dtype=tf.int32)
surrOverflow = K.cast(surr > self.max_range - 1, dtype=tf.int32)
return surr * (-(surrUnderflow + surrOverflow) + 1) + surrUnderflow * (surr + self.max_range) + surrOverflow * (surr - self.max_range)
def compute_output_shape(self, inshape):
return (inshape[0], self.window_length * 2 + 1)
class MATE(Layer):
def __init__(self, dimension, trainable=True, name=None, **kwargs):
super(MATE, self).__init__(name=name, trainable=trainable, **kwargs)
self.dimension = dimension
def build(self, inshape):
# for multiplicative attention
self.W = self.add_weight(name="W", shape=(self.dimension, self.dimension), initializer=initializers.get("random_normal"))
# for personalization
self.Wmonth = self.add_weight(name="Wmonth", shape=(self.dimension, self.dimension), initializer=initializers.get("random_normal"))
self.Wday = self.add_weight(name="Wday", shape=(self.dimension, self.dimension), initializer=initializers.get("random_normal"))
self.Wdate = self.add_weight(name="Wdate", shape=(self.dimension, self.dimension), initializer=initializers.get("random_normal"))
self.Whour = self.add_weight(name="Whour", shape=(self.dimension, self.dimension), initializer=initializers.get("random_normal"))
def call(self, x):
userEmbedding = x[0]
curMonthEmbedding = K.reshape(x[1], shape=(-1, 1, self.dimension))
curDayEmbedding = K.reshape(x[2], shape=(-1, 1, self.dimension))
curDateEmbedding = K.reshape(x[3], shape=(-1, 1, self.dimension))
curHourEmbedding = K.reshape(x[4], shape=(-1, 1, self.dimension))
monthEmbeddings = x[5]
dayEmbeddings = x[6]
dateEmbeddings = x[7]
hourEmbeddings = x[8]
# personalization
curMonthEmbedding = curMonthEmbedding * (K.dot(userEmbedding, self.Wmonth))
curDayEmbedding = curDayEmbedding * (K.dot(userEmbedding, self.Wday))
curDateEmbedding = curDateEmbedding * (K.dot(userEmbedding, self.Wdate))
curHourEmbedding = curHourEmbedding * (K.dot(userEmbedding, self.Whour))
monthEmbeddings = monthEmbeddings * (K.dot(userEmbedding, self.Wmonth))
dayEmbeddings = dayEmbeddings * (K.dot(userEmbedding, self.Wday))
dateEmbeddings = dateEmbeddings * (K.dot(userEmbedding, self.Wdate))
hourEmbeddings = hourEmbeddings * (K.dot(userEmbedding, self.Whour))
# query for gradated attention
monthQ = curMonthEmbedding
dayQ = curDayEmbedding
dateQ = curDateEmbedding
hourQ = curHourEmbedding
# key, value
monthKV = concatenate([monthEmbeddings, curMonthEmbedding], axis=1)
dayKV = concatenate([dayEmbeddings, curDayEmbedding], axis=1)
dateKV = concatenate([dateEmbeddings, curDateEmbedding], axis=1)
hourKV = concatenate([hourEmbeddings, curHourEmbedding], axis=1)
# attention score
monthQKV = K.softmax(K.batch_dot(monthQ, K.permute_dimensions(monthKV, pattern=(0, 2, 1))) / K.sqrt(K.cast(self.dimension, dtype=tf.float32)), axis=-1)
dayQKV = K.softmax(K.batch_dot(dayQ, K.permute_dimensions(dayKV, pattern=(0, 2, 1))) / K.sqrt(K.cast(self.dimension, dtype=tf.float32)), axis=-1)
dateQKV = K.softmax(K.batch_dot(dateQ, K.permute_dimensions(dateKV, pattern=(0, 2, 1))) / K.sqrt(K.cast(self.dimension, dtype=tf.float32)), axis=-1)
hourQKV = K.softmax(K.batch_dot(hourQ, K.permute_dimensions(hourKV, pattern=(0, 2, 1))) / K.sqrt(K.cast(self.dimension, dtype=tf.float32)), axis=-1)
# embedding for each granularity of period information
monthEmbedding = K.batch_dot(monthQKV, monthKV)
dayEmbedding = K.batch_dot(dayQKV, dayKV)
dateEmbedding = K.batch_dot(dateQKV, dateKV)
hourEmbedding = K.batch_dot(hourQKV, hourKV)
# multiplicative attention
q = userEmbedding
kv = K.concatenate([monthEmbedding, dayEmbedding, dateEmbedding, hourEmbedding], axis=1)
qW = K.dot(q, self.W)
a = K.sigmoid(K.batch_dot(qW, K.permute_dimensions(kv, pattern=(0, 2, 1))))
timeRepresentation = K.batch_dot(a, kv)
return timeRepresentation
def compute_output_shape(self, inshape):
return (None, 1, self.dimension)
class TAHE(Layer):
def __init__(self, dimension, trainable=True, name=None, **kwargs):
super(TAHE, self).__init__(name=name, trainable=trainable, **kwargs)
self.dimension = dimension
def build(self, inshape):
pass
def call(self, x):
recentTimeRepresentations = x[0]
curTimeRepresentation = x[1]
recentTimestamps = x[2]
recentItemEmbeddings = x[3]
# previous timestamp == 0 ==> no history
mask = K.cast(recentTimestamps > 0, dtype=tf.float32)
# time-based attention
similarity = K.batch_dot(K.l2_normalize(curTimeRepresentation, axis=-1), K.permute_dimensions(K.l2_normalize(recentTimeRepresentations, axis=-1), pattern=(0, 2, 1)))
masked_similarity = mask * ((similarity + 1.0) / 2.0)
weightedPrevItemEmbeddings = K.batch_dot(masked_similarity, recentItemEmbeddings)
userHistoryRepresentation = weightedPrevItemEmbeddings
return userHistoryRepresentation
def compute_output_shape(self, inshape):
return (None, self.dimension)
class meanLayer(Layer):
def __init__(self, trainable=True, name=None, **kwargs):
super(meanLayer, self).__init__(name=name, trainable=trainable, **kwargs)
def build(self, inshape):
pass
def call(self, x):
return K.mean(x, axis=1, keepdims=True)
def compute_output_shape(self, inshape):
return (inshape[0], 1, inshape[2])
class Slice(Layer):
def __init__(self, index, trainable=True, name=None, **kwargs):
super(Slice, self).__init__(name=name, trainable=trainable, **kwargs)
self.index = index
def build(self, inshape):
pass
def call(self, x):
return x[:, self.index, :]
def compute_output_shape(self, inshape):
return (inshape[0], inshape[2])
class TemporalPositionEncoding(Layer):
def __init__(self, trainable=True, name=None, **kwargs):
super(TemporalPositionEncoding, self).__init__(name=name, trainable=trainable, **kwargs)
def build(self, inshape):
self.a = self.add_weight(name="a", shape=(1, ), initializer=initializers.get("ones"))
def call(self, x):
item = x[0]
time = x[1]
return item + time * self.a
def compute_output_shape(self, inshape):
return inshape[0]
def TimelyRec(input_shape, num_users, num_items, embedding_size, sequence_length, width, depth, dropout=None):
userInput = Input(shape=[1], dtype=tf.int32)
itemInput = Input(shape=[1], dtype=tf.int32)
monthInput = Input(shape=[1], dtype=tf.int32)
dayInput = Input(shape=[1], dtype=tf.int32)
dateInput = Input(shape=[1], dtype=tf.int32)
hourInput = Input(shape=[1], dtype=tf.int32)
curTimestampInput = Input(shape=[1], dtype=tf.int32)
recentMonthInput = []
recentDayInput = []
recentDateInput = []
recentHourInput = []
recentTimestampInput = []
recentItemidInput = []
for i in range(sequence_length):
recentMonthInput.append(Input(shape=[1], dtype=tf.int32))
for i in range(sequence_length):
recentDayInput.append(Input(shape=[1], dtype=tf.int32))
for i in range(sequence_length):
recentDateInput.append(Input(shape=[1], dtype=tf.int32))
for i in range(sequence_length):
recentHourInput.append(Input(shape=[1], dtype=tf.int32))
for i in range(sequence_length):
recentTimestampInput.append(Input(shape=[1], dtype=tf.int32))
for i in range(sequence_length):
recentItemidInput.append(Input(shape=[1], dtype=tf.int32))
userEmbedding = Embedding(num_users+1, embedding_size)(userInput)
itemEmbeddingSet = Embedding(num_items+1, embedding_size)
itemEmbedding = itemEmbeddingSet(itemInput)
recentItemEmbeddings = itemEmbeddingSet(concatenate(recentItemidInput, axis=-1))
recentTimestamps = concatenate(recentTimestampInput, axis=-1)
monthEmbedding = Embedding(12, embedding_size)
dayEmbedding = Embedding(7, embedding_size)
dateEmbedding = Embedding(31, embedding_size)
hourEmbedding = Embedding(24, embedding_size)
curMonthEmbedding = monthEmbedding(monthInput)
curDayEmbedding = dayEmbedding(dayInput)
curDateEmbedding = dateEmbedding(dateInput)
curHourEmbedding = hourEmbedding(hourInput)
recentMonthEmbeddings = monthEmbedding(concatenate(recentMonthInput, axis=-1))
recentDayEmbeddings = dayEmbedding(concatenate(recentDayInput, axis=-1))
recentDateEmbeddings = dateEmbedding(concatenate(recentDateInput, axis=-1))
recentHourEmbeddings = hourEmbedding(concatenate(recentHourInput, axis=-1))
monthEmbeddings = []
dayEmbeddings = []
dateEmbeddings = []
hourEmbeddings = []
prevMonthEmbeddings = []
prevDayEmbeddings = []
prevDateEmbeddings = []
prevHourEmbeddings = []
ratio = 0.2
for i in range(sequence_length):
prevMonthEmbeddings.append([])
for j in range(1, max(int(12 * ratio + 0.5), 1) + 1):
monthSurr = monthEmbedding(SurroundingSlots(window_length=j, max_range=12)(recentMonthInput[i]))
prevMonthEmbeddings[i].append(meanLayer()(monthSurr))
prevDayEmbeddings.append([])
for j in range(1, max(int(7 * ratio + 0.5), 1) + 1):
daySurr = dayEmbedding(SurroundingSlots(window_length=j, max_range=7)(recentDayInput[i]))
prevDayEmbeddings[i].append(meanLayer()(daySurr))
prevDateEmbeddings.append([])
for j in range(1, max(int(31 * ratio + 0.5), 1) + 1):
dateSurr = dateEmbedding(SurroundingSlots(window_length=j, max_range=31)(recentDateInput[i]))
prevDateEmbeddings[i].append(meanLayer()(dateSurr))
prevHourEmbeddings.append([])
for j in range(1, max(int(24 * ratio + 0.5), 1) + 1):
hourSurr = hourEmbedding(SurroundingSlots(window_length=j, max_range=24)(recentHourInput[i]))
prevHourEmbeddings[i].append(meanLayer()(hourSurr))
for i in range(1, max(int(12 * ratio + 0.5), 1) + 1):
monthSurr = monthEmbedding(SurroundingSlots(window_length=i, max_range=12)(monthInput))
monthEmbeddings.append(meanLayer()(monthSurr))
for i in range(1, max(int(7 * ratio + 0.5), 1) + 1):
daySurr = dayEmbedding(SurroundingSlots(window_length=i, max_range=7)(dayInput))
dayEmbeddings.append(meanLayer()(daySurr))
for i in range(1, max(int(31 * ratio + 0.5), 1) + 1):
dateSurr = dateEmbedding(SurroundingSlots(window_length=i, max_range=31)(dateInput))
dateEmbeddings.append(meanLayer()(dateSurr))
for i in range(1, max(int(24 * ratio + 0.5), 1) + 1):
hourSurr = hourEmbedding(SurroundingSlots(window_length=i, max_range=24)(hourInput))
hourEmbeddings.append(meanLayer()(hourSurr))
if int(12 * ratio + 0.5) <= 1:
monthEmbeddings = monthEmbeddings[0]
for i in range(sequence_length):
prevMonthEmbeddings[i] = prevMonthEmbeddings[i][0]
else:
monthEmbeddings = concatenate(monthEmbeddings, axis=1)
for i in range(sequence_length):
prevMonthEmbeddings[i] = concatenate(prevMonthEmbeddings[i], axis=1)
if int(7 * ratio + 0.5) <= 1:
dayEmbeddings = dayEmbeddings[0]
for i in range(sequence_length):
prevDayEmbeddings[i] = prevDayEmbeddings[i][0]
else:
dayEmbeddings = concatenate(dayEmbeddings, axis=1)
for i in range(sequence_length):
prevDayEmbeddings[i] = concatenate(prevDayEmbeddings[i], axis=1)
if int(31 * ratio + 0.5) <= 1:
dateEmbeddings = dateEmbeddings[0]
for i in range(sequence_length):
prevDateEmbeddings[i] = prevDateEmbeddings[i][0]
else:
dateEmbeddings = concatenate(dateEmbeddings, axis=1)
for i in range(sequence_length):
prevDateEmbeddings[i] = concatenate(prevDateEmbeddings[i], axis=1)
if int(24 * ratio + 0.5) <= 1:
hourEmbeddings = hourEmbeddings[0]
for i in range(sequence_length):
prevHourEmbeddings[i] = prevHourEmbeddings[i][0]
else:
hourEmbeddings = concatenate(hourEmbeddings, axis=1)
for i in range(sequence_length):
prevHourEmbeddings[i] = concatenate(prevHourEmbeddings[i], axis=1)
recentTimestampTEs = PositionalEncoding(output_dim=embedding_size)(recentTimestamps)
curTimestampTE = PositionalEncoding(output_dim=embedding_size)(curTimestampInput)
# temporal position encoding
te = TemporalPositionEncoding()
itemEmbedding = te([itemEmbedding, curTimestampTE])
recentItemEmbeddings = te([recentItemEmbeddings, recentTimestampTEs])
userVector = Flatten()(userEmbedding)
itemVector = Flatten()(itemEmbedding)
curTimestampTE = Flatten()(curTimestampTE)
# MATE
curTimeRepresentation = Flatten()(MATE(embedding_size)([userEmbedding, curMonthEmbedding, curDayEmbedding, curDateEmbedding, curHourEmbedding, monthEmbeddings, dayEmbeddings, dateEmbeddings, hourEmbeddings])) # None * embedding_size
prevTimeRepresentations = []
for i in range(sequence_length):
prevTimeRepresentations.append(MATE(embedding_size)([userEmbedding, Slice(i)(recentMonthEmbeddings), Slice(i)(recentDayEmbeddings), Slice(i)(recentDateEmbeddings), Slice(i)(recentHourEmbeddings), prevMonthEmbeddings[i], prevDayEmbeddings[i], prevDateEmbeddings[i], prevHourEmbeddings[i]])) # None * embedding_size)
prevTimeRepresentations = concatenate(prevTimeRepresentations, axis=1)
# TAHE
userHistoryRepresentation = TAHE(embedding_size)([prevTimeRepresentations, curTimeRepresentation, recentTimestamps, recentItemEmbeddings])
# combination
x = concatenate([userVector, itemVector, curTimeRepresentation, userHistoryRepresentation])
in_shape = embedding_size * 4
for i in range(depth):
if i == depth - 1:
x = Dense(1, input_shape=(in_shape,))(x)
else:
x = Dense(width, input_shape=(in_shape,))(x)
x = Activation('relu')(x)
if dropout is not None:
x = Dropout(dropout)(x)
in_shape = width
outputs = Activation('sigmoid')(x)
model = Model(inputs=[userInput, itemInput, monthInput, dayInput, dateInput, hourInput, curTimestampInput] + [recentMonthInput[i] for i in range(sequence_length)] + [recentDayInput[i] for i in range(sequence_length)] + [recentDateInput[i] for i in range(sequence_length)] + [recentHourInput[i] for i in range(sequence_length)] + [recentTimestampInput[i] for i in range(sequence_length)] + [recentItemidInput[i] for i in range(sequence_length)], outputs=outputs)
return model
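# Illustrative usage sketch (added here, not part of the original file): the hyperparameter
# values below are assumptions chosen only to show how TimelyRec is wired up; input_shape is
# not used inside the function and can be passed as None.
if __name__ == "__main__":
    demo_model = TimelyRec(input_shape=None, num_users=100, num_items=500,
                           embedding_size=16, sequence_length=5,
                           width=32, depth=2, dropout=0.2)
    demo_model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    demo_model.summary()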
|
python
|
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
import unidecode as udc
import scipy
class CustomOneHotEncoder(BaseEstimator, TransformerMixin):
"""
Transformer that converts the categorical variables of a DataFrame into dummies. It can also
drop the created dummies based on how well represented they are.
:param X: DataFrame on which the changes will be applied.
:param categorical_columns: List of the categorical variables to transform.
:param features_not_drop: List of the categorical variables that are transformed but whose
resulting columns we do not want to drop based on their representation.
:param threshold: Numeric value between 0 and 1 giving the cut-off point for dropping dummies
by representation. The cut is made on the % of 0s a column contains: every
column with a % of 0s greater than the given threshold is dropped.
:param sparse_matrix: Bool. If True the transformer returns a SparseMatrix. Defaults to
False and returns a DataFrame.
:return: Returns the DataFrame or SparseMatrix modified with the new dummies.
"""
def __init__(self, categorical_columns, features_not_drop, threshold, sparse_matrix = False):
super().__init__()
self.categorical_columns = categorical_columns
self.threshold = threshold
self.features_not_drop = features_not_drop
self.sparse_matrix = sparse_matrix
self.columns_to_drop_ = list()
def fit(self, X, y=None):
X_ = X.copy()
# Dummies for the categorical variables
X__ = pd.get_dummies(X_, drop_first = False)
# Mark the columns that will be dropped
for feat in self.categorical_columns:
X__.rename(columns=lambda x:
udc.unidecode(x.replace(feat, 'oneHotEncoder_' + feat)),
inplace = True)
for feat in self.features_not_drop:
X__.rename(columns=lambda x:
udc.unidecode(x.replace('oneHotEncoder_' + feat, 'oneHotEncoderX_' + feat)),
inplace = True)
# Select the one-hot columns whose representation exceeds the 'threshold'
for feat in X__.columns:
try:
if ((X__[feat].value_counts(normalize = True)[0] > self.threshold) & ('oneHotEncoder_' in feat)):
self.columns_to_drop_.append(feat)
except:
pass
return self
def transform(self, X, y=None):
X_ = X.copy()
X__ = pd.get_dummies(X_, drop_first = False)
for feat in self.categorical_columns:
X__.rename(columns=lambda x:
udc.unidecode(x.replace(feat, 'oneHotEncoder_' + feat)),
inplace = True)
# Drop the selected columns from the dataframe
for col in self.columns_to_drop_:
try:
X__.drop(columns= col, inplace = True)
except:
pass
# Remove characters from the column names that the model does not accept
X__.rename(columns=lambda x: udc.unidecode(x.replace("]", ")")), inplace = True)
if self.sparse_matrix:
X__ = scipy.sparse.csr_matrix(X__.values)
return X__
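# Illustrative usage (added sketch, not from the original project): a toy DataFrame with two
# categorical columns; the column names and threshold below are assumptions for demonstration.
if __name__ == "__main__":
    toy = pd.DataFrame({
        'city': ['madrid', 'bilbao', 'madrid', 'sevilla'],
        'fuel': ['gas', 'diesel', 'gas', 'gas'],
        'km': [10, 20, 30, 40],
    })
    encoder = CustomOneHotEncoder(categorical_columns=['city', 'fuel'],
                                  features_not_drop=['city'],
                                  threshold=0.9, sparse_matrix=False)
    print(encoder.fit_transform(toy))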
|
python
|
# -*- coding: utf-8 -*-
###########################################################
# #
# Copyright (c) 2018 Radek Augustýn, licensed under MIT. #
# #
###########################################################
__author__ = "[email protected]"
# @PRODUCTION MODULE [Full]
from base import fileRead
templates = { }
def registerTemplate(templateName, fileName, content = None):
templates[templateName] = (fileName, content)
templates["sample"] = (None, 'ahoj<div id="mojeTestId"><p class="caption">Moje Caption</p><p class="shortDescription">Moje shortDescription</p><p class="description">Moje dDescription</p></div>')
def getTemplate(name):
"""Returns HTML template content. For the first time in a given template it reads data from the file.
:param String name: Name of the template.
:return String: Template HTML content.
>>> print getTemplate("sample")
ahoj<div id="mojeTestId"><p class="caption">Moje Caption</p><p class="shortDescription">Moje shortDescription</p><p class="description">Moje dDescription</p></div>
"""
if name in templates:
fileName, content = templates[name]
if not content:
content = fileRead(fileName)
registerTemplate(name, fileName, content)
return content
else:
return ""
def getHtmlDiv(templateName, identifier):
"""Extracts content of the html DIV element with given id. There must not be another div inside.
:param String templateName: Name of the template in the templates list.
:param String identifier: Id of the selected DIV element.
:return String: Content of DIV element with given id.
>>> getHtmlDiv("sample", "mojeTestId")
'<p id="caption">Moje Caption</p><p id="shortDescription">Moje shortDescription</p><p id="description">Moje dDescription</p>'
"""
html = getTemplate(templateName)
startPos = html.find('<div id="%s"' % identifier)
startPos = html.find(">", startPos)
endPos = html.find('</div>', startPos)
if startPos >= 0 and endPos >= 0:
return html[startPos+1:endPos]
else:
return ""
def getHtmlItems(templateName, identifier):
"""
:param templateName:
:param identifier:
:return:
>>> getHtmlItems("sample", "mojeTestId")
{'caption': 'Moje Caption', 'shortDescription': 'Moje shortDescription', 'description': 'Moje dDescription'}
"""
result = {}
divContent = getHtmlDiv(templateName, identifier)
for paragraph in divContent.split("</p>"):
paragraph = paragraph.strip()
if paragraph and paragraph.startswith("<p"):
classNameStart = paragraph.find('class="') + 7
classNameEnd = paragraph.find('"', classNameStart)
className = paragraph[classNameStart:classNameEnd]
content = paragraph[paragraph.find(">") + 1:]
result[className] = content
return result
def setAttrsFromHTML(obj, templateName, identifier):
"""
:param obj:
:param templateName:
:param identifier:
:return:
>>> class A:pass
>>> a = A()
>>> setAttrsFromHTML(a, "sample", "mojeTestId")
>>> a.caption
'Moje Caption'
"""
for key, value in getHtmlItems(templateName, identifier).iteritems():
setattr(obj, key, value)
class HTMLFormatter:
def __init__(self):
self.html = ""
self._indent = 0
self.indentStr = ""
def add(self, str):
self.html += str
def addLine(self, str):
for i in range(self._indent):
str = "\t" + str
self.add(str + "\n")
def addLineAndIndent(self, str):
self.addLine(str)
self.indent()
def unIndentAndAddLine(self, str):
self.unIndent()
self.addLine(str)
def indent(self, count = 1):
self._indent = self._indent + count
def unIndent(self, count = 1):
self._indent = self._indent - count
if self._indent < 0 :
self._indent = 0
|
python
|
"""
Check if 2 strings are anagrams of each other
"""
from collections import Counter
def check_anagrams(str1, str2):
ctr1 = Counter(str1)
ctr2 = Counter(str2)
return ctr1 == ctr2
def check_anagrams_version2(str1, str2):
# counting approach; assumes both strings contain only lowercase ASCII letters a-z
hmap1 = [0] * 26
hmap2 = [0] * 26
for char in str1:
pos = ord(char) - ord("a")
hmap1[pos] += 1
for char in str2:
pos = ord(char) - ord("a")
hmap2[pos] += 1
return hmap1 == hmap2
if __name__ == "__main__":
str1 = "apple"
str2 = "pleap"
op = check_anagrams(str1, str2)
print(op)
|
python
|
import sys
import os
#reference = sys.argv[1]
#os.system("cp "+reference+" "+sys.argv[4])
firstfile = sys.argv[1] #sys.argv[1]
secondfile = sys.argv[2]
thirdfile = sys.argv[3]
seq1 = set()
seq2 = set()
file3 = open(thirdfile, 'r')
for line in file3:
myline = line.strip()
seqnames = myline.split('\t')
seq1.add(seqnames[0])
seq2.add(seqnames[1])
lines1 = []
file1 = open(firstfile, 'r')
for line in file1:
myline = line.strip()
if myline.startswith('>'):
#contents = myline.split('\w')
#myseq = contents[0][1:]
myseq = myline[1:myline.find(' ')]
if (myseq in seq1):
lines1.append(myline)
lines1.append(file1.readline().strip())
lines2 = []
file2 = open(secondfile, 'r')
for line in file2:
myline = line.strip()
if myline.startswith('>'):
myseq = myline[1:myline.find(' ')]
if (myseq in seq2):
lines2.append(myline)
lines2.append(file2.readline().strip())
fourthfile = open(firstfile, 'w')
#fifthfile = open(sys.argv[2], 'w')
for line in lines1:
fourthfile.write(line+"\n")
#for line in lines2:
# fifthfile.write(line+"\n")
|
python
|
class TicTacToe():
'''
Game of Tic-Tac-Toe
rules reference: https://en.wikipedia.org/wiki/Tic-tac-toe
'''
# coordinates of the cells for each possible line
lines = [ [(0,0), (0,1), (0,2)],
[(1,0), (1,1), (1,2)],
[(2,0), (2,1), (2,2)],
[(0,0), (1,0), (2,0)],
[(0,1), (1,1), (2,1)],
[(0,2), (1,2), (2,2)],
[(0,0), (1,1), (2,2)],
[(0,2), (1,1), (2,0)]
]
def __init__(self):
# 3x3 board, 0 = empty, 1 = occupied by player 1, 2 = occupied by player 2
self.board = [[0 for y in range(self.rows())] for x in range(self.cols())]
self.current_player = 1
def rows(self):
return 3
def cols(self):
return 3
# for display : width and height of a cell when displaying the game
def cell_size(self):
return 80, 80
# for display: label for cell at coordinates (x, y)
def get_label(self, x, y):
s = self.board[x][y]
if s == 0:
return ""
elif s == 1:
return "O"
elif s == 2:
return "X"
# a move by a player is valid if the cell is empty
def is_valid_play(self, move, player):
x, y = move
return self.board[x][y] == 0
# update the board with the move from a player
def play(self, move, player):
x, y = move
self.board[x][y] = player
# update the current_player
self.current_player = 2 if self.current_player == 1 else 1
def get_current_player(self):
return self.current_player
# return -1 if the game is not finished, 0 if draw, 1 or 2 if one of the players wins
def winner(self):
for line in TicTacToe.lines:
a, b, c = line
if self.board[a[0]][a[1]] != 0 and \
self.board[a[0]][a[1]] == self.board[b[0]][b[1]] == self.board[c[0]][c[1]]:
# one of the players won, return the player id (1 or 2)
return self.board[a[0]][a[1]]
# no player has won yet, check for a draw
for x in range(3):
for y in range(3):
if self.board[x][y] == 0:
# play still possible, game not finished
return -1
# no play is possible anymore, this is a draw
return 0
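# Minimal usage example (added illustration, not from the original file): player 1 fills the
# first column and wins; the move list below is arbitrary.
if __name__ == "__main__":
    game = TicTacToe()
    for move, player in [((0, 0), 1), ((0, 1), 2), ((1, 0), 1), ((1, 1), 2), ((2, 0), 1)]:
        if game.is_valid_play(move, player):
            game.play(move, player)
    print(game.winner())  # prints 1: player 1 owns the line (0,0)-(1,0)-(2,0)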
|
python
|
import cv2
import numpy as np
# Read image
img = cv2.imread("imori.jpg")
# Decrease color depth: quantise each channel to one of 4 levels (32, 96, 160, 224)
out = img.copy()
out = out // 64 * 64 + 32
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
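# Added illustration of the quantisation above: integer division by 64 buckets each channel
# value into one of four bands, and +32 shifts it to the band centre
# (0-63 -> 32, 64-127 -> 96, 128-191 -> 160, 192-255 -> 224).
sample = np.array([0, 63, 64, 130, 255], dtype=np.uint8)
print(sample // 64 * 64 + 32)  # [ 32  32  96 160 224]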
|
python
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import collections.abc
import json
import typing
from azure.functions import _sql as sql
from . import meta
class SqlConverter(meta.InConverter, meta.OutConverter,
binding='sql'):
@classmethod
def check_input_type_annotation(cls, pytype: type) -> bool:
return issubclass(pytype, sql.SqlRowList)
@classmethod
def check_output_type_annotation(cls, pytype: type) -> bool:
return issubclass(pytype, (sql.SqlRowList, sql.SqlRow))
@classmethod
def decode(cls,
data: meta.Datum,
*,
trigger_metadata) -> typing.Optional[sql.SqlRowList]:
if data is None or data.type is None:
return None
data_type = data.type
if data_type in ['string', 'json']:
body = data.value
elif data_type == 'bytes':
body = data.value.decode('utf-8')
else:
raise NotImplementedError(
f'Unsupported payload type: {data_type}')
rows = json.loads(body)
if not isinstance(rows, list):
rows = [rows]
return sql.SqlRowList(
(None if row is None else sql.SqlRow.from_dict(row))
for row in rows)
@classmethod
def encode(cls, obj: typing.Any, *,
expected_type: typing.Optional[type]) -> meta.Datum:
if isinstance(obj, sql.SqlRow):
data = sql.SqlRowList([obj])
elif isinstance(obj, sql.SqlRowList):
data = obj
elif isinstance(obj, collections.abc.Iterable):
data = sql.SqlRowList()
for row in obj:
if not isinstance(row, sql.SqlRow):
raise NotImplementedError(
f'Unsupported list type: {type(obj)}, \
lists must contain SqlRow objects')
else:
data.append(row)
else:
raise NotImplementedError(f'Unsupported type: {type(obj)}')
return meta.Datum(
type='json',
value=json.dumps([dict(d) for d in data])
)
|
python
|
'''
This program parses a txt file containing proteins to analyse with IUPRED/BLAST/JALVIEW
'''
import warnings # allows program to be able to ignore benign warnings
#####
# IGNORE WARNINGS
#####
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import requests
import csv # allows for csv r/w
import pandas as pd # allows for csv r/w
import json
import mechanize
import webbrowser
import time
import collections # allows orderedDict
from selenium import webdriver # for web browser interation/headless browser
from bs4 import BeautifulSoup
import glob
import os.path
import datetime
import urllib2
# Also Using PhantomJS installed via npm (included with NodeJS)
#########################################
############WORKING FUNCTIONS############
#########################################
def parseDataSet(fileName='FruitiData.txt'):
'''
Parses original dataSet containing the amino acid sequences of the maternal transcription factors we're interested in.
Takes in file name as string
Outputs:
1. orderdDict
2. list of dict keys
3. list of dict vals
Can call function and set global variable equal to one or all of
the dataTypes/Sets that this outputs.
Example:
variable = parseDataSet()[1]
This would result in variable being equal to list of all keys in dict
created.
'''
# open dataset text file > create var == to each line as list
fList = open(fileName).readlines()
# convert list to dictionary
fDict = collections.OrderedDict() # creates empty orderedDict
##fDict = {}
dictVal = '' # empty string to hold dictVals
dictKey = '' # empty string to hold dictKeys
length = len(fList)
for line in xrange(0, length):
#print('inside for')
#print('line: ' + str(line))
if(line % 2 == 0): # if zero or even > use as key
#print('inside if1')
dictKey = str(fList[line]).replace('\n', '')
if(line % 2 != 0): # if odd > use as value
#print('inside if2')
dictVal = str(fList[line]).replace('\n', '')
if(dictKey != '' and dictVal != ''):
#print('inside if3')
fDict.update({dictKey: dictVal})
dictKey = dictVal = ''
listFDictKeys = fDict.keys() # saves dict keys as list
listFDictVals = fDict.values() # saves dict vals as list
# testing prints
# print(fDict)
# print(listFDictVals)
return fDict, listFDictKeys, listFDictVals
# creates timestamp
def timeStamp():
'''
returns list = ['mmddyy','hh:mm:ss','Weekday']
'''
# ts = time.gmtime()
ts = time.localtime()
ts2 = time.strftime('%m%d%y,%H:%M:%S,%d%m%y-%H%M%S,%A', ts)
ts2 = ts2.split(',')
return ts2
###############################################
############TESTING BELOW THIS LINE############
###############################################
# creates a csv to write to, add headers row
def csvCreate(listX, listY, csvName='preIupred.csv'):
'''
Takes in listFDictKeys, listFDictVals
'''
f = csv.writer(open(csvName, 'w'), delimiter=',', lineterminator='\n')
# f.writerow(['iupred2', 'meta', 'seqence', 'anchor2'])
f.writerow(['mmddyy', 'hh:mm:ss', 'Key', 'Value', 'example1', 'example2', 'example3'])
for i in xrange(len(listX)):
f.writerow((timeStamp()[0], timeStamp()[1], listX[i], listY[i]))
# using 'csv' library open csv > updates specific cell
def csvUpdate():
'''
1. Opens preIupred.csv (r)
2. Opens preIupred.csv (w)
3. Writes over header names
4.
'''
# read csv file into 'fooReader'
fooReader = csv.reader(open('preIupred.csv', 'rb'), delimiter=',', lineterminator='\n')
f = csv.writer(open('preIupred.csv', 'w'), delimiter=',', lineterminator='\n')
f.writerow(['mmddyy', 'hh:mm:ss', 'Key', 'Value', 'example1', 'example2', 'example3'])
input = '>Mnt 64.001883 0.822785'
# read each row in 'fooReader'
for row in fooReader:
# define first row column as 'value' for testing
key = row[2]
# test if value (1st column) is the same as input (user input)
if key == input:
#... if it is then print the 5th column in a certain way
f.writerow(('FUCKOFF-ITWORKED', '', '', '', '', '', 'hello'))
#print('this is where the beat drops!')
'''
# f.writerow(['iupred2', 'meta', 'seqence', 'anchor2']) #OLD HEADER NAMES, MIGHT USE THEM AGAIN, JUST HERE TO SAVE EM
# f.writerow(['mmddyy', 'hh:mm:ss', 'Key', 'Value', 'example1', 'example2', 'example3'])
for i in xrange(5):
f.writerow(('FUCKOFF-ITWORKED', '', '', '', '', '', 'hello'))
'''
# using pandas - update csv file at cell level
def csvUpdate2():
'''
Pandas Cheatsheet:
import pandas as pd
#Open csv and set to var:
df = pd.read_csv('preIupred.csv')
#Select single cell by integer position (row/column):
df.iloc[0, 0]
OR
df.iat[0, 0]
#Select single cell by row index label and column label
df.loc[0, 'COLUMN-HEADER-NAME']
OR
df.at[0, 'COLUMN-HEADER-NAME']
#Legacy mixed indexer (df.ix is deprecated in newer pandas; prefer loc/iloc)
df.ix[0, 'COLUMN-HEADER-NAME']
'''
pd.options.display.max_colwidth = 1000 # sets max string length to display
df = pd.read_csv('preIupred.csv') # load csv to var 'df'
df['example1'] # focuses on column with header 'example1'
match = df['example1'].str.contains('>Mnt 64.001883 0.822785')
#print('match: ' + str(match))
shell = df['Value'][match]
# print(df)
# print(df['Key'][match].value_counts())
# df.set_value(5, 'example1', 'USEFUL-DATA') #updates value of cell at row 5 + header 'Value' to 'CHANGED'
#df.to_csv('preIupred.csv', index=False)
# creates list holding URLs to visit
def urlCreate():
pages = [] # empty list to hold URLs to visit
# create list of urls to visit
for i in xrange(1, 2):
url = 'https://iupred2a.elte.hu/'
# is missing other types of scenarios
pages.append(url)
'''
# opens each URL > sets var to html > sets var to cleaned up html
for item in pages:
page = requests.get(item)
soup = BeautifulSoup(page.text, 'html.parser')
# print(soup)
'''
# Demo function
def demo(txtName='FruitiData.txt', csvName='preIupred.csv', dateApndOpt=1):
if(csvName[-4:] == '.csv'):
if(dateApndOpt == 1):
csvNameTime = csvName[:-4] + '_' + timeStamp()[2] + '.csv'
else:
csvNameTime = csvName[:-4] + '.csv'
else:
if(dateApndOpt == 1):
csvNameTime = csvName + '_' + timeStamp()[2] + '.csv'
else:
csvNameTime = csvName + '.csv'
listD, listX, listY = parseDataSet(txtName) # this parses data from file txtName, can insert different file name within same directory
'''
1. Calls function to parse data set from FruitiData.txt then saves/outputs as ordered dict
2. Calls function that takes parsed data from step one and then saves it to a csv 'collectData1.csv'
'''
csvCreate(listX, listY, csvNameTime) # this takes in vars from 'parseDataSet()' > creates/writes to csv
# csvUpdate()
# csvUpdate2()
# csvUpdate() # uncomment to continue testing this
# csvUpdate2() # updates csv at cell level using pandas (seems best method)
# demo() # uncomment to run main program
def blastParse(fileName='PFK3E0EY015-Alignment.json', jalName='jalViewFile.fa'):
with open(fileName) as json_file:
data = json.load(json_file)
# print(type(data))
# print(json.dumps(data, indent=2)) #pretty printed
# for i in xrange(10):
# print(data['BlastOutput2'][0]['report']['results']['search']['hits'][2]['hsps'][i])
# print('')
# print('')
dictHolder = {}
iterMain = data['BlastOutput2'][0]['report']['results']['search']['hits']
f = open(jalName, 'w')
f.write('')
fl = open(jalName, 'a')
for i in xrange(4):
print '#########################'
for item in xrange(len(iterMain)):
subject = data['BlastOutput2'][0]['report']['results']['search']['hits'][item]['hsps']
title = data['BlastOutput2'][0]['report']['results']['search']['hits'][item]['description'][0]['title']
sciName = str(data['BlastOutput2'][0]['report']['results']['search']['hits'][item]['description'][0]['sciname'])
dictHolder[sciName] = dictHolder.get(sciName, 0) + 1
if(dictHolder[sciName] == 1):
fl.write('\n' + '> ' + sciName)
print("title: " + str(title))
print("sciname: " + str(sciName))
subHolder = ''
for i in xrange(len(subject)):
subHolder += str(subject[i]['hseq'])
print("index: " + str(i) + " subject: " + str(subject[i]['hseq']))
print("subjectFull: " + str(subHolder))
fl.write('\n' + str(subHolder))
print('\n\n')
print(dictHolder)
fl.close()
# print data['BlastOutput2'][0]['report']['results']['search']['hits'][0]['description'][0]['title']
# fList = open(fileName).readlines()
# print fList
'''
# open dataset text file > create var == to each line as list
fList = open(fileName).readlines()
# convert list to dictionary
fDict = collections.OrderedDict() # creates empty orderedDict
##fDict = {}
dictVal = '' # empty string to hold dictVals
dictKey = '' # empty string to hold dictKeys
length = len(fList)
for line in xrange(0, length):
#print('inside for')
#print('line: ' + str(line))
if(line % 2 == 0): # if zero or even > use as key
#print('inside if1')
dictKey = str(fList[line]).replace('\n', '')
if(line % 2 != 0): # if odd > use as value
#print('inside if2')
dictVal = str(fList[line]).replace('\n', '')
if(dictKey != '' and dictVal != ''):
#print('inside if3')
fDict.update({dictKey: dictVal})
dictKey = dictVal = ''
listFDictKeys = fDict.keys() # saves dict keys as list
listFDictVals = fDict.values() # saves dict vals as list
# testing prints
# print(fDict)
# print(listFDictVals)
return fDict, listFDictKeys, listFDictVals
'''
def openDownloads():
list_of_files = glob.glob("C:/Users/SJCCRAC/Documents/Python Code/*") # * means all if need specific format then *.csv
latest_file = max(list_of_files, key=os.path.getctime)
print list_of_files
print latest_file
# blastParse() #runs blastParse function
def downloadUrl():
print('Beginning file download with urllib2...')
url = 'https://blast.ncbi.nlm.nih.gov/Blast.cgi?RESULTS_FILE=on&RID=P09YHPX0014&FORMAT_TYPE=JSON2_S&FORMAT_OBJECT=Alignment&CMD=Get'
filedata = urllib2.urlopen(url)
datatowrite = filedata.read()
with open('/Users/SJCCRAC/Documents/Python Code/testDownload.json', 'wb') as f:
f.write(datatowrite)
print(datatowrite)
# openDownloads() # tests openDownloads() functions
# downloadUrl()
demo('7_proteins.txt', 'preIupred.csv', 1) # (txtName='FruitiData.txt', csvName='preIupred.csv', apndDate[1=yes, 0=no])
'''
Parses original formatted amino acid sequence data
Outputs is to csv file that you specify, default = 'preIupred.csv'
'''
|
python
|
from typing import TYPE_CHECKING
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Provider.Common import GameFile
if TYPE_CHECKING:
from UE4Parse.IO import FFileIoStoreReader
from UE4Parse.IO.IoObjects.FIoChunkId import FIoChunkId
from UE4Parse.IO.IoObjects.FIoOffsetAndLength import FIoOffsetAndLength
class FIoStoreEntry(GameFile):
__slots__ = ("UserData",)
UserData: int
def CompressionMethodString(self) -> str:
return "COMPRESS_" + self.Container.TocResource.CompressionMethods[
self.CompressionMethodIndex - 1] if self.CompressionMethodIndex > 0 else "COMPRESS_None"
@property
def Offset(self) -> int:
return self.OffsetLength.GetOffset
@property
def Length(self) -> int:
return self.OffsetLength.GetLength
@property
def ContainerName(self) -> str:
return self.Container.FileName[:-5] + ".utoc"
@property
def Encrypted(self) -> bool:
return self.Container.TocResource.Header.is_encrypted()
@property
def OffsetLength(self) -> 'FIoOffsetAndLength':
return self.Container.Toc[self.ChunkId]
@property
def ChunkId(self) -> 'FIoChunkId':
return self.Container.TocResource.ChunkIds[self.UserData]
def __init__(self, io_store, userdata: int, name: str):
super().__init__()
self.Container = io_store
self.UserData = userdata
self.Name = name.lower() if io_store.caseinSensitive else name
# compressionBlockSize = ioStore.TocResource.Header.CompressionBlockSize
# firstBlockIndex = int(self.Offset / compressionBlockSize) - 1
# lastBlockIndex = int((Align(self.Offset + self.Length, compressionBlockSize) - 1) / compressionBlockSize)
# for i in range(firstBlockIndex, lastBlockIndex):
# compressionBlock = ioStore.TocResource.CompressionBlocks[i]
# self.UncompressedSize += compressionBlock.UncompressedSize
# self.CompressionMethodIndex = compressionBlock.CompressionMethodIndex
#
# rawSize = Align(compressionBlock.CompressedSize, 16)
# self.Size += rawSize
#
# if ioStore.TocResource.Header.is_encrypted():
# self.Encrypted = True
def get_data(self) -> BinaryStream:
return self.Container.Read(self.ChunkId)
|
python
|
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_snapshot_test_lib',
'type': 'static_library',
'dependencies': [
'snapshot.gyp:crashpad_snapshot',
'../compat/compat.gyp:crashpad_compat',
'../third_party/mini_chromium/mini_chromium.gyp:base',
'../util/util.gyp:crashpad_util',
],
'include_dirs': [
'..',
],
'sources': [
'test/test_cpu_context.cc',
'test/test_cpu_context.h',
'test/test_exception_snapshot.cc',
'test/test_exception_snapshot.h',
'test/test_memory_snapshot.cc',
'test/test_memory_snapshot.h',
'test/test_module_snapshot.cc',
'test/test_module_snapshot.h',
'test/test_process_snapshot.cc',
'test/test_process_snapshot.h',
'test/test_system_snapshot.cc',
'test/test_system_snapshot.h',
'test/test_thread_snapshot.cc',
'test/test_thread_snapshot.h',
],
},
{
'target_name': 'crashpad_snapshot_test',
'type': 'executable',
'dependencies': [
'crashpad_snapshot_test_module',
'snapshot.gyp:crashpad_snapshot',
'../client/client.gyp:crashpad_client',
'../compat/compat.gyp:crashpad_compat',
'../test/test.gyp:crashpad_test',
'../third_party/gtest/gtest.gyp:gtest',
'../third_party/gtest/gtest.gyp:gtest_main',
'../third_party/mini_chromium/mini_chromium.gyp:base',
'../util/util.gyp:crashpad_util',
],
'include_dirs': [
'..',
],
'sources': [
'cpu_context_test.cc',
'crashpad_info_client_options_test.cc',
'mac/cpu_context_mac_test.cc',
'mac/mach_o_image_annotations_reader_test.cc',
'mac/mach_o_image_reader_test.cc',
'mac/mach_o_image_segment_reader_test.cc',
'mac/process_reader_test.cc',
'mac/process_types_test.cc',
'mac/system_snapshot_mac_test.cc',
'minidump/process_snapshot_minidump_test.cc',
'win/pe_image_annotations_reader_test.cc',
'win/process_reader_win_test.cc',
'win/system_snapshot_win_test.cc',
],
'conditions': [
['OS=="mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/OpenCL.framework',
],
},
}],
],
},
{
'target_name': 'crashpad_snapshot_test_module',
'type': 'loadable_module',
'dependencies': [
'../client/client.gyp:crashpad_client',
'../third_party/mini_chromium/mini_chromium.gyp:base',
],
'include_dirs': [
'..',
],
'sources': [
'crashpad_info_client_options_test_module.cc',
],
},
],
}
|
python
|
import unittest
import numpy as np
import tensorflow as tf
from pplp.core import box_4c_encoder
class Box4cEncoderTest(unittest.TestCase):
def test_np_box_3d_to_box_4c(self):
# Test non-vectorized numpy version on ortho boxes
# Sideways box
box_3d_1 = np.asarray([0, 0, 0, 2, 1, 5, 0])
# Straight box
box_3d_2 = np.asarray([0, 0, 0, 2, 1, 5, -np.pi / 2])
# Ground plane facing upwards, at 2m along y axis
ground_plane = [0, -1, 0, 2]
exp_box_4c_1 = np.asarray(
[1.0, 1.0, -1.0, -1.0,
0.5, -0.5, -0.5, 0.5,
2.0, 7.0])
exp_box_4c_2 = np.asarray(
[0.5, 0.5, -0.5, -0.5,
1.0, -1.0, -1.0, 1.0,
2.0, 7.0])
# Convert box_3d to box_4c
box_4c_1 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_1, ground_plane)
box_4c_2 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_2, ground_plane)
np.testing.assert_almost_equal(box_4c_1, exp_box_4c_1, decimal=3)
np.testing.assert_almost_equal(box_4c_2, exp_box_4c_2, decimal=3)
def test_np_box_3d_to_box_4c_rotated_translated(self):
# Test non-vectorized numpy version on rotated boxes
box_3d_1 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -1 * np.pi / 8])
box_3d_2 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -3 * np.pi / 8])
box_3d_3 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -5 * np.pi / 8])
box_3d_4 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -7 * np.pi / 8])
box_3d_5 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 1 * np.pi / 8])
box_3d_6 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 3 * np.pi / 8])
box_3d_7 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 5 * np.pi / 8])
box_3d_8 = np.asarray([0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 7 * np.pi / 8])
# Also test a box translated along xz
box_3d_translated = box_3d_1 + [10, 0, 10, 0, 0, 0, 0]
# Ground plane facing upwards, at 2m along y axis
ground_plane = [0, -1, 0, 2]
# Convert box_3d to box_4c
box_4c_1 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_1, ground_plane)
box_4c_2 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_2, ground_plane)
box_4c_3 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_3, ground_plane)
box_4c_4 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_4, ground_plane)
box_4c_5 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_5, ground_plane)
box_4c_6 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_6, ground_plane)
box_4c_7 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_7, ground_plane)
box_4c_8 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_8, ground_plane)
box_4c_translated = box_4c_encoder.np_box_3d_to_box_4c(
box_3d_translated, ground_plane)
# Expected boxes_4c
exp_box_4c_1 = [0.733, 1.115, -0.733, -1.115,
0.845, -0.079, -0.845, 0.079,
2.000, 7.000]
exp_box_4c_2 = [0.845, 0.079, -0.845, -0.079,
0.733, -1.115, -0.733, 1.115,
2.000, 7.000]
exp_box_4c_3 = [0.079, 0.845, -0.079, -0.845,
1.115, -0.733, -1.115, 0.733,
2.000, 7.000]
exp_box_4c_4 = [1.115, 0.733, -1.115, -0.733,
0.079, -0.845, -0.079, 0.845,
2.000, 7.000]
exp_box_4c_5 = [1.115, 0.733, -1.115, -0.733,
0.079, -0.845, -0.079, 0.845,
2.000, 7.000]
exp_box_4c_6 = [0.079, 0.845, -0.079, -0.845,
1.115, -0.733, -1.115, 0.733,
2.000, 7.000]
exp_box_4c_7 = [0.845, 0.079, -0.845, -0.079,
0.733, -1.115, -0.733, 1.115,
2.000, 7.000]
exp_box_4c_8 = [0.733, 1.115, -0.733, -1.115,
0.845, -0.079, -0.845, 0.079,
2.000, 7.000]
exp_box_4c_translated = [10.733, 11.115, 9.267, 8.885,
10.845, 9.921, 9.155, 10.079,
2.000, 7.000]
np.testing.assert_almost_equal(box_4c_1, exp_box_4c_1, decimal=3)
np.testing.assert_almost_equal(box_4c_2, exp_box_4c_2, decimal=3)
np.testing.assert_almost_equal(box_4c_3, exp_box_4c_3, decimal=3)
np.testing.assert_almost_equal(box_4c_4, exp_box_4c_4, decimal=3)
np.testing.assert_almost_equal(box_4c_5, exp_box_4c_5, decimal=3)
np.testing.assert_almost_equal(box_4c_6, exp_box_4c_6, decimal=3)
np.testing.assert_almost_equal(box_4c_7, exp_box_4c_7, decimal=3)
np.testing.assert_almost_equal(box_4c_8, exp_box_4c_8, decimal=3)
np.testing.assert_almost_equal(box_4c_translated,
exp_box_4c_translated, decimal=3)
def test_np_box_3d_to_box_4c_heights(self):
# Boxes above, on, or below ground plane
box_3d_1 = np.asarray([0.0, 3.0, 0.0, 2.0, 1.0, 5.0, 0.0]) # below
box_3d_2 = np.asarray([0.0, 2.0, 0.0, 2.0, 1.0, 5.0, 0.0]) # on
box_3d_3 = np.asarray([0.0, 1.0, 0.0, 2.0, 1.0, 5.0, 0.0]) # above
# Ground plane facing upwards, at 2m along y axis
ground_plane = [0, -1, 0, 2]
# Convert box_3d to box_4c
box_4c_1 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_1, ground_plane)
box_4c_2 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_2, ground_plane)
box_4c_3 = box_4c_encoder.np_box_3d_to_box_4c(box_3d_3, ground_plane)
# Expected boxes_4c
exp_box_4c_1 = np.asarray([1.0, 1.0, -1.0, -1.0,
0.5, -0.5, -0.5, 0.5,
-1.0, 4.0])
exp_box_4c_2 = np.asarray([1.0, 1.0, -1.0, -1.0,
0.5, -0.5, -0.5, 0.5,
0.0, 5.0])
exp_box_4c_3 = np.asarray([1.0, 1.0, -1.0, -1.0,
0.5, -0.5, -0.5, 0.5,
1.0, 6.0])
np.testing.assert_almost_equal(box_4c_1, exp_box_4c_1)
np.testing.assert_almost_equal(box_4c_2, exp_box_4c_2)
np.testing.assert_almost_equal(box_4c_3, exp_box_4c_3)
def test_tf_box_3d_to_box_4c(self):
# Test that tf version matches np version
# (rotations, xz translation, heights)
boxes_3d = np.asarray([
# Rotated
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -1 * np.pi / 8],
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -3 * np.pi / 8],
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -5 * np.pi / 8],
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, -7 * np.pi / 8],
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 1 * np.pi / 8],
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 3 * np.pi / 8],
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 5 * np.pi / 8],
[0.0, 0.0, 0.0, 2.0, 1.0, 5.0, 7 * np.pi / 8],
# Translated along xz
[10, 0, 5, 2, 1, 5, - 1 * np.pi / 8],
# Below, on, or above ground plane
[0.0, 3.0, 0.0, 2.0, 1.0, 5.0, 0.0],
[0.0, 2.0, 0.0, 2.0, 1.0, 5.0, 0.0],
[0.0, 1.0, 0.0, 2.0, 1.0, 5.0, 0.0],
])
# Ground plane facing upwards, at 2m along y axis
ground_plane = [0, -1, 0, 2]
# Numpy conversion box_3d to box_4c
np_boxes_4c = np.asarray(
[box_4c_encoder.np_box_3d_to_box_4c(box_3d, ground_plane)
for box_3d in boxes_3d])
# Convert to tensors
tf_boxes_3d = tf.convert_to_tensor(boxes_3d, dtype=tf.float32)
tf_ground_plane = tf.convert_to_tensor(ground_plane, dtype=tf.float32)
# Tensorflow conversion box_3d to box_4c
tf_boxes_4c = box_4c_encoder.tf_box_3d_to_box_4c(tf_boxes_3d,
tf_ground_plane)
sess = tf.Session()
with sess.as_default():
tf_boxes_4c_out = tf_boxes_4c.eval()
# Loop through to show a separate error when box doesn't match
for box_idx in range(len(np_boxes_4c)):
np.testing.assert_almost_equal(np_boxes_4c[box_idx],
tf_boxes_4c_out[box_idx],
decimal=5)
def test_np_box_4c_to_box_3d(self):
box_4c_1 = np.asarray([1.0, 0.0, -1.0, 0.5,
0.5, -1.0, 0.0, 1.0,
1.0, 3.0])
box_4c_2 = np.asarray([1.0, 0.0, -1.0, -0.5,
0.0, -1.0, 0.5, 1.0,
1.0, 3.0])
ground_plane = np.asarray([0, -1, 0, 2])
box_3d_1 = box_4c_encoder.np_box_4c_to_box_3d(box_4c_1, ground_plane)
box_3d_2 = box_4c_encoder.np_box_4c_to_box_3d(box_4c_2, ground_plane)
# Expected boxes_3d
exp_box_3d_1 = [0.125, 1.000, 0.125, 1.768, 1.414, 2.000, -0.785]
exp_box_3d_2 = [-0.125, 1.000, 0.125, 1.768, 1.414, 2.000, 0.785]
np.testing.assert_almost_equal(box_3d_1, exp_box_3d_1, decimal=3)
np.testing.assert_almost_equal(box_3d_2, exp_box_3d_2, decimal=3)
def test_tf_box_4c_to_box_3d(self):
np_boxes_4c = np.asarray(
[
[1.0, 0.0, -1.0, 0.5, 0.5, -1.0, 0.0, 1.0, 1.0, 3.0],
[1.0, 0.0, -1.0, -0.5, 0.0, -1.0, 0.5, 1.0, 1.0, 3.0],
[1.0, 0.0, -1.0, -0.5, 0.0, -1.0, 0.5, 1.0, 1.0, 3.0],
[1.0, 0.0, -1.0, -0.5, 0.0, -1.0, 0.5, 1.0, 1.0, 3.0],
[1.0, 0.0, -1.0, -0.5, 0.0, -1.0, 0.5, 1.0, 1.0, 3.0],
])
np_ground_plane = np.asarray([0, -1, 0, -1])
np_boxes_3d = [box_4c_encoder.np_box_4c_to_box_3d(box_4c,
np_ground_plane)
for box_4c in np_boxes_4c]
tf_boxes_4c = tf.convert_to_tensor(np_boxes_4c,
dtype=tf.float32)
tf_ground_plane = tf.convert_to_tensor(np_ground_plane,
dtype=tf.float32)
tf_boxes_3d = box_4c_encoder.tf_box_4c_to_box_3d(tf_boxes_4c,
tf_ground_plane)
sess = tf.Session()
with sess.as_default():
tf_boxes_3d_out = tf_boxes_3d.eval()
for box_idx in range(len(np_boxes_3d)):
np.testing.assert_almost_equal(np_boxes_3d[box_idx],
tf_boxes_3d_out[box_idx],
decimal=3)
|
python
|
from .xml_style import XMLDataset
class VOCDataset(XMLDataset):
CLASSES = ['spike']
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
|
python
|
#!/usr/bin/env python
from .web_api_2 import SwaggerGiant
|
python
|
import os, paramiko, time, schedule, smtplib, ssl
from datetime import datetime
from email.message import EmailMessage
host='localhost'
port='5432'
user='postgres'
password='admin'
database='testdb'
#local backup path
local_dir = 'C:\\Users\\Kamla\\projets\\auto-backup-sqldb\\backup\\'
#local_dir = 'Path to the folder of the database to back up\\'
#remote backup path
remote_dir = '/C:/Users/vmwin10/Documents/ftpfile/'
def job():
print("Backup working...")
filestamp = time.strftime('%Y-%m-%dT%H-%M-%S.%z')
#name for the sql file that will be generated by pg_dump
database_remote = database+"_"+filestamp+".bak.sql"
PASS="set PGPASSWORD=%s" % (password)
#run the pg_dump command to create a local backup
#the dump files are saved in the 'backup' directory
os.system("(cd backup) && ("+PASS+") && (pg_dump -h %s -p %s -U %s -f %s -C -d %s)" % (host, port, user, database_remote, database))
print("Database dumped to "+database_remote)
# start of the SFTP transfer
ssh_client=paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#connect to the machine where the backup file will be stored
ssh_client.connect(hostname='192.168.126.2',username='vmwin10',password='vmwin10')
ftp_client=ssh_client.open_sftp()
#send the local file to the remote host
ftp_client.put(local_dir+database_remote,remote_dir+database_remote)
ftp_client.close()
print("Successfull Backup")
# an email is sent after each backup
msg = EmailMessage()
msg.set_content("Un backup vient d'etre effectue")
msg["Subject"] = "Email de Backup"
msg["From"] = "[email protected]"
msg["To"] = "[email protected]"
context=ssl.create_default_context()
with smtplib.SMTP("smtp.gmail.com", port=587) as smtp:
smtp.starttls(context=context)
smtp.login(msg["From"], "password")
smtp.send_message(msg)
# schedule the backup (every 3 seconds here for testing; the hourly/daily variants are commented out below)
schedule.every(3).seconds.do(job)
#schedule.every(15).minutes.do(job)
#schedule.every().hour.do(job)
#schedule.every().day.at("10:30").do(job)
#schedule.every(10).to(10).minutes.do(job)
#schedule.every().monday.do(job)
#schedule.every().wednesday.at("15:00").do(job)
#schedule.every().minute.at(":15").do(job)
while True:
schedule.run_pending()
time.sleep(1)
|
python
|
from pathlib import Path
import pandas as pd
from collections import defaultdict
from typing import List, Union
from .types import Child
def create_csv(children: List[Child], output_dir: Union[Path,str]):
header_df = create_header(children)
episodes_df = create_episodes(children)
uasc_df = create_uasc(children)
reviews_df = create_reviews(children)
oc2_df = create_oc2(children)
oc3_df = create_oc3(children)
ad1_df = create_ad1(children)
sbpfa_df = create_should_be_placed_for_adoption(children)
prev_perm_df = create_previous_permanence(children)
missing_df = create_missing(children)
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
header_df.to_csv(output_dir / 'header.csv', index=False)
episodes_df.to_csv(output_dir / 'episodes.csv', index=False)
uasc_df.to_csv(output_dir / 'uasc.csv', index=False)
reviews_df.to_csv(output_dir / 'reviews.csv', index=False)
oc2_df.to_csv(output_dir / 'oc2.csv', index=False)
oc3_df.to_csv(output_dir / 'oc3.csv', index=False)
ad1_df.to_csv(output_dir / 'ad1.csv', index=False)
sbpfa_df.to_csv(output_dir / 'placed_for_adoption.csv', index=False)
prev_perm_df.to_csv(output_dir / 'previous_permanence.csv', index=False)
missing_df.to_csv(output_dir / 'missing.csv', index=False)
def create_header(children: List[Child]) -> pd.DataFrame:
return pd.DataFrame({
'CHILD': [c.child_id for c in children],
'SEX': [c.sex for c in children],
'DOB': [c.dob.strftime('%d/%m/%Y') for c in children],
'ETHNIC': [c.ethnicity for c in children],
'UPN': [c.upn for c in children],
'MOTHER': [1 if c.mother_child_dob is not None else None for c in children],
'MC_DOB': [c.mother_child_dob.strftime('%d/%m/%Y') if c.mother_child_dob is not None else None for c in children],
})
def create_episodes(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
for episode in child.episodes:
data['CHILD'].append(child.child_id)
data['DECOM'].append(episode.start_date.strftime('%d/%m/%Y'))
data['RNE'].append(episode.reason_for_new_episode)
data['LS'].append(episode.legal_status)
data['CIN'].append(episode.cin)
data['PLACE'].append(episode.place)
data['PLACE_PROVIDER'].append(episode.place_provider)
data['DEC'].append(episode.end_date.strftime('%d/%m/%Y') if episode.end_date is not None else None)
data['REC'].append(episode.reason_end)
data['REASON_PLACE_CHANGE'].append(episode.reason_place_change)
data['HOME_POST'].append(episode.home_postcode)
data['PL_POST'].append(episode.place_postcode)
data['URN'].append(episode.urn)
return pd.DataFrame(data)
def create_uasc(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
if child.date_uasc_ceased is not None:
data['CHILD'].append(child.child_id)
data['SEX'].append(child.sex)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['DUC'].append(child.date_uasc_ceased.strftime('%d/%m/%Y'))
return pd.DataFrame(data)
def create_reviews(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
for review in child.reviews:
data['CHILD'].append(child.child_id)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['REVIEW'].append(review.review_date.strftime('%d/%m/%Y'))
data['REVIEW_CODE'].append(review.review_code)
return pd.DataFrame(data)
def create_oc3(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
if child.leaving_care_data is not None:
data['CHILD'].append(child.child_id)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['IN_TOUCH'].append(child.leaving_care_data.in_touch)
data['ACTIV'].append(child.leaving_care_data.activ)
data['ACCOM'].append(child.leaving_care_data.accom)
return pd.DataFrame(data)
def create_ad1(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
if child.adoption_data is not None:
ad = child.adoption_data
data['CHILD'].append(child.child_id)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['DATE_INT'].append(ad.start_date.strftime('%d/%m/%Y'))
data['DATE_MATCH'].append(ad.start_date.strftime('%d/%m/%Y'))
data['FOSTER_CARE'].append(ad.foster_care)
data['NB_ADOPTR'].append(ad.number_adopters)
data['SEX_ADOPTR'].append(ad.sex_adopter)
data['LS_ADOPTR'].append(ad.ls_adopter)
return pd.DataFrame(data)
def create_should_be_placed_for_adoption(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
if child.adoption_data is not None:
ad = child.adoption_data
data['CHILD'].append(child.child_id)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['DATE_PLACED'].append(ad.start_date.strftime('%d/%m/%Y'))
data['DATE_PLACED_CEASED'].append(ad.end_date.strftime('%d/%m/%Y') if ad.end_date is not None else None)
data['REASON_PLACED_CEASED'].append(ad.reason_ceased if ad.reason_ceased is not None else None)
return pd.DataFrame(data)
def create_oc2(children: List[Child]) -> pd.DataFrame:
bool_to_str = lambda x: 1 if x else 0
data = defaultdict(list)
for child in children:
if child.outcomes_data is not None:
oc = child.outcomes_data
data['CHILD'].append(child.child_id)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['SDQ_SCORE'].append(oc.sdq_score)
data['SDQ_REASON'].append(oc.sdq_reason)
data['CONVICTED'].append(bool_to_str(oc.convicted))
data['HEALTH_CHECK'].append(bool_to_str(oc.health_check))
data['IMMUNISATIONS'].append(bool_to_str(oc.immunisations))
data['TEETH_CHECK'].append(bool_to_str(oc.teeth_check))
data['HEALTH_ASSESSMENT'].append(bool_to_str(oc.health_assessment))
data['SUBSTANCE_MISUSE'].append(bool_to_str(oc.substance_misuse))
data['INTERVENTION_RECEIVED'].append(bool_to_str(oc.intervention_received))
data['INTERVENTION_OFFERED'].append(bool_to_str(oc.intervention_offered))
df = pd.DataFrame(data)
# Pandas converts ints with null to float by default, so need to convert back
# to nullable integer.
df['SDQ_SCORE'] = df['SDQ_SCORE'].astype('Int64')
return df
def create_previous_permanence(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
data['CHILD'].append(child.child_id)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['PREV_PERM'].append(child.previous_permanent)
data['LA_PERM'].append(None) # this needs to be inferred
data['DATE_PERM'].append(child.prev_permanent_date.strftime('%d/%m/%Y') if child.prev_permanent_date is not None else None)
return pd.DataFrame(data)
def create_missing(children: List[Child]) -> pd.DataFrame:
data = defaultdict(list)
for child in children:
for mp in child.missing_periods:
data['CHILD'].append(child.child_id)
data['DOB'].append(child.dob.strftime('%d/%m/%Y'))
data['MISSING'].append(mp.missing_type)
data['MIS_START'].append(mp.start_date.strftime('%d/%m/%Y'))
data['MIS_END'].append(mp.end_date.strftime('%d/%m/%Y') if mp.end_date is not None else None)
return pd.DataFrame(data)
|
python
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--latitude", type=float, required=True, help="The latitude of your bounding box center")
parser.add_argument("--longitude", type=float, required=True, help="The longitude of your bounding box center")
args = parser.parse_args()
dlat = 0.005
dlon = 0.02 # double it from 0.01
n = args.latitude + (dlat/2)
s = args.latitude - (dlat/2)
e = args.longitude + (dlon/2)
w = args.longitude - (dlon/2)
query = """<query type="way">
<bbox-query s="${south}" w="${west}" n="${north}" e="${east}"/>
<has-kv k="highway" regv="."/>
<has-kv k="access" modv="not" regv="no"/>
<has-kv k="access" modv="not" regv="private"/>
<has-kv k="area" modv="not" regv="yes"/>
</query>
<union>
<item/>
<recurse type="down"/>
</union>
<print/>"""
from string import Template
t = Template(query)
interpolated = t.substitute(north=str(n), south=str(s), east=str(e), west=str(w))
print(interpolated)
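# A possible next step (illustrative sketch, not part of the original script):
# the interpolated query could be POSTed to a public Overpass API endpoint with
# the requests library. The endpoint URL and timeout below are assumptions.
# import requests
# response = requests.post("https://overpass-api.de/api/interpreter", data=interpolated, timeout=60)
# response.raise_for_status()
# print(response.text)  # OSM XML for the matching highway ways and their nodes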
|
python
|
from modules.data.fileRead import readMat
from numpy import arange
from modules.modelar.leastSquares import calculate
# Fallback values in case the constants are not chosen by the user
SP = 50
OVERSHOOT = 0.10
TS = 70
# Read the input and output vectors
ENTRADA, SAIDA, TEMPO = readMat()
# Compute the sampling interval
TEMPO_AMOSTRAGEM = TEMPO[0][1]
# Build the time vector
TEMPO_CALCULO = arange(0,(len(TEMPO[0])*TEMPO_AMOSTRAGEM),TEMPO_AMOSTRAGEM)
# Compute the model coefficients
COEFICIENTE_A1, COEFICIENTE_B1 = calculate()
|
python
|
import argparse
import os
import pandas as pd
import re
import spacy
import sys
from datetime import datetime
from geopy.extra.rate_limiter import RateLimiter
from geopy import Nominatim
from epitator.geoname_annotator import GeonameAnnotator
from epitator.date_annotator import DateAnnotator
from epitator.count_annotator import CountAnnotator
from epitator.annotator import AnnoDoc
from typing import Iterable, Union
from transformers import BartForConditionalGeneration, BartTokenizer
from tqdm import tqdm
os.environ['SPACY_MODEL_SHORTCUT_LINK'] = 'en_core_web_trf'
spacy.prefer_gpu()
sys.path.append('../EpiTator')
locator = Nominatim(user_agent="ppcoom")
geocode = RateLimiter(locator.geocode, min_delay_seconds=1/20)
dengue_regex = re.compile(
r'([A-Za-z ]+).*\[w\/e (.+)\] \/ (.+) \/ (.+) \/ (.+) \/ (.+) \/ (.+)', re.MULTILINE)
tqdm.pandas()
# setup our BART transformer summarization model
print('loading transformers')
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
model = BartForConditionalGeneration.from_pretrained(
'facebook/bart-large-cnn')
COUNTRY_COL = "country"
CONTENT_COL = "content"
SUMMARY_COL = "summary"
DATA_DIR = "../data"
SUMMARIZED_DATA_DIR = f"{DATA_DIR}/summarized"
EXTRACTED_DATA_DIR = f"{DATA_DIR}/extracted"
def extract_arguments() -> Iterable[Union[str, list]]:
"""
Name: extract_arguments
Purpose: extracts the arguments specified by the user
Input: None
Output: filepath - The csv filepath specified by the user
countries - The countries specified by the user
"""
CSV_FILE_ENDING = ".csv"
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--filepath", type=str, required=True, help="The filepath to the promed data to analyze")
parser.add_argument("-c", "--countries", nargs="+", required=True, help="The countries to filter for in the data")
args = parser.parse_args()
"""
Validate the following:
1. The filepath has a length > 0
2. The filepath actually points to a file
3. The file pointed to by the filepath is a csv
"""
filepath = args.filepath
if (
len(filepath) <= 0 or
os.path.isfile(filepath) is False or
filepath.endswith(CSV_FILE_ENDING) is False
):
        print(f"The filepath: {filepath} does not point to a valid CSV file.")
sys.exit(-1)
"""
Validate the countries specified are valid strings
"""
invalid_country_specified = False
for country in args.countries:
        if (country is None or len(country.strip()) <= 0):
print(f"The country: {country} is not valid")
invalid_country_specified = True
if invalid_country_specified:
sys.exit(-1)
return filepath, args.countries
def read_data(csv_filepath: str) -> pd.DataFrame:
"""
Name: read_data
Purpose: To read the data inside the csv filepath specified
Input: csv_filepath - The filepath to the csv
Output: A DataFrame representation of the csv data
"""
return pd.read_csv(csv_filepath)
def filter_df_by_countries(promed_df: pd.DataFrame, countries_to_srch_for: list) -> pd.DataFrame:
"""
Name: filter_df_by_countries
Purpose: Filter the specified data frame by the countries specified
Input: promed_df - The promed dataframe
           countries_to_srch_for - The countries we should filter on
Output: A new filtered dataframe
"""
filtered_pd = None
for country in countries_to_srch_for:
country_filtered_df = promed_df.loc[(promed_df[COUNTRY_COL].str.lower() == country.lower())]
if filtered_pd is None:
filtered_pd = country_filtered_df
else:
            filtered_pd = filtered_pd.append(country_filtered_df)
return filtered_pd
def clean_df_content(promed_df: pd.DataFrame, debug: bool = False) -> pd.DataFrame:
cleaned_df = {}
for index, row in promed_df.iterrows():
content = row[CONTENT_COL]
cleaned_content = clean(content)
if (debug):
print("---------------------------")
print(f"{content}")
print("---------------------------")
for col in promed_df.columns:
row_val = row[col]
if col == CONTENT_COL:
row_val = cleaned_content
if col in cleaned_df:
cleaned_df[col].append(row_val)
else:
cleaned_df[col] = [row_val]
return pd.DataFrame(cleaned_df)
def clean(content):
split = content.splitlines()
last_index = -1
lower = [x.lower().strip() for x in split]
if '--' in lower:
last_index = lower.index('--')
elif 'communicated by:' in lower:
last_index = lower.index('communicated by:')-1
    cleaned = split[12:last_index]  # drop the first 12 lines (assumed ProMED header/metadata block)
return '\n'.join([x for x in cleaned if x])
def summarize_df_content(promed_df: pd.DataFrame) -> pd.DataFrame:
summarized_df = {}
for index, row in promed_df.iterrows():
content = row[CONTENT_COL]
summarized_content = summarizer(content)
for col in promed_df.columns:
row_val = row[col]
if col == SUMMARY_COL:
row_val = summarized_content
if col != CONTENT_COL:
if col in summarized_df:
summarized_df[col].append(row_val)
else:
summarized_df[col] = [row_val]
return pd.DataFrame(summarized_df)
def summarizer(text: str) -> str:
input_ids = tokenizer(text, return_tensors='pt', max_length=1024,
padding=True, truncation=True)['input_ids']
summary_ids = model.generate(input_ids)
summary = ''.join([tokenizer.decode(s) for s in summary_ids])
summary = summary.replace('<s>', '').replace('</s>', '')
return summary
def extract_cchf_data_from_df(promed_df: pd.DataFrame) -> pd.DataFrame:
promed_df[[
'admin1_code',
'admin2_code',
'admin3_code',
'admin4_code',
'location_name',
'location_lat',
'location_lon',
'cases',
'cases_tags',
'deaths',
'deaths_tags',
'dates_start',
'dates_end',
]] = promed_df[SUMMARY_COL].progress_apply(epitator_extract)
promed_df = promed_df.applymap(lambda x: x[0] if isinstance(
x, list) and len(x) > 0 else x)
promed_df = promed_df.applymap(lambda y: pd.NA if isinstance(
y, (list, str)) and len(y) == 0 else y)
promed_df = promed_df.reset_index(drop=True)
return promed_df
# function that extracts location names/admin codes/lat/lng, case and death counts, and date ranges from the input string
# uses epitator since it already trained rules for extracting medical/infectious disease data
def epitator_extract(txt: str, max_ents: int = 1) -> dict:
# input string and add annotators
doc = AnnoDoc(txt)
doc.add_tiers(GeonameAnnotator())
doc.add_tiers(CountAnnotator())
doc.add_tiers(DateAnnotator())
# extract geographic data
geos = doc.tiers["geonames"].spans
geo_admin1s = [x.geoname.admin1_code for x in geos]
geo_admin2s = [x.geoname.admin2_code for x in geos]
geo_admin3s = [x.geoname.admin3_code for x in geos]
geo_admin4s = [x.geoname.admin4_code for x in geos]
geo_names = [x.geoname.name for x in geos]
geo_lats = [x.geoname.latitude for x in geos]
geo_lons = [x.geoname.longitude for x in geos]
# extract case counts and death counts
counts = doc.tiers["counts"].spans
cases_counts = [x.metadata['count'] for x in counts if 'case' in x.metadata['attributes']
and 'death' not in x.metadata['attributes']]
cases_tags = [x.metadata['attributes']
for x in counts if 'case' in x.metadata['attributes'] and 'death' not in x.metadata['attributes']]
death_counts = [x.metadata['count']
for x in counts if 'death' in x.metadata['attributes']]
death_tags = [x.metadata['attributes']
for x in counts if 'death' in x.metadata['attributes']]
# extract the date range
dates = doc.tiers["dates"].spans
dates_start = [pd.to_datetime(
x.metadata["datetime_range"][0], errors='coerce') for x in dates]
dates_end = [pd.to_datetime(
x.metadata["datetime_range"][1], errors='coerce') for x in dates]
# return only max_ents entities from the extracted lists
# currently set to the first result for each list, since that is usually the most important one
# and other ones can be filler/garbage data
return pd.Series([
geo_admin1s[:max_ents],
geo_admin2s[:max_ents],
geo_admin3s[:max_ents],
geo_admin4s[:max_ents],
geo_names[:max_ents],
geo_lats[:max_ents],
geo_lons[:max_ents],
cases_counts[:max_ents],
cases_tags[:max_ents],
death_counts[:max_ents],
death_tags[:max_ents],
dates_start[:max_ents],
dates_end[:max_ents],
])
def main():
print("Extracting the specified arguments")
csv_filepath, countries = extract_arguments()
print("Reading the promed data")
orig_promed_df = read_data(
csv_filepath = csv_filepath
)
print("Filtering the promed data")
filtered_promed_df = filter_df_by_countries(
promed_df = orig_promed_df,
countries_to_srch_for = countries
)
print(filtered_promed_df)
print("Cleaning the promed data")
cleaned_promed_content_df = clean_df_content(
promed_df = filtered_promed_df
)
print("Summarizing dataframe contents")
    summarized_promed_data = summarize_df_content(
        promed_df = cleaned_promed_content_df
    )
if os.path.isdir(SUMMARIZED_DATA_DIR) is False:
os.mkdir(SUMMARIZED_DATA_DIR)
csv_countries_selected = ""
for country in countries:
csv_countries_selected += f"_{country.lower()}"
print("Saving summarized promed data")
csv_country_summarized_data = f"summarized_promed_cchf_data"
summarized_promed_data.to_csv(f"{SUMMARIZED_DATA_DIR}/{csv_country_summarized_data}{csv_countries_selected}.csv", index=False)
print("Extracting promed data")
    extracted_promed_data_df = extract_cchf_data_from_df(
promed_df = summarized_promed_data
)
print("Saving extracted promed data")
if os.path.isdir(EXTRACTED_DATA_DIR) is False:
os.mkdir(EXTRACTED_DATA_DIR)
csv_country_extracted_data = f"extracted_promed_cchf_data"
    extracted_promed_data_df.to_csv(f"{EXTRACTED_DATA_DIR}/{csv_country_extracted_data}{csv_countries_selected}.csv", index=False)
if __name__ == "__main__":
main()
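# Illustrative invocation (the script name, CSV path and countries below are
# assumptions, not taken from the original source):
#   python promed_pipeline.py -f ../data/promed_posts.csv -c Turkey Iraq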
|
python
|
from cmsisdsp.sdf.nodes.simu import *
import numpy as np
import cmsisdsp as dsp
class Processing(GenericNode):
def __init__(self,inputSize,outputSize,fifoin,fifoout):
GenericNode.__init__(self,inputSize,outputSize,fifoin,fifoout)
def run(self):
i=self.getReadBuffer()
o=self.getWriteBuffer()
        b=dsp.arm_scale_q15(i,0x6000,1)  # gain of 0x6000/2**15 = 0.75 with a post-shift of 1, i.e. an overall scale of 1.5
o[:]=b[:]
return(0)
|
python
|
def say_hi():
print("hello world function")
def cube(num):
return num*num*num
say_hi()
print(cube(3))
# Statements
is_male = False
if is_male:
say_hi()
else:
    print("Goodbye")
# Statements
is_female = True
if is_female or is_male:
print("Hi")
else:
    print("Goodbye")
# Dictionary
months = {
0: "hola",
1: "adiós"
}
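# Illustrative addition: dictionary values are looked up by key, with .get()
# allowing a fallback when the key is missing
print(months[0])
print(months.get(2, "not a valid key"))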
|
python
|
import os
from argh.dispatching import dispatch_command
import application
def start_app():
port = int(os.getenv('PORT'))
application.start(port=port)
if __name__ == '__main__':
dispatch_command(start_app)
|
python
|
import os
from git import Repo
from django.core.exceptions import PermissionDenied
from base.handlers.extra_handlers import ExtraHandler
from base.handlers.file_handler import FileHandler
from base.handlers.form_handler import FormHandler
from base.handlers.path_handlers import PathHandler
from base.handlers.github_handler import GithubHandler
from base.handlers.yaml_handlers import YAMLHandler
from startbootstrap.dbio import PostDbIO, SiteDataDbIO, SocialProfileDbIO
from theJekyllProject.dbio import RepoDbIO
class SBSFormHandler:
def __init__(self, user, repo):
"""
:param user: logged in user
:param repo: the main repo name
"""
self.path = PathHandler(user, repo).create_repo_path()
def load_site_initials(self, request, form_class):
"""
Load the site data initials from the database
"""
site_data = SiteDataDbIO().get_obj({
'repo': RepoDbIO().get_repo(request.user)
})
return FormHandler(request, form_class).load_initials(site_data)
def post_site_data(self, user, form_field_dict):
"""
handle the post site data View method
:param user: the logged in user
:param form_field_dict: form field cleaned data
:return:
"""
repo = RepoDbIO().get_repo(user)
form_field_dict['repo'] = repo
site_data = SiteDataDbIO().get_obj({'repo': repo})
if site_data:
SiteDataDbIO().update_obj(site_data, form_field_dict)
else:
SiteDataDbIO().create_obj(**form_field_dict)
config_path = os.path.join(self.path, '_config.yml')
self.del_repo(form_field_dict)
# Complete all the yaml operations
yaml_dict = YAMLHandler().read_yaml_file(config_path, True)
new_yaml = YAMLHandler().change_yaml(yaml_dict, form_field_dict)
YAMLHandler().write_dict_yaml(config_path, new_yaml)
# Complete all the git operations
repo = Repo(self.path)
GithubHandler.commit_all_changes(repo, 'Change site data')
GithubHandler.push_code(repo, 'gh-pages')
def load_social_profile_initials(self, request, form_class):
"""
Load the site profile initials from the database
"""
social_data = SocialProfileDbIO().get_obj({
'repo': RepoDbIO().get_repo(request.user)
})
return FormHandler(request, form_class).load_initials(social_data)
def post_social_profile_data(self, user, form_field_dict):
"""
handle the post social profile View method
:param user: the logged in user
:param form_field_dict: form field cleaned data
:return:
"""
repo = RepoDbIO().get_repo(user)
        # repo is the foreign key so it needs to be in the dict.
form_field_dict['repo'] = repo
social_data = SocialProfileDbIO().get_obj({'repo': repo})
if social_data:
SocialProfileDbIO().update_obj(social_data, form_field_dict)
else:
SocialProfileDbIO().create_obj(**form_field_dict)
config_path = os.path.join(self.path, '_config.yml')
self.del_repo(form_field_dict)
# Complete all the yaml operations
yaml_dict = YAMLHandler().read_yaml_file(config_path, True)
new_yaml = YAMLHandler().change_yaml(yaml_dict, form_field_dict)
YAMLHandler().write_dict_yaml(config_path, new_yaml)
# Complete all the git operations
repo = Repo(self.path)
GithubHandler.commit_all_changes(repo, 'Change site data')
GithubHandler.push_code(repo, 'gh-pages')
def load_posts_initials(self, request, form_class, pk=None):
"""
Load the posts initials from the database
"""
repo = RepoDbIO().get_repo(request.user)
if pk:
post = PostDbIO().get_obj({
'pk': pk,
'repo__user': request.user,
'repo': repo
})
if post is None:
raise PermissionDenied
else:
post = None
return FormHandler(request, form_class).load_initials(post)
def post_posts_data(self, user, form_field_dict, pk=None):
"""
handle the post posts View method
:param user: the logged in user
:param form_field_dict: form field cleaned data
We have to delete the file if the title is changed otherwise two
different files will be created.
:return:
"""
# TODO image copying is not done and delete the old one.
# TODO take care of the layout
repo = RepoDbIO().get_repo(user)
if pk:
post = PostDbIO().get_obj({
'pk': pk,
'repo__user': user,
'repo': repo
})
            if post is None:
                raise PermissionDenied
            if post.title != form_field_dict['title']:
file_name = ExtraHandler().file_name_f_title(post.title,
'html')
FileHandler('/'.join([self.path, '_posts']),
file_name).delete_file()
post = PostDbIO().update_obj(post, **form_field_dict)
else:
form_field_dict['repo'] = repo
post = PostDbIO().create_obj(**form_field_dict)
ExtraHandler().del_keys(form_field_dict, ('repo', 'content',))
yaml_content = YAMLHandler().create_yaml(form_field_dict)
w_yaml_content = ExtraHandler().wrap_content('---', yaml_content)
full_content = ExtraHandler().join_content(w_yaml_content,
post.content)
file_name = ExtraHandler().file_name_f_title(post.title,
'html')
FileHandler('/'.join([self.path, '_posts']),
file_name).rewrite_file(full_content)
# Complete all the git operations
repo = Repo(self.path)
GithubHandler.commit_all_changes(repo, 'Change site data')
GithubHandler.push_code(repo, 'gh-pages')
def load_page_initials(self, request, form_class, pk=None):
"""
Load the page initials from the database
"""
repo = RepoDbIO().get_repo(request.user)
if pk:
post = PostDbIO().get_obj({
'pk': pk,
'repo__user': request.user,
'repo': repo
})
else:
raise PermissionDenied
return FormHandler(request, form_class).load_initials(post)
def post_page_data(self, user, form_field_dict, pk=None):
"""
handle the post page View method
:param user: the logged in user
:param form_field_dict: form field cleaned data
We have to delete the file if the title is changed otherwise two
different files will be created.
:return:
"""
# TODO image copying is not done.
# TODO take care of the layout
repo = RepoDbIO().get_repo(user)
if pk:
post = PostDbIO().get_obj({
'pk': pk,
'repo__user': user,
'repo': repo
})
            if post is None:
                raise PermissionDenied
            if post.title != form_field_dict['title']:
file_name = ExtraHandler().file_name_f_title(post.title,
'html')
FileHandler('/'.join([self.path, '_posts']),
file_name).delete_file()
post = PostDbIO().update_obj(post, **form_field_dict)
else:
raise PermissionDenied
ExtraHandler().del_keys(form_field_dict, ('repo', 'content',))
yaml_content = YAMLHandler().create_yaml(form_field_dict)
w_yaml_content = ExtraHandler().wrap_content('---', yaml_content)
full_content = ExtraHandler().join_content(w_yaml_content,
post.content)
file_name = ExtraHandler().file_name_f_title(post.title,
'html')
FileHandler('/'.join([self.path, '_posts']),
file_name).rewrite_file(full_content)
# Complete all the git operations
repo = Repo(self.path)
GithubHandler.commit_all_changes(repo, 'Change site data')
GithubHandler.push_code(repo, 'gh-pages')
|
python
|
from radixlib.api_types.identifiers import AccountIdentifier
from radixlib.serializable import Serializable
from radixlib.api_types import TokenAmount
from typing import Dict, Any
import radixlib as radix
import json
class TransferTokens(Serializable):
""" Defines a TransferTokens action """
def __init__(
self,
from_account: str,
to_account: str,
amount: int,
token_rri: str,
) -> None:
""" Instantiates a new TransferTokens action used for the creation of new tokens.
Args:
from_account (str): The account which will be sending the tokens.
to_account (str): The account which will be getting the tokens.
amount (int): The amount of tokens to send.
token_rri (str): The RRI of the token to send.
"""
self.from_account: AccountIdentifier = AccountIdentifier(from_account)
self.to_account: AccountIdentifier = AccountIdentifier(to_account)
self.amount: int = amount
self.token_rri: str = token_rri
    def to_dict(self) -> Dict[str, Any]:
        """ Converts the object to a dictionary """
return radix.utils.remove_none_values_recursively(
radix.utils.convert_to_dict_recursively({
"type": "TransferTokens",
"from_account": self.from_account,
"to_account": self.to_account,
"amount": TokenAmount(
rri = self.token_rri,
amount = self.amount
)
})
)
def to_json_string(self) -> str:
""" Converts the object to a JSON string """
return json.dumps(self.to_dict())
@classmethod
def from_dict(
cls,
dictionary: Dict[Any, Any]
) -> 'TransferTokens':
""" Loads a TransferTokens from a Gateway API response dictionary
Args:
dictionary (dict): The dictionary to load the object from
Returns:
            TransferTokens: A new TransferTokens initialized from the dictionary
Raises:
TypeError: Raised when the type of the action in the dictionary does not match
the action name of the class
"""
if dictionary.get('type') != "TransferTokens":
raise TypeError(f"Expected a dictionary with a type of TransferTokens but got: {dictionary.get('type')}")
return cls(
from_account = dictionary['from_account']['address'],
to_account = dictionary['to_account']['address'],
amount = int(dictionary['amount']['value']),
token_rri = dictionary['amount']['token_identifier']['rri']
)
@classmethod
def from_json_string(
cls,
json_string: str
) -> 'TransferTokens':
""" Loads a TransferTokens from a Gateway API response JSON string. """
return cls.from_dict(json.loads(json_string))
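# Minimal usage sketch (illustrative only -- the addresses, amount and RRI below
# are placeholders, not real Radix identifiers):
#   action = TransferTokens(
#       from_account="rdx1...sender",
#       to_account="rdx1...recipient",
#       amount=10 * 10**18,
#       token_rri="xrd_rr1...",
#   )
#   print(action.to_json_string())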
|
python
|
# -*- coding: utf-8 -*-
from flask import render_template, redirect, request, url_for, flash, jsonify, abort
from flask_login import login_user, logout_user, login_required, current_user
from . import estate
from .. import db
from ..models import SzEstate
import urllib
import os
import time
import math
from datetime import datetime,date
import requests
from bs4 import BeautifulSoup
import chardet
initCached = False
max_cache_num = 1000
sz_cache = {}
# Published housing listings
@estate.route('/sz', methods=['GET','POST'])
#@login_required
def sz():
formDate = None
formZone = None
formSN = None
if request.method == 'POST':
if 'textDate' in request.form:
formDate = request.form['textDate'].lstrip().rstrip()
if 'textZone' in request.form:
formZone = request.form['textZone'].lstrip().rstrip()
if 'textSn' in request.form:
formSN = request.form['textSn'].lstrip().rstrip()
#print formDate,formZone,formSN
    # On first use of the system, initialize the cache
global initCached
global initCheckProcess
global sz_cache
if not initCached:
initCached = True
initCache()
    # Prepare data for the home page
    today = datetime.today()
    # Today's date as a string
    curDayString = '%d-%02d-%02d' % (today.year,today.month,today.day)
    # If no parameter is given, default to showing today's listings
if not formDate and not formZone and not formSN:#
formDate = curDayString
    # Search results
estates = searchEstates(formDate,formZone,formSN)
if not estates:
estates = []
return render_template("estate/sz_estate.html",curDayString=curDayString,formDate=formDate,curEstates=estates)
# Update listings
@estate.route('/update_sz', methods=['GET'])
@login_required
def update_sz():
#doCheck()
return redirect(url_for('estate.sz'))
# Initialize the cache
@estate.route('/cache_sz', methods=['GET'])
@login_required
def cache_sz():
initCache()
return redirect(url_for('estate.sz'))
# Search listings by the given criteria
def searchEstates(date,zone,sn,no_repeat=True):
global sz_cache
es = sz_cache.get(date)
    # Force a refresh of today's data
today = datetime.today()
curDayString = '%d-%02d-%02d' % (today.year,today.month,today.day)
if curDayString == date:
es = None
arr = []
    # Is sn numeric?
isSnNum = True
if sn:
try:
int(sn)
except:
isSnNum = False
if not es:
        # No cache; fetch all data from the database
#print 'search 1'
if date and zone and sn:
if isSnNum:
es = SzEstate.query.filter_by(pub_date=date).filter_by(zone=zone).filter_by(sn=sn).all()
else:
es = SzEstate.query.filter_by(pub_date=date).filter_by(zone=zone).filter(SzEstate.name.like('%'+sn+'%')).all()
elif zone and sn:
if isSnNum:
es = SzEstate.query.filter_by(zone=zone).filter_by(sn=sn).all()
else:
es = SzEstate.query.filter_by(zone=zone).filter(SzEstate.name.like('%'+sn+'%')).all()
elif date and sn:
if isSnNum:
es = SzEstate.query.filter_by(pub_date=date).filter_by(sn=sn).all()
else:
es = SzEstate.query.filter_by(pub_date=date).filter(SzEstate.name.like('%'+sn+'%')).all()
elif date and zone:
es = SzEstate.query.filter_by(pub_date=date).filter_by(zone=zone).all()
elif date:
es = SzEstate.query.filter_by(pub_date=date).all()
elif zone:
es = SzEstate.query.filter_by(zone=zone).all()
elif sn:
if isSnNum:
es = SzEstate.query.filter_by(sn=sn).all()
else:
es = SzEstate.query.filter(SzEstate.name.like('%'+sn+'%')).all()
        # Package the rows
for e in es:
ee = {'sid':e.sid,'name':e.name,'csn':e.csn,'zone':e.zone,'space':e.space,'usage':e.usage,'floor':e.floor,'sn':e.sn,'proxy':e.proxy,'pub_date':e.pub_date}
arr.append(ee)
analyzeEstate(ee)
elif zone or sn:
        # Cache available and a zone/sn filter given; search within the cache
#print 'search 2'
for e in es:
if zone and sn and zone == e.get('zone') and sn == e.get('sn'):
arr.append(e)
elif zone and zone == e.get('zone'):
arr.append(e)
elif sn and sn == e.get('sn'):
arr.append(e)
else:
        # No zone or sn filter
#print 'search 3'
arr = es
    # Filter out duplicate listings
if no_repeat:
no_repeat_arr = []
no_repeat_keys = []
for e in arr:
esn = e.get('sn')
if not esn or no_repeat_keys.count(esn) > 0:
continue
no_repeat_keys.append(esn)
no_repeat_arr.append(e)
return no_repeat_arr
return arr
# Fetch listings for the given parameters. page: page number, zone: district, tep_name: project name
retry_error = 0
max_retry_error = 5
def getEstates(page,zone="",tep_name=""):
global retry_error
global max_retry_error
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.1.2107.204 Safari/537.36'
values = {'targetpage' : page, 'zone' : zone, 'tep_name' : tep_name}
headers = {'User-Agent' : user_agent}
data = urllib.urlencode(values)
#print "data:",data
url = '%s%s%s' % ('http://ris.szpl.gov.cn/bol/EsSource.aspx','?',data)
print url
html = None
try:
html = requests.get(url, headers=headers)
except Exception,e:
print Exception,":",e
retry_error = retry_error + 1
if retry_error < max_retry_error:
            # Retry on error, at most max_retry_error times
print "retry count:%d %d %s %s" % (retry_error,page,zone,tep_name)
            return getEstates(page,zone,tep_name)
return []
    # Parse the HTML
es = parse_html(html.content)
retry_error = 0
return es
# Parse the scraped data
def parse_html(html):
objs = []
#print 'html:',html
charset_obj = chardet.detect(html)
#print 'html charset',charset_obj
soup = BeautifulSoup(html,'html5lib',from_encoding=charset_obj['encoding'])
table = soup.find('table',id='DataGrid1')
trs = []
if table:
trs = table.find_all('tr')
#print "parse len:",len(trs)
if len(trs) > 0:
trs = trs[1:]
for tr in trs:
tds = tr.find_all('td')
#sid
sid = tds[0].find('a')['onclick']
sid = sid[sid.find('(')+1:sid.find(')')]
        # Project name, e.g. 招商路北住宅楼18栋
name = tds[0].find('a').string
        # Contract serial number, e.g. (2017)第21090号
csn = tds[1].string
        # District, e.g. 南山 (Nanshan)
zone = tds[2].string
        # Area (m²), e.g. 75.40
space = tds[3].string
        # Usage, e.g. 多层铝窗住宅
usage = tds[4].string
        # Floor
floor = tds[5].string
        # Listing code
sn = tds[6].string
        # Agency name
proxy = tds[7].find('a').string
foid = tds[7].find('a')['href']
        # Agency phone number
proxy_phone = tds[7].string
        # Publication date
pub_date = tds[8].string
obj = {'sid':sid,'name':name,'csn':csn,'zone':zone,'space':space,'usage':usage,'floor':floor,'sn':sn,'proxy':proxy,'proxy_phone':proxy_phone,'pub_date':pub_date}
objs.append(obj)
#print obj
#print "%s %s %s" % (sid,pub_date,sn)
objs.reverse()
return objs
def hasUpdate(updates,sid):
for e in updates:
if e.get('sid') == sid:
return True
return False
# The routine that actually checks for updates
def doCheck(cached=True):
loop = True
page = 1
updates = []
while loop:
es = getEstates(page)
        # Descending order
es.reverse()
page = page + 1
loop = False
count = 0
update_arr = []
no_update_arr = []
for e in es:
count = count + 1
sz_es = SzEstate.query.filter_by(sid=e.get('sid')).first()
if not sz_es:
                # Insert at the front
if not hasUpdate(updates,e.get('sid')):
update_arr.append(e.get('sid',''))
updates.insert(0,e)
else:
no_update_arr.append(e.get('sid',''))
            # If the first record is also a new listing, go on to the next page
if count == len(es):
print 'doCheck next page:',page
loop = True
print "update_arr:",update_arr
print "no_update_arr:",no_update_arr
    # Update the database
for e in updates:
estate = SzEstate()
estate.sid=int(e.get('sid',''))
estate.name=e.get('name','')
estate.csn=e.get('csn','')
estate.zone=e.get('zone','')
estate.space=float(e.get('space',''))
estate.usage=e.get('usage','')
estate.floor=e.get('floor','')
estate.total_floor=e.get('total_floor','')
estate.sn=e.get('sn','')
estate.proxy=e.get('proxy','')
estate.pub_date=e.get('pub_date','')
db.session.add(estate)
if cached:
pushCache(e)
    # Commit the transaction
update_num = len(updates)
if update_num > 0:
db.session.commit()
    # Sort the cache and enforce its size limit
sortCache()
checkCacheNum()
return update_num
# Initialize all data
def initEstates(maxPage = None, delay = 0.5):
total_num = getEstatesNum()
total_num = int(total_num)
print 'total_num:',total_num
if not maxPage:
maxPage = math.floor(total_num/20)
maxPage = int(maxPage)+1
print 'maxPage:',maxPage
for i in range(maxPage):
time.sleep(delay)
page = maxPage-i
print 'proccess page:',page
if page < 1:
print 'proccess complete:',page
break
es = getEstates(page)
for e in es:
sz_es = SzEstate.query.filter_by(sid=e.get('sid')).first()
if not sz_es:
estate = SzEstate()
estate.sid=int(e.get('sid',''))
estate.name=e.get('name','')
estate.csn=e.get('csn','')
estate.zone=e.get('zone','')
estate.space=float(e.get('space',''))
estate.usage=e.get('usage','')
estate.floor=e.get('floor','')
estate.total_floor=e.get('total_floor','')
estate.sn=e.get('sn','')
estate.proxy=e.get('proxy','')
estate.pub_date=e.get('pub_date','')
db.session.add(estate)
        # Commit the transaction
db.session.commit()
# Get the total number of records
def getEstatesNum():
global retry_error
global max_retry_error
user_agent = 'Mozilla/4.0 (compatibl; MSIE 5.5; Windows NT)'
values = {'targetpage' : 1, 'zone' : '', 'tep_name' : ''}
headers = {'User-Agent' : user_agent}
data = urllib.urlencode(values)
url = '%s%s%s' % ('http://ris.szpl.gov.cn/bol/EsSource.aspx','?',data)
html = None
try:
html = requests.get(url, headers=headers)
except Exception,e:
print Exception,":",e
retry_error = retry_error + 1
if retry_error < max_retry_error:
            # Retry on error, at most max_retry_error times
            print "retry count:%d" % retry_error
            return getEstatesNum()
return 0
charset_obj = chardet.detect(html.content)
soup = BeautifulSoup(html.content,'html5lib',from_encoding=charset_obj['encoding'])
span_a1s = soup.find_all('span',class_='a1')
span_a1 = None
if len(span_a1s) > 1:
span_a1 = span_a1s[1]
num = 0
if span_a1:
num = int(span_a1.string[2:-4])
retry_error = 0
return num
# Initialize the cache
def initCache():
global sz_cache
del sz_cache
sz_cache = {}
sz_es = SzEstate.query.all()
total = len(sz_es)
sz_es = sz_es[total-max_cache_num:total]
for e in sz_es:
ee = {'sid':e.sid,'name':e.name,'csn':e.csn,'zone':e.zone,'space':e.space,'usage':e.usage,'floor':e.floor,'sn':e.sn,'proxy':e.proxy,'pub_date':e.pub_date}
pushCache(ee)
    # Sort
sortCache()
print '---------------initCache',len(sz_es)
# Get the maximum and minimum cached dates
def getCacheLimitDate():
global sz_cache
max,min = None,None
for k in sz_cache:
if not max:
min = max = k
if k > max:
max = k
if k < min:
min = k
return max,min
# Count the cached entries
def countCache():
count = 0
for k in sz_cache:
count = count + len(sz_cache[k])
return count
# Remove the oldest listing, i.e. the one with the smallest sid
def delMinEstate(arr):
min = None
for e in arr:
if not min:
min = e
if e.get('sid') < min.get('sid'):
min = e
if min:
print 'remove cache date:',min.get('pub_date')
arr.remove(min)
# Sort the cache
def sortCache(date=None):
print 'sortCache',date
for k in sz_cache:
if k == date or not date:
arr = sz_cache[k]
arr.sort(sortCompare)
# Comparison function used for sorting
def sortCompare(e1,e2):
if e1.get('sid')>e2.get('sid'):
return -1
return 1
# Analyze a listing
def analyzeEstate(estate):
    # Analysis disabled for now
#todo
return
es = SzEstate.query.filter_by(sn=estate.get('sn')).all()
arr = []
for e in es:
if e.sid != estate.get('sid'):
ee = {'sid':e.sid,'name':e.name,'csn':e.csn,'zone':e.zone,'space':e.space,'usage':e.usage,'floor':e.floor,'sn':e.sn,'proxy':e.proxy,'pub_date':e.pub_date}
arr.append(ee)
estate['same'] = arr
estate['new'] = True
for e in arr:
if e.get('pub_date') < estate.get('pub_date'):
estate['new'] = False
# Push an entry into the cache
def pushCache(e,check = False):
global sz_cache
global max_cache_num
pub_date = e.get('pub_date',None)
if pub_date:
arr = sz_cache.get(pub_date,None)
if not arr:
arr = []
sz_cache[pub_date] = arr
print 'add cache date:',pub_date
analyzeEstate(e)
arr.append(e)
if check:
            # Sort
sortCache(pub_date)
checkCacheNum()
# Check and maintain the cache size
def checkCacheNum():
count = countCache()
#print 'cache count start:',count
if count > max_cache_num:
maxDate,minDate = getCacheLimitDate()
delMinEstate(sz_cache[minDate])
count = countCache()
#print 'cache count end:',count
#if count > max_cache_num:
#checkCacheNum()
|
python
|
# Freetype library
freetype = StaticLibrary( 'freetype', sources = [ 'src/base/*', 'src/gzip/ftgzip.c', 'src/winfonts/winfnt.c', 'src/cid/type1cid.c' ], defines = [ 'FT2_BUILD_LIBRARY', 'FT_CONFIG_OPTION_SYSTEM_ZLIB' ] )
freetype.include( 'include' )
# Add Freetype modules sources
prefix = { 'gzip': 'ft', 'cid': 'type1', 'lzw': 'ft' }
for folder in Folders( 'src/*' ):
if not folder.name in ['tools', 'base', 'bzip2', 'cache', 'winfonts']:
fileName = (prefix[folder.name] if folder.name in prefix.keys() else '') + folder.name + '.c'
freetype.files( folder.path + '/' + fileName )
# Platform specific settings
if platform == 'MacOS':
freetype.define( 'DARWIN_NO_CARBON' )
|
python
|
"""
Created on Wednesday September 25 17:07 2019
tools to work with XRF data from the Geotek MSCL (Olympus head)
@author: SeanPaul La Selle
"""
import os
import sys
import glob
import tkinter
from tkinter import filedialog
import numpy as np
import csv
import pandas
import matplotlib as matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
matplotlib.rcParams['pdf.fonttype'] = 42
import warnings
from corescan_plotting import ct, linescan
###############################################################################
def xrf_in(filename='',mode='geochem'):
"""
read in Geotek MSCL (v7.9) XRF data from from .out file
"""
## Get filename if not specified in function call
if not filename:
filename = filedialog.askopenfilename()
if not filename:
sys.exit()
header, data = csv_xrf_parser(filename)
dict = xrf_array2dict(header, data, mode)
# Determine the directory of the file
directory = os.path.dirname(filename)
## Read other files
# if not xml_fname:
# xml_fname = glob.glob(os.path.splitext(filename)[0]+'*.xml')[0]
# xml_dic = linescan_xml(xml_fname)
return dict
###############################################################################
def csv_xrf_parser(filename):
"""
parses a Geotek XRF .out file (MSCL v7.9), returns the elements and an
array with depths, counts, ppm and errors
"""
with open(filename) as csvfile:
readcsv = csv.reader(csvfile,delimiter='\t')
header=[]
data = []
for i,row in enumerate(readcsv): # Assume header is 9 rows
header.append(row)
if(i>=9):
break
for row in readcsv: # From here, csv should be data
data.append([float(i) for i in row])
for i,r in enumerate(data): # Need to pad rows with empty data
if len(r) != len(max(data,key=len)):
r = np.append(r,np.ones((len(max(data,key=len))-len(r))))
data[i] = np.nan*r
data = np.reshape(data,(np.shape(data)[0],len(max(data,key=len))))
return header, data
###############################################################################
def xrf_array2dict(header,data,mode='geochem'):
"""
passes an array of Geotek XRF data (MSCL v7.9) to a dictionary of values
for each element
"""
dict = {'ID': os.path.splitext(str.split(header[0][0])[4])[0]}
dict["elements"] = header[7][5::2] # Assume elements start on the 7th row
dict["depth"] = data[:,0]
dict["section number"] = data[:,1]
dict["section depth"] = data[:,2]
dict["xrf total counts"] = data[:,3]
dict["live time"] = data[:,4]
dict["comp"] = data[:,5::2] # full array of compositional data
dict["error"] = data[:,6::2] # array of errors in measurement
for i,e in enumerate(dict["elements"]): # create key-value pair for elements
dict[e] = dict["comp"][:,i]
#Set ppm tolerance depending on soil vs geochem mode
if 'geochem' in mode:
tol = 500
dict = remove_open(dict)
elif 'soil' in mode:
tol = 50.
dict['comp'] = removeinvalid(dict['comp'],tol=tol)
if 'geochem' in mode:
dict['clr'] = clr(dict['comp'])
dict['mode'] = mode
return dict
###############################################################################
def remove_open(dict,k=1000000):
"""
removes rows from a compositional data array (measurements x elements) if
they don't add up to a constant sum "k", which should equal
k = 1, 100, 10^6, 10^9, etc. (proportions, %, ppm, ppb, etc.)
Default is set for ppm (1,000,000)
"""
sums = [np.sum(row) for row in dict['comp']]
rounded_sums = np.around(sums,decimals=0)
not_closed = np.where(rounded_sums != k)
keys = ['comp','depth','section number','section depth','xrf total counts',
'live time','error']
for e in dict['elements']:
keys.append(e)
for key in keys:
dict[key] = np.delete(dict[key],not_closed,axis=0)
return dict
###############################################################################
def removeinvalid(array,tol=500.):
"""
remove all XRF measurements whose concentrations are less than 'tol'.
geotek recommends 500+ ppm in geochem mode, 50+ ppm in soil mode.
"""
array[array < tol] = np.nan
return array
###############################################################################
def clr(array):
"""
centered log ratio transform on matrix with each column having a different
compositional component
ported to python and modified from matlab code written by:
Thio-Henestrosa, S., and J. A. Martin-Fernandez (2005),
Dealing with compositional data: the freeware CoDaPack,
Math. Geol., 37(7), 773-793.
"""
rows = np.shape(array)[0]
clr = np.zeros_like(array)
m = np.ma.log(array)
for r in range(rows):
clr[r,:] = m[r,:] - np.nanmean(m[r,:])
return clr
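# Worked example (illustrative, not from the original source): for the 2x2 array
# [[1., 1.], [2., 8.]] each row is log-transformed and centred on its own mean,
# giving approximately [[0., 0.], [-0.693, 0.693]].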
###############################################################################
def makelogratio(dict, ratio):
"""
dict[ratio] is the log ratio of elements e1 and e2
ratio is a string in the form 'e1/e2' and e1 and e2 are
elements in dic['elements']. If not in the form 'e1/e2',
will not do anything (pass)
"""
try:
e1, e2 = ratio.split('/')
dict[ratio] = np.log(dict[e1]/dict[e2])
except ValueError:
pass
return dict
###############################################################################
def makeppmratio(dict, ratio):
"""
dict[ratio] is the ratio of ppm concentrations of elements e1 and e2
ratio is a string in the form 'e1/e2' and e1 and e2 are
elements in dic['elements']. If not in the form 'e1/e2',
will not do anything (pass)
"""
try:
e1, e2 = ratio.split('/')
dict[ratio] = dict[e1]/dict[e2]
except ValueError:
pass
return dict
###############################################################################
def nptsmooth(y, n, inf_nan=True, keep_nans=True):
"""
smooths the data in y using a running mean
over 2*n+1 successive point, n points on each side of the
current point. At the ends of the series skewed or one-sided
means are used.
slightly modified from code ported from Matlab code written by:
Olof Liungman, 1997
Dept. of Oceanography, Earth Sciences Centre
Göteborg University, Sweden
E-mail: [email protected]
"""
y = y.copy()
if inf_nan:
y[y == np.inf] = np.nan
y[y == -np.inf] = np.nan
d = len(y)
filtr = np.isnan(y)
out = np.zeros_like(y)
temp = np.zeros((2*n+1, d-2*n))
temp[n,:] = y[n:-n]
with warnings.catch_warnings(): # ignore "mean of empty slice" warnings
warnings.simplefilter("ignore", category=RuntimeWarning)
for ii in range(n):
out[ii] = np.nanmean(y[:ii+n+1])
out[d-ii-1] = np.nanmean(y[d-ii-1-n:])
temp[ii,:] = y[ii:d-2*n+ii]
temp[ii+n+1,:] = y[ii+n+1:d-n+ii+1]
out[n:d-n] = np.nanmean(temp, axis=0)
if keep_nans:
out[filtr] = np.nan
return out
###############################################################################
def plot_xrf(dict, elements, smooth=5, clr=False):
"""
plot parts per mil (or centered log ratios) elemental ratios for
elements/element pairs as a function of depth.
elements = array of strings for elements/ratios to plot e.g. ['Al','Ti','Ca/K']
smooth = window size to smooth xrf data
clr = False by default, will plot centered log ratios if True
"""
if not elements:
elements = dict['elements']
root = tkinter.Tk()
pix2in = root.winfo_fpixels('1i')
screen_width = root.winfo_screenwidth()/pix2in*0.75
screen_height = root.winfo_screenheight()/pix2in*0.75
screen_aspect = screen_width/screen_height
colormap = plt.cm.tab20
norm = matplotlib.colors.Normalize(vmin=0,vmax = np.size(elements))
nplots = np.size(elements)
fig = plt.figure(figsize=(screen_width*nplots/12,screen_height))
keep_nans=False # for npointssmooth
LinearLocator = matplotlib.ticker.LinearLocator
for i,e in enumerate(elements):
ax = plt.subplot(1,nplots,i+1)
ax.xaxis.set_major_locator(LinearLocator(2))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
if '/' in e:
if clr:
dict = makelogratio(dict,e)
else:
dict = makeppmratio(dict,e)
p = ax.plot(dict[e],dict['depth'],color = colormap(norm(i)))
else:
if clr:
clr_vector = dict['clr'][:,dict['elements'].index(e)]
p = ax.plot(clr_vector,dict['depth'],color = colormap(norm(i)))
else:
ppm_vector = dict[e]
p = ax.plot(ppm_vector,dict['depth'],color = colormap(norm(i)))
if smooth:
p[0].set_alpha(0.4)
if '/' in e:
x = nptsmooth(dict[e], smooth, keep_nans=keep_nans)
else:
if clr:
x = nptsmooth(dict['clr'][:,dict['elements'].index(e)],
smooth, keep_nans=keep_nans)
else:
x = nptsmooth(dict[e],smooth, keep_nans=keep_nans)
ax.plot(x, dict['depth'], color=colormap(norm(i)))
ax.xaxis.set_ticks_position('bottom')
if not clr:
plt.xticks(rotation=90)
if i == 0: # Far left plot needs depth ticks
ax.yaxis.set_ticks_position('left')
loc = matplotlib.ticker.MultipleLocator(base=10.0)
loc1 = matplotlib.ticker.MultipleLocator(base=1.0)
ax.yaxis.set_major_locator(loc)
ax.yaxis.set_minor_locator(loc1)
ax.yaxis.set_tick_params(labelleft=True)
ax.set_ylabel('Depth in core (cm)')
ax.yaxis.set_label_position('left')
ax.spines['left'].set_visible(True)
elif i == nplots-1: # Far right plot needs depth ticks
ax.yaxis.set_ticks_position('right')
loc = matplotlib.ticker.MultipleLocator(base=10.0)
loc1 = matplotlib.ticker.MultipleLocator(base=1.0)
ax.yaxis.set_major_locator(loc)
ax.yaxis.set_minor_locator(loc1)
ax.yaxis.set_tick_params(labelright=True)
ax.set_ylabel('Depth in core (cm)')
ax.yaxis.set_label_position('right')
ax.spines['right'].set_visible(True)
else: # Plots in middle don't need depth ticks
ax.yaxis.set_ticks([])
if ax.get_xlim()[0] < 0.: # avoid negative x axis limits
ax.set_xlim(0,ax.get_xlim()[1])
ax.set_title(e,color=colormap(norm(i)))
# ax.yaxis.grid(color='k',linewidth=0.1)
ax.invert_yaxis()
return fig
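# Example usage (a sketch -- the .out filename and element list are assumptions):
#   xrf = xrf_in('core_section_1.out', mode='geochem')
#   fig = plot_xrf(xrf, ['Al', 'Ti', 'Ca/K'], smooth=5, clr=False)
#   fig.savefig('core_section_1_xrf.pdf')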
###############################################################################
def plot_ct_ls_xrf(ct_image, ct_xml,
ls_image, ls_xml,
dict, elements, clr=False, smooth=5,
ct_vmin=15000,ct_vmax=30000):
"""
plot ppm or centered log ratio of elements and ratios in 'elements' next to
CT and linescan images.
use "ct_in" and "ls_in" to complete image processing before running
"plot_xrf_clr". Set clr=True to plot centered log ratios. By default,
"parts per mil" are plotted.
"""
root = tkinter.Tk()
pix2in = root.winfo_fpixels('1i')
screen_width = root.winfo_screenwidth()/pix2in*0.75
screen_height = root.winfo_screenheight()/pix2in*0.75
screen_aspect = screen_width/screen_height
nplots = np.size(elements)+1
if nplots > 12:
print('WARNING: CANNOT PLOT MORE THAN 11 ELEMENTS AT A TIME')
fig = plt.figure(figsize=(screen_width*nplots/12,screen_height))
plt.clf()
# Plot CT
aspect=1
ax = plt.subplot(1,nplots,1)
ct_img = plt.imshow(ct_image, aspect=aspect,
extent=(0,ct_xml['physical-width'],
ct_xml['physical-height']+ct_xml['physical-top']/100,
ct_xml['physical-top']/100),vmin=ct_vmin,vmax=ct_vmax,
cmap=matplotlib.cm.CMRmap)
ls_img = plt.imshow(ls_image, aspect=aspect,
extent=(ct_xml['physical-width']+
0.2*ct_xml['physical-width'],
ct_xml['physical-width']+ls_xml['physical-width'],
ls_xml['physical-top']+ls_xml['physical-height'],
ls_xml['physical-top']))
ax.yaxis.set_major_locator(MultipleLocator(10))
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.set_xlim(0,ct_xml['physical-width']+ls_xml['physical-width'])
ax.set_ylim(ct_xml['physical-height']+ct_xml['physical-top']/100,
ct_xml['physical-top']/100) ## set equal to the linescan
ax.get_xaxis().set_visible(False)
ax.set_anchor('NW')
im_pos=ax.get_position()
# Plot XRF
keep_nans=True # for npointssmooth
LinearLocator = matplotlib.ticker.LinearLocator
colormap = plt.cm.tab20
norm = matplotlib.colors.Normalize(vmin=0,vmax = np.size(elements))
n = np.size(elements)
smooth=smooth
depth = ls_xml['physical-top'] + dict['section depth']
for i,e in enumerate(elements):
ax = plt.subplot(1,nplots,i+2)
ax.xaxis.set_major_locator(LinearLocator(2))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
pos=ax.get_position()
ax.set_position([pos.x0,im_pos.y0,pos.width,im_pos.height])
ax.set_ylim(ct_xml['physical-height']+ct_xml['physical-top']/100,
ct_xml['physical-top']/100)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
if '/' in e:
if clr:
dict = makelogratio(dict,e)
else:
dict = makeppmratio(dict,e)
p = ax.plot(dict[e],dict['depth'],color = colormap(norm(i)))
else:
if clr:
clr_vector = dict['clr'][:,dict['elements'].index(e)]
p = ax.plot(clr_vector,depth,color = colormap(norm(i)))
else:
ppm_vector = dict[e]
p = ax.plot(ppm_vector,depth,color = colormap(norm(i)))
if smooth:
p[0].set_alpha(0.4)
if '/' in e:
x = nptsmooth(dict[e], smooth, keep_nans=keep_nans)
else:
if clr:
x = nptsmooth(dict['clr'][:,dict['elements'].index(e)],
smooth, keep_nans=keep_nans)
else:
x = nptsmooth(dict[e],smooth, keep_nans=keep_nans)
ax.plot(x, depth, color=colormap(norm(i)))
if not clr:
plt.xticks(rotation=90)
ax.xaxis.set_ticks_position('bottom')
if i == n-1: # Far right plot needs depth ticks
ax.spines['left'].set_visible(False)
ax.yaxis.set_ticks_position('right')
loc = matplotlib.ticker.MultipleLocator(base=10.0)
ax.yaxis.set_major_locator(loc)
ax.yaxis.set_tick_params(labelright=True)
ax.set_ylabel('Depth in core (cm)')
ax.yaxis.set_label_position('right')
ax.spines['right'].set_visible(True)
else: # Plots in middle don't need depth ticks
ax.yaxis.set_ticks([])
ax.set_title(e,color=colormap(norm(i)))
|
python
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'fútū'
CN=u'扶突'
NAME=u'futu41'
CHANNEL='largeintestine'
CHANNEL_FULLNAME='LargeIntestineChannelofHand-Yangming'
SEQ='LI18'
if __name__ == '__main__':
pass
|
python
|
class Bar():
pass
|
python
|
import os
import core.settings as st
from flask import Flask
from api.login import app as login_router
from api.create_account import app as account_router
from api.products import app as products_router
from api.producer import app as producer_router
from api.shop_car import app as shop_car_router
from api.order import app as order_router
CONFIG_FILES = os.path.join('static')
app = Flask(__name__)
app.secret_key = os.urandom(24)
app.register_blueprint(login_router)
app.register_blueprint(account_router)
app.register_blueprint(products_router)
app.register_blueprint(producer_router)
app.register_blueprint(shop_car_router)
app.register_blueprint(order_router)
if __name__ == '__main__':
app.run(debug = True, port = st.PORT)
|
python
|
# Copyright (c) 2017 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import braintree
import django
from django.conf import settings
settings.configure(
DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
PAYMENT_METHOD_SECRET=b'MOW_x1k-ayes3KqnFHNZUxvKipC8iLjxiczEN76TIEA=',
PAYMENT_PROCESSORS={
'BraintreeTriggered': {
'setup_data': {
'environment': braintree.Environment.Sandbox,
                'merchant_id': "your-merchant-id-here",
'public_key': "your-public-id-here",
'private_key': "your-private-id-here"
},
'class': 'silver_braintree.payment_processors.BraintreeTriggered',
},
'BraintreeTriggeredRecurring': {
'setup_data': {
'environment': braintree.Environment.Sandbox,
                'merchant_id': "your-merchant-id-here",
'public_key': "your-public-id-here",
'private_key': "your-private-id-here"
},
'class': 'silver_braintree.payment_processors.BraintreeTriggeredRecurring'
},
'Manual': {
'class': 'silver.models.payment_processors.manual.ManualProcessor'
}
},
INSTALLED_APPS=(
'dal',
'dal_select2',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.admin',
'silver',
'silver_braintree',),
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'LOCATION': 'unique-snowflake',
}
},
USE_TZ=True,
STATIC_URL='/static/',
SILVER_AUTOMATICALLY_CREATE_TRANSACTIONS=True,
SECRET_KEY='dummy'
)
django.setup()
|
python
|
from .logic import *
from .notifications import *
from .preprocessors import *
from .vigil import *
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2021.03.22
Start operation.
@author: zoharslong
"""
|
python
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
import os
import time as t
import numpy as np
import theano as th
import theano.tensor as T
import theano.ifelse
import theano.compile
import theano.compile.mode
import hand_io
############## Objective in theano ##################
def get_identity(dim,dtype):
A = T.zeros((dim,dim),dtype=dtype)
for i in range(dim):
A = T.set_subtensor(A[i,i], 1.)
return A
def to_pose_params(theta,nbones):
pose_params = T.zeros((nbones+3,3),theta.dtype)
pose_params = T.set_subtensor(pose_params[0,:], theta[0:3])
pose_params = T.set_subtensor(pose_params[1,:], T.ones((3,),theta.dtype))
pose_params = T.set_subtensor(pose_params[2,:], theta[3:6])
i_theta = 6
i_pose_params = 5
n_fingers = 5
for i_finger in range(n_fingers):
for i in [1, 2, 3]:
pose_params = T.set_subtensor(pose_params[i_pose_params,0], theta[i_theta])
i_theta += 1
if i == 1:
pose_params = T.set_subtensor(pose_params[i_pose_params,1], theta[i_theta])
i_theta += 1
i_pose_params += 1
i_pose_params += 1
return pose_params
def euler_angles_to_rotation_matrix(xzy):
tx = xzy[0]
ty = xzy[2]
tz = xzy[1]
Rx = get_identity(3,dtype=tx.dtype)
Rx = T.set_subtensor(Rx[1,1],T.cos(tx))
Rx = T.set_subtensor(Rx[2,1],T.sin(tx))
Rx = T.set_subtensor(Rx[1,2],-Rx[2,1])
Rx = T.set_subtensor(Rx[2,2],Rx[1,1])
Ry = get_identity(3,dtype=tx.dtype)
Ry = T.set_subtensor(Ry[0,0],T.cos(ty))
Ry = T.set_subtensor(Ry[0,2],T.sin(ty))
Ry = T.set_subtensor(Ry[2,0],-Ry[0,2])
Ry = T.set_subtensor(Ry[2,2],Ry[0,0])
Rz = get_identity(3,dtype=tx.dtype)
Rz = T.set_subtensor(Rz[0,0],T.cos(tz))
Rz = T.set_subtensor(Rz[1,0],T.sin(tz))
Rz = T.set_subtensor(Rz[0,1],-Rz[1,0])
Rz = T.set_subtensor(Rz[1,1],Rz[0,0])
return T.dot(T.dot(Rz,Ry),Rx)
def get_posed_relatives(pose_params,base_relatives):
def inner(rot_param,base_relative):
tr = get_identity(4, dtype = base_relative.dtype)
R = euler_angles_to_rotation_matrix(rot_param)
tr = T.set_subtensor(tr[:3,:3], R)
return T.dot(base_relative, tr)
relatives,_ = th.scan(fn=inner,
outputs_info=None,
sequences=[pose_params[3:],base_relatives])
return relatives
### warning, this function contains hack ###
def relatives_to_absolutes(relatives,parents):
def compute_absolute(i,parent,relative,absolutes):
# hack (parent == -1 accesses last element - we set it to zero)
# Theano did not take ifselse here
absolutes = T.set_subtensor(absolutes[i],T.dot(absolutes[parent],relative))
return absolutes
absolutes = T.zeros_like(relatives)
# hack (parent == -1 accesses last element - we set it to zero)
# Theano did not take ifselse here
absolutes = T.set_subtensor(absolutes[-1],get_identity(4,dtype=relatives.dtype))
absolutes_timeline,_ = th.scan(fn=compute_absolute,
sequences=[T.arange(relatives.shape[0]),parents,relatives],
outputs_info=absolutes)
return absolutes_timeline[-1]
def angle_axis_to_rotation_matrix(angle_axis):
n = T.sqrt(T.sum(angle_axis**2))
def aa2R():
angle_axis_normalized = angle_axis / n
x = angle_axis_normalized[0]
y = angle_axis_normalized[1]
z = angle_axis_normalized[2]
s, c = T.sin(n), T.cos(n)
R = T.zeros((3,3),dtype=angle_axis.dtype)
R = T.set_subtensor(R[0,0], x*x+(1-x*x)*c)
R = T.set_subtensor(R[0,1], x*y*(1-c)-z*s)
R = T.set_subtensor(R[0,2], x*z*(1-c)+y*s)
R = T.set_subtensor(R[1,0], x*y*(1-c)+z*s)
R = T.set_subtensor(R[1,1], y*y+(1-y*y)*c)
R = T.set_subtensor(R[1,2], y*z*(1-c)-x*s)
R = T.set_subtensor(R[2,0], x*z*(1-c)-y*s)
R = T.set_subtensor(R[2,1], z*y*(1-c)+x*s)
R = T.set_subtensor(R[2,2], z*z+(1-z*z)*c)
return R
return th.ifelse.ifelse(T.lt(n,.0001), get_identity(3, dtype=angle_axis.dtype), aa2R())
def apply_global_transform(pose_params,positions):
R = angle_axis_to_rotation_matrix(pose_params[0])
s = pose_params[1]
R *= s[np.newaxis,:]
t = pose_params[2]
return T.transpose(T.dot(R, T.transpose(positions))) + t
def get_skinned_vertex_positions(pose_params,base_relatives,parents,inverse_base_absolutes,
base_positions,weights,mirror_factor):
relatives = get_posed_relatives(pose_params,base_relatives)
absolutes = relatives_to_absolutes(relatives,parents)
transforms,_ = th.scan(fn=(lambda A, B : T.dot(A,B)),
sequences=[absolutes,inverse_base_absolutes])
positions = T.tensordot(transforms,base_positions,[2, 1]).dimshuffle((2,0,1))
positions = (positions * weights[:,:,np.newaxis]).sum(axis=1)[:,:3]
positions = T.set_subtensor(positions[:,0],positions[:,0]*mirror_factor)
positions = apply_global_transform(pose_params,positions)
return positions
def hand_objective(params,nbones,base_relatives,parents,inverse_base_absolutes,base_positions,
weights,mirror_factor,points,correspondences):
pose_params = to_pose_params(params,nbones)
vertex_positions = get_skinned_vertex_positions(pose_params,base_relatives,parents,
inverse_base_absolutes,base_positions,
weights,mirror_factor)
err,_ = th.scan(fn=(lambda pt, i_vert : pt - vertex_positions[i_vert]),
sequences=[points,correspondences],
outputs_info=None)
return err
params_ = T.dvector('params_')
parents_ = T.ivector('parents_')
base_relatives_ = T.dtensor3('base_relatives_')
inverse_base_absolutes_ = T.dtensor3('inverse_base_absolutes_')
triangles_ = T.imatrix('triangles_')
base_positions_ = T.dmatrix('base_positions_')
weights_ = T.dmatrix('weights_')
nbones_ = T.iscalar('nbones_')
mirror_factor_ = T.dscalar('mirror_factor_')
correspondences_ = T.ivector('correspondences_')
points_ = T.dmatrix('points_')
triangles_ = T.imatrix('triangles_')
seed_ = T.dvector('seed_')
compile_mode = 'FAST_COMPILE'
#compile_mode = 'FAST_RUN'
th.config.linker='cvm'
start = t.time()
err_ = hand_objective(params_,nbones_,base_relatives_,parents_,inverse_base_absolutes_,base_positions_,
weights_,mirror_factor_,points_,correspondences_)
f = th.function([params_,nbones_,base_relatives_,parents_,inverse_base_absolutes_,base_positions_,
weights_,mirror_factor_,points_,correspondences_], err_, mode=compile_mode)
end = t.time()
tf_compile = (end - start)
print("tf_compile: %f" % tf_compile)
start = t.time()
jac = T.Rop(T.flatten(err_),params_,seed_)
fjac = th.function([params_,seed_,nbones_,base_relatives_,parents_,inverse_base_absolutes_,base_positions_,
weights_,mirror_factor_,points_,correspondences_], jac, mode=compile_mode)
end = t.time()
tJ_compile = (end - start)
print("tJ_compile: %f" % tJ_compile)
ntasks = (len(sys.argv)-1)//5
for task_id in range(ntasks):
print("task_id: %i" % task_id)
argv_idx = task_id*5 + 1
dir_in = sys.argv[argv_idx]
dir_out = sys.argv[argv_idx+1]
fn = sys.argv[argv_idx+2]
nruns_f = int(sys.argv[argv_idx+3])
nruns_J = int(sys.argv[argv_idx+4])
model_dir = dir_in + "model/"
fn_in = dir_in + fn
fn_out = dir_out + fn
params, data = hand_io.read_hand_instance(model_dir, fn_in + ".txt", False)
if data.model.is_mirrored:
mirror_factor = -1.
else:
mirror_factor = 1.
start = t.time()
for i in range(nruns_f):
err = f(params, data.model.nbones, data.model.base_relatives, data.model.parents,
data.model.inverse_base_absolutes,data.model.base_positions,
data.model.weights,mirror_factor,data.points,
data.correspondences)
end = t.time()
tf = (end - start)/nruns_f
print("err:")
#print(err)
name = "Theano_rop"
seed = np.eye(params.shape[0],dtype=params.dtype)
tJ = 0
if nruns_J > 0:
start = t.time()
for i in range(nruns_J):
J = np.array([fjac(params,curr_seed,data.model.nbones, data.model.base_relatives, data.model.parents,
data.model.inverse_base_absolutes,data.model.base_positions,
data.model.weights,mirror_factor,data.points,
data.correspondences)
for curr_seed in seed]).transpose()
end = t.time()
tJ = ((end - start)/nruns_J) + tf ###!!!!!!!!! adding this because no function value is returned by fjac
print("J:")
#print(J)
hand_io.write_J(fn_out + "_J_" + name + ".txt",J)
hand_io.write_times(fn_out + "_times_" + name + ".txt",tf,tJ)
|
python
|
# Copyright 2018 Cisco and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Miroslav Kovac"
__copyright__ = "Copyright 2018 Cisco and its affiliates"
__license__ = "Apache License, Version 2.0"
__email__ = "[email protected]"
import os
import shutil
import sys
import tempfile
from git import Repo
from git.cmd import Git
from git.exc import GitCommandError
'''Notes:
repo.index.add(repo.untracked_files)
Add all new files to the index
repo.index.add([i.a_path for i in repo.index.diff(None)])
Add all modified files to the index. Also works for new directories.
repo.index.commit('commit for delete file')
Commit any changes
repo.git.push()
Push changes to origin.
repo.git.rm([f1, f2, ...])
Remove files safely and add removal to index (note that files are
left in place, and then look like untracked files).
'''
def pull(repo_dir):
"""
    Pull the latest changes from master into the specified directory.
    The directory should contain the path where the .git file is located.
:param repo_dir: directory where .git file is located
"""
g = Git(repo_dir)
g.pull()
a = Repo(repo_dir)
for s in a.submodules:
s.update(recursive=True, init=True)
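# Usage sketch (illustrative; the path below is a placeholder for a directory that
# already contains a .git folder):
# >>> pull('/var/yang/repositories/yangmodels')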
class RepoUtil(object):
"""Simple class for rolling up some git operations as part of file
manipulation. The user should create the object with the URL to
    the repository and an appropriate set of credentials. At this
    point nothing has been cloned yet; call clone() to check the
    repository out into a temporary directory.
"""
def __init__(self, repourl):
self.repourl = repourl
self.localdir = None
self.repo = None
def get_repo_dir(self):
"""Return the repository directory name from the URL"""
return os.path.basename(self.repourl)
def get_repo_owner(self):
"""Return the root directory name of the repo. In GitHub
parlance, this would be the owner of the repository.
"""
owner = os.path.basename(os.path.dirname(self.repourl))
if ':' in owner:
return owner[owner.index(':') + 1:]
return owner
def clone(self, config_user_name=None, config_user_email=None):
"""Clone the specified repository to a local temp directory. This
        method may generate a git.exc.GitCommandError if the
repository does not exist
"""
self.localdir = tempfile.mkdtemp()
self.repo = Repo.clone_from(self.repourl, self.localdir)
if config_user_name:
with self.repo.config_writer() as config:
config.set_value('user', 'email', config_user_email)
config.set_value('user', 'name', config_user_name)
def updateSubmodule(self, recursive=True, init=True):
"""Clone submodules of a git repository"""
for submodule in self.repo.submodules:
submodule.update(recursive, init)
def add_all_untracked(self):
"""Commit all untracked and modified files. This method shouldn't
generate any exceptions as we don't allow unexpected
operations to be invoked.
"""
self.repo.index.add(self.repo.untracked_files)
modified = []
deleted = []
for i in self.repo.index.diff(None):
if os.path.exists(self.localdir+'/'+i.a_path):
modified.append(i.a_path)
else:
deleted.append(i.a_path)
if len(modified) > 0:
self.repo.index.add(modified)
if len(deleted) > 0:
self.repo.index.remove(deleted)
def commit_all(self, message='RepoUtil Commit'):
"""Equivalent of git commit -a -m MESSAGE."""
self.repo.git.commit(a=True, m=message)
def push(self):
"""Push repo to origin. Credential errors may happen here."""
self.repo.git.push("origin")
def remove(self):
"""Remove the temporary storage."""
shutil.rmtree(self.localdir)
self.localdir = None
self.repo = None
if __name__ == '__main__':
#
# local imports
#
from argparse import ArgumentParser
#
# test arguments
#
parser = ArgumentParser(description='RepoUtil test params:')
parser.add_argument('userpass', nargs=1, type=str,
help='Provide username:password for github https access'
)
args = parser.parse_args()
if not args.userpass:
print("username:password required")
sys.exit(1)
#
# This repo exists
#
TEST_REPO = 'https://%[email protected]/einarnn/test.git'
#
# This repo does not exist
#
BOGUS_REPO = 'https://%[email protected]/einarnn/testtest.git'
#
# Create, clone and remove repo that exists.
#
print('\nTest 1\n------')
try:
r = RepoUtil(TEST_REPO % args.userpass[0])
r.clone()
print('Temp directory: '+r.localdir)
r.remove()
except GitCommandError as e:
        print('Git Exception: ' + e.stderr)
#
# Create, clone and modify a repo with good credentials. Will Then
# try to modify, commit and push. If the file 'ok.txt' is present,
# we will try to delete it. If it's not, we will create it!
#
print('\nTest 2\n------')
try:
r = RepoUtil(TEST_REPO % args.userpass[0])
r.clone()
print('Temp directory: '+r.localdir)
ok_path = r.localdir + '/ok.txt'
if os.path.exists(ok_path):
print('Removing test file!')
r.repo.git.rm(ok_path)
# os.remove(ok_path)
else:
print('Creating test file!')
with open(ok_path, 'w') as f:
f.write('hello!\n')
f.close()
try:
r.add_all_untracked()
r.commit_all(message='push should succeed')
r.push()
except GitCommandError as e:
print('Git Exception: ' + e.stderr)
r.remove()
except GitCommandError as e:
print('Git Exception: ' + e.stderr)
#
# Create, clone and modify a repo with bogus credentials. Will Then try
# to modify, commit and push, but still with bogus credentials.
#
print('\nTest 3\n------')
try:
r = RepoUtil(TEST_REPO % (args.userpass[0]+'bogus'))
r.clone()
print('Temp directory: '+r.localdir)
with open(r.localdir+'/bogus.txt', 'w') as f:
f.write('hello!\n')
f.close()
try:
r.add_all_untracked()
r.commit_all(message='push should fail')
r.push()
except GitCommandError as e:
print('Git Exception as expected: ' + e.stderr)
r.remove()
except GitCommandError as e:
print('Git Exception: ' + e.stderr)
#
# Try to create, clone and remove repo that does not exist. If
    # this is the case, no dangling directory is left, so there is no need to
# try and remove it.
#
print('\nTest 4\n------')
try:
r = RepoUtil(BOGUS_REPO % args.userpass[0])
r.clone()
print('Temp directory: ' + r.localdir)
r.remove()
except GitCommandError as e:
print('Git Exception as expected: ' + e.stderr)
|
python
|
import logging,uuid
from exchangemanager import ExchangeManager
from result import Result
from order import Order
class BacktestManager(ExchangeManager):
def __init__(self, config = {} ):
ExchangeManager.__init__(self, "BTEST", config )
self.balance = None
self.log = logging.getLogger('crypto')
def processOrder(self, order ):
order.setExchange( self.getName() )
self.log.info("backtest exchange processing order")
if order.rate != order.MARKET:
r = { "uuid" : "test-{}".format(uuid.uuid4()) }
order.ref_id = r["uuid"]
order.status = order.OPEN
order.meta["api"] = {
"create": r
}
res = order.save()
self.log.info("save results {}".format(res))
return Result(True,"success",r)
else:
            return Result.fail("Market orders not allowed on the backtest exchange")
def syncOrder(self,order):
if order.status < order.TERMINATED_STATE:
status = order.status
#results = self.api.account_get_order( order.ref_id )
#data = results.getData()
if order.order_type == Order.SELL:
order.status = Order.COMPLETED
elif order.order_type == Order.BUY:
order.status = Order.FILLED
if status != order.status:
order.save()
if order.status == order.COMPLETED:
assocorder = Order.findById(order.assoc_id)
if assocorder.isOk():
aorder = assocorder.data["results"][0]
aorder.status = Order.COMPLETED
self.log.info("found associated order {}".format(aorder.ref_id))
aorder.meta["sold_at"] = float(order.rate)
aorder.assoc_id = order.pkey
res = aorder.save()
self.log.info("saved associated order {}".format(res))
return True
def getBalance(self,currency):
return 10000
def getBalances(self):
return {}
|
python
|
# Import libraries
import matplotlib.pyplot as plt
import numpy as np
# Draw
plt.title("Lines") # put title on plot
plt.plot([-4,2], [-2,-2], "b") # Plot the lines to draw a house
plt.plot([-4,-1], [2,3], "b")
plt.plot([-1,1], [3,5], "b")
plt.plot([2,4], [-2,0], "b")
plt.plot([1,4], [5,4], "b")
plt.plot([1,-2], [5,4], "b")
plt.plot([-4,-2], [2,4], "b")
plt.plot([4,4], [4,0], "b")
plt.plot([-1,2], [3,2], "b")
plt.plot([-4,-4], [-2,2], "b")
plt.plot([2,4], [2,4], "b")
plt.plot([2,2], [-2,2], "b")
plt.show() #display the plot
|
python
|
# -*- coding: utf-8 -*-
import sys
from optparse import OptionParser;
from xml.dom import minidom;
import re
import os
import csv
import hashlib
import shutil
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from HorizonBuildFileUtil import HorizonBuildFileUtil
import subprocess
class HorizonUE4Build(object):
"""description of class"""
def __init__(self):
#current tool version is 1
self.m_iCodeVersion = 1
self.m_sConfig = "default";
self.m_sOutReportFilePath = "Output/HorizonUE4BuildReport.log"
self.m_sClean = False
def __generateOptionParser__(self):
parser = OptionParser();
parser.add_option("--config", dest="config",
default="./Config/HorizonUE4Build/UE4Build_sample.xml",
help="config file", metavar="FILE")
parser.add_option("--clean", action="store_true", dest="clean")
parser.add_option("--engine", dest="unreal_engine_root",
default="UnrealEngineRoot",
help="root path of unreal engine", metavar="FILE")
parser.add_option("--project", dest="project_file_full_path",
default="project_file_full_path",
help="project_file_full_path", metavar="project_file_full_path")
parser.add_option("--build_platform", dest="build_platform",
default="win64",
help="ex: Win64, Win32, Android...", metavar="build_platform")
parser.add_option("--build_config", dest="build_config",
default="win64",
help="ex: Win64, Win32, Android...", metavar="build_config")
parser.add_option("--archive", dest="build_archive_path",
default="./Archive/Build/",
help="build_archive_path", metavar="build_archive_path")
parser.add_option("--buildclient", action="store_true", dest="buildclient")
parser.add_option("--buildserver", action="store_true", dest="buildserver")
parser.add_option("--cookclient", action="store_true", dest="cookclient")
parser.add_option("--cookserver", action="store_true", dest="cookserver")
parser.add_option("--crosscompile", action="store_true", dest="crosscompile")
return parser;
def init(self):
        print("current folder:" + os.getcwd() + "\n")
parser = self.__generateOptionParser__()
(self.options, self.args) = parser.parse_args()
print("options:" + str(self.options))
print("args" + str(self.args))
if(self.options.config != None):
self.m_sConfig = self.options.config;
if(self.options.clean != None):
self.m_sClean = self.options.clean;
if(self.options.unreal_engine_root != None):
self.m_sUnrealEngineRoot = self.options.unreal_engine_root;
if(self.options.project_file_full_path != None):
self.m_sProjectFileFullPath = self.options.project_file_full_path;
if(self.options.build_platform != None):
self.m_sBuildPlatform = self.options.build_platform;
if(self.options.build_config != None):
self.m_sBuildConfig = self.options.build_config;
if(self.options.build_archive_path != None):
self.m_sBuildArchivePath = self.options.build_archive_path;
print("m_sUnrealEngineRoot:" + str(self.m_sUnrealEngineRoot))
print("m_sProjectFileFullPath:" + str(self.m_sProjectFileFullPath))
print("m_sBuildPlatform:" + str(self.m_sBuildPlatform))
print("m_sBuildArchivePath:" + str(self.m_sBuildArchivePath))
#xmldoc = minidom.parse(self.m_sConfig)
#self.m_sHorizonEngineRoot = os.path.abspath(xmldoc.getElementsByTagName('UnrealEngineRoot')[0].firstChild.nodeValue);
def execute(self):
HorizonBuildFileUtil.HorizonBuildFileUtil.EnsureDir(self.m_sOutReportFilePath)
reportFile = open(self.m_sOutReportFilePath, 'w', encoding = 'utf-8')
reportFile.truncate()
reportFile.close()
if(self.options.cookclient != None):
self.cookClient()
if(self.options.cookserver != None):
self.cookServer()
#self.__buildEngine()
if(self.options.buildclient != None):
self.buildClient()
if(self.options.buildserver != None):
self.buildServer()
def buildClient(self):
bSuccess = False
self.__buildClientEditor()
reportFile = open(self.m_sOutReportFilePath, 'a', encoding = 'utf-8')
sCmd = '"{UNREAL_ENGINE_ROOT}/Engine/Build/BatchFiles/RunUAT.{EXT}" BuildCookRun \
-nocompileeditor -nop4 \
-project="{PROJECT_FILE_FULL_PATH}" -cook -stage -archive -archivedirectory="{BUILD_ARCHIVE_PATH}" \
-package -clientconfig={BUILD_CONFIG} \
-SKIPEDITORCONTENT -pak -prereqs -nodebuginfo -platform={BUILD_PLATFORM} \
-build -CrashReporter -utf8output -compile'
sCmd = self.__getBuildCommand(sCmd)
HorizonBuildFileUtil.HorizonBuildFileUtil.LogInfo(reportFile, "==================" + sCmd)
result = subprocess.run(sCmd, shell=True)
if(result.returncode == 0):
bSuccess = True
reportFile.close()
return bSuccess
def cookClient(self):
bSuccess = False
reportFile = open(self.m_sOutReportFilePath, 'a', encoding = 'utf-8')
sCmd = '"{UNREAL_ENGINE_ROOT}/Engine/Build/BatchFiles/RunUAT.{EXT}" BuildCookRun \
-project="{PROJECT_FILE_FULL_PATH}" \
-noP4 -platform={BUILD_PLATFORM} \
-clientconfig={BUILD_CONFIG} -serverconfig={BUILD_CONFIG} \
-cook -allmaps -NoCompile -stage \
-pak -archive -archivedirectory="{BUILD_ARCHIVE_PATH}"'
sCmd = self.__getBuildCommand(sCmd)
HorizonBuildFileUtil.HorizonBuildFileUtil.LogInfo(reportFile, "==================" + sCmd)
result = subprocess.run(sCmd, shell=True)
if(result.returncode == 0):
bSuccess = True
reportFile.close()
return bSuccess
def buildServer(self):
bSuccess = False
#self.__buildServerEditor()
reportFile = open(self.m_sOutReportFilePath, 'a', encoding = 'utf-8')
sCmd = '"{UNREAL_ENGINE_ROOT}/Engine/Build/BatchFiles/RunUAT.{EXT}" BuildCookRun \
-nocompileeditor -nop4 \
-project="{PROJECT_FILE_FULL_PATH}" -cook -stage -archive -archivedirectory="{BUILD_ARCHIVE_PATH}" \
-package -server -serverconfig={BUILD_CONFIG} -noclient \
-SKIPEDITORCONTENT -pak -prereqs -nodebuginfo -platform={BUILD_PLATFORM} \
-build -CrashReporter -utf8output -compile'
sCmd = self.__getBuildCommand(sCmd)
HorizonBuildFileUtil.HorizonBuildFileUtil.LogInfo(reportFile, "==================" + sCmd)
result = subprocess.run(sCmd, shell=True)
if(result.returncode == 0):
bSuccess = True
reportFile.close()
return bSuccess
def cookServer(self):
bSuccess = False
self.__buildClientEditor()
reportFile = open(self.m_sOutReportFilePath, 'a', encoding = 'utf-8')
sCmd = '"{UNREAL_ENGINE_ROOT}/Engine/Build/BatchFiles/RunUAT.{EXT}" BuildCookRun \
-project="{PROJECT_FILE_FULL_PATH}" \
-noP4 -platform={BUILD_PLATFORM} \
-clientconfig={BUILD_CONFIG} -serverconfig={BUILD_CONFIG} \
-cook -server -serverplatform={BUILD_PLATFORM} -noclient -NoCompile -stage \
-pak -archive -archivedirectory="{BUILD_ARCHIVE_PATH}"'
sCmd = self.__getBuildCommand(sCmd)
HorizonBuildFileUtil.HorizonBuildFileUtil.LogInfo(reportFile, "==================" + sCmd)
result = subprocess.run(sCmd, shell=True)
if(result.returncode == 0):
bSuccess = True
reportFile.close()
return bSuccess
#========================private function==============================
def __buildEngine(self):
# for fix error: https://answers.unrealengine.com/questions/409205/automated-build-system-errors-ue4editor-xdll-missi.html
bSuccess = False
reportFile = open(self.m_sOutReportFilePath, 'a', encoding = 'utf-8')
sCmd = '"{UNREAL_ENGINE_ROOT}/Engine/Binaries/DotNET/UnrealBuildTool.exe" \
UE4Game {BUILD_PLATFORM} {BUILD_CONFIG} -waitmutex -DEPLOY'
sBuildTarget = os.path.splitext(os.path.basename(self.m_sProjectFileFullPath))[0]
sCmd = sCmd.format(
UNREAL_ENGINE_ROOT=self.m_sUnrealEngineRoot,
BUILD_TARGET=sBuildTarget,
PROJECT_FILE_FULL_PATH=self.m_sProjectFileFullPath,
BUILD_PLATFORM=self.m_sBuildPlatform,
BUILD_CONFIG=self.m_sBuildConfig)
HorizonBuildFileUtil.HorizonBuildFileUtil.LogInfo(reportFile, "==================" + sCmd)
result = subprocess.run(sCmd, shell=True)
if(result.returncode == 0):
bSuccess = True
reportFile.close()
return bSuccess
def __buildClientEditor(self):
# for fix error: https://answers.unrealengine.com/questions/409205/automated-build-system-errors-ue4editor-xdll-missi.html
bSuccess = False
reportFile = open(self.m_sOutReportFilePath, 'a', encoding = 'utf-8')
sCmd = '"{UNREAL_ENGINE_ROOT}/Engine/Binaries/DotNET/UnrealBuildTool.exe" \
{BUILD_TARGET} {BUILD_PLATFORM} {BUILD_CONFIG} -project="{PROJECT_FILE_FULL_PATH}" \
-editorrecompile -progress -noubtmakefiles -NoHotReloadFromIDE -2015'
sBuildTarget = os.path.splitext(os.path.basename(self.m_sProjectFileFullPath))[0]
sCmd = sCmd.format(
UNREAL_ENGINE_ROOT=self.m_sUnrealEngineRoot,
BUILD_TARGET=sBuildTarget,
PROJECT_FILE_FULL_PATH=self.m_sProjectFileFullPath,
BUILD_PLATFORM=self.m_sBuildPlatform,
BUILD_CONFIG=self.m_sBuildConfig)
HorizonBuildFileUtil.HorizonBuildFileUtil.LogInfo(reportFile, "==================" + sCmd)
result = subprocess.run(sCmd, shell=True)
if(result.returncode == 0):
bSuccess = True
reportFile.close()
return bSuccess
def __buildServerEditor(self):
# for fix error: https://answers.unrealengine.com/questions/409205/automated-build-system-errors-ue4editor-xdll-missi.html
bSuccess = False
reportFile = open(self.m_sOutReportFilePath, 'a', encoding = 'utf-8')
sCmd = '"{UNREAL_ENGINE_ROOT}/Engine/Binaries/DotNET/UnrealBuildTool.exe" \
{BUILD_TARGET} {BUILD_CONFIG} {BUILD_PLATFORM} -project="{PROJECT_FILE_FULL_PATH}" \
-editorrecompile -progress -noubtmakefiles -NoHotReloadFromIDE -2015'
sBuildTarget = os.path.splitext(os.path.basename(self.m_sProjectFileFullPath))[0] + "Server"
sCmd = sCmd.format(
UNREAL_ENGINE_ROOT=self.m_sUnrealEngineRoot,
BUILD_TARGET=sBuildTarget,
PROJECT_FILE_FULL_PATH=self.m_sProjectFileFullPath,
BUILD_PLATFORM=self.m_sBuildPlatform,
BUILD_CONFIG=self.m_sBuildConfig)
HorizonBuildFileUtil.HorizonBuildFileUtil.LogInfo(reportFile, "==================" + sCmd)
result = subprocess.run(sCmd, shell=True)
if(result.returncode == 0):
bSuccess = True
reportFile.close()
return bSuccess
def __getExt(self):
sExt = "sh"
bIsWindows = sys.platform.startswith('win')
if(bIsWindows):
sExt = "bat"
else:
sExt = "sh"
return sExt
def __getBuildCommand(self, sCmd):
sExt = self.__getExt()
sResult = sCmd.format(
UNREAL_ENGINE_ROOT=self.m_sUnrealEngineRoot,
EXT=sExt,
PROJECT_FILE_FULL_PATH=self.m_sProjectFileFullPath,
BUILD_PLATFORM=self.m_sBuildPlatform,
BUILD_CONFIG=self.m_sBuildConfig,
BUILD_ARCHIVE_PATH=self.m_sBuildArchivePath
)
return sResult
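# Hypothetical usage sketch (added for illustration; the paths and option values below are
# placeholders, not verified defaults of this tool):
#
#   build = HorizonUE4Build()
#   build.init()      # parses --engine, --project, --build_platform, --build_config, ... from sys.argv
#   build.execute()   # runs the requested cook/build steps and appends to the report log
#
# e.g. invoked as:
#   python <this_script>.py --engine /opt/UnrealEngine --project /work/MyGame/MyGame.uproject \
#       --build_platform Win64 --build_config Development --archive ./Archive/Build/ --buildclient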
|
python
|
"""Flategy - a basic playable strategy game & bot."""
import os
import io
import subprocess
import tempfile
import cairo
import IPython.display
import numpy as np
class State:
__slots__ = ['position', 'radius', 'world_shape']
def __init__(self, position, radius, world_shape):
self.position = position
self.radius = radius
self.world_shape = world_shape
def to_dict(self):
return dict(position=self.position,
radius=self.radius,
world_shape=self.world_shape)
def replace(self, **args):
d = self.to_dict()
d.update(args)
return type(self)(**d)
@property
def world_aspect(self):
(left, top), (right, bottom) = self.world_shape
return (bottom - top) / (right - left)
# Rendering
def draw(self, surface, width):
ctx = cairo.Context(surface)
# set up the basic view transformation
(left, top), (right, bottom) = self.world_shape
scale = width / (right - left)
ctx.scale(scale, scale)
ctx.translate(-left, -top)
        ctx.rectangle(left, top, right - left, bottom - top)  # cairo's rectangle takes (x, y, width, height)
ctx.set_source_rgb(255, 255, 255)
ctx.fill()
ctx.set_source_rgb(0, 0, 0)
# render the world
for pos, r in zip(self.position, self.radius):
ctx.arc(pos[0], pos[1], r, 0, 2*np.pi)
ctx.fill()
def to_svg(self, width):
f = io.BytesIO()
with cairo.SVGSurface(f, width, int(self.world_aspect * width)) as surface:
self.draw(surface, width)
f.seek(0)
return f.read()
def _repr_svg_(self):
return self.to_svg(256).decode('utf8')
@classmethod
def video(cls, states, filename, dt, width):
with tempfile.TemporaryDirectory() as tmp:
# Render PNG frames
for n, frame in enumerate(states):
with cairo.ImageSurface(cairo.FORMAT_ARGB32, width, int(frame.world_aspect * width)) as surface:
frame.draw(surface, width)
surface.write_to_png(os.path.join(tmp, 'frame_{:04d}.png'.format(n)))
# Convert PNG* => MP4
subprocess.check_call(['ffmpeg', '-i', os.path.join(tmp, 'frame_%04d.png'),
'-y', '-r', str(int(1/dt)), '-pix_fmt', 'yuv420p', filename])
return IPython.display.Video(filename)
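# Minimal usage sketch (illustrative only; the positions, radii and world bounds below are
# made-up values, and rendering assumes pycairo is installed):
def _demo_state():
    positions = np.array([[10.0, 10.0], [30.0, 20.0]])
    radii = np.array([2.0, 3.0])
    world = ((0.0, 0.0), (64.0, 48.0))  # ((left, top), (right, bottom))
    return State(position=positions, radius=radii, world_shape=world)
# In a notebook, `_demo_state()` renders the two units as filled circles via _repr_svg_().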
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from transformers import BertModel, RobertaModel
class EmbeddingGeneratorGLOVE(nn.Module):
def __init__(self, config, path):
super(EmbeddingGeneratorGLOVE, self).__init__()
self.config = config
print('Loading Pre-trained Glove Embeddings...')
embed_weights = np.load(path)
vocab_size, dim = embed_weights.shape
embed_weights = torch.FloatTensor(embed_weights)
self.embedding_model = nn.Embedding(vocab_size, dim, padding_idx=config.PAD_IDX)
self.embedding_model.weight = nn.Parameter(embed_weights)
def forward(self, xs):
# [batch_size, max_seq_len, hidden_dim]
xs = self.embedding_model(xs)
return xs
class EembeddingGeneratorBERT(nn.Module):
"""
Pretrained Language Model - BERT
"""
def __init__(self, config):
super(EembeddingGeneratorBERT, self).__init__()
self.embedding_model = BertModel.from_pretrained(
config.PRETRAINED_BERT_NAME,
return_dict=True
)
self.embedding_model.to(config.DEVICE)
def forward(self, xs, attn_mask):
xs = self.embedding_model(xs, attention_mask=attn_mask)
# [batch_size, max_seq_len, hidden_dim]
xs = xs.last_hidden_state # extract the last hidden layer
return xs
class EembeddingGeneratorRoBERTa(nn.Module):
"""
Pretrained Language Model - RoBERTa
"""
def __init__(self, config):
super(EembeddingGeneratorRoBERTa, self).__init__()
self.embedding_model = RobertaModel.from_pretrained(
config.PRETRAINED_ROBERTA_NAME,
return_dict=True
)
self.embedding_model.to(config.DEVICE)
def forward(self, xs, attn_mask):
xs = self.embedding_model(xs, attention_mask=attn_mask)
# [batch_size, max_seq_len, hidden_dim]
xs = xs.last_hidden_state # extract the last hidden layer
return xs
class CharacterEmbedding(nn.Module):
'''
In : (N, sentence_len, word_len)
Out: (N, sentence_len, c_embd_size)
Reference: https://github.com/jojonki/BiDAF/blob/master/layers/char_embedding.py
'''
def __init__(self, config):
super(CharacterEmbedding, self).__init__()
self.config = config
self.embd_size = config.CHAR_EMBED_DIM
self.embedding = nn.Embedding(config.CHAR_VOCAB_SIZE, config.CHAR_EMBED_DIM, padding_idx=config.PAD_IDX)
# nn.Conv1d(in_channels, out_channels, kernel_size, stride=1, padding=0, ...
self.conv = nn.ModuleList([nn.Conv2d(1, config.CHAR_EMBED_CNN_NUM_OUT_CHANNELS,
(f[0], f[1])) for f in config.CHAR_EMBED_CHAR_FILTERS])
self.dropout = nn.Dropout(config.CHAR_EMBED_DROPOUT_RATE)
def forward(self, x):
# x: (N, seq_len, word_len)
input_shape = x.size()
# bs = x.size(0)
# seq_len = x.size(1)
word_len = x.size(2)
x = x.view(-1, word_len) # (N*seq_len, word_len)
x = self.embedding(x) # (N*seq_len, word_len, c_embd_size)
x = x.view(*input_shape, -1) # (N, seq_len, word_len, c_embd_size)
x = x.sum(2) # (N, seq_len, c_embd_size)
# CNN
x = x.unsqueeze(1) # (N, Cin, seq_len, c_embd_size), insert Channnel-In dim
# Conv2d
# Input : (N,Cin, Hin, Win )
# Output: (N,Cout,Hout,Wout)
x = [F.relu(conv(x)) for conv in self.conv] # (N, Cout, seq_len, c_embd_size-filter_w+1). stride == 1
# [(N,Cout,Hout,Wout) -> [(N,Cout,Hout*Wout)] * len(filter_heights)
# [(N, seq_len, c_embd_size-filter_w+1, Cout)] * len(filter_heights)
x = [xx.view((xx.size(0), xx.size(2), xx.size(3), xx.size(1))) for xx in x]
# maxpool like
# [(N, seq_len, Cout)] * len(filter_heights)
x = [torch.sum(xx, 2) for xx in x]
# (N, seq_len, Cout==word_embd_size)
x = torch.cat(x, 1)
x = self.dropout(x)
return x
class EembeddingGeneratorPOS(nn.Module):
def __init__(self, config):
super(EembeddingGeneratorPOS, self).__init__()
self.embedding_model = nn.Embedding(config.POS_VOCAB_SIZE, config.POS_EMBED_DIM, padding_idx=config.PAD_IDX)
self.embedding_model.to(config.DEVICE)
def forward(self, xs):
xs = self.embedding_model(xs)
# [batch_size, max_seq_len, hidden_dim]
return xs
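# Shape-check sketch for CharacterEmbedding (illustrative; the config values below are
# hypothetical stand-ins for the project's real config object):
from types import SimpleNamespace
_demo_char_cfg = SimpleNamespace(
    CHAR_VOCAB_SIZE=50, CHAR_EMBED_DIM=16, PAD_IDX=0,
    CHAR_EMBED_CNN_NUM_OUT_CHANNELS=32,
    CHAR_EMBED_CHAR_FILTERS=[(1, 16)],  # one (height, width) filter spanning the char-embedding dim
    CHAR_EMBED_DROPOUT_RATE=0.1,
)
def _demo_char_embedding():
    layer = CharacterEmbedding(_demo_char_cfg)
    x = torch.randint(0, 50, (2, 7, 5))  # (batch, sentence_len, word_len)
    return layer(x).shape                # -> torch.Size([2, 7, 32])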
|
python
|
# -*- coding: utf-8 -*-
import time as builtin_time
import pandas as pd
import numpy as np
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
class Time():
"""
A class object to get time.
Its methods (functions) are:
- reset()
- get()
See those for further informations.
Parameters
----------
None
Returns
----------
None
Example
----------
>>> import neurokit as nk
>>> myclock = nk.Time()
>>> time_passed_since_myclock_creation = myclock.get()
>>> myclock.reset()
>>> time_passed_since_reset = myclock.get()
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
Dependencies
----------
- time
"""
def __init__(self):
self.clock = builtin_time.clock()
def reset(self):
"""
Reset the clock of the Time object.
Parameters
----------
None
Returns
----------
None
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neuropsydia_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
Dependencies
----------
- time
"""
self.clock = builtin_time.clock()
def get(self, reset=True):
"""
Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
Dependencies
----------
- time
"""
t = (builtin_time.clock()-self.clock)*1000
if reset is True:
self.reset()
return(t)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def find_following_duplicates(array):
"""
Find the duplicates that are following themselves.
Parameters
----------
array : list or array
        A list containing duplicates.
Returns
----------
list
A list containing True for each unique and False for following duplicates.
Example
----------
>>> import neurokit as nk
>>> mylist = ["a","a","b","a","a","a","c","c","b","b"]
>>> nk.find_following_duplicates(mylist)
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
Dependencies
----------
- numpy
"""
array = array.copy()
uniques = []
for i in range(len(array)):
if i == 0:
uniques.append(True)
else:
if array[i] == array[i-1]:
uniques.append(False)
else:
uniques.append(True)
# Find index of uniques
indices = np.where(uniques)
return(uniques)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def find_closest_in_list(number, array, direction="both", strictly=False):
"""
Find the closest number in the array from x.
Parameters
----------
number : float
The number.
array : list
The list to look in.
direction : str
"both" for smaller or greater, "greater" for only greater numbers and "smaller" for the closest smaller.
strictly : bool
        True to require a strictly smaller/greater value, False to also accept an equal value.
Returns
----------
closest = int
Example
----------
>>> import neurokit as nk
>>> nk.find_closest_in_list(1.8, [3, 5, 6, 1, 2])
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
"""
if direction == "both":
closest = min(array, key=lambda x:abs(x-number))
if direction == "smaller":
if strictly is True:
closest = max(x for x in array if x < number)
else:
closest = max(x for x in array if x <= number)
if direction == "greater":
if strictly is True:
closest = min(filter(lambda x: x > number, array))
else:
closest = min(filter(lambda x: x >= number, array))
return(closest)
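# A few concrete calls showing the direction/strictly combinations (values chosen for illustration):
# >>> find_closest_in_list(3, [1, 2, 3, 4], direction="smaller")                 # 3 (equal value allowed)
# >>> find_closest_in_list(3, [1, 2, 3, 4], direction="smaller", strictly=True)  # 2
# >>> find_closest_in_list(3, [1, 2, 3, 4], direction="greater", strictly=True)  # 4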
|
python
|
import workalendar.africa
import workalendar.america
import workalendar.asia
import workalendar.europe
import workalendar.oceania
import workalendar.usa
from pywatts.core.exceptions.util_exception import UtilException
def _init_calendar(continent: str, country: str):
""" Check if continent and country are correct and return calendar object.
:param continent: Continent where the country or region is located.
:type continent: str
:param country: Country or region to use for the calendar object.
:type country: str
:return: Returns workalendar object to use for holiday lookup.
:rtype: workalendar object
"""
if hasattr(workalendar, continent.lower()):
module = getattr(workalendar, continent.lower())
if hasattr(module, country):
return getattr(module, country)()
else:
raise UtilException(f"The country {country} does not fit to the continent {continent}")
else:
raise UtilException(f"The continent {continent} does not exist.")
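# Usage sketch (illustrative; "Europe"/"Germany" follow workalendar's module and class naming,
# and the chosen date is assumed to be a public holiday there):
# >>> cal = _init_calendar("Europe", "Germany")
# >>> cal.is_working_day(datetime.date(2021, 1, 1))   # False, New Year's Day
# >>> _init_calendar("Europe", "Atlantis")            # raises UtilException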
|
python
|
from django.shortcuts import render
from morad.models import Car
from django.views.generic import (ListView,DetailView,DeleteView,UpdateView,CreateView)
from django.urls.base import reverse_lazy
class ListCars(ListView):
template_name = 'cars/cars.html'
model = Car
class DetailCar(DetailView):
template_name = 'cars/details.html'
model = Car
class CreateCar(CreateView):
template_name = 'cars/create.html'
model = Car
fields = ['name','color','type_car','model_car','description','honer']
class UpdateCar(UpdateView):
template_name = 'cars/update.html'
model = Car
fields = ['name','color','type_car','model_car','description','honer']
class DeleteCar(DeleteView):
template_name = 'cars/delete.html'
model = Car
success_url = reverse_lazy("list-cars")
|
python
|
"""
Tests for string_utils.py
"""
import pytest
from django.test import TestCase
from common.djangoapps.util.string_utils import str_to_bool
class StringUtilsTest(TestCase):
"""
Tests for str_to_bool.
"""
def test_str_to_bool_true(self):
assert str_to_bool('True')
assert str_to_bool('true')
assert str_to_bool('trUe')
def test_str_to_bool_false(self):
assert not str_to_bool('Tru')
assert not str_to_bool('False')
assert not str_to_bool('false')
assert not str_to_bool('')
assert not str_to_bool(None)
assert not str_to_bool('anything')
def test_str_to_bool_errors(self):
def test_raises_error(val):
with pytest.raises(AttributeError):
assert not str_to_bool(val)
test_raises_error({})
test_raises_error([])
test_raises_error(1)
test_raises_error(True)
|
python
|
import sys
from datetime import timedelta
def print_expected_call_message(additional_message):
print(f"""{additional_message}
Expected application call:
python3 regex_text.py [searched phrase] [left_padding] [right_padding]
Example call:
python3 regex_text.py "I don't know" 2 3""")
def handle_arguments():
if not (arg_len := len(sys.argv)) == 4:
        print_expected_call_message(f'Expected three arguments, got {arg_len-1}.')
exit()
try:
phrase = sys.argv[1]
padding_left, padding_right = [timedelta(int(number)) for number in sys.argv[2:4]]
return([phrase, padding_left, padding_right])
except:
        print_expected_call_message('An error has occurred.')
exit()
|
python
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from robot.utils import (IRONPYTHON, JYTHON, py3to2, Sortable, secs_to_timestr,
timestr_to_secs, WINDOWS)
from robot.errors import TimeoutError, DataError, FrameworkError
if JYTHON:
from .jython import Timeout
elif IRONPYTHON:
from .ironpython import Timeout
elif WINDOWS:
from .windows import Timeout
else:
from .posix import Timeout
@py3to2
class _Timeout(Sortable):
def __init__(self, timeout=None, variables=None):
self.string = timeout or ''
self.secs = -1
self.starttime = -1
self.error = None
if variables:
self.replace_variables(variables)
@property
def active(self):
return self.starttime > 0
def replace_variables(self, variables):
try:
self.string = variables.replace_string(self.string)
if not self:
return
self.secs = timestr_to_secs(self.string)
self.string = secs_to_timestr(self.secs)
except (DataError, ValueError) as err:
self.secs = 0.000001 # to make timeout active
self.error = (u'Setting %s timeout failed: %s'
% (self.type.lower(), err))
def start(self):
if self.secs > 0:
self.starttime = time.time()
def time_left(self):
if not self.active:
return -1
elapsed = time.time() - self.starttime
# Timeout granularity is 1ms. Without rounding some timeout tests fail
# intermittently on Windows, probably due to threading.Event.wait().
return round(self.secs - elapsed, 3)
def timed_out(self):
return self.active and self.time_left() <= 0
def run(self, runnable, args=None, kwargs=None):
if self.error:
raise DataError(self.error)
if not self.active:
raise FrameworkError('Timeout is not active')
timeout = self.time_left()
error = TimeoutError(self._timeout_error,
test_timeout=isinstance(self, TestTimeout))
if timeout <= 0:
raise error
executable = lambda: runnable(*(args or ()), **(kwargs or {}))
return Timeout(timeout, error).execute(executable)
def get_message(self):
if not self.active:
return '%s timeout not active.' % self.type
if not self.timed_out():
return '%s timeout %s active. %s seconds left.' \
% (self.type, self.string, self.time_left())
return self._timeout_error
@property
def _timeout_error(self):
return '%s timeout %s exceeded.' % (self.type, self.string)
def __str__(self):
return self.string
def __bool__(self):
return bool(self.string and self.string.upper() != 'NONE')
@property
def _sort_key(self):
return not self.active, self.time_left()
def __eq__(self, other):
return self is other
def __ne__(self, other):
return not self == other
def __hash__(self):
return id(self)
class TestTimeout(_Timeout):
type = 'Test'
_keyword_timeout_occurred = False
def __init__(self, timeout=None, variables=None, rpa=False):
if rpa:
self.type = 'Task'
_Timeout.__init__(self, timeout, variables)
def set_keyword_timeout(self, timeout_occurred):
if timeout_occurred:
self._keyword_timeout_occurred = True
def any_timeout_occurred(self):
return self.timed_out() or self._keyword_timeout_occurred
class KeywordTimeout(_Timeout):
type = 'Keyword'
|
python
|
from import_export import resources
from electricity.models import FeedBack
class FeedBackResource(resources.ModelResource):
class Meta:
model = FeedBack
|
python
|
from nose import with_setup
from pybbn.causality.ace import Ace
from pybbn.graph.dag import Bbn
from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.node import BbnNode
from pybbn.graph.variable import Variable
def setup():
"""
Setup.
:return: None.
"""
pass
def teardown():
"""
Teardown.
:return: None.
"""
pass
def get_drug_network():
gender_probs = [0.49, 0.51]
drug_probs = [0.23323615160349853, 0.7667638483965015,
0.7563025210084033, 0.24369747899159663]
recovery_probs = [0.31000000000000005, 0.69,
0.27, 0.73,
0.13, 0.87,
0.06999999999999995, 0.93]
X = BbnNode(Variable(1, 'drug', ['false', 'true']), drug_probs)
Y = BbnNode(Variable(2, 'recovery', ['false', 'true']), recovery_probs)
Z = BbnNode(Variable(0, 'gender', ['female', 'male']), gender_probs)
bbn = Bbn() \
.add_node(X) \
.add_node(Y) \
.add_node(Z) \
.add_edge(Edge(Z, X, EdgeType.DIRECTED)) \
.add_edge(Edge(Z, Y, EdgeType.DIRECTED)) \
.add_edge(Edge(X, Y, EdgeType.DIRECTED))
return bbn
@with_setup(setup, teardown)
def test_ace():
"""
Tests getting average causal effect.
"""
bbn = get_drug_network()
ace = Ace(bbn)
results = ace.get_ace('drug', 'recovery', 'true')
t = results['true']
f = results['false']
    assert abs(t - 0.832) < 0.001
    assert abs(f - 0.782) < 0.001
|
python
|
__author__ = 'elsabakiu, neilthemathguy, dmorina'
from rest_framework import status, viewsets
from rest_framework.response import Response
from crowdsourcing.serializers.project import *
from rest_framework.decorators import detail_route, list_route
from crowdsourcing.models import Module, Category, Project, Requester, ProjectRequester
from crowdsourcing.permissions.project import IsProjectCollaborator
from rest_framework.permissions import IsAuthenticated
from rest_framework import mixins
from django.shortcuts import get_object_or_404
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.filter(deleted=False)
serializer_class = CategorySerializer
@detail_route(methods=['post'])
def update_category(self, request, id=None):
category_serializer = CategorySerializer(data=request.data)
category = self.get_object()
if category_serializer.is_valid():
category_serializer.update(category,category_serializer.validated_data)
return Response({'status': 'updated category'})
else:
return Response(category_serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
def list(self, request, *args, **kwargs):
try:
category = self.queryset
categories_serialized = CategorySerializer(category, many=True)
return Response(categories_serialized.data)
except:
return Response([])
def destroy(self, request, *args, **kwargs):
category_serializer = CategorySerializer()
category = self.get_object()
category_serializer.delete(category)
return Response({'status': 'deleted category'})
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.filter(deleted=False)
serializer_class = ProjectSerializer
@detail_route(methods=['post'], permission_classes=[IsProjectCollaborator])
def update_project(self, request, pk=None):
project_serializer = ProjectSerializer(data=request.data)
project = self.get_object()
if project_serializer.is_valid():
project_serializer.update(project,project_serializer.validated_data)
return Response({'status': 'updated project'})
else:
return Response(project_serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
def list(self, request, *args, **kwargs):
try:
projects = Project.objects.all()
projects_serialized = ProjectSerializer(projects, many=True)
return Response(projects_serialized.data)
except:
return Response([])
def destroy(self, request, *args, **kwargs):
project_serializer = ProjectSerializer()
project = self.get_object()
project_serializer.delete(project)
return Response({'status': 'deleted project'})
class ModuleViewSet(viewsets.ModelViewSet):
from crowdsourcing.models import Module
queryset = Module.objects.all()
serializer_class = ModuleSerializer
class ProjectRequesterViewSet(mixins.CreateModelMixin, mixins.DestroyModelMixin,
mixins.RetrieveModelMixin, viewsets.GenericViewSet):
serializer_class = ProjectRequesterSerializer
queryset = ProjectRequester.objects.all()
#permission_classes=(IsProjectCollaborator,)
#TODO to be moved under Project
def retrieve(self, request, *args, **kwargs):
project_requester = get_object_or_404(self.queryset, project=get_object_or_404(Project.objects.all(),id=kwargs['pk']))
serializer = ProjectRequesterSerializer(instance=project_requester)
return Response(serializer.data, status.HTTP_200_OK)
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import ImplicitGraph
from torch.nn import Parameter
from utils import get_spectral_rad, SparseDropout
import torch.sparse as sparse
class IGNN(nn.Module):
def __init__(self, nfeat, nhid, nclass, num_node, dropout, kappa=0.9, adj_orig=None):
super(IGNN, self).__init__()
self.adj = None
self.adj_rho = None
self.adj_orig = adj_orig
#one layer with V
self.ig1 = ImplicitGraph(nfeat, nhid, num_node, kappa)
self.dropout = dropout
self.X_0 = Parameter(torch.zeros(nhid, num_node), requires_grad=False)
self.V = nn.Linear(nhid, nclass, bias=False)
def forward(self, features, adj):
if adj is not self.adj:
self.adj = adj
self.adj_rho = get_spectral_rad(adj)
x = features
x = self.ig1(self.X_0, adj, x, F.relu, self.adj_rho, A_orig=self.adj_orig).T
x = F.dropout(x, self.dropout, training=self.training)
x = self.V(x)
return x
|
python
|
from PIL import Image
import argparse
import os
import sys
current_directory = os.getcwd()
def args_check(args = None):
if(args == None):
        print("Arguments are required for execution")
parser = argparse.ArgumentParser(description="Resizer - A lightweight Image size and resolution resizer")
parser.add_argument('--input-file', '-i',
help = "Path to the input file")
parser.add_argument('--input-folder', '-if',
help = "Path to the input folder")
parser.add_argument('--resize', '-r',
help = 'Change the image/images to the specified resolution')
parser.add_argument('--reduce', '-rs',
help = 'Reduce the size of the image/images', action='store_true')
parser.add_argument('--output-file', '-o',
help = "Path to the output file")
parser.add_argument('--output-folder', '-of',
help = "Path to the output folder")
return parser.parse_args(args)
def clear_screen():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def change_res(resolution, path=None, filename=None, output_location=None, fullpath=None):
if fullpath is None:
filepath = os.path.join(path, filename)
print(filepath)
print(output_location)
image = Image.open(filepath)
if output_location is None:
change_res_path = os.path.join(current_directory, filename)
else:
change_res_path = os.path.join(output_location, filename)
new_image = image.resize(dimensions(resolution))
new_image.save(change_res_path)
print("Image saved at = " + change_res_path)
else:
filepath = fullpath
filename = os.path.basename(filepath)
image = Image.open(filepath)
if output_location is None:
change_res_path = os.path.join(current_directory, filename)
else:
change_res_path = os.path.join(output_location, filename)
new_image = image.resize(dimensions(resolution))
new_image.save(change_res_path)
print("Image saved at = " + change_res_path)
def reduce_size(path=None, filename=None, output_location=None, fullpath=None):
if fullpath is None:
filepath = os.path.join(path, filename)
image = Image.open(filepath)
if output_location is None:
reduce_size_path = os.path.join(current_directory, filename)
else:
reduce_size_path = os.path.join(output_location, filename)
else:
filepath = fullpath
filename = os.path.basename(fullpath)
image = Image.open(filepath)
if output_location is None:
reduce_size_path = os.path.join(current_directory, filename)
else:
reduce_size_path = os.path.join(output_location,filename)
image.save(reduce_size_path, optimize = True, quality = 85)
    print("Image saved at = " + reduce_size_path)
def dimensions(resolution):
dimensions = resolution.split('x')
width, height = int(dimensions[0]), int(dimensions[1])
print("New Height = " + str(height) + ", Width = " + str(width))
return (width, height)
def bulkChange(change_type, input_location, output_folder=None, resolution=None):
imgExts = ['png','bmp','jpg']
if input_location is None:
print("Input Location can't be empty. Please try again.")
else:
for path, dirs, files in os.walk(input_location):
for fn in files:
print(path, fn)
ext = fn[-3:].lower()
if ext not in imgExts:
continue
                if change_type == 'change_resolution':
                    change_res(resolution, path, fn, output_location=output_folder)
                elif change_type == 'reduce_size':
                    reduce_size(path, fn, output_location=output_folder)
def main():
clear_screen()
if args_check(sys.argv[1:]).input_file:
input_f = args_check(sys.argv[1:]).input_file
if args_check(sys.argv[1:]).output_file:
print(args_check(sys.argv[1:]).output_file)
output_f = args_check(sys.argv[1:]).output_file
else:
output_f = None
if args_check(sys.argv[1:]).resize:
change_type = 'change_resolution'
change_res(args_check(sys.argv[1:]).resize,fullpath=input_f, output_location=output_f)
elif args_check(sys.argv[1:]).reduce:
print(args_check(sys.argv[1:]).reduce)
change_type = 'reduce_size'
reduce_size(fullpath=input_f, output_location=output_f)
else:
print("Please specify the --change-resolution or the --reduce-size arguments")
elif args_check(sys.argv[1:]).input_folder:
input_fld = args_check(sys.argv[1:]).input_folder
if args_check(sys.argv[1:]).output_folder:
print(args_check(sys.argv[1:]).output_folder)
output_fld = args_check(sys.argv[1:]).output_folder
else:
output_fld = None
if args_check(sys.argv[1:]).resize:
change_type = 'change_resolution'
            bulkChange(change_type, input_fld, output_folder=output_fld, resolution=args_check(sys.argv[1:]).resize)
elif args_check(sys.argv[1:]).reduce:
change_type = 'reduce_size'
bulkChange(change_type, input_fld, output_folder=output_fld)
else:
print("Please enter an Input file using --input or -i. You can even use an input folder using --input-folder or -if.")
if __name__ == '__main__':
main()
|
python
|
a, b = 1, 2
print(a + b)
|
python
|
import shlex
import json
from .BaseClient import BaseClient
from .Response import JSONResponse
from . import typchk
DefaultTimeout = 10 # seconds
class ContainerClient(BaseClient):
class ContainerZerotierManager:
def __init__(self, client, container):
self._container = container
self._client = client
def info(self):
return self._client.json('corex.zerotier.info', {'container': self._container})
def list(self):
return self._client.json('corex.zerotier.list', {'container': self._container})
_raw_chk = typchk.Checker({
'container': int,
'command': {
'command': str,
'arguments': typchk.Any(),
'queue': typchk.Or(str, typchk.IsNone()),
'max_time': typchk.Or(int, typchk.IsNone()),
'stream': bool,
'tags': typchk.Or([str], typchk.IsNone()),
'id': typchk.Or(str, typchk.IsNone()),
}
})
def __init__(self, client, container):
super().__init__(client.timeout)
self._client = client
self._container = container
self._zerotier = ContainerClient.ContainerZerotierManager(client, container) # not (self) we use core0 client
@property
def container(self):
"""
:return: container id
"""
return self._container
@property
def zerotier(self):
"""
information about zerotier id
:return:
"""
return self._zerotier
def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None):
"""
        Implements the low-level command call; it builds the command structure
        and pushes it onto the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param queue: command queue (commands on the same queue are executed sequentially)
:param max_time: kill job server side if it exceeded this amount of seconds
:param stream: If True, process stdout and stderr are pushed to a special queue (stream:<id>) so
client can stream output
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Response object
"""
args = {
'container': self._container,
'command': {
'command': command,
'arguments': arguments,
'queue': queue,
'max_time': max_time,
'stream': stream,
'tags': tags,
'id': id,
},
}
# check input
self._raw_chk.check(args)
response = self._client.raw('corex.dispatch', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to dispatch command to container: %s' % result.data)
cmd_id = json.loads(result.data)
return self._client.response_for(cmd_id)
class ContainerManager():
_nic = {
'type': typchk.Enum('default', 'bridge', 'zerotier', 'vlan', 'vxlan', 'macvlan', 'passthrough'),
'id': typchk.Or(str, typchk.Missing()),
'name': typchk.Or(str, typchk.Missing()),
'hwaddr': typchk.Or(str, typchk.Missing()),
'config': typchk.Or(
typchk.Missing(),
{
'dhcp': typchk.Or(bool, typchk.IsNone(), typchk.Missing()),
'cidr': typchk.Or(str, typchk.IsNone(), typchk.Missing()),
'gateway': typchk.Or(str, typchk.IsNone(), typchk.Missing()),
'dns': typchk.Or([str], typchk.IsNone(), typchk.Missing()),
}
),
'monitor': typchk.Or(bool, typchk.Missing()),
}
_create_chk = typchk.Checker({
'root': str,
'mount': typchk.Or(
typchk.Map(str, str),
typchk.IsNone()
),
'host_network': bool,
'nics': [_nic],
'port': typchk.Or(
typchk.Map(int, int),
typchk.Map(str, int),
typchk.IsNone()
),
'privileged': bool,
'hostname': typchk.Or(
str,
typchk.IsNone()
),
'storage': typchk.Or(str, typchk.IsNone()),
'name': typchk.Or(str, typchk.IsNone()),
'identity': typchk.Or(str, typchk.IsNone()),
'env': typchk.Or(typchk.IsNone(), typchk.Map(str, str)),
'cgroups': typchk.Or(
typchk.IsNone(),
            [typchk.Length((str,), 2, 2)],  # array of (str, str) tuples, i.e. [(subsystem, name), ...]
)
})
_client_chk = typchk.Checker(
typchk.Or(int, str)
)
_nic_add = typchk.Checker({
'container': int,
'nic': _nic,
})
_nic_remove = typchk.Checker({
'container': int,
'index': int,
})
_portforward_chk = typchk.Checker({
'container': int,
'host_port': str,
'container_port': int,
})
DefaultNetworking = object()
def __init__(self, client):
self._client = client
def create(
self, root_url, mount=None, host_network=False, nics=DefaultNetworking, port=None,
hostname=None, privileged=False, storage=None, name=None, tags=None, identity=None, env=None,
cgroups=None,
):
"""
        Create a new container with the given root flist, mount points and
zerotier id, and connected to the given bridges
:param root_url: The root filesystem flist
:param mount: a dict with {host_source: container_target} mount points.
where host_source directory must exists.
host_source can be a url to a flist to mount.
:param host_network: Specify if the container should share the same network stack as the host.
            if True, container creation ignores the zerotier, bridge and port arguments below
            (no error is raised if they are provided).
:param nics: Configure the attached nics to the container
each nic object is a dict of the format
{
'type': nic_type # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note, vlan and vxlan only supported by ovs)
'id': id # depends on the type
bridge: bridge name,
zerotier: network id,
macvlan: the parent link name,
passthrough: the link name,
vlan: the vlan tag,
vxlan: the vxlan id
'name': name of the nic inside the container (ignored in zerotier type)
'hwaddr': Mac address of nic.
'config': { # config is only honored for bridge, vlan, and vxlan types
'dhcp': bool,
'cidr': static_ip # ip/mask
'gateway': gateway
'dns': [dns]
}
}
:param port: A dict of host_port: container_port pairs (only if default networking is enabled)
Example:
`port={8080: 80, 7000:7000}`
            Source Format: NUMBER, IP:NUMBER, IP/MASK:NUMBER, or DEV:NUMBER
:param hostname: Specific hostname you want to give to the container.
if None it will automatically be set to core-x,
            x being the ID of the container
:param privileged: If true, container runs in privileged mode.
:param storage: A Url to the ardb storage to use to mount the root flist (or any other mount that requires g8fs)
if not provided, the default one from core0 configuration will be used.
:param name: Optional name for the container
:param identity: Container Zerotier identity, Only used if at least one of the nics is of type zerotier
:param env: a dict with the environment variables needed to be set for the container
:param cgroups: custom list of cgroups to apply to this container on creation. formated as [(subsystem, name), ...]
            please refer to the cgroup api for more details.
"""
if nics == self.DefaultNetworking:
nics = [{'type': 'default'}]
elif nics is None:
nics = []
args = {
'root': root_url,
'mount': mount,
'host_network': host_network,
'nics': nics,
'port': port,
'hostname': hostname,
'privileged': privileged,
'storage': storage,
'name': name,
'identity': identity,
'env': env,
'cgroups': cgroups,
}
# validate input
self._create_chk.check(args)
response = self._client.raw('corex.create', args, tags=tags)
return JSONResponse(response)
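    # Illustrative call (added for exposition; the manager instance name, flist URL and
    # all other values are placeholders):
    #
    #   resp = containers.create(
    #       'https://hub.example.com/flists/ubuntu.flist',
    #       nics=[{'type': 'default'}],
    #       port={8080: 80},
    #       hostname='demo',
    #       env={'TOKEN': 'secret'},
    #   )
    #   container_id = resp.get()  # JSONResponse; .get() is expected to yield the new container id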
def list(self):
"""
List running containers
:return: a dict with {container_id: <container info object>}
"""
return self._client.json('corex.list', {})
def find(self, *tags):
"""
Find containers that matches set of tags
:param tags:
:return:
"""
tags = list(map(str, tags))
return self._client.json('corex.find', {'tags': tags})
def terminate(self, container):
"""
Terminate a container given it's id
:param container: container id
:return:
"""
self._client_chk.check(container)
args = {
'container': int(container),
}
response = self._client.raw('corex.terminate', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to terminate container: %s' % result.data)
def nic_add(self, container, nic):
"""
Hot plug a nic into a container
:param container: container ID
:param nic: {
'type': nic_type # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note, vlan and vxlan only supported by ovs)
'id': id # depends on the type
bridge: bridge name,
zerotier: network id,
macvlan: the parent link name,
passthrough: the link name,
vlan: the vlan tag,
vxlan: the vxlan id
'name': name of the nic inside the container (ignored in zerotier type)
'hwaddr': Mac address of nic.
'config': { # config is only honored for bridge, vlan, and vxlan types
'dhcp': bool,
'cidr': static_ip # ip/mask
'gateway': gateway
'dns': [dns]
}
}
:return:
"""
args = {
'container': container,
'nic': nic
}
self._nic_add.check(args)
return self._client.json('corex.nic-add', args)
def nic_remove(self, container, index):
"""
Hot unplug of nic from a container
        Note: removing a nic doesn't remove the nic from the container info object; instead it sets its state
to `destroyed`.
:param container: container ID
:param index: index of the nic as returned in the container object info (as shown by container.list())
:return:
"""
args = {
'container': container,
'index': index
}
self._nic_remove.check(args)
return self._client.json('corex.nic-remove', args)
def client(self, container):
"""
Return a client instance that is bound to that container.
:param container: container id
:return: Client object bound to the specified container id
            (the container id can be obtained from the ContainerResponse returned by container.create)
"""
self._client_chk.check(container)
return ContainerClient(self._client, int(container))
def backup(self, container, url):
"""
Backup a container to the given restic url
all restic urls are supported
:param container:
:param url: Url to restic repo
examples
(file:///path/to/restic/?password=<password>)
        :return: Json response to the backup job (do .get() to get the snapshot ID)
"""
args = {
'container': container,
'url': url,
}
return JSONResponse(self._client.raw('corex.backup', args))
def restore(self, url, tags=None):
"""
        Full restore of a container backup. This restore method will recreate
        an exact copy of the backed-up container (including the same network setup, and other
        configurations as defined by the `create` method).
        To just restore the container data and use a new configuration, use the create method instead
        with the `root_url` set to `restic:<url>`
:param url: Snapshot url, the snapshot ID is passed as a url fragment
examples:
`file:///path/to/restic/repo?password=<password>#<snapshot-id>`
:param tags: this will always override the original container tags (even if not set)
:return:
"""
args = {
'url': url,
}
return JSONResponse(self._client.raw('corex.restore', args, tags=tags))
def add_portforward(self, container, host_port, container_port):
"""
Add portforward from host to kvm container
:param container: id of the container
        :param host_port: port on host to forward from (string)
            format: NUMBER, IP:NUMBER, IP/MASK:NUMBER, or DEV:NUMBER
:param container_port: port on container to forward to
:return:
"""
if isinstance(host_port, int):
host_port = str(host_port)
args = {
'container': container,
'host_port': host_port,
'container_port': container_port,
}
self._portforward_chk.check(args)
return self._client.json('corex.portforward-add', args)
def remove_portforward(self, container, host_port, container_port):
"""
Remove portforward from host to kvm container
:param container: id of the container
:param host_port: port on host forwarded from
:param container_port: port on container forwarded to
:return:
"""
if isinstance(host_port, int):
host_port = str(host_port)
args = {
'container': container,
'host_port': host_port,
'container_port': container_port,
}
self._portforward_chk.check(args)
return self._client.json('corex.portforward-remove', args)
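    # --- Hedged usage sketch (illustrative only, not part of the API definition) ---
    # `cl` below is an assumption: a zero-os client object exposing this manager as
    # `cl.container`. The flist URL, ports and hostname are made-up placeholder values.
    #
    #   job = cl.container.create(
    #       'https://hub.example.com/my.flist',
    #       nics=[{'type': 'default'}],
    #       port={8080: 80},
    #       hostname='demo',
    #   )
    #   container_id = job.get()  # assumed to yield the id of the new container
    #   cl.container.add_portforward(container_id, '7000', 7000)
    #   container_client = cl.container.client(container_id)
    #   cl.container.terminate(container_id)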
|
python
|
import tensorflow as tf
import keras
# print(tf.__version__, keras.__version__)
amv_model_path = "model/frmodel.h5"
export_path = "model/ArtMaterialVerification/2"
model = tf.keras.models.load_model(amv_model_path)
with tf.keras.backend.get_session() as sess:
tf.saved_model.simple_save(
sess,
export_path,
inputs={'input_image': model.input},
outputs={t.name:t for t in model.outputs}
)
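# A minimal, hedged sanity check of the export (assumes TensorFlow 1.x APIs):
# reload the SavedModel in a fresh graph/session and print its signature keys.
with tf.Session(graph=tf.Graph()) as check_sess:
    meta_graph = tf.saved_model.loader.load(
        check_sess, [tf.saved_model.tag_constants.SERVING], export_path)
    print('exported signatures:', list(meta_graph.signature_def.keys()))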
|
python
|
import pandas as pd
import numpy as np
import scipy as sp
import random
from scipy.spatial.distance import mahalanobis
class TrainOutlier:
data = None
percentilek = None
valuecountsdict = None
colsum = None
median = None
invcovmx = None
cols = None
threshold = None
datetimecols = None
def train(self):
df = self.data
        if (self.cols is not None) and (self.datetimecols is not None):
            df = df[self.cols + self.datetimecols]
        elif self.cols is not None:
            df = df[self.cols]
        elif self.datetimecols is not None:
            df = df[self.datetimecols]
        else:
            raise ValueError('At least one categorical or date time column must be supplied')
#df_cols = pd.DataFrame((df.nunique() < 100) & (df.nunique() > 2),columns = ['values'])
#self.cols = df_cols[df_cols.values == True].index
if(self.datetimecols != None):
df = self.get_datetimefeatures(df)
df,cols_freq = self.get_inv_frequency_values(df,self.cols)
df,self.colsum = self.get_probability_values(df,self.cols,cols_freq)
self.median = pd.DataFrame(df[cols_freq].apply(np.median),columns=['median']).reset_index()
df_mahalanobis,self.invcovmx = self.get_mahalanobis_distance(df,self.median,cols_freq)
self.threshold = np.percentile(df_mahalanobis,self.percentilek)
self.valuecountsdict = self.get_value_counts_dict(df,self.cols)
return self #value_counts_dict, df_sum_values, df_median_values, invcovmx, cols, threshold
def get_datetimefeatures(self, df):
for d in self.datetimecols:
df[d+'_weekday'] = self.data[d].apply(lambda m : m.weekday())
df[d+'_hourofday'] = self.data[d].apply(lambda m : m.hour)
self.cols = self.cols + [d+'_weekday',d+'_hourofday']
return df
def get_inv_frequency_values(self,df,cols):
cols_freq = []
for c in cols:
d = pd.DataFrame(df[c].value_counts()).reset_index()
d.columns = [c,c+'_frequency']
df = pd.merge(df,d,how='left',on=[c])
df[c+'_frequency'] = 1/df[c+'_frequency']
cols_freq.append(c+'_frequency')
return(df,cols_freq)
def get_probability_values(self,df,cols,cols_freq):
df_sum_values = pd.DataFrame(df[cols_freq].apply(sum),columns=['sum']).reset_index()
for c in cols_freq:
v = df_sum_values.loc[df_sum_values['index'] == c,'sum'].values[0]
df[c] = df[c].apply(lambda x : x/(1 + v))
return(df,df_sum_values)
def get_mahalanobis_distance(self,df,df_median_values,cols_freq):
#Calculate covariance matrix
covmx = df[cols_freq].cov()
invcovmx = sp.linalg.inv(covmx)
df_mahalanobis = df[cols_freq].apply(lambda x: (mahalanobis(df_median_values['median'].values, x, invcovmx)), axis=1)
return df_mahalanobis,invcovmx
def get_value_counts_dict(self,df,cols):
value_counts_dict = {}
for c in cols:
d = df.groupby([c,c+'_frequency']).size().reset_index()
value_counts_dict[c] = d
return(value_counts_dict)
def __init__(self,data,percentile_k = 99.9,cat_cols=None, datetime_cols=None):
self.data = data
self.percentilek = percentile_k
self.cols = cat_cols
self.datetimecols = datetime_cols
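# Hedged usage sketch: the DataFrame and column names below are made up purely
# for illustration; real callers supply their own categorical/datetime columns.
if __name__ == '__main__':
    example = pd.DataFrame({
        'user': [random.choice(['alice', 'bob', 'carol']) for _ in range(200)],
        'action': [random.choice(['login', 'read', 'write']) for _ in range(200)],
    })
    model = TrainOutlier(example, percentile_k=99.0,
                         cat_cols=['user', 'action']).train()
    print('Mahalanobis threshold:', model.threshold)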
|
python
|
import os
from twisted.logger import FilteringLogObserver, LogLevelFilterPredicate, LogLevel, jsonFileLogObserver
from twisted.python import logfile
from twisted.python.log import FileLogObserver
log_dir = os.environ.get("LOG_DIR", '/var/log/')
log_level = os.environ.get("TWISTED_LOG_LEVEL", 'INFO').lower()
log_rotate_length = int(os.environ.get("LOG_ROTATE_LENGTH", 100000000))
max_rotated_log_files = int(os.environ.get("MAX_LOG_ROTATED_FILES", 10))
def get_log_observer():
f = logfile.LogFile("carbon_forwarder.log", log_dir, rotateLength=log_rotate_length, maxRotatedFiles=max_rotated_log_files)
observer = FileLogObserver(f)
filterer = FilteringLogObserver(observer.emit,
[LogLevelFilterPredicate(
LogLevel.levelWithName(log_level))])
return filterer
def get_json_log_observer():
f = logfile.LogFile("carbon_forwarder.log", log_dir, rotateLength=log_rotate_length, maxRotatedFiles=max_rotated_log_files)
observer = jsonFileLogObserver(f)
filterer = FilteringLogObserver(observer,
[LogLevelFilterPredicate(
LogLevel.levelWithName(log_level))])
return filterer
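# Hedged wiring example: hand one of the observers above to Twisted's global log
# beginner so application log events end up in the rotated file. Running this
# module directly is an assumption made for illustration; point LOG_DIR at a
# writable directory before trying it.
if __name__ == '__main__':
    from twisted.logger import globalLogBeginner, Logger
    globalLogBeginner.beginLoggingTo([get_json_log_observer()])
    Logger().info("carbon forwarder logging initialised")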
|
python
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
import pytest
from datadog_checks.redisdb import Redis
from . import common
pytestmark = pytest.mark.e2e
def assert_common_metrics(aggregator):
tags = ['redis_host:{}'.format(common.HOST), 'redis_port:6382', 'redis_role:master']
aggregator.assert_service_check('redis.can_connect', status=Redis.OK, tags=tags)
aggregator.assert_metric('redis.mem.fragmentation_ratio', count=2, tags=tags)
aggregator.assert_metric('redis.rdb.bgsave', count=2, tags=tags)
aggregator.assert_metric('redis.aof.last_rewrite_time', count=2, tags=tags)
aggregator.assert_metric('redis.replication.master_repl_offset', count=2, tags=tags)
aggregator.assert_metric('redis.net.rejected', count=2, tags=tags)
aggregator.assert_metric('redis.cpu.sys_children', count=1, tags=tags)
aggregator.assert_metric('redis.aof.rewrite', count=2, tags=tags)
aggregator.assert_metric('redis.mem.maxmemory', count=2, tags=tags)
aggregator.assert_metric('redis.mem.lua', count=2, tags=tags)
aggregator.assert_metric('redis.net.instantaneous_ops_per_sec', count=2, tags=tags)
aggregator.assert_metric('redis.perf.latest_fork_usec', count=2, tags=tags)
aggregator.assert_metric('redis.keys.evicted', count=2, tags=tags)
aggregator.assert_metric('redis.net.slaves', count=2, tags=tags)
aggregator.assert_metric('redis.net.maxclients', count=2, tags=tags)
aggregator.assert_metric('redis.clients.blocked', count=2, tags=tags)
aggregator.assert_metric('redis.stats.keyspace_misses', count=1, tags=tags)
aggregator.assert_metric('redis.pubsub.channels', count=2, tags=tags)
aggregator.assert_metric('redis.net.clients', count=2, tags=tags)
aggregator.assert_metric('redis.net.connections', count=2, tags=tags + ['source:unknown'])
aggregator.assert_metric('redis.mem.used', count=2, tags=tags)
aggregator.assert_metric('redis.mem.peak', count=2, tags=tags)
aggregator.assert_metric('redis.stats.keyspace_hits', count=1, tags=tags)
aggregator.assert_metric('redis.net.commands', count=1, tags=tags)
aggregator.assert_metric('redis.replication.backlog_histlen', count=2, tags=tags)
aggregator.assert_metric('redis.mem.rss', count=2, tags=tags)
aggregator.assert_metric('redis.cpu.sys', count=1, tags=tags)
aggregator.assert_metric('redis.pubsub.patterns', count=2, tags=tags)
aggregator.assert_metric('redis.keys.expired', count=2, tags=tags)
aggregator.assert_metric('redis.info.latency_ms', count=2, tags=tags)
aggregator.assert_metric('redis.cpu.user', count=1, tags=tags)
aggregator.assert_metric('redis.cpu.user_children', count=1, tags=tags)
aggregator.assert_metric('redis.rdb.last_bgsave_time', count=2, tags=tags)
aggregator.assert_metric('redis.rdb.changes_since_last', count=2, tags=tags)
tags += ['redis_db:db14']
aggregator.assert_metric('redis.expires', count=2, tags=tags)
aggregator.assert_metric('redis.expires.percent', count=2, tags=tags)
aggregator.assert_metric('redis.persist', count=2, tags=tags)
aggregator.assert_metric('redis.persist.percent', count=2, tags=tags)
aggregator.assert_metric('redis.keys', count=2, tags=tags)
aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key1', 'key_type:list'] + tags))
aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key2', 'key_type:list'] + tags))
aggregator.assert_metric('redis.key.length', count=2, tags=(['key:test_key3', 'key_type:list'] + tags))
aggregator.assert_metric('redis.replication.delay', count=2)
@pytest.mark.skipif(os.environ.get('REDIS_VERSION') != '3.2', reason='Test for redisdb v3.2')
def test_e2e_v_3_2(dd_agent_check, master_instance):
aggregator = dd_agent_check(master_instance, rate=True)
assert_common_metrics(aggregator)
tags = ['redis_host:{}'.format(common.HOST), 'redis_port:6382', 'redis_role:master']
aggregator.assert_metric('redis.clients.biggest_input_buf', count=2, tags=tags)
aggregator.assert_metric('redis.clients.longest_output_list', count=2, tags=tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.skipif(os.environ.get('REDIS_VERSION') != '4.0', reason='Test for redisdb v4.0')
def test_e2e_v_4_0(dd_agent_check, master_instance):
aggregator = dd_agent_check(master_instance, rate=True)
assert_common_metrics(aggregator)
tags = ['redis_host:{}'.format(common.HOST), 'redis_port:6382', 'redis_role:master']
aggregator.assert_metric('redis.clients.biggest_input_buf', count=2, tags=tags)
aggregator.assert_metric('redis.mem.overhead', count=2, tags=tags)
aggregator.assert_metric('redis.clients.longest_output_list', count=2, tags=tags)
aggregator.assert_metric('redis.mem.startup', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.running', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.hits', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.misses', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.key_hits', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.key_misses', count=2, tags=tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.skipif(os.environ.get('REDIS_VERSION') != 'latest', reason='Test for the latest redisdb version')
def test_e2e_v_latest(dd_agent_check, master_instance):
aggregator = dd_agent_check(master_instance, rate=True)
assert_common_metrics(aggregator)
tags = ['redis_host:{}'.format(common.HOST), 'redis_port:6382', 'redis_role:master']
aggregator.assert_metric('redis.mem.overhead', count=2, tags=tags)
aggregator.assert_metric('redis.mem.startup', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.running', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.hits', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.misses', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.key_hits', count=2, tags=tags)
aggregator.assert_metric('redis.active_defrag.key_misses', count=2, tags=tags)
aggregator.assert_metric('redis.server.io_threads_active', count=2, tags=tags)
aggregator.assert_metric('redis.stats.io_threaded_reads_processed', count=1, tags=tags)
aggregator.assert_metric('redis.stats.io_threaded_writes_processed', count=1, tags=tags)
aggregator.assert_metric('redis.cpu.sys_main_thread', count=1, tags=tags)
aggregator.assert_metric('redis.cpu.user_main_thread', count=1, tags=tags)
aggregator.assert_all_metrics_covered()
|
python
|
import os
from oelint_adv.cls_rule import Rule
from oelint_parser.helper_files import expand_term
from oelint_parser.helper_files import get_layer_root
class RubygemsTestCase(Rule):
TESTCASE_DIR = "lib/oeqa/runtime/cases"
def __init__(self):
super().__init__(id="rubygems.testcase",
severity="error",
message="Recipe has to have a test case")
def __sanitize_pn(self, name):
return name.replace("@", "").replace("/", "-").replace("-", "_")
def __needle_to_search_for(self, name):
return "class RubyGemsTest{pn}(RubyGemsTestUtils)".format(pn=self.__sanitize_pn(name))
def check(self, _file, stash):
res = []
if "recipes-rubygems/" not in _file:
return []
found = False
_pn = expand_term(stash, _file, "${PN}")
_layer_root = get_layer_root(_file)
_needle = self.__needle_to_search_for(_pn)
for root, dirs, files in os.walk(os.path.join(_layer_root, RubygemsTestCase.TESTCASE_DIR)):
for f in files:
if not f.endswith(".py"):
continue
with open(os.path.join(root, f)) as i:
if _needle in i.read():
found = True
break
if not found:
res += self.finding(_file, 1)
return res
|
python
|
# -*- coding: utf-8 -*-
"""Sonos Alarms."""
from __future__ import unicode_literals
import logging
from datetime import datetime
import re
import weakref
from .core import discover, PLAY_MODES
from .xml import XML
log = logging.getLogger(__name__) # pylint: disable=C0103
TIME_FORMAT = "%H:%M:%S"
def is_valid_recurrence(text):
"""Check that text is a valid recurrence string.
A valid recurrence string is 'DAILY', 'ONCE', 'WEEKDAYS', 'WEEKENDS' or
of the form 'ON_DDDDDD' where D is a number from 0-7 representing a day
of the week (Sunday is 0), e.g. 'ON_034' meaning Sunday, Wednesday and
Thursday
Arg:
text(str): the recurrence string to check
Returns:
bool: True if the recurrence string is valid, else False
Examples:
::
>>> from soco.alarms import is_valid_recurrence
>>> is_valid_recurrence('WEEKENDS')
True
>>> is_valid_recurrence('')
False
>>> is_valid_recurrence('ON_132') # Mon, Tue, Wed
True
>>> is_valid_recurrence('ON_777') # Sat
True
>>> is_valid_recurrence('ON_3421') # Mon, Tue, Wed, Thur
True
>>> is_valid_recurrence('ON_123456789') # Too many digits
False
"""
if text in ("DAILY", "ONCE", "WEEKDAYS", "WEEKENDS"):
return True
return re.search(r'^ON_[0-7]{1,7}$', text) is not None
class Alarm(object):
"""A class representing a Sonos Alarm.
Alarms may be created or updated and saved to, or removed from the Sonos
system. An alarm is not automatically saved. Call `save()` to do that.
Example:
.. code-block::
>>> # create an alarm with default properties
>>> alarm = Alarm(my_device)
>>> print alarm.volume
20
>>> print get_alarms()
set([])
>>> # save the alarm to the Sonos system
>>> alarm.save()
>>> print get_alarms()
set([<Alarm id:88@15:26:15 at 0x107abb090>])
>>> # update the alarm
>>> alarm.recurrence = "ONCE"
>>> # Save it again for the change to take effect
>>> alarm.save()
>>> # Remove it
>>> alarm.remove()
>>> print get_alarms()
set([])
"""
# pylint: disable=too-many-instance-attributes
_all_alarms = weakref.WeakValueDictionary()
# pylint: disable=too-many-arguments
def __init__(
self, zone, start_time=None, duration=None,
recurrence='DAILY', enabled=True,
program_uri=None, program_metadata='',
play_mode='NORMAL', volume=20, include_linked_zones=False):
"""
Args:
zone (SoCo): The soco instance which will play the alarm.
start_time (datetime.time, optional): The alarm's start time.
Specify hours, minutes and seconds only. Defaults to the
current time
duration (datetime.time, optional): The alarm's duration. Specify
hours, minutes and seconds only. May be None for unlimited
duration. Defaults to None
recurrence (str, optional): A string representing how often the
alarm should be triggered. Can be 'DAILY', 'ONCE', 'WEEKDAYS',
'WEEKENDS' or of the form 'ON_DDDDDD' where D is a number from
0-7 representing a day of the week (Sunday is 0), e.g. 'ON_034'
meaning Sunday, Wednesday and Thursday. Defaults to 'DAILY'
enabled (bool, optional): True if alarm is enabled, False
otherwise. Defaults to True
program_uri(str, optional): The uri to play. If None, the built-in
Sonos chime sound will be used. Defaults to None
program_metadata (str, optional): The metadata associated with
program_uri. Defaults to ''
play_mode(str, optional): The play mode for the alarm. Can be one
of 'NORMAL', 'SHUFFLE_NOREPEAT', 'SHUFFLE', 'REPEAT_ALL'.
Defaults to 'NORMAL'
volume (int, optional): The alarm's volume (0-100). Defaults to 20
include_linked_zones (bool, optional): True if the alarm should be
played on the other speakers in the same group, False
otherwise. Defaults to False
"""
super(Alarm, self).__init__()
self.zone = zone
if start_time is None:
start_time = datetime.now().time()
self.start_time = start_time
self.duration = duration
self._recurrence = recurrence
self.enabled = enabled
self.program_uri = program_uri
self.program_metadata = program_metadata
self._play_mode = play_mode
self._volume = volume
self.include_linked_zones = include_linked_zones
self._alarm_id = None
def __repr__(self):
middle = str(self.start_time.strftime(TIME_FORMAT))
return "<{0} id:{1}@{2} at {3}>".format(
self.__class__.__name__, self._alarm_id, middle, hex(id(self)))
@property
def play_mode(self):
"""The play mode for the alarm.
Can be one of 'NORMAL', 'SHUFFLE_NOREPEAT', 'SHUFFLE',
'REPEAT_ALL'.
"""
return self._play_mode
@play_mode.setter
def play_mode(self, play_mode):
"""Set the play mode."""
play_mode = play_mode.upper()
if play_mode not in PLAY_MODES:
raise KeyError("'%s' is not a valid play mode" % play_mode)
self._play_mode = play_mode
@property
def volume(self):
"""The alarm's volume (0-100)."""
return self._volume
@volume.setter
def volume(self, volume):
"""Set the volume."""
# max 100
volume = int(volume)
self._volume = max(0, min(volume, 100)) # Coerce in range
@property
def recurrence(self):
"""A string representing how often the alarm should be triggered.
Can be 'DAILY', 'ONCE', 'WEEKDAYS', 'WEEKENDS' or of the form
'ON_DDDDDDD' where D is a number from 0-7 representing a day of the
week (Sunday is 0), e.g. 'ON_034' meaning Sunday, Wednesday and
Thursday.
"""
return self._recurrence
@recurrence.setter
def recurrence(self, recurrence):
"""Set the recurrence."""
if not is_valid_recurrence(recurrence):
raise KeyError("'%s' is not a valid recurrence value" % recurrence)
self._recurrence = recurrence
def save(self):
"""Save the alarm to the Sonos system.
Raises:
SoCoUPnPError if the alarm cannot be created because there is
already an alarm for this room at the specified time
"""
# pylint: disable=bad-continuation
args = [
('StartLocalTime', self.start_time.strftime(TIME_FORMAT)),
('Duration', '' if self.duration is None else
self.duration.strftime(TIME_FORMAT)),
('Recurrence', self.recurrence),
('Enabled', '1' if self.enabled else '0'),
('RoomUUID', self.zone.uid),
('ProgramURI', "x-rincon-buzzer:0" if self.program_uri is None
else self.program_uri),
('ProgramMetaData', self.program_metadata),
('PlayMode', self.play_mode),
('Volume', self.volume),
('IncludeLinkedZones', '1' if self.include_linked_zones else '0')
]
if self._alarm_id is None:
response = self.zone.alarmClock.CreateAlarm(args)
self._alarm_id = response['AssignedID']
Alarm._all_alarms[self._alarm_id] = self
else:
# The alarm has been saved before. Update it instead.
args.insert(0, ('ID', self._alarm_id))
self.zone.alarmClock.UpdateAlarm(args)
def remove(self):
"""Removes the alarm.
Removes the alarm from the Sonos system. There is no need to
call `save`. The Python instance is not deleted, and can be
saved back to Sonos again if desired.
"""
self.zone.alarmClock.DestroyAlarm([
('ID', self._alarm_id)
])
alarm_id = self._alarm_id
try:
del Alarm._all_alarms[alarm_id]
except KeyError:
pass
self._alarm_id = None
def get_alarms(soco=None):
"""Get a set of all alarms known to the Sonos system.
Args:
soco (SoCo, optional): a SoCo instance to query. If None, a random
instance is used. Defaults to None
Returns:
set: A set of Alarm instances
Note:
Any existing Alarm instance will have its attributes updated to those
currently stored on the Sonos system.
"""
# Get a soco instance to query. It doesn't matter which.
if soco is None:
soco = discover().pop()
response = soco.alarmClock.ListAlarms()
alarm_list = response['CurrentAlarmList']
tree = XML.fromstring(alarm_list.encode('utf-8'))
# An alarm list looks like this:
# <Alarms>
# <Alarm ID="14" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ1400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# <Alarm ID="15" StartTime="07:00:00"
# Duration="02:00:00" Recurrence="DAILY" Enabled="1"
# RoomUUID="RINCON_000ZZZZZZ01400"
# ProgramURI="x-rincon-buzzer:0" ProgramMetaData=""
# PlayMode="SHUFFLE_NOREPEAT" Volume="25"
# IncludeLinkedZones="0"/>
# </Alarms>
# pylint: disable=protected-access
alarms = tree.findall('Alarm')
result = set()
for alarm in alarms:
values = alarm.attrib
alarm_id = values['ID']
# If an instance already exists for this ID, update and return it.
# Otherwise, create a new one and populate its values
if Alarm._all_alarms.get(alarm_id):
instance = Alarm._all_alarms.get(alarm_id)
else:
instance = Alarm(None)
instance._alarm_id = alarm_id
Alarm._all_alarms[instance._alarm_id] = instance
instance.start_time = datetime.strptime(
values['StartTime'], "%H:%M:%S").time() # NB StartTime, not
# StartLocalTime, which is used by CreateAlarm
instance.duration = None if values['Duration'] == '' else\
datetime.strptime(values['Duration'], "%H:%M:%S").time()
instance.recurrence = values['Recurrence']
instance.enabled = values['Enabled'] == '1'
instance.zone = [zone for zone in soco.all_zones
if zone.uid == values['RoomUUID']][0]
instance.program_uri = None if values['ProgramURI'] ==\
"x-rincon-buzzer:0" else values['ProgramURI']
instance.program_metadata = values['ProgramMetaData']
instance.play_mode = values['PlayMode']
instance.volume = values['Volume']
instance.include_linked_zones = values['IncludeLinkedZones'] == '1'
result.add(instance)
return result
|
python
|
"""The core event-based simulation engine"""
import heapq
from abc import abstractmethod
from dataclasses import dataclass, field
from enum import Enum, auto
from typing import Iterator, List, NamedTuple, Optional, Protocol, runtime_checkable
# from .event import EventError, EventLike, StopEngineError
__all__ = [
"Engine",
"EngineError",
"EngineState",
"EngineStatus",
"Event",
"EventError",
"StopEngineError",
]
class EngineError(Exception): # pragma: no cover
"""The simulation encountered an error"""
def __init__(self, now: int, msg: str):
self.now = now
self.message = msg
super().__init__(str(self))
def __str__(self):
return f"{self.now}: {self.message}"
class EngineState(Enum):
"""Enumeration of allowed engine states"""
WAITING = auto() # Initial state of a fresh simulation
STOPPED = auto() # Simulation was stopped early for a reason
RUNNING = auto() # Simulation is in a normal running state
PAUSED = auto() # Simulation was paused by the user
ABORTED = auto() # Simulation was aborted due to error
FINISHED = auto() # Simulation completed normally
class EngineStatus(NamedTuple):
"""Data structure to hold the current simulation status"""
state: EngineState
message: str
class EventError(Exception):
"""Base error raised by Events"""
def __init__(self, event: "Event", msg: str):
self.event = event
super().__init__(msg)
class StopEngineError(EventError):
"""Raised by Events to indicate that the simulation should be aborted"""
@runtime_checkable
class EventLike(Protocol):
"""An Event like interface to use in typing"""
timestep: int
name: str
@abstractmethod
def call(self, *args):
"""Executes the event callback"""
class Event:
"""The core Event object"""
def __init__(self, timestep: int, name: str, data: dict = {}):
self.timestep = timestep
self.name = name
self.data = data
def call(self, ctx: dict = {}) -> Iterator[Optional["Event"]]:
"""The event callback function.
        This is the business end of the event. Its job is to decide from the context which events to fire and when.
The function yields events until exhausted. The engine will consume all yielded events and execute them in
the order they are yielded.
The engine will pass a yet ill-defined simulation context dictionary that should contain all relevant context
objects an event would need
"""
yield None
@dataclass(order=True)
class QueueItem:
timestep: int
event: EventLike = field(compare=False)
@dataclass
class Engine:
"""The core simulation engine.
The engine is responsible for managing the event queue and running the entire simulation
"""
name: str = "Unnamed" # The name of this engine
def __post_init__(self):
self.now = 0
self.queue: List[QueueItem] = []
self._status: EngineStatus = EngineStatus(
state=EngineState.WAITING,
message="Initialized",
)
def __str__(self):
return f"Engine({self.name}) - {len(self.queue)} events - Status: '{self.state.name}'"
@property
def status(self):
"""The status of the engine holds an `EngineStatus` object comprising of the current engine state and a message"""
return self._status
def set_status(self, state: EngineState, message: str):
"""Setter method for the engine status"""
self._status = EngineStatus(state=state, message=message)
@property
def state(self) -> EngineState:
"""The engine state is an `Enginestate` enumerated object of allowed states"""
return self.status.state
@property
def message(self) -> str:
"""The latest engine status message"""
return self.status.message
def is_state(self, state: EngineState) -> bool:
"""Returns whether the current engine state evaluates to the provided one"""
return self.state == state
def schedule(self, event: EventLike, timestep: int = None) -> None:
"""Schedule an event to the queue"""
if isinstance(event, EventLike):
timestep = timestep or event.timestep
heapq.heappush(self.queue, QueueItem(timestep, event))
def stop(self, msg: str) -> None:
"""Stops the engine with a message"""
self.set_status(EngineState.STOPPED, msg)
def abort(self, msg: str) -> None:
"""Aborts the engine with a message"""
self.set_status(EngineState.ABORTED, msg)
def finish(self, msg: str) -> None:
"""Finish the program"""
self.set_status(EngineState.FINISHED, msg)
def run(self, stop_at: int = None) -> None:
"""Runs the simulation.
This involves continually retrieving events from the queue until
it either is exhausted or the timestep reaches a given `stop` time.
"""
self.set_status(
EngineState.RUNNING, f"Stopping at {stop_at if stop_at else 'Never'}"
)
while True:
if not self.queue:
self.finish(f"Simulation finished at {self.now}")
return
queue_item = heapq.heappop(self.queue)
timestep = queue_item.timestep
event = queue_item.event
if stop_at is not None and timestep > stop_at:
self.now = stop_at
self.stop(f"Simulation max time {stop_at} exceeded")
return
else:
self.now = timestep
if not self.consume_event(event):
return
def consume_event(self, event: EventLike):
"""Processes an event, checks for errors and schedules any events that are yielded"""
try:
for evt in event.call():
if evt:
self.schedule(evt)
except StopEngineError as e:
self.stop(
f"Simulation was stopped by event {event.name} at t {self.now}: {e}"
)
except EventError as e:
self.abort(
f"Simulation was aborted by event {event.name} at t{self.now}: {e}"
)
else:
return True
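# Hedged usage sketch: a tiny Event subclass that re-schedules itself a few times
# and is driven by the Engine above. The names here are illustrative only.
class TickEvent(Event):
    """Example event that yields a follow-up tick until t=3."""
    def call(self, ctx: dict = {}) -> Iterator[Optional[Event]]:
        print(f"tick '{self.name}' at t={self.timestep}")
        if self.timestep < 3:
            yield TickEvent(self.timestep + 1, self.name)
if __name__ == "__main__":
    engine = Engine(name="demo")
    engine.schedule(TickEvent(0, "heartbeat"))
    engine.run(stop_at=10)
    print(engine.status)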
|
python
|
import pdb
import copy
import json
import numpy as np
from utils import game_util
import constants
class ActionUtil(object):
def __init__(self):
self.actions = [
{'action' : 'MoveAhead', 'moveMagnitude' : constants.AGENT_STEP_SIZE},
{'action' : 'RotateLeft'},
{'action' : 'RotateRight'},
#{'action' : 'LookUp'},
#{'action' : 'LookDown'},
]
self.action_to_ind = {frozenset(action.items()) : ii for ii,action in enumerate(self.actions)}
self.reverse_actions = {
'MoveAhead' : 'MoveBack',
'MoveBack' : 'MoveAhead',
'MoveLeft' : 'MoveRight',
'MoveRight' : 'MoveLeft',
'RotateLeft' : 'RotateRight',
'RotateRight' : 'RotateLeft',
'LookUp' : 'LookDown',
'LookDown' : 'LookUp',
'PickupObject' : 'PutObject',
'PutObject' : 'PickupObject',
'OpenObject' : 'CloseObject',
'CloseObject' : 'OpenObject'
}
self.num_actions = len(self.actions)
def action_dict_to_ind(self, action):
return self.action_to_ind[frozenset(action.items())]
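# Hedged usage example: look up the index of an action dict and its reverse action.
if __name__ == '__main__':
    util = ActionUtil()
    move = {'action': 'MoveAhead', 'moveMagnitude': constants.AGENT_STEP_SIZE}
    print(util.action_dict_to_ind(move), util.reverse_actions[move['action']])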
|
python
|
import requests
apikey = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjYwODE4ODU1YTcxOGRmNGVkMTkwZjE1ZSIsImlhdCI6MTYxOTEwMTc4MSwiZXhwIjoxNjIxNjkzNzgxfQ.SlyayNaXu8PTPYAtyR9h7tIlR9ooXn72DRn6EAwcgV6rNY1rZQCoSs_d2EESIJs3kb0LwCSfU9o5lWMW9_Twigj3FxX99iAg7_gB1m6TReJ2moZ-rYIst6RTtJtWQWBezZ-37RyACH9s44WQ9qnlrXBYKgnW6LyVi18KdfwEYekgbKM6bSkvPTVYdtjkzktKwKZfIouts4nQGm0tvTfQC_AtOP22338i5N2I952gBN0lf9fn6iaj64TCAXaUA4JhMNZad6ekK0AWauGZsHcaOaLiqpbxKjGs2d69fCOcdKsbDGwoGSEL_6TUho9Yfb405yS9ZE4TjatGNtBaRmSv9g"
r2 = requests.get('https://clav-api.di.uminho.pt/v2/entidades?apikey=' + apikey)  # scheme added; requests needs an absolute URL
entidades = r2.json()
f = open("entidades.txt", "w")
for e in entidades:
f.write(e['sigla'] + '::' + e['designacao'] + '::' + e['id'] + '\n')
f.close()
|
python
|
# coding: utf-8
from distutils.core import setup
__version__ = '0.2.3'
short_description = 'Statistics for Django projects'
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
long_description = short_description
install_requires = [
'Django>=1.7',
'jsonfield>=1.0.0',
'python-dateutil==2.5.3',
]
setup(
name='django-statsy',
packages=['statsy'],
version=__version__,
description=short_description,
long_description=long_description,
author='Alexander Zhebrak',
author_email='[email protected]',
license='MIT',
url='https://github.com/zhebrak/django-statsy',
download_url='https://pypi.python.org/pypi/django-statsy',
keywords=['django', 'statistics', 'analytics'],
install_requires=install_requires,
zip_safe=False,
include_package_data=True,
classifiers=[],
)
|
python
|
import os
sd = None
def set_sd(new_sd):
global sd
sd = new_sd
tmp_dir = "tmp/"
export_tmp = tmp_dir + "dashboard_export.csv"
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
|
python
|
# Seasons
SEASONS = [
"PRESEASON 3",
"SEASON 3",
"PRESEASON 2014",
"SEASON 2014",
"PRESEASON 2015",
"SEASON 2015",
"PRESEASON 2016",
"SEASON 2016",
"PRESEASON 2017",
"SEASON 2017",
"PRESEASON 2018",
"SEASON 2018",
"PRESEASON 2019",
"SEASON 2019",
]
|
python
|
import fire
from .utils import *
tfd=test_font_dir
if __name__ == '__main__':
fire.Fire()
|
python
|
print('='* 40)
print('{:^40}'.format('Listagem de Preços!!'))
print('='* 40)
listagem = ('Espeto de Carne', 8.00,
'Espeto de Frango', 5.00,
'Espeto de Linguiça', 5.50,
'Espeto de Kafta', 6.00,
'Espeto de Queijo', 6.50,
'Espeto de Medalhão Frango', 6.00,
'Espeto de Mandioca C/Bacon', 6.00,
'Espeto de Filé de Tilapia', 6.50,
'Espeto de Coração', 6.50,
'Espeto de Linguiça C/Pimenta', 6.50)
for pos in range(0, len(listagem)):
if pos % 2 == 0:
print(f'{listagem[pos]:.<30}', end='')
else:
print(f'R${listagem[pos]:>7.2f}')
print('=' * 40)
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
from random import randint
def big(): return randint(0, 1_000_000)
def index(request):
return HttpResponse("Hello, there! Welcome to the base of the project! Your big ugly number is " + str(big()))
|
python
|
import os
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from settings import CLIENT_NAME, Client, LAUNCH_MODE, LaunchMode, URL, LocatorType
class LauncherNotSupported(Exception):
pass
class LaunchModeNotSupported(Exception):
pass
class InvalidLocatorException(Exception):
pass
list_of_supported_locator_type = (
LocatorType.id,
LocatorType.name,
LocatorType.xpath,
LocatorType.link_text,
LocatorType.partial_link_text,
LocatorType.tag,
LocatorType.class_name,
LocatorType.css,
)
dictionary_of_locator_type_and_description = {
LocatorType.id: By.ID,
LocatorType.name: By.NAME,
LocatorType.xpath: By.XPATH,
LocatorType.link_text: By.LINK_TEXT,
LocatorType.partial_link_text: By.PARTIAL_LINK_TEXT,
LocatorType.tag: By.TAG_NAME,
LocatorType.class_name: By.CLASS_NAME,
LocatorType.css: By.CSS_SELECTOR
}
def wait_till_browser_is_ready(func):
    def ensure_browser_is_in_ready_state(self, *args):
WebDriverWait(self.driver, self.wait_timeout).until(
lambda driver:
driver.execute_script(
'return document.readyState == "complete";'),
'page is not completely loaded'
)
        return func(self, *args)
return ensure_browser_is_in_ready_state
def wait_till_element_is_visible(func):
def ensure_element_visible(self, *args):
locator = args[0]
WebDriverWait(self.driver, self.wait_timeout).until(
lambda driver:
self.is_element_visible(locator)
)
return func(self, *args)
return ensure_element_visible
class Launcher(object):
def launch(self):
raise NotImplemented("launch method not implemented")
class ChromeLauncher(Launcher):
def __init__(self):
self.chrome_options = webdriver.ChromeOptions()
self.chrome_options.add_argument("--disable-extensions")
self.chrome_options.add_argument("--disable-infobars")
self.chrome_options.add_argument("--test-type")
if os.name == 'posix':
self.chrome_options.add_argument("--kiosk")
else:
self.chrome_options.add_argument("--start-maximized")
def launch(self):
web_driver = webdriver.Chrome(chrome_options=self.chrome_options)
web_driver.get(URL)
return web_driver
class DealTapDriver(object):
    def click(self, name_of_item):
        raise NotImplementedError
    def get_text(self, name_of_item):
        raise NotImplementedError
    def launch_aut(self):
        raise NotImplementedError
    def quit_aut(self):
        raise NotImplementedError
class DealTapWebDriver(DealTapDriver):
def __init__(self, driver=None):
self.driver = driver
self.locator_dictionary = None
self.wait_timeout = 20
def launch_aut(self):
launcher = get_launcher_from_factory()
driver = launcher.launch()
return driver
@wait_till_element_is_visible
def click(self, name_of_item):
element = self.find_element(name_of_item)
element.click()
@wait_till_element_is_visible
def get_text(self, name_of_item):
element = self.find_element(name_of_item)
return element.text
@wait_till_element_is_visible
def set_text(self, name_of_item, text_to_set, append=False):
element = self.find_element(name_of_item)
if append:
element.send_keys(text_to_set)
else:
element.clear()
element.send_keys(text_to_set)
@wait_till_browser_is_ready
def find_element(self, name_of_locator):
locator_description = self.locator_dictionary[name_of_locator]
locator_type = locator_type_detector(locator_description)
locator_description = locator_description.replace("{}=".format(locator_type), "", 1)
return self.driver.find_element(
dictionary_of_locator_type_and_description[locator_type],
locator_description
)
def is_element_visible(self, locator):
try:
element = self.find_element(locator)
return element.is_displayed() and element.is_enabled()
except NoSuchElementException:
return False
def quit_aut(self):
self.driver.quit()
def execute_javascript(self, script, *args):
return self.driver.execute_script(script, *args)
def get_launcher_from_factory():
if CLIENT_NAME == Client.CHROME:
return ChromeLauncher()
else:
raise LauncherNotSupported()
def get_dealtap_driver_from_factory(driver=None):
if LAUNCH_MODE == LaunchMode.WEB:
return DealTapWebDriver(driver)
else:
raise LaunchModeNotSupported()
def locator_type_detector(locator_description):
actual_locator_type = locator_description[0: locator_description.find('=')]
locator = list([locator for locator in list_of_supported_locator_type if locator == actual_locator_type])
if len(locator) != 1:
raise InvalidLocatorException("locator named {} is not a valid locator ".format(actual_locator_type))
return locator[0]
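# Hedged sketch of how a page object might plug into DealTapWebDriver. The locator
# names/values are invented for illustration, and it is assumed that LocatorType
# members compare equal to the "id=" / "xpath=" prefixes used below.
class ExampleLoginPage(DealTapWebDriver):
    def __init__(self, driver):
        super().__init__(driver)
        self.locator_dictionary = {
            "email": "id=email",
            "password": "id=password",
            "submit": "xpath=//button[@type='submit']",
        }
    def login(self, email, password):
        self.set_text("email", email)
        self.set_text("password", password)
        self.click("submit")
# e.g. page = ExampleLoginPage(ChromeLauncher().launch()); page.login("user@example.com", "secret")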
|
python
|
import logging
import subprocess
import os
import platform
import sys
from cmd2 import utils
logger = logging.getLogger(__name__.split(".")[-1])
class Command:
"""
Provides a way to run bash commands on local or remote side
Remote execution of commands is done over SSH protocol for given username and host
"""
# Host platform string for Windows
PLATFORM_OS_WIN32 = "win32"
# Host platform string for Linux
PLATFORM_OS_LINUX = "linux"
# Host platform string for MAC OS
PLATFORM_OS_MACOS = "darwin"
# Path to System folder on Windows platform
WIN32_SYSTEM_PATH = (
os.path.join(
os.environ["SystemRoot"],
"SysNative" if platform.architecture()[0] == "32bit" else "System32",
)
if sys.platform == PLATFORM_OS_WIN32
else ""
)
# Encoding used to decode stdout with
OUTPUT_ENCODING = "ISO-8859-1"
# ssh connection param template for linux platform
LINUX_SSH_CONN_PARAM_TEMPLATE = " {} {}@{} '{}'"
# ssh connection param template for win32 platform
WIN32_SSH_CONN_PARAM_TEMPLATE = " {} {}@{} {}"
# Relative path to the ssh executable on Windows platform
WIN32_SSH_RELATIVE_EXE_PATH = "OpenSSH\\ssh.exe"
# Path that is used to check if we have administrative rights
ADMIN_CHECK_PATH = os.sep.join(
[os.environ.get("SystemRoot", "C:\\windows"), "temp"]
)
# Localhost string
HOST_LOCALHOST = "localhost"
def __init__(self, username):
"""
Constructor
@param username Default username
"""
self.__username = username
self.__host = None
self.__port = None
# Host platform
self.__platform = sys.platform
# Path to ssh binary on host
self.__sshPath = None
# Subprocess check_output shell param
self.__coShell = None
# Set subprocess params on init
self.__setSshHostCommandParams()
def setUsername(self, username):
"""
Change username
@param username New username
"""
self.__username = username
def setHost(self, host, port):
"""
Change host
@param host New host
@param port New port
"""
self.__host = host
self.__port = port
def getUsername(self):
"""
Get current username
@return Current username
"""
return self.__username
def getHost(self):
"""
Get current host
@return Current host
"""
return self.__host if self.__host else self.HOST_LOCALHOST
def getPort(self):
"""
Get current port
@return Current port
"""
return self.__port
def runCommand(self, command, local=False):
"""
Run a command locally or via ssh
@param command Command to run
@param local Set to True to run command on local host explicitly (default = False)
@return stdout
"""
# If host is set -> run via SSH
if self.__host and not local:
if self.__sshPath:
command = self.__sshPath.format(
"-T {}".format("-p " + self.__port if self.__port else ""),
self.__username,
self.__host,
command,
)
else:
# TODO: Proper Error handling, throw exception here (no ssh binary = no remote command execution)
logger.error("No SSH binary found on host!")
return None
logger.debug(command)
stdout = (
subprocess.check_output(command, shell=self.__coShell)
.decode(self.OUTPUT_ENCODING)
.strip()
)
logger.debug(stdout)
return stdout
def spawnSshShell(self, host, command):
"""
Spawns an interactive ssh shell on the host
@param host Remote host to connect to, if none jump-host will be used
@param command Command to execute on remote shell
@return Return code of the spawned ssh shell process
"""
proc = subprocess.Popen(
self.__sshPath.format(
"{}".format("-p " + self.__port if self.__port else ""),
self.__username,
self.__host if not host else host,
"{}".format(command if command else ""),
),
stdout=sys.stdout,
stderr=sys.stderr,
shell=True,
)
# Start the process reader threads (for stdout and stderr)
proc_reader = utils.ProcReader(proc, sys.stdout, sys.stderr)
# Block here until we exit from the process
proc_reader.wait()
return proc.returncode
def sshCommandStringConvert(self, command):
"""
        Convert command that is sent over ssh according to the host environment
@param command Command string that needs to be converted
@return converted command string
"""
# For now we need to convert the string which contains " chars to '
# only when host is Win32 platform
# Some of the docker commands may fail if they are sent from Win32
# host over ssh if this conversion is not done
if self.__platform == self.PLATFORM_OS_WIN32:
command = command.replace('"', "'")
return command
def getHostPlatform(self):
"""
Return the host platform on which this tool is running
@return current host platform
"""
        if self.__platform == self.PLATFORM_OS_WIN32:
            return self.PLATFORM_OS_WIN32
        elif self.__platform == self.PLATFORM_OS_MACOS:
            return self.PLATFORM_OS_MACOS
        # Assume for everything else that we are on Linux like OS
        else:
            return self.PLATFORM_OS_LINUX
def checkAdmin(self):
"""
Checks if the environment in which this tool is run has administrative privileges
@return Tuple with two values: username, hasAdmin (True or False)
"""
if self.__platform == self.PLATFORM_OS_WIN32:
try:
# only windows users with admin privileges can read the C:\windows\temp
temp = os.listdir(self.ADMIN_CHECK_PATH)
except:
return (os.environ["USERNAME"], False)
else:
return (os.environ["USERNAME"], True)
elif self.__platform == self.PLATFORM_OS_LINUX:
if "SUDO_USER" in os.environ and os.geteuid() == 0:
return (os.environ["SUDO_USER"], True)
else:
return (os.environ["USERNAME"], False)
elif self.__platform == self.PLATFORM_OS_MACOS:
logger.info("There is no need for SUDO check on MAC_OS for now")
def __setSshHostCommandParams(self):
"""
Checks host platform and sets correct ssh binary path and params
for subprocess command call
"""
logger.debug("Host platform: " + self.__platform)
# Check the host platform in order to get the path to ssh binary
if self.__platform == self.PLATFORM_OS_WIN32:
self.__sshPath = (
os.path.join(self.WIN32_SYSTEM_PATH, self.WIN32_SSH_RELATIVE_EXE_PATH)
+ self.WIN32_SSH_CONN_PARAM_TEMPLATE
)
self.__coShell = False
        elif self.__platform in (self.PLATFORM_OS_LINUX, self.PLATFORM_OS_MACOS):
self.__sshPath = "ssh" + self.LINUX_SSH_CONN_PARAM_TEMPLATE
self.__coShell = True
if self.__sshPath is not None:
logger.debug("SSH binary path: " + self.__sshPath)
else:
logger.error(
"No SSH binary found on host, only local cmd execution will work!"
)
return
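# Hedged usage sketch: run a command locally, then point the same helper at a
# remote host over SSH. The host/port values are placeholders, not real machines.
if __name__ == "__main__":
    cmd = Command("root")
    print(cmd.runCommand("echo hello from localhost", local=True))
    cmd.setHost("192.0.2.10", "22")
    print(cmd.runCommand("uname -a"))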
|
python
|
# print 'name' multiple times
# for loop
for i in range(1,11):
i = 'Omkar'
print(i)
# while loop
i = 1
while (i<11) :
    print('Omkar')
i = i + 1
|
python
|
# see https://www.codewars.com/kata/614adaedbfd3cf00076d47de/train/python
def expansion(matrix, n):
for _ in range(n):
rows = [x + [sum(x)] for x in matrix]
extraRow = [sum([x[i] for x in rows]) for i in range(len(matrix))] + [sum([matrix[i][i] for i in range(len(matrix))])]
rows.append(extraRow)
matrix = rows
return matrix
from TestFunction import Test
test = Test(None)
m1 = [
[1,2],
[5,3]
]
m2 = [
[4,1],
[19,-2]
]
m3 = [
[102,39],
[-11,-97]
]
m4 = [
[53, -64, 16, 16],
[-98, 0, -14, -87],
[75, -74, 39, 36],
[32, 90, 42, 12]
]
test.describe("Example Tests")
test.it('Depth 1')
test.assert_equals(expansion(m1, 1), [[1, 2, 3], [5, 3, 8], [6, 5, 4]])
test.assert_equals(expansion(m2, 1), [[4, 1, 5], [19, -2, 17], [23, -1, 2]])
test.assert_equals(expansion(m3, 1), [[102, 39, 141], [-11, -97, -108], [91, -58, 5]])
test.it('Depth 2')
test.assert_equals(expansion(m1, 2), [[1, 2, 3, 6], [5, 3, 8, 16], [6, 5, 4, 15], [12, 10, 15, 8]])
# test.assert_equals(expansion(m2, 2), [[4, 1, 5, 10], [19, -2, 17, 34], [23, -1, 2, 24], [46, -2, 24, 4]])
# test.assert_equals(expansion(m3, 2), [[102, 39, 141, 282], [-11, -97, -108, -216], [91, -58, 5, 38], [182, -116, 38, 10]])
# test.assert_equals(expansion(m4, 2), [[53, -64, 16, 16, 21, 42], [-98, 0, -14, -87, -199, -398], [75, -74, 39, 36, 76, 152], [32, 90, 42, 12, 176, 352], [62, -48, 83, -23, 104, 178], [124, -96, 166, -46, 178, 208]])
|
python
|
"""
Multi-core and Distributed Sampling
===================================
The choice of the sampler determines in which way parallelization is performed.
See also the `explanation of the samplers <sampler.html>`_.
"""
from .singlecore import SingleCoreSampler
from .mapping import MappingSampler
from .multicore import MulticoreParticleParallelSampler
from .base import Sample, Sampler
from .dask_sampler import DaskDistributedSampler
from .multicore_evaluation_parallel import MulticoreEvalParallelSampler
from .redis_eps import (RedisEvalParallelSampler,
RedisEvalParallelSamplerServerStarter)
from .concurrent_future import ConcurrentFutureSampler
__all__ = ["Sample",
"Sampler",
"SingleCoreSampler",
"MulticoreParticleParallelSampler",
"MappingSampler",
"DaskDistributedSampler",
"RedisEvalParallelSampler",
"MulticoreEvalParallelSampler",
"RedisEvalParallelSamplerServerStarter",
"ConcurrentFutureSampler"]
|
python
|
from game_state import GameState
import arcade as ac
import math
class DrawingManager:
@classmethod
def tick(cls):
if "entities" in GameState.current_state:
for ent in GameState.current_state["entities"]:
if "pos" in ent and "rot" in ent and "drawing" in ent:
cls.draw(ent, ent["drawing"])
@classmethod
def draw(cls, ent, drawing):
if "type" in drawing:
if drawing["type"] == "filled_circle":
color = drawing["color"] if "color" in drawing else (0, 0, 0)
radius = drawing["radius"] if "radius" in drawing else 20
ac.draw_circle_filled(
ent["pos"][0], ent["pos"][1], radius, color
)
elif drawing["type"] == "particle":
color = drawing["color"] if "color" in drawing else (0, 0, 0)
radius = drawing["radius"]*(1-ent["elapsed"]/ent["lifespan"])
ac.draw_circle_filled(
ent["pos"][0], ent["pos"][1], radius, color
)
elif drawing["type"] == "filled_triangle":
color = drawing["color"] if "color" in drawing else (0, 0, 0)
radius = drawing["radius"]
x, y = ent["pos"][0], ent["pos"][1]
a1 = -math.pi/2 + ent["rot"] + math.pi/2
a2 = math.pi/6 + ent["rot"] + math.pi/2
a3 = 5*math.pi/6 + ent["rot"] + math.pi/2
p1 = [radius*math.cos(a1), radius*math.sin(a1)]
p2 = [radius*math.cos(a2), radius*math.sin(a2)]
p3 = [radius*math.cos(a3), radius*math.sin(a3)]
ac.draw_triangle_filled(
x + p1[0], y + p1[1], x + p2[0],
y + p2[1], x + p3[0], y + p3[1],
color
)
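# Hedged demo of the entity layout DrawingManager expects. Assigning
# GameState.current_state directly like this is an assumption for illustration.
if __name__ == "__main__":
    GameState.current_state = {
        "entities": [
            {"pos": [200, 150], "rot": 0.0,
             "drawing": {"type": "filled_circle", "radius": 15, "color": (200, 30, 30)}},
        ]
    }
    ac.open_window(400, 300, "DrawingManager demo")
    ac.start_render()
    DrawingManager.tick()
    ac.finish_render()
    ac.run()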
|
python
|
#!/usr/bin/python3
#
# Copyright 2012 Sonya Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wiod import config, common
from common.dbconnect import db
from common import imfdata, sqlhelper, utils
from common.plotutils import GNUPlot, ScatterPlot
import usa.config
def do_overview_table(sortby):
minyear = min(config.STUDY_YEARS)
maxyear = max(config.STUDY_YEARS)
data = {}
reverse_data = {}
for (country, name) in config.countries.items():
(env_i, gdp_i, intensity_i) = common.get_efficiency(
country, minyear, "env", "gdp")
(env_f, gdp_f, intensity_f) = common.get_efficiency(
country, maxyear, "env", "gdp")
if sortby == "growth":
pop_i = common.get_national_value(country, minyear, "pop")
pop_f = common.get_national_value(country, maxyear, "pop")
ppp_i = common.get_national_value(country, minyear, "ppppc")
ppp_f = common.get_national_value(country, maxyear, "ppppc")
percap_i = env_i / pop_i * 1000
percap_f = env_f / pop_f * 1000
growth = intensity_f - intensity_i
pgrowth = percap_f - percap_i
reverse_data[ppp_i] = name
data[name] = [
utils.add_commas(val).rjust(10) for val in (ppp_i, ppp_f)]
data[name] += [
"%.2f" % val for val in (intensity_i, intensity_f, growth,
percap_i, percap_f, pgrowth)]
else: # end year intensity
reverse_data[intensity_f] = name
data[name] = [
utils.add_commas(val).rjust(10)
for val in (gdp_i, gdp_f, env_i, env_f)]
data[name] += ["%.2f" % val for val in (intensity_i, intensity_f)]
for key in sorted(reverse_data.keys()):
country = reverse_data[key]
vals = data[country]
print(country.ljust(18) + " & " + " & ".join(vals) + " \\NN")
def do_import_table():
minyear = min(config.STUDY_YEARS)
maxyear = max(config.STUDY_YEARS)
sector = 'CONS_h'
fd = {}
fd_imports = {}
for year in (minyear, maxyear):
strings = {
"schema": config.WIOD_SCHEMA,
"year": year,
}
stmt = db.prepare(
"""SELECT country, sum(value)
FROM %(schema)s.niot_%(year)d
WHERE to_ind = $1
AND is_import = $2
GROUP BY country""" % strings)
fd[year] = {}
fd_imports[year] = {}
for (country, value) in stmt(sector, True):
fd_imports[year][country] = value
fd[year][country] = value
for (country, value) in stmt(sector, False):
fd[year][country] += value
shares = {}
for (country, total) in fd[maxyear].items():
share = fd_imports[maxyear][country] / total
shares[share] = country
sorted_shares = sorted(shares.keys(), reverse=True)
midpoint = int(len(sorted_shares) / 2)
for i in range(midpoint):
values = []
for index in (i, i + midpoint):
country = shares[sorted_shares[index]]
minval = imfdata.convert_to_2005(
fd_imports[minyear][country], country, minyear)
maxval = imfdata.convert_to_2005(
fd_imports[maxyear][country], country, maxyear)
minshare = fd_imports[minyear][country] / fd[minyear][country]
maxshare = fd_imports[maxyear][country] / fd[maxyear][country]
values += [
config.countries[country],
utils.add_commas(minval), utils.add_commas(maxval),
"%.1f" % (minshare * 100), "%.1f" % (maxshare * 100),
""] # want blank space between two halves
values.pop() # remove trailing empty string
print(" & ".join(values) + " \\NN")
def do_kyoto_table():
minyear = min(config.STUDY_YEARS)
maxyear = max(config.STUDY_YEARS)
minstrings = {
"schema": config.WIOD_SCHEMA,
"year": minyear,
"fd_sectors": sqlhelper.set_repr(config.default_fd_sectors),
}
maxstrings = minstrings.copy()
maxstrings["year"] = maxyear
envsql = """SELECT value FROM %(schema)s.env_%(year)d
WHERE country = $1 AND measurement = $2
AND industry = 'total'"""
envstmt_i = db.prepare(envsql % minstrings)
envstmt_f = db.prepare(envsql % maxstrings)
un_stmt = db.prepare(
"SELECT value FROM %s.mdg_emissions" % config.UN_SCHEMA +
" WHERE country = $1 AND year = $2")
data = {}
(eu_i, eu_f, un_eu_90, un_eu_i, un_eu_f) = (0, 0, 0, 0, 0)
for (country, name) in config.countries.items():
env_i = envstmt_i(country, "CO2")[0][0]
env_f = envstmt_f(country, "CO2")[0][0]
percent = (env_f - env_i) / env_i * 100
(un_env_90, un_env_91, un_env_i, un_env_f,
un_percent, un_percent_90) = \
(0, 0, 0, 0, None, None)
result = un_stmt(country, 1990)
if len(result):
un_env_90 = result[0][0]
else:
# use 1991 as a proxy for 1990 for some countries if applicable
# germany is the only annex b country that is applicable
# so hopefully it won't mess up eu15 calculation too much
result = un_stmt(country, 1991)
if len(result):
un_env_91 = result[0][0]
result = un_stmt(country, minyear)
if len(result):
un_env_i = result[0][0]
result = un_stmt(country, maxyear)
if len(result):
un_env_f = result[0][0]
if un_env_i and un_env_f:
un_percent = (un_env_f - un_env_i) / un_env_i * 100
if un_env_90 and un_env_f:
un_percent_90 = (un_env_f - un_env_90) / un_env_90 * 100
data[country] = (env_i, env_f, percent, un_percent, un_percent_90)
if country in config.eu15:
eu_i += env_i
eu_f += env_f
un_eu_i += un_env_i
un_eu_f += un_env_f
if un_env_90:
un_eu_90 += un_env_90
else:
un_eu_90 += un_env_91
eu_percent = (eu_f - eu_i) / eu_i * 100
un_eu_percent = (un_eu_f - un_eu_i) / un_eu_i * 100
un_eu_percent_90 = (un_eu_f - un_eu_90) / un_eu_90 * 100
print("%s & %s & %s & %d\\%% & %.1f\\%% & %.1f\\%% & %.1f \\NN" %
("EU-15".ljust(18),
utils.add_commas(eu_i).rjust(9),
utils.add_commas(eu_f).rjust(9),
-8, eu_percent, un_eu_percent, un_eu_percent_90))
for (target, countries) in config.annex_b_countries.items():
for country in countries:
vals = data[country]
if vals[4] is None:
percent_90 = ""
else:
percent_90 = "%.1f" % vals[4]
print("%s & %s & %s & %d\\%% & %.1f\\%% & %.1f & %s \\NN" %
(config.countries[country].ljust(18),
utils.add_commas(vals[0]).rjust(9),
utils.add_commas(vals[1]).rjust(9),
target, vals[2], vals[3], percent_90))
def do_plots():
for (name, measurements) in config.env_series_names.items():
data = {}
for year in config.STUDY_YEARS:
strings = {
"schema": config.WIOD_SCHEMA,
"year": year,
"fd_sectors": sqlhelper.set_repr(config.default_fd_sectors),
"measurements": sqlhelper.set_repr(measurements),
"nipa_schema": usa.config.NIPA_SCHEMA,
}
stmt = db.prepare(
"""SELECT a.country, a.series, b.gdp,
a.series / b.gdp as intensity
FROM (SELECT country, sum(value) as series
FROM %(schema)s.env_%(year)d
WHERE industry = 'total'
AND measurement in %(measurements)s
GROUP BY country) a,
(SELECT aa.country, sum(value) * deflator as gdp
FROM %(schema)s.indbyind_%(year)d aa,
(SELECT 100 / gdp as deflator
FROM %(nipa_schema)s.implicit_price_deflators
WHERE year = $1) bb
WHERE to_ind in %(fd_sectors)s
GROUP BY aa.country, deflator) b
WHERE a.country = b.country
AND a.series is not null
ORDER BY a.series / b.gdp""" % strings)
for row in stmt(year):
country = row[0]
intensity = row[3]
if country not in data:
data[country] = {}
data[country][year] = intensity
slopes = {}
for (country, country_data) in data.items():
n = len(country_data.keys())
if n < 2:
continue
sum_y = sum(country_data.values())
sum_x = sum(country_data.keys())
            # least-squares slope: (n*sum(xy) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2)
            slope = (n * sum([k * v for (k, v) in country_data.items()]) \
                    - sum_x * sum_y) / \
                    (n * sum([k * k for k in country_data.keys()]) - sum_x * sum_x)
slopes[country] = slope * 1000000
years = "%d-%d" % (config.STUDY_YEARS[0], config.STUDY_YEARS[-1])
i = 0
binsize = 8
plot = None
for (country, slope) in sorted(slopes.items(), key=lambda x: x[1]):
if i % binsize == 0:
if plot is not None:
plot.write_tables()
plot.generate_plot()
tier = i / binsize + 1
plot = GNUPlot("tier%d" % tier, "",
#"%s intensity from %s, tier %d" \
# % (name, years, tier),
"wiod-%s" % name.replace(" ", "-"))
plot.legend("width -5")
for year in config.STUDY_YEARS:
if year in data[country]:
plot.set_value(
"%s (%.2f)" % (config.countries[country], slope),
year,
data[country][year])
i += 1
if plot is not None:
plot.write_tables()
plot.generate_plot()
def do_kuznets_plot():
minyear = min(config.STUDY_YEARS)
maxyear = max(config.STUDY_YEARS)
plot = ScatterPlot("gdp vs emissions change", None, "wiod")
for country in config.countries:
gdp_pop = common.get_national_value(country, minyear, "ppppc")
(env_i, denom_i, intensity_i) = common.get_efficiency(
country, minyear, "env", "gdp")
(env_f, denom_f, intensity_f) = common.get_efficiency(
country, maxyear, "env", "gdp")
# numbers are just for sorting which goes on x axis
plot.set_value("1 ppp per capita", country, gdp_pop)
plot.set_value("2 emiss change", country, intensity_f - intensity_i)
plot.write_tables()
plot.generate_plot()
for year in (minyear, maxyear):
plot = ScatterPlot("gdp vs emissions %d" % year, None, "wiod")
for country in config.countries:
gdp_pop = common.get_national_value(country, year, "ppppc")
env_pop = common.get_efficiency(country, year, "env", "gdp")
plot.set_value("1 gdp per capita", country, gdp_pop)
plot.set_value("2 emissions per capita", country, env_pop[2])
plot.write_tables()
plot.generate_plot()
#do_overview_table()
do_overview_table("growth")
#do_import_table()
#do_kyoto_table()
#do_plots()
#do_kuznets_plot()
|
python
|
import os, sys
# add NADE to path
nade_path = os.path.join(os.path.abspath('.'), 'bench_models', 'nade')
sys.path.append('./bench_models/nade/')
|
python
|
"""
Part of BME595 project
Program:
Show statistics of dataset
"""
from collections import Counter
from data import data_loader, _preprocess_dataset_small, _preprocess_dataset_large
def show_distribution(max_len=60, deduplicate=False):
small_sentences, small_polarities, purposes, _ = _preprocess_dataset_small(max_len, deduplicate=deduplicate)
large_sentences, large_polarities, polarity_to_idx = _preprocess_dataset_large(max_len, deduplicate=deduplicate)
purpose_size = len(small_sentences)
polarity_size = len(small_sentences) + len(large_sentences)
print('\nsmall dataset size:', len(small_sentences))
print('large dataset size:', len(large_sentences))
print('purpose data size:', purpose_size)
print('polarity data size (merge small and large):', polarity_size)
print('\npurpose distribution:')
purpose_to_idx = {'Criticizing': 0, 'Comparison': 1, 'Use': 2,
'Substantiating': 3, 'Basis': 4, 'Neutral': 5}
ctr = Counter(purposes)
for purpose, idx in purpose_to_idx.items():
print(purpose.ljust(30), ctr[idx]/purpose_size)
print('\npolarity distribution:')
polarity_to_idx = {'Neutral': 0, 'Positive': 1, 'Negative': 2}
ctr = Counter(small_polarities+large_polarities)
for polarity, idx in polarity_to_idx.items():
print(polarity.ljust(30), ctr[idx]/polarity_size)
if __name__ == '__main__':
show_distribution()
|
python
|
import re
from behave import given, when, then
from django.core import mail
from {{ cookiecutter.project_slug }}.apps.myauth.tests.factories import VerifiedUserFactory
from {{ cookiecutter.project_slug }}.apps.profile.models import Profile
from features.hints import BehaveContext
@given("a registered user")
def step_impl(context: BehaveContext):
context.user = VerifiedUserFactory()
@when("they submit a password reset request")
def step_impl(context: BehaveContext):
context.response = context.test.client.post("/auth/password/reset/", data={
"email": context.user.email
})
@when("the user logs in")
def step_impl(context: BehaveContext):
context.response = context.test.client.post("/auth/login/", data={"email": "[email protected]", "password": "qwertyuiop"})
@then("they are sent a {email_type} email")
def step_impl(context: BehaveContext, email_type):
"""
:type context: behave.runner.Context
"""
response = context.response
context.test.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
if email_type == "password reset":
subject_substring = "Password Reset"
action_url_regex = r"http[^ ]*/auth/password/reset/[^ ]*/"
elif email_type == "email confirm":
subject_substring = "Confirm Your E-mail"
action_url_regex = r"http[^ ]*/auth/email/confirm/[^ ]*/"
else:
raise NotImplementedError(f"{email_type}")
context.test.assertIn(subject_substring, email.subject)
action_url_search = re.search(action_url_regex, email.body)
    context.test.assertTrue(action_url_search, f"Expected to find link matching {action_url_regex} in email body: {email.body}")
context.action_url = action_url_search[0]
context.action_url_type = email_type
@then(u"the password reset link resets their password")
def step_impl(context: BehaveContext):
context.test.assertEqual(context.action_url_type, "password reset")
response = context.test.client.get(context.action_url)
context.test.assertEqual(response.status_code, 302, "First redirect to password form page")
password_page_url = response["location"]
response = context.test.client.get(password_page_url)
context.test.assertEqual(response.status_code, 200, "Form page load")
response = context.test.client.post(password_page_url, data={
"password1": "coco2017",
"password2": "coco2017"
})
context.test.assertRedirects(response, "/auth/password/reset/key/done/")
@then(u"the email confirm link confirms their email")
def step_impl(context: BehaveContext):
context.test.assertEqual(context.action_url_type, "email confirm")
response = context.test.client.get(context.action_url)
context.test.assertRedirects(response, "/profile/")
@then(u"the user is {neg} redirected to {url}")
def step_impl(context: BehaveContext, neg: str, url: str):
context.test.assertEqual(context.response.status_code, 302, "The user should be redirected")
if neg == "not":
        context.test.assertNotEqual(context.response.url, url, f"The user should not be redirected to {url}")
    elif neg == "indeed":
        context.test.assertEqual(context.response.url, url, f"The user should be redirected to {url}")
|
python
|
from mle_monitor import MLEProtocol
meta_data = {
"purpose": "Test MLEProtocol",
"project_name": "MNIST",
"exec_resource": "local",
"experiment_dir": "log_dir",
"experiment_type": "hyperparameter-search",
"base_fname": "main.py",
"config_fname": "tests/fixtures/base_config.json",
"num_seeds": 5,
"num_total_jobs": 10,
"num_jobs_per_batch": 5,
"num_job_batches": 2,
"time_per_job": "00:05:00", # days-hours-minutes
"num_cpus": 2,
"num_gpus": 1,
}
def test_add_protocol():
# Add experiment to new protocol and add data
protocol = MLEProtocol(protocol_fname="mle_protocol.db")
e_id = protocol.add(meta_data, save=False)
proto_data = protocol.get(e_id)
for k, v in meta_data.items():
assert proto_data[k] == v
return
def test_load_protocol():
# Reload database - assert correctness of data
protocol = MLEProtocol(protocol_fname="tests/fixtures/mle_protocol_test.db")
last_data = protocol.get()
for k, v in meta_data.items():
if k not in ["config_fname", "purpose"]:
assert last_data[k] == v
# Check adding of new data
e_id = protocol.add(meta_data, save=False)
proto_data = protocol.get(e_id)
for k, v in meta_data.items():
assert proto_data[k] == v
return
def test_update_delete_abort_protocol():
# Change some entry of DB store and check it
protocol = MLEProtocol(protocol_fname="mle_protocol.db")
e_id = protocol.add(meta_data, save=False)
# Update some element in the database
protocol.update(e_id, "exec_resource", "slurm-cluster", save=False)
assert protocol.get(e_id, "exec_resource") == "slurm-cluster"
# Abort the experiment - changes status
protocol.abort(e_id, save=False)
assert protocol.status(e_id) == "aborted"
return
def test_monitor_protocol():
# Check that all required keys are in collected data
protocol = MLEProtocol(protocol_fname="mle_protocol.db")
_ = protocol.add(meta_data, save=False)
# Get the monitoring data - used later in dashboard
data = protocol.monitor()
total_keys = [
"total",
"run",
"done",
"aborted",
"sge",
"slurm",
"gcp",
"local",
"report_gen",
"gcs_stored",
"retrieved",
]
for k in total_keys:
assert k in data["total_data"].keys()
last_keys = ["e_id", "e_dir", "e_type", "e_script", "e_config", "report_gen"]
for k in last_keys:
assert k in data["last_data"].keys()
time_keys = [
"total_jobs",
"total_batches",
"jobs_per_batch",
"time_per_batch",
"start_time",
"stop_time",
"duration",
]
for k in time_keys:
assert k in data["time_data"].keys()
return
|
python
|
"""
Module: 'sys' on esp32 1.9.4
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.9.4', version='v1.9.4 on 2018-05-11', machine='ESP32 module with ESP32')
# Stubber: 1.2.0
argv = None
byteorder = 'little'
def exit():
pass
implementation = None
maxsize = 2147483647
modules = None
path = None
platform = 'esp32'
def print_exception():
pass
stderr = None
stdin = None
stdout = None
version = '3.4.0'
version_info = None
|
python
|
import pickle
filename = './data/29_header_payload_all.traffic'
with open(filename, 'r') as f:
traffic = f.readlines()
with open('./data/29_payload_all.traffic','w') as f:
for i in range(len(traffic)):
s_traffic = traffic[i].split()
if s_traffic[10] == '11':
payload = s_traffic[0] + ' ' + ' '.join(s_traffic[29:])
else:
payload = s_traffic[0] + ' ' + ' '.join(s_traffic[41:])
f.write(payload + '\n')
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: fmgr_secprof_av
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manage security profile
description:
- Manage security profile groups for FortiManager objects
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
scan_mode:
description:
- Choose between full scan mode and quick scan mode.
required: false
choices:
- quick
- full
replacemsg_group:
description:
- Replacement message group customized for this profile.
required: false
name:
description:
- Profile name.
required: false
mobile_malware_db:
description:
- Enable/disable using the mobile malware signature database.
required: false
choices:
- disable
- enable
inspection_mode:
description:
- Inspection mode.
required: false
choices:
- proxy
- flow-based
ftgd_analytics:
description:
- Settings to control which files are uploaded to FortiSandbox.
required: false
choices:
- disable
- suspicious
- everything
extended_log:
description:
- Enable/disable extended logging for antivirus.
required: false
choices:
- disable
- enable
comment:
description:
- Comment.
required: false
av_virus_log:
description:
- Enable/disable AntiVirus logging.
required: false
choices:
- disable
- enable
av_block_log:
description:
- Enable/disable logging for AntiVirus file blocking.
required: false
choices:
- disable
- enable
analytics_wl_filetype:
description:
- Do not submit files matching this DLP file-pattern to FortiSandbox.
required: false
analytics_max_upload:
description:
- Maximum size of files that can be uploaded to FortiSandbox (1 - 395 MBytes, default = 10).
required: false
analytics_db:
description:
- Enable/disable using the FortiSandbox signature database to supplement the AV signature databases.
required: false
choices:
- disable
- enable
analytics_bl_filetype:
description:
- Only submit files matching this DLP file-pattern to FortiSandbox.
required: false
content_disarm:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
content_disarm_cover_page:
description:
- Enable/disable inserting a cover page into the disarmed document.
required: false
choices:
- disable
- enable
content_disarm_detect_only:
description:
- Enable/disable only detect disarmable files, do not alter content.
required: false
choices:
- disable
- enable
content_disarm_office_embed:
description:
- Enable/disable stripping of embedded objects in Microsoft Office documents.
required: false
choices:
- disable
- enable
content_disarm_office_hylink:
description:
- Enable/disable stripping of hyperlinks in Microsoft Office documents.
required: false
choices:
- disable
- enable
content_disarm_office_linked:
description:
- Enable/disable stripping of linked objects in Microsoft Office documents.
required: false
choices:
- disable
- enable
content_disarm_office_macro:
description:
- Enable/disable stripping of macros in Microsoft Office documents.
required: false
choices:
- disable
- enable
content_disarm_original_file_destination:
description:
- Destination to send original file if active content is removed.
required: false
choices:
- fortisandbox
- quarantine
- discard
content_disarm_pdf_act_form:
description:
- Enable/disable stripping of actions that submit data to other targets in PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_act_gotor:
description:
- Enable/disable stripping of links to other PDFs in PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_act_java:
description:
- Enable/disable stripping of actions that execute JavaScript code in PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_act_launch:
description:
- Enable/disable stripping of links to external applications in PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_act_movie:
description:
- Enable/disable stripping of embedded movies in PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_act_sound:
description:
- Enable/disable stripping of embedded sound files in PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_embedfile:
description:
- Enable/disable stripping of embedded files in PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_hyperlink:
description:
- Enable/disable stripping of hyperlinks from PDF documents.
required: false
choices:
- disable
- enable
content_disarm_pdf_javacode:
description:
- Enable/disable stripping of JavaScript code in PDF documents.
required: false
choices:
- disable
- enable
ftp:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ftp_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
ftp_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
ftp_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
ftp_options:
description:
- Enable/disable FTP AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
ftp_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
http:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
http_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
http_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
http_content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
required: false
choices:
- disable
- enable
http_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
http_options:
description:
- Enable/disable HTTP AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
http_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
imap:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
imap_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
imap_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
imap_content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
required: false
choices:
- disable
- enable
imap_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
imap_executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
required: false
choices:
- default
- virus
imap_options:
description:
- Enable/disable IMAP AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
imap_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
mapi:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
mapi_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
mapi_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
mapi_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
mapi_executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
required: false
choices:
- default
- virus
mapi_options:
description:
- Enable/disable MAPI AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
mapi_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
nac_quar:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
nac_quar_expiry:
description:
- Duration of quarantine.
required: false
nac_quar_infected:
description:
- Enable/Disable quarantining infected hosts to the banned user list.
required: false
choices:
- none
- quar-src-ip
nac_quar_log:
description:
- Enable/disable AntiVirus quarantine logging.
required: false
choices:
- disable
- enable
nntp:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
nntp_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
nntp_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
nntp_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
nntp_options:
description:
- Enable/disable NNTP AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
nntp_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
pop3:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
pop3_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
pop3_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
pop3_content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
required: false
choices:
- disable
- enable
pop3_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
pop3_executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
required: false
choices:
- default
- virus
pop3_options:
description:
- Enable/disable POP3 AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
pop3_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
smb:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
smb_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
smb_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
smb_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
smb_options:
description:
- Enable/disable SMB AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
smb_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
smtp:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
smtp_archive_block:
description:
- Select the archive types to block.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
smtp_archive_log:
description:
- Select the archive types to log.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- encrypted
- corrupted
- multipart
- nested
- mailbomb
- unhandled
- partiallycorrupted
- fileslimit
- timeout
smtp_content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
required: false
choices:
- disable
- enable
smtp_emulator:
description:
- Enable/disable the virus emulator.
required: false
choices:
- disable
- enable
smtp_executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
required: false
choices:
- default
- virus
smtp_options:
description:
- Enable/disable SMTP AntiVirus scanning, monitoring, and quarantine.
- FLAG Based Options. Specify multiple in list form.
required: false
choices:
- scan
- quarantine
- avmonitor
smtp_outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
required: false
choices:
- disabled
- files
- full-archive
'''
EXAMPLES = '''
- name: DELETE Profile
community.network.fmgr_secprof_av:
name: "Ansible_AV_Profile"
mode: "delete"
- name: CREATE Profile
community.network.fmgr_secprof_av:
name: "Ansible_AV_Profile"
comment: "Created by Ansible Module TEST"
mode: "set"
inspection_mode: "proxy"
ftgd_analytics: "everything"
av_block_log: "enable"
av_virus_log: "enable"
scan_mode: "full"
mobile_malware_db: "enable"
ftp_archive_block: "encrypted"
ftp_outbreak_prevention: "files"
ftp_archive_log: "timeout"
ftp_emulator: "disable"
ftp_options: "scan"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.fortimanager import FortiManagerHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FMGBaseException
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FMGRCommon
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import prepare_dict
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import scrub_dict
###############
# START METHODS
###############
def fmgr_antivirus_profile_modify(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
mode = paramgram["mode"]
adom = paramgram["adom"]
response = DEFAULT_RESULT_OBJ
# EVAL THE MODE PARAMETER FOR SET OR ADD
if mode in ['set', 'add', 'update']:
url = '/pm/config/adom/{adom}/obj/antivirus/profile'.format(adom=adom)
datagram = scrub_dict(prepare_dict(paramgram))
# EVAL THE MODE PARAMETER FOR DELETE
else:
# SET THE CORRECT URL FOR DELETE
url = '/pm/config/adom/{adom}/obj/antivirus/profile/{name}'.format(adom=adom, name=paramgram["name"])
datagram = {}
response = fmgr.process_request(url, datagram, paramgram["mode"])
return response
#############
# END METHODS
#############
def main():
argument_spec = dict(
adom=dict(required=False, type="str", default="root"),
mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
scan_mode=dict(required=False, type="str", choices=["quick", "full"]),
replacemsg_group=dict(required=False, type="dict"),
name=dict(required=False, type="str"),
mobile_malware_db=dict(required=False, type="str", choices=["disable", "enable"]),
inspection_mode=dict(required=False, type="str", choices=["proxy", "flow-based"]),
ftgd_analytics=dict(required=False, type="str", choices=["disable", "suspicious", "everything"]),
extended_log=dict(required=False, type="str", choices=["disable", "enable"]),
comment=dict(required=False, type="str"),
av_virus_log=dict(required=False, type="str", choices=["disable", "enable"]),
av_block_log=dict(required=False, type="str", choices=["disable", "enable"]),
analytics_wl_filetype=dict(required=False, type="dict"),
analytics_max_upload=dict(required=False, type="int"),
analytics_db=dict(required=False, type="str", choices=["disable", "enable"]),
analytics_bl_filetype=dict(required=False, type="dict"),
content_disarm=dict(required=False, type="list"),
content_disarm_cover_page=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_detect_only=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_office_embed=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_office_hylink=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_office_linked=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_office_macro=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_original_file_destination=dict(required=False, type="str", choices=["fortisandbox",
"quarantine",
"discard"]),
content_disarm_pdf_act_form=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_act_gotor=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_act_java=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_act_launch=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_act_movie=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_act_sound=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_embedfile=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_hyperlink=dict(required=False, type="str", choices=["disable", "enable"]),
content_disarm_pdf_javacode=dict(required=False, type="str", choices=["disable", "enable"]),
ftp=dict(required=False, type="list"),
ftp_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
ftp_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
ftp_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
ftp_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
ftp_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
http=dict(required=False, type="list"),
http_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
http_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
http_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]),
http_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
http_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
http_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
imap=dict(required=False, type="list"),
imap_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
imap_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
imap_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]),
imap_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
imap_executables=dict(required=False, type="str", choices=["default", "virus"]),
imap_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
imap_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
mapi=dict(required=False, type="list"),
mapi_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
mapi_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
mapi_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
mapi_executables=dict(required=False, type="str", choices=["default", "virus"]),
mapi_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
mapi_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
nac_quar=dict(required=False, type="list"),
nac_quar_expiry=dict(required=False, type="str"),
nac_quar_infected=dict(required=False, type="str", choices=["none", "quar-src-ip"]),
nac_quar_log=dict(required=False, type="str", choices=["disable", "enable"]),
nntp=dict(required=False, type="list"),
nntp_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
nntp_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
nntp_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
nntp_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
nntp_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
pop3=dict(required=False, type="list"),
pop3_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
pop3_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
pop3_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]),
pop3_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
pop3_executables=dict(required=False, type="str", choices=["default", "virus"]),
pop3_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
pop3_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
smb=dict(required=False, type="list"),
smb_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
smb_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
smb_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
smb_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
smb_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
smtp=dict(required=False, type="list"),
smtp_archive_block=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
smtp_archive_log=dict(required=False, type="str", choices=["encrypted",
"corrupted",
"multipart",
"nested",
"mailbomb",
"unhandled",
"partiallycorrupted",
"fileslimit",
"timeout"]),
smtp_content_disarm=dict(required=False, type="str", choices=["disable", "enable"]),
smtp_emulator=dict(required=False, type="str", choices=["disable", "enable"]),
smtp_executables=dict(required=False, type="str", choices=["default", "virus"]),
smtp_options=dict(required=False, type="str", choices=["scan", "quarantine", "avmonitor"]),
smtp_outbreak_prevention=dict(required=False, type="str", choices=["disabled", "files", "full-archive"]),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
# MODULE PARAMGRAM
paramgram = {
"mode": module.params["mode"],
"adom": module.params["adom"],
"scan-mode": module.params["scan_mode"],
"replacemsg-group": module.params["replacemsg_group"],
"name": module.params["name"],
"mobile-malware-db": module.params["mobile_malware_db"],
"inspection-mode": module.params["inspection_mode"],
"ftgd-analytics": module.params["ftgd_analytics"],
"extended-log": module.params["extended_log"],
"comment": module.params["comment"],
"av-virus-log": module.params["av_virus_log"],
"av-block-log": module.params["av_block_log"],
"analytics-wl-filetype": module.params["analytics_wl_filetype"],
"analytics-max-upload": module.params["analytics_max_upload"],
"analytics-db": module.params["analytics_db"],
"analytics-bl-filetype": module.params["analytics_bl_filetype"],
"content-disarm": {
"cover-page": module.params["content_disarm_cover_page"],
"detect-only": module.params["content_disarm_detect_only"],
"office-embed": module.params["content_disarm_office_embed"],
"office-hylink": module.params["content_disarm_office_hylink"],
"office-linked": module.params["content_disarm_office_linked"],
"office-macro": module.params["content_disarm_office_macro"],
"original-file-destination": module.params["content_disarm_original_file_destination"],
"pdf-act-form": module.params["content_disarm_pdf_act_form"],
"pdf-act-gotor": module.params["content_disarm_pdf_act_gotor"],
"pdf-act-java": module.params["content_disarm_pdf_act_java"],
"pdf-act-launch": module.params["content_disarm_pdf_act_launch"],
"pdf-act-movie": module.params["content_disarm_pdf_act_movie"],
"pdf-act-sound": module.params["content_disarm_pdf_act_sound"],
"pdf-embedfile": module.params["content_disarm_pdf_embedfile"],
"pdf-hyperlink": module.params["content_disarm_pdf_hyperlink"],
"pdf-javacode": module.params["content_disarm_pdf_javacode"],
},
"ftp": {
"archive-block": module.params["ftp_archive_block"],
"archive-log": module.params["ftp_archive_log"],
"emulator": module.params["ftp_emulator"],
"options": module.params["ftp_options"],
"outbreak-prevention": module.params["ftp_outbreak_prevention"],
},
"http": {
"archive-block": module.params["http_archive_block"],
"archive-log": module.params["http_archive_log"],
"content-disarm": module.params["http_content_disarm"],
"emulator": module.params["http_emulator"],
"options": module.params["http_options"],
"outbreak-prevention": module.params["http_outbreak_prevention"],
},
"imap": {
"archive-block": module.params["imap_archive_block"],
"archive-log": module.params["imap_archive_log"],
"content-disarm": module.params["imap_content_disarm"],
"emulator": module.params["imap_emulator"],
"executables": module.params["imap_executables"],
"options": module.params["imap_options"],
"outbreak-prevention": module.params["imap_outbreak_prevention"],
},
"mapi": {
"archive-block": module.params["mapi_archive_block"],
"archive-log": module.params["mapi_archive_log"],
"emulator": module.params["mapi_emulator"],
"executables": module.params["mapi_executables"],
"options": module.params["mapi_options"],
"outbreak-prevention": module.params["mapi_outbreak_prevention"],
},
"nac-quar": {
"expiry": module.params["nac_quar_expiry"],
"infected": module.params["nac_quar_infected"],
"log": module.params["nac_quar_log"],
},
"nntp": {
"archive-block": module.params["nntp_archive_block"],
"archive-log": module.params["nntp_archive_log"],
"emulator": module.params["nntp_emulator"],
"options": module.params["nntp_options"],
"outbreak-prevention": module.params["nntp_outbreak_prevention"],
},
"pop3": {
"archive-block": module.params["pop3_archive_block"],
"archive-log": module.params["pop3_archive_log"],
"content-disarm": module.params["pop3_content_disarm"],
"emulator": module.params["pop3_emulator"],
"executables": module.params["pop3_executables"],
"options": module.params["pop3_options"],
"outbreak-prevention": module.params["pop3_outbreak_prevention"],
},
"smb": {
"archive-block": module.params["smb_archive_block"],
"archive-log": module.params["smb_archive_log"],
"emulator": module.params["smb_emulator"],
"options": module.params["smb_options"],
"outbreak-prevention": module.params["smb_outbreak_prevention"],
},
"smtp": {
"archive-block": module.params["smtp_archive_block"],
"archive-log": module.params["smtp_archive_log"],
"content-disarm": module.params["smtp_content_disarm"],
"emulator": module.params["smtp_emulator"],
"executables": module.params["smtp_executables"],
"options": module.params["smtp_options"],
"outbreak-prevention": module.params["smtp_outbreak_prevention"],
}
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
list_overrides = ["content-disarm", "ftp", "http", "imap", "mapi", "nac-quar", "nntp", "pop3", "smb", "smtp"]
paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
paramgram=paramgram, module=module)
module.paramgram = paramgram
results = DEFAULT_RESULT_OBJ
try:
results = fmgr_antivirus_profile_modify(fmgr, paramgram)
fmgr.govern_response(module=module, results=results,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
|
python
|
import falcon
from chromarestserver.resource import (
ChromaSdkResource,
SessionRootResource,
HeartBeatResource,
KeyboardResource
)
from chromarestserver.model import (
KeyboardModel,
SessionModel
)
app = falcon.API()
usb_keyboard = KeyboardModel()
session_model = SessionModel()
chromasdk = ChromaSdkResource(session=session_model)
# keep the model and the resource under separate names: the resources below
# expect the SessionModel, not the SessionRootResource
session_root = SessionRootResource(session=session_model)
heartbeat = HeartBeatResource(session=session_model)
keyboard = KeyboardResource(session=session_model, usb=usb_keyboard)
app.add_route('/razer/chromasdk', chromasdk)
app.add_route('/{session_id}/chromasdk', session_root)
app.add_route('/{session_id}/chromasdk/heartbeat', heartbeat)
app.add_route('/{session_id}/chromasdk/keyboard', keyboard)
|
python
|
import os
import json
from typing import Optional
from requests import post,get
from fastapi import FastAPI
app = FastAPI()
ha_ip = os.environ['HA_IP']
ha_port = os.environ['HA_PORT']
ha_entity = os.environ['HA_ENTITY'] #must be a sensor
ha_token = os.environ['HA_TOKEN']
ha_friendly_name = os.environ['HA_FRIENDLY_NAME']
ha_domain = ha_entity.split('.')[0]
if ha_domain.lower() != "sensor":
    print("Specify a sensor as HA_ENTITY")
    exit()
base_url = "http://" + ha_ip + ":" + ha_port + "/api/states/" + ha_entity
headers = {
    "Authorization": "Bearer " + ha_token,
    "Content-Type": "application/json"
}
def get_current_value():
cur_val = json.loads(get(base_url, headers=headers).text)
return cur_val["attributes"]["status"], cur_val["attributes"]["activity"]
@app.post("/status/{status}")
def catch_status(status:str):
    _, activity = get_current_value()
payload = {"state":status,"attributes":{"activity":activity,"status":status,"friendly_name":ha_friendly_name,"unit_of_measurement":""}}
print(payload)
post(base_url,headers=headers,json=payload)
@app.post("/activity/{activity}")
def catch_activity(activity:str):
    status, _ = get_current_value()
payload = {"state":status,"attributes":{"activity":activity,"status":status,"friendly_name":ha_friendly_name,"unit_of_measurement":""}}
print(payload)
post(base_url,headers=headers,json=payload)
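# Illustrative usage (host and port are assumptions, not taken from this file):
#   curl -X POST http://localhost:8000/status/online
#   curl -X POST http://localhost:8000/activity/coding
# Each call reads the current "status"/"activity" attributes from the Home
# Assistant sensor and posts back an updated state carrying the new value.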
|
python
|
# ISC
#
# Copyright (c) 2022 Adir Vered <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
# OR PERFORMANCE OF THIS SOFTWARE.
address = [
# GPIO_A/BOOT/C/DIF/H/X/Y/Z/...:
    # we need to subtract 0x400 from the original offset and then add 0x100
    # to all of the register offsets mentioned above in this region.
    # per the datasheet the offset is multiplied by 4, and that is how
    # each register offset below makes up for the missing 0x400, got it?!
0xC8834000,
# GPIO_AO:
0xC8100000
]
offsets = {
# GPIO_AO
"AO" : {
"O_EN" : 0x09,
"O" : 0x09,
"I" : 0x0A,
"UP" : 0x0B,
"UP_EN" : 0x0B,
},
# GPIO_Z
"Z" : {
"O_EN" : 0x115,
"O" : 0x116,
"I" : 0x117,
"UP" : 0x13D,
"UP_EN" : 0x14B,
},
# GPIO_CLK
"CLK" : {
"O_EN" : 0x115,
"O" : 0x116,
"I" : 0x117,
"UP" : 0x13D,
"UP_EN" : 0x14B,
},
# GPIO_CARD
"CARD" : {
"O_EN" : 0x112,
"O" : 0x113,
"I" : 0x114,
"UP" : 0x13C,
"UP_EN" : 0x14A,
},
# GPIO_BOOT
"BOOT" : {
"O_EN" : 0x112,
"O" : 0x113,
"I" : 0x114,
"UP" : 0x13C,
"UP_EN" : 0x14A,
},
# GPIO_H
"H" : {
"O_EN" : 0x10F,
"O" : 0x110,
"I" : 0x111,
"UP" : 0x13B,
"UP_EN" : 0x149,
},
# GPIO_Y
"Y" : {
"O_EN" : 0x10F,
"O" : 0x110,
"I" : 0x111,
"UP" : 0x13B,
"UP_EN" : 0x149,
},
# GPIO_DV
"DV" : {
"O_EN" : 0x10C,
"O" : 0x10D,
"I" : 0x10E,
"UP" : 0x13A,
"UP_EN" : 0x148,
},
# GPIO_X
"X" : {
"O_EN" : 0x118,
"O" : 0x119,
"I" : 0x11A,
"UP" : 0x13E,
"UP_EN" : 0x14C,
},
}
presets = {
    # offsets' pre-shift bit (per bank):
"AO" : {
"O" : 16,
"UP" : 16
},
"Z" : {
"O_EN" : 14,
"O" : 14,
"I" : 14,
"UP" : 14,
"UP_EN" : 14
},
"CLK" : {
"O_EN" : 28,
"O" : 28,
"I" : 28,
"UP" : 28,
"UP_EN" : 28
},
"CARD" : {
"O_EN" : 20,
"O" : 20,
"I" : 20,
"UP" : 20,
"UP_EN" : 20
},
"H" : {
"O_EN" : 20,
"O" : 20,
"I" : 20,
"UP" : 20,
"UP_EN" : 20
}
}
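# Illustrative helper (a sketch, not part of the original tables): assuming the
# datasheet convention described above, a register's absolute address is the
# bank's base address plus the (already adjusted) word offset times 4.
# The function name and its use of the tables above are this sketch's own.
def register_address(bank, register):
    base = address[1] if bank == "AO" else address[0]
    return base + offsets[bank][register] * 4
# e.g. register_address("AO", "I") == 0xC8100000 + 0x0A * 4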
|
python
|
from huobi.client.trade import TradeClient
from huobi.constant import *
from huobi.utils import *
trade_client = TradeClient(api_key=g_api_key, secret_key=g_secret_key)
symbol_test = "eosusdt"
i = 0
n = 3
order_id_list = []
while i < n:
order_id = trade_client.create_order(
symbol=symbol_test,
account_id=g_account_id,
order_type=OrderType.BUY_LIMIT,
source=OrderSource.API,
amount=18.0,
price=0.292,
)
LogInfo.output("created order id : {id}".format(id=order_id))
order_id_list.append(order_id)
i = i + 1
result = trade_client.cancel_orders(symbol_test, order_id_list)
result.print_object()
|
python
|
from Utils import *
'''
On Adamson data
'''
Data_dir = "/home/luodongyang/SCData/Perturb/Adamson/"
#------------------------------------------------------------------------#
# Read Data
## Matrix
mat=mmread(os.path.join(Data_dir, "GSM2406677_10X005_matrix.mtx.txt"))
cell_ident = pd.read_csv(os.path.join(Data_dir, "GSM2406677_10X005_cell_identities.csv"))
genes_path = os.path.join(Data_dir, "GSM2406677_10X005_genes.tsv")
barcodes_path = os.path.join(Data_dir, "GSM2406677_10X005_barcodes.tsv")
gene_names = pd.read_table(genes_path, sep='\t', skiprows=0, header=None)
gene_names = gene_names.iloc[:,1]
barcodes = pd.read_table(barcodes_path, sep='\t', skiprows=0, header=None)
barcodes = list(barcodes.iloc[:,0])
#------------------------------------------------------------------------#
# Processing
## conversion & Filtering
guide_summ = guide_summary(cell_ident) # Guide summary
selected_guides = list(guide_summ['GuideName'][guide_summ['Count'] > 100])
temp_idx = []
for ll in range(len(cell_ident)):
if cell_ident['guide identity'][ll] in selected_guides:
temp_idx.append(ll)
cell_ident = cell_ident.loc[temp_idx]
Y = pd.DataFrame(mat.toarray())
Y.index = gene_names
Y.columns = barcodes
[filtered_genes,filtered_cells] = filter_Gene_Cell(Y, gene_thresh=10, cell_thresh=1000) # filtering
selected_cells = list(set(filtered_cells) & set(cell_ident['cell BC']))
cell_ident.index = cell_ident['cell BC']
cell_ident = cell_ident.loc[selected_cells]
Y = Y.loc[filtered_genes, selected_cells]
Y_log = pd.DataFrame(np.log2(tp10k_transform(Y)+1))
guides = cell_ident['guide identity']
#------------------------------------------------------------------------#
# PCA
[Ufb,Sfb,Vfb,PCscore] = fb_pca(Y_log, n_components=50, center=True, scale=False)
## PC variance explained
plt.plot(Sfb, label='PC Variance Explained')
plt.savefig('./Figs/PC_eigens_Adamson.jpg', dpi=300)
plt.close()
## Use PC scores for plotting
plot_pca = PCscore[['PC1','PC2']]
plot_pca['Guides'] = guides
sns.lmplot('PC1','PC2',data=plot_pca,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/PCA_Adamson.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# t-SNE
tsne_model = manifold.TSNE(n_components=2, perplexity=20, verbose=2,init='pca',n_iter_without_progress=10000,min_grad_norm=0)
T_sne = tsne_model.fit_transform(PCscore.iloc[:,range(10)])
T_sne = pd.DataFrame(T_sne)
plot_tsne = T_sne.copy()
plot_tsne.columns = ['tSNE-1', 'tSNE-2']
plot_tsne.index = selected_cells
plot_tsne['Guides'] = guides
sns.lmplot('tSNE-1','tSNE-2',data=plot_tsne,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/tSNE_Adamson.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# LASSO
X = pd.DataFrame(Vfb.transpose())
X.index = PCscore.index
X.columns = PCscore.columns
guides_dummy = pd.get_dummies(guides)
lasso_model = linear_model.Lasso(alpha=0.1, precompute=True)
lasso_model.fit(PCscore, guides_dummy)
#------------------------------------------------------------------------#
# Random Forest
guides_dummy = pd.get_dummies(guides)
RF_model = RandomForestClassifier(n_estimators=100,n_jobs=-1,oob_score=True,class_weight='balanced')
RF_model.fit(PCscore, guides_dummy)
PC_rank = pd.DataFrame({'PCs':['PC'+str(x+1) for x in range(50)],
'Importance':RF_model.feature_importances_})
PC_rank = PC_rank.loc[np.argsort(-PC_rank['Importance'], )]
PC_rank.index = range(1,51)
plt.plot(PC_rank['Importance'], label='PC Importance')
plt.savefig('./Figs/PC_importance_Adamson.jpg', dpi=300)
plt.close()
PC_rank.to_csv('./Figs/PC_importance_Adamson.csv')
#------------------------------------------------------------------------#
# PCA with important PCs
selected_PCs = list(PC_rank['PCs'][0:30]) # Previous = 10
New_feature_Y = PCscore[selected_PCs].transpose()
[Unew,Snew,Vnew,PCscore_new] = fb_pca(New_feature_Y, n_components=10, center=True, scale=False)
plot_pca = PCscore_new[['PC1','PC2']]
plot_pca['Guides'] = guides
sns.lmplot('PC1','PC2',data=plot_pca,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/PCA_new_Adamson.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# tSNE with important PCs
tsne_model = manifold.TSNE(n_components=2, perplexity=20, verbose=2,init='pca',n_iter_without_progress=10000,min_grad_norm=0)
T_sne = tsne_model.fit_transform(PCscore[selected_PCs])
T_sne = pd.DataFrame(T_sne)
plot_tsne = T_sne.copy()
plot_tsne.columns = ['tSNE-1', 'tSNE-2']
plot_tsne.index = selected_cells
plot_tsne['Guides'] = guides
sns.lmplot('tSNE-1','tSNE-2',data=plot_tsne,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/tSNE_new_Adamson.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# tSNE with important PCs
tsne_model = manifold.TSNE(n_components=2, perplexity=20, verbose=2,init='pca',n_iter_without_progress=10000,min_grad_norm=0)
selected_PCs = list(set(selected_PCs) - set(['PC'+str(x) for x in range(1,5)]))
T_sne = tsne_model.fit_transform(PCscore[selected_PCs])
T_sne = pd.DataFrame(T_sne)
plot_tsne = T_sne.copy()
plot_tsne.columns = ['tSNE-1', 'tSNE-2']
plot_tsne.index = selected_cells
plot_tsne['Guides'] = guides
sns.lmplot('tSNE-1','tSNE-2',data=plot_tsne,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/tSNE_PC1-4_removed_Adamson.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
'''
On Dixit data
'''
from Utils import *
Data_dir = "/home/luodongyang/SCData/Perturb/Dixit/"
#------------------------------------------------------------------------#
# Read Data
## Matrix
mat=mmread(os.path.join(Data_dir, "GSM2396856_dc_3hr.mtx.txt"))
genes_path = os.path.join(Data_dir, "GSM2396856_dc_3hr_genenames.csv")
gene_names = pd.read_table(genes_path, sep=',', skiprows=0)
gene_names = gene_names.iloc[:,1]
barcodes_path = os.path.join(Data_dir, "GSM2396856_dc_3hr_cellnames.csv")
barcodes = pd.read_table(barcodes_path, sep=',', skiprows=0)
barcodes = list(barcodes.iloc[:,1])
## Get the GUIDE part of the X
cbc_gbc_dict_path = os.path.join(Data_dir, "GSM2396856_dc_3hr_cbc_gbc_dict_lenient.csv")
gbcs = [row[0] for row in csv.reader(open(cbc_gbc_dict_path))]
cbcs_raw = [row[1] for row in csv.reader(open(cbc_gbc_dict_path))]
cbcs = []
for temp_val in cbcs_raw:
temp = temp_val.replace(' ','').split(',')
cbcs.append(list(set(temp)&set(barcodes)))
gbc_cbc_dict = dict(zip(gbcs, cbcs))
X_guides = dict2X(GUIDES_DICT=gbc_cbc_dict, cbcs=barcodes)
#------------------------------------------------------------------------#
# Processing
## conversion & Filtering
Y = pd.DataFrame(mat.toarray())
Y.index = gene_names
Y.columns = barcodes
[filtered_genes,filtered_cells] = filter_Gene_Cell(Y, gene_thresh=10, cell_thresh=1000) # filtering
cell_idx = X_guides.index[X_guides.sum(axis=1)==1]
selected_cells = list(set(filtered_cells) & set(cell_idx))
Y = Y.loc[filtered_genes, selected_cells]
X_guides = X_guides.loc[selected_cells]
Y_log = pd.DataFrame(np.log2(tp10k_transform(Y)+1))
guide_list = list(X_guides.columns)
guides = []
for ii in range(len(X_guides)):
guides.append(guide_list[list(X_guides.iloc[ii,:]).index(1)])
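# Equivalent pandas one-liner (alternative sketch, not the original approach): since each
# selected cell carries exactly one guide, the column holding the row-wise maximum is that
# cell's guide.
# guides = list(X_guides.idxmax(axis=1))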
#------------------------------------------------------------------------#
# Merge Guides --> same gene
for ii in range(len(guides)):
guides[ii] = guides[ii].split('_')[1]
#------------------------------------------------------------------------#
# PCA
[Ufb,Sfb,Vfb,PCscore] = fb_pca(Y_log, n_components=100, center=True, scale=False)
## PC variance explained
plt.plot(Sfb, label='PC Variance Explained')
plt.savefig('./Figs/PC_eigens_Dixit.jpg', dpi=300)
plt.close()
## Use PC scores for plotting
plot_pca = PCscore[['PC1','PC2']]
plot_pca['Guides'] = guides
sns.lmplot('PC1','PC2',data=plot_pca,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/PCA_Dixit.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# t-SNE
tsne_model = manifold.TSNE(n_components=2, perplexity=20, verbose=2,init='pca',n_iter_without_progress=10000,min_grad_norm=0)
T_sne = tsne_model.fit_transform(PCscore.iloc[:,range(15)])
T_sne = pd.DataFrame(T_sne)
plot_tsne = T_sne.copy()
plot_tsne.columns = ['tSNE-1', 'tSNE-2']
plot_tsne.index = selected_cells
plot_tsne['Guides'] = guides
sns.lmplot('tSNE-1','tSNE-2',data=plot_tsne,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/tSNE_Dixit.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# LASSO
'''
X = pd.DataFrame(Vfb.transpose())
X.index = PCscore.index
X.columns = PCscore.columns
guides_dummy = pd.get_dummies(guides)
lasso_model = linear_model.Lasso(alpha=0.1, precompute=True)
lasso_model.fit(PCscore, guides_dummy)
'''
#------------------------------------------------------------------------#
# Random Forest
guides_dummy = pd.get_dummies(guides)
RF_model = RandomForestClassifier(n_estimators=100,n_jobs=-1,oob_score=True,class_weight='balanced')
RF_model.fit(PCscore, guides_dummy)
PC_rank = pd.DataFrame({'PCs':['PC'+str(x+1) for x in range(100)],
'Importance':RF_model.feature_importances_})
PC_rank = PC_rank.loc[np.argsort(-PC_rank['Importance'], )]
PC_rank.index = range(1,101)
plt.plot(PC_rank['Importance'], label='PC Importance')
plt.savefig('./Figs/PC_importance_Dixit.jpg', dpi=300)
plt.close()
PC_rank.to_csv('./Figs/PC_importance_Dixit.csv')
#------------------------------------------------------------------------#
# PCA with important PCs
selected_PCs = list(PC_rank['PCs'][0:10])
New_feature_Y = PCscore[selected_PCs].transpose()
[Unew,Snew,Vnew,PCscore_new] = fb_pca(New_feature_Y, n_components=10, center=True, scale=False)
plot_pca = PCscore_new[['PC1','PC2']]
plot_pca['Guides'] = guides
sns.lmplot('PC1','PC2',data=plot_pca,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/PCA_new_Dixit.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# tSNE with important PCs
tsne_model = manifold.TSNE(n_components=2, perplexity=20, verbose=2,init='pca',n_iter_without_progress=10000,min_grad_norm=0)
T_sne = tsne_model.fit_transform(PCscore[selected_PCs])
T_sne = pd.DataFrame(T_sne)
plot_tsne = T_sne.copy()
plot_tsne.columns = ['tSNE-1', 'tSNE-2']
plot_tsne.index = selected_cells
plot_tsne['Guides'] = guides
sns.lmplot('tSNE-1','tSNE-2',data=plot_tsne,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/tSNE_new_Dixit.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
# tSNE with important PCs, after removing PC1-9
tsne_model = manifold.TSNE(n_components=2, perplexity=20, verbose=2,init='pca',n_iter_without_progress=10000,min_grad_norm=0)
selected_PCs = list(set(selected_PCs) - set(['PC'+str(x) for x in range(1,10)]))
T_sne = tsne_model.fit_transform(PCscore[selected_PCs])
T_sne = pd.DataFrame(T_sne)
plot_tsne = T_sne.copy()
plot_tsne.columns = ['tSNE-1', 'tSNE-2']
plot_tsne.index = selected_cells
plot_tsne['Guides'] = guides
sns.lmplot('tSNE-1','tSNE-2',data=plot_tsne,hue='Guides',fit_reg=False, scatter_kws={'s':5})
plt.savefig('./Figs/tSNE_PC1-9_removed_Dixit.jpg', dpi=300)
plt.close()
#------------------------------------------------------------------------#
|
python
|
# debug_importer.py
import sys
class DebugFinder:
@classmethod
def find_spec(cls, name, path, target=None):
print(f"Importing {name!r}")
return None
sys.meta_path.insert(0, DebugFinder)
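# Hedged usage sketch: with the finder installed, any later import of a module that is
# not already cached in sys.modules prints a trace line; find_spec returns None, so the
# regular finders still resolve the import. For example:
#
#     $ python -i debug_importer.py
#     >>> import csv
#     Importing 'csv'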
|
python
|
import json
import os
import unittest
from netdice.common import Flow, StaticRoute
from netdice.explorer import Explorer
from netdice.input_parser import InputParser
from netdice.problem import Problem
from netdice.properties import WaypointProperty, IsolationProperty
from netdice.reference_explorer import ReferenceExplorer
from netdice.util import project_root_dir
from tests.problem_helper import get_test_input_file, get_paper_problem
class CompareToReferenceTest(unittest.TestCase):
@staticmethod
def is_compatible(state: list, mask: list):
pos = 0
for i in state:
if mask[pos] != -1 and mask[pos] != i:
return False
pos += 1
return True
@staticmethod
def get_ground_truth_file(scenario_name: str):
return os.path.join(project_root_dir, "tests", "ground_truth", scenario_name)
@staticmethod
def load_ref_from_file(fname: str):
p_property_val = None
data = []
with open(fname, 'r') as f:
for l in f:
entry = json.loads(l)
data.append(entry)
if "p_property" in entry:
p_property_val = float(entry["p_property"])
return data, p_property_val
@staticmethod
def store_ref_to_file(fname: str, data: list):
with open(fname, 'w') as f:
for entry in data:
print(json.dumps(entry), file=f)
def compare_to_reference(self, problem: Problem, scenario_name: str, allow_cache=True):
explorer = Explorer(problem, full_trace=True)
solution = explorer.explore_all()
# cache ground truth
cache_file = CompareToReferenceTest.get_ground_truth_file(scenario_name)
if allow_cache and os.path.exists(cache_file):
ref_stats, ref_p_property_val = CompareToReferenceTest.load_ref_from_file(cache_file)
else:
ref_explorer = ReferenceExplorer(problem, full_trace=True)
ref_solution = ref_explorer.explore_all()
ref_stats = ref_explorer._trace
ref_p_property_val = ref_solution.p_property.val()
if allow_cache:
CompareToReferenceTest.store_ref_to_file(cache_file, ref_stats)
# check equal forwarding graphs for all states
for dref in ref_stats:
if "state" in dref:
# find state for smart explorer
found = False
cmp_data = None
for dsmart in explorer._trace:
cmp_data = dsmart
if CompareToReferenceTest.is_compatible(dref["state"], dsmart["state"]):
found = True
break
self.assertTrue(found, "state {} not found for smart exploration".format(dref["state"]))
self.assertEqual(dref["fw_graph"], cmp_data["fw_graph"],
"state: {}\nmatched by: {}".format(dref["state"], cmp_data["state"]))
# compare probabilities
self.assertAlmostEqual(solution.p_property.val(), ref_p_property_val, delta=1E-10)
def test_paper_example(self):
problem = get_paper_problem()
self.compare_to_reference(problem, "paper_example.txt")
def test_paper_example_alt_flow(self):
problem = get_paper_problem()
problem.property = WaypointProperty(Flow(1, "42.42.0.0/16"), 2)
self.compare_to_reference(problem, "paper_example_alt_flow.txt")
def test_paper_example_alt_flow_2(self):
problem = get_paper_problem()
problem.property = WaypointProperty(Flow(2, "42.42.0.0/16"), 3)
self.compare_to_reference(problem, "paper_example_alt_flow_2.txt")
def test_paper_example_alt_flow_3(self):
problem = get_paper_problem()
problem.property = WaypointProperty(Flow(4, "42.42.0.0/16"), 3)
self.compare_to_reference(problem, "paper_example_alt_flow_3.txt")
def test_paper_example_static_route(self):
problem = get_paper_problem()
problem.property = WaypointProperty(Flow(1, "42.42.0.0/16"), 2)
problem.static_routes = [StaticRoute("42.42.0.0/16", 1, 4)]
self.compare_to_reference(problem, "paper_example_static_route.txt")
def test_paper_example_multi_flow(self):
problem = get_paper_problem()
problem.property = IsolationProperty([Flow(1, "42.42.0.0/16"), Flow(4, "99.99.99.0/24")])
self.compare_to_reference(problem, "paper_example_multi_flow.txt")
def test_nsfnet_node_failures(self):
problem = InputParser(get_test_input_file("Nsfnet.json")).get_problems()[0]
self.compare_to_reference(problem, "Nsfnet_node_failures.txt")
    def test_nsfnet_alt(self):
problem = InputParser(get_test_input_file("Nsfnet_alt.json")).get_problems()[0]
self.compare_to_reference(problem, "Nsfnet_alt.txt")
def test_ecmp(self):
problem = InputParser(get_test_input_file("ecmp.json")).get_problems()[0]
self.compare_to_reference(problem, "ecmp.txt")
|
python
|
import os
from datetime import datetime
from flask import Flask, render_template, redirect, flash, abort, url_for, request
from flask_restless import APIManager
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from wtforms import form, fields, validators
import flask_login as login
from flask_admin.contrib import sqla
from flask_admin import helpers, expose, AdminIndexView
from werkzeug.security import generate_password_hash, check_password_hash
# Create Flask application
app = Flask(__name__)
# Create secret key so we can use sessions
app.config['SECRET_KEY'] = os.urandom(24).hex()
# Create SQLite database (bubbles.db)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///bubbles.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Flask-SQLAlchemy: Define the models
class BubblesUser(db.Model, UserMixin):
__tablename__ = 'bubbles_users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), index=True, unique=True, nullable=False)
password = db.Column(db.String(50), nullable=False)
email = db.Column(db.Unicode(50), nullable=False)
description = db.Column(db.Text, nullable=False)
role = db.Column(db.Unicode(50), nullable=False)
experience_points = db.Column(db.Integer)
skills = db.Column(db.Text)
created_at = db.Column(db.Date)
bubbles = db.relationship('BubblesBubble', backref='bubbles_users', lazy='dynamic')
settings = db.relationship('BubblesUserSetting', backref='bubbles_users', uselist=False, lazy='select')
resources = db.relationship('BubblesResource', backref='bubbles_users', lazy='dynamic')
projects = db.relationship('BubblesProject', backref='bubbles_users', lazy='dynamic')
quests = db.relationship('BubblesQuest', backref='bubbles_users', lazy='dynamic')
def __repr__(self):
return '<User: ' + str(self.name) + ' - Id: ' + str(self.id) + '>'
# Flask-Login integration
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
    # Required for administrative interface
    def __str__(self):
        return self.name
class BubblesBubble(db.Model, UserMixin):
__tablename__ = 'bubbles_bubbles'
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.Text)
project_id = db.Column(db.Integer, db.ForeignKey('bubbles_projects.id'))
user_id = db.Column(db.Integer, db.ForeignKey('bubbles_users.id'))
type = db.Column(db.String, default="bubble")
order = db.Column(db.Integer, default=1)
setting = db.relationship('BubblesSetting', backref='bubbles_bubbles', uselist=False, lazy='select')
resources = db.relationship('BubblesResource', backref='bubbles_bubbles', uselist=False, lazy='select')
def __repr__(self):
return '<BubbleId: ' + str(self.id) + '>'
class BubblesMetaGlobal(db.Model, UserMixin):
__tablename__ = 'bubbles_meta_global'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), nullable=False)
content = db.Column(db.Text, nullable=False)
def __repr__(self):
return '<BubblesMetaGlobal %r>' % str(self.name)
class BubblesPage(db.Model):
__tablename__ = 'bubbles_pages'
id = db.Column(db.Integer, primary_key=True)
alias = db.Column(db.String, nullable=False)
title = db.Column(db.String, nullable=False)
meta_locals = db.relationship('BubblesMetaLocal', backref='bubbles_pages', lazy='dynamic')
def __repr__(self):
return '<BubblesPage %r>' % self.id
class BubblesMetaLocal(db.Model):
__tablename__ = 'bubbles_meta_local'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
page = db.Column(db.Integer, db.ForeignKey('bubbles_pages.id'))
content = db.Column(db.String, nullable=False)
def __repr__(self):
return '<BubblesMetaLocal %r>' % str(self.name)
bubbles_project_resource = db.Table('bubbles_project_resource',
db.Column('project_id', db.Integer, db.ForeignKey('bubbles_projects.id')),
db.Column('resource_id', db.Integer, db.ForeignKey('bubbles_resources.id'))
)
class BubblesProject(db.Model):
__tablename__ = 'bubbles_projects'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
description = db.Column(db.Text)
user_id = db.Column(db.Integer, db.ForeignKey('bubbles_users.id'))
bubbles = db.relationship('BubblesBubble', backref='bubbles_projects', lazy='dynamic')
resources = db.relationship('BubblesResource', secondary=bubbles_project_resource,
backref=db.backref('bubbles_projects', lazy='dynamic'))
def __repr__(self):
return '<BubblesProject %r>' % str(self.id)
class BubblesQuest(db.Model):
__tablename__ = 'bubbles_quests'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45))
description = db.Column(db.Text)
author_id = db.Column(db.Integer, db.ForeignKey('bubbles_users.id'))
editor_id = db.Column(db.String(255), default="null")
state = db.Column(db.String(45), nullable=False)
resource = db.Column(db.String(255), default="null")
language = db.Column(db.String(45), default="null")
def __repr__(self):
return '<BubblesQuestId %r>' % str(self.id)
class BubblesResource(db.Model):
__tablename__ = 'bubbles_resources'
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(45), nullable=False)
data = db.Column(db.String(255), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('bubbles_users.id'))
bubble = db.Column(db.Integer, db.ForeignKey('bubbles_bubbles.id'))
def __repr__(self):
return '<BubblesResourceId %r>' % str(self.id)
class BubblesSettingCms(db.Model):
__tablename__ = 'bubbles_settings_cms'
id = db.Column(db.Integer, primary_key=True)
property = db.Column(db.String(255))
value = db.Column(db.String(255), nullable=False)
activated = db.Column(db.Integer, nullable=False, default=1)
description = db.Column(db.String(255), nullable=False)
def __repr__(self):
return '<BubblesSettingCms %r>' % self.property
class BubblesSetting(db.Model):
__tablename__ = 'bubbles_settings'
id = db.Column(db.Integer, primary_key=True)
bubble_id = db.Column(db.Integer, db.ForeignKey('bubbles_bubbles.id'))
size_x = db.Column(db.Integer, nullable=False)
size_y = db.Column(db.Integer, nullable=False)
bubbles_image = db.Column(db.String(255))
def __repr__(self):
return '<BubblesSetting %r>' % self.id
class BubblesUserSetting(db.Model):
__tablename__ = 'bubbles_user_settings'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('bubbles_users.id'))
avatar_image = db.Column(db.String(128))
def __repr__(self):
return '<BubblesUserSetting %r>' % self.id
class BubbleSkin(db.Model):
__tablename__ = 'bubble_skins'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), nullable=False)
value = db.Column(db.Integer, nullable=False)
activated = db.Column(db.Integer, nullable=False)
description = db.Column(db.Text)
def __repr__(self):
return '<BubbleSkin %r>' % self.id
class BubbleMessage(db.Model):
__tablename__ = 'bubbles_messages'
id = db.Column(db.Integer, primary_key=True)
sender_id = db.Column(db.Integer, db.ForeignKey('bubbles_users.id'), primary_key=True)
receiver_id = db.Column(db.Integer, db.ForeignKey('bubbles_users.id'), primary_key=True)
sender = db.relationship('BubblesUser', backref='sender_id', foreign_keys='BubbleMessage.sender_id')
receiver = db.relationship('BubblesUser', backref='receiver_id', foreign_keys='BubbleMessage.receiver_id')
content = db.Column(db.Text, nullable=False)
created_at = db.Column(db.DateTime, default=datetime.now)
viewed_at = db.Column(db.DateTime)
def __repr__(self):
return '<BubbleMessage %r>' % self.id
# Define login and registration forms (for flask-login)
class LoginForm(form.Form):
    login = fields.StringField(validators=[validators.DataRequired()])
    password = fields.PasswordField(validators=[validators.DataRequired()])
def validate_login(self, field):
user = self.get_user()
if user is None:
raise validators.ValidationError('Invalid user')
        # we're comparing the plaintext pw with the hash from the db
        if not check_password_hash(user.password, self.password.data):
            # to compare plain text passwords use
            # if user.password != self.password.data:
            raise validators.ValidationError('Invalid password')
    def get_user(self):
        # look the user up by the submitted login name, not by the password field
        return db.session.query(BubblesUser).filter_by(name=self.login.data).first()
# Initialize flask-login
def init_login():
login_manager = login.LoginManager()
login_manager.init_app(app)
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return db.session.query(BubblesUser).get(user_id)
manager = APIManager(app, flask_sqlalchemy_db=db)
manager.create_api(BubblesUser, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesBubble, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesMetaGlobal, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesMetaLocal, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesPage, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesProject, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesQuest, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesResource, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesSetting, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubbleSkin, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubbleMessage, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesUserSetting, methods=['GET', 'POST', 'DELETE', 'PUT'])
manager.create_api(BubblesSettingCms, methods=['GET', 'POST', 'DELETE', 'PUT'])
# Initialize flask-login
init_login()
# Create customized model view class
class MyModelView(sqla.ModelView):
def is_accessible(self):
return login.current_user.is_authenticated and (
login.current_user.role == 'admin' or login.current_user.role == 'Admin')
# Create customized index view class that handles login & registration
class MyAdminIndexView(AdminIndexView):
@expose('/')
def index(self):
if not login.current_user.is_authenticated:
return redirect(url_for('.login_view'))
return super(MyAdminIndexView, self).index()
@expose('/login/', methods=('GET', 'POST'))
def login_view(self):
# handle user login
form = LoginForm(request.form)
if helpers.validate_form_on_submit(form):
user = form.get_user()
login.login_user(user)
if login.current_user.is_authenticated:
return redirect(url_for('.index'))
self._template_args['form'] = form
return super(MyAdminIndexView, self).index()
@expose('/logout/')
def logout_view(self):
login.logout_user()
return redirect(url_for('.index'))
admin = Admin(app, name='bubbles', template_mode='bootstrap3', index_view=MyAdminIndexView())
admin.add_view(MyModelView(BubblesUser, db.session))
admin.add_view(MyModelView(BubblesBubble, db.session))
admin.add_view(MyModelView(BubblesProject, db.session))
admin.add_view(MyModelView(BubblesQuest, db.session))
admin.add_view(MyModelView(BubblesResource, db.session))
admin.add_view(MyModelView(BubblesSetting, db.session))
admin.add_view(MyModelView(BubbleMessage, db.session))
admin.add_view(MyModelView(BubblesUserSetting, db.session))
admin.add_view(MyModelView(BubblesPage, db.session))
admin.add_view(MyModelView(BubblesMetaLocal, db.session))
admin.add_view(MyModelView(BubblesSettingCms, db.session))
admin.add_view(MyModelView(BubbleSkin, db.session))
admin.add_view(MyModelView(BubblesMetaGlobal, db.session))
@app.route("/")
def index():
return render_template('index.html')
db.drop_all()
db.create_all()
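# Hedged example (not part of the original app): seed an initial admin account so the
# /admin login view defined above is usable right after the tables are recreated.
# The credential values are placeholders; adjust them before any real use. Note that
# the password column above is String(50); SQLite does not enforce the length, but a
# longer column would be needed for hashed passwords on other databases.
with app.app_context():
    if not db.session.query(BubblesUser).filter_by(name='admin').first():
        db.session.add(BubblesUser(
            name='admin',
            password=generate_password_hash('change-me'),
            email='admin@example.com',
            description='Initial administrator',
            role='admin',
        ))
        db.session.commit()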
if __name__ == "__main__":
app.debug = True
app.run(debug=True)
|
python
|
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ciscoaplookup',
version="0.10.0",
author="Steffen Schumacher",
author_email="[email protected]",
description="The Cisco Wireless LAN Compliance Lookup library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/steffenschumacher/ciscoaplookup.git",
packages=find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
include_package_data=True,
install_requires=['requests', 'xlrd==1.2.0', 'beautifulsoup4', 'country_converter'],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 28 11:07:41 2019
@author: Kevin
"""
import numpy as np
import pickle
from shapely.geometry import Point
class TileCreator(object):
def __init__(self, configuration, polygon):
self.output_path = configuration['tile_coords_path']
# Avg. earth radius in meters
self.radius = 6371000
# Square side length of tiles in meters
self.side = 240
# Bounding box coordinates for NRW, i.e. North, South, East, West
self.N = 52.7998
self.S = 50.0578
self.E = 9.74158
self.W = 5.59334
self.polygon = polygon
def defineTileCoords(self):
# dlat spans a distance of 'side' meters in north-south direction:
# 1 degree in latitude direction spans (2*np.pi*r)/360° meters
# Hence, 'side' meters need to be divided by this quantity to obtain
# the number of degrees which span 'side' meters in latitude (north-south) direction
dlat = (self.side*360) / (2*np.pi*self.radius)
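        # Worked example: with side = 240 m and radius = 6371000 m,
        # dlat = (240 * 360) / (2 * pi * 6371000) ≈ 0.00216 degrees of latitude per tile.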
Tile_coords = []
y = self.S
while y < self.N:
x = self.W
while x < self.E:
# Center point of current image tile
cp = Point(x,y)
# Download 4800x4800 pixel imagery if one of the bounding box corners is inside the NRW polygon
# Bounding box coordinates for a given image tile
minx = x - (((self.side * 360) / (2 * np.pi * self.radius * np.cos(np.deg2rad(y))))/2)
miny = y - dlat/2
maxx = x + (((self.side * 360) / (2 * np.pi * self.radius * np.cos(np.deg2rad(y))))/2)
maxy = y + dlat/2
# Bounding box corners for a given image tile
# Lower Left
LL = Point(minx,miny)
# Lower Right
LR = Point(maxx,miny)
# Upper Left
UL = Point(minx,maxy)
# Upper Right
UR = Point(maxx, maxy)
# If bounding box corners are within NRW polygon
if (self.polygon.intersects(LL) | self.polygon.intersects(LR) | self.polygon.intersects(UL) | self.polygon.intersects(UR)):
Tile_coords.append((minx, miny, maxx, maxy))
# Update longitude value
x = x + ((self.side * 360) / (2 * np.pi * self.radius * np.cos(np.deg2rad(y))))
# Update latitude value
y = y + dlat
with open(self.output_path,'wb') as f:
pickle.dump(Tile_coords, f)
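# Hedged usage sketch (the configuration key and the NRW boundary polygon shown here are
# assumptions, not taken from this file):
#
#     from shapely.geometry import Polygon
#     configuration = {'tile_coords_path': 'tile_coords.pkl'}
#     nrw_polygon = Polygon([...])  # actual NRW boundary coordinates
#     TileCreator(configuration, nrw_polygon).defineTileCoords()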
|
python
|
from setuptools import setup
setup(
name='YAFN',
version='0.0.1',
author='txlyre',
author_email='[email protected]',
packages=['yafn', 'yafn-tracker'],
url='https://github.com/txlyre/yafn',
license='LICENSE',
description='Yet another p2p file network protocol.',
install_requires=[
'cbor2',
'pyzmq',
'pyvis',
'aiohttp',
'pycryptodome',
],
)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 14 14:47:38 2021
@author: cxue2
"""
import torch
from xfdlfw import Result
from xfdlfw.metric import ConfusionMatrix, Accuracy, MeanSquaredError, MeanAbsoluteError, CrossEntropy
acc = Accuracy('acc')
ce_ = CrossEntropy('ce_')
mse = MeanSquaredError('mse')
mae = MeanAbsoluteError('mae')
# __init__
rsl = Result((acc, ce_, ce_))
print(rsl.summary())
# unregistered metric check
try:
_ = Result((ce_, acc))
_.summary((mse,))
except Exception as e:
    print('Exception caught:', repr(e))
# test regression
met = [mse, mae]
rsl_0 = Result(met)
o = torch.randn((7, 3))
t = torch.randn((7, 3))
rsl_0.push(o, t)
o = torch.randn((7, 3))
t = torch.randn((7, 3))
rsl_0.push(o, t)
print(rsl_0.summary(met))
rsl_1 = Result(met)
o = torch.randn((7, 3))
t = torch.randn((7, 3))
rsl_1.push(o, t)
o = torch.randn((7, 3))
t = torch.randn((7, 3))
rsl_1.push(o, t)
print(rsl_1.summary())
print('is rsl_0 better than rsl_0?', rsl_0.is_better_than(rsl_0, met))
print('is rsl_0 better than rsl_1?', rsl_0.is_better_than(rsl_1, met))
# test classification
met = [ce_, acc]
rsl_0 = Result(met)
o = torch.randn((7, 3))
t = torch.randint(0, 3, (7,))
rsl_0.push(o, t)
o = torch.randn((7, 3))
t = torch.randint(0, 3, (7,))
rsl_0.push(o, t)
print(rsl_0.summary())
rsl_1 = Result(met)
o = torch.randn((7, 3))
t = torch.randint(0, 3, (7,))
rsl_1.push(o, t)
o = torch.randn((7, 3))
t = torch.randint(0, 3, (7,))
rsl_1.push(o, t)
print(rsl_1.summary())
print('is rsl_0 better than rsl_1?', rsl_0.is_better_than(rsl_1, met))
|
python
|
"""BleBox sensor entities."""
# pylint: disable=fixme
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from . import CommonEntity, async_add_blebox
async def async_setup_platform(hass, config, async_add, discovery_info=None):
"""Set up BleBox platform."""
return await async_add_blebox(
BleBoxSensorEntity, "sensors", hass, config, async_add, PlatformNotReady
)
async def async_setup_entry(hass, config_entry, async_add):
"""Set up a BleBox entry."""
return await async_add_blebox(
BleBoxSensorEntity,
"sensors",
hass,
config_entry.data,
async_add,
PlatformNotReady,
)
# TODO: create and use constants from blebox_uniapi?
UNIT_MAP = {"celsius": TEMP_CELSIUS}
DEV_CLASS_MAP = {"temperature": DEVICE_CLASS_TEMPERATURE}
class BleBoxSensorEntity(CommonEntity, Entity):
"""Representation of a BleBox sensor feature."""
@property
def state(self):
"""Return the state."""
return self._feature.current
@property
def unit_of_measurement(self):
"""Return the unit."""
return UNIT_MAP[self._feature.unit]
@property
def device_class(self):
"""Return the device class."""
return DEV_CLASS_MAP[self._feature.device_class]
|
python
|
from CHECLabPy.spectrum_fitters.gentile import sipm_gentile_spe, \
calculate_spectrum, SiPMGentileFitter, SpectrumParameter
import numpy as np
from numpy.testing import assert_allclose
from numba import typed
def test_sipm_gentile_spe():
x = np.linspace(-1, 20, 1000, dtype=np.float32)
y = sipm_gentile_spe(x, 0., 0.2, 1., 0.1, 0.2, 1.)
np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3)
def test_calculate_spectrum():
x = np.linspace(-1, 20, 1000, dtype=np.float32)
parameter_values = [0., 0.2, 1., 0.1, 0.2, 1.]
lookup = typed.Dict()
lookup['eped'] = 0
lookup['eped_sigma'] = 1
lookup['spe'] = 2
lookup['spe_sigma'] = 3
lookup['opct'] = 4
lookup['lambda_'] = 5
y = calculate_spectrum(x, lookup, *parameter_values)
np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3)
def test_sipm_gentile_fitter():
# Define SPE
params = dict(
eped=-0.5,
eped_sigma=0.2,
spe=2,
spe_sigma=0.15,
opct=0.3,
)
lambda_values = [0.5, 0.7, 0.9]
# Get charges
random = np.random.RandomState(1)
pdf_x = np.linspace(-10, 50, 10000, dtype=np.float32)
pdf_y = []
charges = []
for lambda_ in lambda_values:
pdf = sipm_gentile_spe(pdf_x, lambda_=lambda_, **params)
pdf /= pdf.sum()
charge = random.choice(pdf_x, 30000, p=pdf)
pdf_y.append(pdf)
charges.append(charge)
# Create Fitter class
n_illuminations = len(lambda_values)
fitter = SiPMGentileFitter(n_illuminations=n_illuminations)
# Update Fit Parameters
spectrum_parameter_list = [
SpectrumParameter("eped", 0, (-10, 10)),
SpectrumParameter("eped_sigma", 0.5, (0.01, 1)),
SpectrumParameter("spe", 1, (0.1, 5)),
SpectrumParameter("spe_sigma", 0.5, (0.01, 1)),
SpectrumParameter("opct", 0.4, (0.01, 0.8)),
SpectrumParameter("lambda_", 0.7, (0.001, 3), multi=True),
]
fitter.parameters.update(spectrum_parameter_list)
fitter.range = (-10, 50)
fitter.n_bins = 1000
fitter.apply(*charges)
parameter_values = fitter.fit_result_values
parameter_errors = fitter.fit_result_errors
rtol = 1e-2
assert_allclose(parameter_values["eped"], params["eped"], rtol=rtol)
assert_allclose(parameter_values["eped_sigma"], params["eped_sigma"], rtol=rtol)
assert_allclose(parameter_values["spe"], params["spe"], rtol=rtol)
assert_allclose(parameter_values["spe_sigma"], params["spe_sigma"], rtol=rtol)
assert_allclose(parameter_values["opct"], params["opct"], rtol=rtol)
assert_allclose(parameter_values["lambda_0"], lambda_values[0], rtol=rtol)
assert_allclose(parameter_values["lambda_1"], lambda_values[1], rtol=rtol)
assert_allclose(parameter_values["lambda_2"], lambda_values[2], rtol=rtol)
assert parameter_errors["eped"] < 0.01
assert parameter_errors["eped_sigma"] < 0.01
assert parameter_errors["spe"] < 0.01
assert parameter_errors["spe_sigma"] < 0.01
assert parameter_errors["opct"] < 0.01
assert parameter_errors["lambda_0"] < 0.01
assert parameter_errors["lambda_1"] < 0.01
assert parameter_errors["lambda_2"] < 0.01
|
python
|
# the URL of the REST API server
CDS_LB='https://rest-endpoint.example.com'
# location of client certificate and key
CDS_CERT='../certs/cds_cert.pem'
CDS_KEY='../certs/cds_key.pem'
# the endpoint path of the REST server; multiple versions can and will be available
CDS_API='/v2.0/DetectionRequests'
CDS_URL=CDS_LB+CDS_API
USER_AGENT='symc-dlp-cloud-connector'
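# Hedged usage sketch (not part of the original configuration; the payload schema is an
# assumption, and the request only succeeds against a real endpoint with valid certs):
if __name__ == '__main__':
    import requests
    payload = {}  # placeholder detection-request body
    response = requests.post(CDS_URL, json=payload,
                             cert=(CDS_CERT, CDS_KEY),
                             headers={'User-Agent': USER_AGENT})
    print(response.status_code)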
|
python
|
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import json
import pandas as pd
import numpy as np
import plotly
app = dash.Dash()
app.scripts.config.serve_locally = True
DF_WALMART = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/1962_2006_walmart_store_openings.csv')
DF_GAPMINDER = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv'
)
DF_GAPMINDER = DF_GAPMINDER[DF_GAPMINDER['year'] == 2007]
DF_SIMPLE = pd.DataFrame({
'x': ['A', 'B', 'C', 'D', 'E', 'F'],
'y': [4, 3, 1, 2, 3, 6],
'z': ['a', 'b', 'c', 'a', 'b', 'c']
})
app.layout = html.Div([
html.H4('Gapminder DataTable'),
dt.DataTable(
rows=DF_GAPMINDER.to_dict('records'),
filterable=False,
sortable=True,
id='datatable-gapminder'
),
dcc.Graph(
id='graph-gapminder'
),
html.H4('Simple DataTable'),
dt.DataTable(
rows=DF_SIMPLE.to_dict('records'),
filterable=False,
sortable=True,
id='datatable'
),
dcc.Graph(
id='graph'
),
], className="container")
@app.callback(
Output('graph', 'figure'),
[Input('datatable', 'rows')])
def update_figure(rows):
dff = pd.DataFrame(rows)
return {
'data': [{
'x': dff['x'],
'y': dff['y'],
'text': dff['z'],
'type': 'bar'
}]
}
@app.callback(
Output('graph-gapminder', 'figure'),
[Input('datatable-gapminder', 'rows')])
def update_figure_gapminder(rows):
dff = pd.DataFrame(rows)
fig = plotly.tools.make_subplots(
rows=3, cols=1,
subplot_titles=('Life Expectancy', 'GDP Per Capita', 'Population',),
shared_xaxes=True)
marker = {'color': '#0074D9'}
fig.append_trace({
'x': dff['country'],
'y': dff['lifeExp'],
'type': 'bar',
'marker': marker
}, 1, 1)
fig.append_trace({
'x': dff['country'],
'y': dff['gdpPercap'],
'type': 'bar',
'marker': marker
}, 2, 1)
fig.append_trace({
'x': dff['country'],
'y': dff['pop'],
'type': 'bar',
'marker': marker
}, 3, 1)
fig['layout']['showlegend'] = False
fig['layout']['height'] = 800
fig['layout']['margin'] = {
'l': 20,
'r': 20,
't': 60,
'b': 200
}
return fig
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
if __name__ == '__main__':
app.run_server(debug=True)
|