content | type
---|---
from SpellCorrect import input_processing
from Organize_Names import Corpus_Combo, Combined_words,Word_Classfication,import_file,Remov_Num,Remove_nan,Remove_strNum,Clean_Word,Valuable_word
from Import_corpus import input_file
from Get_Synonyms import Get_SynonymFromlst
from Standard_corpus import standarlized_output,descript_corpus
from BuiltinAll_corpus import exportCombined,exportDict,inputDict
from Classify_word import combined_dict
from BuiltinAll_corpus import inputDict
def SpellCorrection(translate,input_function,words_list):
input_fun=input_processing(translate,input_function,words_list) #all_corpus--> word_list
return (input_fun)
def Archi_Sector(INPUT_sector,translate):
sectors = inputDict(r'C:\Users\DELL\Desktop\RoomCorpus\sectors.txt')
Sector = input_processing(translate,INPUT_sector,sectors)
return(Sector)
def Synonym_Function(correct_function,aimlist,tolerance):
Function = Get_SynonymFromlst(aimlist,correct_function,tolerance)
return(Function)
def Custom_corpus(FilePath,sheetName):
Corpus = Corpus_Combo(FilePath,sheetName)
return(Corpus)
def Buildin_corpus():
Corpus = inputDict(r'C:/Users/DELL/Desktop/RoomCorpus/Corpus_Combo.txt')
    all_corpus = exportCombined(r'C:\Users\DELL\Desktop\RoomCorpus','All_corpus.txt',Corpus)
return(Corpus,all_corpus)
def Custom_or_Build(build,Custom_path,Buildin_corpus,BuildinWords):
if build=='yes':
Custom_corpus = Corpus_Combo(Custom_path,'Sheet1')
all_corpus = Combined_words(Custom_corpus)
noun_lst = Custom_corpus['noun_n']
return(Custom_corpus,all_corpus,noun_lst)
elif build=='no':
buildin_corpus = inputDict(Buildin_corpus)
all_corpus = inputDict(BuildinWords)
noun_lst = buildin_corpus['noun_n']
return(buildin_corpus,all_corpus,noun_lst)
else:
return('please enter valid content: yes or no')
def standard_Dict(build,CustomNoun_path,Roomname_path,tolerance):
if build=='yes':
combined = combined_dict(CustomNoun_path,Roomname_path,'Sheet1',tolerance)
return(combined)
elif build=='no':
combined = inputDict(r'C:\Users\DELL\Desktop\wuzzynaming\RoomName_standard\Combined_dict.txt')
return(combined)
else:
return('please enter valid content: yes or no')
def Abbreviation(FilePath,sheetName):
NameFiles=import_file(FilePath,sheetName)
Name_nonum=Remove_strNum(NameFiles)
Abbreviation = Clean_Word(Name_nonum)[1]
return(Abbreviation)
def Value_word(FilePath,sheetName,Rank):
NameFiles=import_file(FilePath,sheetName)
Name_nonum=Remove_strNum(NameFiles)
Cleaned_name = Clean_Word(Name_nonum)[0]
valuable_words = Valuable_word(Cleaned_name,Rank)
    return(valuable_words)
def Word_Class(valuable_words):
nouns= Word_Classfication(valuable_words)[0]
adj= Word_Classfication(valuable_words)[1]
verbs= Word_Classfication(valuable_words)[2]
adv= Word_Classfication(valuable_words)[3]
return(nouns,adj,verbs,adv)
def standard(Function,Description,Sector,Ownership,combined_dict):
standards = standarlized_output(Function,Description,Sector,Ownership,combined_dict)
Function,Description,Sector,Ownership,Property = standards[0],standards[1],standards[2],standards[3],standards[4]
return(Function,Description,Sector,Ownership,Property)
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : environ.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 01/19/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import contextlib
from copy import deepcopy
from jacinle.utils.meta import dict_deep_keys, dict_deep_update
__all__ = ['env', 'load_env', 'has_env', 'get_env', 'set_env', 'with_env']
class Environ(object):
__env_ext__ = '.env.pkl'
def __init__(self, envs=None):
self.envs = dict()
if envs is not None:
self.load(envs)
def load(self, env_spec, incremental=False):
new_envs = self.__get_envs_from_spec(env_spec)
if incremental:
dict_deep_update(self.envs, new_envs)
else:
self.envs = deepcopy(new_envs)
return self
def update(self, env_spec):
return self.load(env_spec, incremental=True)
def dump(self, path, prefix=None):
raise NotImplementedError('Not supported yet: "Environ.dump".')
def as_dict(self):
return deepcopy(self.envs)
def as_dict_ref(self):
return self.envs
def clone(self):
new_env = Environ()
new_env.envs = deepcopy(self.envs)
return new_env
def keys(self, is_flattened=True):
if is_flattened:
return dict_deep_keys(self.envs)
return list(self.envs.keys())
def has(self, key):
"""
Check whether a key is in current env object.
:param key: the key.
:return: True if the provided key is in current env object.
"""
return self.get(key, None) is not None
def get(self, key, default=None):
"""
Get a value of a environment provided a key. You can provide a default value, but this value will not affect
the env object.
:param key: the key, note that dict of dict can (should) be imploded by ``.''.
:param default: if the given key is not found in current env object, the default value will be returned.
:return: the value if the env contains the given key, otherwise the default value provided.
"""
subkeys = key.split('.')
current = self.envs
for subkey in subkeys[0:-1]:
if subkey not in current:
current[subkey] = dict()
current = current[subkey]
if subkeys[-1] in current:
return current[subkeys[-1]]
elif default is None:
return default
else:
current[subkeys[-1]] = default
return default
def set(self, key, value=None, do_inc=False, do_replace=True, inc_default=0):
"""
Set an environment value by key-value pair.
:param key: the key, note that dict of dict can (should) be imploded by ``.''.
:param value: the value.
:param do_inc: if True, will perform += instead of =
:param do_replace: if True, will set the value regardless of its original value
:param inc_default: the default value for the do_inc operation
:return: self
"""
subkeys = key.split('.')
current = self.envs
for subkey in subkeys[0:-1]:
if subkey not in current:
current[subkey] = dict()
current = current[subkey]
if do_inc:
if subkeys[-1] not in current:
current[subkeys[-1]] = inc_default
current[subkeys[-1]] += value
elif do_replace or subkeys[-1] not in current:
current[subkeys[-1]] = value
return self
def set_default(self, key, default=None):
"""
Set an environment value by key-value pair. If the key already exists, it will not be overwritten.
:param key: the key, note that dict of dict can (should) be imploded by ``.''.
:param default: the ``default'' value.
:return: self
"""
        return self.set(key, default, do_replace=False)
def inc(self, key, inc=1, default=0):
"""
Increase the environment value provided a key.
:param key: the key, note that dict of dict can (should) be imploded by ``.''.
        :param inc: the number to be increased.
        :param default: the default value of the accumulator.
        :return: self
"""
self.set(key, inc, do_inc=True, inc_default=default)
return self
def __contains__(self, item):
return self.has(item)
def __getitem__(self, item):
return self.get(item, None)
def __setitem__(self, key, value):
self.set(key, value)
return value
def __get_envs_from_spec(self, env_spec):
if isinstance(env_spec, str) and env_spec.endswith(self.__env_ext__):
raise NotImplementedError('Not implemented loading method.')
elif isinstance(env_spec, dict):
return env_spec
elif isinstance(env_spec, object) and (hasattr(env_spec, 'envs') or hasattr(env_spec, '__envs__')):
return getattr(env_spec, 'envs', None) or getattr(env_spec, '__envs__')
else:
raise TypeError('unsupported env spec: {}.'.format(env_spec))
env = Environ()
load_env = env.load
update_env = env.update
has_env = env.has
get_env = env.get
set_env = env.set
@contextlib.contextmanager
def with_env(env_spec, incremental=True):
if not incremental:
backup = env.as_dict_ref()
else:
backup = env.as_dict()
    env.load(env_spec, incremental=incremental)
    try:
        yield
    finally:
        env.envs = backup
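# Usage sketch added for illustration (not part of the original Jacinle module): dotted keys address
# nested dicts, and `with_env` temporarily overlays a spec and then restores the previous state.
if __name__ == '__main__':
    set_env('trainer.optimizer.lr', 0.1)       # creates {'trainer': {'optimizer': {'lr': 0.1}}}
    assert get_env('trainer.optimizer.lr') == 0.1
    env.inc('trainer.step')                    # missing key starts at inc_default (0) and becomes 1
    assert get_env('trainer.step') == 1
    with with_env({'trainer': {'optimizer': {'lr': 0.01}}}, incremental=True):
        assert get_env('trainer.optimizer.lr') == 0.01
    assert get_env('trainer.optimizer.lr') == 0.1  # restored after the context exits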
|
python
|
import uuid
import json
from django.core.cache import cache
from django.core.mail import send_mail
from django.shortcuts import reverse
from django.conf import settings
def validate_email(user, email):
    title = '验证你的电子邮箱 | conus 通知推送'  # "Verify your email address | conus notification push"
    token = uuid.uuid4().hex
    body = (
        '请打开下方连接以验证你的电子邮箱:\n'  # "Please open the link below to verify your email address:"
        f'{settings.EMAIL_SITE_URL}{reverse("user:validate_email")}?token={token}'
    )
send_mail(title, body, None, [email])
cache.set(f'user:{user.pk}:validate_email', json.dumps({token: email}))
|
python
|
import unittest
from braintreehttp.testutils import TestHarness
from braintreehttp.serializers import FormPart
class FormPartTest(unittest.TestCase):
def test_init_lowercase_headers(self):
form_part = FormPart({ "key": "value" }, { "content-type": "application/json" })
self.assertTrue("Content-Type" in form_part.headers)
self.assertEqual(len(form_part.headers), 1)
def test_init_headers_collision(self):
form_part = FormPart({ "key": "value" }, { "content-type": "application/json", "CONTENT-TYPE": "application/pdf"})
self.assertTrue("Content-Type" in form_part.headers)
self.assertEqual(len(form_part.headers), 1)
def test_init_single_character_header(self):
form_part = FormPart({ "key": "value" }, { "x": "application/json" })
self.assertTrue("X" in form_part.headers)
self.assertEqual(len(form_part.headers), 1)
def test_init_multiple_headers(self):
form_part = FormPart({ "key": "value" }, { "x": "application/json", "Content-type": "application/pdf", "CONTENT-ENCODING": "gzip" })
self.assertTrue("X" in form_part.headers)
self.assertTrue("Content-Type" in form_part.headers)
self.assertTrue("Content-Encoding" in form_part.headers)
self.assertEqual(len(form_part.headers), 3)
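# Illustration only (not the braintreehttp implementation): the behaviour asserted above amounts to
# canonicalizing header names, so that "content-type", "CONTENT-TYPE" and "Content-type" all collapse
# onto the single key "Content-Type", and "x" becomes "X". A minimal sketch of that normalization:
def _canonicalize_header_name(name):
    # "content-type" -> "Content-Type", "CONTENT-ENCODING" -> "Content-Encoding", "x" -> "X"
    return "-".join(part.capitalize() for part in name.split("-"))
if __name__ == "__main__":
    unittest.main()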
|
python
|
#!/usr/bin/python
from pathlib import Path
import os
import shutil
from readConfig import ReadConfig
from datetime import datetime
class Utils:
"""
工具类
"""
@staticmethod
def save_file(file_name, row_list, mode):
"""
将 row_list 中的股票数据保存到 file_name 路径文件中
:param file_name: 路径文件名
:param row_list: 存放股票数据的列表
:param mode: 写入模式, w:覆盖写, a:追加
:return:
"""
path = Path(file_name)
with open(str(path), mode, encoding='UTF-8') as f: # a追加写入
for i in row_list:
row_result = ''
for j in i:
result = j.replace("%", '')
row_result += ('\t' + result)
f.write(row_result.lstrip() + '\n')
print(row_result.lstrip())
f.close()
@staticmethod
def save_date(file_name, data_list, mode):
"""
写入日期
:param path:
:return:
"""
path = Path(file_name)
# 保存数据并打印数据
with open(str(path), mode, encoding='UTF-8') as f:
for i in data_list:
f.write(i + '\n')
print(i)
f.close()
@staticmethod
def date2weekday(date):
"""
接收一个 yyyyMMdd的日期,返回对应的星期
:param date: yyyyMMdd
:return: weekday
"""
week = datetime.strptime(date,"%Y%m%d").weekday()
        return {
            0: "星期一",  # Monday
            1: "星期二",  # Tuesday
            2: "星期三",  # Wednesday
            3: "星期四",  # Thursday
            4: "星期五",  # Friday
            5: "星期六",  # Saturday
            6: "星期日"   # Sunday
        }.get(week)
    @staticmethod
    def print_title(title_list):
        """
        Print the title row.
        :return:
        """
for i in title_list:
print(i, end='\t')
print()
    @staticmethod
    def get_stock_data_path():
        """
        :return: root path under which stock data is saved
        """
config = ReadConfig()
path = config.find_path("config.ini")
config.__read__(path)
stock_path = config.get_stock("path")
if not os.path.exists(stock_path):
os.makedirs(stock_path)
return stock_path
@staticmethod
def remkdir(directory):
"""
如果directory不存在则创建,如果存在删除该目录下所有内容
:param directory: 路径
:return: 创建成功返回true, 否则false
"""
try:
if os.path.exists(directory):
shutil.rmtree(directory)
os.makedirs(directory)
return True
except Exception:
return False
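# Illustrative usage of the helpers above (the file and directory names are hypothetical examples):
if __name__ == '__main__':
    Utils.save_date('dates.txt', ['20240101', '20240102'], 'w')  # overwrite dates.txt with two dates
    print(Utils.date2weekday('20240101'))                        # 2024-01-01 is a Monday -> "星期一"
    Utils.remkdir('./stock_data_tmp')                            # (re)create an empty working directory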
|
python
|
"""
Switch based on e.g. an MCP23017 IC connected to a Raspberry Pi board.
A server component is expected to be running, communicating through a Redis in-memory database:
commands are sent to the Redis database, responses are captured, and the server processes the
actions in the background.
Author: find me on codeproject.com --> JurgenVanGorp
"""
import smtplib
import time
import sys
import redis
from datetime import datetime
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
###
### USER EDITABLE CONSTANTS #####################################################################
###
# Email parameters are ONLY needed in case logging is configured in the switch, through the
# CONF_VERBOSE_LEVEL parameter. I.e. this can be done on a per-device level. If no logging is
# set, the following parameters can be left empty. If logging is configured for debugging
# purposes (e.g. verbose_level = 3), emails will be sent on critical actions, allowing proper
# debugging. Keep in mind that the emails will significantly slow down operation, so handle with care.
CONST_EMAIL_SENDER = "[email protected]"
CONST_EMAIL_RECEIVER = ""
CONST_EMAIL_SMTPSERVER = "uit.telenet.be"
# Communications between Clients and the server happen through a Redis in-memory database
# so to limit the number of writes on the (SSD or microSD) storage. For larger implementations
# dozens to hundreds of requests can happen per second. Writing to disk would slow down the
# process, and may damage the storage.
# Make sure to have Redis installed in the proper locations, e.g. also in the virtual python
# environments. The default is that Redis is installed on localhost (127.0.0.1).
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# The COMMAND_TIMEOUT value is the maximum time (in seconds) that is allowed between pushing a
# button and the action that must follow. This is done to protect you from delayed actions
# whenever the I2C bus is heavily used, or the CPU is overloaded. If you e.g. push a button,
# and the I2C is too busy with other commands, the push-button command is ignored when
# COMMAND_TIMEOUT seconds have passed. Typically you would push the button again if nothing
# happens after one or two seconds. If both commands are stored, the light is switched on and
# immediately switched off again.
# Recommended minimum value is one or two seconds:
# COMMAND_TIMEOUT = 2
# Recommended maximum value is 10 seconds. Feel free to set higher values, but be prepared that
# you can experience strange behaviour if there is a lot of latency on the bus.
COMMAND_TIMEOUT = 1
###
### PROGRAM INTERNAL CONSTANTS ####################################################################
###
DEFAULT_I2C_ADDRESS = 0x20
CONF_INPUT_I2C_ADDRESS = "input_i2c_address"
CONF_INPUT_PIN = "input_pin"
CONF_OUTPUT_I2C_ADDRESS = "output_i2c_address"
CONF_OUTPUT_PIN = "output_pin"
CONF_FRIENDLY_NAME = "friendly_name"
CONF_VERBOSE_LEVEL = "verbose_level"
CONF_RELAY_MODE = "relay_mode"
CONF_TIMER_DELAY = "timer_delay"
# Acceptable Commands for controlling the I2C bus
# These are the commands you need to use to control the DIR register of the MCP23017, or
# for setting and clearing pins.
IDENTIFY = "IDENTIFY" # Polls an MCP23017 board on the I2C bus (True/False)
GETDIRBIT = "GETDBIT" # Read the specific IO pin dir value (1 = input)
GETDIRREGISTER = "GETDIRREG" # Read the full DIR register (low:1 or high:2)
SETDIRBIT = "SETDBIT" # Set DIR pin to INPUT (1)
CLEARDIRBIT = "CLRDBIT" # Clear DIR pin command to OUTPUT (0)
GETIOPIN = "GETPIN" # Read the specific IO pin value
GETIOREGISTER = "GETIOREG" # Read the full IO register (low:1 or high:2)
SETDATAPIN = "SETPIN" # Set pin to High
CLEARDATAPIN = "CLRPIN" # Set pin to low
TOGGLEPIN = "TOGGLE" # Toggle a pin to the "other" value for TOGGLEDELAY time
# The dummy command is sent during initialization of the database and verification if
# the database can be written to. Dummy commands are not processed.
DUMMY_COMMAND = 'dummycommand'
### END OF CONSTANTS SECTION #########################################################
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_OUTPUT_I2C_ADDRESS): vol.All(int, vol.Range(min=0x03, max=0x77)),
vol.Required(CONF_OUTPUT_PIN): vol.All(int, vol.Range(min=0, max=15)),
vol.Optional(CONF_INPUT_I2C_ADDRESS, default=0xFF): vol.All(int, vol.Range(min=0x03, max=0xFF)),
vol.Optional(CONF_INPUT_PIN, default=15): vol.All(int, vol.Range(min=0, max=15)),
vol.Optional(CONF_NAME, default="MCP23017"): cv.string,
vol.Optional(CONF_FRIENDLY_NAME, default="MCP23017"): cv.string,
vol.Optional(CONF_VERBOSE_LEVEL, default=0): vol.All(int, vol.Range(min=0, max=3)),
vol.Optional(CONF_TIMER_DELAY, default=2.0): vol.All(float, vol.Range(min=1.0, max=604800.0)),
vol.Optional(CONF_RELAY_MODE, default="A"): vol.In(["A", "B", "C", "D", "E", "F"])
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
# Collect parameters from configuration.yaml
name = config.get(CONF_NAME)
friendlyname = config.get(CONF_FRIENDLY_NAME)
input_i2c_address = config.get(CONF_INPUT_I2C_ADDRESS)
input_pin_num = config.get(CONF_INPUT_PIN)
output_i2c_address = config.get(CONF_OUTPUT_I2C_ADDRESS)
output_pin_num = config.get(CONF_OUTPUT_PIN)
verbosity = config.get(CONF_VERBOSE_LEVEL)
timer_delay = config.get(CONF_TIMER_DELAY)
relay_mode = config.get(CONF_RELAY_MODE)
# Present device to hassio
add_devices([MCP23017_Relay(input_i2c_address, input_pin_num, output_i2c_address, \
output_pin_num, verbosity, timer_delay, relay_mode, friendlyname, name)])
class mcp23017client():
"""
A class for starting an in-memory Redis database communication with the mcp23017server service.
"""
def __init__(self):
# Commands have id datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")}, i.e. the primary key is a timestamp.
# Commands given at exactly the same time, will overwrite each other, but this is not expected to happen.
# The commands table is then formatted as (all fields are TEXT, even if formatted as "0xff" !!)
# id, command TEXT, boardnr TEXT DEFAULT '0x00', pinnr TEXT DEFAULT '0x00', datavalue TEXT DEFAULT '0x00'
self._commands = None
# Responses have id datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")}, i.e. the primary key is a timestamp.
# The Responses table is then formatted as (all fields are TEXT, even if formatted as "0xff" !!)
# id, command_id TEXT, datavalue TEXT, response TEXT
self._responses = None
def OpenAndVerifyDatabase(self):
"""
Opens an existing database, or creates a new one if not yet existing. Then
verifies if the Redis database is accessible.
"""
# First try to open the database itself.
try:
# Open the shared memory databases.
# Redis database [0] is for commands that are sent from the clients to the server.
nowTrying = "Commands"
self._commands = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
            # Redis database [1] is for responses from the server to the clients.
nowTrying = "Responses"
self._responses = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=1)
except OSError as err:
# Capturing OS error.
return "FATAL OS ERROR. Could not open [{}] database. This program is now exiting with error [{}].".format(nowTrying, err)
except:
# Capturing all other errors.
return "FATAL UNEXPECTED ERROR. Could not open [{}] database. This program is now exiting with error [{}].".format(nowTrying, sys.exc_info()[0])
# Do a dummy write to the Commands database, as verification that the database is fully up and running.
try:
# Remember: fields are
# id, command TEXT, boardnr TEXT DEFAULT '0x00', pinnr TEXT DEFAULT '0x00', datavalue TEXT DEFAULT '0x00'
id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds()
datamap = {'command':DUMMY_COMMAND, 'boardnr':0x00, 'pinnr':0xff, 'datavalue':0x00}
# Write the info to the Redis database
self._commands.hset(id, None, None, datamap)
# Set expiration to 1 second, after which Redis will automatically delete the record
self._commands.expire(id, 1)
except:
# Capturing all errors.
return "FATAL UNEXPECTED ERROR. Could not read and/or write the [Commands] database. This program is now exiting with error [{}].".format(sys.exc_info()[0])
# Next, do a dummy write to the Responses database, as verification that the database is fully up and running.
try:
# Remember: fields are
# id, command_id TEXT, datavalue TEXT, response TEXT
id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds()
datamap = {'datavalue':0x00, 'response':'OK'}
# Write the info to the Redis database
self._responses.hset(id, None, None, datamap)
# Set expiration to 1 second, after which Redis will automatically delete the record
self._responses.expire(id, 1)
except:
# Capturing all errors.
return "FATAL UNEXPECTED ERROR. Could not read and/or write the [Responses] database. This program is now exiting with error [{}].".format(sys.exc_info()[0])
# We got here, so return zero error message.
return ""
def SendCommand(self, whichCommand, board_id, pin_id):
"""
Send a new command to the mcp23017server through a Redis database record.
        The commands get a time-out, to avoid that e.g. a button pushed now is only processed hours later.
Response times are expected to be in the order of (fractions of) seconds.
"""
# Prepare new id based on timestamp. Since this is up to the milliseconds, the ID is expected to be unique
id = (datetime.now() - datetime.utcfromtimestamp(0)).total_seconds()
# Create data map
mapping = {'command':whichCommand, 'boardnr':board_id, 'pinnr':pin_id}
# Expiration in the Redis database can be set already. Use the software expiration with some grace period.
# Expiration must be an rounded integer, or Redis will complain.
expiration = round(COMMAND_TIMEOUT + 1)
# Now send the command to the Redis in-memory database
self._commands.hset(id, None, None, mapping)
# Command must self-delete within the expiration period. Redis can take care.
self._commands.expire(id, expiration)
# The timestamp is also the id of the command (needed for listening to the response)
return id
def WaitForReturn(self, command_id):
"""
Wait for a response to come back from the mcp23017server, once the command has been processed on the
I2C bus. If the waiting is too long (> COMMAND_TIMEOUT), cancel the operation and return an error.
"""
answer = None
# If no timely answer, then cancel anyway. So, keep track of when we started.
checking_time = datetime.now()
while answer == None:
# request the data from the Redis database, based on the Command ID.
datafetch = self._responses.hgetall(command_id)
# Verify if a response is available.
if len(datafetch) > 0:
# Do data verification, to cover for crippled data entries without crashing the software.
try:
datavalue = datafetch[b'datavalue'].decode('ascii')
except:
datavalue = 0x00
try:
response = datafetch[b'response'].decode('ascii')
except:
response = "Error Parsing mcp23017server data."
answer = (datavalue, response)
if (datetime.now() - checking_time).total_seconds() > COMMAND_TIMEOUT:
answer = (0x00, "Time-out error trying to get result from server for Command ID {}".format(command_id))
return answer
def ProcessCommand(self, whichCommand, board_id, pin_id):
"""
        The ProcessCommand function is a combination of sending the command to the mcp23017server host and
        waiting for the response to come back.
"""
retval = -1
# First send the command to the server
command_id = self.SendCommand(whichCommand, board_id, pin_id)
# Then wait for the response back
response = self.WaitForReturn(command_id)
# A good command will result in an "OK" to come back from the server.
if response[1].strip().upper() == 'OK':
# OK Received, now process the data value that was sent back.
retval = response[0]
if(isinstance(retval,str)):
if len(retval) == 0:
retval = 0x00
else:
try:
if 'x' in retval:
retval = int(retval, 16)
else:
retval = int(retval, 10)
except:
# wrong type of data received
retval = "Error when processing return value. Received value that I could not parse: [{}]".format(response[0])
else:
retval = "Error when processing pin '0x{:02X}' on board '0x{:02X}'. Error Received: {}".format(board_id, pin_id, response[1])
return retval
class MCP23017_Relay(SwitchEntity):
"""
Relay for MCP23017 GPIO
"""
def __init__(self, i2c_in, pin_in, i2c_out, pin_out, verbosity, \
timer_delay, relay_mode, friendlyname, name, invert_logic = False):
self._name = name
self._friendly_name = friendlyname
self._verbose = verbosity
self._invert_logic = invert_logic
self._state = False
self._relay_mode = relay_mode
self._timer_delay = timer_delay
self._datapipe = mcp23017client()
# input and output chips
self._i2c_in = i2c_in
self._pin_in = pin_in
self._i2c_out = i2c_out
self._pin_out = pin_out
# In case input is same as output, or if input is default 0xff, then chip is output only
if (i2c_in==0xff) or ((i2c_in==i2c_out) and (pin_in == pin_out)):
self._output_only = True
else:
self._output_only = False
# Initiate data pipe to the Redis database server, and set the proper DIR bits (input vs. output)
err_msg = self._datapipe.OpenAndVerifyDatabase()
if err_msg == "":
self.SetDirBits()
else:
if self._verbose > 0:
self._SendStatusMessage("ERROR initializing: [{}]. ".format(err_msg))
if self._verbose > 0:
self._SendStatusMessage("\n\nCompleted Initialization.")
@property
def name(self):
return self._friendly_name
@property
def is_on(self) -> bool:
# The input must always be read from the I2C bus. Reason is that states can also be changed by
# human interaction, i.e. not controlled by the Home Assistant software.
self._read_bus()
return self._state
def SetDirBits(self):
"""
Set the MCP23017 DIR bits, which determine whether a pin is an input (High) or an output (Low).
This software handles two possibilities:
* PASSIVE OUTPUT - the output is determined by Home Assistant. If Home Assistant tells the output
to be high, then it will stay high. This is for e.g. status lights.
* INPUT-OUTPUT - the output is toggled (or set) by one MCP23017 pin, and the status of the light
is read on another pin. This allows monitoring of the input (e.g. caused by human interaction)
and software changing the state through the output pin.
"""
# In case input is same as output, or if input is default 0xff, then chip is output only
if self._output_only:
msg_update = self._datapipe.ProcessCommand(CLEARDIRBIT, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Info: Clearing DIR bit for OUTPUT ONLY: [{}] cleared on board [{}]. Update Message is: [{}]".format(self._pin_out, self._i2c_out, msg_update))
else:
msg_update = self._datapipe.ProcessCommand(CLEARDIRBIT, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Info: Clearing DIR bit for OUTPUT: [{}] cleared on board [{}]. Update Message is: [{}]".format(self._pin_out, self._i2c_out, msg_update))
msg_update = self._datapipe.ProcessCommand(SETDIRBIT, self._i2c_in, self._pin_in)
if self._verbose > 1:
self._SendStatusMessage("Info: Setting DIR bit for INPUT: [{}] set on board [{}]. Update Message is: [{}]".format(self._pin_in, self._i2c_in, msg_update))
def turn_on(self):
"""
        Switches the output on when the IC is output only.
        In case of a toggle output: checks the input first and, if it is not already on, toggles the output.
"""
self.SetDirBits()
if self._output_only:
msg_update = self._datapipe.ProcessCommand(SETDATAPIN, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Turned pin [{}] on board [{}] ON. Update Message is: [{}]".format(self._pin_out, self._i2c_out, msg_update))
else:
self._read_bus()
if self._state:
if self._verbose > 1:
self._SendStatusMessage("Wanted to turn pin [{}] on board [{}] ON through TOGGLING, but input was already on. Nothing to do.".format(self._pin_out, self._i2c_out))
else:
msg_update = self._datapipe.ProcessCommand(TOGGLEPIN, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Turned pin [{}] on board [{}] ON through TOGGLING. Update Message is: [{}]".format(self._pin_out, self._i2c_out, msg_update))
# Re-read the bus state for the specific input
self._read_bus()
def turn_off(self):
"""
        Switches the output off when the IC is output only.
        In case of a toggle output: checks the input first and, if it is not already off, toggles the output.
"""
self.SetDirBits()
if self._output_only:
msg_update = self._datapipe.ProcessCommand(CLEARDATAPIN, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Turned pin [{}] on board [{}] OFF. Update Message is: [{}]".format(self._pin_out, self._i2c_out, msg_update))
else:
self._read_bus()
if self._state:
msg_update = self._datapipe.ProcessCommand(TOGGLEPIN, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Turned pin [{}] on board [{}] OFF through TOGGLING. Update Message is: [{}]".format(self._pin_out, self._i2c_out, msg_update))
else:
if self._verbose > 1:
self._SendStatusMessage("Wanted to turn pin [{}] on board [{}] OFF through TOGGLING, but input was already off. Nothing to do.".format(self._pin_out, self._i2c_out))
# Re-read the bus state for the specific input
self._read_bus()
def toggle(self):
"""
Toggles output. In case of an output only, the polarity is switched.
In case of an input/output configuration, the output is momentarily activated (toggle switch).
"""
self.SetDirBits()
if self._output_only:
self._read_bus()
if self._state:
msg_update = self._datapipe.ProcessCommand(CLEARDATAPIN, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Toggle command for OUTPUT ONLY case: switched pin [{}] on board [{}] OFF.".format(self._pin_out, self._i2c_out))
else:
msg_update = self._datapipe.ProcessCommand(SETDATAPIN, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Toggle command for OUTPUT ONLY case: switched pin [{}] on board [{}] ON.".format(self._pin_out, self._i2c_out))
else:
msg_update = self._datapipe.ProcessCommand(TOGGLEPIN, self._i2c_out, self._pin_out)
if self._verbose > 1:
self._SendStatusMessage("Toggle command received for output pin [{}] on board [{}]. Input should now have reversed.".format(self._pin_out, self._i2c_out))
# Re-read the bus state for the specific input
self._read_bus()
def _read_bus(self):
"""
Read input pin from the I2C bus in an input/output configuration, or read the
output pin in an output-only configuration.
"""
if self._output_only:
self._state = self._datapipe.ProcessCommand(GETIOPIN, self._i2c_out, self._pin_out)
else:
self._state = self._datapipe.ProcessCommand(GETIOPIN, self._i2c_in, self._pin_in)
def _SendStatusMessage(self, extraText = ""):
"""
        Send an email with the current status. Depending on the verbosity level, more or less information is provided.
"""
dateTimeObj = datetime.now()
txtmsg = "Home Assistant Info on: " + dateTimeObj.strftime("%d-%b-%Y -- %H:%M:%S.%f")
if extraText != "":
txtmsg = txtmsg + "\n" + extraText + "\n"
if self._verbose == 1:
sendMyEmail(extraText + self._name + " is switched " + \
("[on]" if self._state else "[off]"))
if self._verbose > 2:
if self._state:
txtmsg = txtmsg + "{} is switched ON.\n\n".format(self._name)
else:
txtmsg = txtmsg + "{} is switched OFF.\n\n".format(self._name)
txtmsg = txtmsg + "Switch details: \n"
if self._output_only:
txtmsg = txtmsg + "This is an OUTPUT ONLY \n"
txtmsg = txtmsg + " * output pin: [{}] on board [{}] \n".format(self._pin_out, self._i2c_out)
else:
txtmsg = txtmsg + "This is an INPUT DRIVEN OUTPUT \n"
txtmsg = txtmsg + " * input pin: [{}] on board [{}] \n".format(self._pin_in, self._i2c_in)
txtmsg = txtmsg + " * output pin: [{}] on board [{}] \n".format(self._pin_out, self._i2c_out)
txtmsg = txtmsg + "Relay mode = [{}]\n".format(self._relay_mode)
sendMyEmail(txtmsg)
def sendMyEmail(txtMessage):
"""
    Send an email through the configured SMTP server.
"""
if CONST_EMAIL_RECEIVER != "":
mailsender = CONST_EMAIL_SENDER
mailrecipient = CONST_EMAIL_RECEIVER
smtpserver = CONST_EMAIL_SMTPSERVER
msg = "From: " + mailsender + "\r\nTo: " + mailrecipient + "\r\n\r\n"
msg = msg + txtMessage
server = smtplib.SMTP(smtpserver, 25)
server.set_debuglevel(1)
server.sendmail(mailsender, mailrecipient, msg)
server.close()
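# Illustrative usage outside Home Assistant (board address 0x20 and pin 3 are example values; the
# Redis-backed mcp23017server service is assumed to be running). Only the client class and the
# command constants defined above are exercised.
if __name__ == '__main__':
    client = mcp23017client()
    error = client.OpenAndVerifyDatabase()
    if error:
        print(error)
    else:
        client.ProcessCommand(SETDIRBIT, 0x20, 3)         # configure pin 3 on board 0x20 as an input
        value = client.ProcessCommand(GETIOPIN, 0x20, 3)  # read the pin; returns an int or an error string
        print("pin value:", value)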
|
python
|
import logging
import os
from rich import print
from typing import Dict
from sfaira.commands.utils import get_ds
from sfaira.consts.utils import clean_doi
from sfaira.data import DatasetBase
log = logging.getLogger(__name__)
class H5adExport:
datasets: Dict[str, DatasetBase]
doi: str
doi_sfaira_repr: str
path_cache: str
path_data: str
path_loader: str
path_out: str
schema: str
def __init__(self, doi, path_cache, path_data, path_loader, path_out, schema):
self.doi = doi
self.doi_sfaira_repr = clean_doi(self.doi)
self.path_cache = path_cache
self.path_data = path_data
self.path_loader = path_loader
self.path_out = path_out
if schema not in ["cellxgene"]:
raise ValueError(f"Did not recognize schema {schema}")
self.schema = schema
def write(self):
self._load_objects()
self._write_h5ads()
def _load_objects(self):
dsg, _ = get_ds(doi_sfaira_repr=self.doi_sfaira_repr, path_cache=self.path_cache, path_data=self.path_data,
path_loader=self.path_loader)
dsg.load(load_raw=False, allow_caching=True)
if self.schema == "cellxgene":
dsg.streamline_features(schema="cellxgene:" + "2.0.0")
dsg.streamline_metadata(
schema=self.schema.lower(),
clean_obs=False,
clean_var=False,
clean_uns=True,
clean_obs_names=False,
keep_orginal_obs=False,
keep_symbol_obs=True,
keep_id_obs=True,
)
dsg.collapse_counts()
self.datasets = dsg.datasets
def _write_h5ads(self):
counter = 0
for k, v in self.datasets.items():
fn = v.doi_cleaned_id + ".h5ad"
dir_name = v.directory_formatted_doi
if not os.path.exists(os.path.join(self.path_out, dir_name)):
os.makedirs(os.path.join(self.path_out, dir_name))
fn_out = os.path.join(self.path_out, dir_name, fn)
print(f'[bold orange]Sfaira butler: "Preparing {fn_out} for you."')
v.adata.write_h5ad(fn_out)
counter += 1
print(f'[bold orange]Sfaira butler: "I wrote a total of {counter} .h5ad files."')
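# Usage sketch (illustrative; the DOI and directory paths below are hypothetical placeholders):
#
#     exporter = H5adExport(
#         doi="10.1000/example.doi",
#         path_cache="/data/cache",
#         path_data="/data/raw",
#         path_loader="/repo/sfaira/dataloaders",
#         path_out="/data/h5ads",
#         schema="cellxgene",
#     )
#     exporter.write()  # loads the datasets, streamlines them to the cellxgene schema, writes one .h5ad each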
|
python
|
from typing import Optional, Sequence
from kubragen import KubraGen
from kubragen.builder import Builder
from kubragen.consts import PROVIDER_K3D, PROVIDER_K3S
from kubragen.data import ValueData
from kubragen.exception import InvalidParamError, InvalidNameError
from kubragen.helper import QuotedStr
from kubragen.kdatahelper import KDataHelper_Volume
from kubragen.object import ObjectItem, Object
from kubragen.types import TBuild, TBuildItem
from .option import EFKOptions
class EFKBuilder(Builder):
"""
EFK builder.
Based on `How To Set Up an Elasticsearch, Fluentd and Kibana (EFK) Logging Stack on Kubernetes <https://www.digitalocean.com/community/tutorials/how-to-set-up-an-elasticsearch-fluentd-and-kibana-efk-logging-stack-on-kubernetes>`_.
.. list-table::
:header-rows: 1
* - build
- description
* - BUILD_ACCESSCONTROL
- creates service account, roles, and roles bindings
* - BUILD_CONFIG
- creates configurations
* - BUILD_SERVICE
- creates StatefulSet and Services
.. list-table::
:header-rows: 1
* - build item
- description
* - BUILDITEM_SERVICE_ACCOUNT
- ServiceAccount
* - BUILDITEM_ELASTICSEARCH_SERVICE
- Elasticsearch Service
* - BUILDITEM_ELASTICSEARCH_STATEFULSET
- Elasticsearch StatefulSet
* - BUILDITEM_KIBANA_DEPLOYMENT
- Kibana Deployment
* - BUILDITEM_KIBANA_SERVICE
- Kibana Service
* - BUILDITEM_FLUENTD_CLUSTER_ROLE
- Fluentd Cluster Role
* - BUILDITEM_FLUENTD_CLUSTER_ROLE_BINDING
- Fluentd Cluster Role Binding
* - BUILDITEM_FLUENTD_DAEMONSET
- Fluentd Daemonset
.. list-table::
:header-rows: 1
* - object name
- description
- default value
* - service-account
- ServiceAccount
- ```<basename>```
* - elasticsearch-service
- Elasticsearch Service
- ```<basename>-elasticsearch```
* - elasticsearch-statefulset
- Elasticsearch StatefulSet
- ```<basename>-elasticsearch```
        * - elasticsearch-pod-label-app
- Elasticsearch label *app* to be used by selection
- ```<basename>-elasticsearch```
* - kibana-service
- Kibana Service
- ```<basename>-kibana```
* - kibana-deployment
- Kibana Deployment
- ```<basename>-kibana```
        * - kibana-pod-label-app
- Kibana label *app* to be used by selection
- ```<basename>-kibana```
* - fluentd-cluster-role
- Fluentd ClusterRole
- ```<basename>-fluentd```
* - fluentd-cluster-role-binding
- Fluentd ClusterRoleBinding
- ```<basename>-fluentd```
        * - fluentd-daemonset
          - Fluentd DaemonSet
- ```<basename>-fluentd```
        * - fluentd-pod-label-app
- Fluentd label *app* to be used by selection
- ```<basename>-fluentd```
"""
options: EFKOptions
configfile: Optional[str]
_namespace: str
SOURCE_NAME = 'kg_efk'
BUILD_ACCESSCONTROL = TBuild('accesscontrol')
BUILD_CONFIG = TBuild('config')
BUILD_SERVICE = TBuild('service')
BUILDITEM_SERVICE_ACCOUNT = TBuildItem('service-account')
BUILDITEM_ELASTICSEARCH_SERVICE = TBuildItem('elasticsearch-service')
BUILDITEM_ELASTICSEARCH_STATEFULSET = TBuildItem('elasticsearch-statefulset')
BUILDITEM_KIBANA_DEPLOYMENT = TBuildItem('kibana-deployment')
BUILDITEM_KIBANA_SERVICE = TBuildItem('kibana-service')
BUILDITEM_FLUENTD_CLUSTER_ROLE = TBuildItem('fluentd-cluster-role')
BUILDITEM_FLUENTD_CLUSTER_ROLE_BINDING = TBuildItem('fluentd-cluster-role-binding')
BUILDITEM_FLUENTD_DAEMONSET = TBuildItem('fluentd-daemonset')
def __init__(self, kubragen: KubraGen, options: Optional[EFKOptions] = None):
super().__init__(kubragen)
if options is None:
options = EFKOptions()
self.options = options
self.configfile = None
self._namespace = self.option_get('namespace')
if self.option_get('config.authorization.serviceaccount_create') is not False:
serviceaccount_name = self.basename()
else:
serviceaccount_name = self.option_get('config.authorization.serviceaccount_use')
if serviceaccount_name == '':
serviceaccount_name = None
if self.option_get('config.authorization.roles_bind') is not False:
if serviceaccount_name is None:
raise InvalidParamError('To bind roles a service account is required')
self.object_names_init({
'service-account': serviceaccount_name,
'elasticsearch-service': self.basename('-elasticsearch'),
'elasticsearch-statefulset': self.basename('-elasticsearch'),
'elasticsearch-pod-label-app': self.basename('-elasticsearch'),
'fluentd-cluster-role': self.basename('-fluentd'),
'fluentd-cluster-role-binding': self.basename('-fluentd'),
'fluentd-daemonset': self.basename('-fluentd'),
'fluentd-pod-label-app': self.basename('-fluentd'),
})
if self.option_get('enable.kibana'):
self.object_names_init({
'kibana-service': self.basename('-kibana'),
'kibana-deployment': self.basename('-kibana'),
'kibana-pod-label-app': self.basename('-kibana'),
})
def option_get(self, name: str):
return self.kubragen.option_root_get(self.options, name)
def basename(self, suffix: str = ''):
return '{}{}'.format(self.option_get('basename'), suffix)
def namespace(self):
return self._namespace
def build_names(self) -> Sequence[TBuild]:
return [self.BUILD_ACCESSCONTROL, self.BUILD_CONFIG, self.BUILD_SERVICE]
def build_names_required(self) -> Sequence[TBuild]:
ret = [self.BUILD_SERVICE]
if self.option_get('config.authorization.serviceaccount_create') is not False or \
self.option_get('config.authorization.roles_create') is not False:
ret.append(self.BUILD_ACCESSCONTROL)
return ret
def builditem_names(self) -> Sequence[TBuildItem]:
return [
self.BUILDITEM_SERVICE_ACCOUNT,
self.BUILDITEM_ELASTICSEARCH_SERVICE,
self.BUILDITEM_ELASTICSEARCH_STATEFULSET,
self.BUILDITEM_KIBANA_DEPLOYMENT,
self.BUILDITEM_KIBANA_SERVICE,
self.BUILDITEM_FLUENTD_CLUSTER_ROLE,
self.BUILDITEM_FLUENTD_CLUSTER_ROLE_BINDING,
self.BUILDITEM_FLUENTD_DAEMONSET,
]
def internal_build(self, buildname: TBuild) -> Sequence[ObjectItem]:
if buildname == self.BUILD_ACCESSCONTROL:
return self.internal_build_accesscontrol()
elif buildname == self.BUILD_CONFIG:
return self.internal_build_config()
elif buildname == self.BUILD_SERVICE:
return self.internal_build_service()
else:
raise InvalidNameError('Invalid build name: "{}"'.format(buildname))
def internal_build_accesscontrol(self) -> Sequence[ObjectItem]:
ret = []
if self.option_get('config.authorization.serviceaccount_create') is not False:
ret.extend([
Object({
'apiVersion': 'v1',
'kind': 'ServiceAccount',
'metadata': {
'name': self.object_name('service-account'),
'namespace': self.namespace(),
}
}, name=self.BUILDITEM_SERVICE_ACCOUNT, source=self.SOURCE_NAME, instance=self.basename()),
])
if self.option_get('config.authorization.roles_create') is not False:
ret.extend([
Object({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'ClusterRole',
'metadata': {
'name': self.object_name('fluentd-cluster-role'),
},
'rules': [{
'apiGroups': [''],
'resources': ['pods', 'namespaces'],
'verbs': ['get', 'list', 'watch']
}]
}, name=self.BUILDITEM_FLUENTD_CLUSTER_ROLE, source=self.SOURCE_NAME, instance=self.basename()),
])
if self.option_get('config.authorization.roles_bind') is not False:
ret.extend([
Object({
'kind': 'ClusterRoleBinding',
'apiVersion': 'rbac.authorization.k8s.io/v1',
'metadata': {
'name': self.object_name('fluentd-cluster-role-binding'),
},
'roleRef': {
'kind': 'ClusterRole',
'name': self.object_name('fluentd-cluster-role'),
'apiGroup': 'rbac.authorization.k8s.io'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': self.object_name('service-account'),
'namespace': self.namespace(),
}]
}, name=self.BUILDITEM_FLUENTD_CLUSTER_ROLE_BINDING, source=self.SOURCE_NAME, instance=self.basename())
])
return ret
def internal_build_config(self) -> Sequence[ObjectItem]:
# Reserve for future use
return []
def internal_build_service(self) -> Sequence[ObjectItem]:
ret = []
ret.extend([
Object({
'kind': 'Service',
'apiVersion': 'v1',
'metadata': {
'name': self.object_name('elasticsearch-service'),
'namespace': self.namespace(),
'labels': {
'app': self.object_name('elasticsearch-pod-label-app'),
}
},
'spec': {
'selector': {
'app': self.object_name('elasticsearch-pod-label-app'),
},
'clusterIP': 'None',
'ports': [{
'port': 9200,
'name': 'rest'
}, {
'port': 9300,
'name': 'inter-node'
}],
}
}, name=self.BUILDITEM_ELASTICSEARCH_SERVICE, source=self.SOURCE_NAME, instance=self.basename()),
Object({
'apiVersion': 'apps/v1',
'kind': 'StatefulSet',
'metadata': {
'name': self.object_name('elasticsearch-statefulset'),
'namespace': self.namespace(),
},
'spec': {
'serviceName': self.object_name('elasticsearch-service'),
'replicas': self.option_get('config.elasticsearch.replicas'),
'selector': {
'matchLabels': {
'app': self.object_name('elasticsearch-pod-label-app'),
}
},
'template': {
'metadata': {
'labels': {
'app': self.object_name('elasticsearch-pod-label-app'),
}
},
'spec': {
'volumes': [
KDataHelper_Volume.info(base_value={
'name': 'data',
}, value=self.option_get('kubernetes.volumes.elasticsearch-data')),
],
'containers': [{
'name': 'elasticsearch',
'image': self.option_get('container.elasticsearch'),
'ports': [{
'containerPort': 9200,
'name': 'rest',
'protocol': 'TCP'
},
{
'containerPort': 9300,
'name': 'inter-node',
'protocol': 'TCP'
}],
'volumeMounts': [{
'name': 'data',
'mountPath': '/usr/share/elasticsearch/data'
}],
'env': [{
'name': 'cluster.name',
'value': self.object_name('elasticsearch-statefulset'),
},
{
'name': 'NODE_NAME',
'valueFrom': {
'fieldRef': {
'fieldPath': 'metadata.name'
}
},
},
{
'name': 'node.name',
'value': QuotedStr('$(NODE_NAME).{}'.format(self.object_name('elasticsearch-service'))),
},
{
'name': 'discovery.seed_hosts',
'value': ','.join(['{}-{}.{}'.format(
self.object_name('elasticsearch-statefulset'), rpl, self.object_name('elasticsearch-service'))
for rpl in range(self.option_get('config.elasticsearch.replicas'))
]),
},
{
'name': 'cluster.initial_master_nodes',
'value': ','.join(['{}-{}.{}'.format(
self.object_name('elasticsearch-statefulset'), rpl, self.object_name('elasticsearch-service'))
for rpl in range(self.option_get('config.elasticsearch.replicas'))
]),
},
{
'name': 'ES_JAVA_OPTS',
'value': '-Xms512m '
'-Xmx512m'
}],
'resources': ValueData(value=self.option_get('kubernetes.resources.elasticsearch-statefulset'),
disabled_if_none=True),
}],
'initContainers': [{
'name': 'fix-permissions',
'image': 'busybox',
'command': [
'sh',
'-c',
'chown -R '
'1000:1000 '
'/usr/share/elasticsearch/data'
],
'securityContext': {
'privileged': True
},
'volumeMounts': [{
'name': 'data',
'mountPath': '/usr/share/elasticsearch/data'
}],
},
{
'name': 'increase-vm-max-map',
'image': 'busybox',
'command': [
'sysctl',
'-w',
'vm.max_map_count=262144'
],
'securityContext': {
'privileged': True
},
},
{
'name': 'increase-fd-ulimit',
'image': 'busybox',
'command': [
'sh',
'-c',
'ulimit -n '
'65536'
],
'securityContext': {
'privileged': True
},
}]
}
},
}
}, name=self.BUILDITEM_ELASTICSEARCH_STATEFULSET, source=self.SOURCE_NAME, instance=self.basename()),
Object({
'apiVersion': 'apps/v1',
'kind': 'DaemonSet',
'metadata': {
'name': self.object_name('fluentd-daemonset'),
'namespace': self.namespace(),
'labels': {
'app': self.object_name('fluentd-pod-label-app'),
}
},
'spec': {
'selector': {
'matchLabels': {
'app': self.object_name('fluentd-pod-label-app'),
}
},
'template': {
'metadata': {
'labels': {
'app': self.object_name('fluentd-pod-label-app'),
}
},
'spec': {
'serviceAccount': self.object_name('service-account'),
'serviceAccountName': self.object_name('service-account'),
'tolerations': [{
'key': 'node-role.kubernetes.io/master',
'effect': 'NoSchedule'
}],
'containers': [{
'name': 'fluentd',
'image': self.option_get('container.fluentd'),
'env': [{
'name': 'FLUENT_ELASTICSEARCH_HOST',
'value': '{}.{}.svc.cluster.local'.format(self.object_name('elasticsearch-service'), self.namespace()),
},
{
'name': 'FLUENT_ELASTICSEARCH_PORT',
'value': '9200'
},
{
'name': 'FLUENT_ELASTICSEARCH_SCHEME',
'value': 'http'
},
{
'name': 'FLUENTD_SYSTEMD_CONF',
'value': 'disable'
},
ValueData(value={
'name': 'FLUENT_CONTAINER_TAIL_PARSER_TYPE',
'value': '/^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/',
}, enabled=self.kubragen.provider.provider == PROVIDER_K3D or self.kubragen.provider.provider == PROVIDER_K3S)],
'volumeMounts': [{
'name': 'varlog',
'mountPath': '/var/log'
},
{
'name': 'varlibdockercontainers',
'mountPath': '/var/lib/docker/containers',
'readOnly': True
}],
'resources': ValueData(value=self.option_get('kubernetes.resources.fluentd-daemonset'),
disabled_if_none=True),
}],
'terminationGracePeriodSeconds': 30,
'volumes': [{
'name': 'varlog',
'hostPath': {
'path': '/var/log'
}
},
{
'name': 'varlibdockercontainers',
'hostPath': {
'path': '/var/lib/docker/containers'
}
}]
}
}
}
}, name=self.BUILDITEM_FLUENTD_DAEMONSET, source=self.SOURCE_NAME, instance=self.basename()),
])
if self.option_get('enable.kibana'):
ret.extend([
Object({
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': self.object_name('kibana-service'),
'namespace': self.namespace(),
'labels': {
'app': self.object_name('kibana-pod-label-app'),
},
},
'spec': {
'ports': [{
'port': self.option_get('config.kibana.service_port'),
'targetPort': 5601,
}],
'selector': {
'app': self.object_name('kibana-pod-label-app'),
}
}
}, name=self.BUILDITEM_KIBANA_SERVICE, source=self.SOURCE_NAME, instance=self.basename()),
Object({
'apiVersion': 'apps/v1',
'kind': 'Deployment',
'metadata': {
'name': self.object_name('kibana-deployment'),
'namespace': self.namespace(),
'labels': {
'app': self.object_name('kibana-pod-label-app'),
}
},
'spec': {
# 'replicas': 1,
'selector': {
'matchLabels': {
'app': self.object_name('kibana-pod-label-app'),
}
},
'template': {
'metadata': {
'labels': {
'app': self.object_name('kibana-pod-label-app'),
}
},
'spec': {
'containers': [{
'name': 'kibana',
'image': self.option_get('container.kibana'),
'env': [{
'name': 'ELASTICSEARCH_HOSTS',
'value': 'http://{}:9200'.format(self.object_name('elasticsearch-service')),
}],
'ports': [{
'containerPort': 5601
}],
'livenessProbe': ValueData(value={
'httpGet': {
'path': '/api/status',
'port': 5601,
},
'initialDelaySeconds': 30,
'timeoutSeconds': 20,
}, enabled=self.option_get('config.probes')),
'readinessProbe': ValueData(value={
'httpGet': {
'path': '/api/status',
'port': 5601,
},
'initialDelaySeconds': 30,
'timeoutSeconds': 20,
}, enabled=self.option_get('config.probes')),
'resources': ValueData(
value=self.option_get('kubernetes.resources.kibana-deployment'),
disabled_if_none=True),
}]
}
}
}
}, name=self.BUILDITEM_KIBANA_DEPLOYMENT, source=self.SOURCE_NAME, instance=self.basename()),
])
return ret
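# Usage sketch (illustrative). It assumes `kg` is an already configured KubraGen instance; note that
# the module's own fallback constructs EFKOptions() with no arguments, so the options shown in the
# docstring table are read via option_get(). Only methods defined in this module are exercised here.
#
#     builder = EFKBuilder(kubragen=kg, options=EFKOptions())
#     for buildname in builder.build_names_required():
#         for obj in builder.internal_build(buildname):
#             ...  # collect/render the Kubernetes manifests with your usual KubraGen output pipeline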
|
python
|
from pathlib import Path
import pandas as pd
from modules.settings.settings import SettingsManager
class PseudoIDManager:
""" Class for setting, getting pseudo_id filepaths and getting pseudo_id value """
def __init__(self, l2c: dict, settm: SettingsManager):
self._settm = settm
self._l2c = l2c
self._df_cols = ['pseudo_id', 'internal_lab_id', 'pid_num', 'lab_code', 'submitter', 'tag']
self._submitter = None
self._lab_code = None
self._file = None
self._df = None
self._pids = []
self._lids = []
self.init_settings()
def init_settings(self):
self._set_submitter()
self._set_file()
self._set_lab_code()
self._read_csv()
self._set_lids()
self._set_pids()
def is_ready(self):
"""
Checks if all required settings have been entered.
:return: bool
"""
        return all([self._submitter, self._lab_code, self._file, self._df is not None])
def omitted_settings(self) -> str:
"""
Returns missing settings
:return: string detailing missing required settings
"""
msg_list = []
if not self._submitter:
msg_list.append("submitter")
if not self._lab_code:
msg_list.append("lab")
if not self._file:
msg_list.append("pseudo_id_filepath")
fields = ",".join(msg_list)
return f"The following settings must be entered: {fields}."
def generate_pids_from_lids(self, lids: list) -> list:
"""
        Validates the list of lids to ensure uniqueness, then generates an equal number of unique pids
:param lids: list of internal_lab_ids
:return: list of generated pids
"""
print(lids)
if not isinstance(lids, list):
print("not list")
return None
size = len(lids)
if self._df is None or size == 0:
return None
if self._lab_code is None:
return None
pids = []
if self._df.empty:
first_num = 1
last_num = first_num + size
for i in range(first_num, last_num):
pids.append(self._mk_pid(i))
else:
last_row = self._df.iloc[-1]
first_num = last_row['pid_num'] + 1
last_num = first_num + size
for i in range(first_num, last_num):
pids.append(self._mk_pid(i))
return pids
def write_pidlids_to_csv(self, pidlids: list, tag: str):
"""
:param tag: tag string (timedate string)
:param pidlids: list of pseudo_id and internal_lab_id tuples (pseudo_id, internal_lab_id)
:return:
"""
rows = []
for pidlid in pidlids:
pid, lid = pidlid
pid_num = self._pid_to_num(pid)
lab_code = self._pid_to_lab_code(pid)
row = {'lab_code': lab_code,
'pid_num': pid_num,
'pseudo_id': pid,
'internal_lab_id': lid,
'submitter': self._submitter,
'tag': tag}
rows.append(row)
print(rows)
print("in write", self._df)
if self._df is not None:
self._df = self._df.append(rows, ignore_index=True)
print(self._df)
self._df.to_csv(self._file, index=False)
def validate_lab_code(self) -> bool:
if self._df is None:
return False
lab = self._settm.get_value('select_single', 'lab')
lab_code = self._l2c[lab]
p_lab_codes = self._df['lab_code'].tolist()
if len(p_lab_codes) == 0:
return True
if str(lab_code) not in p_lab_codes:
return False
return True
def get_file(self):
return self._file
def _set_lids(self):
if self._df is not None:
self._lids = [str(lid) for lid in self._df['internal_lab_id'].tolist()]
def _set_pids(self):
if self._df is not None:
self._pids = [str(pid) for pid in self._df['pseudo_id'].tolist()]
def _set_file(self):
file_str = self._settm.get_value('entered_value', 'pseudo_id_filepath')
if file_str is not None:
file = Path(file_str)
if self._is_valid_file(file):
self._file = file
def _set_lab_code(self):
lab = self._settm.get_value('select_single', 'lab')
if lab is not None:
self._lab_code = self._l2c[lab]
def _set_submitter(self):
submitter = self._settm.get_value('entered_value', 'submitter')
if submitter is not None:
self._submitter = submitter
def get_first_pid(self):
if self._df is None:
return None
if not self.validate_lab_code():
return None
if self._df.empty:
print("df is empty")
first_pid = self._mk_pid(1)
return first_pid
else:
print("df is nonempty")
last_row = self._df.iloc[-1]
num = last_row['pid_num'] + 1
first_pid = self._mk_pid(num)
return first_pid
def validate_lids(self, lids: list) -> bool:
"""
Validates a list of internal_lab_ids
:param lids: list of internal_lab_ids
:return: bool
"""
res = []
for lid in lids:
res.append(self._is_valid_lid(str(lid)))
if all(res):
return True
else:
return False
def _validate_pidlids(self, pidlids: list) -> bool:
"""
        Validates a list of (pseudo_id, internal_lab_id) tuples by checking against previously used values
        :param pidlids: list of (pseudo_id, internal_lab_id) tuples
        :return: bool
"""
res = []
for pidlid in pidlids:
pid, lid = pidlid
res.append(self._is_valid_lid(lid))
res.append(self._is_valid_pid(pid))
if all(res):
return True
else:
return False
def _is_valid_lid(self, lid: str) -> bool:
if lid in self._lids:
return False
return True
def _is_valid_pid(self, pid: str) -> bool:
if pid in self._pids:
return False
return True
def _mk_pid(self, num: int) -> str:
num_str = self._int_zfill(num)
return f"{self._lab_code}-{num_str}"
def _create_csv(self):
if self._file is None:
return
if self._file.is_file():
return
if self._file.parent.is_dir():
_df = pd.DataFrame(columns=self._df_cols)
_df.to_csv(self._file, index=False)
def _read_csv(self):
""" read the csv into a pandas df, perform various validity checks"""
if self._file is None:
self._df = None
return
if not self._file.is_file():
self._create_csv()
try:
self._df = pd.read_csv(self._file)
if set(self._df.columns) != set(self._df_cols):
print("pseudo_id file is incompatible")
self._df = None
return
except:
self._df = None
return
lab_code_set = set(self._df['lab_code'])
if len(lab_code_set) > 1:
self._df = None
return
if len(lab_code_set) == 1 and self._lab_code not in lab_code_set:
self._df = None
return
if not self._df['pid_num'].is_monotonic_increasing:
self._df = None
return
def _write_csv(self):
if self._df is not None:
self._df.to_csv(self._file, index=False, columns=self._df_cols)
def _int_zfill(self, value: int) -> str:
number_str = str(value)
return number_str.zfill(8)
@staticmethod
def _pid_to_lab_code(pid: str) -> str:
lab_code, _ = pid.split('-')
return lab_code
@staticmethod
def _pid_to_num(pid: str) -> int:
_, no_str = pid.split('-')
return int(no_str)
@staticmethod
def _is_valid_file(file: str) -> bool:
try:
fobj = Path(file)
if fobj.parent.is_dir():
return True
else:
return False
except:
return False
#
# if __name__ == '__main__':
#
# pidm = PseudoIDManager()
# print(pidm.get_first_pid())
#
# lids = [
# 'test5',
# 'test6',
# 'test7',
# 'test8'
# ]
#
# pids = pidm.generate_pids_from_lids(lids)
# print(pids)
#
# pidlids = list(zip(pids, lids))
#
# print(pidlids)
#
# pidm.write_pidlids_to_csv(pidlids)
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import itertools
from typing import Optional
from openfermion import ops
from openfermion.testing.testing_utils import random_interaction_operator
if TYPE_CHECKING:
import openfermion
def random_interaction_operator_term(
order: int,
real: bool = True,
seed: Optional[int] = None,
) -> 'openfermion.InteractionOperator':
"""Generates a random interaction operator with non-zero coefficients only
on terms corresponding to the given number of unique orbitals.
The number of orbitals is equal to the given order.
Args:
order: How many unique orbitals the non-zero terms should correspond to.
real: Whether or not the coefficients should be real. Defaults to True.
seed: The seed. If None (default), uses np.random.
"""
n_orbitals = order
if order > 4:
return ops.InteractionOperator.zero(order)
operator = random_interaction_operator(n_orbitals, real=real, seed=seed)
operator.constant = 0
for indices in itertools.product(range(n_orbitals), repeat=2):
if len(set(indices)) != order:
operator.one_body_tensor[indices] = 0
for indices in itertools.product(range(n_orbitals), repeat=4):
if len(set(indices)) != order:
operator.two_body_tensor[indices] = 0
return operator
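# Usage sketch: with order=2, only entries whose index tuple touches exactly two distinct orbitals
# keep a non-zero coefficient, while entries such as one_body_tensor[0, 0] are zeroed out.
if __name__ == '__main__':
    op = random_interaction_operator_term(2, real=True, seed=0)
    assert op.constant == 0
    assert op.one_body_tensor[0, 0] == 0        # only one unique orbital -> zeroed
    assert op.two_body_tensor[0, 0, 0, 0] == 0  # only one unique orbital -> zeroed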
|
python
|
# -*- coding: utf-8 -*-
"""
Helpers for our web and worker (scraper) instances
"""
from werkzeug.contrib.atom import AtomFeed
from flask import make_response
import json
from bson import json_util
import datetime
import pymongo
from conn import db
def _get_plans(count=1000, query={}):
return list(db.plans.find(query, limit=count).sort(
[("year", pymongo.DESCENDING), ("month", pymongo.DESCENDING), ("day", pymongo.DESCENDING)]))
def _get_gushim(query={}, fields=None):
return list(db.gushim.find(query, fields=fields))
def _get_plan_statistics():
return db.plans.aggregate([
{"$unwind" : "$gushim" },
{"$project": {"gush_id": "$gushim", "status": "$status", "_id": 0}},
{"$group": {"_id": {"gush_id": "$gush_id", "status": "$status"}, "count": {"$sum": 1}}}
])
def _create_response_json(data):
"""
Convert dictionary to JSON. json_util.default adds automatic mongoDB result support
"""
r = make_response(json.dumps(data, ensure_ascii=False, default=json_util.default))
r.headers['Access-Control-Allow-Origin'] = "*"
r.headers['Content-Type'] = "application/json; charset=utf-8"
return r
def _create_response_atom_feed(request, plans, feed_title=''):
"""
Create an atom feed of plans fetched from the DB based on an optional query
"""
feed = AtomFeed(feed_title, feed_url=request.url, url=request.url_root)
for p in plans:
formatted = _format_plan(p, request.url_root)
feed.add(
title=formatted['title'],
content=formatted['content'],
content_type='html',
author="OpenTABA.info",
# id=url + '&status=' + p['status'],
# ^^ it seems like the &tblView= value keeps changing in the URL, which causes the ID to change and dlvr.it to republish items.
id="%s-%s" % (formatted['title'], p['status']),
# this is a unique ID (not real URL) so adding status to ensure uniqueness in TBA stages
url=formatted['url'],
links=formatted['links'],
updated=formatted['last_update']
)
return feed
def _format_plan(plan, server_root=None):
"""
Take a plan and format it for atom feed and social networks
"""
formatted_plan = {}
formatted_plan['url'] = plan['details_link']
# special emphasizing for some statuses
if plan['status'] in [u'פרסום ההפקדה', u'פרסום בעיתונות להפקדה']:
formatted_plan['status'] = u'»»%s««' % plan['status']
else:
formatted_plan['status'] = plan['status']
# the plan's content
formatted_plan['content'] = plan['essence'] + ' [' + formatted_plan['status'] + ', ' + \
'%02d/%02d/%04d' % (plan['day'], plan['month'], plan['year']) + ', ' + plan['number'] + ']'
# the title
formatted_plan['title'] = plan['location_string']
# 'not title' is not supposed to happen anymore because every plan currently has a location
if not formatted_plan['title']:
formatted_plan['title'] = plan['number']
# mavat link - if we have a code and the base url for this server (currently only from the atom feed) we can give a direct link
# (through our server). otherwise link to the search page with parameters
if plan['mavat_code'] == '' or server_root is None:
formatted_plan['links'] = [{'href' : 'http://www.mavat.moin.gov.il/MavatPS/Forms/SV3.aspx?tid=4&tnumb=' + plan['number'], 'rel': 'related', 'title': u'מבא"ת'}]
else:
formatted_plan['links'] = [{'href': '%splan/%s/mavat' % (server_root, plan['plan_id']), 'rel': 'related', 'title': u'מבא"ת'}]
# plan last update
formatted_plan['last_update'] = datetime.date(plan['year'], plan['month'], plan['day'])
return formatted_plan
"""
A small class to enable json-serializing of datetime.date objects
To use it: json.dumps(json_object, cls=helpers.DateTimeEncoder)
"""
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
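# Hedged usage sketch (not part of the original module), mirroring the class
# docstring above: dates serialize to ISO strings, everything else falls back
# to the default encoder.
#
#   json.dumps({'updated': datetime.date(2020, 1, 1)}, cls=DateTimeEncoder)
#   # -> '{"updated": "2020-01-01"}'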
|
python
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import CreatePollViewSet, GetResultViewSet, PollViewSet
v1_router = DefaultRouter()
v1_router.register('createPoll', CreatePollViewSet, basename='create_poll')
v1_router.register('poll', PollViewSet, basename='vote_poll')
v1_router.register('getResult', GetResultViewSet, basename='get_result')
urlpatterns = [
path('v1/', include(v1_router.urls)),
]
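# Hedged note (not part of the original file): with the DefaultRouter above the
# API registers routes under /v1/createPoll/, /v1/poll/ and /v1/getResult/,
# plus DefaultRouter's browsable API root at /v1/.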
|
python
|
# Gumby theft detection program
# Created Feb 8, 2018
# Copyright (c) Sambhav Saggi 2018
# All rights reserved
# Created for Gregory Calvetti
# Output may be wrong
#Set some constants
DENSITY_THRESH=0.7
#Get ready to compare numbers
def takeClosest(values, target):
    # Return the index of the entry in values that is closest to target
    diffs = []
    for i in values:
        diffs.append(abs(target - i))
    return diffs.index(min(diffs))
#Material info
baking_soda = {
"fire": 1,
"vinegar": 2,
"iodine": 1,
"bromo": 2
}
drywall_compound = {
"fire": 2,
"vinegar": 0,
"iodine": 1,
"bromo": 0
}
corn_starch = {
"fire": 2,
"vinegar": 0,
"iodine": 2,
"bromo": 1
}
salt = {
"fire": 1,
"vinegar": 0,
"iodine": 1,
"bromo": 1
}
flour = {
"fire": 2,
"vinegar": 0,
"iodine": 2,
"bromo": 1
}
sugar = {
"fire": 2,
"vinegar": 0,
"iodine": 0,
"bromo": 1
}
#Density
density={
"copper" : 8.96,
"steel" : 7.85,
"brass" : 8.73,
"aluminum" : 2.7,
"none" : 0
}
#Evidence
evidence = {
"num":{"footprint":247,"density":8.9},
"chroma":["blue","red"],
"react":{"fire":2,"vinegar":2,"bromo":2,"iodine":2}
#0 is no reaction, 1 is some reaction, 2 is violent reaction. Iodine: 0 is none
# 1 is yellow, 2 is brown. Bromo: 0 is green, 1 is yellow, 2 is blue
}
#Suspects
suspect_list=["cafe_worker","janitor","handyman","administrator","ccac"]
cafe_worker = {
"name":"cafe_worker",
"powder1":baking_soda,
"powder2":flour,
"metal1":density["copper"],
"metal2":density["steel"],
"footprint":244,
"chroma":["yellow","blue"]
}
janitor = {
"name":"janitor",
"powder1":salt,
"powder2":baking_soda,
"metal1":density["steel"],
"metal2":density["none"],
"footprint":286,
"chroma":["blue","red"]
}
handyman = {
"name":"handyman",
"powder1":drywall_compound,
"powder2":sugar,
"metal1":density["aluminum"],
"metal2":density["steel"],
"footprint":265,
"chroma":["black",""]
}
administrator = {
"name":"administrator",
"powder1":sugar,
"powder2":salt,
"metal1":density["copper"],
"metal2":density["brass"],
"footprint":330,
"chroma":["blue","orange"]
}
ccac = {
"name":"ccac",
"powder1":baking_soda,
"powder2":corn_starch,
"metal1":density["copper"],
"metal2":density["aluminum"],
"footprint":265,
"chroma":["red","blue"]
}
#BEGIN THE SEARCH!!!!
for i in range(0,5):
test_suspect = suspect_list[i]
#Cafe Worker
if test_suspect == "cafe_worker":
cafe_worker_evidence=0
#Powder 1
powder1=cafe_worker["powder1"]
if powder1["fire"] == evidence["react"]['fire']:
cafe_worker_evidence+=1
if powder1["vinegar"] == evidence["react"]['vinegar']:
cafe_worker_evidence+=1
if powder1["iodine"] == evidence["react"]['iodine']:
cafe_worker_evidence+=1
if powder1["bromo"] == evidence["react"]['bromo']:
cafe_worker_evidence+=1
#Powder 2
powder2=cafe_worker["powder2"]
if powder2["fire"] == evidence["react"]['fire']:
cafe_worker_evidence+=1
if powder2["vinegar"] == evidence["react"]['vinegar']:
cafe_worker_evidence+=1
if powder2["iodine"] == evidence["react"]['iodine']:
cafe_worker_evidence+=1
if powder2["bromo"] == evidence["react"]['bromo']:
cafe_worker_evidence+=1
#Janitor
elif test_suspect == "janitor":
janitor_evidence=0
#Powder 1
powder1=janitor["powder1"]
if powder1["fire"] == evidence["react"]['fire']:
janitor_evidence+=1
if powder1["vinegar"] == evidence["react"]['vinegar']:
janitor_evidence+=1
if powder1["iodine"] == evidence["react"]['iodine']:
janitor_evidence+=1
if powder1["bromo"] == evidence["react"]['bromo']:
janitor_evidence+=1
#Powder 2
powder2=janitor["powder2"]
if powder2["fire"] == evidence["react"]['fire']:
janitor_evidence+=1
if powder2["vinegar"] == evidence["react"]['vinegar']:
janitor_evidence+=1
if powder2["iodine"] == evidence["react"]['iodine']:
janitor_evidence+=1
if powder2["bromo"] == evidence["react"]['bromo']:
janitor_evidence+=1
#Handyman
elif test_suspect == "handyman":
handyman_evidence=0
#Powder 1
powder1=handyman["powder1"]
if powder1["fire"] == evidence["react"]['fire']:
handyman_evidence+=1
if powder1["vinegar"] == evidence["react"]['vinegar']:
handyman_evidence+=1
if powder1["iodine"] == evidence["react"]['iodine']:
handyman_evidence+=1
if powder1["bromo"] == evidence["react"]['bromo']:
handyman_evidence+=1
#Powder 2
powder2=handyman["powder2"]
if powder2["fire"] == evidence["react"]['fire']:
handyman_evidence+=1
if powder2["vinegar"] == evidence["react"]['vinegar']:
handyman_evidence+=1
if powder2["iodine"] == evidence["react"]['iodine']:
handyman_evidence+=1
if powder2["bromo"] == evidence["react"]['bromo']:
handyman_evidence+=1
#Administrator
elif test_suspect == "administrator":
administrator_evidence=0
#Powder 1
powder1=administrator["powder1"]
if powder1["fire"] == evidence["react"]['fire']:
administrator_evidence+=1
if powder1["vinegar"] == evidence["react"]['vinegar']:
administrator_evidence+=1
if powder1["iodine"] == evidence["react"]['iodine']:
administrator_evidence+=1
if powder1["bromo"] == evidence["react"]['bromo']:
administrator_evidence+=1
#Powder 2
powder2=administrator["powder2"]
if powder2["fire"] == evidence["react"]['fire']:
administrator_evidence+=1
if powder2["vinegar"] == evidence["react"]['vinegar']:
administrator_evidence+=1
if powder2["iodine"] == evidence["react"]['iodine']:
administrator_evidence+=1
if powder2["bromo"] == evidence["react"]['bromo']:
administrator_evidence+=1
#CCAC
elif test_suspect == "ccac":
ccac_evidence=0
#Powder 1
powder1=ccac["powder1"]
if powder1["fire"] == evidence["react"]['fire']:
ccac_evidence+=1
if powder1["vinegar"] == evidence["react"]['vinegar']:
ccac_evidence+=1
if powder1["iodine"] == evidence["react"]['iodine']:
ccac_evidence+=1
if powder1["bromo"] == evidence["react"]['bromo']:
ccac_evidence+=1
#Powder 2
powder2=ccac["powder2"]
if powder2["fire"] == evidence["react"]['fire']:
ccac_evidence+=1
if powder2["vinegar"] == evidence["react"]['vinegar']:
ccac_evidence+=1
if powder2["iodine"] == evidence["react"]['iodine']:
ccac_evidence+=1
if powder2["bromo"] == evidence["react"]['bromo']:
ccac_evidence+=1
# ------Done w/ powders -------
#Begin metals
for i in range(0,5):
test_suspect = suspect_list[i]
#Cafe Worker
if test_suspect == "cafe_worker":
#Metal 1
metal1=cafe_worker["metal1"]
if abs(metal1 - evidence["num"]["density"]) < DENSITY_THRESH:
cafe_worker_evidence+=1
#Metal 2
metal2=cafe_worker["metal2"]
if abs(metal2 - evidence["num"]["density"]) < DENSITY_THRESH:
cafe_worker_evidence+=1
#Janitor
elif test_suspect == "janitor":
#Metal 1
metal1=janitor["metal1"]
if abs(metal1 - evidence["num"]["density"]) < DENSITY_THRESH:
janitor_evidence+=1
#Metal 2
metal2=janitor["metal2"]
if abs(metal2 - evidence["num"]["density"]) < DENSITY_THRESH:
janitor_evidence+=1
#Handyman
elif test_suspect == "handyman":
#Metal 1
metal1=handyman["metal1"]
if abs(metal1 - evidence["num"]["density"]) < DENSITY_THRESH:
            handyman_evidence+=1
#Metal 2
metal2=handyman["metal2"]
if abs(metal2 - evidence["num"]["density"]) < DENSITY_THRESH:
handyman_evidence+=1
#Administrator
elif test_suspect == "administrator":
#Metal 1
metal1=administrator["metal1"]
if abs(metal1 - evidence["num"]["density"]) < DENSITY_THRESH:
administrator_evidence+=1
#Metal 2
metal2=administrator["metal2"]
if abs(metal2 - evidence["num"]["density"]) < DENSITY_THRESH:
administrator_evidence+=1
#CCAC
elif test_suspect == "ccac":
#Metal 1
metal1=ccac["metal1"]
if abs(metal1 - evidence["num"]["density"]) < DENSITY_THRESH:
ccac_evidence+=1
#Metal 2
metal2=ccac["metal2"]
if abs(metal2 - evidence["num"]["density"]) < DENSITY_THRESH:
ccac_evidence+=1
# ------Done w/ metals -------
#Begin shoe size
shoes=[0,0,0,0,0]
for i in range(0,5):
test_suspect = suspect_list[i]
if test_suspect == "cafe_worker":
shoes[i]=cafe_worker["footprint"]
elif test_suspect == "janitor":
shoes[i]=janitor["footprint"]
elif test_suspect == "handyman":
shoes[i]=handyman["footprint"]
elif test_suspect == "administrator":
shoes[i]=administrator["footprint"]
elif test_suspect == "ccac":
shoes[i]=ccac["footprint"]
smallestID=takeClosest(shoes,evidence["num"]["footprint"])
if smallestID == 0:
cafe_worker_evidence+=1
elif smallestID == 1:
janitor_evidence+=1
elif smallestID == 2:
handyman_evidence+=1
elif smallestID == 3:
administrator_evidence+=1
elif smallestID == 4:
ccac_evidence += 1
# ------Done w/ shoe size -------
#Begin chromotography
for i in range(0,5):
test_suspect = suspect_list[i]
if test_suspect == "cafe_worker":
if cafe_worker["chroma"][0] == evidence["chroma"][0]:
cafe_worker_evidence+=1
elif cafe_worker["chroma"][1] == evidence["chroma"][0]:
cafe_worker_evidence+=1
elif cafe_worker["chroma"][0] == evidence["chroma"][1]:
cafe_worker_evidence+=1
elif cafe_worker["chroma"][1] == evidence["chroma"][1]:
cafe_worker_evidence+=1
elif test_suspect == "janitor":
if janitor["chroma"][0] == evidence["chroma"][0]:
janitor_evidence+=1
elif janitor["chroma"][1] == evidence["chroma"][0]:
janitor_evidence+=1
elif janitor["chroma"][0] == evidence["chroma"][1]:
janitor_evidence+=1
elif janitor["chroma"][1] == evidence["chroma"][1]:
janitor_evidence+=1
elif test_suspect == "handyman":
if handyman["chroma"][0] == evidence["chroma"][0]:
handyman_evidence+=1
elif handyman["chroma"][1] == evidence["chroma"][0]:
handyman_evidence+=1
elif handyman["chroma"][0] == evidence["chroma"][1]:
handyman_evidence+=1
elif handyman["chroma"][1] == evidence["chroma"][1]:
handyman_evidence+=1
elif test_suspect == "administrator":
if administrator["chroma"][0] == evidence["chroma"][0]:
administrator_evidence+=1
elif administrator["chroma"][1] == evidence["chroma"][0]:
administrator_evidence+=1
elif administrator["chroma"][0] == evidence["chroma"][1]:
administrator_evidence+=1
elif administrator["chroma"][1] == evidence["chroma"][1]:
administrator_evidence+=1
elif test_suspect == "ccac":
if ccac["chroma"][0] == evidence["chroma"][0]:
ccac_evidence+=1
elif ccac["chroma"][1] == evidence["chroma"][0]:
ccac_evidence+=1
elif ccac["chroma"][0] == evidence["chroma"][1]:
ccac_evidence+=1
elif ccac["chroma"][1] == evidence["chroma"][1]:
ccac_evidence+=1
################################################################################
#Display results
total=10
results=[cafe_worker_evidence,janitor_evidence,handyman_evidence,
administrator_evidence,ccac_evidence]
suspect_list_proper=["cafe worker","janitor","handyman","administrator",
"CCAC teacher"]
unranked={
"Cafe worker":cafe_worker_evidence/total*100,
"Janitor":janitor_evidence/total*100,
"Handyman":handyman_evidence/total*100,
"Administrator":administrator_evidence/total*100,
"CCAC teacher":ccac_evidence/total*100
}
unrankedxy={
"Cafe worker":(str(cafe_worker_evidence/total*100)+"\t%"),
"Janitor":(str(janitor_evidence/total*100)+"\t%"),
"Handyman":(str(handyman_evidence/total*100)+"\t%"),
"Administrator":(str(administrator_evidence/total*100)+"\t%"),
"CCAC teacher":(str(ccac_evidence/total*100)+"\t%")
}
ranked=sorted(unranked, key=unranked.get, reverse=True)
ordered_out=["","","","",""]
for i in range(0,len(ranked)):
ordered_out[i] = ranked[i]+" \t"+unrankedxy[ranked[i]]
maxNum = max(results)
guilty=suspect_list_proper[results.index(maxNum)]
print("It was the "+guilty+"!\n")
print("GASP!\n")
for i in ordered_out:
print(i)
#Done
|
python
|
from . import command
from opensfm.actions import match_features
class Command(command.CommandBase):
name = 'match_features'
help = 'Match features between image pairs'
def run_impl(self, dataset, args):
match_features.run_dataset(dataset)
def add_arguments_impl(self, parser):
pass
|
python
|
# -*- coding: utf-8 -*-
"""
Jakob Seidl, Nanoelectronics Group UNSW Sydney
"""
import pyvisa as visa
import pyneMeas.Instruments.Instrument as Instrument
import time
import math
import numpy as np
@Instrument.enableOptions
class SRS830(Instrument.Instrument):
"""
SRS830 Lock-In Amplifier
>>> myLockin = I.SRS830(GPIBAddress)
"""
defaultOutput = "sourceLevel"
defaultInput = "senseLevel"
def __init__(self, address):
super(SRS830,self).__init__()
self.dev = visa.ResourceManager().open_resource("GPIB0::"+str(address)+"::INSTR")
print(self.dev.query("*IDN?")) # Probably should query and check we have the right device
self.outputMode = "amplitude" # or "frequency"
self.sweepDelay = 0.5
self.type ="SRS830"
self.name = 'mySRS830'
self.scaleFactor = 1.0
self.input = self._getInput()
#AutoRange:
self.autoSensitivityEnable = False
self.possibleCurrRanges = [9,12,15,18,21,24,26]
self.rangeIndex = 6 #random
        self.rangeDic = { # We shift the lower bounds down by 30% (-> 0.7*number) so they don't overlap with the upper bounds when we shift up a range.
0:[15E-18,1.5E-12],
1:[0.7*1.5E-12,15E-12],
2:[0.7*15E-12,150E-12],
3:[0.7*150E-12,1.5E-9],
4:[0.7*1.5E-9,15E-9],
5:[0.7*15E-9,150E-9],
6:[0.7*150E-9,1.5E-6],
}
@Instrument.addOptionSetter("name")
def _setName(self,instrumentName):
self.name = instrumentName
@Instrument.addOptionGetter("name")
def _getName(self):
return self.name
@Instrument.addOptionSetter("frequency")
def _setFrequency(self,frequency):
self.dev.write("FREQ "+str(frequency))
@Instrument.addOptionGetter("frequency")
def _getFrequency(self):
return float(self.dev.query("FREQ?"))
@Instrument.addOptionSetter("amplitude")
def _setAmplitude(self,amplitude):
self.dev.write("SLVL "+str(amplitude))
@Instrument.addOptionGetter("amplitude")
def _getAmplitude(self):
return float(self.dev.query("SLVL?"))
@Instrument.addOptionGetter("senseLevel")
    def readXY(self): # This function is a bit more elaborate since SNAP? returns a single string containing both X and Y; it splits that string at the separating comma and then parses the exponents of each part.
XY = self.dev.query("SNAP?1,2") # ONE could also use OUTP? i (i=1,2,3,4 X,Y,R theta)
divideIndex = XY.find(",")
X = XY[0:divideIndex]
#print(X)
Y = XY[divideIndex+1:-1]
#print(Y)
#Voltage measurements:
if self.input == 'A' or self.input =='A-B': #
XexpIndex = X.find("e-")
YexpIndex = Y.find("e-")
if (XexpIndex == -1 or YexpIndex == -1):
return [float(X)*(1/self.scaleFactor),float(Y)*(1/self.scaleFactor)]
else:
XList =list(X)
if XList[XexpIndex+2] =="0":
XList[XexpIndex+2] =""
Xfinal = float("".join(XList))
YList =list(Y)
if YList[YexpIndex+2] =="0":
YList[YexpIndex+2] =""
Yfinal = float("".join(YList))
return [(1/self.scaleFactor)*Xfinal,(1/self.scaleFactor)*Yfinal]
#Current measurements
XexpIndex = X.find("e-")
if XexpIndex != -1:
# print("Xexp index is "+str(XexpIndex))
XList =list(X)
if XList[XexpIndex+2] =="0":
XList[XexpIndex+2] =""
Xfinal = float("".join(XList))
else:
Xfinal = 0 # we need to do something like this because if the result is already zero or some odd/weird number, the exponent finding process above breaks down
YexpIndex = Y.find("e-")
if YexpIndex != -1:
# print("Yexp index is "+str(YexpIndex))
YList =list(Y)
if YList[YexpIndex+2] =="0":
YList[YexpIndex+2] =""
Yfinal = float("".join(YList))
else:
Yfinal = 0
if self.autoSensitivityEnable:
self._autoSensitivity([Xfinal,Yfinal])
return [(1/self.scaleFactor)*Xfinal,(1/self.scaleFactor)*Yfinal]
def _autoSensitivity(self,givenValue):
R = math.sqrt(givenValue[0]**2+givenValue[1]**2)
# print('#############\n')
# print('current value is'+str(R)+'\n')
# print('current range index is: '+str(self.rangeIndex)+'\n')
# print('the boundaries here are: '+str(self.rangeDic[self.rangeIndex][0]) +'and' + str(self.rangeDic[self.rangeIndex][1])+'\n')
# print('#############\n')
if (self.rangeDic[self.rangeIndex][0] >= R): #and not math.isnan(R)
a = self.rangeIndex -1
if a in range(7):
self.rangeIndex = a
# print(self.rangeIndex)
self.dev.write("SENS "+str(self.possibleCurrRanges[self.rangeIndex]))
time.sleep(2)
elif (R > self.rangeDic[self.rangeIndex][1]):
a = self.rangeIndex +1
if a in range(7):
self.rangeIndex = a
#print(self.rangeIndex)
self.dev.write("SENS "+str(self.possibleCurrRanges[self.rangeIndex]))
time.sleep(2)
else:
pass
@Instrument.addOptionSetter("sourceLevel")
def _setSourceLevel(self,sourceLevel):
self.dev.write(("SLVL " if self.outputMode == "amplitude" else "FREQ ") +str(sourceLevel))
time.sleep(self.sweepDelay)
@Instrument.addOptionGetter("sourceLevel")
def _getSourceLevel(self):
return float(self.dev.query(("SLVL?" if self.outputMode == "amplitude" else "FREQ?")))
@Instrument.addOptionSetter("timeConst")
def _setTimeConst(self,timeConst): #Index can be between 0 (10 microsec to 19 30ksec)
possibleConstants = [10E-6,30E-6,100E-6,300E-6,1E-3,3E-3,10E-3,30E-3,100E-3,300E-3,1,3,10,30,100,300]
        if float(timeConst) in possibleConstants: # The highest kilosecond integration times are not included since they are never used.
self.dev.write("OFLT "+str(possibleConstants.index(timeConst)))
else:
raise ValueError(
"\"{}\" is not a valid time Constant for the SRS830.".format(timeConst) +
" Valid time constants are: (10,30,100,300)X 1E-6, (1,3,10,30,100,300)X 1E-3 and (1,3,10,30,100,300)X 1E-0 seconds."
)
@Instrument.addOptionSetter("senseRange")
def _setSenseRange(self,senseRange):
possibleSenseRanges = [1E-9,2E-9,5E-9,10E-9,20E-9,50E-9,100E-9,200E-9,500E-9,1E-6,2E-6,5E-6,10E-6,20E-6,50E-6,100E-6,200E-6,500E-6,1E-3,2E-3,5E-3,10E-3,20E-3,50E-3,100E-3,200E-3,500E-3,1] #For voltages !!
if senseRange in possibleSenseRanges:
sensIndex = possibleSenseRanges.index(senseRange)-1
            if self.rangeIndex != sensIndex: # Only change the range if it differs from the current one. This is helpful with the autoSensitivity feature.
self.dev.write("SENS "+str(sensIndex))
time.sleep(0.4)
#self.rangeIndex = sensIndex
else:
pass
else:
raise ValueError(
"\"{}\" is not a valid senseRange for the SRS830.".format(senseRange) +
" Valid sensitivities are:\"{}\" Volts or microAmps respectively.".format(possibleSenseRanges)
)
@Instrument.addOptionGetter("senseRange")
def _getSenseRange(self):
possibleSenseRanges = [1E-9,2E-9,5E-9,10E-9,20E-9,50E-9,100E-9,200E-9,500E-9,1E-6,2E-6,5E-6,10E-6,20E-6,50E-6,100E-6,200E-6,500E-6,1E-3,2E-3,5E-3,10E-3,20E-3,50E-3,100E-3,200E-3,500E-3,1] #For voltages !!
tempRes = int(self.dev.query('SENS?'))
return(possibleSenseRanges[tempRes])
@Instrument.addOptionSetter("input")
def _setInput(self,inputSetting): #I1 = I(1E6) and I2 = I(1E8)
possibleSettings = ["A","A-B","I1","I2"]
if str(inputSetting) in possibleSettings:
self.dev.write("ISRC "+str(possibleSettings.index(inputSetting)))
self.input = str(inputSetting)
@Instrument.addOptionGetter("input")
def _getInput(self):
possibleSettings = ["A","A-B","I1","I2"]
modeIndex = int(self.dev.query("ISRC?"))
return possibleSettings[modeIndex]
@Instrument.addOptionGetter("scaleFactor")
def _getScaleFactor(self):
return self.scaleFactor
@Instrument.addOptionSetter("scaleFactor")
def _setScaleFactor(self,scaleFactor):
self.scaleFactor = scaleFactor
@Instrument.addOptionSetter("sweepParameter")
def _setSweepParameter(self,sweepParameter):
if sweepParameter in ("frequency","amplitude"):
self.outputMode = str(sweepParameter)
else:
raise ValueError(
"\"{}\" is not a valid sweepParameter for the SRS830.".format(sweepParameter) +
" You can either sweep the reference 'frequency' or the sine output 'amplitude'."
)
def _getSenseIndex(self):
return int(self.dev.query("SENS?"))
@Instrument.addOptionGetter("sweepParameter")
def _getSweepParameter(self):
return self.outputMode
@Instrument.addOptionSetter('sweepDelay')
def _setSweepDelay(self,delay):
self.sweepDelay = delay
@Instrument.addOptionGetter('sweepDelay')
def _getSweepDelay(self):
return self.sweepDelay
@Instrument.addOptionSetter("autoSensitivity")
def _setAutoSensitivity(self,enable):
self.autoSensitivityEnable = True if enable else False
@Instrument.addOptionGetter("autoSensitivity")
def _getAutoSensitivity(self):
return self.autoSensitivityEnable
def autoPhase(self):
self.dev.write("APHS")
time.sleep(3)
@Instrument.addOptionSetter("phase")
def _setPhase(self,phase):
self.dev.write("PHAS "+str(phase))
@Instrument.addOptionGetter("phase")
def _getPhase(self):
return float(self.dev.query("PHAS?"))
def close(self):
self.dev.close()
def goTo(self,target,stepsize= 0.01,delay=0.2):
currentOutput = self.get('sourceLevel')
sign = 1 if (target>currentOutput) else -1
sweepArray = np.arange(currentOutput,target+sign*stepsize,sign*stepsize)
for point in sweepArray:
self.set('sourceLevel',point)
time.sleep(delay)
self.set('sourceLevel',target)
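# Hedged usage sketch (not part of the driver): a typical session using the
# get/set option interface that the class relies on internally (see goTo).
# The GPIB address and numeric values are illustrative only.
#
#   lockin = SRS830(8)
#   lockin.set('frequency', 137.5)       # reference frequency in Hz
#   lockin.set('amplitude', 0.1)         # sine output amplitude in V
#   x, y = lockin.get('senseLevel')      # X/Y pair read via SNAP?1,2
#   lockin.goTo(0.5, stepsize=0.05)      # ramp the source level gently
#   lockin.close()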
|
python
|
# Copyright (C) 2020-Present the hyssop authors and contributors.
#
# This module is part of hyssop and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
'''
File created: November 21st 2020
Modified By: hsky77
Last Updated: March 27th 2021 19:31:11 pm
'''
from .server import AioHttpView, AioHttpRequest, AioHttpApplication, AioHttpServer, routes
Version = '0.0.6'
|
python
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.newport
~~~~~~~~~~~~~~~~~~~~~
:company: Newport.
:description: Test and Measurement Equipment.
:website: http://www.newport.com/
---
:copyright: 2015, see AUTHORS for more details.
:license: GPLv3,
"""
from .motionesp301 import ESP301, ESP301Axis
from .motionsmc100 import SMC100
__all__ = ['ESP301', 'ESP301Axis', 'SMC100']
|
python
|
import typing
import os
import aiohttp
from urllib.parse import quote
from deta.utils import _get_project_key_id
from deta.base import FetchResponse, Util
def AsyncBase(name: str):
project_key, project_id = _get_project_key_id()
return _AsyncBase(name, project_key, project_id)
class _AsyncBase:
def __init__(self, name: str, project_key: str, project_id: str, host: str = None):
        if not project_key:
            raise AssertionError("No project key provided")
host = host or os.getenv("DETA_BASE_HOST") or "database.deta.sh"
self._base_url = f"https://{host}/v1/{project_id}/{name}"
self.util = Util()
self._session = aiohttp.ClientSession(
headers={
"Content-type": "application/json",
"X-API-Key": project_key,
},
raise_for_status=True,
)
async def close(self) -> None:
await self._session.close()
async def get(self, key: str):
key = quote(key, safe="")
try:
async with self._session.get(f"{self._base_url}/items/{key}") as resp:
return await resp.json()
except aiohttp.ClientResponseError as e:
if e.status == 404:
return
else:
raise e
async def delete(self, key: str):
key = quote(key, safe="")
async with self._session.delete(f"{self._base_url}/items/{key}"):
return
async def insert(
self, data: typing.Union[dict, list, str, int, bool], key: str = None
):
if not isinstance(data, dict):
data = {"value": data}
else:
data = data.copy()
if key:
data["key"] = key
async with self._session.post(
f"{self._base_url}/items", json={"item": data}
) as resp:
return await resp.json()
async def put(
self, data: typing.Union[dict, list, str, int, bool], key: str = None
):
if not isinstance(data, dict):
data = {"value": data}
else:
data = data.copy()
if key:
data["key"] = key
async with self._session.put(
f"{self._base_url}/items", json={"items": [data]}
) as resp:
if resp.status == 207:
resp_json = await resp.json()
return resp_json["processed"]["items"][0]
else:
return None
async def put_many(
self, items: typing.List[typing.Union[dict, list, str, int, bool]]
):
if len(items) > 25:
raise AssertionError("We can't put more than 25 items at a time.")
_items = []
for i in items:
if not isinstance(i, dict):
_items.append({"value": i})
else:
_items.append(i)
async with self._session.put(
f"{self._base_url}/items", json={"items": _items}
) as resp:
return await resp.json()
async def fetch(
self,
query: typing.Union[dict, list] = None,
*,
limit: int = 1000,
last: str = None,
):
payload = {}
if query:
payload["query"] = query if isinstance(query, list) else [query]
if limit:
payload["limit"] = limit
if last:
payload["last"] = last
async with self._session.post(f"{self._base_url}/query", json=payload) as resp:
resp_json = await resp.json()
paging = resp_json.get("paging")
return FetchResponse(
paging.get("size"), paging.get("last"), resp_json.get("items")
)
async def update(self, updates: dict, key: str):
if key == "":
raise ValueError("Key is empty")
payload = {
"set": {},
"increment": {},
"append": {},
"prepend": {},
"delete": [],
}
for attr, value in updates.items():
if isinstance(value, Util.Trim):
payload["delete"].append(attr)
elif isinstance(value, Util.Increment):
payload["increment"][attr] = value.val
elif isinstance(value, Util.Append):
payload["append"][attr] = value.val
elif isinstance(value, Util.Prepend):
payload["prepend"][attr] = value.val
else:
payload["set"][attr] = value
        if not any(payload.values()):
            raise ValueError("Provide at least one update action.")
key = quote(key, safe="")
await self._session.patch(f"{self._base_url}/items/{key}", json=payload)
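# Hedged usage sketch (not part of the original module): the async client
# mirrors the synchronous Base API. The base name "users" and item fields are
# illustrative; a valid Deta project key is required for this to actually run.
#
#   import asyncio
#
#   async def demo():
#       db = AsyncBase("users")
#       await db.put({"name": "alice"}, key="u1")
#       item = await db.get("u1")
#       await db.delete("u1")
#       await db.close()
#       return item
#
#   asyncio.run(demo())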
|
python
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
from ..core.config.x_config import XCONF
class APIState(object):
    NOT_CONNECTED = -1  # Disconnected or not connected
    NOT_READY = -2  # Not enabled or in a setting state
    API_EXCEPTION = -3  # API exception, possibly a parameter error
    CMD_NOT_EXIST = -4  # Command does not exist
    TCP_LIMIT = -6  # Cartesian limit reached
    JOINT_LIMIT = -7  # Joint angle limit reached
    OUT_OF_RANGE = -8  # Out of range
    EMERGENCY_STOP = -9  # Emergency stop
    SERVO_NOT_EXIST = -10  # No joint with this ID exists
    CONVERT_FAILED = -11  # Blockly conversion failed
    RUN_BLOCKLY_EXCEPTION = -12  # Exception while running the Blockly app
    NORMAL = 0  # Normal
    HAS_ERROR = XCONF.UxbusState.ERR_CODE  # There is an error that has not been cleared
    HAS_WARN = XCONF.UxbusState.WAR_CODE  # There is a warning that has not been cleared
    RES_TIMEOUT = XCONF.UxbusState.ERR_TOUT  # Command reply timed out
    RES_LENGTH_ERROR = XCONF.UxbusState.ERR_LENG  # TCP length error
    CMD_NUM_ERROR = XCONF.UxbusState.ERR_NUM  # TCP sequence number error
    CMD_PROT_ERROR = XCONF.UxbusState.ERR_PROT  # TCP protocol flag error
    FUN_ERROR = XCONF.UxbusState.ERR_FUN  # TCP reply command does not match the sent command
    NO_TCP = XCONF.UxbusState.ERR_NOTTCP  # Error writing data
    STATE_NOT_READY = XCONF.UxbusState.STATE_NOT_READY  # State is not ready
    RET_IS_INVALID = XCONF.UxbusState.INVALID  # Result is invalid
    OTHER = XCONF.UxbusState.ERR_OTHER  # Other error
    PARAM_ERROR = XCONF.UxbusState.ERR_PARAM  # Parameter error
    TGPIO_ID_ERR = 20  # End-effector IO host ID error
    MODBUS_BAUD_NOT_SUPPORT = 21  # Modbus does not support this baud rate
    MODBUS_BAUD_NOT_CORRECT = 22  # End-effector Modbus baud rate is incorrect
    MODBUS_ERR_LENG = 23  # Modbus reply data length error
    TRAJ_RW_FAILED = 31  # Failed to read/write the trajectory (loading or saving)
    TRAJ_RW_TOUT = 32  # Timed out waiting to read/write the trajectory (loading or saving)
    TRAJ_PLAYBACK_TOUT = 33  # Trajectory playback timed out (several possible causes)
    SUCTION_CUP_TOUT = 41  # Timed out waiting for the suction cup to be set
    MODE_IS_NOT_CORRECT = 51  # Mode is not correct
    WAIT_FINISH_TIMEOUT = 100  # Timed out waiting for the operation to finish
    CHECK_FAILED = 101  # Too many consecutive state-check failures while waiting for the operation to finish
    END_EFFECTOR_HAS_FAULT = 102  # End-effector attachment has a fault
    END_EFFECTOR_NOT_ENABLED = 103  # End-effector attachment is not enabled
|
python
|
from pykgr.package import Package
from pykgr.shell import Shell
from pykgr.configuration import conf as config
from pykgr.environment import Environment
from pykgr.builder import Builder
|
python
|
#!/usr/bin/env python
# encoding: utf-8
from unittest import TestCase
from ycyc.libs.statelib.state import (
FreeState, SequenceState, QSequenceState,
StateNotAllowError, EndOfSequenceStateError, SequenceStateFinishedError,
)
class TestFreeState(TestCase):
def test_usage(self):
state = FreeState("init")
self.assertEqual(state.state, "init")
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, None)
with state.processing("init", "state1", "error1"):
pass
self.assertEqual(state.state, "state1")
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, "init")
with self.assertRaises(StateNotAllowError):
with state.processing("nothing", "", ""):
pass
self.assertEqual(state.state, "state1")
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, "init")
with self.assertRaises(ValueError):
with state.processing("state1", "state2", "error2"):
raise ValueError()
self.assertEqual(state.state, "error2")
self.assertIsInstance(state.exception, ValueError)
self.assertEqual(state.last_state, "state1")
with state.processing(("error2", "state3"), "state3", "error3"):
pass
self.assertEqual(state.state, "state3")
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, "error2")
with state.processing(("error2", "state3"), "state4", "error4"):
pass
self.assertEqual(state.state, "state4")
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, "state3")
class TestSequenceState(TestCase):
def test_usage(self):
class TSequenceState(SequenceState):
__SequenceStates__ = ("init", "state1", "state2", "state3")
state = TSequenceState()
self.assertEqual(state.state, "init")
self.assertEqual(state.index, 0)
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, None)
self.assertEqual(state.next_state, "state1")
self.assertEqual(state.is_finished, False)
with state.processing():
pass
self.assertEqual(state.state, "state1")
self.assertEqual(state.index, 1)
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, "init")
self.assertEqual(state.next_state, "state2")
self.assertEqual(state.is_finished, False)
with state.processing():
pass
self.assertEqual(state.state, "state2")
self.assertEqual(state.index, 2)
self.assertEqual(state.exception, None)
self.assertEqual(state.last_state, "state1")
self.assertEqual(state.next_state, "state3")
self.assertEqual(state.is_finished, False)
with self.assertRaises(ValueError):
with state.processing():
raise ValueError()
self.assertEqual(state.state, "error")
self.assertEqual(state.index, 2)
self.assertIsInstance(state.exception, ValueError)
self.assertEqual(state.last_state, "state2")
self.assertEqual(state.next_state, "state3")
self.assertEqual(state.is_finished, True)
with self.assertRaises(SequenceStateFinishedError):
with state.processing():
pass
self.assertEqual(state.state, "error")
self.assertEqual(state.index, 2)
self.assertIsInstance(state.exception, ValueError)
self.assertEqual(state.last_state, "state2")
self.assertEqual(state.next_state, "state3")
self.assertEqual(state.is_finished, True)
|
python
|
import streamlit as st
# https://github.com/streamlit/release-demos/blob/master/0.65/demos/query_params.py
query_params = st.experimental_get_query_params()
default = query_params["text"][0] if "text" in query_params else ""
x = st.text_area('Enter Text', value=default)
st.write('The text in uppercase is ', x.upper())
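# Hedged note (not part of the original script): the query string seeds the
# widget, e.g. http://localhost:8501/?text=hello pre-fills the text area with
# "hello" (port assumed to be Streamlit's default).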
|
python
|
from parser import EPL
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.lines import Line2D
import copy
class Experiment:
def __init__(self, suppath, file_regex='*-analyzed.txt', parser=EPL):
self.suppath = suppath
self.parser = parser
self.conditions = []
self.animals = []
self.frequency = []
for dirs in glob.glob(suppath + '/*/'):
# Get experimental conditions in folders below suppath
self.conditions.append(os.path.basename(dirs[0:-1:1]))
condition_dict = []
for condition in self.conditions:
# loop through experimental conditions and add animal folders to animals list
pathlist = glob.glob(self.suppath + condition+'/*/')
animallist = []
experiment_classes = []
for animal in pathlist:
animallist.append(os.path.basename(animal[0:-1:1]))
experiment_path_list = glob.glob(self.suppath + condition + '/' + os.path.basename(animal[0:-1:1]) + '/' + file_regex)
animal_classes = []
# if self.frequencies: # Got to do it this way because numpy array is weird
# freq_set = True
for experiment in experiment_path_list:
if not self.frequency:
# We need a list of frequencies later in the program.
                        # Set the frequency list if it hasn't been set yet
self.frequency.append(self.parser(experiment).frequency)
# Create a list of parsers classes containing data of each animal
animal_classes.append(self.parser(experiment))
# Create a list of all animal lists for a single experimental condition
experiment_classes.append(animal_classes)
# Create a dictionary for a single condition of the form {Animal_ID: [List of animal trials]}
condition_dict.append(dict(zip(animallist, experiment_classes)))
        # Create a dict for the experiment of the form {Condition 1: condition_dict_1,
# Condition 2: condition_dict_2}
self.experiment_dict = dict(zip(self.conditions, condition_dict))
self.frequency.sort()
@staticmethod
def list_to_str(original):
co = copy.copy(original)
new = []
while co:
new.append(str(co.pop(0)))
return new
def get_experiment(self):
return self.experiment_dict
class ABR(Experiment):
def __init__(self, path, ParsingClass):
self.path = path
self.parse = ParsingClass
# Need to do this to generate the experiment dict
super().__init__(suppath=path, parser=ParsingClass)
self.plot = self.Plot(self.experiment_dict, self.list_to_str)
@staticmethod
def _write_csvmat_to_file(file, csvmat):
"""
Necessary backend function for printing csv files
Basically transposes csvmat and fills in extra spaces with spaces (' ')
necessary because we may want to print to a csv a list of lists with different sizes inside
in general we have lists which are the columns of a csv
i.e. [Header, 1, 2, 3, 4, 5]
we need a way to print all the headers first, then the data in the appropriate columns
example:
csvmat = [[1, 2], [a, b, c, d], [x, y, z]]
csvmat = _write_csvmat_to_file(file, csvmat)
csvmat = [[1, a, x], [2, b, y], [ , c, z], [ , d, ]]
file.wirte(csvmat)
:param file:
:param csvmat:
:return:
"""
strmat = []
maxlen = 0
for i in range(len(csvmat)):
if maxlen <= len(csvmat[i]):
maxlen = len(csvmat[i])
for i in range(maxlen):
line = []
for j in range(len(csvmat)):
try:
line.append(str(csvmat[j][i]) + ',')
                except IndexError:
line.append(' ,')
strmat.append(line)
for i in range(len(strmat)):
if i > 0:
file.write('\n')
for j in range(len(strmat[0])):
file.write(strmat[i][j])
def write_agf_csv(self):
for f in self.frequency:
            csvmat = []
# Basically this creates a string that can be transposed later
# [Frequency, 1, 2, 3, 4, 5, 6]
# This theme is conserved throughout the function
csvmat.append(['frequency'] + [str(f)])
            # Open with "w" (write) so each frequency gets a fresh, blank csv file
            file = open(str(f) + 'kHz_data.csv', 'w')
for condition in self.experiment_dict:
csvmat.append(['Condition'] + [condition])
for animal in self.experiment_dict[condition]:
for run in self.experiment_dict[condition][animal]:
if f == run.frequency:
csvmat.append(['Animal'] + [str(run.id)])
csvmat.append(['Level'] + self.list_to_str(run.levels))
csvmat.append(['Amplitudes'] + self.list_to_str(run.amplitudes))
self._write_csvmat_to_file(file,csvmat)
file.close()
def write_thr_csv(self):
for condition in self.experiment_dict:
csvmat = []
csvmat.append(['Condition'] + [condition])
            # Open with "w" (write) so each condition gets a fresh, blank csv file
            file = open(condition + '_Thresholds.csv', 'w')
for animal in self.experiment_dict[condition]:
csvmat.append([animal])
freq = []
threshold = []
for run in self.experiment_dict[condition][animal]:
freq.append(run.frequency)
threshold.append(run.threshold)
                # Sort thresholds into frequency order before sorting freq itself
                threshold = [t for _, t in sorted(zip(freq, threshold))]
                freq.sort()
freq = ['Frequency'] + self.list_to_str(freq)
threshold = ['Threshold'] + self.list_to_str(threshold)
csvmat.append(freq)
csvmat.append(threshold)
self._write_csvmat_to_file(file, csvmat)
file.close()
class Plot:
def __init__(self, experiment_dict, l2s):
self.experiment_dict = experiment_dict
self.list_to_string = l2s
FREQ = []
for i,condition in enumerate(self.experiment_dict):
for animal in self.experiment_dict[condition]:
freq = []
thr = []
for run in self.experiment_dict[condition][animal]:
freq.append(run.frequency)
thr.append(run.threshold)
FREQ.append(freq)
self.frequency_list, _ = self._mean(FREQ, FREQ)
self.frequency_list = sorted(self.frequency_list)
@staticmethod
def _mean(x: list, y: list):
"""
Takes in a list of lists of differing sizes containing a possibly different lengths
and makes a flattened list of the mean
:param self:
:param data:
:return:
"""
z = zip(x, y)
X = np.array([])
for a in x:
for i in a:
if not np.any(X == i):
X = np.append(X,i)
Y = copy.copy(X)
for index,val in enumerate(X):
m = 0 # mean
i = 0 # iteration
for a,b in zip(x,y):
if len(a) != len(b):
                        raise IndexError('X,Y dimensions mismatch')
if np.any(np.array(a) == val):
m += np.array(b)[np.array(a) == val]
i += 1
if i > 0:
Y[index] = m/i
else:
Y[index] = m
return X.tolist(), Y.tolist()
def _var(self, x: list, y: list):
"""
Takes in a list of lists of differing sizes containing a possibly different lengths
and makes a flattened list of the mean
:param self:
:param data:
:return:
"""
X, Y_mean = self._mean(x,y)
Y = np.array(copy.copy(X))
for index, val in enumerate(X):
m = 0 # mean
i = 0 # iteration
for a, b in zip(x, y):
if np.any(np.array(a) == val):
m += (np.array(b)[np.array(a) == val] - Y_mean[index])**2
i += 1
if i > 1:
Y[index] = m / (i-1)
elif i == 0 or i == 1:
Y[index] = 0
return X, Y.tolist()
def _std(self, x: list, y: list):
X,Y = self._var(x,y)
for i,val in enumerate(Y):
Y[i] = val ** 0.5
return X,Y
def threshold(self, errbar=False):
fig,ax = plt.subplots()
fig.set_size_inches(5,4)
ax.set_xscale('log')
legend_elements = []
for i, condition in enumerate(self.experiment_dict):
legend_elements.append(Line2D([0],[0],color='C'+str(i), lw=2, label=str(condition)))
THR = []
FREQ = []
for animal in self.experiment_dict[condition]:
freq = []
thr = []
for run in self.experiment_dict[condition][animal]:
freq.append(run.frequency)
thr.append(run.threshold)
                    # Sort thresholds into frequency order before sorting freq itself
                    thr = [t for _, t in sorted(zip(freq, thr))]
                    freq.sort()
THR.append(thr)
FREQ.append(freq)
ax.plot(freq, thr, '.-', c='C'+str(i), alpha=0.1)
FREQ_mean, THR_mean = self._mean(FREQ, THR)
_, THR_variance = self._std(FREQ, THR)
if errbar:
ax.errorbar(FREQ_mean, THR_mean, yerr=THR_variance, c='C'+str(i), linewidth=2)
else:
plt.fill_between(FREQ_mean, np.array(THR_mean) - np.array(THR_variance),
np.array(THR_mean) + np.array(THR_variance), alpha = .2, color = 'C'+str(i))
plt.plot(FREQ_mean,THR_mean, '.-', c='C'+str(i))
ax.set_xscale('log')
ax.set_xticks(FREQ_mean)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.get_xaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.ticklabel_format(style='plain')
ax.set_xlabel('Frequency (kHz)')
ax.set_ylabel('Threshold (dB)')
ax.legend(handles=legend_elements, loc='best',frameon=False)
plt.title('Threshold')
plt.show()
def agf(self, frequency=None, errbar=None):
fig,ax = plt.subplots()
fig.set_size_inches(5,4)
ax.set_xscale('log')
legend_elements = []
for i, condition in enumerate(self.experiment_dict):
legend_elements.append(Line2D([0],[0],color='C'+str(i), lw=2, label=str(condition)))
lvl = []
amp = []
for animal in self.experiment_dict[condition]:
for run in self.experiment_dict[condition][animal]:
if np.array(run.frequency) == frequency:
ax.plot(run.levels, run.amplitudes, '-', c='C'+str(i), alpha=0.1)
amp.append(run.amplitudes)
lvl.append(run.levels)
lvl_mean, amp_mean = self._mean(lvl, amp)
_, amp_variance = self._std(lvl, amp)
if errbar:
ax.errorbar(lvl_mean, amp_mean, yerr=amp_variance, c='C' + str(i), linewidth=2)
else:
plt.fill_between(lvl_mean, np.array(amp_mean) - np.array(amp_variance),
np.array(amp_mean) + np.array(amp_variance), alpha = .2, color = 'C'+str(i))
ax.plot(lvl_mean, amp_mean, '.-', c='C' + str(i), linewidth=2)
plt.title('Amplitude Growth Function')
ax.set_xticks(lvl_mean)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.get_xaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlabel('Level (dB)')
            ax.set_ylabel('N1P1 Amplitude')
ax.legend(handles=legend_elements, loc='best',frameon=False)
plt.show()
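# Hedged usage sketch (not part of the original module): how the classes above
# fit together. The directory layout (condition/animal/*-analyzed.txt) and the
# EPL parser are taken from the constructor defaults; paths are illustrative.
#
#   abr = ABR('/path/to/experiment', EPL)
#   abr.write_thr_csv()              # one thresholds CSV per condition
#   abr.write_agf_csv()              # one amplitude-growth CSV per frequency
#   abr.plot.threshold(errbar=True)  # mean threshold vs frequency per condition
#   abr.plot.agf(frequency=16.0)     # amplitude growth at one frequency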
|
python
|
import time
import picamerax
frames = 60
with picamerax.PiCamera() as camera:
camera.resolution = (1024, 768)
camera.framerate = 30
camera.start_preview()
# Give the camera some warm-up time
time.sleep(2)
start = time.time()
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(frames)
], use_video_port=True)
finish = time.time()
print('Captured %d frames at %.2ffps' % (
frames,
frames / (finish - start)))
|
python
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2019, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
### **NOTE** This module is used during bootstrap.
### Import *ONLY* builtin modules.
### List of built-in modules: sys.builtin_module_names
"""
Set up 'os' and 'os.path' module replacement functions for use during import
bootstrap.
"""
import sys
_builtin_names = sys.builtin_module_names
_mindirlen = 0
# Wrap os.environ, os.listdir(), os.sep
# We cannot cache the content of os.listdir(). It was found to cause problems
# with programs that dynamically add python modules to be reimported by that
# same program (i.e., plugins), because the cache is only built once
# at the beginning, and never updated. So, we must really list the directory
# again.
if 'posix' in _builtin_names: # For Linux, Unix, Mac OS X
from posix import environ as os_environ
from posix import listdir as os_listdir
os_sep = '/'
_mindirlen = 1
elif 'nt' in _builtin_names: # For Windows
from nt import environ as os_environ
from nt import listdir as os_listdir
os_sep = '\\'
_mindirlen = 3
else:
raise ImportError('No os specific module found')
# Wrap os.path.join()
def os_path_join(a, b, sep=os_sep):
if a == '':
return b
lastchar = a[-1:]
if lastchar == '/' or lastchar == sep:
return a + b
return a + sep + b
# Wrap os.path.dirname()
def os_path_dirname(a, sep=os_sep, mindirlen=_mindirlen):
for i in range(len(a) - 1, -1, -1):
c = a[i]
if c == '/' or c == sep:
if i < mindirlen:
return a[:i + 1]
return a[:i]
return ''
# Wrap os.path.basename()
if sys.platform.startswith('win'):
# Implementation from ntpath.py module
# from standard Python 2.7 Library.
def os_path_basename(pth):
## Implementation of os.path.splitdrive()
if pth[1:2] == ':':
d = pth[0:2]
p = pth[2:]
else:
d = ''
p = pth
## Implementation of os.path.split()
# set i to index beyond p's last slash
i = len(p)
while i and p[i - 1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# Windows implementation is based on split(). We need
# to return only tail.
return tail
else:
# Implementation from ntpath.py module
# from standard Python 2.7 Library.
def os_path_basename(pth):
i = pth.rfind('/') + 1
return pth[i:]
if 'PYTHONCASEOK' not in os_environ:
def caseOk(filename):
files = os_listdir(os_path_dirname(filename))
return os_path_basename(filename) in files
else:
def caseOk(filename):
return True
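# Hedged illustration (not part of the bootstrap module): expected behaviour of
# the minimal path helpers above on a POSIX-style platform (os_sep == '/').
#
#   os_path_join('a', 'b')        # -> 'a/b'
#   os_path_dirname('/usr/lib')   # -> '/usr'
#   os_path_basename('/usr/lib')  # -> 'lib'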
|
python
|
#!/usr/bin/env python
from flask_babel import _
import os
basedir = os.path.abspath(os.path.dirname(__file__))
POSTGRES = {
'user': 'catalog',
'pw': 'catalog',
'db': 'catalog',
'host': 'localhost',
'port': '5432',
}
class Config(object):
""" App configuration """
CLIENT_SECRET_JSON = os.path.join(basedir, 'client_secrets.json')
LANGUAGES = ['en', 'es']
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = 'postgresql://%(user)s:%(pw)s@%(host)s:%(port)s/%(db)s' % POSTGRES # noqa
SQLALCHEMY_TRACK_MODIFICATIONS = True
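# Hedged note (not part of the original file): with the POSTGRES dict above,
# SQLALCHEMY_DATABASE_URI expands to
#   'postgresql://catalog:catalog@localhost:5432/catalog'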
|
python
|
#!/usr/bin/env python3
""" Simulate 3D aggregate mixing
Generate simulated aggregates that can be analyzed by `analyze_3d_aggregate_mixing.py`
.. code-block:: bash
$ ./simulate_3d_aggregate_mixing.py \\
--num-red 400 \\
--num-green 127 \\
--aggregate-radius 75.3 \\
--neighbor-radius 20 \\
--same-cell-radius 5 \\
--num-batches 16 \\
../data/sim_uniform_pos
Where the options are:
* ``num_red``: Number of "red" (mKate) cells to generate
* ``num_green``: Number of "green" (GFP) cells to generate
* ``aggregate_radius``: um - Radius of the spherical aggregate
* ``neighbor_radius``: um - Cells this close or closer are "neighbors"
* ``same_cell_radius``: um - Cells this close or closer are "the same cell"
* ``num_batches``: Number of aggregates to simulate (to match the number of empirical samples)
"""
import sys
import shutil
import pathlib
import argparse
from typing import Optional
# Allow the scripts directory to be used in-place
THISDIR = pathlib.Path(__file__).resolve().parent
BASEDIR = THISDIR.parent
if THISDIR.name == 'scripts' and (BASEDIR / 'cm_microtissue_struct').is_dir():
sys.path.insert(0, str(BASEDIR))
# Our own imports
from cm_microtissue_struct.simulation import (
simulate_spheres_in_sphere, split_red_green
)
from cm_microtissue_struct.io import save_position_data
from cm_microtissue_struct.plotting import plot_3d_sphere_cloud
from cm_microtissue_struct.consts import (
NUM_RED, NUM_GREEN, AGGREGATE_RADIUS, SAME_CELL_RADIUS, NEIGHBOR_RADIUS,
)
# Main function
def simulate_mixing(outdir: pathlib.Path,
prefix: Optional[str] = None,
distribution: str = 'uniform',
num_red: int = NUM_RED,
num_green: int = NUM_GREEN,
num_batches: int = 1,
aggregate_radius: float = AGGREGATE_RADIUS,
neighbor_radius: float = NEIGHBOR_RADIUS,
same_cell_radius: float = SAME_CELL_RADIUS):
""" Simulate mixing two populations in an aggregate
:param Path outdir:
The base directory to write results to
:param str prefix:
The prefix for each simulation
:param str distribution:
The distribution to simulate
:param int num_red:
The number of "red" cells to generate
:param int num_green:
The number of "green" cells to generate
:param int num_batches:
The number of times to run the simulation
:param float aggregate_radius:
The radius for the overall aggregate
:param float neighbor_radius:
(UNUSED) Cells closer than this are "neighbors"
:param float same_cell_radius:
Cells closer than this are the "same cell" (hard shell model)
"""
if outdir.is_dir():
shutil.rmtree(str(outdir))
outdir.mkdir(parents=True)
if prefix is None:
prefix = distribution
for batch_id in range(1, num_batches+1):
print(f'Simulating batch {batch_id} of {num_batches}')
green_dir = outdir / f'{prefix}{batch_id:02d}_gfp_statistics'
green_file = green_dir / f'{prefix}{batch_id:02d}_gfp_Position.csv'
red_dir = outdir / f'{prefix}{batch_id:02d}_spot_statistics'
red_file = red_dir / f'{prefix}{batch_id:02d}_spot_Position.csv'
plotfile = outdir / f'{prefix}{batch_id:02d}_sphere_plot.png'
# Make a mixed sphere with cells uniformly distributed
print(f'Simulating {num_red + num_green} spheres')
all_points = simulate_spheres_in_sphere(num_red + num_green,
particle_radius=same_cell_radius,
sphere_radius=aggregate_radius,
umin=0.0, umax=1.0, udist='uniform')
# Assign cells to be red or green using a selected distribution
red_points, green_points = split_red_green(all_points,
num_red=num_red,
num_green=num_green,
udist=distribution)
plot_3d_sphere_cloud([red_points, green_points], ['red', 'green'],
radii=same_cell_radius, figsize=(16, 16),
outfile=plotfile)
save_position_data(green_file, green_points)
save_position_data(red_file, all_points)
# Command line interface
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', help='Prefix for the directories')
parser.add_argument("--num-red", type=int, default=NUM_RED,
help='Number of "red" (mKate) cells to generate')
parser.add_argument("--num-green", type=int, default=NUM_GREEN,
help='Number of "green" (GFP) cells to generate')
parser.add_argument('--num-batches', type=int, default=3,
help='Number of simulations to run')
parser.add_argument("--aggregate-radius", type=float, default=AGGREGATE_RADIUS,
help='Radius of the spherical aggregate in um')
parser.add_argument("--neighbor-radius", type=float, default=NEIGHBOR_RADIUS,
help='Cells this close or closer are "neighbors"')
parser.add_argument("--same-cell-radius", type=float, default=SAME_CELL_RADIUS,
help='Cells this close or closer are "the same cell"')
parser.add_argument('-d', '--distribution', default='uniform',
choices=('uniform', 'left_triangle', 'right_triangle',
'inside', 'outside'))
parser.add_argument('outdir', type=pathlib.Path,
help='Directory to write the plots out to')
return parser.parse_args(args=args)
def main(args=None):
args = parse_args(args=args)
simulate_mixing(**vars(args))
if __name__ == '__main__':
main()
|
python
|
from statuscheck.services.bases._statuspageio import BaseStatusPageAPI
class ServiceAPI(BaseStatusPageAPI):
name = "Epic Games"
domain_id = "ft308v428dv3"
status_url = "https://status.epicgames.com/"
service_url = "http://epicgames.com/"
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import unittest
from tr55.water_quality import get_volume_of_runoff, get_pollutant_load
from tr55.tables import POLLUTION_LOADS
class TestWaterQuality(unittest.TestCase):
def test_volume(self):
"""
Test the water volume computation.
"""
cell_res = 30 # meters
cell_count = 100
runoff = 0.4 # inches
liters = get_volume_of_runoff(runoff, cell_count, cell_res)
        self.assertEqual(30480, liters,
                         "Did not calculate the correct runoff volume")
def test_load(self):
"""
Test the pollutant load computation.
"""
nlcd = 24
pollutant = 'tn'
emc = POLLUTION_LOADS[nlcd][pollutant]
runoff_liters = 1000
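        # Assuming emc is an event mean concentration in mg/L: runoff_liters * emc
        # gives mg of pollutant, / 1,000,000 converts mg to kg, and 2.205 converts
        # kg to pounds (units inferred from the formula, not stated in this file).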
expected = ((runoff_liters * emc) / 1000000) * 2.205
load = get_pollutant_load('developed_high', pollutant, runoff_liters)
        self.assertEqual(expected, load)
def test_load_bad_nlcd(self):
"""
Test that a bad land use value generates an error.
"""
self.assertRaises(Exception, get_pollutant_load, 'asdf', 'tn', 1000)
def test_load_bad_pollutant(self):
"""
        Test that a bad pollutant name generates an error.
"""
self.assertRaises(Exception, get_pollutant_load, 'developed_high',
'asdf', 1000)
|
python
|
import pytorch_lightning as pl
class TeacherForcingAnnealCallback(pl.Callback):
def __init__(self, start, end, epochs):
assert start >= end
self.start = start
self.end = end
self.epochs = epochs
self.slope = float((start - end)) / epochs
def on_validation_epoch_end(self, trainer, model):
current = model.teacher_forcing_prob
new_teacher_forcing_prob = max(self.end, current - self.slope)
model.teacher_forcing_prob = new_teacher_forcing_prob
    @classmethod
    def add_cli(cls, parser):
parser.add_argument("--teacher_forcing_start", type=float, default=0.8)
parser.add_argument("--teacher_forcing_end", type=float, default=0.0)
parser.add_argument("--teacher_forcing_anneal_epochs", type=int, default=8)
class TimeMaskedLossCallback(pl.Callback):
def __init__(self, start, end, steps):
assert start <= end
self.start = start
self.end = end
self.steps = steps
self.slope = float((end - start)) / steps
self._time_mask = self.start
@property
def time_mask(self):
return round(self._time_mask)
def on_train_start(self, trainer, model):
if model.time_masked_idx is None:
model.time_masked_idx = self.time_mask
def on_train_batch_end(self, trainer, model, *args):
self._time_mask = min(self.end, self._time_mask + self.slope)
model.time_masked_idx = self.time_mask
model.log("time_masked_idx", self.time_mask)
    @classmethod
    def add_cli(cls, parser):
parser.add_argument("--time_mask_start", type=int, default=1)
parser.add_argument("--time_mask_end", type=int, default=12)
parser.add_argument("--time_mask_anneal_steps", type=int, default=1000)
parser.add_argument("--time_mask_loss", action="store_true")
|
python
|
import datetime
import pandas as pd
import io
import os
import boto3
from io import BytesIO
from io import StringIO
from airflow import DAG
from airflow.providers.amazon.aws.operators.redshift_sql import RedshiftSQLOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.redshift_to_s3_operator import RedshiftToS3Transfer
from airflow.operators.python_operator import PythonOperator
from airflow.providers.amazon.aws.transfers.redshift_to_s3 import RedshiftToS3Operator
from airflow.models import Variable
dt = datetime.datetime.today()
s3 = boto3.resource('s3')
s3_c = boto3.client("s3")
def load_raw_data_from_s3_and_save_it_locally():
obj = s3.Object(Variable.get('s3_staging_bucket'), Variable.get('unload_raw_data_to_s3_key')+'/'+Variable.get('unload_raw_data_to_s3_filename'))
with BytesIO(obj.get()['Body'].read()) as bio:
df = pd.read_csv(bio)
print(f':::::::dataframe:\n{df.info()}')
df.to_csv(Variable.get('raw_data_file'))
print(f':::::::Dataframe was saved locally')
return True
def put_preprocessed_data_into_s3():
dataframe = pd.read_csv(Variable.get('eliminated_papers_older_than_01_01_2020'))
print(f':::::::dataframe:\n{dataframe.info()}')
csv_buf = StringIO()
dataframe.to_csv(csv_buf, header=True, index=False)
csv_buf.seek(0)
s3_c.put_object(Bucket=Variable.get('s3_staging_bucket'), Body=csv_buf.getvalue(), Key=Variable.get('intermediate_preprocessed_s3_key'))
print(f':::::::Dataframe was saved to s3')
return True
def put_spacy_preprocessed_data_into_s3():
dataframe = pd.read_csv(Variable.get('spacy_preprocessed'))
print(f':::::::dataframe:\n{dataframe.info()}')
csv_buf = StringIO()
dataframe.to_csv(csv_buf, header=True, index=False)
csv_buf.seek(0)
s3_c.put_object(Bucket=Variable.get('s3_staging_bucket'), Body=csv_buf.getvalue(), Key=Variable.get('spacy_preprocessed_s3_key'))
print(f':::::::Dataframe was saved to s3')
return True
def load_preprocessed_data_from_s3_and_save_it_locally():
print(f":::::::object located at {Variable.get('spacy_preprocessed_s3_key')}")
obj = s3.Object(Variable.get('s3_staging_bucket'), Variable.get('spacy_preprocessed_s3_key'))
with BytesIO(obj.get()['Body'].read()) as bio:
df = pd.read_csv(bio)
print(f':::::::dataframe:\n{df.info()}')
df.to_csv(Variable.get('spacy_preprocessed'))
print(f":::::::Dataframe was saved locally at {Variable.get('spacy_preprocessed')}")
return True
|
python
|
import databases
import sqlalchemy
import logging
import os
import json
from fastapi import FastAPI
from sqlalchemy import text
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
db_string = "postgresql://{0}:{1}@{2}/{3}".format(os.getenv("POSTGRES_USER"),
os.getenv("POSTGRES_PASSWORD"),
os.getenv("DATABASE_URL"),
os.getenv("DATABASE"))
database = databases.Database(db_string)
metadata = sqlalchemy.MetaData()
engine = sqlalchemy.create_engine(db_string)
metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
async def startup():
logger.info("Connecting to database: "+db_string)
await database.connect()
@app.on_event("shutdown")
async def shutdown():
logger.info("Disconnecting from database: " + db_string)
await database.disconnect()
@app.get("/")
def read_root():
return {
"Message": "API is up and running",
"DATABASE": [os.getenv("DATABASE_URL"), os.getenv("POSTGRES_USER"), os.getenv("POSTGRES_PASSWORD")],
"AUTHENTICATION": os.getenv("AUTHENTICATION")
}
# The query below aliases columns in SQL ("select data1 as res1, data2 as res2"),
# so each result row exposes the keys res1 and res2.
@app.get("/metrics")
def read_item():
with engine.connect() as con:
rs = con.execute(text('select id, data1 as res1, data2 as res2 from metrics m where m.id = :val'), {'val': '1000000'})
        metrics = []
        for v in rs:
            row = dict(v.items())
            for column, value in row.items():
                logger.info('column: ' + format(column) + ', value: ' + format(value))
            metrics.append(row)
    return {"metrics": metrics}
|
python
|
#!/usr/bin/env python
import socket
import warnings
import pytest
from flask import _request_ctx_stack
from ._internal import _determine_scope
from ._internal import _make_accept_header
from ._internal import _rewrite_server_name
from .live_server import LiveServer
@pytest.fixture
def client(app):
"""A Flask test client. An instance of :class:`flask.testing.TestClient`
by default.
"""
with app.test_client() as client:
yield client
@pytest.fixture
def client_class(request, client):
    """Used to set a ``client`` class attribute to the current Flask test client::
@pytest.mark.usefixtures('client_class')
class TestView:
def login(self, email, password):
credentials = {'email': email, 'password': password}
return self.client.post(url_for('login'), data=credentials)
def test_login(self):
assert self.login('[email protected]', 'pass').status_code == 200
"""
if request.cls is not None:
request.cls.client = client
@pytest.fixture(scope=_determine_scope)
def live_server(request, app, pytestconfig):
"""Run application in a separate process.
When the ``live_server`` fixture is applied, the ``url_for`` function
works as expected::
def test_server_is_up_and_running(live_server):
index_url = url_for('index', _external=True)
assert index_url == 'http://localhost:5000/'
res = urllib2.urlopen(index_url)
assert res.code == 200
"""
# Set or get a port
port = app.config.get("LIVESERVER_PORT", None)
if not port:
port = pytestconfig.getvalue("live_server_port")
if port == 0:
# Bind to an open port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
host = pytestconfig.getvalue("live_server_host")
# Explicitly set application ``SERVER_NAME`` for test suite
original_server_name = app.config["SERVER_NAME"] or "localhost.localdomain"
final_server_name = _rewrite_server_name(original_server_name, str(port))
app.config["SERVER_NAME"] = final_server_name
wait = request.config.getvalue("live_server_wait")
clean_stop = request.config.getvalue("live_server_clean_stop")
server = LiveServer(app, host, port, wait, clean_stop)
if request.config.getvalue("start_live_server"):
server.start()
request.addfinalizer(server.stop)
yield server
if original_server_name is not None:
app.config["SERVER_NAME"] = original_server_name
@pytest.fixture
def config(app):
"""An application config."""
return app.config
@pytest.fixture
def request_ctx(app):
"""The request context which contains all request relevant information,
e.g. `session`, `g`, `flashes`, etc.
"""
    warnings.warn(
        "In Werkzeug 2.0.0, the Client request methods "
        "(client.get, client.post) always return an instance of TestResponse. This "
        "class provides a reference to the request object through 'response.request'. "
        "The fixture 'request_ctx' is deprecated and will be removed in the future; using TestResponse.request "
        "is the preferred way.",
DeprecationWarning,
stacklevel=2,
)
return _request_ctx_stack.top
@pytest.fixture(params=["application/json", "text/html"])
def mimetype(request):
return request.param
@pytest.fixture
def accept_mimetype(mimetype):
return _make_accept_header(mimetype)
@pytest.fixture
def accept_json(request):
return _make_accept_header("application/json")
@pytest.fixture
def accept_jsonp():
return _make_accept_header("application/json-p")
@pytest.fixture(params=["*", "*/*"])
def accept_any(request):
return _make_accept_header(request.param)
|
python
|
from Jumpscale import j
def main(self):
"""
to run:
kosmos 'j.clients.zdb._tests_run(name="admin")'
"""
c = j.servers.zdb.test_instance.client_admin_get()
c.reset()
c.namespaces_list()
assert c.namespaces_list() == ["default"]
c.namespace_new("test_namespace")
assert c.namespace_exists("test_namespace")
assert c.namespaces_list() == ["default", "test_namespace"]
c.namespace_delete("test_namespace")
assert c.namespaces_list() == ["default"]
c.namespace_new("test_namespace")
c.reset()
assert c.namespaces_list() == ["default"]
self._log_info("test ok")
return "OK"
|
python
|
class IdentityHashingService:
def hash(self, value):
return value
|
python
|
import collections
class Solution:
"""
@param s: a string
@param words: a list of words
@return: all starting indices of substring(s)
"""
def findSubstring(self, s, words):
# write your code here
m = len(s)
target = len(words)
result = []
if m == 0 or target == 0:
return result
n = len(words[0])
expected = collections.Counter(words)
for i in range(n):
seen = collections.Counter()
count = 0
for j in range(i, m - n + 1, n):
word = s[j: j + n]
if word not in expected:
count = 0
seen.clear()
else:
count += 1
seen[word] += 1
while seen[word] > expected[word]:
deletedWord = s[j - (count - 1)*n: j - (count - 2)*n]
seen[deletedWord] -= 1
count -= 1
if count == target:
result.append(j - (count - 1)*n)
firstWord = s[j - (count - 1)*n: j - (count - 2)*n]
seen[firstWord] -= 1
count -= 1
return result
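# Example (illustrative): Solution().findSubstring("barfoothefoobarman", ["foo", "bar"])
# returns [0, 9] -- the concatenations "barfoo" and "foobar".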
|
python
|
import requests
import json
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
auth_server = 'https://api.amazon.com/auth/register'
alexa_user_name = '[email protected]'
alexa_password = 'your_password'
guardSystemId = ''
history_api_endpoint = 'https://alexa.amazon.com/api/smarthome/v1/history/events'
guard_api_endpoint = 'https://alexa.amazon.com/api/smarthome/v0/onGuardSystems/'
auth_request_body = {
"auth_data": {
"email_password": {
"email": alexa_user_name,
"password": alexa_password,
}
},
"registration_data": {
"domain": "Device",
"device_name": "ALEGCNGL9K0HMA3LH5OI7P24C53",
"app_name": "ALEGCNGL9K0HMA3LH5OI7P24C53",
"app_version": "1",
"device_model": "XXX",
"os_version": "XXX",
"device_type": "ALEGCNGL9K0HM",
"device_serial": "ALEGCNGL9K0HMA3LH5OI7P24C53"
},
"cookies": {
"domain": ".amazon.com"
},
"requested_token_type": [
"website_cookies",
"bearer"
]
}
class AuthCredentials:
def __init__(self, cookies, csrf):
self.cookies = cookies
self.csrf = csrf
def __repr__(self):
return '[ cookies = ' + self.cookies + ' csrf = ' + self.csrf + ' ]'
def extract_credentials(website_cookies):
cookies = ''
csrf = ''
for website_cookie in website_cookies:
name = website_cookie['Name']
value = website_cookie['Value']
cookies = cookies + name + '=' + value + '; '
if 'csrf' == name:
csrf = value
return AuthCredentials(cookies, csrf)
def get_auth_headers(credentials):
    if credentials is None:
        return
    if credentials.csrf != '':
        headers['Cookie'] = credentials.cookies
        headers['csrf'] = credentials.csrf
    else:
        # No csrf cookie was returned: fall back to a dummy value in both cookie and header
        csrf = '1'
        headers['Cookie'] = 'csrf=' + csrf + '; ' + credentials.cookies
        headers['csrf'] = csrf
    headers['Origin'] = 'www.amazon.com'
    return headers
headers = {
"Content-Type": "application/json",
"User-Agent": 'Mozilla/5.0'
}
resp = requests.post(auth_server, json=auth_request_body, headers=headers)
response = resp.json()
# print(json.dumps(response, indent=4))
auth_credentials = extract_credentials(response['response']['success']['tokens']['website_cookies'])
# print(auth_credentials)
auth_headers = get_auth_headers(auth_credentials)
print(auth_headers)
history_filters = {
'timestamp': {
'from': "2020-01-03T16:20:50.780Z",
'to': "2020-06-25T16:20:50.780Z"
},
'filter': {
'type': "V1",
'filterExpression': []
},
'maxResults': 100
}
# Alexa history of your activities
response = requests.post(history_api_endpoint,
json= history_filters,
headers=auth_headers,
verify=False )
print(json.dumps(response.json(), indent=4))
# Alexa guard information
response= requests.get(guard_api_endpoint + guardSystemId,
headers=auth_headers)
print(json.dumps(response.json(), indent=4))
|
python
|
#!/usr/bin/env python
a = [66.25, 333, 333, 1, 1234.5]
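# Note: insert, append, remove, reverse and sort mutate the list in place and
# return None, so the corresponding prints below show "None" for their return
# values; a.count('x') is 0 because 'x' does not occur in the list.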
print("\na=", a)
print("\na.count(333) = %d, a.count(66.25) = %d, a.count('x') = %d" % (a.count(333), a.count(66.25), a.count('x')) )
print("\na.insert(2, -1) = ", a.insert(2, -1))
print("a = ", a)
print("\na.append(333) = ", a.append(333))
print("a = ", a)
print("\na.index(333) = ", a.index(333))
print("a = ", a)
print("\na.remove(333) = ", a.remove(333))
print("a = ", a)
print("\na.reverse() = ", a.reverse())
print("a = ", a)
print("\na.sort() = ", a.sort())
print("a = ", a)
print("\na.pop() = ", a.pop())
print("a = ", a)
|
python
|
'''The doc module provides decorator functions that modify a
function's __apidoc__ object. For example, using `method('GET')`
sets the function's method to 'get' on its __apidoc__.'''
from http import HTTPStatus
from oapispec.core.utils import merge, not_none
def doc(**kwargs):
'''A decorator to add documentation metadata to the decorated function'''
def wrapper(documented):
current_doc = getattr(documented, '__apidoc__', {})
if 'name' not in current_doc:
kwargs['name'] = documented.__name__
documented.__apidoc__ = merge(current_doc, kwargs)
return documented
return wrapper
def route(endpoint):
'''
A decorator to route resources.
'''
return doc(route=endpoint)
def hide(func):
'''A decorator to hide a resource or a method from specifications'''
return doc()(func)
def expect(model, description=None):
'''
A decorator to Specify the expected input model
    :param ModelBase model: the expected input model
    :param str description: an optional description of the expectation
'''
return doc(expect=[(model, description)])
def param(name, type=str, location='query', description=None, format=None, required=False, default=None):
'''
A decorator to specify one of the expected parameters
:param str name: the parameter name
:param str description: a small description
:param str location: the parameter location `(query|header|formData|body|cookie)`
'''
return doc(params={
name: not_none({
'in': location,
'type': type,
'format': format,
'default': default,
'required': True if required else None,
'description': description
})
})
def response(http_status: HTTPStatus, model=None, headers=None):
'''
A decorator to specify one of the expected responses
:param HTTPStatus http_status: the HTTP status (or any object with a 'value' and 'description' property)
:param ModelBase model: an optional response model
:param dict<str, header> headers: an optional dict of headers that are returned with this response
'''
code = http_status.value
description = http_status.description
return doc(responses={str(code): (description, model, headers)})
def header(name, description=None, **kwargs):
'''
A decorator to specify one of the expected headers
:param str name: the HTTP header name
:param str description: a description about the header
'''
return doc(headers={
name: {
'description': description
},
**kwargs
})
def produces(mimetypes):
'''A decorator to specify the MIME types the API can produce'''
return doc(produces=mimetypes)
def deprecated(func):
'''A decorator to mark a resource or a method as deprecated'''
return doc(deprecated=True)(func)
def vendor(*args, **kwargs):
'''
A decorator to expose vendor extensions.
Extensions can be submitted as dict or kwargs.
    The ``x-`` prefix is optional and will be added if missing.
See: http://swagger.io/specification/#specification-extensions-128
'''
for arg in args:
kwargs.update(arg)
return doc(vendor=kwargs)
def method(http_method):
'''A decorator to set the method for the handler'''
return doc(method=http_method.lower())
def namespace(name, description=None):
'''A decorator that groups the decorated handler function in a namespace'''
return doc(namespace=not_none({
'name': name,
'description': description
}))
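# --- Hedged usage sketch (not part of the original module) ---
# The decorators above only merge metadata into the handler's __apidoc__ dict,
# e.g. (handler name and route are illustrative):
#
#     @method('GET')
#     @route('/status')
#     @response(HTTPStatus.OK)
#     def get_status():
#         return {'ok': True}
#
# get_status.__apidoc__ then contains the keys 'name', 'method', 'route'
# and 'responses'.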
|
python
|
import itertools
import json
from collections import defaultdict
from django.core.serializers.json import DjangoJSONEncoder
from .orm_lookups import get_django_filter
from .query import BoundQuery
from .types import ASC, DSC
def _filter(qs, path_str, filter_):
negation = False
lookup = filter_.lookup
if lookup.startswith("not_"):
negation = True
lookup = lookup[4:]
filter_expression = get_django_filter(
filter_.orm_bound_field.type_, path_str, lookup, filter_.parsed
)
if negation:
return qs.exclude(filter_expression)
else:
return qs.filter(filter_expression)
def _cols_sub_query(bound_query):
filters = [
filter_
for filter_ in bound_query.valid_filters
if filter_.orm_bound_field.filter_
]
return BoundQuery(
bound_query.model_name,
bound_query.col_fields,
filters,
bound_query.limit,
bound_query.orm_model,
)
def _rows_sub_query(bound_query):
filters = [
filter_
for filter_ in bound_query.valid_filters
if filter_.orm_bound_field.filter_
]
body_fields = [f for f in bound_query.body_fields if f.direction]
return BoundQuery(
bound_query.model_name,
bound_query.row_fields + body_fields,
filters,
bound_query.limit,
bound_query.orm_model,
)
def get_result_queryset(request, bound_query, debug=False):
all_fields = {f.queryset_path_str: f for f in bound_query.bound_fields}
all_fields.update({f.queryset_path_str: f for f in bound_query.bound_filters})
qs = bound_query.orm_model.get_queryset(
request, {f.split("__")[0] for f in all_fields}, debug=debug
)
# sql functions and qs annotations
for field in all_fields.values():
qs = field.annotate(request, qs, debug=debug)
# filter normal and sql function fields (aka __date)
for filter_ in bound_query.valid_filters:
if filter_.orm_bound_field.filter_:
qs = _filter(qs, filter_.orm_bound_field.queryset_path_str, filter_)
# nothing to group on, early out with an aggregate
if not any(f.group_by for f in bound_query.bound_fields):
return [
qs.aggregate(
**{
field.queryset_path_str: field.aggregate_clause
for field in bound_query.bound_fields + bound_query.bound_filters
if field.aggregate_clause
}
)
]
# group by
group_by_fields = [
field.queryset_path_str for field in bound_query.bound_fields if field.group_by
]
qs = qs.values(*group_by_fields).distinct()
# aggregates
aggregate_clauses = {
field.queryset_path_str: field.aggregate_clause
for field in bound_query.bound_fields + bound_query.bound_filters
if field.aggregate_clause
}
qs = qs.annotate(**aggregate_clauses)
# having, aka filter aggregate fields
for filter_ in bound_query.valid_filters:
if filter_.orm_bound_field.having:
qs = _filter(qs, filter_.orm_bound_field.queryset_path_str, filter_)
# sort
sort_fields = []
for field in bound_query.sort_fields:
if field.direction is ASC:
sort_fields.append(field.orm_bound_field.queryset_path_str)
if field.direction is DSC:
sort_fields.append(f"-{field.orm_bound_field.queryset_path_str}")
qs = qs.order_by(*sort_fields)
return qs[: bound_query.limit]
def get_result_list(request, bound_query):
return list(get_result_queryset(request, bound_query))
def _get_objs_for_calculated_fields(request, bound_query, orm_models, res):
# gather up all the objects to fetch for calculated fields
to_load = defaultdict(set)
loading_for = defaultdict(set)
for field in bound_query.bound_fields:
if field.model_name:
loading_for[field.model_name].add(field.name)
pks = to_load[field.model_name]
for row in res:
pks.add(row[field.queryset_path_str])
# fetch all the calculated field objects
objs = {}
for model_name, pks in to_load.items():
objs[model_name] = (
orm_models[model_name]
.get_queryset(request, loading_for[model_name])
.in_bulk(pks)
)
return objs
# dump out the results
def _format_table(fields, data, objs):
namespace = {"objs": objs, "data": data}
field_lines = []
for i, field in enumerate(fields):
namespace[f"format_{i}"] = field.get_formatter()
value = f"row[{field.queryset_path_str!r}]"
if field.model_name:
value = f"objs[{field.model_name!r}].get({value})"
field_lines.append(f"{field.path_str!r}: format_{i}({value}),")
code = ["[None if row is None else {", *field_lines, "} for row in data]"]
return eval("\n".join(code), namespace)
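# For illustration (field names are hypothetical): for two plain fields the
# string eval()'d above is roughly
#   [None if row is None else {
#   'a__b': format_0(row['a__b']),
#   'c__d': format_1(row['c__d']),
#   } for row in data]
# i.e. a list of formatted dicts, with None rows passed through unchanged.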
def _get_fields(row, fields):
res = []
for field in fields:
v = row[field.queryset_path_str]
if isinstance(v, (list, set)): # pragma: postgres
v = tuple(v)
try:
hash(v)
except TypeError:
v = json.dumps(v, cls=DjangoJSONEncoder)
res.append((field.queryset_path_str, v))
return tuple(res)
def _get_data_and_all_keys(bound_query, res):
data = defaultdict(dict)
all_row_keys = set()
all_col_keys = set()
for row in res:
row_key = _get_fields(row, bound_query.bound_row_fields)
col_key = _get_fields(row, bound_query.bound_col_fields)
data[row_key][col_key] = dict(_get_fields(row, bound_query.bound_body_fields))
all_row_keys.add(row_key)
all_col_keys.add(col_key)
return data, all_row_keys, all_col_keys
def _get_keys(res, fields, all_keys):
keys = {} # abuse dict to preserve order while removing duplicates
if fields:
for row in res:
key = _get_fields(row, fields)
if key in all_keys:
keys[key] = None
else:
keys[()] = None
return keys
def _format_grid(data, col_keys, row_keys, fields, objs):
body_data = []
for col_key in col_keys:
table = []
for row_key in row_keys:
table.append(data[row_key].get(col_key, None))
body_data.append(_format_table(fields, table, objs))
return body_data
def get_results(request, bound_query, orm_models, with_format_hints):
if not bound_query.fields:
return {"rows": [], "cols": [], "body": [], "length": 0}
res = get_result_list(request, bound_query)
if not res:
return {"rows": [], "cols": [], "body": [], "length": 0}
if bound_query.bound_col_fields and bound_query.bound_row_fields:
        # need to fetch rows and col titles separately to get correct sort
rows_res = get_result_list(request, _rows_sub_query(bound_query))
cols_res = get_result_list(request, _cols_sub_query(bound_query))
else:
rows_res = res
cols_res = res
objs = _get_objs_for_calculated_fields(request, bound_query, orm_models, res)
if bound_query.bound_col_fields or bound_query.bound_body_fields:
raw_body_data, raw_row_data, raw_col_data = _get_data_and_all_keys(
bound_query, res
)
# the raw versions have the subset of correct results
# the res versions have the correct ordering
col_keys = _get_keys(cols_res, bound_query.bound_col_fields, raw_col_data)
row_keys = _get_keys(rows_res, bound_query.bound_row_fields, raw_row_data)
body_data = _format_grid(
raw_body_data, col_keys, row_keys, bound_query.bound_body_fields, objs
)
row_data = _format_table(
bound_query.bound_row_fields, [dict(row) for row in row_keys], objs
)
col_data = _format_table(
bound_query.bound_col_fields, [dict(col) for col in col_keys], objs
)
format_hints = {}
for fields, data in [
(bound_query.bound_row_fields, row_data),
(bound_query.bound_col_fields, col_data),
(
bound_query.bound_body_fields,
list(itertools.chain.from_iterable(body_data)),
),
]:
format_hints.update(
{field.path_str: field.get_format_hints(data) for field in fields}
)
return {
"rows": row_data,
"cols": col_data,
"body": body_data,
"length": len(res),
"formatHints": format_hints,
}
else:
row_data = _format_table(
bound_query.bound_row_fields,
[dict(_get_fields(row, bound_query.bound_row_fields)) for row in res],
objs,
)
if with_format_hints:
format_hints = {
field.path_str: field.get_format_hints(row_data)
for field in bound_query.bound_row_fields
}
else:
format_hints = None
return {
"rows": row_data,
"cols": [{}] if res else [],
"body": [[{}] * len(res)],
"length": len(res),
"formatHints": format_hints,
}
|
python
|
"""
Unit tests for \
landlab.components.vegetation_dynamics.vegetation_dynamics
"""
from nose.tools import assert_equal, assert_true, assert_raises, with_setup
from numpy.testing import assert_array_almost_equal
try:
from nose.tools import assert_is_instance
except ImportError:
from landlab.testing.tools import assert_is_instance
import numpy as np
from landlab import RasterModelGrid
from landlab.components.vegetation_dynamics.vegetation_dynamics \
import Vegetation
(_SHAPE, _SPACING, _ORIGIN) = ((20, 20), (10e0, 10e0), (0., 0.))
_ARGS = (_SHAPE, _SPACING, _ORIGIN)
def setup_grid():
from landlab import RasterModelGrid
grid = RasterModelGrid((20, 20), spacing=10e0)
grid['cell']['vegetation__plant_functional_type']= \
np.zeros(grid.number_of_cells, dtype=int)
    Veg = Vegetation(grid)
    globals().update({
        'Veg': Veg
    })
@with_setup(setup_grid)
def test_name():
assert_equal(Veg.name, 'Vegetation')
@with_setup(setup_grid)
def test_input_var_names():
assert_equal(sorted(Veg.input_var_names),
['surface__evapotranspiration',
'surface__potential_evapotranspiration_30day_mean',
'surface__potential_evapotranspiration_rate',
'vegetation__plant_functional_type',
'vegetation__water_stress'])
@with_setup(setup_grid)
def test_output_var_names():
assert_equal(sorted(Veg.output_var_names),
['vegetation__cover_fraction',
'vegetation__dead_biomass',
'vegetation__dead_leaf_area_index',
'vegetation__live_biomass',
'vegetation__live_leaf_area_index'])
@with_setup(setup_grid)
def test_var_units():
assert_equal(set(Veg.input_var_names) |
set(Veg.output_var_names),
set(dict(Veg.units).keys()))
assert_equal(Veg.var_units('vegetation__live_leaf_area_index'), 'None')
assert_equal(Veg.var_units('vegetation__dead_leaf_area_index'), 'None')
assert_equal(Veg.var_units('vegetation__cover_fraction'), 'None')
assert_equal(Veg.var_units('surface__evapotranspiration'), 'mm')
assert_equal(Veg.var_units('surface__potential_evapotranspiration_rate'),
'mm')
assert_equal(Veg.var_units(
'surface__potential_evapotranspiration_30day_mean'), 'mm')
assert_equal(Veg.var_units('vegetation__water_stress'), 'None')
assert_equal(Veg.var_units('vegetation__live_biomass'), 'g m^-2 d^-1')
assert_equal(Veg.var_units('vegetation__dead_biomass'), 'g m^-2 d^-1')
assert_equal(Veg.var_units('vegetation__plant_functional_type'), 'None')
@with_setup(setup_grid)
def test_grid_shape():
assert_equal(Veg.grid.number_of_node_rows, _SHAPE[0])
assert_equal(Veg.grid.number_of_node_columns, _SHAPE[1])
@with_setup(setup_grid)
def test_grid_x_extent():
assert_equal(Veg.grid.extent[1], (_SHAPE[1] - 1) * _SPACING[1])
@with_setup(setup_grid)
def test_grid_y_extent():
assert_equal(Veg.grid.extent[0], (_SHAPE[0] - 1) * _SPACING[0])
@with_setup(setup_grid)
def test_field_getters():
for name in Veg.grid['node']:
field = Veg.grid['node'][name]
assert_is_instance(field, np.ndarray)
assert_equal(field.shape,
(Veg.grid.number_of_node_rows *
Veg.grid.number_of_node_columns, ))
for name in Veg.grid['cell']:
field = Veg.grid['cell'][name]
assert_is_instance(field, np.ndarray)
assert_equal(field.shape,
(Veg.grid.number_of_cell_rows *
Veg.grid.number_of_cell_columns, ))
assert_raises(KeyError, lambda: Veg.grid['not_a_var_name'])
@with_setup(setup_grid)
def test_field_initialized_to_zero():
for name in Veg.grid['node']:
field = Veg.grid['node'][name]
assert_array_almost_equal(field, np.zeros(Veg.grid.number_of_nodes))
for name in Veg.grid['cell']:
field = Veg.grid['cell'][name]
assert_array_almost_equal(field, np.zeros(Veg.grid.number_of_cells))
|
python
|
import os
#We don't want to show the pygame version and welcome message.
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
from random import uniform,randint
import tqdm
"""
We import pygame in order to keep the application responsive during long calculations.
If the QLearning run or the construction of the path takes too long, the application crashes,
so we pump the pygame event queue as a workaround.
This workaround is cheap and is not necessary if you don't use the graphical interface.
"""
import pygame
class Solver:
def __init__(self,maze,learning_rate=0.8,discount_factor=0.5,maxstep=1000,epsilon=0.3,interface=True):
"""
Initiate the solver
Hyperparameters :
- maze : the maze which is a 2-dimensional array containing only numerical values
- learning_rate : the learning rate of the QLearning algorithm, must be between 0 and 1
- discount_factor : the discount factor of the QLearning algorithm, must be between 0 and 1
- maxstep : Number of explorations the agent will perform.
An exploration starts at the start and must find the exit.
        - epsilon : the value of the epsilon-greedy method, must be between 0 and 1
- interface : if you are using the solver with an application (True) or not (False)
"""
self.learning_rate = learning_rate
self.discount_factor = discount_factor
self.maxstep = int(maxstep)
self.epsilon = epsilon
#Variable indicating whether an interface is used
self.interface = interface
"""
Maze code :
path = 0
start = 1
end = 2
trap = 3
wall = 4
"""
self.maze = maze
#Create constants of the maze
self.length = len(maze)
self.width = len(maze[0])
#Explore the maze
self.start = None
self.end = None
self.trap = []
for i in range(self.length):
for j in range(self.width):
ele = maze[i][j]
if ele == 1:
self.start = (i,j)
elif ele == 2:
self.end = (i,j)
elif ele == 3:
self.trap.append((i,j))
        #The maze must have an entrance and an exit
        if self.start is None or self.end is None:
            print("Maze must have a start (code 1) and an end (code 2)")
quit()
def learning(self):
"""
Algorithm of QLearning you can find in
"Reinforcement learning : An Introduction" of Sutton and Barto
"""
#Init the QTable
self.createQ()
#Until all the episodes are completed
for i in tqdm.trange(self.maxstep):
#Begin the episode at the start of the maze
posX = self.start[0]
posY = self.start[1]
#The episode runs until the agent arrives at his destination
while(not(posX==self.end[0] and posY==self.end[1]) and
not ((posX,posY) in self.trap)):
#Application control
if self.interface :
#The crash proof patch
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
exit()
                #The next position of the agent depends on an epsilon-greedy choice
choice = self.greedyChoice(posX,posY)
#Update the next position of the agent
newX,newY = self.updatePosition(choice,posX,posY)
#Test if the new position is the exit
if newX==self.end[0] and newY==self.end[1]:
reward = 1
                #Test if the new position is a trap
elif (newX,newY) in self.trap:
reward = -1
else:
reward = 0
#Coordinates in the QTable of the last and new position
t_pos = posX*self.width+posY
tpos = newX*self.width+newY
#Update the QTable
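                #Standard tabular Q-learning update:
                #Q(s,a) <- Q(s,a) + learning_rate * (reward + discount_factor * max_a' Q(s',a') - Q(s,a))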
self.Qtable[t_pos][choice] = self.Qtable[t_pos][choice] + self.learning_rate * (reward + self.discount_factor*max(self.Qtable[tpos]) - self.Qtable[t_pos][choice])
                #Position of the agent is updated
posX=newX
posY=newY
#When the algorithm is over, create the path the agent has to follow from the start to the end
path = []
posX = self.start[0]
posY = self.start[1]
#Create a counter while creating the path
count = 0
#Create the path until it finds the exit
#OR it reaches a limit :
        # With the chosen maxstep the Q-Learning may not have converged, so the best way to the exit may not be found
while not(posX==self.end[0] and posY==self.end[1]) and count<=self.length*self.width:
#Application control
if self.interface :
#The crash proof patch
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
exit()
#Coordinates in the QTable of the position
pos = posX*self.width+posY
#Take the best direction
direction = self.Qtable[pos].index(max(self.Qtable[pos]))
#Update the path
path.append(direction)
#Update the next position
posX,posY = self.updatePosition(direction,posX,posY)
count+=1
return path,self.start
def updatePosition(self,direction,posX,posY):
"""
Update (x,y) coordinates depend on a direction
"""
if direction==0:
posX-=1
elif direction==1:
posX+=1
elif direction==2:
posY+=1
elif direction==3:
posY-=1
return posX,posY
def greedyChoice(self,posX,posY):
"""
Epsilon-Greedy choice
"""
        #Take the line in the QTable corresponding to the position
ligne = self.Qtable[posX*self.width+posY]
if uniform(0,1)>=self.epsilon:
#best choice of the line
return ligne.index(max(ligne))
else:
#Or take a random position
#Not the most elegant way to do it
choice = []
for i in range(4):
if ligne[i]!=-1:
choice.append(i)
pos = randint(0,len(choice)-1)
return choice[pos]
def createQ(self):
"""
Create the Qtable
        Globally, we just test whether the value at each neighbouring position is a wall or not
"""
self.Qtable = []
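        # Layout (deduced from the loops below): Qtable has length*width rows,
        # one per cell, and each row is [up, down, right, left]; -1 marks a
        # move blocked by a wall or by the border of the maze.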
for i in range(self.length):
for j in range(self.width):
ligne = []
#up
if i-1<0 or self.maze[i-1][j]==4:
ligne.append(-1)
else:
ligne.append(0)
#bottom
if i+1>=self.length or self.maze[i+1][j]==4:
ligne.append(-1)
else:
ligne.append(0)
#right
if j+1>=self.width or self.maze[i][j+1]==4:
ligne.append(-1)
else:
ligne.append(0)
#left
if j-1<0 or self.maze[i][j-1]==4:
ligne.append(-1)
else:
ligne.append(0)
self.Qtable.append(ligne)
if __name__ == "__main__":
"""
Test a maze
"""
maze = [
[0,0,0,0,0,0,0,4,2],
[0,0,0,0,0,4,0,4,0],
[0,0,0,0,3,4,0,0,0],
[0,0,0,0,0,4,4,4,0],
[1,0,0,0,0,0,0,4,3]
]
solver = Solver(maze,interface=False)
print(solver.learning())
|
python
|
# -*- coding: utf-8 -*-
import time
# from datetime import datetime, timedelta
from flask import Blueprint, request, Response, abort
from sqlalchemy import func, and_, or_
import os
import math
import ujson
import logging
from datetime import datetime
from models import FDMEvent, FDMEventType, FDMEventGroup, DataQarSessionIdent, DataQarFile, ConfFleet, ConfAcType
from models import ConfOperator, FDMEventSort, FDMEventDetails, FDMEventLog, FDMParam, ConfAfm, ConfSop, FDMParamScale
from models import DataQar, User, FrontDataLogging, ConfFDMApi, Flight, FDMEventParticipant, EFSUser, FDMMultilimitEvent, DataQarPhase
from models import Program, Task, CoreAirport, FDMCalculatedEvent, DataQarFlightIdent, FDMFlightParticipant, ExportParamName, FDMSystemUser
from database import db_session
from sqlalchemy.orm import aliased
from decorator.auth_decorators import permission_required, login_required, get_user_data
from utils import get_int_or_none
import datetime
import sys
import smtplib
import dns.resolver
from threading import Timer, Thread
import time
import random
from os import environ
reload(sys)
sys.setdefaultencoding("utf-8")
import codecs
from collections import OrderedDict
restService = Blueprint('restService', __name__, url_prefix='/api')
logger = logging.getLogger('WskaznikiBezpieczenstwa')
hdlr = logging.FileHandler('.\page-access.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
logger.info("Logger started...")
def sendmail(FROM = '', SUBJECT = 'Hi'):
TO = []
final_results = []
results = map(lambda fi: {
'email': fi.email,
}, db_session.query(User.email)
.filter(User.is_active == 1)
.all())
for i in results:
TO.append(i['email'])
    print "New events found. \nSending mail to client"
    TEXT = "Hi\nNew events appear"
print TO
TO = ['']
#message = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (FROM, TO, SUBJECT, TEXT)
for i in TO:
message = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (FROM, i, SUBJECT, TEXT)
domain = i.split('@')[1]
mx = str(dns.resolver.query(domain, 'MX')[0].exchange)
server = smtplib.SMTP(mx)
server.sendmail(FROM, i, message)
server.quit()
def checkIncidents(intervall = 10):
while True:
time.sleep(float(environ['NEW_INCIDENTS_CHECK_INTERVALL']))
results = map(lambda fi: {
'events_nr': fi.events_nr,
}, db_session.query(FrontDataLogging.events_nr)
.filter(FrontDataLogging.is_notified == -1)
.all())
for i in results:
if i['events_nr'] > 0:
repeat = True;
while repeat == True:
e = None
e = FrontDataLogging.query.filter(FrontDataLogging.is_notified == -1).first()
if e == None:
repeat = False
break;
e.is_notified = 0
try:
db_session.add(e)
db_session.commit()
except:
abort(400)
sendmail()
break;
@restService.route('/users', methods=['GET'])
@login_required()
def get_users():
if request.method == 'GET':
user = request.args.get('user')
logger.info("[%s][Specific Range]",
user)
results = map(lambda fi: {
'value': fi.ID,
'label': fi.first_name + ' ' + fi.last_name,
}, db_session.query(User.ID, User.first_name, User.last_name)
.all())
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/event_list', methods=['GET'])
@login_required()
def get_all_events():
"""
Fetch all event list from database.
"""
if request.method == 'GET':
date_from = request.args.get('date_from')
date_to = request.args.get('date_to')
user = request.args.get('user')
temparray = request.args.get('events_id')
temparray1 = request.args.get('programs_id')
events_id = ProcessData(temparray) #Transform data
programs_id = ProcessData(temparray1) # Transform data
assigned_to = request.args.get('assigned_to')
status = request.args.get('status')
importance = request.args.get('importance')
event_with_program = request.args.get('event_with_program')
logger.info("[%s][Specific Range] DateFrom: %s, DateTo: %s,",
user, date_from, date_to)
is_efs = is_efs_connected().response[0]
cau = aliased(FDMEvent)
war = aliased(FDMEvent)
baked_query = db_session.query(FDMEvent.session_id, ConfAcType.model, ConfFleet.ac_reg, DataQarSessionIdent.apt_origin,
DataQarSessionIdent.apt_dest, DataQarSessionIdent.block_off,
DataQarSessionIdent.block_on, func.count(cau.id).label('cautions'), func.count(war.id).label('warnings'))\
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id)\
.outerjoin(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id)\
.outerjoin(ConfFleet, DataQarFile.ac_id == ConfFleet.id)\
.outerjoin(ConfAcType, ConfFleet.ac_type == ConfAcType.id)\
.outerjoin(cau, and_(FDMEvent.id == cau.id, cau.severity == 'caution'))\
.outerjoin(war, and_(FDMEvent.id == war.id, war.severity == 'warning')) \
.group_by(FDMEvent.session_id)
if event_with_program == 0 or event_with_program == "0":
baked_query = baked_query.join(FDMEventParticipant, FDMEvent.id == FDMEventParticipant.event_id)
baked_query = baked_query.outerjoin(Flight, Flight.flight_id == FDMEventParticipant.flightlog_flt_id)
else:
baked_query = baked_query.outerjoin(FDMEventParticipant, FDMEvent.id == FDMEventParticipant.event_id)
baked_query = baked_query.outerjoin(Flight, Flight.flight_id == FDMEventParticipant.flightlog_flt_id)
if date_from != 'null':
baked_query = baked_query.filter(DataQarSessionIdent.block_off > date_from)
if date_to != 'null':
baked_query = baked_query.filter(DataQarSessionIdent.block_off < date_to)
if assigned_to != 'null' and assigned_to != 'all':
baked_query = baked_query.filter(FDMEvent.assigned_to == assigned_to)
if assigned_to == 'null':
baked_query = baked_query.filter(FDMEvent.assigned_to == None)
if status != 'null' and status != 'all':
baked_query = baked_query.filter(FDMEvent.status == status)
if status == 'null':
baked_query = baked_query.filter(FDMEvent.status == None)
if importance != 'null' and importance != 'all':
baked_query = baked_query.filter(FDMEvent.importance == importance)
if importance == 'null':
baked_query = baked_query.filter(FDMEvent.importance == None)
if len(events_id) > 0 and events_id[0] != 0:
baked_query = baked_query.filter(FDMEvent.event_type_id.in_(events_id))
if events_id == []:
baked_query = baked_query.filter(FDMEvent.event_type_id.in_(events_id))
if len(programs_id) > 0 and programs_id[0] != 0:
baked_query = baked_query.filter(or_(Flight.training_task_id.in_(programs_id), FDMEventParticipant.flightlog_flt_id == None))
elif programs_id == []:
baked_query = baked_query.filter(or_(Flight.training_task_id.in_(programs_id), FDMEventParticipant.flightlog_flt_id == None))
else:
baked_query = baked_query.filter(or_(Flight.training_task_id.in_([]), FDMEventParticipant.flightlog_flt_id == None))
sta = aliased(DataQar)
sto = aliased(DataQar)
results = map(lambda fi:{
'session_id': fi.session_id,
'ac_type': fi.model,
'ac_reg': fi.ac_reg,
'airport_departure': fi.apt_origin,
'airport_arrival': fi.apt_dest,
'block_off': fi.block_off,
'block_on': fi.block_on,
'cautions': fi.cautions,
'warnings': fi.warnings,
'events': map(lambda gi:{
'event_id': gi.id,
'event_group': gi.event_group,
'event_subgroup': gi.event,
'event_type': gi.description,
'event_start': gi.start,
'event_end': gi.end,
'severity': gi.severity,
'assigned_to': gi.assigned_to,
'status': gi.status,
'importance': gi.importance
}, db_session.query(FDMEvent.id, FDMEventSort.description.label('event_group'), FDMEventGroup.event,
FDMEventType.description, sta.TS.label('start'), sto.TS.label('end'), FDMEvent.severity,
(User.first_name + ' ' + User.last_name).label('assigned_to'), FDMEvent.status, FDMEvent.importance)
.outerjoin(FDMEventType, FDMEvent.event_type_id == FDMEventType.id)
.outerjoin(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id)
.outerjoin(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id)
.outerjoin(sta, FDMEvent.start_id == sta.id)
.outerjoin(sto, FDMEvent.stop_id == sto.id)
.outerjoin(User, FDMEvent.assigned_to == User.ID)
.filter(FDMEvent.session_id == fi.session_id, FDMEvent.is_visible == 1)
.all()
)
}, baked_query.all()
)
for entity in results:
c = 0
w = 0
for event in entity['events']:
if event['severity'] == 'caution':
c = c + 1
else:
w = w + 1
entity['cautions'] = c
entity['warnings'] = w
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/event_details/bars', methods=['GET'])
@login_required()
def get_event_details_bars():
"""
Fetch details of details.
"""
if request.method == 'GET':
event_id = request.args.get('event_id')
user = request.args.get('user')
logger.info("[%s][Event Details] Event Id: %s",
user, event_id)
event_id = get_int_or_none(event_id)
results = map(lambda fi: {
'param_name': fi.param_name,
'param_unit': fi.param_unit,
'param_full': fi.param_full,
'unit_full': fi.unit_full,
'start': fi.start,
'stop': fi.stop,
'max': fi.max,
'min': fi.min,
'avg': fi.avg,
'var': fi.var,
'caution_limit': fi.caution_limit,
'warning_limit': fi.warning_limit,
'limit_type': fi.limit_type,
'left_border': fi.left_border,
'right_border': fi.right_border,
'value': fi.value,
}, db_session.query(FDMEvent.id, FDMParam.param_name, FDMParam.param_unit, FDMParam.param_full,
FDMParam.unit_full, func.IF(FDMParamScale.is_start_value, FDMEventDetails.start_value, None).label('start'),
func.IF(FDMParamScale.is_stop_value, FDMEventDetails.stop_value, None).label('stop'),
func.IF(FDMParamScale.is_max, FDMEventDetails.max, None).label('max'),
func.IF(FDMParamScale.is_min, FDMEventDetails.min, None).label('min'),
func.IF(FDMParamScale.is_avg, FDMEventDetails.avg, None).label('avg'),
func.IF(FDMParamScale.is_var, FDMEventDetails.var, None).label('var'),
func.IF(FDMParamScale.is_value, FDMEventDetails.value, None).label('value'),
func.coalesce(ConfAfm.caution, ConfSop.caution).label('caution_limit'),
func.coalesce(ConfAfm.warning, ConfSop.warning).label('warning_limit'),
func.coalesce(ConfAfm.left_border, ConfSop.left_border).label('left_border'),
func.coalesce(ConfAfm.right_border, ConfSop.right_border).label('right_border'),
FDMParamScale.limit_type,
FDMEventDetails.value)
.join(FDMEventType, FDMEventType.id == FDMEvent.event_type_id)
.join(DataQarSessionIdent, DataQarSessionIdent.session_id == FDMEvent.session_id)
.join(DataQarFile, DataQarFile.qar_id == DataQarSessionIdent.qar_id)
.join(ConfFleet, ConfFleet.id == DataQarFile.ac_id)
.join(FDMEventDetails, FDMEventDetails.event_id == FDMEvent.id)
.join(FDMEventLog, and_(FDMEventLog.event_type_id == FDMEvent.event_type_id,
FDMEventLog.param_id == FDMEventDetails.param_id))
.join(FDMParam, FDMParam.id == FDMEventLog.param_id)
.outerjoin(ConfAfm, and_(ConfAfm.event_type_id == FDMEvent.event_type_id,
ConfAfm.param_id == FDMEventDetails.param_id,
ConfAfm.ac_type_id == ConfFleet.ac_type,
FDMEventType.limit_source == 'afm'))
.outerjoin(ConfSop, and_(ConfSop.event_type_id == FDMEvent.event_type_id,
ConfSop.param_id == FDMEventDetails.param_id,
FDMEventType.limit_source == 'sop'))
.join(FDMParamScale, FDMParamScale.log_id == FDMEventLog.id)
.filter(FDMEventLog.is_head == True, FDMEvent.id == event_id)
.all())
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/event_group', methods=['GET'])
@login_required()
def get_groups():
if request.method == 'GET':
user = request.args.get('user')
logger.info("[%s][Specific Range]",
user)
results = map(lambda fi: {
'event_group_id': fi.id,
'label': fi.description,
'value': fi.description,
'children': map(lambda gi: {
'event_subgroup_id': gi.id,
'label': gi.event,
'children': map(lambda hi: {
'event_type_id': hi.id,
'label': hi.description,
}, db_session.query(FDMEventType.description, FDMEventType.id)
.filter(FDMEventType.event_subgroup_id == gi.id)
.all()),
}, db_session.query(FDMEventGroup.event, FDMEventGroup.id)
.filter(FDMEventGroup.event_group_id == fi.id)
.all()),
}, db_session.query(FDMEventSort.description, FDMEventSort.id)
.all())
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/programs', methods=['GET'])
@login_required()
def get_programs():
if request.method == 'GET':
user = request.args.get('user')
logger.info("[%s][Specific Range]",
user)
results = map(lambda fi: {
'program_id': fi.id,
'label': fi.name,
'value': fi.name,
'description': fi.description,
'children': map(lambda gi: {
'task_id': gi.id,
'description': gi.description,
'label': gi.name
}, db_session.query(Task.name, Task.id, Task.description)
.filter(Task.program_id == fi.id)
.all()),
}, db_session.query(Program.description, Program.id, Program.name)
.all())
return Response(ujson.dumps(results), mimetype='application/json')
def ProcessData(arr):
if ((len(arr) != 0) and ('u' not in arr) and ('N' not in arr)):
x = ''
idarray = []
if arr == u'Nan':
return idarray
for num in arr:
if (num != ','):
x += str(num)
else:
idarray.append(int(x))
x = ''
idarray.append(int(x))
final_list = idarray
return final_list
elif ((len(arr) != 0) and 'u' in arr) or ((len(arr) != 0) and 'N' in arr):
return [-1]
else:
return []
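# For illustration: ProcessData(u"3,17,42") returns [3, 17, 42]; an empty string
# returns [] and inputs containing 'u' or 'N' (e.g. u"undefined", u"Nan") return [-1].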
def valueSwitch(x, table, alt_offset, war_ias, cau_ias):
return {
'TS': table.TS,
'GS': table.GS,
'IAS': table.IAS,
'FLAPS': table.FLAPS,
'E1_OIL_T': table.E1_OIL_T,
'ROLL': table.ROLL,
'PITCH': table.PITCH,
'ALT_GPS': table.ALT_GPS,
'VS': table.VS,
'E1_RPM_1': table.E1_RPM_1,
'ACC_NORM': table.ACC_NORM,
'E1_VOLT_1': table.E1_VOLT_1,
'E1_AMP_1': table.E1_AMP_1,
'E1_OIL_P': table.E1_OIL_P,
'E1_FP': table.E1_FP,
'E1_CHT_1': table.E1_CHT_1,
'E1_CHT_2': table.E1_CHT_2,
'E1_CHT_3': table.E1_CHT_3,
'E1_CHT_4': table.E1_CHT_4,
'TRK': table.TRK,
'HDG_MAG': table.HDG_MAG,
'FQtyL': table.FQtyL,
'FQtyR': table.FQtyR,
'E1_FF_1': table.E1_FF_1,
'HGT': table.HGT,
'ALT_BARO': table.ALT_BARO,
'BARO': table.BARO,
'gradient': table.VS/table.GS,
'tailwind': table.GS - (table.IAS * (1 + (table.ALT_GPS/1000 * 0.02))),
'time': table.TS,
'crosswind': (((table.GS * func.SIN(func.RADIANS(func.IF(func.ABS(table.TRK - table.HDG_MAG) > 180, func.ABS(360 - func.ABS(table.TRK - table.HDG_MAG)),func.ABS(table.TRK - table.HDG_MAG))))) / (func.SQRT(func.POW((table.IAS * (1 + (table.ALT_GPS/1000 * 0.02))),2) + func.POW(table.GS,2) - 2*(table.IAS * (1 + (table.ALT_GPS/1000 * 0.02)))*table.GS*func.COS(func.RADIANS(func.IF(func.ABS(table.TRK - table.HDG_MAG) > 180, func.ABS(360 - func.ABS(table.TRK - table.HDG_MAG)), func.ABS(table.TRK - table.HDG_MAG))))))) * (func.SQRT(func.POW((table.IAS * (1 + (table.ALT_GPS/1000 * 0.02))),2) + func.POW(table.GS,2) - 2*(table.IAS * (1 + (table.ALT_GPS/1000 * 0.02)))*table.GS*func.COS(func.RADIANS(func.IF(func.ABS(table.TRK - table.HDG_MAG) > 180, func.ABS(360 - func.ABS(table.TRK - table.HDG_MAG)), func.ABS(table.TRK - table.HDG_MAG))))))),
'vs_to_hgt': table.VS/(table.HGT - alt_offset),
'roll_to_hgt': table.ROLL/(table.HGT - alt_offset),
'elev': table.ALT_GPS - (table.HGT - alt_offset),
# 'duration': ,
# 'lost_fuel': ,
# 'lndg_dist': ,
# 'trk_var': ,
# 'runup_duration': ,
# 'fuel_endurance': ,
# 'elev_rate': ,
'alt_dev': func.ABS(table.ALT_GPS - table.ALT_BARO),
'fuel_diff': func.ABS(table.FQtyL - table.FQtyR),
# 'end_of_rwy': ,
# 'nearest_ad': ,
# 'tcr_to_hgt': ,
'caution_ias': FDMMultilimitEvent.limit_caution,
'warning_ias': FDMMultilimitEvent.limit_warning,
# 'spd_to_dist_ratio': ,
'hgt_corr': table.HGT - alt_offset,
'cycle': table.cycle
}.get(x)
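# valueSwitch maps a parameter name to a SQLAlchemy column or expression on the
# given table alias, e.g. 'IAS' yields table.IAS while derived names such as
# 'gradient' yield expressions like table.VS / table.GS. Note that the dict is
# built eagerly, so every expression is constructed on each call.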
@restService.route('/event_details/chart', methods=['GET'])
@login_required()
def get_event_details_chart():
"""
Fetch details of details.
"""
if request.method == 'GET':
event_id = request.args.get('event_id')
user = request.args.get('user')
logger.info("[%s][Event Details] Event Id: %s",
user, event_id)
event_id = get_int_or_none(event_id)
results = db_session.query(FDMEvent.id, FDMParam.param_name, FDMParam.is_primary, FDMParam.calculations,
FDMParamScale.is_color_scale, FDMParamScale.limit_type, FDMEvent.start_id,
FDMEvent.stop_id, FDMEventDetails.min, FDMEventDetails.max,
func.coalesce(ConfAfm.caution, ConfSop.caution).label('caution_limit'),
func.coalesce(ConfAfm.warning, ConfSop.warning).label('warning_limit'),
ConfFleet.alt_gps_offset, FDMEventLog.is_chart, FDMMultilimitEvent.limit_warning, FDMMultilimitEvent.limit_caution,
func.IF(FDMParamScale.is_max == 0, 1, 2).label('critical_value'),
FDMParam.param_name_front, FDMParam.param_full, FDMParam.param_unit, FDMEventType.is_every_second,
FDMParamScale.is_mirror_reflection, FDMEvent.event_type_id, FDMEventLog.is_abs, FDMParam.is_calculated, FDMParam.id )\
.join(FDMEventLog, FDMEvent.event_type_id == FDMEventLog.event_type_id)\
.join(FDMParam, FDMEventLog.param_id == FDMParam.id)\
.join(FDMEventType, FDMEventType.id == FDMEvent.event_type_id)\
.join(DataQarSessionIdent, DataQarSessionIdent.session_id == FDMEvent.session_id)\
.join(DataQarFile, DataQarFile.qar_id == DataQarSessionIdent.qar_id) \
.join(ConfFleet, ConfFleet.id == DataQarFile.ac_id)\
.outerjoin(FDMParamScale, FDMEventLog.id == FDMParamScale.log_id) \
.outerjoin(FDMEventDetails, and_(FDMEventDetails.event_id == FDMEvent.id,
FDMEventDetails.param_id == FDMEventLog.param_id)) \
.outerjoin(ConfAfm, and_(ConfAfm.event_type_id == FDMEvent.event_type_id,
ConfAfm.param_id == FDMEventDetails.param_id,
ConfAfm.ac_type_id == ConfFleet.ac_type,
FDMEventType.limit_source == 'afm'))\
.outerjoin(ConfSop, and_(ConfSop.event_type_id == FDMEvent.event_type_id,
ConfSop.param_id == FDMEventDetails.param_id,
FDMEventType.limit_source == 'sop'))\
.outerjoin(FDMMultilimitEvent, FDMEvent.start_id == FDMMultilimitEvent.data_qar_id)\
.filter(FDMEvent.id == event_id)\
.order_by(FDMEventLog.order_nr).all()
baked_query = db_session.query(DataQar.cycle, DataQar.TS).outerjoin(FDMMultilimitEvent, FDMMultilimitEvent.data_qar_id == DataQar.id)
headers = ['id']
results2 = []
coloring_data = []
for entity in results:
#Map to DB column
"""
0 - event id
1 - column name
2 - if column is primary
3 - does it need special calculation
4 - is color scaled
5 - which way to color
6 - start_id
7 - stop_id
8 - min
9 - max
10 - caution limit
11 - warning limit
12 - alt offset
13 - is in chart
14 - multilimit warning
15 - multilimit caution
16 - is max value
17 - param front name
18 - param description
19 - param unit
20 - is second
21 - mirror reflection
22 - event type id
23 - is abs
24 - is calculated
25 - Param id
"""
if entity[13]:
if entity[24] != 1:
baked_query = baked_query.add_columns(valueSwitch(entity[1], DataQar, entity[12], entity[14], entity[15]))
h = {
'name': entity[1],
'front_name': entity[17],
'full_name': entity[18],
'param_unit': entity[19],
'is_second': entity[20],
'caution_limit': entity[10],
'warning_limit': entity[11],
'is_mirror_reflection': entity[21],
'is_chart': entity[13],
'event_type_id': entity[22],
'is_abs': entity[23],
'is_calculated': entity[24]
}
headers.append(h)
else:
baked_query = db_session.query(DataQar.cycle, DataQar.TS, FDMCalculatedEvent.value).filter(FDMCalculatedEvent.data_qar_id == DataQar.id) \
.filter(FDMCalculatedEvent.param_id == entity[25])
h = {
'name': entity[1],
'front_name': entity[17],
'full_name': entity[18],
'param_unit': entity[19],
'is_second': entity[20],
'caution_limit': entity[10],
'warning_limit': entity[11],
'is_mirror_reflection': entity[21],
'is_chart': entity[13],
'event_type_id': entity[22],
'is_abs': entity[23],
'is_calculated': entity[24]
}
headers.append(h)
results = baked_query.filter(DataQar.id >= results[0][6], DataQar.id <= results[0][7]).order_by(DataQar.id).all()
results2.append(tuple(headers))
for item in results:
results2.append(item)
final_results = []
final_results.append(results2)
results3 = db_session.query(FDMMultilimitEvent.limit_warning, FDMMultilimitEvent.limit_caution,
FDMMultilimitEvent.limit_type, DataQar.TS, FDMMultilimitEvent.event_type_id) \
.filter(and_(FDMEvent.start_id <= FDMMultilimitEvent.data_qar_id,
FDMEvent.stop_id >= FDMMultilimitEvent.data_qar_id)) \
.filter(FDMEvent.id == event_id) \
            .filter(FDMMultilimitEvent.data_qar_id == DataQar.id).all()
final_results.append(results3)
return Response(ujson.dumps(final_results), mimetype='application/json')
@restService.route('/event_details/table', methods=['GET'])
@login_required()
def get_event_details_table():
"""
Fetch details of details.
"""
if request.method == 'GET':
event_id = request.args.get('event_id')
user = request.args.get('user')
logger.info("[%s][Event Details] Event Id: %s",
user, event_id)
event_id = get_int_or_none(event_id)
results = db_session.query(FDMEvent.id, FDMParam.param_name, FDMParam.is_primary, FDMParam.calculations,
FDMParamScale.is_color_scale, FDMParamScale.limit_type, FDMEvent.start_id,
FDMEvent.stop_id, FDMEventDetails.min, FDMEventDetails.max,
func.coalesce(ConfAfm.caution, ConfSop.caution).label('caution_limit'),
func.coalesce(ConfAfm.warning, ConfSop.warning).label('warning_limit'),
ConfFleet.alt_gps_offset, FDMEventLog.is_details, FDMMultilimitEvent.limit_warning, FDMMultilimitEvent.limit_caution,
func.IF(FDMParamScale.is_max == 0, 1, 2).label('critical_value'),
FDMParam.param_name_front, FDMParam.param_full, FDMParam.param_unit, FDMEventType.is_every_second, FDMParam.is_calculated, FDMEventLog.is_abs, FDMEvent.session_id, FDMParamScale.id, ConfFleet.rec_type)\
.join(FDMEventLog, FDMEvent.event_type_id == FDMEventLog.event_type_id)\
.join(FDMParam, FDMEventLog.param_id == FDMParam.id)\
.join(FDMEventType, FDMEventType.id == FDMEvent.event_type_id)\
.join(DataQarSessionIdent, DataQarSessionIdent.session_id == FDMEvent.session_id)\
.join(DataQarFile, DataQarFile.qar_id == DataQarSessionIdent.qar_id) \
.join(ConfFleet, ConfFleet.id == DataQarFile.ac_id)\
.outerjoin(FDMParamScale, FDMEventLog.id == FDMParamScale.log_id) \
.outerjoin(FDMEventDetails, and_(FDMEventDetails.event_id == FDMEvent.id,
FDMEventDetails.param_id == FDMEventLog.param_id)) \
.outerjoin(ConfAfm, and_(ConfAfm.event_type_id == FDMEvent.event_type_id,
ConfAfm.param_id == FDMEventDetails.param_id,
ConfAfm.ac_type_id == ConfFleet.ac_type,
FDMEventType.limit_source == 'afm'))\
.outerjoin(ConfSop, and_(ConfSop.event_type_id == FDMEvent.event_type_id,
ConfSop.param_id == FDMEventDetails.param_id,
FDMEventType.limit_source == 'sop'))\
.outerjoin(FDMMultilimitEvent, FDMEvent.start_id == FDMMultilimitEvent.data_qar_id)\
.filter(FDMEvent.id == event_id)\
.order_by(FDMEventLog.order_nr).all()
baked_query = db_session.query(DataQar.id).outerjoin(FDMMultilimitEvent, FDMMultilimitEvent.data_qar_id == DataQar.id)
headers = ['id']
results2 = []
coloring_data = []
for entity in results:
#Map to DB column
#array desc:
"""
0 - event id
1 - column name
2 - if column is primary
3 - does it need special calculation
4 - is color scaled
5 - which way to color
6 - start_id
7 - stop_id
8 - min
9 - max
10 - caution limit
11 - warning limit
12 - alt offset
13 - is in table
14 - multilimit warning
15 - multilimit caution
16 - is max value
17 - param front name
18 - param description
19 - param unit
20 - is second
21 - is calculated
22 - is abs
23 - event session id
24 - paramscale id
25 - rec type
26 - phase desc
"""
if entity[13]:
if entity[21] != 1:
baked_query = baked_query.add_columns(valueSwitch(entity[1], DataQar, entity[12], entity[14], entity[15]) )
h = {
'name': entity[1],
'front_name': entity[17],
'full_name': entity[18],
'param_unit': entity[19],
'is_second': entity[20],
'rec_type': entity[25]
}
headers.append(h)
else:
baked_query = db_session.query(DataQar.cycle, DataQar.TS, DataQar.IAS, DataQar.GS, DataQar.ALT_GPS, DataQar.HGT, FDMCalculatedEvent.value, FDMCalculatedEvent.value, FDMCalculatedEvent.param_id).filter(
FDMCalculatedEvent.data_qar_id == DataQar.id)
h = {
'name': entity[1],
'front_name': entity[17],
'full_name': entity[18],
'param_unit': entity[19],
'is_second': entity[20],
'is_calculated': entity[21],
'rec_type': entity[25]
}
headers.append(h)
if entity[4]:
coloring_data.append(entity[1])
coloring_data.append(entity[5])
coloring_data.append(entity[8])
coloring_data.append(entity[9])
coloring_data.append(entity[10])
coloring_data.append(entity[11])
coloring_data.append(entity[16])
coloring_data.append(entity[20])
coloring_data.append(entity[22])
baked_query = baked_query.add_columns(DataQar.cycle, DataQarPhase.description).outerjoin(DataQarPhase, DataQar.PH == DataQarPhase.id)
if entity[25] == 2 or entity[25] == 4 or entity[25] == 5 or entity[25] == 3:
results = baked_query.filter(DataQar.id >= results[0][6]-60, DataQar.id <= results[0][7]+60).filter(
DataQar.session_id == entity[23]).filter(DataQar.cycle == 1).order_by(DataQar.id).all()
else:
results = baked_query.filter(DataQar.id >= results[0][6] - 240, DataQar.id <= results[0][7] + 240).filter(
DataQar.session_id == entity[23]).filter(DataQar.cycle == 1).order_by(DataQar.id).all()
results2.append(tuple(headers))
for item in results:
results2.append(item)
final_results = []
final_results.append(tuple(coloring_data))
final_results.append(results2)
results3 = db_session.query(FDMMultilimitEvent.limit_warning, FDMMultilimitEvent.limit_caution,
FDMMultilimitEvent.limit_type, DataQar.TS) \
.filter(and_(FDMEvent.start_id <= FDMMultilimitEvent.data_qar_id,
FDMEvent.stop_id >= FDMMultilimitEvent.data_qar_id)) \
.filter(FDMEvent.id == event_id) \
            .filter(FDMMultilimitEvent.data_qar_id == DataQar.id).all()
final_results.append(results3)
return Response(ujson.dumps(final_results), mimetype='application/json')
@restService.route('/event_details/map', methods=['GET'])
@login_required()
def get_event_details_map():
"""
Fetch details of details.
"""
if request.method == 'GET':
event_id = request.args.get('event_id')
user = request.args.get('user')
demo_env = os.environ.get('demo_env', 'False')
logger.info("[%s][Event Details] Event Id: %s",
user, event_id)
event_id = get_int_or_none(event_id)
results = db_session.query(FDMEvent.id, FDMParam.param_name, FDMParamScale.is_color_scale, FDMEvent.start_id,
FDMEvent.stop_id, ConfFleet.alt_gps_offset, FDMParamScale.limit_type,
FDMEventDetails.min, FDMEventDetails.max,
func.coalesce(ConfAfm.caution, ConfSop.caution).label('caution_limit'),
func.coalesce(ConfAfm.warning, ConfSop.warning).label('warning_limit'),
FDMMultilimitEvent.limit_warning, FDMMultilimitEvent.limit_caution,
func.IF(FDMParamScale.is_max == 0, 1, 2).label('critical_value'), ConfFleet.rec_type) \
.join(FDMEventLog, FDMEvent.event_type_id == FDMEventLog.event_type_id) \
.join(FDMParam, FDMEventLog.param_id == FDMParam.id) \
.join(DataQarSessionIdent, DataQarSessionIdent.session_id == FDMEvent.session_id) \
.join(DataQarFile, DataQarFile.qar_id == DataQarSessionIdent.qar_id) \
.join(ConfFleet, ConfFleet.id == DataQarFile.ac_id) \
.join(FDMEventType, FDMEventType.id == FDMEvent.event_type_id)\
.outerjoin(FDMParamScale, FDMEventLog.id == FDMParamScale.log_id) \
.outerjoin(FDMEventDetails, and_(FDMEventDetails.event_id == FDMEvent.id,
FDMEventDetails.param_id == FDMEventLog.param_id)) \
.outerjoin(ConfAfm, and_(ConfAfm.event_type_id == FDMEvent.event_type_id,
ConfAfm.param_id == FDMEventDetails.param_id,
ConfAfm.ac_type_id == ConfFleet.ac_type,
FDMEventType.limit_source == 'afm')) \
.outerjoin(ConfSop, and_(ConfSop.event_type_id == FDMEvent.event_type_id,
ConfSop.param_id == FDMEventDetails.param_id,
FDMEventType.limit_source == 'sop')) \
.outerjoin(FDMMultilimitEvent, FDMEvent.start_id == FDMMultilimitEvent.data_qar_id)\
.filter(FDMEvent.id == event_id) \
.all()
start_id = results[0][3]
stop_id = results[0][4]
alt_gps_offset = results[0][5]
multilimit_warning = results[0][11]
multilimit_caution = results[0][12]
param_name = ''
coloring_data = []
for entry in results:
if entry[2]:
param_name = entry[1]
coloring_data.append(entry[6])
coloring_data.append(entry[7])
coloring_data.append(entry[8])
coloring_data.append(entry[9])
coloring_data.append(entry[10])
coloring_data.append(entry[13])
prev_data_qar = aliased(DataQar)
next_data_qar = aliased(DataQar)
time_alt = 0
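        # Margin (in samples) added around the event window; the size presumably
        # depends on the recorder type (entry[14] is ConfFleet.rec_type) and its
        # sampling rate.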
if entry[14] == 4 or entry[14] == 5 or entry[14] == 3 or entry[14] == 2:
time_alt = 60
else:
time_alt = 240
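        # In demo environments the coordinates are shifted by a random offset,
        # presumably so that real flight tracks are not exposed.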
randLat = random.uniform(0, 1) + 0.75
randLng = random.uniform(0, 1) + 2.5
if (param_name == ''):
param_name = 'IAS'
results = map(lambda fi: {
'LAT1': fi.LAT1 - (randLat if (demo_env == 'True') else 0),
'LNG1': fi.LNG1 - (randLng if (demo_env == 'True') else 0),
'LAT2': fi.LAT2 - (randLat if (demo_env == 'True') else 0),
'LNG2': fi.LNG2 - (randLng if (demo_env == 'True') else 0),
'value': fi.value,
'time': fi.time,
}, db_session.query(prev_data_qar.LAT.label('LAT1'), prev_data_qar.LNG.label('LNG1'),
next_data_qar.LAT.label('LAT2'), next_data_qar.LNG.label('LNG2'),
valueSwitch(param_name, prev_data_qar, alt_gps_offset, multilimit_warning, multilimit_caution).label('value'), prev_data_qar.TS.label('time'))
.join(next_data_qar, next_data_qar.id == prev_data_qar.id + 1)
.filter(prev_data_qar.id >= start_id-time_alt, prev_data_qar.id <= stop_id+time_alt)
.all())
else:
results = map(lambda fi: {
'LAT1': fi.LAT1 - (randLat if (demo_env == 'True') else 0),
'LNG1': fi.LNG1 - (randLng if (demo_env == 'True') else 0),
'LAT2': fi.LAT2 - (randLat if (demo_env == 'True') else 0),
'LNG2': fi.LNG2 - (randLng if (demo_env == 'True') else 0),
'value': fi.value,
}, db_session.query(prev_data_qar.LAT.label('LAT1'), prev_data_qar.LNG.label('LNG1'),
next_data_qar.LAT.label('LAT2'), next_data_qar.LNG.label('LNG2'),
valueSwitch(param_name, prev_data_qar, alt_gps_offset, multilimit_warning, multilimit_caution).label('value'))
.join(next_data_qar, next_data_qar.id == prev_data_qar.id + 1)
.filter(prev_data_qar.id >= start_id-time_alt, prev_data_qar.id <= stop_id+time_alt)
.all())
results2 = []
results2.append(tuple(coloring_data))
results2.append(results)
results = results2
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/event_details/basic_info', methods=['GET', 'PUT'])
@login_required()
def get_info():
"""
    Fetch (GET) or update (PUT) the basic information of a single event.
"""
if request.method == 'GET':
is_efs = is_efs_connected().response[0]
user = request.args.get('user')
event_id = request.args.get('event_id')
logger.info("[%s][Specific Range]",
user)
sta = aliased(DataQar)
sto = aliased(DataQar)
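        # Aliases for the DataQar samples that mark the start and stop of the event.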
if is_efs == 'true':
instr = aliased(EFSUser)
stud = aliased(EFSUser)
results = map(lambda fi: {
'event_id': fi.id,
'event_group': fi.event_group,
'event_subgroup': fi.event,
'event_type': fi.description,
'event_start': fi.start,
'event_end': fi.end,
'severity': fi.severity,
'assigned_to': fi.assigned_to,
'status': fi.status,
'importance': fi.importance,
'student': fi.stud,
'instructor': fi.instr,
'ac_type': fi.model,
'ac_reg': fi.ac_reg,
'airport_departure': fi.apt_origin,
'airport_arrival': fi.apt_dest,
'session_id': fi.session_id
}, db_session.query(FDMEvent.id, FDMEventSort.description.label('event_group'), FDMEventGroup.event,
FDMEventType.description, sta.TS.label('start'), sto.TS.label('end'), FDMEvent.severity,
FDMEvent.assigned_to, FDMEvent.status, FDMEvent.importance, ConfAcType.model,
ConfFleet.ac_reg, DataQarSessionIdent.apt_origin, DataQarSessionIdent.apt_dest,
(stud.first_name + ' ' + stud.last_name).label('stud'),
(instr.first_name + ' ' + instr.last_name).label('instr'), FDMEvent.session_id)
.outerjoin(FDMEventType, FDMEvent.event_type_id == FDMEventType.id)
.outerjoin(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id)
.outerjoin(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id)
.outerjoin(sta, FDMEvent.start_id == sta.id)
.outerjoin(sto, FDMEvent.stop_id == sto.id)
.outerjoin(FDMEventParticipant, FDMEvent.id == FDMEventParticipant.event_id)
.outerjoin(Flight, FDMEventParticipant.flightlog_flt_id == Flight.flight_id)
.outerjoin(stud, Flight.trainee_id == stud.ID)
.outerjoin(instr, Flight.instructor_id == instr.ID)
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id)
.outerjoin(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id)
.outerjoin(ConfFleet, DataQarFile.ac_id == ConfFleet.id)
.outerjoin(ConfAcType, ConfFleet.ac_type == ConfAcType.id)
.filter(FDMEvent.id == event_id)
.all())
else:
results = map(lambda fi: {
'event_id': fi.id,
'event_group': fi.event_group,
'event_subgroup': fi.event,
'event_type': fi.description,
'event_start': fi.start,
'event_end': fi.end,
'severity': fi.severity,
'assigned_to': fi.assigned_to,
'status': fi.status,
'importance': fi.importance,
'ac_type': fi.model,
'ac_reg': fi.ac_reg,
'airport_departure': fi.apt_origin,
'airport_arrival': fi.apt_dest,
'session_id': fi.session_id
}, db_session.query(FDMEvent.id, FDMEventSort.description.label('event_group'), FDMEventGroup.event,
FDMEventType.description, sta.TS.label('start'), sto.TS.label('end'), FDMEvent.severity,
FDMEvent.assigned_to, FDMEvent.status, FDMEvent.importance, ConfAcType.model,
ConfFleet.ac_reg, DataQarSessionIdent.apt_origin, DataQarSessionIdent.apt_dest, FDMEvent.session_id, FDMEventGroup.id)
.outerjoin(FDMEventType, FDMEvent.event_type_id == FDMEventType.id)
.outerjoin(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id)
.outerjoin(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id)
.outerjoin(sta, FDMEvent.start_id == sta.id)
.outerjoin(sto, FDMEvent.stop_id == sto.id)
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id)
.outerjoin(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id)
.outerjoin(ConfFleet, DataQarFile.ac_id == ConfFleet.id)
.outerjoin(ConfAcType, ConfFleet.ac_type == ConfAcType.id)
.filter(FDMEvent.id == event_id)
.all())
editable_results = []
not_editable_results = []
header = []
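        # Split the returned fields into editable ones (status, importance, assigned_to),
        # a header entry (session_id) and read-only fields kept in a fixed display order.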
for item in results[0]:
o = {
'property': item,
'value': results[0][item]
}
if item == 'status' or item == 'importance' or item == 'assigned_to':
editable_results.append(o)
elif item == 'session_id':
header.append(o)
else:
if is_efs == 'true':
if item == 'event_type' or item == 'event_id':
not_editable_results.insert(0, o)
elif item == 'event_end' or item == 'severity':
not_editable_results.insert(1, o)
elif item == 'event_group' or item == 'event_subgroup':
not_editable_results.insert(2, o)
elif item == 'event_start':
not_editable_results.insert(4, o)
elif item == 'airport_departure' or item == 'instructor':
not_editable_results.insert(7, o)
elif item == 'student':
not_editable_results.insert(6, o)
elif item == 'ac_reg':
not_editable_results.insert(8, o)
else:
not_editable_results.append(o)
else:
if item == 'event_type' or item == 'event_id' or item == 'severity' or item == 'event_group':
not_editable_results.insert(0, o)
elif item == 'event_end':
not_editable_results.insert(1, o)
elif item == 'event_subgroup':
not_editable_results.insert(3, o)
elif item == 'event_start':
not_editable_results.insert(5, o)
elif item == 'airport_departure':
not_editable_results.insert(9, o)
else:
not_editable_results.append(o)
final_results = []
final_results.append(editable_results)
final_results.append(not_editable_results)
final_results.append(header)
return Response(ujson.dumps(final_results), mimetype='application/json')
if request.method == 'PUT':
if not request.json:
abort(400)
assigned_to = None
status = None
importance = None
event_id = None
e = None
for item in request.json:
if(item['property'] == 'assigned_to'):
assigned_to = item['value']
if(item['property'] == 'status'):
status = item['value']
if(item['property'] == 'importance'):
importance = item['value']
if(item['property'] == 'event_id'):
event_id = item['value']
user_data = get_user_data(request)
if event_id:
e = FDMEvent.query.filter(FDMEvent.id == event_id).first()
e.assigned_to = assigned_to
e.status = status
e.importance = importance
e.modify_ts = datetime.datetime.now()
e.modify_user = user_data['id']
else:
abort(406)
try:
db_session.add(e)
db_session.commit()
except:
abort(400)
return Response(), 204
@restService.route('/event_details/export_flight', methods=['GET'])
@login_required()
def get_flight_data_to_export():
if request.method == 'GET':
event_id = request.args.get('event_id')
ac_reg_data = request.args.get('ac_reg')
results = db_session.query(DataQar).filter(DataQar.session_id == db_session.query(FDMEvent.session_id)\
.filter(FDMEvent.id == event_id)).all()
results2 = db_session.query(ConfFleet.alt_gps_offset).filter(ConfFleet.ac_reg == ac_reg_data).all()
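        # Apply the aircraft's GPS altitude offset and strip SQLAlchemy internals so
        # the ORM rows can be serialised with ujson.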
for i in results:
i.HGT = i.HGT - results2[0][0]
i.metadata = None
i._decl_class_registry = None
i._sa_class_manager = None
i._sa_instance_state = None
i.query = None
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/event_details/export_event', methods=['GET'])
@login_required()
def get_event_data_to_export():
if request.method == 'GET':
event_id = request.args.get('event_id')
ac_reg_data = request.args.get('ac_reg')
logger.info("[Event Details] Event Id: %s",
event_id)
event_id = get_int_or_none(event_id)
event_start = db_session.query(FDMEvent.start_id).filter(FDMEvent.id == event_id).first()
event_start = get_int_or_none(event_start[0])
event_end = db_session.query(FDMEvent.stop_id).filter(FDMEvent.id == event_id).first()
event_end = get_int_or_none(event_end[0])
results = db_session.query(DataQar).filter(DataQar.id >= event_start).filter(DataQar.id <= event_end).all()
results2 = db_session.query(ConfFleet.alt_gps_offset).filter(ConfFleet.ac_reg == ac_reg_data).all()
for i in results:
i.HGT = i.HGT - results2[0][0]
i.metadata = None
i._decl_class_registry = None
i._sa_class_manager = None
i._sa_instance_state = None
i.query = None
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/event_details/column_name', methods=['GET'])
@login_required()
def get_file_column_name():
if request.method == 'GET':
results = db_session.query(ExportParamName.data_qar_name, ExportParamName.export_name, ExportParamName.param_order).all()
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/system_card', methods=['GET'])
@login_required()
def get_data_logging():
"""
    Fetch aggregated event statistics for the system card.
"""
if request.method == 'GET':
dataType = request.args.get('dataType')
if dataType == 'last_7_days':
cau = aliased(FDMEvent)
war = aliased(FDMEvent)
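            # Aliases of FDMEvent used to count caution and warning events separately.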
results1 = map(lambda fi: {
'group_type': fi.all_type,
'group_count': fi.group_count
}, db_session.query(FDMEvent.id, FDMEventSort.description.label('all_type'),
func.count(FDMEvent.event_type_id).label('group_count'))
.outerjoin(FDMEventType, FDMEvent.event_type_id == FDMEventType.id) \
.outerjoin(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id) \
.outerjoin(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id) \
.outerjoin(DataQar, FDMEvent.stop_id == DataQar.id)
.filter(DataQar.TS <= datetime.datetime.now()) \
.filter(DataQar.TS >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
.group_by(FDMEventSort.description) \
.all() \
)
results2 = map(lambda fi: {
'events_nr': fi.events_nr,
'cautions_nr': fi.cautions_nr,
'warnings_nr': fi.warnings_nr,
'operations_nr': fi.operations_nr
},
db_session.query(func.count(FDMEvent.session_id.distinct()).label('operations_nr'),
func.count(FDMEvent.id).label('events_nr'),
func.count(cau.id).label('cautions_nr'),
func.count(war.id).label('warnings_nr')) \
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.outerjoin(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id) \
.outerjoin(cau, and_(FDMEvent.id == cau.id, cau.severity == 'caution')) \
.outerjoin(war, and_(FDMEvent.id == war.id, war.severity == 'warning')) \
.outerjoin(DataQar, FDMEvent.stop_id == DataQar.id) \
.filter(DataQar.TS <= datetime.datetime.now()) \
.filter(DataQar.TS >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
)
results3 = map(lambda fi: {
'flights_nr': fi.flights_nr
},
db_session.query(func.count(DataQar.flight_id.distinct()).label('flights_nr'))
.filter(DataQar.flight_id != 'null')
.filter(DataQar.TS <= datetime.datetime.now()) \
.filter(DataQar.TS >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
)
results4 = map(lambda fi: {
'ts': fi.ts
},
db_session.query(FrontDataLogging.ts)
.order_by(FrontDataLogging.ts)
)
'''
results = map(lambda fi: {
'id': fi.id,
'processing_id': fi.processing_id,
'operations_nr': fi.operations_nr,
'events_nr': fi.events_nr,
'cautions_nr': fi.cautions_nr,
'warnings_nr': fi.warnings_nr,
'ua_nr': fi.ua_nr,
'cfit_nr': fi.cfit_nr,
'loc_nr': fi.loc_nr,
'eo_nr': fi.eo_nr,
'mac_nr': fi.mac_nr,
're_nr': fi.re_nr,
'others_nr': fi.others_nr,
'ts': fi.ts,
'flights_nr': fi.flights_nr,
}, db_session.query(FrontDataLogging.id, FrontDataLogging.processing_id, FrontDataLogging.operations_nr,
FrontDataLogging.events_nr, FrontDataLogging.cautions_nr, FrontDataLogging.warnings_nr,
FrontDataLogging.ua_nr, FrontDataLogging.cfit_nr, FrontDataLogging.loc_nr,
FrontDataLogging.eo_nr, FrontDataLogging.mac_nr, FrontDataLogging.re_nr,
FrontDataLogging.others_nr, FrontDataLogging.ts, FrontDataLogging.flights_nr)
.filter(FrontDataLogging.ts <= datetime.datetime.now()) \
.filter(FrontDataLogging.ts >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
.order_by(FrontDataLogging.id.desc()) \
.all()
)
'''
if dataType == 'last_month':
cau = aliased(FDMEvent)
war = aliased(FDMEvent)
results1 = map(lambda fi: {
'group_type': fi.all_type,
'group_count': fi.group_count
}, db_session.query(FDMEvent.id, FDMEventSort.description.label('all_type'),
func.count(FDMEvent.event_type_id).label('group_count'))
.outerjoin(FDMEventType, FDMEvent.event_type_id == FDMEventType.id) \
.outerjoin(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id) \
.outerjoin(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id) \
.outerjoin(DataQar, FDMEvent.stop_id == DataQar.id)
.filter(DataQar.TS <= datetime.datetime.now()) \
.filter(DataQar.TS >= (datetime.datetime.now() - datetime.timedelta(days=30))) \
.group_by(FDMEventSort.description) \
.all() \
)
results2 = map(lambda fi: {
'events_nr': fi.events_nr,
'cautions_nr': fi.cautions_nr,
'warnings_nr': fi.warnings_nr,
'operations_nr': fi.operations_nr
},
db_session.query(func.count(FDMEvent.session_id.distinct()).label('operations_nr'),
func.count(FDMEvent.id).label('events_nr'),
func.count(cau.id).label('cautions_nr'),
func.count(war.id).label('warnings_nr')) \
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.outerjoin(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id) \
.outerjoin(cau, and_(FDMEvent.id == cau.id, cau.severity == 'caution')) \
.outerjoin(war, and_(FDMEvent.id == war.id, war.severity == 'warning')) \
.outerjoin(DataQar, FDMEvent.stop_id == DataQar.id) \
.filter(DataQar.TS <= datetime.datetime.now()) \
.filter(DataQar.TS >= (datetime.datetime.now() - datetime.timedelta(days=30))) \
)
results3 = map(lambda fi: {
'flights_nr': fi.flights_nr
},
db_session.query(func.count(DataQar.flight_id.distinct()).label('flights_nr'))
.filter(DataQar.flight_id != 'null')
.filter(DataQar.TS <= datetime.datetime.now()) \
.filter(DataQar.TS >= (datetime.datetime.now() - datetime.timedelta(days=30))) \
)
results4 = map(lambda fi: {
'ts': fi.ts
},
db_session.query(FrontDataLogging.ts)
.order_by(FrontDataLogging.ts)
)
'''
results = map(lambda fi: {
'id': fi.id,
'processing_id': fi.processing_id,
'operations_nr': fi.operations_nr,
'events_nr': fi.events_nr,
'cautions_nr': fi.cautions_nr,
'warnings_nr': fi.warnings_nr,
'ua_nr': fi.ua_nr,
'cfit_nr': fi.cfit_nr,
'loc_nr': fi.loc_nr,
'eo_nr': fi.eo_nr,
'mac_nr': fi.mac_nr,
're_nr': fi.re_nr,
'others_nr': fi.others_nr,
'ts': fi.ts,
'flights_nr': fi.flights_nr,
}, db_session.query(FrontDataLogging.id, FrontDataLogging.processing_id,
func.sum(FrontDataLogging.operations_nr).label('operations_nr'),
func.sum(FrontDataLogging.events_nr).label('events_nr'),
func.sum(FrontDataLogging.cautions_nr).label('cautions_nr'),
func.sum(FrontDataLogging.warnings_nr).label('warnings_nr'),
func.sum(FrontDataLogging.ua_nr).label('ua_nr'),
func.sum(FrontDataLogging.cfit_nr).label('cfit_nr'),
func.sum(FrontDataLogging.loc_nr).label('loc_nr'),
func.sum(FrontDataLogging.eo_nr).label('eo_nr'),
func.sum(FrontDataLogging.mac_nr).label('mac_nr'),
func.sum(FrontDataLogging.re_nr).label('re_nr'),
func.sum(FrontDataLogging.others_nr).label('others_nr'), FrontDataLogging.ts,
func.sum(FrontDataLogging.flights_nr).label('flights_nr'))
.filter(FrontDataLogging.ts <= datetime.datetime.now()) \
.filter(FrontDataLogging.ts >= (datetime.datetime.now() - datetime.timedelta(days=30)) )\
.order_by(FrontDataLogging.id.desc()) \
.all()
)
'''
if dataType == 'all':
cau = aliased(FDMEvent)
war = aliased(FDMEvent)
results1 = map(lambda fi: {
'group_type': fi.all_type,
'group_count': fi.group_count
}, db_session.query(FDMEvent.id, FDMEventSort.description.label('all_type'), func.count(FDMEvent.event_type_id).label('group_count'))
.outerjoin(FDMEventType, FDMEvent.event_type_id == FDMEventType.id) \
.outerjoin(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id) \
.outerjoin(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id) \
.group_by(FDMEventSort.description) \
.all() \
)
results2 = map(lambda fi: {
'events_nr': fi.events_nr,
'cautions_nr': fi.cautions_nr,
'warnings_nr': fi.warnings_nr,
'operations_nr': fi.operations_nr
},
db_session.query(func.count(FDMEvent.session_id.distinct()).label('operations_nr'), func.count(FDMEvent.id).label('events_nr'),
func.count(cau.id).label('cautions_nr'), func.count(war.id).label('warnings_nr')) \
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.outerjoin(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id) \
.outerjoin(cau, and_(FDMEvent.id == cau.id, cau.severity == 'caution')) \
.outerjoin(war, and_(FDMEvent.id == war.id, war.severity == 'warning')) \
)
results3 = map(lambda fi: {
'flights_nr': fi.flights_nr
},
db_session.query(func.count(DataQar.flight_id.distinct()).label('flights_nr'))
.filter(DataQar.flight_id != 'null')
)
results4 = map(lambda fi: {
'ts': fi.ts
},
db_session.query(FrontDataLogging.ts)
.order_by(FrontDataLogging.ts)
)
result = results1+results2+results3+results4
return Response(ujson.dumps(result), mimetype='application/json')
@restService.route('/system_card_user_info', methods=['GET'])
@login_required()
def get_data_logging_user():
"""
    Fetch event status counts (all / new / in progress / analysed) for the system card.
"""
if request.method == 'GET':
dataType = request.args.get('dataType')
if dataType == 'all':
results1 = map(lambda fi: {
'all_events': fi.all_events
}, db_session.query(func.count(FDMEvent.id).label('all_events'))
.all()
)
results2 = map(lambda fi: {
'new_events': fi.new_events,
}, db_session.query(func.count(FDMEvent.id).label('new_events'))
.filter(FDMEvent.status == None)
.all()
)
results3 = map(lambda fi: {
'in_progress': fi.in_progress,
}, db_session.query(func.count(FDMEvent.id).label('in_progress'))
.filter(FDMEvent.status == "in progress")
.all()
)
results4 = map(lambda fi: {
'analysed': fi.analysed,
}, db_session.query(func.count(FDMEvent.id).label('analysed'))
.filter(FDMEvent.status == "analysed")
.all()
)
if dataType == 'last_7_days':
results1 = map(lambda fi: {
'all_events': fi.all_events
}, db_session.query(func.count(FDMEvent.id).label('all_events'))
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
.all()
)
results2 = map(lambda fi: {
'new_events': fi.new_events,
        }, db_session.query(func.count(FDMEvent.id).label('new_events'))
            .outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(FDMEvent.status == None)
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
.all()
)
results3 = map(lambda fi: {
'in_progress': fi.in_progress,
        }, db_session.query(func.count(FDMEvent.id).label('in_progress'))
            .outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(FDMEvent.status == "in progress")
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
.all()
)
results4 = map(lambda fi: {
'analysed': fi.analysed,
        }, db_session.query(func.count(FDMEvent.id).label('analysed'))
            .outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(FDMEvent.status == "analysed")
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=7))) \
.all()
)
if dataType == 'last_month':
results1 = map(lambda fi: {
'all_events': fi.all_events
}, db_session.query(func.count(FDMEvent.id).label('all_events'))
.outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=30))) \
.all()
)
results2 = map(lambda fi: {
'new_events': fi.new_events,
        }, db_session.query(func.count(FDMEvent.id).label('new_events'))
            .outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(FDMEvent.status == None)
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=30))) \
.all()
)
results3 = map(lambda fi: {
'in_progress': fi.in_progress,
        }, db_session.query(func.count(FDMEvent.id).label('in_progress'))
            .outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(FDMEvent.status == "in progress")
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=30))) \
.all()
)
results4 = map(lambda fi: {
'analysed': fi.analysed,
        }, db_session.query(func.count(FDMEvent.id).label('analysed'))
            .outerjoin(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.filter(FDMEvent.status == "analysed")
.filter(DataQarSessionIdent.block_off <= datetime.datetime.now()) \
.filter(DataQarSessionIdent.block_off >= (datetime.datetime.now() - datetime.timedelta(days=30))) \
.all()
)
results = results1 + results2 + results3 + results4
return Response(ujson.dumps(results), mimetype='application/json')
@restService.route('/efs_connect', methods=['GET'])
@login_required()
def is_efs_connected():
if request.method == 'GET':
is_access = map(lambda fi: {
'value': fi.value,
}, db_session.query(ConfFDMApi.value)
)
if is_access[0]['value'] == 'True':
results = True
else:
results = False
return Response(ujson.dumps(results), content_type='application/json')
@restService.route('/event_stats/data', methods=['GET'])
@login_required()
def get_events_stats_data():
temparray = request.args.get('events_id')
temparray1 = request.args.get('programs_id')
events_id = ProcessData(temparray) #Transform data
programs_id = ProcessData(temparray1) # Transform data
chart_type = request.args.get('chart_type')
date_from = request.args.get('date_from')
date_to = request.args.get('date_to')
student = request.args.get('student')
instructor = request.args.get('instructor')
ac = request.args.get('ac')
apt_dep = request.args.get('apt_dep')
apt_arr = request.args.get('apt_arr')
severity = request.args.get('severity')
time_aggregation = request.args.get('time_aggregation')
event_with_program = request.args.get('event_with_program')
if request.method == 'GET':
is_efs = is_efs_connected().response[0]
sta = aliased(DataQar)
sto = aliased(DataQar)
stud = aliased(EFSUser)
instr = aliased(EFSUser)
if is_efs == 'true':
baked_query = db_session.query(func.count(FDMEvent.id).label('value'),
ConfAcType.model, ConfFleet.ac_reg, Flight.trainee_id, Flight.training_prog_id, Flight.instructor_id, Program.name,
func.date_format(sta.TS, '%m/%Y').label('month'), func.date_format(sta.TS, '%Y').label('year'),
func.date_format(sta.TS, '%d/%m/%Y').label('day'), sta.TS,
func.IF(Flight.instructor_id, (instr.first_name + ' ' + instr.last_name), 'None').label('instr'),
func.IF(Flight.trainee_id, (stud.first_name + ' ' + stud.last_name),'None').label('stud')) \
.join(FDMEventType, FDMEvent.event_type_id == FDMEventType.id)\
.join(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id)\
.join(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id)\
.join(sta, FDMEvent.start_id == sta.id)\
.join(sto, FDMEvent.stop_id == sto.id)\
.join(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id)\
.join(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id)\
.join(ConfFleet, DataQarFile.ac_id == ConfFleet.id)\
            .join(ConfAcType, ConfFleet.ac_type == ConfAcType.id)
else:
baked_query = db_session.query(func.count(FDMEvent.id).label('value'),
ConfAcType.model, ConfFleet.ac_reg,
func.date_format(sta.TS, '%m/%Y').label('month'),
func.date_format(sta.TS, '%Y').label('year'),
func.date_format(sta.TS, '%d/%m/%Y').label('day'), sta.TS ) \
.join(FDMEventType, FDMEvent.event_type_id == FDMEventType.id) \
.join(FDMEventGroup, FDMEventType.event_subgroup_id == FDMEventGroup.id) \
.join(FDMEventSort, FDMEventGroup.event_group_id == FDMEventSort.id) \
.join(sta, FDMEvent.start_id == sta.id) \
.join(sto, FDMEvent.stop_id == sto.id) \
.join(DataQarSessionIdent, FDMEvent.session_id == DataQarSessionIdent.session_id) \
.join(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id) \
.join(ConfFleet, DataQarFile.ac_id == ConfFleet.id) \
            .join(ConfAcType, ConfFleet.ac_type == ConfAcType.id)
if is_efs == 'true':
if event_with_program == 0 or event_with_program == "0":
baked_query = baked_query.join(FDMEventParticipant, FDMEvent.id == FDMEventParticipant.event_id) \
.outerjoin(Flight, FDMEventParticipant.flightlog_flt_id == Flight.flight_id) \
.outerjoin(Program, Flight.training_prog_id == Program.id) \
.outerjoin(stud, Flight.trainee_id == stud.ID) \
                .outerjoin(instr, Flight.instructor_id == instr.ID)
else:
baked_query = baked_query.outerjoin(FDMEventParticipant, FDMEvent.id == FDMEventParticipant.event_id) \
.outerjoin(Flight, FDMEventParticipant.flightlog_flt_id == Flight.flight_id) \
.outerjoin(Program, Flight.training_prog_id == Program.id) \
.outerjoin(stud, Flight.trainee_id == stud.ID) \
                .outerjoin(instr, Flight.instructor_id == instr.ID)
stud1 = aliased(EFSUser)
instr1 = aliased(EFSUser)
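        # Second query: counts DataQarFlightIdent rows ('flight_time') under the same
        # filters; returned as the 'value2' series that accompanies the event counts.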
baked_query_2 = db_session.query(func.Count(DataQarFlightIdent.id).label('flight_time'), func.date_format(DataQarSessionIdent.block_off, '%m/%Y').label('month'),
func.date_format(DataQarSessionIdent.block_off, '%d/%m/%Y').label('day'), func.date_format(DataQarSessionIdent.block_off, '%Y').label('year'),
ConfAcType.model, ConfFleet.ac_reg ) \
.outerjoin(FDMFlightParticipant, DataQarFlightIdent.flight_id == FDMFlightParticipant.flight_id) \
.outerjoin(DataQarSessionIdent, DataQarFlightIdent.session_id == DataQarSessionIdent.session_id) \
.outerjoin(DataQarFile, DataQarSessionIdent.qar_id == DataQarFile.qar_id) \
.outerjoin(ConfFleet, DataQarFile.ac_id == ConfFleet.id) \
            .outerjoin(ConfAcType, ConfFleet.ac_type == ConfAcType.id)
if is_efs == 'true':
baked_query_2 = baked_query_2.add_columns(func.IF(Flight.instructor_id, (instr1.first_name + ' ' + instr1.last_name),'None').label('instr'),
func.IF(Flight.trainee_id, (stud1.first_name + ' ' + stud1.last_name),'None').label('stud'), Program.name) \
.outerjoin(Flight, FDMFlightParticipant.flightlog_flt_id == Flight.flight_id) \
.outerjoin(stud1, Flight.trainee_id == stud1.ID).outerjoin(instr1, Flight.instructor_id == instr1.ID) \
                .outerjoin(Program, Flight.training_prog_id == Program.id)
if is_efs == 'false':
if chart_type == 'events_per_program' or chart_type == 'events_per_instructor' or chart_type == 'events_per_student':
abort(400)
if date_from != 'null':
baked_query = baked_query.filter(DataQarSessionIdent.block_off > date_from)
baked_query_2 = baked_query_2.filter(DataQarSessionIdent.block_off > date_from)
if date_to != 'null':
baked_query = baked_query.filter(DataQarSessionIdent.block_off < date_to)
baked_query_2 = baked_query_2.filter(DataQarSessionIdent.block_off < date_to)
if student != 'null':
baked_query = baked_query.filter(Flight.trainee_id == student)
baked_query_2 = baked_query_2.filter(Flight.trainee_id == student)
if instructor != 'null':
baked_query = baked_query.filter(Flight.instructor_id == instructor)
baked_query_2 = baked_query_2.filter(Flight.instructor_id == instructor)
if ac != 'null':
baked_query = baked_query.filter(ConfFleet.ac_type == ac)
baked_query_2 = baked_query_2.filter(ConfFleet.ac_type == ac)
if apt_dep != 'null':
baked_query = baked_query.filter(DataQarSessionIdent.apt_origin == apt_dep)
baked_query_2 = baked_query_2.filter(DataQarSessionIdent.apt_origin == apt_dep)
if apt_arr != 'null':
baked_query = baked_query.filter(DataQarSessionIdent.apt_dest == apt_arr)
baked_query_2 = baked_query_2.filter(DataQarSessionIdent.apt_dest == apt_arr)
if severity != 'null':
baked_query = baked_query.filter(FDMEvent.severity == severity)
if len(events_id) > 0 and events_id[0] != -1:
baked_query = baked_query.filter(FDMEvent.event_type_id.in_(events_id))
if events_id == []:
baked_query = baked_query.filter(FDMEvent.event_type_id.in_([]))
if is_efs == 'true':
if len(programs_id) > 0 and programs_id[0] != -1:
baked_query = baked_query.filter(or_(Flight.training_task_id.in_(programs_id), FDMEventParticipant.flightlog_flt_id == None))
if programs_id == []:
baked_query = baked_query.filter(or_(Flight.training_task_id.in_([]), FDMEventParticipant.flightlog_flt_id == None))
if chart_type == 'trend_in_time':
if time_aggregation == 'day':
baked_query = baked_query.group_by(func.date_format(sta.TS, '%d/%m/%Y')).order_by(sta.TS)
baked_query_2 = baked_query_2.group_by(func.date_format(DataQarSessionIdent.block_off, '%d/%m/%Y')).order_by(DataQarSessionIdent.block_off)
results = map(lambda fi: {
'value': fi.value,
'key': fi.day,
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.day,
}, baked_query_2.all()
)
elif time_aggregation == 'month':
baked_query = baked_query.group_by(func.date_format(sta.TS, '%m/%Y')).order_by(sta.TS)
baked_query_2 = baked_query_2.group_by(func.date_format(DataQarSessionIdent.block_off, '%m/%Y')).order_by(DataQarSessionIdent.block_off)
results = map(lambda fi: {
'value': fi.value,
'key': fi.month,
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.month,
}, baked_query_2.all()
)
elif time_aggregation == 'year':
baked_query = baked_query.group_by(func.date_format(sta.TS, '%Y')).order_by(sta.TS)
baked_query_2 = baked_query_2.group_by(func.date_format(DataQarSessionIdent.block_off, '%Y')).order_by(DataQarSessionIdent.block_off)
results = map(lambda fi: {
'value': fi.value,
'key': fi.year,
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.year,
}, baked_query_2.all()
)
elif chart_type == 'events_per_students':
baked_query = baked_query.filter(Flight.trainee_id != None).group_by(Flight.trainee_id).order_by(stud.last_name)
baked_query_2 = baked_query_2.filter(Flight.trainee_id != None).group_by(Flight.trainee_id).order_by(stud1.last_name)
results = map(lambda fi: {
'value': fi.value,
'key': fi.stud
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.stud,
}, baked_query_2.all()
)
elif chart_type == 'events_per_instructors':
baked_query = baked_query.filter(Flight.instructor_id != None).group_by(Flight.instructor_id).order_by(instr.last_name)
baked_query_2 = baked_query_2.filter(Flight.instructor_id != None).group_by(Flight.instructor_id).order_by(instr1.last_name)
results = map(lambda fi: {
'value': fi.value,
'key': fi.instr
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.instr,
}, baked_query_2.all()
)
elif chart_type == 'events_per_ac_type':
baked_query = baked_query.group_by(ConfFleet.ac_type).order_by(ConfAcType.model)
baked_query_2 = baked_query_2.group_by(ConfFleet.ac_type).order_by(ConfAcType.model)
results = map(lambda fi: {
'value': fi.value,
'key': fi.model
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.model,
}, baked_query_2.all()
)
elif chart_type == 'events_per_ac_reg':
baked_query = baked_query.group_by(ConfFleet.ac_reg).order_by(ConfFleet.ac_reg)
baked_query_2 = baked_query_2.group_by(ConfFleet.ac_type).order_by(ConfAcType.model)
results = map(lambda fi: {
'value': fi.value,
'key': fi.ac_reg
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.ac_reg,
}, baked_query_2.all()
)
elif chart_type == 'events_per_program':
baked_query = baked_query.group_by(Flight.training_prog_id)
baked_query_2 = baked_query_2.group_by(Flight.training_prog_id)
results = map(lambda fi: {
'value': fi.value,
'key': fi.name
}, baked_query.all()
)
results2 = map(lambda fi: {
'value2': fi.flight_time,
'key': fi.name,
}, baked_query_2.all()
)
results = results + results2
return Response(ujson.dumps(results), content_type='application/json')
@restService.route('/students', methods=['GET'])
@login_required()
def get_students():
is_efs = is_efs_connected().response[0]
if is_efs == 'true':
results = map(lambda fi: {
'value': fi.ID,
'label': fi.user
}, db_session.query((EFSUser.first_name + ' ' + EFSUser.last_name).label('user'), EFSUser.ID)
.filter(EFSUser.group_id == 18).all())
else:
results = None
return Response(ujson.dumps(results, ensure_ascii=False), content_type='application/json')
@restService.route('/instructors', methods=['GET'])
@login_required()
def get_instructors():
is_efs = is_efs_connected().response[0]
if is_efs == 'true':
results = map(lambda fi: {
'value': fi.ID,
'label': fi.user.encode('UTF8')
}, db_session.query((EFSUser.first_name + ' ' + EFSUser.last_name).label('user'), EFSUser.ID)
.filter(or_(EFSUser.group_id == 19, EFSUser.group_id == 20)).all())
else:
results = None
return Response(ujson.dumps(results, ensure_ascii=False), content_type='application/json')
@restService.route('/airports', methods=['GET'])
@login_required()
def get_airports():
results = map(lambda fi: {
'value': fi.icao,
'label': fi.icao
}, db_session.query(CoreAirport.icao).all())
return Response(ujson.dumps(results), content_type='application/json')
@restService.route('/aircrafts', methods=['GET'])
@login_required()
def get_aircrafts():
results = map(lambda fi: {
'value': fi.id,
'label': fi.model
}, db_session.query(ConfAcType.id, ConfAcType.model).all())
return Response(ujson.dumps(results), content_type='application/json')
|
python
|
from typing import Tuple
import requests
from bs4 import BeautifulSoup
from hospital_types import (
ScrapedData,
AppointmentAvailability,
HospitalAvailabilitySchema,
)
import aiohttp, asyncio
URL: str = "http://netreg.afph.tsgh.ndmctsgh.edu.tw/webreg/calendar_type/5xn1z9fPG5H4JDJEV98dHQ%3D%3D"
async def scrape_sanjunzong_penghu() -> ScrapedData:
timeout = aiohttp.ClientTimeout(total=5)
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.get(URL) as r:
return parse_sanjunzong_penghu(await r.text())
def parse_sanjunzong_penghu(html: str) -> ScrapedData:
soup = BeautifulSoup(html, "html.parser")
weekly_tables = soup.find("table", {"id": "timeTable"}).find_all("tbody")
available_links = []
for table in weekly_tables:
links = table.find_all("a")
available_links = available_links + links
availability: HospitalAvailabilitySchema = {
"self_paid": AppointmentAvailability.AVAILABLE
if available_links
else AppointmentAvailability.UNAVAILABLE,
"government_paid": AppointmentAvailability.NO_DATA,
}
return (
31,
availability,
)
|
python
|
from django.contrib.auth import get_user_model
from django.test import TestCase
UserModel = get_user_model()
class UserModelTest(TestCase):
def setUp(self) -> None:
self.user = UserModel.objects.create_user(
email='[email protected]',
)
        self.user.set_password('1234567890Mazen')
self.user.save()
def test_UserInit(self):
self.assertEqual('[email protected]', self.user.email)
self.assertFalse(self.user.is_staff)
self.assertFalse(self.user.is_superuser)
|
python
|
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: FCCH Bursts Detector
# Author: Piotr Krysik
#
# Description: Detects positions of FCCH bursts. At the end of each
# detected FCCH burst adds to the stream a tag with key "fcch" and value
# which is a frequency offset estimate. The input sampling frequency
# should be an integer multiple of the GSM GMSK symbol rate - 1625000/6 Hz.
##################################################
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter import firdes
import grgsm
class fcch_detector(grgsm.hier_block):
def __init__(self, OSR=4):
grgsm.hier_block.__init__(
self, "FCCH bursts detector",
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
)
##################################################
# Parameters
##################################################
self.OSR = OSR
##################################################
# Variables
##################################################
self.f_symb = f_symb = 1625000.0/6.0
self.samp_rate = samp_rate = f_symb*OSR
##################################################
# Blocks
##################################################
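        # Detection idea: an FCCH burst is a pure tone, so the phase of
        # x[n] * conj(x[n - OSR]) stays roughly constant over the burst; that phase is
        # thresholded and averaged over ~142 symbols to flag sufficiently long stretches.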
self.gsm_fcch_burst_tagger_0 = grgsm.fcch_burst_tagger(OSR)
self.blocks_threshold_ff_0_0 = blocks.threshold_ff(0, 0, 0)
self.blocks_threshold_ff_0 = blocks.threshold_ff(int((138)*samp_rate/f_symb), int((138)*samp_rate/f_symb), 0)
self.blocks_multiply_conjugate_cc_0 = blocks.multiply_conjugate_cc(1)
self.blocks_moving_average_xx_0 = blocks.moving_average_ff(int((142)*samp_rate/f_symb), 1, int(1e6))
self.blocks_delay_0 = blocks.delay(gr.sizeof_gr_complex*1, int(OSR))
self.blocks_complex_to_arg_0 = blocks.complex_to_arg(1)
##################################################
# Connections
##################################################
self.connect((self, 0), (self.blocks_multiply_conjugate_cc_0, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_multiply_conjugate_cc_0, 1))
self.connect((self.blocks_complex_to_arg_0, 0), (self.blocks_threshold_ff_0_0, 0))
self.connect((self, 0), (self.blocks_delay_0, 0))
self.connect((self.blocks_multiply_conjugate_cc_0, 0), (self.blocks_complex_to_arg_0, 0))
self.connect((self.blocks_moving_average_xx_0, 0), (self.blocks_threshold_ff_0, 0))
self.connect((self.blocks_threshold_ff_0_0, 0), (self.blocks_moving_average_xx_0, 0))
self.connect((self.gsm_fcch_burst_tagger_0, 0), (self, 0))
self.connect((self, 0), (self.gsm_fcch_burst_tagger_0, 0))
self.connect((self.blocks_threshold_ff_0, 0), (self.gsm_fcch_burst_tagger_0, 1))
def get_OSR(self):
return self.OSR
def set_OSR(self, OSR):
self.OSR = OSR
self.set_samp_rate(self.f_symb*self.OSR)
self.blocks_delay_0.set_dly(int(self.OSR))
|
python
|
import logging
def get_logger(LEVEL='info', log_file=None, name=None):
head = '[%(asctime)-15s] [%(levelname)s] %(message)s'
if LEVEL == 'info':
logging.basicConfig(level=logging.INFO, format=head)
elif LEVEL == 'debug':
logging.basicConfig(level=logging.DEBUG, format=head)
logger = logging.getLogger(name)
    if log_file is not None:
fh = logging.FileHandler(log_file)
logger.addHandler(fh)
return logger
|
python
|
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
from sklearn.metrics import accuracy_score
iris = datasets.load_iris()
X, y = iris.data, iris.target
knn = KNeighborsClassifier()
knn.fit(X, y)
accuracy_score(y, knn.predict(X))
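# Note: the score above is computed on the training data itself, so it overestimates
# how well the classifier generalises to unseen samples.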
|
python
|
import random
import discord
from discord.ext import commands
from .inputs import cl, cf, chill, cfe, ur
from .utils import COLOR
class Coffee(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=['ask_out'])
async def wannagrabacoffee(self, ctx, *, member: discord.Member):
'''Wanna ask someone out on coffee'''
embed = discord.Embed(
title=f'{member}, Someone wants to grab a coffee with you...*wink *wink',
color=COLOR.DEFAULT)
embed.add_field(name='This happened....', value=f'{random.choice(cf)}')
embed.set_footer(text='not actually')
await ctx.send(embed=embed)
@commands.command(aliases=['brew'])
async def coffee(self, ctx):
'''A lovely coffee command (sip, sip)'''
op = f'{random.choice(cfe)}'
embed = discord.Embed(title='Coffee',
description=op,
color=COLOR.DEFAULT)
embed.set_footer(
text=f'Caffeiene Level-{random.choice(cl)}.{random.choice(chill)}')
embed.set_image(url=random.choice(ur))
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Coffee(client))
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.common.sample import Sample
from mmf.models.transformers.heads.itm import ITM
from mmf.models.transformers.heads.mlm import MLM
from mmf.models.transformers.heads.mlp import MLP
from omegaconf import OmegaConf
from tests.test_utils import skip_if_no_cuda
class TestMLMHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{"type": "mlm", "freeze": False, "vocab_size": 1000, "hidden_size": 768}
)
@skip_if_no_cuda
def test_forward(self):
module = MLM(self.config).to("cuda")
sequence_input = torch.rand(size=(1, 64, 768), dtype=torch.float, device="cuda")
encoder_output = [sequence_input, sequence_input]
processed_sample_list = Sample()
processed_sample_list["mlm_labels"] = {}
processed_sample_list["mlm_labels"]["combined_labels"] = torch.ones(
size=(1, 64), dtype=torch.long, device="cuda"
)
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("logits" in output)
self.assertTrue("losses" in output and "masked_lm_loss" in output["losses"])
self.assertEqual(output["logits"].shape, torch.Size([64, 1000]))
class TestMLPHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{"type": "mlp", "num_labels": 2, "hidden_size": 768}
)
def test_forward(self):
module = MLP(self.config)
sequence_input = torch.ones(size=(1, 64, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = {}
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("scores" in output)
self.assertEqual(output["scores"].shape, torch.Size([1, 2]))
class TestITMHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create({"type": "itm", "hidden_size": 768})
def test_forward(self):
module = ITM(self.config)
sequence_input = torch.ones(size=(1, 64, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = Sample()
processed_sample_list["itm_labels"] = {}
processed_sample_list["itm_labels"]["is_correct"] = torch.tensor(
False, dtype=torch.long
)
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("itm_loss" in output["losses"])
self.assertEqual(output["losses"]["itm_loss"].shape, torch.Size([]))
|
python
|
import pandas as pd
from sklearn.model_selection import train_test_split
class DataLoader:
def __init__(self):
self.dataset = None
self.sensor = None
self.target = None
def load(self,file,isTest=False):
if not isTest:
print("loading")
self.dataset = pd.read_csv(file)
print("loaded")
self.target = self.dataset[['target_carbon_monoxide', 'target_benzene', 'target_nitrogen_oxides']]
self.sensor = self.dataset[list(self.dataset.columns[1:-3])]
self.sensor.columns = ['deg_C', 'rel_h', 'abs_h','s_1', 's_2', 's_3', 's_4', 's_5']
else:
print("loading test files")
self.dataset = pd.read_csv(file)
print("Test files loaded")
self.sensor = self.dataset[list(self.dataset.columns[1:])]
self.sensor.columns = ['deg_C', 'rel_h', 'abs_h', 's_1', 's_2', 's_3', 's_4', 's_5']
def process(self):
col = ['s_1', 's_2', 's_3', 's_4', 's_5']
df = self.sensor[col].values/1000
        self.sensor.update(pd.DataFrame(df, columns=col, index=self.sensor.index))  # keep column names so the scaled values replace the originals
def split(self):
        if self.target is None:
print("This is test data")
return
X_train, X_test, y_train, y_test = \
train_test_split(self.sensor, self.target, test_size=0.33, random_state=42)
return X_train, X_test, y_train, y_test
|
python
|
#!/usr/bin/env python3
import sys
def isBalanced(expression):
if len(expression) & 1:
return False
parentheses = []
for c in expression:
if c == '(' or c == '{' or c == '[':
parentheses.append(c)
elif c == ')' or c == '}' or c == ']':
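            # ASCII trick: ')' is one code point after '(', while ']' and '}' are two
            # code points after '[' and '{' respectively.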
offset = 1 if c == ')' else 2
if len(parentheses) and chr(ord(parentheses[-1]) + offset) == c:
parentheses.pop()
else:
return False
return not parentheses
print(*(isBalanced(line.rstrip()) for line in sys.stdin), sep='\n')
|
python
|
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Any, DefaultDict, Optional, Type, TypeVar
from discord import Guild
from commanderbot.lib import FromDataMixin, GuildID, JsonSerializable, LogOptions
from commanderbot.lib.utils import dict_without_ellipsis, dict_without_falsies
ST = TypeVar("ST")
@dataclass
class StacktracerGuildData(JsonSerializable, FromDataMixin):
log_options: Optional[LogOptions] = None
# @overrides FromDataMixin
@classmethod
def try_from_data(cls: Type[ST], data: Any) -> Optional[ST]:
if isinstance(data, dict):
log_options = LogOptions.from_field_optional(data, "log")
return cls(log_options=log_options)
# @implements JsonSerializable
def to_json(self) -> Any:
# Omit empty log options.
data = dict_without_ellipsis(log=self.log_options or ...)
return data
def set_log_options(
self, log_options: Optional[LogOptions]
) -> Optional[LogOptions]:
old_value = self.log_options
self.log_options = log_options
return old_value
def _guilds_defaultdict_factory() -> DefaultDict[GuildID, StacktracerGuildData]:
return defaultdict(lambda: StacktracerGuildData())
# @implements StacktracerStore
@dataclass
class StacktracerData(JsonSerializable, FromDataMixin):
"""
Implementation of `StacktracerStore` using an in-memory object hierarchy.
"""
# Global log options configured by bot owners.
log_options: Optional[LogOptions] = None
# Per-guild log options configured by admins (or owners).
guilds: DefaultDict[GuildID, StacktracerGuildData] = field(
default_factory=_guilds_defaultdict_factory
)
# @overrides FromDataMixin
@classmethod
def try_from_data(cls: Type[ST], data: Any) -> Optional[ST]:
if isinstance(data, dict):
# Construct global log options.
log_options = LogOptions.from_field_optional(data, "log_options")
# Construct guild data.
guilds = _guilds_defaultdict_factory()
for raw_guild_id, raw_guild_data in data.get("guilds", {}).items():
guild_id = int(raw_guild_id)
guilds[guild_id] = StacktracerGuildData.from_data(raw_guild_data)
return cls(
log_options=log_options,
guilds=guilds,
)
# @implements JsonSerializable
def to_json(self) -> Any:
guilds = {
str(guild_id): guild_data.to_json()
for guild_id, guild_data in self.guilds.items()
}
# Omit empty guilds.
trimmed_guilds = dict_without_falsies(guilds)
# Omit empty fields.
data = dict_without_ellipsis(
log_options=self.log_options or ...,
guilds=trimmed_guilds or ...,
)
return data
# @implements StacktracerStore
async def get_global_log_options(self) -> Optional[LogOptions]:
return self.log_options
# @implements StacktracerStore
async def set_global_log_options(
self, log_options: Optional[LogOptions]
) -> Optional[LogOptions]:
old_value = self.log_options
self.log_options = log_options
return old_value
# @implements StacktracerStore
async def get_guild_log_options(self, guild: Guild) -> Optional[LogOptions]:
return self.guilds[guild.id].log_options
# @implements StacktracerStore
async def set_guild_log_options(
self, guild: Guild, log_options: Optional[LogOptions]
) -> Optional[LogOptions]:
return self.guilds[guild.id].set_log_options(log_options)
|
python
|
from flask_restful import Resource, current_app
from backend.services.campaign_service import CampaignService
from backend.services.organisation_service import OrganisationService
from backend.models.postgis.utils import NotFound
from backend.services.users.authentication_service import token_auth
class OrganisationsCampaignsAPI(Resource):
@token_auth.login_required
def post(self, organisation_id, campaign_id):
"""
Assigns a campaign to an organisation
---
tags:
- campaigns
produces:
- application/json
parameters:
- in: header
name: Authorization
description: Base64 encoded session token
required: true
type: string
default: Token sessionTokenHere==
- name: organisation_id
in: path
description: Unique organisation ID
required: true
type: integer
default: 1
- name: campaign_id
in: path
description: Unique campaign ID
required: true
type: integer
default: 1
responses:
200:
description: Organisation and campaign assigned successfully
401:
description: Unauthorized - Invalid credentials
403:
description: Forbidden - users have submitted mapping
404:
description: Project not found
500:
description: Internal Server Error
"""
try:
if OrganisationService.can_user_manage_organisation(
organisation_id, token_auth.current_user()
):
if CampaignService.campaign_organisation_exists(
campaign_id, organisation_id
):
message = (
"Campaign {} is already assigned to organisation {}.".format(
campaign_id, organisation_id
)
)
return {"Error": message, "SubCode": "CampaignAlreadyAssigned"}, 400
CampaignService.create_campaign_organisation(
organisation_id, campaign_id
)
message = (
"campaign with id {} assigned for organisation with id {}".format(
campaign_id, organisation_id
)
)
return {"Success": message}, 200
else:
return {
"Error": "User is not a manager of the organisation",
"SubCode": "UserNotPermitted",
}, 403
except Exception as e:
error_msg = f"Campaign Organisation POST - unhandled error: {str(e)}"
current_app.logger.critical(error_msg)
return {"Error": error_msg, "SubCode": "InternalServerError"}, 500
def get(self, organisation_id):
"""
Returns all campaigns related to an organisation
---
tags:
- campaigns
produces:
- application/json
parameters:
- in: header
name: Authorization
description: Base64 encoded session token
required: false
type: string
default: Token sessionTokenHere==
- name: organisation_id
in: path
description: Unique project ID
required: true
type: integer
default: 1
responses:
200:
description: Success
404:
description: Organisation not found
500:
description: Internal Server Error
"""
try:
campaigns = CampaignService.get_organisation_campaigns_as_dto(
organisation_id
)
return campaigns.to_primitive(), 200
except NotFound:
return {"Error": "No campaign found", "SubCode": "NotFound"}, 404
except Exception as e:
error_msg = f"Organisation Campaigns GET - unhandled error: {str(e)}"
current_app.logger.critical(error_msg)
return {"Error": error_msg, "SubCode": "InternalServerError"}, 500
@token_auth.login_required
def delete(self, organisation_id, campaign_id):
"""
        Un-assigns an organisation from a campaign
---
tags:
- campaigns
produces:
- application/json
parameters:
- in: header
name: Authorization
description: Base64 encoded session token
required: true
type: string
default: Token sessionTokenHere==
- name: organisation_id
in: path
description: Unique organisation ID
required: true
type: integer
default: 1
- name: campaign_id
in: path
description: Unique campaign ID
required: true
type: integer
default: 1
responses:
200:
description: Organisation and campaign unassociated successfully
401:
description: Unauthorized - Invalid credentials
403:
description: Forbidden - users have submitted mapping
404:
description: Project not found
500:
description: Internal Server Error
"""
try:
if OrganisationService.can_user_manage_organisation(
organisation_id, token_auth.current_user()
):
CampaignService.delete_organisation_campaign(
organisation_id, campaign_id
)
return (
{"Success": "Organisation and campaign unassociated successfully"},
200,
)
else:
return {
"Error": "User is not a manager of the organisation",
"SubCode": "UserNotPermitted",
}, 403
except NotFound:
return {
"Error": "Organisation Campaign Not Found",
"SubCode": "NotFound",
}, 404
except Exception as e:
error_msg = f"Organisation Campaigns DELETE - unhandled error: {str(e)}"
current_app.logger.critical(error_msg)
return {"Error": error_msg, "SubCode": "InternalServerError"}, 500
|
python
|
#!/usr/bin/env python
#coding: utf-8
__author__ = 'Toshihiro Kamiya <[email protected]>'
__status__ = 'experimental'
import os.path
import re
import sys
import subprocess
UNZIP_COMMAND = "/usr/bin/unzip"
def get_class_names_from_jar(jar_file):
class_names = []
pat = re.compile(r"^\s*testing:\s*(.+)[.]class\s+OK\s*$")
    text = subprocess.check_output([UNZIP_COMMAND, "-t", jar_file])
for L in text.split('\n'):
L = L.rstrip()
m = pat.match(L)
if m:
class_names.append(m.group(1))
return class_names
def gen_dest_dir_name(jar_file):
return jar_file + ".files"
def main(argv):
from argparse import ArgumentParser
psr = ArgumentParser(description="Expand a jar file to extract class files and class list")
psr.add_argument('jar_file', action='store')
args = psr.parse_args(argv[1:])
dest_dir = gen_dest_dir_name(args.jar_file)
if os.path.exists(dest_dir):
sys.exit("output directory already exists: %s" % dest_dir)
os.mkdir(dest_dir)
class_names = get_class_names_from_jar(args.jar_file)
with open(os.path.join(dest_dir, "class_list"), "wb") as f:
for cn in class_names:
f.write("%s\n" % cn)
    subprocess.check_call([UNZIP_COMMAND, args.jar_file, "-d", dest_dir])
if __name__ == '__main__':
main(sys.argv)
|
python
|
# Copyright 2020 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testflows.settings as settings
from testflows.database.pipeline import WriteToDatabasePipeline
from testflows.database.clickhouse import Database, DatabaseConnection
from testflows._core.compress import CompressedFile
def database_handler():
"""Handler to write output messages to database.
"""
options = {option.key: option.value for option in settings.database}
conn = DatabaseConnection(
host=options.pop("host", "localhost"),
database=options.pop("database", "default"),
user=options.pop("user", None),
password=options.pop("password", None),
port=options.pop("port", 8123)
)
database = Database(connection=conn)
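    # Tail the compressed log file from the start and stream its messages into the database.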
with CompressedFile(settings.read_logfile, tail=True) as log:
log.seek(0)
WriteToDatabasePipeline(log, database, tail=True).run()
|
python
|
from pathlib import Path
import importlib
import tensorflow as tf
from annoy import AnnoyIndex
DEFAULT_CONFIG = {
"dataset": "Dataset",
"model": "RetreivalModel",
"network": "retrieval_basic_factorization",
"network_args": {
"embedding_dimension": 32,
}
}
class RetrievalModel:
def __init__(self, weight_dir,config=DEFAULT_CONFIG):
networks_module = importlib.import_module("recommenders.networks")
network_fn_ = getattr(networks_module, config["network"])
network_args = config.get("network_args", {})
datasets_module = importlib.import_module("recommenders.datasets")
dataset_class_ = getattr(datasets_module, config["dataset"])
dataset_args = config.get("dataset_args", {})
dataset = dataset_class_(**dataset_args)
dataset.load_or_generate_data(update_to_latest_db=False)
self.query_model, _ = network_fn_(dataset.unique_user_ids, dataset.unique_movie_ids, **network_args)
self.query_model.load_weights(weight_dir / 'query')
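        # The Annoy index stores the candidate embeddings; nearest neighbours by dot
        # product with the query embedding become the recommended candidates.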
self.index = AnnoyIndex(self.query_model.embedding_dimension, 'dot')
self.index.load(str(weight_dir /'candid.annoy'))
def predict(self, features, num_candids = 100):
query_embedding = self.query_model.predict(features).squeeze()
candids = self.index.get_nns_by_vector(query_embedding, num_candids)
return candids
|
python
|
# Generated by Django 2.1.3 on 2018-12-08 07:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('staf', '0010_process_slug'),
('multiplicity', '0023_referencespacetype_process'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='topic',
),
migrations.AddField(
model_name='photo',
name='process',
field=models.ForeignKey(blank=True, limit_choices_to={'slug__isnull': False}, null=True, on_delete=django.db.models.deletion.CASCADE, to='staf.Process'),
),
]
|
python
|
from __future__ import absolute_import, division, print_function
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
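# Thread-local storage: each request-handling thread sees its own 'request' attribute,
# set by the middleware below and removed once the response has been produced.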
def get_current_request():
""" returns the request object for this thread """
return getattr(_thread_locals, "request", None)
def get_current_user():
""" returns the current user, if exist, otherwise returns None """
request = get_current_request()
if request:
return getattr(request, "user", None)
def thread_local_middleware(get_response):
# One-time configuration and initialization.
def middleware(request):
# Code to be executed for each request before
# the view (and later middleware) are called.
_thread_locals.request = request
response = get_response(request)
# Code to be executed for each request/response after
# the view is called.
if hasattr(_thread_locals, "request"):
del _thread_locals.request
return response
return middleware
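# Wiring sketch (assumed project settings; the dotted path below is hypothetical
# and must point at wherever this module lives in your project):
#
# MIDDLEWARE = [
#     # ...,
#     "myproject.middleware.thread_local_middleware",
# ]
#
# Any code running inside a request/response cycle can then do:
# user = get_current_user()  # returns None outside a request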
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.spatial
import tensorflow as tf
import tensorflow_addons as tfa
"""
Free parameters to control the synthesis
"""
_MAX_SS_SIGMA = 5 # control subsurface scattering strength
_MAX_BLUR_SIGMA = 10 # control spatially varying blur strength
_SV_SIGMA = 0.5 # 1. --> no sv blur on boundary; 0. -> always sv blur
"""
Common I/O Utils
"""
def read_float(path, channel=3, itype='jpg', is_linear=False):
"""Decode an image from string. Return 3 channels.
Args:
path: a tf string of the image path.
channel: scalar, number of channels of the input image.
itype: string, the type of the input image.
is_linear: a bool, indicates whether or not to convert to linear color space.
(undo gamma correction)
Returns:
A 3D tensor of the read-in image.
"""
image_string = tf.io.read_file(path)
if itype == 'jpg':
image = tf.image.decode_jpeg(image_string, channels=channel)
elif itype == 'png':
image = tf.image.decode_png(image_string, channels=channel)
image = tf.image.convert_image_dtype(image, tf.float32)
if is_linear:
image = srgb_to_rgb(image)
return image
def srgb_to_rgb(srgb, name='srgb_to_rgb'):
"""Converts sRGB to linear RGB."""
with tf.name_scope(name):
mask = tf.cast(tf.greater(srgb, 0.04045), dtype=srgb.dtype)
return (srgb / 12.92 * (1.0 - mask) + tf.pow(
(srgb + 0.055) / 1.055, 2.4) * mask)
def rgb_to_srgb(rgb, name='rgb_to_srgb'):
"""Converts linear RGB to sRGB."""
with tf.name_scope(name):
mask = tf.cast(tf.greater(rgb, 0.0031308), dtype=tf.float32)
return (rgb * 12.92 * (1.0 - mask) +
(tf.pow(rgb, 1.0 / 2.4) * 1.055 - 0.055) * mask)
def resize_image(image, new_sizeh=None, new_sizew=None, rsz=None):
"""Customized image resizing op."""
with tf.name_scope('resize_image'):
if new_sizeh is None:
height = tf.cast(tf.shape(image)[0], tf.float32)
width = tf.cast(tf.shape(image)[1], tf.float32)
new_sizeh = tf.cast(height * rsz, tf.int32)
new_sizew = tf.cast(width * rsz, tf.int32)
return tf.compat.v1.image.resize(
image, [new_sizeh, new_sizew],
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
"""
Subsurface scattering approximation
"""
def apply_ss_shadow_map(mask):
"""Apply subsurface scattering approximation to the shadow mask.
Args:
mask: A Tensor of shape [H, W, 1].
Returns:
A Tensor of shape [H, W, 3] that is applied with wavelength-dependent blur.
"""
r = tf.random.uniform(
shape=(), minval=0.5, maxval=_MAX_SS_SIGMA, dtype=tf.float32) # a global scalar to scale all the blur size
shadow_map = wavelength_filter(mask, num_lv=6, scale=r, is_rgb=False)
shadow_map = tf.minimum(1., shadow_map/0.6) # a heuristic scalar for more stable normalization
return shadow_map
def wavelength_filter(input_img, num_lv=6, scale=5, is_rgb=False, name='wavelength_filter'):
"""Image-based subsurface scattering approximation
Parameters from the NVIDIA screen-space subsurface scattering (SS) slide 98.
http://developer.download.nvidia.com/presentations/2007/gdc/Advanced_Skin.pdf
Args:
input_img: a 3D tensor [H, W, C].
num_lv: a scalar that specifies the number of Gaussian filter levels in the SS model.
scale: a scalar that is the scale used to calibrate the kernel size into # pixels based on the size of the face in the image.
is_rgb: a bool that indicates whether input is grayscale(c=1) or rgb(c=3).
name: string, name of the graph.
Returns:
A 3D tensor after approximated with subsurface scattering.
"""
with tf.name_scope(name):
scale = tf.cast(scale, tf.float32)
ss_weights = np.array([[0.042, 0.22, 0.437, 0.635],
[0.220, 0.101, 0.355, 0.365],
[0.433, 0.119, 0.208, 0],
[0.753, 0.114, 0, 0],
[1.412, 0.364, 0, 0],
[2.722, 0.080, 0, 0]])
ss_weights_norm = np.sum(ss_weights, 0)
img_blur_rgb = 0.
for lv in range(num_lv):
blur_kernel = ss_weights[lv, 0] * scale  # the same scaling rule applies to every level
rgb_weights = ss_weights[lv, 1:]
if not is_rgb:
blur_img = gaussian_filter(tf.expand_dims(input_img, 0), blur_kernel)[0]
blur_r = blur_img * rgb_weights[0] * 1.2
blur_g = blur_img * rgb_weights[1]
blur_b = blur_img * rgb_weights[2]
else:
blur_r = gaussian_filter(
tf.expand_dims(input_img[..., 0, tf.newaxis], 0),
blur_kernel)[0] * rgb_weights[0] * 1. / ss_weights_norm[1]
blur_g = gaussian_filter(
tf.expand_dims(input_img[..., 1, tf.newaxis], 0),
blur_kernel)[0] * rgb_weights[1] * 1. / ss_weights_norm[2]
blur_b = gaussian_filter(
tf.expand_dims(input_img[..., 2, tf.newaxis], 0),
blur_kernel)[0] * rgb_weights[2] * 1. / ss_weights_norm[3]
img_blur = tf.concat([blur_r, blur_g, blur_b], 2)
img_blur_rgb += img_blur
return img_blur_rgb
def gaussian_filter(image, sigma, pad_mode='REFLECT', name='gaussian_filter'):
"""Applies Gaussian filter to an image using depthwise conv.
Args:
image: 4-D Tensor with float32 dtype and shape [N, H, W, C].
sigma: Positive float or 0-D Tensor.
pad_mode: String, mode argument for tf.pad. Default is 'REFLECT' for
whole-sample symmetric padding.
name: A string to name this part of the graph.
Returns:
Filtered image, has the same shape with the input.
"""
with tf.name_scope(name):
image.shape.assert_has_rank(4)
sigma = tf.cast(sigma, tf.float32)
sigma.shape.assert_has_rank(0) # sigma is a scalar.
channels = tf.shape(image)[3]
r = tf.cast(tf.math.ceil(2.0 * sigma), tf.int32)
n = tf.range(-tf.cast(r, tf.float32), tf.cast(r, tf.float32) + 1)
coeffs = tf.exp(-0.5 * (n / sigma)**2)
coeffs /= tf.reduce_sum(coeffs)
coeffs_x = tf.tile(tf.reshape(coeffs, (1, -1, 1, 1)), (1, 1, channels, 1))
coeffs_y = tf.reshape(coeffs_x, (2 * r + 1, 1, channels, 1))
padded = tf.pad(image, ((0, 0), (r, r), (r, r), (0, 0)), pad_mode)
with tf.device('/cpu:0'): # seems necessary for depthwise_conv2d
filtered = tf.nn.depthwise_conv2d(
padded, coeffs_x, (1, 1, 1, 1), 'VALID', name='filter_x')
filtered = tf.nn.depthwise_conv2d(
filtered, coeffs_y, (1, 1, 1, 1), 'VALID', name='filter_y')
filtered.set_shape(image.shape)
return filtered
"""
Spatially varying utils
"""
def apply_disc_filter(input_img, kernel_sz, is_rgb=True):
"""Apply disc filtering to the input image with a specified kernel size.
To handle large kernel sizes, this is operated (and thus approximated) in
frequency domain (fft).
Args:
input_img: a 2D or 3D tensor. [H, W, 1] or [H, W].
kernel_sz: a scalar tensor that specifies the disc kernel size.
is_rgb: a bool that indicates whether FFT is grayscale(c=1) or rgb(c=3).
Returns:
A Tensor after applied disc filter, has the same size as the input tensor.
"""
if kernel_sz == 0:
raise Warning('Input kernel size is 0.')
return input_img
disc = create_disc_filter(kernel_sz)
offset = kernel_sz - 1
# if len(tf.shape(input_img)) == 2:
# padding_img = [[0, kernel_sz], [0, kernel_sz]]
# elif len(tf.shape(input_img)) == 3:
padding_img = [[0, kernel_sz], [0, kernel_sz], [0, 0]]
img_padded = tf.pad(input_img, padding_img, 'constant')
paddings = [[0, tf.shape(img_padded)[0] - tf.shape(disc)[0]],
[0, tf.shape(img_padded)[1] - tf.shape(disc)[1]]]
disc_padded = tf.pad(disc, paddings)
# if len(tf.shape(input_img)) == 2:
# img_blurred = fft_filter(
# img_padded, disc_padded)[offset:offset + tf.shape(input_img)[0],
# offset:offset + tf.shape(input_img)[1]]
# else:
img_blurred = fft3_filter(
img_padded, disc_padded,
is_rgb=is_rgb)[offset:offset + tf.shape(input_img)[0],
offset:offset + tf.shape(input_img)[1]]
return img_blurred
def create_disc_filter(r):
"""Create a disc filter of radius r.
Args:
r: an int of the kernel radius.
Returns:
disk filter: A 2D Tensor
"""
x, y = tf.meshgrid(tf.range(-r, r + 1), tf.range(-r, r + 1))
mask = tf.less_equal(tf.pow(x, 2) + tf.pow(y, 2), tf.pow(r, 2))
mask = tf.cast(mask, tf.float32)
mask /= tf.reduce_sum(mask)
return mask
def get_brightness_mask(size, min_val=0.5):
"""Render per-pixel intensity variation mask within [min_val, 1.].
Args:
size: A 2D tensor of target mask size.
min_val: a float lower bound of the rendered intensity, defaults to 0.5.
Returns:
A Tensor of shape [H, W, 1] that is generated with perlin noise pattern.
"""
perlin_map = perlin_collection((size[0], size[1]), [2, 2], 2,
tf.random.uniform([], 0.05, 0.25))
perlin_map = perlin_map / (1. / (min_val + 1e-6)) + min_val
perlin_map = tf.minimum(perlin_map, 1.)
return perlin_map
def fft_filter(img, kernel):
"""Apply FFT to a 2D tensor.
Args:
img: a 2D tensor of the input image [H, W].
kernel: a 2D tensor of the kernel.
Returns:
a 2D tensor applied with a filter using FFT.
"""
with tf.name_scope('fft2d_gray'):
img = tf.cast(img, tf.complex64)
kernel = tf.cast(kernel, tf.complex64)
img_filtered = tf.cast(
tf.abs(tf.signal.ifft2d(tf.multiply(tf.signal.fft2d(img), tf.signal.fft2d(kernel)))),
tf.float32)
return img_filtered
def fft3_filter(img, kernel, is_rgb=True):
"""Apply FFT to a 3D tensor.
Args:
img: a 3D tensor of the input image [H, W, C].
kernel: a 2D tensor of the kernel.
is_rgb: a bool that indicates whether input is rgb or not.
Returns:
a filtered 3D tensor, has the same size as input.
"""
with tf.name_scope('fft2d_rgb'):
img = tf.cast(img, tf.complex64)
kernel = tf.cast(kernel, tf.complex64)
if not is_rgb:
img_r = fft_filter(img[..., 0], kernel)
img_r = tf.expand_dims(img_r, 2)
return img_r
else:
img_r = fft_filter(img[..., 0], kernel)
img_g = fft_filter(img[..., 1], kernel)
img_b = fft_filter(img[..., 2], kernel)
img_filtered = tf.stack([img_r, img_g, img_b], 2)
return img_filtered
def perlin_collection(size, reso, octaves, persistence):
"""Generate perlin patterns of varying frequencies.
Args:
size: a tuple of the target noise pattern size.
reso: a tuple that specifies the resolution along lateral and longitudinal.
octaves: int, number of octaves to use in the perlin model.
persistence: int, persistence applied to every iteration of the generation.
Returns:
a 2D tensor of the perlin noise pattern.
"""
noise = tf.zeros(size)
amplitude = 1.0
for _ in range(octaves):
noise += amplitude * perlin(size, reso)
amplitude *= persistence
reso[0] *= 2
reso[1] *= 2
return noise
def perlin(size, reso):
"""Generate a perlin noise pattern, with specified frequency along x and y.
Theory: https://flafla2.github.io/2014/08/09/perlinnoise.html
Args:
size: a tuple of integers of the target shape of the noise pattern.
reso: a tuple that specifies the resolution along lateral and longitudinal (x and y).
Returns:
a 2D tensor of the target size.
"""
ysample = tf.linspace(0.0, reso[0], size[0])
xsample = tf.linspace(0.0, reso[1], size[1])
xygrid = tf.stack(tf.meshgrid(ysample, xsample), 2)
xygrid = tf.math.mod(tf.transpose(xygrid, [1, 0, 2]), 1.0)
xyfade = (6.0 * xygrid**5) - (15.0 * xygrid**4) + (10.0 * xygrid**3)
angles = 2.0 * np.pi * tf.random.uniform([reso[0] + 1, reso[1] + 1])
grads = tf.stack([tf.cos(angles), tf.sin(angles)], 2)
gradone = tf.compat.v1.image.resize(grads[0:-1, 0:-1], [size[0], size[1]], 'nearest')
gradtwo = tf.compat.v1.image.resize(grads[1:, 0:-1], [size[0], size[1]], 'nearest')
gradthr = tf.compat.v1.image.resize(grads[0:-1, 1:], [size[0], size[1]], 'nearest')
gradfou = tf.compat.v1.image.resize(grads[1:, 1:], [size[0], size[1]], 'nearest')
gradone = tf.reduce_sum(gradone * tf.stack([xygrid[:, :, 0], xygrid[:, :, 1]], 2), 2)
gradtwo = tf.reduce_sum(gradtwo * tf.stack([xygrid[:, :, 0] - 1, xygrid[:, :, 1]], 2), 2)
gradthr = tf.reduce_sum(gradthr * tf.stack([xygrid[:, :, 0], xygrid[:, :, 1] - 1], 2), 2)
gradfou = tf.reduce_sum(gradfou * tf.stack([xygrid[:, :, 0] - 1, xygrid[:, :, 1] - 1], 2), 2)
inteone = (gradone * (1.0 - xyfade[:, :, 0])) + (gradtwo * xyfade[:, :, 0])
intetwo = (gradthr * (1.0 - xyfade[:, :, 0])) + (gradfou * xyfade[:, :, 0])
intethr = (inteone * (1.0 - xyfade[:, :, 1])) + (intetwo * xyfade[:, :, 1])
return tf.sqrt(2.0) * intethr
def apply_spatially_varying_blur(image, blur_size=2, blurtype='disk'):
"""Apply spatially-varying blur to an image.
Uses a pyramid to approximate the spatially-varying blur for efficiency.
Args:
image: a 3D image tensor [H, W, C].
blur_size: base value for the blur size in the pyramid.
blurtype: type of blur, either 'disk' or 'gaussian'.
Returns:
a 3D tensor of the same size as the input, with spatially-varying blur applied.
"""
pyramid = create_pyramid(image, blur_size=blur_size, blurtype=blurtype)
image_blurred = apply_pyramid_blend(pyramid)
return image_blurred
def apply_pyramid_blend(pyramid):
"""Reconstruct an image using bilinear interpolation between pyramid levels.
Args:
pyramid: a list of tensors applied with different blur levels.
Returns:
A reconstructed 3D tensor that is collapsed from the input pyramid.
"""
num_levels = 3
guidance_perlin_base = perlin_collection(
(tf.shape(pyramid[0])[0], tf.shape(pyramid[0])[1]), [2, 2], 1,
tf.random.uniform([], 0.05, 0.25))
guidance_perlin_base -= tf.reduce_min(guidance_perlin_base)
guidance_perlin_base /= tf.reduce_max(guidance_perlin_base)
guidance_blur = tf.clip_by_value(guidance_perlin_base / (1. / num_levels),
0.0, num_levels)
image_reconst = pyramid
for i in range(int(num_levels) - 2, -1, -1):
alpha = tf.clip_by_value(guidance_blur - i, 0., 1.)
alpha = tf.expand_dims(alpha, 2)
image_reconst[i] = lerp(pyramid[i], image_reconst[i + 1], alpha)
return image_reconst[0]
def create_pyramid(image, blur_size=2, blurtype='disk'):
"""Create a pyramid of different levels of disk blur.
Args:
image: a 2D or 3D tensor of the input image.
blur_size: base value for the blur size in the pyramid.
blurtype: a string that specifies the kind of blur, either disk or gaussian.
Returns:
Pyramid: a list of tensors applied with different blur kernels.
"""
image_pyramid = []
for i in range(3):
rsz = np.power(2, i) * blur_size
if blurtype == 'disk':
input_lv = apply_disc_filter(image, rsz, is_rgb=False)
elif blurtype == 'gaussian':
input_lv = gaussian_filter(tf.expand_dims(image, 0), rsz)[0, ...]  # blur the source image with the level-dependent size
else:
raise ValueError('Unknown blur type.')
image_pyramid.append(input_lv)
return image_pyramid
def lerp(a, b, x):
"""Linear interpolation between a and b using weight x."""
return a + x * (b - a)
def render_shadow_from_mask(mask, segmentation=None):
"""Render a shadow mask by applying spatially-varying blur.
Args:
mask: A Tensor of shape [H, W, 1].
segmentation: face segmentation, apply to the generated shadow mask if provided.
Returns:
A Tensor of shape [H, W, 1] containing the shadow mask.
"""
mask = tf.expand_dims(mask, 2)
disc_filter_sz = tf.random.uniform(
shape=(), minval=1, maxval=_MAX_BLUR_SIGMA, dtype=tf.int32)
mask_blurred = tf.cond(
tf.greater(tf.random.uniform([]),
tf.constant(_SV_SIGMA)), lambda: apply_spatially_varying_blur(
mask,
blur_size=tf.random.uniform(
shape=(), minval=1, maxval=3, dtype=tf.int32)),
lambda: apply_disc_filter(mask, disc_filter_sz, is_rgb=False))
mask_blurred_norm = tf.math.divide(mask_blurred, tf.reduce_max(mask_blurred))
if segmentation is not None:
mask_blurred_seg = mask_blurred_norm * segmentation
else:
mask_blurred_seg = mask_blurred_norm
tf.compat.v1.debugging.assert_greater_equal(
tf.reduce_sum(mask_blurred_seg),
0.1,
message='Rendered silhouette mask values too small.') # sample drops if this happens
return mask_blurred_norm
def render_perlin_mask(size, segmentation=None):
"""Render a shadow mask using perlin noise pattern.
Args:
size: A 2D tensor of target mask size.
segmentation: face segmentation, apply to the generated shadow mask if provided.
Returns:
A Tensor of shape [H, W, 1] containing the shadow mask.
"""
with tf.name_scope('render_perlin'):
size = tf.cast(size, tf.int32)
perlin_map = perlin_collection((size[0], size[1]), [4, 4], 4,
tf.random.uniform([], 0.05, 0.85))
perlin_map_thre = tf.cast(tf.greater(perlin_map, 0.15), tf.float32)
perlin_shadow_map = render_shadow_from_mask(
perlin_map_thre, segmentation=segmentation)
return perlin_shadow_map
def render_silhouette_mask(silhouette, size, segmentation=None):
"""Render a shadow mask using silhouette image.
The silhouette image is first augmented by applying random rotation and tiling.
Then used to render a shadow mask by applying spatially-varying blur.
Args:
silhouette: a silhouette image tensor of shape [H, W, 1].
size: A 2D tensor of target mask size.
segmentation: face segmentation, apply to the generated shadow mask if provided.
Returns:
A Tensor of shape [H, W, 1] containing the shadow mask.
"""
with tf.name_scope('render_silhouette'):
silhouette.shape.assert_has_rank(3)
tf.compat.v1.assert_equal(silhouette.shape[2], 1)
degree = tf.random.uniform(shape=(), minval=0, maxval=360, dtype=tf.float32)
silhouette_rot = tfa.image.rotate(
silhouette, degree * np.pi / 180., interpolation='BILINEAR')
rand_rz_ratio = tf.random.uniform(
shape=(), minval=0.3, maxval=0.6, dtype=tf.float32)
silhouette_rsz = resize_image(silhouette_rot, rsz=rand_rz_ratio)
num_rep_h = tf.math.floordiv(
tf.cast(size[0], tf.float32),
tf.cast(tf.shape(silhouette_rsz)[0], tf.float32)) + 2
num_rep_h = tf.cast(num_rep_h, tf.int32)
num_rep_w = tf.math.floordiv(
tf.cast(size[1], tf.float32),
tf.cast(tf.shape(silhouette_rsz)[1], tf.float32)) + 2
num_rep_w = tf.cast(num_rep_w, tf.int32)
silhouette_solid_tile = tf.tile(silhouette_rsz, [num_rep_h, num_rep_w, 1])
silhouette_solid_tile = silhouette_solid_tile[:size[0], :size[1], 0]
silhouette_solid_mask = render_shadow_from_mask(
silhouette_solid_tile, segmentation=segmentation)
return silhouette_solid_mask
"""
Color jitter
"""
def apply_tone_curve(image, gain=(0.5, 0.5, 0.5), is_rgb=False):
"""Apply tone perturbation to images.
Tone curve jitter comes from Schlick's bias and gain.
Schlick, Christophe. "Fast alternatives to Perlin’s bias and gain functions." Graphics Gems IV 4 (1994).
Args:
image: a 3D image tensor [H, W, C].
gain: a tuple of length 3 that specifies the strength of the jitter per color channel.
is_rgb: a bool that indicates whether input is grayscale (C=1) or rgb (C=3).
Returns:
3D tensor applied with a tone curve jitter, has the same size as input.
"""
image_max = tf.reduce_max(image)
image /= image_max
if not is_rgb:
mask = tf.cast(tf.greater_equal(image, 0.5), image.dtype)
image = getbias(image * 2.0, gain[0]) / 2.0 * (1.0 - mask) + (
getbias(image * 2.0 - 1.0, 1.0 - gain[0]) / 2.0 + 0.5) * mask
else:
image_r = image[..., 0, tf.newaxis]
image_r_mask = tf.cast(tf.greater_equal(image_r, 0.5), image.dtype)
image_r = getbias(image_r * 2.0, gain[0]) / 2.0 * (1.0 - image_r_mask) + (
getbias(image_r * 2.0 - 1.0, 1.0 - gain[0]) / 2.0 + 0.5) * image_r_mask
image_g = image[..., 1, tf.newaxis]
image_g_mask = tf.cast(tf.greater_equal(image_g, 0.5), image.dtype)
image_g = getbias(image_g * 2.0, gain[1]) / 2.0 * (1.0 - image_g_mask) + (
getbias(image_g * 2.0 - 1.0, 1.0 - gain[1]) / 2.0 + 0.5) * image_g_mask
image_b = image[..., 2, tf.newaxis]
image_b_mask = tf.cast(tf.greater_equal(image_b, 0.5), image.dtype)
image_b = getbias(image_b * 2.0, gain[2]) / 2.0 * (1.0 - image_b_mask) + (
getbias(image_b * 2.0 - 1.0, 1.0 - gain[2]) / 2.0 + 0.5) * image_b_mask
image = tf.concat([image_r, image_g, image_b], 2)
return image * image_max
def getbias(x, bias):
"""Bias in Ken Perlin’s bias and gain functions."""
return x / ((1.0 / bias - 2.0) * (1.0 - x) + 1.0 + 1e-6)
def get_ctm_ls(image, target):
"""Use least square to obtain color transfer matrix.
Args:
image: the source tensor of shape [H, W, 3].
target: target tensor with the same shape as input.
Returns:
tensor of size 3 by 3 that minimizes |C x image - target|_2.
"""
image = tf.reshape(image, [-1, 3])
target = tf.reshape(target, [-1, 3])
ctm = tf.linalg.lstsq(image, target, l2_regularizer=0.0, fast=True)
return tf.transpose(ctm)
def apply_ctm(image, ctm):
"""Apply a color transfer matrix.
Args:
image: a tensor that contains the source image of shape [H, W, 3].
ctm: a tensor that contains a 3 by 3 color matrix.
Returns:
a tensor of the same shape as image.
"""
shape = tf.shape(image)
image = tf.reshape(image, [-1, 3])
image = tf.tensordot(image, ctm, axes=[[-1], [-1]])
return tf.reshape(image, shape)
def apply_geometric_augmentation(image):
"""Randomly apply geometric augmentation."""
processed_images = tf.image.random_flip_left_right(image)
return processed_images
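# Small self-test sketch (the shapes and the random mask below are made up; it
# only exercises a couple of the utilities above under TF2 eager execution):
if __name__ == "__main__":
    mask = tf.cast(tf.random.uniform([128, 128, 1]) > 0.7, tf.float32)
    ss_shadow = apply_ss_shadow_map(mask)  # wavelength-dependent blur, [128, 128, 3]
    brightness = get_brightness_mask(tf.constant([128, 128]), min_val=0.6)
    print(ss_shadow.shape, brightness.shape)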
|
python
|
from rest_framework import serializers
from .models import ListModel
from utils import datasolve
class BinsizeGetSerializer(serializers.ModelSerializer):
bin_size = serializers.CharField(read_only=True, required=False)
bin_size_w = serializers.FloatField(read_only=True, required=False)
bin_size_d = serializers.FloatField(read_only=True, required=False)
bin_size_h = serializers.FloatField(read_only=True, required=False)
creater = serializers.CharField(read_only=True, required=False)
create_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
update_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', ]
class BinsizePostSerializer(serializers.ModelSerializer):
openid = serializers.CharField(read_only=False, required=False, validators=[datasolve.openid_validate])
bin_size = serializers.CharField(read_only=False, required=True, validators=[datasolve.data_validate])
bin_size_w = serializers.FloatField(read_only=False, required=True, validators=[datasolve.data_validate])
bin_size_d = serializers.FloatField(read_only=False, required=True, validators=[datasolve.data_validate])
bin_size_h = serializers.FloatField(read_only=False, required=True, validators=[datasolve.data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[datasolve.data_validate])
class Meta:
model = ListModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class BinsizeUpdateSerializer(serializers.ModelSerializer):
bin_size = serializers.CharField(read_only=False, required=True, validators=[datasolve.data_validate])
bin_size_w = serializers.FloatField(read_only=False, required=True, validators=[datasolve.data_validate])
bin_size_d = serializers.FloatField(read_only=False, required=True, validators=[datasolve.data_validate])
bin_size_h = serializers.FloatField(read_only=False, required=True, validators=[datasolve.data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[datasolve.data_validate])
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class BinsizePartialUpdateSerializer(serializers.ModelSerializer):
bin_size = serializers.CharField(read_only=False, required=False, validators=[datasolve.data_validate])
bin_size_w = serializers.FloatField(read_only=False, required=False, validators=[datasolve.data_validate])
bin_size_d = serializers.FloatField(read_only=False, required=False, validators=[datasolve.data_validate])
bin_size_h = serializers.FloatField(read_only=False, required=False, validators=[datasolve.data_validate])
creater = serializers.CharField(read_only=False, required=False, validators=[datasolve.data_validate])
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class FileRenderSerializer(serializers.ModelSerializer):
bin_size = serializers.CharField(read_only=False, required=False)
bin_size_w = serializers.FloatField(read_only=False, required=False)
bin_size_d = serializers.FloatField(read_only=False, required=False)
bin_size_h = serializers.FloatField(read_only=False, required=False)
creater = serializers.CharField(read_only=False, required=False)
create_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
update_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
class Meta:
model = ListModel
ref_name = 'BinSizeFileRenderSerializer'
exclude = ['openid', 'is_delete', ]
|
python
|
"""
https://leetcode.com/problems/evaluate-reverse-polish-notation/
Tags: Google; Medium; Stack
"""
from typing import List
from operator import add, sub, mul, floordiv
def div(op1: int, op2: int) -> int:
neg = (op1 < 0 < op2) or (op2 < 0 < op1)
ans = floordiv(abs(op1), abs(op2))
return ans * (-1 if neg else 1)
class Solution:
def evalRPN(self, tokens: List[str]) -> int:
stack = []
ops = {'+': add, '-': sub, '*': mul, '/': div}
for ch in tokens:
if ch in ops:
op2 = stack.pop()
op1 = stack.pop()
stack.append(ops[ch](op1, op2))
else:
stack.append(int(ch))
# print(f"{stack} {ch}")
return stack[0]
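# Quick self-check of the evaluator above (standard RPN examples; expected
# values follow directly from the postfix definition, with division truncating
# toward zero):
if __name__ == "__main__":
    assert Solution().evalRPN(["2", "1", "+", "3", "*"]) == 9
    assert Solution().evalRPN(["4", "13", "5", "/", "+"]) == 6
    assert div(7, -3) == -2  # truncates toward zero, unlike 7 // -3 == -3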
|
python
|
# -*- coding: utf-8 -*-
from .aapt import Aapt
from .minicap import Minicap
from .rotation import Rotation
from .performance.fps import Fps
from .performance.cpu import Cpu
from .performance.meminfo import Meminfo
from .performance import DeviceWatcher
__all__ = ['Aapt', 'Minicap', 'Rotation', 'Fps', 'Cpu', 'Meminfo', 'DeviceWatcher']
|
python
|
import numpy, re
def encode_onehot(index, size):
onehot = numpy.zeros(size, dtype=numpy.int8)
onehot[index] = 1
return onehot
def prettify_string(string, replace_newline_with_space=True):
pretty_string = str(string).strip()
if replace_newline_with_space:
pretty_string = pretty_string.replace('\n', ' ').replace('\r', ' ')
pretty_string = re.sub(' +', ' ', pretty_string)
return pretty_string
def remove_size_descriptions(text):
tailored_text = ''
for word in text.lower().split(' '):
if is_alphanumeric(word):
continue
elif ',' in word:
valid_word = True
for word_part in word.split(','):
if is_alphanumeric(word_part):
valid_word = False
if valid_word is False:
continue
elif '/' in word:
valid_word = True
for word_part in word.split('/'):
if is_alphanumeric(word_part):
valid_word = False
if valid_word is False:
continue
elif '~' in word:
valid_word = True
for word_part in word.split('~'):
if is_alphanumeric(word_part):
valid_word = False
if valid_word is False:
continue
tailored_text += word
tailored_text += ' '
return tailored_text.strip()
def is_alphabet(string):
is_alphabet = True
for char in string:
if not ord('A') <= ord(char) <= ord('z'):
is_alphabet = False
break
return is_alphabet
def is_numeric(string):
is_numeric = True
for char in string:
if not ord('0') <= ord(char) <= ord('9'):
is_numeric = False
break
return is_numeric
def is_alphanumeric(string):
is_alphanumeric = True
for char in string:
if ord('A') <= ord(char) <= ord('z'):
continue
elif ord('0') <= ord(char) <= ord('9'):
continue
is_alphanumeric = False
break
return is_alphanumeric
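# Small demonstration of the helpers above (made-up inputs, chosen so the
# character-range checks are easy to trace by hand):
if __name__ == "__main__":
    print(encode_onehot(2, 5))              # [0 0 1 0 0]
    print(prettify_string("  a\nb   c "))   # "a b c"
    print(is_alphabet("ab"), is_numeric("12"), is_alphanumeric("ab12"))  # True True True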
|
python
|
from unittest import TestCase
from ddsc.core.consistency import UploadDetails, ProjectChecker, DSResourceNotConsistentError
from mock import Mock, patch, call
class TestUploadDetails(TestCase):
def test_inconsistent_status(self):
mock_dds_file = Mock()
mock_dds_file.name = 'file1.dat'
mock_dds_file.id = '123'
mock_status = Mock(is_consistent=False, initiated_on='2021-01-01', error_on=None)
mock_dds_file.get_upload.return_value.status = mock_status
upload_details = UploadDetails(mock_dds_file, '/data/file1.dat')
self.assertEqual(upload_details.inconsistent(), True)
self.assertEqual(upload_details.had_error(), False)
self.assertEqual(upload_details.is_bad(), True)
self.assertEqual(upload_details.name(), 'file1.dat')
self.assertEqual(upload_details.status_str(), 'Inconsistent')
self.assertEqual(upload_details.file_id(), '123')
self.assertEqual(upload_details.message(), 'started upload at 2021-01-01')
def test_error_status(self):
mock_dds_file = Mock()
mock_dds_file.name = 'file1.dat'
mock_dds_file.id = '123'
mock_status = Mock(is_consistent=True, initiated_on='2021-01-01', error_on='2021-01-02', error_message='bad data')
mock_dds_file.get_upload.return_value.status = mock_status
upload_details = UploadDetails(mock_dds_file, '/data/file1.dat')
self.assertEqual(upload_details.inconsistent(), False)
self.assertEqual(upload_details.had_error(), True)
self.assertEqual(upload_details.is_bad(), True)
self.assertEqual(upload_details.name(), 'file1.dat')
self.assertEqual(upload_details.status_str(), 'Error')
self.assertEqual(upload_details.file_id(), '123')
self.assertEqual(upload_details.message(), 'bad data')
def test_error_ok(self):
mock_dds_file = Mock()
mock_dds_file.name = 'file1.dat'
mock_dds_file.id = '123'
mock_status = Mock(is_consistent=True, initiated_on='2021-01-01', error_on=None, error_message=None)
mock_dds_file.get_upload.return_value.status = mock_status
upload_details = UploadDetails(mock_dds_file, '/data/file1.dat')
self.assertEqual(upload_details.inconsistent(), False)
self.assertEqual(upload_details.had_error(), False)
self.assertEqual(upload_details.is_bad(), False)
self.assertEqual(upload_details.name(), 'file1.dat')
self.assertEqual(upload_details.status_str(), 'Ok')
self.assertEqual(upload_details.file_id(), '123')
self.assertEqual(upload_details.message(), '')
self.assertEqual(upload_details.remote_path, '/data/file1.dat')
class TestProjectChecker(TestCase):
def setUp(self):
self.config = Mock()
self.project = Mock()
self.project.name = "Mouse"
self.checker = ProjectChecker(self.config, self.project)
def test_files_are_ok__good(self):
self.project.get_project_files_generator.return_value = []
self.assertEqual(self.checker.files_are_ok(), True)
def test_files_are_ok__error(self):
self.project.get_project_files_generator.side_effect = DSResourceNotConsistentError(Mock(), Mock(), Mock())
dds_file = Mock()
dds_file.name = "file1.txt"
dds_file.id = "123"
dds_file.get_upload.return_value.status.is_consistent = False
dds_file.get_upload.return_value.status.error_on = None
dds_file.get_upload.return_value.status.initiated_on = '2021-01-01'
self.project.get_path_to_files.return_value.items.return_value = [
("/data/bad/file1.txt", dds_file)
]
self.assertEqual(self.checker.files_are_ok(), False)
headers, data = self.checker.get_bad_uploads_table_data()
self.assertEqual(headers, ['File', 'Status', 'Message', 'FileID', 'RemotePath'])
self.assertEqual(data, [['file1.txt', 'Inconsistent', 'started upload at 2021-01-01', '123',
'/data/bad/file1.txt']])
@patch('ddsc.core.consistency.print')
@patch('ddsc.core.consistency.UploadDetails')
def test_print_bad_uploads_table(self, mock_upload_details, mock_print):
mock_upload_details.return_value.is_bad.return_value = True
mock_upload_details.return_value.name.return_value = 'file1.txt'
mock_upload_details.return_value.status_str.return_value = 'BAD'
mock_upload_details.return_value.message.return_value = ' file is bad'
mock_upload_details.return_value.file_id.return_value = '123'
mock_upload_details.return_value.remote_path = '/data/file1.txt'
self.project.get_path_to_files.return_value.items.return_value = [
('/data/file1.txt', Mock())
]
self.checker.print_bad_uploads_table()
mock_print.assert_has_calls([
call("ERROR: Project Mouse is not in a consistent state.\n"),
call("Please wait while file uploads are checked.\nThis process can take quite a while."),
call('File Status Message FileID RemotePath\n'
'--------- -------- ----------- -------- ---------------\n'
'file1.txt BAD file is bad 123 /data/file1.txt'),
call('\nNOTE: Inconsistent files should resolve in a few minutes after starting.'),
call('\nAn inconsistent file can be deleted by running:\n ddsclient delete -p <ProjectName> '
'--path <RemotePath>'),
call()
])
@patch('ddsc.core.consistency.print')
@patch('ddsc.core.consistency.time')
def test_wait_for_consistency(self, mock_time, mock_print):
self.project.get_project_files_generator.side_effect = [
DSResourceNotConsistentError(Mock(), Mock(), Mock()),
[]
]
self.checker.wait_for_consistency(wait_sec=10)
mock_print.assert_has_calls([
call('Checking files for project Mouse'),
call('Project not consistent yet. Waiting.'),
call('Checking files for project Mouse'),
call('Project Mouse is consistent.')
])
mock_time.sleep.assert_called_with(10)
|
python
|
# Global Benchmark Database (GBD)
# Copyright (C) 2020 Markus Iser, Karlsruhe Institute of Technology (KIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gbd_tool.util import eprint
from tatsu import parse
import pprint
def build_query(query=None, hashes=[], resolve=[], collapse="GROUP_CONCAT", group_by="hash", join_type="LEFT"):
statement = "SELECT {} FROM {} {} WHERE {} GROUP BY {}"
s_attributes = group_by + ".value"
s_from = group_by
s_tables = ""
s_conditions = "1=1"
s_group_by = group_by + ".value"
tables = set(resolve)
if query is not None and query:
ast = parse(GRAMMAR, query)
s_conditions = build_where(ast)
tables.update(collect_tables(ast))
if len(hashes):
s_conditions = s_conditions + " AND hash.hash in ('{}')".format("', '".join(hashes))
if len(resolve):
s_attributes = s_attributes + ", " + ", ".join(['{}(DISTINCT({}.value))'.format(collapse, table) for table in resolve])
s_tables = " ".join(['{} JOIN {} ON {}.hash = {}.hash'.format(join_type, table, group_by, table) for table in tables if table != group_by])
return statement.format(s_attributes, s_from, s_tables, s_conditions, s_group_by)
def build_where(ast):
if ast["q"]:
return build_where(ast["q"])
elif ast["qop"]:
return "({} {} {})".format(build_where(ast["left"]), ast["qop"], build_where(ast["right"]))
elif ast["sop"]:
return "{}.value {} \"{}\"".format(ast["left"], ast["sop"], ast["right"])
elif ast["aop"]:
return "{} {} {}".format(build_where(ast["left"]), ast["aop"], build_where(ast["right"]))
elif ast["bracket_term"]:
return "({})".format(build_where(ast["bracket"]))
elif ast["top"]:
return "{} {} {}".format(build_where(ast["left"]), ast["top"], build_where(ast["right"]))
elif ast["value"]:
return "CAST({}.value AS FLOAT)".format(ast["value"])
elif ast["constant"]:
return ast["constant"]
def collect_tables(ast):
if ast["q"]:
return collect_tables(ast["q"])
elif ast["qop"] or ast["aop"] or ast["top"]:
return collect_tables(ast["left"]) | collect_tables(ast["right"])
elif ast["bracket_term"]:
return collect_tables(ast["bracket_term"])
elif ast["sop"]:
return { ast["left"] }
elif ast["value"]:
return { ast["value"] }
else:
return set()
GRAMMAR = r'''
@@grammar::EXP
@@ignorecase::True
start = q:query $ ;
query = '(' q:query ')' | left:query qop:('and' | 'or') right:query | scon | acon;
scon = left:colname sop:('=' | '!=') right:alnum | left:colname sop:('like') right:likean ;
acon = left:term aop:('=' | '!=' | '<' | '>' | '<=' | '>=' ) right:term ;
term = value:colname | constant:num | '(' left:term top:('+'|'-'|'*'|'/') right:term ')' ;
num = /[0-9\.\-]+/ ;
alnum = /[a-zA-Z0-9_\.\-\/\?]+/ ;
likean = /[\%]?[a-zA-Z0-9_\.\-\/\?]+[\%]?/;
colname = /[a-zA-Z][a-zA-Z0-9_]+/ ;
'''
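# Minimal illustration of the query builder above (the attribute names
# "competition" and "result" are invented for the example; any name matching
# `colname` in the grammar works the same way). Join order can vary because the
# referenced tables are collected in a set.
if __name__ == "__main__":
    print(build_query("competition = 2020", resolve=["result"]))
    # e.g. SELECT hash.value, GROUP_CONCAT(DISTINCT(result.value)) FROM hash
    #      LEFT JOIN competition ON hash.hash = competition.hash
    #      LEFT JOIN result ON hash.hash = result.hash
    #      WHERE competition.value = "2020" GROUP BY hash.value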
|
python
|
import pianohat
import time
from pygame import mixer
my_sound_files = [
"/home/pi/Downloads/despacito.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/despacito.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/despacito.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/Metal.wav",
"/home/pi/Downloads/Metal.wav"]
def handle_note(channel, pressed):
if pressed:
my_sounds[channel].play(loops=0)
print("you pressed key {}".format(channel))
else:
print("you released key{}".format(channel))
mixer.init(22050, -16, 2, 512)
mixer.set_num_channels(13)
my_sounds = [mixer.Sound(sound_file) for sound_file in my_sound_files]
pianohat.on_note(handle_note)
while True:
time.sleep(.001)
|
python
|
from django.db import models
# Create your models here.
class Employee(models.Model):
Name = models.CharField(max_length=64)
Email = models.CharField(max_length=64)
Password = models.CharField(max_length=64)
Position = models.CharField(max_length=64)
Salary = models.IntegerField()
def __str__(self):
return f"{self.id}: {self.Name} is {self.Position} with salary of {self.Salary}. Email:{self.Email} and Password:{self.Password}"
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
# Use 2to3 build conversion if required
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
# 2.x
from distutils.command.build_py import build_py
setup(
name="snpy",
description="A wrapper-library for working with openSNP data",
license="WTFPL",
version="0.1",
author="Sergei Lebedev",
author_email="[email protected]",
url="http://github.com/superbobry/snpy/",
classifiers=[
"Intended Audience :: Developers",
"License :: Public Domain",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
py_modules=["sn"],
platforms="any",
cmdclass={"build_py": build_py}
)
|
python
|
# Copyright (C) 2021 ServiceNow, Inc.
import pytest
from nrcan_p2.data_processing.preprocessing_str import (
add_newline,
sentence_tokenize_spacy_sm,
sentence_tokenize_spacy_lg,
tokenize_spacy_sm,
tokenize_spacy_lg,
rm_stopwords_spacy,
rm_punct,
rm_newline
)
@pytest.mark.parametrize("input_str,expected",
[
("blah \n blah \n", "blah \n blah \n\n"),
("blah \n blah ", "blah \n blah \n")
]
)
def test_add_newline(input_str, expected):
result = add_newline(input_str)
assert result == expected
@pytest.mark.parametrize("input_str,expected",
[
("blah\nblah", "blah blah"),
("blah\n", "blah ")
]
)
def test_rm_newline(input_str, expected):
result = rm_newline(input_str)
assert result == expected
@pytest.mark.parametrize("input_str,expected",
[
("blah.! blah-! @?-.,:;=()", "blah blah "),
("blah\n", "blah\n")
]
)
def test_rm_punct(input_str, expected):
result = rm_punct(input_str)
assert result == expected
@pytest.mark.parametrize("input_str, expected",
[
("Here is my sentence. Here is another sentence!", "Here is my sentence.\nHere is another sentence!\n")
]
)
def test_sentence_tokenize_spacy_sm(input_str, expected):
result = sentence_tokenize_spacy_sm(input_str)
assert result == expected
@pytest.mark.parametrize("input_str, expected",
[
("Here is my sentence. Here is another sentence!", "Here is my sentence.\nHere is another sentence!\n"),
]
)
def test_sentence_tokenize_spacy_lg(input_str, expected):
result = sentence_tokenize_spacy_lg(input_str)
assert result == expected
@pytest.mark.parametrize("input_str, expected",
[
("""Here. "This," he said, "is ridiculous." His mother-in-law- A.K. Hawings, an old but sharp woman- did not agree...""",
"""Here . " This , " he said , " is ridiculous . " His mother - in - law- A.K. Hawings , an old but sharp woman- did not agree ...""")
]
)
def test_tokenize_spacy_sm(input_str, expected):
result = tokenize_spacy_sm(input_str)
assert result.strip() == expected.strip()
@pytest.mark.parametrize("input_str, expected",
[
("""Here. "This," he said, "is ridiculous." His mother-in-law- A.K. Hawings, an old but sharp woman- did not agree...""",
"""Here . " This , " he said , " is ridiculous . " His mother - in - law- A.K. Hawings , an old but sharp woman- did not agree ..."""),
("""Here omg he said.\n And, then he runn-ing,\n that we didn't do it.""",
["""Here omg he said.\n""", """And, then he runn-ing,\n""", """that we didn't do it."""]
)
]
)
def test_tokenize_spacy_lg(input_str, expected):
result = tokenize_spacy_lg(input_str)
assert result.strip() == expected.strip()
@pytest.mark.parametrize("input_str, expected",
[
("""Here omg he said we did n't do it We A.J. Patterson. Do. latterly thence we went the two of us""",
"""omg said A.J. Patterson. Do. went"""),
]
)
def test_rm_stopwords_spacy(input_str, expected):
result = rm_stopwords_spacy(input_str)
assert result.strip() == expected.strip()
@pytest.mark.parametrize("input_str, expected",
[
("""Here. "This," he said, "is ridiculous." His mother-in-law- A.K. Hawings, an old but sharp woman- did not agree...
He's gotten away with this again, as a goose get's away with doing anything at all.
I've got him! But how did you get him? Whither did he run?? Didn't you know?
""",
# tokenization removes newlines
""". " , " said , " ridiculous . " mother - - law- A.K. Hawings , old sharp woman- agree ...
gotten away , goose away .
got ! ? run ? ? know ?
"""),
("Here (he said) don't even try, no.", "( said ) try , .")
]
)
def test_tokenize_spacy_lg_rm_stopwords(input_str, expected):
result = tokenize_spacy_lg(input_str)
result = rm_stopwords_spacy(result)
assert result.strip() == expected.strip()
|
python
|
"""
Helper functions and classes around GeoIP lookups, based on Maxmind's
`maxminddb <https://pypi.python.org/pypi/maxminddb>`_ and
`geoip2 <https://pypi.python.org/pypi/geoip2>`_ Python packages.
"""
import logging
import time
import genc
from geoip2.database import Reader
from geoip2.errors import AddressNotFoundError, GeoIP2Error
from maxminddb import InvalidDatabaseError
from maxminddb.const import MODE_AUTO
from ichnaea.conf import settings
from ichnaea.constants import DEGREE_DECIMAL_PLACES
from ichnaea.geocode import GEOCODER
LOGGER = logging.getLogger(__name__)
# The region codes present in the GeoIP data files, extracted from
# the CSV files. Accuracy numbers from October 2015 from
# https://www.maxmind.com/en/geoip2-city-database-accuracy.
# Default value is 0.3 if the website didn't include any data.
REGION_SCORE = {
"AD": 0.3,
"AE": 0.9,
"AF": 0.3,
"AG": 0.3,
"AI": 0.3,
"AL": 0.3,
"AM": 0.3,
"AO": 0.3,
"AQ": 0.3,
"AR": 0.7,
"AS": 0.3,
"AT": 0.7,
"AU": 0.7,
"AW": 0.3,
"AX": 0.3,
"AZ": 0.3,
"BA": 0.3,
"BB": 0.3,
"BD": 0.3,
"BE": 0.8,
"BF": 0.3,
"BG": 0.7,
"BH": 0.3,
"BI": 0.3,
"BJ": 0.3,
"BL": 0.3,
"BM": 0.3,
"BN": 0.3,
"BO": 0.3,
"BQ": 0.3,
"BR": 0.7,
"BS": 0.3,
"BT": 0.3,
"BW": 0.3,
"BY": 0.3,
"BZ": 0.3,
"CA": 0.8,
"CC": 0.3,
"CD": 0.3,
"CF": 0.3,
"CG": 0.3,
"CH": 0.7,
"CI": 0.3,
"CK": 0.3,
"CL": 0.8,
"CM": 0.3,
"CN": 0.6,
"CO": 0.6,
"CR": 0.9,
"CU": 0.3,
"CV": 0.3,
"CW": 0.3,
"CX": 0.3,
"CY": 0.3,
"CZ": 0.8,
"DE": 0.8,
"DJ": 0.3,
"DK": 0.8,
"DM": 0.3,
"DO": 0.3,
"DZ": 0.3,
"EC": 0.8,
"EE": 0.8,
"EG": 0.7,
"ER": 0.3,
"ES": 0.8,
"ET": 0.3,
"FI": 0.5,
"FJ": 0.3,
"FK": 0.3,
"FM": 0.3,
"FO": 0.3,
"FR": 0.7,
"GA": 0.3,
"GB": 0.8,
"GD": 0.3,
"GE": 0.3,
"GF": 0.3,
"GG": 0.3,
"GH": 0.3,
"GI": 0.3,
"GL": 0.3,
"GM": 0.3,
"GN": 0.3,
"GP": 0.3,
"GQ": 0.3,
"GR": 0.6,
"GS": 0.3,
"GT": 0.3,
"GU": 0.3,
"GW": 0.3,
"GY": 0.3,
"HK": 0.9,
"HN": 0.3,
"HR": 0.6,
"HT": 0.3,
"HU": 0.8,
"ID": 0.7,
"IE": 0.5,
"IL": 0.7,
"IM": 0.3,
"IN": 0.6,
"IO": 0.3,
"IQ": 0.3,
"IR": 0.3,
"IS": 0.8,
"IT": 0.6,
"JE": 0.3,
"JM": 0.3,
"JO": 0.3,
"JP": 0.8,
"KE": 0.3,
"KG": 0.3,
"KH": 0.3,
"KI": 0.3,
"KM": 0.3,
"KN": 0.3,
"KP": 0.3,
"KR": 0.7,
"KW": 0.3,
"KY": 0.3,
"KZ": 0.3,
"LA": 0.3,
"LB": 0.3,
"LC": 0.3,
"LI": 0.3,
"LK": 0.3,
"LR": 0.3,
"LS": 0.3,
"LT": 0.7,
"LU": 0.9,
"LV": 0.8,
"LY": 0.3,
"MA": 0.3,
"MC": 0.3,
"MD": 0.3,
"ME": 0.3,
"MF": 0.3,
"MG": 0.3,
"MH": 0.3,
"MK": 0.3,
"ML": 0.3,
"MM": 0.3,
"MN": 0.3,
"MO": 0.3,
"MP": 0.3,
"MQ": 0.3,
"MR": 0.3,
"MS": 0.3,
"MT": 0.9,
"MU": 0.3,
"MV": 0.3,
"MW": 0.3,
"MX": 0.6,
"MY": 0.7,
"MZ": 0.3,
"NA": 0.3,
"NC": 0.3,
"NE": 0.3,
"NF": 0.3,
"NG": 0.3,
"NI": 0.3,
"NL": 0.8,
"NO": 0.8,
"NP": 0.3,
"NR": 0.3,
"NU": 0.3,
"NZ": 0.6,
"OM": 0.3,
"PA": 0.3,
"PE": 0.8,
"PF": 0.3,
"PG": 0.3,
"PH": 0.5,
"PK": 0.8,
"PL": 0.6,
"PM": 0.3,
"PN": 0.3,
"PR": 0.9,
"PS": 0.3,
"PT": 0.7,
"PW": 0.3,
"PY": 0.3,
"QA": 0.9,
"RE": 0.3,
"RO": 0.7,
"RS": 0.7,
"RU": 0.8,
"RW": 0.3,
"SA": 0.7,
"SB": 0.3,
"SC": 0.3,
"SD": 0.3,
"SE": 0.7,
"SG": 0.9,
"SH": 0.3,
"SI": 0.8,
"SJ": 0.3,
"SK": 0.7,
"SL": 0.3,
"SM": 0.3,
"SN": 0.3,
"SO": 0.3,
"SR": 0.3,
"SS": 0.3,
"ST": 0.3,
"SV": 0.3,
"SX": 0.3,
"SY": 0.3,
"SZ": 0.3,
"TC": 0.3,
"TD": 0.3,
"TF": 0.3,
"TG": 0.3,
"TH": 0.8,
"TJ": 0.3,
"TK": 0.3,
"TL": 0.3,
"TM": 0.3,
"TN": 0.3,
"TO": 0.3,
"TR": 0.7,
"TT": 0.3,
"TV": 0.3,
"TW": 0.8,
"TZ": 0.3,
"UA": 0.7,
"UG": 0.3,
"UM": 0.3,
"US": 0.8,
"UY": 0.8,
"UZ": 0.3,
"VA": 0.3,
"VC": 0.3,
"VE": 0.6,
"VG": 0.3,
"VI": 0.3,
"VN": 0.7,
"VU": 0.3,
"WF": 0.3,
"WS": 0.3,
"XK": 0.3,
"YE": 0.3,
"YT": 0.3,
"ZA": 0.7,
"ZM": 0.3,
"ZW": 0.3,
}
# The largest subdivision radius in each region, based on
# https://en.wikipedia.org/wiki/List_of_country_subdivisions_by_area
SUB_RADII = {
"AU": 1200000.0,
"BR": 1000000.0,
"CA": 1400000.0,
"CD": 500000.0,
"CL": 500000.0,
"CN": 1000000.0,
"DZ": 600000.0,
"EG": 500000.0,
"GL": 1600000.0,
"ID": 550000.0,
"KZ": 550000.0,
"LY": 500000.0,
"ML": 600000.0,
"NE": 600000.0,
"RU": 1200000.0,
"SA": 650000.0,
"SD": 450000.0,
"SO": 500000.0,
"US": 1200000.0,
}
SUB_RADIUS = 400000.0
REGION_RADIUS = 5000000.0
"""
Usually a per-region radius is calculated. This is the worst case
radius returned for GeoIP region based queries, based on data
for Russia:
``geocalc.distance(60.0, 100.0, 41.199278, 27.351944) == 5234427 meters``
"""
# City selection based on
# https://en.wikipedia.org/wiki/List_of_cities_proper_by_population
# Radius data based on bbox from http://www.geonames.org/getJSON?id=<num>
# from ichnaea.geocalc import distance
# round(max(distance(box['north'], box['west'], box['north'], box['east']),
# distance(box['north'], box['west'], box['south'], box['west']))
# / 2000.0) * 1000.0
# representing an inner circle inside the bounding box
CITY_RADII = {
98182: 39000.0, # Baghdad
108410: 30000.0, # Riyadh
112931: 39000.0, # Tehran
323786: 27000.0, # Ankara
360630: 40000.0, # Cairo
524901: 47000.0, # Moscow
745044: 48000.0, # Istanbul
1172451: 36000.0, # Lahore
1185241: 46000.0, # Dhaka
1275339: 50000.0, # Mumbai
1277333: 33000.0, # Bengaluru
1279233: 28000.0, # Ahmedabad
1566083: 27000.0, # Ho Chi Minh City
1609350: 33000.0, # Bangkok
1642911: 42000.0, # Jakarta
1668341: 40000.0, # Taipei
1701668: 47000.0, # Manila
1792947: 48000.0, # Tianjin
1796236: 68000.0, # Shanghai
1816670: 49000.0, # Beijing
1835848: 46000.0, # Seoul
1850147: 42000.0, # Tokyo
1871859: 26000.0, # Pyongyang
2314302: 40000.0, # Kinshasa
2643743: 40000.0, # London
2950159: 27000.0, # Berlin
3117735: 26000.0, # Madrid
3435910: 50000.0, # Buenos Aires
3448439: 46000.0, # Sao Paulo
3530597: 50000.0, # Mexico City
3688689: 40000.0, # Bogota
3871336: 32000.0, # Santiago
3936456: 40000.0, # Lima
5128581: 41000.0, # New York
}
CITY_RADIUS = 25000.0
"""
Radius returned for GeoIP city based queries.
25km is pure guesswork but should cover most cities, except those
explicitly listed in :data:`~ichnaea.geoip.CITY_RADII`.
"""
GEOIP_GENC_MAP = {
"AX": "FI", # Aland Islands -> Finland
"PS": "XW", # Palestine -> West Bank
"SJ": "XR", # Svalbard and Jan Mayen -> Svalbard
"UM": "US", # US Minor Outlying Territories -> US
}
def configure_geoip(filename=None, mode=MODE_AUTO, raven_client=None, _client=None):
"""
Configure and return a :class:`~ichnaea.geoip.GeoIPWrapper` instance.
If no geoip database file of the correct type can be found, return
a :class:`~ichnaea.geoip.GeoIPNull` dummy implementation instead.
:param raven_client: A configured raven/sentry client.
:type raven_client: :class:`raven.base.Client`
:param _client: Test-only hook to provide a pre-configured client.
"""
filename = settings("geoip_path") if filename is None else filename
if _client is not None:
return _client
if not filename:
# No DB file specified in the config
if raven_client is not None:
try:
raise OSError("No geoip filename specified.")
except OSError:
raven_client.captureException()
LOGGER.info("Returning GeoIPNull.")
return GeoIPNull()
try:
db = GeoIPWrapper(filename, mode=mode)
if not db.check_extension() and raven_client is not None:
try:
raise RuntimeError("Maxmind C extension not installed.")
except RuntimeError:
raven_client.captureException()
# Actually initialize the memory cache, by doing one fake look-up
db.lookup("127.0.0.1")
except (InvalidDatabaseError, IOError, OSError, ValueError):
# Error opening the database file, maybe it doesn't exist
if raven_client is not None:
raven_client.captureException()
LOGGER.info("Returning GeoIPNull.")
return GeoIPNull()
LOGGER.info("GeoIP configured.")
return db
class GeoIPWrapper(Reader):
"""
A wrapper around the :class:`geoip2.database.Reader` class with a lookup
function which returns `None` instead of raising exceptions.
Takes the absolute path to a geoip database on the local filesystem
and an additional mode, which defaults to
:data:`maxminddb.const.MODE_AUTO`.
:raises: :exc:`maxminddb.InvalidDatabaseError`
"""
lookup_exceptions = (
AddressNotFoundError,
GeoIP2Error,
InvalidDatabaseError,
ValueError,
)
def __init__(self, filename, mode=MODE_AUTO):
super(GeoIPWrapper, self).__init__(filename, mode=mode)
database_type = self.metadata().database_type
if database_type not in ("GeoIP2-City", "GeoLite2-City"):
message = "Invalid database type, expected City"
raise InvalidDatabaseError(message)
@property
def age(self):
"""
:returns: The age of the database file in days.
:rtype: int
"""
build_epoch = self.metadata().build_epoch
return int(round((time.time() - build_epoch) / 86400, 0))
def ping(self):
"""
:returns: True if this is a real database with a valid db file.
:rtype: bool
"""
return True
def check_extension(self):
"""
:returns: True if the C extension was installed correctly.
:rtype: bool
"""
for instance in (self.metadata(), self._db_reader):
if type(instance).__module__ != "builtins":
return False
return True
def lookup(self, addr):
"""
Look up information for the given IP address.
:param addr: IP address (e.g. '203.0.113.30')
:type addr: str
:returns: A dictionary with city, region data and location data.
:rtype: dict
"""
try:
record = self.city(addr)
except self.lookup_exceptions:
# The GeoIP database has no data for this IP or is broken.
record = None
if not record:
return None
region = record.country
city = record.city.geoname_id if record.city else None
subs = []
if record.subdivisions:
for sub in record.subdivisions:
subs.append(sub.iso_code)
location = record.location
if not (location.latitude and location.longitude and region.iso_code):
return None
code = GEOIP_GENC_MAP.get(region.iso_code, region.iso_code).upper()
radius, region_radius = self.radius(code, location, subs=subs, city=city)
score = 0.9
if city:
score = REGION_SCORE.get(code, 0.3)
return {
# Round lat/lon to a standard maximum precision
"latitude": round(location.latitude, DEGREE_DECIMAL_PLACES),
"longitude": round(location.longitude, DEGREE_DECIMAL_PLACES),
"region_code": code,
"region_name": genc.region_by_alpha2(code).name,
"city": bool(city),
"radius": radius,
"region_radius": region_radius,
"score": score,
}
def radius(self, code, location, subs=None, city=None, default=REGION_RADIUS):
"""
Return the best radius guess for the given region code.
:param code: A two-letter region code.
:type code: str
:param subs: A list of ISO subdivision codes.
:type subs: list
:param city: A geoname_id from a city record or None.
:type city: int
:returns: A tuple of radius/region radius guesses in meters.
:rtype: tuple
"""
region_radius = GEOCODER.region_max_radius(code)
if region_radius is None:
# No region code or no successful radius lookup
region_radius = default
# Use region radius as an upper bound for city / subdivision
# radius for really small regions. E.g. Vatican City cannot
# be larger than the Vatican as a region.
radius = region_radius
if location.accuracy_radius:
radius = min(float(location.accuracy_radius * 1000.0), radius)
if subs:
radius = min(SUB_RADII.get(code, SUB_RADIUS), radius)
if city:
radius = min(CITY_RADII.get(city, CITY_RADIUS), radius)
return (radius, region_radius)
class GeoIPNull(object):
"""
A dummy implementation of the :class:`~ichnaea.geoip.GeoIPWrapper` API.
"""
def lookup(self, addr):
"""
:returns: None
"""
return None
@property
def age(self):
"""
:returns: -1
"""
return -1
def close(self):
pass
def ping(self):
"""
:returns: False
"""
return False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
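# Lookup sketch (the .mmdb path below is hypothetical; a real GeoLite2-City or
# GeoIP2-City database file is required, otherwise configure_geoip degrades to
# the GeoIPNull dummy and lookup() always returns None):
# geo = configure_geoip("/path/to/GeoLite2-City.mmdb")
# print(geo.lookup("203.0.113.30"))  # dict with region_code, radius, score, ... or None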
|
python
|
# define the main() function
def main():
i = 0
x = 119.0
for i in range(120):
if((i%2)==0):
x += 3.
else:
x -= 5.
s = "%3.2e" % x
print(s)
if __name__ == "__main__":
main()
|
python
|
import asyncio
from userbot import CMD_HELP, StartTime, bot
from userbot.utils import bash, edit_or_reply, zelda_cmd
@zelda_cmd(pattern="sxs (.*)")
async def amireallysxs(sxs):
user = await bot.get_me()
capt = str(sxs.pattern_match.group(1).split(" ", 2)[0])
link = str(sxs.pattern_match.group(1).split(" ", 2)[1])
capti = capt.replace(".", " ")
thumb = "https://telegra.ph/file/ddb9147429cae2ae6135e.jpg"
await sxs.edit("__Please Wait.__")
await sxs.edit("__Please Wait..__")
await sxs.edit("__Please Wait.__")
await sxs.edit("__Please Wait..__")
await sxs.edit("__Creating Content...__")
await sxs.edit("__Creating Content..__")
await sxs.edit("__Creating Content...__")
await sxs.edit("⚡")
await asyncio.sleep(2)
output = (
f"**{capti}**\n\n"
f"⬇️ KLIK UNTUK MENONTON ⬇️\n"
f"{link}\n\n"
f"📍Support Join : @LustsketchID\n"
f"📍Free VIP : @VIPLiveRecords\n"
)
if thumb:
try:
logo = thumb
await sxs.delete()
msg = await bot.send_file(sxs.chat_id, logo, caption=output)
await asyncio.sleep(300)
# await msg.delete()
except BaseException:
await sxs.edit(
output + "\n\n ***Logo yang diberikan tidak valid."
"\nPastikan link diarahkan ke gambar logo**"
)
# await asyncio.sleep(100)
# await sxs.delete()
else:
await edit_or_reply(sxs, output)
@zelda_cmd(pattern="lsid (.*)")
async def amireallylsid(lsid):
user = await bot.get_me()
capt = str(lsid.pattern_match.group(1).split(" ", 2)[0])
link = str(lsid.pattern_match.group(1).split(" ", 2)[1])
capti = capt.replace(".", " ")
thumb = "https://telegra.ph/file/22eda4c6851fd81b3b46a.jpg"
await lsid.edit("__Please Wait.__")
await lsid.edit("__Please Wait..__")
await lsid.edit("__Please Wait.__")
await lsid.edit("__Please Wait..__")
await lsid.edit("__Creating Content...__")
await lsid.edit("__Creating Content..__")
await lsid.edit("__Creating Content...__")
await lsid.edit("⚡")
await asyncio.sleep(2)
output = (
f"**{capti}**\n\n"
f"⬇️ Your Link\n"
f"{link}\n\n"
f"📍Support Join : @SexualSins58\n"
f"📍Free VIP : @VIPLiveRecords\n"
)
if thumb:
try:
logo = thumb
await lsid.delete()
msg = await bot.send_file(lsid.chat_id, logo, caption=output)
await asyncio.sleep(300)
# await msg.delete()
except BaseException:
await lsid.edit(
output + "\n\n ***Logo yang diberikan tidak valid."
"\nPastikan link diarahkan ke gambar logo**"
)
# await asyncio.sleep(100)
# await lsid.delete()
else:
await edit_or_reply(lsid, output)
CMD_HELP.update(
{
"ch_asupan": f"**Plugin : **`asupan`\
\n\n**KHUSUS UNTUK OWNER BOT. BELUM TERSEDIA UNTUK USER**\
"
}
)
|
python
|
#!/usr/bin/env python
from flexbe_core import EventState, Logger
import actionlib
import rospy
from jacobian_control.msg import DoAdaptionAction, DoAdaptionGoal, DoAdaptionResult, DoAdaptionFeedback
class HandoverAdaptionExec(EventState):
''' Calls jacobian-control node for adaption. '''
def __init__(self, command=0, topic='/do_adaption', reality_damp=0.3, terminate_dist_override=0.0, terminate_timeout_override=0.0, fixed_orientation=True,
terminate=True, use_reference_trajectory=True, joint_speed_limit=0.1):
super(HandoverAdaptionExec, self).__init__(outcomes = ['succeeded', 'error'])
self._topic = topic #'do_adaption'
self._command = command
self._reality_damp = reality_damp # 0.5
self._terminate_dist_override = terminate_dist_override # 0.5
self._terminate_timeout_override = terminate_timeout_override # 0.5
self._fixed_orientation = fixed_orientation
self._terminate = terminate
self._joint_speed_limit = joint_speed_limit
self._use_reference_trajectory = use_reference_trajectory
self._client = actionlib.SimpleActionClient(self._topic, DoAdaptionAction)
Logger.loginfo('Waiting for adaption server ...')
self._client.wait_for_server()
Logger.loginfo('found adaption server!')
self._error = False
def execute(self, userdata):
# Check if the client failed to send the goal.
if self._error:
return 'error'
if self._client.get_state() == 3: # GoalStatus.SUCCEEDED
rospy.logwarn(self._client.get_state())
return 'succeeded'
if self._client.get_state() == 4: # GoalStatus.ABORTED
rospy.logwarn(self._client.get_state())
return 'error'
def on_enter(self, userdata):
# Create the goal.
goal = DoAdaptionGoal()
goal.command = DoAdaptionGoal.COMMAND_ADAPT
goal.reality_damp = self._reality_damp
goal.fixed_orientation = self._fixed_orientation
goal.terminate = self._terminate
goal.terminate_dist_override = self._terminate_dist_override
goal.terminate_timeout_override = self._terminate_timeout_override
goal.joint_speed_limit = self._joint_speed_limit
goal.use_reference_trajectory = self._use_reference_trajectory
Logger.loginfo('sending goal: %s' %str(goal))
self._error = False # make sure to reset the error state since a previous state execution might have failed
try:
self._client.send_goal(goal)
        except Exception as e:
            Logger.logwarn('Failed to send the DoAdaption goal:\n%s' % str(e))
            self._error = True
def on_exit(self, userdata):
self._client.cancel_all_goals()
rospy.loginfo('Exit adaption.')
|
python
|
from .receipt_parser import parse_receipt # noqa
|
python
|
import animal
class Bear(animal.Animal):
pass
|
python
|
from dataclasses import dataclass
from minsk.analysis.binding.expression import BoundExpression
from minsk.analysis.binding.kind import BoundNodeKind
from minsk.analysis.binding.operators.unary import BoundUnaryOperator
from minsk.analysis.type import MinskType
@dataclass(frozen=True)
class BoundUnaryExpression(BoundExpression):
operator: BoundUnaryOperator
operand: BoundExpression
@property
def kind(self) -> BoundNodeKind:
return BoundNodeKind.UnaryExpression
@property
def ty(self) -> MinskType:
return self.operator.result_type
|
python
|
from zeit.cms.i18n import MessageFactory as _
import fb
import gocept.form.grouped
import requests
import urllib
import urlparse
import zeit.cms.browser.form
import zeit.cms.browser.menu
import zeit.cms.browser.view
import zope.app.appsetup.product
import zope.formlib.form
import zope.interface
import zope.schema
import zope.session.interfaces
class IFacebookApp(zope.interface.Interface):
app_id = zope.schema.TextLine(
title=_('Facebook app id'))
app_secret = zope.schema.TextLine(
title=_('Facebook app secret'))
page_name = zope.schema.TextLine(
title=_('Facebook page name (e.g. "ZEIT ONLINE")'))
class TokenForm(zeit.cms.browser.form.FormBase,
gocept.form.grouped.Form):
form_fields = zope.formlib.form.FormFields(IFacebookApp)
field_groups = (gocept.form.grouped.RemainingFields(_('')),)
@zope.formlib.form.action(
_('Create'), condition=zope.formlib.form.haveInputWidgets)
def redirect_to_facebook(self, action, data):
data['redirect_uri'] = self.url(
self.context, 'generate-facebook-token')
session = zope.session.interfaces.ISession(self.request)
session['zeit.push.facebook'].update(data)
# Step 1: Get user token. <https://developers.facebook.com
# /docs/facebook-login/manually-build-a-login-flow#login>
url = 'https://www.facebook.com/dialog/oauth?' + urllib.urlencode({
'client_id': data['app_id'],
'redirect_uri': data['redirect_uri'],
'scope': 'manage_pages,publish_pages',
})
self.request.response.redirect(url, trusted=True)
class GenerateToken(zeit.cms.browser.view.Base):
def __call__(self):
code = self.request.form.get('code')
if not code:
raise ValueError('Query parameter `code` is missing.')
# Step 1b: Convert code to token <https://developers.facebook.com
# /docs/facebook-login/manually-build-a-login-flow#confirm>
r = requests.get(
'https://graph.facebook.com/oauth/access_token?' +
urllib.urlencode({
'client_id': self.settings['app_id'],
'client_secret': self.settings['app_secret'],
'redirect_uri': self.settings['redirect_uri'],
'code': code,
}))
if 'error' in r.text:
raise ValueError(r.text)
result = urlparse.parse_qs(r.text)
short_lived_user_token = result['access_token'][0]
# Step 2: Exchange for long-lived token.
# <https://developers.facebook.com
# /docs/facebook-login/access-tokens/#extending>
r = requests.get(
'https://graph.facebook.com/oauth/access_token?' +
urllib.urlencode({
'client_id': self.settings['app_id'],
'client_secret': self.settings['app_secret'],
'grant_type': 'fb_exchange_token',
'fb_exchange_token': short_lived_user_token,
}))
if 'error' in r.text:
raise ValueError(r.text)
result = urlparse.parse_qs(r.text)
long_lived_user_token = result['access_token'][0]
# Step 3. Retrieve page access token. <https://developers.facebook.com
# /docs/facebook-login/access-tokens/#pagetokens>
#
# Note: Since we used a long-lived user token, the page token will be
# long-lived (~60 days), too.
user = fb.graph.api(long_lived_user_token)
accounts = user.get_object(cat='single', id='me', fields=['accounts'])
self.page_token = [
x['access_token'] for x in accounts['accounts']['data']
if x['name'] == self.settings['page_name']][0]
return super(GenerateToken, self).__call__()
@property
def settings(self):
session = zope.session.interfaces.ISession(self.request)
return session['zeit.push.facebook']
@property
def config_file(self):
config = zope.app.appsetup.product.getProductConfiguration(
'zeit.push')
return config['facebook-accounts']
class MenuItem(zeit.cms.browser.menu.GlobalMenuItem):
title = _("Facebook Tokens")
viewURL = '@@facebook-token.html'
pathitem = '@@facebook-token.html'
|
python
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.razorHemispheres_cff import *
from DQMOffline.Trigger.RazorMonitor_cfi import hltRazorMonitoring
# HLT_Rsq0p35_v*
Rsq0p35_RazorMonitoring = hltRazorMonitoring.clone()
Rsq0p35_RazorMonitoring.FolderName = cms.string('HLT/SUSY/Rsq0p35/')
Rsq0p35_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Rsq0p35_v*")
# HLT_Rsq0p35_v* tight
Rsq0p35_Tight_RazorMonitoring = hltRazorMonitoring.clone()
Rsq0p35_Tight_RazorMonitoring.FolderName = cms.string('HLT/SUSY/Rsq0p35_Tight/')
Rsq0p35_Tight_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Rsq0p35_v*")
Rsq0p35_Tight_RazorMonitoring.jetSelection = cms.string("pt>120")
# HLT_Rsq0p40_v*
Rsq0p40_RazorMonitoring = hltRazorMonitoring.clone()
Rsq0p40_RazorMonitoring.FolderName = cms.string('HLT/SUSY/Rsq0p40/')
Rsq0p40_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Rsq0p40_v*")
# HLT_Rsq0p40_v* tight
Rsq0p40_Tight_RazorMonitoring = hltRazorMonitoring.clone()
Rsq0p40_Tight_RazorMonitoring.FolderName = cms.string('HLT/SUSY/Rsq0p40_Tight/')
Rsq0p40_Tight_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_Rsq0p40_v*")
Rsq0p40_Tight_RazorMonitoring.jetSelection = cms.string("pt>120")
# HLT_RsqMR300_Rsq0p09_MR200_v*
RsqMR300_Rsq0p09_MR200_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR300_Rsq0p09_MR200_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR300_Rsq0p09_MR200/')
RsqMR300_Rsq0p09_MR200_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR300_Rsq0p09_MR200_v*")
# HLT_RsqMR300_Rsq0p09_MR200_v* tight
RsqMR300_Rsq0p09_MR200_Tight_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR300_Rsq0p09_MR200_Tight_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR300_Rsq0p09_MR200_Tight/')
RsqMR300_Rsq0p09_MR200_Tight_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR300_Rsq0p09_MR200_v*")
RsqMR300_Rsq0p09_MR200_Tight_RazorMonitoring.jetSelection = cms.string("pt>120")
# HLT_RsqMR320_Rsq0p09_MR200_v*
RsqMR320_Rsq0p09_MR200_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR320_Rsq0p09_MR200_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR320_Rsq0p09_MR200/')
RsqMR320_Rsq0p09_MR200_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR320_Rsq0p09_MR200_v*")
# HLT_RsqMR320_Rsq0p09_MR200_v* tight
RsqMR320_Rsq0p09_MR200_Tight_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR320_Rsq0p09_MR200_Tight_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR320_Rsq0p09_MR200_Tight/')
RsqMR320_Rsq0p09_MR200_Tight_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR320_Rsq0p09_MR200_v*")
RsqMR320_Rsq0p09_MR200_Tight_RazorMonitoring.jetSelection = cms.string("pt>120")
# HLT_RsqMR300_Rsq0p09_MR200_4jet_v*
RsqMR300_Rsq0p09_MR200_4jet_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR300_Rsq0p09_MR200_4jet_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR300_Rsq0p09_MR200_4jet/')
RsqMR300_Rsq0p09_MR200_4jet_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR300_Rsq0p09_MR200_4jet_v*")
# HLT_RsqMR300_Rsq0p09_MR200_4jet_v* tight
RsqMR300_Rsq0p09_MR200_4jet_Tight_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR300_Rsq0p09_MR200_4jet_Tight_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR300_Rsq0p09_MR200_4jet_Tight/')
RsqMR300_Rsq0p09_MR200_4jet_Tight_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR300_Rsq0p09_MR200_4jet_v*")
RsqMR300_Rsq0p09_MR200_4jet_Tight_RazorMonitoring.jetSelection = cms.string("pt>120")
# HLT_RsqMR320_Rsq0p09_MR200_4jet_v*
RsqMR320_Rsq0p09_MR200_4jet_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR320_Rsq0p09_MR200_4jet_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR320_Rsq0p09_MR200_4jet/')
RsqMR320_Rsq0p09_MR200_4jet_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR320_Rsq0p09_MR200_4jet_v*")
# HLT_RsqMR320_Rsq0p09_MR200_4jet_v* tight
RsqMR320_Rsq0p09_MR200_4jet_Tight_RazorMonitoring = hltRazorMonitoring.clone()
RsqMR320_Rsq0p09_MR200_4jet_Tight_RazorMonitoring.FolderName = cms.string('HLT/SUSY/RsqMR320_Rsq0p09_MR200_4jet_Tight/')
RsqMR320_Rsq0p09_MR200_4jet_Tight_RazorMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring("HLT_RsqMR320_Rsq0p09_MR200_4jet_v*")
RsqMR320_Rsq0p09_MR200_4jet_Tight_RazorMonitoring.jetSelection = cms.string("pt>120")
susyHLTRazorMonitoring = cms.Sequence(
cms.ignore(hemispheresDQM)+ #for razor triggers
cms.ignore(caloHemispheresDQM)+ #for razor triggers
Rsq0p35_RazorMonitoring+
Rsq0p35_Tight_RazorMonitoring+
Rsq0p40_RazorMonitoring+
Rsq0p40_Tight_RazorMonitoring+
RsqMR300_Rsq0p09_MR200_RazorMonitoring+
RsqMR300_Rsq0p09_MR200_Tight_RazorMonitoring+
RsqMR320_Rsq0p09_MR200_RazorMonitoring+
RsqMR320_Rsq0p09_MR200_Tight_RazorMonitoring+
RsqMR300_Rsq0p09_MR200_4jet_RazorMonitoring+
RsqMR300_Rsq0p09_MR200_4jet_Tight_RazorMonitoring+
RsqMR320_Rsq0p09_MR200_4jet_RazorMonitoring+
RsqMR320_Rsq0p09_MR200_4jet_Tight_RazorMonitoring
)
|
python
|
project = 'matador-test'
environments = {
'test': {'dbms': 'oracle', 'connection': 'user@instance'}
}
credentials = {
'test': {'user': 'test_user', 'password': 'test_password'}
}
|
python
|
events = ["evt1", "evt2", "evt3"]
def my_pop():
if events:
evt = events.pop(0)
print(evt)
print(events)
my_pop()
my_pop()
my_pop()
my_pop()
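# Expected output of the four calls above (assuming both print statements sit
# inside the `if events:` block): "evt1" then ['evt2', 'evt3'], "evt2" then
# ['evt3'], "evt3" then [], and nothing for the fourth call, because the list
# is already empty and the guard skips the body.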
|
python
|
"Models for managing site sections and ad placements."
from __future__ import unicode_literals
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from .conf import SECTION_CACHE_KEY, PLACEMENTS_KEY_FORMAT
from .validators import validate_pattern
@python_2_unicode_compatible
class Section(models.Model):
"A grouping of site urls."
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True)
pattern = models.CharField(max_length=200, validators=[validate_pattern, ])
priority = models.PositiveSmallIntegerField(default=0)
def __str__(self):
return self.name
def retrieve_all_sections():
"Get all sections from the cache or query the database."
sections = cache.get(SECTION_CACHE_KEY, None)
if sections is None:
sections = Section.objects.order_by('-priority')
if settings.ADCODE_CACHE_TIMEOUT:
sections = list(sections)
cache.set(SECTION_CACHE_KEY, sections, settings.ADCODE_CACHE_TIMEOUT)
return sections
@python_2_unicode_compatible
class Size(models.Model):
"Common Ad size."
name = models.CharField(max_length=100)
width = models.PositiveSmallIntegerField()
height = models.PositiveSmallIntegerField()
def __str__(self):
return '{name} {width}x{height}'.format(
name=self.name, width=self.width, height=self.height)
@python_2_unicode_compatible
class Placement(models.Model):
"Ad to be rendered in given sections."
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True)
remote_id = models.CharField(max_length=200, blank=True, default='')
size = models.ForeignKey(Size, related_name='placements', on_delete=models.CASCADE)
sections = models.ManyToManyField(Section, blank=True, related_name='placements')
def __str__(self):
return '{} ({})'.format(self.name, self.size)
@property
def placeholder(self):
size = {'width': self.width, 'height': self.height}
return settings.ADCODE_PLACEHOLDER_TEMPLATE.format(**size)
@property
def width(self):
return self.size.width
@property
def height(self):
return self.size.height
def retrieve_section_placements(section):
"Get all placements for the section from the cache or query the database."
cache_key = PLACEMENTS_KEY_FORMAT.format(section.pk)
placements = cache.get(cache_key, None)
if placements is None:
placements = Placement.objects.filter(sections=section).select_related('size')
if settings.ADCODE_CACHE_TIMEOUT:
placements = list(placements)
cache.set(cache_key, placements, settings.ADCODE_CACHE_TIMEOUT)
return placements
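# Illustrative usage sketch, not part of the original module: one way the two
# cache-backed helpers above could feed a Django view. The view name and the
# 'adcode/section.html' template path are hypothetical.
from django.shortcuts import render


def section_ads(request, slug):
    "Render the placements configured for the section matching the given slug."
    section = next((s for s in retrieve_all_sections() if s.slug == slug), None)
    placements = retrieve_section_placements(section) if section is not None else []
    return render(request, 'adcode/section.html',
                  {'section': section, 'placements': placements})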
|
python
|
from .base_regularizer import BaseRegularizer
from .center_invariant_regularizer import CenterInvariantRegularizer
from .regular_face_regularizer import RegularFaceRegularizer
from .lp_regularizer import LpRegularizer
from .zero_mean_regularizer import ZeroMeanRegularizer
from .sparse_centers_regularizer import SparseCentersRegularizer
|
python
|
#!/usr/bin/env python
"""circuits Hello World"""
from circuits import Component, Event
class hello(Event):
"""hello Event"""
class App(Component):
def hello(self):
"""Hello Event Handler"""
print("Hello World!")
def started(self, component):
"""Started Event Handler
This is fired internally when your application starts up and can be used to
trigger events that only occur once during startup.
"""
self.fire(hello()) # Fire hello Event
raise SystemExit(0) # Terminate the Application
App().run()
|
python
|
#!/usr/bin/env python
# Very useful script because I know the DMC steps start from s005
import re
import sys
import argparse
#import numpy as np
import autorunner
@autorunner.dmc_dat()
def common(x):
return x
# this style is not optimal but fine for now regardless
@autorunner.qmcpack_output()
def output_capturer(x):
return x
def resolve(regex_match):
if regex_match:
return regex_match.group(0)
return ''
def sort_by_series(x):
    # sort key: the series number embedded in the file name (e.g. ".s005.")
    return resolve(re.search(r'\.s[0-9]+\.', x)).strip('.').strip('s')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='*', default=common())
parser.add_argument('-e', '--equil', nargs='*', type=int, default=None)
args = parser.parse_args()
if not args.equil:
args.equil = [0] * len(args.files)
assert len(args.files) == len(args.equil)
# divide files by number of twists
scalar = {}
equil_scalar = {}
for e, filename in zip(args.equil, args.files):
twist = resolve(re.search(r'tw[0-9]+', filename))
scalar.setdefault(twist, [])
equil_scalar.setdefault(twist, [])
scalar[twist].append(filename)
equil_scalar[twist].append(e)
# (TEMP) calculate the total of multiple runs
qmcout = []
for filename in output_capturer():
# I don't think we need to separate since we need ALL values
#twist = resolve(re.search(r'tw[0-9]+', filename))
#qmcout.setdefault(twist, [])
#qmcout[twist].append(filename)
qmcout.append(filename)
print(scalar)
for twist in scalar.keys():
filename_list = scalar[twist]
eq_length_list = equil_scalar[twist]
filename_list.sort(key=sort_by_series)
out_name = re.sub(r'\.s[0-9]+\.', r'.s000.', filename_list[0])
out_string = ''
# dirty quick method
with open(filename_list[0]) as header_ref: # separate the header
out_string += header_ref.readlines()[0]
for eq_length, filename in zip(eq_length_list, filename_list):
with open(filename, 'r') as something:
some_list = [ x.strip() for x in something.readlines() if not '#' in x ]
some_list = some_list[eq_length:]
out_string += '\n'.join(some_list).strip() + '\n'
with open(out_name, 'w') as something:
something.write(out_string)
print("written to: "+out_name)
|
python
|
# Generated by Django 4.0.4 on 2022-04-21 02:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cheese',
fields=[
('slug', models.SlugField(max_length=200, primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, unique=True)),
('description', models.TextField(blank=True)),
('country_of_origin', models.CharField(max_length=200)),
('type', models.CharField(max_length=200)),
('fat_content', models.FloatField(blank=True, null=True)),
('last_updated', models.DateField(auto_now=True)),
],
options={
'ordering': ['name'],
'get_latest_by': 'last_updated',
},
),
]
|
python
|
import os
from PIL import Image
wnid = 'n03082979'
image_dir = './downloaded_images/{}/{}_urlimages'.format(wnid, wnid)
for filename in os.listdir(image_dir):
# if filename.endswith('.png'):
try:
filepath = os.path.join(image_dir, filename)
img = Image.open(filepath) # open the image file
img.verify() # verify that it is, in fact an image
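        # note: Pillow invalidates the parsed data after verify(); reopen the
        # image if pixel access is needed afterwards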
    except (IOError, SyntaxError) as e:
        print('Bad file ({}), removing: {}'.format(e, filename))  # report the error and the name of each corrupt file
os.remove(filepath)
|
python
|
# INTERSECTION OF TWO ARRAYS LEETCODE SOLUTION:
# creating a class.
class Solution(object):
# creating a function to solve the problem.
def intersection(self, nums1, nums2):
# returning the intersection between the two arrays.
return list(set(nums1).intersection(set(nums2)))
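# Quick illustrative check (not part of the LeetCode submission). Duplicates in
# the inputs collapse because both arrays are converted to sets first, and the
# order of the returned elements is not guaranteed.
if __name__ == '__main__':
    print(Solution().intersection([1, 2, 2, 1], [2, 2]))        # [2]
    print(Solution().intersection([4, 9, 5], [9, 4, 9, 8, 4]))  # [9, 4] or [4, 9]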
|
python
|
import enum
import math
import time
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
from torch import Tensor
from . import functional as rtdlF
ModuleType = Union[str, Callable[..., nn.Module]]
_INTERNAL_ERROR_MESSAGE = 'Internal error. Please, open an issue.'
def _is_glu_activation(activation: ModuleType):
return (
isinstance(activation, str)
and activation.endswith('GLU')
or activation in [ReGLU, GEGLU]
)
def _all_or_none(values):
return all(x is None for x in values) or all(x is not None for x in values)
class ReGLU(nn.Module):
"""The ReGLU activation function from [shazeer2020glu].
Examples:
.. testcode::
module = ReGLU()
x = torch.randn(3, 4)
assert module(x).shape == (3, 2)
References:
* [shazeer2020glu] Noam Shazeer, "GLU Variants Improve Transformer", 2020
"""
def forward(self, x: Tensor) -> Tensor:
return rtdlF.reglu(x)
class GEGLU(nn.Module):
"""The GEGLU activation function from [shazeer2020glu].
Examples:
.. testcode::
module = GEGLU()
x = torch.randn(3, 4)
assert module(x).shape == (3, 2)
References:
* [shazeer2020glu] Noam Shazeer, "GLU Variants Improve Transformer", 2020
"""
def forward(self, x: Tensor) -> Tensor:
return rtdlF.geglu(x)
class _TokenInitialization(enum.Enum):
UNIFORM = 'uniform'
NORMAL = 'normal'
@classmethod
def from_str(cls, initialization: str) -> '_TokenInitialization':
try:
return cls(initialization)
except ValueError:
valid_values = [x.value for x in _TokenInitialization]
raise ValueError(f'initialization must be one of {valid_values}')
def apply(self, x: Tensor, d: int) -> None:
d_sqrt_inv = 1 / math.sqrt(d)
if self == _TokenInitialization.UNIFORM:
# used in the paper "Revisiting Deep Learning Models for Tabular Data";
# is equivalent to `nn.init.kaiming_uniform_(x, a=math.sqrt(5))` (which is
# used by torch to initialize nn.Linear.weight, for example)
nn.init.uniform_(x, a=-d_sqrt_inv, b=d_sqrt_inv)
elif self == _TokenInitialization.NORMAL:
nn.init.normal_(x, std=d_sqrt_inv)
class NumericalFeatureTokenizer(nn.Module):
"""Transforms continuous features to tokens (embeddings).
See `FeatureTokenizer` for the illustration.
For one feature, the transformation consists of two steps:
* the feature is multiplied by a trainable vector
* another trainable vector is added
Note that each feature has its separate pair of trainable vectors, i.e. the vectors
are not shared between features.
Examples:
.. testcode::
x = torch.randn(4, 2)
n_objects, n_features = x.shape
d_token = 3
tokenizer = NumericalFeatureTokenizer(n_features, d_token, True, 'uniform')
tokens = tokenizer(x)
assert tokens.shape == (n_objects, n_features, d_token)
"""
def __init__(
self,
n_features: int,
d_token: int,
bias: bool,
initialization: str,
) -> None:
"""
Args:
n_features: the number of continuous (scalar) features
d_token: the size of one token
bias: if `False`, then the transformation will include only multiplication.
**Warning**: :code:`bias=False` leads to significantly worse results for
Transformer-like (token-based) architectures.
initialization: initialization policy for parameters. Must be one of
:code:`['uniform', 'normal']`. Let :code:`s = d ** -0.5`. Then, the
corresponding distributions are :code:`Uniform(-s, s)` and :code:`Normal(0, s)`.
In [gorishniy2021revisiting], the 'uniform' initialization was used.
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
"""
super().__init__()
initialization_ = _TokenInitialization.from_str(initialization)
self.weight = nn.Parameter(Tensor(n_features, d_token))
self.bias = nn.Parameter(Tensor(n_features, d_token)) if bias else None
for parameter in [self.weight, self.bias]:
if parameter is not None:
initialization_.apply(parameter, d_token)
@property
def n_tokens(self) -> int:
"""The number of tokens."""
return len(self.weight)
@property
def d_token(self) -> int:
"""The size of one token."""
return self.weight.shape[1]
def forward(self, x: Tensor) -> Tensor:
x = self.weight[None] * x[..., None]
if self.bias is not None:
x = x + self.bias[None]
return x
class CategoricalFeatureTokenizer(nn.Module):
"""Transforms categorical features to tokens (embeddings).
See `FeatureTokenizer` for the illustration.
The module efficiently implements a collection of `torch.nn.Embedding` (with
optional biases).
Examples:
.. testcode::
# the input must contain integers. For example, if the first feature can
# take 3 distinct values, then its cardinality is 3 and the first column
# must contain values from the range `[0, 1, 2]`.
cardinalities = [3, 10]
x = torch.tensor([
[0, 5],
[1, 7],
[0, 2],
[2, 4]
])
n_objects, n_features = x.shape
d_token = 3
tokenizer = CategoricalFeatureTokenizer(cardinalities, d_token, True, 'uniform')
tokens = tokenizer(x)
assert tokens.shape == (n_objects, n_features, d_token)
"""
category_offsets: Tensor
def __init__(
self,
cardinalities: List[int],
d_token: int,
bias: bool,
initialization: str,
) -> None:
"""
Args:
cardinalities: the number of distinct values for each feature. For example,
:code:`cardinalities=[3, 4]` describes two features: the first one can
take values in the range :code:`[0, 1, 2]` and the second one can take
values in the range :code:`[0, 1, 2, 3]`.
d_token: the size of one token.
bias: if `True`, for each feature, a trainable vector is added to the
embedding regardless of feature value. The bias vectors are not shared
between features.
initialization: initialization policy for parameters. Must be one of
:code:`['uniform', 'normal']`. Let :code:`s = d ** -0.5`. Then, the
corresponding distributions are :code:`Uniform(-s, s)` and :code:`Normal(0, s)`. In
the paper [gorishniy2021revisiting], the 'uniform' initialization was
used.
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
"""
super().__init__()
assert cardinalities, 'cardinalities must be non-empty'
assert d_token > 0, 'd_token must be positive'
initialization_ = _TokenInitialization.from_str(initialization)
category_offsets = torch.tensor([0] + cardinalities[:-1]).cumsum(0)
self.register_buffer('category_offsets', category_offsets, persistent=False)
self.embeddings = nn.Embedding(sum(cardinalities), d_token)
self.bias = nn.Parameter(Tensor(len(cardinalities), d_token)) if bias else None
for parameter in [self.embeddings.weight, self.bias]:
if parameter is not None:
initialization_.apply(parameter, d_token)
@property
def n_tokens(self) -> int:
"""The number of tokens."""
return len(self.category_offsets)
@property
def d_token(self) -> int:
"""The size of one token."""
return self.embeddings.embedding_dim
def forward(self, x: Tensor) -> Tensor:
x = self.embeddings(x + self.category_offsets[None])
if self.bias is not None:
x = x + self.bias[None]
return x
class FeatureTokenizer(nn.Module):
"""Combines `NumericalFeatureTokenizer` and `CategoricalFeatureTokenizer`.
The "Feature Tokenizer" module from [gorishniy2021revisiting]. The module transforms
continuous and categorical features to tokens (embeddings).
In the illustration below, the red module in the upper brackets represents
`NumericalFeatureTokenizer` and the green module in the lower brackets represents
`CategoricalFeatureTokenizer`.
.. image:: ../images/feature_tokenizer.png
:scale: 33%
:alt: Feature Tokenizer
Examples:
.. testcode::
n_objects = 4
n_num_features = 3
n_cat_features = 2
d_token = 7
x_num = torch.randn(n_objects, n_num_features)
x_cat = torch.tensor([[0, 1], [1, 0], [0, 2], [1, 1]])
            # [2, 3] reflects the cardinalities of the two categorical features in x_cat
tokenizer = FeatureTokenizer(n_num_features, [2, 3], d_token)
tokens = tokenizer(x_num, x_cat)
assert tokens.shape == (n_objects, n_num_features + n_cat_features, d_token)
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko "Revisiting Deep Learning Models for Tabular Data", 2021
"""
def __init__(
self,
n_num_features: int,
cat_cardinalities: List[int],
d_token: int,
) -> None:
"""
Args:
n_num_features: the number of continuous features. Pass :code:`0` if there
are no numerical features.
cat_cardinalities: the number of unique values for each feature. See
`CategoricalFeatureTokenizer` for details. Pass an empty list if there
are no categorical features.
d_token: the size of one token.
"""
super().__init__()
assert n_num_features >= 0, 'n_num_features must be non-negative'
assert (
n_num_features or cat_cardinalities
), 'at least one of n_num_features or cat_cardinalities must be positive/non-empty'
self.initialization = 'uniform'
self.num_tokenizer = (
NumericalFeatureTokenizer(
n_features=n_num_features,
d_token=d_token,
bias=True,
initialization=self.initialization,
)
if n_num_features
else None
)
self.cat_tokenizer = (
CategoricalFeatureTokenizer(
cat_cardinalities, d_token, True, self.initialization
)
if cat_cardinalities
else None
)
@property
def n_tokens(self) -> int:
"""The number of tokens."""
return sum(
x.n_tokens
for x in [self.num_tokenizer, self.cat_tokenizer]
if x is not None
)
@property
def d_token(self) -> int:
"""The size of one token."""
return (
self.cat_tokenizer.d_token # type: ignore
if self.num_tokenizer is None
else self.num_tokenizer.d_token
)
def forward(self, x_num: Optional[Tensor], x_cat: Optional[Tensor]) -> Tensor:
"""Perform the forward pass.
Args:
x_num: continuous features. Must be presented if :code:`n_num_features > 0`
was passed to the constructor.
x_cat: categorical features (see `CategoricalFeatureTokenizer.forward` for
details). Must be presented if non-empty :code:`cat_cardinalities` was
passed to the constructor.
Returns:
tokens
Raises:
AssertionError: if the described requirements for the inputs are not met.
"""
assert (
x_num is not None or x_cat is not None
), 'At least one of x_num and x_cat must be presented'
assert _all_or_none(
[self.num_tokenizer, x_num]
), 'If self.num_tokenizer is (not) None, then x_num must (not) be None'
assert _all_or_none(
[self.cat_tokenizer, x_cat]
), 'If self.cat_tokenizer is (not) None, then x_cat must (not) be None'
x = []
if self.num_tokenizer is not None:
x.append(self.num_tokenizer(x_num))
if self.cat_tokenizer is not None:
x.append(self.cat_tokenizer(x_cat))
return x[0] if len(x) == 1 else torch.cat(x, dim=1)
class CLSToken(nn.Module):
"""[CLS]-token for BERT-like inference.
To learn about the [CLS]-based inference, see [devlin2018bert].
When used as a module, the [CLS]-token is appended **to the end** of each item in
the batch.
Examples:
.. testcode::
batch_size = 2
n_tokens = 3
d_token = 4
cls_token = CLSToken(d_token, 'uniform')
x = torch.randn(batch_size, n_tokens, d_token)
x = cls_token(x)
assert x.shape == (batch_size, n_tokens + 1, d_token)
assert (x[:, -1, :] == cls_token.expand(len(x))).all()
References:
* [devlin2018bert] Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" 2018
"""
def __init__(self, d_token: int, initialization: str) -> None:
"""
Args:
d_token: the size of token
initialization: initialization policy for parameters. Must be one of
:code:`['uniform', 'normal']`. Let :code:`s = d ** -0.5`. Then, the
corresponding distributions are :code:`Uniform(-s, s)` and :code:`Normal(0, s)`. In
the paper [gorishniy2021revisiting], the 'uniform' initialization was
used.
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko "Revisiting Deep Learning Models for Tabular Data", 2021
"""
super().__init__()
initialization_ = _TokenInitialization.from_str(initialization)
self.weight = nn.Parameter(Tensor(d_token))
initialization_.apply(self.weight, d_token)
def expand(self, *leading_dimensions: int) -> Tensor:
"""Expand (repeat) the underlying [CLS]-token to a tensor with the given leading dimensions.
A possible use case is building a batch of [CLS]-tokens. See `CLSToken` for
examples of usage.
Note:
Under the hood, the `torch.Tensor.expand` method is applied to the
underlying :code:`weight` parameter, so gradients will be propagated as
expected.
Args:
leading_dimensions: the additional new dimensions
Returns:
tensor of the shape :code:`(*leading_dimensions, len(self.weight))`
"""
if not leading_dimensions:
return self.weight
new_dims = (1,) * (len(leading_dimensions) - 1)
return self.weight.view(*new_dims, -1).expand(*leading_dimensions, -1)
def forward(self, x: Tensor) -> Tensor:
"""Append self **to the end** of each item in the batch (see `CLSToken`)."""
return torch.cat([x, self.expand(len(x), 1)], dim=1)
def _make_nn_module(module_type: ModuleType, *args) -> nn.Module:
if isinstance(module_type, str):
if module_type == 'ReGLU':
return ReGLU()
elif module_type == 'GEGLU':
return GEGLU()
else:
try:
cls = getattr(nn, module_type)
except AttributeError as err:
raise ValueError(
f'Failed to construct the module {module_type} with the arguments {args}'
) from err
return cls(*args)
else:
return module_type(*args)
class MLP(nn.Module):
"""The MLP model used in [gorishniy2021revisiting].
The following scheme describes the architecture:
.. code-block:: text
MLP: (in) -> Block -> ... -> Block -> Linear -> (out)
Block: (in) -> Linear -> Activation -> Dropout -> (out)
Examples:
.. testcode::
x = torch.randn(4, 2)
module = MLP.make_baseline(x.shape[1], [3, 5], 0.1, 1)
assert module(x).shape == (len(x), 1)
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
"""
class Block(nn.Module):
"""The main building block of `MLP`."""
def __init__(
self,
*,
d_in: int,
d_out: int,
bias: bool,
activation: ModuleType,
dropout: float,
) -> None:
super().__init__()
self.linear = nn.Linear(d_in, d_out, bias)
self.activation = _make_nn_module(activation)
self.dropout = nn.Dropout(dropout)
def forward(self, x: Tensor) -> Tensor:
return self.dropout(self.activation(self.linear(x)))
def __init__(
self,
*,
d_in: int,
d_layers: List[int],
dropouts: Union[float, List[float]],
activation: Union[str, Callable[[], nn.Module]],
d_out: int,
) -> None:
"""
Note:
`make_baseline` is the recommended constructor.
"""
super().__init__()
if isinstance(dropouts, float):
dropouts = [dropouts] * len(d_layers)
assert len(d_layers) == len(dropouts)
self.blocks = nn.Sequential(
*[
MLP.Block(
d_in=d_layers[i - 1] if i else d_in,
d_out=d,
bias=True,
activation=activation,
dropout=dropout,
)
for i, (d, dropout) in enumerate(zip(d_layers, dropouts))
]
)
self.head = nn.Linear(d_layers[-1] if d_layers else d_in, d_out)
@classmethod
def make_baseline(
cls: Type['MLP'],
d_in: int,
d_layers: List[int],
dropout: float,
d_out: int,
) -> 'MLP':
"""Create a "baseline" `MLP`.
This variation of MLP was used in [gorishniy2021revisiting]. Features:
* :code:`Activation` = :code:`ReLU`
* all linear layers except for the first one and the last one are of the same dimension
* the dropout rate is the same for all dropout layers
Args:
d_in: the input size
d_layers: the dimensions of the linear layers. If there are more than two
layers, then all of them except for the first and the last ones must
have the same dimension. Valid examples: :code:`[]`, :code:`[8]`,
:code:`[8, 16]`, :code:`[2, 2, 2, 2]`, :code:`[1, 2, 2, 4]`. Invalid
example: :code:`[1, 2, 3, 4]`.
dropout: the dropout rate for all hidden layers
d_out: the output size
Returns:
MLP
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
"""
assert isinstance(dropout, float), 'In this constructor, dropout must be float'
if len(d_layers) > 2:
assert len(set(d_layers[1:-1])) == 1, (
'In this constructor, if d_layers contains more than two elements, then'
' all elements except for the first and the last ones must be equal.'
)
return MLP(
d_in=d_in,
d_layers=d_layers, # type: ignore
dropouts=dropout,
activation='ReLU',
d_out=d_out,
)
def forward(self, x: Tensor) -> Tensor:
x = self.blocks(x)
x = self.head(x)
return x
class ResNet(nn.Module):
"""The ResNet model used in [gorishniy2021revisiting].
The following scheme describes the architecture:
.. code-block:: text
ResNet: (in) -> Linear -> Block -> ... -> Block -> Head -> (out)
|-> Norm -> Linear -> Activation -> Dropout -> Linear -> Dropout ->|
| |
Block: (in) ------------------------------------------------------------> Add -> (out)
Head: (in) -> Norm -> Activation -> Linear -> (out)
Examples:
.. testcode::
x = torch.randn(4, 2)
module = ResNet.make_baseline(
d_in=x.shape[1],
n_blocks=2,
d_main=3,
d_hidden=4,
dropout_first=0.25,
dropout_second=0.0,
d_out=1
)
assert module(x).shape == (len(x), 1)
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
"""
class Block(nn.Module):
"""The main building block of `ResNet`."""
def __init__(
self,
*,
d_main: int,
d_hidden: int,
bias_first: bool,
bias_second: bool,
dropout_first: float,
dropout_second: float,
normalization: ModuleType,
activation: ModuleType,
skip_connection: bool,
) -> None:
super().__init__()
self.normalization = _make_nn_module(normalization, d_main)
self.linear_first = nn.Linear(d_main, d_hidden, bias_first)
self.activation = _make_nn_module(activation)
self.dropout_first = nn.Dropout(dropout_first)
self.linear_second = nn.Linear(d_hidden, d_main, bias_second)
self.dropout_second = nn.Dropout(dropout_second)
self.skip_connection = skip_connection
def forward(self, x: Tensor) -> Tensor:
x_input = x
x = self.normalization(x)
x = self.linear_first(x)
x = self.activation(x)
x = self.dropout_first(x)
x = self.linear_second(x)
x = self.dropout_second(x)
if self.skip_connection:
x = x_input + x
else:
x = x_input * x
return x
class Head(nn.Module):
"""The final module of `ResNet`."""
def __init__(
self,
*,
d_in: int,
d_out: int,
bias: bool,
normalization: ModuleType,
activation: ModuleType,
) -> None:
super().__init__()
self.normalization = _make_nn_module(normalization, d_in)
self.activation = _make_nn_module(activation)
self.linear = nn.Linear(d_in, d_out, bias)
def forward(self, x: Tensor) -> Tensor:
if self.normalization is not None:
x = self.normalization(x)
x = self.activation(x)
x = self.linear(x)
return x
def __init__(
self,
*,
d_in: int,
n_blocks: int,
d_main: int,
d_hidden: int,
dropout_first: float,
dropout_second: float,
normalization: ModuleType,
activation: ModuleType,
d_out: int,
) -> None:
"""
Note:
`make_baseline` is the recommended constructor.
"""
super().__init__()
        if d_main is None:
            d_main = d_in
        self.first_layer = nn.Linear(d_in, d_main)
self.blocks = nn.Sequential(
*[
ResNet.Block(
d_main=d_main,
d_hidden=d_hidden,
bias_first=True,
bias_second=True,
dropout_first=dropout_first,
dropout_second=dropout_second,
normalization=normalization,
activation=activation,
skip_connection=True,
)
for _ in range(n_blocks)
]
)
self.blocks2 = nn.Sequential(
*[
ResNet.Block(
d_main=d_main,
d_hidden=d_hidden,
bias_first=True,
bias_second=True,
dropout_first=dropout_first,
dropout_second=dropout_second,
normalization=normalization,
activation=activation,
skip_connection=False,
)
for _ in range(n_blocks)
]
)
self.head = ResNet.Head(
d_in=d_main*2,
d_out=d_out,
bias=True,
normalization=normalization,
activation=activation,
)
@classmethod
def make_baseline(
cls: Type['ResNet'],
*,
d_in: int,
n_blocks: int,
d_main: int,
d_hidden: int,
dropout_first: float,
dropout_second: float,
d_out: int,
) -> 'ResNet':
"""Create a "baseline" `ResNet`.
This variation of ResNet was used in [gorishniy2021revisiting]. Features:
* :code:`Activation` = :code:`ReLU`
* :code:`Norm` = :code:`BatchNorm1d`
Args:
d_in: the input size
n_blocks: the number of Blocks
d_main: the input size (or, equivalently, the output size) of each Block
d_hidden: the output size of the first linear layer in each Block
dropout_first: the dropout rate of the first dropout layer in each Block.
dropout_second: the dropout rate of the second dropout layer in each Block.
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
"""
return cls(
d_in=d_in,
n_blocks=n_blocks,
d_main=d_main,
d_hidden=d_hidden,
dropout_first=dropout_first,
dropout_second=dropout_second,
normalization='BatchNorm1d',
activation='ReLU',
d_out=d_out,
)
def forward(self, x: Tensor) -> Tensor:
x = self.first_layer(x)
x1 = self.blocks(x)
x2 = self.blocks2(x)
x = torch.cat([x1, x2], dim=-1)
x = self.head(x)
return x
class MultiheadAttention(nn.Module):
"""Multihead Attention (self-/cross-) with optional 'linear' attention.
To learn more about Multihead Attention, see [devlin2018bert]. See the implementation
of `Transformer` and the examples below to learn how to use the compression technique
from [wang2020linformer] to speed up the module when the number of tokens is large.
Examples:
.. testcode::
n_objects, n_tokens, d_token = 2, 3, 12
n_heads = 6
a = torch.randn(n_objects, n_tokens, d_token)
b = torch.randn(n_objects, n_tokens * 2, d_token)
module = MultiheadAttention(
d_token=d_token, n_heads=n_heads, dropout=0.2, bias=True, initialization='kaiming'
)
# self-attention
x, attention_stats = module(a, a, None, None)
assert x.shape == a.shape
assert attention_stats['attention_probs'].shape == (n_objects * n_heads, n_tokens, n_tokens)
assert attention_stats['attention_logits'].shape == (n_objects * n_heads, n_tokens, n_tokens)
# cross-attention
assert module(a, b, None, None)
# Linformer self-attention with the 'headwise' sharing policy
k_compression = torch.nn.Linear(n_tokens, n_tokens // 4)
v_compression = torch.nn.Linear(n_tokens, n_tokens // 4)
assert module(a, a, k_compression, v_compression)
# Linformer self-attention with the 'key-value' sharing policy
kv_compression = torch.nn.Linear(n_tokens, n_tokens // 4)
assert module(a, a, kv_compression, kv_compression)
References:
* [devlin2018bert] Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" 2018
* [wang2020linformer] Sinong Wang, Belinda Z. Li, Madian Khabsa, Han Fang, Hao Ma "Linformer: Self-Attention with Linear Complexity", 2020
"""
def __init__(
self,
*,
d_token: int,
n_heads: int,
dropout: float,
bias: bool,
initialization: str,
) -> None:
"""
Args:
d_token: the token size. Must be a multiple of :code:`n_heads`.
n_heads: the number of heads. If greater than 1, then the module will have
an addition output layer (so called "mixing" layer).
dropout: dropout rate for the attention map. The dropout is applied to
*probabilities* and do not affect logits.
bias: if `True`, then input (and output, if presented) layers also have bias.
`True` is a reasonable default choice.
initialization: initialization for input projection layers. Must be one of
:code:`['kaiming', 'xavier']`. `kaiming` is a reasonable default choice.
Raises:
AssertionError: if requirements for the inputs are not met.
"""
super().__init__()
if n_heads > 1:
assert d_token % n_heads == 0, 'd_token must be a multiple of n_heads'
assert initialization in ['kaiming', 'xavier']
self.W_q = nn.Linear(d_token, d_token, bias)
self.W_k = nn.Linear(d_token, d_token, bias)
self.W_v = nn.Linear(d_token, d_token, bias)
self.W_out = nn.Linear(d_token, d_token, bias) if n_heads > 1 else None
self.n_heads = n_heads
self.dropout = nn.Dropout(dropout) if dropout else None
for m in [self.W_q, self.W_k, self.W_v]:
# the "xavier" branch tries to follow torch.nn.MultiheadAttention;
# the second condition checks if W_v plays the role of W_out; the latter one
# is initialized with Kaiming in torch
if initialization == 'xavier' and (
m is not self.W_v or self.W_out is not None
):
# gain is needed since W_qkv is represented with 3 separate layers (it
# implies different fan_out)
nn.init.xavier_uniform_(m.weight, gain=1 / math.sqrt(2))
if m.bias is not None:
nn.init.zeros_(m.bias)
if self.W_out is not None:
nn.init.zeros_(self.W_out.bias)
def _reshape(self, x: Tensor) -> Tensor:
batch_size, n_tokens, d = x.shape
d_head = d // self.n_heads
return (
x.reshape(batch_size, n_tokens, self.n_heads, d_head)
.transpose(1, 2)
.reshape(batch_size * self.n_heads, n_tokens, d_head)
)
def forward(
self,
x_q: Tensor,
x_kv: Tensor,
key_compression: Optional[nn.Linear],
value_compression: Optional[nn.Linear],
) -> Tuple[Tensor, Dict[str, Tensor]]:
"""Perform the forward pass.
Args:
x_q: query tokens
x_kv: key-value tokens
key_compression: Linformer-style compression for keys
value_compression: Linformer-style compression for values
Returns:
(tokens, attention_stats)
"""
assert _all_or_none(
[key_compression, value_compression]
), 'If key_compression is (not) None, then value_compression must (not) be None'
q, k, v = self.W_q(x_q), self.W_k(x_kv), self.W_v(x_kv)
for tensor in [q, k, v]:
assert tensor.shape[-1] % self.n_heads == 0, _INTERNAL_ERROR_MESSAGE
if key_compression is not None:
k = key_compression(k.transpose(1, 2)).transpose(1, 2)
v = value_compression(v.transpose(1, 2)).transpose(1, 2) # type: ignore
batch_size = len(q)
d_head_key = k.shape[-1] // self.n_heads
d_head_value = v.shape[-1] // self.n_heads
n_q_tokens = q.shape[1]
q = self._reshape(q)
k = self._reshape(k)
attention_logits = q @ k.transpose(1, 2) / math.sqrt(d_head_key)
attention_probs = F.softmax(attention_logits, dim=-1)
if self.dropout is not None:
attention_probs = self.dropout(attention_probs)
x = attention_probs @ self._reshape(v)
x = (
x.reshape(batch_size, self.n_heads, n_q_tokens, d_head_value)
.transpose(1, 2)
.reshape(batch_size, n_q_tokens, self.n_heads * d_head_value)
)
if self.W_out is not None:
x = self.W_out(x)
return x, {
'attention_logits': attention_logits,
'attention_probs': attention_probs,
}
class Transformer(nn.Module):
"""Transformer with extra features.
This module is the backbone of `FTTransformer`."""
WARNINGS = {'first_prenormalization': True, 'prenormalization': True}
class FFN(nn.Module):
"""The Feed-Forward Network module used in every `Transformer` block."""
def __init__(
self,
*,
d_token: int,
d_hidden: int,
bias_first: bool,
bias_second: bool,
dropout: float,
activation: ModuleType,
):
super().__init__()
self.linear_first = nn.Linear(
d_token,
d_hidden * (2 if _is_glu_activation(activation) else 1),
bias_first,
)
self.activation = _make_nn_module(activation)
self.dropout = nn.Dropout(dropout)
self.linear_second = nn.Linear(d_hidden, d_token, bias_second)
def forward(self, x: Tensor) -> Tensor:
x = self.linear_first(x)
x = self.activation(x)
x = self.dropout(x)
x = self.linear_second(x)
return x
class Head(nn.Module):
"""The final module of the `Transformer` that performs BERT-like inference."""
def __init__(
self,
*,
d_in: int,
bias: bool,
activation: ModuleType,
normalization: ModuleType,
d_out: int,
):
super().__init__()
self.normalization = _make_nn_module(normalization, d_in)
self.activation = _make_nn_module(activation)
self.linear = nn.Linear(d_in, d_out, bias)
def forward(self, x: Tensor) -> Tensor:
x = x[:, -1]
x = self.normalization(x)
x = self.activation(x)
x = self.linear(x)
return x
def __init__(
self,
*,
d_token: int,
n_blocks: int,
attention_n_heads: int,
attention_dropout: float,
attention_initialization: str,
attention_normalization: str,
ffn_d_hidden: int,
ffn_dropout: float,
ffn_activation: str,
ffn_normalization: str,
residual_dropout: float,
prenormalization: bool,
first_prenormalization: bool,
last_layer_query_idx: Union[None, List[int], slice],
n_tokens: Optional[int],
kv_compression_ratio: Optional[float],
kv_compression_sharing: Optional[str],
head_activation: ModuleType,
head_normalization: ModuleType,
d_out: int,
) -> None:
super().__init__()
if isinstance(last_layer_query_idx, int):
raise ValueError(
'last_layer_query_idx must be None, list[int] or slice. '
f'Do you mean last_layer_query_idx=[{last_layer_query_idx}] ?'
)
if not prenormalization:
assert (
not first_prenormalization
), 'If `prenormalization` is False, then `first_prenormalization` must be False'
assert _all_or_none([n_tokens, kv_compression_ratio, kv_compression_sharing]), (
'If any of the following arguments is (not) None, then all of them must (not) be None: '
'n_tokens, kv_compression_ratio, kv_compression_sharing'
)
assert kv_compression_sharing in [None, 'headwise', 'key-value', 'layerwise']
if not prenormalization:
if self.WARNINGS['prenormalization']:
warnings.warn(
'prenormalization is set to False. Are you sure about this? '
'The training can become less stable. '
'You can turn off this warning by tweaking the '
'rtdl.Transformer.WARNINGS dictionary.',
UserWarning,
)
assert (
not first_prenormalization
), 'If prenormalization is False, then first_prenormalization is ignored and must be set to False'
if (
prenormalization
and first_prenormalization
and self.WARNINGS['first_prenormalization']
):
warnings.warn(
'first_prenormalization is set to True. Are you sure about this? '
'For example, the vanilla FTTransformer with '
'first_prenormalization=True performs SIGNIFICANTLY worse. '
'You can turn off this warning by tweaking the '
'rtdl.Transformer.WARNINGS dictionary.',
UserWarning,
)
time.sleep(3)
def make_kv_compression():
assert (
n_tokens and kv_compression_ratio
), _INTERNAL_ERROR_MESSAGE # for mypy
# https://github.com/pytorch/fairseq/blob/1bba712622b8ae4efb3eb793a8a40da386fe11d0/examples/linformer/linformer_src/modules/multihead_linear_attention.py#L83
return nn.Linear(n_tokens, int(n_tokens * kv_compression_ratio), bias=False)
self.shared_kv_compression = (
make_kv_compression()
if kv_compression_ratio and kv_compression_sharing == 'layerwise'
else None
)
self.prenormalization = prenormalization
self.last_layer_query_idx = last_layer_query_idx
self.blocks = nn.ModuleList([])
for layer_idx in range(n_blocks):
layer = nn.ModuleDict(
{
'attention': MultiheadAttention(
d_token=d_token,
n_heads=attention_n_heads,
dropout=attention_dropout,
bias=True,
initialization=attention_initialization,
),
'ffn': Transformer.FFN(
d_token=d_token,
d_hidden=ffn_d_hidden,
bias_first=True,
bias_second=True,
dropout=ffn_dropout,
activation=ffn_activation,
),
'attention_residual_dropout': nn.Dropout(residual_dropout),
'ffn_residual_dropout': nn.Dropout(residual_dropout),
'output': nn.Identity(), # for hooks-based introspection
}
)
if layer_idx or not prenormalization or first_prenormalization:
layer['attention_normalization'] = _make_nn_module(
attention_normalization, d_token
)
layer['ffn_normalization'] = _make_nn_module(ffn_normalization, d_token)
if kv_compression_ratio and self.shared_kv_compression is None:
layer['key_compression'] = make_kv_compression()
if kv_compression_sharing == 'headwise':
layer['value_compression'] = make_kv_compression()
else:
assert (
kv_compression_sharing == 'key-value'
), _INTERNAL_ERROR_MESSAGE
self.blocks.append(layer)
self.head = Transformer.Head(
d_in=d_token,
d_out=d_out,
bias=True,
activation=head_activation, # type: ignore
normalization=head_normalization if prenormalization else 'Identity',
)
def _get_kv_compressions(self, layer):
return (
(self.shared_kv_compression, self.shared_kv_compression)
if self.shared_kv_compression is not None
else (layer['key_compression'], layer['value_compression'])
if 'key_compression' in layer and 'value_compression' in layer
else (layer['key_compression'], layer['key_compression'])
if 'key_compression' in layer
else (None, None)
)
def _start_residual(self, layer, stage, x):
assert stage in ['attention', 'ffn'], _INTERNAL_ERROR_MESSAGE
x_residual = x
if self.prenormalization:
norm_key = f'{stage}_normalization'
if norm_key in layer:
x_residual = layer[norm_key](x_residual)
return x_residual
def _end_residual(self, layer, stage, x, x_residual):
assert stage in ['attention', 'ffn'], _INTERNAL_ERROR_MESSAGE
x_residual = layer[f'{stage}_residual_dropout'](x_residual)
x = x + x_residual
if not self.prenormalization:
x = layer[f'{stage}_normalization'](x)
return x
def forward(self, x: Tensor) -> Tensor:
assert (
x.ndim == 3
), 'The input must have 3 dimensions: (n_objects, n_tokens, d_token)'
for layer_idx, layer in enumerate(self.blocks):
layer = cast(nn.ModuleDict, layer)
query_idx = (
self.last_layer_query_idx if layer_idx + 1 == len(self.blocks) else None
)
x_residual = self._start_residual(layer, 'attention', x)
x_residual, _ = layer['attention'](
x_residual if query_idx is None else x_residual[:, query_idx],
x_residual,
*self._get_kv_compressions(layer),
)
if query_idx is not None:
x = x[:, query_idx]
x = self._end_residual(layer, 'attention', x, x_residual)
x_residual = self._start_residual(layer, 'ffn', x)
x_residual = layer['ffn'](x_residual)
x = self._end_residual(layer, 'ffn', x, x_residual)
x = layer['output'](x)
x = self.head(x)
return x
class FTTransformer(nn.Module):
"""The FT-Transformer model proposed in [gorishniy2021revisiting].
Transforms features to tokens with `FeatureTokenizer` and applies `Transformer` [vaswani2017attention]
to the tokens. The following illustration provides a high-level overview of the
architecture:
.. image:: ../images/ft_transformer.png
:scale: 25%
:alt: FT-Transformer
The following illustration demonstrates one Transformer block for :code:`prenormalization=True`:
.. image:: ../images/transformer_block.png
:scale: 25%
:alt: PreNorm Transformer block
Examples:
.. testcode::
x_num = torch.randn(4, 3)
x_cat = torch.tensor([[0, 1], [1, 0], [0, 2], [1, 1]])
module = FTTransformer.make_baseline(
n_num_features=3,
cat_cardinalities=[2, 3],
d_token=8,
n_blocks=2,
attention_dropout=0.2,
ffn_d_hidden=6,
ffn_dropout=0.2,
residual_dropout=0.0,
d_out=1,
)
x = module(x_num, x_cat)
assert x.shape == (4, 1)
module = FTTransformer.make_default(
n_num_features=3,
cat_cardinalities=[2, 3],
d_out=1,
)
x = module(x_num, x_cat)
assert x.shape == (4, 1)
To learn more about the baseline and default parameters:
.. testcode::
baseline_parameters = FTTransformer.get_baseline_transformer_subconfig()
default_parameters = FTTransformer.get_default_transformer_config()
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
* [vaswani2017attention] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin, "Attention Is All You Need", 2017
"""
def __init__(
self, feature_tokenizer: FeatureTokenizer, transformer: Transformer
) -> None:
"""
Note:
`make_baseline` and `make_default` are the recommended constructors.
"""
super().__init__()
if transformer.prenormalization:
assert 'attention_normalization' not in transformer.blocks[0], (
'In the prenormalization setting, FT-Transformer does not '
'allow using the first normalization layer '
'in the first transformer block'
)
self.feature_tokenizer = feature_tokenizer
self.cls_token = CLSToken(
feature_tokenizer.d_token, feature_tokenizer.initialization
)
self.transformer = transformer
@classmethod
def get_baseline_transformer_subconfig(
cls: Type['FTTransformer'],
) -> Dict[str, Any]:
"""Get the baseline subset of parameters for the backbone."""
return {
'attention_n_heads': 8,
'attention_initialization': 'kaiming',
'ffn_activation': 'ReGLU',
'attention_normalization': 'LayerNorm',
'ffn_normalization': 'LayerNorm',
'prenormalization': True,
'first_prenormalization': False,
'last_layer_query_idx': None,
'n_tokens': None,
'kv_compression_ratio': None,
'kv_compression_sharing': None,
'head_activation': 'ReLU',
'head_normalization': 'LayerNorm',
}
@classmethod
def get_default_transformer_config(
cls: Type['FTTransformer'], *, n_blocks: int = 3
) -> Dict[str, Any]:
"""Get the default parameters for the backbone.
Note:
The configurations are different for different values of:code:`n_blocks`.
"""
assert 1 <= n_blocks <= 6
grid = {
'd_token': [96, 128, 192, 256, 320, 384],
'attention_dropout': [0.1, 0.15, 0.2, 0.25, 0.3, 0.35],
'ffn_dropout': [0.0, 0.05, 0.1, 0.15, 0.2, 0.25],
}
arch_subconfig = {k: v[n_blocks - 1] for k, v in grid.items()} # type: ignore
baseline_subconfig = cls.get_baseline_transformer_subconfig()
# (4 / 3) for ReGLU/GEGLU activations results in almost the same parameter count
# as (2.0) for element-wise activations (e.g. ReLU or GELU; see the "else" branch)
ffn_d_hidden_factor = (
(4 / 3) if _is_glu_activation(baseline_subconfig['ffn_activation']) else 2.0
)
return {
'n_blocks': n_blocks,
'residual_dropout': 0.0,
'ffn_d_hidden': int(arch_subconfig['d_token'] * ffn_d_hidden_factor),
**arch_subconfig,
**baseline_subconfig,
}
@classmethod
def _make(
cls,
n_num_features,
cat_cardinalities,
transformer_config,
):
feature_tokenizer = FeatureTokenizer(
n_num_features=n_num_features,
cat_cardinalities=cat_cardinalities,
d_token=transformer_config['d_token'],
)
if transformer_config['d_out'] is None:
transformer_config['head_activation'] = None
if transformer_config['kv_compression_ratio'] is not None:
transformer_config['n_tokens'] = feature_tokenizer.n_tokens + 1
return FTTransformer(
feature_tokenizer,
Transformer(**transformer_config),
)
@classmethod
def make_baseline(
cls: Type['FTTransformer'],
*,
n_num_features: int,
cat_cardinalities: Optional[List[int]],
d_token: int,
n_blocks: int,
attention_dropout: float,
ffn_d_hidden: int,
ffn_dropout: float,
residual_dropout: float,
last_layer_query_idx: Union[None, List[int], slice] = None,
kv_compression_ratio: Optional[float] = None,
kv_compression_sharing: Optional[str] = None,
d_out: int,
) -> 'FTTransformer':
"""Create a "baseline" `FTTransformer`.
This variation of FT-Transformer was used in [gorishniy2021revisiting]. See
`get_baseline_transformer_subconfig` to learn the values of other parameters.
See `FTTransformer` for usage examples.
Tip:
`get_default_transformer_config` can serve as a starting point for choosing
hyperparameter values.
Args:
n_num_features: the number of continuous features
cat_cardinalities: the cardinalities of categorical features (see
`CategoricalFeatureTokenizer` to learn more about cardinalities)
d_token: the token size for each feature. Must be a multiple of :code:`n_heads=8`.
n_blocks: the number of Transformer blocks
attention_dropout: the dropout for attention blocks (see `MultiheadAttention`).
Usually, positive values work better (even when the number of features is low).
ffn_d_hidden: the *input* size for the *second* linear layer in `Transformer.FFN`.
Note that it can be different from the output size of the first linear
layer, since activations such as ReGLU or GEGLU change the size of input.
For example, if :code:`ffn_d_hidden=10` and the activation is ReGLU (which
is always true for the baseline and default configurations), then the
output size of the first linear layer will be set to :code:`20`.
ffn_dropout: the dropout rate after the first linear layer in `Transformer.FFN`.
residual_dropout: the dropout rate for the output of each residual branch of
all Transformer blocks.
last_layer_query_idx: indices of tokens that should be processed by the last
Transformer block. Note that for most cases there is no need to apply
the last Transformer block to anything except for the [CLS]-token. Hence,
runtime and memory can be saved by setting :code:`last_layer_query_idx=[-1]`,
since the :code:`-1` is the position of [CLS]-token in FT-Transformer.
Note that this will not affect the result in any way.
kv_compression_ratio: apply the technique from [wang2020linformer] to speed
up attention modules when the number of features is large. Can actually
slow things down if the number of features is too low. Note that this
option can affect task metrics in unpredictable way. Overall, use this
option with caution. See `MultiheadAttention` for some examples and the
implementation of `Transformer` to see how this option is used.
kv_compression_sharing: weight sharing policy for :code:`kv_compression_ratio`.
Must be one of :code:`[None, 'headwise', 'key-value', 'layerwise']`.
See [wang2020linformer] to learn more about sharing policies.
:code:`headwise` and :code:`key-value` are reasonable default choices. If
:code:`kv_compression_ratio` is `None`, then this parameter also must be
`None`. Otherwise, it must not be `None` (compression parameters must be
shared in some way).
d_out: the output size.
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
* [wang2020linformer] Sinong Wang, Belinda Z. Li, Madian Khabsa, Han Fang, Hao Ma "Linformer: Self-Attention with Linear Complexity", 2020
"""
transformer_config = cls.get_baseline_transformer_subconfig()
for arg_name in [
'n_blocks',
'd_token',
'attention_dropout',
'ffn_d_hidden',
'ffn_dropout',
'residual_dropout',
'last_layer_query_idx',
'kv_compression_ratio',
'kv_compression_sharing',
'd_out',
]:
transformer_config[arg_name] = locals()[arg_name]
return cls._make(n_num_features, cat_cardinalities, transformer_config)
@classmethod
def make_default(
cls: Type['FTTransformer'],
*,
n_num_features: int,
cat_cardinalities: Optional[List[int]],
n_blocks: int = 3,
last_layer_query_idx: Union[None, List[int], slice] = None,
kv_compression_ratio: Optional[float] = None,
kv_compression_sharing: Optional[str] = None,
d_out: int,
) -> 'FTTransformer':
"""Create the default `FTTransformer`.
With :code:`n_blocks=3` (default) it is the FT-Transformer variation that is
referred to as "default FT-Transformer" in [gorishniy2021revisiting]. See
`FTTransformer` for usage examples. See `FTTransformer.make_baseline` for
parameter descriptions.
Note:
The second component of the default FT-Transformer is the default optimizer,
which can be created with the `make_default_optimizer` method.
Note:
According to [gorishniy2021revisiting], the default FT-Transformer is
effective in the ensembling mode (i.e. when predictions of several default
FT-Transformers are averaged). For a single FT-Transformer, it is still
            possible to achieve better results by tuning hyperparameters for the
`make_baseline` constructor.
References:
* [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021
"""
transformer_config = cls.get_default_transformer_config(n_blocks=n_blocks)
for arg_name in [
'last_layer_query_idx',
'kv_compression_ratio',
'kv_compression_sharing',
'd_out',
]:
transformer_config[arg_name] = locals()[arg_name]
return cls._make(n_num_features, cat_cardinalities, transformer_config)
def optimization_param_groups(self) -> List[Dict[str, Any]]:
"""The replacement for :code:`.parameters()` when creating optimizers.
Example::
optimizer = AdamW(
model.optimization_param_groups(), lr=1e-4, weight_decay=1e-5
)
"""
no_wd_names = ['feature_tokenizer', 'normalization', '.bias']
assert isinstance(
getattr(self, no_wd_names[0], None), FeatureTokenizer
), _INTERNAL_ERROR_MESSAGE
assert (
sum(1 for name, _ in self.named_modules() if no_wd_names[1] in name)
== len(self.transformer.blocks) * 2
- int('attention_normalization' not in self.transformer.blocks[0]) # type: ignore
+ 1
), _INTERNAL_ERROR_MESSAGE
def needs_wd(name):
return all(x not in name for x in no_wd_names)
return [
{'params': [v for k, v in self.named_parameters() if needs_wd(k)]},
{
'params': [v for k, v in self.named_parameters() if not needs_wd(k)],
'weight_decay': 0.0,
},
]
def make_default_optimizer(self) -> torch.optim.AdamW:
"""Make the optimizer for the default FT-Transformer."""
return torch.optim.AdamW(
self.optimization_param_groups(),
lr=1e-4,
weight_decay=1e-5,
)
def forward(self, x_num: Optional[Tensor], x_cat: Optional[Tensor]) -> Tensor:
x = self.feature_tokenizer(x_num, x_cat)
x = self.cls_token(x)
x = self.transformer(x)
return x
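# Hedged usage sketch (not part of the original module): shows how the constructors
# documented above fit together. The hyperparameter values, batch size, and feature
# counts below are illustrative assumptions, not values taken from the paper; the
# optimizer call mirrors the example in `optimization_param_groups`.
if __name__ == '__main__':
    model = FTTransformer.make_baseline(
        n_num_features=3,
        cat_cardinalities=[2, 4],
        d_token=64,                 # must be a multiple of n_heads=8
        n_blocks=2,
        attention_dropout=0.1,
        ffn_d_hidden=128,
        ffn_dropout=0.1,
        residual_dropout=0.0,
        last_layer_query_idx=[-1],  # only the [CLS]-token goes through the last block
        d_out=1,
    )
    optimizer = torch.optim.AdamW(
        model.optimization_param_groups(), lr=1e-4, weight_decay=1e-5
    )
    x_num = torch.randn(8, 3)                        # 8 objects, 3 continuous features
    x_cat = torch.stack(                             # 8 objects, 2 categorical features
        [torch.randint(0, 2, (8,)), torch.randint(0, 4, (8,))], dim=1
    )
    y_pred = model(x_num, x_cat)
    print(y_pred.shape)                              # expected: torch.Size([8, 1])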
|
python
|
#coding: utf8
from rust.core import db as models
class Permission(models.Model):
"""
资源权限
"""
resource = models.CharField(default='', max_length=128) # 资源名
method = models.CharField(default='GET', max_length=32) # 方法名
created_at = models.DateTimeField(auto_now_add=True) # 更新时间
class Meta(object):
table_name = 'rust_permission'
class PermissionGroup(models.Model):
"""
权限组
"""
name = models.CharField(default='', max_length=512, unique=True) # 组名
desc = models.CharField(default='', max_length=1024) #描述
class Meta(object):
table_name = 'rust_permission_group'
class PermissionGroupHasPermission(models.Model):
"""
权限组拥有的权限
"""
group_id = models.IntegerField(default=0)
permission_id = models.IntegerField(default=0)
class Meta(object):
table_name = 'rust_permission_group_has_permission'
class PermissionGroupHasUser(models.Model):
"""
权限组中的用户
"""
group_id = models.IntegerField(default=0)
user_id = models.IntegerField(default=0)
updated_at = models.DateTimeField(null=False)
class Meta(object):
table_name = 'rust_permission_group_has_user'
class UserLimitedPermission(models.Model):
"""
用户禁止访问的资源及方法
"""
user_id = models.IntegerField(default=0)
permission_id = models.IntegerField(default=0)
    updated_at = models.DateTimeField()  # update time
class Meta(object):
table_name = 'rust_user_limited_permission'
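# Hedged sketch (not part of the original module): illustrates how these tables are
# intended to combine into an authorization check. It deliberately works on plain id
# pairs instead of rust.core.db queries, since that ORM's query API is not shown here;
# the helper below and its inputs are hypothetical.
def user_can_access(user_id, permission_id, group_has_user, group_has_permission, user_limited):
    """Return True if the user gets `permission_id` through a group and it is not
    explicitly revoked via UserLimitedPermission.

    group_has_user:       set of (group_id, user_id) pairs (PermissionGroupHasUser rows)
    group_has_permission: set of (group_id, permission_id) pairs (PermissionGroupHasPermission rows)
    user_limited:         set of (user_id, permission_id) pairs (UserLimitedPermission rows)
    """
    if (user_id, permission_id) in user_limited:
        return False
    user_groups = {g for g, u in group_has_user if u == user_id}
    return any((g, permission_id) in group_has_permission for g in user_groups)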
|
python
|
from phoopy.kernel import Bundle
from os import path
class AppBundle(Bundle):
def service_path(self):
return path.join(self.get_bundle_dir(), 'config', 'services.yml') # pragma: no cover
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests for :mod:`orion.core.utils.working_dir`."""
import os
import shutil
from pathlib import Path
import pytest
from orion.core.utils.working_dir import SetupWorkingDir
class ExperimentStub:
def __init__(self, working_dir=None):
self.name = "exp-name"
self.version = 1
self.working_dir = working_dir
def test_exp_with_new_working_dir(tmp_path):
"""Check if a permanent directory is created."""
tmp_path = os.path.join(tmp_path, "orion")
experiment = ExperimentStub(tmp_path)
assert not os.path.exists(tmp_path)
with SetupWorkingDir(experiment):
assert os.path.exists(tmp_path)
assert experiment.working_dir == tmp_path
assert os.path.exists(tmp_path)
shutil.rmtree(tmp_path)
def test_exp_with_existing_working_dir(tmp_path):
"""Check if an existing permanent directory is not overwritten."""
tmp_path = os.path.join(tmp_path, "orion")
experiment = ExperimentStub(tmp_path)
os.makedirs(tmp_path)
assert os.path.exists(tmp_path)
file_path = os.path.join(tmp_path, "some_file")
Path(file_path).touch()
assert os.path.exists(file_path)
with SetupWorkingDir(experiment):
assert os.path.exists(tmp_path)
assert experiment.working_dir == tmp_path
assert os.path.exists(tmp_path)
assert os.path.exists(file_path)
shutil.rmtree(tmp_path)
def test_exp_with_no_working_dir():
"""Check if a permanent directory is deleted."""
experiment = ExperimentStub(None)
with SetupWorkingDir(experiment):
assert experiment.working_dir is not None
assert os.path.exists(experiment.working_dir)
tmp_path = experiment.working_dir
assert experiment.working_dir is None
assert not os.path.exists(tmp_path)
|
python
|
#!/usr/bin/python
import matplotlib.pyplot as plt
from matplotlib import *
import sys, getopt
import copy
import time
import datetime
import random
import sys
import os
import re
def get_data(file_list, type, start, finish, nice):
mapped_reqs, running_reqs, refused_reqs = [], [], []
mapped_requests_dict = dict()
mapped_requests_dict["request_list"] = []
mapped_requests_dict["incoming_time"] = []
mapped_requests_dict["name"] = ""
running_requests_dict = dict()
running_requests_dict["request_list"] = []
running_requests_dict["incoming_time"] = []
running_requests_dict["name"] = ""
refused_requests_dict = dict()
refused_requests_dict["request_list"] = []
refused_requests_dict["incoming_time"] = []
refused_requests_dict["name"] = ""
file_list_iter = 0
unfinished_test_count = False
for element in file_list:
start_time, data_point_count = 0, 0
name = ""
if isinstance(element, basestring) or len(element) == 1:
if not isinstance(element, basestring):
element = str(element[0])
for line in open(element):
if start_time == 0:
start_time = datetime.datetime.strptime(line[:22], '%Y-%m-%d %H:%M:%S,%f')
if "| Orchestrator:" in line:
name = line[line.find("| Orchestrator:")+15:]
if "| What to optimize:" in line:
name += "_" + line[line.find("| What to optimize:")+19:]
if "| When to optimize:" in line:
name += "_" + line[line.find("| When to optimize:")+19:]
if "| Optimize strategy:" in line:
name += "_" + line[line.find("| Optimize strategy:")+20:]
if "Mapped service_requests count:" in line:
data_point_count += 1
if start <= data_point_count <= finish:
if "Mapped service_requests count:" in line:
count = line[line.find("Mapped service_requests count:")+31:]
mapped_requests_dict["request_list"].append(int(count))
sec = ((datetime.datetime.strptime(line[:22], '%Y-%m-%d %H:%M:%S,%f')) - start_time).total_seconds()
mapped_requests_dict["incoming_time"].append(sec)
elif "Running service_requests count:" in line:
count = line[line.find("Running service_requests count:")+32:]
running_requests_dict["request_list"].append(int(count))
sec = ((datetime.datetime.strptime(line[:22], '%Y-%m-%d %H:%M:%S,%f')) - start_time).total_seconds()
running_requests_dict["incoming_time"].append(sec)
elif "Refused service_requests count:" in line:
count = line[line.find("Refused service_requests count:")+32:]
refused_requests_dict["request_list"].append(int(count))
sec = ((datetime.datetime.strptime(line[:22], '%Y-%m-%d %H:%M:%S,%f')) - start_time).total_seconds()
refused_requests_dict["incoming_time"].append(sec)
mapped_requests_dict["name"] = (name+"_"+str(file_list[file_list_iter])).replace("\n", "")
mapped_reqs.append(copy.copy(mapped_requests_dict))
mapped_requests_dict["name"] = ""
mapped_requests_dict["request_list"] = []
mapped_requests_dict["incoming_time"] = []
running_requests_dict["name"] = (name+"_"+str(file_list[file_list_iter])).replace("\n", "")
running_reqs.append(copy.copy(running_requests_dict))
running_requests_dict["name"] = ""
running_requests_dict["request_list"] = []
running_requests_dict["incoming_time"] = []
refused_requests_dict["name"] = (name+"_"+str(file_list[file_list_iter])).replace("\n", "")
refused_reqs.append(copy.copy(refused_requests_dict))
refused_requests_dict["name"] = ""
refused_requests_dict["request_list"] = []
refused_requests_dict["incoming_time"] = []
else:
start_time, data_point_count = 0, 0
name = ""
mapped_reqs_to_avg, running_reqs_to_avg, refused_reqs_to_avg = [], [], []
for file in element:
mapped_requests_dict["name"] = ""
mapped_requests_dict["request_list"] = []
mapped_requests_dict["incoming_time"] = []
running_requests_dict["name"] = ""
running_requests_dict["request_list"] = []
running_requests_dict["incoming_time"] = []
refused_requests_dict["name"] = ""
refused_requests_dict["request_list"] = []
refused_requests_dict["incoming_time"] = []
data_point_count = 0
for line in open(file):
if start_time == 0:
start_time = datetime.datetime.strptime(line[:22], '%Y-%m-%d %H:%M:%S,%f')
if "| Orchestrator:" in line:
name = line[line.find("| Orchestrator:") + 15:]
if "| What to optimize:" in line:
name += "_" + line[line.find("| What to optimize:") + 19:]
if "| When to optimize:" in line:
name += "_" + line[line.find("| When to optimize:") + 19:]
if "| Optimize strategy:" in line:
name += "_" + line[line.find("| Optimize strategy:") + 20:]
if "Mapped service_requests count:" in line:
data_point_count += 1
if start <= data_point_count <= finish:
if "Mapped service_requests count:" in line:
count = line[line.find("Mapped service_requests count:") + 31:]
mapped_requests_dict["request_list"].append(int(count))
sec = ((datetime.datetime.strptime(line[:22],
'%Y-%m-%d %H:%M:%S,%f')) - start_time).total_seconds()
mapped_requests_dict["incoming_time"].append(sec)
elif "Running service_requests count:" in line:
count = line[line.find("Running service_requests count:") + 32:]
running_requests_dict["request_list"].append(int(count))
sec = ((datetime.datetime.strptime(line[:22],
'%Y-%m-%d %H:%M:%S,%f')) - start_time).total_seconds()
running_requests_dict["incoming_time"].append(sec)
elif "Refused service_requests count:" in line:
count = line[line.find("Refused service_requests count:") + 32:]
refused_requests_dict["request_list"].append(int(count))
sec = ((datetime.datetime.strptime(line[:22],
'%Y-%m-%d %H:%M:%S,%f')) - start_time).total_seconds()
refused_requests_dict["incoming_time"].append(sec)
mapped_requests_dict["name"] = (name + "_AVG_" + str(file_list[file_list_iter])).replace("\n", "")
mapped_reqs_to_avg.append(copy.copy(mapped_requests_dict))
running_requests_dict["name"] = (name + "_AVG_" + str(file_list[file_list_iter])).replace("\n", "")
running_reqs_to_avg.append(copy.copy(running_requests_dict))
refused_requests_dict["name"] = (name + "_AVG_" + str(file_list[file_list_iter])).replace("\n", "")
refused_reqs_to_avg.append(copy.copy(refused_requests_dict))
mapped_requests_dict["name"] = ""
mapped_requests_dict["request_list"] = []
mapped_requests_dict["incoming_time"] = []
running_requests_dict["name"] = ""
running_requests_dict["request_list"] = []
running_requests_dict["incoming_time"] = []
refused_requests_dict["name"] = ""
refused_requests_dict["request_list"] = []
refused_requests_dict["incoming_time"] = []
# Get the longest list len
longest_list = len(mapped_reqs_to_avg[0]["request_list"])
for x in range(0, len(mapped_reqs_to_avg)):
if len(mapped_reqs_to_avg[x]["request_list"]) > longest_list:
longest_list = len(mapped_reqs_to_avg[x]["request_list"])
# Average dicts
avg_mapped_requests_dict = dict()
avg_mapped_requests_dict["request_list"] = []
avg_mapped_requests_dict["incoming_time"] = []
avg_mapped_requests_dict["name"] = ""
avg_running_requests_dict = dict()
avg_running_requests_dict["request_list"] = []
avg_running_requests_dict["incoming_time"] = []
avg_running_requests_dict["name"] = ""
avg_refused_requests_dict = dict()
avg_refused_requests_dict["request_list"] = []
avg_refused_requests_dict["incoming_time"] = []
avg_refused_requests_dict["name"] = ""
inc_summa, req_summa, log_file_counter = 0, 0, 0
for i in range(0, longest_list):
for m in mapped_reqs_to_avg:
try:
inc_summa += m["incoming_time"][i]
req_summa += m["request_list"][i]
log_file_counter += 1
except:
unfinished_test_count = True
# in this case, the current test is shorter than the others
pass
avg_mapped_requests_dict["incoming_time"].append(round(inc_summa / log_file_counter, 2))
avg_mapped_requests_dict["request_list"].append(int(req_summa / log_file_counter))
avg_mapped_requests_dict["name"] = mapped_reqs_to_avg[0]["name"]
inc_summa, req_summa, log_file_counter = 0, 0, 0
for i in range(0, longest_list):
for m in running_reqs_to_avg:
try:
inc_summa += m["incoming_time"][i]
req_summa += m["request_list"][i]
log_file_counter += 1
except:
# in this case, the current test is shorter than the others
pass
avg_running_requests_dict["incoming_time"].append(round(inc_summa / log_file_counter, 2))
avg_running_requests_dict["request_list"].append(int(req_summa / log_file_counter))
avg_running_requests_dict["name"] = running_reqs_to_avg[0]["name"]
inc_summa, req_summa, log_file_counter = 0, 0, 0
for i in range(0, longest_list):
for m in refused_reqs_to_avg:
try:
inc_summa += m["incoming_time"][i]
req_summa += m["request_list"][i]
log_file_counter += 1
except:
# in this case, the current test is shorter than the others
pass
avg_refused_requests_dict["incoming_time"].append(round(inc_summa / log_file_counter, 2))
avg_refused_requests_dict["request_list"].append(int(req_summa / log_file_counter))
avg_refused_requests_dict["name"] = refused_reqs_to_avg[0]["name"]
inc_summa, req_summa, log_file_counter = 0, 0, 0
mapped_reqs.append(copy.copy(avg_mapped_requests_dict))
running_reqs.append(copy.copy(avg_running_requests_dict))
refused_reqs.append(copy.copy(avg_refused_requests_dict))
file_list_iter += 1
if unfinished_test_count:
print ('\x1b[1;33;0m' + 'There are one or more unfinished tests!!!' + '\x1b[0m')
return mapped_reqs, running_reqs, refused_reqs
def separate_and_avg(log_files):
# Separate
try:
result = []
if "[" in log_files:
avg_log_files = log_files.split(",")
# where are [ and ] characters:
start = [i for i, s in enumerate(avg_log_files) if '[' in s]
end = [i for i, s in enumerate(avg_log_files) if ']' in s]
if len(start) != len(end):
print("The number of [ and ] is not equal!!")
raise
# delete special characters:
avg_log_files = ([s.replace('[', '') for s in avg_log_files])
avg_log_files = ([s.replace(']', '') for s in avg_log_files])
# merge those items in the list that were in the same parentheses
correction = 0
for k in range(0, len(start)):
avg_log_files[(start[k]-correction):(end[k]+1-correction)] = \
[','.join(avg_log_files[(start[k]-correction):(end[k]+1-correction)])]
correction += end[k] - start[k]
for element in avg_log_files:
while "." in element:
tmp_element = []
element = element.split(",")
for i in element:
if i!='':
tmp_element.append(i)
element = tmp_element
result.append(element)
print "result::"
for x in result:
print x
return result
else:
return log_files.split(",")
except Exception as e:
print e
print "Separate file error!"
def main(argv):
mapped_online_req_list = None
mapped_offline_req_list = None
mapped_hybrid_req_list = None
running_online_req_list = None
running_offline_req_list = None
running_hybrid_req_list = None
refused_online_req_list = None
refused_offline_req_list = None
refused_hybrid_req_list = None
start_count = 0
finish_count = float('inf')
path = ""
nice, nolegend = False, False
format = "png"
mark_every = 50
marker_size = 4
try:
opts, args = getopt.getopt(argv, "hs:f:", ["online_log_files=", "offline_log_files=", "hybrid_log_files=",
"dir=", "nice", "format=", "nolegend", "markersize=",
"markevery=", "s=", "f="])
except getopt.GetoptError:
print 'Invalid argument!!! create_plots.py ' \
'--online_log_files=<online_log_file1,[online_log_file2,online_log_file3],' \
'online_log_file4 ...> --offline_log_files=<offline_log_file1,offline_log_file2,...> ' \
'--hybrid_log_files=<hybrid_log_file1,hybrid_log_file2,...> ' \
'--dir=<directory name> --s=<start of interval> --f=<end of interval> --nice --format=<pdf or png> ' \
'--nolegend --markersize=<recommended:5> --markevery=<recommended:40-70>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'create_plots.py ' \
'--online_log_files=<online_log_file1,[online_log_file2,online_log_file3],' \
'online_log_file4 ...> --offline_log_files=<offline_log_file1,offline_log_file2,...> ' \
'--hybrid_log_files=<hybrid_log_file1,hybrid_log_file2,...> ' \
'--dir=<directory name> --s=<start of interval> --f=<end of interval> --nice --format=<pdf or png> ' \
'--nolegend --markersize=<recommended:5> --markevery=<recommended:40-70>'
sys.exit()
elif opt in ("--online_log_files="):
online_log_files = arg
elif opt in ("--offline_log_files="):
offline_log_files = arg
elif opt in ("--hybrid_log_files="):
hybrid_log_files = arg
elif opt in ("--dir="):
path = arg
elif opt in ("--s="):
start_count = int(arg)
elif opt in ("--f="):
finish_count = int(arg)
elif opt in ("--nice"):
nice = True
elif opt in ("--nolegend"):
nolegend = True
elif opt in ("--format="):
if arg == "pdf" or arg == "png":
format = arg
else:
print 'Invalid format! Only pdf or png!'
sys.exit()
elif opt in ("--markersize="):
marker_size = int(arg)
elif opt in ("--markevery="):
mark_every = int(arg)
else:
print 'Bad parameters! Use python create_plots.py --help'
sys.exit()
print "argument:::::"
print (sys.argv)
print "arg0" + sys.argv[0]
print "arg1" + sys.argv[1]
print "arg2" + sys.argv[2]
try:
online_files = separate_and_avg(online_log_files)
mapped_online_req_list, running_online_req_list, refused_online_req_list = \
get_data(online_files, "Online", start_count, finish_count, nice)
except Exception as e:
print e
print "The program runs without online log file."
try:
offline_files = separate_and_avg(offline_log_files)
mapped_offline_req_list, running_offline_req_list, refused_offline_req_list = \
get_data(offline_files, "Offline", start_count, finish_count, nice)
except Exception as e:
print e
print "The program runs without offline log file."
try:
hybrid_files = separate_and_avg(hybrid_log_files)
mapped_hybrid_req_list, running_hybrid_req_list, refused_hybrid_req_list = \
get_data(hybrid_files, "Hybrid", start_count, finish_count, nice)
except Exception as e:
print e
print "The program runs without hybrid log file."
if path == "":
raise ValueError("Have to give a saving directory! Example: --dir=test100")
if not os.path.exists(path):
os.mkdir(path)
if path[:-1] != "/":
path = path + "/"
colors_ls = ['red', 'blue', 'green', 'yellow', 'skyblue', 'yellowgreen', 'black', 'orange', 'magenta', 'slategray']
lines_ls = [[8, 4, 2, 4, 2, 4], [4, 2], [], [8, 4, 4, 2], [8, 4, 2, 4], [5, 2, 10, 5], []]
markers_ls = ['o', 'v', '+', 's', '*', '', '|', 'x']
    colors_iter = iter(colors_ls)
    lines_iter = iter(lines_ls)
    markers_iter = iter(markers_ls)
on_act_colors, on_act_lines, on_act_marker, off_act_colors, off_act_lines, off_act_marker, hy_act_colors, \
hy_act_lines, hy_act_marker = [], [], [], [], [], [], [], [], []
# Create mapped picture
if mapped_online_req_list is not None:
for element in mapped_online_req_list:
try:
color = colors_iter.next()
except:
color = random.choice(colors_ls)
try:
line = lines_iter.next()
except:
line = random.choice(lines_ls)
try:
marker = markers_iter.next()
except:
marker = random.choice(markers_ls)
finally:
on_act_marker.append(marker)
on_act_colors.append(color)
on_act_lines.append(line)
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
if mapped_offline_req_list is not None:
for element in mapped_offline_req_list:
try:
color = colors_iter.next()
except:
color = random.choice(colors_ls)
try:
line = lines_iter.next()
except:
line = random.choice(lines_ls)
try:
marker = markers_iter.next()
except:
marker = random.choice(markers_ls)
finally:
off_act_marker.append(marker)
off_act_colors.append(color)
off_act_lines.append(line)
label = element["name"].replace('/', '_').replace('-', '_').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
if mapped_hybrid_req_list is not None:
for element in mapped_hybrid_req_list:
try:
color = colors_iter.next()
except:
color = random.choice(colors_ls)
try:
line = lines_iter.next()
except:
line = random.choice(lines_ls)
try:
marker = markers_iter.next()
except:
marker = random.choice(markers_ls)
finally:
hy_act_marker.append(marker)
hy_act_colors.append(color)
hy_act_lines.append(line)
label = element["name"].replace('/', '_').replace('-', '_').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
plt.grid('on')
plt.title('Accepted incoming service requests')
plt.ylabel('Accepted requests count')
plt.xlabel('Incoming requests')
plt.xticks()
if start_count != 0 or finish_count != float('inf'):
plt.xlim(xmin=start_count, xmax=finish_count)
lgd = plt.legend(loc='upper left', bbox_to_anchor=(0, -0.1), numpoints=1)
if nolegend:
plt.legend().set_visible(False)
plt.savefig(path + "mapped_requests" + str(time.ctime()).replace(' ', '_').replace(':', '-') + "." + format,
bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.clf()
# Create mapped picture with time axis
if mapped_online_req_list is not None:
i = 0
for element in mapped_online_req_list:
color = on_act_colors[i]
line = on_act_lines[i]
marker = on_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if mapped_offline_req_list is not None:
i = 0
for element in mapped_offline_req_list:
color = off_act_colors[i]
line = off_act_lines[i]
marker = off_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if mapped_hybrid_req_list is not None:
i = 0
for element in mapped_hybrid_req_list:
color = hy_act_colors[i]
line = hy_act_lines[i]
marker = hy_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
plt.grid('on')
plt.title('Accepted incoming service requests')
plt.ylabel('Accepted requests count')
plt.xlabel('Sec')
lgd = plt.legend(loc='upper left', bbox_to_anchor=(0, -0.1), numpoints=1)
#TODO: fix zoom with time axis too
if nolegend:
plt.legend().set_visible(False)
plt.savefig(path + "mapped_requests_with_time_axis_" +
str(time.ctime()).replace(' ', '_').replace(':', '-') + "." + format,
bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.clf()
# Create Running picture
if running_online_req_list is not None:
i = 0
for element in running_online_req_list:
color = on_act_colors[i]
line = on_act_lines[i]
marker = on_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if running_offline_req_list is not None:
i = 0
for element in running_offline_req_list:
color = off_act_colors[i]
line = off_act_lines[i]
marker = off_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if running_hybrid_req_list is not None:
i = 0
for element in running_hybrid_req_list:
color = hy_act_colors[i]
line = hy_act_lines[i]
marker = hy_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
plt.grid('on')
plt.title('Currently running (mapped) requests in the NFFG')
plt.ylabel('Requests count')
plt.xlabel('Incoming requests')
lgd = plt.legend(loc='upper left', bbox_to_anchor=(0, -0.1), numpoints=1)
if start_count != 0 or finish_count != float('inf'):
plt.xlim(xmin=start_count, xmax=finish_count)
if nolegend:
plt.legend().set_visible(False)
plt.savefig(path + "running_requests" + str(time.ctime()). \
replace(' ', '_').replace(':', '-') + "." + format, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.clf()
# Create Running picture with time axis
if running_online_req_list is not None:
i = 0
for element in running_online_req_list:
color = on_act_colors[i]
line = on_act_lines[i]
marker = on_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80],
dashes=line, marker=marker, markersize=marker_size, markevery=mark_every)
i += 1
if running_offline_req_list is not None:
i = 0
for element in running_offline_req_list:
color = off_act_colors[i]
line = off_act_lines[i]
marker = off_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if running_hybrid_req_list is not None:
i = 0
for element in running_hybrid_req_list:
color = hy_act_colors[i]
line = hy_act_lines[i]
marker = hy_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
plt.grid('on')
plt.title('Currently running (mapped) requests in the NFFG')
plt.ylabel('Requests count')
plt.xlabel('Sec')
lgd = plt.legend(loc='upper left', bbox_to_anchor=(0, -0.1), numpoints=1)
if nolegend:
plt.legend().set_visible(False)
# TODO: fix zoom with time axis too
plt.savefig(path + "running_requests_with_time_axis" + str(time.ctime()). \
replace(' ', '_').replace(':', '-') + "." + format, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.clf()
# Create refused picture
if refused_online_req_list is not None:
i = 0
for element in refused_online_req_list:
color = on_act_colors[i]
line = on_act_lines[i]
marker = on_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if refused_offline_req_list is not None:
i = 0
for element in refused_offline_req_list:
color = off_act_colors[i]
line = off_act_lines[i]
marker = off_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if refused_hybrid_req_list is not None:
i = 0
for element in refused_hybrid_req_list:
color = hy_act_colors[i]
line = hy_act_lines[i]
marker = hy_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(range(0, len(element["request_list"])), element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
plt.title('Refused requests during the simulation')
plt.ylabel('Refused requests count')
plt.xlabel('Incoming requests')
lgd = plt.legend(loc='upper left', bbox_to_anchor=(0, -0.1), numpoints=1)
if start_count != 0 or finish_count != float('inf'):
plt.xlim(xmin=start_count, xmax=finish_count)
if nolegend:
plt.legend().set_visible(False)
plt.grid('on')
plt.savefig(path + "refused_requests" + str(time.ctime()). \
replace(' ', '_').replace(':', '-') + "." + format, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.clf()
# Create refused picture with time
if refused_online_req_list is not None:
i = 0
for element in refused_online_req_list:
color = on_act_colors[i]
line = on_act_lines[i]
marker = on_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if refused_offline_req_list is not None:
i = 0
for element in refused_offline_req_list:
color = off_act_colors[i]
line = off_act_lines[i]
marker = off_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
if refused_hybrid_req_list is not None:
i = 0
for element in refused_hybrid_req_list:
color = hy_act_colors[i]
line = hy_act_lines[i]
marker = hy_act_marker[i]
label = element["name"].replace('/', '_').replace('-', '').replace('.', '_')
plt.plot(element["incoming_time"], element["request_list"], color=color,
label=label[:80], dashes=line, marker=marker, markersize=marker_size,
markevery=mark_every)
i += 1
plt.grid('on')
plt.title('Refused requests during the simulation')
plt.ylabel('Refused requests count')
plt.xlabel('Sec')
lgd = plt.legend(loc='upper left', bbox_to_anchor=(0, -0.1), numpoints=1)
if nolegend:
plt.legend().set_visible(False)
# TODO: fix zoom with time axis too
plt.savefig(path + "refused_requests_with_time_axis" + str(time.ctime()). \
replace(' ', '_').replace(':', '-') + "." + format, bbox_extra_artists=(lgd,), bbox_inches='tight')
    print('\x1b[1;32;0m' + 'Creating plots is DONE :)' + '\x1b[0m')
if __name__ == "__main__":
main(sys.argv[1:])
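# Example invocation (hypothetical file and directory names; flags as documented in the
# --help text inside main() above):
#   python create_plots.py \
#       --online_log_files=online1.log,[online2.log,online3.log] \
#       --offline_log_files=offline1.log \
#       --dir=test100 --s=0 --f=500 --format=png --markersize=5 --markevery=50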
|
python
|
from pyexcel.renderer import Renderer
from pyexcel_echarts.options import MANAGER
DEFAULT_TITLE = 'pyexcel via pyecharts'
class Chart(Renderer):
def render_sheet(self, sheet, chart_type='bar', mode='embed',
title=DEFAULT_TITLE,
subtitle="",
width=800,
height=400,
title_pos="auto",
title_top="auto",
title_color="#000",
subtitle_color="#aaa",
title_text_size=18,
subtitle_text_size=12,
background_color="#fff",
**keywords):
charter = MANAGER.get_a_plugin(
chart_type,
mode=mode,
title=title, subtitle=subtitle,
width=width, height=height,
title_pos=title_pos, title_top=title_top,
title_color=title_color, title_text_size=title_text_size,
subtitle_color=subtitle_color,
subtitle_text_size=subtitle_text_size,
background_color=background_color)
charter.render_sheet(
sheet, **keywords)
self._stream.write(str(charter))
def render_book(self, book, chart_type='bar', mode='embed',
title=DEFAULT_TITLE,
subtitle="",
width=800,
height=400,
title_pos="auto",
title_top="auto",
title_color="#000",
subtitle_color="#aaa",
title_text_size=18,
subtitle_text_size=12,
background_color="#fff",
**keywords):
charter = MANAGER.get_a_plugin(
chart_type,
mode=mode,
title=title, subtitle=subtitle,
width=width, height=height,
title_pos=title_pos, title_top=title_top,
title_color=title_color, title_text_size=title_text_size,
subtitle_color=subtitle_color,
subtitle_text_size=subtitle_text_size,
background_color=background_color)
charter.render_book(book,
**keywords)
self._stream.write(str(charter))
|
python
|
from typing import List


class Solution:
def largestDivisibleSubset(self, nums: List[int]) -> List[int]:
S = {-1: set()}
for x in sorted(nums):
S[x] = max((S[d] for d in S if x % d == 0), key=len) | {x}
return list(max(S.values(), key=len))
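# Worked example (added for illustration, not part of the original solution):
# for nums = [1, 2, 4, 8, 3] the loop visits 1, 2, 3, 4, 8 in sorted order and the best
# chain grows {1} -> {1, 2} -> {1, 2, 4} -> {1, 2, 4, 8}, which is the returned subset
# (the elements may come back in any order, since sets are unordered).
if __name__ == "__main__":
    print(Solution().largestDivisibleSubset([1, 2, 4, 8, 3]))  # e.g. [8, 1, 2, 4]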
|
python
|
# coding=utf-8
"""execution.py - This module provides classes that execute commands.
"""
import os
import sys
import signal
import subprocess
from .config import Config
from .base import PluginManager
from .log import LogManager
class ExecutionManager(PluginManager):
"""This class is used to execute commands for scripts running.
"""
def __init__(self, configManager: Config) -> None:
super().__init__('execution', configManager)
self.force = getattr(self, 'force', True)
self.executable = getattr(self, 'executable', str(sys.executable))
def exec_command(self, command: str, log_manager: LogManager = None) -> int:
"""Execute the command and output to a log file or not.
Args:
command (str): Target command.
log_manager (LogManager): Class `LogManager` instance.
Returns:
            int: Status code returned by executing the command.
"""
if log_manager:
with open(log_manager.path, 'a') as log:
# The child process calls the system command and prints the error message to a log file.
ret = subprocess.call(command, shell=True, stdout=log, stderr=log)
else:
ret = subprocess.call(command, shell=True)
return ret
def kill(self):
"""Force kill process to end execution.
Notes:
The kill operation on Linux system and Windows system may be different.
"""
        try:
            os.kill(os.getpid(), signal.SIGKILL)
        except AttributeError:
            # signal.SIGKILL does not exist on Windows; fall back to CTRL_BREAK_EVENT.
            os.kill(os.getpid(), signal.CTRL_BREAK_EVENT)
|
python
|
# -*- encoding: utf-8 -*-
import numpy as np
def get_seq_graph(edge):
dy, dx = np.array([-1,0,1,1,1,0,-1,-1]), np.array([-1,-1,-1,0,1,1,1,0])
def get_neighbors(node):
Y, X = node[0]+dy, node[1]+dx
neighbors = edge[Y, X]
Y, X = Y[neighbors], X[neighbors]
        return list(zip(Y, X))  # materialize so neighbours can be indexed repeatedly (Python 3 zip is an iterator)
graph = {}
Y, X = edge.nonzero()
for node in zip(Y,X):
graph[node] = get_neighbors(node)
seq = []
first_el = (Y[0], X[0])
seq.append(first_el)
ext_el = first_el
act_el = graph[ext_el][0]
while (first_el != ext_el) or (len(seq)==1):
ind_el = np.where(np.array(graph[(ext_el)])!=act_el)
ind_el_uq = np.unique(ind_el[0])
if len(ind_el_uq)==1:
ind_el = ind_el_uq[0]
else:
acum_dist = []
for ind in ind_el_uq:
dist_ = (graph[(ext_el)][ind][0]-ext_el[0])**2+(graph[(ext_el)][ind][1]-ext_el[1])**2
acum_dist.append(dist_)
min_dist = acum_dist.index(min(acum_dist))
ind_el = ind_el_uq[min_dist]
act_el = ext_el
ext_el = graph[(act_el)][ind_el]
seq.append(ext_el)
lst1, lst2 = zip(*seq)
return (np.array(lst1), np.array(lst2))
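# Minimal usage sketch (added for illustration, not part of the original module): trace
# the 8-pixel ring around the centre of a small boolean edge image. The contour must be
# closed and must not touch the image border, since the neighbour indexing above does
# not clip coordinates.
if __name__ == '__main__':
    edge = np.zeros((7, 7), dtype=bool)
    edge[2:5, 2:5] = True
    edge[3, 3] = False            # hollow 3x3 square -> closed ring of 8 edge pixels
    ys, xs = get_seq_graph(edge)
    print(list(zip(ys, xs)))      # pixels in traversal order, starting and ending at (2, 2)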
|
python
|
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1: 4])
fan_out = np.prod(weight_shape[2: 4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0.0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.0)
m.bias.data.fill_(0.0)
elif classname.find('LSTMCell') != -1:
m.bias_ih.data.fill_(0.0)
m.bias_hh.data.fill_(0.0)
class STN3d(nn.Module):
def __init__(self, num_points = 2500):
super(STN3d, self).__init__()
self.num_points = num_points
self.conv1 = torch.nn.Conv1d(4, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
        # map the input point cloud to a 3x3 spatial transformation matrix
batchsize = x.shape[0]
if batchsize > 1:
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.mp1(x)
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
else:
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.mp1(x)
x = x.view(-1, 1024)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
iden = Variable(torch.eye(3)).view(1, -1).repeat(batchsize, 1)
if x.is_cuda:
device = torch.device('cuda:%d' % x.get_device())
iden = iden.to(device=device)
x = x + iden
x = x.view(-1, 3, 3)
return x
class PointNetfeat(nn.Module):
def __init__(self, num_points=2500, global_feat=True):
super(PointNetfeat, self).__init__()
self.stn = STN3d(num_points=num_points)
self.conv1 = torch.nn.Conv1d(4, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.num_points = num_points
self.global_feat = global_feat
def forward(self, x):
trans = self.stn(x)
x = torch.cat([torch.bmm(trans, x[:, :3, :]), x[:, 3, :].unsqueeze(1)], dim=1)
if x.shape[0] > 1:
x = F.relu(self.bn1(self.conv1(x)))
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
else:
x = F.relu(self.conv1(x))
pointfeat = x
x = F.relu(self.conv2(x))
x = self.conv3(x)
x = self.mp1(x)
x = x.view(-1, 1024)
if self.global_feat:
return x, trans
else:
x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)
return torch.cat([x, pointfeat], 1), trans
class end_layer(nn.Module):
def __init__(self, in_channels=1024, out_channels=1):
super(end_layer, self).__init__()
self.fc1 = nn.Linear(in_channels, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, out_channels)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.apply(weights_init)
def forward(self, x):
if x.size()[0] == 1:
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
else:
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.fc2(x)))
return self.fc3(x)
class PointNetActorCritic(nn.Module):
def __init__(self, num_points=2500, num_actions=4):
super(PointNetActorCritic, self).__init__()
self.num_points = num_points
self.feat = PointNetfeat(num_points, global_feat=True)
self.lstm = nn.LSTMCell(1024, 1024)
self.critic_linear = end_layer(in_channels=1024, out_channels=1)
self.actor_linear = end_layer(in_channels=1024, out_channels=num_actions)
self.apply(weights_init)
self.train()
def forward(self, inputs):
x, (hx, cx) = inputs
x, _ = self.feat(x)
hx, cx = self.lstm(x, (hx, cx))
x = hx
return self.critic_linear(x), self.actor_linear(x), (hx, cx)
if __name__ == '__main__':
sim_data = Variable(torch.rand(10, 4, 2500))
# trans = STN3d()
# out = trans(sim_data)
# print('stn', out.size())
# pointfeat = PointNetfeat(global_feat=True)
# out, _ = pointfeat(sim_data)
# print('global feat', out.size())
# pointfeat = PointNetfeat(global_feat=False)
# out, _ = pointfeat(sim_data)
# print('point feat', out.size())
cls = PointNetActorCritic(num_actions=4)
hx, cx = Variable(torch.zeros(10, 1024)), Variable(torch.zeros(10, 1024))
if torch.cuda.is_available():
sim_data = sim_data.cuda()
cls = cls.cuda()
hx, cx = hx.cuda(), cx.cuda()
v, q, (hx ,cx) = cls((sim_data, (hx, cx)))
print(v.shape, q.shape, hx.shape, cx.shape)
print(v)
print(q)
|
python
|
import numpy as np
# Slicing a one-dimensional array
origin = np.arange(1,100)
print(origin)
print(origin[0:2])
print(origin[:12:4])
print(origin[:12:])
print(origin[5:])
print(origin[::-1])  # reversed order
# Slicing a two-dimensional array
origin = np.random.random((3,4))
print(origin)
print(origin[-2:,:2])
print(origin[::-2,::-1])
|
python
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from io import StringIO
from unittest.mock import patch
from django.core.management import call_command
from django.test import TestCase
from logging import getLogger
logger = getLogger("test")
class DataBCTest(TestCase):
# Minio and the file system are mocked out - so that we don't create any artifacts during this test.
@patch('gwells.management.commands.export_databc.open')
@patch('gwells.management.commands.export_databc.Minio')
@patch('gwells.management.commands.export_databc.os')
def test_export_no_exceptions(self, fake_os, fake_minio, fake_open):
# This is a very simple test, that just checks to see that the export can be run without any
# exceptions. This should catch most of the situations that could cause an export to fail.
out = StringIO()
call_command('export_databc', stdout=out)
self.assertIn('GeoJSON export complete.', out.getvalue())
class ImportLicencesTest(TestCase):
""" tests functions used by `./manage.py import_licences` """
fixtures = ['gwells-codetables', 'wellsearch-codetables', 'wellsearch', 'registries', 'registries-codetables']
def test_import_using_fixture_file(self):
out = StringIO()
TEST_LICENCES = os.path.join(os.path.dirname(__file__), 'import_licences_test_licences.csv')
call_command('import_licences', '-d', filename=TEST_LICENCES, stdout=out)
val = out.getvalue()
self.assertIn('Licence import complete with 0 errors.', val)
|
python
|