content (string, lengths 0–894k) | type (string, 2 classes)
---|---
import tensorflow as tf
import numpy as np
from tensorflow import keras
# Use two 2x bilinear upsampling layers followed by 9x9, 1x1 and 5x5 convolutional layers. MSE: 0.0028712489
# Described in https://towardsdatascience.com/an-evolution-in-single-image-super-resolution-using-deep-learning-66f0adfb2d6b
# Article: https://arxiv.org/pdf/1501.00092.pdf
def createModel4(TILESIZE_INPUT):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(TILESIZE_INPUT * TILESIZE_INPUT, )))
model.add(tf.keras.layers.Reshape( (TILESIZE_INPUT, TILESIZE_INPUT, 1) ))
model.add(tf.keras.layers.UpSampling2D(interpolation = 'bilinear'))
model.add(tf.keras.layers.UpSampling2D(interpolation = 'bilinear'))
model.add(tf.keras.layers.Conv2D(64, (9, 9), padding='same'))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(32, (1, 1), padding='same'))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(1, (5, 5), padding='same'))
model.add(tf.keras.layers.Flatten())
model.compile(optimizer='Adam', loss='mse', metrics=['MeanSquaredError'])
return model
# Use a single 2x bilinear upsampling layer followed by 9x9, 1x1 and 5x5 convolutional layers with dropout. MSE: 0.0028712489
# Described in https://towardsdatascience.com/an-evolution-in-single-image-super-resolution-using-deep-learning-66f0adfb2d6b
# Article: https://arxiv.org/pdf/1501.00092.pdf
def createModel2(TILESIZE_INPUT):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(TILESIZE_INPUT * TILESIZE_INPUT, )))
model.add(tf.keras.layers.Reshape( (TILESIZE_INPUT, TILESIZE_INPUT, 1) ))
model.add(tf.keras.layers.UpSampling2D(interpolation = 'bilinear'))
model.add(tf.keras.layers.Conv2D(64, (9, 9), padding='same'))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(32, (1, 1), padding='same'))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(1, (5, 5), padding='same'))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Flatten())
model.compile(optimizer='Adam', loss='mse', metrics=['MeanSquaredError'])
return model
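# --- Hedged usage sketch (not part of the original): builds and fits createModel4 on random
# --- data; TILESIZE_INPUT, DROPOUT and the training arrays are assumed values.
DROPOUT = 0.2            # assumed dropout rate expected by createModel2
TILESIZE_INPUT = 32      # assumed input tile size
model = createModel4(TILESIZE_INPUT)
x_train = np.random.rand(8, TILESIZE_INPUT * TILESIZE_INPUT).astype('float32')    # flattened low-res tiles
y_train = np.random.rand(8, (TILESIZE_INPUT * 4) ** 2).astype('float32')          # flattened 4x targets
model.fit(x_train, y_train, epochs=1, batch_size=4)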
|
python
|
import logging
import os
import re
from robobrowser import RoboBrowser
logger = logging.getLogger(__name__)
id_re = re.compile("\(([^)]+)\)")
def scrape_snotel_sites(url=None):
if not url:
url = "http://www.wcc.nrcs.usda.gov/nwcc/yearcount?network=sntl&counttype=statelist&state="
browser = RoboBrowser(parser="html5lib")
browser.open(url)
browser.response.raise_for_status()
table = browser.find_all("table")[4]
sites = [] # list of sites with name and code
cols = [t.text.strip() for t in table.tr.find_all("th")]
for row in table.find_all("tr"):
if row.td and row.td.text.strip() == 'SNTL':
items = [i.text.strip() for i in row.find_all("td")]
sites.append(dict(zip(cols, items)))
return sites
def build_id(listing):
number = id_re.findall(listing["site_name"])[0]
state = listing["state"]
return "{}:{}:{}".format(number, state, "SNTL")
|
python
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import scipy.io
from torch.utils.data import Dataset
import pickle
import os
import sys
# Data Processing
def load_file(path_to_file):
return np.genfromtxt(path_to_file,delimiter='\t',skip_header=4).astype('float32')
class WalkDataset(Dataset):
"""Walking dataset"""
def __init__(self, root_dir, sequence_length = 30, transform=None):
"""
Args:
root_dir (string): Path to the database folder.
transform (callable, optional): Optional transform to be applied on
a sample.
"""
files = os.listdir(root_dir)
for i, f in enumerate(files):
files[i]=os.path.join(root_dir,f)
self.files = files
self.transform = transform
f = open('stats','rb')
self.mu, self.sigma = pickle.load(f)
f.close()
self.sl = sequence_length
self.len = None # Total number of fixed length sequences
self.file_len = [0]*len(files) # Number of fixed length sequences in each file
self.len_cum = [0]*(len(files)+1) # Number of acumulated sequences
def __len__(self):
if self.len is not None:
return self.len
else:
# Calculate length of the entire fixed length dataset
for i, name in enumerate(self.files):
temp = load_file(name)
sl = temp.shape[0] # Number of timesteps
self.file_len[i] = sl//(self.sl+1) # Number of fixed length sequences in the file
self.len_cum[i+1] = np.sum(self.file_len)
self.len = np.sum(self.file_len)
return self.len
def __getitem__(self, idx):
data = []
target = []
#data_lengths = []
idxs = np.arange(len(self))
idxs = idxs.tolist()
if isinstance(idx, slice):
idxs = idxs[idx]
else:
idxs = [idxs[idx]]
last_file = -1
for i, n in enumerate(idxs):
if i>=self.len:
raise IndexError('The requested sequence does not exist')
top = self.len_cum[1]
file_n = 0
while top-1 < n:
file_n += 1
top = self.len_cum[file_n+1]
if last_file != file_n:
t = load_file(self.files[file_n])
t = np.delete(t, np.s_[-3:],1) # Delete the last 3 columns
#t = np.delete(t, np.s_[self.file_len[file_n]*(self.sl+1):],0) # Delete extra timesteps
t = np.divide((t-self.mu), self.sigma) # Normalize data
out_t = np.delete(t, np.s_[:18],1) # Delete right leg data
last_file = file_n
actual = n + 1 - self.len_cum[file_n]
input_t = t[(actual-1)*self.sl:actual*self.sl,:]
output_t = out_t[(actual-1)*self.sl+1:actual*self.sl+1,:]
#print('first file: '+self.files[0])
#print ('file name: '+self.files[file_n])
#print('data size: {}, target size {}'.format(input_t.shape, output_t.shape))
#sys.stdout.flush()
data.append(input_t)
target.append(output_t)
if len(data)>1:
data = np.stack(data, axis = 1) # Batch Dimension
target = np.stack(target, axis = 1)
else:
data = data[0]
target = target[0]
data = torch.from_numpy(data)
target = torch.from_numpy(target)
#data = Variable(data, requires_grad = False)
#target = Variable(target, requires_grad = False)
sample = {'data':data, 'target':target}
return sample
# for i in range(len(list_files)):
# t = load_file(list_files[i])
# t = np.delete(t,np.s_[-3:],1) # Delete the last 3 columns
# input_t = np.delete(t,np.s_[-1],0) # Delete last element
# input_t = np.divide((input_t-self.mu),self.sigma) # Normalize data
# output_t = np.delete(t,np.s_[0],0) # Delete first element
# output_t = np.divide((output_t -self.mu),self.sigma) # Normalize data
# output_t = np.delete(output_t,np.s_[:18],1) # Delete Right Leg data
# data.append(input_t)
# data_lengths.append(input_t.shape[0]) # Sequence length
# target.append(output_t)
#
# largest = max(data_lengths)
# container = torch.zeros((len(data),largest,36))
# target_container = torch.zeros((len(data),largest,18))
# for i in range(len(data)):
# input_t = data[i]
# output_t = target[i]
# extra = largest-input_t.shape[0]
# container[i] = torch.from_numpy(np.concatenate([input_t,np.zeros((extra,input_t.shape[1]),dtype=input_t.dtype)],0))
# target_container[i] = torch.from_numpy(np.concatenate([output_t,np.zeros((extra,output_t.shape[1]),dtype=output_t.dtype)],0))
# container = Variable(container, requires_grad = False)
# target_container = Variable(target_container, requires_grad = False)
# data_packed = nn.utils.rnn.pack_padded_sequence(container, data_lengths,
# batch_first=True)
# target_packed = nn.utils.rnn.pack_padded_sequence(target_container, data_lengths,
# batch_first=True)
#
# sample = {'data':data_packed, 'target':target_packed}
#
# return sample
# Main model
class Net(nn.Module):
def __init__(self, hidden_dim):
super(Net, self).__init__()
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(36, hidden_dim, 1, batch_first = True, dropout = 0.5)
self.fc1 = nn.Linear(hidden_dim, 18)
self.fc2 = nn.Linear(100,18)
self.dp = nn.Dropout()
def forward(self, x, hc):
#print('input:{}, h1: {}, h2: {}'.format(x.size(),hc[0].size(),hc[1].size()))
#sys.stdout.flush()
o, hc = self.lstm(x, hc)
#o_unpacked, o_unpacked_length = nn.utils.rnn.pad_packed_sequence(o, batch_first = True)
#x_unpacked, x_unpacked_length = nn.utils.rnn.pad_packed_sequence(x, batch_first = True)
#x_l = torch.chunk(x_unpacked, 2, dim = 2)
x_l = torch.chunk(x, 2, dim = 2)
x_o = x_l[1] # Left Leg data
#o = F.relu(self.fc1(o_unpacked))
#o = F.relu(self.fc1(o))
o = self.fc1(o)
#o = self.dp(o)
#o = self.fc2(o)
o = x_o + o
#print(o.size())
#sys.stdout.flush()
#o = nn.utils.rnn.pack_padded_sequence(o, o_unpacked_length, batch_first=True)
return o, hc
def init_hidden(self,x):
#batch_size = x.batch_sizes
#batch_size = batch_size[0]
batch_size = x.size()[0]
h_0 = torch.zeros(1, batch_size, self.hidden_dim)
c_0 = torch.zeros(1, batch_size, self.hidden_dim)
return (h_0, c_0)
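# --- Hedged wiring sketch (not part of the original): the data directory, hidden size and the
# --- 'stats' pickle are assumed to exist; shows one forward/backward pass.
if __name__ == '__main__':
    dataset = WalkDataset('walk_data', sequence_length=30)   # assumed folder of tab-separated files
    model = Net(hidden_dim=100)
    sample = dataset[0:4]                                    # slicing returns (seq, batch, feat) tensors
    x = sample['data'].permute(1, 0, 2)                      # LSTM was built with batch_first=True
    y = sample['target'].permute(1, 0, 2)
    hc = model.init_hidden(x)
    out, hc = model(x, hc)
    loss = F.mse_loss(out, y)
    loss.backward()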
|
python
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You([email protected])
# Loss Manager for Object Detection.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from loss.modules.det_modules import SSDFocalLoss, SSDMultiBoxLoss
from loss.modules.det_modules import YOLOv3Loss
from loss.modules.det_modules import FRLoss
from utils.tools.logger import Logger as Log
DET_LOSS_DICT = {
'ssd_focal_loss': SSDFocalLoss,
'ssd_multibox_loss': SSDMultiBoxLoss,
'yolov3_loss': YOLOv3Loss,
'fr_loss': FRLoss
}
class DetLossManager(object):
def __init__(self, configer):
self.configer = configer
def get_det_loss(self, key):
if key not in DET_LOSS_DICT:
Log.error('Loss: {} not valid!'.format(key))
exit(1)
loss = DET_LOSS_DICT[key](self.configer)
return loss
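# --- Hedged usage sketch (not part of the original): 'configer' stands in for the framework's
# --- configuration object that the loss constructors expect.
def build_detection_criterion(configer, loss_name='yolov3_loss'):
    # Hypothetical helper: resolve a detection loss by name via the manager above.
    return DetLossManager(configer).get_det_loss(loss_name)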
|
python
|
a = []
# append element at the end.
a.append(2)
a.append(3)
print(a)
# insert at a specific location.
a.insert(0, 5)
a.insert(10, 5)
print(a)
# when the given position is beyond the end of the list, the element is inserted at the end.
a.insert(100, 6)
print(a)
# Deleting elements from a list.
a.remove(5) # removes the first occurrence of the value passed
print(a, len(a))
del a[0]
print(a, len(a))
# access the last element
print(a[-1])
# Printing a list
print(len(a))
for item in range(len(a)): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
for item in range(0, len(a), 1): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
# Reverse printing a list
for item in range(len(a) - 1, -1, -1): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
# Jump a certain number of times.
for item in range(0, len(a), 2): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
|
python
|
from .Ticket import Ticket, StateTicket
################################################################################
################################################################################
################################################################################
################################################################################
class Single(Ticket):
def getStateTicket(self, diamondState):
stateTicket = None
if diamondState == "firstBase_secondBase_thirdBase":
stateTicket = BasesLoadedSingle()
elif diamondState == "secondBase_thirdBase":
stateTicket = SecondThirdSingle()
elif diamondState == "firstBase_thirdBase":
stateTicket = FirstThirdSingle()
elif diamondState == "firstBase_secondBase":
stateTicket = FirstSecondSingle()
elif diamondState == "thirdBase":
stateTicket = ThirdSingle()
elif diamondState == "secondBase":
stateTicket = SecondSingle()
elif diamondState == "firstBase":
stateTicket = FirstSingle()
else: #Bases Empty
stateTicket = EmptySingle()
return stateTicket
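# --- Hedged dispatch sketch (not part of the original): assumes Ticket() needs no constructor
# --- arguments; the diamond/umpire/scoreKeeper objects come from the surrounding framework.
def resolve_single_ticket(diamondState):
    # Hypothetical helper: pick the state-specific ticket for a single.
    return Single().getStateTicket(diamondState)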
################################################################################
################################################################################
class BasesLoadedSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runners on second and third score
for base in ("thirdBase", "secondBase"):
runnerId, onHook = diamond.popBase(base)
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
diamond.moveBase("firstBase", "secondBase")
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class SecondThirdSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runners on second and third score
for base in ("thirdBase", "secondBase"):
runnerId, onHook = diamond.popBase(base)
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstThirdSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runner on third scores
runnerId, onHook = diamond.popBase("thirdBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
diamond.moveBase("firstBase", "secondBase")
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstSecondSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runner on second scores
runnerId, onHook = diamond.popBase("secondBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class ThirdSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runner on third scores
runnerId, onHook = diamond.popBase("thirdBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class SecondSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runner on second scores
runnerId, onHook = diamond.popBase("secondBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
def moveBases(self, diamond):
diamond.moveBase("firstBase", "secondBase")
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class EmptySingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
|
python
|
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
import matplotlib.pyplot as plt
from PIL import Image
import cv2
from copy import deepcopy
# image_path = '/home/kshitij/PycharmProjects/Computer_Vision/Assignment_3/Question_2/iceCream1.jpg'
# image_path = '/home/kshitij/PycharmProjects/Computer_Vision/Assignment_3/Question_2/iceCream2.jpg'
image_path = '/home/kshitij/PycharmProjects/Computer_Vision/Assignment_3/Question_2/iceCream3.jpg'
image = cv2.imread(image_path)
image = cv2.resize(image, (0, 0), fx=0.25, fy=0.25)
orig_img = deepcopy(image)
sh = image.shape
flat_image = image.reshape((image.shape[0] * image.shape[1], 3))
bandwidth2 = estimate_bandwidth(flat_image, quantile=.04, n_samples=1000)
ms = MeanShift(bandwidth2, bin_seeding=True)
ms.fit(flat_image)
labels = ms.labels_
for i in range(len(labels)):
label = labels[i]
flat_image[i] = ms.cluster_centers_[label]
print("DONE CLUSTERING")
res = flat_image.reshape(sh)
# cv2.imshow('orig', orig_img)
# cv2.imshow('res', res)
cv2.imwrite('clustered_iceCream3.jpg', res)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
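# --- Hedged addition (not part of the original): vectorized equivalent of the per-pixel loop
# --- above, plus a simple segment count.
n_segments = len(np.unique(labels))
print("Found {} colour segments".format(n_segments))
segmented = ms.cluster_centers_[labels].reshape(sh).astype(np.uint8)
cv2.imwrite('clustered_iceCream3_vectorized.jpg', segmented)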
|
python
|
import random
import networkx as nx
import matplotlib.pyplot as plt
class graph:
__dg = None
def __init__(self):
#self.__dg = nx.DiGraph()
self.__dg = nx.Graph()
def add_nodes(self, nodes):
for i in range(0, len(nodes)):
self.__dg.add_node(nodes[i])
def add_edges(self, edges):
for edge in edges:
for ele in edge['rel']:
self.__dg.add_edge(edge['word'], ele['to'])
def drawAndShow(self, size):
nx.draw(self.__dg, with_labels=True, node_size = size, node_color = self.randomcolor(size), edge_color = self.randomcolor(size))
plt.rcParams['font.sans-serif'] = ['simsun']
plt.show()
def drawAndShow1(self):
nx.draw(self.__dg, with_labels=True)
plt.rcParams['font.sans-serif'] = ['simsun']
plt.show()
def randomcolor(self, size):
rst = []
colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
for ele in size:
color = ""
for i in range(6):
color += colorArr[random.randint(0, 14)]
rst.append('#' + color)
return rst
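# --- Hedged usage sketch (not part of the original): node names and the edge structure are
# --- inferred from add_edges above.
if __name__ == '__main__':
    g = graph()
    g.add_nodes(['cat', 'dog', 'animal'])
    g.add_edges([
        {'word': 'cat', 'rel': [{'to': 'animal'}]},
        {'word': 'dog', 'rel': [{'to': 'animal'}]},
    ])
    g.drawAndShow1()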
|
python
|
# coding: utf-8
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import os
from collections import defaultdict
import yaml
from django.conf import settings
from django.core.management.base import BaseCommand
from apps.utils.generate_api_js import main
def esb_json2apigw_yaml(json_file_path: str):
with open(file=json_file_path, encoding="utf-8") as esb_json_file_stream:
esb_json = json.loads(esb_json_file_stream.read())
# Group API definitions that share the same path
api_info_gby_path = defaultdict(list)
for api_info in esb_json:
api_info_gby_path[api_info["path"]].append(api_info)
apigw_json = {
"swagger": "2.0",
"basePath": "/",
"info": {"version": "0.1", "title": "API Gateway Resources"},
"schemes": ["http"],
"paths": {},
}
for api_path, api_infos in api_info_gby_path.items():
http_method_api_info_map = {}
for api_info in api_infos:
http_method_api_info_map[api_info["registed_http_method"].lower()] = {
"operationId": f"{api_info['resource_classification'].lower()}_{api_info['resource_name']}",
"description": api_info["description"],
"tags": [api_info["resource_classification"]],
"x-bk-apigateway-resource": {
"isPublic": True,
"allowApplyPermission": True,
"matchSubpath": False,
"backend": {
"type": "HTTP",
"method": api_info["registed_http_method"].lower(),
"path": api_info["path"],
"matchSubpath": False,
"timeout": api_info.get("timeout", 0),
"upstreams": {},
"transformHeaders": {},
},
"authConfig": {"userVerifiedRequired": False},
"disabledStages": [],
},
}
apigw_json["paths"][api_path] = http_method_api_info_map
with open(
os.path.join(settings.BASE_DIR, settings.APP_CODE, "support-files", "nodeman.apigw.yaml"),
encoding="utf-8",
mode="w",
) as f:
yaml.dump(apigw_json, f, encoding="utf-8", allow_unicode=True)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("-g", "--is_apigw", action="store_true", help="whether for api_gateway")
parser.add_argument("--is_apigw_yaml", action="store_true", help="convert esb json to apigw yaml")
parser.add_argument("-f", type=str, help="json file path, required when select --is-apigw-yaml")
def handle(self, **kwargs):
if kwargs["is_apigw_yaml"]:
esb_json2apigw_yaml(kwargs["f"])
else:
main(kwargs["is_apigw"])
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from alphalogic_api.protocol import rpc_pb2
from alphalogic_api.attributes import Visible, Access
from alphalogic_api.multistub import MultiStub
from alphalogic_api import utils
from alphalogic_api.logger import log
from alphalogic_api.utils import Exit
class AbstractParameter(object):
"""
AbstractParameter implements ParameterService service (see `rpc.proto <https://github.com/Alphaopen/alphalogic_api/
blob/master/alphalogic_api/protocol/proto/rpc.proto>`_)
"""
def _call(self, func_name, *args, **kwargs):
return self.multi_stub.parameter_call(func_name, id=self.id, *args, **kwargs)
def name(self):
"""
Return parameter name
:rtype: unicode
"""
answer = self._call('name')
return answer.name
def display_name(self):
"""
Return parameter display name
:rtype: unicode
"""
answer = self._call('display_name')
return answer.display_name
def desc(self):
"""
Return parameter description
:rtype: unicode
"""
answer = self._call('desc')
return answer.desc
def set_display_name(self, display_name):
"""
Set parameter display name
:arg display_name: unicode
"""
answer = self._call('set_display_name', display_name=display_name)
def set_desc(self, desc):
"""
Set parameter description
:arg desc: unicode
"""
answer = self._call('set_desc', desc=desc)
def is_string(self):
"""
Return True if parameter value type is string
:rtype: bool
"""
answer = self._call('is_string')
return answer.yes
def is_long(self):
"""
Return True if parameter value type is long
:rtype: bool
"""
answer = self._call('is_long')
return answer.yes
def is_double(self):
"""
Return True if parameter value type is double
:rtype: bool
"""
answer = self._call('is_double')
return answer.yes
def is_datetime(self):
"""
Return True if parameter value type is datetime
:rtype: bool
"""
answer = self._call('is_datetime')
return answer.yes
def is_bool(self):
"""
Return True if parameter value type is bool
:rtype: bool
"""
answer = self._call('is_bool')
return answer.yes
def is_map(self):
"""
Return True if parameter value type is map
:rtype: bool
"""
answer = self._call('is_map')
return answer.yes
def is_runtime(self):
"""
Return True if parameter type is Visible.runtime
:rtype: bool
"""
answer = self._call('is_runtime')
return answer.yes
def is_setup(self):
"""
Return True if parameter type is Visible.setup
:rtype: bool
"""
answer = self._call('is_setup')
return answer.yes
def is_hidden(self):
"""
Return True if parameter type is Visible.hidden
:rtype: bool
"""
answer = self._call('is_hidden')
return answer.yes
def is_common(self):
"""
Return True if parameter type is Visible.common
:rtype: bool
"""
answer = self._call('is_common')
return answer.yes
def set_runtime(self):
"""
Set parameter type to Visible.runtime
"""
answer = self._call('set_runtime')
def set_setup(self):
"""
Set parameter type to Visible.setup
"""
answer = self._call('set_setup')
def set_hidden(self):
"""
Set parameter type to Visible.hidden
"""
answer = self._call('set_hidden')
def set_common(self):
"""
Set parameter type to Visible.common
"""
answer = self._call('set_common')
def is_read_only(self):
"""
Return True if parameter access type is Access.read_only
:rtype: bool
"""
answer = self._call('is_read_only')
return answer.yes
def is_read_write(self):
"""
Return True if parameter access type is Access.read_write
:rtype: bool
"""
answer = self._call('is_read_write')
return answer.yes
def set_read_only(self):
"""
Set parameter access type to Access.read_only
"""
answer = self._call('set_read_only')
def set_read_write(self):
"""
Set parameter access type to Access.read_write
"""
answer = self._call('set_read_write')
def is_licensed(self):
"""
Return True if parameter is the license key parameter
:rtype: bool
"""
answer = self._call('is_licensed')
return answer.yes
def set_licensed(self):
"""
Set the license key parameter
"""
answer = self._call('set_licensed')
def clear(self):
"""
Remove all predefined values from the 'choices' argument of the parameter
"""
answer = self._call('clear')
def get(self):
"""
Get parameter value
:rtype: long, float, datetime, bool or unicode
"""
answer = self._call('get')
return utils.value_from_rpc(answer.value)
def set(self, value):
"""
Set parameter value
:arg value: The value type: long, float, datetime, bool or unicode
"""
value_rpc = utils.get_rpc_value(self.value_type, value)
self._call('set', value=value_rpc)
def enums(self):
"""
Get the predefined enumeration of values from the 'choices' argument of the parameter
:rtype: List of values of long, float, datetime, bool or unicode type in a tuple as (value1, value2, value3 ….)
"""
answer = self._call('enums')
return [utils.value_from_rpc(key.value) for key in answer.enums]
def set_enum(self, value, enum_name):
"""
Add/replace enumeration member – a pair (value, name) – for the 'choices' argument of the parameter
:param value: The value type: long, float, datetime, bool or unicode
:param enum_name: enumeration member name
"""
value_rpc = rpc_pb2.Value()
utils.build_rpc_value(value_rpc, type(value), value)
answer = self._call('set_enum', enum_name=enum_name, value=value_rpc)
def set_enums(self, values):
"""
Add/replace multiple enumeration members for the 'choices' argument of the parameter
:param values: An array of values can be one of the following:
* List of values of long, float, datetime, bool or unicode type in a tuple as (value1, value2, value3 ….)
* List of enumeration members in a tuple of tuples as ((value1, 'enum_name1'), (value2, 'enum_name2'), ...)
"""
value_type = self.value_type
req = rpc_pb2.ParameterRequest(id=self.id)
for val in values:
e = req.enums.add()
if isinstance(val, tuple):
e.name = unicode(val[1])
utils.build_rpc_value(e.value, type(val[0]), val[0])
else:
e.name = unicode(val)
utils.build_rpc_value(e.value, type(val), val)
self.multi_stub.call_helper('set_enums', fun_set=MultiStub.parameter_fun_set, request=req,
stub=self.multi_stub.stub_parameter)
def has_enum(self, enum_name):
"""
Return True if parameter has a predefined enumeration of values
:rtype: bool
"""
answer = self._call('has_enum', enum_name=enum_name)
return answer.yes
def owner(self):
"""
Return ID of the parameter's owner
:rtype: uint64
"""
answer = self._call('owner')
return answer.owner
class Parameter(AbstractParameter):
"""
Class Parameter inherits all data elements and methods from :class:`~alphalogic_api.objects.parameter.AbstractParameter`.
"""
index_number = 0
def __init__(self, *args, **kwargs):
self.index_number = Parameter.index_number
Parameter.index_number += 1
for arg in kwargs:
self.__dict__[arg] = kwargs[arg]
self.visible = kwargs.get('visible', Visible.runtime)
self.access = kwargs.get('access', Access.read_write)
self.callback = kwargs.get('callback', None)
if 'value_type' not in kwargs:
raise Exception('value_type not found in Parameter')
if kwargs['value_type'] not in [bool, int, long, float, datetime.datetime, unicode, list, dict]:
raise Exception('value_type={0} is unknown'.format(kwargs['value_type']))
self.default = kwargs.get('default')
self.choices = kwargs.get('choices', None)
def set_multi_stub(self, multi_stub):
self.multi_stub = multi_stub
def __getattr__(self, item):
if item == 'val':
return self.get()
if item in self.__dict__:
return self.__dict__[item]
def __setattr__(self, attr, value):
if attr == 'val' and self.parameter_name.lower() == 'name':  # disallow changing the 'name' parameter
log.error('Attempt to change name of device')
raise Exit
if attr == 'val':
if value is not None:
self.set(value)
elif attr in ['value_type', 'visible', 'access', 'default', 'choices', 'multi_stub', 'id',
'parameter_name', 'callback', 'index_number']:
self.__dict__[attr] = value
return self
def set_choices(self):
if isinstance(self.choices, tuple):
self.clear()
self.set_enums(self.choices)
def get_copy(self):
return Parameter(value_type=self.value_type, default=self.default, visible=self.visible,
access=self.access, callback=self.callback, choices=self.choices)
class ParameterBool(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=bool, **kwargs)
class ParameterLong(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=int, **kwargs)
class ParameterDouble(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=float, **kwargs)
class ParameterDatetime(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=datetime.datetime, **kwargs)
class ParameterString(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=unicode, **kwargs)
class ParameterList(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=list, **kwargs)
class ParameterDict(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=dict, **kwargs)
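# --- Hedged declaration sketch (not part of the original): shows the typed helpers above;
# --- attaching these to a device/adapter object is framework-specific and not shown here.
temperature = ParameterDouble(default=0.0, visible=Visible.runtime, access=Access.read_only)
mode = ParameterString(default=u'auto', choices=((u'auto', 'Automatic'), (u'manual', 'Manual')))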
|
python
|
from unittest.mock import ANY, patch
from arcsecond import ArcsecondAPI
from click.testing import CliRunner
from oort.cli.cli import upload
from oort.server.errors import InvalidOrgMembershipOortCloudError, UnknownOrganisationOortCloudError
from oort.shared.models import Organisation
from tests.utils import (
TEL_DETAILS,
TEL_UUID,
TEST_LOGIN_ORG_SUBDOMAIN,
TEST_LOGIN_USERNAME,
save_arcsecond_test_credentials,
use_test_database
)
@use_test_database
def test_cli_upload_missing_folders():
save_arcsecond_test_credentials()
runner = CliRunner()
result = runner.invoke(upload)
assert result.exit_code != 0 and result.exception
assert 'Missing argument \'FOLDER\'.' in result.output
@use_test_database
def test_cli_upload_unknown_organisation():
save_arcsecond_test_credentials()
runner = CliRunner()
error = {'detail': 'unknown organisation'}
with patch.object(ArcsecondAPI, 'read', return_value=(None, error)) as mock_method_read:
result = runner.invoke(upload, ['.', '-o', 'dummy_org'])
assert result.exit_code != 0
assert isinstance(result.exception, UnknownOrganisationOortCloudError)
mock_method_read.assert_called_once_with('dummy_org')
@use_test_database
def test_cli_upload_unknown_membership():
save_arcsecond_test_credentials(subdomain='saao')
Organisation.create(subdomain='saao')
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
# Make the test
runner = CliRunner()
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN])
assert result.exit_code != 0
assert isinstance(result.exception, InvalidOrgMembershipOortCloudError)
@use_test_database
def test_cli_upload_missing_org_telescope():
save_arcsecond_test_credentials()
# Create the watch command org to pass the org check.
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
# Make the test
runner = CliRunner()
with patch.object(ArcsecondAPI, 'list', return_value=([], None)) as mock_method_read:
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN])
assert result.exit_code == 0
assert f"Here is a list of existing telescopes for organisation {TEST_LOGIN_ORG_SUBDOMAIN}:" in result.output
mock_method_read.assert_called_once()
@use_test_database
def test_cli_upload_with_org_telescope_answer_nope():
# Prepare
save_arcsecond_test_credentials()
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
runner = CliRunner()
# Run
with patch.object(ArcsecondAPI, 'read', return_value=(TEL_DETAILS, None)) as mock_method_read, \
patch('oort.uploader.engine.walker.walk') as mock_method_walk, \
patch('builtins.input', return_value='Nope'):
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN, '-t', TEL_UUID])
# Assert
assert result.exit_code == 0
assert f"arcsecond username: @{TEST_LOGIN_USERNAME}" in result.output.lower()
assert f"uploading to organisation account '{TEST_LOGIN_ORG_SUBDOMAIN}'" in result.output.lower()
mock_method_walk.assert_not_called()
mock_method_read.assert_called_once()
@use_test_database
def test_cli_upload_with_org_telescope_answer_yep():
# Prepare
save_arcsecond_test_credentials()
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
runner = CliRunner()
with patch.object(ArcsecondAPI, 'read', return_value=(TEL_DETAILS, None)) as mock_method_read, \
patch('oort.uploader.engine.walker.walk') as mock_method_walk, \
patch('builtins.input', return_value='\n'):
# Run
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN, '-t', TEL_UUID])
# Assert
assert result.exit_code == 0
assert f"arcsecond username: @{TEST_LOGIN_USERNAME}" in result.output.lower()
assert f"uploading to organisation account '{TEST_LOGIN_ORG_SUBDOMAIN}'" in result.output.lower()
mock_method_read.assert_called_once()
mock_method_walk.assert_called_once_with('.', ANY, False, debug=False)
|
python
|
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
import tensorflow as tf
x = tf.placeholder(tf.float32,[None,784])
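# --- Hedged continuation (not part of the original): the snippet stops after the input
# --- placeholder; a minimal TF1-style softmax regression, with assumed variable names.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})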
|
python
|
from aiogram import types
from asyncio import sleep
from typing import Union
import NekoGram
async def default_start_function(message: Union[types.Message, types.CallbackQuery]):
neko: NekoGram.Neko = message.conf['neko']
if not await neko.storage.check_user_exists(user_id=message.from_user.id):
lang = message.from_user.language_code if message.from_user.language_code in neko.texts.keys() \
else neko.storage.default_language
await neko.storage.create_user(user_id=message.from_user.id, language=lang)
await sleep(0.1) # Sleep a bit to make sure user is added to the database
else:
# Completely erase user data
await neko.storage.set_user_data(user_id=message.from_user.id)
data = await neko.build_text(text='start', user=message.from_user)
if isinstance(message, types.Message):
await message.reply(text=data.data.text, parse_mode=data.data.parse_mode,
disable_web_page_preview=data.data.no_preview, reply=False,
disable_notification=data.data.silent, reply_markup=data.data.markup)
await message.delete()
else:
await message.message.edit_text(text=data.data.text, disable_web_page_preview=data.data.no_preview,
reply_markup=data.data.markup, parse_mode=data.data.parse_mode)
|
python
|
import os
import pandas as pd
import nltk
import re
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from data_module.corpus import data_operations as do
from data_module.corpus.clean import remove_single_quotes
def get_top_n_words(corpus, n=None):
"""
List the top n words in a vocabulary according
to occurrence in a text corpus.
get_top_n_words(["I love Python", "Python is a language programming",
"Hello world", "I love the world"]) ->
[('python', 2),
('world', 2),
('love', 2),
('hello', 1),
('is', 1),
('programming', 1),
('the', 1),
('language', 1)]
"""
vec = CountVectorizer().fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
########################### EXECUTION STARTS BELOW ############################
nlp = spacy.load('en_core_web_sm')
# nlp = spacy.load('en')
DATA_FOLDER = os.path.join(
os.path.dirname(__file__), 'data/microservices/')
RAW_ANSWERS_FILE = DATA_FOLDER + 'raw/answers.csv'
# change this data source to change the corpus
RAW_QUESTIONS_FILE = DATA_FOLDER + 'nontech_discussions.csv'
RAW_UNION_FILE = DATA_FOLDER + 'raw/relevance_union.csv'
CLEAN_UNION_FILE = DATA_FOLDER + 'clean/relevance_union.csv'
rawdata_answers = pd.read_csv(RAW_ANSWERS_FILE)
rawdata_questions = pd.read_csv(RAW_QUESTIONS_FILE)
rawdata_union = pd.read_csv(RAW_UNION_FILE)
open_tags = [
r'<br>', r'<em>', r'</em>', r'<p>',
r'</p>', r'<ul>', r'</ul>', r'<li>', r'</li>',
r'<strong>', r'</strong>', r'<img src=[^>]*>',
r'<blockquote>', r'</blockquote>', r'<ol>', r'</ol>', r'<hrs>',
r'<sub>', r'</sub>', r'<h3>', r'</h3>', r'<h1>', r'</h1>', r'<h2>',
r'</h2>', r'<h4>', r'</h4>', r'<h5>', r'</h5>', r'<div[^>]*>', r'</div>',
r'<pre>', r'</pre>', r'<code>', r'</code>', r'<a href=[^>]*>', r'(</a>)',
r'<br>', r'<br/>'
]
closed_tags = [
(r'<a href=[^>]*>',r'(</a>)'),
(r'<div[^>]*>',r'(</div>)'),
(r'<code>', r'</code>'),
(r'<blockquote>',r'</blockquote>')
]
stop_words = set(open('stopword_list.txt', 'r').read().split("\n"))
dscs = rawdata_questions
punctuation_rgx = r"[^()[\]<>+\-_=\*|\^{}$&%#@!?.,:;/\"]+"
for idx, question in dscs.iterrows():
file_name = 'instance_' + str(question["Id"]) + ".txt"
file_path = DATA_FOLDER + 'clean/nontech/' + file_name
with open(file_path, '+w') as fh:
# Cleaning question body from HTML
for closed_tag in closed_tags:
question["Body"] = do.remove_block_tag(closed_tag, question["Body"])
for open_tag in open_tags:
question["Body"] = do.remove_single_tag(open_tag, question["Body"])
# Cleaning question title
stage_one = re.findall(punctuation_rgx, question['Title'].lower())
stage_one = [word for line in stage_one for word in line.split()]
stage_one = list(map(remove_single_quotes, stage_one))
stage_two = re.findall(r"[^\d]+", " ".join(stage_one))
stage_two = [word for line in stage_two for word in line.split()]
words_to_remove = stop_words.intersection(set(stage_two))
stage_three = [
word for word in stage_two if word not in words_to_remove]
leemed_title = nlp(" ".join(stage_three))
leemed_title = " ".join(
[word.lemma_ for word in leemed_title
if word.lemma_ != "-PRON-" and word.lemma_ != "'s"])
# Cleaning question body
stage_one = re.findall(punctuation_rgx, question['Body'].lower())
stage_one = [word for line in stage_one for word in line.split()]
stage_one = list(map(remove_single_quotes, stage_one))
stage_two = re.findall(r"[^\d]+", " ".join(stage_one))
stage_two = [word for line in stage_two for word in line.split()]
words_to_remove = stop_words.intersection(set(stage_two))
stage_three = [
word for word in stage_two if word not in words_to_remove]
leemed_body = nlp(" ".join(stage_three))
leemed_body = " ".join(
[word.lemma_ for word in leemed_body
if word.lemma_ != "-PRON-" and word.lemma_ != "'s"])
fh.write(leemed_title)
fh.write('\n\n')
fh.write(leemed_body)
# Cleaning answers
answers = rawdata_answers.loc[
rawdata_answers.ParentId == question["Id"]]
for idx, answer in answers.iterrows():
for closed_tag in closed_tags:
answer["Body"] = do.remove_block_tag(closed_tag, answer["Body"])
for open_tag in open_tags:
answer["Body"] = do.remove_single_tag(open_tag, answer["Body"])
# Cleaning answer body
stage_one = re.findall(punctuation_rgx, answer['Body'].lower())
stage_one = [word for line in stage_one for word in line.split()]
stage_one = list(map(remove_single_quotes, stage_one))
stage_two = re.findall(r"[^\d]+", " ".join(stage_one))
stage_two = [word for line in stage_two for word in line.split()]
words_to_remove = stop_words.intersection(set(stage_two))
stage_three = [
word for word in stage_two if word not in words_to_remove]
leemed_answer = nlp(" ".join(stage_three))
leemed_answer = " ".join(
[word.lemma_ for word in leemed_answer
if word.lemma_ != "-PRON-" and word.lemma_ != "'s"])
fh.write('\n\n')
fh.write(leemed_answer)
print("Discussion %d printed" % question['Id'])
|
python
|
import unittest
from config import TEST_DB_PATH
from repositories.item_repository import ItemRepository
from utilities.csv_utilities import clear_csv, read_csv
class TestItemRepository(unittest.TestCase):
def setUp(self):
clear_csv(TEST_DB_PATH)
self.item_repo = ItemRepository(TEST_DB_PATH)
self.book = ['Patrick Ness', 'The Knife of Never Letting Go', '2008', '0001']
self.blog = [
'Eero Tarmo', 'Soundi.fi',
'Androgyyniä laulua ja irtonaista kävelyä – tältä kuulostaa Arto Tuunelan kevät',
'https://www.soundi.fi/jutut/pariisin-kevat-nokkamies-kasasi-kevat-aiheisen-soittolistan/',
'13.3.2016', '0002'
]
self.video = [
'Christian Duenas', 'Pygame Menu System Tutorial Part 2: Building the Menu and States',
'https://youtu.be/bmRFi7-gy5Y', '24.7.2020', '0003'
]
self.item = ["Pablo Picasso", "Ls Demoiselles d'Avignon", "1907"]
def test_initialises_repo(self):
self.assertTrue(isinstance(self.item_repo._items, dict))
def test_create_book(self):
book = self.item_repo.create('book', self.book)
self.assertTrue(book)
def test_create_blog(self):
blog = self.item_repo.create('blog', self.blog)
self.assertTrue(blog)
def test_create_video(self):
video = self.item_repo.create('video', self.video)
self.assertTrue(video)
def test_create_nonexisting_type(self):
item = self.item_repo.create('painting', self.item)
self.assertFalse(item)
def test_create_duplicate_item(self):
self.item_repo.create('book', self.book)
new_item = self.item_repo.create('book', self.book)
self.assertFalse(new_item)
def test_list_items_empty(self):
items = self.item_repo.list_items()
self.assertEqual(len(items), 0)
def test_list_items_not_empty(self):
self.item_repo.create('book', self.book)
items = self.item_repo.list_items()
self.assertEqual(len(items), 1)
def test_duplicate_not_added_to_items(self):
self.item_repo.create('book', self.book)
self.item_repo.create('book', self.book)
items = self.item_repo.list_items()
self.assertEqual(len(items), 1)
def test_delete_item(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.delete_item('0001')
items = self.item_repo.list_items()
for item in items:
self.assertNotEqual(item[1], '0001')
def test_save_file_not_empty(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.save()
data = read_csv(TEST_DB_PATH)
self.assertEqual(len(data), 3)
def test_delete_all(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.delete_all_items()
items = self.item_repo.list_items()
self.assertFalse(items)
def test_delete_all_clear_csv(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.delete_all_items()
self.item_repo.save()
data = read_csv(TEST_DB_PATH)
self.assertEqual(len(data), 0)
def test_find_existing_item(self):
self.item_repo.create('book', self.book)
item = self.item_repo.find_by_id('0001')
self.assertEqual(item['id'], '0001')
def test_find_nonexisting_item_empty_repo(self):
self.assertIsNone(self.item_repo.find_by_id('0004'))
def test_find_nonexisting_item_nonempty_repo(self):
self.item_repo.create('book', self.book)
self.assertIsNone(self.item_repo.find_by_id('0004'))
|
python
|
#!/usr/bin/env python3
import sys
import subprocess
import requests
from datetime import datetime
def main(args):
if len(args) < 2:
print('usage: sync-cloudflare.py [output path]')
return
output = args[1]
now = datetime.utcnow().isoformat()
ips = []
resp = requests.get('https://www.cloudflare.com/ips-v4')
resp.raise_for_status()
ips.extend([ip for ip in resp.text.strip().split('\n')])
resp = requests.get('https://www.cloudflare.com/ips-v6')
resp.raise_for_status()
ips.extend([ip for ip in resp.text.strip().split('\n')])
new_ips = '\n'.join(ips)
try:
with open('/tmp/cloudflare_origin_pulls.cache', 'r') as f:
cached_ips = f.read()
except FileNotFoundError:
cached_ips = ''
if new_ips == cached_ips:
return
lines = []
lines.append('#')
lines.append(f'# Cloudflare Origin Pulls ({now})')
lines.append('#')
lines.append('')
for ip in ips:
lines.append(f'set_real_ip_from {ip};')
lines.append('')
lines.append('real_ip_header CF-Connecting-IP;')
lines.append('')
content = '\n'.join(lines)
with open(output, 'w') as f:
f.write(content)
print(content)
subprocess.run(['/usr/sbin/nginx', '-t'], check=True)
subprocess.run(['/usr/bin/systemctl', 'reload', 'nginx'], check=True)
with open('/tmp/cloudflare_origin_pulls.cache', 'w') as f:
f.write(new_ips)
if __name__ == '__main__':
main(sys.argv)
|
python
|
import os
import sys
from urllib2 import urlopen
import json
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'config.txt'))
apikey = config.get('Sonarr Config', 'apikey')
host = config.get('Sonarr Config', 'host')
port = config.get('Sonarr Config', 'port')
url = 'http://'+host+':'+port+'/api/series?apikey='+apikey
response = urlopen(url)
shows = json.loads(response.read())
shownames = []
for show in shows:
# each show is a dictionary
shownames.append(show['title'])
found = shownames
for f in found:
print f
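# --- Hedged Python 3 port (not part of the original): the snippet above uses Python 2
# --- (urllib2, ConfigParser, print statement); the same config.txt keys are assumed.
import json
import configparser
from urllib.request import urlopen

config3 = configparser.ConfigParser()
config3.read('config.txt')
apikey3 = config3.get('Sonarr Config', 'apikey')
host3 = config3.get('Sonarr Config', 'host')
port3 = config3.get('Sonarr Config', 'port')
url3 = 'http://{}:{}/api/series?apikey={}'.format(host3, port3, apikey3)
with urlopen(url3) as response3:
    shows3 = json.loads(response3.read())
for show3 in shows3:
    print(show3['title'])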
|
python
|
#!/usr/bin/env python
from kivy.app import App
from kivy.animation import Animation
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics import Line
from kivy.gesture import Gesture, GestureDatabase
from kivy.vector import Vector
from kivy.properties import NumericProperty,BooleanProperty
from museolib.my_gestures import squares
from museolib.widgets.validation import Valid
def simplegesture(name, point_list):
"""
A simple helper function
"""
g = Gesture()
g.add_stroke(point_list)
g.normalize()
g.name = name
return g
class GestureBoard(FloatLayout):
"""
Our application main widget, derived from the touchtracer example; uses data
constructed from touches to match symbols loaded from my_gestures.
"""
edge_size = NumericProperty(0)
exists=BooleanProperty(False)
def __init__(self, *args, **kwargs):
super(GestureBoard, self).__init__()
self.gdb = GestureDatabase()
# add pre-recorded gestures to database
for square in squares:
self.gdb.add_gesture(square)
def on_touch_down(self, touch):
super(GestureBoard,self).on_touch_down(touch)
if self.collide_point(*touch.pos):
if App.get_running_app().config.getboolean('museotouch','validation') == True:
# start collecting points in touch.ud
# create a line to display the points
userdata = touch.ud
userdata['line'] = Line(points=(touch.x, touch.y))
return True
def on_touch_move(self, touch):
if self.collide_point(*touch.pos):
super(GestureBoard,self).on_touch_move(touch)
# store points of the touch movement
try:
touch.ud['line'].points += [touch.x, touch.y]
return True
except (KeyError) as e:
pass
def on_touch_up(self, touch):
super(GestureBoard,self).on_touch_up(touch)
# touch is over: display information and check whether it matches a
# known gesture.
try :
g = simplegesture(
'',
list(zip(touch.ud['line'].points[::2], touch.ud['line'].points[1::2]))
)
self.edge_size = (self.stroke_length(list(zip(touch.ud['line'].points[::2], touch.ud['line'].points[1::2]))))/4
if self.edge_size < 150:
self.edge_size=150
# gestures to my_gestures.py
except :
return
# use database to find the more alike gesture, if any
g2 = self.gdb.find(g, minscore=0.9)
if g2:
for index,square in enumerate(squares) :
if (g2[1] == square):
if index in [0,1]:
square_pos=[touch.x,touch.y-self.edge_size]
elif index in [2,3]:
square_pos=[touch.x-self.edge_size,touch.y-self.edge_size]
elif index in [4,5]:
square_pos=[touch.x-self.edge_size,touch.y]
elif index in [6,7]:
square_pos=[touch.x,touch.y]
valid = Valid(pos=(0,0),size=[self.edge_size,self.edge_size],rotation=180,scale_min=0.5)
self.add_widget(valid)
Animation(pos=square_pos,d=.3,rotation=0,transition='out_sine').start(valid)
self.exists=True
break
def stroke_length(self,l):
distance = 0
for index, point in enumerate(l) :
if index < len(l)-1:
distance += Vector(point).distance(l[index+1])
return distance
|
python
|
import numpy as np
from sympy import simplify, integrate, zeros, S, Matrix, symbols, pi, cos, sin
from .funcs_aproximacion import producto_asecas
def producto_escalar_trigono(f, g, var=symbols('x'), a=-pi, b=pi, I=None, numeric=False):
"""Aplica el producto escalar <f,g> = 1/(2pi) ∫_[-pi]^[pi] f.g
Args:
f (funcion): f
g (funcion): g
var (variable): variable de integración
a (int, optional): limite inferior de integracion. Defaults to 0.
b (int, optional): limite superior de integracion. Defaults to 1.
I (list, optional): Si no es None, lista de valores sobre los que hacer un sumatorio discreto. Defaults to None.
numeric (bool, optional): si True, realiza una aproximación numérica de la integral usando un método de sympy.
Returns:
funcion, float: Valor del producto escalar. Se devuelve como funcion si tiene variables.
"""
prod = producto_asecas(f, g, var, a, b, I, numeric)
return simplify(prod / (2 * pi))
def coefs_fourier(f, var=symbols('x'), I=[0, 1], n_coefs=2):
"""Genera los coeficientes de la serie de fourier. Esta es la versión continua, donde los coeficientes se calculan usando la expresión de la integral.
Args:
f (funcion): Función a aproximar
var (variable, optional): Variable de la función. Defaults to symbols('x').
I (list, optional): Intervalo de aproximación de la función. Defaults to [0, 1].
n_coefs (int, optional): Número de coeficientes de la serie a generar. Defaults to 2.
Returns:
dict_coefs: {a_0, a_1, b_1, a_2, b_2, ...}
"""
dict_coefs = {}
dict_coefs['a0'] = simplify(1 / pi * integrate(f, (var, I[0], I[1])))
for i in range(1, n_coefs):
dict_coefs[f'a{i}'] = simplify(1 / pi * integrate(f * cos(i * var), (var, I[0], I[1])))
dict_coefs[f'b{i}'] = simplify(1 / pi * integrate(f * sin(i * var), (var, I[0], I[1])))
return dict_coefs
def coefs_fourier_discr(f, var=symbols('x'), I=[0, 1], n_coefs=2, m=10):
"""Genera los coeficientes de la serie de fourier. Esta es la versión donde la integral se aproxima como un sumatorio discreto de m términos sobre I.
Args:
f (funcion): Función a aproximar
var (variable, optional): Variable de la función. Defaults to symbols('x').
I (list, optional): Intervalo de aproximación de la función. Defaults to [0, 1].
n_coefs (int, optional): Número de coeficientes de la serie a generar. Defaults to 2.
m (int, optional): Número de elementos en los que dividir I para el sumatorio.
Returns:
dict_coefs: {a_0, a_1, b_1, a_2, b_2, ...}
"""
dict_coefs = {}
lista_xk = np.linspace(I[0], I[1], 2 * m)
dict_coefs['a0'] = np.sum([f.subs(var, xk) * cos(0 * xk) for xk in lista_xk]) / m
for i in range(1, n_coefs):
dict_coefs[f'a{i}'] = np.sum([f.evalf(subs={var: S(xk)}) * cos(S(i) * xk) for xk in lista_xk]) / m
dict_coefs[f'b{i}'] = np.sum([f.evalf(subs={var: S(xk)}) * sin(S(i) * xk) for xk in lista_xk]) / m
return dict_coefs
def serie_fourier(f, var=symbols('x'), I=[0, 1], n_coefs=3, discreto=False, m=10):
"""Genera la serie de Fourier para la función f sobre un intervalo.
Args:
f (funcion): Función a aproximar
var (variable, optional): Variable de la función. Defaults to symbols('x').
I (list, optional): Intervalo de aproximación de la función. Defaults to [0, 1].
n_coefs (int, optional): Número de coeficientes de la serie a generar. Defaults to 2.
discreto (bool, optional): Si True, genera una aproximación discreta de los coeficientes empleando m términos.
m (int, optional): Número de elementos en los que dividir I para el sumatorio.
Returns:
funcion: Función polinómica con la serie de Fourier.
"""
if discreto:
dict_coefs = coefs_fourier_discr(f, var, I, n_coefs, m)
else:
dict_coefs = coefs_fourier(f, var, I, n_coefs)
serie_fourier = dict_coefs['a0'] / 2
for i in range(1, n_coefs):
serie_fourier += dict_coefs[f'a{i}'] * cos(i * var) + dict_coefs[f'b{i}'] * sin(i * var)
return simplify(serie_fourier)
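# --- Hedged usage sketch (not part of the original): 4-term Fourier series of x**2 on [-pi, pi];
# --- the target function and interval are assumptions, not from the source.
x = symbols('x')
approx = serie_fourier(x**2, var=x, I=[-pi, pi], n_coefs=4)
print(approx)   # pass discreto=True, m=50 to use the discrete-sum coefficients instead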
|
python
|
from ..util.conversion import physical_compatible
from ..util import config, conversion
class df(object):
"""Top-level class for DF classes"""
def __init__(self,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DF object
INPUT:
ro= (None) distance scale
vo= (None) velocity scale
OUTPUT:
HISTORY:
2016-02-28 - Written - Bovy (UofT)
"""
# Parse ro and vo
if ro is None:
self._ro= config.__config__.getfloat('normalization','ro')
self._roSet= False
else:
self._ro= conversion.parse_length_kpc(ro)
self._roSet= True
if vo is None:
self._vo= config.__config__.getfloat('normalization','vo')
self._voSet= False
else:
self._vo= conversion.parse_velocity_kms(vo)
self._voSet= True
return None
def _check_consistent_units(self):
"""Internal function to check that the set of units for this object is consistent with that for the potential"""
assert physical_compatible(self,self._pot), 'Physical conversion for the DF object is not consistent with that of the Potential given to it'
def turn_physical_off(self):
"""
NAME:
turn_physical_off
PURPOSE:
turn off automatic returning of outputs in physical units
INPUT:
(none)
OUTPUT:
(none)
HISTORY:
2017-06-05 - Written - Bovy (UofT)
"""
self._roSet= False
self._voSet= False
return None
def turn_physical_on(self,ro=None,vo=None):
"""
NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc; can be Quantity)
vo= reference velocity (km/s; can be Quantity)
OUTPUT:
(none)
HISTORY:
2016-06-05 - Written - Bovy (UofT)
2020-04-22 - Don't turn on a parameter when it is False - Bovy (UofT)
"""
if not ro is False: self._roSet= True
if not vo is False: self._voSet= True
if not ro is None and ro:
self._ro= conversion.parse_length_kpc(ro)
if not vo is None and vo:
self._vo= conversion.parse_velocity_kms(vo)
return None
|
python
|
#!/usr/bin/python3
# Copyright (c) 2021 by Fred Morris Tacoma WA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the RPZ view.
We try to keep what's in the telemetry view and what's actually being served by
the zone in sync.
"""
import traceback
import logging
from time import time
import asyncio
from asyncio import Queue
import socket
import dns.message
import dns.rdatatype as rdatatype
import dns.rcode as rcode
import dns.query
from dns.exception import DNSException
# The class has a different name (UpdateMessage) in dnspython 2.x. This is for
# version 1.x.
from dns.update import Update as Updater
PRINT_COROUTINE_ENTRY_EXIT = None
TTL = 600
class Connection(object):
"""Manages a queue of requests and replies."""
def __init__(self, event_loop, server, rpz, statistics):
self.event_loop = event_loop
self.server = server
self.rpz = rpz
self.keep_open = False
self.reader_ = None
self.writer_ = None
if statistics:
self.request_stats = statistics.Collector('dns request')
else:
self.request_stats = None
return
def close(self):
if self.writer_ is None:
return
self.writer_.close()
self.reader_ = self.writer_ = None
return
def timer(self, collection):
"""Helper for marshalling coroutines."""
collection = getattr(self, collection)
return collection and collection.start_timer() or None
async def make_request(self, request=None, timer=None):
"""Sends the request and returns the response.
Context is a TCP connection. Request and response are the naked
request / response bytes respectively. Over the wire, this method
handles the prepended length bytes.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.Connection.make_request()')
# Open a connection if necessary.
if self.writer_ is None and request is not None:
self.reader_, self.writer_ = await asyncio.open_connection(self.server, 53)
# Send the request, and await a response.
if request is not None:
self.writer_.write( len(request).to_bytes(2, byteorder='big') + request )
await self.writer_.drain()
response_length = int.from_bytes( await self.reader_.read(2), byteorder='big')
response = b''
while response_length:
resp = await self.reader_.read(response_length)
if not len(resp):
break
response += resp
response_length -= len(resp)
# Close it? Ok, close it.
if not self.keep_open:
self.close()
if self.request_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.Connection.make_request()')
return response
class ZoneEntry(object):
"""All data for an FQDN.
This means the PTR record.
"""
TXT_RECORD_REFRESH_MINUTES = 30
def __init__(self, name):
self.name = name
self.ptr = None
self.last_refresh = time()
return
def update(self, rtype, rval):
if rtype == rdatatype.PTR:
self.ptr = rval
self.last_refresh = time()
return
def needs_refresh(self):
"""Returns True if the TXT record needs to be refreshed."""
        # last_refresh is in seconds since the epoch; the threshold constant is in minutes.
        return time() - self.last_refresh > self.TXT_RECORD_REFRESH_MINUTES * 60
class ZoneContents(dict):
"""This is a dictionary of zone entries.
The key is the name and the value is a ZoneEntry.
"""
def update_entry(self, rname, rtype, rval):
rname = rname.split('.in-addr.arpa')[0] + '.in-addr.arpa'
if rname not in self:
self[rname] = ZoneEntry( rname )
self[rname].update(rtype, rval)
return
class EndOfZone(EOFError):
pass
class TelemetryPackage(dict):
"""When we load from the RPZ this is what we get."""
CONVERSIONS = dict(
ptr = lambda x:x,
depth = lambda x:int(x),
first = lambda x:float(x),
last = lambda x:float(x),
count = lambda x:int(x),
trend = lambda x:float(x),
score = lambda x:float(x)
)
COMPLETE = set(CONVERSIONS.keys())
def complete(self):
return self.COMPLETE <= set(self.keys())
def set(self, k, v):
self[k] = self.CONVERSIONS[k](v)
return
def reverse_to_address(reverse_ref):
"""Take the reverse lookup qname format and extract the address."""
return '.'.join(reversed(reverse_ref.split('.in-addr.arpa')[0].split('.')))
def address_to_reverse(address):
"""Take the address and construct the reverse lookup format."""
return '{}.in-addr.arpa'.format('.'.join(reversed(address.split('.'))))
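# Round-trip sketch (illustrative values):
#   address_to_reverse('10.0.0.1')               -> '1.0.0.10.in-addr.arpa'
#   reverse_to_address('1.0.0.10.in-addr.arpa')  -> '10.0.0.1'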
class RPZ(object):
RDTYPES = set((rdatatype.PTR, rdatatype.TXT))
def __init__(self, event_loop, server, rpz, statistics):
self.event_loop = event_loop
self.server = server
self.rpz = rpz.lower().rstrip('.') + '.'
self.task_queue = Queue(loop=event_loop)
self.processor_ = self.event_loop.create_task(self.queue_processor())
self.conn_ = Connection(event_loop, server, rpz, statistics)
self.contents = ZoneContents()
if statistics:
self.axfr_stats = statistics.Collector("rpz axfr")
self.delete_stats = statistics.Collector("rpz delete")
self.update_stats = statistics.Collector("rpz update")
else:
self.axfr_stats = self.delete_stats = self.update_stats = None
return
async def close(self):
"""Cleanup, such as cancelling the queue processor."""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.close()')
self.conn_.close()
self.processor_.cancel()
await self.processor_
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.close()')
return
def timer(self, collection):
"""Helper for marshalling coroutines."""
collection = getattr(self, collection)
return collection and collection.start_timer() or None
def create_task(self, task):
"""Create a task in the RPZ queue."""
self.task_queue.put_nowait(task)
return
def process_zone_rec(self, qname, rtype, rval, telemetry_view):
"""Updates the memory view from a zone rec.
This updates both the RPZ view and the telemetry view.
"""
self.contents.update_entry(qname, rtype, rval)
# For telemetry updates, wait until we have all of the info for an update.
if qname not in self.telemetry_data_cache:
self.telemetry_data_cache[qname] = TelemetryPackage()
if rtype == rdatatype.PTR:
self.telemetry_data_cache[qname].set( 'ptr', rval )
elif rtype == rdatatype.TXT:
for kv in rval.strip('"').split(','):
self.telemetry_data_cache[qname].set( *kv.split('=',1) )
if not self.telemetry_data_cache[qname].complete():
return
# We have all of the requisite data...
telemetry_view.update_resolution_from_rpz(
reverse_to_address(qname.replace(self.rpz, '').lower()),
self.telemetry_data_cache[qname]
)
# Done.
del self.telemetry_data_cache[qname]
return
async def load_axfr_(self, associations):
"""Internal method."""
keep_open = self.conn_.keep_open
self.conn_.keep_open = True
# Construct the AXFR request and send it.
req = dns.message.make_query(self.rpz, 'AXFR')
wire_req = req.to_wire()
wire_resp = await self.conn_.make_request(wire_req, self.conn_.timer('request_stats'))
resp = dns.message.from_wire(wire_resp, xfr=True)
if resp.rcode() != rcode.NOERROR:
self.global_error('axfr - rcode', resp)
return
answer = resp.answer
# First record has to be an SOA record.
if answer[0].rdtype != rdatatype.SOA:
self.global_error('axfr - no soa', resp)
return
if answer[0].name.to_text().lower() != self.rpz:
self.global_error('axfr - wrong soa', resp)
return
answer = answer[1:]
self.telemetry_data_cache = {}
# Process and update the in-memory view.
try:
while True:
for rrset in answer:
name = rrset.name.to_text().lower()
if rrset.rdtype == rdatatype.SOA and name == self.rpz:
raise EndOfZone
if rrset.rdtype not in self.RDTYPES:
continue
for rr in rrset:
self.process_zone_rec(name, rrset.rdtype, rr.to_text(), associations)
wire_resp = await self.conn_.make_request(None, self.conn_.timer('request_stats')) # Get another response, no question asked.
resp = dns.message.from_wire(wire_resp, xfr=True)
if resp.rcode() != rcode.NOERROR:
self.global_error('axfr - rcode 2', resp)
break
answer = resp.answer
except EndOfZone:
pass
self.telemetry_data_cache = None
# Close the connection if we jimmied it open.
self.conn_.keep_open = keep_open
if not keep_open and self.task_queue.empty():
self.conn_.close()
return
async def load_axfr(self, associations, timer):
"""Use AXFR to load the RPZ context and populate associations.
associations is a db.Associator object.
An AXFR results in one or more query responses being sent by the server.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.load_axfr()')
await self.load_axfr_(associations)
if self.axfr_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.load_axfr()')
return
async def delete_(self, address):
"""Internal method."""
qname = address_to_reverse(address)
if qname not in self.contents:
return
# Remove it from the memory view.
del self.contents[qname]
# Remove it from the zone.
qname += '.' + self.rpz
update = Updater(self.rpz)
update.delete(qname)
wire_req = update.to_wire()
wire_resp = await self.conn_.make_request(wire_req, self.conn_.timer('request_stats'))
resp = dns.message.from_wire(wire_resp)
if resp.rcode() != rcode.NOERROR:
self.global_error('delete', resp)
return
async def delete(self, address, timer):
"""Remove the specified address from the RPZ.
The address is a string.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.delete()')
await self.delete_(address)
if self.delete_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.delete()')
return
async def update_(self, address, score):
"""Internal method."""
# Get the expected resolution. When this is called by RearView.solve() the
# best resolution has been determined.
if not address.best_resolution:
logging.error(
'update_(): best_resolution is None for address:{} with resolutions:{}'.format(
address.address, [ k for k in address.resolutions.keys() ]
)
)
return
qname = address_to_reverse(address.address)
ptr_value = address.best_resolution.chain[-1].rstrip('.') + '.'
zone_entry = self.contents.get(qname)
if ( zone_entry is not None
and zone_entry.ptr is not None
and ptr_value == zone_entry.ptr
and not zone_entry.needs_refresh()
):
return
self.contents.update_entry(qname, rdatatype.PTR, ptr_value)
qname = qname + '.' + self.rpz
update = Updater(self.rpz)
update.delete(qname)
update.add(qname, TTL, rdatatype.PTR, ptr_value)
update.add(qname, TTL, rdatatype.TXT,
','.join((
'{}={}'.format( k, v )
for k, v in
( ('depth', len(address.best_resolution.chain)),
('first', address.best_resolution.first_seen),
('last', address.best_resolution.last_seen),
('count', address.best_resolution.query_count),
('trend', address.best_resolution.query_trend),
('score', score)
)
))
)
wire_req = update.to_wire()
wire_resp = await self.conn_.make_request(wire_req, self.conn_.timer('request_stats'))
try:
resp = dns.message.from_wire(wire_resp)
except DNSException as e:
logging.error('Invalid DNS response to ({} -> {})'.format(address.address, ptr_value))
self.conn_.close()
return
if resp.rcode() != rcode.NOERROR:
self.global_error('update', resp)
return
async def update(self, address, score, timer):
"""Add / update the specified address in the RPZ.
The address is a db.Address object.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.update()')
await self.update_(address, score)
if self.update_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.update()')
return
async def queue_processor(self):
"""Processes the task queue, in coordination with the Connection."""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.queue_processor()')
while True:
task = await self.task_queue.get()
self.conn_.keep_open = not self.task_queue.empty()
try:
await task
self.task_queue.task_done()
except Exception as e:
traceback.print_exc()
self.event_loop.stop()
return
# This actually never exits.
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.queue_processor()')
return
def global_error(self, text, response):
"""Called when an error related to processing DNS requests occurs.
All this does at the moment is log the text, but it can be overridden
if needed.
"""
logging.error(text)
return
|
python
|
from enum import Enum
class Emoji(Enum):
PLAY = "*play*"
FACE_HEARTS = "<3"
FACE_TONGUE = ":P"
FACE_SMILE = ":D"
FACE_CRY_LAUGH = "xD"
FACE_HALO = "=D"
FACE_NERDY = "*nerdy*"
FACE_TEAR = "*cry*"
FACE_SAD = ":("
FACE_ZZZ = "*sleep*"
FACE_ROLLING_EYES = "*rolling-eyes*"
FILM = "*watch*"
POPCORN = "*popcorn*"
FACE_KISS = "*kiss*"
FACE_BLUSH_SMILE = "*smiling*"
FACE_THINK = "*thinking*"
THUMBS_UP = ":thumbsup:"
THUMBS_DOWN = ":thumbsdown:"
PIZZA = "*pizza*"
PARTY = "*party*"
FOLDED_HANDS = "*folded-hands*"
FIRE = "*hot*"
|
python
|
"""Dependency injector resource provider unit tests."""
import asyncio
import unittest2 as unittest
from dependency_injector import containers, providers, resources, errors
# Runtime import to get asyncutils module
import os
_TOP_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../',
)),
)
import sys
sys.path.append(_TOP_DIR)
from asyncutils import AsyncTestCase
def init_fn(*args, **kwargs):
return args, kwargs
class ResourceTests(unittest.TestCase):
def test_is_provider(self):
self.assertTrue(providers.is_provider(providers.Resource(init_fn)))
def test_provided_instance_provider(self):
provider = providers.Resource(init_fn)
self.assertIsInstance(provider.provided, providers.ProvidedInstance)
def test_injection(self):
resource = object()
def _init():
_init.counter += 1
return resource
_init.counter = 0
class Container(containers.DeclarativeContainer):
resource = providers.Resource(_init)
dependency1 = providers.List(resource)
dependency2 = providers.List(resource)
container = Container()
list1 = container.dependency1()
list2 = container.dependency2()
self.assertEqual(list1, [resource])
self.assertIs(list1[0], resource)
self.assertEqual(list2, [resource])
self.assertIs(list2[0], resource)
self.assertEqual(_init.counter, 1)
def test_init_function(self):
def _init():
_init.counter += 1
_init.counter = 0
provider = providers.Resource(_init)
result1 = provider()
self.assertIsNone(result1)
self.assertEqual(_init.counter, 1)
result2 = provider()
self.assertIsNone(result2)
self.assertEqual(_init.counter, 1)
provider.shutdown()
def test_init_generator(self):
def _init():
_init.init_counter += 1
yield
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
result1 = provider()
self.assertIsNone(result1)
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
provider.shutdown()
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
result2 = provider()
self.assertIsNone(result2)
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
provider.shutdown()
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_init_class(self):
class TestResource(resources.Resource):
init_counter = 0
shutdown_counter = 0
def init(self):
self.__class__.init_counter += 1
def shutdown(self, _):
self.__class__.shutdown_counter += 1
provider = providers.Resource(TestResource)
result1 = provider()
self.assertIsNone(result1)
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 0)
provider.shutdown()
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 1)
result2 = provider()
self.assertIsNone(result2)
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 1)
provider.shutdown()
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 2)
def test_init_not_callable(self):
provider = providers.Resource(1)
with self.assertRaises(errors.Error):
provider.init()
def test_init_and_shutdown(self):
def _init():
_init.init_counter += 1
yield
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
result1 = provider.init()
self.assertIsNone(result1)
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
provider.shutdown()
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
result2 = provider.init()
self.assertIsNone(result2)
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
provider.shutdown()
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_shutdown_of_not_initialized(self):
def _init():
yield
provider = providers.Resource(_init)
result = provider.shutdown()
self.assertIsNone(result)
def test_initialized(self):
provider = providers.Resource(init_fn)
self.assertFalse(provider.initialized)
provider.init()
self.assertTrue(provider.initialized)
provider.shutdown()
self.assertFalse(provider.initialized)
def test_call_with_context_args(self):
provider = providers.Resource(init_fn, 'i1', 'i2')
self.assertEqual(provider('i3', i4=4), (('i1', 'i2', 'i3'), {'i4': 4}))
def test_fluent_interface(self):
provider = providers.Resource(init_fn) \
.add_args(1, 2) \
.add_kwargs(a3=3, a4=4)
self.assertEqual(provider(), ((1, 2), {'a3': 3, 'a4': 4}))
def test_set_args(self):
provider = providers.Resource(init_fn) \
.add_args(1, 2) \
.set_args(3, 4)
self.assertEqual(provider.args, tuple([3, 4]))
def test_clear_args(self):
provider = providers.Resource(init_fn) \
.add_args(1, 2) \
.clear_args()
self.assertEqual(provider.args, tuple())
def test_set_kwargs(self):
provider = providers.Resource(init_fn) \
.add_kwargs(a1='i1', a2='i2') \
.set_kwargs(a3='i3', a4='i4')
self.assertEqual(provider.kwargs, {'a3': 'i3', 'a4': 'i4'})
def test_clear_kwargs(self):
provider = providers.Resource(init_fn) \
.add_kwargs(a1='i1', a2='i2') \
.clear_kwargs()
self.assertEqual(provider.kwargs, {})
def test_call_overridden(self):
provider = providers.Resource(init_fn, 1)
overriding_provider1 = providers.Resource(init_fn, 2)
overriding_provider2 = providers.Resource(init_fn, 3)
provider.override(overriding_provider1)
provider.override(overriding_provider2)
instance1 = provider()
instance2 = provider()
self.assertIs(instance1, instance2)
self.assertEqual(instance1, ((3,), {}))
self.assertEqual(instance2, ((3,), {}))
def test_deepcopy(self):
provider = providers.Resource(init_fn, 1, 2, a3=3, a4=4)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertEqual(provider.args, provider_copy.args)
self.assertEqual(provider.kwargs, provider_copy.kwargs)
self.assertIsInstance(provider, providers.Resource)
def test_deepcopy_initialized(self):
provider = providers.Resource(init_fn)
provider.init()
with self.assertRaises(errors.Error):
providers.deepcopy(provider)
def test_deepcopy_from_memo(self):
provider = providers.Resource(init_fn)
provider_copy_memo = providers.Resource(init_fn)
provider_copy = providers.deepcopy(
provider,
memo={id(provider): provider_copy_memo},
)
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_args(self):
provider = providers.Resource(init_fn)
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_args(dependent_provider1, dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.args[0]
dependent_provider_copy2 = provider_copy.args[1]
self.assertNotEqual(provider.args, provider_copy.args)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_kwargs(self):
provider = providers.Resource(init_fn)
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_kwargs(d1=dependent_provider1, d2=dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.kwargs['d1']
dependent_provider_copy2 = provider_copy.kwargs['d2']
self.assertNotEqual(provider.kwargs, provider_copy.kwargs)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_overridden(self):
provider = providers.Resource(init_fn)
object_provider = providers.Object(object())
provider.override(object_provider)
provider_copy = providers.deepcopy(provider)
object_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertEqual(provider.args, provider_copy.args)
self.assertIsInstance(provider, providers.Resource)
self.assertIsNot(object_provider, object_provider_copy)
self.assertIsInstance(object_provider_copy, providers.Object)
def test_deepcopy_with_sys_streams(self):
provider = providers.Resource(init_fn)
provider.add_args(sys.stdin, sys.stdout, sys.stderr)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider_copy, providers.Resource)
self.assertIs(provider.args[0], sys.stdin)
self.assertIs(provider.args[1], sys.stdout)
self.assertIs(provider.args[2], sys.stderr)
def test_repr(self):
provider = providers.Resource(init_fn)
self.assertEqual(
repr(provider),
'Resource({0}, initialized={1})'.format(
init_fn,
provider.initialized,
)
)
class AsyncResourceTest(AsyncTestCase):
def test_init_async_function(self):
resource = object()
async def _init():
await asyncio.sleep(0.001)
_init.counter += 1
return resource
_init.counter = 0
provider = providers.Resource(_init)
result1 = self._run(provider())
self.assertIs(result1, resource)
self.assertEqual(_init.counter, 1)
result2 = self._run(provider())
self.assertIs(result2, resource)
self.assertEqual(_init.counter, 1)
self._run(provider.shutdown())
def test_init_async_generator(self):
resource = object()
async def _init():
await asyncio.sleep(0.001)
_init.init_counter += 1
yield resource
await asyncio.sleep(0.001)
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
result1 = self._run(provider())
self.assertIs(result1, resource)
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
result2 = self._run(provider())
self.assertIs(result2, resource)
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_init_async_class(self):
resource = object()
class TestResource(resources.AsyncResource):
init_counter = 0
shutdown_counter = 0
async def init(self):
await asyncio.sleep(0.001)
self.__class__.init_counter += 1
return resource
async def shutdown(self, resource_):
await asyncio.sleep(0.001)
self.__class__.shutdown_counter += 1
assert resource_ is resource
provider = providers.Resource(TestResource)
result1 = self._run(provider())
self.assertIs(result1, resource)
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 0)
self._run(provider.shutdown())
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 1)
result2 = self._run(provider())
self.assertIs(result2, resource)
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 1)
self._run(provider.shutdown())
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 2)
def test_init_with_error(self):
async def _init():
raise RuntimeError()
provider = providers.Resource(_init)
future = provider()
self.assertTrue(provider.initialized)
self.assertTrue(provider.is_async_mode_enabled())
# Disable default exception handling to prevent output
asyncio.get_event_loop().set_exception_handler(lambda loop, context: ...)
with self.assertRaises(RuntimeError):
self._run(future)
# Restore default exception handling
asyncio.get_event_loop().set_exception_handler(None)
self.assertFalse(provider.initialized)
self.assertTrue(provider.is_async_mode_enabled())
def test_init_and_shutdown_methods(self):
async def _init():
await asyncio.sleep(0.001)
_init.init_counter += 1
yield
await asyncio.sleep(0.001)
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
self._run(provider.init())
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
self._run(provider.init())
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_shutdown_of_not_initialized(self):
async def _init():
yield
provider = providers.Resource(_init)
provider.enable_async_mode()
result = self._run(provider.shutdown())
self.assertIsNone(result)
def test_concurrent_init(self):
resource = object()
async def _init():
await asyncio.sleep(0.001)
_init.counter += 1
return resource
_init.counter = 0
provider = providers.Resource(_init)
result1, result2 = self._run(
asyncio.gather(
provider(),
provider()
),
)
self.assertIs(result1, resource)
self.assertEqual(_init.counter, 1)
self.assertIs(result2, resource)
self.assertEqual(_init.counter, 1)
|
python
|
# coding: utf-8
# This block of code fetches the data, and defines a function that
# splits the data into test/train, and into batches.
# Note that this function will only download the data once. Subsequent
# calls will load the data from the hard drive
import torch
from torchvision import datasets, transforms
def MNIST_Loaders(train_batch_size, test_batch_size=None):
if test_batch_size is None:
test_batch_size = train_batch_size
normalize = transforms.Normalize((0.1307,), (0.3081,))
Clean = transforms.Compose([transforms.ToTensor(), normalize])
#!wget www.di.ens.fr/~lelarge/MNIST.tar.gz
#!tar -zxvf MNIST.tar.gz
train_data = datasets.MNIST('./', train=True,
download=True, transform=Clean)
test_data = datasets.MNIST('./', train=False,
download=True, transform=Clean)
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=train_batch_size)
test_loader = torch.utils.data.DataLoader(test_data,
batch_size=test_batch_size)
return train_loader, test_loader
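# Minimal usage sketch (batch sizes are illustrative, not from the original):
if __name__ == "__main__":
    train_loader, test_loader = MNIST_Loaders(64, 1000)
    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])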
|
python
|
import sqlite3
# Configure database
connection = sqlite3.connect('map.db')
# Cursor for execute DB command
c = connection.cursor()
# CREATE TABLE
# c.execute("""CREATE TABLE map (
# id integer,
# lat real,
# lng real,
# comment text
# )""")
# INSERT VALUE
# c.execute("INSERT INTO map VALUES ('3','35.276718482995214','136.25179933602564','Hikone Castle has Hikonyan')")
# connection.commit()
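# Parameterized insert (a sketch with illustrative values; preferred over string
# literals because sqlite3 handles quoting and escaping of the values):
# c.execute("INSERT INTO map VALUES (?, ?, ?, ?)",
#           (4, 34.98585, 135.75884, 'Kyoto Station'))
# connection.commit()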
# SELECT TABLE
c.execute("SELECT * FROM map")
print(c.fetchall())
connection.commit()
connection.close()
|
python
|
from .models import Language
from rest_framework import serializers
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
fields = ('id', 'name', 'paradigm')
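# Usage sketch (hypothetical instance; field values are illustrative):
#   serializer = LanguageSerializer(Language(id=1, name='Python', paradigm='multi-paradigm'))
#   serializer.data  # {'id': 1, 'name': 'Python', 'paradigm': 'multi-paradigm'}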
|
python
|
"""
Playground for downloading GenBank files, wrapping in bowtie2, sCLIP/SOR prep
"""
from Bio import Entrez
import csv
import re
import requests
import subprocess
import os
from Bio import SeqIO
import shutil
from urllib.error import HTTPError
import time
import inv_config as config
# TODO Remove .gbk files at the end that don't have SOR/sCLIP to save disc space?
# my ncbi info - please don't share haha. You can get your own easily by logging in to NCBI and requesting an API Key
Entrez.email = config.email
Entrez.api_key = config.api_key
def ascp(accession_num, save_path=os.getcwd()):
url = 'https://www.ebi.ac.uk/ena/data/warehouse/filereport?accession={0}&result=read_run&fields=fastq_ftp'.format(
accession_num)
# Send the request to ENA
print("Requesting ENA for FASTQ FTP link for accession run {0}...".format(accession_num))
r = requests.get(url)
# from the text of the request, grab the fastq link(s)
fastq_finder = re.compile('ftp.*?fastq.gz')
print("FASTQ FTP links found:")
fastq_links = fastq_finder.findall(r.text)
if len(fastq_links) < 2:
print("Insufficient links found! Please check accession number or if the accession has submitted FASTQ files.")
return False
for link in fastq_links:
print(link)
# Alright, now for each link, build an ascp command
# Modify as needed, but should be default
ascp_openssh_file = config.ascp_ssh_key
print("Retrieving files by ascp...")
for link in fastq_links:
# build the ASCP file path
ascp_path = '[email protected]:/' + link[18:]
# build the ASCP command
cmd = 'ascp -QT -l300M -P33001 -i "{0}" {1} {2}'.format(ascp_openssh_file, ascp_path, save_path)
# subprocess
try:
subprocess.run(cmd, shell=True)
except subprocess.CalledProcessError as err:
print("Error:", err)
return True
# uses bowtie2-build to make a reference index
def bowtie2_build(ref, ind):
subprocess.run(['bowtie2-build', ref, ind], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
return
# align using bowtie2
# note bowtie2 uses stderr for output, oddly enough
# Use local mode to capture clipping events
def bowtie2(ind, fq1, fq2, sam, use_threads=4):
# Very rarely, there is a poorly formatted FASTQ file that catches. Return a fail.
try:
subprocess.run(['bowtie2', '-x', ind, '-1', fq1, '-2', fq2, '-S', sam, '-p', str(use_threads), '--local'],
check=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError as e:
print("Bowtie2 error! {0}".format(e))
return False
def get_file_len(a):
with open(a, 'r') as b:
lines = len(b.readlines())
return lines
# get SOR files using awk
def dump_sor(reads, outfile):
headers = 'colors,POS,CIGAR,TLEN\n'
# CMD has been modified to exclude zero-based TLEN
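    # FLAG values 113/177 (read and mate both reverse) and 65/129 (read and mate both
    # forward) appear to select pairs mapped in the same orientation (SOR); the TLEN
    # test then drops records whose template length looks like zero.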
    cmd = "awk 'BEGIN { OFS = \",\" } $2 ~ /113|177|65|129/ && $9 !~ /0/ {print $2, $4, $6, $9}'"
with open(outfile, 'w') as o:
# add headers
o.write(headers)
o.flush()
subprocess.run(cmd, stdin=reads, shell=True, stdout=o)
return
# get sCLIP reads using awk
def dump_sclip(reads, outfile):
headers = 'colors,POS,CIGAR,TLEN\n'
cmd = "awk 'BEGIN {OFS=\",\"} ($2 ~ /147|83/ && $6 ~ /^..?S/) || ($2 ~ /99|163/ && $6 ~ /S$/) {next;} $6 ~ /^..?S/ {print $2, $4, $6, $9 }'"
with open(outfile, 'w') as o:
o.write(headers)
o.flush()
# maybe using rb will allow us to properly read samtools bam
subprocess.run(cmd, stdin=reads, shell=True, stdout=o)
return
# bam, sort, and index using samtools
def bamify(sam_file, bam_file, use_threads=8):
# first, convert sam to bam
print("Convering to BAM...")
subprocess.run(['samtools', 'view', '-u', '-b', sam_file, '-o', 'tmp.bam', '-@', str(use_threads)])
# then, sort the bam file
print("Sorting...")
subprocess.run(['samtools', 'sort', 'tmp.bam', '-o', bam_file, '-@', str(use_threads)],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# now, index the bam file
print("Indexing...")
subprocess.run(['samtools', 'index', bam_file])
os.remove('tmp.bam')
return
# Extract reads by accession
# Keep in memory to pass to awk commands
def extract_reads(acc, bam_file, sor_file, sclip_file, use_threads=8):
print("Extracting reads from {0}...".format(acc))
# Unfortunately, assumes .1 for accession version...could alternatively re-do the grab so it maintains version
with open('tmp.sam', 'w') as o:
subprocess.run(['samtools', 'view', bam_file, acc+'.1', '-@', str(use_threads)], stdout=o, encoding='utf-8')
with open('tmp.sam', 'r') as i:
#print("Extracting SOR reads...")
dump_sor(i, sor_file)
with open('tmp.sam', 'r') as i:
#print("Extracting sCLIP reads...")
dump_sclip(i, sclip_file)
os.remove('tmp.sam')
return
### MAIN ###
# Firstly, load up the data table
data_file = config.data_file
acc_save_path = config.acc_save_path
fasta_save_path = config.fasta_save_path
run_save_path = config.run_save_path
sam_save_path = config.sam_save_path
sor_save_path = config.sor_save_path
sclip_save_path = config.sclip_save_path
script_path = config.script_path
acc_list_path = config.acc_list_path
use_threads = config.use_threads
sor_read_threshold = config.sor_read_threshold
sclip_read_threshold = 100 # Usually not a problem.
max_error = 10 # Timeout error threshold
with open(data_file, 'r') as f:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
my_paths = [acc_save_path, fasta_save_path, run_save_path, sam_save_path, sor_save_path, sclip_save_path]
for path in my_paths:
if not os.path.exists(path):
os.mkdir(path)
acc_with_genes = [] # List of accessions we want to run SOR mapping for
good_acc = [] # List of accession numbers we ultimately want to pass to detect_inversion_clusters
# Now, we want a couple things: The SRA Accession, the Biosample Accession, and the list of NUCCORE Accessions
sra_accession = row['SRA']
biosample_accession = row['Biosample']
nuccore_accessions = row['RefSeq Accessions'].split(',')
print("Now processing: {0}".format(biosample_accession))
# First, we need to bowtie2 the reads and reference together. So let's first grab the reads:
code = ascp(sra_accession, save_path=run_save_path)
if not code:
print("ASCP error! Skipping...")
else:
# Then, let's grab the accessions using Entrez Efetch - we want separate gbk files and a combined FASTA
# Sometimes there are literally too many accessions, and we get an HTTP 414 error. Break it up by 50?
batch_size = 50
for i in range(0, len(nuccore_accessions), batch_size):
end = min(len(nuccore_accessions), i + batch_size)
print('Retrieving gbk records {0} to {1}...'.format(i, end))
current_acc = nuccore_accessions[i:end]
nuccore_acc_query = ','.join(current_acc)
num_attempts = 1
while num_attempts < max_error:
try:
handle = Entrez.efetch(db='nuccore', id=nuccore_acc_query, rettype='gbwithparts', retmode='text')
num_attempts = max_error + 1
except HTTPError as err:
if 500 <= err.code <= 599:
print("Received error from server: {0}".format(err.code))
print("Attempt {0} of {1}".format(num_attempts, max_error))
num_attempts += 1
time.sleep(15)
else:
raise
for record in SeqIO.parse(handle, format='gb'):
name = os.path.join(acc_save_path, record.name + '.gb')
# Verify CDS info. If none, exclude from further analysis
elements = record.features
num_cds = 0
for element in elements:
if element.type == "CDS":
num_cds += 1
if num_cds == 0:
print("No gene data detected for {0}. Removing from analysis...".format(record.name))
else:
acc_with_genes.append(record.name)
with open(name, 'w') as out_handle:
SeqIO.write(record, out_handle, "gb")
handle.close()
print("{0} accessions with gene data detected.".format(len(acc_with_genes)))
print("Retrieving fasta records {0} to {1}...".format(i, end))
fasta_output = os.path.join(fasta_save_path, biosample_accession + '.fasta')
fasta_records = []
with Entrez.efetch(db='nuccore', id=nuccore_acc_query, rettype='fasta', retmode='text') as handle:
for record in SeqIO.parse(handle, format='fasta'):
fasta_records.append(record)
# Now write all the fasta to a single combined reference record.
with open(fasta_output, 'w') as out_handle:
SeqIO.write(fasta_records, out_handle, "fasta")
# Now let's use bowtie2 to align the reads
ref_path = os.path.join(fasta_save_path, biosample_accession + '.fasta')
f1 = os.path.join(run_save_path, sra_accession + '_1.fastq.gz')
f2 = os.path.join(run_save_path, sra_accession + '_2.fastq.gz')
sam_output = os.path.join(sam_save_path, biosample_accession + '.sam')
print("Aligning {0} to read set {1} using bowtie2...".format(ref_path, sra_accession))
bowtie2_build(ref_path, ind='INDEX')
code = bowtie2(ind='INDEX', fq1=f1, fq2=f2, sam=sam_output, use_threads=use_threads)
if not code:
print("Bowtie2 encountered an error! Skipping {0}...".format(biosample_accession))
else:
bam_output = os.path.join(sam_save_path, biosample_accession + '.bam')
# print("Indexing and sorting using SAMtools...")
bamify(sam_output, bam_output, use_threads=use_threads)
# Now, for each accession, let's extract the reads
for acc in acc_with_genes:
sor_file = os.path.join(sor_save_path, acc + '_sor.csv')
sclip_file = os.path.join(sclip_save_path, acc + '_sclip.csv')
# Extract reads for the accession
extract_reads(acc, bam_output, sor_file, sclip_file, use_threads=use_threads)
# Check to make sure the SOR and sCLIP files aren't empty. For ones that aren't add to accessions_list.txt
sor_lines = get_file_len(sor_file)
sclip_lines = get_file_len(sclip_file)
if (sor_lines >= sor_read_threshold) and (sclip_lines >= sclip_read_threshold):
good_acc.append(acc)
else:
print("{0} has insufficient SOR/sCLIP reads! Excluding from analysis. (SOR={1}, sCLIP={2})".format(acc, sor_lines, sclip_lines))
# Create a list of accessions with actual SOR/sCLIP data to feed to detect_inversion_clusters
with open(acc_list_path, 'w') as acc_list:
for acc in good_acc:
acc_list.write(acc+'\n')
# Now, with everything in place, run the detect inversions script, placing output in a Biosample folder
# Also check to make sure SOR and sCLIP have data in them first!
print("Executing detection script...")
subprocess.run(['python3', '{0}'.format(script_path), biosample_accession])
# Now remove all the data we no longer need to save hard disk space.
print("Cleaning SAM/BAM files for {0}...".format(biosample_accession))
shutil.rmtree(sam_save_path, ignore_errors=True)
print("Cleaning Run files for {0}...".format(sra_accession))
shutil.rmtree(run_save_path, ignore_errors=True)
print("Cleaning sCLIP files for {0}...".format(biosample_accession))
shutil.rmtree(sclip_save_path, ignore_errors=True)
print("Cleaning SOR files for {0}...".format(biosample_accession))
shutil.rmtree(sor_save_path, ignore_errors=True)
print("Cleaning Entrez FASTA files for {0}...".format(biosample_accession))
shutil.rmtree(fasta_save_path, ignore_errors=True)
print("Done!")
|
python
|
from __future__ import division
import os
import math
import scipy.misc
import numpy as np
import argparse
from glob import glob
from pose_evaluation_utils import mat2euler, dump_pose_seq_TUM
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_dir", type=str, help="path to kitti odometry dataset")
parser.add_argument("--output_dir", type=str, help="path to output pose snippets")
parser.add_argument("--seq_id", type=int, default=9, help="sequence id to generate groundtruth pose snippets")
parser.add_argument("--seq_length", type=int, default=5, help="sequence length of pose snippets")
args = parser.parse_args()
def is_valid_sample(frames, tgt_idx, seq_length):
N = len(frames)
tgt_drive, _ = frames[tgt_idx].split(' ')
max_src_offset = int((seq_length - 1)/2)
min_src_idx = tgt_idx - max_src_offset
max_src_idx = tgt_idx + max_src_offset
if min_src_idx < 0 or max_src_idx >= N:
return False
min_src_drive, _ = frames[min_src_idx].split(' ')
max_src_drive, _ = frames[max_src_idx].split(' ')
if tgt_drive == min_src_drive and tgt_drive == max_src_drive:
return True
return False
def main():
pose_gt_dir = args.dataset_dir + 'poses/'
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
seq_dir = os.path.join(args.dataset_dir, 'sequences', '%.2d' % args.seq_id)
img_dir = os.path.join(seq_dir, 'image_2')
N = len(glob(img_dir + '/*.png'))
test_frames = ['%.2d %.6d' % (args.seq_id, n) for n in range(N)]
with open(args.dataset_dir + 'sequences/%.2d/times.txt' % args.seq_id, 'r') as f:
times = f.readlines()
times = np.array([float(s[:-1]) for s in times])
with open(pose_gt_dir + '%.2d.txt' % args.seq_id, 'r') as f:
poses = f.readlines()
poses_gt = []
for pose in poses:
pose = np.array([float(s) for s in pose[:-1].split(' ')]).reshape((3,4))
rot = np.linalg.inv(pose[:,:3])
tran = -np.dot(rot, pose[:,3].transpose())
rz, ry, rx = mat2euler(rot)
poses_gt.append(tran.tolist() + [rx, ry, rz])
poses_gt = np.array(poses_gt)
max_src_offset = (args.seq_length - 1)//2
for tgt_idx in range(N):
if not is_valid_sample(test_frames, tgt_idx, args.seq_length):
continue
if tgt_idx % 100 == 0:
print('Progress: %d/%d' % (tgt_idx, N))
pred_poses = poses_gt[tgt_idx - max_src_offset:tgt_idx + max_src_offset + 1]
curr_times = times[tgt_idx - max_src_offset:tgt_idx + max_src_offset + 1]
out_file = args.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
dump_pose_seq_TUM(out_file, pred_poses, curr_times)
main()
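# Example invocation (hypothetical paths and script name; dataset_dir is expected to
# end with '/' because it is concatenated with 'poses/' and 'sequences/...' above):
#   python generate_gt_snippets.py --dataset_dir /data/kitti/odometry/ \
#       --output_dir ./gt_snippets/ --seq_id 9 --seq_length 5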
|
python
|
import numpy as np
import matplotlib.pyplot as plt
with open('timings.txt','r') as inp:
inp.readline()
times = np.loadtxt(inp, delimiter=',')
print(times.shape)
selected = list([0,1,3,8,13,18])
# plt.plot((2+np.array(range(19))),times[:,0],'r^-',label="Best first search algorithm")
# plt.plot((2+np.array(range(19))),times[:,1],'bd-',label="Sequential scan algorithm")
plt.plot((2+np.array(range(19)))[selected],times[:,7][selected],'bd-',label="Locality Sensitive Hashing (99% dist prec)")
# plt.plot((2+np.array(range(19)))[selected],times[:,1][selected],'bd-',label="Sequential scan algorithm")
plt.title("Average Query Time vs Dimension")
plt.xlabel('Dimension')
plt.ylabel('Average Time for 100NN query(in sec)')
# plt.ylim([0,0.01])
plt.legend()
plt.grid()
plt.show()
# import numpy as np
# import matplotlib.pyplot as plt
# with open('timings.txt','r') as inp:
# inp.readline()
# times = np.loadtxt(inp, delimiter=',')
# print(times.shape)
# selected = list([0,1,3,8,13,18])
# # plt.plot((2+np.array(range(19))),times[:,0],'r^-',label="Best first search algorithm")
# # plt.plot((2+np.array(range(19))),times[:,1],'bd-',label="Sequential scan algorithm")
# # plt.plot((2+np.array(range(19)))[selected],(times[:,1]/times[:,7])[selected],'r^-',label="Speedup")
# # plt.plot((2+np.array(range(19)))[selected],times[:,7][selected],'bd-',label="LSH (95% dist_prec)")
# plt.title("Average query time ratio(seq_scan/lsh(99% dist_prec)) vs Dimension")
# plt.xlabel('Dimension')
# plt.ylabel('Ratio of Average times for 100NN query(in sec)')
# # plt.ylim([0,0.01])
# plt.legend()
# plt.grid()
# plt.show()
|
python
|
from pessoa import Pessoa
class Aluno(Pessoa):
def __init__(self, rm, turma_id, rg, nome):
super().__init__(rg, nome)
self._rm = rm
self._turma_id = turma_id
self._notas = []
def media(self):
if len(self._notas) > 0:
return sum(self._notas)/len(self._notas)
else:
return None
def insere_nota(self, nota):
self._notas.append(nota)
    # to_string --> present in many languages --> turns an object into a text representation of it
def __str__(self):
return f'RM: {self._rm} - Nome: {self._nome}'
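# Usage sketch (hypothetical values; assumes Pessoa stores `nome` as self._nome,
# which __str__ relies on):
#   a = Aluno(rm=1234, turma_id=1, rg='12.345.678-9', nome='Maria')
#   a.insere_nota(8.0)
#   a.insere_nota(6.0)
#   print(a)          # RM: 1234 - Nome: Maria
#   print(a.media())  # 7.0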
|
python
|
import numpy as np
import torch
class FeaturesLinear(torch.nn.Module):
def __init__(self, field_dims, output_dim=1):
super().__init__()
self.fc = torch.nn.Embedding(sum(field_dims), output_dim)
self.bias = torch.nn.Parameter(torch.zeros((output_dim,)))
self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
x = x + x.new_tensor(self.offsets).unsqueeze(0)
return torch.sum(self.fc(x), dim=1) + self.bias
class FeaturesEmbedding(torch.nn.Module):
def __init__(self, field_dims, embed_dim):
super().__init__()
self.embedding = torch.nn.Embedding(sum(field_dims), embed_dim)
self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int32)
torch.nn.init.xavier_uniform_(self.embedding.weight.data)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
x = x + x.new_tensor(self.offsets).unsqueeze(0)
return self.embedding(x)
class MultiHotEmbedding(torch.nn.Module):
def __init__(self, multi_hotencoding_size, embed_dim):
super().__init__()
self.embed_dim = embed_dim
self.emb_w = torch.nn.Parameter(torch.zeros([multi_hotencoding_size, embed_dim], dtype=torch.float32))
torch.nn.init.xavier_uniform_(self.emb_w)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, multi_hotencoding_size)``
return (batch_size, embed_dim)
"""
return torch.matmul(x, self.emb_w).reshape(-1, 1, self.embed_dim)
class FactorizationMachine(torch.nn.Module):
def __init__(self, reduce_sum=True):
super().__init__()
self.reduce_sum = reduce_sum
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
square_of_sum = torch.sum(x, dim=1) ** 2
sum_of_square = torch.sum(x ** 2, dim=1)
ix = square_of_sum - sum_of_square
if self.reduce_sum:
ix = torch.sum(ix, dim=1, keepdim=True)
return 0.5 * ix
class MultiLayerPerceptron(torch.nn.Module):
def __init__(self, input_dim, embed_dims, dropout, output_layer=True):
super().__init__()
layers = list()
for embed_dim in embed_dims:
layers.append(torch.nn.Linear(input_dim, embed_dim))
layers.append(torch.nn.BatchNorm1d(embed_dim))
layers.append(torch.nn.ReLU())
layers.append(torch.nn.Dropout(p=dropout))
input_dim = embed_dim
if output_layer:
layers.append(torch.nn.Linear(input_dim, 1))
self.mlp = torch.nn.Sequential(*layers)
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, embed_dim)``
"""
return self.mlp(x)
# class DeepFactorizationMachineModel(torch.nn.Module):
# """
# A pytorch implementation of DeepFM.
# Reference:
# H Guo, et al. DeepFM: A Factorization-Machine based Neural Network for CTR Prediction, 2017.
# """
# def __init__(self, field_dims, embed_dim, mlp_dims, dropout, device):
# super().__init__()
# self.linear = FeaturesLinear(field_dims)
# self.fm = FactorizationMachine(reduce_sum=True)
# self.embedding = FeaturesEmbedding(field_dims, embed_dim)
# self.embed_output_dim = len(field_dims) * embed_dim
# self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
# self.to(device)
# def forward(self, x):
# """
# :param x: Long tensor of size ``(batch_size, num_fields)``
# """
# embed_x = self.embedding(x) # [batch_size, num_fields, emb_size] <-[batch_size, num_fields]
# x = self.linear(x) + self.fm(embed_x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
# return torch.sigmoid(x.squeeze(1))
class DeepFactorizationMachineModel(torch.nn.Module):
"""
A pytorch implementation of DeepFM.
Reference:
H Guo, et al. DeepFM: A Factorization-Machine based Neural Network for CTR Prediction, 2017.
"""
def __init__(self, field_dims, multi_hot_size, embed_dim, mlp_dims, dropout, device):
super().__init__()
self.linear = FeaturesLinear(field_dims)
self.fm = FactorizationMachine(reduce_sum=True)
self.embedding = FeaturesEmbedding(field_dims, embed_dim)
self.multi_embedding = MultiHotEmbedding(multi_hot_size, embed_dim)
self.embed_output_dim = (len(field_dims) + 1) * embed_dim
self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
self.to(device)
def forward(self, x, genres):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
embed_x = self.embedding(x) # [batch_size, num_fields, emb_size] <-[batch_size, num_fields]
embed_genres = self.multi_embedding(genres)
embed_x = torch.concat([embed_x, embed_genres], dim=1)
x = self.linear(x) + self.fm(embed_x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
return torch.sigmoid(x.squeeze(1))
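# Usage sketch (hypothetical sizes; `genres` is assumed to be a float multi-hot
# tensor of shape (batch_size, multi_hot_size)):
#   model = DeepFactorizationMachineModel(
#       field_dims=[944, 1683], multi_hot_size=18, embed_dim=16,
#       mlp_dims=(64, 32), dropout=0.2, device='cpu')
#   x = torch.randint(0, 944, (8, 2))   # (batch_size, num_fields)
#   genres = torch.rand(8, 18)
#   probs = model(x, genres)            # (8,) click probabilities in (0, 1)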
|
python
|
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solum base exception handling.
Includes decorator for re-raising Solum-type exceptions.
"""
import collections
import functools
import sys
import uuid
from keystoneclient import exceptions as keystone_exceptions
from oslo_config import cfg
import pecan
import six
import wsme
from solum.common import safe_utils
from solum.openstack.common import excutils
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='make exception message format errors fatal')
]
def list_opts():
yield None, exc_log_opts
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
level=None):
"""This decorator wraps a method to catch any exceptions.
It logs the exception as well as optionally sending
it to the notification system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier:
call_dict = safe_utils.getcallargs(f, *args, **kw)
payload = dict(exception=e,
private=dict(args=call_dict)
)
# Use a temp vars so we don't shadow
# our outer definitions.
temp_level = level
if not temp_level:
temp_level = notifier.ERROR
temp_type = event_type
if not temp_type:
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
temp_type = f.__name__
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
return functools.wraps(f)(wrapped)
return inner
OBFUSCATED_MSG = _('Your request could not be handled '
'because of a problem in the server. '
'Error Correlation id is: %s')
def wrap_controller_exception(func, func_server_error, func_client_error):
"""This decorator wraps controllers methods to handle exceptions:
- if an unhandled Exception or a SolumException with an error code >=500
is catched, raise a http 5xx ClientSideError and correlates it with a log
message
- if a SolumException is catched and its error code is <500, raise a http
4xx and logs the excp in debug mode
"""
@functools.wraps(func)
def wrapped(*args, **kw):
try:
return func(*args, **kw)
except Exception as excp:
LOG.error(excp)
http_error_code = 500
if hasattr(excp, 'code'):
http_error_code = excp.code
if http_error_code >= 500:
# log the error message with its associated
# correlation id
log_correlation_id = str(uuid.uuid4())
LOG.error("%s:%s", log_correlation_id, str(excp))
# raise a client error with an obfuscated message
func_server_error(log_correlation_id, http_error_code)
else:
                # raise a client error with the original message
func_client_error(excp, http_error_code)
return wrapped
def wrap_wsme_controller_exception(func):
"""This decorator wraps wsme controllers to handle exceptions."""
def _func_server_error(log_correlation_id, status_code):
raise wsme.exc.ClientSideError(
six.text_type(OBFUSCATED_MSG % log_correlation_id), status_code)
def _func_client_error(excp, status_code):
raise wsme.exc.ClientSideError(six.text_type(excp), status_code)
return wrap_controller_exception(func,
_func_server_error,
_func_client_error)
def wrap_pecan_controller_exception(func):
"""This decorator wraps pecan controllers to handle exceptions."""
def _func_server_error(log_correlation_id, status_code):
pecan.response.status = status_code
pecan.response.text = six.text_type(OBFUSCATED_MSG %
log_correlation_id)
# message body for errors is just a plain text message
# The following code is functionally equivalent to calling:
#
# pecan.override_template(None, "text/plain")
#
# We do it this way to work around a bug in our unit-test framework
# in which the mocked request object isn't properly mocked in the pecan
# core module ([email protected])
pecan.request.pecan['override_template'] = None
pecan.request.pecan['override_content_type'] = 'text/plain'
def _func_client_error(excp, status_code):
pecan.response.status = status_code
pecan.response.text = six.text_type(excp)
# The following code is functionally equivalent to calling:
#
# pecan.override_template(None, "text/plain")
#
# We do it this way to work around a bug in our unit-test framework
# in which the mocked request object isn't properly mocked in the pecan
# core module ([email protected])
pecan.request.pecan['override_template'] = None
pecan.request.pecan['override_content_type'] = 'text/plain'
return wrap_controller_exception(func,
_func_server_error,
_func_client_error)
def wrap_wsme_pecan_controller_exception(func):
"""Error handling for controllers decorated with wsmeext.pecan.wsexpose:
Controllers wrapped with wsme_pecan.wsexpose don't throw
exceptions but handle them internally. We need to intercept
the response and mask potentially sensitive information.
"""
@functools.wraps(func)
def wrapped(*args, **kw):
ret = func(*args, **kw)
ismapping = isinstance(ret, collections.Mapping)
if (pecan.response.status_code >= 500 and ismapping):
log_correlation_id = str(uuid.uuid4())
LOG.error("%s:%s", log_correlation_id, ret.get("faultstring",
"Unknown Error"))
ret['faultstring'] = six.text_type(OBFUSCATED_MSG %
log_correlation_id)
return ret
return wrapped
def wrap_keystone_exception(func):
"""Wrap keystone exceptions and throw Solum specific exceptions."""
@functools.wraps(func)
def wrapped(*args, **kw):
try:
return func(*args, **kw)
except keystone_exceptions.AuthorizationFailure:
raise AuthorizationFailure(
client=func.__name__, message="reason: %s" % sys.exc_info()[1])
except keystone_exceptions.ClientException:
raise AuthorizationFailure(
client=func.__name__,
message="unexpected keystone client error occurred: %s"
% sys.exc_info()[1])
return wrapped
@six.python_2_unicode_compatible
class SolumException(Exception):
"""Base Solum Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
def __init__(self, **kwargs):
self.kwargs = kwargs
if CONF.fatal_exception_format_errors:
assert isinstance(self.msg_fmt, six.text_type)
try:
self.message = self.msg_fmt % kwargs
except KeyError:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'),
extra=dict(
private=dict(
msg=self.msg_fmt,
args=kwargs
)
)
)
if CONF.fatal_exception_format_errors:
raise
def __str__(self):
return self.message
class ResourceLimitExceeded(SolumException):
msg_fmt = _("Resource limit exceeded. Reason: %(reason)s")
class BadRequest(SolumException):
msg_fmt = _("The request is malformed. Reason: %(reason)s")
code = 400
class ObjectNotFound(SolumException):
msg_fmt = _("The %(name)s %(id)s could not be found.")
class ObjectNotUnique(SolumException):
msg_fmt = _("The %(name)s already exists.")
class RequestForbidden(SolumException):
msg_fmt = _("The request is forbidden. Reason: %(reason)s")
code = 403
class ResourceNotFound(ObjectNotFound):
msg_fmt = _("The %(name)s resource %(id)s could not be found.")
code = 404
class ResourceExists(ObjectNotUnique):
msg_fmt = _("The %(name)s resource already exists.")
code = 409
class ResourceStillReferenced(SolumException):
msg_fmt = _("The %(name)s resource cannot be deleted because one or more"
" resources reference it.")
code = 409
class UnsupportedMediaType(SolumException):
msg_fmt = _("\'%(name)s\' is not a supported media type for the %(method)s"
" method of this resource")
code = 415
class Unprocessable(SolumException):
msg_fmt = _("Server is incapable of processing the specified request.")
code = 422
class PlanStillReferenced(ResourceStillReferenced):
msg_fmt = _("Plan %(name)s cannot be deleted because one or more"
" Assemblies reference it.")
class LPStillReferenced(ResourceStillReferenced):
msg_fmt = _("Languagepack %(name)s cannot be deleted because one or more"
" applications reference it.")
class NotImplemented(SolumException):
msg_fmt = _("The requested operation is not implemented.")
code = 501
class AuthorizationFailure(SolumException):
msg_fmt = _("%(client)s connection failed. %(message)s")
class InvalidObjectSizeError(Exception):
msg_fmt = _("Invalid object size.")
class MaxRetryReached(Exception):
msg_fmt = _("Maximum retries has been reached.")
|
python
|
import fileReader
import inputHandler
import fileHandler
import os
inputStuff = inputHandler.inputHandler()
fileStuff = fileHandler.FileHandler()
def getMode():
mode = inputStuff.determineAutoOrManual()
if mode == 'man':
getFileInfo()
loadAudioFile()
elif mode =='auto':
fileProcesser = fileReader.FileReader()
fileProcesser.processFile(inputStuff.getFileName())
def getFileInfo():
filename = inputStuff.getFileName()
if os.path.isfile(filename):
fileStuff.setFileName(filename)
else:
print('file not found, please try again.')
getFileInfo()
def loadAudioFile():
try:
fileStuff.loadFile()
except Exception:
print('error: not an audio file')
getFileInfo()
fileStuff.splitFile(inputStuff.getSplitTimes())
getMode()
|
python
|
from pybullet_utils import bullet_client
import math
class QuadrupedPoseInterpolator(object):
def __init__(self):
pass
def ComputeLinVel(self,posStart, posEnd, deltaTime):
vel = [(posEnd[0]-posStart[0])/deltaTime,(posEnd[1]-posStart[1])/deltaTime,(posEnd[2]-posStart[2])/deltaTime]
return vel
def ComputeAngVel(self,ornStart, ornEnd, deltaTime, bullet_client):
dorn = bullet_client.getDifferenceQuaternion(ornStart,ornEnd)
axis,angle = bullet_client.getAxisAngleFromQuaternion(dorn)
angVel = [(axis[0]*angle)/deltaTime,(axis[1]*angle)/deltaTime,(axis[2]*angle)/deltaTime]
return angVel
def ComputeAngVelRel(self,ornStart, ornEnd, deltaTime, bullet_client):
ornStartConjugate = [-ornStart[0],-ornStart[1],-ornStart[2],ornStart[3]]
pos_diff, q_diff =bullet_client.multiplyTransforms([0,0,0], ornStartConjugate, [0,0,0], ornEnd)
axis,angle = bullet_client.getAxisAngleFromQuaternion(q_diff)
angVel = [(axis[0]*angle)/deltaTime,(axis[1]*angle)/deltaTime,(axis[2]*angle)/deltaTime]
return angVel
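  # Frame layout assumed by Slerp below (inferred from the indexing): frameData[0] is
  # the key-frame duration, [1:4] the base position, [4:8] the base orientation stored
  # as (w, x, y, z), and [8:20] the twelve joint angles.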
def Slerp(self, frameFraction, frameData, frameDataNext,bullet_client ):
keyFrameDuration = frameData[0]
basePos1Start = [frameData[1],frameData[2],frameData[3]]
basePos1End = [frameDataNext[1],frameDataNext[2],frameDataNext[3]]
self._basePos = [basePos1Start[0]+frameFraction*(basePos1End[0]-basePos1Start[0]),
basePos1Start[1]+frameFraction*(basePos1End[1]-basePos1Start[1]),
basePos1Start[2]+frameFraction*(basePos1End[2]-basePos1Start[2])]
self._baseLinVel = self.ComputeLinVel(basePos1Start,basePos1End, keyFrameDuration)
baseOrn1Start = [frameData[5],frameData[6], frameData[7],frameData[4]]
baseOrn1Next = [frameDataNext[5],frameDataNext[6], frameDataNext[7],frameDataNext[4]]
self._baseOrn = bullet_client.getQuaternionSlerp(baseOrn1Start,baseOrn1Next,frameFraction)
self._baseAngVel = self.ComputeAngVel(baseOrn1Start,baseOrn1Next, keyFrameDuration, bullet_client)
jointPositions=[]
jointVelocities=[]
for j in range (12):
index=j+8
jointPosStart=frameData[index]
jointPosEnd=frameDataNext[index]
jointPos=jointPosStart+frameFraction*(jointPosEnd-jointPosStart)
jointVel=(jointPosEnd-jointPosStart)/keyFrameDuration
jointPositions.append(jointPos)
jointVelocities.append(jointVel)
self._jointPositions = jointPositions
self._jointVelocities = jointVelocities
return jointPositions,jointVelocities
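# A minimal usage sketch, not part of the original file: it assumes pybullet is
# installed and interpolates halfway between two hypothetical mocap frames laid
# out as [duration, x, y, z, qw, qx, qy, qz, 12 joint angles].
if __name__ == "__main__":
    import pybullet
    client = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
    frame0 = [0.033, 0.0, 0.0, 0.30, 1, 0, 0, 0] + [0.0] * 12
    frame1 = [0.033, 0.01, 0.0, 0.30, 1, 0, 0, 0] + [0.1] * 12
    interpolator = QuadrupedPoseInterpolator()
    joint_pos, joint_vel = interpolator.Slerp(0.5, frame0, frame1, client)
    print(joint_pos[:3], joint_vel[:3])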
|
python
|
from typing import List
from extraction.event_schema import EventSchema
from extraction.predict_parser.predict_parser import Metric
from extraction.predict_parser.tree_predict_parser import TreePredictParser
decoding_format_dict = {
'tree': TreePredictParser,
'treespan': TreePredictParser,
}
def get_predict_parser(format_name):
return decoding_format_dict[format_name]
def eval_pred(predict_parser, gold_list, pred_list, text_list=None, raw_list=None):
well_formed_list, counter = predict_parser.decode(
gold_list, pred_list, text_list, raw_list)
relation_metric = Metric()
for instance in well_formed_list:
relation_metric.count_instance(instance['gold_relation'],
instance['pred_relation'],
verbose=False)
role_result = relation_metric.compute_f1(prefix='relation-')
result = dict()
result.update(role_result)
result.update(counter)
return result
def eval_pred_with_decoding(gold_list, pred_list, text_list=None, raw_list=None):
relation_metric = Metric()
relation_metric.count_instance(gold_list, pred_list,verbose= False)
role_result = relation_metric.compute_f1(prefix='relation-')
result = dict()
result.update(role_result)
return result
def get_extract_metrics(pred_lns: List[str], tgt_lns: List[str], label_constraint: EventSchema, decoding_format='tree'):
    # predict_parser would be TreePredictParser for the 'tree' format, but eval_pred_with_decoding below does not use it
predict_parser = get_predict_parser(format_name=decoding_format)(label_constraint=label_constraint)
return eval_pred_with_decoding(
gold_list=tgt_lns,
pred_list=pred_lns
)
|
python
|
import os
filename = os.path.join(os.path.dirname(__file__), "input")
with open(filename) as file:
x = []
start = 0
for line in file:
text = list(line.rstrip())
if start == 0:
x = [0] * len(text)
i = 0
for t in text:
if t == '1':
x[i] += 1
i += 1
start += 1
gamma_nums = []
epsilon_nums = []
for y in x:
if y > start / 2:
gamma_nums.append(1)
epsilon_nums.append(0)
else:
gamma_nums.append(0)
epsilon_nums.append(1)
gamma = [str(v) for v in gamma_nums]
epsilon = [str(r) for r in epsilon_nums]
g = int(''.join(gamma), 2)
e = int(''.join(epsilon), 2)
print(g * e)
|
python
|
def solve(input):
ans = 0
for g in input.split("\n\n"):
b = 0
for c in g:
if c.isalpha():
b |= 1 << (ord(c) - ord("a"))
ans += bin(b).count("1")
return ans
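# A minimal check, not part of the original solution: the sample groups from
# the Advent of Code 2020 day 6 statement, where the per-group unions give
# 3 + 3 + 3 + 1 + 1 = 11 distinct "yes" answers.
if __name__ == "__main__":
    sample = "abc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb"
    assert solve(sample) == 11
    print(solve(sample))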
|
python
|
# https://www.hackerrank.com/challenges/three-month-preparation-kit-jack-goes-to-rapture/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getCost' function below.
#
# The function accepts WEIGHTED_INTEGER_GRAPH g as parameter.
#
#
# For the weighted graph, <name>:
#
# 1. The number of nodes is <name>_nodes.
# 2. The number of edges is <name>_edges.
# 3. An edge exists between <name>_from[i] and <name>_to[i]. The weight of the edge is <name>_weight[i].
#
#
def getCost(n, g_from, g_to, g_weight):
parent = [-1]*n
def find(n):
if parent[n]<0: return n
p = find(parent[n])
parent[n] = p
return p
edges = []
for z in range(len(g_from)):
a, b, c = g_from[z], g_to[z], g_weight[z]
a,b = a-1,b-1
edges.append((c, a, b))
edges.sort()
if(find(0)==find(n-1)): return 0
else:
for c,a,b in edges:
a = find(a)
b = find(b)
if(a!=b):
if(parent[a]==parent[b]): parent[b] -= 1
if(parent[a]>parent[b]): parent[a] = b
if(parent[a]<parent[b]): parent[b] = a
if(find(0)==find(n-1)): return c
else: return 'NO PATH EXISTS'
if __name__ == '__main__':
g_nodes, g_edges = map(int, input().rstrip().split())
g_from = [0] * g_edges
g_to = [0] * g_edges
g_weight = [0] * g_edges
for i in range(g_edges):
g_from[i], g_to[i], g_weight[i] = map(int, input().rstrip().split())
print(getCost(g_nodes, g_from, g_to, g_weight))
|
python
|
from typing import List, Callable, Optional, Dict, Set
from tqdm import tqdm
from src.data.objects.frame import Frame
from src.data.objects.stack import Stack
from src.data.readers.annotation_reader import AnnotationLoader
class LineModifiedClassifier:
def __init__(self, user_ids: Set[int], annotation_loader: AnnotationLoader,
weight_fn: Callable[[int, int], float], top_k_frames: Optional[int] = None):
self._user_ids = user_ids
self._annotation_loader = annotation_loader
self._weight_fn = weight_fn
self._top_k_frames = top_k_frames
def _frame_scores(self, frame: Frame, stack_ts: int) -> Dict[int, float]:
scores = dict.fromkeys(self._user_ids, 0)
frame = frame.raw_frame
annotation = self._annotation_loader(frame.commit_hash, frame.file_name)
if annotation and frame.line_num and frame.line_num - 1 < len(annotation):
line_author = annotation.author[frame.line_num - 1]
line_ts = annotation.ts[frame.line_num - 1]
if line_author in self._user_ids and line_ts <= stack_ts:
scores[line_author] += self._weight_fn(stack_ts, line_ts)
return scores
def _stack_scores(self, stack: Stack) -> Dict[int, float]:
user_scores = dict.fromkeys(self._user_ids, 0)
frames = stack.frames[:self._top_k_frames]
for frame in frames:
frame_scores = self._frame_scores(frame, stack.ts)
for user_id in self._user_ids:
user_scores[user_id] += frame_scores[user_id]
return user_scores
def predict(self, stacks: List[Stack]) -> List[Dict[int, float]]:
return [self._stack_scores(stack) for stack in tqdm(stacks)]
|
python
|
from scrapy import cmdline
name = 'douban_movie_top250'
cmd = 'scrapy crawl {}'.format(name)
cmdline.execute(cmd.split())
|
python
|
#you += hash(pubkey || index) to both the private scalar and public point
#<tacotime> [02:35:38] so to get priv_i and pub_i
#<tacotime> [02:36:06] priv_i = (priv + hash) mod N
#<tacotime> [02:37:17] pub_i = (pub + scalarbasemult(hash))
import MiniNero
import PaperWallet
sk, vk, pk, pvk, addr, wl, cks = PaperWallet.keysBoth()
print("making keychain")
for i in range(1, 600):
index = MiniNero.intToHex(i)
has = MiniNero.cn_fast_hash(pk + index)
sk1 = MiniNero.sc_add_keys(sk, has)
pk1 = MiniNero.addKeys(pk, MiniNero.scalarmultBase(has))
pk1_check = MiniNero.publicFromSecret(sk1)
print("Check", pk1== pk1_check)
print(sk1)
#print("i, sk, pk", i, sk1, pk1)
|
python
|
class OperationFailed(Exception):
pass
class ValidationFailed(Exception):
pass
|
python
|
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
def suma(a: float, b: float) -> float:
return a + b
def resta(a: float, b: float) -> float:
return a - b
def multiplicacion(a: float, b: float) -> float:
return a * b
def division(a: float, b: float) -> float:
    if b == 0:
        raise ZeroDivisionError
return a / b
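# A minimal usage sketch, not part of the original module: exercises the four
# helpers and shows that division() rejects a zero divisor.
if __name__ == "__main__":
    print(suma(2.0, 3.0))            # 5.0
    print(resta(2.0, 3.0))           # -1.0
    print(multiplicacion(2.0, 3.0))  # 6.0
    try:
        division(1.0, 0.0)
    except ZeroDivisionError:
        print("division entre cero no permitida")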
|
python
|
class ArtistCollection():
"""
Matplotlib collections can't handle Text.
This is a barebones collection for text objects
that supports removing and making (in)visible
"""
def __init__(self, artistlist):
"""
Pass in a list of matplotlib.text.Text objects
(or possibly any matplotlib Artist will work)
"""
self.artistlist = artistlist
def remove(self):
for T in self.artistlist:
T.remove()
def add_to_axes(self, ax):
for T in self.artistlist:
ax.add_artist(T)
def get_visible(self):
visible = True
for T in self.artistlist:
if not T.get_visible():
visible = False
return visible
def set_visible(self, visible=True):
for T in self.artistlist:
T.set_visible(visible)
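# A minimal usage sketch, not part of the original file: assumes matplotlib is
# installed and wraps two Text artists so they can be toggled and removed
# together.
if __name__ == "__main__":
    import matplotlib
    matplotlib.use("Agg")  # headless backend for the sketch
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    labels = ArtistCollection([ax.text(0.2, 0.5, "a"), ax.text(0.7, 0.5, "b")])
    labels.set_visible(False)
    print(labels.get_visible())  # False
    labels.remove()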
|
python
|
from qemuvolume import QEMUVolume
from ..tools import log_check_call
class VirtualHardDisk(QEMUVolume):
extension = 'vhd'
qemu_format = 'vpc'
ovf_uri = 'http://go.microsoft.com/fwlink/?LinkId=137171'
# Azure requires the image size to be a multiple of 1 MiB.
# VHDs are dynamic by default, so we add the option
# to make the image size fixed (subformat=fixed)
def _before_create(self, e):
self.image_path = e.image_path
vol_size = str(self.size.bytes.get_qty_in('MiB')) + 'M'
log_check_call(['qemu-img', 'create', '-o', 'subformat=fixed', '-f', self.qemu_format, self.image_path, vol_size])
def get_uuid(self):
if not hasattr(self, 'uuid'):
import uuid
self.uuid = uuid.uuid4()
return self.uuid
|
python
|
import pytest
import numpy
import os
import spacy
from spacy.matcher import Matcher
from spacy.attrs import ORTH, TAG, LOWER, IS_ALPHA, ENT_IOB, ENT_TYPE, FLAG63
from spacy.symbols import DATE, LOC
def test_overlap_issue118(EN):
'''Test a bug that arose from having overlapping matches'''
doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
ORG = doc.vocab.strings['ORG']
matcher = Matcher(EN.vocab,
{'BostonCeltics':
('ORG', {},
[
[{LOWER: 'celtics'}],
[{LOWER: 'boston'}, {LOWER: 'celtics'}],
]
)
}
)
assert len(list(doc.ents)) == 0
matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
assert matches == [(ORG, 9, 11), (ORG, 10, 11)]
doc.ents = matches[:1]
ents = list(doc.ents)
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
def test_overlap_issue242():
'''Test overlapping multi-word phrases.'''
patterns = [
[{LOWER: 'food'}, {LOWER: 'safety'}],
[{LOWER: 'safety'}, {LOWER: 'standards'}],
]
if os.environ.get('SPACY_DATA'):
data_dir = os.environ.get('SPACY_DATA')
else:
data_dir = None
nlp = spacy.en.English(path=data_dir, tagger=False, parser=False, entity=False)
nlp.matcher = Matcher(nlp.vocab)
nlp.matcher.add('FOOD', 'FOOD', {}, patterns)
doc = nlp.tokenizer(u'There are different food safety standards in different countries.')
matches = [(ent_type, start, end) for ent_id, ent_type, start, end in nlp.matcher(doc)]
doc.ents += tuple(matches)
food_safety, safety_standards = matches
assert food_safety[1] == 3
assert food_safety[2] == 5
assert safety_standards[1] == 4
assert safety_standards[2] == 6
def test_overlap_reorder(EN):
'''Test order dependence'''
doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
ORG = doc.vocab.strings['ORG']
matcher = Matcher(EN.vocab,
{'BostonCeltics':
('ORG', {},
[
[{LOWER: 'boston'}, {LOWER: 'celtics'}],
[{LOWER: 'celtics'}],
]
)
}
)
assert len(list(doc.ents)) == 0
matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
assert matches == [(ORG, 9, 11), (ORG, 10, 11)]
doc.ents = matches[:1]
ents = list(doc.ents)
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
def test_overlap_prefix(EN):
'''Test order dependence'''
doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
ORG = doc.vocab.strings['ORG']
matcher = Matcher(EN.vocab,
{'BostonCeltics':
('ORG', {},
[
[{LOWER: 'boston'}],
[{LOWER: 'boston'}, {LOWER: 'celtics'}],
]
)
}
)
assert len(list(doc.ents)) == 0
matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
doc.ents = matches[1:]
assert matches == [(ORG, 9, 10), (ORG, 9, 11)]
ents = list(doc.ents)
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
def test_overlap_prefix_reorder(EN):
'''Test order dependence'''
doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
ORG = doc.vocab.strings['ORG']
matcher = Matcher(EN.vocab,
{'BostonCeltics':
('ORG', {},
[
[{LOWER: 'boston'}, {LOWER: 'celtics'}],
[{LOWER: 'boston'}],
]
)
}
)
assert len(list(doc.ents)) == 0
matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
doc.ents += tuple(matches)[1:]
assert matches == [(ORG, 9, 10), (ORG, 9, 11)]
ents = doc.ents
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
# @pytest.mark.models
# def test_ner_interaction(EN):
# EN.matcher.add('LAX_Airport', 'AIRPORT', {}, [[{ORTH: 'LAX'}]])
# EN.matcher.add('SFO_Airport', 'AIRPORT', {}, [[{ORTH: 'SFO'}]])
# doc = EN(u'get me a flight from SFO to LAX leaving 20 December and arriving on January 5th')
# ents = [(ent.label_, ent.text) for ent in doc.ents]
# assert ents[0] == ('AIRPORT', 'SFO')
# assert ents[1] == ('AIRPORT', 'LAX')
# assert ents[2] == ('DATE', '20 December')
# assert ents[3] == ('DATE', 'January 5th')
# @pytest.mark.models
# def test_ner_interaction(EN):
# # ensure that matcher doesn't overwrite annotations set by the NER model
# doc = EN.tokenizer.tokens_from_list(u'get me a flight from SFO to LAX leaving 20 December and arriving on January 5th'.split(' '))
# EN.tagger(doc)
# columns = [ENT_IOB, ENT_TYPE]
# values = numpy.ndarray(shape=(len(doc),len(columns)), dtype='int32')
# # IOB values are 0=missing, 1=I, 2=O, 3=B
# iobs = [2,2,2,2,2,3,2,3,2,3,1,2,2,2,3,1]
# types = [0,0,0,0,0,LOC,0,LOC,0,DATE,DATE,0,0,0,DATE,DATE]
# values[:] = zip(iobs,types)
# doc.from_array(columns,values)
# assert doc[5].ent_type_ == 'LOC'
# assert doc[7].ent_type_ == 'LOC'
# assert doc[9].ent_type_ == 'DATE'
# assert doc[10].ent_type_ == 'DATE'
# assert doc[14].ent_type_ == 'DATE'
# assert doc[15].ent_type_ == 'DATE'
# EN.matcher.add('LAX_Airport', 'AIRPORT', {}, [[{ORTH: 'LAX'}]])
# EN.matcher.add('SFO_Airport', 'AIRPORT', {}, [[{ORTH: 'SFO'}]])
# EN.matcher(doc)
# assert doc[5].ent_type_ != 'AIRPORT'
# assert doc[7].ent_type_ != 'AIRPORT'
# assert doc[5].ent_type_ == 'LOC'
# assert doc[7].ent_type_ == 'LOC'
# assert doc[9].ent_type_ == 'DATE'
# assert doc[10].ent_type_ == 'DATE'
# assert doc[14].ent_type_ == 'DATE'
# assert doc[15].ent_type_ == 'DATE'
|
python
|
# 3rd party imports
import stellar_base.utils
from stellar_base.exceptions import *
from stellar_base.keypair import Keypair
from stellar_base.address import Address
STELLAR_MEMO_TEXT_MAX_BYTES = 28
def is_address_valid(address):
"""
Checks if a given Stellar address is valid. It does not check if it exists on the Stellar
network, only if it is correctly formatted.
:param str address: address to be evaluated.
:return: Returns true if the given address is valid and false otherwise.
:rtype: bool
"""
if address is None:
return False
try:
stellar_base.utils.decode_check('account', address)
return True
except DecodeError:
return False
def is_seed_valid(key):
"""
Checks if a given Stellar seed is valid.
:param str key: Seed to be evaluated.
:return: Returns true if the seed is valid and false otherwise.
:rtype: bool
"""
if key is None:
return False
try:
stellar_base.utils.decode_check('seed', key)
return True
except DecodeError:
return False
def is_transaction_text_memo_valid(memo):
"""
Checks if a given Stellar transaction text memo is valid. To be valid the text memo
can only have, at most, 28 bytes.
:param str memo: Text memo to be evaluated.
:return: Returns true if the given text memo is valid and false otherwise.
:rtype: bool
"""
if memo is None:
return False
    return len(memo.encode('utf-8')) <= STELLAR_MEMO_TEXT_MAX_BYTES
def is_seed_matching_address(seed, address):
"""
Checks if the specified seed address matches the specified address.
:param str seed: Seed to be evaluated.
:param str address: Address to be evaluated.
:return: Returns true if seed address matches the specified address, and false otherwise.
:rtype: bool
"""
if not is_seed_valid(seed) \
or not is_address_valid(address):
return False
keypair = Keypair.from_seed(seed=seed)
if keypair.address().decode() == address:
return True
return False
def is_account_existent(address):
"""
Checks if a given Stellar address exists in the network. It assumes that the address
parameter received is a valid address string.
:param str address: address to be evaluated.
:return: Returns true if the given address exists in the network and false otherwise.
:rtype: bool
"""
    return get_address_details_from_network(address) is not None
def get_address_details_from_network(address):
"""
Queries the Stellar network regarding the details of the specified account address.
:param str address: address to be evaluated.
:return: In case of success returns a Stellar Address object with the updated address information, fetched from
the Stellar network. In case of failure returns None
:rtype: Address or None
"""
if not is_address_valid(address):
print('Trying to get information of an invalid address.')
return None
try:
address = Address(address=address)
address.get() # Get the latest information from Horizon
except AccountNotExistError:
print('The specified account does not exist.')
return None
except HorizonError:
print('A connection error occurred (Please check your Internet connection).')
return None
return address
|
python
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback """
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, ms_function, context
import mindspore.common.dtype as mstype
context.set_context(mode=context.GRAPH_MODE)
class ControlNet(nn.Cell):
def inner_function_1(self, a, b):
return a + b
def inner_function_2(self, a, b):
return a - b
def construct(self, x):
a = Tensor(np.array(4), mstype.int32)
b = Tensor(np.array(5), mstype.int32)
if a + b > x:
return self.inner_function_1(a, b)
return self.inner_function_2(a, b)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_fallback_control_sink_tensor():
"""
Feature: Fallback feature: support define Tensor in Class construct.
Description: Fallback feature: support define Tensor in Class construct.
Expectation: Fallback feature: support define Tensor in Class construct.
"""
x = Tensor(np.array(1), mstype.int32)
net = ControlNet()
output = net(x)
output_expect = Tensor(9, mstype.int32)
assert output == output_expect
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_tensor_list():
"""
Feature: Fallback feature
Description: support Basic method of Tensor list.
Expectation: No exception.
"""
@ms_function
def np_tensor_list():
a = Tensor(np.array(4), mstype.int32)
b = Tensor(np.array(5), mstype.int32)
c = Tensor(np.array(6), mstype.int32)
tensor_list = [a, b]
for tensor in tensor_list:
print(tensor)
tensor_list.append(tensor_list[-1] + c)
return tensor_list
tensor_list = np_tensor_list()
print("tensor_list:", tensor_list)
assert len(tensor_list) == 3
|
python
|
"""Converts ECMWF levels into heights"""
import numpy as np
def readLevels(file_name='supra/Supracenter/level_conversion_ECMWF_37.txt', header=2):
""" Gets the conversion of heights from a .txt file, for use with convLevels().
Arguments:
file_name: [string] name of conversion file, .txt
header: [int] number of headers in the .txt file to ignore
Returns:
data: [ndarray] contents of the level conversion file to convert with
"""
with open(file_name) as f:
# Skip the header
for i in range(header):
next(f)
data = np.array([0, 0, 0])
# Parse file contents
for line in f:
# Remove the newline char
line = line.replace('\n', '').replace('\r', '')
# Split the line by the delimiter
line = line.split()
# Strip whitespaces from individual entries in the line
for i, entry in enumerate(line):
line[i] = float(entry.strip())
# Add the contents of the line to the data list
data = np.vstack((data, line))
# First row was all zeroes
data = np.delete(data, 0, 0)
return data
def convLevels(typ=1):
""" HELPER FUCNTION: Converts levels from ECMWF data into geopotential or geometeric heights.
see https://www.ecmwf.int/en/forecasts/documentation-and-support/137-model-levels
The conversion is done with a .txt file with the contents of that table.
Arguments:
typ: [int] 0 - levels to geopotential heights, 1 - levels to geometric heights
Returns:
data: [list] list of converted heights
"""
data = readLevels()
if typ == 0:
# Geopotential Heights
return data[:, 1]
else:
# Geometric Heights
return data[:, 2]
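# A minimal usage sketch, not part of the original module: it assumes the
# level-conversion table shipped with the package exists at the default path
# used by readLevels().
if __name__ == "__main__":
    geometric = convLevels(typ=1)
    geopotential = convLevels(typ=0)
    print(len(geometric), geometric[:3], geopotential[:3])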
|
python
|
#
# Copyright 2018 herd-mdl contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#!/usr/bin/env python
import json
import logging
import boto3
from botocore.exceptions import ClientError
from botocore.vendored import requests
SUCCESS = "SUCCESS"
FAILED = "FAILED"
logger = logging.getLogger()
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s %(levelname)-8s] %(message)s')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
# Lambda function 'script' which creates/destroys an EC2 Keypair with a user-specified name. It also stores the
# private key material as a 'SecureString' parameter in SSM's parameter store. This is packaged to work with an AWS
# 'CustomResource'. Further reading here: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template
# -custom-resources-lambda.html
# Entry-point of script which is invoked by the Lambda function
def handler(event, context):
logger.info('Request: Event: \n {}'.format(event))
logger.info('Request: Context: \n {}'.format(context))
# Get the keypair name as defined in the Resource properties and convert to lowercase for consistency
keypair_name = construct_keypair_name(event)
keypair_ssm_key_name = str(event['ResourceProperties']['KeypairSsmKeyName'])
physical_resource_id = str(event['LogicalResourceId']) + '-' + keypair_name
# On stack-create, does the following things:
# 1. Checks if a keypair with the specified name already exists, if it does- skips to step #4.
# 2. Creates the keypair with the given name.
# 3. Stores the keypair material in SSM as an encrypted value.
# 4. Signals CloudFormation that the process is complete.
if event['RequestType'] == 'Create':
if not ssm_parameter_exists(keypair_ssm_key_name, event, context, physical_resource_id):
logger.info(
'Attempting to create new SSM parameter to store keypair name: \'{}\''.format(keypair_ssm_key_name))
description = 'keypair name'
put_parameter_in_ssm(keypair_ssm_key_name, description, keypair_name, 'String', event, context,
physical_resource_id)
else:
logger.warning('SSM parameter for key pair name already exists, will not create a new one.')
if not keypair_exists(keypair_name, event, context, physical_resource_id):
            logger.info('Attempting to create a new keypair: \'{}\''.format(keypair_name))
keypair_material = create_key_pair(keypair_name, event, context,
physical_resource_id)
description = 'private key material'
put_parameter_in_ssm(keypair_name, description, keypair_material, 'SecureString', event, context,
physical_resource_id)
response_data = construct_response_message(
'Created new keypair: \'{}\' and stored in parameter store.'.format(
keypair_name))
send(event, context, SUCCESS, response_data, physical_resource_id)
else:
response_data = construct_response_message(
'Keypair: \'{}\' already exists, nothing to do.'.format(keypair_name))
send(event, context, SUCCESS, response_data, physical_resource_id)
# On stack-delete, do the following things:
# 1. Checks if a keypair with the specified name already exists, if it does- deletes it.
# 2. Checks if an SSM parameter exists with the specified name, if it does- deletes it.
# 3. Signals CloudFormation that the process is complete.
elif event['RequestType'] == 'Delete':
message = ''
if keypair_exists(keypair_name, event, context, physical_resource_id):
logger.info('Attempting to delete the keypair')
delete_key_pair(keypair_name, event, context, physical_resource_id)
message += 'Deleted keypair: \'{}\''.format(keypair_name)
if ssm_parameter_exists(keypair_name, event, context, physical_resource_id):
delete_key_pair_parameter_key(keypair_name, event, context,
physical_resource_id)
message += '\nDeleted parameter with key: \'{}\' from SSM.'.format(
keypair_name)
response_data = construct_response_message(message)
else:
response_data = construct_response_message(
'Keypair: \'{}\' and parameter: \'{}\' do not exist. Nothing '
'to delete.'.format(keypair_name, keypair_name))
send(event, context, SUCCESS, response_data, physical_resource_id)
# On stack-update, does nothing and simply exits.
elif event['RequestType'] == 'Update':
logger.info('Nothing to update')
response_data = construct_response_message('Nothing to update')
send(event, context, SUCCESS, response_data, physical_resource_id)
def construct_keypair_name(event):
delimiter = '_'
app_prefix = 'app'
instance_name = str(event['ResourceProperties']['MDLInstanceName'])
environment = str(event['ResourceProperties']['Environment'])
# lower-case the keypair name for consistency
return delimiter.join([app_prefix, instance_name, environment]).lower()
# Function to check if a keypair exists with the specified name. Returns a Boolean.
def keypair_exists(keypair_name, event, context, physical_resource_id):
logger.info(
'Checking if a keypair already exists with the specified name: \'{}\''.format(
keypair_name))
try:
ec2 = boto3.client('ec2')
response = ec2.describe_key_pairs(
KeyNames=[
keypair_name
]
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200 and len(response['KeyPairs']) == 1:
logger.warning("KeyPair: \'{}\' found.".format(keypair_name))
return True
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidKeyPair.NotFound':
logger.info('KeyPair: \'{}\' not found.'.format(keypair_name))
return False
else:
logger.error('Unexpected error: {}'.format(e))
response_data = construct_response_message(
'Unexpected error while trying to \'describe\' the Keypair: \'{}\'. Exception: {}'
.format(keypair_name, e))
send(event, context, FAILED, response_data, physical_resource_id)
return False
# Function to check if a key-value pair exists in SSM with the specified name. Returns a Boolean.
def ssm_parameter_exists(key_name, event, context, physical_resource_id):
logger.info(
'Checking if a parameter exists in SSM with the specified name: \'{}\''.format(
key_name))
try:
ssm = boto3.client('ssm')
response = ssm.get_parameter(
Name=key_name
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
logger.info(
'Found parameter with key name: \'{}\' in SSM.'.format(key_name))
return True
except ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 400:
logger.info(
'Parameter with key: \'{}\' not found in SSM.'.format(key_name))
return False
else:
logger.error('Unexpected error: {}'.format(e))
response_data = construct_response_message(
'Unexpected error while trying to get parameter: \'{}\'. Exception: {}'.format(
key_name, e))
send(event, context, FAILED, response_data, physical_resource_id)
return False
# Creates an EC2 keypair with the specified name
def create_key_pair(keypair_name, event, context, physical_resource_id):
try:
ec2 = boto3.resource('ec2')
logging.info(
'Attempting to create a keypair with name: {}'.format(keypair_name))
response = ec2.create_key_pair(
KeyName=keypair_name,
DryRun=False
)
return response.key_material
except ClientError as e:
logger.error(
'Could not create keypair with name: \'{}\'. Exception: {}'.format(
keypair_name, e))
response_data = construct_response_message(
'Unexpected error while trying to create keypair with given name: \'{}\'. Exception: {}'
.format(keypair_name, e))
send(event, context, FAILED, response_data, physical_resource_id)
# Stores a specified key-material with a given name in SSM.
def put_parameter_in_ssm(key_name, description, material, value_type, event, context, physical_resource_id):
logger.info('Attempting to put parameter in SSM with name: \'{}\'.'.format(
key_name))
try:
ssm = boto3.client('ssm')
response = ssm.put_parameter(
Name=key_name,
Description=description,
Value=material,
Type=value_type,
Overwrite=True
)
return response
except ClientError as e:
logger.error(
'Could not store key material in SSM with key: \'{}\'. Exception: {}'.format(
key_name, e))
response_data = construct_response_message(
            'Unexpected error while trying to \'put\' parameter in SSM with given name: \'{}\'. Exception: {}'
.format(key_name, e))
send(event, context, FAILED, response_data, physical_resource_id)
# Deletes a parameter from SSM of a given name
def delete_key_pair_parameter_key(keypair_name, event, context,
physical_resource_id):
logger.info(
'Attempting to delete the key with name: \'{}\''.format(keypair_name))
try:
ssm = boto3.client('ssm')
ssm.delete_parameter(
Name=keypair_name
)
return True
except ClientError as e:
logger.error('Could not delete key: \'{}\' from SSM. Exception: {}'.format(
keypair_name, e))
response_data = construct_response_message(
'Unexpected error while trying to \'delete\' parameter from SSM with given name: \'{}\'. Exception: {}'
.format(keypair_name, e))
send(event, context, FAILED, response_data, physical_resource_id)
return False
# Deletes a keypair of a given name
def delete_key_pair(keypair_name, event, context, physical_resource_id):
logger.info(
'Attempting to delete the keypair with name: \'{}\''.format(keypair_name))
try:
ec2 = boto3.client('ec2')
ec2.delete_key_pair(
KeyName=keypair_name
)
return True
except ClientError as e:
logger.error(
'Could not delete keypair with name: \'{}\'. Exception: {}'.format(
keypair_name, e))
response_data = construct_response_message(
'Unexpected error while trying to \'delete\' keypair with given name: \'{}\'. Exception: {}'
.format(keypair_name, e))
send(event, context, FAILED, response_data, physical_resource_id)
return False
# Function to construct a formatted response message to send to CloudFormation while signaling it
def construct_response_message(message):
return {'Message': message}
# Function to signal CloudFormation.
def send(event, context, response_status, response_data, physical_resource_id):
response_url = event['ResponseURL']
logger.debug('ResponseURL: {}'.format(response_url))
responseBody = {'Status': response_status,
'Reason': 'See the details in CloudWatch Log Stream: ' + context.log_stream_name,
'PhysicalResourceId': physical_resource_id or context.log_stream_name,
'StackId': event['StackId'],
'RequestId': event['RequestId'],
'LogicalResourceId': event['LogicalResourceId'],
'NoEcho': 'false',
'Data': response_data}
json_response_body = json.dumps(responseBody)
logger.debug("Response body: {}".format(json_response_body))
headers = {
'content-type': '',
'content-length': str(len(json_response_body))
}
try:
response = requests.put(response_url,
data=json_response_body,
headers=headers)
logger.info("Status code: {}".format(response.reason))
except Exception as e:
logger.error("Send failed: {}".format(str(e)))
|
python
|
import torch.optim
import torch.nn as nn
class AgentControl:
def __init__(self, hyperparameters):
self.gamma = hyperparameters['gamma']
        self.device = 'cpu'  # 'cuda' if torch.cuda.is_available() else 'cpu'
self.loss = nn.MSELoss()
#Return accumulated discounted estimated reward from memory
def get_rewards(self, old_rewards, new_states, critic_nn):
# Variable i represents number of rows in memory starting from 0 (number i is basically n-step)
i = len(old_rewards) - 1
# Calculate Critic value of new state of last step which we will add to accumulated rewards
v_new = critic_nn(torch.tensor(new_states[i], dtype=torch.float64).to(self.device)).detach()
rewards = []
# We take just a value of Critic output which will act as base when we add discounted rewards backwards
temp = v_new.item()
while i > -1:
# For rewards we do backwards discounted sum
rewards.append(old_rewards[i] + self.gamma * temp)
temp = old_rewards[i] + self.gamma * temp
i -= 1
return rewards
# Return states and actions in arrays sorted backward. It needs to be backward because rewards have to be calculated from last step.
# Since we need rewards (target) to match its current state we need to sort states backwards as well.
def get_states_actions_entropies(self, st, ac, en):
# Variable i represents number of rows in memory starting from 0 (number is basically n-step)
i = len(st) - 1
states = []
actions = []
entropies = []
while i > -1:
            # For states and actions we create simple lists which we need for critic/actor parallel input
states.append(st[i])
actions.append(ac[i])
entropies.append(en[i])
i -= 1
return states, actions, entropies
# Update Critic NN parameters based on estimated target (rewards) and current value (v_curr)
def update_critic(self, rewards, states, entropies, critic_nn, critic_optim):
rewards = torch.tensor(rewards, dtype=torch.float64).to(self.device)
states = torch.tensor(states, dtype=torch.float64).to(self.device)
# NN output needs to be squeeze(-1) to lower dimension from matrix to vector of outputs
v_curr = critic_nn(states).squeeze(-1)
# Calculate MSE loss between target (rewards) and NN output (v_curr)
loss = self.loss(rewards, v_curr)
# Add entropy, if flag is False it will add 0
loss += torch.mean(torch.tensor(entropies, dtype=torch.float64).to(self.device).detach())
        # We need to set the gradients to zero before starting to do backpropagation because PyTorch accumulates the gradients on subsequent backward passes
critic_optim.zero_grad()
# Calculate loss derivative
loss.backward()
        # Update current parameters based on calculated derivatives with Adam optimizer
critic_optim.step()
return loss.item()
# Estimate advantage as difference between estimated return and actual value
def estimate_advantage(self, rewards, states, critic_nn):
rewards = torch.tensor(rewards, dtype=torch.float64).to(self.device)
states = torch.tensor(states, dtype=torch.float64).to(self.device)
v_curr = critic_nn(states).squeeze(-1)
# We estimate advantage as how much Critic NN is right or wrong
return (rewards - v_curr).detach()
    # Update Actor NN parameters based on the gradient of log(action probability) * advantage
def update_actor(self, states, actions, entropies, advantage, actor_nn, actor_optim):
states = torch.tensor(states, dtype=torch.float64).to(self.device)
action_prob = actor_nn(states)
        # action_prob is an n_step x 2 matrix. We transform it to n_step x 1 by selecting only the probabilities of the actions we took
action_prob = action_prob[range(action_prob.shape[0]), actions]
# Loss is calculated as log(x) * x for each step. We calculate mean to get single value loss and add minus because torch.log will add additional minus.
loss = -torch.mean(torch.log(action_prob) * advantage)
# Add entropy, if flag is False it will add 0
loss += torch.mean(torch.tensor(entropies, dtype=torch.float64).to(self.device).detach())
        # We need to set the gradients to zero before starting to do backpropagation because PyTorch accumulates the gradients on subsequent backward passes
actor_optim.zero_grad()
# Calculate loss derivative
loss.backward()
        # Update current parameters based on calculated derivatives with Adam optimizer
actor_optim.step()
# We need to reset entropy since we have done one n-step iteration.
return loss.item()
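# A minimal usage sketch, not part of the original project: the critic/actor
# networks, the 4-dim observations and the 3-step rollout below are all
# hypothetical stand-ins, kept in float64 to match the tensors built above.
if __name__ == "__main__":
    critic = nn.Sequential(nn.Linear(4, 32), nn.ReLU(), nn.Linear(32, 1)).double()
    actor = nn.Sequential(nn.Linear(4, 32), nn.ReLU(), nn.Linear(32, 2), nn.Softmax(dim=-1)).double()
    critic_optim = torch.optim.Adam(critic.parameters(), lr=1e-3)
    actor_optim = torch.optim.Adam(actor.parameters(), lr=1e-3)
    control = AgentControl({'gamma': 0.99})
    old_rewards = [1.0, 0.0, 1.0]
    states = [[0.1] * 4, [0.2] * 4, [0.3] * 4]
    new_states = [[0.2] * 4, [0.3] * 4, [0.4] * 4]
    actions = [0, 1, 0]
    entropies = [0.0, 0.0, 0.0]
    targets = control.get_rewards(old_rewards, new_states, critic)
    s, a, e = control.get_states_actions_entropies(states, actions, entropies)
    advantage = control.estimate_advantage(targets, s, critic)
    print("critic loss:", control.update_critic(targets, s, e, critic, critic_optim))
    print("actor loss:", control.update_actor(s, a, e, advantage, actor, actor_optim))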
|
python
|
"""Support for TPLink HS100/HS110/HS200 smart switch."""
import logging
import time
from homeassistant.components.switch import (
ATTR_CURRENT_POWER_W, ATTR_TODAY_ENERGY_KWH, SwitchDevice)
from homeassistant.const import ATTR_VOLTAGE
import homeassistant.helpers.device_registry as dr
from . import CONF_SWITCH, DOMAIN as TPLINK_DOMAIN
PARALLEL_UPDATES = 0
_LOGGER = logging.getLogger(__name__)
ATTR_TOTAL_ENERGY_KWH = 'total_energy_kwh'
ATTR_CURRENT_A = 'current_a'
async def async_setup_platform(hass, config, add_entities,
discovery_info=None):
"""Set up the platform.
Deprecated.
"""
_LOGGER.warning('Loading as a platform is no longer supported, '
'convert to use the tplink component.')
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up discovered switches."""
devs = []
for dev in hass.data[TPLINK_DOMAIN][CONF_SWITCH]:
devs.append(SmartPlugSwitch(dev))
async_add_entities(devs, True)
return True
class SmartPlugSwitch(SwitchDevice):
"""Representation of a TPLink Smart Plug switch."""
def __init__(self, smartplug):
"""Initialize the switch."""
self.smartplug = smartplug
self._sysinfo = None
self._state = None
self._available = False
# Set up emeter cache
self._emeter_params = {}
@property
def unique_id(self):
"""Return a unique ID."""
return self._sysinfo["mac"]
@property
def name(self):
"""Return the name of the Smart Plug."""
return self._sysinfo["alias"]
@property
def device_info(self):
"""Return information about the device."""
return {
"name": self.name,
"model": self._sysinfo["model"],
"manufacturer": 'TP-Link',
"connections": {
(dr.CONNECTION_NETWORK_MAC, self._sysinfo["mac"])
},
"sw_version": self._sysinfo["sw_ver"],
}
@property
def available(self) -> bool:
"""Return if switch is available."""
return self._available
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.smartplug.turn_on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self.smartplug.turn_off()
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def update(self):
"""Update the TP-Link switch's state."""
from pyHS100 import SmartDeviceException
try:
if not self._sysinfo:
self._sysinfo = self.smartplug.sys_info
self._state = self.smartplug.state == \
self.smartplug.SWITCH_STATE_ON
if self.smartplug.has_emeter:
emeter_readings = self.smartplug.get_emeter_realtime()
self._emeter_params[ATTR_CURRENT_POWER_W] \
= "{:.2f}".format(emeter_readings["power"])
self._emeter_params[ATTR_TOTAL_ENERGY_KWH] \
= "{:.3f}".format(emeter_readings["total"])
self._emeter_params[ATTR_VOLTAGE] \
= "{:.1f}".format(emeter_readings["voltage"])
self._emeter_params[ATTR_CURRENT_A] \
= "{:.2f}".format(emeter_readings["current"])
emeter_statics = self.smartplug.get_emeter_daily()
try:
self._emeter_params[ATTR_TODAY_ENERGY_KWH] \
= "{:.3f}".format(
emeter_statics[int(time.strftime("%e"))])
except KeyError:
# Device returned no daily history
pass
self._available = True
except (SmartDeviceException, OSError) as ex:
if self._available:
_LOGGER.warning("Could not read state for %s: %s",
self.smartplug.host, ex)
self._available = False
|
python
|
from .Util import *
|
python
|
'''This file defines user interfaces to sDNA tools and how to convert inputs to config'''
##This file (and this file only) is released under the MIT license
##
##The MIT License (MIT)
##
##Copyright (c) 2015 Cardiff University
##
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included in
##all copies or substantial portions of the Software.
##
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
##THE SOFTWARE.
def metric_dropdown(name,label,include_match_analytical=False):
optlist = ["EUCLIDEAN","ANGULAR","CUSTOM","CYCLE","CYCLE_ROUNDTRIP","EUCLIDEAN_ANGULAR"]
return (name,label,"Text",optlist,optlist[0],True)
# when this changes, add it to geodesics etc
def weighting_options():
return [("weighting","Weighting","Text",["Link","Length","Polyline"],"Link",True),
("origweight","Origin weight","Field",("Numeric","input"),"",False),
("destweight","Destination weight","Field",("Numeric","input"),"",False)]
def weighting_config(args):
return "origweight=%(origweight)s;destweight=%(destweight)s;weight_type=%(weighting)s"%args
def radius_options(include_banded = True,include_cont = True):
retval = [("radii","Radii (in units of source data projection)","Text",None,"n",True)]
if include_banded:
retval += [("bandedradii","Banded radius","Bool",None,False,False)]
if include_cont:
retval += [("cont","Continuous Space","Bool",None,False,False)]
return retval
def radius_config(args):
retval = ";radii=%(radii)s;"%args
if args.has_key("bandedradii") and args["bandedradii"]:
retval += "bandedradii;"
if args.has_key("cont") and args["cont"]:
retval += "cont;"
return retval
def quote(x):
return '"'+x+'"'
class sDNAIntegral(object):
alias = "Integral Analysis"
desc = \
"""<p>sDNA Integral is the core analysis tool of sDNA. It computes several flow, accessibility, severance and efficiency measures on networks.
<p>For full details, see the sDNA documentation.
"""
category = "Analysis"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output features","OFC",None,"",True),
("betweenness","Compute betweenness","Bool",None,True,False),
("bidir","Betweenness is bidirectional","Bool",None,False,False),
("junctions","Compute junction counts","Bool",None,False,False),
("hull","Compute convex hull statistics","Bool",None,False,False),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False),
metric_dropdown("analmet","Routing and analysis metric")]\
+radius_options()\
+weighting_options()\
+[
("zonefiles","Zone table input csv files","MultiInFile","csv","",False),
("odfile","Origin Destination Matrix input file","InFile","csv","",False),
("custommetric","Custom metric field","Field",("Numeric","input"),"",False),
("disable","Disable lines (field name or expression)","Text",None,"",False),
("oneway","One way restrictions","Field",("Numeric","input"),"",False),
("intermediates","Intermediate link filter (field name or expression)","Text",None,"",False),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"net":args["output"]}
syntax["config"] = "start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"metric=%(analmet)s;"\
"custommetric=%(custommetric)s;disable=%(disable)s;intermediates=%(intermediates)s;oneway=%(oneway)s;"\
"%(advanced)s;"%args\
+ weighting_config(args) + radius_config(args)
for arg,conf,invert in [("betweenness","nobetweenness",True),("junctions","nojunctions",True),("hull","nohull",True),("bidir","bidir",False)]:
boolval = args[arg]
if invert:
boolval = not boolval
if boolval:
syntax["config"]+=";%s"%conf
syntax["inputs"]["tables"]=""
if args["odfile"]!="":
syntax["config"]+=";odmatrix"
syntax["inputs"]["tables"]+=args["odfile"]
if args["zonefiles"]!="":
syntax["inputs"]["tables"]=",".join([syntax["inputs"]["tables"]]+args["zonefiles"].split(";"))
syntax["config"] = quote(syntax["config"])
return syntax
class sDNAIntegralFromOD(object):
alias = "Integral from OD Matrix (assignment model)"
desc = \
"""<p>Runs Integral Analysis from a pre-specified Origin-Destination Matrix, allowing import from other models.
"""
category = "Analysis"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("odfile","Origin Destination Matrix input file","InFile","csv","",True),
("output","Output features","OFC",None,"",True),
("bidir","Betweenness is bidirectional","Bool",None,False,False),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False),
metric_dropdown("analmet","Routing and analysis metric"),
("custommetric","Custom metric field","Field",("Numeric","input"),"",False),
("disable","Disable lines (field name or expression)","Text",None,"",False),
("oneway","One way restrictions","Field",("Numeric","input"),"",False),
("intermediates","Intermediate link filter (field name or expression)","Text",None,"",False),
("zonedist","Zone weight distribution expression","Text",None,"euc",True),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"],"tables":args["odfile"]}
syntax["outputs"] = {"net":args["output"]}
syntax["config"] = "odmatrix;zonedist=%(zonedist)s;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"metric=%(analmet)s;nojunctions;nohull;radii=n;"\
"custommetric=%(custommetric)s;disable=%(disable)s;intermediates=%(intermediates)s;oneway=%(oneway)s;"\
"%(advanced)s;"%args
for arg,conf,invert in [("bidir","bidir",False)]:
boolval = args[arg]
if invert:
boolval = not boolval
if boolval:
syntax["config"]+=";%s"%conf
syntax["config"] = quote(syntax["config"])
return syntax
class sDNASkim(object):
alias = "Skim Matrix"
desc = \
"""<p>Captures mean distance between zones as a skim matrix for input into external modelling tools.
"""
category = "Analysis"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output Skim Matrix File","OutFile","csv","",True),
("skimorigzone","Origin zone field","Field",("String","input"),"",True),
("skimdestzone","Destination zone field","Field",("String","input"),"",True),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False),
metric_dropdown("analmet","Routing and analysis metric"),
("custommetric","Custom metric field","Field",("Numeric","input"),"",False)]\
+weighting_options()\
+[("disable","Disable lines (field name or expression)","Text",None,"",False),
("oneway","One way restrictions","Field",("Numeric","input"),"",False),
("odfile","Origin Destination Matrix input file","InFile","csv","",False),
("zonefiles","Zone table input csv files","MultiInFile","csv","",False),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"skim":args["output"]}
syntax["config"] = "outputskim;skipzeroweightorigins;skimorigzone=%(skimorigzone)s;skimdestzone=%(skimdestzone)s;start_gs=%(start_gs)s;end_gs=%(end_gs)s;radii=n;nobetweenness;nojunctions;nohull;nonetdata;"\
"metric=%(analmet)s;"\
"custommetric=%(custommetric)s;disable=%(disable)s;oneway=%(oneway)s;"\
"%(advanced)s;"%args\
+ weighting_config(args)
syntax["inputs"]["tables"]=""
if args["odfile"]!="":
syntax["config"]+=";odmatrix"
syntax["inputs"]["tables"]+=args["odfile"]
if args["zonefiles"]!="":
syntax["inputs"]["tables"]=",".join([syntax["inputs"]["tables"]]+args["zonefiles"].split(";"))
syntax["config"] = quote(syntax["config"])
return syntax
class sDNAGeodesics(object):
alias = "Geodesics"
desc = "<p>Outputs the geodesics (shortest paths) used by sDNA Integral analysis."
category = "Analysis geometry"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output geodesic polyline features","OFC",None,"",True),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False),
("origins","Origin IDs (leave blank for all)","Text",None,"",False),
("destinations","Destination IDs (leave blank for all)","Text",None,"",False),
metric_dropdown("analmet","Routing and analysis metric"),
("custommetric","Custom metric field","Field",("Numeric","input"),"",False)]\
+weighting_options()+\
[("odfile","Origin Destination Matrix input file","InFile","csv","",False),
("zonefiles","Zone table input csv files","MultiInFile","csv","",False)]\
+radius_options()+\
[("disable","Disable lines (field name or expression)","Text",None,"",False),
("oneway","One way restrictions","Field",("Numeric","input"),"",False),
("intermediates","Intermediate link filter (field name or expression)","Text",None,"",False),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"geodesics":args["output"]}
syntax["config"] = "start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"metric=%(analmet)s;"\
"custommetric=%(custommetric)s;"\
"nonetdata;outputgeodesics;"\
"origins=%(origins)s;destinations=%(destinations)s;disable=%(disable)s;oneway=%(oneway)s;intermediates=%(intermediates)s;"\
"%(advanced)s;"%args\
+weighting_config(args) + radius_config(args)
syntax["inputs"]["tables"]=""
if args["odfile"]!="":
syntax["config"]+=";odmatrix"
syntax["inputs"]["tables"]+=args["odfile"]
if args["zonefiles"]!="":
syntax["inputs"]["tables"]=",".join([syntax["inputs"]["tables"]]+args["zonefiles"].split(";"))
syntax["config"] = quote(syntax["config"])
return syntax
class sDNAHulls(object):
alias = "Convex Hulls"
desc = "<p>Outputs the convex hulls of network radii used in sDNA Integral analysis."
category = "Analysis geometry"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output hull polygon features","OFC",None,"",True),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False)]\
+radius_options(False)+\
[("origins","Origin IDs (leave blank for all)","Text",None,"",False),
("disable","Disable lines (field name or expression)","Text",None,"",False),
("oneway","One way restrictions","Field",("Numeric","input"),"",False),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"hulls":args["output"]}
syntax["config"] = "nobetweenness;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"nonetdata;outputhulls;"\
"origins=%(origins)s;disable=%(disable)s;oneway=%(oneway)s;"\
"%(advanced)s;"%args + radius_config(args)
syntax["config"] = quote(syntax["config"])
return syntax
class sDNANetRadii(object):
alias = "Network Radii"
desc = "<p>Outputs the network radii used in sDNA Integral analysis."
category = "Analysis geometry"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output net radius multipolyline features","OFC",None,"",True),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False)]\
+radius_options()+\
[("origins","Origin IDs (leave blank for all)","Text",None,"",False),
("disable","Disable lines (field name or expression)","Text",None,"",False),
("oneway","One way restrictions","Field",("Numeric","input"),"",False),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"netradii":args["output"]}
syntax["config"] = "nobetweenness;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"nonetdata;outputnetradii;"\
"origins=%(origins)s;disable=%(disable)s;oneway=%(oneway)s;"\
"%(advanced)s;"%args + radius_config(args)
syntax["config"] = quote(syntax["config"])
return syntax
class sDNAAccessibilityMap(object):
alias = "Specific Origin Accessibility Maps"
desc = "<p>Outputs accessibility maps for specific origins."
category = "Analysis"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output polyline features","OFC",None,"",True),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False),
("origins","Origin IDs (leave blank for all)","Text",None,"",False),
metric_dropdown("analmet","Routing and analysis metric"),
("custommetric","Custom metric field","Field",("Numeric","input"),"",False),
("disable","Disable lines (field name or expression)","Text",None,"",False),
("oneway","One way restrictions","Field",("Numeric","input"),"",False),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"destinations":args["output"]}
syntax["config"] = "start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"metric=%(analmet)s;"\
"custommetric=%(custommetric)s;"\
"nonetdata;outputdestinations;"\
"origins=%(origins)s;disable=%(disable)s;oneway=%(oneway)s;"\
"%(advanced)s;"%args
syntax["config"] = quote(syntax["config"])
return syntax
class sDNAPrepare(object):
alias = "Prepare Network"
desc = \
"""<p>Prepares spatial networks for analysis by checking and optionally repairing various kinds of error.
<p><b>Note that sDNA Prepare Network only provides a small subset of the functions needed for network preparation.</b> Other free tools, combined with a good understanding of the subject, can fill the gap. <b>Reading the Network Preparation chapter of the sDNA Manual is strongly advised.</b>
<p>The errors fixed by Prepare Network are:
<ul>
<li><b>endpoint near misses</b> (XY and Z tolerance specify how close a near miss)
<li><b>duplicate lines</b>
<li><b>traffic islands</b> (requires traffic island field set to 0 for no island and 1 for island). Traffic island lines are straightened; if doing so creates duplicate lines then these are removed.
<li><b>split links</b><i>. Note that fixing split links is no longer necessary as of sDNA 3.0 so this is not done by default</i>
<li><b>isolated systems</b>
</ul>
<p>Optionally, numeric data can be preserved through a prepare operation by providing the desired field names.
"""
category = "Preparation"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output polyline features","OFC",None,"",True),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False),
("action","Action","Text",["DETECT","REPAIR"],"REPAIR",True),
("nearmisses","Endpoint near misses","Bool",None,True,True),
("trafficislands","Traffic islands","Bool",None,False,True),
("duplicates","Duplicate polylines","Bool",None,True,True),
("isolated","Isolated systems","Bool",None,True,True),
("splitlinks","Split links","Bool",None,False,True),
("tifield","Traffic island field","Field",("Numeric","input"),"",False),
("preserve_absolute","Absolute data to preserve (numeric field names separated by commas)","Text",None,"",False),
("preserve_unitlength","Unit length data to preserve (numeric field names separated by commas)","Text",None,"",False),
("preserve_text","Text data to preserve (text field names separated by commas)","Text",None,"",False),
("xytol","Custom XY Tolerance","Text",None,"",False),
("ztol","Custom Z Tolerance","Text",None,"",False)
]
def getSyntax(self, args):
boollist = [x for x in ["nearmisses","duplicates","isolated","trafficislands","splitlinks"]
if args[x]]
args["boolstring"] = ";".join(boollist)
syntax = {}
syntax["command"] = "sdnaprepare"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"net":args["output"],"errors":args["output"]}
syntax["config"] = "start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"xytol=%(xytol)s;ztol=%(ztol)s;"\
"action=%(action)s;"\
"%(boolstring)s;"\
"island=%(tifield)s;"\
"data_absolute=%(preserve_absolute)s;data_unitlength=%(preserve_unitlength)s;data_text=%(preserve_text)s"\
%args
syntax["config"] = quote(syntax["config"])
return syntax
class sDNALineMeasures(object):
alias = "Individual Line Measures"
desc = \
"""<p>Outputs connectivity, bearing, euclidean, angular and hybrid metrics for individual links.
<p>Connectivity output is useful for checking errors."""
category = "Preparation"
def getInputSpec(self):
return [("input","Input polyline features","FC","Polyline","",True),
("output","Output features","OFC",None,"",True),
("start_gs","Start grade separation","Field",("Numeric","input"),"",False),
("end_gs","End grade separation","Field",("Numeric","input"),"",False),
metric_dropdown("analmet","Routing and analysis metric")]\
+weighting_options()+\
[("custommetric","Custom metric field","Field",("Numeric","input"),"",False),
("zonefiles","Zone table input csv files","MultiInFile","csv","",False),
("advanced","Advanced config","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnaintegral"
syntax["inputs"] = {"net":args["input"]}
syntax["outputs"] = {"net":args["output"]}
syntax["config"] = "linkonly;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"\
"metric=%(analmet)s;"\
"custommetric=%(custommetric)s;"\
"%(advanced)s;"%args\
+ weighting_config(args)
syntax["config"] = quote(syntax["config"])
if args["zonefiles"]!="":
syntax["inputs"]["tables"]=",".join(args["zonefiles"].split(";"))
return syntax
class sDNALearn(object):
alias = "Learn"
desc = \
"""<p>Uses measured data to calibrate an sDNA model ready for prediction. Proposed models are tested using cross-validation. The available models are
<ul>
<li>Single best variable - performs bivariate regression of target against all variables and picks single predictor with best cross-validated fit</li>
<li>Multiple variables - Regularized multivariate lasso regression</li>
<li>All variables - Regularized multivariate ridge regression</li>
</ul>
<p>Optionally, variables to use and transform can be specified using regular expressions (regex). These follow the Python regex syntax. The equivalent to a wildcard is
<pre>.*</pre>
<p>thus for example to test Betweenness variables (from sDNA Integral) over all radii you could specify
<pre>Bt.*</pre>
<p>This would match Bt1000, Bt2000, Bt300c, etc.
<p>Optionally, the best model can be saved as a model file to be used by sDNA Predict.
<p>Weighting lambda weights data points by y^lambda/y. Setting to 1 implies unweighted regression. Setting to around 0.7 can improve GEH statistic.
<p>Regression lambda if set should specify min,max regularization parameter for multivariate regression.
"""
category = "Calibration"
def getInputSpec(self):
return [("input","Input features","FC",None,"",True),
("output","Output model file","OutFile","csv","",False),
("resids","Output residual features","OFC",None,"",False),
("target","Target variable","Field",("Numeric","input"),"",True),
("predictors","Predictor variables","MultiField",("Numeric","input"),"",False),
("regex","Predictor variable regex","Text",None,"",False),
("algorithm","Learning algorithm","Text",["Single_best_variable","Multiple_variables","All_variables"],"Single_best_variable",True),
("intercept","Use intercept in multivariate models","Bool",None,False,False),
("bctarget","Box-Cox transform target variable","Bool",None,False,False),
("bcregex","Regex for predictor variables to Box-Cox transform","Text",None,"",False),
("weightlambda","Weighting lambda","Text",None,"1",False),
("nfolds","Number of folds for cross-validation","Text",None,"7",True),
("reps","Number of repetitions for bootstrapping","Text",None,"50",True),
("gehtime","Time interval for measurements (in hours, for computing GEH)","Text",None,"1",True),
("reglambda","Regularization lambda min,max","Text",None,"",False)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnalearn"
syntax["inputs"] = {"calibfile":args["input"]}
syntax["outputs"] = {"modelout":args["output"],"resids":args["resids"]}
syntax["config"] = "--target %(target)s --mode %(algorithm)s --nfolds %(nfolds)s --weightlambda %(weightlambda)s --reps %(reps)s --gehtime %(gehtime)s --bcregex \"%(bcregex)s\""%args
if args["predictors"]:
syntax["config"] += " --vars \""+args["predictors"]+"\""
if args["regex"]:
syntax["config"] += " --varregex \""+args["regex"]+"\""
if args["bctarget"]:
syntax["config"] += " --boxcoxtarget"
if args["intercept"]:
syntax["config"] += " --intercept"
if args["reglambda"].strip() != "":
syntax["config"] += " --reglambda " + args["reglambda"]
return syntax
class sDNAPredict(object):
alias = "Predict"
desc = "<p>Uses a model file created by sDNA Learn to predict unknown data."
category = "Calibration"
def getInputSpec(self):
return [("input","Input features","FC",None,"",True),
("output","Output features","OFC",None,"",True),
("predvar","Prediction variable name","Text",None,"prediction",True),
("modelfile","Model file from sDNA Learn","InFile","csv","",True)
]
def getSyntax(self, args):
syntax = {}
syntax["command"] = "sdnapredict"
syntax["inputs"] = {"infile":args["input"]}
syntax["outputs"] = {"outfile":args["output"]}
syntax["config"] = '--predvarname %(predvar)s --modelfile "%(modelfile)s"'%args
return syntax
def get_tools():
return [sDNAIntegral,sDNASkim,sDNAIntegralFromOD,sDNAGeodesics,sDNAHulls,sDNANetRadii,sDNAAccessibilityMap,sDNAPrepare,sDNALineMeasures,sDNALearn,sDNAPredict]
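# --- Illustrative usage (a minimal sketch, not part of the toolbox above) ---
# Shows how a caller might ask one of the tool classes for the command syntax
# it would run. The argument values are invented for illustration; the keys
# follow sDNAPrepare.getInputSpec(), and quote() is assumed to be the helper
# already used by the getSyntax() methods above.
def _example_prepare_syntax():
    example_args = {
        "input": "network.shp", "output": "network_prepared.shp",
        "start_gs": "", "end_gs": "", "action": "REPAIR",
        "nearmisses": True, "duplicates": True, "isolated": True,
        "trafficislands": False, "splitlinks": False, "tifield": "",
        "preserve_absolute": "", "preserve_unitlength": "", "preserve_text": "",
        "xytol": "", "ztol": "",
    }
    # Returns a dict with "command", "inputs", "outputs" and a quoted "config"
    # string containing entries such as "action=REPAIR;nearmisses;duplicates;isolated"
    return sDNAPrepare().getSyntax(example_args)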
|
python
|
"""
This module contains helpers for the XGBoost python wrapper: https://xgboost.readthedocs.io/en/latest/python/index.html
The largest part of the module are helper classes which make
using a validation set to select the number of trees transparent.
"""
import logging
logger = logging.getLogger(__name__)
import joblib
import numpy as np
import pathlib
import sklearn.exceptions
import sklearn.metrics
import sklearn.preprocessing
import tempfile
import xgboost as xgb
import toolz.dicttoolz
import pyllars.shell_utils as shell_utils
import pyllars.validation_utils as validation_utils
from typing import Optional
def xgbooster_predict_proba(
booster:xgb.Booster,
d_x:xgb.DMatrix) -> np.ndarray:
""" Simulate the `predict_proba` interface from sklearn
This function will only work as expected if `booster` has been
training using the `binary:logistic` loss.
Parameters
----------
booster : xgboost.Booster
The trained booster
d_x : xgboost.DMatrix
The dataset
Returns
-------
y_proba_pred : numpy.ndarray
The probabilistic predictions. The shape of the array
is (n_row, 2).
"""
y_score = booster.predict(d_x)
y_false = 1-y_score
size = (d_x.num_row(), 2)
y_probas_pred = np.zeros(size)
y_probas_pred[:,0] = y_false
y_probas_pred[:,1] = y_score
return y_probas_pred
def xgbooster_to_json(booster:xgb.Booster) -> str:
""" Get the JSON representation of `booster`
Parameters
----------
booster : xgboost.Booster
The trained booster
Returns
-------
booster_json : str
The json string
"""
fd, fname = tempfile.mkstemp(suffix=".json")
booster.save_model(fname)
with open(fname) as b_f:
booster_json = b_f.readlines()
shell_utils.remove_file(fname)
booster_json = booster_json[0]
return booster_json
def xgbooster_from_json(booster_json:str) -> xgb.Booster:
""" Create a booster based on the json string
Parameters
----------
booster_json : str
The json string
Returns
-------
booster : xgboost.Booster
The trained booster
"""
fd, fname = tempfile.mkstemp(suffix=".json")
with open(fname, 'w') as b_f:
b_f.writelines(booster_json)
booster = xgb.Booster()
booster.load_model(fname)
shell_utils.remove_file(fname)
return booster
class XGBClassifierWrapper(object):
""" This class wraps xgboost to facilitate transparent
use of a validation set to select the number of trees.
It also optionally scales the input features. (In principle,
it is not necessary to scale input features for trees. Still,
    in practice, it anecdotally helps, and the theory also suggests
    that it should not hurt.)
**N.B.** Currently, this class is hard-coded to use (binary) AUC
as the metric for selecting the best model on the validation set.
Attributes
----------
num_boost_round : int
The number of boosting rounds
scale_features : bool
Whether to fit a StandardScaler on the training data
and use it to transform the validation and test data.
validation_period : int
The number of training iterations (that is, the number of new
trees) between checking the validation set.
name : str
A name for use in logging statements.
booster_ : xgboost.Booster
The trained model.
best_booster_ : xgboost.Booster
The best booster found according to performance on the
validation set.
scaler_ : sklearn.preprocessing.StandardScaler
The scaler fit on the training data set.
**kwargs : key=value pairs
Additional keyword arguments are passed through to the
xgboost.train constructor.
"""
def __init__(
self,
num_boost_round:int=10,
scale_features:bool=False,
validation_period:int=1,
name:str="XGBClassiferWrapper",
**kwargs):
self._initialize()
self.num_boost_round = num_boost_round
self.scale_features = scale_features
self.validation_period = validation_period
self.name = name
self.kwargs = kwargs
def _initialize(self):
self.num_boost_round = None
self.scale_features = None
self.validation_period = None
self.name = None
self.kwargs = None
self.xgb_hyperparams = dict()
def log(self, msg, level=logging.INFO):
msg = "[{}]: {}".format(self.name, msg)
logger.log(level, msg)
def _validate(self, xgboost_callback_env):
iteration = xgboost_callback_env.iteration
booster = xgboost_callback_env.model
y_score = booster.predict(self._dval)
# TODO: allow other validation metrics
validation_roc = sklearn.metrics.roc_auc_score(
y_true=self._dval.get_label(),
y_score=y_score
)
msg = "{}\tValidation AUC: {:.6f}".format(iteration, validation_roc)
self.log(msg, logging.DEBUG)
if validation_roc > self._best_validation_roc:
self._best_validation_roc = validation_roc
self.best_booster_ = booster.copy()
msg = "*** New Best ***"
self.log(msg, logging.DEBUG)
def _callback(self, xgboost_callback_env):
iteration = xgboost_callback_env.iteration
if iteration % self.validation_period == 0:
self._validate(xgboost_callback_env)
def fit(self,
X_t:np.ndarray,
y_t:np.ndarray,
X_v:Optional[np.ndarray]=None,
y_v:Optional[np.ndarray]=None):
""" Fit a model
Parameters
----------
{X,y}_t : numpy.ndarray
            The training data.
{X,y}_v : typing.Optional[numpy.ndarray]
The validation data
Returns
-------
self
"""
if self.scale_features:
msg = "scaling the training data"
self.log(msg)
self.scaler_ = sklearn.preprocessing.StandardScaler()
X_t = self.scaler_.fit_transform(X_t)
if X_v is not None:
msg = "scaling the validation data"
X_v = self.scaler_.transform(X_v)
else:
self.scaler_ = None
self._dtrain = xgb.DMatrix(X_t, label=y_t)
callbacks = None # we will not use any callbacks by default
if X_v is not None:
self._dval = xgb.DMatrix(X_v, label=y_v)
# we *will* use a callback if we want to use the
# validation set
callbacks = [self._callback]
# we can set these either way. they will just not be used
# if there is no validation set.
self._best_validation_roc = -np.inf
self.best_booster_ = None
msg = "training the model"
self.log(msg)
xgb_kwargs = toolz.dicttoolz.merge(self.kwargs, self.xgb_hyperparams)
self.booster_ = xgb.train(
xgb_kwargs,
self._dtrain,
self.num_boost_round,
callbacks=callbacks
)
# if we did not use a validation set, then just use the
# final learned model as the best model
if self.best_booster_ is None:
self.best_booster_ = self.booster_
return self
def predict_proba(self, X:np.ndarray) -> np.ndarray:
""" Predict the likelihood of each class.
This function will only work as expected if training
used the `binary:logistic` loss.
Parameters
----------
X : numpy.ndarray
The input data
Returns
-------
y_proba_pred : numpy.ndarray
The probabilistic predictions
"""
validation_utils.check_is_fitted(self, 'best_booster_', self.name)
if self.scaler_ is not None:
msg = "transforming the input data"
self.log(msg, logging.DEBUG)
X = self.scaler_.transform(X)
d_x = xgb.DMatrix(X)
y_proba_pred = xgbooster_predict_proba(self.best_booster_, d_x)
return y_proba_pred
def get_params(self, deep=False):
""" Get the hyperparameters and other meta data about this model
"""
params = {
'num_boost_round': self.num_boost_round,
'scale_features': self.scale_features,
'validation_period': self.validation_period,
'name': self.name
}
params.update(self.kwargs)
# we do not do anything with `deep`
return params
def set_params(self, **params):
""" Set the hyperparameters of the model
"""
# very similar to the sklearn implementation
valid_params = self.get_params(deep=True)
for k,v in params.items():
if k not in valid_params:
pass
#msg = "Invalid parameter: {}".format(k)
#raise ValueError(msg)
else:
# this is a hyperparameter for xgb
self.xgb_hyperparams[k] = v
# then this is a valid hyperparameter
setattr(self, k, v)
return self
def save_model(self, out):
""" Save the scaler (if present) and best model to disk.
This *does not* save the training or validation datasets.
"""
out = pathlib.Path(out)
out.mkdir(parents=True, exist_ok=True)
scaler_out = out / "scaler.jpkl"
joblib.dump(self.scaler_, str(scaler_out))
booster_out = out / "booster.jpkl"
joblib.dump(self.best_booster_, str(booster_out))
params_out = out / "params.jpkl"
joblib.dump(self.get_params(deep=True), str(params_out))
def __getstate__(self):
state = {
'scaler': self.scaler_,
'booster': xgbooster_to_json(self.best_booster_),
'params': self.get_params(deep=True)
}
return state
def __setstate__(self, state):
self._initialize()
self.scaler_ = state['scaler']
self.best_booster_ = xgbooster_from_json(state['booster'])
for k,v in state['params'].items():
setattr(self, k, v)
# further, set the appropriate kwargs
self.kwargs = state['params'].copy()
# remove the basic parameters
self.kwargs.pop('num_boost_round')
self.kwargs.pop('scale_features')
self.kwargs.pop('validation_period')
self.kwargs.pop('name')
@classmethod
def load_model(klass, f):
""" Load the scaler, model, and hyperparameters from disk
"""
in_f = pathlib.Path(f)
params_in = in_f / "params.jpkl"
params = joblib.load(str(params_in))
model = klass(**params)
scaler_in = in_f / "scaler.jpkl"
scaler = joblib.load(str(scaler_in))
model.scaler_ = scaler
booster_in = in_f / "booster.jpkl"
booster = joblib.load(str(booster_in))
model.best_booster_ = booster
return model
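# --- Illustrative usage (a minimal sketch, not part of the module above) ---
# Shows how the wrapper might be fit with a held-out validation set so the
# best number of boosting rounds is picked by validation AUC. The synthetic
# dataset and the hyperparameter values below are assumptions made purely
# for illustration.
def _example_usage() -> np.ndarray:
    import sklearn.datasets
    import sklearn.model_selection

    X, y = sklearn.datasets.make_classification(n_samples=500, random_state=0)
    X_t, X_v, y_t, y_v = sklearn.model_selection.train_test_split(
        X, y, test_size=0.25, random_state=0)

    clf = XGBClassifierWrapper(
        num_boost_round=50,
        scale_features=True,
        validation_period=5,
        # remaining keyword arguments are passed through to xgb.train
        objective='binary:logistic',
        max_depth=3,
    )
    clf.fit(X_t, y_t, X_v, y_v)

    # probabilistic predictions with shape (n_rows, 2)
    return clf.predict_proba(X_v)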
|
python
|
#! /usr/bin/python3.5
# -*- coding: utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt
def subdivied(Liste, Size):
"""
    Split the list Liste into the sliding windows
    [x_i, x_{i+1}, ..., x_{i+Size-1}], i.e. windows of length Size.
"""
res = []
    # for each element of the list
for index, ele in enumerate(Liste):
res_tmp = []
        # build a sub-list only while the window does not run past the end
if index + Size <= len(Liste):
for k in range(Size):
res_tmp.append(Liste[index+k])
        # append the constructed sub-list to res
res.append(res_tmp)
    # return the result
return res
def fitness(solution, K, dictionnaire):
"""
    solution = an array of {0,1} representing a candidate solution
    K = the K of the NK problem
    dictionnaire = lookup table mapping bit tuples to their values
"""
    # split the solution into sub-lists of size K+1
SubListe = subdivied(solution, K+1)
accumulateur = 0
    # accumulate the fitness contributions
for ele in SubListe:
accumulateur += dictionnaire[tuple(ele)]
return accumulateur
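# Worked example (illustrative only), using the K=1 lookup table from test_tp1:
#   d_k1 = {(0, 0): 2, (0, 1): 3, (1, 0): 2, (1, 1): 0}
#   subdivied([0, 1, 1, 0], 2) yields the windows [0, 1], [1, 1], [1, 0]
#   (assuming only full-length windows are kept), so
#   fitness([0, 1, 1, 0], 1, d_k1) = 3 + 0 + 2 = 5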
def Hamming(v1, v2):
"""
    Compute the Hamming distance between v1 and v2.
"""
d = 0
for index, ele in enumerate(v1):
d += np.abs(v1[index] - v2[index])
return d
def random_sequence(Size):
"""
    Generate a random bit sequence of length Size.
"""
res = []
for ele in range(Size):
        # use Python's built-in random generator
if random.random() < 0.5:
res.append(0)
else:
res.append(1)
return res
def neighbor(solution):
"""
    solution is a candidate solution, i.e. a bit vector of length n.
    We want its neighbourhood: the vectors that differ from it by exactly one bit.
"""
res = []
    # for each element, create a new vector with a single
    # differing bit
for index, ele in enumerate(solution):
        # add 1 modulo 2, which is equivalent to an xor
ele = (ele + 1) % 2
        # add the new solution
res_tmp = list(solution)
res_tmp[index] = ele
res.append(res_tmp)
return res
def choose_neighbor(neighbors, dictionnaire, K, sol_fitness):
"""
    Choose the best neighbour among the candidates.
"""
d = {}
    # compute the fitness of every neighbour
for ele in neighbors:
d[fitness(ele, K, dictionnaire)] = ele
    # take the maximum if it improves on the current solution, and return it
if max(d) > sol_fitness:
return [True, d[max(d)]]
    # otherwise signal that we should stop
else:
return [False]
def Hill_Climbing(N, K, dictionnaire):
"""
    K = the K of the NK-landscape
    dictionnaire = the lookup table
    Apply deterministic hill-climbing.
"""
    # generate a random initial solution
sol = random_sequence(N)
steps = 0
    # infinite loop
while(True):
        # compute the neighbours
voisins = neighbor(sol)
        # choose the best neighbour
Liste_tmp = choose_neighbor(voisins, dictionnaire, K, fitness(sol, K, dictionnaire))
        # if there is a better one, keep going
if Liste_tmp[0]:
sol = Liste_tmp[1]
steps += 1
        # otherwise stop and return the result
else:
print(sol, ":", fitness(sol, K, dictionnaire))
return [sol, steps]
def choose_neighbor_probabiliste(voisins, dictionnaire, K, fitness_sol, best_sol):
"""
    See the lab handout: neighbours are selected probabilistically, in proportion to their fitness.
"""
    # compute the fitness of the best solution so far
fitness_best = fitness(best_sol, K, dictionnaire)
element = [a for a in range(len(voisins))]
fitness_res = []
    # compute the fitness of all the neighbours
for ele in voisins:
fitness_res.append(fitness(ele, K, dictionnaire))
    # check whether some neighbour beats the best solution outright
if max(fitness_res) > fitness_best:
return [False, voisins[fitness_res.index(max(fitness_res))]]
    # otherwise pick one at random, weighted by fitness
else:
fitness_res = list(map(lambda x: x/sum(fitness_res), fitness_res))
return [True, voisins[np.random.choice(element, p=fitness_res)]]
def Hill_Climbing_probabiliste(N, K, dictionnaire, Steps):
"""
    Same parameters as Hill_Climbing above,
    plus the number of steps before stopping.
"""
    # generate a random solution that serves as the initial best solution
sol = random_sequence(N)
best = list(sol)
    # conditional loop
while(Steps > 0):
        # compute the neighbours
voisins = neighbor(sol)
Liste_tmp = choose_neighbor_probabiliste(voisins, dictionnaire, K, fitness(sol, K, dictionnaire), best)
        # if the first element is True, we moved to a new solution,
        # but it is not the best one
if Liste_tmp[0]:
sol = list(Liste_tmp[1])
        # if the first element is False, we found a new best solution
else:
best = list(Liste_tmp[1])
Steps -= 1
print(best, ":", fitness(best, K, dictionnaire))
return best
def Hill_50_times_proba(N, K, dictionnaire, Steps):
"""
    Run the probabilistic hill-climbing 50 times
    with the parameters fixed by the lab. Results are stored in a list.
"""
a = 50
final = []
while(a):
a -= 1
        # collect the results as we go
final.append(Hill_Climbing_probabiliste(N, K, dictionnaire, 10*Steps))
return final
def Hill_50_times(N, K, dictionnaire):
"""
    Run the deterministic hill-climbing 50 times
    with the parameters fixed by the lab. Results are stored in a list.
"""
a = 50
final = []
while(a):
a -= 1
        # collect the results
final.append(Hill_Climbing(N, K, dictionnaire))
return final
def test_tp1():
d_k0 = {(0,): 2, (1,): 1}
d_k1 = {(0, 0): 2, (0, 1): 3, (1, 0): 2, (1, 1): 0}
d_k2 = {(0, 0, 0): 0, (0, 0, 1): 1, (0, 1, 0): 1, (0, 1, 1): 0, (1, 0, 0): 2, (1, 0, 1): 0, (1, 1, 0): 0, (1, 1, 1): 0}
final_0 = Hill_50_times(21, 0, d_k0)
tmp = np.matrix(final_0)
tmp = sum(tmp[:, 1])
moyenne_steps_0 = tmp[0, 0] / 50
print("Moyenne pas K=0 : ", moyenne_steps_0)
final_1 = Hill_50_times(21, 1, d_k1)
tmp = np.matrix(final_1)
tmp = sum(tmp[:, 1])
moyenne_steps_1 = tmp[0, 0] / 50
print("Moyenne pas K=1 : ", moyenne_steps_1)
final_2 = Hill_50_times(21, 2, d_k2)
tmp = np.matrix(final_2)
tmp = sum(tmp[:, 1])
moyenne_steps_2 = tmp[0, 0] / 50
print("Moyenne pas K=2 : ", moyenne_steps_2)
final_prob_0 = Hill_50_times_proba(21, 0, d_k0, moyenne_steps_0)
print("Fin proba K=0")
final_prob_1 = Hill_50_times_proba(21, 1, d_k1, moyenne_steps_1)
print("Fin proba K=1")
final_prob_2 = Hill_50_times_proba(21, 2, d_k2, moyenne_steps_2)
print("Fin proba K=2")
    # the maximum Hamming distance is N, i.e. 21; collect the pairwise
    # distances below so they can be plotted as histograms
d_d_0 = []
d_d_1 = []
d_d_2 = []
d_p_0 = []
d_p_1 = []
d_p_2 = []
for i in range(50):
for j in range(50-i):
d_d_0.append(Hamming(final_0[i][0], final_0[j][0]))
d_d_1.append(Hamming(final_1[i][0], final_1[j][0]))
d_d_2.append(Hamming(final_2[i][0], final_2[j][0]))
d_p_0.append(Hamming(final_prob_0[i], final_prob_0[j]))
d_p_1.append(Hamming(final_prob_1[i], final_prob_1[j]))
d_p_2.append(Hamming(final_prob_2[i], final_prob_2[j]))
plt.hist(d_d_0)
print("d_d_0")
plt.show()
plt.hist(d_d_1)
print("d_d_1")
plt.show()
plt.hist(d_d_2)
print("d_d_2")
plt.show()
plt.hist(d_p_0)
print("d_p_0")
plt.show()
plt.hist(d_p_1)
print("d_p_1")
plt.show()
plt.hist(d_p_2)
print("d_p_2")
plt.show()
def interface():
"""
    A small text interface.
    Reads an input to choose the method type,
    then the value of K.
"""
    # the lookup-table values
d_k0 = {(0,): 2, (1,): 1}
d_k1 = {(0, 0): 2, (0, 1): 3, (1, 0): 2, (1, 1): 0}
d_k2 = {(0, 0, 0): 0, (0, 0, 1): 1, (0, 1, 0): 1, (0, 1, 1): 0, (1, 0, 0): 2, (1, 0, 1): 0, (1, 1, 0): 0, (1, 1, 1): 0}
print("[1] : Hill-Climbing déterministe (default)\n[2] : Hill-Climbing probabiliste\n")
i = input()
    if i != '2':
print("Hill-Climbing déterministe")
k = input("K=")
if str(k) == '0':
Hill_Climbing(21, 0, d_k0)
elif str(k) == '1':
Hill_Climbing(21, 1, d_k1)
elif str(k) == '2':
Hill_Climbing(21, 2, d_k2)
else:
print("Erreur\n")
else:
print("Hill-Climbing probabiliste")
k = input("K=")
if str(k) == '0':
Hill_Climbing_probabiliste(21, 0, d_k0, 10 * 10)
elif str(k) == '1':
Hill_Climbing_probabiliste(21, 1, d_k1, 10 * 6)
elif str(k) == '2':
Hill_Climbing_probabiliste(21, 2, d_k2, 10 * 6)
else:
print("Erreur\n")
if __name__ == '__main__':
while(True):
interface()
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 16:22:45 2019
@author: Soumitra
"""
import math
import numpy as np
import numpy.fft as f
import matplotlib.pyplot as plt
n = np.arange(12);
x = ((-1)**n)*(n+1)
plt.xlabel('n');
plt.ylabel('x[n]');
plt.title(r'Plot of DT signal x[n]');
plt.stem(n, x);
#dft
n = np.arange(12);
x = ((-1)**n)*(n+1)
y = f.fft(x)
print(y)
#magnitude vs frequency
import cmath as cm
p=[]
for i in range(12):
p.append(cm.phase(y[i]))
m=[]
for i in range(12):
m.append(abs(y[i]))
k = [0]
for i in range(11):
k.append(((i+1)*math.pi)/12)
plt.xlabel('k');
plt.ylabel('magnitude');
plt.title(r'Plot of mag vs frequency');
plt.stem(k, m);
|
python
|
"""Iterative Compression Module."""
from experiments import RESULTS_DIR, TABLES_DIR
from pathlib import Path
# Paths
IC_DIR = Path(__file__).parent
SELF_COMPARISON_DATA_PATH = RESULTS_DIR / 'ic_preprocessing_level.csv'
IC_TABLE_FORMAT_STR = 'timeout_{}.tex'
IC_TABLES_DIR = TABLES_DIR / 'ic'
IC_TABLES_DIR.mkdir(exist_ok=True)
BASELINE_FILE = str(RESULTS_DIR / 'ic_baseline_experiment_results.csv')
# Constants
PREPROCESSING_LEVELS = [0, 1, 2]
|
python
|
"""Examples showing how one might use the Result portion of this library."""
import typing as t
import requests
from safetywrap import Result, Ok, Err
# ######################################################################
# One: Validation Pipeline
# ######################################################################
# Sometimes you've got a bunch of validation functions that you would
# like to run on some data, and you want to bail early if any of them
# fails. Particularly when you want to send back some information about
# what failed to validate, you're forced to e.g. return a 2-tuple of
# validation status and a string with info, or to raise a custom
# exception with that data ensconced inside. In either case, you wind
# up having to do a lot of if/else or try/except logic in the calling
# context. The Result type allows you to get rid of all that extra
# boilerplate and get down to what matters: defining a pipeline of
# validation errors with early exiting.
# ######################################################################
class Validator:
"""A validator for validating hopefully valid things.
    In this case, let's say we've got a string we want to validate.
We want the string to be at least X characters long, to not contain
any disallowed characters, to start with a capital letter, to end
with a period, and to contain the substring "shabazz".
"""
MIN_LEN = 10
DISALLOWED_CHARS = ("^", "_", "O")
MUST_CONTAIN = "shabazz"
def validated(self, string: str) -> Result[str, str]:
"""Return the validated string or any validation error.
We return a Result, where the Ok value is the validated string,
and the Err value is a descriptive string.
"""
# Because all of our validation methods return Results, we can
# easily chain them.
return (
self._validate_length(string)
.and_then(self._validate_chars) # and_then == flatmap
.and_then(self._validate_capitalized)
.and_then(self._validate_end_char)
.and_then(self._validate_substring)
# Because we're returning a Result, this is all we need to
            # do! We don't even have to figure out if there was an error
# here, because any error would have short-circuited the
# pipeline and will get returned by this method.
)
# Because we're returning a Result, we are _forcing_ the caller
        # to deal with the fact that validation might fail. The only
# way they can get the result back is by calling `.unwrap()`
# or a similar method, checking `is_ok()` first, or otherwise
# continuing to pipeline on it and pass the Result on up the
# chain.
def _validate_length(self, string: str) -> Result[str, str]:
"""Check that all the strings are of the proper length."""
if len(string) < self.MIN_LEN:
return Err("String is too short")
return Ok(string)
def _validate_chars(self, string: str) -> Result[str, str]:
"""Check that none of the strings have disallowed chars."""
if set(string).intersection(set(self.DISALLOWED_CHARS)):
return Err("String has disallowed chars")
return Ok(string)
def _validate_capitalized(self, string: str) -> Result[str, str]:
"""Check that the starting character is a capital."""
if len(string) > 0 and not string[0].isupper():
return Err("Starting character is not uppercase.")
return Ok(string)
def _validate_end_char(self, string: str) -> Result[str, str]:
"""Check the string ends with a period."""
if len(string) > 0 and string[-1] != ".":
return Err("String does not end with a period")
return Ok(string)
def _validate_substring(self, string: str) -> Result[str, str]:
"""Check the string has the required substring."""
if self.MUST_CONTAIN not in string:
return Err(f"String did not contain '{self.MUST_CONTAIN}'")
return Ok(string)
def test_self(self) -> None:
"""Quick test to make sure we're not crazy."""
goods = ("AshabazzB.", "Abshabazz.")
bads = ("shabazz", "Ab.", "Ashabazz^B.")
assert all(map(lambda g: self.validated(g).is_ok(), goods))
assert all(map(lambda g: self.validated(g).is_err(), bads))
print("Validator.test_self: everything as expected!")
# ######################################################################
# Two: Wrangling Exceptions
# ######################################################################
# It's common in FP-related tutorials to hear exceptions described as
# children throwing tantrums, but it's really worse than that. Calling
# a method that might throw involves either figuring out in detail any
# exception that might be thrown or catching every exception all
# william-nilliam and then dealing with them generically. Doing either
# of the two means that you've got to litter your code with try/except
# blocks, forcing you to consider what the _implementation_ of the thing
# you're using is rather than what _interface_ you're trying to create.
# Using Result.of can make life easier.
# ######################################################################
class CatFactGetter:
"""Do something fraught with error.
    Let's forget about all the possible errors and just care about what
we're trying to do, which is to get a cat fact.
NOTE: this requires the `requests` library to be installed
"""
def get_fact(self) -> str:
"""Get a cat fact!"""
return (
# Create a Result from making our GET request.
# Now we can start chaining!
Result.of(
requests.get, "https://cat-fact.herokuapp.com/facts/random"
)
# Let's first consider the success path.
# If we got a response, it should be JSON, so let's try to parse
.and_then(lambda resp: Result.of(resp.json))
# If we successfully parsed JSON, we must have a dict, so let's
# grab our cat fact, or a useful message.
.map(
lambda parsed: t.cast(
str, parsed.get("text", "Unexpected cat fact format!")
)
)
# From here, all we need to do to consider the error case is
# convert our Err type (which for Result.of() is any exception
# that was raised) into the expected return type, which we
# do by passing the error to `str()`
.unwrap_or_else(str)
)
# Note it would also be totally reasonable to return something like
# Result[str, Exception] here! In which case you drop the final
# `.unwrap_or_else()`, and then the caller can decide what to
# do with any errors.
def get_fact_result(self) -> Result[str, Exception]:
"""Return a Result for a cat fact."""
return (
Result.of(
requests.get,
"https://cat-fact.herokuapp.com/facts/random",
# this is the default, but sometimes the type checker wants us
# to make it explicit. See python/mypy#3737 for deets.
catch=Exception,
)
.and_then(lambda resp: Result.of(resp.json))
.map(
lambda parsed: t.cast(
str, parsed.get("text", "Unexpected cat fact format!")
)
)
)
def test_get_fact(self) -> None:
"""Test getting a cat fact."""
fact = self.get_fact()
assert isinstance(fact, str)
print(fact)
def test_get_fact_result(self) -> None:
"""Test getting a cat fact as a result!
Note that here, the caller has to decide what to do with any
potential error in order to get to the cat fact.
"""
fact_res = self.get_fact_result()
fact_str = fact_res.unwrap_or_else(lambda exc: f"ERROR: {str(exc)}")
assert isinstance(fact_str, str)
if fact_res.is_err():
assert "ERROR" in fact_str
print(fact_str)
if __name__ == "__main__":
Validator().test_self()
CatFactGetter().test_get_fact()
CatFactGetter().test_get_fact_result()
|
python
|
from django.shortcuts import render, redirect, render_to_response
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.forms.util import ErrorList
from django.contrib import auth, messages
from django.conf import settings
from django.http import HttpResponseRedirect
from django.template import RequestContext
from datetime import datetime, timedelta
from djkatta.accounts.models import pass_reset_validb
from djkatta.accounts.forms import (
RegistrationForm, LoginForm, PasswordResetRequestForm,
PasswordChangeForm, PasswordResetChangeForm
)
from djkatta.accounts.utils import (
generate_random_string, get_username_from_email, get_email_from_username,
send_pass_reset_mail, reCaptcha,
)
# import logging
# template (DRY) for message box rendering
def message_box(request=None, message="Something went wrong.", redir=settings.LOGIN_URL):
messages.success(request, message)
return redirect(redir)
@csrf_protect
def login(request, *args, **kwargs):
"""Login view for User accounts"""
# Redirects user if already logged in
if request.user.is_authenticated():
redir = request.GET.get('next', None)
if not redir:
redir = settings.LOGIN_REDIRECT_URL
return redirect(redir)
else:
form = LoginForm()
if request.POST:
form = LoginForm(request.POST)
if form.is_valid():
usernm = form.cleaned_data['username'].strip()
if '@' in usernm:
usernm = usernm[:usernm.find('@')]
passwd = form.cleaned_data['password']
user = auth.authenticate(username=usernm, password=passwd)
if user and user.is_active:
# Correct password, and the user is marked "active"
auth.login(request, user)
if form.cleaned_data['login_rem']:
request.session.set_expiry(7*60*60*24)
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
else:
errors = form._errors.setdefault("username", ErrorList())
errors.append("Invalid username or password")
return render_to_response('accounts/login.html',locals(),RequestContext(request))
@csrf_protect
def register(request):
form = RegistrationForm()
if request.POST:
form = RegistrationForm(request.POST)
if form.is_valid():
usernm = form.cleaned_data['username'].strip()
if '@' in usernm:
usernm = usernm[:usernm.find('@')]
try:
user = auth.models.User.objects.get(username__iexact=usernm)
except:
user = False
if user and user.is_active:
errors = form._errors.setdefault("username", ErrorList())
errors.append("That username is already registered! "
"If you have recently registered, you need to reset your password.")
elif not usernm:
errors = form._errors.setdefault("username", ErrorList())
errors.append("Please enter a valid username.")
else:
# check for captcha response
remote_ip = request.META.get('REMOTE_ADDR', '')
captcha_response = request.POST.get('g-recaptcha-response','')
captcha_ok, captcha_msg = reCaptcha(remote_ip,captcha_response)
if captcha_ok:
passwd = generate_random_string()
email = get_email_from_username(usernm)
user = auth.models.User.objects.create_user(
username=usernm,
password=passwd,
email=email,
first_name=form.cleaned_data['first_name'].strip().title(),
last_name=form.cleaned_data['last_name'].strip().title(),
)
validb = pass_reset_validb.objects.create(username=usernm)
send_pass_reset_mail(validb.username, validb.valid_hash, reg=True)
message = "Check your Mu Sigma email for further instructions."
return message_box(request, message)
else:
errors = form._errors.setdefault("username", ErrorList())
errors.append("Invalid captcha request.")
errors.append(captcha_msg)
return render_to_response('accounts/register.html', locals(),
RequestContext(request))
def check_mail(request):
message = "Registration successful! Check your email for further instructions."
return message_box(request, message)
@login_required
@csrf_protect
def password_change_form(request):
if not request.POST:
form = PasswordChangeForm()
# logging.error('pass change')
return render_to_response('accounts/password_change_form.html',
locals(), RequestContext(request))
else:
form = PasswordChangeForm(request.POST)
if form.is_valid():
if request.user.check_password(form.cleaned_data['password_old']):
request.user.set_password(form.cleaned_data['password'])
request.user.save()
auth.logout(request)
return redirect(reverse('user:password_change_success'))
else:
# form = PasswordChangeForm()
form.add_error("password_old", "Original password is incorrect")
return render_to_response('accounts/password_change_form.html',
locals(), RequestContext(request))
def password_change_success(request):
return message_box(
request,
"Your password was successfully changed. You have been logged out."
)
# reset request validation function
def validate_pass_reset_req(username="", given_hash="", delete=False):
if username and given_hash:
try:
reset_req = pass_reset_validb.objects.filter(username=username)[0]
if delete:
reset_req.delete()
else:
if all((reset_req.valid_hash == given_hash,
reset_req.valid_upto >= datetime.today())):
return True
except pass_reset_validb.DoesNotExist:
return None
@csrf_protect
def password_reset_req(request):
"""Landing page."""
if request.POST:
form = PasswordResetRequestForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username'].strip()
if '@' in username:
username = username[:username.find('@')]
try:
reset_req = pass_reset_validb.objects.get(username=username)
if reset_req.valid_upto:
reset_req.valid_upto = datetime.today() + timedelta(days=1)
reset_req.save()
except pass_reset_validb.DoesNotExist:
reset_req = pass_reset_validb.objects.create(username=username)
send_pass_reset_mail(reset_req.username, reset_req.valid_hash)
message = "Check your Mu Sigma email for further instructions."
return message_box(request, message)
else:
form = PasswordResetRequestForm()
return render_to_response('accounts/password_reset_req.html', locals(),
RequestContext(request))
@csrf_protect
def password_reset_change(request, username="", hash=""):
if not request.POST:
form = PasswordResetChangeForm()
return render_to_response('accounts/password_reset_change.html',
locals(), RequestContext(request))
else:
form = PasswordResetChangeForm(request.POST)
if form.is_valid():
try:
if validate_pass_reset_req(username, hash):
user = auth.models.User.objects.get(username=username)
user.set_password(form.cleaned_data['password'])
user.save()
# delete the reset request entry
validate_pass_reset_req(username, hash, delete=True)
return message_box(
request,
"Your password was successfully reset!"
)
# invalid request, raise error & trigger exception
raise pass_reset_validb.DoesNotExist
except pass_reset_validb.DoesNotExist:
form.add_error("password", "Invalid reset request hash")
form.add_error("password_re", "Invalid reset request hash")
return render_to_response('accounts/password_reset_change.html',
locals(), RequestContext(request))
def password_reset_success(request):
return message_box(request, "Your password was successfully reset!")
@login_required
def indi(request, username=""):
if username:
try:
user = auth.models.User.objects.get(username=username)
except auth.models.User.DoesNotExist:
user = None
return render_to_response('accounts/indi.html',
locals(), RequestContext(request))
@login_required
def index(request):
return redirect(reverse('user:indi', kwargs={'username':request.user.username}))
|
python
|
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
def to_u_s_miles_per_gallon(value):
return value * 2.35215
def to_miles_per_gallon(value):
return value * 2.82481
def to_litres_per100_kilometres(value):
return 100.0 / value
|
python
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
"""
from pyramid.compat import itervalues_
from everest.entities.utils import get_root_aggregate
from everest.querying.specifications import eq
from everest.repositories.rdb.session import ScopedSessionMaker
from thelma.interfaces import IRack
from thelma.tools.base import BaseTool
from thelma.tools.semiconstants import get_item_status_future
__docformat__ = 'reStructuredText en'
__all__ = ['PlateEraser'
]
class PlateEraser(BaseTool):
NAME = 'Plate Eraser'
def __init__(self, barcodes, parent=None):
BaseTool.__init__(self, parent=parent)
self.__barcodes = barcodes.split(',')
def run(self):
sess = ScopedSessionMaker()
for bc in self.__barcodes:
rack = self.__get_rack(bc)
for src_cnt in itervalues_(rack.container_positions):
if not src_cnt is None:
if not src_cnt.sample is None:
sess.delete(src_cnt.sample)
rack.status = get_item_status_future()
def __get_rack(self, barcode):
rack_agg = get_root_aggregate(IRack)
rack_agg.filter = eq(barcode=barcode)
return next(rack_agg.iterator())
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
import logging.config
import os
import sys
import yaml
from datetime import datetime
from importlib import import_module
from pkgutil import iter_modules
from plastron import commands, version
from plastron.exceptions import FailureException
from plastron.logging import DEFAULT_LOGGING_OPTIONS
from plastron.http import Repository
logger = logging.getLogger(__name__)
now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
def main():
"""Parse args and handle options."""
parser = argparse.ArgumentParser(
prog='plastron',
description='Batch operation tool for Fedora 4.'
)
parser.set_defaults(cmd_name=None)
common_required = parser.add_mutually_exclusive_group(required=True)
common_required.add_argument(
'-r', '--repo',
help='Path to repository configuration file.',
action='store'
)
common_required.add_argument(
'-V', '--version',
help='Print version and exit.',
action='version',
version=version
)
parser.add_argument(
'-v', '--verbose',
help='increase the verbosity of the status output',
action='store_true'
)
parser.add_argument(
'-q', '--quiet',
help='decrease the verbosity of the status output',
action='store_true'
)
subparsers = parser.add_subparsers(title='commands')
# load all defined subcommands from the plastron.commands package
command_modules = {}
for finder, name, ispkg in iter_modules(commands.__path__):
module = import_module(commands.__name__ + '.' + name)
if hasattr(module, 'configure_cli'):
module.configure_cli(subparsers)
command_modules[name] = module
# parse command line args
args = parser.parse_args()
# if no subcommand was selected, display the help
if args.cmd_name is None:
parser.print_help()
sys.exit(0)
# load required repository config file and create repository object
with open(args.repo, 'r') as repo_config_file:
repo_config = yaml.safe_load(repo_config_file)
fcrepo = Repository(
repo_config, ua_string='plastron/{0}'.format(version)
)
# get basic logging options
if 'LOGGING_CONFIG' in repo_config:
with open(repo_config.get('LOGGING_CONFIG'), 'r') as logging_config_file:
logging_options = yaml.safe_load(logging_config_file)
else:
logging_options = DEFAULT_LOGGING_OPTIONS
# log file configuration
log_dirname = repo_config.get('LOG_DIR')
if not os.path.isdir(log_dirname):
os.makedirs(log_dirname)
log_filename = 'plastron.{0}.{1}.log'.format(args.cmd_name, now)
logfile = os.path.join(log_dirname, log_filename)
logging_options['handlers']['file']['filename'] = logfile
# manipulate console verbosity
if args.verbose:
logging_options['handlers']['console']['level'] = 'DEBUG'
elif args.quiet:
logging_options['handlers']['console']['level'] = 'WARNING'
# configure logging
logging.config.dictConfig(logging_options)
logger.info('Loaded repo configuration from {0}'.format(args.repo))
# get the selected subcommand
command = command_modules[args.cmd_name].Command()
try:
# dispatch to the selected subcommand
print_header(args)
command(fcrepo, args)
print_footer(args)
except FailureException:
# something failed, exit with non-zero status
sys.exit(1)
except KeyboardInterrupt:
# aborted due to Ctrl+C
sys.exit(2)
def print_header(args):
"""Common header formatting."""
if not args.quiet:
title = '| PLASTRON |'
bar = '+' + '=' * (len(title) - 2) + '+'
spacer = '|' + ' ' * (len(title) - 2) + '|'
print('\n'.join(['', bar, spacer, title, spacer, bar, '']), file=sys.stderr)
def print_footer(args):
"""Report success or failure and resources created."""
if not args.quiet:
print('\nScript complete. Goodbye!\n', file=sys.stderr)
if __name__ == "__main__":
main()
|
python
|
alphabet = 'qw2rty534plkjhgfds1zxcvbnm'
alpha_dict = {'q':0,'w':1,'2':2,'r':3,'t':4,'y':5,'5':6,'3':7,'4':8,'p':9,'l':10,'k':11,'j':12,'h':13,'g':14,'f':15,'d':16,'s':17,'1':18,'z':19,'x':20,'c':21,'v':22,'b':23,'n':24,'m':25}
list_out = open("full_pass_list","w")
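# reduction(x) repeatedly subtracts 26 until x goes negative, then adds 26
# back: for non-negative x this is simply x modulo 26, keeping the rolling
# letter sums below inside the range of the 26-character alphabet.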
def reduction(my_letter):
while my_letter >= 0:
my_letter = my_letter-26
return my_letter+26
for letter1 in alphabet:
for letter2 in alphabet:
for letter3 in alphabet:
for letter4 in alphabet:
for letter5 in alphabet:
a = alpha_dict[letter1]
b = alpha_dict[letter2]
c = alpha_dict[letter3]
d = alpha_dict[letter4]
e = alpha_dict[letter5]
a = reduction(a)
b = reduction(b+a)
c = reduction(b+c)
d = reduction(d+c)
if d==e:
list_out.write(letter1+letter2+letter3+letter4+letter5+'\n')
list_out.close()
|
python
|
class Node(object):
"""
Represents a node in the query plan structure. Provides a `parse` function to
    parse JSON into a hierarchy of nodes. Executors take a plan consisting of
nodes and use it to apply the Transformations to the source.
"""
@classmethod
def parse(cls, _dict):
        raise NotImplementedError
def to_dict(self):
_dict = {}
for key in dir(self):
if (key.startswith('_') or key.lower() != key):
continue
value = getattr(self, key)
if (callable(value) or value is None or value is False or value == []):
continue
if isinstance(value, Node):
value = value.to_dict()
if isinstance(value, list):
for i, v in enumerate(value):
if isinstance(v, Node):
value[i] = v.to_dict()
_dict[key] = value
return _dict
def __eq__(self, other):
if not isinstance(other, Node):
return False
return self.to_dict() == other.to_dict()
class ExecutableNode(Node):
pass
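# --- Illustrative sketch (not part of the module above) ---
# A hypothetical node type showing what to_dict() produces: it serialises
# lowercase, non-callable attributes and recurses into nested Node instances.
# The field names below are invented for illustration.
class ExampleFilterNode(Node):
    def __init__(self, column, value, child=None):
        self.column = column
        self.value = value
        self.child = child

# ExampleFilterNode("age", 21, child=ExampleFilterNode("country", "NZ")).to_dict()
# evaluates to:
# {'child': {'column': 'country', 'value': 'NZ'}, 'column': 'age', 'value': 21}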
|
python
|
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field, asdict
from datetime import datetime
from typing import List
@dataclass(frozen=True)
class SalaryPayment:
basic_payment: int = field(default_factory=int, metadata={"jp": "基本給"})
overtime_fee: int = field(default_factory=int, metadata={"jp": "残業代"})
static_overtime_fee: int = field(default_factory=int, metadata={"jp": "固定残業代"})
commuting_fee: int = field(default_factory=int, metadata={"jp": "通勤(非課税)"})
additional_allowance: int = field(default_factory=int, metadata={"jp": "その他手当"})
def total(self):
return sum(self.__dict__.values())
def taxable(self):
return sum([self.basic_payment, self.overtime_fee, self.static_overtime_fee])
@staticmethod
def loads(data: dict):
_data = {k: v for k, v in data.items() if k in SalaryPayment.__dataclass_fields__.keys()}
return SalaryPayment(**_data)
def dumps(self):
return asdict(self)
@dataclass(frozen=True)
class SalaryDeduction:
health_insurance: int = field(default_factory=int, metadata={"jp": "健康保険"})
nursing_insurance: int = field(default_factory=int, metadata={"jp": "介護保険"})
welfare_pension: int = field(default_factory=int, metadata={"jp": "厚生年金"})
pension_fund: int = field(default_factory=int, metadata={"jp": "年金基金"})
employment_insurance: int = field(default_factory=int, metadata={"jp": "雇用保険"})
def total(self):
return sum(self.__dict__.values())
@staticmethod
def loads(data: dict):
_data = {k: v for k, v in data.items() if k in SalaryDeduction.__dataclass_fields__.keys()}
return SalaryDeduction(**_data)
def dumps(self):
return asdict(self)
@dataclass(frozen=True)
class SalaryTax:
income_tax: int = field(default_factory=int, metadata={"jp": "源泉所得税"})
inhabitant_tax: int = field(default_factory=int, metadata={"jp": "住民税"})
year_end_tax_adjustment: int = field(default_factory=int, metadata={"jp": "年末調整"})
def total(self):
return sum(self.__dict__.values())
@staticmethod
def loads(data: dict):
_data = {k: v for k, v in data.items() if k in SalaryTax.__dataclass_fields__.keys()}
return SalaryTax(**_data)
def dumps(self):
return asdict(self)
@dataclass(frozen=True)
class Salary:
    payment_date: str = field(default="", metadata={"jp": "支給日"})
    calc_start_date: str = field(default="", metadata={"jp": "計算開始日"})
    calc_end_date: str = field(default="", metadata={"jp": "計算締め日"})
salary_payment: SalaryPayment = field(default_factory=SalaryPayment, metadata={"jp": "給与"})
salary_deduction: SalaryDeduction = field(default_factory=SalaryDeduction, metadata={"jp": "保険"})
salary_tax: SalaryTax = field(default_factory=SalaryTax, metadata={"jp": "所得税など"})
    company: str = field(default="", metadata={"jp": "所得税など"})
version: str = field(default="1", metadata={"jp": "版"})
@staticmethod
def loads(data: dict):
_data = {k: v for k, v in data.items() if k in ["payment_date", "calc_start_date", "calc_end_date"]}
_data.update(
{
"salary_payment": SalaryPayment.loads(data.get("salary_payment", {})),
"salary_deduction": SalaryDeduction.loads(data.get("salary_deduction", {})),
"salary_tax": SalaryTax.loads(data.get("salary_tax", {})),
}
)
return Salary(**_data)
def dumps(self):
return asdict(self)
def total_payments(self) -> int:
"""
        Total gross payment amount (総支給額).
"""
return self.salary_payment.total()
def total_deductions(self) -> int:
"""
        Total deductions (控除額合計).
"""
return self.salary_deduction.total() + self.salary_tax.total()
def net_payment(self) -> int:
"""
        Net payment after deductions (差引支給額).
"""
return self.total_payments() - self.total_deductions()
def dt(self) -> str:
return datetime.strptime(self.payment_date, "%Y-%m-%d").strftime("%Y_%m")
@staticmethod
def of(
company: str,
payment_date: str,
calc_start_date: str,
calc_end_date: str,
basic_payment: int,
overtime_fee: int,
static_overtime_fee: int,
commuting_fee: int,
additional_allowance: int,
health_insurance: int,
nursing_insurance: int,
welfare_pension: int,
pension_fund: int,
employment_insurance: int,
income_tax: int,
inhabitant_tax: int,
year_end_tax_adjustment: int,
) -> "Salary":
salary_payment = SalaryPayment(
basic_payment=basic_payment,
overtime_fee=overtime_fee,
static_overtime_fee=static_overtime_fee,
commuting_fee=commuting_fee,
additional_allowance=additional_allowance
)
salary_deduction = SalaryDeduction(
health_insurance=health_insurance,
nursing_insurance=nursing_insurance,
welfare_pension=welfare_pension,
pension_fund=pension_fund,
employment_insurance=employment_insurance,
)
salary_tax = SalaryTax(
income_tax=income_tax, inhabitant_tax=inhabitant_tax, year_end_tax_adjustment=year_end_tax_adjustment
)
return Salary(
company=company,
payment_date=payment_date,
calc_start_date=calc_start_date,
calc_end_date=calc_end_date,
salary_payment=salary_payment,
salary_deduction=salary_deduction,
salary_tax=salary_tax,
)
@dataclass(frozen=True)
class SalaryRepository(metaclass=ABCMeta):
@staticmethod
def file_name(salary: Salary) -> str:
return f"{salary.dt()}_{salary.company}.json"
@abstractmethod
def path(self) -> str:
raise NotImplementedError
@abstractmethod
def save(self, salary: Salary):
raise NotImplementedError
@abstractmethod
def load(self, dt: str) -> List[Salary]:
raise NotImplementedError
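# --- Illustrative usage (a minimal sketch, not part of the module above) ---
# Builds a Salary via Salary.of() and reads back the derived totals.
# All figures below are invented purely for illustration.
def _example_salary() -> Salary:
    salary = Salary.of(
        company="ExampleCorp",
        payment_date="2020-01-25",
        calc_start_date="2019-12-01",
        calc_end_date="2019-12-31",
        basic_payment=300000,
        overtime_fee=20000,
        static_overtime_fee=0,
        commuting_fee=10000,
        additional_allowance=5000,
        health_insurance=15000,
        nursing_insurance=0,
        welfare_pension=27000,
        pension_fund=0,
        employment_insurance=1000,
        income_tax=8000,
        inhabitant_tax=12000,
        year_end_tax_adjustment=0,
    )
    # total_payments() = 335000, total_deductions() = 63000,
    # net_payment() = 272000, dt() = "2020_01"
    return salary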
|
python
|
import os
def get_ip_name():
return "base_ip"
class base_ip:
ID = "base"
def __init__(self, io_hash):
return
    def matched_id(self, in_key):
        return in_key == self.ID
def get_rst_case_text(self):
return ''
def get_dft_case_text(self):
return ''
def get_pinmux_setting(self):
return ""
def get_v_file_list(self):
return ""
def get_module_caller(self):
return ""
def get_wire_defines(self):
return ""
def get_assigement(self):
return ""
def matched_id(self, in_key):
return ""
|
python
|
import requests
import conf
import urllib2
import xml.etree.ElementTree as ET
import io
def get_min_temp_phrase_from_values(min_observed, min_forecast):
if abs(min_forecast) != 1:
degrees_text = "degrees"
else:
degrees_text = "degree"
s = "The temperature tonight will be %s %s, " % (min_forecast, degrees_text)
degrees_warmer_tonight = min_forecast - min_observed
if abs(degrees_warmer_tonight) > 1:
degrees_text = "degrees"
else:
degrees_text = "degree"
if degrees_warmer_tonight == 0:
s += "which is the same as last night"
elif degrees_warmer_tonight > 0:
s += "which is %s %s warmer than last night" % \
(abs(degrees_warmer_tonight), degrees_text)
else:
s += "which is %s %s cooler than last night" % \
(abs(degrees_warmer_tonight), degrees_text)
return s
def get_min_observed_and_forecasted(bom_obs_url, bom_forecast_url, bom_forecast_area):
# BOM observation data is available for several weather stations, and
# in several formats (including the JSON that we use here).
# e.g. http://www.bom.gov.au/products/IDN60901/IDN60901.94768.shtml
r = requests.get(bom_obs_url)
# this will only be used in the late afternoon and
# min reading is usually about 5am on the same day.
# Comes as a float, so let's round and cast
min_obs = int(round(min([reading["air_temp"] for reading
in r.json()["observations"]["data"]])))
# State forecast URLs are in XML format and are accessible from
# http://www.bom.gov.au/info/precis_forecasts.shtml
f = urllib2.urlopen(bom_forecast_url)
forecast_report = io.StringIO(unicode(f.read()))
tree = ET.parse(forecast_report)
# Get the first (zeroth) minimum air temperature reading.
# The current day will not have a minimum reading so this corresponds
# to tomorrow's minimum forecast temperature
min_forecast = int(
tree.findall("./forecast"
"/area[@aac='%s']"
"/forecast-period"
"/element[@type='air_temperature_minimum']" %
(bom_forecast_area,))[0].text)
return min_obs, min_forecast
if __name__ == "__main__":
print get_min_temp_phrase_from_values(*get_min_observed_and_forecasted(
conf.LOCAL_BOM_OBSERVATIONS_URL,
conf.STATE_BOM_FORECAST_URL,
conf.LOCAL_BOM_FORECAST_AREA))
# print get_min_temp_phrase_from_values(12, 0)
|
python
|
from flask import Flask
from flask import render_template,redirect,request
import pandas as pd
import sys
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
#from sklearn.metrics import mean_squared_log_error
from sklearn.linear_model import LogisticRegression
df=pd.read_csv('heart-data.csv')
df.rename(columns={"class":"target"},inplace=True)
df['target'].replace(['absent','present'],[0,1],inplace=True)
df=pd.get_dummies(df)
x=df.drop('target', axis=1)
y=df['target']
train_x,valid_x,train_y,valid_y=train_test_split(x,y,test_size=0.3,random_state=35)
logr=LogisticRegression()
logr.fit(train_x,train_y)
#new_data=np.array(new_data,dtype='int64')
#new_data=new_data.reshape(1,13)
#xnew_data=pd.DataFrame(new_data)
pickle.dump(logr,open('model.pkl','wb'))
model=pickle.load(open('model.pkl','rb'))
result=model.predict(valid_x)
newdata=valid_x.head(1)
print(newdata)
result2=model.predict(newdata)
print(result)
print(result2)
|
python
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from manila import context
from manila import db
from manila import exception
from manila.scheduler import driver
from manila.scheduler import manager
from manila.scheduler import simple
from manila.share import rpcapi as share_rpcapi
from manila import test
from manila.tests import db_utils
from manila import utils
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'manila.scheduler.driver.Scheduler'
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertTrue(isinstance(manager.driver, self.driver_cls))
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
with mock.patch.object(self.manager.driver,
'update_service_capabilities', mock.Mock()):
self.manager.update_service_capabilities(
self.context, service_name=service_name, host=host)
self.manager.driver.update_service_capabilities.\
assert_called_once_with(service_name, host, {})
with mock.patch.object(self.manager.driver,
'update_service_capabilities', mock.Mock()):
capabilities = {'fake_capability': 'fake_value'}
self.manager.update_service_capabilities(
self.context, service_name=service_name, host=host,
capabilities=capabilities)
self.manager.driver.update_service_capabilities.\
assert_called_once_with(service_name, host, capabilities)
@mock.patch.object(db, 'share_update', mock.Mock())
def test_create_share_exception_puts_share_in_error_state(self):
"""Test that a NoValideHost exception for create_share.
Puts the share in 'error' state and eats the exception.
"""
def raise_no_valid_host(*args, **kwargs):
raise exception.NoValidHost(reason="")
fake_share_id = 1
request_spec = {'share_id': fake_share_id}
with mock.patch.object(self.manager.driver,
'schedule_create_share',
mock.Mock(side_effect=raise_no_valid_host)):
self.mock_object(manager.LOG, 'error')
self.manager.create_share_instance(
self.context, request_spec=request_spec, filter_properties={})
db.share_update.assert_called_once_with(
self.context, fake_share_id, {'status': 'error'})
self.manager.driver.schedule_create_share.assert_called_once_with(
self.context, request_spec, {})
manager.LOG.error.assert_called_once_with(mock.ANY, mock.ANY)
def test_get_pools(self):
"""Ensure get_pools exists and calls driver.get_pools."""
mock_get_pools = self.mock_object(self.manager.driver, 'get_pools',
mock.Mock(return_value='fake_pools'))
result = self.manager.get_pools(self.context, filters='fake_filters')
mock_get_pools.assert_called_once_with(self.context, 'fake_filters')
self.assertEqual('fake_pools', result)
@mock.patch.object(db, 'consistency_group_update', mock.Mock())
def test_create_cg_no_valid_host_puts_cg_in_error_state(self):
"""Test that NoValidHost is raised for create_consistency_group.
Puts the share in 'error' state and eats the exception.
"""
def raise_no_valid_host(*args, **kwargs):
raise exception.NoValidHost(reason="")
fake_cg_id = 1
cg_id = fake_cg_id
request_spec = {"consistency_group_id": cg_id}
with mock.patch.object(self.manager.driver,
'schedule_create_consistency_group',
mock.Mock(side_effect=raise_no_valid_host)):
self.manager.create_consistency_group(self.context,
fake_cg_id,
request_spec=request_spec,
filter_properties={})
db.consistency_group_update.assert_called_once_with(
self.context, fake_cg_id, {'status': 'error'})
self.manager.driver.schedule_create_consistency_group\
.assert_called_once_with(self.context, cg_id,
request_spec, {})
@mock.patch.object(db, 'consistency_group_update', mock.Mock())
def test_create_cg_exception_puts_cg_in_error_state(self):
"""Test that exceptions for create_consistency_group.
Puts the share in 'error' state and raises the exception.
"""
fake_cg_id = 1
cg_id = fake_cg_id
request_spec = {"consistency_group_id": cg_id}
with mock.patch.object(self.manager.driver,
'schedule_create_consistency_group',
mock.Mock(side_effect=exception.NotFound)):
self.assertRaises(exception.NotFound,
self.manager.create_consistency_group,
self.context, fake_cg_id,
request_spec=request_spec,
filter_properties={})
def test_migrate_share_to_host(self):
share = db_utils.create_share()
host = 'fake@backend#pool'
self.mock_object(db, 'share_get', mock.Mock(return_value=share))
self.mock_object(share_rpcapi.ShareAPI, 'migrate_share')
self.mock_object(driver.Scheduler, 'host_passes_filters',
mock.Mock(return_value=host))
self.manager.migrate_share_to_host(self.context, share['id'], host,
False, {}, None)
def test_migrate_share_to_host_no_valid_host(self):
share = db_utils.create_share()
host = 'fake@backend#pool'
self.mock_object(
driver.Scheduler, 'host_passes_filters',
mock.Mock(side_effect=[exception.NoValidHost('fake')]))
self.manager.migrate_share_to_host(self.context, share['id'], host,
False, {}, None)
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
capabilities = {'fake_capability': 'fake_value'}
with mock.patch.object(self.driver.host_manager,
'update_service_capabilities', mock.Mock()):
self.driver.update_service_capabilities(
service_name, host, capabilities)
self.driver.host_manager.update_service_capabilities.\
assert_called_once_with(service_name, host, capabilities)
def test_hosts_up(self):
service1 = {'host': 'host1'}
service2 = {'host': 'host2'}
services = [service1, service2]
def fake_service_is_up(*args, **kwargs):
if args[0]['host'] == 'host1':
return False
return True
with mock.patch.object(db, 'service_get_all_by_topic',
mock.Mock(return_value=services)):
with mock.patch.object(utils, 'service_is_up',
mock.Mock(side_effect=fake_service_is_up)):
result = self.driver.hosts_up(self.context, self.topic)
self.assertEqual(result, ['host2'])
db.service_get_all_by_topic.assert_called_once_with(
self.context, self.topic)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods.
These can't fail if the driver is changed.
"""
def test_unimplemented_schedule(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
self.assertRaises(NotImplementedError, self.driver.schedule,
self.context, self.topic, 'schedule_something',
*fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
"""Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext('fake_user', 'fake_project')
@mock.patch.object(db, 'share_update', mock.Mock())
def test_share_host_update_db(self):
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value='fake-now')):
driver.share_update_db(self.context, 31337, 'fake_host')
db.share_update.assert_called_once_with(
self.context, 31337,
{'host': 'fake_host', 'scheduled_at': 'fake-now'})
class SimpleSchedulerSharesTestCase(test.TestCase):
"""Test case for simple scheduler create share method."""
def setUp(self):
super(SimpleSchedulerSharesTestCase, self).setUp()
self.mock_object(share_rpcapi, 'ShareAPI')
self.driver = simple.SimpleScheduler()
self.context = context.RequestContext('fake_user', 'fake_project')
self.admin_context = context.RequestContext('fake_admin_user',
'fake_project')
self.admin_context.is_admin = True
@mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
def test_create_share_if_two_services_up(self):
share_id = 'fake'
fake_share = {'id': share_id, 'size': 1}
fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
fake_result = [(fake_service_1, 2), (fake_service_2, 1)]
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
self.mock_object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result))
self.mock_object(driver, 'share_update_db',
mock.Mock(return_value=db_utils.create_share()))
self.driver.schedule_create_share(self.context,
fake_request_spec, {})
utils.service_is_up.assert_called_once_with(utils.IsAMatcher(dict))
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
driver.share_update_db.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share_id, 'fake_host1')
def test_create_share_if_services_not_available(self):
share_id = 'fake'
fake_share = {'id': share_id, 'size': 1}
fake_result = []
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result)):
self.assertRaises(exception.NoValidHost,
self.driver.schedule_create_share,
self.context, fake_request_spec, {})
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
def test_create_share_if_max_gigabytes_exceeded(self):
share_id = 'fake'
fake_share = {'id': share_id, 'size': 10001}
fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
fake_result = [(fake_service_1, 5), (fake_service_2, 7)]
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
with mock.patch.object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result)):
self.assertRaises(exception.NoValidHost,
self.driver.schedule_create_share,
self.context, fake_request_spec, {})
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
@mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
def test_create_share_availability_zone(self):
share_id = 'fake'
fake_share = {
'id': share_id,
'size': 1,
}
fake_instance = {
'availability_zone_id': 'fake',
}
fake_service_1 = {
'disabled': False, 'host': 'fake_host1',
'availability_zone_id': 'fake',
}
fake_service_2 = {
'disabled': False, 'host': 'fake_host2',
'availability_zone_id': 'super_fake',
}
fake_result = [(fake_service_1, 0), (fake_service_2, 1)]
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
'share_instance_properties': fake_instance,
}
self.mock_object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=fake_result))
self.mock_object(driver, 'share_update_db',
mock.Mock(return_value=db_utils.create_share()))
self.driver.schedule_create_share(self.context,
fake_request_spec, {})
utils.service_is_up.assert_called_once_with(fake_service_1)
driver.share_update_db.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share_id,
fake_service_1['host'])
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
@mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
def test_create_share_availability_zone_on_host(self):
share_id = 'fake'
fake_share = {
'id': share_id,
'availability_zone': 'fake:fake',
'size': 1,
}
fake_service = {'disabled': False, 'host': 'fake'}
fake_request_spec = {
'share_id': share_id,
'share_properties': fake_share,
}
self.mock_object(db, 'service_get_all_share_sorted',
mock.Mock(return_value=[(fake_service, 1)]))
self.mock_object(driver, 'share_update_db',
mock.Mock(return_value=db_utils.create_share()))
self.driver.schedule_create_share(self.admin_context,
fake_request_spec, {})
utils.service_is_up.assert_called_once_with(fake_service)
db.service_get_all_share_sorted.assert_called_once_with(
utils.IsAMatcher(context.RequestContext))
driver.share_update_db.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share_id, 'fake')
|
python
|
import os
import platform
import textwrap
from collections import OrderedDict
from jinja2 import Template
from conans.errors import ConanException
from conans.util.files import normalize
sh_activate = textwrap.dedent("""\
#!/usr/bin/env sh
{%- for it in modified_vars %}
export CONAN_OLD_{{it}}="${{it}}"
{%- endfor %}
while read -r line; do
LINE="$(eval echo $line)";
export "$LINE";
done < "{{ environment_file }}"
export CONAN_OLD_PS1="$PS1"
export PS1="({{venv_name}}) $PS1"
""")
sh_deactivate = textwrap.dedent("""\
#!/usr/bin/env sh
export PS1="$CONAN_OLD_PS1"
unset CONAN_OLD_PS1
{% for it in modified_vars %}
export {{it}}="$CONAN_OLD_{{it}}"
unset CONAN_OLD_{{it}}
{%- endfor %}
{%- for it in new_vars %}
unset {{it}}
{%- endfor %}
""")
bat_activate = textwrap.dedent("""\
@echo off
{%- for it in modified_vars %}
SET "CONAN_OLD_{{it}}=%{{it}}%"
{%- endfor %}
FOR /F "usebackq tokens=1,* delims==" %%i IN ("{{ environment_file }}") DO (
CALL SET "%%i=%%j"
)
SET "CONAN_OLD_PROMPT=%PROMPT%"
SET "PROMPT=({{venv_name}}) %PROMPT%"
""")
bat_deactivate = textwrap.dedent("""\
@echo off
SET "PROMPT=%CONAN_OLD_PROMPT%"
SET "CONAN_OLD_PROMPT="
{% for it in modified_vars %}
SET "{{it}}=%CONAN_OLD_{{it}}%"
SET "CONAN_OLD_{{it}}="
{%- endfor %}
{%- for it in new_vars %}
SET "{{it}}="
{%- endfor %}
""")
ps1_activate = textwrap.dedent("""\
{%- for it in modified_vars %}
$env:CONAN_OLD_{{venv_name}}_{{it}}=$env:{{it}}
{%- endfor %}
foreach ($line in Get-Content "{{ environment_file }}") {
$var,$value = $line -split '=',2
$value_expanded = $ExecutionContext.InvokeCommand.ExpandString($value)
Set-Item env:\\$var -Value "$value_expanded"
}
function global:_old_conan_{{venv_name}}_prompt {""}
$function:_old_conan_{{venv_name}}_prompt = $function:prompt
function global:prompt {
write-host "({{venv_name}}) " -nonewline; & $function:_old_conan_{{venv_name}}_prompt
}
""")
ps1_deactivate = textwrap.dedent("""\
$function:prompt = $function:_old_conan_{{venv_name}}_prompt
remove-item function:_old_conan_{{venv_name}}_prompt
{% for it in modified_vars %}
$env:{{it}}=$env:CONAN_OLD_{{venv_name}}_{{it}}
Remove-Item env:CONAN_OLD_{{venv_name}}_{{it}}
{%- endfor %}
{%- for it in new_vars %}
Remove-Item env:{{it}}
{%- endfor %}
""")
BAT_FLAVOR = "bat"
PS1_FLAVOR = "ps1"
SH_FLAVOR = "sh"
def _variable_placeholder(flavor, name, append_with_spaces):
"""
    :param flavor: flavor of the execution environment (e.g., bat, ps1, sh)
    :param name: variable name
    :param append_with_spaces: whether the variable's values should be joined with spaces
    :return: placeholder for the variable name formatted for the given execution environment
"""
if flavor == BAT_FLAVOR:
return "%{}%".format(name)
if flavor == PS1_FLAVOR:
return "$env:%s" % name
# flavor == sh
return "${%s:+ $%s}" % (name, name) if append_with_spaces else "${%s:+:$%s}" % (name, name)
def _format_values(flavor, variables, append_with_spaces):
"""
Formats the values for the different supported script language flavors.
:param flavor: flavor of the execution environment
    :param variables: (name, value) pairs to be formatted
    :param append_with_spaces: names of variables whose values are joined with spaces
    :return: generator of (name, formatted value, already-in-environment flag) tuples
"""
if flavor in [BAT_FLAVOR, PS1_FLAVOR] and platform.system() == "Windows":
path_sep, quote_elements = ";", False
elif flavor == PS1_FLAVOR:
path_sep, quote_elements = ":", False
else:
path_sep, quote_elements = ":", True
for name, value in variables:
# activate values
if isinstance(value, list):
value = list(OrderedDict.fromkeys(value)) # Avoid repeated entries, while keeping order
append_space = name in append_with_spaces
placeholder = _variable_placeholder(flavor, name, append_space)
if append_space:
# Variables joined with spaces look like: CPPFLAGS="one two three"
if flavor == SH_FLAVOR:
value = " ".join(value) + placeholder
else:
value = " ".join(value + [placeholder])
value = "\"%s\"" % value if quote_elements else value
else:
                # Quoted variables joined with pathsep may look like:
                # PATH="one path":"two paths"
                # Unquoted variables joined with pathsep may look like: PATH=one path;two paths
value = ["\"%s\"" % v for v in value] if quote_elements else value
if flavor == SH_FLAVOR:
value = path_sep.join(value) + placeholder
else:
value = path_sep.join(value + [placeholder])
else:
# single value
value = "\"%s\"" % value if quote_elements else value
if platform.system() != "Windows":
value = value.replace("\\", "\\\\")
# deactivate values
existing = name in os.environ
yield name, value, existing
def _files(env_vars, vars_with_spaces, flavor, activate_tpl, deactivate_tpl, venv_name,
env_filepath):
ret = list(_format_values(flavor, env_vars.items(), vars_with_spaces))
modified_vars = [name for name, _, existing in ret if existing]
new_vars = [name for name, _, existing in ret if not existing]
activate_content = activate_tpl.render(environment_file=env_filepath,
modified_vars=modified_vars, new_vars=new_vars,
venv_name=venv_name)
deactivate_content = deactivate_tpl.render(modified_vars=modified_vars, new_vars=new_vars,
venv_name=venv_name)
environment_lines = ["{}={}".format(name, value) for name, value, _ in ret]
    # This blank line is important, otherwise the script doesn't process the last line
environment_lines.append('')
if flavor == SH_FLAVOR:
        # replace CRLF->LF to guarantee it is always LF, irrespective of the current .py file
activate_content = activate_content.replace("\r\n", "\n")
deactivate_content = deactivate_content.replace("\r\n", "\n")
environment = "\n".join(environment_lines)
else:
activate_content = normalize(activate_content)
deactivate_content = normalize(deactivate_content)
environment = os.linesep.join(environment_lines)
return activate_content, deactivate_content, environment
def env_files(env_vars, vars_with_spaces, flavor, folder, name, venv_name):
env_filename = "environment{}.{}.env".format(name, flavor)
activate_filename = "activate{}.{}".format(name, flavor)
deactivate_filename = "deactivate{}.{}".format(name, flavor)
templates = {SH_FLAVOR: (sh_activate, sh_deactivate),
BAT_FLAVOR: (bat_activate, bat_deactivate),
PS1_FLAVOR: (ps1_activate, ps1_deactivate)}
try:
activate, deactivate = templates[flavor]
except KeyError:
raise ConanException("Unrecognized flavor: %s" % flavor)
activate_tpl, deactivate_tpl = Template(activate), Template(deactivate)
env_filepath = os.path.abspath(os.path.join(folder, env_filename))
activate, deactivate, envfile = _files(env_vars, vars_with_spaces, flavor, activate_tpl,
deactivate_tpl, venv_name, env_filepath)
result = {activate_filename: activate,
deactivate_filename: deactivate,
env_filename: envfile}
return result
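# Minimal usage sketch (hypothetical values, not part of the original module):
#   files = env_files({"PATH": ["/opt/tool/bin"], "CC": "gcc"},
#                     vars_with_spaces={"CPPFLAGS", "CFLAGS"},
#                     flavor=SH_FLAVOR, folder="/tmp/build", name="",
#                     venv_name="conanenv")
#   # -> {"activate.sh": ..., "deactivate.sh": ..., "environment.sh.env": ...}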
|
python
|
import logging
from copy import copy
from inspect import isfunction
import ibutsu_server.tasks
from flask_testing import TestCase
from ibutsu_server import get_app
from ibutsu_server.tasks import create_celery_app
from ibutsu_server.util import merge_dicts
def mock_task(*args, **kwargs):
if args and isfunction(args[0]):
func = args[0]
def wrap(*args, **kwargs):
return func(*args, **kwargs)
wrap._orig_func = func
return wrap
else:
def decorate(func):
def _wrapped(*args, **kwargs):
return func(*args, **kwargs)
_wrapped._orig_func = func
return _wrapped
return decorate
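# The decorator above works both bare and with arguments; a hypothetical sketch:
#   @mock_task
#   def add(x, y): return x + y      # add(1, 2) -> 3, add._orig_func is the original
#   @mock_task(queue="fake")
#   def mul(x, y): return x * y      # runs synchronously, no Celery involved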
class BaseTestCase(TestCase):
def create_app(self):
logging.getLogger("connexion.operation").setLevel("ERROR")
extra_config = {
"TESTING": True,
"LIVESERVER_PORT": 0,
"SQLALCHEMY_DATABASE_URI": "sqlite:///:memory:",
}
app = get_app(**extra_config)
create_celery_app(app.app)
if ibutsu_server.tasks.task is None:
ibutsu_server.tasks.task = mock_task
return app.app
def assert_201(self, response, message=None):
"""
Checks if response status code is 201
:param response: Flask response
:param message: Message to display on test failure
"""
self.assert_status(response, 201, message)
def assert_equal(self, first, second, msg=None):
"""Alias"""
return self.assertEqual(first, second, msg)
def assert_not_equal(self, first, second, msg=None):
"""Alias"""
return self.assertNotEqual(first, second, msg)
class MockModel(object):
"""Mock model object"""
COLUMNS = ["id"]
def __init__(self, **fields):
for column in self.COLUMNS:
if column in fields.keys():
setattr(self, column, fields[column])
else:
setattr(self, column, None)
def to_dict(self):
record_dict = copy(self.__dict__)
# when outputting info, translate data to metadata
if record_dict.get("data"):
record_dict["metadata"] = record_dict.pop("data")
return record_dict
@classmethod
def from_dict(cls, **record_dict):
# because metadata is a reserved attr name, translate it to data
if record_dict.get("metadata"):
record_dict["data"] = record_dict.pop("metadata")
return cls(**record_dict)
def update(self, record_dict):
if "id" in record_dict:
record_dict.pop("id")
group_dict = self.to_dict()
merge_dicts(group_dict, record_dict)
if group_dict.get("metadata"):
group_dict["data"] = group_dict.pop("metadata")
if record_dict.get("metadata"):
record_dict["data"] = record_dict.get("metadata")
for key, value in record_dict.items():
setattr(self, key, value)
class MockArtifact(MockModel):
COLUMNS = ["id", "filename", "result_id", "data", "content"]
class MockGroup(MockModel):
COLUMNS = ["id", "name", "data"]
class MockImport(MockModel):
COLUMNS = ["id", "filename", "format", "data", "status"]
class MockProject(MockModel):
COLUMNS = ["id", "name", "title", "owner_id", "group_id"]
class MockResult(MockModel):
COLUMNS = [
"id",
"component",
"data",
"duration",
"env",
"params",
"project_id",
"result",
"run_id",
"source",
"start_time",
"test_id",
]
class MockReport(MockModel):
COLUMNS = [
"id",
"created",
"download_url",
"filename",
"mimetype",
"name",
"params",
"project_id",
"status",
"url",
"view_url",
]
class MockRun(MockModel):
COLUMNS = [
"id",
"component",
"created",
"data",
"duration",
"env",
"project_id",
"source",
"start_time",
"summary",
]
# Mock out the task decorator
ibutsu_server.tasks.task = mock_task
|
python
|
#!/usr/bin/env python
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config
from kivy.core.window import Window
from kivy.properties import (ListProperty,
NumericProperty,
ObjectProperty,
ReferenceListProperty)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.vector import Vector
class SnakesGame(Widget):
"""The root `Widget` for displaying and running the game."""
trails = ListProperty()
snake1 = ObjectProperty()
snake2 = ObjectProperty()
status_bar = ObjectProperty()
def __init__(self, **kwargs):
super(SnakesGame, self).__init__(**kwargs)
self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
self._keyboard.bind(on_key_down=self._on_keyboard_down)
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
if keycode[1] == 's':
if self.snake1.direction != [0, 1]:
self.snake1.direction = (0, -1)
elif keycode[1] == 'w':
if self.snake1.direction != [0, -1]:
self.snake1.direction = (0, 1)
elif keycode[1] == 'a':
if self.snake1.direction != [1, 0]:
self.snake1.direction = (-1, 0)
elif keycode[1] == 'd':
if self.snake1.direction != [-1, 0]:
self.snake1.direction = (1, 0)
elif keycode[1] == 'down':
if self.snake2.direction != [0, 1]:
self.snake2.direction = (0, -1)
elif keycode[1] == 'up':
if self.snake2.direction != [0, -1]:
self.snake2.direction = (0, 1)
elif keycode[1] == 'left':
if self.snake2.direction != [1, 0]:
self.snake2.direction = (-1, 0)
elif keycode[1] == 'right':
if self.snake2.direction != [-1, 0]:
self.snake2.direction = (1, 0)
def run(self):
Clock.schedule_interval(self.update, 1/60.)
def update(self, dt):
"""Moves snakes and gives points if collision occured."""
if self.snake1.move(self.snake2):
self.snake2.score += 1
self.reset()
elif self.snake2.move(self.snake1):
self.snake1.score += 1
self.reset()
def reset(self):
"""Resets the positions/directions and removes trails."""
self.snake1.center = (self.width/3., self.height/2.)
self.snake2.center = (self.width*2/3., self.height/2.)
self.snake1.direction = (0, 0)
self.snake2.direction = (0, 0)
for trail in self.trails:
self.remove_widget(trail)
del self.trails[:]
class Snake(Widget):
"""Represents the head of a snake, which can be moved around."""
color = ListProperty()
direction_x = NumericProperty()
direction_y = NumericProperty()
direction = ReferenceListProperty(direction_x, direction_y)
trail = ObjectProperty()
score = NumericProperty()
def collide_widget(self, wid):
"""Collision detection that works with negative sizes.
Args:
wid: The `Widget` to check for collision against.
Returns:
            True if a collision occurred, otherwise False.
"""
if wid.width < 0:
if self.right < wid.right + 1:
return False
if self.x > wid.x - 1:
return False
else:
if self.right < wid.x + 1:
return False
if self.x > wid.right - 1:
return False
if wid.height < 0:
if self.top < wid.top + 1:
return False
if self.y > wid.y - 1:
return False
else:
if self.top < wid.y + 1:
return False
if self.y > wid.top - 1:
return False
return True
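    # Note: trails grow in the direction of travel (see move() below), so their
    # width/height can be negative; the checks above handle both orientations.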
def move(self, other):
"""Moves the `Snake` and returns whether a collision occured."""
# Scale speed in relation to game widget size
if self.parent.width < self.parent.height:
speed_scale = self.parent.width / 250.
else:
speed_scale = self.parent.height / 250.
self.pos = Vector(self.direction) * speed_scale + self.pos
if self.trail:
self.trail.width += self.direction_x * speed_scale
self.trail.height += self.direction_y * speed_scale
# Check for collision with edges of arena and other snake
if self.right >= self.parent.width or self.x <= 0:
return True
if self.top >= self.parent.status_bar.y or self.y <= 0:
return True
if self.collide_widget(other):
self.score += 1 # Gives point to self as well
return True
# Check for collision with all trails
for trail in self.parent.trails:
if self.collide_widget(trail):
return True
return False
def on_direction(self, snake, direction):
"""Creates and positions a new trail."""
self.trail = Trail(size=self.size, pos=self.pos, color=self.color)
        # Position the trail so it follows directly behind the snake head
if self.direction_x == 1:
self.trail.width = 0
elif self.direction_x == -1:
self.trail.width = 0
self.trail.x = self.right
elif self.direction_y == 1:
self.trail.height = 0
elif self.direction_y == -1:
self.trail.height = 0
self.trail.y = self.top
self.parent.add_widget(self.trail)
self.parent.trails.append(self.trail)
class Trail(Widget):
"""Represents a trail left behind as the `Snake` moves."""
color = ListProperty()
class StatusBar(BoxLayout):
"""A container for displaying scores."""
pass
class SnakesApp(App):
def build(self):
"""Creates and runs the game."""
Config.set('kivy', 'exit_on_escape', '0')
game = SnakesGame()
game.run()
return game
def main():
SnakesApp().run()
if __name__ == '__main__':
main()
|
python
|
# -selenium webdriver-
from logging import fatal
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
# config file import
import config
import requests # for discord webhook
# makes the bot only use the specified cores. (logical processors)
#import psutil
#p = psutil.Process()
# p.cpu_affinity([2,3,5,6,7,9,10,11,12,13,14])
# get the webdriver you want to use.
browser = webdriver.Firefox(executable_path=r'.\webdrivers\geckodriver.exe')
browser.get(config.PageURL)
#wait = WebDriverWait(browser, 2)
print('waiting')
element = WebDriverWait(browser, 20).until(lambda x: x.find_element_by_xpath(config.Size_Xpath)) # waits for page to finish loading
print('finished waiting')
# timestamp of when the bot was started
print('Time started =', config.current_time)
print('--------------------------------------')
SizeAvailable = browser.find_element_by_xpath(config.Size_Xpath).get_attribute('data-qa')
ShoeNamePrimary = browser.find_element_by_xpath('//*[@id="root"]/div/div/div[1]/div/div[1]/div[2]/div/section[1]/div[2]/aside/div/div[1]/h1').get_attribute('innerHTML')
ShoeNameSecondary = browser.find_element_by_xpath('//*[@id="root"]/div/div/div[1]/div/div[1]/div[2]/div/section[1]/div[2]/aside/div/div[1]/h5').get_attribute('innerHTML')
ShoeThumbnail = browser.find_element_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[1]/div[2]/div/section[1]/div[1]/div/div[6]/figure/img').get_attribute('src')
#all_children_by_xpath = browser.find_elements_by_xpath(f'{config.Size_Xpath}.//*')
#print(all_children_by_xpath)
parentElement = browser.find_element_by_xpath(config.Size_Xpath)
elementList = parentElement.find_elements_by_tag_name('button')
print('===============')
print(parentElement)
print(elementList)
print('===============')
print(ShoeNamePrimary)
print(ShoeNameSecondary)
print(SizeAvailable)
# ~~ debug ~~
if config.DebugMode == True:
print('debug value is true.')
print('~~~~~~~~~~~~~~~~~~~~~~~~')
print(f'*Debug Info*\nAutoBuy = {config.AutoBuy}\nAutoCart = {config.AutoCart}\nWinToast = {config.WindowsToasts}\nTestMode = {config.TestMode}')
print('--------------------------------------')
# check whether the size button's data-qa attribute marks it as unavailable
if SizeAvailable == 'size-unavailable': # size is unavailable
print('out of stock')
# browser.refresh()
elif SizeAvailable != 'size-unavailable': # size is available
print('in stock')
# notification settings (you can change them in the config)
if config.WindowsToasts == True:
from logging import debug
from win10toast import ToastNotifier
toaster = ToastNotifier()
toaster.show_toast(f'Sneaker Bot v{config.SneakerBotVersion}',
(f'{ShoeNamePrimary} are in stock'),
icon_path='assets\\sneakerbot-icon.ico',
duration=999999,
threaded=True)
if config.DiscordWebhooks == True:
url = config.DiscordWebhookURL
if config.DebugMode == True:
embed = {
'title': 'Sneakers in stock!',
'color': 15052624, # 15052624 orange, 14708343 bright red, 12017246 darker red
'thumbnail': {
'url': ShoeThumbnail
},
'fields': [
{
'name': (f'{ShoeNamePrimary} - {ShoeNameSecondary}'),
'value': (f'[store link]({config.PageURL})\n▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\n```▬ Debug Info ▬\nAutoBuy = {config.AutoBuy}\nAutoCart = {config.AutoCart}\nDiscordHook = {config.DiscordWebhooks}\nWinToast = {config.WindowsToasts}\nTestMode = {config.TestMode}\n```')
}
],
'footer': {
'text': (f'Made by MBlais.dev • {config.current_time} • SneakerBot v{config.SneakerBotVersion}'),
'icon_url': 'https://i.imgur.com/MbrG9HM.png'
}
}
elif config.DebugMode == False:
embed = {
'title': 'Sneakers in stock!',
'color': 5546086, # dark green:5546086, light green:8776060
'thumbnail': {
'url': ShoeThumbnail
},
'fields': [
{
'name': (f'{ShoeNamePrimary} - {ShoeNameSecondary}'),
'value': (f'[store link]({config.PageURL})\n▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬')
}
],
'footer': {
'text': (f'Made by MBlais.dev • {config.current_time} • SneakerBot v{config.SneakerBotVersion}'),
'icon_url': 'https://i.imgur.com/MbrG9HM.png'
}
}
data = {
'username': (f'SneakerBot v{config.SneakerBotVersion}'),
'avatar_url': 'https://i.imgur.com/eVDSFTr.png',
'embeds': [
embed
],
}
headers = {
'Content-Type': 'application/json'
}
result = requests.post(url, json=data, headers=headers)
if 200 <= result.status_code < 300:
print(f'Webhook sent {result.status_code}')
else:
print(f'Not sent with {result.status_code}, response:\n{result.json()}')
|
python
|
import argparse
import os
import numpy as np
import scipy.io as sio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from tqdm import tqdm
from net.models import LeNet_5 as LeNet
import util
os.makedirs('saves', exist_ok=True)
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST pruning from deep compression paper')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                    help='learning rate (default: 0.0001)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=12345678, metavar='S',
                    help='random seed (default: 12345678)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--log', type=str, default='log.txt',
help='log file name')
parser.add_argument('--model', type=str, default='saves/initial_model',
help='path to model pretrained with sparsity-inducing regularizer')
parser.add_argument('--sensitivity', type=float, default=0.001,
help="pruning threshold set as the sensitivity value")
args = parser.parse_args()
# Control Seed
torch.manual_seed(args.seed)
# Select Device
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else 'cpu')
if use_cuda:
print("Using CUDA!")
torch.cuda.manual_seed(args.seed)
else:
print('Not using CUDA!!!')
# Loader
kwargs = {'num_workers': 5, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=False, transform=transforms.Compose([
transforms.ToTensor()])),
batch_size=args.test_batch_size, shuffle=False, **kwargs)
# Define which model to use
model = LeNet(mask=False).to(device)
# NOTE: passing a `weight_decay` argument to the optimizer would add an L2 regularization term; none is used here
optimizer = optim.Adam(model.parameters(), lr=args.lr)
initial_optimizer_state_dict = optimizer.state_dict()
def train(epochs):
model.train()
pbar = tqdm(range(epochs), total=epochs)
for epoch in pbar:
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
total_loss = loss
total_loss.backward()
# zero-out all the gradients corresponding to the pruned connections
for name, p in model.named_parameters():
if 'mask' in name:
continue
tensor = p.data.cpu().numpy()
grad_tensor = p.grad.data.cpu().numpy()
grad_tensor = np.where(tensor==0, 0, grad_tensor)
p.grad.data = torch.from_numpy(grad_tensor).to(device)
optimizer.step()
if batch_idx % args.log_interval == 0:
done = batch_idx * len(data)
percentage = 100. * batch_idx / len(train_loader)
pbar.set_description(f'Train Epoch: {epoch} [{done:5}/{len(train_loader.dataset)} ({percentage:3.0f}%)] Loss: {loss.item():.6f} Total: {total_loss.item():.6f}')
def test():
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)')
return accuracy
model.load_state_dict(torch.load(args.model+'.pth'))
# Pruning: zero out weights whose magnitude is below the sensitivity threshold
print("--- Pruning ---")
for name, p in model.named_parameters():
if 'mask' in name:
continue
tensor = p.data.cpu().numpy()
new_mask = np.where(abs(tensor) < args.sensitivity, 0, tensor)
p.data = torch.from_numpy(new_mask).to(device)
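# Example of the thresholding above: with --sensitivity 0.001 a weight of
# 0.0004 is zeroed while a weight of -0.02 is kept unchanged (pruning is
# purely magnitude-based).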
accuracy = test()
util.print_nonzeros(model)
print("--- Finetuning ---")
train(args.epochs)
accuracy = test()
torch.save(model.state_dict(), args.model+'_T_'+str(args.sensitivity)+'.pth')
|
python
|
import random
import numpy as np
import tensorflow as tf
import tqdm
from pipeline import SEED
from pipeline.dataset.gc import get_client
random.seed(SEED)
TRAIN = tf.estimator.ModeKeys.TRAIN
EVAL = tf.estimator.ModeKeys.EVAL
PREDICT = tf.estimator.ModeKeys.PREDICT
class MNISTDataset:
"""MNIST dataset."""
TABLES = {TRAIN: "train", EVAL: "valid", PREDICT: "test"}
def get_data(self):
def _string_to_float(_raw_image: str):
arr = np.asarray(_raw_image.split(","), "float")
return arr.reshape([28, 28])
mode = self.mode
dataset_ref = self.client.dataset(self.dataset_id)
print(f"Mode: {mode} -> Load data from table")
rows = self.client.list_rows(dataset_ref.table(self.TABLES[self.mode]))
arrows = rows.to_arrow()
arrows_dict = arrows.to_pydict()
labels, images = arrows_dict["key"], arrows_dict["image"]
labels = np.array(labels)
images = np.array(
[_string_to_float(image) for image in tqdm.tqdm(images)], "float"
)
        # Results from BigQuery are sorted by key.
data = list(zip(images, labels))
random.shuffle(data)
images, labels = zip(*data)
feature = np.array(images, "float")
label = np.array(labels, "int")
if self.mode == PREDICT:
return feature[:1000], label[:1000]
return feature, label
def __init__(self, mode: tf.estimator.ModeKeys, dataset_id: str):
self.mode = mode
self.dataset_id = dataset_id
self.client = get_client("bigquery")
self.data = self.get_data()
def data_generator(self):
def _gen():
for image, label in zip(*self.data):
yield image, label
return _gen
def get_input_fn(self, batch_size: int, shuffle=False):
def _preprocess(image, label):
image = image / 255.0
image = tf.reshape(image, [28, 28, 1])
return {"image": image}, label
def _get_input_fn():
output_types = (tf.float32, tf.int32)
            output_shapes = ([28, 28], [])  # (image shape, scalar label)
dataset = tf.data.Dataset.from_generator(
self.data_generator(), output_types, output_shapes
)
if self.mode == TRAIN:
dataset = dataset.repeat()
if shuffle:
dataset = dataset.shuffle(batch_size * 10)
dataset = dataset.map(_preprocess)
dataset = dataset.batch(batch_size).prefetch(8)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
return _get_input_fn
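# Minimal usage sketch with tf.estimator (the dataset_id "mnist" and the
# `estimator` object are assumptions, not part of the original module):
#   train_data = MNISTDataset(TRAIN, dataset_id="mnist")
#   input_fn = train_data.get_input_fn(batch_size=64, shuffle=True)
#   estimator.train(input_fn=input_fn, max_steps=1000)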
|
python
|
"""\
Demo app for the ARDrone.
This simple application allows to control the drone and see the drone's video
stream.
Copyright (c) 2011 Bastian Venthur
The license and distribution terms for this file may be
found in the file LICENSE in this distribution.
"""
import pygame
from pydrone import libardrone
if __name__ == '__main__':
pygame.init()
W, H = 320, 240
screen = pygame.display.set_mode((W, H))
drone = libardrone.ARDrone()
clock = pygame.time.Clock()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYUP:
drone.hover()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
drone.reset()
running = False
# takeoff / land
elif event.key == pygame.K_RETURN:
drone.takeoff()
elif event.key == pygame.K_SPACE:
drone.land()
# emergency
elif event.key == pygame.K_BACKSPACE:
drone.reset()
# forward / backward
elif event.key == pygame.K_w:
drone.move_forward()
elif event.key == pygame.K_s:
drone.move_backward()
# left / right
elif event.key == pygame.K_a:
drone.move_left()
elif event.key == pygame.K_d:
drone.move_right()
# up / down
elif event.key == pygame.K_UP:
drone.move_up()
elif event.key == pygame.K_DOWN:
drone.move_down()
# turn left / turn right
elif event.key == pygame.K_LEFT:
drone.turn_left()
elif event.key == pygame.K_RIGHT:
drone.turn_right()
# speed
elif event.key == pygame.K_1:
drone.speed = 0.1
elif event.key == pygame.K_2:
drone.speed = 0.2
elif event.key == pygame.K_3:
drone.speed = 0.3
elif event.key == pygame.K_4:
drone.speed = 0.4
elif event.key == pygame.K_5:
drone.speed = 0.5
elif event.key == pygame.K_6:
drone.speed = 0.6
elif event.key == pygame.K_7:
drone.speed = 0.7
elif event.key == pygame.K_8:
drone.speed = 0.8
elif event.key == pygame.K_9:
drone.speed = 0.9
elif event.key == pygame.K_0:
drone.speed = 1.0
try:
surface = pygame.image.fromstring(drone.image, (W, H), 'RGB')
# battery status
hud_color = (10, 10, 255)
if drone.navdata.get('drone_state', dict()).get('emergency_mask', 1):
hud_color = (255, 0, 0)
bat = drone.navdata.get(0, dict()).get('battery', 0)
f = pygame.font.Font(None, 20)
hud = f.render('Battery: %i%%' % bat, True, hud_color)
screen.blit(surface, (0, 0))
screen.blit(hud, (10, 10))
        except Exception:
            # No video frame / navdata available yet; skip drawing this iteration.
            pass
pygame.display.flip()
clock.tick()
pygame.display.set_caption("FPS: %.2f" % clock.get_fps())
print "Shutting down...",
drone.halt()
print "Ok."
|
python
|
from .Parameter import Parameter, registerParameterType
from .ParameterTree import ParameterTree
from .ParameterItem import ParameterItem
from . import parameterTypes as types
|
python
|
# encoding: utf-8
import uuid
import os
import random
import json
from collections import Counter
from flask import request, abort, jsonify, g, url_for, current_app, session
from flask_restful import Resource, reqparse
from flask_socketio import (
emit,
disconnect
)
from app.ansibles.ansible_task import INVENTORY
from app.ansibles.ansible_core import Runner
from app import redis, socketio, api
from tasks.task import long_task
from app import redis
class LoginView(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('username', type=str, location=[
'json', 'args', 'headers'])
self.reqparse.add_argument('password', type=str, location=[
'json', 'args', 'headers'])
self.args = self.reqparse.parse_args()
super(LoginView, self).__init__()
def get(self):
print("clients")
return jsonify({'clients': "unlogin"})
def post(self):
print(self.args)
return jsonify({"user": "admin", "token": "dsdsdufsffjfjudss789h", "code": 200})
class UserInfoView(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('token', type=str, location=[
'json', 'args', 'headers'])
self.args = self.reqparse.parse_args()
super(UserInfoView, self).__init__()
def get(self):
print(self.args)
# if token == user["token"]:
# name = "admin"
return jsonify({"token": "dsdsdufsffjfjudss789h", "code": 200, "name": "admin"})
def post(self):
print(self.args)
return jsonify({"token": "dsdsdufsffjfjudss789h", "code": 200})
class LogoutView(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('method', type=str, location=[
'json', 'args', 'headers'])
self.args = self.reqparse.parse_args()
super(LogoutView, self).__init__()
def post(self):
print(self.args)
return jsonify({"method": "logout", "code": 200})
|
python
|
import unittest
from random import uniform
from pysim import epcstd
class TestDataTypes(unittest.TestCase):
def test_divide_ratio_encoding(self):
self.assertEqual(epcstd.DivideRatio.DR_8.code, "0")
self.assertEqual(epcstd.DivideRatio.DR_643.code, "1")
def test_divide_ratio_str(self):
self.assertEqual(str(epcstd.DivideRatio.DR_8), '8')
self.assertEqual(str(epcstd.DivideRatio.DR_643), '64/3')
def test_divide_ratio_eval(self):
self.assertAlmostEqual(epcstd.DivideRatio.DR_8.eval(), 8.0)
self.assertAlmostEqual(epcstd.DivideRatio.DR_643.eval(), 64.0/3)
def test_session_encoding(self):
self.assertEqual(epcstd.Session.S0.code, "00")
self.assertEqual(epcstd.Session.S1.code, "01")
self.assertEqual(epcstd.Session.S2.code, "10")
self.assertEqual(epcstd.Session.S3.code, "11")
def test_session_number(self):
self.assertEqual(epcstd.Session.S0.index, 0)
self.assertEqual(epcstd.Session.S1.index, 1)
self.assertEqual(epcstd.Session.S2.index, 2)
self.assertEqual(epcstd.Session.S3.index, 3)
def test_session_str(self):
self.assertEqual(str(epcstd.Session.S0).upper(), "S0")
self.assertEqual(str(epcstd.Session.S1).upper(), "S1")
self.assertEqual(str(epcstd.Session.S2).upper(), "S2")
self.assertEqual(str(epcstd.Session.S3).upper(), "S3")
def test_tag_encoding_encoding(self):
self.assertEqual(epcstd.TagEncoding.FM0.code, '00')
self.assertEqual(epcstd.TagEncoding.M2.code, '01')
self.assertEqual(epcstd.TagEncoding.M4.code, '10')
self.assertEqual(epcstd.TagEncoding.M8.code, '11')
def test_tag_encoding_symbols_per_bit(self):
self.assertEqual(epcstd.TagEncoding.FM0.symbols_per_bit, 1)
self.assertEqual(epcstd.TagEncoding.M2.symbols_per_bit, 2)
self.assertEqual(epcstd.TagEncoding.M4.symbols_per_bit, 4)
self.assertEqual(epcstd.TagEncoding.M8.symbols_per_bit, 8)
def test_tag_encoding_str(self):
self.assertEqual(str(epcstd.TagEncoding.FM0).upper(), "FM0")
self.assertEqual(str(epcstd.TagEncoding.M2).upper(), "M2")
self.assertEqual(str(epcstd.TagEncoding.M4).upper(), "M4")
self.assertEqual(str(epcstd.TagEncoding.M8).upper(), "M8")
def test_inventory_flag_encoding(self):
self.assertEqual(epcstd.InventoryFlag.A.code, '0')
self.assertEqual(epcstd.InventoryFlag.B.code, '1')
def test_inventory_flag_str(self):
self.assertEqual(str(epcstd.InventoryFlag.A).upper(), "A")
self.assertEqual(str(epcstd.InventoryFlag.B).upper(), "B")
def test_sel_flag_encoding(self):
self.assertIn(epcstd.SelFlag.ALL.code, ['00', '01'])
self.assertEqual(epcstd.SelFlag.NOT_SEL.code, '10')
self.assertEqual(epcstd.SelFlag.SEL.code, '11')
def test_sel_flag_str(self):
self.assertEqual(str(epcstd.SelFlag.ALL).lower(), "all")
self.assertEqual(str(epcstd.SelFlag.SEL).lower(), "sl")
self.assertEqual(str(epcstd.SelFlag.NOT_SEL).lower(), "~sl")
def test_memory_bank_encoding(self):
self.assertEqual(epcstd.MemoryBank.RESERVED.code, '00')
self.assertEqual(epcstd.MemoryBank.EPC.code, '01')
self.assertEqual(epcstd.MemoryBank.TID.code, '10')
self.assertEqual(epcstd.MemoryBank.USER.code, '11')
def test_command_code_encoding(self):
self.assertEqual(epcstd.CommandCode.QUERY.code, '1000')
self.assertEqual(epcstd.CommandCode.QUERY_REP.code, '00')
self.assertEqual(epcstd.CommandCode.ACK.code, '01')
self.assertEqual(epcstd.CommandCode.REQ_RN.code, '11000001')
self.assertEqual(epcstd.CommandCode.READ.code, '11000010')
def test_command_code_str(self):
self.assertEqual(str(epcstd.CommandCode.QUERY).lower(), "query")
self.assertIn(str(epcstd.CommandCode.QUERY_REP).lower(),
['query_rep', 'qrep', 'queryrep'])
self.assertEqual(str(epcstd.CommandCode.ACK).lower(), 'ack')
self.assertIn(str(epcstd.CommandCode.REQ_RN).lower(),
['req_rn', 'reqrn'])
self.assertEqual(str(epcstd.CommandCode.READ).lower(), 'read')
class TestEncodingFunctions(unittest.TestCase):
def test_encode_bool(self):
self.assertEqual(epcstd.encode_bool(True), '1')
self.assertEqual(epcstd.encode_bool(False), '0')
def test_encode_int(self):
self.assertEqual(epcstd.encode_int(0, 4), '0000')
self.assertEqual(epcstd.encode_int(0xF, 4), '1111')
self.assertEqual(epcstd.encode_byte(0xA5), '10100101')
self.assertEqual(epcstd.encode_word(0xAB3C), '1010101100111100')
def test_ebv(self):
self.assertEqual(epcstd.encode_ebv(0), '00000000')
self.assertEqual(epcstd.encode_ebv(1), '00000001')
self.assertEqual(epcstd.encode_ebv(127), '01111111')
self.assertEqual(epcstd.encode_ebv(128), '1000000100000000')
self.assertEqual(epcstd.encode_ebv(16383), '1111111101111111')
self.assertEqual(epcstd.encode_ebv(16384), '100000011000000000000000')
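        # EBV-8 packs 7 value bits per byte and uses the top bit of each byte
        # as an extension flag, which is why 128 needs two bytes above
        # ('10000001' + '00000000').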
class TestCommands(unittest.TestCase):
def test_query_command_encoding(self):
cmd1 = epcstd.Query(dr=epcstd.DivideRatio.DR_8,
m=epcstd.TagEncoding.FM0, trext=False,
sel=epcstd.SelFlag.ALL,
session=epcstd.Session.S0,
target=epcstd.InventoryFlag.A, q=0,
crc=0x00)
self.assertEqual(cmd1.encode(), '1000000000000000000000')
self.assertEqual(cmd1.bitlen, 22)
cmd2 = epcstd.Query(dr=epcstd.DivideRatio.DR_643,
m=epcstd.TagEncoding.M8, trext=True,
sel=epcstd.SelFlag.SEL,
session=epcstd.Session.S3,
target=epcstd.InventoryFlag.B, q=6,
crc=0x0B)
self.assertEqual(cmd2.encode(), '1000111111111011001011')
def test_query_command_str(self):
cmd = epcstd.Query(dr=epcstd.DivideRatio.DR_8,
m=epcstd.TagEncoding.FM0, trext=False,
sel=epcstd.SelFlag.ALL,
session=epcstd.Session.S0,
target=epcstd.InventoryFlag.A, q=13,
crc=0x1F)
string = str(cmd)
self.assertIn(str(epcstd.CommandCode.QUERY), string)
self.assertIn(str(epcstd.DivideRatio.DR_8), string)
self.assertIn(str(epcstd.TagEncoding.FM0), string)
self.assertIn(str(epcstd.SelFlag.ALL), string)
self.assertIn(str(epcstd.Session.S0), string)
self.assertIn(str(epcstd.InventoryFlag.A), string)
self.assertIn("13", string)
self.assertIn("1F", string)
def test_query_command_using_modelParams(self):
#
# 1) Setting some initial values for Query fields in readerParams
# and making sure they are passed to Query as default values
#
epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_8
epcstd.stdParams.tag_encoding = epcstd.TagEncoding.FM0
epcstd.stdParams.sel = epcstd.SelFlag.SEL
epcstd.stdParams.session = epcstd.Session.S0
epcstd.stdParams.target = epcstd.InventoryFlag.A
epcstd.stdParams.Q = 3
epcstd.stdParams.trext = False
query1 = epcstd.Query()
def assert_query_params(query):
self.assertEqual(query.dr, epcstd.stdParams.divide_ratio)
self.assertEqual(query.m, epcstd.stdParams.tag_encoding)
self.assertEqual(query.sel, epcstd.stdParams.sel)
self.assertEqual(query.session, epcstd.stdParams.session)
self.assertEqual(query.target, epcstd.stdParams.target)
self.assertEqual(query.q, epcstd.stdParams.Q)
self.assertEqual(query.trext, epcstd.stdParams.trext)
assert_query_params(query1)
#
# 2) Altering values in readerParams and making sure they are
# passed to Query
#
epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_643
epcstd.stdParams.tag_encoding = epcstd.TagEncoding.M8
epcstd.stdParams.sel = epcstd.SelFlag.NOT_SEL
epcstd.stdParams.session = epcstd.Session.S3
epcstd.stdParams.target = epcstd.InventoryFlag.B
epcstd.stdParams.Q = 8
epcstd.stdParams.trext = True
query2 = epcstd.Query()
assert_query_params(query2)
def test_query_rep_command_encoding(self):
cmd1 = epcstd.QueryRep(session=epcstd.Session.S0)
self.assertEqual(cmd1.encode(), '0000')
self.assertEqual(cmd1.bitlen, 4)
cmd2 = epcstd.QueryRep(session=epcstd.Session.S3)
self.assertEqual(cmd2.encode(), '0011')
def test_query_rep_command_str(self):
cmd = epcstd.QueryRep(session=epcstd.Session.S1)
string = str(cmd)
self.assertIn(str(epcstd.CommandCode.QUERY_REP), string)
self.assertIn(str(epcstd.Session.S1), string)
def test_query_rep_using_modelparams(self):
def assert_fields_match_reader_params(query_rep):
self.assertEqual(query_rep.session, epcstd.stdParams.session)
# 1) Setting readerParams and checking they were passed to the
# command as the default params
epcstd.stdParams.session = epcstd.Session.S0
query_rep_1 = epcstd.QueryRep()
assert_fields_match_reader_params(query_rep_1)
# 2) Changing readerParams and checking the changed
# values were passed to new command as the default params
epcstd.stdParams.session = epcstd.Session.S3
query_rep_2 = epcstd.QueryRep()
assert_fields_match_reader_params(query_rep_2)
def test_ack_command_encoding(self):
cmd1 = epcstd.Ack(rn=0x0000)
self.assertEqual(cmd1.encode(), '010000000000000000')
self.assertEqual(cmd1.bitlen, 18)
cmd2 = epcstd.Ack(rn=0xFFFF)
self.assertEqual(cmd2.encode(), '011111111111111111')
def test_ack_command_str(self):
cmd = epcstd.Ack(rn=0xAB)
string = str(cmd)
self.assertIn(str(epcstd.CommandCode.ACK), string)
self.assertIn('0x00AB', string)
def test_req_rn_command_encoding(self):
cmd1 = epcstd.ReqRN(rn=0x0000, crc=0x0000)
cmd2 = epcstd.ReqRN(rn=0xAAAA, crc=0x5555)
self.assertEqual(cmd1.encode(),
'1100000100000000000000000000000000000000')
self.assertEqual(cmd1.bitlen, 40)
self.assertEqual(cmd2.encode(),
'1100000110101010101010100101010101010101')
def test_req_rn_command_str(self):
cmd1 = epcstd.ReqRN(rn=0x1234, crc=0xABCD)
cmd2 = epcstd.ReqRN(rn=0xAABB, crc=0xCCDD)
string1 = str(cmd1)
string2 = str(cmd2)
self.assertIn('0x1234', string1)
self.assertIn('0xABCD', string1)
self.assertIn('0xAABB', string2)
self.assertIn('0xCCDD', string2)
def test_read_command_encoding(self):
cmd1 = epcstd.Read(bank=epcstd.MemoryBank.RESERVED, word_ptr=0,
word_count=0, rn=0x0000, crc=0x0000)
cmd2 = epcstd.Read(bank=epcstd.MemoryBank.USER, word_ptr=0x80,
word_count=255, rn=0xAAAA, crc=0x5555)
self.assertEqual(cmd1.encode(), '11000010' + '0' * 50)
self.assertEqual(cmd1.bitlen, 58)
self.assertEqual(cmd2.encode(), '11000010' + '11' + '1000000100000000'
+ '1' * 8 + '1010' * 4 + '0101' * 4)
def test_read_using_modelParams(self):
def assert_fields_match_reader_params(cmd):
assert isinstance(cmd, epcstd.Read)
self.assertEqual(cmd.bank, epcstd.stdParams.read_default_bank)
self.assertEqual(cmd.word_ptr,
epcstd.stdParams.read_default_word_ptr)
self.assertEqual(cmd.word_count,
epcstd.stdParams.read_default_word_count)
# 1) Setting readerParams and checking they were passed to the
# command as the default params
epcstd.stdParams.read_default_bank = epcstd.MemoryBank.EPC
epcstd.stdParams.read_default_word_ptr = 0
epcstd.stdParams.read_default_word_count = 10
cmd1 = epcstd.Read()
assert_fields_match_reader_params(cmd1)
# 2) Changing readerParams and checking the changed
# values were passed to new command as the default params
epcstd.stdParams.read_default_bank = epcstd.MemoryBank.TID
epcstd.stdParams.read_default_word_ptr = 5
epcstd.stdParams.read_default_word_count = 23
cmd2 = epcstd.Read()
assert_fields_match_reader_params(cmd2)
def test_read_command_str(self):
cmd1 = epcstd.Read(bank=epcstd.MemoryBank.EPC, word_ptr=2,
word_count=5, rn=0xAABB, crc=0xCCDD)
cmd2 = epcstd.Read(bank=epcstd.MemoryBank.TID, word_ptr=3,
word_count=1, rn=0xABCD, crc=0xEFEF)
string1 = str(cmd1)
string2 = str(cmd2)
self.assertIn('EPC', string1.upper())
self.assertIn('0x02', string1)
self.assertIn('5', string1)
self.assertIn('0xAABB', string1)
self.assertIn('0xCCDD', string1)
self.assertIn('TID', string2.upper())
self.assertIn('0x03', string2)
self.assertIn('1', string2)
self.assertIn('0xABCD', string2)
self.assertIn('0xEFEF', string2)
class TestReplies(unittest.TestCase):
def test_to_bytes(self):
self.assertEqual(epcstd.to_bytes('1122'), [0x11, 0x22])
self.assertEqual(epcstd.to_bytes((0xAB,)), [0xAB])
with self.assertRaises(ValueError):
epcstd.to_bytes(0xAB)
def test_query_reply_bitlen(self):
msg = epcstd.QueryReply(rn=0x0000)
self.assertEqual(msg.bitlen, 16)
def test_query_reply_str(self):
msg1 = epcstd.QueryReply(rn=0xABCD)
msg2 = epcstd.QueryReply(rn=0x1122)
string1 = str(msg1)
string2 = str(msg2)
self.assertIn('ABCD', string1.upper())
self.assertNotIn('1122', string1)
self.assertIn('1122', string2)
self.assertNotIn('ABCD', string2.upper())
def test_ack_reply_bitlen(self):
msg1 = epcstd.AckReply(pc=0x0000, epc='0011223344556677', crc=0x0000)
msg2 = epcstd.AckReply(pc=0x0000, epc='001122334455', crc=0x0000)
msg3 = epcstd.AckReply(pc=0x0000, epc=[0x00, 0x11, 0x22], crc=0x0000)
self.assertEqual(msg1.bitlen, 96)
self.assertEqual(msg2.bitlen, 80)
self.assertEqual(msg3.bitlen, 56)
def test_ack_reply_str(self):
msg1 = epcstd.AckReply(pc=0xABCD, epc='0011223344556677', crc=0x1234)
msg2 = epcstd.AckReply(pc=0xDCBA, epc='001122334455', crc=0x4321)
s1 = str(msg1)
s2 = str(msg2)
self.assertIn('ABCD', s1.upper())
self.assertNotIn('DCBA', s1.upper())
self.assertIn('1234', s1)
self.assertNotIn('4321', s1)
self.assertIn('0011223344556677', s1)
self.assertIn('DCBA', s2.upper())
self.assertIn('4321', s2)
self.assertIn('001122334455', s2)
self.assertNotIn('6677', s2)
def test_req_rn_reply_bitlen(self):
msg = epcstd.ReqRnReply(rn=0x0000, crc=0x0000)
self.assertEqual(msg.bitlen, 32)
def test_req_rn_reply_str(self):
msg1 = epcstd.ReqRnReply(rn=0xABCD, crc=0x1234)
msg2 = epcstd.ReqRnReply(rn=0xDCBA, crc=0x4321)
s1 = str(msg1)
s2 = str(msg2)
self.assertIn('ABCD', s1.upper())
self.assertIn('1234', s1)
self.assertNotIn('DCBA', s1.upper())
self.assertNotIn('4321', s1)
self.assertIn('DCBA', s2.upper())
self.assertIn('4321', s2)
def test_read_reply_bitlen(self):
msg1 = epcstd.ReadReply(data='00112233', rn=0x0000, crc=0x0000)
msg2 = epcstd.ReadReply(data='001122334455', rn=0x0000, crc=0x0000)
msg3 = epcstd.ReadReply(data=[0x00, 0x11], rn=0x0000, crc=0x0000)
self.assertEqual(msg1.bitlen, 65)
self.assertEqual(msg2.bitlen, 81)
self.assertEqual(msg3.bitlen, 49)
def test_read_reply_str(self):
msg1 = epcstd.ReadReply(data='00112233', rn=0x1234, crc=0xABCD)
msg2 = epcstd.ReadReply(data='AABBCC', rn=0x4321, crc=0xDCBA)
s1 = str(msg1)
s2 = str(msg2)
self.assertIn('00112233', s1)
self.assertIn('1234', s1)
self.assertIn('ABCD', s1.upper())
self.assertNotIn('AABBCC', s1.upper())
self.assertNotIn('4321', s1)
self.assertNotIn('DCBA', s1)
self.assertIn('AABBCC', s2.upper())
self.assertIn('4321', s2)
self.assertIn('DCBA', s2.upper())
class TestReaderPreambles(unittest.TestCase):
def test_reader_preamble_durations(self):
p = epcstd.ReaderPreamble(tari=6.25e-6, rtcal=18.75e-6, trcal=56.25e-6)
self.assertAlmostEqual(p.data0, p.tari, 9)
self.assertAlmostEqual(p.delim, 12.5e-6, 9)
self.assertAlmostEqual(p.data0, 6.25e-6, 9)
self.assertAlmostEqual(p.data1, 12.5e-6, 9)
self.assertAlmostEqual(p.rtcal, 18.75e-6, 9)
self.assertAlmostEqual(p.trcal, 56.25e-6, 9)
self.assertAlmostEqual(p.duration, 93.75e-6, 9)
def test_reader_preamble_str(self):
p = epcstd.ReaderPreamble(tari=12.5e-6, rtcal=33.45e-6, trcal=60.15e-6,
delim=13.0e-6)
s = str(p)
self.assertIn("12.5", s)
self.assertIn("33.45", s)
self.assertIn("60.15", s)
self.assertIn("13.0", s)
def test_reader_sync_durations(self):
sync = epcstd.ReaderSync(tari=12.5e-6, rtcal=31.25e-6, delim=13.0e-6)
self.assertAlmostEqual(sync.tari, sync.data0, 9)
self.assertAlmostEqual(sync.data0, 12.5e-6, 9)
self.assertAlmostEqual(sync.data1, 18.75e-6, 9)
self.assertAlmostEqual(sync.rtcal, 31.25e-6, 9)
self.assertAlmostEqual(sync.delim, 13.0e-6)
self.assertAlmostEqual(sync.duration, 56.75e-6, 9)
def test_reader_sync_str(self):
sync = epcstd.ReaderSync(tari=25e-6, rtcal=75e-6, delim=12.0e-6)
s = str(sync)
self.assertIn("12.0", s)
self.assertIn("25.0", s)
self.assertIn("75.0", s)
class TestTagPreambles(unittest.TestCase):
def test_tag_FM0_preamble_bitlen_and_duration(self):
short_preamble = epcstd.FM0Preamble(extended=False)
long_preamble = epcstd.FM0Preamble(extended=True)
self.assertEqual(short_preamble.bitlen, 6)
self.assertEqual(long_preamble.bitlen, 18)
self.assertAlmostEqual(short_preamble.get_duration(blf=320e3),
1.875e-5)
self.assertAlmostEqual(long_preamble.get_duration(blf=320e3),
5.625e-5)
self.assertAlmostEqual(short_preamble.get_duration(blf=40e3), 15e-5)
self.assertAlmostEqual(long_preamble.get_duration(blf=40e3), 45e-5)
def test_tag_miller_preamble_bitlen_and_duration(self):
m2_short = epcstd.MillerPreamble(m=2, extended=False)
m2_long = epcstd.MillerPreamble(m=2, extended=True)
m4_short = epcstd.MillerPreamble(m=4)
m8_long = epcstd.MillerPreamble(m=8, extended=True)
self.assertEqual(m2_short.bitlen, 10)
self.assertEqual(m2_long.bitlen, 22)
self.assertEqual(m4_short.bitlen, 10)
self.assertEqual(m8_long.bitlen, 22)
self.assertAlmostEqual(m2_short.get_duration(blf=320e3), 6.25e-5)
self.assertAlmostEqual(m2_long.get_duration(blf=320e3), 13.75e-5)
self.assertAlmostEqual(m4_short.get_duration(blf=320e3), 12.5e-5)
self.assertAlmostEqual(m8_long.get_duration(blf=320e3), 55e-5)
self.assertAlmostEqual(m2_short.get_duration(blf=64e3), 31.25e-5)
def test_tag_preamble_factory(self):
fm0_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0)
fm0_extended_preamble = epcstd.create_tag_preamble(
epcstd.TagEncoding.FM0, True)
m2_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M2)
        m4_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M4)
        m8_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M8)
self.assertIsInstance(fm0_preamble, epcstd.FM0Preamble)
self.assertIsInstance(fm0_extended_preamble, epcstd.FM0Preamble)
self.assertIsInstance(m2_preamble, epcstd.MillerPreamble)
self.assertIsInstance(m4_preamble, epcstd.MillerPreamble)
self.assertIsInstance(m8_preamble, epcstd.MillerPreamble)
self.assertEqual(fm0_preamble.bitlen, 6)
self.assertEqual(fm0_extended_preamble.bitlen, 18)
def test_tag_preamble_has_str(self):
s1 = str(epcstd.FM0Preamble(True))
s2 = str(epcstd.MillerPreamble(2, True))
self.assertNotIn("0x", s1)
self.assertNotIn("0x", s2)
def test_tag_preamble_bitlen(self):
epcstd.stdParams.trext = False
fm0_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, False)
fm0_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, True)
m2_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, False)
m2_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, True)
self.assertEqual(fm0_normal.bitlen, epcstd.tag_preamble_bitlen(
epcstd.TagEncoding.FM0))
self.assertEqual(fm0_extended.bitlen, epcstd.tag_preamble_bitlen(
epcstd.TagEncoding.FM0, True))
self.assertEqual(m2_normal.bitlen, epcstd.tag_preamble_bitlen(
epcstd.TagEncoding.M2))
self.assertEqual(m2_extended.bitlen, epcstd.tag_preamble_bitlen(
epcstd.TagEncoding.M2, True))
def test_tag_preamble_duration(self):
fm0_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, False)
fm0_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, True)
m2_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, False)
m2_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, True)
blf_slow = epcstd.get_blf(epcstd.DivideRatio.DR_8, 25.0e-6*9)
blf_fast = epcstd.get_blf(epcstd.DivideRatio.DR_643, 6.25e-6 * 9)
self.assertEqual(
fm0_normal.get_duration(blf_slow), epcstd.tag_preamble_duration(
blf_slow, epcstd.TagEncoding.FM0, False))
self.assertEqual(
fm0_normal.get_duration(blf_fast), epcstd.tag_preamble_duration(
blf_fast, epcstd.TagEncoding.FM0, False))
self.assertEqual(
fm0_extended.get_duration(blf_slow), epcstd.tag_preamble_duration(
blf_slow, epcstd.TagEncoding.FM0, True))
self.assertEqual(
fm0_extended.get_duration(blf_fast), epcstd.tag_preamble_duration(
blf_fast, epcstd.TagEncoding.FM0, True))
self.assertEqual(
m2_normal.get_duration(blf_slow), epcstd.tag_preamble_duration(
blf_slow, epcstd.TagEncoding.M2, False))
self.assertEqual(
m2_normal.get_duration(blf_fast), epcstd.tag_preamble_duration(
blf_fast, epcstd.TagEncoding.M2, False))
self.assertEqual(
m2_extended.get_duration(blf_slow), epcstd.tag_preamble_duration(
blf_slow, epcstd.TagEncoding.M2, True))
self.assertEqual(
m2_extended.get_duration(blf_fast), epcstd.tag_preamble_duration(
blf_fast, epcstd.TagEncoding.M2, True))
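# Editorial note (sketch, not part of the original test suite): the expected
# preamble durations above are consistent with the apparent rule
#     duration = bitlen * M / BLF,
# with M = 1 for FM0 and M in {2, 4, 8} for the Miller encodings. For example,
# the short FM0 preamble (6 bits) at BLF = 320 kHz gives 6 / 320e3 = 1.875e-5 s,
# and the short M2 preamble (10 bits) gives 10 * 2 / 320e3 = 6.25e-5 s, matching
# the assertions in test_tag_FM0_preamble_bitlen_and_duration and
# test_tag_miller_preamble_bitlen_and_duration.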
class TestReaderFrames(unittest.TestCase):
def setUp(self):
# The following query will be encoded as 1000011011010001101010
# number of 1s: 10, number of 0s: 12
self.query = epcstd.Query(
dr=epcstd.DivideRatio.DR_8, m=epcstd.TagEncoding.M8, trext=False,
sel=epcstd.SelFlag.SEL, session=epcstd.Session.S1,
target=epcstd.InventoryFlag.A, q=3, crc=0xAA)
# The following QueryRep will be encoded as 0011
self.query_rep = epcstd.QueryRep(session=epcstd.Session.S3)
# Now we define fast and slow preambles and SYNCs:
self.fast_preamble = epcstd.ReaderPreamble(
tari=6.25e-6, rtcal=18.75e-6, trcal=56.25e-6)
self.slow_preamble = epcstd.ReaderPreamble(
tari=25e-6, rtcal=75e-6, trcal=225e-6)
self.fast_sync = epcstd.ReaderSync(tari=12.5e-6, rtcal=31.25e-6)
self.slow_sync = epcstd.ReaderSync(tari=25e-6, rtcal=62.5e-6)
def test_query_frame_fast_preamble_duration(self):
f = epcstd.ReaderFrame(preamble=self.fast_preamble, command=self.query)
self.assertAlmostEqual(f.duration, 293.75e-6, 9)
self.assertAlmostEqual(f.body_duration, 200e-6, 9)
def test_query_frame_slow_preamble_duration(self):
f = epcstd.ReaderFrame(preamble=self.slow_preamble, command=self.query)
self.assertAlmostEqual(f.duration, 1137.5e-6, 9)
self.assertAlmostEqual(f.body_duration, 800.0e-6, 9)
def test_query_rep_frame_fast_sync_duration(self):
f = epcstd.ReaderFrame(preamble=self.fast_sync, command=self.query_rep)
self.assertAlmostEqual(f.body_duration, 62.5e-6, 9)
self.assertAlmostEqual(f.duration, 118.75e-6, 9)
def test_query_rep_frame_slow_sync_duration(self):
f = epcstd.ReaderFrame(preamble=self.slow_sync, command=self.query_rep)
self.assertAlmostEqual(f.body_duration, 125e-6, 9)
self.assertAlmostEqual(f.duration, 225e-6, 9)
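# Editorial note (sketch): the reader-frame durations asserted above follow
# the apparent rule
#     body_duration = n_zeros * data0 + n_ones * data1, with data1 = rtcal - data0,
#     duration      = preamble.duration + body_duration.
# With the fast preamble (data0 = tari = 6.25 us, rtcal = 18.75 us, so
# data1 = 12.5 us) and the 22-bit Query above (12 zeros, 10 ones):
#     body  = 12 * 6.25e-6 + 10 * 12.5e-6 = 200e-6 s,
#     total = 93.75e-6 (preamble) + 200e-6 = 293.75e-6 s,
# which matches test_query_frame_fast_preamble_duration.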
class TestTagFrames(unittest.TestCase):
def setUp(self):
self.ack_reply = epcstd.AckReply(epc="ABCDEF01", pc=0, crc=0)
self.rn16_reply = epcstd.QueryReply(rn=0)
self.slow_blf = 120e3
self.fast_blf = 640e3
def test_tag_fm0_frame_duration(self):
pn = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, extended=False)
pe = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, extended=True)
ne_ack_frame = epcstd.TagFrame(preamble=pn, reply=self.ack_reply)
ex_ack_frame = epcstd.TagFrame(preamble=pe, reply=self.ack_reply)
ex_rn16_frame = epcstd.TagFrame(preamble=pe, reply=self.rn16_reply)
self.assertAlmostEqual(ne_ack_frame.get_body_duration(self.slow_blf),
0.00053333333, 8)
self.assertAlmostEqual(ne_ack_frame.get_duration(self.slow_blf),
0.00059166667, 8)
self.assertAlmostEqual(ex_ack_frame.get_body_duration(self.slow_blf),
0.00053333333, 8)
self.assertAlmostEqual(ex_ack_frame.get_duration(self.slow_blf),
0.00069166667, 8)
self.assertAlmostEqual(ex_rn16_frame.get_body_duration(self.slow_blf),
0.00013333333, 8)
self.assertAlmostEqual(ex_rn16_frame.get_duration(self.slow_blf),
0.00029166667, 8)
self.assertAlmostEqual(ex_rn16_frame.get_body_duration(self.fast_blf),
2.5e-05, 8)
self.assertAlmostEqual(ex_rn16_frame.get_duration(self.fast_blf),
5.46875e-05)
def test_tag_m2_frame_duration(self):
preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, False)
ext_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, True)
frame = epcstd.TagFrame(preamble, self.rn16_reply)
ext_frame = epcstd.TagFrame(ext_preamble, self.rn16_reply)
self.assertAlmostEqual(frame.get_body_duration(self.slow_blf),
0.0002666666666666667, 8)
self.assertAlmostEqual(frame.get_duration(self.slow_blf),
0.00045, 8)
self.assertAlmostEqual(frame.get_body_duration(self.fast_blf),
5e-05, 8)
self.assertAlmostEqual(frame.get_duration(self.fast_blf),
8.4375e-05, 8)
self.assertAlmostEqual(ext_frame.get_body_duration(self.slow_blf),
frame.get_body_duration(self.slow_blf), 8)
self.assertAlmostEqual(ext_frame.get_duration(self.slow_blf),
0.00065, 8)
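# Editorial note (sketch): the tag-frame durations asserted above are
# consistent with
#     duration = (reply_bitlen + preamble_bitlen + 1) * M / BLF,
# where the extra bit appears to be the end-of-signaling bit. For example, the
# 16-bit RN16 reply with the extended FM0 preamble (18 bits) at BLF = 640 kHz
# gives (16 + 18 + 1) / 640e3 = 5.46875e-5 s, matching
# test_tag_fm0_frame_duration.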
class TestReaderFrameAccessors(unittest.TestCase):
def setUp(self):
self.slow_tari = 12.5e-6
self.slow_rtcal = 37.5e-6
self.slow_trcal = 112.5e-6
self.fast_tari = 6.25e-6
self.fast_rtcal = 15.625e-6
self.fast_trcal = 46.875e-6
self.slow_sync = epcstd.ReaderSync(self.slow_tari, self.slow_rtcal)
self.fast_sync = epcstd.ReaderSync(self.fast_tari, self.fast_rtcal)
self.slow_preamble = epcstd.ReaderPreamble(
self.slow_tari, self.slow_rtcal, self.slow_trcal)
self.fast_preamble = epcstd.ReaderPreamble(
self.fast_tari, self.fast_rtcal, self.fast_trcal)
self.ack = epcstd.Ack(0xAAAA)
self.query_rep = epcstd.QueryRep(epcstd.Session.S1)
self.query = epcstd.Query()
self.slow_ack_frame = epcstd.ReaderFrame(self.slow_sync, self.ack)
self.fast_ack_frame = epcstd.ReaderFrame(self.fast_sync, self.ack)
self.slow_query_rep_frame = epcstd.ReaderFrame(
self.slow_sync, self.query_rep)
self.fast_query_rep_frame = epcstd.ReaderFrame(
self.fast_sync, self.query_rep)
self.slow_query_frame = epcstd.ReaderFrame(
self.slow_preamble, self.query)
self.fast_query_frame = epcstd.ReaderFrame(
self.fast_preamble, self.query)
def test_reader_frame_duration_return_equals_sync_frame_getter(self):
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.slow_tari, rtcal=self.slow_rtcal, command=self.ack),
self.slow_ack_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.fast_tari, rtcal=self.fast_rtcal, command=self.ack),
self.fast_ack_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.slow_tari, rtcal=self.slow_rtcal,
command=self.query_rep),
self.slow_query_rep_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.fast_tari, rtcal=self.fast_rtcal,
command=self.query_rep),
self.fast_query_rep_frame.duration, 8)
def test_reader_frame_duration_return_equals_query_frame_getter(self):
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.slow_tari, rtcal=self.slow_rtcal,
trcal=self.slow_trcal, command=self.query),
self.slow_query_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.fast_tari, rtcal=self.fast_rtcal,
trcal=self.fast_trcal, command=self.query),
self.fast_query_frame.duration, 8)
def test_reader_frame_duration_recognizes_encoded_sync_commands(self):
encoded_ack = self.ack.encode()
encoded_query_rep = self.query_rep.encode()
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.slow_tari, rtcal=self.slow_rtcal,
command=encoded_ack),
self.slow_ack_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.fast_tari, rtcal=self.fast_rtcal,
command=encoded_ack),
self.fast_ack_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.slow_tari, rtcal=self.slow_rtcal,
command=encoded_query_rep),
self.slow_query_rep_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.fast_tari, rtcal=self.fast_rtcal,
command=encoded_query_rep),
self.fast_query_rep_frame.duration, 8)
def test_reader_frame_duration_recognizes_encoded_query_command(self):
encoded_query = self.query.encode()
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.slow_tari, rtcal=self.slow_rtcal,
trcal=self.slow_trcal, command=encoded_query),
self.slow_query_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(
tari=self.fast_tari, rtcal=self.fast_rtcal,
trcal=self.fast_trcal, command=encoded_query),
self.fast_query_frame.duration, 8)
def test_reader_frame_duration_uses_default_modelParams(self):
#
# 1) Setting readerParams to slow frame type
#
epcstd.stdParams.tari = self.slow_tari
epcstd.stdParams.rtcal = self.slow_rtcal
epcstd.stdParams.trcal = self.slow_trcal
self.assertAlmostEqual(
epcstd.reader_frame_duration(self.ack),
self.slow_ack_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(self.query_rep),
self.slow_query_rep_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(self.query),
self.slow_query_frame.duration, 8)
#
# 2) Setting readerParams to fast frame type
#
epcstd.stdParams.tari = self.fast_tari
epcstd.stdParams.rtcal = self.fast_rtcal
epcstd.stdParams.trcal = self.fast_trcal
self.assertAlmostEqual(
epcstd.reader_frame_duration(self.ack),
self.fast_ack_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(self.query_rep),
self.fast_query_rep_frame.duration, 8)
self.assertAlmostEqual(
epcstd.reader_frame_duration(self.query),
self.fast_query_frame.duration, 8)
class TestCommandsDurationEstimations(unittest.TestCase):
"""
Test-cases for functions ``command_duration``, ``query_duration``,
``query_rep_duration``, etc.
"""
def setUp(self):
self.slow = dict(
tari=25.0e-6, rtcal=75.0e-6, trcal=225.0e-6, delim=12.5e-6,
dr=epcstd.DivideRatio.DR_8, m=epcstd.TagEncoding.M8, trext=True,
sel=epcstd.SelFlag.SEL, session=epcstd.Session.S3,
target=epcstd.InventoryFlag.B, q=15, rn=0xFFFF,
bank=epcstd.MemoryBank.TID, word_ptr=0xF, word_cnt=15,
crc5=0x1F, crc16=0xFFFF)
self.fast = dict(
tari=6.25e-6, rtcal=15.625e-6, trcal=17.1875e-6, delim=12.5e-6,
dr=epcstd.DivideRatio.DR_643, m=epcstd.TagEncoding.FM0, trext=False,
sel=epcstd.SelFlag.ALL, session=epcstd.Session.S0,
target=epcstd.InventoryFlag.A, q=0, rn=0x0000,
bank=epcstd.MemoryBank.EPC, word_ptr=0x0, word_cnt=1,
crc5=0x00, crc16=0x0000)
self.slow['preamble'] = epcstd.ReaderPreamble(
self.slow['tari'], self.slow['rtcal'], self.slow['trcal'],
self.slow['delim'])
self.fast['preamble'] = epcstd.ReaderPreamble(
self.fast['tari'], self.fast['rtcal'], self.fast['trcal'],
self.fast['delim'])
self.slow['sync'] = epcstd.ReaderSync(
self.slow['tari'], self.slow['rtcal'], self.slow['delim'])
self.fast['sync'] = epcstd.ReaderSync(
self.fast['tari'], self.fast['rtcal'], self.fast['delim'])
@staticmethod
def get_command_duration(command_code, params):
return epcstd.command_duration(
command_code=command_code, tari=params['tari'],
rtcal=params['rtcal'], trcal=params['trcal'], delim=params['delim'],
dr=params['dr'], m=params['m'], trext=params['trext'],
sel=params['sel'], session=params['session'],
target=params['target'], q=params['q'], rn=params['rn'],
bank=params['bank'], word_ptr=params['word_ptr'],
word_count=params['word_cnt'], crc5=params['crc5'],
crc16=params['crc16'])
@staticmethod
def set_default_parameters(par):
epcstd.stdParams.tari = par['tari']
epcstd.stdParams.rtcal = par['rtcal']
epcstd.stdParams.trcal = par['trcal']
epcstd.stdParams.delim = par['delim']
epcstd.stdParams.divide_ratio = par['dr']
epcstd.stdParams.tag_encoding = par['m']
epcstd.stdParams.trext = par['trext']
epcstd.stdParams.sel = par['sel']
epcstd.stdParams.session = par['session']
epcstd.stdParams.target = par['target']
epcstd.stdParams.Q = par['q']
epcstd.stdParams.default_rn = par['rn']
epcstd.stdParams.read_default_bank = par['bank']
epcstd.stdParams.read_default_word_ptr = par['word_ptr']
epcstd.stdParams.read_default_word_count = par['word_cnt']
epcstd.stdParams.default_crc5 = par['crc5']
epcstd.stdParams.default_crc16 = par['crc16']
def test_query_duration(self):
slow_query = epcstd.Query(
self.slow['dr'], self.slow['m'], self.slow['trext'],
self.slow['sel'], self.slow['session'], self.slow['target'],
self.slow['q'], self.slow['crc5'])
fast_query = epcstd.Query(
self.fast['dr'], self.fast['m'], self.fast['trext'],
self.fast['sel'], self.fast['session'], self.fast['target'],
self.fast['q'], self.fast['crc5'])
slow_frame = epcstd.ReaderFrame(self.slow['preamble'], slow_query)
fast_frame = epcstd.ReaderFrame(self.fast['preamble'], fast_query)
self.assertAlmostEqual(
slow_frame.duration,
self.get_command_duration(epcstd.CommandCode.QUERY, self.slow),
8, "command_duration(QUERY, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.query_duration(
tari=self.slow['tari'], rtcal=self.slow['rtcal'],
trcal=self.slow['trcal'], delim=self.slow['delim'],
dr=self.slow['dr'], m=self.slow['m'], trext=self.slow['trext'],
sel=self.slow['sel'], session=self.slow['session'],
target=self.slow['target'], q=self.slow['q'],
crc=self.slow['crc5']),
8, "query_duration(slow params) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
self.get_command_duration(epcstd.CommandCode.QUERY, self.fast),
8, "command_duration(QUERY, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.query_duration(
tari=self.fast['tari'], rtcal=self.fast['rtcal'],
trcal=self.fast['trcal'], delim=self.fast['delim'],
dr=self.fast['dr'], m=self.fast['m'], trext=self.fast['trext'],
sel=self.fast['sel'], session=self.fast['session'],
target=self.fast['target'], q=self.fast['q'],
crc=self.fast['crc5']),
8, "query_duration(fast params) doesn't match frame")
def test_query_duration_with_default_parameters(self):
slow_query = epcstd.Query(
self.slow['dr'], self.slow['m'], self.slow['trext'],
self.slow['sel'], self.slow['session'], self.slow['target'],
self.slow['q'], self.slow['crc5'])
fast_query = epcstd.Query(
self.fast['dr'], self.fast['m'], self.fast['trext'],
self.fast['sel'], self.fast['session'], self.fast['target'],
self.fast['q'], self.fast['crc5'])
slow_frame = epcstd.ReaderFrame(self.slow['preamble'], slow_query)
fast_frame = epcstd.ReaderFrame(self.fast['preamble'], fast_query)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.duration,
epcstd.command_duration(epcstd.CommandCode.QUERY), 8,
"command_duration(QUERY, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.query_duration(), 8,
"query_duration(default=slow) doesnt' match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.duration,
epcstd.command_duration(epcstd.CommandCode.QUERY), 8,
"command_duration(QUERY, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.query_duration(), 8,
"query_duration(default=fast) doesn't match frame")
def test_query_rep_duration(self):
slow_qrep = epcstd.QueryRep(self.slow['session'])
fast_qrep = epcstd.QueryRep(self.fast['session'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_qrep)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_qrep)
self.assertAlmostEqual(
slow_frame.duration,
self.get_command_duration(epcstd.CommandCode.QUERY_REP, self.slow),
8, "command_duration(QUERY_REP, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.query_rep_duration(
tari=self.slow['tari'], rtcal=self.slow['rtcal'],
trcal=self.slow['trcal'], delim=self.slow['delim'],
session=self.slow['session']),
8, "query_rep_duration(slow) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
self.get_command_duration(epcstd.CommandCode.QUERY_REP, self.fast),
8, "command_duration(QUERY_REP, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.query_rep_duration(
tari=self.fast['tari'], rtcal=self.fast['rtcal'],
trcal=self.fast['trcal'], delim=self.fast['delim'],
session=self.fast['session']),
8, "query_rep_duration(fast) doesn't match frame")
def test_query_rep_duration_with_default_parameters(self):
slow_qrep = epcstd.QueryRep(self.slow['session'])
fast_qrep = epcstd.QueryRep(self.fast['session'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_qrep)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_qrep)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.duration,
epcstd.command_duration(epcstd.CommandCode.QUERY_REP), 8,
"command_duration(QUERY_REP, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration, epcstd.query_rep_duration(), 8,
"query_rep_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.duration,
epcstd.command_duration(epcstd.CommandCode.QUERY_REP), 8,
"command_duration(QUERY_REP, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration, epcstd.query_rep_duration(), 8,
"query_rep_duration(default=fast) doesn't match frame")
def test_ack_duration(self):
slow_ack = epcstd.Ack(self.slow['rn'])
fast_ack = epcstd.Ack(self.fast['rn'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_ack)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_ack)
self.assertAlmostEqual(
slow_frame.duration,
self.get_command_duration(epcstd.CommandCode.ACK, self.slow),
8, "command_duration(ACK, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.ack_duration(
tari=self.slow['tari'], rtcal=self.slow['rtcal'],
trcal=self.slow['trcal'], delim=self.slow['delim'],
rn=self.slow['rn']),
8, "ack_duration(slow) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
self.get_command_duration(epcstd.CommandCode.ACK, self.fast),
8, "command_duration(ACK, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.ack_duration(
tari=self.fast['tari'], rtcal=self.fast['rtcal'],
trcal=self.fast['trcal'], delim=self.fast['delim'],
rn=self.fast['rn']),
8, "ack_duration(fast) doesn't match frame")
def test_ack_duration_with_default_parameters(self):
slow_ack = epcstd.Ack(self.slow['rn'])
fast_ack = epcstd.Ack(self.fast['rn'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_ack)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_ack)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.duration,
epcstd.command_duration(epcstd.CommandCode.ACK), 8,
"command_duration(ACK, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration, epcstd.ack_duration(), 8,
"ack_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.duration,
epcstd.command_duration(epcstd.CommandCode.ACK), 8,
"command_duration(ACK, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration, epcstd.ack_duration(), 8,
"ack_duration(default=fast) doesn't match frame")
def test_reqrn_duration(self):
slow_reqrn = epcstd.ReqRN(self.slow['rn'], self.slow['crc16'])
fast_reqrn = epcstd.ReqRN(self.fast['rn'], self.fast['crc16'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_reqrn)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_reqrn)
self.assertAlmostEqual(
slow_frame.duration,
self.get_command_duration(epcstd.CommandCode.REQ_RN, self.slow),
8, "command_duration(REQ_RN, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.reqrn_duration(
tari=self.slow['tari'], rtcal=self.slow['rtcal'],
trcal=self.slow['trcal'], delim=self.slow['delim'],
rn=self.slow['rn'], crc=self.slow['crc16']),
8, "reqrn_duration(slow) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
self.get_command_duration(epcstd.CommandCode.REQ_RN, self.fast),
8, "command_duration(REQ_RN, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.reqrn_duration(
tari=self.fast['tari'], rtcal=self.fast['rtcal'],
trcal=self.fast['trcal'], delim=self.fast['delim'],
rn=self.fast['rn'], crc=self.fast['crc16']),
8, "reqrn_duration(fast) doesn't match frame")
def test_reqrn_duration_with_default_parameters(self):
slow_reqrn = epcstd.ReqRN(self.slow['rn'], self.slow['crc16'])
fast_reqrn = epcstd.ReqRN(self.fast['rn'], self.fast['crc16'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_reqrn)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_reqrn)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.duration,
epcstd.command_duration(epcstd.CommandCode.REQ_RN), 8,
"command_duration(REQ_RN, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration, epcstd.reqrn_duration(), 8,
"reqrn_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.duration,
epcstd.command_duration(epcstd.CommandCode.REQ_RN), 8,
"command_duration(REQ_RN, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration, epcstd.reqrn_duration(), 8,
"reqrn_duration(default=fast) doesn't match frame")
def test_read_duration(self):
slow_read = epcstd.Read(self.slow['bank'], self.slow['word_ptr'],
self.slow['word_cnt'], self.slow['rn'],
self.slow['crc16'])
fast_read = epcstd.Read(self.fast['bank'], self.fast['word_ptr'],
self.fast['word_cnt'], self.fast['rn'],
self.fast['crc16'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_read)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_read)
self.assertAlmostEqual(
slow_frame.duration,
self.get_command_duration(epcstd.CommandCode.READ, self.slow),
8, "command_duration(READ, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.read_duration(
tari=self.slow['tari'], rtcal=self.slow['rtcal'],
trcal=self.slow['trcal'], delim=self.slow['delim'],
bank=self.slow['bank'], word_ptr=self.slow['word_ptr'],
word_count=self.slow['word_cnt'], rn=self.slow['rn'],
crc=self.slow['crc16']),
8, "read_duration(slow params) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
self.get_command_duration(epcstd.CommandCode.READ, self.fast),
8, "command_duration(READ, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.read_duration(
tari=self.fast['tari'], rtcal=self.fast['rtcal'],
trcal=self.fast['trcal'], delim=self.fast['delim'],
bank=self.fast['bank'], word_ptr=self.fast['word_ptr'],
word_count=self.fast['word_cnt'], rn=self.fast['rn'],
crc=self.fast['crc16']),
8, "read_duration(fast params) doesn't match frame")
def test_read_duration_with_default_parameters(self):
slow_read = epcstd.Read(self.slow['bank'], self.slow['word_ptr'],
self.slow['word_cnt'], self.slow['rn'],
self.slow['crc16'])
fast_read = epcstd.Read(self.fast['bank'], self.fast['word_ptr'],
self.fast['word_cnt'], self.fast['rn'],
self.fast['crc16'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_read)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_read)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.duration,
epcstd.command_duration(epcstd.CommandCode.READ), 8,
"command_duration(READ, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration, epcstd.read_duration(), 8,
"read_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.duration,
epcstd.command_duration(epcstd.CommandCode.READ), 8,
"command_duration(READ, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration, epcstd.read_duration(), 8,
"read_duration(default=fast) doesn't match frame")
class TestTagFrameAccessors(unittest.TestCase):
def setUp(self):
self.preambles = [epcstd.create_tag_preamble(m, trext)
for m in epcstd.TagEncoding
for trext in (True, False)]
self.replies = [epcstd.QueryReply(), epcstd.AckReply(epc="01234567")]
self.blfs = [40e3, 160e3, 360e3]
def test_get_tag_frame_duration_equals_tag_frame_getter(self):
for preamble in self.preambles:
for reply in self.replies:
for blf in self.blfs:
frame = epcstd.TagFrame(preamble, reply)
self.assertAlmostEqual(
epcstd.tag_frame_duration(
reply, blf, preamble.encoding, preamble.extended),
frame.get_duration(blf), 8)
def test_get_tag_frame_duration_uses_default_modelParams(self):
epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_8
for preamble in self.preambles:
for reply in self.replies:
for blf in self.blfs:
epcstd.stdParams.trext = preamble.extended
epcstd.stdParams.tag_encoding = preamble.encoding
epcstd.stdParams.trcal = \
epcstd.stdParams.divide_ratio.eval() / blf
frame = epcstd.TagFrame(preamble, reply)
self.assertAlmostEqual(
epcstd.tag_frame_duration(reply),
frame.get_duration(blf), 8, "frame = {}".format(frame))
class TestRepliesDurationEstimations(unittest.TestCase):
"""
Test-cases for functions ``reply_duration``, ``query_reply_duration``,
``ack_reply_duration``, etc.
"""
def setUp(self):
self.slow = dict(dr=epcstd.DivideRatio.DR_8, trcal=225.0e-6,
encoding=epcstd.TagEncoding.M8, trext=True,
epc_bytelen=12, word_cnt=15)
self.fast = dict(dr=epcstd.DivideRatio.DR_643, trcal=17.875e-6,
encoding=epcstd.TagEncoding.FM0, trext=False,
epc_bytelen=4, word_cnt=1)
for par in [self.slow, self.fast]:
par['blf'] = epcstd.get_blf(par['dr'], par['trcal'])
par['preamble'] = epcstd.create_tag_preamble(
par['encoding'], par['trext'])
@staticmethod
def get_reply_duration(reply_type, par):
return epcstd.reply_duration(
reply_type=reply_type, dr=par['dr'], trcal=par['trcal'],
encoding=par['encoding'], trext=par['trext'],
epc_bytelen=par['epc_bytelen'], words_count=par['word_cnt'])
@staticmethod
def set_default_parameters(par):
epcstd.stdParams.divide_ratio = par['dr']
epcstd.stdParams.trcal = par['trcal']
epcstd.stdParams.tag_encoding = par['encoding']
epcstd.stdParams.trext = par['trext']
epcstd.stdParams.default_epc = "FF" * par['epc_bytelen']
epcstd.stdParams.read_default_word_count = par['word_cnt']
def test_query_reply_duration(self):
reply = epcstd.QueryReply(0x0000)
slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
self.get_reply_duration(epcstd.ReplyType.QUERY_REPLY, self.slow),
8, "reply_duration(QUERY_REPLY, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.query_reply_duration(
dr=self.slow['dr'], trcal=self.slow['trcal'],
encoding=self.slow['encoding'], trext=self.slow['trext']),
8, "query_reply_duration(slow params) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
self.get_reply_duration(epcstd.ReplyType.QUERY_REPLY, self.fast),
8, "reply_duration(QUERY_REPLY, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.query_reply_duration(
self.fast['dr'], self.fast['trcal'], self.fast['encoding'],
self.fast['trext']),
8, "query_reply_duration(fast params) doesn't match frame")
def test_query_reply_duration_with_default_parameters(self):
reply = epcstd.QueryReply()
slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.reply_duration(epcstd.ReplyType.QUERY_REPLY), 8,
"reply_duration(QUERY_REPLY, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.query_reply_duration(), 8,
"query_reply_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.reply_duration(epcstd.ReplyType.QUERY_REPLY), 8,
"reply_duration(QUERY_REPLY, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.query_reply_duration(), 8,
"query_reply_duration(default=fast) doesn't match frame")
def test_ack_reply_duration(self):
slow_reply = epcstd.AckReply(epc=("FF" * self.slow['epc_bytelen']))
fast_reply = epcstd.AckReply(epc=("FF" * self.fast['epc_bytelen']))
slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
self.get_reply_duration(epcstd.ReplyType.ACK_REPLY, self.slow),
8, "reply_duration(ACK_REPLY, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.ack_reply_duration(
dr=self.slow['dr'], trcal=self.slow['trcal'],
encoding=self.slow['encoding'], trext=self.slow['trext'],
epc_bytelen=self.slow['epc_bytelen']),
8, "ack_reply_duration(slow params) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
self.get_reply_duration(epcstd.ReplyType.ACK_REPLY, self.fast),
8, "reply_duration(ACK_REPLY, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.ack_reply_duration(
dr=self.fast['dr'], trcal=self.fast['trcal'],
encoding=self.fast['encoding'], trext=self.fast['trext'],
epc_bytelen=self.fast['epc_bytelen']),
8, "ack_reply_duration(fast params) doesn't match frame")
def test_ack_reply_duration_with_default_parameters(self):
slow_reply = epcstd.AckReply(epc=("FF" * self.slow['epc_bytelen']))
fast_reply = epcstd.AckReply(epc=("FF" * self.fast['epc_bytelen']))
slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.reply_duration(epcstd.ReplyType.ACK_REPLY), 8,
"reply_duration(ACK_REPLY, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.ack_reply_duration(), 8,
"ack_reply_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.reply_duration(epcstd.ReplyType.ACK_REPLY), 8,
"reply_duration(ACK_REPLY, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.ack_reply_duration(), 8,
"ack_reply_duration(default=fast) doesn't match frame")
def test_reqrn_reply_duration(self):
reply = epcstd.ReqRnReply()
slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
self.get_reply_duration(epcstd.ReplyType.REQRN_REPLY, self.slow),
8, "reply_duration(REQRN_REPLY, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.reqrn_reply_duration(
dr=self.slow['dr'], trcal=self.slow['trcal'],
encoding=self.slow['encoding'], trext=self.slow['trext']),
8, "reqrn_reply_duration(slow params) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
self.get_reply_duration(epcstd.ReplyType.REQRN_REPLY, self.fast),
8, "reply_duration(REQRN_REPLY, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.reqrn_reply_duration(
dr=self.fast['dr'], trcal=self.fast['trcal'],
encoding=self.fast['encoding'], trext=self.fast['trext']),
8, "reqrn_reply_duration(fast params) doesn't match frame")
def test_reqrn_reply_duration_with_default_parameters(self):
reply = epcstd.ReqRnReply()
slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.reply_duration(epcstd.ReplyType.REQRN_REPLY), 8,
"reply_duration(REQRN_REPLY, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.reqrn_reply_duration(), 8,
"reqrn_reply_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.reply_duration(epcstd.ReplyType.REQRN_REPLY), 8,
"reply_duration(REQRN_REPLY, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.reqrn_reply_duration(), 8,
"reqrn_reply_duration(default=fast) doesn't match frame")
def test_read_reply_duration(self):
slow_reply = epcstd.ReadReply("FFFF" * self.slow['word_cnt'])
fast_reply = epcstd.ReadReply("FFFF" * self.fast['word_cnt'])
slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
self.get_reply_duration(epcstd.ReplyType.READ_REPLY, self.slow),
8, "reply_duration(READ_REPLY, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.read_reply_duration(
dr=self.slow['dr'], trcal=self.slow['trcal'],
encoding=self.slow['encoding'], trext=self.slow['trext'],
words_count=self.slow['word_cnt']),
8, "read_reply_duration(slow params) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
self.get_reply_duration(epcstd.ReplyType.READ_REPLY, self.fast),
8, "reply_duration(READ_REPLY, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.read_reply_duration(
dr=self.fast['dr'], trcal=self.fast['trcal'],
encoding=self.fast['encoding'], trext=self.fast['trext'],
words_count=self.fast['word_cnt']),
8, "read_reply_duration(fast params) doesn't match frame")
def test_read_reply_duration_with_default_parameters(self):
slow_reply = epcstd.ReadReply("FFFF" * self.slow['word_cnt'])
fast_reply = epcstd.ReadReply("FFFF" * self.fast['word_cnt'])
slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.reply_duration(epcstd.ReplyType.READ_REPLY), 8,
"reply_duration(READ_REPLY, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.get_duration(self.slow['blf']),
epcstd.read_reply_duration(), 8,
"read_reply_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.reply_duration(epcstd.ReplyType.READ_REPLY), 8,
"reply_duration(READ_REPLY, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.get_duration(self.fast['blf']),
epcstd.read_reply_duration(), 8,
"read_reply_duration(default=fast) doesn't match frame")
class TestFrequencyToleranceEstimator(unittest.TestCase):
NUM_RANDOM_CHECKS = 5
def assert_frt_node_values(self, node_values, dr, temp):
for trcal, frt in node_values:
self.assertAlmostEqual(
epcstd.get_frt(trcal*1e-6, dr, temp), frt, 8,
"trcal={} (table node)".format(trcal))
def assert_frt_interval_values(self, interval_values, dr, temp):
for lb, rb, frt in interval_values:
low_trcal = lb * 1.011 * 1e-6
top_trcal = rb * 0.989 * 1e-6
self.assertAlmostEqual(
epcstd.get_frt(low_trcal, dr, temp), frt, 8,
"trcal={} (interval left bound)".format(low_trcal))
self.assertAlmostEqual(
epcstd.get_frt(top_trcal, dr, temp), frt, 8,
"trcal={} (interval right bound)".format(top_trcal))
for i in range(TestFrequencyToleranceEstimator.NUM_RANDOM_CHECKS):
trcal = uniform(low_trcal, top_trcal)
self.assertAlmostEqual(
epcstd.get_frt(trcal, dr, temp), frt, 8,
"trcal={} (interval random internal point)".format(trcal))
def test_tolerance_for_dr643_nominal_temp(self):
node_values = [(33.3, 0.15), (66.7, 0.1), (83.3, 0.1)]
intervals = [(33.3, 66.7, 0.22), (66.7, 83.3, 0.12),
(83.3, 133.3, 0.1), (133.3, 200.0, 0.07),
(200.0, 225.0, 0.05)]
self.assert_frt_node_values(
node_values, epcstd.DivideRatio.DR_643, epcstd.TempRange.NOMINAL)
self.assert_frt_interval_values(
intervals, epcstd.DivideRatio.DR_643, epcstd.TempRange.NOMINAL)
def test_tolerance_for_dr643_extended_temp(self):
node_values = [(33.3, 0.15), (66.7, 0.15), (83.3, 0.1)]
intervals = [(33.3, 66.7, 0.22), (66.7, 83.3, 0.15),
(83.3, 133.3, 0.12), (133.3, 200.0, 0.07),
(200.0, 225.0, 0.05)]
self.assert_frt_node_values(
node_values, epcstd.DivideRatio.DR_643, epcstd.TempRange.EXTENDED)
self.assert_frt_interval_values(
intervals, epcstd.DivideRatio.DR_643, epcstd.TempRange.EXTENDED)
def test_tolerance_for_dr8_nominal_temp(self):
node_values = [(25.0, 0.10), (31.25, 0.10), (50.0, 0.07)]
intervals = [(17.2, 25.0, 0.19), (25.0, 31.25, 0.12),
(31.25, 50.0, 0.10), (50.0, 75.0, 0.07),
(75.0, 200.0, 0.04)]
self.assert_frt_node_values(
node_values, epcstd.DivideRatio.DR_8, epcstd.TempRange.NOMINAL)
self.assert_frt_interval_values(
intervals, epcstd.DivideRatio.DR_8, epcstd.TempRange.NOMINAL)
def test_tolerance_for_dr8_extended_temp(self):
node_values = [(25.0, 0.15), (31.25, 0.10), (50.0, 0.07)]
intervals = [(17.2, 25.0, 0.19), (25.0, 31.25, 0.15),
(31.25, 50.0, 0.10), (50.0, 75.0, 0.07),
(75.0, 200.0, 0.04)]
self.assert_frt_node_values(
node_values, epcstd.DivideRatio.DR_8, epcstd.TempRange.EXTENDED)
self.assert_frt_interval_values(
intervals, epcstd.DivideRatio.DR_8, epcstd.TempRange.EXTENDED)
def test_get_frt_uses_readerParams(self):
epcstd.stdParams.temp_range = epcstd.TempRange.NOMINAL
epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_8
epcstd.stdParams.trcal = 31.25e-6
self.assertAlmostEqual(epcstd.get_frt(), 0.10, 3)
epcstd.stdParams.temp_range = epcstd.TempRange.EXTENDED
epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_643
epcstd.stdParams.trcal = 66.7e-6
self.assertAlmostEqual(epcstd.get_frt(), 0.15, 3)
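# Editorial note: get_frt(trcal, dr, temp) is exercised here as a piecewise
# lookup of the frequency tolerance (FrT) over TRcal intervals, keyed by
# divide ratio and temperature range; the node/interval tables above encode
# the expected steps (e.g. FrT = 0.10 for DR=8, nominal temperature,
# TRcal = 31.25 us).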
class TestLinkTimings(unittest.TestCase):
class Timeouts:
t1 = None
t2 = None
t3 = None
t4 = None
t5 = None
t6 = None
t7 = None
def __getitem__(self, index):
ts = [self.t1, self.t2, self.t3, self.t4, self.t5, self.t6, self.t7]
if 1 <= index <= 7:
return ts[index - 1]
else:
raise IndexError("timeout index must be between 1 and 7")
def __setitem__(self, key, value):
if key == 1:
self.t1 = value
elif key == 2:
self.t2 = value
elif key == 3:
self.t3 = value
elif key == 4:
self.t4 = value
elif key == 5:
self.t5 = value
elif key == 6:
self.t6 = value
elif key == 7:
self.t7 = value
else:
raise IndexError("timeout index must be between 1 and 7")
class TBounds:
t_min = None
t_max = None
def __init__(self):
self.t_min = TestLinkTimings.Timeouts()
self.t_max = TestLinkTimings.Timeouts()
def setUp(self):
self.temp = epcstd.TempRange.NOMINAL
self.slow_tari = 25.0e-6
self.slow_rtcal = self.slow_tari * 3
self.slow_trcal = self.slow_rtcal * 3
self.slow_dr = epcstd.DivideRatio.DR_8
self.slow_blf = epcstd.get_blf(self.slow_dr, self.slow_trcal)
self.slow_frt = epcstd.get_frt(self.slow_trcal, self.slow_dr,
self.temp)
self.fast_tari = 6.25e-6
self.fast_rtcal = self.fast_tari * 2.5
self.fast_trcal = self.fast_rtcal * 1.1
self.fast_dr = epcstd.DivideRatio.DR_643
self.fast_blf = epcstd.get_blf(self.fast_dr, self.fast_trcal)
self.fast_frt = epcstd.get_frt(self.fast_trcal, self.fast_dr,
self.temp)
self.expected_timeouts = {
"slow": self.TBounds(),
"fast": self.TBounds()
}
t_slow_min = self.expected_timeouts["slow"].t_min
t_slow_max = self.expected_timeouts["slow"].t_max
t_fast_min = self.expected_timeouts["fast"].t_min
t_fast_max = self.expected_timeouts["fast"].t_max
t_slow_min.t1 = 281.25e-6 * (1.0 - self.slow_frt) - 2e-6
t_slow_min.t2 = 84.375e-06
t_slow_min.t3 = 0.0
t_slow_min.t4 = 150e-6
t_slow_min.t5 = t_slow_min.t1
t_slow_min.t6 = t_slow_min.t1
t_slow_min.t7 = 562.5e-6
t_fast_min.t1 = 15.625e-6 * (1.0 - self.fast_frt) - 2e-6
t_fast_min.t2 = 2.4169921875e-06
t_fast_min.t3 = 0.0
t_fast_min.t4 = 31.25e-6
t_fast_min.t5 = t_fast_min.t1
t_fast_min.t6 = t_fast_min.t1
t_fast_min.t7 = 250.0e-6
t_slow_max.t1 = 281.25e-6 * (1.0 + self.slow_frt) + 2e-6
t_slow_max.t2 = 562.5e-6
t_slow_max.t5 = 20e-3
t_slow_max.t6 = 20e-3
t_slow_max.t7 = 20e-3
t_fast_max.t1 = 15.625e-6 * (1.0 + self.fast_frt) + 2e-6
t_fast_max.t2 = 16.11328125e-06
t_fast_max.t5 = 20e-3
t_fast_max.t6 = 20e-3
t_fast_max.t7 = 20e-3
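# Editorial note (sketch): the expected bounds above are consistent with the
# usual Gen2 link-timing formulas, assuming pri = 1 / BLF:
#   T1_min = max(rtcal, 10 * pri) * (1 - frt) - 2e-6
#   T1_max = max(rtcal, 10 * pri) * (1 + frt) + 2e-6
#   T2 in [3 * pri, 20 * pri],  T3 >= 0,  T4 = 2 * rtcal,
#   T7_min = max(250e-6, 20 * pri),  T5/T6/T7 capped at 20e-3,
# with T5 and T6 sharing T1's lower bound here. For the slow link,
# pri = 225e-6 / 8 = 28.125e-6, so 10 * pri = 281.25e-6 and
# 20 * pri = 562.5e-6, matching t_slow_min / t_slow_max.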
#
# HELPERS FOR TIMEOUTS CHECKS
#
def assertTimeoutsEqual(self, actual, expected, num_digits=8,
prefix="", suffix=""):
for i in range(1, 8):
if expected[i] is not None:
self.assertAlmostEqual(actual[i], expected[i], num_digits,
"{} T{}({})".format(prefix, i, suffix))
def build_t_min(self, rtcal=None, trcal=None, dr=None, temp=None):
ts = self.Timeouts()
ts.t1 = epcstd.link_t1_min(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
ts.t2 = epcstd.link_t2_min(trcal=trcal, dr=dr)
ts.t3 = epcstd.link_t3()
ts.t4 = epcstd.link_t4(rtcal=rtcal)
ts.t5 = epcstd.link_t5_min(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
ts.t6 = epcstd.link_t6_min(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
ts.t7 = epcstd.link_t7_min(trcal=trcal, dr=dr)
return ts
def build_t_max(self, rtcal=None, trcal=None, dr=None, temp=None):
ts = self.Timeouts()
ts.t1 = epcstd.link_t1_max(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
ts.t2 = epcstd.link_t2_max(trcal=trcal, dr=dr)
ts.t5 = epcstd.link_t5_max()
ts.t6 = epcstd.link_t6_max()
ts.t7 = epcstd.link_t7_max()
return ts
def build_t_min_with_universal_getter(
self, rtcal=None, trcal=None, dr=None, temp=None):
ts = self.Timeouts()
for i in range(1, 8):
ts[i] = epcstd.min_link_t(i, rtcal=rtcal, trcal=trcal, dr=dr,
temp=temp)
return ts
def build_t_max_with_universal_getter(
self, rtcal=None, trcal=None, dr=None, temp=None):
ts = self.Timeouts()
for i in [1, 2, 5, 6, 7]:
ts[i] = epcstd.max_link_t(i, rtcal=rtcal, trcal=trcal, dr=dr,
temp=temp)
return ts
@staticmethod
def set_default_modelParams(rtcal, trcal, dr, temp):
epcstd.stdParams.rtcal = rtcal
epcstd.stdParams.trcal = trcal
epcstd.stdParams.divide_ratio = dr
epcstd.stdParams.temp_range = temp
#
# TESTS
#
def test_get_pri_with_explicit_parameters(self):
self.assertAlmostEqual(
epcstd.get_pri(trcal=self.slow_trcal, dr=self.slow_dr),
1.0 / self.slow_blf, 8)
self.assertAlmostEqual(
epcstd.get_pri(trcal=self.fast_trcal, dr=self.fast_dr),
1.0 / self.fast_blf, 8)
def test_get_pri_with_implicit_parameters_from_readerParams(self):
epcstd.stdParams.trcal = self.slow_trcal
epcstd.stdParams.divide_ratio = self.slow_dr
self.assertAlmostEqual(epcstd.get_pri(), 1.0 / self.slow_blf, 8)
epcstd.stdParams.trcal = self.fast_trcal
epcstd.stdParams.divide_ratio = self.fast_dr
self.assertAlmostEqual(epcstd.get_pri(), 1.0 / self.fast_blf, 8)
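# Editorial note (sketch): get_pri() is asserted to equal 1 / BLF, where BLF
# comes from get_blf(dr, trcal), i.e. BLF = DR / TRcal. With the slow values
# in setUp, DR_8 and TRcal = 225e-6 give BLF = 8 / 225e-6 (about 35.6 kHz),
# so pri is roughly 28.1 us.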
def test_custom_get_tX_min_with_explicit_parameters(self):
actual_timeouts = {
"slow": self.build_t_min(self.slow_rtcal, self.slow_trcal,
self.slow_dr, self.temp),
"fast": self.build_t_min(self.fast_rtcal, self.fast_trcal,
self.fast_dr, self.temp)
}
for key in ["slow", "fast"]:
self.assertTimeoutsEqual(
actual_timeouts[key], self.expected_timeouts[key].t_min,
prefix=key, suffix="min")
def test_custom_get_tX_max_with_explicit_parameters(self):
actual_timeouts = {
"slow": self.build_t_max(self.slow_rtcal, self.slow_trcal,
self.slow_dr, self.temp),
"fast": self.build_t_max(self.fast_rtcal, self.fast_trcal,
self.fast_dr, self.temp)
}
for key in ["slow", "fast"]:
self.assertTimeoutsEqual(
actual_timeouts[key], self.expected_timeouts[key].t_max,
prefix=key, suffix="max")
def test_custom_get_tX_with_implicit_parameters_from_modelParams(self):
# Setting up slow link parameters
self.set_default_modelParams(self.slow_rtcal, self.slow_trcal,
self.slow_dr, self.temp)
t_min = self.build_t_min() # leaving all parameters None
t_max = self.build_t_max() # leaving all parameters None
self.assertTimeoutsEqual(t_min, self.expected_timeouts["slow"].t_min,
prefix="default slow", suffix="min")
self.assertTimeoutsEqual(t_max, self.expected_timeouts["slow"].t_max,
prefix="default slow", suffix="max")
# Setting up fast link parameters
self.set_default_modelParams(self.fast_rtcal, self.fast_trcal,
self.fast_dr, self.temp)
t_min = self.build_t_min() # leaving all parameters None
t_max = self.build_t_max() # leaving all parameters None
self.assertTimeoutsEqual(t_min, self.expected_timeouts["fast"].t_min,
prefix="default fast", suffix="min")
self.assertTimeoutsEqual(t_max, self.expected_timeouts["fast"].t_max,
prefix="default fast", suffix="max")
def test_universal_get_t_min_with_explicit_parameters(self):
actual_timeouts = {
"slow": self.build_t_min_with_universal_getter(
self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp),
"fast": self.build_t_min_with_universal_getter(
self.fast_rtcal, self.fast_trcal, self.fast_dr, self.temp),
}
for key in ['slow', 'fast']:
self.assertTimeoutsEqual(
actual_timeouts[key], self.expected_timeouts[key].t_min,
num_digits=8, prefix=key, suffix="min")
# Check that min_link_t raises for n outside 1..7
with self.assertRaises(ValueError):
epcstd.min_link_t(
0, self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp)
with self.assertRaises(ValueError):
epcstd.min_link_t(
8, self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp)
def test_universal_get_t_max_with_explicit_parameters(self):
actual_timeouts = {
"slow": self.build_t_max_with_universal_getter(
self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp),
"fast": self.build_t_max_with_universal_getter(
self.fast_rtcal, self.fast_trcal, self.fast_dr, self.temp),
}
for key in ["slow", "fast"]:
self.assertTimeoutsEqual(
actual_timeouts[key], self.expected_timeouts[key].t_max,
num_digits=8, prefix=key, suffix="max")
self.assertAlmostEqual(
epcstd.max_link_t(3, self.slow_rtcal, self.slow_trcal, self.slow_dr,
self.temp), float('inf'))
# Check that max_link_t raises for n outside 1..7
with self.assertRaises(ValueError):
epcstd.max_link_t(0, self.slow_rtcal, self.slow_trcal, self.slow_dr,
self.temp)
with self.assertRaises(ValueError):
epcstd.max_link_t(8, self.slow_rtcal, self.slow_trcal, self.slow_dr,
self.temp)
def test_universal_get_t_min_with_parameters_from_modelParams(self):
# Setting up slow parameters
self.set_default_modelParams(self.slow_rtcal, self.slow_trcal,
self.slow_dr, self.temp)
slow = self.build_t_min_with_universal_getter()
self.assertTimeoutsEqual(slow, self.expected_timeouts["slow"].t_min,
prefix="slow", suffix="min")
# Setting up fast parameters
self.set_default_modelParams(self.fast_rtcal, self.fast_trcal,
self.fast_dr, self.temp)
fast = self.build_t_min_with_universal_getter()
self.assertTimeoutsEqual(fast, self.expected_timeouts['fast'].t_min,
prefix="fast", suffix="min")
def test_universal_get_t_max_with_parameters_from_modelParams(self):
# Setting up slow parameters
self.set_default_modelParams(self.slow_rtcal, self.slow_trcal,
self.slow_dr, self.temp)
slow = self.build_t_max_with_universal_getter()
self.assertTimeoutsEqual(slow, self.expected_timeouts["slow"].t_max,
prefix="slow", suffix="max")
# Setting up fast parameters
self.set_default_modelParams(self.fast_rtcal, self.fast_trcal,
self.fast_dr, self.temp)
fast = self.build_t_max_with_universal_getter()
self.assertTimeoutsEqual(fast, self.expected_timeouts['fast'].t_max,
prefix="fast", suffix="max")
class TestElementaryTimings(unittest.TestCase):
def setUp(self):
self.fast_tari = 6.25e-6
self.fast_rtcal = 15.625e-6
self.fast_trcal = 17.875e-6
self.fast_trext = False
self.fast_temp = epcstd.TempRange.NOMINAL
self.fast_q = 4
self.fast_dr = epcstd.DivideRatio.DR_643
self.fast_m = epcstd.TagEncoding.FM0
self.fast_target = epcstd.InventoryFlag.A
self.fast_sel = epcstd.SelFlag.ALL
self.fast_session = epcstd.Session.S0
self.fast_bank = epcstd.MemoryBank.TID
self.fast_word_ptr = 0
self.fast_word_count = 4
self.fast_data = "89ABCDEF"
self.fast_epc = "00112233445566778899AABB"
self.fast_pc = 0x0000
self.fast_rn = 0x0000
self.fast_crc = 0x0000
self.fast_t = {
"Query": 199.125e-6,
"QueryRep": 59.375e-6,
"ACK": 150.0e-6,
"ReqRN": 293.75e-6,
"Read": 412.5e-6,
"T1(min)": 11.28125e-6,
"T1(max)": 19.96875e-6,
"T2(min)": 2.51367188e-6,
"T2(max)": 16.7578125e-6,
"T3(min)": 0.0e-6,
"T3(max)": float("inf"),
"T4(min)": 31.25e-6,
"T4(max)": float("inf"),
"RN16": 19.27148438e-6,
"Response": 113.11523438e-6,
"Handle": 32.67773438e-6,
"Data": 60.328125e-6
}
def test_get_elementary_timings(self):
d = epcstd.get_elementary_timings(
tari=self.fast_tari, rtcal=self.fast_rtcal, trcal=self.fast_trcal,
temp=self.fast_temp, dr=epcstd.DivideRatio.DR_643,
m=self.fast_m, trext=self.fast_trext, sel=self.fast_sel,
session=self.fast_session, target=self.fast_target, q=self.fast_q,
bank=self.fast_bank, word_ptr=self.fast_word_ptr,
word_count=self.fast_word_count, rn=self.fast_rn,
crc=self.fast_crc, epc=self.fast_epc, mem=self.fast_data,
pc=self.fast_pc)
for k, v in self.fast_t.items():
self.assertIn(k, d, "key {} not found in timings".format(k))
self.assertAlmostEqual(v, d[k], 8, "error in {}".format(k))
# ===== next file (python) =====
from __future__ import print_function
from builtins import str
import argparse
import os
import sys
import re
from .version import VERSION
from .utils import get_local_ip, DelimiterArgParser
import atexit
def add_parser_args(parser, config_type):
# General arguments
parser.add_argument(
'--trace_greenlets',
action='store_true',
default=False,
help='Collect stats about each greenlet execution time and switches.')
parser.add_argument(
'--trace_memory',
action='store_true',
default=False,
help='Collect stats about memory for each task. Incompatible with --greenlets > 1')
parser.add_argument(
'--trace_io',
action='store_true',
default=False,
help='Collect stats about all I/O operations')
parser.add_argument(
'--print_mongodb',
action='store_true',
default=False,
help='Print all MongoDB requests')
parser.add_argument(
'--trace_memory_type',
action='store',
default="",
help='Create a .png object graph in trace_memory_output_dir ' +
'with a random object of this type.')
parser.add_argument(
'--trace_memory_output_dir',
action='store',
default="memory_traces",
help='Directory where to output .pngs with object graphs')
parser.add_argument(
'--profile',
action='store_true',
default=False,
help='Run profiling on the whole worker')
parser.add_argument(
'--mongodb_jobs', '--mongodb',
action='store',
default="mongodb://127.0.0.1:27017/mrq",
help='MongoDB URI for the jobs, scheduled_jobs & workers database')
parser.add_argument(
'--mongodb_logs',
action='store',
default="1",
help='MongoDB URI for the logs database. ' +
' "0" will disable remote logs, "1" will use main MongoDB.')
parser.add_argument(
'--mongodb_logs_size',
action='store',
default=16 * 1024 * 1024,
type=int,
help='If provided, sets the log collection to capped to that amount of bytes.')
parser.add_argument(
'--no_mongodb_ensure_indexes',
action='store_true',
default=False,
help='If provided, skip the creation of MongoDB indexes at worker startup.')
parser.add_argument(
'--redis',
action='store',
default="redis://127.0.0.1:6379",
help='Redis URI')
parser.add_argument(
'--redis_prefix',
action='store',
default="mrq",
help='Redis key prefix')
parser.add_argument(
'--redis_max_connections',
action='store',
type=int,
default=1000,
help='Redis max connection pool size')
parser.add_argument(
'--redis_timeout',
action='store',
type=float,
default=30,
help='Redis connection pool timeout to wait for an available connection')
parser.add_argument(
'--name',
default=None,
action='store',
help='Specify a different name')
parser.add_argument(
'--quiet',
default=False,
action='store_true',
help='Don\'t output task logs')
parser.add_argument(
'--config',
'-c',
default=None,
action="store",
help='Path of a config file')
parser.add_argument(
'--worker_class',
default="mrq.worker.Worker",
action="store",
help='Path to a custom worker class')
parser.add_argument(
'--version',
'-v',
default=False,
action="store_true",
help='Prints current MRQ version')
parser.add_argument(
'--no_import_patch',
default=False,
action='store_true',
help='(DEPRECATED) Skips patching __import__ to fix gevent bug #108')
parser.add_argument(
'--add_network_latency',
default="0",
action='store',
type=str,
help='Adds random latency to the network calls, zero to N seconds. Can be a range (1-2)')
parser.add_argument(
'--default_job_result_ttl',
default=7 * 24 * 3600,
action='store',
type=float,
help='Seconds the results are kept in MongoDB when status is success')
parser.add_argument(
'--default_job_abort_ttl',
default=24 * 3600,
action='store',
type=float,
help='Seconds the tasks are kept in MongoDB when status is abort')
parser.add_argument(
'--default_job_cancel_ttl',
default=24 * 3600,
action='store',
type=float,
help='Seconds the tasks are kept in MongoDB when status is cancel')
parser.add_argument(
'--default_job_timeout',
default=3600,
action='store',
type=float,
help='In seconds, delay before interrupting the job')
parser.add_argument(
'--default_job_max_retries',
default=3,
action='store',
type=int,
help='Set the status to "maxretries" after retrying that many times')
parser.add_argument(
'--default_job_retry_delay',
default=3,
action='store',
type=int,
help='Seconds before a job in retry status is requeued again')
parser.add_argument(
'--use_large_job_ids',
action='store_true',
default=False,
help='Do not use compacted job IDs in Redis. For compatibility with 0.1.x only')
# mrq-run-specific arguments
if config_type == "run":
parser.add_argument(
'--queue',
action='store',
default="",
help='Queue the task on this queue instead of running it right away')
parser.add_argument(
'taskpath',
action='store',
help='Task to run')
parser.add_argument(
'taskargs',
action='store',
default='{}',
nargs='*',
help='JSON-encoded arguments, or "key value" pairs')
# Dashboard-specific arguments
elif config_type == "dashboard":
parser.add_argument(
'--dashboard_httpauth',
default="",
action="store",
help='HTTP Auth for the Dashboard. Format is user:pass')
parser.add_argument(
'--dashboard_queue',
default=None,
action="store",
help='Default queue for dashboard actions.')
parser.add_argument(
'--dashboard_port',
default=5555,
action="store",
type=int,
help='Use this port for mrq-dashboard. 5555 by default.')
parser.add_argument(
'--dashboard_ip',
default="0.0.0.0",
action="store",
type=str,
help='Bind the dashboard to this IP. Default is "0.0.0.0", use "127.0.0.1" to restrict access.')
# Worker-specific args
elif config_type == "worker":
parser.add_argument(
'--max_jobs',
default=0,
type=int,
action='store',
help='Gevent: max number of jobs to do before quitting.' +
' Temp workaround for memory leaks')
parser.add_argument(
'--max_memory',
default=0,
type=int,
action='store',
help='Max memory (in Mb) after which the process will be shut down. Use with --processes [1-N] ' +
'to have supervisord automatically respawn the worker when this happens')
parser.add_argument(
'--greenlets',
'--gevent', # deprecated
'-g',
default=1,
type=int,
action='store',
help='Max number of greenlets to use')
parser.add_argument(
'--processes',
'-p',
default=0,
type=int,
action='store',
help='Number of processes to launch with supervisord')
default_template = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"supervisord_templates/default.conf"
))
parser.add_argument(
'--supervisord_template',
default=default_template,
action='store',
help='Path of supervisord template to use')
parser.add_argument(
'--scheduler',
default=False,
action='store_true',
help='Run the scheduler')
parser.add_argument(
'--scheduler_interval',
default=60,
action='store',
type=float,
help='Seconds between scheduler checks')
parser.add_argument(
'--report_interval',
default=10,
action='store',
type=float,
help='Seconds between worker reports to MongoDB')
parser.add_argument(
'--report_file',
default="",
action='store',
type=str,
help='Filepath of a json dump of the worker status. Disabled if none')
parser.add_argument(
'queues',
nargs='*',
default=["default"],
help='The queues to listen on (default: \'default\')')
parser.add_argument(
'--subqueues_refresh_interval',
default=10,
action='store',
type=float,
help="Seconds between worker refreshes of the known subqueues")
parser.add_argument(
'--paused_queues_refresh_interval',
default=10,
action='store',
type=float,
help="Seconds between worker refreshes of the paused queues list")
parser.add_argument(
'--subqueues_delimiter',
default='/',
help='Delimiter between main queue and subqueue names',
action=DelimiterArgParser)
parser.add_argument(
'--admin_port',
default=0,
action="store",
type=int,
help='Start an admin server on this port, if provided. Incompatible with --processes')
parser.add_argument(
'--admin_ip',
default="127.0.0.1",
action="store",
type=str,
help='IP for the admin server to listen on. Use "0.0.0.0" to allow access from outside')
parser.add_argument(
'--local_ip',
default=get_local_ip(),
action="store",
type=str,
help='Overwrite the local IP, to be displayed in the dashboard.')
parser.add_argument(
'--max_latency',
default=1.,
type=float,
action='store',
help='Max seconds while worker may sleep waiting for a new job. ' +
'Can be < 1.')
parser.add_argument(
'--dequeue_strategy',
default="sequential",
type=str,
action='store',
            help='Strategy for dequeuing multiple queues. Default is \'sequential\',' +
                 ' to dequeue them in command-line order.')
def get_config(
sources=(
"file",
"env"),
env_prefix="MRQ_",
file_path=None,
parser=None,
extra=None,
config_type=None):
""" Returns a config dict merged from several possible sources """
if not parser:
parser = argparse.ArgumentParser()
add_parser_args(parser, config_type)
parser_types = {action.dest: action.type for action in parser._actions if action.dest}
if config_type in ["run"]:
default_config = parser.parse_args(["notask"]).__dict__
else:
default_config = parser.parse_args([]).__dict__
# Keys that can't be passed from the command line
default_config["tasks"] = {}
default_config["scheduled_tasks"] = {}
# Only keep values different from config, actually passed on the command
# line
from_args = {}
if "args" in sources:
for k, v in parser.parse_args().__dict__.items():
if default_config[k] != v:
from_args[k] = v
# If we were given another config file, use it
if file_path is not None:
config_file = file_path
elif from_args.get("config"):
config_file = from_args.get("config")
# If a mrq-config.py file is in the current directory, use it!
elif os.path.isfile(os.path.join(os.getcwd(), "mrq-config.py")):
config_file = os.path.join(os.getcwd(), "mrq-config.py")
else:
config_file = None
from_file = {}
if config_file and "file" in sources:
sys.path.insert(0, os.path.dirname(config_file))
config_module = __import__(os.path.basename(config_file.replace(".py", "")))
sys.path.pop(0)
for k, v in config_module.__dict__.items():
# We only keep variables starting with an uppercase character.
if k[0].isupper():
from_file[k.lower()] = v
# Merge the config in the order given by the user
merged_config = default_config
config_keys = set(list(default_config.keys()) + list(from_file.keys()))
for part in sources:
for name in config_keys:
if part == "env":
value = os.environ.get(env_prefix + name.upper())
if value:
if name == "queues":
                        value = re.split(r"\s+", value)
if parser_types.get(name):
value = parser_types[name](value)
merged_config[name] = value
elif part == "args" and name in from_args:
merged_config[name] = from_args[name]
elif part == "file" and name in from_file:
merged_config[name] = from_file[name]
if extra:
merged_config.update(extra)
if merged_config["profile"]:
import cProfile
profiler = cProfile.Profile()
profiler.enable()
def print_profiling():
profiler.print_stats(sort="cumulative")
atexit.register(print_profiling)
if merged_config["version"]:
print("MRQ version: %s" % VERSION)
print("Python version: %s" % sys.version)
sys.exit(1)
if "no_import_patch" in from_args:
print("WARNING: --no_import_patch will be deprecated in MRQ 1.0!")
return merged_config
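

# Illustrative usage sketch (assumption, not part of the original module): how
# get_config might be called to assemble a worker configuration. The source
# order and the "worker" config_type follow the functions above; the printed
# keys are just examples.
if __name__ == "__main__":
    worker_config = get_config(sources=("file", "env", "args"), config_type="worker")
    print(worker_config.get("greenlets"), worker_config.get("queues"))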
|
python
|
"""add column source_file_dir
Revision ID: 3880a3a819d5
Revises: 2579e237c51a
Create Date: 2019-11-12 16:49:05.040791
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3880a3a819d5'
down_revision = '2579e237c51a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('publishment', sa.Column('source_file_dir', sa.String(length=64), nullable=True, default='target', comment='发布文件的相对目录(相对于源项目的根目录)'))
op.add_column('publishment_staticfile', sa.Column('created_at', sa.DateTime(), nullable=False, comment='创建时间'))
op.add_column('publishment_staticfile', sa.Column('created_by', sa.String(length=32), nullable=False, comment='创建人'))
op.add_column('publishment_staticfile', sa.Column('is_deleted', sa.Integer(), nullable=False, comment='是否删除:0表示正常,1表示已删除'))
op.add_column('publishment_staticfile', sa.Column('last_updated_at', sa.DateTime(), nullable=False, comment='最后更新时间'))
op.add_column('publishment_staticfile', sa.Column('last_updated_by', sa.String(length=32), nullable=False, comment='最后更新人'))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('publishment_staticfile', 'last_updated_by')
op.drop_column('publishment_staticfile', 'last_updated_at')
op.drop_column('publishment_staticfile', 'is_deleted')
op.drop_column('publishment_staticfile', 'created_by')
op.drop_column('publishment_staticfile', 'created_at')
op.drop_column('publishment', 'source_file_dir')
# ### end Alembic commands ###
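

# Illustrative note (assumption, not part of the original migration): with a
# standard Alembic setup this revision would typically be applied or reverted
# from the command line, e.g.
#
#   alembic upgrade 3880a3a819d5
#   alembic downgrade 2579e237c51a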
|
python
|
from collections import OrderedDict
from django.db.models import Count
from django.db.models.functions import TruncYear
from .models import Reading
def annual_reading_counts(kind="all"):
"""
Returns a list of dicts, one per year of reading. In year order.
Each dict is like this (if kind is 'all'):
{'year': datetime.date(2003, 1, 1),
'book': 12, # only included if kind is 'all' or 'book'
'periodical': 18, # only included if kind is 'all' or 'periodical'
'total': 30, # only included if kind is 'all'
}
We use the end_date of a Reading to count when that thing was read.
kind is one of 'book', 'periodical' or 'all', for both.
"""
if kind == "all":
kinds = ["book", "periodical"]
else:
kinds = [kind]
# This will have keys of years (strings) and dicts of data:
# {
    #   '2003': {'year': datetime.date(2003, 1, 1), 'book': 12, 'periodical': 18},
# }
counts = OrderedDict()
for k in kinds:
qs = (
Reading.objects.exclude(end_date__isnull=True)
.filter(publication__kind=k)
.annotate(year=TruncYear("end_date"))
.values("year")
.annotate(count=Count("id"))
.order_by("year")
)
for year_data in qs:
year_str = year_data["year"].strftime("%Y")
if year_str not in counts:
counts[year_str] = {
"year": year_data["year"],
}
counts[year_str][k] = year_data["count"]
# Now translate counts into our final list, with totals, and 0s for kinds
# when they have no Readings for that year.
counts_list = []
for year_str, data in counts.items():
year_data = {
"year": data["year"],
}
if kind == "all":
year_data["total"] = 0
for k in kinds:
if k in data:
year_data[k] = data[k]
if kind == "all":
year_data["total"] += data[k]
else:
year_data[k] = 0
counts_list.append(year_data)
return counts_list
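

# Illustrative sketch (assumption, not part of the original module): the shape of
# the structure returned for kind="all". Years and counts here are hypothetical.
#
#   annual_reading_counts("all") == [
#       {"year": datetime.date(2003, 1, 1), "book": 12, "periodical": 18, "total": 30},
#       {"year": datetime.date(2004, 1, 1), "book": 9, "periodical": 0, "total": 9},
#   ]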
|
python
|
import os
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.algorithms.rapidart as ra
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from .utils import *
from CPAC.vmhc import *
from nipype.interfaces.afni import preprocess
from CPAC.registration.registration import apply_transform
from CPAC.image_utils import spatial_smoothing
from CPAC.utils.utils import check_prov_for_regtool
def smooth_func_vmhc(wf, cfg, strat_pool, pipe_num, opt=None):
'''
Node Block:
{"name": "smooth_func_vmhc",
"config": "None",
"switch": ["voxel_mirrored_homotopic_connectivity", "run"],
"option_key": ["post_processing", "spatial_smoothing",
"smoothing_method"],
"option_val": ["AFNI", "FSL"],
"inputs": [["desc-cleaned_bold",
"desc-brain_bold",
"desc-preproc_bold",
"bold"],
"space-bold_desc-brain_mask"],
"outputs": ["desc-sm_bold",
"fwhm"]}
'''
fwhm = cfg.post_processing['spatial_smoothing']['fwhm']
smooth = spatial_smoothing(f'smooth_symmetric_{pipe_num}',
fwhm, opt=opt)
node, out = strat_pool.get_data(["desc-cleaned_bold",
"desc-brain_bold",
"desc-preproc_bold",
"bold"])
wf.connect(node, out, smooth, 'inputspec.in_file')
node, out = strat_pool.get_data("space-bold_desc-brain_mask")
wf.connect(node, out, smooth, 'inputspec.mask')
# 'fwhm' output for iterable
outputs = {
"desc-sm_bold": (smooth, 'outputspec.out_file'),
"fwhm": (smooth, 'fwhm_input.fwhm')
}
return (wf, outputs)
def warp_timeseries_to_sym_template(wf, cfg, strat_pool, pipe_num, opt=None):
'''
Node Block:
{"name": "transform_timeseries_to_sym_template",
"config": ["voxel_mirrored_homotopic_connectivity"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-cleaned-sm_bold",
"desc-brain-sm_bold",
"desc-preproc-sm_bold",
"desc-sm_bold"],
"from-bold_to-symtemplate_mode-image_xfm",
"T1w_brain_template_symmetric"],
"outputs": ["space-symtemplate_desc-sm_bold"]}
'''
xfm_prov = strat_pool.get_cpac_provenance(
'from-bold_to-symtemplate_mode-image_xfm')
reg_tool = check_prov_for_regtool(xfm_prov)
num_cpus = cfg.pipeline_setup['system_config'][
'max_cores_per_participant']
num_ants_cores = cfg.pipeline_setup['system_config']['num_ants_threads']
apply_xfm = apply_transform(f'warp_ts_to_sym_template_{pipe_num}',
reg_tool, time_series=True, num_cpus=num_cpus,
num_ants_cores=num_ants_cores,
mem_gb=5.0)
if reg_tool == 'ants':
apply_xfm.inputs.inputspec.interpolation = cfg.registration_workflows[
'functional_registration']['func_registration_to_template'][
'ANTs_pipelines']['interpolation']
elif reg_tool == 'fsl':
apply_xfm.inputs.inputspec.interpolation = cfg.registration_workflows[
'functional_registration']['func_registration_to_template'][
'FNIRT_pipelines']['interpolation']
# smoothed BOLD
connect, resource = strat_pool.get_data(["desc-cleaned-sm_bold",
"desc-brain-sm_bold",
"desc-preproc-sm_bold",
"desc-sm_bold"],
report_fetched=True)
node, out = connect
wf.connect(node, out, apply_xfm, 'inputspec.input_image')
node, out = strat_pool.get_data("T1w_brain_template_symmetric")
wf.connect(node, out, apply_xfm, 'inputspec.reference')
node, out = strat_pool.get_data("from-bold_to-symtemplate_mode-image_xfm")
wf.connect(node, out, apply_xfm, 'inputspec.transform')
outputs = {
f'space-symtemplate_{resource}':
(apply_xfm, 'outputspec.output_image')
}
return (wf, outputs)
def vmhc(wf, cfg, strat_pool, pipe_num, opt=None):
'''Compute Voxel-Mirrored Homotopic Connectivity.
VMHC is the map of brain functional homotopy, the high degree of
synchrony in spontaneous activity between geometrically corresponding
interhemispheric (i.e., homotopic) regions.
Node Block:
{"name": "vmhc",
"config": ["voxel_mirrored_homotopic_connectivity"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["space-symtemplate_desc-cleaned-sm_bold",
"space-symtemplate_desc-brain-sm_bold",
"space-symtemplate_desc-preproc-sm_bold",
"space-symtemplate_desc-sm_bold"]],
"outputs": ["vmhc"]}
'''
# write out a swapped version of the file
# copy and L/R swap file
copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),
name=f'copy_and_L_R_swap_{pipe_num}',
mem_gb=3.0)
copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')
node, out = strat_pool.get_data(["space-symtemplate_desc-cleaned-sm_bold",
"space-symtemplate_desc-brain-sm_bold",
"space-symtemplate_desc-preproc-sm_bold",
"space-symtemplate_desc-sm_bold"])
wf.connect(node, out, copy_and_L_R_swap, 'in_file')
# calculate correlation between original and swapped images
pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),
name=f'pearson_correlation_{pipe_num}',
mem_gb=3.0)
pearson_correlation.inputs.pearson = True
pearson_correlation.inputs.polort = -1
pearson_correlation.inputs.outputtype = 'NIFTI_GZ'
wf.connect(node, out, pearson_correlation, 'xset')
wf.connect(copy_and_L_R_swap, 'out_file',
pearson_correlation, 'yset')
outputs = {
'vmhc': (pearson_correlation, 'out_file')
}
return (wf, outputs)
|
python
|
##MIT License
##
##Copyright (c) 2019 Jacob Nudel
##Copyright (c) 2019 Yashu Seth
##
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included in all
##copies or substantial portions of the Software.
##
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
##SOFTWARE.
###############################################################################
#based on this blog post: https://yashuseth.blog/2018/07/22/pytorch-neural-network-for-tabular-data-with-categorical-embeddings/
#and this repo: https://github.com/yashu-seth/pytorch-tabular
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.utils.data import Dataset, DataLoader
class TabularDataset(Dataset):
def __init__(self, data, cat_cols=None, output_col=None):
"""
Characterizes a Dataset for PyTorch
Parameters
----------
data: pandas data frame
The data frame object for the input data. It must
contain all the continuous, categorical and the
output columns to be used.
cat_cols: List of strings
The names of the categorical columns in the data.
These columns will be passed through the embedding
layers in the model. These columns must be
label encoded beforehand.
output_col: string
The name of the output variable column in the data
provided.
"""
self.n = data.shape[0]
if output_col:
self.y = data[output_col].astype(np.float32).values.reshape(-1, 1)
else:
self.y = np.zeros((self.n, 1))
self.cat_cols = cat_cols if cat_cols else []
self.cont_cols = [col for col in data.columns
if col not in self.cat_cols + [output_col]]
if self.cont_cols:
self.cont_X = data[self.cont_cols].astype(np.float32).values
else:
self.cont_X = np.zeros((self.n, 1))
if self.cat_cols:
self.cat_X = data[cat_cols].astype(np.int64).values
else:
self.cat_X = np.zeros((self.n, 1))
def __len__(self):
"""
Denotes the total number of samples.
"""
return self.n
def __getitem__(self, idx):
"""
Generates one sample of data.
"""
return [self.y[idx], self.cont_X[idx], self.cat_X[idx]]
class FeedForwardNN(nn.Module):
def __init__(self, emb_dims, no_of_cont, lin_layer_sizes,
output_size, emb_dropout, lin_layer_dropouts):
"""
Parameters
----------
emb_dims: List of two element tuples
This list will contain a two element tuple for each
categorical feature. The first element of a tuple will
denote the number of unique values of the categorical
feature. The second element will denote the embedding
dimension to be used for that feature.
no_of_cont: Integer
The number of continuous features in the data.
lin_layer_sizes: List of integers.
The size of each linear layer. The length will be equal
to the total number
of linear layers in the network.
output_size: Integer
The size of the final output.
emb_dropout: Float
The dropout to be used after the embedding layers.
lin_layer_dropouts: List of floats
The dropouts to be used after each linear layer.
"""
super().__init__()
# Embedding layers
self.emb_layers = nn.ModuleList([nn.Embedding(x, y) for x, y in emb_dims])
no_of_embs = sum([y for x, y in emb_dims])
self.no_of_embs = no_of_embs
self.no_of_cont = no_of_cont
# Linear Layers
first_lin_layer = nn.Linear(self.no_of_embs + self.no_of_cont, lin_layer_sizes[0])
self.lin_layers = nn.ModuleList([first_lin_layer] + [nn.Linear(lin_layer_sizes[i], lin_layer_sizes[i + 1]) for i in range(len(lin_layer_sizes) - 1)])
for lin_layer in self.lin_layers:
nn.init.kaiming_normal_(lin_layer.weight.data)
# Output Layer
self.output_layer = nn.Linear(lin_layer_sizes[-1], output_size)
nn.init.kaiming_normal_(self.output_layer.weight.data)
self.out_act = nn.Sigmoid()
# Batch Norm Layers
self.first_bn_layer = nn.BatchNorm1d(self.no_of_cont)
self.bn_layers = nn.ModuleList([nn.BatchNorm1d(size)
for size in lin_layer_sizes])
# Dropout Layers
self.emb_dropout_layer = nn.Dropout(emb_dropout)
        self.dropout_layers = nn.ModuleList([nn.Dropout(size)
                                             for size in lin_layer_dropouts])
def forward(self, cont_data, cat_data):
if self.no_of_embs != 0:
x = [emb_layer(cat_data[:, i]) for i,emb_layer in enumerate(self.emb_layers)]
x = torch.cat(x, 1)
x = self.emb_dropout_layer(x)
if self.no_of_cont != 0:
normalized_cont_data = self.first_bn_layer(cont_data)
if self.no_of_embs != 0:
x = torch.cat([x, normalized_cont_data], 1)
else:
x = normalized_cont_data
        for lin_layer, dropout_layer, bn_layer in zip(self.lin_layers, self.dropout_layers, self.bn_layers):
x = F.relu(lin_layer(x))
x = bn_layer(x)
x = dropout_layer(x)
x = self.output_layer(x)
x = self.out_act(x)
return x
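

# Illustrative usage sketch (assumption, not part of the original module): a toy
# frame with one categorical and one continuous column. Column names, sizes and
# hyper-parameters are hypothetical; categorical columns are label encoded first,
# as the TabularDataset docstring requires.
if __name__ == "__main__":
    df = pd.DataFrame({
        "color": LabelEncoder().fit_transform(["red", "blue", "red", "green"]),
        "size": [1.0, 2.0, 3.0, 4.0],
        "label": [0.0, 1.0, 0.0, 1.0],
    })
    dataset = TabularDataset(df, cat_cols=["color"], output_col="label")
    loader = DataLoader(dataset, batch_size=2, shuffle=True)

    # emb_dims: "color" has 3 distinct values, embedded into 2 dimensions
    model = FeedForwardNN(emb_dims=[(3, 2)], no_of_cont=1,
                          lin_layer_sizes=[8, 8], output_size=1,
                          emb_dropout=0.1, lin_layer_dropouts=[0.1, 0.1])
    for y, cont_x, cat_x in loader:
        preds = model(cont_x, cat_x)
        print(preds.shape)  # torch.Size([2, 1])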
|
python
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as f
from torch.autograd import Variable
import os
import numpy as np
from tqdm import tqdm
class ONE_HOT_MLP(nn.Module):
def __init__(self, hidden):
super(ONE_HOT_MLP, self).__init__()
self.cnn = nn.Sequential(
# 1, 124, 32
nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(0.2, inplace=True),
# 32, 62, 16
nn.Conv2d(32, 64, kernel_size=4, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
# 64, 15, 15
nn.Conv2d(64, 128, kernel_size=5, stride=3, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# 128, 20, 5
)
self.mlp = nn.Sequential(nn.Linear(128 * 10 * 5 * 2 + 1, hidden),
nn.Tanh(),
nn.Linear(hidden, 1))
for p in self.mlp.parameters():
torch.nn.init.normal_(p, mean=0, std=0.1)
torch.nn.init.constant_(self.mlp[0].bias, val=0.)
torch.nn.init.constant_(self.mlp[2].bias, val=0.)
def forward(self, x, t):
mid = self.cnn(x)
return self.mlp(torch.cat((t.reshape(t.shape[0], -1), mid.reshape(mid.shape[0], -1)), dim=1))
def train_one_hot_mlp(params, hidden, device):
mse = torch.nn.MSELoss()
os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
thermal_conductivity_train_loader = torch.load('Data/thermal_conductivity_vae_mlp_train_loader.pkl')
heat_capacity_train_loader = torch.load('Data/heat_capacity_vae_mlp_train_loader.pkl')
heat_capacity_one_hot_mlp = ONE_HOT_MLP(hidden).cuda()
thermal_conductivity_one_hot_mlp = ONE_HOT_MLP(hidden).cuda()
thermal_conductivity_optimizer = optim.Adam(
thermal_conductivity_one_hot_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
heat_capacity_optimizer = optim.Adam(
heat_capacity_one_hot_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
thermal_conductivity_total_loss_list = np.ones(params['VAE_epoch_num'] + 10)
heat_capacity_total_loss_list = np.ones(params['VAE_epoch_num'] + 10)
thermal_conductivity_total_loss_list *= 5000000
heat_capacity_total_loss_list *= 5000000
thermal_conductivity_model_file_name = \
'Model_pkl/ONE_HOT_MLP_thermal_conductivity_hidden_' + str(hidden) + '.pkl'
heat_capacity_model_file_name = \
'Model_pkl/ONE_HOT_MLP_heat_capacity_hidden_' + str(hidden) + '.pkl'
for epoch in range(params['VAE_epoch_num']):
total_loss = 0
thermal_conductivity_one_hot_mlp.train()
for i, data in enumerate(tqdm(thermal_conductivity_train_loader, 0)):
one_hot = torch.cat((data[0], data[1]), dim=1)
one_hot = one_hot.reshape(one_hot.shape[0], 1, one_hot.shape[1], one_hot.shape[2])
one_hot = Variable(one_hot).cuda().type(torch.cuda.FloatTensor)
thermal_conductivity_optimizer.zero_grad()
t = data[2].cuda().reshape(data[2].shape[0], 1).type(torch.cuda.FloatTensor)
label = data[3].cuda().reshape(data[3].shape[0], 1).type(torch.cuda.FloatTensor)
prediction = thermal_conductivity_one_hot_mlp(one_hot, t)
loss = mse(prediction, label)
loss.backward()
total_loss += loss.data.item() / 1000
thermal_conductivity_optimizer.step()
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, total_loss / len(thermal_conductivity_train_loader.dataset)))
thermal_conductivity_total_loss_list[epoch] = total_loss / len(thermal_conductivity_train_loader.dataset)
if np.argmin(thermal_conductivity_total_loss_list) == epoch:
torch.save(thermal_conductivity_one_hot_mlp, thermal_conductivity_model_file_name)
print('best result, saving the model to ' + thermal_conductivity_model_file_name)
elif np.argmin(thermal_conductivity_total_loss_list) == epoch - 25:
print('Finish: Training process over due to useless training')
break
for epoch in range(params['VAE_epoch_num']):
total_loss = 0
heat_capacity_one_hot_mlp.train()
for i, data in enumerate(tqdm(heat_capacity_train_loader, 0)):
one_hot = torch.cat((data[0], data[1]), dim=1)
one_hot = one_hot.reshape(one_hot.shape[0], 1, one_hot.shape[1], one_hot.shape[2])
one_hot = Variable(one_hot).cuda().type(torch.cuda.FloatTensor)
heat_capacity_optimizer.zero_grad()
t = data[2].cuda().reshape(data[2].shape[0], 1).type(torch.cuda.FloatTensor)
label = data[3].cuda().reshape(data[3].shape[0], 1).type(torch.cuda.FloatTensor)
prediction = heat_capacity_one_hot_mlp(one_hot, t)
loss = mse(prediction, label)
loss.backward()
total_loss += loss.data.item() / 1000
heat_capacity_optimizer.step()
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, total_loss / len(heat_capacity_train_loader.dataset)))
heat_capacity_total_loss_list[epoch] = total_loss / len(heat_capacity_train_loader.dataset)
if np.argmin(heat_capacity_total_loss_list) == epoch:
torch.save(heat_capacity_one_hot_mlp, heat_capacity_model_file_name)
print('best result, saving the model to ' + heat_capacity_model_file_name)
elif np.argmin(heat_capacity_total_loss_list) == epoch - 25:
print('Finish: Training process over due to useless training')
break
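

# Illustrative note (assumption, not part of the original module): the training
# entry point expects pre-built DataLoaders pickled under Data/ and a params dict
# carrying the epoch budget, roughly along these lines (values are hypothetical):
#
#   params = {'VAE_epoch_num': 100}
#   train_one_hot_mlp(params, hidden=256, device=0)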
|
python
|
import os
import string
from tqdm import tqdm
#from google.colab import drive
#conda install -c huggingface transformers
import matplotlib.pyplot as plt
#% matplotlib inline
import pandas as pd
import seaborn as sns
import numpy as np
import random
import torch
from torch.utils.data import Dataset, DataLoader, random_split, RandomSampler, SequentialSampler
from PoemGenerator.data_2 import PoemDataset
#torch.manual_seed(42)
#random.seed(42)
#np.random.seed(42)
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
from transformers import AdamW, get_linear_schedule_with_warmup
import nltk
nltk.download('punkt')
# Plot how long the lines of our poems are; SHOULD be bounded from 3 to roughly
# 15 since that's what we did in our cleaning step....
# However, certain mismatches between this tokenizer and what we did for cleaning may make it longer (which is okay).
def plot_poem_length_distributions(df):
doc_lengths = []
for poem in df:
        #Uncomment all of this if we want to pass the model LINES as opposed to individual poems
# paragraphs = [p for p in poem.split('\n') if p]
#
# for line in paragraphs:
# tokens = nltk.word_tokenize(line)
# doc_lengths.append(len(tokens))
tokens = nltk.word_tokenize(poem)
doc_lengths.append(len(tokens))
doc_lengths = np.array(doc_lengths)
print('Average length (of poems): ', np.average(doc_lengths))
print('Max length (of poems): ', np.max(doc_lengths))
sns.distplot(doc_lengths)
plt.show()
#Model configuration here
def configure_model(tokenizer,num_embed=768, num_layers=6, num_head=4, activation_fn='gelu'):
#n_embd (int, optional, defaults to 768) — Dimensionality of the embeddings and hidden states.
#n_layer (int, optional, defaults to 12) — Number of hidden layers in the Transformer encoder.
#n_head (int, optional, defaults to 12) — Number of attention heads for each attention layer in the Transformer encoder.
#activation_function (str, optional, defaults to "gelu") — Activation function, to be selected in the list ["relu", "silu", "gelu", "tanh", "gelu_new"].
configuration = GPT2Config(n_embd = num_embed, n_layer = num_layers, n_head=num_head, activation_function=activation_fn)
# instantiate the model
model = GPT2LMHeadModel.from_pretrained("gpt2", config=configuration)
model.resize_token_embeddings(len(tokenizer))
return model
def train_model(model, train_dataloader, validation_dataloader, epochs, optimizer, log_period, tokenizer, device, output_dir):
training_stats = []
outer_bar = tqdm(range(epochs), unit="epoch")
for epoch_i in range(0, epochs):
# ========================================
# Training
# ========================================
print("")
print('Training...')
total_train_loss = 0
total_train_ppl = 0
model.train()
for step, batch in tqdm(enumerate(train_dataloader)):
b_input_ids = batch[0].to(device)
b_labels = batch[0].to(device)
b_masks = batch[1].to(device)
model.zero_grad()
outputs = model(b_input_ids,
labels=b_labels,
attention_mask=b_masks,
token_type_ids=None
)
loss = outputs[0]
batch_loss = loss.item()
total_train_loss += batch_loss
total_train_ppl += torch.exp(loss)
# Get sample and save the model every x batches
if step % log_period == 0 and not step == 0:
model.eval()
sample_outputs = model.generate(
bos_token_id=random.randint(1, 30000),
do_sample=True,
top_k=50,
max_length=200,
top_p=0.95,
num_return_sequences=1
)
for i, sample_output in enumerate(sample_outputs):
print("{}: {}".format(i, tokenizer.decode(sample_output, skip_special_tokens=True)))
model.train()
loss.backward()
optimizer.step()
# Calculate the average loss over all of the batches.
avg_train_loss = total_train_loss / len(train_dataloader)
avg_train_ppl = total_train_ppl / len(train_dataloader)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
# ========================================
# Validation
# ========================================
print("")
print("Running Validation...")
model.eval()
total_eval_loss = 0
total_eval_perp = 0
# Evaluate data for one epoch
for batch in validation_dataloader:
b_input_ids = batch[0].to(device)
b_labels = batch[0].to(device)
b_masks = batch[1].to(device)
with torch.no_grad():
outputs = model(b_input_ids,
# token_type_ids=None,
attention_mask=b_masks,
labels=b_labels)
loss = outputs[0]
batch_loss = loss.item()
batch_perp = torch.exp(loss)
total_eval_perp += batch_perp
total_eval_loss += batch_loss
avg_val_loss = total_eval_loss / len(validation_dataloader)
avg_val_ppl = total_eval_perp / len(validation_dataloader)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
# Record all statistics from this epoch.
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Training Perplexity': avg_train_ppl,
'Valid. Loss': avg_val_loss,
'Valid. Perplexity': avg_val_ppl
})
# They can then be reloaded using `from_pretrained()`
# Save the model
f_name = 'T_Loss_'+ str(round(avg_train_loss, 3)) + '_V_Loss_' + str(round(avg_val_loss, 3))
true_output_dir = os.path.join(output_dir, f_name)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(true_output_dir)
tokenizer.save_pretrained(true_output_dir)
outer_bar.update(1)
display_training_summary(training_stats, epochs)
def display_training_summary(training_stats, epoch):
# Display summary of training progress
    pd.set_option('display.precision', 2)
df_stats = pd.DataFrame(data=training_stats)
df_stats = df_stats.set_index('epoch')
print(df_stats)
plot_loss_perplexity(df_stats, 'l', epoch)
plot_loss_perplexity(df_stats, 'p', epoch)
def plot_loss_perplexity(df_stats, l_or_p, epochs):
a = ''
if l_or_p == 'l':
a = 'Loss'
if l_or_p == 'p':
a = 'Perplexity'
col_1 = 'Training ' + a
col_2 = 'Valid. ' + a
sns.set(style='darkgrid')
sns.set(font_scale=1.5)
plt.rcParams["figure.figsize"] = (12,6)
plt.plot(df_stats[col_1], 'b-o', label="Training")
plt.plot(df_stats[col_2], 'g-o', label="Validation")
print('\n==================')
print(a)
print(df_stats[col_1])
print(df_stats[col_2])
print('==================')
plt.title("Training & Validation " + a )
plt.xlabel("Epoch")
plt.ylabel(a)
plt.legend()
plt.xticks(range(1, epochs))
plt.show()
#Generate a sequence of tokens
def generate(model, tokenizer, device, prompt="<|startoftext|>", isval = True):
#In terms of generating; may have to play around with top_k and top_p to see if either
#Combining them, or only using one over the other gives more coherent poems
# if isval:
model.eval()
generated = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0)
generated = generated.to(device)
# print(generated)
sample_outputs = model.generate(
generated,
#bos_token_id=random.randint(1,30000),
do_sample=True,
top_k=50, #the K most likely next words are filtered and the probability mass is redistributed among only those K next words.
max_length = 60, #15 max words * 4 number of lines
min_length = 12, #3 words minimum * 4 number of lines
top_p=0.95 #Top-p sampling picks the minimum number of words to exceed together p=[]%
#num_return_sequences=4 #Uncomment this for multiple, independently sampled outputs
)
for i, sample_output in enumerate(sample_outputs):
output = tokenizer.decode(sample_output, skip_special_tokens=True)
if isval:
print("{}: {}\n\n".format(i, output))
return output
# else:
# generated = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0)
# generated = generated.to(device)
# sample_outputs = model.generate(
# generated,
# #bos_token_id=random.randint(1,30000),
# do_sample=True,
# top_k=50, #the K most likely next words are filtered and the probability mass is redistributed among only those K next words.
# max_length = 60, #15 max words * 4 number of lines
# min_length = 12, #3 words minimum * 4 number of lines
# top_p=0.95 #Top-p sampling picks the minimum number of words to exceed together p=[]%
# #num_return_sequences=4 #Uncomment this for multiple, independently sampled outputs
# )
# return tokenizer.decode(sample_outputs[0], skip_special_tokens = True)
def main():
batch_size = 2
epochs = 3
learning_rate = 1e-3
log_period = 100
save_dir = './model_save/'
# Create output directory if needed
if not os.path.exists(save_dir):
os.makedirs(save_dir)
num_embedded = 768
num_layers = 6
num_head = 4 # [4,6,8]
activation_function = 'gelu'
df = pd.read_csv('data/clean_poems.csv')
# Simple cleaning
df.drop_duplicates('Poem', inplace=True) # Drop any duplicate poems
df['Poem'] = df['Poem'].str.translate(str.maketrans('', '', string.punctuation)) # Get rid of punctuation
df['Poem'] = df['Poem'].apply(str.lower) # Make everything lower-case
df['Poem'] = df['Poem'].str.replace('\n', ' ')
print('Read ', len(df['Poem']), ' examples')
#df.to_csv('data/clean_poems.csv', index=False)
    # Create a smaller DF to work with for testing purposes
data_percentage = 1.0
df = df.sample(frac=data_percentage, replace=False)
print('Shrank examples to ', len(df['Poem']), ' examples')
poems = df.Poem
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', bos_token='<|startoftext|>', eos_token='<|endoftext|>',
pad_token='<|pad|>')
dataset = PoemDataset(poems, tokenizer, max_length=num_embedded)
# Split into training and validation sets ~ 90% Train, 10% Validation
train_size = int(0.9 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
print('{:>5,} training samples'.format(train_size))
print('{:>5,} validation samples'.format(val_size))
# Create dataloaders for the datasets
train_dataloader = DataLoader(
train_dataset,
sampler=RandomSampler(train_dataset), # Select batches randomly
batch_size=batch_size)
validation_dataloader = DataLoader(
val_dataset,
sampler=SequentialSampler(val_dataset), # Pull out batches sequentially.
batch_size=batch_size)
# Move model to GPU if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = configure_model(tokenizer, num_embed=num_embedded, num_layers=num_layers, num_head=num_head, activation_fn=activation_function)
# Create optimizer
optimizer = AdamW(model.parameters(), lr=learning_rate)
model = model.to(device)
#Train the model
train_model(model, train_dataloader, validation_dataloader, epochs, optimizer, log_period, tokenizer, device, save_dir)
#Generate with the model
generate(model, tokenizer, device, 'I love my cat ')
if __name__ == "__main__":
main()
|
python
|
import json
import unittest
from mock import Mock, patch
import delighted
get_headers = {
'Accept': 'application/json',
'Authorization': 'Basic YWJjMTIz',
'User-Agent': "Delighted Python %s" % delighted.__version__
}
post_headers = get_headers.copy()
post_headers.update({'Content-Type': 'application/json'})
class DelightedTestCase(unittest.TestCase):
def setUp(self):
super(DelightedTestCase, self).setUp()
delighted.api_key = 'abc123'
self.request_patcher = patch('requests.request')
self.request_mock = self.request_patcher.start()
def tearDown(self):
super(DelightedTestCase, self).tearDown()
self.request_patcher.stop()
def mock_response(self, status_code, headers, data, links=None):
self.mock_multiple_responses([delighted.http_response.HTTPResponse(status_code, headers, data, links)])
def mock_multiple_responses(self, responses):
mock_responses = []
for response in responses:
mock_response = Mock()
mock_response.status_code = response.status_code
mock_response.headers = response.headers
mock_response.text = json.dumps(response.body)
mock_response.links = response.links
mock_responses.append(mock_response)
self.request_mock.side_effect = mock_responses
def mock_error(self, mock):
mock.exceptions.RequestException = Exception
mock.request.side_effect = mock.exceptions.RequestException()
def check_call(self, meth, url, headers, post_data, get_params):
if post_data is not None:
post_data = json.dumps(post_data)
self.request_mock.assert_called_once_with(meth, url,
headers=headers,
data=post_data,
params=get_params)
def check_multiple_call(self, calls):
self.assertEqual(self.request_mock.call_count, len(calls))
for call in calls:
if call['kwargs']['data'] is not None:
call['kwargs']['data'] = json.dumps(call['kwargs']['data'])
self.request_mock.assert_any_call(call['meth'], call['url'], **call['kwargs'])
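

# Illustrative sketch (assumption, not part of the original module): how a concrete
# test built on DelightedTestCase might stub a response and assert on the outgoing
# request. The endpoint, payload and client call shown here are hypothetical.
#
#   class TestPerson(DelightedTestCase):
#       def test_create_person(self):
#           self.mock_response(200, {}, {'id': '123', 'email': '[email protected]'})
#           delighted.Person.create(email='[email protected]')
#           self.check_call('post', 'https://api.delighted.com/v1/people.json',
#                           post_headers, {'email': '[email protected]'}, None)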
|
python
|
#!/usr/bin/env python3
from typing import Any, List
import configargparse
import paleomix
SUPPRESS = configargparse.SUPPRESS
class ArgumentDefaultsHelpFormatter(configargparse.ArgumentDefaultsHelpFormatter):
"""Modified ArgumentDefaultsHelpFormatter that excludes several constants (True,
False, None) and uses a custom presentation of the default value.
"""
def __init__(self, *args: Any, **kwargs: Any):
# Enable wordwrapping
kwargs.setdefault("width", 79)
super().__init__(*args, **kwargs)
def _get_help_string(self, action: configargparse.Action):
# The following values look silly as part of a help string
if isinstance(action.default, bool) or action.default in [None, [], ()]:
return action.help
# The subclass does not allow modification to the defaults string, so instead
# we access the logic by simply checking if the result was modified.
if super()._get_help_string(action) == action.help:
return action.help
assert action.help is not None
return action.help + " [%(default)s]"
class ArgumentParser(configargparse.ArgumentParser):
"""Supports keys with underscores instead of dashes, for backwards compatibility
    with old paleomix config files, provided that these do not use per-host sections.
"""
def __init__(self, *args: Any, **kwargs: Any):
kwargs.setdefault("formatter_class", ArgumentDefaultsHelpFormatter)
# Workaround for configargparse (1.2.3) not considering abbreviations when
# applying options from config files, resulting in config file options
# overriding abbreviated options supplied on the command-line.
kwargs.setdefault("allow_abbrev", False)
super().__init__(*args, **kwargs)
self.add_argument(
"-v",
"--version",
action="version",
version="%(prog)s v" + paleomix.__version__,
)
def get_possible_config_keys(self, *args: Any, **kwargs: Any) -> List[str]:
keys = super().get_possible_config_keys(*args, **kwargs)
for key in keys:
key = key.strip("-").replace("-", "_")
if key not in keys:
keys.append(key)
return keys
def convert_item_to_command_line_arg(self, action, key, value):
# Ignore empty options from old config files
if action and value == "=":
return []
return super().convert_item_to_command_line_arg(action, key, value)
def add_subparsers(self, *args: Any, **kwargs: Any):
subparsers = super().add_subparsers(*args, **kwargs)
# Hack to hide aliases from subcommand help text, since aliases are only used
# for deprecated commands/command-names
subparsers._ChoicesPseudoAction = _ChoicesPseudoAction
return subparsers
class _ChoicesPseudoAction(configargparse.Action):
def __init__(self, name, aliases, help):
super(_ChoicesPseudoAction, self).__init__(
option_strings=[],
dest=name,
help=help,
metavar=name,
)
|
python
|
#!/usr/bin/env nix-shell
#!nix-shell -i python -p python3 -I nixpkgs=../../pkgs
# SPDX-FileCopyrightText: 2020 Daniel Fullmer and robotnix contributors
# SPDX-License-Identifier: MIT
import json
import os
import urllib.request
def save(filename, data):
    with open(filename, 'w') as handle:
        handle.write(json.dumps(data, sort_keys=True, indent=2, separators=(',', ': ')))
def fetch_metadata():
metadata = {}
lineage_build_targets_str = urllib.request.urlopen("https://github.com/LineageOS/hudson/raw/master/lineage-build-targets").read().decode()
for line in lineage_build_targets_str.split("\n"):
line = line.strip()
if line == "":
continue
if line.startswith("#"):
continue
device, variant, branch, updatePeriod = line.split()
metadata[device] = {
'variant': variant,
'branch': branch,
}
###
devices = json.load(urllib.request.urlopen("https://github.com/LineageOS/hudson/raw/master/updater/devices.json"))
for data in devices:
if data['model'] not in metadata:
continue
vendor = data['oem']
vendor = vendor.lower()
# Workaround name inconsistency with LG
if vendor == 'lg':
vendor = 'lge'
# Workaround google device names source tree inconsistency
if data['model'] == 'shamu':
vendor = 'moto'
if data['model'] == 'flox':
vendor = 'asus'
metadata[data['model']].update({
'vendor': vendor,
'name': data['name'],
'lineage_recovery': data.get('lineage_recovery', False)
})
return metadata
if __name__ == '__main__':
metadata = fetch_metadata()
save('device-metadata.json', metadata)
|
python
|
from .transformer import TransformerXL
|
python
|
# encoding: utf-8
from hoopa import const
from hoopa.middlewares.stats import StatsMiddleware
NAME = "hoopa"
# Maximum number of worker coroutines
WORKER_NUMBERS = 1
# Delay between requests; may also be a list of two ints, in which case a random float between the two is used
DOWNLOAD_DELAY = 3
# Pending timeout in seconds; requests that exceed it are put back into the waiting queue
PENDING_THRESHOLD = 100
# Keep running after all tasks are finished
RUN_FOREVER = False
# Queues
# Scheduler queue backend: redis, memory or mq
QUEUE_CLS = const.MemoryQueue
# Delete the queues (including data sets and the dedup queue)
CLEAN_QUEUE = False
# Priority to use; only meaningful when the queue backend is redis
PRIORITY = None
# Downloader backend: aiohttp or httpx
DOWNLOADER_CLS = const.AiohttpDownloader
HTTP_CLIENT_KWARGS = None
# Download middlewares
MIDDLEWARES = [
StatsMiddleware
]
# Default dedup filter; the dedup queue is not deleted, and the filter is chosen based on the queue type
DUPEFILTER_CLS = const.MemoryDupeFilter
# Whether to delete the dedup queue
CLEAN_DUPEFILTER = None
# Connection settings for the dedup database
DUPEFILTER_SETTING = None
# Stats collector; chosen based on the queue type by default, can be overridden
STATS_CLS = const.MemoryStatsCollector
# Redis connection settings
# REDIS_SETTING = "redis://127.0.0.1:6379/0?encoding=utf-8"
REDIS_SETTING = {
'host': '127.0.0.1',
'port': 6379,
'db': 0,
'password': ''
}
# MQ
MQ_MAXSIZE = 10
MQ_URI = "amqp://guest:[email protected]/"
MQ_API_PORT = 15672
# Other settings
# Serialization: pickle, ujson or orjson
SERIALIZATION = "ujson"
# Logging settings
LOG_LEVEL = "INFO"
LOG_WRITE_FILE = False
|
python
|
from pathlib import Path
SRC = Path(__file__).parent
BLD = SRC.parent.joinpath("bld")
|
python
|
from collections import Counter
class Square(Counter):
"""Creates a special purpose counter than can store only one value, and wipes itself when zero."""
def __init__(self):
"""Object should be initialized empty."""
pass
def __setitem__(self, key, cnt):
"""Update the count."""
# If Counter already contains another key, throw an exception.
        if len(self.keys()) > 0 and key not in self.keys():
raise KeyError(f"Square already contains key '{list(self.keys()).pop()}', can't modify with key '{key}'")
# If count is being set to zero, remove the key altogether.
if cnt == 0:
super().__delitem__(key)
# Otherwise just assign the value as usual.
else:
super().__setitem__(key, cnt)
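

# Illustrative usage sketch (assumption, not part of the original module): a
# Square tracks a count for exactly one key, empties itself when the count
# reaches zero, and rejects a second, different key while occupied.
if __name__ == "__main__":
    sq = Square()
    sq["pawn"] = 2        # store a count for one key
    sq["pawn"] = 0        # dropping the count to zero wipes the square
    assert len(sq) == 0
    sq["rook"] = 1        # allowed again, since the square is now empty
    try:
        sq["queen"] = 1   # a different key while occupied raises KeyError
    except KeyError:
        pass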
|
python
|
from django.http import JsonResponse
from django.shortcuts import render
from django.views.generic import View
# model
from timepredictor.models import PrediccionTiempos, UltimosGps
# Create your views here.
class MapHandler(View):
'''This class manages the map where the bus and stops are shown'''
def __init__(self):
self.context={}
def get(self, request):
template = "timePredictor.html"
return render(request, template, self.context)
class GetEstimatedTimes(View):
    '''This class queries the database for the estimated arrival times at the next stops of a bus'''
def __init__(self):
self.context={}
def get(self, request, licencePlate):
# the position of interest are the ones ocurred in the last 10 minutes
stops = PrediccionTiempos.objects.filter(patente = licencePlate).order_by('-tiempo_tstamp')
#positions = UltimaCargaGps.objects.all()[:10]
response = {}
busDesc = 'bus'
stopsDesc = 'stops'
response[stopsDesc] = []
for stop in stops:
if not busDesc in response:
response[busDesc] = {
'licencePlate': stop.patente,
'AuthRoute': stop.servicio
}
response[stopsDesc].append(
{'stopCode': stop.codigo,
'arrivedEstimatedTime': stop.tiempo_tstamp,
'distanceOnRoute': stop.distancia,
'arrivedEstimatedTimeInSecs': stop.tiempo})
return JsonResponse(response, safe=False)
class GetBusPosition(View):
    '''This class queries the database for the position of a bus'''
def __init__(self):
"""the contructor, context are the parameter given to the html template"""
self.context={}
def get(self, request, licencePlate):
# the position of interest are the ones ocurred in the last 10 minutes
positions = UltimosGps.objects.filter(patente = licencePlate)
response = []
for aPosition in positions:
response.append({
'licencePlate': aPosition.patente,
'authRoute': aPosition.servicio,
'userRoute': aPosition.servicio_usuario,
'distOnroute': aPosition.dist_en_ruta,
'distToRoute': aPosition.dist_a_ruta,
'InstVelocity': aPosition.velocidad_instantanea,
'velocity2GPS': aPosition.velocidad_2gps,
'velocity4GPS': aPosition.velocidad_4gps,
'operator': aPosition.operador,
'latitude': aPosition.latitud,
'longitude': aPosition.longitud,
'time': aPosition.tiempo,
'orientation': aPosition.orientacion,
'type': aPosition.tipo,
'capacity': aPosition.capacidad})
return JsonResponse(response, safe=False)
class GetActiveBuses(View):
    '''This class queries the database for the buses that are currently doing a trip'''
def __init__(self):
self.context={}
def get(self, request):
activeBuses = UltimosGps.objects.order_by('patente', 'servicio').distinct('patente', 'servicio')
response = []
for activeBus in activeBuses:
response.append([
activeBus.patente,
activeBus.servicio,
activeBus.servicio_usuario])
"""
response.append({
'licencePlate': activeBus.patente,
'authRoute': activeBus.servicio,
'userRoute': activeBus.servicio_usuario,
#'distOnroute': activeBus.dist_en_ruta,
#'distToRoute': activeBus.dist_a_ruta,
#'InstVelocity': activeBus.velocidad_instantanea,
#'velocity2GPS': activeBus.velocidad_2gps,
#'velocity4GPS': activeBus.velocidad_4gps,
#'operator': activeBus.operador,
#'latitude': activeBus.latitud,
#'longitude': activeBus.longitud,
#'time': activeBus.tiempo,
#'orientation': activeBus.orientacion,
#'type': activeBus.tipo,
#'capacity': activeBus.capacidad
})
"""
return JsonResponse(response, safe=False)
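

# Illustrative sketch (assumption, not part of the original module): a urls.py
# that could expose these views. Paths and the app layout are hypothetical.
#
#   from django.urls import path
#   from .views import MapHandler, GetEstimatedTimes, GetBusPosition, GetActiveBuses
#
#   urlpatterns = [
#       path('', MapHandler.as_view()),
#       path('estimatedTimes/<str:licencePlate>/', GetEstimatedTimes.as_view()),
#       path('busPosition/<str:licencePlate>/', GetBusPosition.as_view()),
#       path('activeBuses/', GetActiveBuses.as_view()),
#   ]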
|
python
|
"""
CLI Module.
Handles CLI for the Repository Updater
"""
from os import environ
from sys import argv
import click
import crayons
from . import APP_FULL_NAME, APP_VERSION
from .github import GitHub
from .repository import Repository
@click.command()
@click.option(
"--token",
hide_input=True,
prompt="GitHub access token",
help="GitHub access token",
metavar="<TOKEN>",
)
@click.option(
"--repository",
prompt="Home Assistant Addons repository to update",
help="The Home Assistant Addons repository to update",
metavar="<orgname/reponame>",
)
@click.option("--addon", help="Update a single/specific add-on", metavar="<TARGET>")
@click.option("--force", is_flag=True, help="Force an update of the add-on repository")
@click.version_option(APP_VERSION, prog_name=APP_FULL_NAME)
def repository_updater(token, repository, addon, force):
"""Community Home Assistant Add-ons Repository Updater."""
click.echo(crayons.blue(APP_FULL_NAME, bold=True))
click.echo(crayons.blue("-" * 51, bold=True))
github = GitHub(token)
click.echo(
"Authenticated with GitHub as %s"
% crayons.yellow(github.get_user().name, bold=True)
)
repository = Repository(github, repository, addon, force)
repository.update()
repository.cleanup()
def git_askpass():
"""
Git credentials helper.
Short & sweet script for use with git clone and fetch credentials.
Requires GIT_USERNAME and GIT_PASSWORD environment variables,
intended to be called by Git via GIT_ASKPASS.
"""
if argv[1] == "Username for 'https://github.com': ":
print(environ["GIT_USERNAME"])
exit()
if argv[1] == "Password for 'https://" "%(GIT_USERNAME)[email protected]': " % environ:
print(environ["GIT_PASSWORD"])
exit()
exit(1)
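

# Illustrative note (assumption, not part of the original module): git_askpass is
# meant to be wired up through Git's GIT_ASKPASS mechanism, roughly like this
# (the console-script name is hypothetical):
#
#   export GIT_USERNAME=me GIT_PASSWORD=secret
#   export GIT_ASKPASS=repository-updater-git-askpass
#   git clone https://github.com/example/addon-repository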
|
python
|
'''Functions to calculate seismic noise in suspended optics.
'''
from __future__ import division
import numpy as np
from scipy.interpolate import PchipInterpolator as interp1d
def seismic_suspension_fitered(sus, in_trans):
"""Seismic displacement noise for single suspended test mass.
:sus: gwinc suspension structure
:in_trans: input translational displacement spectrum
:returns: tuple of displacement noise power spectrum at :f:, and
horizontal and vertical components.
"""
hTable = sus.hTable
vTable = sus.vTable
theta = sus.VHCoupling.theta
# horizontal noise total
nh = (abs(hTable)**2) * in_trans**2
# vertical noise total
nv = (abs(theta * vTable)**2) * in_trans**2
# new total noise
n = nv + nh
return n, nh, nv
def seismic_BSC_ISI(f):
"""Rough seismic noise spectra on aLIGO BSC ISI table.
:f: frequency array in Hz
:returns: tuple of displacement noise power spectrum at :f: for
translational and rotational DOFs.
"""
SEI_F = np.array([0.01, 0.03, 0.1, 0.2, 0.5, 1, 10, 30, 300])
# translational DOFs
SEI_T = np.array([3e-6, 1e-6, 2e-7, 2e-7, 8e-10, 1e-11, 3e-13, 3e-14, 3e-14])
nt = 10**(interp1d(SEI_F, np.log10(SEI_T))(f))
# rotational DOFs
SEI_R = np.array([1e-8, 3e-8, 2e-8, 1e-8, 4e-10, 1e-11, 3e-13, 3e-14, 3e-14])
nr = 10**(interp1d(SEI_F, np.log10(SEI_R))(f))
return nt, nr
def seismic_BSC_ISI_6D(f):
"""Rough seismic noise spectra on aLIGO BSC ISI table with a 6D seismometer.
This largely follows Mow-Lowry and Martynov, arXiv:1801.01468.
:f: frequency array in Hz
:returns: tuple of displacement noise power spectrum at :f: for
translational and rotational DOFs.
"""
# FIXME: merge this with above, using flag
SEI_F = np.array([0.01, 0.03, 0.1, 0.2, 0.5, 1, 10, 100, 300])
SEI_T_self = np.array([1e-7, 1e-9, 3e-11, 6e-12, 3e-13, 1e-13, 3e-14, 1e-14, 1e-14])/1000
nt_self = 10**(interp1d(SEI_F, np.log10(SEI_T_self))(f))
nt_gnd = 10*seismic_ground_NLNM(f)
blend_t = np.abs(100/(1+1j*f/0.01)**4)
nt = np.sqrt(nt_self**2 + (blend_t * nt_gnd)**2)
SEI_R_self = np.array([2e-11, 5e-12, 1e-12, 6e-13, 3e-13, 2e-13, 6e-14, 2e-14, 2e-14])/1000
nr_self = 10**(interp1d(SEI_F, np.log10(SEI_R_self))(f))
nr_gnd = np.abs(1e-7/(1+1j*f/0.001))
blend_r = np.abs(100/(1+1j*f/0.01)**4)
nr = np.sqrt(nr_self**2 + (blend_r * nr_gnd)**2)
return nt, nr
def seismic_ground_NLNM(f):
"""The Peterson new generic ground motion low noise model.
:f: frequency array in Hz
:returns: displacement noise amplitude spectrum at :f:
"""
Pl = np.array([
1.00e-02, 1.00e-01, 1.70e-01, 4.00e-01, 8.00e-01, 1.24e+00,
2.40e+00, 4.30e+00, 5.00e+00, 6.00e+00, 1.00e+01, 1.20e+01,
1.56e+01, 2.19e+01, 3.16e+01, 4.50e+01, 7.00e+01, 1.01e+02,
1.54e+02, 3.28e+02, 6.00e+02, 1.00e+04])
Al = np.array([
-156.72, -162.36, -166.7, -170.0, -166.4, -168.6, -159.98,
-141.1, -71.36, -97.26, -132.18, -205.27, -37.65, -114.37,
-160.58, -187.5, -216.47, -185.0, -168.34, -217.43, -258.28,
-346.88])
Bl = np.array([
5.64, 5.64, 0.0, -8.3, 28.9, 52.48, 29.81,
0.0, -99.77, -66.49, -31.57, 36.16, -104.33, -47.1,
-16.28, 0.0, 15.7, 0.0, -7.61, 11.9, 26.6,
48.75])
nlnm = 10**(np.interp(1/f, Pl, Al+Bl*np.log10(Pl))/20) / (2 * np.pi * f)**2
return nlnm
def seismic_ground_NHNM(f):
"""The Peterson new generic ground motion high noise model.
:f: frequency array in Hz
:returns: displacement noise amplitude spectrum at :f:
"""
Pl = np.array([
1.00e-01, 2.20e-01, 3.20e-01, 8.00e-01, 3.80e+00,
4.60e+00, 6.30e+00, 7.90e+00, 1.54e+01, 2.00e+01,
3.54e+02,
])
Al = np.array([
-108.73, -150.34, -122.31, -116.85, -108.48,
-74.66, 0.66, -93.37, 73.54, -151.52,
-206.66,
])
Bl = np.array([
-17.23, -80.50, -23.87, 32.51, 18.08,
-32.95, -127.18, -22.42, -162.98, 10.01,
31.63,
])
nhnm = 10**(np.interp(1/f, Pl, Al+Bl*np.log10(Pl))/20) / (2 * np.pi * f)**2
return nhnm
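

# Illustrative usage sketch (assumption, not part of the original module):
# evaluating the ground and table noise models on a log-spaced frequency axis.
# The frequency range is hypothetical.
if __name__ == "__main__":
    f = np.logspace(-1, 2, 200)        # 0.1 Hz to 100 Hz
    nlnm = seismic_ground_NLNM(f)      # Peterson low noise model, displacement ASD
    nhnm = seismic_ground_NHNM(f)      # Peterson high noise model
    nt, nr = seismic_BSC_ISI(f)        # translational / rotational table motion
    print(nlnm[0], nhnm[0], nt[0], nr[0])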
|
python
|
def func(num):
    # NOTE: ignores its argument and multiplies the module-level x instead
    return x * 3
x = 2
func(x)
|
python
|
CHECKPOINT = 'model/checkpoint.bin'
MODEL_PATH = 'model/model.bin'
input_path = 'input/train.csv'
LR = 0.01
scheduler_threshold = 0.001
scheduler_patience = 2
scheduler_decay_factor = 0.5
embed_dims = 128
hidden_dims = 128
num_layers = 1
bidirectional = False
dropout = 0.2
out_dims = 128
Batch_Size = 64
Epochs = 100
similarity_thresh = 0.75
margin = 0.25
|
python
|
import pygame
# game
from cgame import CGame
def main():
try:
CGame().run()
except Exception as e:
print(e)
if __name__ == '__main__':
pygame.init()
main()
|
python
|