from dataclasses import dataclass
from bindings.csw.animate_type import AnimateType
__NAMESPACE__ = "http://www.w3.org/2001/SMIL20/Language"
@dataclass
class Animate1(AnimateType):
class Meta:
name = "animate"
namespace = "http://www.w3.org/2001/SMIL20/Language"
|
python
|
import unittest
import electricity
class VersionTestCase(unittest.TestCase):
def test_version(self):
self.assertEqual(electricity.__version__, '0.1')
|
python
|
from keris.layers.merge import Concatenate, Sum
from keris.layers.core import Input, Dense
from keris.layers.convolution import Conv2D
from keris.layers.dropout import Dropout
from keris.layers.pool import MaxPooling2D, GlobalAveragePooling2D
|
python
|
from functools import wraps
def tags(tag_name):
def tags_decorator(func):
@wraps(func)
def func_wrapper(name):
return "<{0}>{1}</{0}>".format(tag_name, func(name))
return func_wrapper
return tags_decorator
@tags("p")
def get_text(name):
"""returns some text"""
return "Hello "+name
print(get_text.__name__)    # get_text
print(get_text.__doc__)     # returns some text
print(get_text.__module__)  # __main__
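# A contrasting sketch (added for illustration, not part of the original sample):
# the same decorator written without functools.wraps exposes the wrapper's
# metadata instead, which is exactly what @wraps prevents above.
def tags_plain(tag_name):
    def tags_decorator(func):
        def func_wrapper(name):
            return "<{0}>{1}</{0}>".format(tag_name, func(name))
        return func_wrapper
    return tags_decorator

@tags_plain("p")
def get_text_plain(name):
    """returns some text"""
    return "Hello " + name

print(get_text_plain.__name__)  # func_wrapper -- the original name is lost
print(get_text_plain.__doc__)   # None -- the docstring is lost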
|
python
|
import os
import torch
import numpy as np
from torch.autograd import Variable
from .base_trainer import BaseTrainer
from model import networks
from model.loss import AttnDiscriminatorLoss, AttnGeneratorLoss, KLLoss
from utils.util import convert_back_to_text
from collections import OrderedDict
dirname = os.path.dirname(__file__)
class AttnGANtrainer(BaseTrainer):
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
        parser.set_defaults(exp_name='AttnGAN', netG='synthesis', netD='synthesis')
if is_train:
parser.add_argument('--gamma1', type=float, default=4.0, help='gamma 1 for damsm')
parser.add_argument('--gamma2', type=float, default=5.0, help='gamma 2 for damsm')
parser.add_argument('--gamma3', type=float, default=10.0, help='gamma 3 for damsm')
            parser.add_argument('--g_lambda', type=float, default=5.0, help='lambda for the damsm generator loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
super(AttnGANtrainer, self).__init__(opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G', 'D']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['fake_imgs', 'real_imgs']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
self.model_names = ['G', 'D']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt=opt, gpu_ids=self.gpu_ids)
self.netD = networks.define_D(opt=opt, gpu_ids=self.gpu_ids)
self.rnn_encoder, self.cnn_encoder = networks.define_DAMSM(opt=opt, gpu_ids=self.gpu_ids)
self.generator_loss = AttnGeneratorLoss(opt)
self.discriminator_loss = AttnDiscriminatorLoss()
self.KL_loss = KLLoss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.g_lr, betas=(opt.beta_1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizer_D = []
for i in range(len(self.netD)):
self.optimizer_D.append(torch.optim.Adam(self.netD[i].parameters(), lr=opt.g_lr, betas=(opt.beta_1, 0.999)))
self.optimizers.append(self.optimizer_D[i])
# setup noise
        self.noise = Variable(torch.FloatTensor(self.batch_size, 100))  # 'volatile' was removed from PyTorch
        self.noise = self.noise.to(self.device)  # .to() is not in-place; keep the returned tensor
# setup labels
self.real_labels, self.fake_labels, self.match_labels = self.prepare_labels()
def set_input(self, data):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
"""
self.real_imgs = []
self.real_imgs.append(data["right_images_64"].to(self.device))
self.real_imgs.append(data["right_images_128"].to(self.device))
self.real_imgs.append(data["right_images_256"].to(self.device))
self.right_captions = data["right_captions"].to(self.device)
self.right_caption_lengths = data["right_caption_lengths"].to(self.device)
self.class_ids = np.array(data['class_id'])
self.labels = torch.LongTensor(range(self.batch_size)).to(self.device)
# # other image
# self.wrong_images = []
# self.wrong_images.append(data["wrong_images_64"].to(self.device))
# self.wrong_images.append(data["wrong_images_128"].to(self.device))
# self.wrong_images.append(data["wrong_images_256"].to(self.device))
# self.wrong_captions = data["wrong_captions"].to(self.device)
# self.wrong_caption_lengths = data["wrong_caption_lengths"].to(self.device)
def prepare_labels(self):
real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1))
fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0))
match_labels = Variable(torch.LongTensor(range(self.batch_size)))
if torch.cuda.is_available():
real_labels = real_labels.cuda()
fake_labels = fake_labels.cuda()
match_labels = match_labels.cuda()
return real_labels, fake_labels, match_labels
def forward(self):
# words_embs: batch_size x nef x seq_len
# sent_emb: batch_size x nef
self.words_embs, self.sent_emb = self.rnn_encoder(self.right_captions, self.right_caption_lengths)
self.words_embs, self.sent_emb = self.words_embs.detach(), self.sent_emb.detach()
mask = (self.right_captions == 0)
num_words = self.words_embs.size(2)
if mask.size(1) > num_words:
mask = mask[:, :num_words]
#######################################################
# (2) Generate fake images
######################################################
self.noise.data.normal_(0, 1)
self.fake_imgs, _, self.mu, self.logvar = self.netG(self.noise, self.sent_emb, self.words_embs, mask)
def backward_D(self):
"""Calculate loss for the discriminator"""
#######################################################
# (3) calculate D network loss
######################################################
self.loss_D = 0
for i in range(len(self.netD)):
self.netD[i].zero_grad()
loss = self.discriminator_loss(self.netD[i], self.real_imgs[i], self.fake_imgs[i],
self.sent_emb, self.real_labels, self.fake_labels)
# backward and update parameters
loss.backward()
# optimizersD[i].step()
self.loss_D += loss
def backward_G(self):
#######################################################
# (4) Update G network: maximize log(D(G(z)))
######################################################
# compute total loss for training G
# do not need to compute gradient for Ds
# self.set_requires_grad_value(netsD, False)
self.netG.zero_grad()
self.loss_G = self.generator_loss(self.netD, self.cnn_encoder, self.fake_imgs, self.real_labels,
self.words_embs, self.sent_emb, self.match_labels,
self.right_caption_lengths, self.class_ids, self.opt)
kl_loss = self.KL_loss(self.mu, self.logvar)
self.loss_G += kl_loss
# backward and update parameters
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute the fake images from text embedding: G(s, w)
# update D
self.set_requires_grad(self.netD, True)
# set D's gradients to zero
for i in range(len(self.netD)):
self.optimizer_D[i].zero_grad()
self.backward_D() # calculate gradients for D
# update D's weights
for i in range(len(self.netD)):
self.optimizer_D[i].step()
# update G
self.set_requires_grad(self.netD, False)
self.optimizer_G.zero_grad() # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
def get_current_visuals(self, vocab):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
wordidarray = self.right_captions.detach().cpu().numpy()
for j, name in enumerate(self.visual_names):
if isinstance(name, str):
results = getattr(self, name)
if type(results) is list:
for i, size in enumerate(['64', '128', '256']):
title = name + '-' + size
if i == 0 and j == 0 :
title = convert_back_to_text(wordidarray[0], vocab)
visual_ret[title] = results[i]
else:
visual_ret[name] = results
return visual_ret
|
python
|
import os
from setuptools import setup
# parse requirements (pip.req was removed in pip 10+, so read requirements.txt directly)
with open("requirements.txt") as f:
    reqs = [line.strip() for line in f if line.strip() and not line.startswith("#")]
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
version = read('tensorbuilder/version.txt').split("\n")[0]
setup(
name = "tensorbuilder",
version = version,
author = "Cristian Garcia",
author_email = "[email protected]",
description = ("A light wrapper over TensorFlow that enables you to easily create complex deep neural networks using the Builder Pattern through a functional fluent immutable API"),
license = "MIT",
keywords = ["tensorflow", "deep learning", "neural networks"],
url = "https://github.com/cgarciae/tensorbuilder",
packages = [
'tensorbuilder',
'tensorbuilder.tensordata',
'tensorbuilder.patches',
'tensorbuilder.tests'
],
package_data={
'': ['LICENCE', 'requirements.txt', 'README.md', 'CHANGELOG.md'],
'tensorbuilder': ['version.txt', 'README-template.md']
},
download_url = 'https://github.com/cgarciae/tensorbuilder/tarball/{0}'.format(version),
include_package_data = True,
long_description = read('README.md'),
install_requires = reqs
)
|
python
|
from packetbeat import BaseTest
"""
Tests for trimming long results in pgsql.
"""
class Test(BaseTest):
def test_default_settings(self):
"""
Should store the entire rows but only
10 rows with default settings.
"""
self.render_config_template(
pgsql_ports=[5432],
pgsql_send_response=True
)
self.run_packetbeat(pcap="pgsql_long_result.pcap")
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["pgsql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 11 # 10 plus header
for line in lines[4:]:
print(line, len(line))
assert len(line) == 237
def test_max_row_length(self):
"""
Should be able to cap the row length.
"""
self.render_config_template(
pgsql_ports=[5432],
pgsql_send_response=True,
pgsql_max_row_length=79
)
self.run_packetbeat(pcap="pgsql_long_result.pcap",
debug_selectors=["pgsqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["pgsql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 11 # 10 plus header
for line in lines[4:]:
print(line, len(line))
assert len(line) == 83 # 79 plus two separators and two quotes
def test_max_rows(self):
"""
Should be able to cap the number of rows
"""
self.render_config_template(
pgsql_ports=[5432],
pgsql_send_response=True,
pgsql_max_row_length=79,
pgsql_max_rows=5
)
self.run_packetbeat(pcap="pgsql_long_result.pcap",
debug_selectors=["pgsqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["pgsql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 6 # 5 plus header
def test_larger_max_rows(self):
"""
Should be able to cap the number of rows
"""
self.render_config_template(
pgsql_ports=[5432],
pgsql_send_response=True,
pgsql_max_rows=2000
)
self.run_packetbeat(pcap="pgsql_long_result.pcap",
debug_selectors=["pgsqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["pgsql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 16 # 15 plus header
|
python
|
# Generated by Django 2.0.3 on 2018-03-16 19:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nps', '0021_auto_20180316_1239'),
]
operations = [
migrations.RenameField(
model_name='rawresults',
old_name='role_type',
new_name='user_type',
),
]
|
python
|
# Import Abaqus and External Modules
from abaqusConstants import *
from abaqus import *
from math import sin, cos, radians
import random
import regionToolset
import mesh
import step
import part
randomSeed=[41557]
for eachModel in range(0,1):
#
# Create Model Database
VerFile=Mdb(pathName="MStructure")
VerModel=VerFile.models['Model-1']
VerAssembly=VerModel.rootAssembly
#
# Underlying Geometry
xSize=0.1
ySize=0.05
#
# Microstructure Geometry
charLength=0.00595 #Grain Side Length
numX=15
numY=5
#
    # Other Parameters
    meshSize=0.001
    analysis='Tension' # Options: Tension, Bending
    grainType='Hexagon' # Options: Square, Hexagon, Voronoi (assumed default; used for job naming below)
#
# Draw Base Part
BasePart=VerModel.Part(name='Base', dimensionality=THREE_D,type=DEFORMABLE_BODY)
BaseSketch = VerModel.ConstrainedSketch(name='Base',sheetSize=200.0)
#
BaseSketch.Line(point1=(0.,0.),point2=(xSize,0.))
BaseSketch.Line(point1=(xSize,0.),point2=(xSize,ySize))
BaseSketch.Line(point1=(xSize,ySize),point2=(0.,ySize))
BaseSketch.Line(point1=(0.,ySize),point2=(0.,0.))
BasePart.BaseSolidExtrude(sketch=BaseSketch, depth=0.006)
BasePart=VerModel.parts['Base']
#
# Draw Microstructure and Partition Base Part
ParSketch=VerModel.ConstrainedSketch(name='Par',sheetSize=200)
yLength=sin(radians(30.))*charLength
xLength=cos(radians(30.))*charLength
offsetX=0.
for i in range(0,numX):
offsetY=0.
for j in range(0,numY):
if j%2==0:
xPos=offsetX
else:
xPos=offsetX+xLength
ParSketch.Line(point1=(xLength+xPos,-yLength+offsetY),point2=(xLength+xPos,yLength+offsetY))
ParSketch.Line(point1=(xLength+xPos,+yLength+offsetY),point2=(xPos,2.*yLength+offsetY))
ParSketch.Line(point1=(xLength+xPos,-yLength+offsetY),point2=(xPos,-2.*yLength+offsetY))
offsetY=offsetY+3.*yLength
offsetX=offsetX+2.*xLength
for eachFace in BasePart.faces:
if eachFace.getNormal()==(0.0,0.0,1.0):
targetFace=eachFace
            print(targetFace)
BasePart.PartitionFaceBySketch(faces=targetFace, sketch=ParSketch)
#
# Generate Sections and Section Assignments
labelcount=1
regions=BasePart.faces
for eachregion in regions:
mlabel='Mat'+str(labelcount)
VerModel.PEGSection(name=mlabel, material=mlabel, thickness=0.01,
wedgeAngle1=0.0, wedgeAngle2=0.0)
BasePart.SectionAssignment(region=(eachregion,),
sectionName=mlabel, offset=0.0, offsetField='')
labelcount=labelcount+1
#
# Mesh Part
BasePart.ReferencePoint(point=(0.0, 0.0, 0.0))
offsetX=0.
offsetY=0.
ParSketch2=VerModel.ConstrainedSketch(name='Hex',sheetSize=200, transform=partTransform)
for i in range(0,2*numX):
ParSketch2.Line(point1=(offsetX,0.),point2=(offsetX,2.*charLength*numY))
offsetX=offsetX+xLength
for i in range(0,numY):
ParSketch2.Line(point1=(0.,offsetY),point2=(2.*charLength*numX,offsetY))
offsetY=offsetY+3.*yLength
BasePart.PartitionFaceBySketch(faces=BasePart.faces, sketch=ParSketch2)
BasePart.setMeshControls(regions=BasePart.faces, elemShape=QUAD, technique=SWEEP)
BasePart.seedPart(size=meshSize)
pickedRegions =(BasePart.faces, )
elemType1 = mesh.ElemType(elemCode=CPEG8R, elemLibrary=STANDARD)
BasePart.setElementType(regions=pickedRegions, elemTypes=(elemType1,))
BasePart.generateMesh()
#
#Steps
VerModel.StaticStep(name='Step-1', previous='Initial',
maxNumInc=100000, initialInc=0.03, minInc=1e-07, maxInc=0.15, nlgeom=ON, timePeriod=20.)
VerModel.fieldOutputRequests['F-Output-1'].setValues(variables=(
'LE', 'RF', 'S', 'U'), timeInterval=0.2, timeMarks=OFF)
#
#Boundary Conditions
VerAssembly.Instance(name='Strut',part=BasePart, dependent=ON)
iNodes=VerAssembly.instances['Strut'].nodes
toler=0.01*meshSize
Left=iNodes.getByBoundingBox(xMin=-toler,xMax=toler,yMin=-toler,yMax=ySize+toler)
BLeft=iNodes.getByBoundingBox(xMin=-toler,xMax=toler,yMin=-toler,yMax=toler)
Right=iNodes.getByBoundingBox(xMin=xSize-toler,xMax=xSize+toler,yMin=toler,yMax=ySize+toler)
BRight=iNodes.getByBoundingBox(xMin=xSize-toler,xMax=xSize+toler,yMin=-toler,yMax=toler)
#
Lregion=regionToolset.Region(nodes=Left)
BLregion=regionToolset.Region(nodes=BLeft)
Rregion=regionToolset.Region(nodes=Right)
BRregion=regionToolset.Region(nodes=BRight)
#
VerModel.SmoothStepAmplitude(name='Amp-1', timeSpan=TOTAL, data=(( 0.0, 0.0), (24.00, 1.0)))
VerModel.DisplacementBC(name='LeftX', createStepName='Initial',
region=Lregion, u1=0.0, u2=UNSET, u3=UNSET, ur1=UNSET, ur2=UNSET,
ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
VerModel.DisplacementBC(name='BottomY1', createStepName='Initial',
region=BLregion, u1=UNSET, u2=0.0, u3=UNSET, ur1=UNSET, ur2=UNSET,
ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
if analysis=='Tension':
VerModel.DisplacementBC(name='Tension', createStepName='Step-1',
region=BRregion, u1=0.5*xSize, u2=UNSET, u3=UNSET, ur1=UNSET, ur2=UNSET,
ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
VerModel.DisplacementBC(name='BottomY2', createStepName='Initial',
region=BRregion, u1=UNSET, u2=0.0, u3=UNSET, ur1=UNSET, ur2=UNSET,
ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
VerModel.boundaryConditions['Tension'].setValues(amplitude='Amp-1')
else:
VerModel.DisplacementBC(name='Bending', createStepName='Step-1',
region=BRregion, u1=UNSET, u2=UNSET, u3=UNSET, ur1=UNSET, ur2=UNSET,
ur3=-6., amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
VerModel.boundaryConditions['Bending'].setValues(amplitude='Amp-1')
#
VerAssembly.Set(nodes=Right, name='Right')
VerAssembly.Set(nodes=BRight, name='BRight')
if analysis=='Tension':
VerModel.Equation(name='Constraint-1', terms=((1.0, 'Right', 1), ( -1.0, 'BRight', 1)))
else:
region1=VerAssembly.sets['BRight']
region2=VerAssembly.sets['Right']
VerModel.MultipointConstraint(name='Constraint-2',
controlPoint=region1, surface=region2, mpcType=BEAM_MPC,
userMode=DOF_MODE_MPC, userType=0, csys=None)
#
#Create Job and write input file
if grainType=='Square':
letter1='S'
elif grainType=='Hexagon':
letter1='H'
elif grainType=='Voronoi':
letter1='V'
if analysis=='Tension':
letter2='T'
else:
letter2='B'
label='W'+str(numY)+'L'+str(numX)+letter1+letter2+str(eachModel)
VerFile.Job(name=label, model='Model-1', type=ANALYSIS,userSubroutine='ucrystal.for')
VerFile.jobs[label].writeInput(consistencyChecking=OFF)
# VerFile.close()
|
python
|
'''
@author: Jakob Prange (jakpra)
@copyright: Copyright 2020, Jakob Prange
@license: Apache 2.0
'''
import sys
import json
import argparse as ap
from pathlib import Path
from .mode import Mode
argparser = ap.ArgumentParser()
mode = argparser.add_subparsers(help='mode', dest='mode')
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ap.ArgumentTypeError('Boolean value expected.')
def load_json(v):
try:
with open(v) as f:
return json.load(f)
except Exception as e:
raise ap.ArgumentTypeError(e)
def parse_args():
global argparser, mode
argparser.add_argument('--format', type=str, default='auto')
argparser.add_argument('-o', '--out', type=str, default=None)
argparser.add_argument('--labels', type=str, default=None)
argparser.add_argument('--freq-threshold', type=int, default=10)
argparser.add_argument('-m', '--model', type=str, default='ccg-model')
argparser.add_argument('--derivation', action='store_true', help='print derivations as they are read')
argparser.add_argument('-i', '--interactive', action='store_true')
argparser.add_argument('-O', '--oracle-scoring', action='store_true')
argparser.add_argument('--oracle-structure', action='store_true')
argparser.add_argument('--oracle-supertags', action='store_true')
argparser.add_argument('-a', '--max-category-depth', type=int, default=6, help='maximum depth of categories')
argparser.add_argument('-k', '--global-beam', type=int, default=None, help='log-2 beam size')
argparser.add_argument('-K', '--local-beam', type=int, default=None, help='log-2 beam size')
argparser.add_argument('--lbda', type=float, default=0.1, help='minimum cost / optimal heuristic factor lambda')
argparser.add_argument('--cheap', type=float, default=1, help='cost multiplier')
argparser.add_argument('--penalty', type=float, default=100, help='cost multiplier')
argparser.add_argument('--high-penalty', type=float, default=1000, help='cost multiplier')
test_files = argparser.add_argument_group('Testing files')
test_files.add_argument('-T', '--testing-files', type=str, nargs='+', default=['sample_data/test.auto'])
test_files.add_argument('--testing-format', type=str, default='auto')
train_files = argparser.add_argument_group('Training files')
train_files.add_argument('-t', '--training-files', type=str, nargs='+', default=['sample_data/train.auto'])
train_files.add_argument('--training-format', type=str, default='auto')
train_files.add_argument('--training-ids', type=load_json, default=None, help='json file containing list of sentence ids')
train_files.add_argument('-D', '--development-files', type=str, nargs='+', default=['sample_data/train.auto'])
train_files.add_argument('--development-format', type=str, default='auto')
# learning architecture
arch = argparser.add_argument_group('Learning Architecture')
arch.add_argument('--span-encoder', type=str, choices=['rnn', 'transformer', 'bert', 'roberta'], default='roberta')
arch.add_argument('--word-vectors', type=str, default='word_vectors/glove.6B/6B.50')
arch.add_argument('--pretrained-bert', type=str, default='roberta-base', help='model identifier')
arch.add_argument('--attention-heads', type=int, default=1)
arch.add_argument('--transformer-layers', type=int, default=2)
arch.add_argument('-d', '--embedding-dim', type=int, default=50)
arch.add_argument('--feat-embedding-dim', type=int, default=12)
arch.add_argument('--feat-chars', type=int, default=4)
arch.add_argument('--feat-freq-cutoff', type=int, default=3)
arch.add_argument('--embedding-dropout', type=float, default=0.2)
arch.add_argument('--span-hidden-dims', type=int, nargs='+', default=[768, 768])
arch.add_argument('--bidirectional', type=str2bool, nargs='?', const=True, default=True)
arch.add_argument('--span-dropout', type=float, nargs='*', default=[0.2, 0.1])
arch.add_argument('--hidden-dims', type=int, nargs='*', default=[])
arch.add_argument('--dropout', type=float, nargs='*', default=[])
arch.add_argument('--tasks', type=str, nargs='*', default=['tasks/addrmlp_att_rebank'])
arch.add_argument('--tree-hidden-dim', type=int, default=64)
arch.add_argument('--enc-attention', action='store_true')
arch.add_argument('--dec-attention', action='store_true')
arch.add_argument('-b', '--batch-size', type=int, default=1)
arch.add_argument('--seed', type=int, default=42)
# CUDA
cuda = argparser.add_argument_group('CUDA')
cuda.add_argument('--cuda', action='store_true')
cuda.add_argument('--cuda-devices', type=int, nargs='*', default=[])
argparser.add_argument('-n', '--n-print', type=int, default=100)
train = mode.add_parser(Mode.train)
# hyperparams
hyp = train.add_argument_group('Hyperparameters')
hyp.add_argument('-e', '--epochs', type=int, default=10)
hyp.add_argument('--max-batches', type=int, default=None)
hyp.add_argument('--loss-fxn', type=str, choices=['crossent', 'avg', 'all'],
default='crossent')
hyp.add_argument('--teacher-forcing', type=str, choices=['global', 'dynamic_best', 'dynamic_random'], # add local?
default='global')
hyp.add_argument('--omega-native-atom', type=float, default=0.0)
hyp.add_argument('--omega-atom', type=float, default=0.0)
hyp.add_argument('--omega-full', type=float, default=0.0)
hyp.add_argument('--lambda-enc', type=float, default=0.0)
hyp.add_argument('--lambda-dec', type=float, default=0.0)
hyp.add_argument('--optimizer', type=str, default='adamw')
hyp.add_argument('--learning-rate', type=float, default=1e-4)
hyp.add_argument('--bert-learning-rate', type=float, default=1e-5)
hyp.add_argument('--momentum', type=float, default=0.7)
hyp.add_argument('--epsilon', type=float, default=1e-6)
hyp.add_argument('--decay', type=float, default=0.01)
hyp.add_argument('--use-schedule', action='store_true', default=False)
hyp.add_argument('--pct-start', type=float, default=0.3)
hyp.add_argument('--anneal-strategy', type=str, default='cos')
hyp.add_argument('--finetune', type=str2bool, nargs='?', const=True, default=True)
return argparser.parse_args()
def get_filepaths_from_path(paths, filename, suffix):
filepaths = []
try:
for path in paths:
p = Path(path)
for filepath in p.glob(f'**/{filename if filename else f"*.{suffix}"}'):
filepaths.append(filepath)
except AttributeError:
pass
return filepaths
def get_filepaths_from_glob(globs):
filepaths = []
try:
p = Path()
for glob in globs:
for filepath in p.glob(glob):
filepaths.append(filepath)
except AttributeError:
pass
return sorted(filepaths)
def get_filepaths_from_args(args):
model = Path(f'{args.model}.pt')
args.model_exists = model.is_file()
print(args, file=sys.stderr)
args.testing_files = get_filepaths_from_glob(args.testing_files)
args.training_files = get_filepaths_from_glob(args.training_files)
args.development_files = get_filepaths_from_glob(args.development_files)
def main():
args = parse_args()
get_filepaths_from_args(args)
import torch.cuda as cuda
if args.cuda and cuda.is_available():
args.device = 'cuda'
else:
args.device = 'cpu'
return args
if __name__ == '__main__':
main()
|
python
|
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
`adafruit_matrixportal.network`
================================================================================
Helper library for the MatrixPortal M4 or Adafruit RGB Matrix Shield + Metro M4 Airlift Lite.
* Author(s): Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* `Adafruit MatrixPortal M4 <https://www.adafruit.com/product/4745>`_
* `Adafruit Metro M4 Express AirLift <https://www.adafruit.com/product/4000>`_
* `Adafruit RGB Matrix Shield <https://www.adafruit.com/product/2601>`_
* `64x32 RGB LED Matrix <https://www.adafruit.com/product/2278>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
import neopixel
from adafruit_portalbase.network import NetworkBase
from adafruit_portalbase.wifi_coprocessor import WiFi
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MatrixPortal.git"
class Network(NetworkBase):
"""Class representing the Adafruit RGB Matrix Portal.
    :param status_neopixel: The pin for the status NeoPixel. Use ``board.NEOPIXEL`` for the
                            on-board NeoPixel. Defaults to ``None``, meaning no status LED.
    :param esp: A passed ESP32 object. Can be used in cases where the ESP32 chip needs to be used
                before calling the pyportal class. Defaults to ``None``.
:param busio.SPI external_spi: A previously declared spi object. Defaults to ``None``.
:param bool extract_values: If true, single-length fetched values are automatically extracted
from lists and tuples. Defaults to ``True``.
:param debug: Turn on debug print outs. Defaults to False.
"""
def __init__(self, **kwargs):
extract_values = True
debug = False
if "extract_values" in kwargs:
extract_values = kwargs.pop("extract_values")
if "debug" in kwargs:
debug = kwargs.pop("debug")
if "status_neopixel" in kwargs:
status_neopixel = kwargs.pop("status_neopixel")
status_led = neopixel.NeoPixel(status_neopixel, 1, brightness=0.2)
else:
status_led = None
kwargs["status_led"] = status_led
wifi = WiFi(**kwargs)
super().__init__(
wifi,
extract_values=extract_values,
debug=debug,
)
gc.collect()
@property
def ip_address(self):
"""Return the IP Address nicely formatted"""
return self._wifi.esp.pretty_ip(self._wifi.esp.ip_address)
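
# A minimal usage sketch (illustrative, not part of the original module). It assumes
# CircuitPython hardware with an ESP32 co-processor and the usual WiFi secrets set up
# for adafruit_portalbase; only the constructor arguments documented above and the
# ip_address property defined in this module are used.
#
#   import board
#   from adafruit_matrixportal.network import Network
#
#   network = Network(status_neopixel=board.NEOPIXEL, debug=True)
#   # ... once the base class has brought the WiFi connection up ...
#   print("IP address:", network.ip_address)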
|
python
|
"""Tests for the NumericValue data class."""
from onyx_client.data.animation_keyframe import AnimationKeyframe
from onyx_client.data.animation_value import AnimationValue
from onyx_client.data.numeric_value import NumericValue
class TestNumericValue:
def test_create(self):
expected = NumericValue(
10,
10,
100,
True,
AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
)
assert (
NumericValue.create(
{
"value": 10,
"minimum": 10,
"maximum": 100,
"read_only": True,
"animation": {
"start": 10,
"current_value": 10,
"keyframes": [
{
"interpolation": "linear",
"delay": 10,
"duration": 10,
"value": 10,
}
],
},
}
)
== expected
)
def test_create_value_only(self):
expected = NumericValue(10, 0, 100, False)
assert (
NumericValue.create(
{
"value": 10,
}
)
== expected
)
def test_create_no_value(self):
expected = NumericValue(None, 0, 100, False)
assert NumericValue.create({}) == expected
def test_create_none(self):
assert NumericValue.create(None) is None
def test_update_with(self):
value = NumericValue(
1,
1,
10,
False,
AnimationValue(1, 1, [AnimationKeyframe("linear", 1, 1, 1)]),
)
expected = NumericValue(
10,
10,
100,
True,
AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
)
value.update_with(expected)
assert value == expected
def test_update_with_only_existing(self):
value = NumericValue(
10,
10,
100,
True,
)
expected = NumericValue(
10,
10,
100,
True,
AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
)
value.update_with(
NumericValue(
None,
None,
None,
None,
AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
)
)
assert value == expected
def test_not_eq(self):
assert NumericValue(10, 10, 100, True) != 10
|
python
|
from .config_diff import Config
import yaml
def save(filename: str, config: Config):
"""Save configuraion to file"""
with open(filename, 'w') as fh:
yaml.dump(config, fh, default_flow_style=False)
|
python
|
import os
SPREEDLY_AUTH_TOKEN = os.environ.get('SPREEDLY_AUTH_TOKEN','asdfasdf')
SPREEDLY_SITE_NAME = 'jamesr-c-test'
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Advent of Code 2020
Day 25, Part 1
"""
def main():
with open('in.txt') as f:
card_key, door_key = map(int, f.readlines())
subject_number = 7
value = 1
card_loop = 0
while True:
card_loop += 1
value *= subject_number
value %= 20201227
if value == card_key:
print(card_loop)
break
value = 1
door_loop = 0
while True:
door_loop += 1
value *= subject_number
value %= 20201227
if value == door_key:
print(door_loop)
break
encryption_key = 1
subject_number = card_key
for _ in range(door_loop):
encryption_key *= subject_number
encryption_key %= 20201227
print(encryption_key)
if __name__ == '__main__':
main()
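
# An equivalent sketch of the same idea (added for illustration): once the card's
# loop size is recovered by brute force, the encryption key is the door key raised
# to that loop size modulo 20201227, which Python's three-argument pow() computes directly.
def find_encryption_key(card_key, door_key, modulus=20201227, subject=7):
    value, card_loop = 1, 0
    while value != card_key:
        card_loop += 1
        value = (value * subject) % modulus
    return pow(door_key, card_loop, modulus)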
|
python
|
import threading
from time import sleep
lock = threading.Lock()
# function that waits 1 second
def wait():
global lock
while True:
sleep(1)
lock.release()
def LerVelocidade():
global lock
while True:
lock.acquire()
print('Leitura da Velocidade')
print('cheguei')
# ---------------- creating the threads
lock.acquire()
t = threading.Thread(target=wait, name='Wait')
t1 = threading.Thread(target=LerVelocidade, name='Velocidade')
t.start()
t1.start()
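
# An alternative sketch of the same once-per-second gating (illustrative, not part
# of the original sample): a threading.Event makes the hand-off explicit instead of
# releasing a Lock that was acquired by another thread.
#
# tick = threading.Event()
#
# def ticker():
#     while True:
#         sleep(1)
#         tick.set()
#
# def read_speed():
#     while True:
#         tick.wait()
#         tick.clear()
#         print('Leitura da Velocidade')
#
# threading.Thread(target=ticker, name='Wait').start()
# threading.Thread(target=read_speed, name='Velocidade').start()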
|
python
|
print('gunicorn hook')
hiddenimports = ['gunicorn.glogging', 'gunicorn.workers.sync']
|
python
|
from unittest import mock
from bx_py_utils.test_utils.datetime import parse_dt
from django.core.exceptions import ValidationError
from django.core.validators import validate_slug
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils import timezone
from bx_django_utils.models.manipulate import (
STORE_BEHAVIOR_IGNORE,
STORE_BEHAVIOR_SET_IF_EMPTY,
STORE_BEHAVIOR_SKIP_EMPTY,
InvalidStoreBehavior,
create,
create_or_update,
create_or_update2,
)
from bx_django_utils.test_utils.datetime import MockDatetimeGenerator
from bx_django_utils.test_utils.model_clean_assert import AssertModelCleanCalled
from bx_django_utils_tests.test_app.models import CreateOrUpdateTestModel, StoreSaveModel, TimetrackingTestModel
class ModelManipulateTestCase(TestCase):
def test_deprecated_create_or_update(self):
with self.assertWarns(DeprecationWarning):
instance, created, updated_fields = create_or_update(
ModelClass=CreateOrUpdateTestModel, name='foo', slug='bar'
)
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.name == 'foo'
assert created is True
assert updated_fields is None # None and not []
@mock.patch.object(timezone, 'now', MockDatetimeGenerator())
def test_create_or_update2(self):
# create a new entry:
with AssertModelCleanCalled() as cm:
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
name='First entry',
slug='first'
)
instance = result.instance
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.id == 1
assert instance.name == 'First entry'
assert instance.slug == 'first'
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
assert result.created is True
assert result.updated_fields == []
assert result.ignored_fields == []
assert result.not_overwritten_fields == []
cm.assert_no_missing_cleans()
# Change only 'slug'
with AssertModelCleanCalled() as cm:
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
name='First entry',
slug='change-value'
)
instance = result.instance
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.id == 1
assert instance.name == 'First entry'
assert instance.slug == 'change-value'
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000') # not changed!
assert instance.update_dt == parse_dt('2002-01-01T00:00:00+0000')
assert result.created is False
assert result.updated_fields == ['slug']
assert result.ignored_fields == []
assert result.not_overwritten_fields == []
cm.assert_no_missing_cleans()
# Change 'name' and 'slug':
with AssertModelCleanCalled() as cm:
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
name='New name !',
slug='new-slug'
)
instance = result.instance
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.id == 1
assert instance.name == 'New name !'
assert instance.slug == 'new-slug'
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000') # not changed!
assert instance.update_dt == parse_dt('2003-01-01T00:00:00+0000')
assert result.created is False
assert result.updated_fields == ['name', 'slug']
assert result.ignored_fields == []
assert result.not_overwritten_fields == []
cm.assert_no_missing_cleans()
# Nothing changed:
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
name='New name !',
slug='new-slug'
)
instance = result.instance
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.id == 1
assert instance.name == 'New name !'
assert instance.slug == 'new-slug'
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2003-01-01T00:00:00+0000') # not changed!
assert result.created is False
assert result.updated_fields == []
assert result.ignored_fields == []
assert result.not_overwritten_fields == []
def test_non_valid(self):
msg = str(validate_slug.message)
with self.assertRaisesMessage(ValidationError, msg):
create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
name='foo',
slug='this is no Slug !'
)
# Update existing entry with non-valid values should also not work:
        CreateOrUpdateTestModel.objects.create(id=1, name='foo', slug='bar')
with self.assertRaisesMessage(ValidationError, msg):
create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
name='foo',
slug='this is no Slug !'
)
def test_disable_full_clean(self):
# Create a new entry without "full_clean()" call:
with AssertModelCleanCalled() as cm:
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
call_full_clean=False,
slug='This is not a valid slug!'
)
instance = result.instance
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.id == 1
assert instance.slug == 'This is not a valid slug!'
assert result.created is True
assert result.updated_fields == []
assert cm.called_cleans == []
assert len(cm.missing_cleans) == 1
# Change existing without "full_clean()" call:
with AssertModelCleanCalled() as cm:
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'id': 1},
call_full_clean=False,
slug='Also no valid slug!'
)
instance = result.instance
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.id == 1
assert instance.slug == 'Also no valid slug!'
assert result.created is False
assert result.updated_fields == ['slug']
assert cm.called_cleans == []
assert len(cm.missing_cleans) == 1
@mock.patch.object(timezone, 'now', MockDatetimeGenerator())
def test_create_or_update_without_lookup(self):
# create a new entry:
with AssertModelCleanCalled() as cm:
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup=None,
name='First entry',
slug='first'
)
instance = result.instance
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.pk is not None
assert instance.name == 'First entry'
assert instance.slug == 'first'
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
assert result.created is True
assert result.updated_fields == []
cm.assert_no_missing_cleans()
@mock.patch.object(timezone, 'now', MockDatetimeGenerator())
def test_create(self):
# create a new entry:
with AssertModelCleanCalled() as cm:
instance = create(
ModelClass=CreateOrUpdateTestModel,
name='First entry',
slug='first'
)
assert isinstance(instance, CreateOrUpdateTestModel)
assert instance.pk is not None
assert instance.name == 'First entry'
assert instance.slug == 'first'
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
cm.assert_no_missing_cleans()
# Cannot create an already existing model
with self.assertRaises(IntegrityError):
create(
ModelClass=CreateOrUpdateTestModel,
id=instance.id,
name='second create',
slug='second'
)
@mock.patch.object(timezone, 'now', MockDatetimeGenerator())
def test_store_behavior(self):
test_relation1 = TimetrackingTestModel(
create_dt=parse_dt('2002-02-02T00:00:00+0000'),
update_dt=parse_dt('2003-03-03T00:00:00+0000')
)
test_relation1.save(update_dt=False)
test_relation2 = TimetrackingTestModel(
create_dt=parse_dt('2004-04-04T00:00:00+0000'),
update_dt=parse_dt('2005-05-05T00:00:00+0000')
)
test_relation2.save(update_dt=False)
# Create object and respect "store_behavior"
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup=None, # force create object!
store_behavior={
# 'name' is missing here -> normal behavior: overwrite existing values
'slug': STORE_BEHAVIOR_SET_IF_EMPTY,
'many2one_rel': STORE_BEHAVIOR_SET_IF_EMPTY,
'blank_field': STORE_BEHAVIOR_IGNORE,
'null_field': STORE_BEHAVIOR_IGNORE,
},
name='name1',
slug='slug1',
many2one_rel=test_relation1,
blank_field='ignored',
null_field='ignored',
)
assert result.created is True
assert result.updated_fields == [] # Object created!
assert sorted(result.ignored_fields) == ['blank_field', 'null_field']
assert result.not_overwritten_fields == []
assert result.skip_empty_values == []
instance = result.instance
assert instance.name == 'name1'
assert instance.slug == 'slug1'
assert instance.many2one_rel.create_dt == parse_dt('2002-02-02T00:00:00+0000')
assert instance.many2one_rel.update_dt == parse_dt('2003-03-03T00:00:00+0000')
assert instance.blank_field == ''
assert instance.null_field is None
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
# Update existing instance
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'pk': instance.pk},
store_behavior={
# 'name' is missing here -> normal behavior: overwrite existing values
'slug': STORE_BEHAVIOR_SET_IF_EMPTY,
'many2one_rel': STORE_BEHAVIOR_SKIP_EMPTY, # given relation is not empty
'blank_field': STORE_BEHAVIOR_SET_IF_EMPTY,
'null_field': STORE_BEHAVIOR_SET_IF_EMPTY,
},
name='name2',
slug='not-overwritten',
many2one_rel=test_relation2,
blank_field='set blank field 1',
null_field='set null field 1',
)
instance = result.instance
assert result.created is False
assert instance.name == 'name2'
assert instance.slug == 'slug1'
assert instance.many2one_rel.create_dt == parse_dt('2004-04-04T00:00:00+0000') # updated
assert instance.many2one_rel.update_dt == parse_dt('2005-05-05T00:00:00+0000') # updated
assert instance.blank_field == 'set blank field 1'
assert instance.null_field == 'set null field 1'
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2002-01-01T00:00:00+0000')
assert sorted(result.updated_fields) == [
'blank_field', 'many2one_rel', 'name', 'null_field'
]
assert result.ignored_fields == []
assert result.not_overwritten_fields == ['slug']
assert result.skip_empty_values == []
# Skip empty values
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'pk': instance.pk},
store_behavior={
'slug': STORE_BEHAVIOR_IGNORE,
'many2one_rel': STORE_BEHAVIOR_SKIP_EMPTY,
'blank_field': STORE_BEHAVIOR_SKIP_EMPTY,
'null_field': STORE_BEHAVIOR_SKIP_EMPTY,
},
name='name3',
slug='will-be-ignored',
many2one_rel=None,
            blank_field='', # an empty value
            null_field=None, # an empty value
)
instance = result.instance
assert result.created is False
assert instance.name == 'name3' # new name
assert instance.slug == 'slug1' # unchanged
assert instance.many2one_rel.create_dt == parse_dt('2004-04-04T00:00:00+0000') # unchanged
assert instance.many2one_rel.update_dt == parse_dt('2005-05-05T00:00:00+0000') # unchanged
assert instance.blank_field == 'set blank field 1' # unchanged
assert instance.null_field == 'set null field 1' # unchanged
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2003-01-01T00:00:00+0000')
assert result.updated_fields == ['name']
assert result.ignored_fields == ['slug']
assert result.not_overwritten_fields == []
assert sorted(result.skip_empty_values) == [
'blank_field', 'many2one_rel', 'null_field'
]
# Store empty values
result = create_or_update2(
ModelClass=CreateOrUpdateTestModel,
lookup={'pk': instance.pk},
store_behavior={
'name': STORE_BEHAVIOR_IGNORE,
'slug': STORE_BEHAVIOR_IGNORE,
},
name='Not Overwritten !',
# "slug" missing here, but can be set in "store_behavior"
many2one_rel=None, # can be set to "empty"
blank_field='', # can be set to "empty"
null_field=None, # can be set to "empty"
)
instance = result.instance
assert result.created is False
assert instance.name == 'name3' # unchanged
assert instance.slug == 'slug1' # unchanged
assert instance.many2one_rel is None
assert instance.blank_field == ''
assert instance.null_field is None
assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
assert instance.update_dt == parse_dt('2004-01-01T00:00:00+0000')
assert sorted(result.updated_fields) == ['blank_field', 'many2one_rel', 'null_field']
assert result.ignored_fields == ['name']
assert result.not_overwritten_fields == []
# We accept only existing field names in store_behavior:
err_msg = (
"store_behavior field name 'wrong' is not one of:"
" ['blank_field', 'create_dt', 'id', 'many2one_rel',"
" 'name', 'null_field', 'slug', 'update_dt']"
)
with self.assertRaisesMessage(InvalidStoreBehavior, err_msg):
create_or_update2(
ModelClass=CreateOrUpdateTestModel,
store_behavior={
'name': STORE_BEHAVIOR_IGNORE,
'slug': STORE_BEHAVIOR_SET_IF_EMPTY,
# We check the field names:
'wrong': STORE_BEHAVIOR_IGNORE,
},
)
assert CreateOrUpdateTestModel.objects.count() == 1
@mock.patch.object(timezone, 'now', MockDatetimeGenerator())
def test_save_kwargs(self):
obj = create_or_update2(
ModelClass=StoreSaveModel,
name='foobar',
save_kwargs={'arg': 'original'},
).instance
assert obj.name == 'foobar'
create_or_update2(
ModelClass=StoreSaveModel,
lookup={'pk': obj.pk},
name='bazqux',
save_kwargs={'other_arg': 'changed'},
)
obj.refresh_from_db()
assert obj.name == 'bazqux'
create_or_update2(
ModelClass=StoreSaveModel,
lookup={'pk': obj.pk},
name='final',
save_kwargs={},
)
obj.refresh_from_db()
assert obj.name == 'final'
assert obj._save_calls.saves == [
{'arg': 'original'},
{'other_arg': 'changed'},
{},
]
|
python
|
import bge
scn = bge.logic.getCurrentScene()
def CamAdapt(cont):
lum = scn.objects['und.lum1']
nab = scn.objects['Naball_gerUnderground']
def LoadPart1(cont):
loadObj = []
|
python
|
#!/usr/bin/env python3
'''Written by Corkine Ma
Most of this module subclasses a QDialog that accepts the user's input and saves it to the
daily.setting file. In addition, there is a function which reads the data back from
daily.setting and uses two functions from the checkandsend.py module to check whether the
watched folder contains files matching the regular expression that are not yet recorded at
the database location. If such a file exists in the folder but not in the database, it is
treated as a new diary entry and the mail-sending routine is called. The function returns a
bool; in most cases, as long as the settings file and the log file are fine, it returns True.
Even when sending the mail fails it still returns True (several files may be sent at once and
some of them may fail to open or to send, so True is returned uniformly; the per-file
processing messages are written to stdout, or to daily.log if logging is enabled). The fourth
return value contains the detailed processing information, for both successes and failures.
'''
import sys,os,io,shelve,traceback,time
from tkinter import Tk
from tkinter.messagebox import showwarning
import PyQt5
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import ui_setting
from checkandsend import *
# os.chdir(r"C:\Users\Administrator\Desktop\pyBook\Project_EveryDayNotice")
__VERSION__ = '0.2.6'
class Form(QDialog,ui_setting.Ui_Dialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.setupUi(self)
self.pushButton.clicked.connect(self.selectDb)
self.pushButton_2.clicked.connect(self.selectCWD)
# self.pushButton_4.clicked.connect(self.selectAlert)
self.address=''
self.dbaddress =''
# self.alertaddress = ''
self.buttonBox.accepted.connect(self.saveIt)
try:
loadfile = open('daily.setting','r')
thefile = loadfile.read()
# print(thefile)
self.address=str(thefile.split(",")[0])
self.dbaddress=str(thefile.split(",")[1])
# self.alertaddress = str(thefile.split(",")[4])
self.label_3.setText(self.dbaddress)
self.label_4.setText(self.address)
self.lineEdit.setText(str(thefile.split(",")[2]))
self.lineEdit_2.setText(str(thefile.split(",")[3]))
# self.label_5.setText(self.alertaddress)
except:
QMessageBox.warning(self,"WARN",'从之前的文件中读取出错,如果你第一次使用此程序,请忽略此条消息')
def selectCWD(self):
address=QFileDialog.getExistingDirectory(self,"选择需要监视的文件夹",os.getcwd(),QFileDialog.ShowDirsOnly)
if address != None:
self.address = address
self.label_4.setText(self.address)
else:
self.label_4.setText('未选择')
def selectDb(self):
choose = QMessageBox.information(self,'选项',"你是否需要新建一个数据库文件?如果没有,请点击'OK',否则点击'Cancel'选择你的数据库问卷",QMessageBox.Ok|QMessageBox.Cancel)
if choose == QMessageBox.Ok:
address=QFileDialog.getExistingDirectory(self,"选择需要监视的文件夹",os.getcwd(),QFileDialog.ShowDirsOnly)
db=shelve.open(address+'/mydailydata')
            db['1999年1月1日.docx']='Update at NOTIME'
            db.close()  # flush the new shelve file to disk
            self.dbaddress = address+'/mydailydata'
self.label_3.setText(self.dbaddress)
else:
filename,type = QFileDialog.getOpenFileName(self,"选择你的数据库文件",'',"cmData files (*.dat)")
# print(filename)
if filename != None:
if '.bak' in filename[-4:] or '.dat' in filename[-4:] or '.dir' in filename[-4:]:
filename = filename[:-4]
self.dbaddress = filename
self.label_3.setText(self.dbaddress)
# print(self.dbaddress)
else:
self.label_3.setText('未选择')
QMessageBox.warning(self,"WARN",'无效文件,请重新选取')
def contextMenuEvent(self, event):
menu1 = QMenu()
runAction = menu1.addAction("测试程序运行情况(&R)")
runAction.triggered.connect(self.runTest)
menu1.exec_(event.globalPos())
def runTest(self):
result_bool,result_1,result_2,result_txt = runCheck()
QMessageBox.information(self,'输出测试',result_txt)
# def selectAlert(self):
# filename,type = QFileDialog.getOpenFileName(self,"选择你的提醒程序",'',"cmEXE files (*.exe)")
# if filename != None:
# self.alertaddress = filename
# self.label_5.setText(self.alertaddress)
def saveIt(self):
emailaddress = str(self.lineEdit.text())
regularexp = str(self.lineEdit_2.text())
        if emailaddress == '' or regularexp == '' or self.dbaddress == '' or self.address == '':  # the alert program is not checked here
QMessageBox.warning(self,"WARN",'输入数据无效,请检查后再试')
else:
try:
# print(emailaddress,regularexp,self.address,self.dbaddress,self.alertaddress)
savedata = open('daily.setting','w')
savedata.write('%s,%s,%s,%s'%(self.address,self.dbaddress,emailaddress,regularexp))
savedata.close()
QMessageBox.information(self,"Info",'设置数据保存在daily.setting文件中')
# print(os.getcwd())
except Exception as _err:
print(traceback.format_exc())
QMessageBox.warning(self,"WARN",'数据保存失败')
# print(os.getcwd())
def runCheck(settingsfile='daily.setting',log=True,logfile='daily.log',sendmail=True):
    '''runCheck() automatically calls the functions from checkandsend.py: it loads the parameters
    they need from the given settings file and runs the search, returning a bool plus the
    processing results. The log flag and the logfile path decide whether the processing output
    is captured and appended to that file, e.g. log=True with logfile='daily.log'.
'''
try:
if log == True:
tmp = sys.stdout
sys.stdout = open(logfile,'a')
else:
pass
print('\n\n','='*100)
print('=============================================',time.ctime(),'======================================')
print('='*100,'\n\n')
loadfile = open(settingsfile,'r')
thefile = loadfile.read()
address=str(thefile.split(",")[0])
dbaddress=str(thefile.split(",")[1])
# alertaddress = str(thefile.split(",")[4])
emailaddress=str(thefile.split(",")[2])
regular=str(thefile.split(",")[3])
result,infomation,clist,notedict= checkDaily(address=address+'/',
regular=regular,dbaddress=dbaddress)
processinfo = errinfo = result_txt = ''
if result == True:
if clist != []:
if sendmail == True:
print('需要写入的数据',clist)
result_2,result_num,result_txt,processinfo,errinfo= sendMail(clist,address=address+'/',emailaddress=emailaddress,
dbaddress=dbaddress,preparenotedict=notedict)
print(result_2,'\n',processinfo,'\n',errinfo,'\n',result_txt)
if log == True:
sys.stdout.close()
sys.stdout = tmp
else:pass
return True,processinfo,errinfo,result_txt
else:
return True,'','','成功检索并发现新数据,但你选择了不发送邮件'
else:
print("成功检索数据,但未发现新数据")
print(infomation,clist)
if log == True:
sys.stdout.close()
sys.stdout = tmp
else:pass
return True,'','','成功检索数据,但未发现新数据'
                # if this is changed, the related checks in noticedlg must be updated as well
else:
return False,'','','未能成功调用checkDaily()函数,可能是因为参数传递错误。'
except:
return False,'','',str(traceback.format_exc())
if __name__=="__main__":
app = QApplication(sys.argv)
app.setApplicationName("Daily Notice")
app.setOrganizationName("Marvin Studio")
app.setOrganizationDomain("http://www.marvinstudio.cn")
form = Form()
form.show()
# runCheck()
    # print(runCheck(sendmail=False))
app.exec_()
|
python
|
from flask import Blueprint
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_,and_
from .form import *
from utils import *
from decorators import admin_required, permission_required
# from .. import app
from flask import current_app
from wtforms import ValidationError, validators
from app import db, bcrypt, login_manager
from flask_login import (
    UserMixin,
    login_required,
    login_user,
    LoginManager,
    current_user,
    logout_user,
)
from flask import (
Flask,
render_template,
request,
redirect,
flash,
url_for,
abort,
send_from_directory,
)
from werkzeug.routing import BuildError
from sqlalchemy.exc import (
IntegrityError,
DataError,
DatabaseError,
InterfaceError,
InvalidRequestError,
)
from PIL import Image
from flask_bcrypt import generate_password_hash, check_password_hash
from models import *
bp = Blueprint("blog", __name__, url_prefix="/blog")
# url = "http://localhost:5000/{url_for('blog.article',post_id=post.id,uname=post.author.uname,slug=post.slug)}"
@bp.route("/", methods=("GET", "POST"), strict_slashes=False)
def blog():
# All blog posts - order >> descending
page = request.args.get("page", 1, type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=6)
if not posts:
flash("No Posts are available", "info")
# blog posts - order >> trending
# keyword = request.args.get('sval','')
# results = Post.query.filter(Post.title.contains(keyword) |
# Post.body.contains(keyword))
trending = (
Post.query.filter(Post.views >= 1000)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
return render_template(
"blog/blog.html", posts=posts, trending=trending, title="Devsate | Home"
)
@bp.route("/search/", strict_slashes=False, methods=("GET", "POST"))
def search():
# keyword = request.args.get("sval")
keyword = request.form.get('sval')
if request.method == 'POST':
posts = Post.query.filter(or_(Post.title.ilike(f'%{keyword}%'), Post.body.ilike(f'%{keyword}%'))).all()
if not posts:
flash("No results matched your search", "info")
return render_template(
"blog/search_results.html",
label="Search Results",
posts=posts,
title="Devsate | Home",
)
return render_template(
"blog/search_results.html",
label="Search Results",
title="Devsate | Home",
)
@bp.route("/Technology/", strict_slashes=False)
def tech():
form = Search()
page = request.args.get("page", 1, type=int)
posts = (
Post.query.filter_by(category="Technology")
# .filter_by(s_category = 'Technology')
.paginate(page=page, per_page=10)
)
if not posts:
flash("No Posts are available", "info")
trending = (
Post.query.filter(Post.views >= 1000)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
# blog posts - order >> featured
return render_template(
"blog/blog.html",
posts=posts,
trending=trending,
form=form,
title="Devsate | Home",
)
@bp.route("/Business/", strict_slashes=False)
def bs():
form = Search()
page = request.args.get("page", 1, type=int)
posts = Post.query.filter_by(category="Business").paginate(page=page, per_page=10)
if not posts:
flash("No Posts are available", "info")
trending = (
Post.query.filter(Post.views >= 1000)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
return render_template(
"blog/blog.html",
posts=posts,
trending=trending,
form=form,
title="Devsate | Home",
)
@bp.route(
"/<int:post_id>/<string:uname>/<string:slug>",
methods=("GET", "POST"),
strict_slashes=False,
)
def article(post_id, uname, slug):
form = CommentPost()
post = Post.query.filter_by(id=post_id).first()
comments = Comments.query.filter_by(post_id=post.id).all()
replies = Replies.query.filter_by(id=Replies.id).all()
post.views += 1
db.session.commit()
url = "http://127.0.0.1:5000/"
read_time = estimate_reading_time(url)
if request.method == "POST": #and form.validate_on_submit():
message = form.comment.data
comment = Comments(
message=message,
post_id=post.id,
respondent=current_user.uname,
rimage=current_user.image,
)
db.session.add(comment)
post.count += 1
flash("Comment posted", "success")
db.session.commit()
return render_template(
"blog/article.html",
post=post,
read_time=read_time,
form=form,
comments=comments,
replies=replies,
title="Devsate | Blog",
)
@bp.route("/add", methods=("GET", "POST"), strict_slashes=False)
@login_required
@admin_required
def new_post():
posts = Post.query.order_by(Post.date_posted.desc()).all()
form = AddPost()
if form.validate_on_submit():
try:
if form.postImage.data:
picture_file = upload_img(form.postImage.data)
title = form.title.data
postImage = picture_file
body = form.body.data
category = form.category.data
s_category = form.s_category.data
post = Post(
title=title,
postImage=postImage,
body=body,
category=category,
s_category=s_category,
user_id=current_user.id,
)
db.session.add(post)
db.session.commit()
flash(f"Post succesfully published", "success")
return redirect(url_for("blog.blog"))
except InvalidRequestError:
db.session.rollback()
flash(f"Something went wrong!", "danger")
except IntegrityError:
db.session.rollback()
flash(f"User already exists!.", "warning")
except DataError:
db.session.rollback()
flash(f"Invalid Entry", "warning")
except InterfaceError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except DatabaseError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except BuildError:
db.session.rollback()
flash(f"An error occured !", "danger")
return render_template(
"blog/add.html",
form=form,
posts=posts,
title="Devsate | Blog",
legend="Create a new blog article",
)
@bp.route(
"/<int:post_id>/<string:slug>/update",
methods=("GET", "POST"),
strict_slashes=False,
)
@login_required
@admin_required
def update_article(post_id, slug):
post = Post.query.filter_by(id=post_id).first()
if post.author != current_user:
abort(403)
form = AddPost()
if form.validate_on_submit():
try:
post.title = form.title.data
post.body = form.body.data
db.session.commit()
flash("Post succesfully Updated", "success")
return redirect(url_for("blog.blog"))
except InvalidRequestError:
db.session.rollback()
flash(f"Something went wrong!", "danger")
except IntegrityError:
db.session.rollback()
flash(f"User already exists!.", "warning")
except DataError:
db.session.rollback()
flash(f"Invalid Entry", "warning")
except InterfaceError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except DatabaseError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except BuildError:
db.session.rollback()
flash(f"An error occured !", "danger")
elif request.method == "GET":
form.title.data = post.title
form.body.data = post.body
return render_template(
"blog/add.html",
form=form,
post=post,
title="Devsate|Blog-update post",
legend="Update Post",
)
@bp.route("/<int:post_id>/<string:slug>/delete",methods=("GET", "POST"),strict_slashes=False,)
@login_required
@admin_required
def delete_article(post_id, slug):
post = Post.query.filter_by(id=post_id).first()
if post.author != current_user:
abort(403)
flash("Post has been deleted succesfully ", "success")
db.session.delete(post)
db.session.commit()
return redirect(url_for("blog.blog"))
@bp.route("/user/<string:uname>", methods=("GET", "POST"), strict_slashes=False)
def profile(uname):
user = User.query.filter_by(uname=uname).first_or_404()
page = request.args.get("page", 1, type=int)
posts = (
Post.query.filter_by(author=user)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
    if not posts.items:
flash("Ooops! You don't have any posts yet.", "info")
image = url_for("static", filename="images/ProfileImages/" + user.image)
return render_template(
"blog/user_profile.html",
image=image,
posts=posts,
user=user,
title="Devsate | Profile",
)
@bp.route("/subscribe/", methods=("GET", "POST"), strict_slashes=False)
def subscribe():
form = Subscribe()
if request.method == "POST":
if form.validate_on_submit():
try:
email = form.email.data
email = Subscribers(
email=email,
)
db.session.add(email)
db.session.commit()
except InvalidRequestError:
db.session.rollback()
flash(f"Something went wrong!", "danger")
except IntegrityError:
db.session.rollback()
flash(f"You are already a member !.", "warning")
except DataError:
db.session.rollback()
flash(f"Invalid Entry", "warning")
except InterfaceError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except DatabaseError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except BuildError:
db.session.rollback()
flash(f"An error occured !", "danger")
return ("", 204)
return render_template(
"blog/article.html",
form=form,
title="Devsate | Blog",
)
# Comments reply route handler
@bp.route("/<int:comment_id>/replyComment/", methods=("GET", "POST"), strict_slashes=False)
def replyHandler(comment_id):
form = ReplyComment()
comment = Comments.query.filter_by(id=comment_id).first()
replies = Replies.query.filter_by(comment_id=comment.id).all()
if request.method == "POST":
message = form.reply.data
author = current_user.fname
        message = Replies(
            message=message,
            author=author,
            comment_id=comment_id,
        )
db.session.add(message)
db.session.commit()
flash("Reply succesfully posted", "success")
return ("",204)
# Handles javascript image uploads from tinyMCE
@bp.route("/imageuploader", methods=["POST"])
@login_required
@admin_required
def imageuploader():
file = request.files.get("file")
if file:
filename = file.filename.lower()
        fn, ext = filename.rsplit(".", 1)  # split on the last dot so names containing dots keep their extension
# truncate filename (excluding extension) to 30 characters
fn = fn[:30]
filename = fn + "." + ext
if ext in ["jpg", "gif", "png", "jpeg"]:
try:
# everything looks good, save file
img_fullpath = os.path.join(
current_app.root_path, "static/images/blog-posts", filename
)
file.save(img_fullpath)
# get the file size to save to db
file_size = os.stat(img_fullpath).st_size
size = 160, 160
# read image into pillow
im = Image.open(img_fullpath)
# get image dimension to save to db
file_width, file_height = im.size
# convert to thumbnail
im.thumbnail(size)
thumbnail = fn + "-thumb.jpg"
tmb_fullpath = os.path.join(
                    current_app.root_path, "static/images/blog-posts", thumbnail
)
# PNG is index while JPG needs RGB
if not im.mode == "RGB":
im = im.convert("RGB")
# save thumbnail
im.save(tmb_fullpath, "JPEG")
# # save to db
# img = Images(
# filename=filename,
# thumbnail=thumbnail,
# file_size=file_size,
# file_width=file_width,
# file_height=file_height,
# )
# db.session.add(img)
# db.session.commit()
except IOError:
output = make_response(404)
output.headers["Error"] = "Cannot create thumbnail for " + filename
return output
return jsonify({"location": filename})
# fail, image did not upload
output = make_response(404)
output.headers["Error"] = "Filename needs to be JPG, JPEG, GIF or PNG"
return output
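# Rough manual check for the endpoint above (illustrative sketch only): the app
# factory name "create_app" is an assumption, the final URL depends on how this
# blueprint is registered, and the request must come from a logged-in admin
# because of the decorators above.
#
#     client = create_app().test_client()
#     with open("photo.jpg", "rb") as fh:
#         rv = client.post(
#             "/imageuploader",
#             data={"file": (fh, "photo.jpg")},
#             content_type="multipart/form-data",
#         )
#     print(rv.status_code, rv.get_json())  # {"location": "photo.jpg"} on success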
|
python
|
#!/usr/bin/env python
from setuptools import setup  # setuptools, not distutils, so install_requires is honored
import uritemplate
base_url = "http://github.com/uri-templates/uritemplate-py/"
setup(
name = 'uritemplate',
version = uritemplate.__version__,
description = 'URI Templates',
author = 'Joe Gregorio',
author_email = '[email protected]',
url = base_url,
download_url = \
'%starball/uritemplate-py-%s' % (base_url, uritemplate.__version__),
packages = ['uritemplate'],
provides = ['uritemplate'],
long_description=open("README.rst").read(),
install_requires = ['simplejson >= 2.5.0'],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Operating System :: POSIX',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
python
|
#
# Copyright (C) 2019 Luca Pasqualini
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import scripts
from .pass_through import PassThroughInterface
|
python
|
from appcontroller import AppController
class CustomAppController(AppController):
def __init__(self, *args, **kwargs):
AppController.__init__(self, *args, **kwargs)
def start(self):
print "Calling the default controller to populate table entries"
AppController.start(self)
def stop(self):
reg_val = self.readRegister('forward_count_register', 0)
print "The switch forwarded a total of %d packets" % reg_val
AppController.stop(self)
|
python
|
import json
import os, re
import numpy as np
import pandas as pd
from scipy.stats import describe
from tqdm import tqdm
# Open a file
path = "/home/ubuntu/model-inference/rayserve/gpt-optim"
dirs = os.listdir(path)
# This would print all the files and directories
latency_list = []
requests = {}
for file in dirs:
# print(file)
filepath = os.path.join(path, file)
if not "e" in file:
continue
print(filepath)
with open(filepath, "r") as fp:
text = fp.read()
groups = re.findall(
r"\[(.*),.*\].*\((\d+)\) inference \((.*), (.*), (.*), (.*)\)", text
)
# print(groups)
for group in groups:
uuid, bsz, start_time, end_time, start_power, end_power = group
bsz = int(bsz)
start_time = float(start_time)
end_time = float(end_time)
start_power = int(start_power)
end_power = int(end_power)
            if uuid not in requests:
requests[uuid] = []
requests[uuid].append(
{
"uuid": uuid,
"bsz": bsz,
"start_time": start_time,
"end_time": end_time,
"start_power": start_power,
"end_power": end_power,
}
)
agg_requests = {}
for uuid in requests:
    start_time = min(record['start_time'] for record in requests[uuid])
    end_time = max(record['end_time'] for record in requests[uuid])
bsz = requests[uuid][0]['bsz']
agg_requests[uuid] = {
"uuid": uuid,
"bsz": bsz,
"start_time": start_time,
"end_time": end_time,
}
latency_list = [
sum(
record["end_time"] - record["start_time"]
for record in records
if record["bsz"] == 1
)
for uuid, records in requests.items()
]
print(np.percentile(latency_list, 99))
print(np.percentile(latency_list, 50))
print(describe(latency_list))
time_list = np.array(
[
[record["end_time"], record["start_time"]]
for uuid, records in requests.items()
for record in records
if record["bsz"] == 2
]
)
# df_time = pd.DataFrame(
# [
# record
# for uuid, records in requests.items()
# for record in records
# if record["bsz"] == 2
# ]
# )
df_time = pd.DataFrame(
list(agg_requests.values())
)
print(df_time)
df_time = df_time.sort_values(by="start_time")
counts = [0 for _ in range(len(df_time.index))]
min_time = df_time["start_time"].min()
max_time = df_time["end_time"].max()
max_count = 0
max_records = None
TIME_WINDOW = 1.0
# max_interval = None
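# Sliding-window concurrency scan (explanatory comment): for each request start
# time t, a window [t, t + TIME_WINDOW) is opened and every request is compared
# against it. Requests fully enclosed in the window count as 1, while requests
# that straddle a window edge contribute the fraction of their duration that
# falls inside the window. The largest count found over all windows is kept in
# max_count, with the matching rows in max_records.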
for t in tqdm(df_time["start_time"].to_numpy()):
win_l = t
win_h = t + TIME_WINDOW
tmp_records = []
tmp_counts = 0
for idx, row in df_time.iterrows():
# if (win_l <= row["end_time"] <= win_h) or (win_l <= row["start_time"] <= win_h):
# tmp_records.append(row)
if row["end_time"] <= win_h and win_l <= row["start_time"]:
tmp_counts += 1
tmp_records.append(row)
# print("enclosed", row, (win_l, win_h))
print("encolsed", tmp_counts)
elif row["end_time"] > win_h and row["start_time"] < win_h:
tmp_counts += (win_h - row["start_time"]) / (
row["end_time"] - row["start_time"]
)
tmp_records.append(row)
print("high", tmp_counts)
# print("high", row, (win_l, win_h))
elif row["end_time"] > win_l and win_l > row["start_time"]:
tmp_counts += (row["end_time"] - win_l) / (
row["end_time"] - row["start_time"]
)
tmp_records.append(row)
print("low", tmp_counts)
# print("low", row, (win_l, win_h))
if tmp_counts > max_count:
max_count = tmp_counts
max_records = tmp_records
print("tmp_counts", tmp_counts)
print("max_count", max_count / TIME_WINDOW * 2)
# print("max_records", max_records)
# ts_list = []
# power_list = []
# labels = []
# for row in max_records:
# ts_list.append(row['start_time'])
# ts_list.append(row['end_time'])
# power_list.append(row['start_power'])
# power_list.append(row['end_power'])
# labels += [1, -1]
# df_energy = pd.DataFrame({
# "ts": ts_list,
# "power": power_list,
# "labels": labels,
# })
# df_energy = df_energy.sort_values(by="ts")
# energy = (df_energy.ts - df_energy.ts.shift(1)) * (df_energy.power + df_energy.power.shift(1)) / 2
# energy = energy.to_numpy()
# labels = df_energy.labels.to_numpy()
# # print()
# count = 1
# e_sum = 0
# for i in range(1, len(labels)):
# if count > 0: e_sum += energy[i]
# count += labels[i]
# print("energy", e_sum / 1000 / len(max_records) / 2)
|
python
|
# Create a program that shows on the screen all the even numbers in the range from 1 to 50.
"""for contador in range(1, 51):
if contador % 2 == 0:
if contador == 50:
print(f'{contador}.')
else:
print(f'{contador}, ', end='')
"""
for contador in range(2, 51, 2):
if contador == 50:
print(f'{contador}.')
else:
print(f'{contador}, ', end='')
|
python
|
from functools import partial
from copy import copy
import numpy as np
from PyQt5.QtWidgets import (
QGridLayout,
QHBoxLayout,
QVBoxLayout,
QGroupBox,
QCheckBox,
QComboBox,
QScrollArea,
QLabel,
QSlider,
)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib import pyplot as plt
try:
import biorbd
except ImportError:
import biorbd_casadi as biorbd
class MuscleAnalyses:
def __init__(self, parent, main_window, background_color=(0.5, 0.5, 0.5)):
# Centralize the materials
analyses_muscle_layout = QHBoxLayout(parent)
# Get some aliases
self.main_window = main_window
self.model = self.main_window.model
self.n_mus = self.model.nbMuscles()
self.n_q = self.model.nbQ()
# Add dof selector
selector_layout = QVBoxLayout()
analyses_muscle_layout.addLayout(selector_layout)
text_dof = QLabel()
text_dof.setText("DoF to run")
text_dof.setPalette(self.main_window.palette_active)
selector_layout.addWidget(text_dof)
self.combobox_dof = QComboBox()
selector_layout.addWidget(self.combobox_dof)
self.combobox_dof.setPalette(self.main_window.palette_active)
self.dof_mapping = dict()
for cmp_dof, name in enumerate(self.model.nameDof()):
self.combobox_dof.addItem(name.to_string())
self.dof_mapping[name.to_string()] = cmp_dof
self.combobox_dof.currentIndexChanged.connect(self.__set_current_dof)
# Set default value
self.current_dof = self.combobox_dof.currentText()
# Add the possibility to select from movement
self.animation_checkbox = QCheckBox()
selector_layout.addWidget(self.animation_checkbox)
self.animation_checkbox.setText("From animation")
self.animation_checkbox.setPalette(self.main_window.palette_inactive)
self.animation_checkbox.setEnabled(False)
self.animation_checkbox.stateChanged.connect(partial(self.update_all_graphs, False, False, False, False))
# Add plots
analyses_layout = QGridLayout()
analyses_muscle_layout.addLayout(analyses_layout)
self.n_point_for_q = 50
# Add muscle length plot
self.canvas_muscle_length = FigureCanvasQTAgg(plt.figure(facecolor=background_color))
analyses_layout.addWidget(self.canvas_muscle_length, 0, 0)
self.ax_muscle_length = self.canvas_muscle_length.figure.subplots()
self.ax_muscle_length.set_facecolor(background_color)
self.ax_muscle_length.set_title("Muscle length")
self.ax_muscle_length.set_ylabel("Length (m)")
# Add moment arm plot
self.canvas_moment_arm = FigureCanvasQTAgg(plt.figure(facecolor=background_color))
analyses_layout.addWidget(self.canvas_moment_arm, 0, 1)
self.ax_moment_arm = self.canvas_moment_arm.figure.subplots()
self.ax_moment_arm.set_facecolor(background_color)
self.ax_moment_arm.set_title("Moment arm")
self.ax_moment_arm.set_ylabel("Moment arm (m)")
# Add passive forces
self.canvas_passive_forces = FigureCanvasQTAgg(plt.figure(facecolor=background_color))
analyses_layout.addWidget(self.canvas_passive_forces, 1, 0)
self.ax_passive_forces = self.canvas_passive_forces.figure.subplots()
self.ax_passive_forces.set_facecolor(background_color)
self.ax_passive_forces.set_title("Passive forces")
self.ax_passive_forces.set_ylabel("Passive forces coeff")
# Add active forces
self.canvas_active_forces = FigureCanvasQTAgg(plt.figure(facecolor=background_color))
active_forces_layout = QHBoxLayout()
analyses_layout.addLayout(active_forces_layout, 1, 1)
active_forces_layout.addWidget(self.canvas_active_forces)
self.ax_active_forces = self.canvas_active_forces.figure.subplots()
self.ax_active_forces.set_facecolor(background_color)
self.ax_active_forces.set_title("Active forces")
self.ax_active_forces.set_ylabel("Active forces coeff")
self.active_forces_slider = QSlider()
active_forces_layout.addWidget(self.active_forces_slider)
self.active_forces_slider.setPalette(self.main_window.palette_active)
self.active_forces_slider.setMinimum(0)
self.active_forces_slider.setMaximum(100)
self.active_forces_slider.valueChanged.connect(partial(self.update_all_graphs, True, True, True, False))
# Add muscle selector
radio_muscle_group = QGroupBox()
muscle_layout = QVBoxLayout()
self.muscle_mapping = dict()
self.checkboxes_muscle = list()
cmp_mus = 0
for group in range(self.model.nbMuscleGroups()):
for mus in range(self.model.muscleGroup(group).nbMuscles()):
# Map the name to the right numbers
name = self.model.muscleGroup(group).muscle(mus).name().to_string()
self.muscle_mapping[name] = (group, mus, cmp_mus)
# Add the CheckBox
self.checkboxes_muscle.append(QCheckBox())
self.checkboxes_muscle[cmp_mus].setPalette(self.main_window.palette_active)
self.checkboxes_muscle[cmp_mus].setText(name)
self.checkboxes_muscle[cmp_mus].toggled.connect(
partial(self.update_all_graphs, False, False, False, False)
)
muscle_layout.addWidget(self.checkboxes_muscle[cmp_mus])
# Add the plot to the axes
self.ax_muscle_length.plot(np.nan, np.nan, "w")
self.ax_moment_arm.plot(np.nan, np.nan, "w")
self.ax_passive_forces.plot(np.nan, np.nan, "w")
self.ax_active_forces.plot(np.nan, np.nan, "w")
cmp_mus += 1
# Add vertical bar for position of current dof
self.ax_muscle_length.plot(np.nan, np.nan, "k")
self.ax_moment_arm.plot(np.nan, np.nan, "k")
self.ax_passive_forces.plot(np.nan, np.nan, "k")
self.ax_active_forces.plot(np.nan, np.nan, "k")
radio_muscle_group.setLayout(muscle_layout)
muscles_scroll = QScrollArea()
muscles_scroll.setFrameShape(0)
muscles_scroll.setWidgetResizable(True)
muscles_scroll.setWidget(radio_muscle_group)
selector_layout.addWidget(muscles_scroll)
selector_layout.addStretch()
def add_movement_to_dof_choice(self):
self.animation_checkbox.setPalette(self.main_window.palette_active)
self.animation_checkbox.setEnabled(True)
self.n_point_for_q = self.main_window.animated_Q.shape[0]
def __set_current_dof(self):
self.current_dof = self.combobox_dof.currentText()
self.update_all_graphs(False, False, False, False)
def update_all_graphs(self, skip_muscle_length, skip_moment_arm, skip_passive_forces, skip_active_forces):
x_axis, length, moment_arm, passive_forces, active_forces = self.__compute_all_values()
self.__update_specific_plot(
self.canvas_muscle_length, self.ax_muscle_length, x_axis, length, skip_muscle_length
)
self.__update_specific_plot(self.canvas_moment_arm, self.ax_moment_arm, x_axis, moment_arm, skip_moment_arm)
self.__update_specific_plot(
self.canvas_passive_forces, self.ax_passive_forces, x_axis, passive_forces, skip_passive_forces
)
self.__update_specific_plot(
self.canvas_active_forces, self.ax_active_forces, x_axis, active_forces, skip_active_forces
)
self.__update_graph_size()
def __update_graph_size(self):
self.ax_muscle_length.figure.tight_layout()
self.ax_moment_arm.figure.tight_layout()
self.ax_passive_forces.figure.tight_layout()
self.ax_active_forces.figure.tight_layout()
self.canvas_muscle_length.draw()
self.canvas_moment_arm.draw()
self.canvas_passive_forces.draw()
self.canvas_active_forces.draw()
def __compute_all_values(self):
q_idx = self.dof_mapping[self.current_dof]
x_axis, all_q = self.__generate_x_axis(q_idx)
length = np.ndarray((self.n_point_for_q, self.n_mus))
moment_arm = np.ndarray((self.n_point_for_q, self.n_mus))
passive_forces = np.ndarray((self.n_point_for_q, self.n_mus))
active_forces = np.ndarray((self.n_point_for_q, self.n_mus))
emg = biorbd.State(0, self.active_forces_slider.value() / 100)
for i, q_mod in enumerate(all_q):
self.model.UpdateKinematicsCustom(biorbd.GeneralizedCoordinates(q_mod))
for m in range(self.n_mus):
if self.checkboxes_muscle[m].isChecked():
mus_group_idx, mus_idx, cmp_mus = self.muscle_mapping[self.checkboxes_muscle[m].text()]
mus = self.model.muscleGroup(mus_group_idx).muscle(mus_idx)
mus.updateOrientations(self.model, q_mod, 1)
muscles_length_jacobian = self.model.musclesLengthJacobian().to_array()
length[i, m] = mus.length(self.model, q_mod, False)
moment_arm[i, m] = -1 * muscles_length_jacobian[cmp_mus, q_idx]
if mus.type() != biorbd.IDEALIZED_ACTUATOR:
passive_forces[i, m] = biorbd.HillType(mus).FlPE()
else:
passive_forces[i, m] = 0
if mus.type() != biorbd.IDEALIZED_ACTUATOR:
active_forces[i, m] = biorbd.HillType(mus).FlCE(emg)
else:
active_forces[i, m] = 0
return x_axis, length, moment_arm, passive_forces, active_forces
def __update_specific_plot(self, canvas, ax, x, y, skip=False):
# Plot all active muscles
number_of_active = 0
for m in range(self.n_mus):
if self.checkboxes_muscle[m].isChecked():
if not skip:
ax.get_lines()[m].set_data(x, y[:, m])
number_of_active += 1
else:
ax.get_lines()[m].set_data(np.nan, np.nan)
        # Empty the vertical bar (otherwise relim takes it into account)
ax.get_lines()[-1].set_data(np.nan, np.nan)
# If there is no data skip relim and vertical bar adjustment
if number_of_active != 0:
# relim so the plot looks nice
ax.relim()
ax.autoscale(enable=True)
# Adjust axis label (give a generic name)
if self.animation_checkbox.isChecked():
ax.set_xlabel("Time frame")
else:
ax.set_xlabel("Along range")
# Add vertical bar to show current dof (it must be done after relim so we know the new lims)
q_idx = self.combobox_dof.currentIndex()
if self.animation_checkbox.isChecked():
x = int(self.main_window.movement_slider[1].text()) - 1 # Frame label
else:
x = self.__get_q_from_slider()[q_idx]
ax.get_lines()[-1].set_data([x, x], ax.get_ylim())
# Redraw graphs
canvas.draw()
def __get_q_from_slider(self):
return copy(self.main_window.Q)
def __generate_x_axis(self, q_idx):
if self.animation_checkbox.isChecked():
q = self.main_window.animated_Q
x = np.arange(q.shape[0])
else:
q = np.tile(self.__get_q_from_slider(), (self.n_point_for_q, 1))
slider = self.main_window.sliders[self.combobox_dof.currentIndex()][1]
q[:, q_idx] = np.linspace(
slider.minimum() / self.main_window.double_factor,
slider.maximum() / self.main_window.double_factor,
self.n_point_for_q,
)
x = q[:, q_idx]
return x, q
|
python
|
#
# PySNMP MIB module RFC1407-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RFC1407-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:48:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
PerfTotalCount, PerfIntervalCount, PerfCurrentCount = mibBuilder.importSymbols("PerfHist-TC-MIB", "PerfTotalCount", "PerfIntervalCount", "PerfCurrentCount")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Integer32, MibIdentifier, transmission, NotificationType, Gauge32, iso, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ObjectIdentity, IpAddress, Counter64, ModuleIdentity, Unsigned32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibIdentifier", "transmission", "NotificationType", "Gauge32", "iso", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ObjectIdentity", "IpAddress", "Counter64", "ModuleIdentity", "Unsigned32", "Counter32")
TimeStamp, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "DisplayString", "TextualConvention", "TruthValue")
ds3 = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 30))
ds3.setRevisions(('2004-09-08 00:00', '1998-08-01 21:30', '1993-01-25 20:28',))
if mibBuilder.loadTexts: ds3.setLastUpdated('200409080000Z')
if mibBuilder.loadTexts: ds3.setOrganization('IETF AToM MIB Working Group')
dsx3ConfigTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 5), )
if mibBuilder.loadTexts: dsx3ConfigTable.setStatus('current')
dsx3ConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 5, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3LineIndex"))
if mibBuilder.loadTexts: dsx3ConfigEntry.setStatus('current')
dsx3LineIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3LineIndex.setStatus('current')
dsx3IfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IfIndex.setStatus('deprecated')
dsx3TimeElapsed = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 899))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TimeElapsed.setStatus('current')
dsx3ValidIntervals = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3ValidIntervals.setStatus('current')
dsx3LineType = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("dsx3other", 1), ("dsx3M23", 2), ("dsx3SYNTRAN", 3), ("dsx3CbitParity", 4), ("dsx3ClearChannel", 5), ("e3other", 6), ("e3Framed", 7), ("e3Plcp", 8), ("dsx3M13", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3LineType.setStatus('current')
dsx3LineCoding = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dsx3Other", 1), ("dsx3B3ZS", 2), ("e3HDB3", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3LineCoding.setStatus('current')
dsx3SendCode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("dsx3SendNoCode", 1), ("dsx3SendLineCode", 2), ("dsx3SendPayloadCode", 3), ("dsx3SendResetCode", 4), ("dsx3SendDS1LoopCode", 5), ("dsx3SendTestPattern", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3SendCode.setStatus('current')
dsx3CircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3CircuitIdentifier.setStatus('current')
dsx3LoopbackConfig = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("dsx3NoLoop", 1), ("dsx3PayloadLoop", 2), ("dsx3LineLoop", 3), ("dsx3OtherLoop", 4), ("dsx3InwardLoop", 5), ("dsx3DualLoop", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3LoopbackConfig.setStatus('current')
dsx3LineStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3LineStatus.setStatus('current')
dsx3TransmitClockSource = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("loopTiming", 1), ("localTiming", 2), ("throughTiming", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3TransmitClockSource.setStatus('current')
dsx3InvalidIntervals = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3InvalidIntervals.setStatus('current')
dsx3LineLength = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64000))).setUnits('meters').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3LineLength.setStatus('current')
dsx3LineStatusLastChange = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 14), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3LineStatusLastChange.setStatus('current')
dsx3LineStatusChangeTrapEnable = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3LineStatusChangeTrapEnable.setStatus('current')
dsx3LoopbackStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3LoopbackStatus.setStatus('current')
dsx3Channelization = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("enabledDs1", 2), ("enabledDs2", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3Channelization.setStatus('current')
dsx3Ds1ForRemoteLoop = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 5, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 29))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3Ds1ForRemoteLoop.setStatus('current')
dsx3CurrentTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 6), )
if mibBuilder.loadTexts: dsx3CurrentTable.setStatus('current')
dsx3CurrentEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 6, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3CurrentIndex"))
if mibBuilder.loadTexts: dsx3CurrentEntry.setStatus('current')
dsx3CurrentIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentIndex.setStatus('current')
dsx3CurrentPESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 2), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentPESs.setStatus('current')
dsx3CurrentPSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 3), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentPSESs.setStatus('current')
dsx3CurrentSEFSs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 4), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentSEFSs.setStatus('current')
dsx3CurrentUASs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 5), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentUASs.setStatus('current')
dsx3CurrentLCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 6), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentLCVs.setStatus('current')
dsx3CurrentPCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 7), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentPCVs.setStatus('current')
dsx3CurrentLESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 8), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentLESs.setStatus('current')
dsx3CurrentCCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 9), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentCCVs.setStatus('current')
dsx3CurrentCESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 10), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentCESs.setStatus('current')
dsx3CurrentCSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 6, 1, 11), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3CurrentCSESs.setStatus('current')
dsx3IntervalTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 7), )
if mibBuilder.loadTexts: dsx3IntervalTable.setStatus('current')
dsx3IntervalEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 7, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3IntervalIndex"), (0, "RFC1407-MIB", "dsx3IntervalNumber"))
if mibBuilder.loadTexts: dsx3IntervalEntry.setStatus('current')
dsx3IntervalIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalIndex.setStatus('current')
dsx3IntervalNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalNumber.setStatus('current')
dsx3IntervalPESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 3), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalPESs.setStatus('current')
dsx3IntervalPSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 4), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalPSESs.setStatus('current')
dsx3IntervalSEFSs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 5), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalSEFSs.setStatus('current')
dsx3IntervalUASs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 6), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalUASs.setStatus('current')
dsx3IntervalLCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 7), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalLCVs.setStatus('current')
dsx3IntervalPCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 8), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalPCVs.setStatus('current')
dsx3IntervalLESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 9), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalLESs.setStatus('current')
dsx3IntervalCCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 10), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalCCVs.setStatus('current')
dsx3IntervalCESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 11), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalCESs.setStatus('current')
dsx3IntervalCSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 12), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalCSESs.setStatus('current')
dsx3IntervalValidData = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 7, 1, 13), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3IntervalValidData.setStatus('current')
dsx3TotalTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 8), )
if mibBuilder.loadTexts: dsx3TotalTable.setStatus('current')
dsx3TotalEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 8, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3TotalIndex"))
if mibBuilder.loadTexts: dsx3TotalEntry.setStatus('current')
dsx3TotalIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalIndex.setStatus('current')
dsx3TotalPESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 2), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalPESs.setStatus('current')
dsx3TotalPSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 3), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalPSESs.setStatus('current')
dsx3TotalSEFSs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 4), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalSEFSs.setStatus('current')
dsx3TotalUASs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 5), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalUASs.setStatus('current')
dsx3TotalLCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 6), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalLCVs.setStatus('current')
dsx3TotalPCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 7), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalPCVs.setStatus('current')
dsx3TotalLESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 8), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalLESs.setStatus('current')
dsx3TotalCCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 9), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalCCVs.setStatus('current')
dsx3TotalCESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 10), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalCESs.setStatus('current')
dsx3TotalCSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 8, 1, 11), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3TotalCSESs.setStatus('current')
dsx3FarEndConfigTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 9), )
if mibBuilder.loadTexts: dsx3FarEndConfigTable.setStatus('current')
dsx3FarEndConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 9, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3FarEndLineIndex"))
if mibBuilder.loadTexts: dsx3FarEndConfigEntry.setStatus('current')
dsx3FarEndLineIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 9, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndLineIndex.setStatus('current')
dsx3FarEndEquipCode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 9, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3FarEndEquipCode.setStatus('current')
dsx3FarEndLocationIDCode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 9, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 11))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3FarEndLocationIDCode.setStatus('current')
dsx3FarEndFrameIDCode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 9, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3FarEndFrameIDCode.setStatus('current')
dsx3FarEndUnitCode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 9, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 6))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3FarEndUnitCode.setStatus('current')
dsx3FarEndFacilityIDCode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 9, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 38))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3FarEndFacilityIDCode.setStatus('current')
dsx3FarEndCurrentTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 10), )
if mibBuilder.loadTexts: dsx3FarEndCurrentTable.setStatus('current')
dsx3FarEndCurrentEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 10, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3FarEndCurrentIndex"))
if mibBuilder.loadTexts: dsx3FarEndCurrentEntry.setStatus('current')
dsx3FarEndCurrentIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndCurrentIndex.setStatus('current')
dsx3FarEndTimeElapsed = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 899))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndTimeElapsed.setStatus('current')
dsx3FarEndValidIntervals = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndValidIntervals.setStatus('current')
dsx3FarEndCurrentCESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 4), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndCurrentCESs.setStatus('current')
dsx3FarEndCurrentCSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 5), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndCurrentCSESs.setStatus('current')
dsx3FarEndCurrentCCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 6), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndCurrentCCVs.setStatus('current')
dsx3FarEndCurrentUASs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 7), PerfCurrentCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndCurrentUASs.setStatus('current')
dsx3FarEndInvalidIntervals = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 10, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndInvalidIntervals.setStatus('current')
dsx3FarEndIntervalTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 11), )
if mibBuilder.loadTexts: dsx3FarEndIntervalTable.setStatus('current')
dsx3FarEndIntervalEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 11, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3FarEndIntervalIndex"), (0, "RFC1407-MIB", "dsx3FarEndIntervalNumber"))
if mibBuilder.loadTexts: dsx3FarEndIntervalEntry.setStatus('current')
dsx3FarEndIntervalIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 11, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndIntervalIndex.setStatus('current')
dsx3FarEndIntervalNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 11, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndIntervalNumber.setStatus('current')
dsx3FarEndIntervalCESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 11, 1, 3), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndIntervalCESs.setStatus('current')
dsx3FarEndIntervalCSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 11, 1, 4), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndIntervalCSESs.setStatus('current')
dsx3FarEndIntervalCCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 11, 1, 5), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndIntervalCCVs.setStatus('current')
dsx3FarEndIntervalUASs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 11, 1, 6), PerfIntervalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndIntervalUASs.setStatus('current')
dsx3FarEndIntervalValidData = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 11, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndIntervalValidData.setStatus('current')
dsx3FarEndTotalTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 12), )
if mibBuilder.loadTexts: dsx3FarEndTotalTable.setStatus('current')
dsx3FarEndTotalEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 12, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3FarEndTotalIndex"))
if mibBuilder.loadTexts: dsx3FarEndTotalEntry.setStatus('current')
dsx3FarEndTotalIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 12, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndTotalIndex.setStatus('current')
dsx3FarEndTotalCESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 12, 1, 2), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndTotalCESs.setStatus('current')
dsx3FarEndTotalCSESs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 12, 1, 3), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndTotalCSESs.setStatus('current')
dsx3FarEndTotalCCVs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 12, 1, 4), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndTotalCCVs.setStatus('current')
dsx3FarEndTotalUASs = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 12, 1, 5), PerfTotalCount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FarEndTotalUASs.setStatus('current')
dsx3FracTable = MibTable((1, 3, 6, 1, 2, 1, 10, 30, 13), )
if mibBuilder.loadTexts: dsx3FracTable.setStatus('deprecated')
dsx3FracEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 30, 13, 1), ).setIndexNames((0, "RFC1407-MIB", "dsx3FracIndex"), (0, "RFC1407-MIB", "dsx3FracNumber"))
if mibBuilder.loadTexts: dsx3FracEntry.setStatus('deprecated')
dsx3FracIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 13, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FracIndex.setStatus('deprecated')
dsx3FracNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 13, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx3FracNumber.setStatus('deprecated')
dsx3FracIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 30, 13, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx3FracIfIndex.setStatus('deprecated')
ds3Traps = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 30, 15))
dsx3LineStatusChange = NotificationType((1, 3, 6, 1, 2, 1, 10, 30, 15, 0, 1)).setObjects(("RFC1407-MIB", "dsx3LineStatus"), ("RFC1407-MIB", "dsx3LineStatusLastChange"))
if mibBuilder.loadTexts: dsx3LineStatusChange.setStatus('current')
ds3Conformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 30, 14))
ds3Groups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 30, 14, 1))
ds3Compliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 30, 14, 2))
ds3Compliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 30, 14, 2, 1)).setObjects(("RFC1407-MIB", "ds3NearEndConfigGroup"), ("RFC1407-MIB", "ds3NearEndStatisticsGroup"), ("RFC1407-MIB", "ds3FarEndGroup"), ("RFC1407-MIB", "ds3NearEndOptionalTrapGroup"), ("RFC1407-MIB", "ds3NearEndOptionalConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ds3Compliance = ds3Compliance.setStatus('current')
ds3NearEndConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 30, 14, 1, 1)).setObjects(("RFC1407-MIB", "dsx3LineIndex"), ("RFC1407-MIB", "dsx3TimeElapsed"), ("RFC1407-MIB", "dsx3ValidIntervals"), ("RFC1407-MIB", "dsx3LineType"), ("RFC1407-MIB", "dsx3LineCoding"), ("RFC1407-MIB", "dsx3SendCode"), ("RFC1407-MIB", "dsx3CircuitIdentifier"), ("RFC1407-MIB", "dsx3LoopbackConfig"), ("RFC1407-MIB", "dsx3LineStatus"), ("RFC1407-MIB", "dsx3TransmitClockSource"), ("RFC1407-MIB", "dsx3InvalidIntervals"), ("RFC1407-MIB", "dsx3LineLength"), ("RFC1407-MIB", "dsx3LoopbackStatus"), ("RFC1407-MIB", "dsx3Channelization"), ("RFC1407-MIB", "dsx3Ds1ForRemoteLoop"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ds3NearEndConfigGroup = ds3NearEndConfigGroup.setStatus('current')
ds3NearEndStatisticsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 30, 14, 1, 2)).setObjects(("RFC1407-MIB", "dsx3CurrentIndex"), ("RFC1407-MIB", "dsx3CurrentPESs"), ("RFC1407-MIB", "dsx3CurrentPSESs"), ("RFC1407-MIB", "dsx3CurrentSEFSs"), ("RFC1407-MIB", "dsx3CurrentUASs"), ("RFC1407-MIB", "dsx3CurrentLCVs"), ("RFC1407-MIB", "dsx3CurrentPCVs"), ("RFC1407-MIB", "dsx3CurrentLESs"), ("RFC1407-MIB", "dsx3CurrentCCVs"), ("RFC1407-MIB", "dsx3CurrentCESs"), ("RFC1407-MIB", "dsx3CurrentCSESs"), ("RFC1407-MIB", "dsx3IntervalIndex"), ("RFC1407-MIB", "dsx3IntervalNumber"), ("RFC1407-MIB", "dsx3IntervalPESs"), ("RFC1407-MIB", "dsx3IntervalPSESs"), ("RFC1407-MIB", "dsx3IntervalSEFSs"), ("RFC1407-MIB", "dsx3IntervalUASs"), ("RFC1407-MIB", "dsx3IntervalLCVs"), ("RFC1407-MIB", "dsx3IntervalPCVs"), ("RFC1407-MIB", "dsx3IntervalLESs"), ("RFC1407-MIB", "dsx3IntervalCCVs"), ("RFC1407-MIB", "dsx3IntervalCESs"), ("RFC1407-MIB", "dsx3IntervalCSESs"), ("RFC1407-MIB", "dsx3IntervalValidData"), ("RFC1407-MIB", "dsx3TotalIndex"), ("RFC1407-MIB", "dsx3TotalPESs"), ("RFC1407-MIB", "dsx3TotalPSESs"), ("RFC1407-MIB", "dsx3TotalSEFSs"), ("RFC1407-MIB", "dsx3TotalUASs"), ("RFC1407-MIB", "dsx3TotalLCVs"), ("RFC1407-MIB", "dsx3TotalPCVs"), ("RFC1407-MIB", "dsx3TotalLESs"), ("RFC1407-MIB", "dsx3TotalCCVs"), ("RFC1407-MIB", "dsx3TotalCESs"), ("RFC1407-MIB", "dsx3TotalCSESs"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ds3NearEndStatisticsGroup = ds3NearEndStatisticsGroup.setStatus('current')
ds3FarEndGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 30, 14, 1, 3)).setObjects(("RFC1407-MIB", "dsx3FarEndLineIndex"), ("RFC1407-MIB", "dsx3FarEndEquipCode"), ("RFC1407-MIB", "dsx3FarEndLocationIDCode"), ("RFC1407-MIB", "dsx3FarEndFrameIDCode"), ("RFC1407-MIB", "dsx3FarEndUnitCode"), ("RFC1407-MIB", "dsx3FarEndFacilityIDCode"), ("RFC1407-MIB", "dsx3FarEndCurrentIndex"), ("RFC1407-MIB", "dsx3FarEndTimeElapsed"), ("RFC1407-MIB", "dsx3FarEndValidIntervals"), ("RFC1407-MIB", "dsx3FarEndCurrentCESs"), ("RFC1407-MIB", "dsx3FarEndCurrentCSESs"), ("RFC1407-MIB", "dsx3FarEndCurrentCCVs"), ("RFC1407-MIB", "dsx3FarEndCurrentUASs"), ("RFC1407-MIB", "dsx3FarEndInvalidIntervals"), ("RFC1407-MIB", "dsx3FarEndIntervalIndex"), ("RFC1407-MIB", "dsx3FarEndIntervalNumber"), ("RFC1407-MIB", "dsx3FarEndIntervalCESs"), ("RFC1407-MIB", "dsx3FarEndIntervalCSESs"), ("RFC1407-MIB", "dsx3FarEndIntervalCCVs"), ("RFC1407-MIB", "dsx3FarEndIntervalUASs"), ("RFC1407-MIB", "dsx3FarEndIntervalValidData"), ("RFC1407-MIB", "dsx3FarEndTotalIndex"), ("RFC1407-MIB", "dsx3FarEndTotalCESs"), ("RFC1407-MIB", "dsx3FarEndTotalCSESs"), ("RFC1407-MIB", "dsx3FarEndTotalCCVs"), ("RFC1407-MIB", "dsx3FarEndTotalUASs"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ds3FarEndGroup = ds3FarEndGroup.setStatus('current')
ds3DeprecatedGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 30, 14, 1, 4)).setObjects(("RFC1407-MIB", "dsx3IfIndex"), ("RFC1407-MIB", "dsx3FracIndex"), ("RFC1407-MIB", "dsx3FracNumber"), ("RFC1407-MIB", "dsx3FracIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ds3DeprecatedGroup = ds3DeprecatedGroup.setStatus('deprecated')
ds3NearEndOptionalConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 30, 14, 1, 5)).setObjects(("RFC1407-MIB", "dsx3LineStatusLastChange"), ("RFC1407-MIB", "dsx3LineStatusChangeTrapEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ds3NearEndOptionalConfigGroup = ds3NearEndOptionalConfigGroup.setStatus('current')
ds3NearEndOptionalTrapGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 10, 30, 14, 1, 6)).setObjects(("RFC1407-MIB", "dsx3LineStatusChange"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ds3NearEndOptionalTrapGroup = ds3NearEndOptionalTrapGroup.setStatus('current')
mibBuilder.exportSymbols("RFC1407-MIB", ds3Compliances=ds3Compliances, dsx3FarEndFacilityIDCode=dsx3FarEndFacilityIDCode, dsx3IntervalNumber=dsx3IntervalNumber, dsx3IntervalCSESs=dsx3IntervalCSESs, dsx3CurrentUASs=dsx3CurrentUASs, dsx3IntervalSEFSs=dsx3IntervalSEFSs, dsx3CurrentCESs=dsx3CurrentCESs, ds3Traps=ds3Traps, dsx3TotalPCVs=dsx3TotalPCVs, dsx3FarEndTotalCSESs=dsx3FarEndTotalCSESs, dsx3TotalPESs=dsx3TotalPESs, dsx3FarEndEquipCode=dsx3FarEndEquipCode, dsx3LineCoding=dsx3LineCoding, dsx3CurrentLCVs=dsx3CurrentLCVs, ds3Compliance=ds3Compliance, dsx3FracEntry=dsx3FracEntry, dsx3IntervalLCVs=dsx3IntervalLCVs, dsx3FarEndIntervalEntry=dsx3FarEndIntervalEntry, dsx3FracTable=dsx3FracTable, ds3NearEndConfigGroup=ds3NearEndConfigGroup, dsx3CurrentIndex=dsx3CurrentIndex, dsx3TotalCSESs=dsx3TotalCSESs, dsx3LineStatus=dsx3LineStatus, dsx3LineStatusChangeTrapEnable=dsx3LineStatusChangeTrapEnable, dsx3IntervalCCVs=dsx3IntervalCCVs, dsx3IntervalEntry=dsx3IntervalEntry, dsx3IntervalUASs=dsx3IntervalUASs, dsx3FarEndCurrentEntry=dsx3FarEndCurrentEntry, ds3NearEndStatisticsGroup=ds3NearEndStatisticsGroup, dsx3FarEndIntervalIndex=dsx3FarEndIntervalIndex, dsx3ValidIntervals=dsx3ValidIntervals, dsx3FracNumber=dsx3FracNumber, dsx3CurrentCCVs=dsx3CurrentCCVs, dsx3CurrentTable=dsx3CurrentTable, dsx3FarEndTimeElapsed=dsx3FarEndTimeElapsed, dsx3TotalPSESs=dsx3TotalPSESs, dsx3FarEndCurrentTable=dsx3FarEndCurrentTable, dsx3FarEndTotalIndex=dsx3FarEndTotalIndex, dsx3ConfigTable=dsx3ConfigTable, dsx3CurrentPCVs=dsx3CurrentPCVs, dsx3FarEndValidIntervals=dsx3FarEndValidIntervals, dsx3LineStatusChange=dsx3LineStatusChange, dsx3IntervalTable=dsx3IntervalTable, dsx3Ds1ForRemoteLoop=dsx3Ds1ForRemoteLoop, dsx3ConfigEntry=dsx3ConfigEntry, dsx3TotalIndex=dsx3TotalIndex, dsx3FarEndIntervalUASs=dsx3FarEndIntervalUASs, dsx3FarEndConfigEntry=dsx3FarEndConfigEntry, ds3FarEndGroup=ds3FarEndGroup, dsx3CurrentPSESs=dsx3CurrentPSESs, dsx3IntervalPESs=dsx3IntervalPESs, dsx3TotalCCVs=dsx3TotalCCVs, dsx3FarEndCurrentCCVs=dsx3FarEndCurrentCCVs, dsx3LineIndex=dsx3LineIndex, dsx3IntervalPSESs=dsx3IntervalPSESs, dsx3FarEndLocationIDCode=dsx3FarEndLocationIDCode, dsx3FarEndInvalidIntervals=dsx3FarEndInvalidIntervals, dsx3FarEndTotalCESs=dsx3FarEndTotalCESs, dsx3FarEndIntervalCSESs=dsx3FarEndIntervalCSESs, dsx3IntervalPCVs=dsx3IntervalPCVs, dsx3FarEndIntervalCESs=dsx3FarEndIntervalCESs, dsx3CurrentSEFSs=dsx3CurrentSEFSs, dsx3TotalTable=dsx3TotalTable, dsx3FarEndLineIndex=dsx3FarEndLineIndex, dsx3TransmitClockSource=dsx3TransmitClockSource, dsx3CurrentPESs=dsx3CurrentPESs, dsx3LineStatusLastChange=dsx3LineStatusLastChange, dsx3FarEndUnitCode=dsx3FarEndUnitCode, dsx3FarEndIntervalCCVs=dsx3FarEndIntervalCCVs, dsx3FarEndCurrentUASs=dsx3FarEndCurrentUASs, dsx3IntervalIndex=dsx3IntervalIndex, dsx3FarEndFrameIDCode=dsx3FarEndFrameIDCode, dsx3CurrentCSESs=dsx3CurrentCSESs, dsx3IntervalCESs=dsx3IntervalCESs, dsx3CircuitIdentifier=dsx3CircuitIdentifier, ds3Conformance=ds3Conformance, ds3DeprecatedGroup=ds3DeprecatedGroup, dsx3FarEndCurrentIndex=dsx3FarEndCurrentIndex, dsx3FarEndIntervalValidData=dsx3FarEndIntervalValidData, dsx3FarEndTotalEntry=dsx3FarEndTotalEntry, dsx3FracIfIndex=dsx3FracIfIndex, dsx3FarEndTotalCCVs=dsx3FarEndTotalCCVs, dsx3TotalEntry=dsx3TotalEntry, dsx3LoopbackConfig=dsx3LoopbackConfig, dsx3FracIndex=dsx3FracIndex, dsx3FarEndIntervalNumber=dsx3FarEndIntervalNumber, ds3Groups=ds3Groups, dsx3IntervalLESs=dsx3IntervalLESs, dsx3TotalCESs=dsx3TotalCESs, PYSNMP_MODULE_ID=ds3, dsx3FarEndCurrentCESs=dsx3FarEndCurrentCESs, 
dsx3Channelization=dsx3Channelization, dsx3CurrentEntry=dsx3CurrentEntry, dsx3FarEndCurrentCSESs=dsx3FarEndCurrentCSESs, dsx3IfIndex=dsx3IfIndex, dsx3FarEndTotalUASs=dsx3FarEndTotalUASs, dsx3SendCode=dsx3SendCode, dsx3TotalUASs=dsx3TotalUASs, dsx3TimeElapsed=dsx3TimeElapsed, dsx3InvalidIntervals=dsx3InvalidIntervals, dsx3LineType=dsx3LineType, dsx3FarEndConfigTable=dsx3FarEndConfigTable, dsx3LoopbackStatus=dsx3LoopbackStatus, dsx3FarEndIntervalTable=dsx3FarEndIntervalTable, ds3NearEndOptionalTrapGroup=ds3NearEndOptionalTrapGroup, ds3NearEndOptionalConfigGroup=ds3NearEndOptionalConfigGroup, dsx3FarEndTotalTable=dsx3FarEndTotalTable, dsx3IntervalValidData=dsx3IntervalValidData, dsx3LineLength=dsx3LineLength, dsx3TotalSEFSs=dsx3TotalSEFSs, dsx3TotalLESs=dsx3TotalLESs, ds3=ds3, dsx3TotalLCVs=dsx3TotalLCVs, dsx3CurrentLESs=dsx3CurrentLESs)
|
python
|
# coding: utf8
"""语音结果和指令比对"""
from aip import AipNlp
from nlp.configs import APP_ID, API_KEY, SECRET_KEY
def match(voice2text, command):
"""匹配指令"""
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)
nlp_result_grnn = client.simnet(voice2text, command, {"model": "GRNN"})
if nlp_result_grnn.get("score", 0.5) >= 0.8:
return True
return False
if __name__ == "__main__":
    print(match("关闭设备", "打开设备"))  # "turn off the device" vs. "turn on the device"
|
python
|
from setuptools import setup
setup(name='Numpyextension',
version='1.1.1',
description='A library focused functions that only require numpy.',
url='https://www.github.com/pmp47/Numpyextension',
author='pmp47',
author_email='[email protected]',
license='MIT',
packages=['numpyextension'],
install_requires=['numpy==1.17.1'],
zip_safe=False,
include_package_data=True,
python_requires='>=3.6',
package_data={'': ['data/*.*']}
)
|
python
|
from cfg import *
import socket
import re
from commands import commands
def send_message(message):
s.send(bytes("PRIVMSG #" + NICK + " :" + message + "\r\n", "UTF-8"))
s = socket.socket()
s.connect((HOST, PORT))
s.send(bytes("PASS " + PASS + "\r\n", "UTF-8"))
s.send(bytes("NICK " + NICK + "\r\n", "UTF-8"))
s.send(bytes("JOIN #" + NICK + " \r\n", "UTF-8"))
send_message("Bot has just been turned on")
CHAT_MSG=re.compile(r"^:\w+!\w+@\w+\.tmi\.twitch\.tv PRIVMSG #\w+ :")
while True:
response = s.recv(1024).decode("utf-8")
if response == "PING :tmi.twitch.tv\r\n":
s.send("PONG :tmi.twitch.tv\r\n".encode("utf-8"))
else:
username = re.search(r"\w+", response).group(0)
message = CHAT_MSG.sub("", response)
print(response)
for command in commands:
if(message.strip() == command):
send_message(commands[command])
|
python
|
from db_facts.jinja_context import pull_jinja_context
import unittest
from unittest.mock import patch
from .mock_dbcli_config import mock_dbcli_config
@patch('db_facts.env_jinja_context.os')
class TestEnvJinjaContext(unittest.TestCase):
def test_with_env_set(self, mock_os):
config = {
'jinja_context_name': 'env',
}
context, filters = pull_jinja_context(['whatever', 'whatever'], config, mock_dbcli_config)
self.assertEqual(context['getenv'], mock_os.getenv)
|
python
|
# Generated by Django 3.1.2 on 2020-10-11 09:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trainer', '0025_user_data_adaptivity'),
]
operations = [
migrations.AddField(
model_name='user',
name='gamification',
field=models.IntegerField(choices=[(0, 'Level-System'), (1, 'Level-System + individuelles Ranking'), (1, 'Level-System + Gruppen-Ranking'), (1, 'Bayes')], db_index=True, default=0),
),
migrations.AddField(
model_name='user',
name='gamification_group',
field=models.CharField(db_index=True, max_length=32, null=True),
),
]
|
python
|
#!/usr/bin/env python
"""
Gaussian Distribution based two-tail error data reduction data-processing pipeline for dataframes
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
__author__ = "Rik Ghosh"
__copyright__ = "Copyright 2021, The University of Texas at Austin"
__credits__ = ["Soham Saha", "Larissa Franco"]
__license__ = "MIT"
__version__ = "1.0.2"
__maintainer__ = "Rik Ghosh"
__email__ = "[email protected]"
__status__ = "Production"
def chop_tails(data, param, bins=25, factor=1, verbose=0):
"""
Selects a certain parameter from a pandas DataFrame and creates a Matplotlib histogram for the given data. Then it fits a Gaussian Distribution to perform statistical error reduction\n
Args:
► `data` (pandas.core.frame.DataFrame): The pandas dataframe that contains the dataset\n
► `param` (str): An attribute present in the data parameter that will be processed in the algorithm\n
► `bins` (int, optional): Number of bins for the histogram. Defaults to `25`\n
► `factor` (float, optional): Scaling factor for standard deviation for Gaussian fitting. Must be a positive value. Defaults to `1`\n
► `verbose` (int, optional): Verbosity for the algorithm. Must be in the range [0,2]. Defaults to `0`\n
        `0` → No histogram or debug statements, data returned post processing\n
`1` → Histogram displayed and data returned post processing\n
`2` → Histogram displayed, debug statements printed, and data returned\n
Returns:\n
    ► data (pandas.core.frame.DataFrame): the same dataframe `data` after the required restrictions have been enforced via the Gaussian Distribution and `factor`\n
Raises:\n
`KeyError` → invalid parameter `param`
`ValueError` → `bins < 0`\n
`ValueError` → `factor < 0`\n
`ValueError` → `verbosity != 0` or `1` or `2`\n
"""
# scope constants
raw_array = None
acceptable_verbose = [0, 1, 2]
# prerequirement checks
try:
raw_array = np.array(data[param])
    except KeyError:
        raise KeyError(f"The parameter {param} is not present in the dataframe. Please provide a valid parameter")
if bins < 1:
raise ValueError(f"Must have at least 1 bin.\nValue provided: {bins}")
if factor < 0.:
raise ValueError(f"factor must be a positive number.\nValue provided: {factor}")
if verbose not in acceptable_verbose:
raise ValueError(f"Verbosity can only be 0, 1, or 2.\nValue provided: {verbose}")
mean = np.mean(raw_array) # mean
std = np.std(raw_array) # standard deviation
if verbose > 0: # histogram and fitting
_, bins, _ = plt.hist(x=raw_array, bins=bins, density=True, histtype="step") # histogram
if verbose == 2:
print(f"\u03bc: {mean}, \u03c3: {std}")
p = stats.norm.pdf(bins, mean, std)
plt.plot(bins, p, color="crimson", linestyle="-", label="Gaussian Fit")
plt.title(f"{param.upper()} Standardized Distribution with Gaussian Fit")
plt.axvline(mean + factor * std, color="chocolate", linestyle="-.", label="Max Threshold")
plt.axvline(mean - factor * std, color="navy", linestyle="-.", label="Min Threshold")
plt.legend(loc="best")
plt.xlabel(param)
plt.ylabel("counts")
plt.show()
# only retain data within thresholds
data = data[(data[param] <= mean + factor * std) & (data[param] >= mean - factor * std)]
return data
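# Illustrative usage sketch: the column name "mag" and the synthetic data below
# are made-up example values, not part of the original pipeline.
if __name__ == "__main__":
    import pandas as pd
    rng = np.random.default_rng(0)
    demo_df = pd.DataFrame({"mag": rng.normal(loc=15.0, scale=0.5, size=1000)})
    trimmed = chop_tails(demo_df, "mag", bins=30, factor=2, verbose=0)
    print(f"kept {len(trimmed)} of {len(demo_df)} rows within 2 standard deviations")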
|
python
|
"""
Module for version router
Contains functions and api endpoints for version listing
"""
import sys
from operator import attrgetter
from typing import List
from fastapi import APIRouter
from tracktor.error import ItemNotFoundException
from tracktor.models import VersionModel
router = APIRouter(prefix="/versions", tags=["version"])
def _get_versions() -> List[VersionModel]:
return [
VersionModel(
version=x,
changelog=attrgetter(f"{x}.__CHANGELOG__")(sys.modules["tracktor.routers"]),
)
for x in dir(sys.modules[__package__])
if x.startswith("v") and x != "version" and x != "version_router"
]
@router.get("/", response_model=List[VersionModel])
async def list_versions():
"""
Request to list all versions
"""
return _get_versions()
@router.get("/latest")
async def latest_version():
"""
Request to return the latest version
"""
try:
        return sorted([x.version for x in _get_versions()])[-1]
except IndexError as err:
raise ItemNotFoundException from err
|
python
|
import argon2
import b64
from typing import Optional, Dict, Union, List
from securerandom import rand_bytes
from kdfs.kdf import Kdf
class Argon2Kdf(Kdf):
@staticmethod
def sensitive():
return Argon2Kdf(12, 2 * 1024 * 1024, 8, argon2.Type.ID, rand_bytes(32))
@staticmethod
def fast():
return Argon2Kdf(2, 256 * 1024, 2, argon2.Type.ID, rand_bytes(16))
@staticmethod
def type_to_str(type: argon2.Type):
return {
argon2.Type.I: "argon2i",
argon2.Type.D: "argon2d",
argon2.Type.ID: "argon2id"
}[type]
@staticmethod
def str_to_type(type: str):
try:
return {
"argon2i": argon2.Type.I,
"argon2d": argon2.Type.D,
"argon2id": argon2.Type.ID
}[type]
except KeyError:
raise ValueError(f"Type must be one of ['argon2i', 'argon2d', 'argon2id'] (was '{type}')")
@staticmethod
def __type_to_int(type: argon2.Type):
return {
argon2.Type.I: 0,
argon2.Type.D: 1,
argon2.Type.ID: 2
}[type]
@staticmethod
def __int_to_type(type: int):
try:
return {
0: argon2.Type.I,
1: argon2.Type.D,
2: argon2.Type.ID
}[type]
except KeyError:
raise ValueError(f"The Argon2 type must be 0x0, 0x1, or 0x2 (was {hex(type)})")
@staticmethod
def deserialize(props: Dict[str, Union[str, int, bool, None, Dict, List]]) -> "Argon2Kdf":
ret = Argon2Kdf.sensitive()
base_keys = set(ret.serialize().keys())
if not base_keys.issubset(props.keys()):
raise ValueError(f"The properties dict is missing required keys {base_keys - props.keys()}")
ret.type = Argon2Kdf.str_to_type(props["algorithm"])
ret.version = props["version"]
ret.time_cost = props["time_cost"]
ret.memory_cost = props["memory_cost"]
ret.parallelism = props["parallelism"]
ret.salt = b64.decode(props["salt"])
return ret
def serialize(self) -> Dict[str, Union[str, int, bool, None, Dict, List]]:
return {
"algorithm": self.type_to_str(self.type),
"version": self.version,
"time_cost": self.time_cost,
"memory_cost": self.memory_cost,
"parallelism": self.parallelism,
"salt": b64.encode(self.salt),
}
def derive(self, password: str, out_len: int) -> bytes:
return argon2.low_level.hash_secret_raw(bytes(password, "utf-8"), self.salt, self.time_cost, self.memory_cost,
self.parallelism, out_len, self.type, self.version)
def __init__(self, time_cost: int, memory_cost: int, parallelism: int, type: argon2.Type,
salt: Optional[bytes] = None, version: int = argon2.low_level.ARGON2_VERSION):
self.time_cost = time_cost
self.memory_cost = memory_cost
self.parallelism = parallelism
self.type = type
self.salt = salt if salt is not None else rand_bytes(32)
self.version = version
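# Illustrative usage sketch: assumes argon2-cffi plus the project's `b64` and
# `securerandom` helpers are importable; the password is a made-up example.
if __name__ == "__main__":
    kdf = Argon2Kdf.fast()
    props = kdf.serialize()                  # JSON-friendly dict of parameters + salt
    restored = Argon2Kdf.deserialize(props)  # round-trips the salt and settings
    key = restored.derive("example-password", 32)
    assert key == kdf.derive("example-password", 32)
    print(props["algorithm"], len(key))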
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import subprocess as sp
import argparse
import os
import re
def get_paths(root, extension):
# scan directories
paths = []
for root, dirs, files in os.walk(root):
for name in files:
if name.endswith(extension):
paths.append(os.path.join(root, name))
paths.sort()
return paths
def print_title(title):
print('# '+'-'*77+'\n'+'# '+title+'\n'+'# '+'-'*77)
def dependencies(root, src_ext = '.cc'):
print_title('find dependencies')
# get paths
paths = get_paths(root, src_ext)
total_deps = 0
total_size = 0
for path in paths:
        # run gcc and get dependency data
command = 'g++ {0} -I{1} -MM'.format(path, root)
output = sp.Popen([command], shell=True, stdout=sp.PIPE).communicate()[0]
deps = len(output.replace('\\', '').split())-1
size = os.path.getsize(path)
# code analysis
print('{0:<60}: {1:6d}b {2:4d} deps'.format(path, size, deps))
total_deps += deps
total_size += size
print('Total {0:48d} files: {1:6d}b {2:4d} deps'.format(len(paths), total_size, total_deps))
def trailing_whitespaces(root, extensions):
print_title('check for trailing whitespaces')
paths = []
for ext in extensions:
paths += get_paths(root, ext)
paths.sort()
num = 0
for path in paths:
ifile = open(path, 'r')
lines = ifile.readlines()
ifile.close()
for linenum, line in enumerate(lines):
line = line[:-1] # strip newline
if line and line[-1].isspace():
print('{0:<60}\tline {1:3d}'.format(path, linenum+1))
num += 1
print('Total trailing whitespaces: {0:5d}'.format(num))
def get_functions(path):
class_name = ""
with open(path, 'r') as f:
        m = re.search(r'class\s+(\w+)\s*[{|:]', f.read())
        if m is not None:
class_name = m.group(1)
print('file: {0}: class {1}'.format(path, class_name))
else:
print('file: {0}: could not find class name'.format(path))
# run ctags to get functions/prototype names
command = 'ctags -x --c++-kinds=pf --language-force=c++ --sort=no {0}'.format(path)
output = sp.Popen([command], shell=True, stdout=sp.PIPE).communicate()[0]
lines = [i for i in output.split('\n') if i]
functions = []
for i in lines:
m = re.search(r'(.*)\s+(function|prototype)\s+([0-9]+)\s+(.*?)\s+(.*)', i)
        if m is None:
            print('error while reading regex on ' + i)
            continue
        functions.append(m.groups())
return class_name, functions
def lowercase_names(root, extensions):
print_title('check if function names starts with lowercase')
paths = []
# for given extensions, e.g. ['.cc', '.h']
for ext in extensions:
paths += get_paths(root, ext)
paths.sort()
lower = 0
total = 0
for path in paths:
class_name, functions = get_functions(path)
total += len(functions)
for function in functions:
name = function[0]
if name.find('operator') >= 0:
total -= 1
elif name.find('get_') >= 0:
total -= 1
elif name.find('set_') >= 0:
total -= 1
elif name[0].islower():
lower += 1
print('\t'+name)
perc = float(lower)/float(total)*100.
print('Functions : {0:6d}\nLowercase : {1:6d}\nPercentage : {2:6.2f}%'.format(total, lower, perc))
def grep(root, extensions, pattern):
print_title('grep pattern')
paths = []
for ext in extensions:
paths += get_paths(root, ext)
paths.sort()
total = 0
for path in paths:
with open(path, 'r') as f:
num = 0
print_path = False
for line in f.readlines():
m = re.search(pattern, line)
                if m is not None:
total += 1
if not print_path:
print('{0}'.format(path))
print_path = True
print(' {0}'.format(line.strip()))
    print('Found {0:5d} occurrences of {1}'.format(total, pattern))
def self_contained_headers(root, extensions, flags):
print_title('check if header files are self contained')
paths = []
for ext in extensions:
paths += get_paths(root, ext)
paths.sort()
num = 0
for path in paths:
num += 1
f = open('tmp.cc', 'w')
f.write('#include \"{0}\"\n'.format(path))
f.close()
print('checking: {0}'.format(path))
command = 'g++ {0} -c tmp.cc'.format(flags)
output = sp.Popen([command], shell=True, stdout=sp.PIPE).communicate()[0]
os.remove('tmp.cc')
os.remove('tmp.o')
def unit_tests(root):
print_title('create unit tests')
cc_paths = get_paths(root, '.cc')
h_paths = get_paths(root, '.h')
num = 0
for header_file in h_paths:
source_file = os.path.splitext(header_file)[0]+'.cc'
test_file = os.path.splitext(header_file)[0]+'_test.cc'
print(header_file)
if test_file in cc_paths:
print('\t{0:<64} exists'.format(test_file))
continue
if source_file.endswith('_test.cc'):
continue
num +=1
class_name, functions = get_functions(header_file)
s = "// automatic generated unit test file \n"
s += '#include <gtest/gtest.h>\n\n'
s += 'class {0}Test: public testing::Test {{\n'.format(class_name)
s += ' protected:\n'
# s += '\t{0} {1}();\n'.format(class_name,class_name.lower())
s += '\tvirtual void SetUp() {\n\t}\n'
s += '\tvirtual void TearDown() {\n\t}\n'
s += '};\n\n'
for function in functions:
name, kind, line_number, file_name, first_line = function
s += '// test: {0}\n'.format(first_line)
s += 'TEST_F({0}Test, {1}) {{\n}}\n\n'.format(class_name, name.strip())
ofile = open('msc/tests/'+os.path.split(test_file)[1], 'w')
ofile.write(s)
ofile.close()
print('Remaining tests : {0}'.format(num))
if __name__ == '__main__':
flags = '-Isrc -I/usr/include -I/usr/include/python2.6 -I/opt/acml4.4.0/gfortran32_mp/include -Wno-deprecated'
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--deps', action = 'store_true',
dest = 'deps', help = 'grep pattern')
parser.add_argument('--lowercase', action = 'store_true',
dest = 'fn', help = 'functions lowercase')
parser.add_argument('-w', '--whitespaces', action = 'store_true',
dest = 'w', help = 'trailing whitespaces')
parser.add_argument('-g', '--grep', default = None,
metavar = 'pattern', dest = 'greppat', help = 'grep pattern')
parser.add_argument('-s', '--self-cont', action = 'store_true',
dest = 'sc', help = 'self contained headers')
parser.add_argument('-c', '--cxx-ext', default = '.cc',
metavar = 'src_ext', dest = 'src_ext', help = 'source file extension')
parser.add_argument('-i', '--inc-ext', default = '.h',
metavar = 'inc_ext', dest = 'inc_ext', help = 'header file extension')
parser.add_argument('-r', '--root', default = 'src',
metavar = 'src', dest = 'src', help = 'root directory')
parser.add_argument('-f', '--flags', default = flags,
metavar = 'flags', dest = 'flags', help = 'compilation flags')
parser.add_argument('--unit-tests', action = 'store_true',
dest = 'unit_tests', help = 'create unit tests')
args = parser.parse_args()
src = args.src
src_ext = args.src_ext
inc_ext = args.inc_ext
if args.deps:
dependencies(args.src)
if args.w:
trailing_whitespaces(args.src, [src_ext, inc_ext])
if args.fn:
lowercase_names(args.src, [inc_ext])
    if args.greppat is not None:
grep(args.src, [src_ext, inc_ext], args.greppat)
if args.sc:
self_contained_headers(args.src, [inc_ext], args.flags)
if args.unit_tests:
unit_tests(args.src)
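# Example invocations (the filename "code_check.py" is illustrative; use
# whatever name this script is saved under):
#
#   python code_check.py --root src --whitespaces
#   python code_check.py -r src -g "TODO"
#   python code_check.py -r src --lowercase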
|
python
|
from class_names import CLASS_NAMES
import json
import numpy as np
import scipy.stats
import re
FEATURES = [
'calls_to_batchCancelOrders',
'calls_to_batchFillOrKillOrders',
'calls_to_batchFillOrders',
'calls_to_batchFillOrdersNoThrow',
'calls_to_cancelOrder',
'calls_to_cancelOrdersUpTo',
# 'calls_to_cancelled',
# 'calls_to_executeTransaction',
'calls_to_fillOrKillOrder',
'calls_to_fillOrder',
'calls_to_fillOrderNoThrow',
# 'calls_to_filled',
# 'calls_to_getAssetProxy',
# 'calls_to_getOrderInfo',
# 'calls_to_isValidSignature',
'calls_to_marketBuyOrders',
'calls_to_marketBuyOrdersNoThrow',
'calls_to_marketBuyOrdersWithEth',
'calls_to_marketSellOrders',
'calls_to_marketSellOrdersNoThrow',
'calls_to_marketSellOrdersWithEth',
'calls_to_matchOrders',
# 'calls_to_orderEpoch',
'calls_to_preSign',
# 'calls_to_registerAssetProxy',
# 'calls_to_transferOwnership',
'calls_to_tx_batchFillOrders',
'calls_to_tx_cancelOrder',
'calls_to_tx_fillOrKillOrder',
'calls_to_tx_fillOrder',
# 'max_calls',
# 'total_calls',
# 'total_fills',
# 'total_orders',
# 'unique_fee_recipients',
# 'unique_makers',
'unique_senders'
]
def softsign(x):
return x / (1 + abs(x))
def parse_cluster_data_item(data):
total_method_calls = sum(data['methods'].values())
max_method_calls = max(data['methods'].values())
total_senders = len([ a for a in data['senders'] if a != data['caller'] ])
total_fee_recipients = len(data['feeRecipients'])
total_makers = len(data['makers'])
return {
'caller': data['caller'],
'unique_senders': total_senders,
'unique_fee_recipients': total_fee_recipients,
'unique_makers': total_makers,
'total_calls': total_method_calls,
'total_orders': data['updateCount'],
'total_fills': data['fillCount'],
'max_calls': max_method_calls,
# Calls are encoded as proportions of the total calls.
**{ 'calls_to_%s' % k: v / total_method_calls for (k, v) in data['methods'].items() },
}
def find_features(call_data):
features = set()
for data_item in call_data:
features.update(data_item.keys())
return tuple(sorted(features))
def to_features(data_item):
fields = {
**data_item,
'unique_senders': softsign(data_item['unique_senders']),
'unique_fee_recipients': softsign(data_item['unique_fee_recipients']),
'unique_makers': softsign(data_item['unique_makers']),
}
return np.array([
fields[feature] if feature in fields else 0. for feature in FEATURES
])
def to_weight(data_item):
return max(1, data_item['total_orders'], data_item['unique_senders'])
def load_cluster_data(file):
with open(file) as f:
return [ parse_cluster_data_item(json.loads(line)) for line in f ]
def label_to_class_name(label):
name = CLASS_NAMES[label % len(CLASS_NAMES)] if label >= 0 else 'WILDLINGS'
if label >= len(CLASS_NAMES):
name = '%s_%d' % (name, label // len(CLASS_NAMES))
return name
def split_by_labels(call_data, labels, numeric=False):
return {
        label if numeric else label_to_class_name(label) : [
d
for (d, dl)
in zip(call_data, labels)
if dl == label
]
for label in labels
}
# Attenuates feature columns by normal distribution.
def attenuate_values(values, factor=1):
mean = np.mean(values)
std = np.std(values)
if std > 0:
return [
n * ((1 - factor) + factor * scipy.stats.norm.pdf((n - mean) / std))
for n in values
]
return values
# Intelligently collapse a cluster's features into a single row.
def collapse_features(cluster, weights, attenuate=0, brighten=0):
cols = list(zip(*cluster))
if attenuate > 0:
for i, col in enumerate(cols):
cols[i] = attenuate_values(col, attenuate)
call_features = frozenset(f for f in FEATURES if f.startswith('calls_to_'))
calls_sum_max = sum(
np.max(np.array(col) * weights)
for (col, f) in zip(cols, FEATURES)
if f in call_features
)
def collapse_column(col, feature):
if feature == 'unique_senders':
return max(col)
if feature == 'total_orders':
return sum(col)
if feature == 'total_fills':
return sum(col)
if feature.startswith('calls_to_'):
if np.max(col) > 0:
v = np.max([ n * w for (n, w) in zip(col, weights) ])
v = (v / calls_sum_max) ** (1 - brighten)
return v
return 0
return sum([ n * w for (n, w) in zip(col, weights) ]) / sum(weights)
return np.array([
collapse_column(col, feature)
for (col, feature)
in zip(cols, FEATURES)
])
# Intelligently collapse all clusters.
def collapse_clusters(call_data, labels, attenuate=0, brighten=0):
return np.array([
collapse_features(
[
to_features(row)
for (row_label, row) in zip(labels, call_data)
if row_label == label
],
[
to_weight(row)
for (row_label, row) in zip(labels, call_data)
if row_label == label
],
attenuate=attenuate,
brighten=brighten,
)
for label in sorted(frozenset(labels))
])
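# Minimal illustrative record: every field value below is made up solely to
# exercise the helpers above, and running this requires the project's
# class_names module to be importable.
if __name__ == "__main__":
    sample = {
        'caller': '0xaaa',
        'methods': {'fillOrder': 3, 'cancelOrder': 1},
        'senders': ['0xaaa', '0xbbb'],
        'feeRecipients': ['0xfee'],
        'makers': ['0xccc'],
        'updateCount': 4,
        'fillCount': 3,
    }
    row = parse_cluster_data_item(sample)
    print(row['calls_to_fillOrder'])  # 0.75 -- share of all method calls
    print(to_features(row))           # feature vector ordered by FEATURES
    print(to_weight(row))             # 4 -- max(1, total_orders, unique_senders)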
|
python
|
import argparse
import base64
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
def get_args():
"""
Collect command-line arguments and return the argument values
    :return: A tuple of three values: image file name (and path), the operation mode, and the max number of results
"""
parser = argparse.ArgumentParser(description='Call the Google Vision API to perform image analysis')
# Add arguments
parser.add_argument('-i', '--image', type=str, help='image file name', required=True)
parser.add_argument('-m', '--mode', type=str,
help='analysis mode: all, faces, landmark, labels, logos, text, or web', required=True)
parser.add_argument('-r', '--results', type=int, help='max number of results (default is 5)', default=5)
# Array for all arguments passed to script
args = parser.parse_args()
return args.image, args.mode, args.results
def request_labels(photo_file, max_results=5):
"""
Request the Google service to analyze an image and return labels (i.e. tags identifying objects in an image)
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: A list of tuples where each tuple includes a label and a confidence score. The list contains up to
max_results number of elements
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'LABEL_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
try:
label_list = response['responses'][0]['labelAnnotations']
labels = map(lambda s: (s.get('description', 'no_description'),
s.get('score', 'no_score')), label_list)
return labels
except KeyError:
return []
def request_text(photo_file, max_results=5):
"""
Request the Google service to find text in an image
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: A list of text entries found in the image
Note: The argument max_results does not modify the number of results for text detection
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'TEXT_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
text_list = response['responses'][0].get('textAnnotations', None)
if text_list is None:
return []
else:
text_vec = map(lambda s: s['description'].strip().strip('\n'), text_list)
return text_vec
def request_faces(photo_file, max_results=5):
"""
Request the Google service to find faces in an image
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: A list of JSON objects where each object describes a face. The JSON object includes the following
elements:
box: A list of four tuples describing the box coordinates for a face in the picture
score: A confidence score for face detection
joy: One of the variables in sentiment analysis of the face
sorrow: One of the variables in sentiment analysis of the face
anger: One of the variables in sentiment analysis of the face
surprise: One of the variables in sentiment analysis of the face
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'FACE_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
faces_list = response['responses'][0].get('faceAnnotations', None)
if faces_list is None:
return []
else:
face_features = []
for face in faces_list:
score = face["detectionConfidence"]
joy = face["joyLikelihood"]
sorrow = face["sorrowLikelihood"]
surprise = face["surpriseLikelihood"]
anger = face["angerLikelihood"]
vertices = face["boundingPoly"]["vertices"]
vert_list = map(lambda el: (el['x'], el['y']), vertices)
face_obj = {'score': score,
'joy': joy, 'sorrow': sorrow, 'surprise': surprise,
'anger': anger, 'box': vert_list}
face_features.append(face_obj)
return face_features
def request_logos(photo_file, max_results=5):
"""
Request the Google service to detect the presence of logos in an image
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: A list of tuples where each tuple has text identifying the detected logo and a confidence score
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'LOGO_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
logo_list = response['responses'][0].get('logoAnnotations', None)
if logo_list is None:
return []
else:
logo_features = map(lambda s: (s.get("description", "no_description"),
s.get("score", 'no_score')), logo_list)
return logo_features
def request_landmarks(photo_file, max_results=5):
"""
Request the Google service to detect the presence of landmarks in an image
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: A list of tuples where each tuple has text identifying the detected landmark and a confidence score
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'LANDMARK_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
landmark_list = response['responses'][0].get('landmarkAnnotations', None)
if landmark_list is None:
return []
else:
landmarks = map(lambda s: (s.get("description", 'no_description'),
s.get("score", 'no_score')), landmark_list)
return landmarks
def request_web_entities(photo_file, max_results=5):
"""
Request the Google service to detect related web entities and web pages
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: This function returns two lists. The first is a list of web entities. The second is a list of related
web pages. Each list is a collection of tuples. For web entities, each tuple includes the entity description,
the entity mid, and the entity score. For web pages, each tuple includes the page url and the score.
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'WEB_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
web_detection = response['responses'][0].get('webDetection', None)
if web_detection is None:
return [], []
else:
web_entities = web_detection.get('webEntities', None)
web_pages = web_detection.get('pagesWithMatchingImages', None)
# Add s.get('entityId', 'no_entityId') in order to see the mid for these entities
if web_entities is not None:
entities = map(lambda s: (s.get('description', 'no_description'),
s.get('score', 'no_score')), web_entities)
else:
entities = []
if web_pages is not None:
pages = map(lambda s: (s.get('url', 'no_url'), s.get('score', 'no_score')), web_pages)
else:
pages = []
return entities, pages
if __name__ == "__main__":
# Get image file name and operation mode
image, mode, mxres = get_args()
if mode in ['labels', 'all']:
results = request_labels(image, max_results=mxres)
print "\n-------- labels -----------------"
print "Number of labels: ", len(results)
for i, res in enumerate(results):
print str(i+1) + ") " + str(res)
if mode in ['text', 'all']:
results = request_text(image, max_results=mxres)
print "\n-------- text -----------------"
print "Number of text items: ", len(results)
for i, res in enumerate(results):
print str(i+1) + ") " + str(res)
if mode in ['faces', 'all']:
results = request_faces(image, max_results=mxres)
print "\n--------- faces ---------------"
print "Number of faces: ", len(results)
for i, res in enumerate(results):
print "\nface " + str(i+1) + ": "
print res
if mode in ['logos', 'all']:
results = request_logos(image, max_results=mxres)
print "\n--------- logos ---------------"
print "Number of logos: ", len(results)
for i, res in enumerate(results):
print str(i+1) + ") " + str(res)
if mode in ['landmarks', 'all']:
results = request_landmarks(image, max_results=mxres)
print "\n--------- landmarks ---------------"
print "Number of landmarks: ", len(results)
for i, res in enumerate(results):
print str(i+1) + ") " + str(res)
if mode in ['web', 'all']:
results = request_web_entities(image, max_results=mxres)
print "\n--------- web entities and pages ---------------"
print 'Number of web entities: ', len(results[0])
for i, res in enumerate(results[0]):
print str(i+1) + ") " + str(res)
print "\nNumber of web pages: ", len(results[1])
for i, res in enumerate(results[1]):
print str(i+1) + ") " + str(res)
|
python
|
"""Data is typically multi-dimensional. :class:`~smif.metadata.spec.Spec` is used to describe
each dataset which is supplied to - or output from - each :class:`~smif.model.model.Model` in a
:class:`~smif.model.model.CompositeModel`
"""
from collections import defaultdict
from smif.metadata.coordinates import Coordinates
class Spec(object):
"""N-dimensional metadata.
Spec labels each dimension with coordinates and enforces data type, units and absolute and
expected ranges.
The API here is modelled on :class:`xarray.DataArray`: dtype and shape describe a
:class:`numpy.ndarray`; dims and coords follow the xarray conventions for labelled axes;
and unit, abs_range and exp_range are introduced as supplementary metadata to
help validate connections between models.
Attributes
----------
name : str
The name of the data that this spec describes
description : str
A human-friendly description
dtype : str
Data type for data values
abs_range : tuple
Absolute range of data values
exp_range : tuple
Expected range of data values
shape : tuple[int]
Tuple of dimension sizes
ndim : int
Number of dimensions
dims : list[str]
Dimension names
coords : list[Coordinates]
Dimension coordinate labels
unit : str
Units of data values
Parameters
----------
name : str, optional
        Name to identify the variable described (typically an input, output or parameter)
description : str, optional
Short description
dims : list[str], optional
List of dimension names, must be provided if coords is a dict
coords : list[Coordinates] or dict[str, list], optional
        A list of :class:`Coordinates` or a dict mapping each dimension name to a list of names
which label that dimension.
dtype : str
        String suitable for constructing a simple :class:`numpy.dtype`
abs_range : tuple, optional
(min, max) absolute range for numeric values - can be used to raise errors
exp_range : tuple, optional
(min, max) expected range for numeric values - can be used to raise warnings
unit : str, optional
Unit to be used for data values
"""
def __init__(self, name=None, dims=None, coords=None, dtype=None,
abs_range=None, exp_range=None, unit=None, description=None):
self._name = name
self._description = description
# Coords may come as a dict, in which case dims must be provided to define order
if isinstance(coords, dict):
try:
coords, dims = self._coords_from_dict(coords, dims)
except (ValueError, KeyError) as error:
msg = "Coordinate metadata incorrectly formatted for variable '{}': {}"
raise ValueError(msg.format(self.name, error))
# Or as a list of Coordinates, in which case dims must not be provided
elif isinstance(coords, list):
coords, dims = self._coords_from_list(coords, dims)
# Or if None, this spec describes a zero-dimensional parameter - single value
else:
coords, dims = [], []
self._dims = dims
self._coords = coords
if dtype is None:
raise ValueError("Spec.dtype must be provided, in {}".format(self._name))
self._dtype = dtype
if abs_range is not None:
self._check_range(abs_range)
self._abs_range = abs_range
if exp_range is not None:
self._check_range(exp_range)
self._exp_range = exp_range
self._unit = unit
def _coords_from_list(self, coords, dims):
"""Set up coords and dims, checking for consistency
"""
for coord in coords:
if not isinstance(coord, Coordinates):
msg = "Spec.coords may be a dict[str,list] or a list[Coordinates], in {}"
raise ValueError(msg.format(self._name))
if dims is not None:
msg = "Spec.dims are derived from Spec.coords if provided as a list of " + \
"Coordinates, in {}"
raise ValueError(msg.format(self._name))
dims = [coord.dim for coord in coords]
if len(dims) != len(set(dims)):
msg = "Spec cannot be created with duplicate dims, in {}"
raise ValueError(msg.format(self._name))
return coords, dims
def _coords_from_dict(self, coords, dims):
"""Set up coords and dims, checking for consistency
"""
if dims is None:
msg = "Spec.dims must be specified if coords are provided as a dict, in {}"
raise ValueError(msg.format(self._name))
if len(dims) != len(set(dims)):
msg = "Spec cannot be created with duplicate dims, in {}"
raise ValueError(msg.format(self._name))
if sorted(dims) != sorted(coords.keys()):
msg = "Spec.dims must match the keys in coords, in {}"
raise ValueError(msg.format(self._name))
coords = [Coordinates(dim, coords[dim]) for dim in dims]
return coords, dims
@classmethod
def from_dict(cls, data_provided):
"""Create a Spec from a dict representation
"""
# default anything to None, let constructor handle essential missing values
data = defaultdict(lambda: None)
data.update(data_provided)
spec = Spec(
name=data['name'],
description=data['description'],
dims=data['dims'],
coords=data['coords'],
dtype=data['dtype'],
abs_range=data['abs_range'],
exp_range=data['exp_range'],
unit=data['unit']
)
return spec
def as_dict(self):
"""Serialise to dict representation
"""
return {
'name': self.name,
'description': self.description,
'dims': self._dims,
'coords': {c.name: c.ids for c in self._coords},
'dtype': self._dtype,
'abs_range': self._abs_range,
'exp_range': self._exp_range,
'unit': self._unit
}
@property
def name(self):
"""The name of the data that this spec describes.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def description(self):
"""A human-friendly description
"""
return self._description
@property
def dtype(self):
"""The dtype of the data that this spec describes.
"""
return self._dtype
@property
def abs_range(self):
"""The absolute range of data values that this spec describes.
"""
return self._abs_range
@property
def exp_range(self):
"""The expected range of data values that this spec describes.
"""
return self._exp_range
@property
def shape(self):
"""Tuple of dimension sizes. The shape of the data that this spec describes.
"""
return tuple(len(c.ids) for c in self._coords)
@property
def ndim(self):
"""The number of dimensions of the data that this spec describes.
"""
return len(self._coords)
@property
def dims(self):
"""Names for each dimension
"""
return list(self._dims)
@property
def coords(self):
"""Coordinate labels for each dimension.
"""
return list(self._coords)
def dim_coords(self, dim: str):
"""Coordinates for a given dimension
"""
if not isinstance(dim, str):
msg = "Expected string as argument, instead received {}"
raise TypeError(msg.format(type(dim)))
if dim not in self.dims:
raise KeyError("Could not find dim '{}' in Spec '{}'".format(dim, self._name))
for coord in self._coords:
if coord.dim == dim:
return coord
raise KeyError("Coords not found for dim '{}', in Spec '{}'".format(dim, self._name))
def dim_names(self, dim: str):
"""Names of each coordinate in a given dimension
"""
return self.dim_coords(dim).names
def dim_elements(self, dim: str):
"""Elements of each coordinate in a given dimension
"""
return self.dim_coords(dim).elements
@property
def unit(self):
"""The unit for all data points.
"""
return self._unit
def __eq__(self, other):
return self.dtype == other.dtype \
and self.dims == other.dims \
and self.coords == other.coords \
and self.unit == other.unit
def __hash__(self):
return hash((
self.dtype,
tuple(self.dims),
tuple(self.coords),
self.unit
))
def __repr__(self):
return "<Spec name='{}' dims='{}' unit='{}'>".format(self.name, self.dims, self.unit)
def _check_range(self, range_):
"""Error if range is not a [min, max] list or tuple
"""
if not _is_sequence(range_):
msg = "Spec range must be a list or tuple, got {} for {}"
raise TypeError(msg.format(range_, self._name))
if len(range_) != 2:
msg = "Spec range must have min and max values only, got {} for {}"
raise ValueError(msg.format(range_, self._name))
min_, max_ = range_
if max_ < min_:
msg = "Spec range min value must be smaller than max value, got {} for {}"
raise ValueError(msg.format(range_, self._name))
def _is_sequence(obj):
"""Check for iterable object that is not a string ('strip' is a method on str)
"""
return not hasattr(obj, "strip") \
and (hasattr(obj, "__getitem__") or hasattr(obj, "__iter__"))
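# Illustrative usage sketch: the variable name, dims and coordinate labels are
# made-up example values.
if __name__ == "__main__":
    spec = Spec(
        name="water_demand",
        description="Example water demand variable",
        dims=["region", "season"],
        coords={"region": ["north", "south"], "season": ["winter", "summer"]},
        dtype="float",
        unit="Ml",
        abs_range=(0, 1e9),
    )
    print(spec.shape)  # expected: (2, 2)
    print(spec.dims)   # expected: ['region', 'season']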
|
python
|
#!/usr/bin/env python
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Analytics Data API sample application demonstrating the use of
pagination to retrieve large result sets.
See https://developers.google.com/analytics/devguides/reporting/data/v1/rest/v1beta/properties/runReport#body.request_body.FIELDS.offset
for more information.
"""
# [START analyticsdata_run_report_with_pagination]
from google.analytics.data_v1beta import BetaAnalyticsDataClient
from google.analytics.data_v1beta.types import DateRange
from google.analytics.data_v1beta.types import Dimension
from google.analytics.data_v1beta.types import Metric
from google.analytics.data_v1beta.types import RunReportRequest
from run_report import print_run_report_response
def run_sample():
"""Runs the sample."""
# TODO(developer): Replace this variable with your Google Analytics 4
# property ID before running the sample.
property_id = "YOUR-GA4-PROPERTY-ID"
run_report_with_pagination(property_id)
def run_report_with_pagination(property_id="YOUR-GA4-PROPERTY-ID"):
"""Runs a report several times, each time retrieving a portion of result
using pagination."""
client = BetaAnalyticsDataClient()
# [START analyticsdata_run_report_with_pagination_page1]
request = RunReportRequest(
property=f"properties/{property_id}",
date_ranges=[DateRange(start_date="365daysAgo", end_date="yesterday")],
dimensions=[
Dimension(name="firstUserSource"),
Dimension(name="firstUserMedium"),
Dimension(name="firstUserCampaignName"),
],
metrics=[
Metric(name="sessions"),
Metric(name="conversions"),
Metric(name="totalRevenue"),
],
limit=100000,
offset=0,
)
response = client.run_report(request)
# [END analyticsdata_run_report_with_pagination_page1]
print_run_report_response(response)
# Run the same report with a different offset value to retrieve the second
# page of a response.
# [START analyticsdata_run_report_with_pagination_page2]
request = RunReportRequest(
property=f"properties/{property_id}",
date_ranges=[DateRange(start_date="365daysAgo", end_date="yesterday")],
dimensions=[
Dimension(name="firstUserSource"),
Dimension(name="firstUserMedium"),
Dimension(name="firstUserCampaignName"),
],
metrics=[
Metric(name="sessions"),
Metric(name="conversions"),
Metric(name="totalRevenue"),
],
limit=100000,
offset=100000,
)
response = client.run_report(request)
# [END analyticsdata_run_report_with_pagination_page2]
print_run_report_response(response)
# [END analyticsdata_run_report_with_pagination]
if __name__ == "__main__":
run_sample()
|
python
|
""" Transport for communicating with the LaunchKey API of HTTP"""
import requests
from .. import LAUNCHKEY_PRODUCTION
from .base import APIResponse, APIErrorResponse
class RequestsTransport(object):
"""
Transport class for performing HTTP based queries using the requests
library.
"""
url = LAUNCHKEY_PRODUCTION
testing = False
verify_ssl = True
def set_url(self, url, testing):
"""
:param url: Base url for the querying LaunchKey API
:param testing: Boolean stating whether testing mode is being
performed. This will determine whether SSL should be verified.
"""
self.url = url
self.testing = testing
self.verify_ssl = not self.testing
@staticmethod
def _parse_response(response):
try:
data = response.json()
except ValueError:
data = response.text
try:
response.raise_for_status()
parsed_response = APIResponse(data, response.headers,
response.status_code,
raw_data=response.text)
except requests.exceptions.HTTPError:
if response.status_code < 500:
parsed_response = APIErrorResponse(data, response.headers,
response.status_code,
response.reason,
response.text)
else:
raise
return parsed_response
def get(self, path, headers=None, data=None):
"""
Performs an HTTP GET request against the LaunchKey API
:param path: Path or endpoint that will be hit
:param headers: Headers to add onto the request
:param data: Dictionary or bytes to be sent in the query string for
the request.
:return:
"""
response = requests.get(self.url + path, params=data,
headers=headers, verify=self.verify_ssl)
return self._parse_response(response)
def post(self, path, headers=None, data=None):
"""
        Performs an HTTP POST request against the LaunchKey API
:param path: Path or endpoint that will be hit
:param headers: Headers to add onto the request
:param data: Dictionary, bytes, or file-like object to send in the
body of the request.
:return:
"""
response = requests.post(self.url + path, data=data,
headers=headers, verify=self.verify_ssl)
return self._parse_response(response)
def put(self, path, headers=None, data=None):
"""
        Performs an HTTP PUT request against the LaunchKey API
:param path: Path or endpoint that will be hit
:param headers: Headers to add onto the request
:param data: Dictionary, bytes, or file-like object to send in the
body of the request.
:return:
"""
response = requests.put(self.url + path, data=data,
headers=headers, verify=self.verify_ssl)
return self._parse_response(response)
def delete(self, path, headers=None, data=None):
"""
        Performs an HTTP DELETE request against the LaunchKey API
:param path: Path or endpoint that will be hit
:param headers: Headers to add onto the request
:param data: Dictionary, bytes, or file-like object to send in the
body of the request.
:return:
"""
response = requests.delete(self.url + path, data=data,
headers=headers,
verify=self.verify_ssl)
return self._parse_response(response)
def patch(self, path, headers=None, data=None):
"""
        Performs an HTTP PATCH request against the LaunchKey API
:param path: Path or endpoint that will be hit
:param headers: Headers to add onto the request
:param data: Dictionary, bytes, or file-like object to send in the
body of the request.
:return:
"""
response = requests.patch(self.url + path, data=data,
headers=headers, verify=self.verify_ssl)
return self._parse_response(response)
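# Illustrative usage sketch (comments only; the endpoint path is a made-up
# example, not taken from the LaunchKey API documentation):
#
#   transport = RequestsTransport()
#   transport.set_url(LAUNCHKEY_PRODUCTION, testing=False)
#   parsed = transport.get("/example/endpoint")
#
# `parsed` is an APIResponse for successful statuses, an APIErrorResponse for
# 4xx statuses, and the underlying requests HTTPError is re-raised for 5xx.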
|
python
|
# Copyright (c) IOTIC LABS LIMITED. All rights reserved. Licensed under the Apache License, Version 2.0.
from typing import Callable
import base58
from iotics.lib.identity.api.advanced_api import AdvancedIdentityLocalApi
from iotics.lib.identity.crypto.identity import make_identifier
from iotics.lib.identity.crypto.issuer import Issuer
from iotics.lib.identity.crypto.key_pair_secrets import DIDType, KeyPairSecrets, KeyPairSecretsHelper
from iotics.lib.identity.crypto.proof import Proof
from iotics.lib.identity.register.document_builder import RegisterDocumentBuilder
from iotics.lib.identity.register.keys import RegisterDelegationProof, RegisterPublicKey
def new_seed(length: int = 128) -> bytes:
return AdvancedIdentityLocalApi.create_seed(length)
def is_validator_run_success(validator: Callable, *args, **kwargs):
""" Run a validation helper and ensure it has been run.
By design the validators return nothing and they raise when something is invalid.
For the valid case we want to highlight the fact the validator has been called returning True
at the end of this helper."""
validator(*args, **kwargs)
return True
def get_delegation_proof(issuer: Issuer, key_pair_secrets: KeyPairSecrets, delegating_doc_id: str) -> Proof:
return Proof.build(key_pair_secrets, issuer, content=delegating_doc_id.encode())
def get_delegation_register_proof(subject_key_pair_secrets: KeyPairSecrets, delegating_doc_id: str,
subject_issuer: Issuer,
deleg_key_name='#DelegKey') -> RegisterDelegationProof:
proof = Proof.build(subject_key_pair_secrets, subject_issuer, content=delegating_doc_id.encode())
return RegisterDelegationProof.build(deleg_key_name,
controller=subject_issuer,
proof=proof.signature,
revoked=False)
def get_valid_document(seed: bytes, issuer_name: str, controller: Issuer = None):
secrets = KeyPairSecrets.build(seed, 'iotics/0/something/twin')
return get_valid_document_from_secret(secrets, issuer_name, controller)
def get_valid_document_from_secret(secrets: KeyPairSecrets, issuer_name: str, controller: Issuer = None):
public_base58 = KeyPairSecretsHelper.get_public_key_base58_from_key_pair_secrets(secrets)
public_bytes = base58.b58decode(public_base58)
doc_id = make_identifier(public_bytes)
proof = Proof.build(secrets, Issuer.build(doc_id, issuer_name), content=doc_id.encode())
return RegisterDocumentBuilder() \
.add_public_key_obj(RegisterPublicKey(issuer_name, public_base58, revoked=False)) \
.build(doc_id,
DIDType.TWIN,
proof=proof.signature,
revoked=False,
controller=controller)
def get_valid_delegated_doc_and_deleg_proof(seed: bytes, issuer_name: str, delegating_doc_id: str, deleg_name: str):
secrets = KeyPairSecrets.build(seed, 'iotics/0/something/twindeleg')
public_base58 = KeyPairSecretsHelper.get_public_key_base58_from_key_pair_secrets(secrets)
public_bytes = base58.b58decode(public_base58)
doc_id = make_identifier(public_bytes)
issuer = Issuer.build(doc_id, issuer_name)
proof = Proof.build(secrets, issuer, content=doc_id.encode())
deleg_key = get_delegation_register_proof(subject_key_pair_secrets=secrets,
delegating_doc_id=delegating_doc_id,
subject_issuer=Issuer.build(doc_id, issuer_name),
deleg_key_name=deleg_name)
delegated_doc = RegisterDocumentBuilder() \
.add_public_key_obj(RegisterPublicKey(issuer_name, public_base58, revoked=False)) \
.build(doc_id, DIDType.TWIN, proof=proof.signature, revoked=False)
return delegated_doc, deleg_key
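# Illustrative usage sketch: the issuer name is an arbitrary example value and
# the iotics-identity library must be installed for this to run.
if __name__ == "__main__":
    seed = new_seed()
    doc = get_valid_document(seed, '#ExampleIssuer')
    print(doc)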
|
python
|
import re
# machine snapshot platforms
LINUX = "LINUX"
MACOS = "MACOS"
WINDOWS = "WINDOWS"
ANDROID = "ANDROID"
IOS = "IOS"
IPADOS = "IPADOS"
TVOS = "TVOS"
PLATFORM_CHOICES = (
(LINUX, 'Linux'),
(MACOS, 'macOS'),
(WINDOWS, 'Windows'),
(ANDROID, 'Android'),
(IOS, 'iOS'),
(IPADOS, 'iPadOS'),
(TVOS, 'tvOS'),
)
PLATFORM_CHOICES_DICT = dict(PLATFORM_CHOICES)
# machine snapshot types
DESKTOP = "DESKTOP"
LAPTOP = "LAPTOP"
MOBILE = "MOBILE"
SERVER = "SERVER"
TABLET = "TABLET"
TV = "TV"
VM = "VM"
TYPE_CHOICES = (
(DESKTOP, 'Desktop'),
(LAPTOP, 'Laptop'),
(MOBILE, 'Mobile'),
(SERVER, 'Server'),
(TABLET, 'Tablet'),
(TV, 'TV'),
(VM, 'Virtual machine'),
)
TYPE_CHOICES_DICT = dict(TYPE_CHOICES)
# utils
HARDWARE_MODEL_SERIAL_MACHINE_TYPE_DICT = {
'appletv': TV,
'imac': DESKTOP,
'ipad': TABLET,
'iphone': MOBILE,
'macbook': LAPTOP,
'macmini': DESKTOP,
'macpro': DESKTOP,
'powermac': DESKTOP,
'vmware': VM,
'xserve': SERVER,
}
# source http://www.techrepublic.com/blog/data-center/mac-address-scorecard-for-common-virtual-machine-platforms/
# last check 20161215
KNOWN_VM_MAC_PREFIXES = {
'0003FF', # Microsoft Corporation (Hyper-V, Virtual Server, Virtual PC)
'005056', '000C29', '000569', # VMware, Inc. (VMware ESX 3, Server, Workstation, Player)
'00163E', # Xensource, Inc.
'001C42', # Parallels, Inc.
'080027', # PCS Systemtechnik GmbH (VirtualBox)
}
def platform_with_os_name(os_name):
if not os_name:
return
os_name = os_name.lower().replace(" ", "")
if "macos" in os_name or "osx" in os_name:
return MACOS
elif "ios" in os_name:
return IOS
elif "ipados" in os_name:
return IPADOS
elif "tvos" in os_name:
return TVOS
elif "windows" in os_name:
return WINDOWS
else:
        for distro in ('centos', 'fedora', 'redhat', 'rhel',
'debian', 'ubuntu',
'gentoo',
'linux'):
if distro in os_name:
return LINUX
def update_ms_tree_platform(tree):
os_version_t = tree.get("os_version", {})
os_name = os_version_t.get("name")
platform = platform_with_os_name(os_name)
if platform:
tree["platform"] = platform
def update_ms_tree_type(tree):
system_info_t = tree.get("system_info", {})
for attr in ("hardware_model", "hardware_serial"):
val = system_info_t.get(attr)
if val:
val = val.lower()
for prefix, ms_type in HARDWARE_MODEL_SERIAL_MACHINE_TYPE_DICT.items():
if val.startswith(prefix):
tree["type"] = ms_type
return
network_interfaces = tree.get("network_interfaces")
if network_interfaces and \
all(isinstance(ni.get("mac"), str) and ni["mac"].replace(":", "")[:6].upper() in KNOWN_VM_MAC_PREFIXES
for ni in network_interfaces):
tree["type"] = VM
return
cpu_brand = system_info_t.get("cpu_brand")
if cpu_brand and "xeon" in cpu_brand.lower():
tree["type"] = SERVER
def has_deb_packages(machine_snapshot):
os_version = machine_snapshot.os_version
if not os_version:
return False
os_name = os_version.name
if not os_name:
return False
os_name = os_name.lower()
return "ubuntu" in os_name or "debian" in os_name
MACOS_BUILD_RE = re.compile(r"(?P<minor>[0-9]{1,2})(?P<patch_letter>[A-Z])[1-9]+[a-z]?")
def macos_version_from_build(build):
match = MACOS_BUILD_RE.match(build)
if match:
patch = ord(match.group("patch_letter")) - 65
minor = int(match.group("minor")) - 4
if minor < 8:
# the patch letters are not always consecutive for older versions
# probably because of the different architectures.
raise ValueError("Cannot parse build str for macos < 10.8")
if minor < 12:
name = "OS X"
else:
name = "macOS"
if minor >= 16:
major = 11
minor = max(0, patch - 1)
if build in ("20B29", "20B50", "20D74", "20D75"):
patch = 1
elif build in ("20D80",):
patch = 2
else:
patch = 0
else:
major = 10
return {
"name": name,
"major": major,
"minor": minor,
"patch": patch,
"build": build
}
else:
raise ValueError("Bad build number")
|
python
|
from __future__ import absolute_import, print_function, division, unicode_literals
import sys
import os
from subprocess import Popen, PIPE
from time import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import datasets
from mlfromscratch.util import (
normalize,
logloss,
roc_auc,
normLL,
)
from mlfromscratch.RegressionSGD import RegressionSGD
from mlfromscratch.RegressionTree import RegressionTree
from mlfromscratch.RandomForest import RandomForest
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
def create_models():
"Return a list of untrained models"
sgd_args = dict(learning_rate=0.02, minibatch=1, l2=0.0001,
n_epochs=100, verbose=False, holdout_proportion=0.0,
normalize_data=False)
tree_args = dict(max_depth=6, min_samples_split=20, min_samples_leaf=10)
n_forest_trees = 50
forest_args = dict(max_depth=4, max_features=12, min_samples_split=20,
min_samples_leaf=10)
# Note on the SGD args:
# Minibatch sizes >1 work better, but sklearn's SGD solver doesn't
# make it easy to use minibatches. I'm using minibatch=1 just to
# make the comparison easy.
# Sklearn also has an algo called LogisticRegression, but SGDClassifier
# is more directly comparable to mine.
return [
(
("Logistic Regression", "from scratch"),
RegressionSGD(loss="logloss", **sgd_args)
),
(
("Logistic Regression", "sklearn"),
SGDClassifier(loss="log", penalty="l2", alpha=sgd_args["l2"],
learning_rate="constant", eta0=sgd_args["learning_rate"],
n_iter=sgd_args["n_epochs"])
),
(
("Decision Tree", "from scratch"),
RegressionTree(loss="logloss", **tree_args)
),
(
("Decision Tree", "sklearn"),
DecisionTreeClassifier(criterion="entropy", **tree_args)
),
(
("Random Forest", "from scratch"),
RandomForest(loss="logloss", num_trees=n_forest_trees,
**forest_args)
),
(
("Random Forest", "sklearn"),
RandomForestClassifier(criterion="entropy", n_estimators=n_forest_trees,
**forest_args)
),
# TODO: add gradient boosting when it's done
]
def try_model(model, train_data, test_data, train_targets, test_targets):
"Train a model, test it on holdout data, return metrics"
start_train = time()
try:
# sklearn calls the method 'fit', I'm stubborn and call it train
model.train(train_data, train_targets)
except AttributeError:
model.fit(train_data, train_targets)
end_train = time()
test_pred = model.predict_proba(test_data)
# sklearn's output often has 2 columns, but since this is binary
# prediction we only need 1.
if len(test_pred.shape) == 2:
test_pred = test_pred[:,1]
end_pred = time()
test_ll = logloss(test_targets, test_pred)
test_roc = roc_auc(test_targets, test_pred)
# for fun, let's look at training error also
train_pred = model.predict_proba(train_data)
if len(train_pred.shape) == 2:
train_pred = train_pred[:,1]
train_ll = logloss(train_targets, train_pred)
train_roc = roc_auc(train_targets, train_pred)
train_time = end_train - start_train
pred_time = end_pred - end_train
return test_ll, test_roc, train_ll, train_roc, train_time, pred_time
def test_models(csv_name):
metrics_cols = ["model", "source", "target", "roc_auc", "norm_ll",
"train_roc_auc", "train_norm_ll", "train_time", "pred_time"]
metrics_data = dict((k,[]) for k in metrics_cols)
digits = datasets.load_digits() #has attributes digits.data, digits.target
# for each target, run each model 3 times on different datasets
for run in range(3):
for target_val in range(10):
# to see how these models compete on a wider variety of data,
# let's get a different train/test split for each run
np.random.seed(10 * run + target_val)
(train_data, holdout_data, train_targets, holdout_targets
) = train_test_split(
digits.data,
np.array(digits.target == target_val, dtype=float),
test_size=0.25
)
train_mean = np.mean(train_data, axis=0)
train_std = np.std(train_data, axis=0)
norm_train_data = normalize(train_data, train_mean, train_std)
norm_holdout_data = normalize(holdout_data, train_mean, train_std)
test_br = np.mean(holdout_targets)
train_br = np.mean(train_targets)
# create all models fresh, ready to be trained
for (mod_name, source), mod in create_models():
ll, roc, train_ll, train_roc, ttime, ptime = try_model(
mod,
norm_train_data,
norm_holdout_data,
train_targets,
holdout_targets
)
metrics_data["model"].append(mod_name)
metrics_data["source"].append(source)
metrics_data["target"].append(target_val)
metrics_data["roc_auc"].append(roc)
metrics_data["norm_ll"].append(normLL(ll, test_br))
metrics_data["train_roc_auc"].append(train_roc)
metrics_data["train_norm_ll"].append(normLL(train_ll, train_br))
metrics_data["train_time"].append(ttime)
metrics_data["pred_time"].append(ptime)
df = pd.DataFrame(metrics_data)
df.to_csv(csv_name, index=False)
print("Wrote {0:d} rows to {1}".format(df.shape[1], csv_name))
if __name__ == "__main__":
csv_name = os.path.abspath(sys.argv[1])
if os.path.exists(csv_name):
print("{0} exists, will skip re-creating that file".format(csv_name))
else:
print("Benchmarking...")
test_models(csv_name)
dir_path = os.path.dirname(os.path.realpath(__file__))
r_path = os.path.join(dir_path, "compare_models.R")
plots_dir = os.path.join(dir_path, "plots/")
if not os.path.isdir(plots_dir):
os.mkdir(plots_dir)
r_cmd = ["Rscript", "--vanilla", r_path, csv_name, plots_dir]
print(" ".join(r_cmd))
status = Popen(r_cmd, stderr=PIPE).wait()
if status != 0:
raise Exception("Status: {0}".format(status))
print("Wrote plots to {}".format(plots_dir))
|
python
|
''' Module defining the toolkit in use.
Created on Feb 13, 2019
@author: albertgo
'''
import os
# determine toolkit to be used when loading molecules
TOOLKIT = os.environ.get("CDDLIB_TOOLKIT",None)
if not TOOLKIT:
# default is openeye but try falling back on rdkit if oe is not available
try:
import openeye.oechem
TOOLKIT = "openeye"
except ModuleNotFoundError:
TOOLKIT = "rdkit"
|
python
|
__all__ = ['HighCardinalityStringIndexer']
import json
import logging
from typing import Dict, List
import pyspark.sql.functions as F
from pyspark import SparkContext, keyword_only
from pyspark.ml import Estimator, Transformer
from pyspark.ml.feature import IndexToString, StringIndexer
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCols
from pyspark.sql import Column, DataFrame
from python_data_utils.spark.data.params import BaseParams
def with_meta(self, alias, meta):
"""
    In pyspark 2.1 there is no simple way to change the metadata of a column; that only became available in pyspark 2.2.
This is a function that takes a column and modifies its metadata.
:param self: A pyspark column
:param alias:
:param meta: New meta data for the column
"""
sc = SparkContext._active_spark_context
jmeta = sc._gateway.jvm.org.apache.spark.sql.types.Metadata
return Column(getattr(self._jc, "as")(alias, jmeta.fromJson(json.dumps(meta))))
class HighCardinalityStringIndexerModel(Transformer):
"""
A Transformer that transforms a DataFrame according to the logic obtained by fitting the HighCardinalityStringIndexer
"""
def __init__(self,
dict_indexers: Dict,
inputCols: List[str],
outputCols: List[str],
dropInputCols: bool = False,
groupText: str = 'other',
returnIndexed: bool = True) -> None:
"""
:param dict_indexers: A dictionary with each element being another dictionary containing an element 'indexer'
with a StringIndexer object and an element 'n_to_keep' that indicates how many indexes to keep.
:param inputCols: String columns that need to be indexed
:param outputCols:
:param dropInputCols: Should the input columns be dropped?
:param groupText: String to use as replacement for the observations that need to be grouped.
:param returnIndexed: If True, return the indexed columns. If False, return the columns with their String values,
where only the grouped observations are changed.
"""
super().__init__()
self.dict_indexers = dict_indexers
self.inputCols = inputCols
self.outputCols = outputCols
self.dropInputCols = dropInputCols
self.groupText = groupText
self.returnIndexed = returnIndexed
@staticmethod
def __logger() -> logging.Logger:
""" Returns a reference to the logger to be used in this class
Storing the logger as an attribute, and then referring to it in functions, can get it on the closure.
Resulting in a lock object related to the logger to be included, which isn't serializable.
"""
return logging.getLogger(__name__)
def _transform(self, df) -> DataFrame:
"""
:param df: A pyspark.sql.dataframe.DataFrame
"""
# Apply string indexer
for in_col, out_col in zip(self.inputCols, self.outputCols):
self.__logger().info("Applying StringIndexer on col {}".format(in_col))
df = self.dict_indexers[in_col]['indexer'].transform(df)
n_to_keep = self.dict_indexers[in_col]['n_to_keep']
# If elements occur below (threshold * number of rows), replace them with n_to_keep.
this_meta = df.select(out_col).schema.fields[0].metadata
if n_to_keep != len(this_meta['ml_attr']['vals']):
this_meta['ml_attr']['vals'] = this_meta['ml_attr']['vals'][0:(n_to_keep + 1)]
this_meta['ml_attr']['vals'][n_to_keep] = self.groupText
self.__logger().info("Truncating number of categories of {} at {}".format(in_col, n_to_keep))
df = df.withColumn(out_col,
F.when(F.col(out_col) >= n_to_keep, F.lit(n_to_keep)).otherwise(
F.col(out_col)))
# add the new indexed column with correct metadata, remove original indexed column.
df = df.withColumn(out_col,
with_meta(F.col(out_col), "", this_meta))
if not self.returnIndexed:
for output_col in self.outputCols:
df = df.withColumnRenamed(output_col, output_col + '_temp')
df = IndexToString(inputCol=output_col + '_temp', outputCol=output_col).transform(df)
df = df.drop(output_col + '_temp')
if self.dropInputCols:
df = df.drop(*self.inputCols)
return df
class HighCardinalityStringIndexer(Estimator, BaseParams, HasInputCols):
"""
    This is a class that can be used in combination with HighCardinalityStringIndexerModel to reduce the
    cardinality of high-cardinality categorical features, while simultaneously indexing them to be ready for use in a machine learning algorithm.
    For each column, it replaces all observations that occur in less than a 'threshold' fraction of the rows in the dataframe with 'groupText'.
It does so by calling pyspark.ml.feature.StringIndexer on the column, and subsequently replacing values and changing the metadata of the column.
By also changing the metadata we ensure that we can later extract the text values from the indexed columns if desired.
Example --------------------------------------------------------------------
>>> df = pd.DataFrame({'x1': ['a', 'b', 'a', 'b', 'c'], # a: 0.4, b: 0.4, c: 0.2
>>> 'x2': ['a', 'b', 'a', 'b', 'a'], # a: 0.6, b: 0.4, c: 0.0
>>> 'x3': ['a', 'a', 'a', 'a', 'a'], # a: 1.0, b: 0.0, c: 0.0
>>> 'x4': ['a', 'b', 'c', 'd', 'e']}) # a: 0.2, b: 0.2, c: 0.2, d: 0.2, e: 0.2
>>>
>>> df = sqlContext.createDataFrame(df)
>>> df.show()
+---+---+---+---+
| x1| x2| x3| x4|
+---+---+---+---+
| a| a| a| a|
| b| b| a| b|
| a| a| a| c|
| b| b| a| d|
| c| a| a| e|
+---+---+---+---+
>>> # Replace all values that occur in less than 25% of the rows.
>>> indexer = HighCardinalityStringIndexer(inputCols=df.columns,
    >>>                                            outputCols=['ix_' + col for col in df.columns],
>>> threshold=0.25).fit(df)
>>> df = indexer.transform(df)
>>> df.show()
+---+---+---+---+-----+-----+-----+-----+
| x1| x2| x3| x4|ix_x1|ix_x2|ix_x3|ix_x4|
+---+---+---+---+-----+-----+-----+-----+
| a| a| a| a| 0.0| 0.0| 0.0| 0.0|
| b| b| a| b| 1.0| 1.0| 0.0| 0.0|
| a| a| a| c| 0.0| 0.0| 0.0| 0.0|
| b| b| a| d| 1.0| 1.0| 0.0| 0.0|
| c| a| a| e| 2.0| 0.0| 0.0| 0.0|
+---+---+---+---+-----+-----+-----+-----+
>>> # Optionally, obtain the labels after grouping
>>> indexer = HighCardinalityStringIndexer(inputCols=df.columns,
    >>>                                            outputCols=['grouped_' + col for col in df.columns],
>>> threshold=0.25,
>>> returnIndexed=False).fit(df)
>>> df = indexer.transform(df)
>>> df.show()
+---+---+---+---+----------+----------+----------+----------+
| x1| x2| x3| x4|grouped_x1|grouped_x2|grouped_x3|grouped_x4|
+---+---+---+---+----------+----------+----------+----------+
| a| a| a| a| a| a| a| other|
| b| b| a| b| b| b| a| other|
| a| a| a| c| a| a| a| other|
| b| b| a| d| b| b| a| other|
| c| a| a| e| other| a| a| other|
+---+---+---+---+----------+----------+----------+----------+
"""
outputCols = \
Param(Params._dummy(), "outputCols",
"The output columns",
typeConverter=TypeConverters.toListString)
dropInputCols = \
Param(Params._dummy(), "dropInputCols",
"Drop the input columns?",
typeConverter=TypeConverters.toBoolean)
threshold = \
Param(Params._dummy(), "threshold",
"Group observations if they occur in less than threshold*100% of the rows",
typeConverter=TypeConverters.toFloat)
groupText = \
Param(Params._dummy(), "groupText",
"The text to use to bin grouped observations",
typeConverter=TypeConverters.toString)
returnIndexed = \
Param(Params._dummy(), "returnIndexed",
"Return the indexed columns, or their string representations?",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self,
inputCols: List[str],
outputCols: List[str],
dropInputCols: bool = False,
threshold: float = .01,
groupText: str = 'other',
returnIndexed: bool = True) -> None:
"""
:param inputCols: String columns that need to be indexed
:param dropInputCols: Should the input columns be dropped?
        :param threshold: Replace all observations that occur in less than a 'threshold' fraction of the rows.
:param groupText: String to use as replacement for the observations that are binned because they occur in low frequency.
:param returnIndexed: If True, return the indexed columns. If False, return the columns with their String values,
where only the grouped observations are changed.
"""
super().__init__()
self._setDefault(inputCols=None)
self._setDefault(outputCols=None)
self._setDefault(dropInputCols=False)
self._setDefault(threshold=0.01)
self._setDefault(groupText='other')
self._setDefault(returnIndexed=True)
kwargs = self._get_init_args_as_kwargs()
self.setParams(**kwargs)
@keyword_only
def setParams(self,
inputCols: List[str],
outputCols: List[str],
dropInputCols: bool = False,
threshold: float = .01,
groupText: str = 'other',
returnIndexed: bool = True):
kwargs = self._get_params_args_as_kwargs()
return self._set(**kwargs)
def getOutputCols(self) -> List[str]:
return self.getOrDefault(self.outputCols)
def getDropInputCols(self) -> bool:
return self.getOrDefault(self.dropInputCols)
def getThreshold(self) -> float:
return self.getOrDefault(self.threshold)
def getGroupText(self) -> str:
return self.getOrDefault(self.groupText)
def getReturnIndexed(self) -> bool:
return self.getOrDefault(self.returnIndexed)
@staticmethod
def __logger() -> logging.Logger:
""" Returns a reference to the logger to be used in this class
        Storing the logger as an attribute and then referring to it inside functions can pull it into a closure.
        That closure would also capture the logger's internal lock object, which isn't serializable.
"""
return logging.getLogger(__name__)
def _fit(self, df) -> HighCardinalityStringIndexerModel:
"""
:param df: A pyspark.sql.dataframe.DataFrame
"""
total = df.count()
# For each column, calculate the number of unique elements to keep
dict_indexers = {}
for in_col, out_col in zip(self.getInputCols(), self.getOutputCols()):
self.__logger().info("Fitting StringIndexer on '{}'".format(in_col))
string_indexer = StringIndexer(inputCol=in_col,
outputCol=out_col,
handleInvalid='skip').fit(df)
self.__logger().info("Determining number of categories of '{}' to keep".format(in_col))
n_to_keep = df.groupby(in_col) \
.agg((F.count(in_col) / total).alias('perc')) \
.filter(F.col('perc') > self.getThreshold()) \
.count()
self.__logger().info("Finished processing '{}'.".format(in_col))
if n_to_keep == 0:
self.__logger().info("Every unique value of "
"{} occurs less than fraction {} times count {}".format(in_col,
self.getThreshold(),
total)
+ "Therefore should exclude the column from the output") # TODO: exclude it
dict_indexers[in_col] = {'indexer': string_indexer, 'n_to_keep': n_to_keep}
return HighCardinalityStringIndexerModel(
dict_indexers=dict_indexers,
inputCols=self.getOrDefault(self.inputCols),
outputCols=self.getOrDefault(self.outputCols),
dropInputCols=self.getOrDefault(self.dropInputCols),
groupText=self.getOrDefault(self.groupText),
returnIndexed=self.getOrDefault(self.returnIndexed)
)
|
python
|
from pathlib import Path
import pandas as pd
import ibis
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.backends.tests.base import BackendTest, RoundHalfToEven
class TestConf(BackendTest, RoundHalfToEven):
check_names = False
additional_skipped_operations = frozenset({ops.StringSQLLike})
supported_to_timestamp_units = BackendTest.supported_to_timestamp_units | {
'ns'
}
supports_divide_by_zero = True
returned_timestamp_unit = 'ns'
@staticmethod
def connect(data_directory: Path):
return ibis.pandas.connect(
dictionary={
'functional_alltypes': pd.read_csv(
str(data_directory / 'functional_alltypes.csv'),
index_col=None,
dtype={'bool_col': bool, 'string_col': str},
parse_dates=['timestamp_col'],
encoding='utf-8',
),
'batting': pd.read_csv(str(data_directory / 'batting.csv')),
'awards_players': pd.read_csv(
str(data_directory / 'awards_players.csv')
),
}
)
@property
def functional_alltypes(self) -> ir.TableExpr:
return self.connection.table("functional_alltypes")
@property
def batting(self) -> ir.TableExpr:
return self.connection.table("batting")
@property
def awards_players(self) -> ir.TableExpr:
return self.connection.table("awards_players")
|
python
|
from selenium import webdriver
# https://askubuntu.com/questions/870530/how-to-install-geckodriver-in-ubuntu
def start_firefox():
my_browser = webdriver.Firefox()
    # send the browser to a specific URL
my_browser.get('https://pitpietro.github.io/')
# simulate the click on a link (and redirect to that link)
about_element = my_browser.find_element_by_css_selector('div.list__item:nth-child(6) > article:nth-child(1) > h2:nth-child(1) > a:nth-child(1)')
about_element.click()
if __name__ == '__main__':
start_firefox()
exit(0)
|
python
|
from indice_pollution.extensions import db
class Zone(db.Model):
__table_args__ = {"schema": "indice_schema"}
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String)
code = db.Column(db.String)
@classmethod
def get(cls, code, type_):
return Zone.query.filter_by(code=code, type=type_).first()
|
python
|
from django import template
import datetime
from cart.cart import Cart
from cart.settings import CART_TEMPLATE_TAG_NAME
register = template.Library()
@register.simple_tag(takes_context=True, name=CART_TEMPLATE_TAG_NAME)
def get_cart(context, session_key=None):
"""
Make the cart object available in template.
Sample usage::
{% load carton_tags %}
{% get_cart as cart %}
{% for product in cart.products %}
{{ product }}
{% endfor %}
"""
request = context['request']
return Cart(request.session, session_key=session_key)
#register.simple_tag(takes_context=True, name=CART_TEMPLATE_TAG_NAME)
|
python
|
import unittest
#import env
from canvas import Canvas
from color import Color
class TestCanvas(unittest.TestCase):
def test_canvas(self):
c = Canvas(10, 20)
self.assertEqual(c.width, 10)
self.assertEqual(c.height, 20)
def test_write_pixel(self):
c = Canvas(10, 20)
r = Color(1, 0, 0)
c.write_pixel(2, 3, r)
self.assertEqual(c.pixel_at(2, 3), r.rgb())
|
python
|
"""
Collect email information
"""
import os
import subprocess
import json
import base64
from flask import current_app
from pathlib import Path
from ..functions import get_python_path
class Email:
""" Set up an email """
config_path = None
__addresses = []
__ccs = []
__files = []
__html = ''
__text = ''
__subject = ''
__data = {}
send_as_one = None
def __init__(self, send_as_one=False):
self.config_path = None
self.__addresses = []
self.__ccs = []
self.__files = []
self.__html = ''
self.__text = ''
self.__subject = ''
self.__data = {}
self.send_as_one = send_as_one
if 'CONFIG_PATH' in current_app.config:
self.config_path = current_app.config['CONFIG_PATH']
def add_address(self, address):
""" Add email address """
self.__addresses.append(address)
return self
def add_addresses(self, addresses):
""" Add addresses from array """
self.__addresses.extend(addresses)
return self
def add_cc(self, address):
""" Add carbon copy """
self.__ccs.append(address)
return self
def add_ccs(self, addresses):
""" Add carbon copy with array of addresses """
self.__ccs.extend(addresses)
return self
def add_file(self, absolute_file_path):
""" Add attachment to email """
self.__files.append(absolute_file_path)
return self
def html(self, html):
""" Set html content """
self.__html = html
return self
def text(self, text):
""" Set text content """
self.__text = text
return self
def subject(self, subject):
""" Set email subject """
self.__subject = subject
return self
def __create(self):
""" Construct the email """
self.__data = json.dumps({
'config_path': self.encode(self.config_path),
'subject': self.encode(self.__subject),
'text': self.encode(self.__text),
'html': self.encode(self.__html),
'files': self.__files,
'send_as_one': self.send_as_one,
'addresses': self.__addresses,
'ccs': self.__ccs,
})
def encode(self, value):
""" Encode parts of email to base64 for transfer """
return base64.b64encode(value.encode()).decode()
def send(self):
"""
        Construct and execute the sendemail.py script.
        Finds the python binary via get_python_path(), then
        uses it to execute the email script in a subprocess.
"""
self.__create()
email_script = \
os.path.join(Path(__file__).parents[1], 'scripts', 'sendemail.py')
if os.path.exists(email_script):
subprocess.Popen(
[get_python_path(), email_script, self.__data],
stdin=None, stdout=None, stderr=None, close_fds=True)
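# Hedged usage sketch (illustration only, not part of the original module): the
# setters above return `self`, so an email can be composed and dispatched in a
# single chain. The addresses, subject and bodies below are made-up example
# values, and an active Flask application context is assumed for current_app.
def _send_example_email():
    (Email(send_as_one=True)
        .add_address('[email protected]')
        .add_cc('[email protected]')
        .subject('Nightly report')
        .text('Plain-text body')
        .html('<p>HTML body</p>')
        .send())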
|
python
|
__version__ = "0.0.10"
from .core import *
|
python
|
"""
Program to classify the SNPs based on the following:
1. whether they are at stop codons
2. whether they are located on a functional gene sequence
3. whether they are synonymous (S) or non-synonymous (N)
4. whether they are at fourfold degenerate sites (S) or non-degenerate sites (N)
and count the number of SNPs in each class,
and will ultimately estimate the following:
1. Pi: Nucleotide diversity: 2*p*(1-p) where p is the alternative allele frequency
2. Pi(N)/Pi(S): Pi(non-degenerate sites) / Pi(fourfold degenerate sites) ???
3. pN/pS: the ratio of non-synonymous to synonymous polymorphism rates
"""
from __future__ import division
import re, vcf, copy, argparse
import numpy as np
from Bio import SeqIO
from gff3 import Gff3
import pnps_dnds
def parse_args():
""" Return dictionary of command line arguments
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
usage=argparse.SUPPRESS)
parser.add_argument('program', help=argparse.SUPPRESS)
parser.add_argument('--name', type=str, dest='name', required=True,
help="""Name or identifier of the project""")
parser.add_argument('--fna', type=str, dest='fna', required=True,
help="""Path to consensus genome sequence""")
parser.add_argument('--vcf', type=str, dest='vcf', required=True,
help="""Path to core-genome SNPs""")
parser.add_argument('--coords', type=str, dest='coords', default=None,
help="""Path to core-genome coordinates""")
parser.add_argument('--gff', type=str, dest='gff', default=None,
help="""Path to gff file defining the CDS coordinates""")
parser.add_argument('--with-mags', action='store_true', default=False,
help="""The genome sequences or samples can be separated into MAGs and others""")
parser.add_argument('--min-prev', type=float, dest='min_prev', default=None,
help="""Minimal prevalence for SNP calling""")
parser.add_argument('--with-header', action='store_true', default=False,
help="""Output the header for the parameters""")
parser.add_argument('--out', type=str, dest='out', default='/dev/stdout',
help="""Path to output file (/dev/stdout)""")
return vars(parser.parse_args())
def open_vcf_file(fpath, min_prev=None):
"""
* ``Record.CHROM``; string
* ``Record.POS``; int
* ``Record.ID``; None
* ``Record.REF``; string
* ``Record.ALT``; list
* ``Record.QUAL``; None
* ``Record.FILTER``; list
* ``Record.INFO``; dictionary
additional attributes:
* ``Record.FORMAT``; string
* ``Record.samples``; list
* ``Record.genotype``; object
"""
vcf_reader = vcf.Reader(open(fpath, 'r'))
raw_snps = [snp for snp in vcf_reader]
sample_names = [sample.sample for sample in raw_snps[0].samples]
n_sample = len(sample_names)
vcf_snps = []
for snp in raw_snps:
if min_prev is not None:
prev = snp.INFO['NS']/n_sample
if prev > min_prev:
vcf_snps.append(snp)
else:
vcf_snps.append(snp)
return vcf_snps
def snp_stats(snps):
n_snps = len(snps)
bi_snps, tri_snps, quad_snps = classify_snps(snps)
n_snps = len(snps)
n_bi_snps = len(bi_snps)
n_tri_snps = len(tri_snps)
n_quad_snps = len(quad_snps)
sample_names = [sample.sample for sample in snps[0].samples]
n_sample = len(sample_names)
values = [n_sample, n_snps, n_bi_snps, n_tri_snps, n_quad_snps]
header = ['n_sample', 'n_snps', 'n_bi_snps', 'n_tri_snps', 'n_quad_snps']
return values, header
def mag_snp_stats(snps):
n_mag_snps = 0
are_mags = ['_' in sample.sample for sample in snps[0].samples]
are_refs = ['_' not in sample.sample for sample in snps[0].samples]
if np.all(are_mags):
all_mags = 1
n_mag_snps = len(snps)
else:
all_mags = 0
ref_inds = []
for i, is_ref in enumerate(are_refs):
if is_ref:
ref_inds.append(i)
static_sample = snps[0].samples[ref_inds[0]]
for snp in snps:
flgs = []
for ind in ref_inds[1:]:
sample = snp.samples[ind]
flgs.append(static_sample.data.GP1 == sample.data.GP1)
flgs.append(static_sample.data.GP2 == sample.data.GP2)
flgs.append(static_sample.data.GP3 == sample.data.GP3)
flgs.append(static_sample.data.GP4 == sample.data.GP4)
if not np.all(flgs):
break
if np.all(flgs):
n_mag_snps = n_mag_snps + 1
else:
pass
values = [n_mag_snps, all_mags]
header = ['n_mag_snps', 'all_mags']
return values, header
def core_genome_stats(coords):
n_cg_bloc = 0
cg_bloc_sizes = []
avg_cg_bloc_size = 0
mid_cg_bloc_size = 0
cg_bloc_dists = []
avg_cg_bloc_dist = 0
mid_cg_bloc_dist = 0
last_end = 0
with open(coords, "r") as fh:
next(fh)
for line in fh:
items = line.rstrip().split("\t")
cg_bloc_size = int(items[2]) - int(items[1])+1
cg_bloc_sizes.append(cg_bloc_size)
if last_end > 0:
cg_bloc_dist = int(items[1]) - last_end
cg_bloc_dists.append(cg_bloc_dist)
last_end = int(items[2])
n_cg_bloc = len(cg_bloc_sizes)
cg_size = sum(cg_bloc_sizes)
avg_cg_bloc_size = cg_size / n_cg_bloc
mid_cg_bloc_size = sorted(cg_bloc_sizes)[int((n_cg_bloc-1)/2)]
n_cg_dist = len(cg_bloc_dists)
avg_cg_bloc_dist = sum(cg_bloc_dists)/n_cg_dist
mid_cg_bloc_dist = sorted(cg_bloc_dists)[int((n_cg_dist-1)/2)]
values = [cg_size, n_cg_bloc, avg_cg_bloc_size, mid_cg_bloc_size, avg_cg_bloc_dist, mid_cg_bloc_dist]
header = ['cg_size', 'n_cg_bloc', 'avg_cg_bloc_size', 'mid_cg_bloc_size', 'avg_cg_bloc_dist', 'mid_cg_bloc_dist']
return values, header
def classify_snps(snps):
bi_snps = []
tri_snps = []
quad_snps = []
for snp in snps:
if len(snp.ALT) == 1:
bi_snps.append(snp)
elif len(snp.ALT) == 2:
tri_snps.append(snp)
elif len(snp.ALT) == 3:
quad_snps.append(snp)
else:
assert False
return bi_snps, tri_snps, quad_snps
def genic_stats(seq_recs, rec_table, cds_recs, snps):
genic_region, genic_masks = get_genic_region(rec_table, cds_recs)
genic_region_size = len(genic_region)
bi_snps, tri_snps, quad_snps = classify_snps(snps)
alt_rec_tb = get_alt_seq_recs(rec_table, bi_snps)
alt_genic_region, alt_genic_masks = get_genic_region(alt_rec_tb, cds_recs)
sample_names = [sample.sample for sample in snps[0].samples]
n_sample = len(sample_names)
genic_array = [ genic_masks[snp.CHROM][snp.POS] for snp in snps]
genic_snp_counts = sum(genic_array)
bi_genic_array = [ genic_masks[snp.CHROM][snp.POS] for snp in bi_snps]
bi_genic_snp_counts = sum(bi_genic_array)
# print genic_region
# print len(genic_region)
# print alt_genic_region
# print len(alt_genic_region)
pn, ps, dn, ds, pn2ps, dn2ds = pnps_dnds.pnps_dnds(genic_region, alt_genic_region)
snps2codons = map_codons(rec_table, cds_recs, bi_snps)
n_syn_snp = 0
n_nonsyn_snp = 0
n_premature_stop_snp = 0
n_stop_disrupt_snp = 0
pi_all = 0
pi_syn = 0
pi_nonsyn = 0
pi_4f_deg = 0
pi_non_deg = 0
for snp in bi_snps:
pi_val = 2*float(snp.INFO['AF'][0])*(1-float(snp.INFO['AF'][0]))
if snp.ID in snps2codons:
codon_pair = snps2codons[snp.ID]
if len(codon_pair) > 0:
if not pnps_dnds.is_synonymous(codon_pair[0], codon_pair[1]):
pi_nonsyn = pi_nonsyn + pi_val
n_nonsyn_snp = n_nonsyn_snp + 1
if pnps_dnds.is_stop_codon(codon_pair[0]):
n_stop_disrupt_snp = n_stop_disrupt_snp + 1
elif pnps_dnds.is_stop_codon(codon_pair[1]):
n_premature_stop_snp = n_premature_stop_snp + 1
else:
pass
else:
pi_syn = pi_syn + pi_val
n_syn_snp = n_syn_snp + 1
if pnps_dnds.is_4f_deg(codon_pair[0], codon_pair[2]):
pi_4f_deg = pi_4f_deg + pi_val
if pnps_dnds.is_non_deg(codon_pair[0], codon_pair[2]):
pi_non_deg = pi_non_deg + pi_val
pi_all = pi_all + pi_val
values = [genic_region_size, genic_snp_counts, bi_genic_snp_counts, pn2ps, pn, ps, dn2ds, dn, ds, n_syn_snp, n_nonsyn_snp, n_premature_stop_snp, n_stop_disrupt_snp, pi_all, pi_syn, pi_nonsyn, pi_4f_deg, pi_non_deg]
header = ['genic_region_size', 'genic_snp_counts', 'bi_genic_snp_counts', 'pn2ps', 'pn', 'ps', 'dn2ds', 'dn', 'ds', 'n_syn_snp', 'n_nonsyn_snp', 'n_premature_stop_snp', 'n_stop_disrupt_snp', 'pi', 'pi_syn', 'pi_nonsyn', 'pi_4f_deg', 'pi_non_deg']
return values, header
def get_alt_seq_recs(seq_rec_tb, snps):
alt_rec_tb = copy.deepcopy(seq_rec_tb)
for snp in snps:
if seq_rec_tb[snp.CHROM][snp.POS] != snp.REF:
alt_rec_tb[snp.CHROM] = str(alt_rec_tb[snp.CHROM][:snp.POS]) + str(snp.REF) + str(alt_rec_tb[snp.CHROM][snp.POS+1:])
elif seq_rec_tb[snp.CHROM][snp.POS] != snp.ALT[0]:
alt_rec_tb[snp.CHROM] = str(alt_rec_tb[snp.CHROM][:snp.POS]) + str(snp.ALT[0]) + str(alt_rec_tb[snp.CHROM][snp.POS+1:])
else:
print "{}: {} - {}: {}, {}, {}, {}, {}".format(
snp.ID, snp.REF, snp.ALT[0], seq_rec_tb[snp.CHROM][snp.POS-2], seq_rec_tb[snp.CHROM][snp.POS-1],
seq_rec_tb[snp.CHROM][snp.POS], seq_rec_tb[snp.CHROM][snp.POS+1], seq_rec_tb[snp.CHROM][snp.POS+2])
return alt_rec_tb
### map codons to snps
def map_codons(seq_rec_tb, cds_recs, snps):
codons = dict()
for cds_rec in cds_recs:
sid = cds_rec["seqid"]
if sid not in codons:
codons[sid] = dict()
genic_region = seq_rec_tb[sid][cds_rec["start"]-1:cds_rec["end"]]
for i in range(cds_rec["start"]-1, cds_rec["end"], 3):
start_pos = i - cds_rec["start"] + 1
codon = genic_region[start_pos:start_pos+3]
codons[sid][i] = "0.{}".format(codon)
codons[sid][i+1] = "1.{}".format(codon)
codons[sid][i+2] = "2.{}".format(codon)
snps2codons = dict()
n_nonsyn = 0
for snp in snps:
sid = snp.CHROM
snps2codons[snp.ID] = []
if sid in codons:
pos = snp.POS
if pos in codons[sid]:
codon_code = codons[sid][pos]
i, codon = codon_code.split(".")
i = int(i)
alt_codon = codon[:i]+str(snp.ALT[0])+codon[i+1:]
if codon == alt_codon:
alt_codon = codon[:i]+str(snp.REF)+codon[i+1:]
snps2codons[snp.ID].append(codon)
snps2codons[snp.ID].append(alt_codon)
snps2codons[snp.ID].append(i)
n_nonsyn = n_nonsyn + 1
# print [i, codon, alt_codon]
return snps2codons
### get genic masks
def get_genic_region(seq_rec_tb, cds_recs):
genic_region = ""
genic_masks = dict()
for sid, seq in seq_rec_tb.iteritems():
genic_mask = np.repeat(False, len(seq_rec_tb[sid]))
genic_masks[sid] = genic_mask
for cds_rec in cds_recs:
sid = cds_rec["seqid"]
genic_masks[sid][cds_rec["start"]-1:cds_rec["end"]] = True
genic_region = genic_region + seq_rec_tb[sid][cds_rec["start"]-1:cds_rec["end"]]
return genic_region, genic_masks
### get pi genic diversity
def get_pi(snps):
all_pis = []
for snp in snps:
        pi = 2*float(snp.INFO['AF'][0])*(1-float(snp.INFO['AF'][0]))
all_pis.append(pi)
return all_pis
def open_gff_file(gff):
gff_hd = Gff3(gff)
cds_recs = []
for line in gff_hd.lines[4:]:
if 'seqid' in line and 'type' in line and 'strand' in line:
if line['type'] == 'CDS' and line['strand'] == '+':
cds_recs.append(line)
print "number of cds: {}".format(len(cds_recs))
return cds_recs
def get_gff_summary(cds_recs):
n_cds = 0
cds_sizes = []
avg_cds_size = 0
mid_cds_size = 0
n_cds = len(cds_recs)
for rec in cds_recs:
cds_size = rec["end"] - rec["start"] + 1
cds_sizes.append(cds_size)
if (cds_size % 3) != 0:
print "odd cds with starting pos at: {}".format(rec["start"])
total_cds_size = sum(cds_sizes)
avg_cds_size = total_cds_size / n_cds
mid_cds_size = sorted(cds_sizes)[int((n_cds-1)/2)]
return [n_cds, avg_cds_size, mid_cds_size]
def get_seq_recs(fna):
seq_recs = list(SeqIO.parse(fna, "fasta"))
rec_table = dict()
for rec in seq_recs:
rec_table[rec.id] = str(rec.seq).upper()
return seq_recs, rec_table
def get_ref_genome_size(seq_recs):
rg_size = 0
for rec in seq_recs:
rg_size = rg_size + len(rec.seq)
return rg_size
def get_stats(args):
seq_recs, rec_table = get_seq_recs(args['fna'])
rg_size = get_ref_genome_size(seq_recs)
header = ['name', 'rg_size']
params = [args['name'], rg_size]
snps = open_vcf_file(args['vcf'], args['min_prev'])
snp_values, snp_header = snp_stats(snps)
params = params + snp_values
header = header + snp_header
if args['with_mags']:
mag_snp_values, mag_snp_header = mag_snp_stats(snps)
params = params + mag_snp_values
header = header + mag_snp_header
if args['coords'] is not None:
core_genome_values, core_genome_header = core_genome_stats(args['coords'])
params = params + core_genome_values
header = header + core_genome_header
if args['gff'] is not None:
cds_recs = open_gff_file(args['gff'])
genic_values, genic_header = genic_stats(seq_recs, rec_table, cds_recs, snps)
params = params + genic_values
header = header + genic_header
return params, header
def main():
args = parse_args()
summary, header = get_stats(args)
with open(args['out'], 'w') as fh:
if args['with_header']:
fh.write("\t".join([str(item) for item in header])+"\n")
fh.write("\t".join([str(item) for item in summary])+"\n")
if __name__ == "__main__":
main()
|
python
|
from typing import List
def _get_data_array(filename: str) -> List[int]:
lines = open(filename, 'r').readlines()
ints = [int(i.strip()) for i in lines]
return ints
def _validate(factors: List[int], target: int) -> bool:
sorted_factors = sorted(factors)
left = 0
right = len(sorted_factors) - 1
current = sorted_factors[left] + sorted_factors[right]
while (left != right):
if current == target:
return True
elif current > target:
right -= 1
elif current < target:
left += 1
current = sorted_factors[left] + sorted_factors[right]
return False
if __name__ == '__main__':
data = _get_data_array('./data_input.dat')
for i in range(len(data) - 26):
if not _validate(data[i:i+25], data[i+25]):
print(f'Found a failure at index {i+25} which is {data[i+25]}')
break
|
python
|
from functools import lru_cache
from typing import List, Optional
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    @lru_cache(None)
    def allPossibleFBT(self, n: int) -> List[Optional[TreeNode]]:
if n % 2 == 0:
return []
if n == 1:
return [TreeNode(0)]
ans = []
for leftCount in range(n):
rightCount = n - 1 - leftCount
for left in self.allPossibleFBT(leftCount):
for right in self.allPossibleFBT(rightCount):
ans.append(TreeNode(0))
ans[-1].left = left
ans[-1].right = right
return ans
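# Hedged usage note (not part of the original solution): for n = 7 there are
# Catalan(3) = 5 full binary trees, so the call below returns a list of 5 roots.
# trees = Solution().allPossibleFBT(7)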
|
python
|
from pypy.lang.smalltalk import objspace
space = objspace.ObjSpace()
def ismetaclass(w_cls):
# Heuristic to detect if this is a metaclass. Don't use apart
# from in this test file, because classtable['w_Metaclass'] is
# bogus after loading an image.
return w_cls.w_class is space.classtable['w_Metaclass']
def test_every_class_is_an_instance_of_a_metaclass():
for (nm, w_cls) in space.classtable.items():
assert ismetaclass(w_cls) or ismetaclass(w_cls.w_class)
def test_every_metaclass_inherits_from_class_and_behavior():
s_Class = space.classtable['w_Class'].as_class_get_shadow(space)
s_Behavior = space.classtable['w_Behavior'].as_class_get_shadow(space)
for (nm, w_cls) in space.classtable.items():
if ismetaclass(w_cls):
shadow = w_cls.as_class_get_shadow(space)
assert shadow.inherits_from(s_Class)
assert s_Class.inherits_from(s_Behavior)
def test_metaclass_of_metaclass_is_an_instance_of_metaclass():
w_Metaclass = space.classtable['w_Metaclass']
assert w_Metaclass.w_class.w_class is w_Metaclass
|
python
|
import numpy as np
import pandas as pd
df = pd.DataFrame(np.arange(5*4).reshape(5,4))
df
sampler = np.random.permutation(5)
sampler
df.take(sampler)
sampler_col = np.random.permutation(4)
sampler_col
sampler_col = np.random.permutation(4)
sampler_col
df[sampler_col]
df.sample(3)
df.sample(3, axis=0)
df.sample(3, axis=1)
df.sample(5)
choice = pd.Series([5,7,-1,6,4])
choice.sample(10, replace=True)
choice.sample(10, replace=True)
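# Additional illustration (not in the original snippet): take() can also
# reorder columns when given axis=1, matching the df[sampler_col] selection above.
df.take(sampler_col, axis=1)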
|
python
|
'''save/load from S3 routines
-------
'''
from __future__ import print_function, division, unicode_literals
from .omas_utils import *
from .omas_core import save_omas_pkl, load_omas_pkl, ODS
def _base_S3_uri(user):
return 's3://omas3/{user}/'.format(user=user)
# --------------------------------------------
# save and load OMAS with S3
# --------------------------------------------
def remote_uri(uri, filename, action):
"""
:param uri: uri of the container of the file
:param filename: filename to act on
:param action: must be one of [`up`, `down`, `list`, `del`]
"""
    if not re.match(r'\w+://\w+.*', uri):
return uri
tmp = uri.split('://')
system = tmp[0]
location = '://'.join(tmp[1:])
if action not in ['down', 'up', 'list', 'del']:
raise AttributeError('remote_uri action attribute must be one of [`up`, `down`, `list`, `del`]')
if system == 's3':
import boto3
from boto3.s3.transfer import TransferConfig
s3bucket = location.split('/')[0]
s3connection = boto3.resource('s3')
s3filename = '/'.join(location.split('/')[1:])
if action == 'list':
printd('Listing %s' % (uri), topic='s3')
files = list(map(lambda x: x.key, s3connection.Bucket(s3bucket).objects.all()))
s3filename = s3filename.strip('/')
if s3filename:
files = filter(lambda x: x.startswith(s3filename), files)
return files
if action == 'del':
if filename is None:
filename = s3filename.split('/')[-1]
printd('Deleting %s' % uri, topic='s3')
s3connection.Object(s3bucket, s3filename).delete()
elif action == 'down':
if filename is None:
filename = s3filename.split('/')[-1]
printd('Downloading %s to %s' % (uri, filename), topic='s3')
obj = s3connection.Object(s3bucket, s3filename)
if not os.path.exists(os.path.abspath(os.path.split(filename)[0])):
os.makedirs(os.path.abspath(os.path.split(filename)[0]))
obj.download_file(filename, Config=TransferConfig(use_threads=False))
elif action == 'up':
printd('Uploading %s to %s' % (filename, uri), topic='s3')
from botocore.exceptions import ClientError
if s3filename.endswith('/'):
s3filename += filename.split('/')[-1]
try:
s3connection.meta.client.head_bucket(Bucket=s3bucket)
except ClientError as _excp:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(_excp.response['Error']['Code'])
if error_code == 404:
s3connection.create_bucket(Bucket=s3bucket)
else:
raise
bucket = s3connection.Bucket(s3bucket)
with open(filename, 'rb') as data:
bucket.put_object(Key=s3filename, Body=data) # , Metadata=meta)
def save_omas_s3(ods, filename, user=os.environ.get('USER', 'dummy_user'), tmp_dir=omas_rcparams['tmp_imas_dir'], **kw):
"""
Save an OMAS object to pickle and upload it to S3
:param ods: OMAS data set
:param filename: filename to save to
:param user: username where to look for the file
:param tmp_dir: temporary folder for storing S3 file on local workstation
:param kw: arguments passed to the save_omas_pkl function
"""
printd('Saving to %s on S3' % (_base_S3_uri(user) + filename), topic='s3')
if not os.path.exists(os.path.abspath(tmp_dir)):
os.makedirs(os.path.abspath(tmp_dir))
save_omas_pkl(ods, os.path.abspath(tmp_dir) + os.sep + os.path.split(filename)[1], **kw)
return remote_uri(_base_S3_uri(user), os.path.abspath(tmp_dir) + os.sep + os.path.split(filename)[1], 'up')
def load_omas_s3(filename, user=os.environ.get('USER', 'dummy_user'), consistency_check=None, imas_version=None, tmp_dir=omas_rcparams['tmp_imas_dir']):
"""
Download an OMAS object from S3 and read it as pickle
:param filename: filename to load from
:param user: username where to look for the file
:param consistency_check: verify that data is consistent with IMAS schema (skip if None)
:param imas_version: imas version to use for consistency check (leave original if None)
:param tmp_dir: temporary folder for storing S3 file on local workstation
:return: OMAS data set
"""
printd('loading from %s on S3' % (_base_S3_uri(user) + filename), topic='s3')
if not os.path.exists(os.path.abspath(tmp_dir)):
os.makedirs(os.path.abspath(tmp_dir))
    remote_uri(_base_S3_uri(user) + filename, os.path.abspath(tmp_dir) + os.sep + os.path.split(filename)[1], 'down')
return load_omas_pkl(os.path.abspath(tmp_dir) + os.sep + os.path.split(filename)[1], consistency_check=consistency_check, imas_version=imas_version)
def list_omas_s3(user=''):
"""
List S3 content
:param user: username where to look for the file
:return: OMAS data set
"""
return remote_uri(_base_S3_uri(user), None, 'list')
def del_omas_s3(filename, user=os.environ.get('USER', 'dummy_user')):
"""
Delete an OMAS object from S3
:param user: username where to look for the file
:return: OMAS data set
"""
remote_uri(_base_S3_uri(user) + filename, None, 'del')
def through_omas_s3(ods, method=['function', 'class_method'][1]):
"""
Test save and load S3
:param ods: ods
:return: ods
"""
filename = 'test.pkl'
if method == 'function':
save_omas_s3(ods, filename, user='omas_test')
ods1 = load_omas_s3(filename, user='omas_test')
else:
ods.save('s3', filename=filename, user='omas_test')
ods1 = ODS().load('s3', filename=filename, user='omas_test')
return ods1
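# Hedged usage sketch (illustration only, not part of the original module): a
# minimal save/load round trip using the functions above, mirroring the
# 'omas_test' user used by through_omas_s3. Valid AWS credentials for boto3
# and an already-populated ODS are assumed.
def _s3_round_trip_example(ods):
    uri = save_omas_s3(ods, 'example.pkl', user='omas_test')
    ods1 = load_omas_s3('example.pkl', user='omas_test')
    return uri, ods1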
|
python
|
"""
Pascal sections to be inserted in code.
Copyright (C) 2019, Guillaume Gonnet
This project is under the MIT license.
"""
from typing import List, Text
def gen_bird_array(envs: List, name: Text):
"Generate a static array for bird directions / species."
upper = name.capitalize()
count = sum(int("same-as" not in e) for e in envs)
result = "k%s: array[0..%d, 0..9, 0..19] of integer = (" % (
upper, count-1)
for i, e in enumerate(envs):
if e.get("same-as"): continue
result += "\n" + " " * 4 + "( // Environment %d. " % (i+1)
for r in e["rounds"]:
array = r[name] or []
array += [9] * (20 - len(array))
result += "\n" + " " * 8 + "(" + ", ".join(map(str, array)) + "),"
result = result[:-1] + "\n"
result += " " * 4 + "),"
return result[:-1] + "\n);\n"
def gen_species(R: List, **kargs):
    "Generate the static bird species array."
    return gen_bird_array(R, "species")
def gen_directions(R: List, **kargs):
    "Generate the static bird directions array."
    return gen_bird_array(R, "directions")
def gen_hashes(R: List, **kargs):
"Generate envirnoment hashes."
count = sum(int("same-as" not in e) for e in R)
hashes = (str(e.get("hash", 0)) for e in R if "same-as" not in e)
result = "kEnvHashes: array[0..%d] of uint64 = (\n " % (count-1)
result += ", ".join(hashes)
result += "\n);"
return result
def gen_target_scores(R: List, Scores: List[int], **kargs):
"Generate target scores."
count = sum(int("same-as" not in e) for e in R)
result = "kTargetScores: array[0..%d] of integer = (" % (count-1)
result += ", ".join(str(s) for s in Scores)
result += ");"
return result
|
python
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from ._base import TelegramObject
if TYPE_CHECKING: # pragma: no cover
from .photo_size import PhotoSize
class VideoNote(TelegramObject):
"""
This object represents a video message (available in Telegram apps as of v.4.0).
Source: https://core.telegram.org/bots/api#videonote
"""
file_id: str
"""Identifier for this file, which can be used to download or reuse the file"""
file_unique_id: str
"""Unique identifier for this file, which is supposed to be the same over time and for
different bots. Can't be used to download or reuse the file."""
length: int
"""Video width and height (diameter of the video message) as defined by sender"""
duration: int
"""Duration of the video in seconds as defined by sender"""
thumb: Optional[PhotoSize] = None
"""Video thumbnail"""
file_size: Optional[int] = None
"""File size"""
|
python
|
import datetime
from django.db import models
from django.utils import timezone
#My comments:
#Each model is represented by a class that subclasses django.db.models.Model.
#Each model has a number of class variables, each of which represents a database field in the model.
#Each field is represented by an instance of a Field class – e.g., CharField for character fields and DateTimeField for datetimes.
#This tells Django what type of data each field holds.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
python
|
'''
Class to access documents and links stored in the hdf5 file.
'''
import h5py
class CorpusHDF5():
def __init__(self, path):
self.f = h5py.File(path, 'r')
def get_article_text(self, article_id):
return self.f['text'][article_id]
def get_article_title(self, article_id):
return self.f['title'][article_id]
def get_titles_pos(self):
'''
Return a dictionary where the keys are articles' titles and the values are their offset in the data array.
'''
return dict((el,i) for i,el in enumerate(self.f['title'].value))
def get_pos_titles(self):
'''
Return a dictionary where the keys are the articles' offset in the data array and the values are their titles.
'''
return dict((i,el) for i,el in enumerate(self.f['title'].value))
def get_text_iter(self):
return self.f['text']
def get_title_iter(self):
return self.f['title']
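# Hedged usage sketch (illustration only): open a corpus file, resolve an
# article position by title, and read its text. The file name 'corpus.hdf5'
# and the title 'Anarchism' are assumptions made for the example.
def _corpus_example():
    corpus = CorpusHDF5('corpus.hdf5')
    titles_pos = corpus.get_titles_pos()
    article_id = titles_pos['Anarchism']
    return corpus.get_article_text(article_id)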
|
python
|
str1 = 'Hello'
print(str1.endswith('e', 0, 2))
print(str1.endswith('o', 0, 4))
#True (substring 'He' (0 to 1 index) ends with 'e')
#False (substring 'Hell' (0 to 3 index) does not end with 'o')
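# Additional illustration (not in the original snippet): without start/end
# arguments endswith checks the whole string, and a tuple of suffixes is also accepted.
print(str1.endswith('lo'))         # True ('Hello' ends with 'lo')
print(str1.endswith(('lo', 'x')))  # True (matches the first suffix in the tuple)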
|
python
|
#
# Copyright 2015, 2016 Human Longevity, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Pipe
A "task" in disdat.
This inherits from Luigi's Abstract Task Class
The idea is that parameters are actually the parameters to the run function
requires is the tasks that have to run before this task runs
output() is basically a function that says, given the parameters, what is the output
of this task.
inputs() isn't used as much, but it says, here is a list of inputs I expect to be
available before I run.
author: Kenneth Yocum
"""
import os
import time
import luigi
from disdat.pipe_base import PipeBase
from disdat.db_link import DBLink
from disdat.driver import DriverTask
from disdat.fs import DisdatFS
from disdat.common import BUNDLE_TAG_TRANSIENT, BUNDLE_TAG_PARAMS_PREFIX
from disdat import logger as _logger
class PipeTask(luigi.Task, PipeBase):
"""
user_arg_name:
calling_task:
driver_output_bundle:
force:
output_tags:
incremental_push:
incremental_pull:
"""
user_arg_name = luigi.Parameter(default=None, significant=False) # what the outputs are referred to by downstreams
calling_task = luigi.Parameter(default=None, significant=False)
# This is used for re-running in apply:resolve_bundle to manually see if
# we need to re-run the root task.
driver_output_bundle = luigi.Parameter(default='None', significant=False)
force = luigi.BoolParameter(default=False, significant=False)
output_tags = luigi.DictParameter(default={}, significant=True)
# Each pipeline executes wrt a data context.
data_context = luigi.Parameter(default=None, significant=False)
# Each pipeline can be configured to commit and push intermediate values to the remote
incremental_push = luigi.BoolParameter(default=False, significant=False)
# Each pipeline can be configured to pull intermediate values on demand from the remote
incremental_pull = luigi.BoolParameter(default=False, significant=False)
def __init__(self, *args, **kwargs):
"""
This has the same signature as luigi.Task.
Go through this class and get the set of params we define
Args:
*args:
**kwargs:
"""
super(PipeTask, self).__init__(*args, **kwargs)
# Instance variables to track various user wishes
self.user_set_human_name = None # self.set_bundle_name()
self.user_tags = {} # self.add_tags()
self.add_deps = {} # self.add(_external)_dependency()
self.db_targets = [] # Deprecating
self._input_tags = {} # self.get_tags() of upstream tasks
self._input_bundle_uuids = {} # self.get_bundle_uuid() of upstream tasks
self._mark_force = False # self.mark_force()
def bundle_outputs(self):
"""
Each pipe creates an output bundle name
Idea: WorkflowName + task.task_id == [Pipe Class Name + params + hash]
For now: Apply Output Bundle "-" Pipe Class Name
"""
output_bundles = [(self.pipe_id(), self.pfs.get_path_cache(self).uuid)]
return output_bundles
def bundle_inputs(self):
"""
Given this pipe, return the set of bundles created by the input pipes.
Mirrors Luigi task.inputs()
NOTE: Calls task.deps which calls task._requires which calls task.requires()
:param pipe_task: A PipeTask or a DriverTask (both implement PipeBase)
:return: [(bundle_name, uuid), ... ]
"""
input_tasks = self.deps()
input_bundles = [(task.pipe_id(), self.pfs.get_path_cache(task).uuid) for task in input_tasks]
return input_bundles
def pipe_id(self):
"""
Given a pipe instance, return a unique string based on the class name and
the parameters. This re-uses Luigi code for getting the unique string.
        NOTE: The PipeTask has a 'driver_output_bundle'. This is the name of the pipeline output bundle given by the user.
Because this is a Luigi parameter, it is included in the Luigi self.task_id string and hash. So we do not
have to append this separately.
"""
return self.task_id
def pipeline_id(self):
"""
This is the "human readable" name; a "less unique" id than the unique id.
The pipeline_id is well-defined for the output task -- it is the output bundle name. For intermediate outputs
the pipeline_id defaults to the pipe_id(). Else, it may be set by the task author.
        Note: Should we provide an identifier for which version of this pipe is running at which stage in the pipeline?
Short answer, no. Imagine if we name with the pipeline bundle output name, branch index, and level index. In
this case if anyone re-uses this output, the human_name for the bundle won't be meaningful. For the pipeline
owner, it may also not be helpful. The system may also place different outputs at different times under those
indices. Too complicated.
Returns:
(str)
"""
if self.driver_output_bundle is not None:
return self.driver_output_bundle
elif self.user_set_human_name is not None:
return self.user_set_human_name
else:
id_parts = self.pipe_id().split('_')
return "{}_{}".format(id_parts[0],id_parts[-1])
def get_hframe_uuid(self):
""" Return the unique ID for this tasks current output hyperframe
Returns:
hframe_uuid (str): The unique identifier for this task's hyperframe
"""
pce = self.pfs.get_path_cache(self)
assert (pce is not None)
return pce.uuid
def upstream_hframes(self):
""" Convert upstream tasks to hyperframes, return list of hyperframes
Returns:
(:list:`hyperframe.HyperFrameRecord`): list of upstream hyperframes
"""
tasks = self.requires()
hfrs = []
for t in tasks:
hfid = t.get_hframe_uuid()
hfrs.append(self.pfs.get_hframe_by_uuid(hfid, data_context=self.data_context))
return hfrs
def requires(self):
"""
Return Tasks on which this task depends.
Build them intelligently, however.
1.) The input_df so far stays the same for all upstream pipes.
2.) However, when we resolve the location of the outputs, we need to do so correctly.
:return:
"""
kwargs = self.prepare_pipe_kwargs()
self.add_deps.clear()
self.pipe_requires(**kwargs)
rslt = self.add_deps
if len(self.add_deps) == 0:
return []
tasks = []
for user_arg_name, cls_and_params in rslt.items():
pipe_class, params = cls_and_params[0], cls_and_params[1]
assert isinstance(pipe_class, luigi.task_register.Register)
# we propagate the same inputs and the same output dir for every upstream task!
params.update({
'user_arg_name': user_arg_name,
'calling_task': self,
'driver_output_bundle': None, # allow intermediate tasks pipe_id to be independent of root task.
'force': self.force,
'output_tags': dict({}), # do not pass output_tags up beyond root task
'data_context': self.data_context, # all operations wrt this context
'incremental_push': self.incremental_push, # propagate the choice to push incremental data.
'incremental_pull': self.incremental_pull # propagate the choice to incrementally pull data.
})
tasks.append(pipe_class(**params))
return tasks
def output(self):
"""
This is the *only* output function for all pipes. It declares the creation of the
one HyperFrameRecord pb and that's it. Remember, has to be idempotent.
Return:
(list:str):
"""
return PipeBase.add_bundle_meta_files(self)
def run(self):
"""
Call users run function.
1.) prepare the arguments
2.) run and gather user result
3.) interpret and wrap in a HyperFrame
Returns:
(`hyperframe.HyperFrame`):
"""
kwargs = self.prepare_pipe_kwargs(for_run=True)
pce = self.pfs.get_path_cache(self)
assert(pce is not None)
""" NOTE: If a user changes a task param in run(), and that param parameterizes a dependency in requires(),
then running requires() post run() will give different tasks. To be safe we record the inputs before run()
"""
cached_bundle_inputs = self.bundle_inputs()
try:
start = time.time() # P3 datetime.now().timestamp()
user_rtn_val = self.pipe_run(**kwargs)
stop = time.time() # P3 datetime.now().timestamp()
except Exception as error:
""" If user's pipe fails for any reason, remove bundle dir and raise """
try:
_logger.error("User pipe_run encountered exception: {}".format(error))
PipeBase.rm_bundle_dir(pce.path, pce.uuid, self.db_targets)
except OSError as ose:
_logger.error("User pipe_run encountered error, and error on remove bundle: {}".format(ose))
raise
try:
presentation, frames = PipeBase.parse_return_val(pce.uuid, user_rtn_val, self.data_context)
hfr = PipeBase.make_hframe(frames,
pce.uuid,
cached_bundle_inputs,
self.pipeline_id(),
self.pipe_id(),
self,
start_ts=start,
stop_ts=stop,
tags={"presentable": "True"},
presentation=presentation)
# Add any output tags to the user tag dict
if self.output_tags:
self.user_tags.update(self.output_tags)
# If this is the root_task, identify it as so in the tag dict
if isinstance(self.calling_task, DriverTask):
self.user_tags.update({'root_task': 'True'})
# Lastly add any parameters associated with this class as tags.
# They are differentiated by a special prefix in the key
self.user_tags.update(self._get_subcls_params())
# Overwrite the hyperframe tags with the complete set of tags
hfr.replace_tags(self.user_tags)
self.data_context.write_hframe(hfr)
transient = False
if hfr.get_tag(BUNDLE_TAG_TRANSIENT) is not None:
transient = True
if self.incremental_push and not transient:
self.pfs.commit(None, None, uuid=pce.uuid, data_context=self.data_context)
self.pfs.push(uuid=pce.uuid, data_context=self.data_context)
except Exception as error:
""" If we fail for any reason, remove bundle dir and raise """
PipeBase.rm_bundle_dir(pce.path, pce.uuid, self.db_targets)
raise
return hfr
def _get_subcls_params(self):
""" Given the child class, extract user defined Luigi parameters
The right way to do this is to use vars(cls) and filter by Luigi Parameter
types. Luigi get_params() gives us all parameters in the full class hierarchy.
It would give us the parameters in this class as well. And then we'd have to do set difference.
See luigi.Task.get_params()
NOTE: We do NOT keep the parameter order maintained by Luigi. That's critical for Luigi creating the task_id.
However, we can implicitly re-use that ordering if we re-instantiate the Luigi class.
Args:
self: The instance of the subclass. To get the normalized values for the Luigi Parameters
Returns:
dict: (BUNDLE_TAG_PARAM_PREFIX.<name>:'string value',...)
"""
# Don't need to bother with serializing parameters, just grab them
# tags only need to get serialized into hyperframes and never recovered
cls = self.__class__
params = {}
for param in vars(cls):
attribute = getattr(cls, param)
if isinstance(attribute, luigi.Parameter):
params["{}{}".format(BUNDLE_TAG_PARAMS_PREFIX, param)] = attribute.serialize(getattr(self, param))
return params
def prepare_pipe_kwargs(self, for_run=False):
""" Each upstream task produces a bundle. Prepare that bundle as input
to the user's pipe_run function.
Args:
for_run (bool): prepare args for run -- at that point all upstream tasks have completed.
Returns:
(dict): A dictionary with the arguments.
"""
kwargs = dict()
# Place upstream task outputs into the kwargs. Thus the user does not call
# self.inputs(). If they did, they would get a list of output targets for the bundle
# that isn't very helpful.
if for_run:
# Reset the stored tags, in case this instance is run multiple times.
self._input_tags = {}
self._input_bundle_uuids = {}
upstream_tasks = [(t.user_arg_name, self.pfs.get_path_cache(t)) for t in self.requires()]
for user_arg_name, pce in [u for u in upstream_tasks if u[1] is not None]:
hfr = self.pfs.get_hframe_by_uuid(pce.uuid, data_context=self.data_context)
assert hfr.is_presentable()
# Download any data that is not local (the linked files are not present).
# This is the default behavior when running in a container.
# The non-default is to download and localize ALL bundles in the context before we run.
                # That's inefficient. We only need metadata to determine what to re-run.
if self.incremental_pull:
DisdatFS()._localize_hfr(hfr, pce.uuid, self.data_context)
if pce.instance.user_arg_name in kwargs:
_logger.warning('Task human name {} reused when naming task dependencies: Dependency hyperframe shadowed'.format(pce.instance.user_arg_name))
self._input_tags[user_arg_name] = hfr.tag_dict
self._input_bundle_uuids[user_arg_name] = pce.uuid
kwargs[user_arg_name] = self.data_context.present_hfr(hfr)
return kwargs
"""
Pipes Interface -- A pipe implements these calls
"""
def pipe_requires(self, **kwargs):
"""
This is the place to put your pipeline dependencies. Place
the upstream pipes in an array and a dict for their params
Args:
**kwargs:
Returns:
"""
return None
def pipe_run(self, **kwargs):
"""
There is only one default argument "input_df" in kwargs.
The other keys in kwargs will be identical to your Luigi parameters specified in this class.
The input_df has the data context identifiers, e.g., sampleName, sessionId, subjectId
The input_df has the data in either jsonData or fileData.
A sharded task will receive a subset of all possible inputs.
Args:
**kwargs:
Returns:
"""
raise NotImplementedError()
def add_dependency(self, name, task_class, params):
"""
Disdat Pipe API Function
Add a task and its parameters to our requirements
Args:
name (str): Name of our upstream (also name of argument in downstream)
task_class (:object): upstream task class
            params (:dict): Dictionary of parameters for the upstream task
Returns:
None
"""
if not isinstance(params, dict):
error = "add_dependency third argument must be a dictionary of parameters"
raise Exception(error)
assert (name not in self.add_deps)
self.add_deps[name] = (task_class, params)
return
def add_external_dependency(self, name, task_class, params):
"""
Disdat Pipe API Function
Add an external task and its parameters to our requirements. What this means is that
there is no run function and, in that case, Luigi will ignore the results of task.deps() (which calls
flatten(self.requires())). And what that means is that this requirement can only be satisfied
by the bundle actually existing.
Args:
name (str): Name of our upstream (also name of argument in downstream)
task_class (:object): upstream task class
            params (:dict): Dictionary of parameters for the upstream task
Returns:
None
"""
if not isinstance(params, dict):
error = "add_dependency third argument must be a dictionary of parameters"
raise Exception(error)
assert (name not in self.add_deps)
self.add_deps[name] = (luigi.task.externalize(task_class), params)
return
def add_db_target(self, db_target):
"""
Every time the user creates a db target, we add
it to the list of db_targets in this pipe.
Note: We add through the DBTarget object create, not through
pipe.create_db_target() in the case that people do some hacking and don't use that API.
Args:
db_target (`db_target.DBTarget`):
Returns:
None
"""
self.db_targets.append(db_target)
def create_output_table(self, dsn, table_name, schema_name=None):
"""
Create an output table target. Use the target to parameterize queries with the
target table name.
Args:
dsn (unicode): The dsn indicating the configuration to connect to the db
table_name (unicode): The table name.
schema_name (unicode): Optional force use of schema (default None)
Returns:
(`disdat.db_target.DBTarget`)
"""
target = DBLink(self, dsn, table_name, schema_name=schema_name)
return target
def create_output_file(self, filename):
"""
Disdat Pipe API Function
Pass in the name of your file, and get back an object to which you can write.
Under the hood, this is a Luigi.Target.
Args:
filename: The name of your file, not the path.
Returns:
(`luigi.Target`):
"""
return self.make_luigi_targets_from_basename(filename)
def create_output_dir(self, dirname):
"""
Disdat Pipe API Function
Given basename directory name, return a fully qualified path whose prefix is the
local output directory for this bundle in the current context. This call creates the
output directory as well.
Args:
dirname (str): The name of the output directory, i.e., "models"
Returns:
output_dir (str): Fully qualified path of a directory whose prefix is the bundle's local output directory.
"""
prefix_dir = self.get_output_dir()
fqp = os.path.join(prefix_dir, dirname)
try:
os.makedirs(fqp)
except IOError as why:
_logger.error("Creating directory in bundle directory failed:".format(why))
return fqp
def get_output_dir(self):
"""
Disdat Pipe API Function
Retrieve the output directory for this task's bundle. You may place
files directly into this directory.
Returns:
output_dir (str): The bundle's output directory
"""
# Find the path cache entry for this pipe to find its output path
pce = self.pfs.get_path_cache(self)
assert(pce is not None)
return pce.path
def set_bundle_name(self, human_name):
"""
Disdat Pipe API Function
Set the human name for this bundle. If not called, then intermediate outputs
will have human names identical to their process names.
Args:
human_name (str): The human name of this pipe's output bundle.
Returns:
None
"""
self.user_set_human_name = human_name
def add_tags(self, tags):
"""
Disdat Pipe API Function
Adds tags to bundle.
Args:
tags (dict (str, str)): key value pairs (string, string)
Returns:
None
"""
assert (isinstance(tags, dict))
self.user_tags.update(tags)
def get_tags(self, user_arg_name):
"""
Disdat Pipe API Function
Retrieve the tag dictionary from an upstream task.
Args:
user_arg_name (str): keyword arg name of input bundle data for which to return tags
Returns:
tags (dict (str, str)): key value pairs (string, string)
"""
assert user_arg_name in self._input_tags
return self._input_tags[user_arg_name]
def get_bundle_uuid(self, user_arg_name):
"""
Disdat Pipe API Function
Retrieve the UUID from an upstream task.
Args:
user_arg_name (str): keyword arg name of input bundle data for which to return tags
Returns:
uuid (str)
"""
assert user_arg_name in self._input_bundle_uuids
return self._input_bundle_uuids[user_arg_name]
def mark_force(self):
"""
Disdat Pipe API Function
        Mark pipe to force recomputation of this particular task. This means that Disdat/Luigi will
        always re-run this particular pipe / task.
        We mark the pipe with a particular flag so that apply.resolve_bundle() knows to re-run it.
Returns:
None
"""
self._mark_force = True
def mark_transient(self):
"""
Disdat Pipe API Function
Mark output bundle as transient. This means that during execution Disdat will not
write (push) this bundle back to the remote. That only happens in two cases:
1.) Started the pipeline with incremental_push=True
2.) Running the pipeline in a container with no_push or no_push_intermediates False
We mark the bundle with a tag. Incremental push investigates the tag before pushing.
And the entrypoint investigates the tag if we are not pushing incrementally.
Otherwise, normal push commands from the CLI or api will work, i.e., manual pushes continue to work.
Returns:
None
"""
self.add_tags({BUNDLE_TAG_TRANSIENT: 'True'})
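# Hedged usage sketch (illustration only, not part of disdat itself): a minimal
# PipeTask subclass wiring together the API documented above. The task name,
# the 'greeting' parameter and the output file name are assumptions made for
# the example; it also assumes the target returned by create_output_file()
# supports open() and path, as luigi.LocalTarget does.
class ExamplePipe(PipeTask):
    greeting = luigi.Parameter(default='hello')
    def pipe_requires(self, **kwargs):
        # No upstream dependencies in this sketch; a real pipe would call
        # self.add_dependency('upstream', UpstreamPipe, {'param': 1}) here.
        self.set_bundle_name('example_pipe')
    def pipe_run(self, **kwargs):
        target = self.create_output_file('greeting.txt')
        with target.open('w') as f:
            f.write(self.greeting)
        return {'greeting_file': [target.path]}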
|
python
|
# coding=utf-8
"""
OneForAll default configuration
"""
import pathlib
import warnings
# Disable all warning messages
warnings.filterwarnings("ignore")
# Path settings
relative_directory = pathlib.Path(__file__).parent.parent  # relative path of the OneForAll code
module_dir = relative_directory.joinpath('modules')  # OneForAll module directory
third_party_dir = relative_directory.joinpath('thirdparty')  # third-party tools directory
data_storage_dir = relative_directory.joinpath('data')  # data storage directory
result_save_dir = relative_directory.joinpath('results')  # results save directory
temp_save_dir = result_save_dir.joinpath('temp')
# OneForAll entry parameter settings
enable_check_version = True  # enable latest-version check
enable_brute_module = True  # use the brute-force module (default True)
enable_dns_resolve = True  # use DNS to resolve subdomains (default True)
enable_http_request = True  # use HTTP requests to probe subdomains (default True)
enable_finder_module = True  # enable the finder module, which discovers more subdomains from response bodies and JS (default True)
enable_altdns_module = True  # enable the altdns module, which recombines subdomains via permutation to discover new ones (default True)
enable_enrich_module = True  # enable the enrich module, which enriches results with info such as an IP's cdn, cidr, asn, org, addr and isp
enable_banner_identify = True  # enable the web fingerprint identification module (default True)
enable_takeover_check = False  # enable subdomain takeover risk check (default False)
# allowed values are 'small', 'medium', 'large'
http_request_port = 'small'  # ports for HTTP requests to subdomains (default 'small', probes ports 80 and 443)
# True exports only alive subdomains, False exports all subdomain results
result_export_alive = False  # export only alive subdomain results (default False)
# allowed formats are 'csv', 'json'
result_save_format = 'csv'  # file format for saving subdomain results (default csv)
# when path is None, a path is generated automatically under the OneForAll results directory
result_save_path = None  # file path for saving subdomain results (default None)
# 收集模块设置
save_module_result = False # 保存各模块发现结果为json文件(默认False)
enable_all_module = True # 启用所有收集模块(默认True)
enable_partial_module = [] # 启用部分收集模块 必须禁用enable_all_module才能生效
# 只使用ask和baidu搜索引擎收集子域的示例
# enable_partial_module = ['modules.search.ask', 'modules.search.baidu']
module_thread_timeout = 90.0 # 每个收集模块线程超时时间(默认90秒)
# Brute-force module settings
enable_wildcard_check = True # enable wildcard DNS detection (default True)
enable_wildcard_deal = True # enable wildcard DNS handling (default True)
brute_massdns_path = None # None selects the massdns binary automatically; otherwise provide an absolute path
brute_status_format = 'ansi' # status output format during brute forcing (default ansi, json also available)
brute_concurrent_num = 2000 # number of concurrent queries (default 2000, recommended maximum 10000)
brute_socket_num = 1 # number of sockets per process during brute forcing
brute_resolve_num = 15 # number of retries with another name server when resolution fails
# wordlist path used for brute forcing (None uses data/subdomains.txt; use an absolute path for a custom wordlist)
brute_wordlist_path = None
# path of the file storing the domain's authoritative DNS name servers; used for A-record queries when the domain has wildcard resolution enabled
authoritative_dns_path = data_storage_dir.joinpath('authoritative_dns.txt')
enable_recursive_brute = False # whether to brute force recursively (default False)
brute_recursive_depth = 2 # recursive brute-force depth (default 2 levels)
# wordlist path for brute forcing the next subdomain level (None uses data/subnames_next.txt; use an absolute path for a custom wordlist)
recursive_nextlist_path = None
enable_check_dict = False # whether to show the wordlist configuration check prompt (default False)
delete_generated_dict = True # whether to delete wordlists generated temporarily during brute forcing (default True)
delete_massdns_result = True # whether to delete the resolution results output by massdns during brute forcing (default True)
only_save_valid = True # whether to store only successfully resolved subdomains when processing brute-force results
check_time = 10 # pause time for the wordlist configuration check (default 10 seconds)
enable_fuzz = False # whether to enumerate domains in fuzz mode
fuzz_place = None # position to brute force, marked with '@', e.g. [email protected]
fuzz_rule = None # regex used for fuzzing domains, e.g. '[a-z][0-9]' means the first character is a letter and the second a digit
fuzz_list = None # wordlist path used for fuzzing domains
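# Illustrative fuzz-mode configuration (the values below are placeholders added
# for clarity, not the shipped defaults):
# enable_fuzz = True
# fuzz_place = 'www.@.example.com'   # '@' marks the position to fuzz
# fuzz_rule = '[a-z][0-9]'           # first character a letter, second a digit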
brute_ip_blacklist = {'0.0.0.0', '0.0.0.1'} # IP blacklist; subdomains resolving to a blacklisted IP are marked as invalid
ip_appear_maximum = 100 # if more than 100 subdomains resolve to the same IP, they are marked as invalid (wildcard) subdomains
# altdns module settings
altdns_increase_num = True
altdns_decrease_num = True
altdns_replace_word = False
altdns_insert_word = False
altdns_add_word = False
# Banner identification module settings
banner_process_number = 4 # number of identification processes (default 4)
# Proxy settings
enable_request_proxy = False # whether to use a proxy (global switch, default False)
proxy_all_module = False # proxy all modules
proxy_partial_module = ['GoogleQuery', 'AskSearch', 'DuckDuckGoSearch',
'GoogleAPISearch', 'GoogleSearch', 'YahooSearch',
'YandexSearch', 'CrossDomainXml',
'ContentSecurityPolicy'] # modules to proxy (custom list)
request_proxy_pool = [{'http': 'http://127.0.0.1:1080',
'https': 'https://127.0.0.1:1080'}] # proxy pool
# request_proxy_pool = [{'http': 'socks5h://127.0.0.1:10808',
# 'https': 'socks5h://127.0.0.1:10808'}] # proxy pool
# Request settings
request_thread_count = None # number of request threads (default None: set automatically as appropriate)
request_timeout_second = (13, 27) # request timeout in seconds (connect timeout recommended slightly above 3 seconds)
request_ssl_verify = False # verify SSL for requests (default False)
request_allow_redirect = True # allow redirects for requests (default True)
request_redirect_limit = 10 # redirect limit for requests (default 10)
# default request headers; custom headers can be added here
request_default_headers = {
'Accept': 'text/html,application/xhtml+xml,'
'application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Cache-Control': 'max-age=0',
'DNT': '1',
'Referer': 'https://www.google.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
'Upgrade-Insecure-Requests': '1',
'X-Forwarded-For': '127.0.0.1'
}
enable_random_ua = True # use a random User-Agent (default True; when enabled it overrides the UA in request_default_headers)
# Search module settings
# full search tries to fetch all results from the search engines, but may take a very long time
enable_full_search = False # enable full search (default False)
enable_recursive_search = False # search subdomains recursively (default False)
search_recursive_times = 2 # recursive search depth (default 2)
# DNS resolution settings
resolver_nameservers = [
'223.5.5.5', # AliDNS
'119.29.29.29', # DNSPod
'114.114.114.114', # 114DNS
'8.8.8.8', # Google DNS
'1.1.1.1' # CloudFlare DNS
] # DNS name servers used for queries
resolver_timeout = 5.0 # resolution timeout (default 5.0 seconds)
resolver_lifetime = 10.0 # resolution lifetime (default 10.0 seconds)
# Port probing settings for HTTP requests
# you can add custom ports to the port lists
small_ports = [80, 443] # used by default
medium_ports = [80, 443, 8000, 8080, 8443]
# Note: avoid the large port range for domains of big companies; they have so many subdomains that a large
# port range can generate hundreds of thousands to tens of millions of requests, which may exhaust memory and
# crash the program, and waiting for that volume of requests also takes a very long time.
# OneForAll is not a port scanner; to scan ports, use tools such as nmap or zmap.
large_ports = [80, 81, 280, 300, 443, 591, 593, 832, 888, 901, 981, 1010, 1080,
1100, 1241, 1311, 1352, 1434, 1521, 1527, 1582, 1583, 1944, 2082,
2082, 2086, 2087, 2095, 2096, 2222, 2301, 2480, 3000, 3128, 3333,
4000, 4001, 4002, 4100, 4125, 4243, 4443, 4444, 4567, 4711, 4712,
4848, 4849, 4993, 5000, 5104, 5108, 5432, 5555, 5800, 5801, 5802,
5984, 5985, 5986, 6082, 6225, 6346, 6347, 6443, 6480, 6543, 6789,
7000, 7001, 7002, 7396, 7474, 7674, 7675, 7777, 7778, 8000, 8001,
8002, 8003, 8004, 8005, 8006, 8008, 8009, 8010, 8014, 8042, 8069,
8075, 8080, 8081, 8083, 8088, 8090, 8091, 8092, 8093, 8016, 8118,
8123, 8172, 8181, 8200, 8222, 8243, 8280, 8281, 8333, 8384, 8403,
8443, 8500, 8530, 8531, 8800, 8806, 8834, 8880, 8887, 8888, 8910,
8983, 8989, 8990, 8991, 9000, 9043, 9060, 9080, 9090, 9091, 9200,
9294, 9295, 9443, 9444, 9800, 9981, 9988, 9990, 9999, 10000,
10880, 11371, 12043, 12046, 12443, 15672, 16225, 16080, 18091,
18092, 20000, 20720, 24465, 28017, 28080, 30821, 43110, 61600]
ports = {'small': small_ports, 'medium': medium_ports, 'large': large_ports}
common_subnames = {'i', 'w', 'm', 'en', 'us', 'zh', 'w3', 'app', 'bbs',
'web', 'www', 'job', 'docs', 'news', 'blog', 'data',
'help', 'live', 'mall', 'blogs', 'files', 'forum',
'store', 'mobile'}
# Module API configuration
# Censys API is available with free registration: https://censys.io/api
censys_api_id = ''
censys_api_secret = ''
# Binaryedge API is available with free registration: https://app.binaryedge.io/account/api
# The free API is valid for 1 month and can be regenerated after expiry; it allows 250 queries per month.
binaryedge_api = ''
# Chinaz API is available with free registration: http://api.chinaz.com/ApiDetails/Alexa
chinaz_api = ''
# Bing API is available with free registration: https://azure.microsoft.com/zh-cn/services/
# cognitive-services/bing-web-search-api/#web-json
bing_api_id = ''
bing_api_key = ''
# SecurityTrails API is available with free registration: https://securitytrails.com/corp/api
securitytrails_api = ''
# https://fofa.so/api
fofa_api_email = '' # fofa account email
fofa_api_key = '' # fofa account key
# Google API is available with free registration:
# the free API can only query the first 100 results
# https://developers.google.com/custom-search/v1/overview#search_engine_id
# after creating a custom search engine, enable "Search the entire web" in its control panel
google_api_id = '' # Google API custom search engine id
# https://developers.google.com/custom-search/v1/overview#api_key
google_api_key = '' # Google API custom search key
# https://api.passivetotal.org/api/docs/
riskiq_api_username = ''
riskiq_api_key = ''
# Shodan API is available with free registration: https://account.shodan.io/register
# the free API is rate limited to 1 query per second
shodan_api_key = ''
# The ThreatBook API charges for subdomain queries https://x.threatbook.cn/nodev4/vb4/myAPI
threatbook_api_key = ''
# VirusTotal API is available with free registration: https://developers.virustotal.com/reference
virustotal_api_key = ''
# https://www.zoomeye.org/doc?channel=api
zoomeye_api_usermail = ''
zoomeye_api_password = ''
# Spyse API is available with free registration: https://spyse.com/
spyse_api_token = ''
# https://www.circl.lu/services/passive-dns/
circl_api_username = ''
circl_api_password = ''
# https://www.dnsdb.info/
dnsdb_api_key = ''
# ipv4info API is available with free registration: http://ipv4info.com/tools/api/
# The free API is valid for only 2 days and can be regenerated after expiry; it allows 50 queries per day.
ipv4info_api_key = ''
# https://github.com/360netlab/flint
# passivedns_api_addr defaults to empty, which uses http://api.passivedns.cn
# passivedns_api_token may be left empty
passivedns_api_addr = ''
passivedns_api_token = ''
# A Github token can be generated at https://github.com/settings/tokens; user is the Github username
# used for subdomain takeover checks and subdomain collection
github_api_user = ''
github_api_token = ''
# obtain Cloudflare API key from https://dash.cloudflare.com/profile/api-tokens
cloudflare_api_token = ''
# https://hunter.qianxin.com/home/userInfo
hunter_api_key = ''
|
python
|
import logging
import math
import sys
from datetime import timedelta
from typing import TextIO, Sequence, Tuple, NoReturn
import attr
import httpx
from cached_property import cached_property
from fire import Fire
@attr.s(auto_attribs=True, kw_only=True)
class BaseCommand:
stream: TextIO = sys.stdout
def _print(self, *args):
print(*args, file=self.stream)
@attr.s(auto_attribs=True, kw_only=True)
class SpeedTestCommand(BaseCommand):
url: str = 'http://httpbin.org/status/200'
count: int = 2
_bad_response: httpx.Response = None
@cached_property
def deltas(self) -> Tuple[timedelta, ...]:
deltas = []
for _ in range(self.count):
response = httpx.get(self.url)
if response.status_code != 200:
self._bad_response = response
break
deltas.append(response.elapsed.microseconds)
return deltas
@cached_property
def failure(self) -> str:
if self._bad_response is None:
return ''
return '{code} {reason}'.format(
code=self._bad_response.status_code,
reason=self._bad_response.reason_phrase,
)
@cached_property
def mean(self) -> float:
return sum(self.deltas) / len(self.deltas)
@cached_property
def deviations(self) -> Tuple[float, ...]:
return tuple(self.mean - d for d in self.deltas)
@cached_property
def variance(self) -> float:
return sum(d ** 2 for d in self.deviations) / len(self.deviations)
@cached_property
def stddev(self) -> float:
return math.sqrt(self.variance)
def _do(self) -> int:
if self.failure:
self._print(self.failure)
return 1
self._print('mean:', self.mean)
self._print('std dev:', self.stddev)
return 0
def __call__(self) -> NoReturn:
sys.exit(self._do())
@attr.s(auto_attribs=True)
class Commands:
log_level: str = 'WARNING'
_registry = dict(
speed=SpeedTestCommand,
)
def __call__(self):
logging.getLogger().setLevel(self.log_level)
return self._registry
def entrypoint(argv: Sequence[str]) -> int:
try:
Fire(Commands, command=argv)
except SystemExit as exc:
return exc.code
if __name__ == "__main__":
sys.exit(entrypoint(sys.argv[1:]))
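# Illustrative invocation (script name is hypothetical): Fire resolves the
# 'speed' registry key to SpeedTestCommand and maps the flags onto its fields.
#   python speedtest.py speed --url=http://httpbin.org/status/200 --count=3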
|
python
|
#!/usr/bin/python3
import rospy
from std_msgs.msg import Int16MultiArray, String, Bool
import numpy as np
import pickle
import os
import yaml
import h5py
from utils.audio import get_mfcc
from utils.model import get_deep_speaker
from utils.utils import batch_cosine_similarity, dist2id
n_embs = 0
X = []
y = []
def save_dataset(dataset_path):
predictions = np.array(X)
dt = h5py.special_dtype(vlen=str)
labels = np.array(y, dtype=dt)
with h5py.File(dataset_path, "w") as h5f :
h5f.create_dataset("predictions", data=predictions)
h5f.create_dataset("labels", data=labels)
def load_dataset(dataset_path):
predictions = []
labels = []
if os.path.isfile(dataset_path):
with h5py.File(dataset_path, 'r') as dataset:
predictions = dataset['predictions'][:].tolist()
labels = dataset['labels'][:].tolist()
return predictions, labels
def get_model(path):
REF_PATH = os.path.dirname(os.path.abspath(__file__))
return get_deep_speaker(os.path.join(REF_PATH, path))
def process_audio(data, sample_rate, num_fbanks):
result = np.array(data)
# to float32
result = result.astype(np.float32, order='C') / 32768.0
# Processing
result = get_mfcc(result, sample_rate, num_fbanks)
return result
def get_label_from(prediction, identification_threshold):
result = None
if len(X) > 0:
# Distance between the sample and the support set
emb_voice = np.repeat(prediction, len(X), 0)
cos_dist = batch_cosine_similarity(np.array(X), emb_voice)
# Matching
result = dist2id(cos_dist, y, identification_threshold, mode='avg')
return result
def callback(audio, sample_rate, num_fbanks, speaker_model, identification_threshold, sample_phrases, identity_publisher, sample_publisher, speaker_publisher):
"""
Callback called each time there is a new record.
Parameters
----------
audio
Audio source
sample_rate
The number of samples of audio recorded every second.
num_fbanks
Number of filter banks to apply
speaker_model
Deep model for speaker recognition
identification_threshold
The min value to assign a correct prediction
"""
processed_audio = process_audio(audio.data, sample_rate, num_fbanks)
prediction = speaker_model.predict(np.expand_dims(processed_audio, 0))
id_label = get_label_from(prediction, identification_threshold)
if len(X) == 0 or id_label is None:
sample_publisher.publish(True)
predictions = []
predictions.append(prediction[0])
speaker_publisher.publish("I don't recognize your voice, do you want to register?")
response = rospy.wait_for_message("identity_text", String)
if "yes" in response.data.lower():
speaker_publisher.publish("Repeat the following sentences.")
for phrase in sample_phrases:
speaker_publisher.publish(phrase)
result = rospy.wait_for_message("identity_data", Int16MultiArray)
processed_audio = process_audio(result.data, sample_rate, num_fbanks)
prediction = speaker_model.predict(np.expand_dims(processed_audio, 0))
predictions.append(prediction[0])
speaker_publisher.publish("Perfect. Tell me your name to finish the registration.")
name = rospy.wait_for_message("identity_text", String)
X.extend(predictions)
y.extend([name.data]*len(predictions))
sample_publisher.publish(False)
identity_publisher.publish(name)
else:
identity_publisher.publish(id_label)
print("The user is:", id_label)
def init_node(node_name, dataset_path, identity_topic, sample_topic, output_topic):
"""
Init the node.
Parameters
----------
node_name
Name assigned to the node
"""
rospy.init_node(node_name, anonymous=True)
identity_publisher = rospy.Publisher(identity_topic, String, queue_size=1)
sample_publisher = rospy.Publisher(sample_topic, Bool, queue_size=1)
speaker_publisher = rospy.Publisher(output_topic, String, queue_size=1)
rospy.on_shutdown(lambda:save_dataset(dataset_path))
predictions, labels = load_dataset(dataset_path)
X.extend(predictions)
y.extend(labels)
return identity_publisher, sample_publisher, speaker_publisher
def listener(sample_rate, num_fbanks, model_path, sample_phrases, identity_publisher, sample_publisher, speaker_publisher, identification_threshold, data_topic):
"""
Main function of the node.
Parameters
----------
sample_rate
The number of samples of audio recorded every second.
num_fbanks
Number of filter banks to apply
model_path
Path to deep model
identification_threshold
The min value to assign a correct prediction
data_topic
Topic in which is published audio data
"""
speaker_model = get_model(model_path)
rospy.Subscriber(data_topic, Int16MultiArray, lambda audio : callback(audio,
sample_rate,
num_fbanks,
speaker_model,
identification_threshold,
sample_phrases,
identity_publisher,
sample_publisher,
speaker_publisher))
rospy.spin()
if __name__ == '__main__':
REF_PATH = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(REF_PATH,'config.yml')) as file:
config = yaml.full_load(file)
node_name = config['nodes']['reidentification']
sample_rate = config['settings']['sampleRate']
num_fbanks = config['settings']['numFbanks']
model_path = config['models']['defaults']
identification_threshold = config['settings']['identificationThreshold']
data_topic = config['topics']['voiceData']
identity_topic = config['topics']['identity']
sample_topic = config['topics']['sample']
output_topic = config['topics']['outputText']
dataset_path = os.path.join(REF_PATH, config['models']['dataset'])
sample_phrases = ["how are you?", "add bread to my shopping list","change my shopping list"]
identity_publisher, sample_publisher, speaker_publisher = init_node(node_name,
dataset_path,
identity_topic,
sample_topic,
output_topic)
listener(sample_rate,
num_fbanks,
model_path,
sample_phrases,
identity_publisher,
sample_publisher,
speaker_publisher,
identification_threshold,
data_topic)
|
python
|
#!/usr/bin/env python
'''containsAlternativeLocations.py
This filter returns true if the structure contains an alternative location
'''
__author__ = "Mars (Shih-Cheng) Huang"
__maintainer__ = "Mars (Shih-Cheng) Huang"
__email__ = "[email protected]"
__version__ = "0.2.0"
__status__ = "Done"
class ContainsAlternativeLocations(object):
def __call__(self, t):
structure = t[1]
for c in structure.alt_loc_list:
if c != '':
return True
return False
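# Illustrative usage (assumes an mmtf-pyspark style RDD of (pdbId, structure)
# tuples; the variable names are hypothetical):
#   structures_with_altloc = structures.filter(ContainsAlternativeLocations())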
|
python
|
import cv2
import gym
import numpy as np
from gym import spaces
from PIL import Image
from gym.wrappers.monitoring.video_recorder import VideoRecorder
# https://github.com/chris-chris/mario-rl-tutorial/
class MyDownSampleWrapper(gym.ObservationWrapper):
def __init__(self, env, image_size):
super(MyDownSampleWrapper, self).__init__(env)
self._image_size = image_size
# set up a new observation space
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._image_size[1], self._image_size[0], 1),
dtype=np.uint8
)
def observation(self, frame):
# convert the frame from RGB to gray scale
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# resize the frame to the expected shape.
#cv2.INTER_AREA
#INTER_NEAREST
frame = cv2.resize(frame, self._image_size)
#frame = cv2.resize(frame, self._image_size)
return frame[:, :, np.newaxis]
class FrameMemoryWrapper(gym.ObservationWrapper):
"""
Use 4 Frames in observation space.
"""
frame_queue = None
def __init__(self, env=None):
super(FrameMemoryWrapper, self).__init__(env)
self.frame_nr = 4
self.frame_queue = []
self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, self.frame_nr))
def _observation(self, obs):
return FrameMemoryWrapper.process(obs, self.frame_queue)
def _reset(self):
obs = super().reset()
print('clear')
self.frame_queue.clear()
return obs
@staticmethod
def process(img, frame_queue):
# first step
if len(frame_queue) == 0:
frame_queue.append(img)
frame_queue.append(img)
frame_queue.append(img)
frame_queue.append(img)
obs = np.concatenate(frame_queue[-4:], axis=2)
#x_t = np.reshape(x_t, (84, 84, 1))
return obs.astype(np.uint8)
class VideoRecorderWrapper(gym.ObservationWrapper):
"""
"""
def __init__(self, env=None, path=None, training_start= None, freq_episode=100):
super(VideoRecorderWrapper, self).__init__(env)
self.episode = 0
self.env = env
self.path= path
self.training_start = training_start
self.freq_episode = freq_episode
self.rec = None
self.rec_now = False
def _observation(self, obs):
if self.rec_now:
self.rec.capture_frame()
return obs
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
self.episode += 1
if self.rec_now:
print("Stop record episode {}".format(self.episode-1))
self.rec.close()
self.rec_now = False
if self.episode % self.freq_episode == 0:
print("Start record episode {}".format(self.episode))
path = "{}/{}_{:0>5d}.mp4".format(self.path, self.training_start, self.episode)
self.rec = VideoRecorder(self.env, path=path)
self.rec_now = True
return observation
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped._get_life()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in the lives == 0 condition for a few frames,
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
#print("was_real_done", self.env.unwrapped._get_life())
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
#print("nicht was_real_done", self.env.unwrapped._get_life())
self.lives = self.env.unwrapped._get_life()
return obs
class MyRewardWrapper(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self._x_position = 0
def step(self, action):
obs, reward, done, info = self.env.step(action)
_x_position = self.env.unwrapped._get_x_position()
_reward = _x_position - self._x_position
self._x_position = _x_position
# resolve an issue where the x position resets after death. The x delta
# typically has a magnitude of at most 3, so 5 is a safe bound
if _reward < -5 or _reward > 5:
_reward = 0
reward = _reward
#print('reward', reward)
return obs, reward, done, info
class CroppingWrapper(gym.ObservationWrapper):
"""
"""
def __init__(self, env=None):
super(CroppingWrapper, self).__init__(env)
self.env = env
self.new_size = (20,20)
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(self.new_size[1], self.new_size[0], 1),
dtype=np.uint8
)
def _observation(self, obs):
#x_t = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_AREA)
#x_t = np.reshape(x_t, (84, 84, 1))
#x_t = np.nan_to_num(x_t)
#return x_t.astype(np.uint8)
#obs.resize((32,32))
#size = (16,16)
#obs.resize(size)
obs = obs[10:30, 10:30, :] # 32x32 -> 20x20
obs = np.array(obs)
#obs = obs.shape((20,20,1))
#obs2 = np.copy(obs)
#obs2.resize(self.new_size)
#img = Image.fromarray(obs)
#cv2.imshow('image', obs2)
#cv2.waitKey(1)
#img.save('testaww_{}.png'.format(self.new_size))
return obs
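# Illustrative wrapper composition (the environment id and image size below are
# placeholders added for clarity, not part of the original module):
#   env = gym.make('SuperMarioBros-v0')            # hypothetical env id
#   env = EpisodicLifeEnv(env)
#   env = MyRewardWrapper(env)
#   env = MyDownSampleWrapper(env, image_size=(84, 84))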
|
python
|
import numpy as np
from CameraOrMarker import *
class Scenarios:
def __init__(self, scenario):
self.num_generated_samples = 217
self.std_corners_f_image = 0.3
self.marker_len = 0.162717998
# generate_solve_pnp_inputs options
self.use_sigma_points = False
self.use_dist_params = True
# Pick some camera calibration values
self.camera_matrix = np.array([
[921.17070200000001, 0., 459.90435400000001],
[0., 919.01837699999999, 351.23830099999998],
[0., 0., 1.]
])
self.dist_coeffs = np.array([-0.033458000000000002, 0.105152, 0.001256, -0.0066470000000000001, 0.])
self.rpy_world_marker_a = (np.pi / 2., 0., -np.pi / 2.)
self.xyz_world_marker_a = (0., -.5, 1.)
self.rpy_world_marker_b = (np.pi / 2., 0., -np.pi / 2.)
self.xyz_world_marker_b = (0., .5, 1.)
self.rpy_world_marker = self.rpy_world_marker_a
self.xyz_world_marker = self.xyz_world_marker_a
self.rpy_world_camera_a = (-np.pi / 2, 0., -np.pi / 2)
self.xyz_world_camera_a = (-2.5, -.5, 1.5)
self.rpy_world_camera_b = (-np.pi / 2, 0., -np.pi / 2)
self.xyz_world_camera_b = (-2.5, 0.5, .5)
self.rpy_world_camera = self.rpy_world_camera_a
self.xyz_world_camera = self.xyz_world_camera_a
self.ident = "looking along x"
if scenario == 1:
self.rpy_world_marker_a = (0., 0., 0.)
self.xyz_world_marker_a = (0., 0., 1.)
self.rpy_world_marker_b = (0., 0., 0.)
self.xyz_world_marker_b = (0., 0., 1.)
self.rpy_world_camera = (0., 0., 0.)
self.xyz_world_camera = (-1., 0., -1.5)
self.ident = "looking up"
@property
def marker_a(self):
return CameraOrMarker.marker_from_rpy(self.rpy_world_marker_a, self.xyz_world_marker_a)
@property
def marker_b(self):
return CameraOrMarker.marker_from_rpy(self.rpy_world_marker_b, self.xyz_world_marker_b)
@property
def camera_a(self):
return CameraOrMarker.camera_from_rpy(self.rpy_world_camera_a, self.xyz_world_camera_a)
@property
def camera_b(self):
return CameraOrMarker.camera_from_rpy(self.rpy_world_camera_b, self.xyz_world_camera_b)
def as_param_str(self):
return "std_corners={} dist_params={} sigma_points={}".format(
self.std_corners_f_image, self.use_dist_params, self.use_sigma_points)
|
python
|
"""Test SQL database migrations."""
from pathlib import Path
from typing import Generator
import pytest
import sqlalchemy
from pytest_lazyfixture import lazy_fixture # type: ignore[import]
from robot_server.persistence.database import create_sql_engine
from robot_server.persistence.tables import (
migration_table,
run_table,
action_table,
protocol_table,
analysis_table,
)
TABLES = [run_table, action_table, protocol_table, analysis_table]
@pytest.fixture
def database_v0(tmp_path: Path) -> Path:
"""Create a database matching schema version 0."""
db_path = tmp_path / "migration-test-v0.db"
sql_engine = create_sql_engine(db_path)
sql_engine.execute("DROP TABLE migration")
sql_engine.execute("DROP TABLE run")
sql_engine.execute(
"""
CREATE TABLE run (
id VARCHAR NOT NULL,
created_at DATETIME NOT NULL,
protocol_id VARCHAR,
PRIMARY KEY (id),
FOREIGN KEY(protocol_id) REFERENCES protocol (id)
)
"""
)
sql_engine.dispose()
return db_path
@pytest.fixture
def database_v1(tmp_path: Path) -> Path:
"""Create a database matching schema version 1."""
db_path = tmp_path / "migration-test-v1.db"
sql_engine = create_sql_engine(db_path)
sql_engine.dispose()
return db_path
@pytest.fixture
def subject(database_path: Path) -> Generator[sqlalchemy.engine.Engine, None, None]:
"""Get a SQLEngine test subject.
The tests in this suite will use this SQLEngine to test
that migrations happen properly. For other tests, the `sql_engine`
fixture in `conftest.py` should be used, instead.
"""
engine = create_sql_engine(database_path)
yield engine
engine.dispose()
@pytest.mark.parametrize(
"database_path",
[
lazy_fixture("database_v0"),
lazy_fixture("database_v1"),
],
)
def test_migration(subject: sqlalchemy.engine.Engine) -> None:
"""It should migrate a table."""
migrations = subject.execute(sqlalchemy.select(migration_table)).all()
assert [m.version for m in migrations] == [1]
# all table queries work without raising
for table in TABLES:
values = subject.execute(sqlalchemy.select(table)).all()
assert values == []
|
python
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import LEMMA, PRON_LEMMA
MORPH_RULES = {
"PRON": {
"jeg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"},
"mig": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
"du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two"},
"han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"},
"ham": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"},
"hun": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"},
"hende": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"},
"den": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
"det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
"vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"},
"os": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"},
"de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
"dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},
"min": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"din": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"},
"hendes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"},
"dens": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut", "Poss": "Yes", "Reflex": "Yes"},
"dets": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut", "Poss": "Yes", "Reflex": "Yes"},
"vores": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"deres": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
},
"VERB": {
"er": {LEMMA: "være", "VerbForm": "Fin", "Tense": "Pres"},
"var": {LEMMA: "være", "VerbForm": "Fin", "Tense": "Past"}
}
}
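# Also register title-cased variants (e.g. "Jeg", "Er") so that sentence-initial
# tokens receive the same morphological attributes.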
for tag, rules in MORPH_RULES.items():
for key, attrs in dict(rules).items():
rules[key.title()] = attrs
|
python
|
# vim: set sw=4 ts=4 expandtab :
from const import *
import math
from scipy import stats
def teller(state, nstate):
nchain = nstate['chain']
# mimic network unpredictability using random variable
df = int(math.log10(nchain['txpending'] + 10))
rv = stats.chi2(df)
#lazy_txs = int(0.01 * nchain['txpending'] * rv.rvs() / df)
lazy_txs = int(0.1 * nchain['txpending'])
#lazy_txs = min(lazy_txs, nchain['txpending']) # compensate rv.rvs()
tx_to_process = min(
nchain['txpending'] - lazy_txs,
param['blktxsize'] * config['stepblks']
)
nchain['stat_txproc'] = tx_to_process
nchain['txpending'] -= tx_to_process
# reward
reward = int(tx_to_process * param['txreward'])
#reward = int(tx_to_process * param['txreward'] * 10)
#reward = int(config['stepblks']*10*moteperamo)
nchain['coins'] += reward
nchain['coins_active'] += reward
def depleter(state, nstate):
chain = state['chain']
# asset loss
depletion = int(chain['coins_active'] * param['deplete_coin'])
nstate['chain']['coins_active'] -= depletion
nstate['chain']['coins_lost'] += depletion
# tx loss
txlost = int(chain['txpending'] * param['deplete_tx'])
nstate['chain']['accum_txlost'] += txlost
nstate['chain']['txpending'] -= txlost
# invisible hand to mediate supply and demand
def invisible(state, nstate):
chain = state['chain']
market = state['market']
# get usdperamo
if len(hist['exch']) == 0:
avg_exch = market['exchange_rate']
else:
avg_exch = sum(hist['exch']) / len(hist['exch'])
usdperamo = avg_exch
# update market value
tmp = market['value']
## method 1
#tmp *= math.pow(param['growth_factor'], config['stepblks'] / BLKSDAY)
#tmp = max(tmp, 0.001)
#market['value'] = tmp
## method 2
#tmp *= math.pow(0.3, chain['blks'] / BLKSMONTH)
#market['value'] += tmp
## method 3
x = chain['blks'] / BLKSMONTH
value = param['f_gdp_month'][3] * x**3 \
+ param['f_gdp_month'][2] * x**2 \
+ param['f_gdp_month'][1] * x \
+ param['f_gdp_month'][0]
## others
#tmp *= math.log10(market['liveness'] + 1) + 1
#tmp *= param['growth_factor']
#tmp *= param['growth_factor'] / (fee_usd + param['feescale'])
#tmp *= math.pow(param['growth_factor'], config['stepblks'] / BLKSMONTH)
nstate['market']['value'] = max(value, 0)
# update tx fee
# estimate remaining blocks until all of the currently pending txs would be
# processed
# one param['feescale'] USD for one hour
avg_pending = sum(hist['txpending']) / len(hist['txpending'])
blks = avg_pending / param['blktxsize']
fee_usd = param['feescale'] * blks / BLKSHOUR
fee = int(fee_usd / usdperamo * moteperamo)
# update
nstate['chain']['txfee'] = fee
#smooth = max(int(config['smooth'] / config['stepblks']), 2)
#chain['txfee'] = int((fee + (smooth-1)*chain['txfee']) / smooth)
# TODO: consider these (when assessing demand):
# - expected (real) rate of return
# - current exchage rate
# - expected exchange rate in the future
# - current interest rate
# - risk and uncertainty
# - liquidity
# update exchange rate
demand = 0
supply = 0
## money demand for market trade
v = param['velocity']
coin_value = chain['coins_active'] / moteperamo * usdperamo
market_value = market['value']
demand += market_value / v
supply += coin_value
## money demand for storing value
#sc = chain['stakes']
#demand += (market['interest_stake'] * sc) / market['interest_world'] - sc
## money demand from short-term negative feedback
fb = chain['coins_active'] / moteperamo / 10 \
* (avg_exch - market['exchange_rate'])
if fb > 0:
demand += fb
else:
supply += -fb
## money demand from long-term expectation
#f = chain['coins'] / moteperamo \
# * (market['exchange_rate'] - avg_exch)
#if f > 0:
# demand += f
#else:
# supply += -f
## sum up
# avoid infinity
demand = min(demand, chain['coins'] / moteperamo * usdperamo)
# avoid divide-by-zero error
supply = max(supply, DELTA_AMO)
exch = demand / supply
## smoothing
smooth = max(int(config['smooth'] / config['stepblks']), 2)
old = market['exchange_rate']
nstate['market']['exchange_rate'] = (exch + (smooth-1)*old) / smooth
hist_size = 1
hist = {
'txproc': [0],
'txpending': [0],
'txfee': [0],
'stakes': [0],
'exch': [],
}
def historian(state):
hist['txproc'].append(state['chain']['stat_txproc'])
hist['txpending'].append(state['chain']['txpending'])
hist['txfee'].append(state['chain']['txfee'])
hist['stakes'].append(state['chain']['stakes'])
hist['exch'].append(state['market']['exchange_rate'])
l = len(hist['txpending'])
if l > hist_size:
hist['txproc'] = hist['txproc'][l-hist_size:]
hist['txpending'] = hist['txpending'][l-hist_size:]
hist['txfee'] = hist['txfee'][l-hist_size:]
hist['stakes'] = hist['stakes'][l-hist_size:]
hist['exch'] = hist['exch'][l-hist_size:]
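# Note (added for clarity): the exchange-rate update in invisible() follows a
# simple quantity-of-money argument: demand ~ market value / velocity plus a
# short-term negative-feedback term, supply is the USD value of active coins,
# and the new rate is the smoothed ratio demand / supply.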
|
python
|
from mechanize import Browser
from bs4 import BeautifulSoup
import sys
import re
br = Browser()
br.set_handle_robots(False)
br.addheaders = [("User-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4275.0 Safari/537.36")]
def product_search(keyword):
br.open("https://www.amazon.co.jp/", timeout=10.0)
br.select_form(action="/s/ref=nb_sb_noss")
search_box = br.form.find_control(id="twotabsearchtextbox")
search_box.value = keyword
ret = br.submit()
bs = BeautifulSoup(ret, 'html.parser')
URLs = bs.select(".a-size-base.a-link-normal.a-text-normal")
items = bs.select(".a-size-base-plus.a-color-base.a-text-normal")
fees = bs.select(".a-size-base.a-link-normal.a-text-normal")
images = bs.select(".a-section.aok-relative.s-image-square-aspect")
url_list = []
item_list = []
fee_list = []
image_list = []
for image in images:
image_list.append(image.find("img").get('src'))
for item in items:
item_list.append(item.string)
for fee in fees:
fee_list.append(fee.find("span", class_="a-price-whole").string)
for URL in URLs:
try:
URL = re.search(r'dp(?:%2Fproduct-description)?%2F([0-9a-zA-Z]{10})', URL.get('href')).group().replace('%2F', '/')
url_list.append(re.sub('^', 'https://www.amazon.co.jp/', URL))
except AttributeError:
URL = re.search(r'dp(?:%2Fproduct-description)?/([0-9a-zA-Z]{10})', URL.get('href')).group()
url_list.append(re.sub('^', 'https://www.amazon.co.jp/', URL))
product_list = list(zip(item_list,fee_list,url_list,image_list))
print(product_list)
product_search(sys.argv[1])
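# Illustrative invocation (script name is hypothetical):
#   python amazon_search.py "wireless mouse"
# Prints a list of (title, price, product URL, image URL) tuples scraped from
# the first page of Amazon.co.jp search results.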
|
python
|
from .test_languages import TestLanguages
from .test_translator import TestTranslator
|
python
|
from setuptools import setup, find_packages
PACKAGES = find_packages()
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='python-neurostore',
version='0.1',
description='NeuroStore API wrapper',
long_description=long_description,
long_description_content_type="text/markdown",
url='http://github.com/neurostuff/python-neurostore',
author='Alejandro de la Vega',
author_email='[email protected]',
install_requires=['requests>=2.21', 'pyjwt~=1.7.1', 'requests-oauthlib'],
license='MIT',
packages=PACKAGES,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
zip_safe=False)
|
python
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Pyglet GLSL Demo Dot3 Bumpmap Shader on http://www.pythonstuff.org
# pythonian_at_inode_dot_at (c) 2010
#
# based on the "graphics.py" batch/VBO demo by
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''This expands the previous example by parallax (heightfield) mapping the bumpmapped texture
Find more GLSL-examples http://www.pythonstuff.org
'''
html = '''
<font size=+3 color=#FF3030>
<b>Pyglet GLSL Parallax Mapping Demo</b>
</font><br/>
<font size=+2 color=#00FF60>
P, O = Parallax inc/dec<br/>
B = Bumpmap on/off<br/>
ENTER = Shader on/off<br/>
R = Reset<br/>
Q, Esc = Quit<br/>
F = Toggle Figure<br/>
T = Toggle Texture<br/>
W, S, A, D = Up, Down, Left, Right<br/>
Space = Move/Stop<br/>
Arrows = Move Light 0<br/>
H = This Help<br/>
</font>
'''
from math import pi, sin, cos, sqrt
from euclid import *
import pyglet
from pyglet.gl import *
from pyglet.window import key
from pyglet import image, resource
from shader import Shader
resource.path.append('textures')
resource.reindex()
texturecnt = 3 # Texturemap0.jpg = Colormap Texturemap1.jpg = Bumpmap Texturemap2.jpg = Heightmap
try:
# Try and create a window with multisampling (antialiasing)
config = Config(sample_buffers=1, samples=4,
depth_size=16, double_buffer=True,)
window = pyglet.window.Window(resizable=True, config=config, vsync=False) # "vsync=False" to check the framerate
except pyglet.window.NoSuchConfigException:
# Fall back to no multisampling for old hardware
window = pyglet.window.Window(resizable=True)
label = pyglet.text.HTMLLabel(html, # location=location,
width=window.width//2,
multiline=True, anchor_x='center', anchor_y='center')
fps_display = pyglet.clock.ClockDisplay() # see programming guide pg 48
@window.event
def on_resize(width, height):
if height==0: height=1
# Keep text vertically centered in the window
label.y = window.height // 2
# Override the default on_resize handler to create a 3D projection
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., width / float(height), .1, 1000.)
glMatrixMode(GL_MODELVIEW)
return pyglet.event.EVENT_HANDLED
def update(dt):
global autorotate
global rot
global dist
if autorotate:
rot += Vector3(0.1, 12, 5) * dt
rot.x %= 360
rot.y %= 360
rot.z %= 360
pyglet.clock.schedule(update)
def dismiss_dialog(dt):
global showdialog
showdialog = False
pyglet.clock.schedule_once(dismiss_dialog, 10.0)
# Define a simple function to create ctypes arrays of floats:
def vec(*args):
return (GLfloat * len(args))(*args)
@window.event
def on_draw():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glLoadIdentity()
glTranslatef(0.0, 0.0, dist);
glRotatef(rot.x, 0, 0, 1)
glRotatef(rot.y, 0, 1, 0)
glRotatef(rot.z, 1, 0, 0)
glPolygonMode(GL_FRONT, GL_FILL)
if shaderon:
# bind our shader
shader.bind()
shader.uniformi('toggletexture', toggletexture )
shader.uniformi('togglebump', togglebump )
shader.uniformf('parallaxheight', parallaxheight )
for i in range(texturecnt):
glActiveTexture(GL_TEXTURE0+i)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, texture[i].id)
shader.uniformi('my_color_texture[' + str(i) + ']',i )
if togglefigure:
batch1.draw()
else:
batch2.draw()
for i in range(texturecnt):
glActiveTexture(GL_TEXTURE0+i)
glDisable(GL_TEXTURE_2D)
shader.unbind()
else:
if togglefigure:
batch1.draw()
else:
batch2.draw()
glActiveTexture(GL_TEXTURE0)
glEnable(GL_TEXTURE_2D)
glDisable(GL_LIGHTING)
glDisable(GL_DEPTH_TEST)
if showdialog:
glLoadIdentity()
glTranslatef(0, -200, -450)
label.draw()
glLoadIdentity()
glTranslatef(250, -290, -500)
fps_display.draw()
glEnable(GL_DEPTH_TEST)
glEnable(GL_LIGHTING)
glDisable(GL_TEXTURE_2D)
@window.event
def on_key_press(symbol, modifiers):
global autorotate
global rot
global dist
global togglefigure
global toggletexture
global togglebump
global parallaxheight
global light0pos
global light1pos
global showdialog
global shaderon
if symbol == key.R:
print 'Reset'
rot = Vector3(0, 0, 0)
elif symbol == key.ESCAPE or symbol == key.Q:
print 'Good Bye !' # ESC would do it anyway, but not "Q"
pyglet.app.exit()
return pyglet.event.EVENT_HANDLED
elif symbol == key.H:
showdialog = not showdialog
elif symbol == key.ENTER:
print 'Shader toggle'
shaderon = not shaderon
elif symbol == key.SPACE:
print 'Toggle autorotate'
autorotate = not autorotate
elif symbol == key.F:
togglefigure = not togglefigure
print 'Toggle Figure ', togglefigure
elif symbol == key.B:
togglebump = not togglebump
print 'Toggle Bumpmap ', togglebump
elif symbol == key.P:
parallaxheight += 0.01
print 'Parallax Height now ', parallaxheight
elif symbol == key.O:
parallaxheight -= 0.01
if parallaxheight <= 0.0:
parallaxheight = 0.0
print 'Parallax now OFF'
else:
print 'Parallax Height now ', parallaxheight
elif symbol == key.PLUS:
dist += 0.5
print 'Distance now ', dist
elif symbol == key.MINUS:
dist -= 0.5
print 'Distance now ', dist
elif symbol == key.T:
toggletexture = not toggletexture
print 'Toggle Texture ', toggletexture
elif symbol == key.A:
print 'Stop left'
if autorotate:
autorotate = False
else:
rot.y += -rotstep
rot.y %= 360
elif symbol == key.S:
print 'Stop down'
if autorotate:
autorotate = False
else:
rot.z += rotstep
rot.z %= 360
elif symbol == key.W:
print 'Stop up'
if autorotate:
autorotate = False
else:
rot.z += -rotstep
rot.z %= 360
elif symbol == key.D:
print 'Stop right'
if autorotate:
autorotate = False
else:
rot.y += rotstep
rot.y %= 360
elif symbol == key.LEFT:
print 'Light0 rotate left'
tmp = light0pos[0]
light0pos[0] = tmp * cos( lightstep ) - light0pos[2] * sin( lightstep )
light0pos[2] = light0pos[2] * cos( lightstep ) + tmp * sin( lightstep )
glLoadIdentity()
glLightfv(GL_LIGHT0, GL_POSITION, vec(*light0pos))
elif symbol == key.RIGHT:
print 'Light0 rotate right'
tmp = light0pos[0]
light0pos[0] = tmp * cos( -lightstep ) - light0pos[2] * sin( -lightstep )
light0pos[2] = light0pos[2] * cos( -lightstep ) + tmp * sin( -lightstep )
glLoadIdentity()
glLightfv(GL_LIGHT0, GL_POSITION, vec(*light0pos))
elif symbol == key.UP:
print 'Light0 up'
tmp = light0pos[1]
light0pos[1] = tmp * cos( -lightstep ) - light0pos[2] * sin( -lightstep )
light0pos[2] = light0pos[2] * cos( -lightstep ) + tmp * sin( -lightstep )
glLoadIdentity()
glLightfv(GL_LIGHT0, GL_POSITION, vec(*light0pos))
elif symbol == key.DOWN:
print 'Light0 down'
tmp = light0pos[1]
light0pos[1] = tmp * cos( lightstep ) - light0pos[2] * sin( lightstep )
light0pos[2] = light0pos[2] * cos( lightstep ) + tmp * sin( lightstep )
glLoadIdentity()
glLightfv(GL_LIGHT0, GL_POSITION, vec(*light0pos))
else:
print 'OTHER KEY'
def setup():
# One-time GL setup
global light0pos
global light1pos
global toggletexture
global togglebump
global parallaxheight
global texture
light0pos = [20.0, 20.0, 20.0, 1.0] # positional light !
light1pos = [-20.0, -20.0, 20.0, 0.0] # infinitely away light !
glClearColor(1, 1, 1, 1)
glColor4f(1.0, 1.0, 1.0, 1.0 )
glEnable(GL_DEPTH_TEST)
glEnable(GL_CULL_FACE)
texture = []
for i in range (texturecnt):
texturefile = 'Texturemap' + str(i) + '.jpg'
print "Loading Texture", texturefile
textureSurface = pyglet.resource.texture(texturefile)
texture.append( textureSurface.get_texture() )
glBindTexture(texture[i].target, texture[i].id)
print "Texture ", i, " bound to ", texture[i].id
# Uncomment this line for a wireframe view
#glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Simple light setup. On Windows GL_LIGHT0 is enabled by default,
# but this is not the case on Linux or Mac, so remember to always
# include it.
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHT1)
glLightfv(GL_LIGHT0, GL_POSITION, vec(*light0pos))
glLightfv(GL_LIGHT0, GL_AMBIENT, vec(0.3, 0.3, 0.3, 1.0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(0.9, 0.9, 0.9, 1.0))
glLightfv(GL_LIGHT0, GL_SPECULAR, vec(1.0, 1.0, 1.0, 1.0))
glLightfv(GL_LIGHT1, GL_POSITION, vec(*light1pos))
glLightfv(GL_LIGHT1, GL_DIFFUSE, vec(.6, .6, .6, 1.0))
glLightfv(GL_LIGHT1, GL_SPECULAR, vec(1.0, 1.0, 1.0, 1.0))
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.5, 0.5, 0.5, 1.0))
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, vec(1, 1, 1, 1))
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
# create our Phong Shader by Jerome GUINOT aka 'JeGX' - jegx [at] ozone3d [dot] net
# see http://www.ozone3d.net/tutorials/glsl_lighting_phong.php
shader = Shader(['''
varying vec3 lightDir0, lightDir1, eyeVec;
varying vec3 normal, tangent, binormal;
void main()
{
// Create the Texture Space Matrix
normal = normalize(gl_NormalMatrix * gl_Normal);
tangent = normalize(gl_NormalMatrix * (gl_Color.rgb - 0.5));
binormal = cross(normal, tangent);
mat3 TBNMatrix = mat3(tangent, binormal, normal);
vec3 vVertex = vec3(gl_ModelViewMatrix * gl_Vertex);
lightDir0 = vec3(gl_LightSource[0].position.xyz - vVertex) * TBNMatrix;
lightDir1 = vec3(gl_LightSource[1].position.xyz - vVertex) * TBNMatrix;
eyeVec = -vVertex * TBNMatrix;
gl_Position = ftransform();
gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
}
'''], ['''
varying vec3 normal, lightDir0, lightDir1, eyeVec;
uniform sampler2D my_color_texture['''+str(texturecnt)+''']; //0 = ColorMap, 1 = NormalMap, 2 = HeightMap
uniform int toggletexture; // false/true
uniform int togglebump; // false/true
uniform float parallaxheight;
void main (void)
{
// Compute parallax displaced texture coordinates
vec3 eye = normalize(-eyeVec);
vec2 offsetdir = vec2( eye.x, eye.y );
vec2 coords1 = gl_TexCoord[0].st;
float height1 = parallaxheight * (texture2D( my_color_texture[2], coords1).r - 0.5);
vec2 offset1 = height1 * offsetdir;
vec2 coords2 = coords1 + offset1;
float height2 = parallaxheight * (texture2D( my_color_texture[2], coords2).r - 0.5);
//vec2 offset2 = height2 * offsetdir;
vec2 newCoords = coords2;
if ( length( offset1 ) > 0.001 ) // 5.0 * abs( height1 ) > abs( height2 ) )
newCoords = coords1 + (height2/height1) * offset1;
vec4 texColor = vec4(texture2D(my_color_texture[0], newCoords).rgb, 1.0);
vec3 norm = normalize( texture2D(my_color_texture[1], newCoords).rgb - 0.5);
if ( toggletexture == 0 ) texColor = gl_FrontMaterial.ambient;
vec4 final_color = (gl_FrontLightModelProduct.sceneColor * vec4(texColor.rgb,1.0)) +
(gl_LightSource[0].ambient * vec4(texColor.rgb,1.0)) +
(gl_LightSource[1].ambient * vec4(texColor.rgb,1.0));
vec3 N = (togglebump != 0) ? normalize(norm) : vec3(0.0, 0.0, 1.0 );
vec3 L0 = normalize(lightDir0);
vec3 L1 = normalize(lightDir1);
float lambertTerm0 = dot(N,L0);
float lambertTerm1 = dot(N,L1);
if(lambertTerm0 > 0.0)
{
final_color += gl_LightSource[0].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm0;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L0, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[0].specular *
gl_FrontMaterial.specular *
specular;
}
if(lambertTerm1 > 0.0)
{
final_color += gl_LightSource[1].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm1;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L1, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[1].specular *
gl_FrontMaterial.specular *
specular;
}
gl_FragColor = final_color;
}
'''])
class Torus(object):
list = None
def __init__(self, radius, inner_radius, slices, inner_slices,
batch, group=None):
# Create the vertex and normal arrays.
vertices = []
normals = []
textureuvw = []
tangents = []
u_step = 2 * pi / (slices - 1)
v_step = 2 * pi / (inner_slices - 1)
u = 0.
for i in range(slices):
cos_u = cos(u)
sin_u = sin(u)
v = 0.
for j in range(inner_slices):
cos_v = cos(v)
sin_v = sin(v)
d = (radius + inner_radius * cos_v)
x = d * cos_u
y = inner_radius * sin_v
z = -d * sin_u
nx = cos_u * cos_v
ny = sin_v
nz = -sin_u * cos_v
n = sqrt( nx * nx + ny * ny + nz * nz )
if n < 0.99 or n > 1.01:
nx = nx / n
ny = ny / n
nz = nz / n
print "Torus: N normalized"
tx = -sin_u
ty = 0
tz = -cos_u
a = sqrt( tx * tx + ty * ty + tz * tz )
if a > 0.001:
tx = tx / a
ty = ty / a
tz = tz / a
vertices.extend([x, y, z])
normals.extend([nx, ny, nz])
textureuvw.extend([u / (2.0 * pi), v / (2.0 * pi), 0.0])
tangents.extend([ int(round(255 * (0.5 - 0.5 * tx))),
int(round(255 * (0.5 - 0.5 * ty))),
int(round(255 * (0.5 - 0.5 * tz))) ])
v += v_step
u += u_step
# Create a list of triangle indices.
indices = []
for i in range(slices - 1):
for j in range(inner_slices - 1):
p = i * inner_slices + j
indices.extend([p, p + inner_slices, p + inner_slices + 1])
indices.extend([p, p + inner_slices + 1, p + 1])
self.vertex_list = batch.add_indexed(len(vertices)//3,
GL_TRIANGLES,
group,
indices,
('v3f/static', vertices),
('n3f/static', normals),
('t3f/static', textureuvw),
('c3B/static', tangents))
def delete(self):
self.vertex_list.delete()
class Sphere(object):
list = None
def __init__(self, radius, slices, batch, group=None):
# Create the vertex and normal arrays.
vertices = []
normals = []
textureuvw = []
tangents = []
u_step = 2 * pi / (slices - 1)
v_step = pi / (slices - 1)
u = 0.
for i in range(slices):
cos_u = cos(u)
sin_u = sin(u)
v = 0.
for j in range(slices):
cos_v = cos(v)
sin_v = sin(v)
nx = sin_v * cos_u
ny = -cos_v
nz = -sin_v * sin_u
n = sqrt( nx * nx + ny * ny + nz * nz )
if n < 0.99 or n > 1.01:
nx = nx / n
ny = ny / n
nz = nz / n
print "Sphere: N normalized"
tx = nz
ty = 0
tz = -nx
a = sqrt( tx * tx + ty * ty + tz * tz )
if a > 0.001:
tx = tx / a
ty = ty / a
tz = tz / a
x = radius * nx
y = radius * ny
z = radius * nz
vertices.extend([x, y, z])
normals.extend([nx, ny, nz])
textureuvw.extend([u / (2 * pi), v / (pi), 0.0])
tangents.extend([ int(round(255 * (0.5 - 0.5 * tx))),
int(round(255 * (0.5 - 0.5 * ty))),
int(round(255 * (0.5 - 0.5 * tz))) ])
v += v_step
u += u_step
# Create a list of triangle indices.
indices = []
for i in range(slices - 1):
for j in range(slices - 1):
p = i * slices + j
indices.extend([p, p + slices, p + slices + 1])
indices.extend([p, p + slices + 1, p + 1])
self.vertex_list = batch.add_indexed(len(vertices)//3,
GL_TRIANGLES,
group,
indices,
('v3f/static', vertices),
('n3f/static', normals),
('t3f/static', textureuvw),
('c3B/static', tangents))
def delete(self):
self.vertex_list.delete()
dist = -3.5
rot = Vector3(0, 0, 0)
autorotate = True
rotstep = 10
lightstep = 10 * pi/180
togglefigure = False
toggletexture = True
togglebump = True
parallaxheight = 0.02
showdialog = True
shaderon = True
setup()
batch1 = pyglet.graphics.Batch()
torus = Torus(1, 0.3, 80, 25, batch=batch1)
batch2 = pyglet.graphics.Batch()
sphere = Sphere(1.2, 50, batch=batch2)
pyglet.app.run()
#thats all
|
python
|
"""
The wntr.metrics.hydraulic module contains hydraulic metrics.
"""
import wntr.network
from wntr.network.graph import _all_simple_paths
#from wntr.metrics.misc import _average_attribute
import numpy as np
import pandas as pd
import networkx as nx
import math
from collections import Counter
import logging
logger = logging.getLogger(__name__)
def fdv(node_results, average_times=False, average_nodes=False):
"""
Compute fraction delivered volume (FDV), equations modified from [1].
The metric can be averaged over times and/or nodes.
Parameters
----------
node_results : pd.Panel
A pandas Panel containing node results.
Items axis = attributes, Major axis = times, Minor axis = node names
FDV uses 'expected demand' and 'demand' attributes.
average_times : bool (default = False)
Flag to determine if calculations are to be averaged over each time
step. If false, FDV calculations will be performed for each time step.
If true, FDV calculations will be averaged over all time steps.
average_nodes : bool (default = False)
Flag to determine if calculations are to be averaged over each node.
If false, FDV calculations will be performed for each node. If true, FDV
calculations will be averaged over all nodes.
Returns
-------
fdv : pd.DataFrame, pd.Series, or scalar (depending on node and time averaging)
Fraction of delivered volume
References
----------
[1] Ostfeld A, Kogan D, Shamir U. (2002). Reliability simulation of water
distribution systems - single and multiquality, Urban Water, 4, 53-61
"""
exp_demand = _average_attribute(node_results['expected_demand'], average_times, average_nodes)
act_received = _average_attribute(node_results['demand'], average_times, average_nodes)
# Calculate FDV
fdv = act_received / exp_demand
# Replace NaNs (generated by nodes with 0 demand)
try:
fdv = fdv.fillna(1)
except:
if exp_demand == 0:
fdv = 1
return fdv
def fdd(node_results, Dstar, average_times=False, average_nodes=False):
"""
Compute fraction delivered demand (FDD), equations modified from [1].
The metric can be averaged over times and/or nodes.
Parameters
----------
node_results : pd.Panel
A pandas Panel containing node results.
Items axis = attributes, Major axis = times, Minor axis = node names
FDD uses 'expected demand' and 'demand' attributes.
Dstar : float
Threshold demand factor
average_times : bool (default = False)
Flag to determine if calculations are to be averaged over each time
step. If false, FDD calculations will be performed for each time step.
If true, FDD calculations will be averaged over all time steps.
average_nodes : bool (default = False)
Flag to determine if calculations are to be averaged over each node.
If false, FDD calculations will be performed for each node. If true, FDD
calculations will be averaged over all nodes.
Returns
-------
fdd : pd.DataFrame, pd.Series, or scalar (depending on node and time averaging)
Fraction of delivered demand
References
----------
[1] Ostfeld A, Kogan D, Shamir U. (2002). Reliability simulation of water
distribution systems - single and multiquality, Urban Water, 4, 53-61
"""
fdv_metric = fdv(node_results, average_times, average_nodes)
# Calculate FDD
fdd = (fdv_metric >= Dstar)+0
return fdd
def _average_attribute(attribute, average_times, average_nodes):
# Average for all times and nodes
if average_times==False and average_nodes==False:
pass
# Average for all nodes (averaged over all times)
if average_times==True and average_nodes==False:
attribute = attribute.sum(axis=0)
# Average for all time (averaged over all nodes)
if average_times==False and average_nodes==True:
attribute = attribute.sum(axis=1)
# Average for scenario (averaged over all times and nodes)
if average_times==True and average_nodes==True:
attribute = attribute.sum().sum()
return attribute
def todini(node_results, link_results, wn, Pstar):
"""
Compute Todini index, equations from [1].
The Todini index is related to the capability of a system to overcome
failures while still meeting demands and pressures at the nodes. The
Todini index defines resilience at a specific time as a measure of surplus
power at each node and measures relative energy redundancy.
Parameters
----------
node_results : pd.Panel
A pandas Panel containing node results.
Items axis = attributes, Major axis = times, Minor axis = node names
todini index uses 'head', 'pressure', and 'demand' attributes.
link_results : pd.Panel
A pandas Panel containing link results.
Items axis = attributes, Major axis = times, Minor axis = link names
todini index uses the 'flowrate' attribute.
wn : Water Network Model
A water network model. The water network model is needed to find the start and end node to each pump.
Pstar : float
Pressure threshold.
Returns
-------
todini_index : pd.Series
Time-series of Todini indexes
References
-----------
[1] Todini E. (2000). Looped water distribution networks design using a
resilience index based heuristic approach. Urban Water, 2(2), 115-122.
"""
POut = {}
PExp = {}
PInRes = {}
PInPump = {}
for name, node in wn.nodes(wntr.network.Junction):
h = np.array(node_results.loc['head',:,name]) # m
p = np.array(node_results.loc['pressure',:,name])
e = h - p # m
q = np.array(node_results.loc['demand',:,name]) # m3/s
POut[name] = q*h
PExp[name] = q*(Pstar+e)
for name, node in wn.nodes(wntr.network.Reservoir):
H = np.array(node_results.loc['head',:,name]) # m
Q = np.array(node_results.loc['demand',:,name]) # m3/s
PInRes[name] = -Q*H # switch sign on Q.
for name, link in wn.links(wntr.network.Pump):
start_node = link._start_node_name
end_node = link._end_node_name
h_start = np.array(node_results.loc['head',:,start_node]) # (m)
h_end = np.array(node_results.loc['head',:,end_node]) # (m)
h = h_start - h_end # (m)
q = np.array(link_results.loc['flowrate',:,name]) # (m^3/s)
PInPump[name] = q*(abs(h)) # assumes that pumps always add energy to the system
todini_index = (sum(POut.values()) - sum(PExp.values()))/ \
(sum(PInRes.values()) + sum(PInPump.values()) - sum(PExp.values()))
todini_index = pd.Series(data = todini_index.tolist(), index = node_results.major_axis)
return todini_index
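# Illustrative usage sketch (added): assumes `node_results` and `link_results` are the pandas
# Panels produced by a hydraulic simulation and `wn` is the matching water network model.
def _example_todini_usage(node_results, link_results, wn):
    # a Pstar of roughly 21.1 m of head corresponds to a ~30 psi pressure threshold
    todini_index = todini(node_results, link_results, wn, Pstar=21.1)
    return todini_index.min(), todini_index.mean()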
def entropy(G, sources=None, sinks=None):
"""
Compute entropy, equations from [1].
Entropy is a measure of uncertainty in a random variable.
In a water distribution network model, the random variable is
flow in the pipes and entropy can be used to measure alternate flow paths
when a network component fails. A network that carries maximum entropy
flow is considered reliable with multiple alternate paths.
Parameters
----------
G : NetworkX or WNTR graph
Entropy is computed using a directed graph based on pipe flow direction.
The 'weight' of each link is equal to the flow rate.
sources : list of strings, optional (default = all reservoirs)
List of node names to use as sources.
sinks : list of strings, optional (default = all nodes)
List of node names to use as sinks.
Returns
-------
S : dict
Node entropy, {node name: entropy value}
Shat : float
System entropy
References
-----------
[1] Awumah K, Goulter I, Bhatt SK. (1990). Assessment of reliability in
water distribution networks using entropy based measures. Stochastic
Hydrology and Hydraulics, 4(4), 309-320
"""
if G.is_directed() == False:
return
if sources is None:
sources = [key for key,value in nx.get_node_attributes(G,'type').items() if value == 'reservoir' ]
if sinks is None:
sinks = G.nodes()
S = {}
Q = {}
for nodej in sinks:
if nodej in sources:
S[nodej] = 0 # nodej is the source
continue
sp = [] # simple path
if G.node[nodej]['type'] == 'junction':
for source in sources:
if nx.has_path(G, source, nodej):
simple_paths = _all_simple_paths(G,source,target=nodej)
sp = sp + ([p for p in simple_paths])
# all_simple_paths was modified to check 'has_path' in the
# loop, but this is still slow for large networks
# what if the network was skeletonized based on series pipes
# that have the same flow direction?
# what about duplicating paths that have pipes in series?
#print j, nodeid, len(sp)
if len(sp) == 0:
S[nodej] = np.nan # nodej is not connected to any sources
continue
sp = np.array(sp)
# Uj = set of nodes on the upstream ends of links incident on node j
Uj = G.predecessors(nodej)
# qij = flow in link from node i to node j
qij = []
        # aij = number of equivalent independent paths through the link from node i to node j
aij = []
for nodei in Uj:
mask = np.array([nodei in path for path in sp])
# NDij = number of paths through the link from node i to node j
NDij = sum(mask)
if NDij == 0:
continue
temp = sp[mask]
# MDij = links in the NDij path
MDij = [(t[idx],t[idx+1]) for t in temp for idx in range(len(t)-1)]
flow = 0
for link in G[nodei][nodej].keys():
flow = flow + G[nodei][nodej][link]['weight']
qij.append(flow)
# dk = degree of link k in MDij
dk = Counter()
for elem in MDij:
                # divide by the number of links between two nodes
dk[elem] += 1/len(G[elem[0]][elem[1]].keys())
V = np.array(list(dk.values()))
aij.append(NDij*(1-float(sum(V - 1))/sum(V)))
Q[nodej] = sum(qij) # Total flow into node j
# Equation 7
S[nodej] = 0
for idx in range(len(qij)):
if qij[idx]/Q[nodej] > 0:
S[nodej] = S[nodej] - \
qij[idx]/Q[nodej]*math.log(qij[idx]/Q[nodej]) + \
qij[idx]/Q[nodej]*math.log(aij[idx])
Q0 = sum(nx.get_edge_attributes(G, 'weight').values())
# Equation 3
Shat = 0
for nodej in sinks:
if not np.isnan(S[nodej]):
if nodej not in sources:
if Q[nodej]/Q0 > 0:
Shat = Shat + \
(Q[nodej]*S[nodej])/Q0 - \
Q[nodej]/Q0*math.log(Q[nodej]/Q0)
return [S, Shat]
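# Illustrative usage sketch (added): builds a tiny hand-made MultiDiGraph to show the expected
# inputs (edge 'weight' = flow rate, node 'type' marks reservoirs/junctions). Assumes the older
# networkx API (G.node) that this module already relies on.
def _example_entropy_usage():
    G = nx.MultiDiGraph()
    G.add_node('R1', type='reservoir')
    for junction in ('J1', 'J2', 'J3'):
        G.add_node(junction, type='junction')
    G.add_edge('R1', 'J1', weight=6.0)
    G.add_edge('R1', 'J2', weight=4.0)
    G.add_edge('J1', 'J3', weight=6.0)
    G.add_edge('J2', 'J3', weight=4.0)
    node_entropy, system_entropy = entropy(G)
    return node_entropy, system_entropy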
|
python
|
import asyncio
from typing import Union
from pynput.keyboard import Key, KeyCode, Listener
from core_modules.macro_handler import MacroHandler
from core_modules.tray import Tray
from global_modules import logs
class KeyboardHandler:
def __init__(self, macro_handler: MacroHandler, tray: Tray, loop: asyncio.AbstractEventLoop):
self.__macro_handler = macro_handler
self.__tray = tray
self.__loop = loop
self.__running = {}
self.__pressed = []
listener = Listener(on_press=self.__key_press_callback, on_release=self.__key_release_callback)
listener.start()
def __key_press_callback(self, key: Union[Key, KeyCode]):
if not self.__is_key_pressed(self.__get_key_name(key)):
self.__pressed.append(self.__get_key_name(key))
if not self.__tray.enabled:
return
key_universal = None
if isinstance(key, Key):
key_universal = key.name
elif isinstance(key, KeyCode):
if key.char is None:
return
else:
key_universal = key.char
if key_universal not in str(self.__macro_handler.actual_loaded.keys()):
return
for macro_key in self.__macro_handler.actual_loaded.keys():
if self.__is_dict_key_pressed(macro_key):
macro = self.__macro_handler.actual_loaded[macro_key]
if macro['loop']:
key_running = f"{macro_key} {macro['callback']['location']}"
if key_running in self.__running.keys():
task = self.__running[key_running]['task']
task.cancel()
logs.info("keyboard_handler", f"Macro {macro['callback']['location']} stopped due to user "
f"input")
if macro['after']['func'] is not None:
logs.info("keyboard_handler", f"After {macro['after']['location']} running")
self.__loop.create_task(macro['after']['func']())
self.__running.pop(key_running)
else:
task = self.__loop.create_task(self.__create_macro_loop_task_builder(macro))
self.__running.update({key_running: {'task': task, 'location': macro['callback']['location']}})
else:
self.__loop.create_task(self.__create_macro_task_builder(macro))
def __key_release_callback(self, key: Union[Key, KeyCode]):
if self.__is_key_pressed(self.__get_key_name(key)):
self.__pressed.remove(self.__get_key_name(key))
@staticmethod
async def __create_macro_loop_task_builder(macro):
coro = macro['callback']['func']
before = macro['before']['func']
if before:
logs.info("keyboard_handler", f"Before {macro['before']['location']} running")
await before()
logs.info("keyboard_handler", f"Macro {macro['callback']['location']} running in loop")
while True:
await coro()
await asyncio.sleep(0) # Needed or the program freeze
@staticmethod
async def __create_macro_task_builder(macro):
coro = macro['callback']['func']
before = macro['before']['func']
after = macro['after']['func']
if before is not None:
logs.info("keyboard_handler", f"Before {macro['before']['location']} running")
await before()
logs.info("keyboard_handler", f"Macro {macro['callback']['location']} running")
await coro()
if after is not None:
logs.info("keyboard_handler", f"After {macro['after']['location']} running")
await after()
def __is_dict_key_pressed(self, dict_key: str) -> bool:
sub_keys = dict_key.split("+")
for i in sub_keys:
call = True
for j in i.split("."):
if not self.__is_key_pressed(j):
call = False
if call:
return True
return False
def __is_key_pressed(self, key: str) -> bool:
return key in self.__pressed
def update(self):
if self.__macro_handler.just_updated_loaded:
self.__macro_handler.just_updated_loaded = False
running_bkp = self.__running.copy()
for key, item in running_bkp.items():
task = self.__running[key]['task']
task.cancel()
logs.info("keyboard_handler", f"Macro {self.__running[key]['location']} stopped running due to window "
f"change")
self.__running.pop(key)
@staticmethod
def __get_key_name(key: Union[Key, KeyCode]) -> str:
if isinstance(key, Key):
return key.name
elif isinstance(key, KeyCode):
if key.char is not None:
return key.char
else:
return str(key.vk)
|
python
|
import requests
from slack.web.classes import messages
def webhook_response(response_url: str, json=None):
return requests.post(response_url, json=json)
def basic_responder_response(text: str) -> messages.Message:
return messages.Message(text=text)
|
python
|
from sly import Lexer, Parser
from os import path
from defs import *
global curr_file, curr_text, error_occurred, curr_namespace, reserved_names
def syntax_error(line, msg=''):
global error_occurred
error_occurred = True
print()
if msg:
print(f"Syntax Error in file {curr_file} line {line}:")
print(f" {msg}")
else:
print(f"Syntax Error in file {curr_file} line {line}")
def syntax_warning(line, is_error, msg=''):
if is_error:
global error_occurred
error_occurred = True
print()
print(f"Syntax Warning in file {curr_file}", end="")
if line is not None:
print(f" line {line}", end="")
if msg:
print(f":")
print(f" {msg}")
else:
print()
class FJLexer(Lexer):
tokens = {NS, DEF, REP,
WFLIP, SEGMENT, RESERVE,
ID, DOT_ID, NUMBER, STRING,
LE, GE, EQ, NEQ,
SHL, SHR,
NL, SC}
literals = {'=', '+', '-', '*', '/', '%',
'(', ')',
'$',
'^', '|', '&',
'?', ':',
'<', '>',
'"',
'#',
'{', '}',
"@", ","}
ignore_ending_comment = r'//.*'
# Tokens
DOT_ID = dot_id_re
ID = id_re
NUMBER = number_re
STRING = string_re
ID[r'def'] = DEF
ID[r'rep'] = REP
ID[r'ns'] = NS
ID[r'wflip'] = WFLIP
ID[r'segment'] = SEGMENT
ID[r'reserve'] = RESERVE
global reserved_names
reserved_names = {DEF, REP, NS, WFLIP, SEGMENT, RESERVE}
LE = "<="
GE = ">="
EQ = "=="
NEQ = "!="
SHL = r'<<'
SHR = r'>>'
# Punctuations
NL = r'[\r\n]'
SC = r';'
ignore = ' \t'
def NUMBER(self, t):
n = t.value
if len(n) >= 2:
if n[0] == "'":
t.value = handle_char(n[1:-1])[0]
elif n[1] in 'xX':
t.value = int(n, 16)
elif n[1] in 'bB':
t.value = int(n, 2)
else:
t.value = int(n)
else:
t.value = int(t.value)
return t
def STRING(self, t):
chars = []
s = t.value[1:-1]
i = 0
while i < len(s):
val, length = handle_char(s[i:])
chars.append(val)
i += length
t.value = sum(val << (i*8) for i, val in enumerate(chars))
return t
def NL(self, t):
self.lineno += 1
return t
def error(self, t):
global error_occurred
error_occurred = True
print()
print(f"Lexing Error in file {curr_file} line {self.lineno}: {t.value[0]}")
self.index += 1
class FJParser(Parser):
tokens = FJLexer.tokens
# TODO add Unary Minus (-), Unary Not (~). Maybe add logical or (||) and logical and (&&). Maybe handle power (**).
precedence = (
('right', '?', ':'),
('left', '|'),
('left', '^'),
('nonassoc', '<', '>', LE, GE),
('left', EQ, NEQ),
('left', '&'),
('left', SHL, SHR),
('left', '+', '-'),
('left', '*', '/', '%'),
('right', '#'),
)
# debugfile = 'src/parser.out'
def __init__(self, w, warning_as_errors, verbose=False):
self.verbose = verbose
self.defs = {'w': Expr(w)}
self.warning_as_errors = warning_as_errors
# [(params, quiet_params), statements, (curr_file, p.lineno, ns_name)]
self.macros = {main_macro: [([], []), [], (None, None, '')]}
def check_macro_name(self, name, line):
global reserved_names
base_name = self.ns_to_base_name(name[0])
if base_name in reserved_names:
syntax_error(line, f'macro name can\'t be {name[0]} ({base_name} is a reserved name)!')
if name in self.macros:
_, _, (other_file, other_line, _) = self.macros[name]
syntax_error(line, f'macro {name} is declared twice! '
f'also declared in file {other_file} (line {other_line}).')
def check_params(self, ids, macro_name, line):
for param_id in ids:
if param_id in self.defs:
syntax_error(line, f'parameter {param_id} in macro {macro_name[0]}({macro_name[1]}) '
f'is also defined as a constant variable (with value {self.defs[param_id]})')
for i1 in range(len(ids)):
for i2 in range(i1):
if ids[i1] == ids[i2]:
syntax_error(line, f'parameter {ids[i1]} in macro {macro_name[0]}({macro_name[1]}) '
f'is declared twice!')
def check_label_usage(self, labels_used, labels_declared, params, externs, global_labels, line, macro_name):
if global_labels & externs:
syntax_error(line, f"In macro {macro_name[0]}({macro_name[1]}): "
f"extern labels can't be global labels: " + ', '.join(global_labels & externs))
if global_labels & params:
syntax_error(line, f"In macro {macro_name[0]}({macro_name[1]}): "
f"extern labels can't be regular labels: " + ', '.join(global_labels & params))
if externs & params:
syntax_error(line, f"In macro {macro_name[0]}({macro_name[1]}): "
f"global labels can't be regular labels: " + ', '.join(externs & params))
# params.update([self.ns_full_name(p) for p in params])
# externs = set([self.ns_full_name(p) for p in externs])
# globals.update([self.ns_full_name(p) for p in globals])
unused_labels = params - labels_used.union(self.ns_to_base_name(label) for label in labels_declared)
if unused_labels:
syntax_warning(line, self.warning_as_errors,
f"In macro {macro_name[0]}({macro_name[1]}): "
f"unused labels: {', '.join(unused_labels)}.")
bad_declarations = labels_declared - set(self.ns_full_name(label) for label in externs.union(params))
if bad_declarations:
syntax_warning(line, self.warning_as_errors,
f"In macro {macro_name[0]}({macro_name[1]}): "
f"Declared a not extern/parameter label: {', '.join(bad_declarations)}.")
bad_uses = labels_used - global_labels - params - set(labels_declared) - {'$'}
if bad_uses:
# print('\nused:', labels_used, 'globals:', globals, 'params:', params)
syntax_warning(line, self.warning_as_errors,
f"In macro {macro_name[0]}({macro_name[1]}): "
f"Used a not global/parameter/declared-extern label: {', '.join(bad_uses)}.")
@staticmethod
def ns_name():
return '.'.join(curr_namespace)
@staticmethod
def ns_full_name(base_name):
return '.'.join(curr_namespace + [base_name])
@staticmethod
def dot_id_to_ns_full_name(p):
base_name = p.DOT_ID
without_dots = base_name.lstrip('.')
if len(without_dots) == len(base_name):
return base_name
num_of_dots = len(base_name) - len(without_dots)
if num_of_dots - 1 > len(curr_namespace):
syntax_error(p.lineno, f'Used more leading dots than current namespace depth '
f'({num_of_dots}-1 > {len(curr_namespace)})')
return '.'.join(curr_namespace[:len(curr_namespace)-(num_of_dots-1)] + [without_dots])
@staticmethod
def ns_to_base_name(name):
return name.split('.')[-1]
def error(self, token):
global error_occurred
error_occurred = True
print()
print(f'Syntax Error in file {curr_file} line {token.lineno}, token=("{token.type}", {token.value})')
@_('definable_line_statements')
def program(self, p):
ops = p.definable_line_statements
self.macros[main_macro][1] = ops
# labels_used, labels_declared = all_used_labels(ops)
# bad_uses = labels_used - set(labels_declared) - {'$'}
# if bad_uses:
# syntax_warning(None, self.warning_as_errors,
# f"Outside of macros: "
# f"Used a not declared label: {', '.join(bad_uses)}.")
@_('definable_line_statements NL definable_line_statement')
def definable_line_statements(self, p):
if p.definable_line_statement:
return p.definable_line_statements + p.definable_line_statement
return p.definable_line_statements
@_('definable_line_statement')
def definable_line_statements(self, p):
if p.definable_line_statement:
return p.definable_line_statement
return []
@_('')
def empty(self, p):
return None
@_('line_statement')
def definable_line_statement(self, p):
return p.line_statement
@_('macro_def')
def definable_line_statement(self, p):
return []
@_('NS ID')
def namespace(self, p):
curr_namespace.append(p.ID)
@_('namespace "{" NL definable_line_statements NL "}"')
def definable_line_statement(self, p):
curr_namespace.pop()
return p.definable_line_statements
@_('DEF ID macro_params "{" NL line_statements NL "}"')
def macro_def(self, p):
params, local_params, global_params, extern_params = p.macro_params
name = (self.ns_full_name(p.ID), len(params))
self.check_macro_name(name, p.lineno)
self.check_params(params + local_params, name, p.lineno)
ops = p.line_statements
self.check_label_usage(*all_used_labels(ops), set(params + local_params), set(extern_params),
set(global_params), p.lineno, name)
self.macros[name] = [(params, local_params), ops, (curr_file, p.lineno, self.ns_name())]
return None
@_('empty')
def maybe_ids(self, p):
return []
@_('IDs')
def maybe_ids(self, p):
return p.IDs
@_('empty')
def maybe_local_ids(self, p):
return []
@_('"@" IDs')
def maybe_local_ids(self, p):
return p.IDs
@_('empty')
def maybe_extern_ids(self, p):
return []
@_('empty')
def maybe_global_ids(self, p):
return []
@_('"<" ids')
def maybe_global_ids(self, p):
return p.ids
@_('">" IDs')
def maybe_extern_ids(self, p):
return p.IDs
@_('maybe_ids maybe_local_ids maybe_global_ids maybe_extern_ids')
def macro_params(self, p):
return p.maybe_ids, p.maybe_local_ids, p.maybe_global_ids, p.maybe_extern_ids
@_('IDs "," ID')
def IDs(self, p):
return p.IDs + [p.ID]
@_('ID')
def IDs(self, p):
return [p.ID]
@_('line_statements NL line_statement')
def line_statements(self, p):
return p.line_statements + p.line_statement
@_('line_statement')
def line_statements(self, p):
return p.line_statement
# @_('empty')
# def line_statements(self, p):
# return []
@_('empty')
def line_statement(self, p):
return []
@_('statement')
def line_statement(self, p):
if p.statement:
return [p.statement]
return []
@_('label statement')
def line_statement(self, p):
if p.statement:
return [p.label, p.statement]
return [p.label]
@_('label')
def line_statement(self, p):
return [p.label]
@_('ID ":"')
def label(self, p):
return Op(OpType.Label, (self.ns_full_name(p.ID),), curr_file, p.lineno)
@_('expr SC')
def statement(self, p):
return Op(OpType.FlipJump, (p.expr, next_address()), curr_file, p.lineno)
@_('expr SC expr')
def statement(self, p):
return Op(OpType.FlipJump, (p.expr0, p.expr1), curr_file, p.lineno)
@_('SC expr')
def statement(self, p):
return Op(OpType.FlipJump, (Expr(0), p.expr), curr_file, p.lineno)
@_('SC')
def statement(self, p):
return Op(OpType.FlipJump, (Expr(0), next_address()), curr_file, p.lineno)
@_('ID')
def id(self, p):
return p.ID, p.lineno
@_('DOT_ID')
def id(self, p):
return self.dot_id_to_ns_full_name(p), p.lineno
@_('ids "," id')
def ids(self, p):
return p.ids + [p.id[0]]
@_('id')
def ids(self, p):
return [p.id[0]]
@_('id')
def statement(self, p):
macro_name, lineno = p.id
return Op(OpType.Macro, ((macro_name, 0), ), curr_file, lineno)
@_('id expressions')
def statement(self, p):
macro_name, lineno = p.id
return Op(OpType.Macro, ((macro_name, len(p.expressions)), *p.expressions), curr_file, lineno)
@_('WFLIP expr "," expr')
def statement(self, p):
return Op(OpType.WordFlip, (p.expr0, p.expr1, next_address()), curr_file, p.lineno)
@_('WFLIP expr "," expr "," expr')
def statement(self, p):
return Op(OpType.WordFlip, (p.expr0, p.expr1, p.expr2), curr_file, p.lineno)
@_('ID "=" expr')
def statement(self, p):
name = self.ns_full_name(p.ID)
if name in self.defs:
syntax_error(p.lineno, f'Can\'t redeclare the variable "{name}".')
if not p.expr.eval(self.defs, curr_file, p.lineno):
self.defs[name] = p.expr
return None
syntax_error(p.lineno, f'Can\'t evaluate expression: {str(p.expr)}.')
@_('REP "(" expr "," ID ")" id')
def statement(self, p):
macro_name, lineno = p.id
return Op(OpType.Rep,
(p.expr, p.ID, Op(OpType.Macro, ((macro_name, 0), ), curr_file, lineno)),
curr_file, p.lineno)
@_('REP "(" expr "," ID ")" id expressions')
def statement(self, p):
exps = p.expressions
macro_name, lineno = p.id
return Op(OpType.Rep,
(p.expr, p.ID, Op(OpType.Macro, ((macro_name, len(exps)), *exps), curr_file, lineno)),
curr_file, p.lineno)
@_('SEGMENT expr')
def statement(self, p):
return Op(OpType.Segment, (p.expr,), curr_file, p.lineno)
@_('RESERVE expr')
def statement(self, p):
return Op(OpType.Reserve, (p.expr,), curr_file, p.lineno)
@_('expressions "," expr')
def expressions(self, p):
return p.expressions + [p.expr]
@_('expr')
def expressions(self, p):
return [p.expr]
@_('_expr')
def expr(self, p):
return p._expr[0]
@_('_expr "+" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a + b), p.lineno
return Expr(('+', (a, b))), p.lineno
@_('_expr "-" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a - b), p.lineno
return Expr(('-', (a, b))), p.lineno
@_('_expr "*" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a * b), p.lineno
return Expr(('*', (a, b))), p.lineno
@_('"#" _expr')
def _expr(self, p):
a = p._expr[0]
        if isinstance(a, int):
return Expr(a.bit_length()), p.lineno
return Expr(('#', (a,))), p.lineno
@_('_expr "/" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a // b), p.lineno
return Expr(('/', (a, b))), p.lineno
@_('_expr "%" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a % b), p.lineno
return Expr(('%', (a, b))), p.lineno
@_('_expr SHL _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a << b), p.lineno
return Expr(('<<', (a, b))), p.lineno
@_('_expr SHR _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a >> b), p.lineno
return Expr(('>>', (a, b))), p.lineno
@_('_expr "^" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a ^ b), p.lineno
return Expr(('^', (a, b))), p.lineno
@_('_expr "|" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a | b), p.lineno
return Expr(('|', (a, b))), p.lineno
@_('_expr "&" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(a & b), p.lineno
return Expr(('&', (a, b))), p.lineno
@_('_expr "?" _expr ":" _expr')
def _expr(self, p):
a, b, c = p._expr0[0], p._expr1[0], p._expr2[0]
        if isinstance(a, int) and isinstance(b, int) and isinstance(c, int):
return Expr(b if a else c), p.lineno
return Expr(('?:', (a, b, c))), p.lineno
@_('_expr "<" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(1 if a < b else 0), p.lineno
return Expr(('<', (a, b))), p.lineno
@_('_expr ">" _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(1 if a > b else 0), p.lineno
return Expr(('>', (a, b))), p.lineno
@_('_expr LE _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(1 if a <= b else 0), p.lineno
return Expr(('<=', (a, b))), p.lineno
@_('_expr GE _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(1 if a >= b else 0), p.lineno
return Expr(('>=', (a, b))), p.lineno
@_('_expr EQ _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(1 if a == b else 0), p.lineno
return Expr(('==', (a, b))), p.lineno
@_('_expr NEQ _expr')
def _expr(self, p):
a, b = p._expr0[0], p._expr1[0]
        if isinstance(a, int) and isinstance(b, int):
return Expr(1 if a != b else 0), p.lineno
return Expr(('!=', (a, b))), p.lineno
@_('"(" _expr ")"')
def _expr(self, p):
return p._expr
@_('NUMBER')
def _expr(self, p):
return Expr(p.NUMBER), p.lineno
@_('STRING')
def _expr(self, p):
return Expr(p.STRING), p.lineno
@_('"$"')
def _expr(self, p):
return next_address(), p.lineno
@_('id')
def _expr(self, p):
id_str, lineno = p.id
if id_str in self.defs:
return self.defs[id_str], lineno
return Expr(id_str), lineno
def exit_if_errors():
if error_occurred:
raise FJParsingException(f'Errors found in file {curr_file}. Assembly stopped.')
def parse_macro_tree(input_files, w, warning_as_errors, verbose=False):
global curr_file, curr_text, error_occurred, curr_namespace
error_occurred = False
lexer = FJLexer()
parser = FJParser(w, warning_as_errors, verbose=verbose)
for curr_file in input_files:
if not path.isfile(curr_file):
raise FJParsingException(f"No such file {curr_file}.")
curr_text = open(curr_file, 'r').read()
curr_namespace = []
lex_res = lexer.tokenize(curr_text)
exit_if_errors()
parser.parse(lex_res)
exit_if_errors()
return parser.macros
|
python
|
#!/usr/bin/env python
# coding: utf-8
# # ML Pipeline Preparation
# Follow the instructions below to help you create your ML pipeline.
# ### 1. Import libraries and load data from database.
# - Import Python libraries
# - Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
# - Define feature and target variables X and Y
# In[1]:
# import libraries
import re
import numpy as np
import pandas as pd
import nltk
nltk.download(['punkt', 'wordnet'])
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.datasets import make_multilabel_classification
from sklearn.multioutput import MultiOutputClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# In[3]:
# load data from database
from sqlalchemy import create_engine
engine = create_engine('sqlite:///InsertDatabaseName.db')
df = pd.read_sql_table('disaster_messages', engine)
df.head()
X = df['message']
Y = df[df.columns.difference(['id', 'message','original','genre'])]
# ### 2. Write a tokenization function to process your text data
# In[4]:
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
import re
def tokenize(text):
# get list of all urls using regex
detected_urls = re.findall(url_regex, text)
# replace each url in text string with placeholder
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
# tokenize text
tokens = word_tokenize(text)
# initiate lemmatizer
lemmatizer = WordNetLemmatizer()
# iterate through each token
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
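# In[ ]:
# Quick sanity check of the tokenizer on a sample message (illustrative only).
tokenize("Weather update - a cold front from Cuba that could pass over Haiti http://example.com")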
# ### 3. Build a machine learning pipeline
# This machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
# In[5]:
X_train, X_test, y_train, y_test = train_test_split(X, Y)
knn = KNeighborsClassifier(n_neighbors=5)
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(knn, n_jobs=-1))
])
# ### 4. Train pipeline
# - Split data into train and test sets
# - Train pipeline
# In[6]:
pipeline.fit(X_train, y_train)
# ### 5. Test your model
# Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
# In[ ]:
y_pred = pipeline.predict(X_test)
y_pred
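# In[ ]:
# Per-category precision/recall/f1, as described above (sketch): iterate over the target
# columns and score each one with classification_report.
from sklearn.metrics import classification_report
for i, column in enumerate(y_test.columns):
    print(column)
    print(classification_report(y_test[column], y_pred[:, i]))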
# ### 6. Improve your model
# Use grid search to find better parameters.
# In[ ]:
# the grid targets the KNN estimator wrapped by MultiOutputClassifier
parameters = {
    'clf__estimator__n_neighbors': [3, 5],
    'clf__estimator__weights': ['uniform', 'distance']
}
cv = GridSearchCV(pipeline, param_grid=parameters)
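# In[ ]:
# Fit the grid search and predict with the tuned estimator (sketch; this can take a while).
# `y_pred_tuned` is a new name introduced here for the tuned predictions.
cv.fit(X_train, y_train)
y_pred_tuned = cv.predict(X_test)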
# ### 7. Test your model
# Show the accuracy, precision, and recall of the tuned model.
#
# Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
# In[ ]:
def display_results(cv, y_test, y_pred):
labels = np.unique(y_pred)
    # flatten the multi-output arrays so a pooled 0/1 confusion matrix can be computed
    confusion_mat = confusion_matrix(y_test.values.ravel(), y_pred.ravel(), labels=labels)
    accuracy = (y_pred == y_test.values).mean()
print("Labels:", labels)
print("Confusion Matrix:\n", confusion_mat)
print("Accuracy:", accuracy)
print("\nBest Parameters:", cv.best_params_)
display_results(cv, y_test, y_pred)
# ### 8. Try improving your model further. Here are a few ideas:
# * try other machine learning algorithms
# * add other features besides the TF-IDF
# In[ ]:
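# One possible improvement (sketch): swap the KNN estimator for a random forest, which often
# copes better with sparse TF-IDF features. `pipeline_rf` is a new name introduced here.
pipeline_rf = Pipeline([
    ('vect', CountVectorizer(tokenizer=tokenize)),
    ('tfidf', TfidfTransformer()),
    ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))
])
pipeline_rf.fit(X_train, y_train)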
# ### 9. Export your model as a pickle file
# In[ ]:
import joblib  # sklearn.externals.joblib is deprecated; use the standalone package
joblib_file = "joblib_model.pkl"
joblib.dump(cv, joblib_file)
# ### 10. Use this notebook to complete `train.py`
# Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
# In[ ]:
|
python
|
#!/usr/bin/python
# The templating engine and the parser for the dllup markup language are hereby
# released open-source under the MIT License.
#
# Copyright (c) 2015 Daniel Lawrence Lu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dllup
import hashlib
import os
import re
import struct
import time
from operator import itemgetter
RASTER_IMG = ['.png', '.jpg']
# navigation markup
PORTFOLIO_NAV = '<a href="{child}/"><figure><img src="{pic}" alt="{child}"/><figcaption>{title} ({subtitle})</figcaption></figure></a>'
BLOG_NAV = '<a href="{child}/"><span class="blogdate">{date}</span><span class="blogtitle">{title}</span></a>'
ROOT_NAV = '<a href="/{child}/">{child}</a>'
# the first breadcrumb
BREAD = '<a href="/"><span id="dllu"><span style="display:none;">dllu</span><span id="D"></span><span id="L0"></span><span id="L1"></span><span id="U"></span></span></a><span>/</span>'
BREAD_HERO = '<a href="/" id="hero-a"><span id="dllu-hero"><span style="display:none;">dllu</span><span id="D"></span><span id="L0"></span><span id="L1"></span><span id="U"></span></span></a>'
# all consecutive breadcrumbs
CRUMB = '<a href="{cpath}">{child}</a><span>/</span>'
# page markup
PAGE = '<!DOCTYPE html>\n{sig}\n{htmlhead}<nav id="breadcrumbs">{breadcrumbs}</nav><nav id="rootnav">{rootnav}</nav><nav id="{navtype}">{nav}</nav><main>{output}<footer><p>© Daniel Lawrence Lu. Page generated on {time} by <a href="/programming/dllup/">dllup</a>. (<a href="{text}">text version</a>)</footer></main>{htmlfoot}'
PAGE_HERO = PAGE.replace('id="breadcrumbs"', 'id="hero"')
def readconfig(configpath):
# Reads a config file which is a simple text file of key-value pairs.
# One key-value pair per line, key (no whitespaces) is separated from
# value by whitespace.
# Valid keys are: type, root
if not os.path.exists(configpath):
return {}
config = open(configpath).read()
configsplit = [cc.split(None, 1) for cc in config.split('\n')]
return {c[0]: c[1] for c in configsplit if len(c) >= 2}
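# Illustrative example (added): a config file containing the two lines
#     type portfolio
#     root /some/prefix
# parses to {'type': 'portfolio', 'root': '/some/prefix'} (values here are placeholders).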
def recurse(path='', rootnav='', root=''):
global htmlhead
global htmlfoot
children = os.listdir(path)
folderdata = [
get_folderdata(os.path.join(path, c)) for c in children
if os.path.isdir(os.path.join(path, c))
]
config = readconfig(os.path.join(path, 'config'))
if 'root' in config:
root = config['root']
navtype = config['type'] if 'type' in config else None
# generate navigation markup
nav = ''
if navtype == 'blogposts':
folderdata = sorted(
[f for f in folderdata if 'date' in f],
key=itemgetter('date'),
reverse=True,
)
elif navtype == 'portfolio':
folderdata = sorted(
[f for f in folderdata if 'subtitle' in f],
key=itemgetter('subtitle'),
reverse=True,
)
else:
folderdata = sorted([f for f in folderdata if 'child' in f],
key=itemgetter('child'))
for f in folderdata:
try:
if navtype == 'root':
rootnav += ROOT_NAV.format(**f)
elif navtype == 'blogposts':
nav += BLOG_NAV.format(**f)
elif navtype == 'portfolio':
nav += PORTFOLIO_NAV.format(**f)
except KeyError:
pass # ignore folders without complete data
breadcrumbs = crumbify(path)
# recurse through children
for child in children:
cpath = os.path.join(path, child)
if os.path.isdir(cpath):
recurse(cpath, rootnav, root)
        if child[-4:] in RASTER_IMG and '_600' not in child:
            resize_images(path, child)
elif child[-5:] == '.dllu':
markup = open(os.path.join(path, child)).read()
sig = '<!--%s-->' % hashlib.sha1(
struct.pack('f', os.path.getmtime(cpath))).hexdigest()
sig2 = None
try:
with open(os.path.join(path, child[:-5] + '.html')) as f:
f.readline()
sig2 = f.readline()
except FileNotFoundError:
pass
if sig == sig2:
continue
output = dllup.parse(markup)
f = open(os.path.join(path, child[:-5] + '.html'), 'w')
PP = PAGE
if path == '.':
PP = PAGE_HERO
ss = markup.split('\n===\n', 1)
if len(ss) > 1:
title = ss[0].strip()
else:
title = path.split('/')[-1]
head = htmlhead.format(title=title)
f.write(
PP.format(htmlhead=head,
htmlfoot=htmlfoot,
breadcrumbs=breadcrumbs,
rootnav=rootnav,
navtype=navtype,
output=output,
time=time.strftime('%Y-%m-%d', time.gmtime()),
child=child,
nav=nav,
sig=sig,
text=child).replace(
' src="/',
' src="%s/' % root,
).replace(
' href="/',
' href="%s/' % root,
))
f.close()
def resize_images(path, child):
filename = os.path.join(path, child)
filename600 = os.path.join(path, child[:-4] + '_600' + child[-4:])
filename600x2 = os.path.join(path, child[:-4] + '_600@2x' + child[-4:])
for f in (filename600, filename600x2):
scale = 600
if '@2x' in f:
scale = 1200
if not os.path.exists(f):
os.system(f'gm convert "{filename}" -resize {scale} "{f}"')
def crumbify(path):
if path == '.':
return BREAD_HERO
breadcrumbs = BREAD
crumbs = '/'
for crumb in path.split('/')[1:]:
crumbs += crumb + '/'
breadcrumbs += CRUMB.format(cpath=crumbs, child=crumb)
return breadcrumbs
def get_folderdata(path):
if os.path.exists(os.path.join(path, 'private')):
return {}
folderdata = {'child': os.path.split(path)[1]}
index = os.path.join(path, 'index.dllu')
if os.path.exists(index):
content = open(index).read().split('\n===\n', 1)[0]
content = [d for d in content.split('\n') if d.strip() != '']
if len(content) >= 1:
folderdata['title'] = dllup.parsetext(content[0])
if len(content) >= 2:
folderdata['subtitle'] = dllup.parsetext(content[1])
else:
return {}
for extension in RASTER_IMG:
if os.path.exists(path + extension):
folderdata['pic'] = os.path.split(path)[1] + extension
    if re.match(r'y\d\d\d\dm\d\dd\d\d', os.path.split(path)[1]):
folderdata['date'] = re.sub('m|d', '-', os.path.split(path)[1][1:])
return folderdata
def main():
global htmlhead, htmlfoot
htmlhead = open('html/head.html').read()
htmlfoot = open('html/foot.html').read()
cssname = 'dllu-%s.css' % hashlib.sha1(
struct.pack('f', os.path.getmtime('css'))).hexdigest()
os.system('sassc -t compressed css/dllu.scss > %s' % cssname)
htmlhead = htmlhead.replace('dllu.css', cssname)
recurse('.')
if __name__ == '__main__':
main()
|
python
|
import argparse
from datetime import datetime, timedelta
import logging
import os
import pprint
import random
from config import DATETIME_FORMAT, EXPORT_PATH, GENDER_PREF_FUNCTIONS, AGE_PREF_FUNCTIONS,\
AGE_YOUNG_MID, AGE_MID_OLD, DEPARTMENTS, VENDORS, CATEGORIES, CATEGORIES_UNIQUE, DEVELOPMENT
from converters.summary_con import SummaryCon
from exporters.csv_exp import CsvExp
from exporters.postgres_exp import PostgresExp
from exporters.json_exp import JsonExp
from generators.customers_gen import CustomersGen
from generators.inventory_gen import InventoryGen
from generators.products_gen import ProductsGen
from generators.simple_gen import SimpleGen
from generators.stores_gen import StoresGen
from generators.customer_preferences_gen import CustomerPreferencesGen
from generators.coupon_gen import CouponGen
from generators.orders_gen import OrdersGen
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--customers", type=int, help="number of customers")
parser.add_argument("-d", "--departments", type=int, help="number of departments")
parser.add_argument("-p", "--products", type=int, help="number of products")
parser.add_argument("-C", "--coupons", type=int, help="number of coupons")
parser.add_argument("-S", "--start", type=str, help="start date")
parser.add_argument("-E", "--end", type=str, help="end date")
parser.add_argument("-D", "--days", type=int, help="number of days if end not set")
parser.add_argument("-P", "--path", type=str, help="path where to generate a files")
parser.add_argument("-v", "--verbose", help="verbose", action='store_true')
command_params = parser.parse_args()
customers_nr = command_params.customers or 1000
departments_nr = len(DEPARTMENTS)
products_nr = command_params.products or 3000
vendors_nr = len(VENDORS)
categories_nr = len(CATEGORIES_UNIQUE)
start = datetime.strptime(command_params.start or '2010-01-01 09:00:00', DATETIME_FORMAT)
end = datetime.strptime(command_params.end, DATETIME_FORMAT)\
if command_params.end else start + timedelta(days=command_params.days or 365)
path = command_params.path or EXPORT_PATH
FORMAT = '%(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO if not command_params.verbose else logging.DEBUG)
logger = logging.getLogger('datagen')
if __name__ == "__main__":
if DEVELOPMENT:
random.seed(5793)
logger.info("Generating data:")
logger.info(f" Customers: {customers_nr}")
logger.info(f" Departments: {departments_nr}")
logger.info(f" Products: {products_nr}")
logger.info(f" Vendors: {vendors_nr}")
logger.info(f" Categories: {categories_nr}")
logger.info(f"to path: {path}")
logger.info("-" * 30)
# TODO: departments generator
# dg = SimpleGen()
# departments = dg.generate()
# vg = SimpleGen(vendors_nr)
# vendors = vg.generate()
# cag = SimpleGen(categories_nr)
# categories = cag.generate()
cug = CustomersGen(customers_nr)
customers = cug.generate()
pg = ProductsGen(products_nr)
products = pg.generate()
ig = InventoryGen(products_nr)
inventory = ig.generate()
cpg = CustomerPreferencesGen(customers)
customer_preferences = cpg.generate()
cog = CouponGen(start, end, products_nr)
coupons = cog.generate()
og = OrdersGen(customer_preferences, products, coupons, start, end)
orders, order_details = og.generate()
logger.debug("-" * 30)
logger.debug("SAMPLES:")
logger.debug("Customers:")
logger.debug(pprint.pformat(customers[:5]))
logger.debug("-" * 10)
logger.debug("Products:")
logger.debug(pprint.pformat(products[:5]))
logger.debug("-" * 10)
logger.debug("Inventory:")
logger.debug(pprint.pformat(inventory[:5]))
logger.debug("-" * 10)
logger.debug("Preferences:")
logger.debug(pprint.pformat(customer_preferences[:3]))
logger.debug("-" * 10)
logger.debug("Coupons:")
logger.debug(pprint.pformat(coupons[:5]))
logger.debug("-" * 10)
logger.debug("Orders:")
logger.debug(pprint.pformat(orders[:5]))
logger.info("-" * 30)
logger.info("SUMMARY:")
logger.debug(" Customers: " + str(len(customers)))
logger.info(" Orders: " + str(len(orders)))
logger.info(" Order_details: " + str(len(order_details)))
logger.debug(" Departments: " + str(departments_nr)) # TODO: change to departments
logger.debug(" Products: " + str(len(products)))
logger.debug(" Vendors: " + str(vendors_nr))
logger.debug(" Categories: " + str(categories_nr))
logger.info(" Inventory: " + str(len(inventory)))
logger.debug(" Customer_preferences: " + str(len(customer_preferences)))
logger.info("-" * 30)
    # PostgresExp.export(
# path,
# # Departments=departments,
# Vendors=vendors,
# Categories=categories,
# Customers=customers,
# Products=products,
# Inventory=inventory,
# Orders=orders,
# OrderDetails=order_details
# )
# JsonExp.export(
# path,
# filename='preferences.json',
# indent=4,
# MenPreferences={
# 'departments': cpg.reveal_general_preferences(GENDER_PREF_FUNCTIONS['M'], cpg.departments),
# 'vendors': cpg.reveal_general_preferences(GENDER_PREF_FUNCTIONS['M'], cpg.vendorsd),
# 'categories': cpg.reveal_general_preferences(GENDER_PREF_FUNCTIONS['M'], cpg.categories)
# },
# WomenPreferences={
# 'departments': cpg.reveal_general_preferences(GENDER_PREF_FUNCTIONS['F'], cpg.departments),
# 'vendors': cpg.reveal_general_preferences(GENDER_PREF_FUNCTIONS['F'], cpg.vendors),
# 'categories': cpg.reveal_general_preferences(GENDER_PREF_FUNCTIONS['F'], cpg.categories)
# },
# Young={
# 'departments': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"0-{AGE_YOUNG_MID - 1}"], cpg.departments_shuffled),
# 'vendors': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"0-{AGE_YOUNG_MID - 1}"], cpg.vendors_shuffled),
# 'categories': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"0-{AGE_YOUNG_MID - 1}"], cpg.categories_shuffled)
# },
# Mid={
# 'departments': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}"], cpg.departments_shuffled),
# 'vendors': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}"], cpg.vendors_shuffled),
# 'categories': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}"], cpg.categories_shuffled)
# },
# Old={
# 'departments': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"{AGE_MID_OLD}-200"], cpg.departments_shuffled),
# 'vendors': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"{AGE_MID_OLD}-200"], cpg.vendors_shuffled),
# 'categories': cpg.reveal_general_preferences(AGE_PREF_FUNCTIONS[f"{AGE_MID_OLD}-200"], cpg.categories_shuffled)
# }
# )
# sum_conv = SummaryCon(orders, order_details, customers, products, vendors_nr, departments_nr, categories_nr)
# vendor_sum, department_sum, category_sum = sum_conv.convert()
CsvExp.export(path, "customers", customers)
CsvExp.export(path, "products", products)
CsvExp.export(path, "inventory", inventory)
CsvExp.export(path, "orders", orders)
CsvExp.export(path, "order_details", order_details)
# prepare coupons data
coupon_product = []
for coupon in coupons:
coupon_product += [{'coupon_id': coupon['id'], 'product_id': p} for p in coupon['products']]
        del coupon['products']
CsvExp.export(path, "coupons", coupons)
CsvExp.export(path, "coupon_product", coupon_product)
JsonExp.export(
path,
filename='customer_preferences',
indent=4,
CustomerPreferences=customer_preferences
)
logger.info("All done")
|
python
|
from pathlib import Path
class DocsTranslateBaseException(Exception):
pass
class UnknownServiceError(DocsTranslateBaseException):
def __init__(self, service_name: str) -> None:
super().__init__(f'{service_name} service is unknown')
class ConfigurationError(DocsTranslateBaseException):
def __init__(self, property_name: str) -> None:
super().__init__(
f'The setting "{property_name}" is missing. Check your config file or cli arguments'
)
class ObjectNotFoundException(DocsTranslateBaseException):
def __init__(self, obj: Path) -> None:
super().__init__(f'{obj} not found')
class NoApiKeyFileError(DocsTranslateBaseException):
def __init__(self, api_key_path: Path) -> None:
super().__init__(
f'API_KEY file in location "{api_key_path}" not found\n'
'Provide API_KEY file path or create it, if not exist'
)
class NoConfigFileError(DocsTranslateBaseException):
def __init__(self, not_found_file: Path) -> None:
super().__init__(
f'No config file found. Create file {not_found_file} or pass custom file with `-c` param'
)
class FileIsNotMarkdown(DocsTranslateBaseException):
def __init__(self, not_md_obj: Path) -> None:
super().__init__(f'{not_md_obj} is not a Markdown or a restructuredtext file!')
class DocsFileNotFoundError(FileNotFoundError):
def __init__(self, not_md_obj: Path) -> None:
        super().__init__(f'No Markdown or restructuredtext files found in {not_md_obj}!')
class FileCreateError(DocsTranslateBaseException):
def __init__(self, not_md_obj: Path) -> None:
        super().__init__(f'Could not create the translated file {not_md_obj}!')
|
python
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import os, sys
import numpy as np
from cntk.device import set_default_device
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(abs_path, "..", "..", "..", "..", "Examples", "SequenceClassification", "SimpleExample", "Python"))
from SequenceClassification import train_sequence_classifier
TOLERANCE_ABSOLUTE = 1E-1
def test_seq_classification_error(device_id):
from cntk.ops.tests.ops_test_utils import cntk_device
set_default_device(cntk_device(device_id))
evaluation_avg, loss_avg = train_sequence_classifier()
expected_avg = [0.51, 1.28]
assert np.allclose([evaluation_avg, loss_avg], expected_avg, atol=TOLERANCE_ABSOLUTE)
|
python
|
from __future__ import absolute_import, unicode_literals
import logging
import os
import pytest
from psd_tools.psd.filter_effects import (FilterEffects, FilterEffect, FilterEffectChannel, FilterEffectExtra)
from ..utils import check_write_read, check_read_write, TEST_ROOT
logger = logging.getLogger(__name__)
@pytest.mark.parametrize('args', [
('uuid', 1, (0, 0, 512, 512), 8, 3, [
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
], None),
('uuid', 1, (0, 0, 512, 512), 8, 3, [
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
], FilterEffectExtra(1, [0, 0, 512, 512], 0, b'\x00')),
('uuid', 1, (0, 0, 512, 512), 8, 3, [
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
FilterEffectChannel(1, 0, b'\x00'),
], FilterEffectExtra(0)),
])
def test_filter_effect(args):
check_write_read(FilterEffect(*args))
@pytest.mark.parametrize('is_written, compression, data', [
(0, None, b''),
(1, None, b''),
(1, 0, b''),
(1, 0, b'\x00'),
])
def test_filter_effect_channel(is_written, compression, data):
check_write_read(FilterEffectChannel(is_written, compression, data))
@pytest.mark.parametrize('filename', [
'filter_effects_1.dat',
'filter_effects_2.dat',
])
def test_filter_effects_rw(filename):
filepath = os.path.join(TEST_ROOT, 'tagged_blocks', filename)
with open(filepath, 'rb') as f:
fixture = f.read()
check_read_write(FilterEffects, fixture)
|
python
|
from django import template
from backoffice.models import *
register = template.Library()
@register.simple_tag
def number_of_authors(request):
qs = Author.objects.all()
return qs.count()
@register.simple_tag
def number_of_questions(request):
qs = Question.objects.all()
return qs.count()
@register.simple_tag
def number_of_choices(request):
qs = Choice.objects.all()
return qs.count()
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import getopt
import re
from itertools import *
import time
import json
import csv
import codecs
import random as r
import time
import random
import pandas as pd
## This script converts a CSV file to JSON format.
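# Minimal sketch of the conversion described above (added). The file names below are
# placeholders -- adjust them to the real input/output paths.
def csv_to_json(csv_path, json_path):
    df = pd.read_csv(csv_path, encoding='utf-8')
    df.to_json(json_path, orient='records', force_ascii=False)

if __name__ == '__main__':
    csv_to_json('input.csv', 'output.json')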
|
python
|
import logging
import typing
import ckan.plugins.toolkit as toolkit
logger = logging.getLogger(__name__)
@toolkit.side_effect_free
def dcpr_request_list(context: typing.Dict, data_dict: typing.Dict) -> typing.List:
logger.debug("Inside the dcpr_request_list action")
access_result = toolkit.check_access(
"dcpr_request_list_auth", context, data_dict=data_dict
)
logger.debug(f"access_result: {access_result}")
fake_requests = [
{"name": "req1", "owner": "tester1"},
{"name": "req2", "owner": "tester1"},
{"name": "req3", "owner": "tester1"},
{"name": "req4", "owner": "tester2"},
]
result = []
current_user = context["auth_user_obj"]
for dcpr_request in fake_requests:
if dcpr_request["owner"] == current_user.name:
result.append(dcpr_request)
return result
|
python
|
"""
Unit testing
============
Note:
Tests for `ensemble.control.operational.reference`
"""
# ============================================================================
# STANDARD IMPORTS
# ============================================================================
import pytest
import numpy as np
from numpy.testing import assert_array_equal
# ============================================================================
# INTERNAL IMPORTS
# ============================================================================
from ensemble.control.operational.reference import ReferenceHeadway
from ensemble.logic.platoon_states import (
StandAlone,
Platooning,
Joining,
Splitting,
)
# ============================================================================
# TESTS AND DEFINITIONS
# ============================================================================
def test_splitting():
r = ReferenceHeadway()
r.create_time_gap_hwy(Splitting())
    assert r.reference_headway[-1] == pytest.approx(3 * 1.4)
def test_joining():
r = ReferenceHeadway(gap0=2)
r.create_time_gap_hwy(Joining())
assert r.reference_headway[-1] == 1.4
|
python
|
"""Test parallel deployment."""
# pylint: disable=redefined-outer-name
from __future__ import annotations
import platform
import shutil
from pathlib import Path
from typing import TYPE_CHECKING, Generator
import pytest
from runway._cli import cli
if TYPE_CHECKING:
from click.testing import CliRunner, Result
CURRENT_DIR = Path(__file__).parent
@pytest.fixture(scope="module")
def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]:
    """Execute `runway deploy` with `runway destroy` as a cleanup step."""
yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"})
@pytest.fixture(scope="module")
def destroy_result(cli_runner: CliRunner) -> Generator[Result, None, None]:
"""Execute `runway destroy`."""
yield cli_runner.invoke(cli, ["destroy"], env={"CI": "1"})
shutil.rmtree(CURRENT_DIR / "child-01.cfn" / ".runway", ignore_errors=True)
shutil.rmtree(CURRENT_DIR / "child-02.cfn" / ".runway", ignore_errors=True)
@pytest.mark.order("first")
@pytest.mark.skipif(
platform.system() != "Linux", reason="only runs consistently on Linux"
)
def test_deploy_exit_code(deploy_result: Result) -> None:
"""Test deploy exit code."""
assert deploy_result.exit_code == 0
@pytest.mark.order(after="test_deploy_exit_code")
@pytest.mark.skipif(
platform.system() != "Linux", reason="only runs consistently on Linux"
)
def test_deploy_log_messages(deploy_result: Result) -> None:
"""Test deploy log messages."""
assert (
"deployment_1:processing regions in parallel... (output will be interwoven)"
in deploy_result.stdout
    ), f"expected in stdout:\n{deploy_result.stdout}"
@pytest.mark.order("last")
@pytest.mark.skipif(
platform.system() != "Linux", reason="only runs consistently on Linux"
)
def test_destroy_exit_code(destroy_result: Result) -> None:
    """Test destroy exit code."""
assert destroy_result.exit_code == 0
|
python
|
import unittest
from mock import patch
from todo import Todo, TodoManager
class TestTodo(unittest.TestCase):
def test_default(self):
todo = Todo('foo')
self.assertEqual(todo.task, 'foo')
self.assertFalse(todo.done)
def test_default_done(self):
todo = Todo('bar', True)
self.assertEqual(todo.task, 'bar')
self.assertTrue(todo.done)
def test_toggle_done(self):
todo = Todo('baz')
todo.toggle()
self.assertTrue(todo.done)
class TestTodoManager(unittest.TestCase):
def setUp(self):
patch('todo.TodoManager._load').start()
patch('todo.TodoManager._save').start()
self.manager = TodoManager()
self.manager.todos = [
Todo('foo'),
Todo('bar')
]
@patch('builtins.input', return_value='baz')
def test_add(self, patch_raw_input):
self.manager.add()
self.assertEqual(self.manager.todos[2].task, 'baz')
def test_delete(self):
self.manager.delete(1)
self.assertEqual(len(self.manager.todos), 1)
def test_delete_all(self):
self.manager.delete()
self.assertEqual(len(self.manager.todos), 0)
def test_toggle_done(self):
self.manager.toggle_done(0)
self.assertEqual(self.manager.todos[0].done, True)
if __name__ == '__main__':
unittest.main()
|
python
|
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import math
from sklearn.model_selection import train_test_split
import pickle
from keras.models import load_model
def loadData(filename):
file_ptr = open(filename, 'rb')
loaded_obj = pickle.load(file_ptr)
return loaded_obj
embeddings_index = loadData("classifier_model/glove.840B.300d.pkl")
# Convert values to embeddings
def text_to_array(text):
    empty_emb = np.zeros(300)
    text = text[:-1].split()[:30]
    embeds = [embeddings_index.get(x, empty_emb) for x in text]
    embeds += [empty_emb] * (30 - len(embeds))
return np.array(embeds)
# # train_vects = [text_to_array(X_text) for X_text in tqdm(train_df["question_text"])]
# val_vects = np.array([text_to_array(X_text) for X_text in tqdm(val_df["question_text"][:3000])])
# val_y = np.array(val_df["target"][:3000])
model = load_model('classifier_model/my_modelcpu.h5')
zz = ['I like to sleep',"that's cool other cultures are nice", "where is Geneva cats?", "What public figure defended New York in Januar"]
valDF = pd.DataFrame()
valDF['question_text'] = zz
# prediction part
batch_size = 256
def batch_gen(test_df):
n_batches = math.ceil(len(test_df) / batch_size)
for i in range(n_batches):
texts = test_df.iloc[i*batch_size:(i+1)*batch_size, 0]
text_arr = np.array([text_to_array(text) for text in texts])
yield text_arr
# test_df = pd.read_csv("../input/quora-insincere-questions-classification/test.csv")
test_df = valDF
all_preds = []
for x in tqdm(batch_gen(test_df)):
all_preds.extend(model.predict(x).flatten())
y_te = (np.array(all_preds) > 0.5).astype(int)
print(y_te)
print(valDF['question_text'])
|
python
|
import scrapy
from scrapy.crawler import CrawlerProcess
class VersionSpider(scrapy.Spider):
name = 'versions'
custom_settings = {
"FEED_FORMAT": "json",
"FEED_URI": "data/%(name)s/%(time)s.json"
}
start_urls = ['https://docs.hortonworks.com/HDPDocuments/HDP3/HDP-3.1.0/release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP3/HDP-3.0.1/release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP3/HDP-3.0.0/release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.5/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.4/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.3/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.2/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.1/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.0/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.5.6/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.5.5/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.5.4/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.5.3/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.5.2/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.5.1/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.5.0/bk_release-notes/content/comp_versions.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.4.3/bk_HDP_RelNotes/content/ch_relnotes_v243.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.4.2/bk_HDP_RelNotes/content/ch_relnotes_v243.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.4.1/bk_HDP_RelNotes/content/ch_relnotes_v243.html',
'https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.4.0/bk_HDP_RelNotes/content/ch_relnotes_v243.html']
def parse(self, response):
if response.url.split("/")[4] == 'HDP3':
all_ = response.xpath("//section[@class='section']/ul/li/p/text()").getall()
if len(all_) == 0:
all_ = response.xpath("//div[@class='body conbody']/ul/li/p/text()").getall()
else:
all_ = response.xpath("//div[@class='itemizedlist']/ul/li/p/text()").getall()
# print(all_)
for i in all_:
if i.strip() == '':
continue
            if len(i.strip().split()) < 2:
                continue
            if len(i.strip().replace("Apache ", "").split()) < 2:
                continue
yield {"Product": i.replace("Apache ","").split()[0], "Version": i.replace("Apache ","").split()[1].replace("+",""), "HDP": response.url.split("/")[5].split("-")[1]}
spider = VersionSpider()
process = CrawlerProcess()
process.crawl(spider)
process.start()
|
python
|
from typing import Dict, Optional, Union
from pyspark.sql import SparkSession, Column, DataFrame
# noinspection PyUnresolvedReferences
from pyspark.sql.functions import col
from pyspark.sql.types import (
ArrayType,
LongType,
StringType,
StructField,
StructType,
TimestampType,
DataType,
)
from spark_auto_mapper.automappers.automapper import AutoMapper
from spark_auto_mapper.data_types.complex.complex_base import (
AutoMapperDataTypeComplexBase,
)
from spark_auto_mapper.data_types.data_type_base import AutoMapperDataTypeBase
from spark_auto_mapper.data_types.list import AutoMapperList
from spark_auto_mapper.data_types.number import AutoMapperNumberDataType
from spark_auto_mapper.data_types.text_like_base import AutoMapperTextLikeBase
from spark_auto_mapper.helpers.automapper_helpers import AutoMapperHelpers as A
from spark_auto_mapper.type_definitions.defined_types import AutoMapperDateInputType
class MyProcessingStatusExtensionItem(AutoMapperDataTypeComplexBase):
# noinspection PyPep8Naming
def __init__(
self,
url: str,
valueString: Optional[AutoMapperTextLikeBase] = None,
valueDateTime: Optional[AutoMapperDateInputType] = None,
) -> None:
super().__init__(url=url, valueString=valueString, valueDateTime=valueDateTime)
class MyProcessingStatusExtension(AutoMapperDataTypeComplexBase):
# noinspection PyPep8Naming
def __init__(
self,
processing_status: AutoMapperTextLikeBase,
request_id: AutoMapperTextLikeBase,
date_processed: Optional[AutoMapperDateInputType] = None,
) -> None:
definition_base_url = "https://raw.githubusercontent.com/imranq2/SparkAutoMapper.FHIR/main/StructureDefinition/"
processing_status_extensions = [
MyProcessingStatusExtensionItem(
url="processing_status",
valueString=processing_status,
),
MyProcessingStatusExtensionItem(
url="request_id",
valueString=request_id,
),
]
if date_processed:
processing_status_extensions.append(
MyProcessingStatusExtensionItem(
url="date_processed",
valueDateTime=date_processed,
)
)
self.extensions = processing_status_extensions
super().__init__(
url=definition_base_url,
extension=AutoMapperList(processing_status_extensions),
)
def include_null_properties(self, include_null_properties: bool) -> None:
for item in self.extensions:
item.include_null_properties(
include_null_properties=include_null_properties
)
def get_schema(
self, include_extension: bool
) -> Optional[Union[StructType, DataType]]:
return StructType(
[
StructField("url", StringType()),
StructField(
"extension",
ArrayType(
StructType(
[
StructField("url", StringType()),
StructField("valueString", StringType()),
StructField("valueDateTime", TimestampType()),
]
)
),
),
]
)
def get_value(
self,
value: AutoMapperDataTypeBase,
source_df: Optional[DataFrame],
current_column: Optional[Column],
) -> Column:
return super().get_value(value, source_df, current_column)
class MyClass(AutoMapperDataTypeComplexBase):
def __init__(
self,
name: AutoMapperTextLikeBase,
age: AutoMapperNumberDataType,
extension: AutoMapperList[MyProcessingStatusExtension],
) -> None:
super().__init__(name=name, age=age, extension=extension)
def get_schema(
self, include_extension: bool
) -> Optional[Union[StructType, DataType]]:
schema: StructType = StructType(
[
StructField("name", StringType(), False),
StructField("age", LongType(), True),
]
)
return schema
def test_auto_mapper_complex_with_extension(spark_session: SparkSession) -> None:
# Arrange
spark_session.createDataFrame(
[
(1, "Qureshi", "Imran", 45),
(2, "Vidal", "Michael", 35),
],
["member_id", "last_name", "first_name", "my_age"],
).createOrReplaceTempView("patients")
source_df: DataFrame = spark_session.table("patients")
df = source_df.select("member_id")
df.createOrReplaceTempView("members")
# Act
mapper = AutoMapper(
view="members",
source_view="patients",
keys=["member_id"],
drop_key_columns=False,
).complex(
MyClass(
name=A.column("last_name"),
age=A.number(A.column("my_age")),
extension=AutoMapperList(
[
MyProcessingStatusExtension(
processing_status=A.text("foo"),
request_id=A.text("bar"),
date_processed=A.date("2021-01-01"),
)
]
),
)
)
assert isinstance(mapper, AutoMapper)
sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
for column_name, sql_expression in sql_expressions.items():
print(f"{column_name}: {sql_expression}")
result_df: DataFrame = mapper.transform(df=df)
# Assert
assert str(sql_expressions["name"]) == str(
col("b.last_name").cast("string").alias("name")
)
assert str(sql_expressions["age"]) == str(col("b.my_age").cast("long").alias("age"))
result_df.printSchema()
result_df.show(truncate=False)
assert result_df.where("member_id == 1").select("name").collect()[0][0] == "Qureshi"
assert dict(result_df.dtypes)["age"] in ("int", "long", "bigint")
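    # Optional inspection sketch (an assumption, beyond the original assertions): the mapped
    # "extension" column is an array of structs, so one row can be pulled out for manual
    # inspection, e.g.:
    #   print(result_df.where("member_id == 1").select("extension").collect()[0][0])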
|
python
|
"""
The MIT License (MIT)
Originally in 2020, for Python 3.x
Copyright (c) 2021 Panos Achlioptas (ai.stanford.edu/~optas) & Stanford Geometric Computing Lab
"""
import torch
import numpy as np
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from ..evaluation.emotion_alignment import image_to_emotion
from ..emotions import emotion_to_int
class AffectiveCaptionDataset(Dataset):
""" Basically, an image, with a caption, and an indicated emotion.
"""
def __init__(self, image_files, tokens, emotions, n_emotions=9, img_transform=None, one_hot_emo=True):
super(AffectiveCaptionDataset, self).__init__()
self.image_files = image_files
self.tokens = tokens
self.emotions = emotions
self.n_emotions = n_emotions
self.img_transform = img_transform
self.one_hot_emo = one_hot_emo
def __getitem__(self, index):
        text = np.array(self.tokens[index]).astype(np.int64)  # np.long was removed in recent NumPy releases
if self.image_files is not None:
img = Image.open(self.image_files[index])
            if img.mode != 'RGB':
img = img.convert('RGB')
if self.img_transform is not None:
img = self.img_transform(img)
else:
img = []
if self.n_emotions > 0:
if self.one_hot_emo:
emotion = np.zeros(self.n_emotions, dtype=np.float32)
emotion[self.emotions[index]] = 1
else:
emotion = self.emotions[index]
else:
emotion = []
res = {'image': img, 'emotion': emotion, 'tokens': text, 'index': index}
return res
def __len__(self):
return len(self.tokens)
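# Usage sketch (an assumption, not part of the original module): image_files/tokens/emotions
# are expected to be pandas Series of equal length and img_transform a torchvision-style
# transform; the dataset is then wrapped in a standard PyTorch DataLoader.
#
#   dataset = AffectiveCaptionDataset(image_files, tokens, emotions,
#                                     img_transform=my_transform)
#   loader = DataLoader(dataset, batch_size=128, shuffle=True, num_workers=4)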
class ImageClassificationDataset(Dataset):
def __init__(self, image_files, labels=None, img_transform=None, rgb_only=True):
super(ImageClassificationDataset, self).__init__()
self.image_files = image_files
self.labels = labels
self.img_transform = img_transform
self.rgb_only = rgb_only
def __getitem__(self, index):
img = Image.open(self.image_files[index])
        if self.rgb_only and img.mode != 'RGB':
img = img.convert('RGB')
if self.img_transform is not None:
img = self.img_transform(img)
label = []
if self.labels is not None:
label = self.labels[index]
res = {'image': img, 'label': label, 'index': index}
return res
def __len__(self):
return len(self.image_files)
def sub_sample_dataloader(dataloader, sample_size, seed=None, shuffle=False):
""" Given any torch dataloader create a sub-sampled version of it.
:param dataloader:
:param sample_size:
:param seed:
:param shuffle:
:return: dataloader of Subset
"""
dataset = dataloader.dataset
n_total = len(dataset)
if sample_size > n_total:
        raise ValueError('sample_size cannot be larger than the size of the underlying dataset.')
if seed is not None:
torch.manual_seed(seed)
sb_dataset = torch.utils.data.random_split(dataset, [sample_size, n_total-sample_size])[0]
bsize = min(dataloader.batch_size, sample_size)
sample_loader = torch.utils.data.DataLoader(dataset=sb_dataset,
batch_size=bsize,
shuffle=shuffle,
num_workers=dataloader.num_workers)
return sample_loader
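# Example sketch (an assumption): drawing a small, reproducible evaluation subset from an
# existing dataloader.
#
#   small_loader = sub_sample_dataloader(full_loader, sample_size=256, seed=2021, shuffle=True)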
def sub_index_affective_dataloader(affective_dataloader, indices, shuffle=False):
""" Given a torch dataloader and a sequence of integers; extract the corresponding items of the
carried dataset on the specific indices and make a new dataloader with them.
:param affective_dataloader: torch.utils.data.DataLoader for AffectiveCaptionDataset
:param indices: sequence of integers indexing the underlying dataset (dataframe).
:param shuffle: shuffle the data of the resulting dataloader
:return: dataloader of AffectiveCaptionDataset
"""
dataset = affective_dataloader.dataset
r_img_files = dataset.image_files.iloc[indices].copy()
r_tokens = dataset.tokens.iloc[indices].copy()
r_emotions = dataset.emotions.iloc[indices].copy()
r_img_files.reset_index(inplace=True, drop=True)
r_tokens.reset_index(inplace=True, drop=True)
r_emotions.reset_index(inplace=True, drop=True)
r_dset = AffectiveCaptionDataset(image_files=r_img_files, tokens=r_tokens,
emotions=r_emotions, img_transform=dataset.img_transform)
batch_size = min(len(indices), affective_dataloader.batch_size)
r_loader = torch.utils.data.DataLoader(r_dset,
shuffle=shuffle,
batch_size=batch_size,
num_workers=affective_dataloader.num_workers)
return r_loader
def group_annotations_per_image(affective_dataset):
""" Group the annotations per image.
:param affective_dataset: an AffectiveCaptionDataset
:return: for each image its tokens/emotions as pandas Dataframes
"""
df = pd.concat([affective_dataset.image_files, affective_dataset.tokens, affective_dataset.emotions], axis=1)
tokens_grouped = df.groupby('image_files')['tokens_encoded'].apply(list).reset_index(name='tokens_encoded')
emotion_grouped = df.groupby('image_files')['emotion_label'].apply(list).reset_index(name='emotion')
assert all(tokens_grouped['image_files'] == emotion_grouped['image_files'])
return tokens_grouped['image_files'], tokens_grouped, emotion_grouped
def default_grounding_dataset_from_affective_loader(loader, img2emo_clf=None, device=None, n_workers=None):
"""
Convenience function. Given a loader carrying an affective dataset, make a new loader only w.r.t.
unique images of the dataset, & optionally add to each image the emotion predicted by the img2emo_clf.
The new loader can be used to sample utterances over the unique images.
:param loader:
:param img2emo_clf:
:param device:
:return:
"""
affective_dataset = loader.dataset
img_files, tokens, emotions = group_annotations_per_image(affective_dataset)
img_trans = affective_dataset.img_transform
batch_size = loader.batch_size
if n_workers is None:
n_workers = loader.num_workers
dummy = pd.Series(np.ones(len(img_files), dtype=int) * -1)
# possibly predict grounding emotions
if img2emo_clf is not None:
temp_dataset = ImageClassificationDataset(image_files=img_files,
img_transform=img_trans)
img_dataloader = DataLoader(temp_dataset, batch_size, num_workers=n_workers)
emo_pred_distribution = image_to_emotion(img2emo_clf, img_dataloader, device)
grounding_emo = pd.Series(emo_pred_distribution.argmax(-1).tolist()) # use maximizer of emotions.
else:
grounding_emo = dummy
new_dataset = AffectiveCaptionDataset(img_files, tokens=dummy, emotions=grounding_emo,
img_transform=img_trans)
new_loader = DataLoader(dataset=new_dataset, batch_size=batch_size, num_workers=n_workers)
return new_loader
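# Example sketch (an assumption): building a grounding loader over the unique images of an
# affective loader and letting a pretrained image-to-emotion classifier pick the grounding
# emotion per image; img2emo_model is a placeholder name.
#
#   grounding_loader = default_grounding_dataset_from_affective_loader(
#       affective_loader, img2emo_clf=img2emo_model, device=torch.device('cuda'))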
def custom_grounding_dataset_similar_to_affective_loader(grounding_data_csv, loader, n_workers=None):
"""
Convenience function. Given a csv indicating (grounding) images on the hard-drive and a loader carrying an affective
dataset, make a new loader with the csv images using the same configuration (e.g., img_transform) as the loader.
:param grounding_data_csv: (csv filename)
- has to have one column named "image_file" that corresponds to the file-names of the images.
- (optionally) can have also a "grounding_emotion" column with values like "contentment"
:param loader:
:return:
"""
df = pd.read_csv(grounding_data_csv)
image_files = df['image_file']
dummy = pd.Series(np.ones(len(image_files), dtype=int) * -1)
if 'grounding_emotion' in df.columns:
emotions = df.grounding_emotion.apply(emotion_to_int)
else:
emotions = dummy
standard_dset = loader.dataset
custom_dataset = AffectiveCaptionDataset(image_files, dummy, emotions=emotions,
n_emotions=standard_dset.n_emotions,
img_transform=standard_dset.img_transform,
one_hot_emo=standard_dset.one_hot_emo)
if n_workers is None:
n_workers = loader.num_workers
custom_data_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
batch_size=min(loader.batch_size, len(custom_dataset)),
num_workers=n_workers)
return custom_data_loader
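# Example sketch (an assumption): a minimal grounding CSV and how it would be loaded. The csv
# only needs an "image_file" column; "grounding_emotion" is optional. File paths are placeholders.
#
#   image_file,grounding_emotion
#   /data/wiki_art/starry_night.jpg,awe
#   /data/wiki_art/the_scream.jpg,fear
#
#   grounding_loader = custom_grounding_dataset_similar_to_affective_loader(
#       'grounding_images.csv', affective_loader)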
|
python
|
"""
Last edited: November 12 2020
|br| @author: FINE Developer Team (FZJ IEK-3)
"""
from FINE.component import Component, ComponentModel
from FINE import utils
from tsam.timeseriesaggregation import TimeSeriesAggregation
import pandas as pd
import numpy as np
import pyomo.environ as pyomo
import pyomo.opt as opt
import time
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("always", category=UserWarning)
class EnergySystemModel:
"""
EnergySystemModel class
The functionality provided by the EnergySystemModel class is fourfold:\n
* With it, the **basic structure** (spatial and temporal resolution, considered commodities) of
the investigated energy system is defined.
* It serves as a **container for all components** investigated in the energy system model. These components,
namely sources and sinks, conversion options, storage options, and transmission options
(in the core module), can be added to an EnergySystemModel instance.
* It provides the core functionality of **modeling and optimizing the energy system** based on the specified
structure and components on the one hand and of specified simulation parameters on the other hand.
* It **stores optimization results** which can then be post-processed with other modules.
The parameters which are stored in an instance of the class refer to:\n
* the modeled spatial representation of the energy system (**locations, lengthUnit**)
* the modeled temporal representation of the energy system (**totalTimeSteps, hoursPerTimeStep,
      years, periods, periodsOrder, periodOccurrences, timeStepsPerPeriod, interPeriodTimeSteps,
isTimeSeriesDataClustered, typicalPeriods, tsaInstance, timeUnit**)
* the considered commodities in the energy system (**commodities, commodityUnitsDict**)
* the considered components in the energy system (**componentNames, componentModelingDict, costUnit**)
* optimization related parameters (**pyM, solverSpecs**)\n
The parameters are first set when a class instance is initiated. The parameters which are related to the
components (e.g. componentNames) are complemented by adding the components to the class instance.
Instances of this class provide functions for\n
* adding components and their respective modeling classes (**add**)
* clustering the time series data of all added components using the time series aggregation package tsam, cf.
https://github.com/FZJ-IEK3-VSA/tsam (**cluster**)
* optimizing the specified energy system (**optimize**), for which a pyomo concrete model instance is built
and filled with \n
(0) basic time sets, \n
(1) sets, variables and constraints contributed by the component modeling classes, \n
(2) basic, component overreaching constraints, and \n
(3) an objective function. \n
The pyomo instance is then optimized by a specified solver. The optimization results are processed once
available.
* getting components and their attributes (**getComponent, getCompAttr, getOptimizationSummary**)
Last edited: November 12 2020
|br| @author: FINE Developer Team (FZJ IEK-3)
"""
def __init__(self,
locations,
commodities,
commodityUnitsDict,
numberOfTimeSteps=8760,
hoursPerTimeStep=1,
costUnit='1e9 Euro',
lengthUnit='km',
verboseLogLevel=0,
balanceLimit=None,
lowerBound=False):
"""
Constructor for creating an EnergySystemModel class instance
**Required arguments:**
:param locations: locations considered in the energy system
:type locations: set of strings
:param commodities: commodities considered in the energy system
:type commodities: set of strings
:param commodityUnitsDict: dictionary which assigns each commodity a quantitative unit per time
(e.g. GW_el, GW_H2, Mio.t_CO2/h). The dictionary is used for results output.
Note for advanced users: the scale of these units can influence the numerical stability of the
optimization solver, cf. http://files.gurobi.com/Numerics.pdf where a reasonable range of model
coefficients is suggested.
:type commodityUnitsDict: dictionary of strings
**Default arguments:**
:param numberOfTimeSteps: number of time steps considered when modeling the energy system (for each
time step, or each representative time step, variables and constraints are constituted). Together
with the hoursPerTimeStep, the total number of hours considered can be derived. The total
number of hours is again used for scaling the arising costs to the arising total annual costs (TAC)
which are minimized during optimization.
|br| * the default value is 8760.
        :type numberOfTimeSteps: strictly positive integer
:param hoursPerTimeStep: hours per time step
|br| * the default value is 1
:type hoursPerTimeStep: strictly positive float
:param costUnit: cost unit of all cost related values in the energy system. This argument sets the unit of
all cost parameters which are given as an input to the EnergySystemModel instance (e.g. for the
invest per capacity or the cost per operation).
Note for advanced users: the scale of this unit can influence the numerical stability of the
optimization solver, cf. http://files.gurobi.com/Numerics.pdf where a reasonable range of model
coefficients is suggested.
            |br| * the default value is '1e9 Euro' (billion euros), which can be a suitable scale for national
energy systems.
:type costUnit: string
:param lengthUnit: length unit for all length-related values in the energy system.
Note for advanced users: the scale of this unit can influence the numerical stability of the
optimization solver, cf. http://files.gurobi.com/Numerics.pdf where a reasonable range of model
coefficients is suggested.
|br| * the default value is 'km' (kilometers).
:type lengthUnit: string
:param verboseLogLevel: defines how verbose the console logging is:\n
- 0: general model logging, warnings and optimization solver logging are displayed.
- 1: warnings are displayed.
- 2: no general model logging or warnings are displayed, the optimization solver logging is set to a
minimum.\n
Note: if required, the optimization solver logging can be separately enabled in the optimizationSpecs
of the optimize function.
|br| * the default value is 0
:type verboseLogLevel: integer (0, 1 or 2)
:param balanceLimit: defines the balanceLimit constraint (various different balanceLimitIDs possible)
for specific regions or the whole model. The balancelimitID can be assigned to various components
of e.g. SourceSinkModel or TransmissionModel to limit the balance of production, consumption and im/export.
If the balanceLimit is passed as pd.Series it will apply to the overall model, if it is passed
            as pd.DataFrame, each column will apply to one region of the multi-node model. In the latter case,
the number and names of the columns should match the regions/region names in the model.
Each row contains an individual balanceLimitID as index and the corresponding values for the model
            (pd.Series) or regions (pd.DataFrame). Values are always given in the respective esM commodity unit.
Note: If bounds for sinks shall be specified (e.g. min. export, max. sink volume), values must be
defined as negative.
Example: pd.DataFrame(columns=["Region1"], index=["electricity"], data=[1000])
|br| * the default value is None
:type balanceLimit: pd.DataFrame or pd.Series
:param lowerBound: defines whether a lowerBound or an upperBound is considered in the balanceLimitConstraint.
By default an upperBound is considered. However, multiple cases can be considered:
1) Sources:
a) LowerBound=False: UpperBound for commodity from SourceComponent (Define positive value in
balanceLimit). Example: Limit CO2-Emission
b) LowerBound=True: LowerBound for commodity from SourceComponent (Define positive value in
balanceLimit). Example: Require minimum production from renewables.
2) Sinks:
a) LowerBound=False: UpperBound in a mathematical sense for commodity from SinkComponent
(Logically minimum limit for negative values, define negative value in balanceLimit).
Example: Minimum export/consumption of hydrogen.
b) LowerBound=True: LowerBound in a mathematical sense for commodity from SourceComponent
(Logically maximum limit for negative values, define negative value in balanceLimit).
Example: Define upper limit for Carbon Capture & Storage.
|br| * the default value is False
:type lowerBound: bool
"""
# Check correctness of inputs
utils.checkEnergySystemModelInput(locations, commodities, commodityUnitsDict, numberOfTimeSteps,
hoursPerTimeStep, costUnit, lengthUnit, balanceLimit)
################################################################################################################
# Spatial resolution parameters #
################################################################################################################
# The locations (set of string) name the considered locations in an energy system model instance. The parameter
# is used throughout the build of the energy system model to validate inputs and declare relevant sets,
# variables and constraints.
# The length unit refers to the measure of length referred throughout the model.
# The balanceLimit can be used to limit certain balanceLimitIDs defined in the components.
self.locations, self.lengthUnit = locations, lengthUnit
self.numberOfTimeSteps = numberOfTimeSteps
self.balanceLimit = balanceLimit
self.lowerBound = lowerBound
################################################################################################################
# Time series parameters #
################################################################################################################
# The totalTimeSteps parameter (list, ranging from 0 to the total numberOfTimeSteps-1) refers to the total
# number of time steps considered when modeling the specified energy system. The parameter is used for
# validating time series data input and for setting other time series parameters when modeling a full temporal
# resolution. The hoursPerTimeStep parameter (float > 0) refers to the temporal length of a time step in the
# totalTimeSteps. From the numberOfTimeSteps and the hoursPerTimeStep the numberOfYears parameter is computed.
self.totalTimeSteps, self.hoursPerTimeStep = list(range(numberOfTimeSteps)), hoursPerTimeStep
self.numberOfYears = numberOfTimeSteps * hoursPerTimeStep / 8760.0
# The periods parameter (list, [0] when considering a full temporal resolution, range of [0, ...,
# totalNumberOfTimeSteps/numberOfTimeStepsPerPeriod] when applying time series aggregation) represents
# the periods considered when modeling the energy system. Only one period exists when considering the full
# temporal resolution. When applying time series aggregation, the full time series are broken down into
# periods to which a typical period is assigned to.
# These periods have an order which is stored in the periodsOrder parameter (list, [0] when considering a full
# temporal resolution, [typicalPeriod(0), ... ,
# typicalPeriod(totalNumberOfTimeSteps/numberOfTimeStepsPerPeriod-1)] when applying time series aggregation).
# The occurrences of these periods are stored in the periodsOccurrences parameter (list, [1] when considering a
# full temporal resolution, [occurrences(0), ..., occurrences(numberOfTypicalPeriods-1)] when applying time
# series aggregation).
self.periods, self.periodsOrder, self.periodOccurrences = [0], [0], [1]
self.timeStepsPerPeriod = list(range(numberOfTimeSteps))
self.interPeriodTimeSteps = list(range(int(len(self.totalTimeSteps) / len(self.timeStepsPerPeriod)) + 1))
# The isTimeSeriesDataClustered parameter is used to check data consistency.
# It is set to True if the class' cluster function is called. It is set to False if a new component is added.
# If the cluster function is called, the typicalPeriods parameter is set from None to
# [0, ..., numberOfTypicalPeriods-1] and, if specified, the resulting TimeSeriesAggregation instance is stored
# in the tsaInstance parameter (default None).
# The time unit refers to time measure referred throughout the model. Currently, it has to be an hour 'h'.
self.isTimeSeriesDataClustered, self.typicalPeriods, self.tsaInstance = False, None, None
self.timeUnit = 'h'
################################################################################################################
# Commodity specific parameters #
################################################################################################################
# The commodities parameter is a set of strings which describes what commodities are considered in the energy
# system, and hence, which commodity balances need to be considered in the energy system model and its
# optimization.
# The commodityUnitsDict parameter is a dictionary which assigns each considered commodity (string) a
# unit (string) which can be used by results output functions.
self.commodities = commodities
self.commodityUnitsDict = commodityUnitsDict
################################################################################################################
# Component specific parameters #
################################################################################################################
        # The componentNames parameter is a dictionary (componentName: modelingClass name) in which all components
        # considered in the EnergySystemModel instance are stored. It is used to ensure that all components have
        # unique names.
# The componentModelingDict is a dictionary (modelingClass name: modelingClass instance) in which the in the
# energy system considered modeling classes are stored (in which again the components modeled with the
# modelingClass as well as the equations to model them with are stored).
# The costUnit parameter (string) is the parameter in which all cost input parameter have to be specified.
self.componentNames = {}
self.componentModelingDict = {}
self.costUnit = costUnit
################################################################################################################
# Optimization parameters #
################################################################################################################
# The pyM parameter is None when the EnergySystemModel is initialized. After calling the optimize function,
# the pyM parameter stores a Concrete Pyomo Model instance which contains parameters, sets, variables,
# constraints and objective required for the optimization set up and solving.
# The solverSpecs parameter is a dictionary (string: param) which stores different parameters that are used
# for solving the optimization problem. The parameters are: solver (string, solver which is used to solve
# the optimization problem), optimizationSpecs (string, representing **kwargs for the solver), hasTSA (boolean,
# indicating if time series aggregation is used for the optimization), buildtime (positive float, time needed
# to declare the optimization problem in seconds), solvetime (positive float, time needed to solve the
# optimization problem in seconds), runtime (positive float, runtime of the optimization run in seconds),
# timeLimit (positive float or None, if specified, indicates the maximum allowed runtime of the solver),
# threads (positive int, number of threads used for optimization, can depend on solver), logFileName
# (string, name of logfile).
# The objectiveValue parameter is None when the EnergySystemModel is initialized. After calling the
# optimize function, the objective value (i.e. TAC of the analyzed energy system) is stored in the
# objectiveValue parameter for easier access.
self.pyM = None
self.solverSpecs = {'solver': '', 'optimizationSpecs': '', 'hasTSA': False, 'buildtime': 0, 'solvetime': 0,
'runtime': 0, 'timeLimit': None, 'threads': 0, 'logFileName': ''}
self.objectiveValue = None
################################################################################################################
# General model parameters #
################################################################################################################
# The verbose parameter defines how verbose the console logging is: 0: general model logging, warnings
# and optimization solver logging are displayed, 1: warnings are displayed, 2: no general model logging or
# warnings are displayed, the optimization solver logging is set to a minimum.
# The optimization solver logging can be separately enabled in the optimizationSpecs of the optimize function.
self.verbose = verboseLogLevel
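    # Construction sketch (an assumption, not part of the original source): a minimal
    # two-region, two-commodity model instance.
    #
    #   esM = EnergySystemModel(locations={'regionN', 'regionS'},
    #                           commodities={'electricity', 'hydrogen'},
    #                           commodityUnitsDict={'electricity': 'GW_el', 'hydrogen': 'GW_H2'},
    #                           numberOfTimeSteps=8760, hoursPerTimeStep=1,
    #                           costUnit='1e9 Euro', lengthUnit='km')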
def add(self, component):
"""
Function for adding a component and, if required, its respective modeling class to the EnergySystemModel
instance. The added component has to inherit from the FINE class Component.
:param component: the component to be added
:type component: An object which inherits from the FINE Component class
"""
if not issubclass(type(component), Component):
raise TypeError('The added component has to inherit from the FINE class Component.')
if not issubclass(component.modelingClass, ComponentModel):
print(component.name, component.modelingClass, ComponentModel)
raise TypeError('The added component has to inherit from the FINE class ComponentModel.')
component.addToEnergySystemModel(self)
def removeComponent(self, componentName, track=False):
"""
Function which removes a component from the energy system.
:param componentName: name of the component that should be removed
:type componentName: string
:param track: specifies if the removed components should be tracked or not
|br| * the default value is False
:type track: boolean
:returns: dictionary with the removed componentName and component instance if track is set to True else None.
:rtype: dict or None
"""
# Test if component exists
if componentName not in self.componentNames.keys():
raise ValueError('The component ' + componentName + ' cannot be found in the energy system model.\n' +
'The components considered in the model are: ' + str(self.componentNames.keys()))
modelingClass = self.componentNames[componentName]
removedComp = dict()
# If track: Return a dictionary including the name of the removed component and the component instance
if track:
removedComp = dict({componentName : self.componentModelingDict[modelingClass].componentsDict.pop(componentName)})
# Remove component from the componentNames dict:
del self.componentNames[componentName]
# Test if all components of one modelingClass are removed. If so, remove modelingClass:
if not self.componentModelingDict[modelingClass].componentsDict: # False if dict is empty
del self.componentModelingDict[modelingClass]
return removedComp
else:
# Remove component from the componentNames dict:
del self.componentNames[componentName]
# Remove component from the componentModelingDict:
del self.componentModelingDict[modelingClass].componentsDict[componentName]
# Test if all components of one modelingClass are removed. If so, remove modelingClass:
if not self.componentModelingDict[modelingClass].componentsDict: # False if dict is empty
del self.componentModelingDict[modelingClass]
return None
def getComponent(self, componentName):
"""
Function which returns a component of the energy system.
:param componentName: name of the component that should be returned
:type componentName: string
:returns: the component which has the name componentName
:rtype: Component
"""
if componentName not in self.componentNames.keys():
raise ValueError('The component ' + componentName + ' cannot be found in the energy system model.\n' +
'The components considered in the model are: ' + str(self.componentNames.keys()))
modelingClass = self.componentNames[componentName]
return self.componentModelingDict[modelingClass].componentsDict[componentName]
def getComponentAttribute(self, componentName, attributeName):
"""
Function which returns an attribute of a component considered in the energy system.
:param componentName: name of the component from which the attribute should be obtained
:type componentName: string
:param attributeName: name of the attribute that should be returned
:type attributeName: string
:returns: the attribute specified by the attributeName of the component with the name componentName
:rtype: depends on the specified attribute
"""
return getattr(self.getComponent(componentName), attributeName)
def getOptimizationSummary(self, modelingClass, outputLevel=0):
"""
Function which returns the optimization summary (design variables, aggregated operation variables,
objective contributions) of a modeling class.
:param modelingClass: name of the modeling class from which the optimization summary should be obtained
:type modelingClass: string
:param outputLevel: states the level of detail of the output summary: \n
- 0: full optimization summary is returned \n
- 1: full optimization summary is returned but rows in which all values are NaN (not a number) are dropped\n
- 2: full optimization summary is returned but rows in which all values are NaN or 0 are dropped \n
|br| * the default value is 0
:type outputLevel: integer (0, 1 or 2)
:returns: the optimization summary of the requested modeling class
:rtype: pandas DataFrame
"""
if outputLevel == 0:
return self.componentModelingDict[modelingClass].optSummary
elif outputLevel == 1:
return self.componentModelingDict[modelingClass].optSummary.dropna(how='all')
else:
if outputLevel != 2 and self.verbose < 2:
warnings.warn('Invalid input. An outputLevel parameter of 2 is assumed.')
df = self.componentModelingDict[modelingClass].optSummary.dropna(how='all')
return df.loc[((df != 0) & (~df.isnull())).any(axis=1)]
def cluster(self,
numberOfTypicalPeriods=7,
numberOfTimeStepsPerPeriod=24,
segmentation=False,
numberOfSegmentsPerPeriod=24,
clusterMethod='hierarchical',
sortValues=True,
storeTSAinstance=False,
**kwargs):
"""
Cluster the time series data of all components considered in the EnergySystemModel instance and then
stores the clustered data in the respective components. For this, the time series data is broken down
into an ordered sequence of periods (e.g. 365 days) and to each period a typical period (e.g. 7 typical
days with 24 hours) is assigned. Moreover, the time steps within the periods can further be clustered to bigger
time steps with an irregular duration using the segmentation option.
For the clustering itself, the tsam package is used (cf. https://github.com/FZJ-IEK3-VSA/tsam). Additional
keyword arguments for the TimeSeriesAggregation instance can be added (facilitated by kwargs). As an example: it
might be useful to add extreme periods to the clustered typical periods.
Note: The segmentation option can be freely combined with all subclasses. However, an irregular time step length
is not meaningful for the minimumDownTime and minimumUpTime in the conversionDynamic module, because the time
would be different for each segment. The same holds true for the DSM module.
**Default arguments:**
:param numberOfTypicalPeriods: states the number of typical periods into which the time series data
should be clustered. The number of time steps per period must be an integer multiple of the total
number of considered time steps in the energy system.
Note: Please refer to the tsam package documentation of the parameter noTypicalPeriods for more
information.
|br| * the default value is 7
:type numberOfTypicalPeriods: strictly positive integer
:param numberOfTimeStepsPerPeriod: states the number of time steps per period
|br| * the default value is 24
:type numberOfTimeStepsPerPeriod: strictly positive integer
:param segmentation: states whether the typical periods should be further segmented to fewer time steps
|br| * the default value is False
:type segmentation: boolean
:param numberOfSegmentsPerPeriod: states the number of segments per period
|br| * the default value is 24
:type numberOfSegmentsPerPeriod: strictly positive integer
:param clusterMethod: states the method which is used in the tsam package for clustering the time series
data. Options are for example 'averaging','k_means','exact k_medoid' or 'hierarchical'.
Note: Please refer to the tsam package documentation of the parameter clusterMethod for more information.
|br| * the default value is 'hierarchical'
:type clusterMethod: string
:param sortValues: states if the algorithm in the tsam package should use
(a) the sorted duration curves (-> True) or
(b) the original profiles (-> False)
of the time series data within a period for clustering.
Note: Please refer to the tsam package documentation of the parameter sortValues for more information.
|br| * the default value is True
:type sortValues: boolean
:param storeTSAinstance: states if the TimeSeriesAggregation instance created during clustering should be
stored in the EnergySystemModel instance.
|br| * the default value is False
:type storeTSAinstance: boolean
Last edited: November 12 2020
|br| @author: FINE Developer Team (FZJ IEK-3)
"""
# Check input arguments which have to fit the temporal representation of the energy system
utils.checkClusteringInput(numberOfTypicalPeriods, numberOfTimeStepsPerPeriod, len(self.totalTimeSteps))
if segmentation:
if numberOfSegmentsPerPeriod > numberOfTimeStepsPerPeriod:
if self.verbose < 2:
                    warnings.warn('The chosen number of segments per period exceeds the number of time steps per '
'period. The number of segments per period is set to the number of time steps per '
'period.')
numberOfSegmentsPerPeriod = numberOfTimeStepsPerPeriod
hoursPerPeriod = int(numberOfTimeStepsPerPeriod*self.hoursPerTimeStep)
timeStart = time.time()
if segmentation:
utils.output('\nClustering time series data with ' + str(numberOfTypicalPeriods) + ' typical periods and '
+ str(numberOfTimeStepsPerPeriod) + ' time steps per period \nfurther clustered to '
+ str(numberOfSegmentsPerPeriod) + ' segments per period...', self.verbose, 0)
else:
utils.output('\nClustering time series data with ' + str(numberOfTypicalPeriods) + ' typical periods and '
+ str(numberOfTimeStepsPerPeriod) + ' time steps per period...', self.verbose, 0)
# Format data to fit the input requirements of the tsam package:
# (a) append the time series data from all components stored in all initialized modeling classes to a pandas
# DataFrame with unique column names
# (b) thereby collect the weights which should be considered for each time series as well in a dictionary
timeSeriesData, weightDict = [], {}
for mdlName, mdl in self.componentModelingDict.items():
for compName, comp in mdl.componentsDict.items():
compTimeSeriesData, compWeightDict = comp.getDataForTimeSeriesAggregation()
if compTimeSeriesData is not None:
timeSeriesData.append(compTimeSeriesData), weightDict.update(compWeightDict)
timeSeriesData = pd.concat(timeSeriesData, axis=1)
# Note: Sets index for the time series data. The index is of no further relevance in the energy system model.
timeSeriesData.index = pd.date_range('2050-01-01 00:30:00', periods=len(self.totalTimeSteps),
freq=(str(self.hoursPerTimeStep) + 'H'), tz='Europe/Berlin')
# Cluster data with tsam package (the reindex call is here for reproducibility of TimeSeriesAggregation
# call) depending on whether segmentation is activated or not
timeSeriesData = timeSeriesData.reindex(sorted(timeSeriesData.columns), axis=1)
if segmentation:
clusterClass = TimeSeriesAggregation(timeSeries=timeSeriesData, noTypicalPeriods=numberOfTypicalPeriods,
segmentation=segmentation, noSegments=numberOfSegmentsPerPeriod,
hoursPerPeriod=hoursPerPeriod,
clusterMethod=clusterMethod, sortValues=sortValues,
weightDict=weightDict, **kwargs)
# Convert the clustered data to a pandas DataFrame with the first index as typical period number and the
# second index as segment number per typical period.
data = pd.DataFrame.from_dict(clusterClass.clusterPeriodDict).reset_index(level=2, drop=True)
# Get the length of each segment in each typical period with the first index as typical period number and
# the second index as segment number per typical period.
timeStepsPerSegment = pd.DataFrame.from_dict(clusterClass.segmentDurationDict)['Segment Duration']
else:
clusterClass = TimeSeriesAggregation(timeSeries=timeSeriesData, noTypicalPeriods=numberOfTypicalPeriods,
hoursPerPeriod=hoursPerPeriod,
clusterMethod=clusterMethod, sortValues=sortValues,
weightDict=weightDict, **kwargs)
# Convert the clustered data to a pandas DataFrame with the first index as typical period number and the
# second index as time step number per typical period.
data = pd.DataFrame.from_dict(clusterClass.clusterPeriodDict)
# Store the respective clustered time series data in the associated components
for mdlName, mdl in self.componentModelingDict.items():
for compName, comp in mdl.componentsDict.items():
comp.setAggregatedTimeSeriesData(data)
# Store time series aggregation parameters in class instance
if storeTSAinstance:
self.tsaInstance = clusterClass
self.typicalPeriods = clusterClass.clusterPeriodIdx
self.timeStepsPerPeriod = list(range(numberOfTimeStepsPerPeriod))
self.segmentation = segmentation
if segmentation:
self.segmentsPerPeriod = list(range(numberOfSegmentsPerPeriod))
self.timeStepsPerSegment = timeStepsPerSegment
self.hoursPerSegment = self.hoursPerTimeStep * self.timeStepsPerSegment
# Define start time hour of each segment in each typical period
segmentStartTime = self.hoursPerSegment.groupby(level=0).cumsum()
segmentStartTime.index = segmentStartTime.index.set_levels(segmentStartTime.index.levels[1] + 1, level=1)
lvl0, lvl1 = segmentStartTime.index.levels
segmentStartTime = segmentStartTime.reindex(pd.MultiIndex.from_product([lvl0, [0, *lvl1]]))
segmentStartTime[segmentStartTime.index.get_level_values(1) == 0] = 0
self.segmentStartTime = segmentStartTime
self.periods = list(range(int(len(self.totalTimeSteps) / len(self.timeStepsPerPeriod))))
self.interPeriodTimeSteps = list(range(int(len(self.totalTimeSteps) / len(self.timeStepsPerPeriod)) + 1))
self.periodsOrder = clusterClass.clusterOrder
self.periodOccurrences = [(self.periodsOrder == tp).sum() for tp in self.typicalPeriods]
# Set cluster flag to true (used to ensure consistently clustered time series data)
self.isTimeSeriesDataClustered = True
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", self.verbose, 0)
def declareTimeSets(self, pyM, timeSeriesAggregation, segmentation):
"""
Set and initialize basic time parameters and sets.
:param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
constraints and objective required for the optimization set up and solving.
:type pyM: pyomo ConcreteModel
:param timeSeriesAggregation: states if the optimization of the energy system model should be done with
(a) the full time series (False) or
(b) clustered time series data (True).
|br| * the default value is False
:type timeSeriesAggregation: boolean
:param segmentation: states if the optimization of the energy system model based on clustered time series data
should be done with
(a) aggregated typical periods with the original time step length (False) or
(b) aggregated typical periods with further segmented time steps (True).
|br| * the default value is False
:type segmentation: boolean
"""
# Store the information if aggregated time series data is considered for modeling the energy system in the pyomo
# model instance and set the time series which is again considered for modeling in all components accordingly
pyM.hasTSA = timeSeriesAggregation
pyM.hasSegmentation = segmentation
for mdl in self.componentModelingDict.values():
for comp in mdl.componentsDict.values():
comp.setTimeSeriesData(pyM.hasTSA)
# Set the time set and the inter time steps set. The time set is a set of tuples. A tuple consists of two
# entries, the first one indicates an index of a period and the second one indicates a time step inside that
# period. If time series aggregation is not considered, only one period (period 0) exists and the time steps
# range from 0 up until the specified number of total time steps - 1. Otherwise, the time set is initialized for
# each typical period (0 ... numberOfTypicalPeriods-1) and the number of time steps per period (0 ...
# numberOfTimeStepsPerPeriod-1).
# The inter time steps set is a set of tuples as well, which again consist of two values. The first value again
# indicates the period, however, the second one now refers to a point in time right before or after a time step
# (or between two time steps). Hence, the second value reaches values from (0 ... numberOfTimeStepsPerPeriod).
if not pyM.hasTSA:
# Reset timeStepsPerPeriod in case it was overwritten by the clustering function
self.timeStepsPerPeriod = self.totalTimeSteps
self.interPeriodTimeSteps = list(range(int(len(self.totalTimeSteps) /
len(self.timeStepsPerPeriod)) + 1))
self.periods = [0]
self.periodsOrder = [0]
self.periodOccurrences = [1]
# Define sets
def initTimeSet(pyM):
return ((p, t) for p in self.periods for t in self.timeStepsPerPeriod)
def initInterTimeStepsSet(pyM):
return ((p, t) for p in self.periods for t in range(len(self.timeStepsPerPeriod) + 1))
else:
if not pyM.hasSegmentation:
utils.output('Time series aggregation specifications:\n'
'Number of typical periods:' + str(len(self.typicalPeriods)) +
', number of time steps per period:' + str(len(self.timeStepsPerPeriod)) + '\n',
self.verbose, 0)
# Define sets
def initTimeSet(pyM):
return ((p, t) for p in self.typicalPeriods for t in self.timeStepsPerPeriod)
def initInterTimeStepsSet(pyM):
return ((p, t) for p in self.typicalPeriods for t in range(len(self.timeStepsPerPeriod) + 1))
else:
utils.output('Time series aggregation specifications:\n'
'Number of typical periods:' + str(len(self.typicalPeriods)) +
', number of time steps per period:' + str(len(self.timeStepsPerPeriod)) +
', number of segments per period:' + str(len(self.segmentsPerPeriod)) + '\n',
self.verbose, 0)
# Define sets
def initTimeSet(pyM):
return ((p, t) for p in self.typicalPeriods for t in self.segmentsPerPeriod)
def initInterTimeStepsSet(pyM):
return ((p, t) for p in self.typicalPeriods for t in range(len(self.segmentsPerPeriod) + 1))
# Initialize sets
pyM.timeSet = pyomo.Set(dimen=2, initialize=initTimeSet)
pyM.interTimeStepsSet = pyomo.Set(dimen=2, initialize=initInterTimeStepsSet)
def declareBalanceLimitConstraint(self, pyM, timeSeriesAggregation):
"""
Declare balance limit constraint.
Balance limit constraint can limit the exchange of commodities within the model or over the model region
boundaries. See the documentation of the parameters for further explanation. In general the following equation
applies:
E_source - E_sink + E_exchange,in - E_exchange,out <= E_lim (self.LowerBound=False)
E_source - E_sink + E_exchange,in - E_exchange,out >= E_lim (self.LowerBound=True)
:param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
constraints and objective required for the optimization set up and solving.
:type pyM: pyomo ConcreteModel
:param timeSeriesAggregation: states if the optimization of the energy system model should be done with
(a) the full time series (False) or
(b) clustered time series data (True).
|br| * the default value is False
:type timeSeriesAggregation: boolean
"""
balanceLimitDict = {}
        # Two distinctions (four cases): 1st: locational (pd.DataFrame) or overall (pd.Series); 2nd: lower or upper bound.
# DataFrame with locational input. Otherwise error is thrown in input check.
if type(self.balanceLimit) == pd.DataFrame:
for mdl_type, mdl in self.componentModelingDict.items():
if mdl_type=="SourceSinkModel" or mdl_type=="TransmissionModel":
for compName, comp in mdl.componentsDict.items():
if comp.balanceLimitID is not None:
[balanceLimitDict.setdefault((comp.balanceLimitID, loc), []).append(compName)
for loc in self.locations]
setattr(pyM, "balanceLimitDict", balanceLimitDict)
def balanceLimitConstraint(pyM, ID, loc):
# Check whether we want to consider an upper or lower bound.
if not self.lowerBound:
return sum(mdl.getBalanceLimitContribution(esM=self, pyM=pyM, ID=ID,
timeSeriesAggregation=timeSeriesAggregation, loc=loc)
for mdl_type, mdl in self.componentModelingDict.items() if (
mdl_type=="SourceSinkModel" or mdl_type=="TransmissionModel")
) <= self.balanceLimit.loc[ID, loc]
else:
return sum(mdl.getBalanceLimitContribution(esM=self, pyM=pyM, ID=ID,
timeSeriesAggregation=timeSeriesAggregation, loc=loc)
for mdl_type, mdl in self.componentModelingDict.items() if (
mdl_type=="SourceSinkModel" or mdl_type=="TransmissionModel")
) >= self.balanceLimit.loc[ID, loc]
# Series as input. Whole model is considered.
else:
for mdl_type, mdl in self.componentModelingDict.items():
if mdl_type=="SourceSinkModel":
for compName, comp in mdl.componentsDict.items():
if comp.balanceLimitID is not None:
balanceLimitDict.setdefault((comp.balanceLimitID), []).append(compName)
setattr(pyM, "balanceLimitDict", balanceLimitDict)
def balanceLimitConstraint(pyM, ID):
                # Check whether we want to consider an upper or lower bound
if not self.lowerBound:
return sum(mdl.getBalanceLimitContribution(esM=self, pyM=pyM, ID=ID,
timeSeriesAggregation=timeSeriesAggregation)
for mdl_type, mdl in self.componentModelingDict.items() if (
mdl_type=="SourceSinkModel")) <= self.balanceLimit.loc[ID]
else:
return sum(mdl.getBalanceLimitContribution(esM=self, pyM=pyM, ID=ID,
timeSeriesAggregation=timeSeriesAggregation)
for mdl_type, mdl in self.componentModelingDict.items() if (
mdl_type == "SourceSinkModel")) >= self.balanceLimit.loc[ID]
pyM.balanceLimitConstraint = \
pyomo.Constraint(pyM.balanceLimitDict.keys(), rule=balanceLimitConstraint)
def declareSharedPotentialConstraints(self, pyM):
"""
Declare shared potential constraints, e.g. if a maximum potential of salt caverns has to be shared by
salt cavern storing methane and salt caverns storing hydrogen.
.. math::
\\underset{\\text{comp} \in \mathcal{C}^{ID}}{\sum} \\text{cap}^{comp}_{loc} / \\text{capMax}^{comp}_{loc} \leq 1
:param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
constraints and objective required for the optimization set up and solving.
:type pyM: pyomo ConcreteModel
"""
utils.output('Declaring shared potential constraint...', self.verbose, 0)
# Create shared potential dictionary (maps a shared potential ID and a location to components who share the
# potential)
potentialDict = {}
for mdl in self.componentModelingDict.values():
for compName, comp in mdl.componentsDict.items():
if comp.sharedPotentialID is not None:
[potentialDict.setdefault((comp.sharedPotentialID, loc), []).append(compName)
for loc in comp.locationalEligibility.index if comp.capacityMax[loc] != 0]
pyM.sharedPotentialDict = potentialDict
# Define and initialize constraints for each instance and location where components have to share an available
# potential. Sum up the relative contributions to the shared potential and ensure that the total share is
# <= 100%. For this, get the contributions to the shared potential for the corresponding ID and
# location from each modeling class.
def sharedPotentialConstraint(pyM, ID, loc):
return sum(mdl.getSharedPotentialContribution(pyM, ID, loc)
for mdl in self.componentModelingDict.values()) <= 1
pyM.ConstraintSharedPotentials = \
pyomo.Constraint(pyM.sharedPotentialDict.keys(), rule=sharedPotentialConstraint)
def declareComponentLinkedQuantityConstraints(self, pyM):
"""
Declare linked component quantity constraint, e.g. if an engine (E-Motor) is built also a storage (Battery)
and a vehicle body (e.g. BEV Car) needs to be built. Not the capacity of the components, but the number of
the components is linked.
:param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
constraints and objective required for the optimization set up and solving.
:type pyM: pyomo ConcreteModel
"""
utils.output('Declaring linked component quantity constraint...', self.verbose, 0)
compDict = {}
for mdl in self.componentModelingDict.values():
for compName, comp in mdl.componentsDict.items():
if comp.linkedQuantityID is not None:
[compDict.setdefault((comp.linkedQuantityID, loc), []).append(compName)
for loc in comp.locationalEligibility.index]
pyM.linkedQuantityDict = compDict
def linkedQuantityConstraint(pyM, ID, loc, compName1, compName2):
abbrvName1 = self.componentModelingDict[self.componentNames[compName1]].abbrvName
abbrvName2 = self.componentModelingDict[self.componentNames[compName2]].abbrvName
capVar1 = getattr(pyM, 'cap_' + abbrvName1)
capVar2 = getattr(pyM, 'cap_' + abbrvName2)
capPPU1 = self.componentModelingDict[self.componentNames[compName1]].componentsDict[compName1].capacityPerPlantUnit
capPPU2 = self.componentModelingDict[self.componentNames[compName2]].componentsDict[compName2].capacityPerPlantUnit
return capVar1[loc, compName1] / capPPU1 == capVar2[loc, compName2] / capPPU2
for (i,j) in pyM.linkedQuantityDict.keys():
linkedQuantityList = []
linkedQuantityList.append((i, j))
setattr(pyM, 'ConstraintLinkedQuantity_' + str(i) + '_' + str(j),\
pyomo.Constraint(\
linkedQuantityList,\
pyM.linkedQuantityDict[i, j],\
pyM.linkedQuantityDict[i, j],\
rule=linkedQuantityConstraint))
def declareCommodityBalanceConstraints(self, pyM):
"""
Declare commodity balance constraints (one balance constraint for each commodity, location and time step)
.. math::
\\underset{\\text{comp} \in \mathcal{C}^{comm}_{loc}}{\sum} \\text{C}^{comp,comm}_{loc,p,t} = 0
:param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
constraints and objective required for the optimization set up and solving.
:type pyM: pyomo ConcreteModel
"""
utils.output('Declaring commodity balances...', self.verbose, 0)
# Declare and initialize a set that states for which location and commodity the commodity balance constraints
# are non-trivial (i.e. not 0 == 0; trivial constraints raise errors in pyomo).
def initLocationCommoditySet(pyM):
return ((loc, commod) for loc in self.locations for commod in self.commodities
if any([mdl.hasOpVariablesForLocationCommodity(self, loc, commod)
for mdl in self.componentModelingDict.values()]))
pyM.locationCommoditySet = pyomo.Set(dimen=2, initialize=initLocationCommoditySet)
# Declare and initialize commodity balance constraints by checking for each location and commodity in the
# locationCommoditySet and for each period and time step within the period if the commodity source and sink
# terms add up to zero. For this, get the contribution to commodity balance from each modeling class.
def commodityBalanceConstraint(pyM, loc, commod, p, t):
return sum(mdl.getCommodityBalanceContribution(pyM, commod, loc, p, t)
for mdl in self.componentModelingDict.values()) == 0
pyM.commodityBalanceConstraint = pyomo.Constraint(pyM.locationCommoditySet, pyM.timeSet,
rule=commodityBalanceConstraint)
def declareObjective(self, pyM):
"""
Declare the objective function by obtaining the contributions to the objective function from all modeling
classes. Currently, the only objective function which can be selected is the sum of the total annual cost of all
components.
.. math::
z^* = \\min \\underset{comp \\in \\mathcal{C}}{\\sum} \\ \\underset{loc \\in \\mathcal{L}^{comp}}{\\sum}
\\left( TAC_{loc}^{comp,cap} + TAC_{loc}^{comp,bin} + TAC_{loc}^{comp,op} \\right)
Objective Function detailed:
.. math::
:nowrap:
\\begin{eqnarray*}
z^* = \\min & & \\underset{comp \\in \\mathcal{C}}{\\sum} \\ \\underset{loc \\in \\mathcal{L}^{comp}}{\\sum}
\\left[ \\text{F}^{comp,cap}_{loc} \\cdot \\left( \\frac{\\text{investPerCap}^{comp}_{loc}}{\\text{CCF}^{comp}_{loc}} \\right.
+ \\text{opexPerCap}^{comp}_{loc} \\right) \\cdot cap^{comp}_{loc} \\\\
& & + \\ \\text{F}^{comp,bin}_{loc} \\cdot \\left( \\frac{\\text{investIfBuilt}^{comp}_{loc}} {CCF^{comp}_{loc}}
+ \\text{opexIfBuilt}^{comp}_{loc} \\right) \\cdot bin^{comp}_{loc} \\\\
& & \\left. + \\left( \\underset{(p,t) \\in \\mathcal{P} \\times \\mathcal{T}}{\\sum} \\ \\underset{\\text{opType} \\in \\mathcal{O}^{comp}}{\\sum} \\text{factorPerOp}^{comp,opType}_{loc} \\cdot op^{comp,opType}_{loc,p,t} \\cdot \\frac{\\text{freq(p)}}{\\tau^{years}} \\right) \\right]
\\end{eqnarray*}
:param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
constraints and objective required for the optimization set up and solving.
:type pyM: pyomo ConcreteModel
"""
utils.output('Declaring objective function...', self.verbose, 0)
def objective(pyM):
TAC = sum(mdl.getObjectiveFunctionContribution(self, pyM) for mdl in self.componentModelingDict.values())
return TAC
pyM.Obj = pyomo.Objective(rule=objective)
def declareOptimizationProblem(self, timeSeriesAggregation=False, segmentation=False, relaxIsBuiltBinary=False):
"""
Declare the optimization problem belonging to the specified energy system for which a pyomo concrete model
instance is built and filled with
* basic time sets,
* sets, variables and constraints contributed by the component modeling classes,
* basic, component overreaching constraints, and
* an objective function.
**Default arguments:**
:param timeSeriesAggregation: states if the optimization of the energy system model should be done with
(a) the full time series (False) or
(b) clustered time series data (True).
|br| * the default value is False
:type timeSeriesAggregation: boolean
:param segmentation: states if the optimization of the energy system model based on clustered time series data
should be done with
(a) aggregated typical periods with the original time step length (False) or
(b) aggregated typical periods with further segmented time steps (True).
|br| * the default value is False
:type segmentation: boolean
:param relaxIsBuiltBinary: states if the optimization problem should be solved as a relaxed LP to get the lower
bound of the problem.
|br| * the default value is False
        :type relaxIsBuiltBinary: boolean
Last edited: March 26, 2020
|br| @author: FINE Developer Team (FZJ IEK-3)
"""
# Get starting time of the optimization to, later on, obtain the total run time of the optimize function call
timeStart = time.time()
# Check correctness of inputs
utils.checkDeclareOptimizationProblemInput(timeSeriesAggregation, self.isTimeSeriesDataClustered)
################################################################################################################
# Initialize mathematical model (ConcreteModel) instance #
################################################################################################################
# Initialize a pyomo ConcreteModel which will be used to store the mathematical formulation of the model.
# The ConcreteModel instance is stored in the EnergySystemModel instance, which makes it available for
# post-processing or debugging. A pyomo Suffix with the name dual is declared to make dual values associated
# to the model's constraints available after optimization.
self.pyM = pyomo.ConcreteModel()
pyM = self.pyM
pyM.dual = pyomo.Suffix(direction=pyomo.Suffix.IMPORT)
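        # Note: dual values are only well defined for purely continuous (LP) formulations; solvers generally
        # do not report duals for problems containing binary variables (see the relaxIsBuiltBinary option).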
# Set time sets for the model instance
self.declareTimeSets(pyM, timeSeriesAggregation, segmentation)
################################################################################################################
# Declare component specific sets, variables and constraints #
################################################################################################################
for key, mdl in self.componentModelingDict.items():
_t = time.time()
utils.output('Declaring sets, variables and constraints for ' + key, self.verbose, 0)
utils.output('\tdeclaring sets... ', self.verbose, 0), mdl.declareSets(self, pyM)
utils.output('\tdeclaring variables... ', self.verbose, 0), mdl.declareVariables(self, pyM, relaxIsBuiltBinary)
utils.output('\tdeclaring constraints... ', self.verbose, 0), mdl.declareComponentConstraints(self, pyM)
utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n', self.verbose, 0)
################################################################################################################
# Declare cross-componential sets and constraints #
################################################################################################################
# Declare constraints for enforcing shared capacities
_t = time.time()
self.declareSharedPotentialConstraints(pyM)
utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n', self.verbose, 0)
# Declare constraints for linked quantities
_t = time.time()
self.declareComponentLinkedQuantityConstraints(pyM)
utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n', self.verbose, 0)
# Declare commodity balance constraints (one balance constraint for each commodity, location and time step)
_t = time.time()
self.declareCommodityBalanceConstraints(pyM)
utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n', self.verbose, 0)
# Declare constraint for balanceLimit
_t = time.time()
self.declareBalanceLimitConstraint(pyM, timeSeriesAggregation)
utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n', self.verbose, 0)
################################################################################################################
# Declare objective function #
################################################################################################################
# Declare objective function by obtaining the contributions to the objective function from all modeling classes
_t = time.time()
self.declareObjective(pyM)
utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n', self.verbose, 0)
# Store the build time of the optimize function call in the EnergySystemModel instance
self.solverSpecs['buildtime'] = time.time() - timeStart
def optimize(self,
declaresOptimizationProblem=True,
relaxIsBuiltBinary=False,
timeSeriesAggregation=False,
logFileName='',
threads=3,
solver='None',
timeLimit=None,
optimizationSpecs='',
warmstart=False):
"""
Optimize the specified energy system for which a pyomo ConcreteModel instance is built or called upon.
A pyomo instance is optimized with the specified inputs, and the optimization results are further
processed.
**Default arguments:**
:param declaresOptimizationProblem: states if the optimization problem should be declared (True) or not (False).
(a) If true, the declareOptimizationProblem function is called and a pyomo ConcreteModel instance is built.
            (b) If false, a previously declared pyomo ConcreteModel instance is used.
|br| * the default value is True
:type declaresOptimizationProblem: boolean
:param relaxIsBuiltBinary: states if the optimization problem should be solved as a relaxed LP to get the lower
bound of the problem.
|br| * the default value is False
        :type relaxIsBuiltBinary: boolean
:param timeSeriesAggregation: states if the optimization of the energy system model should be done with
(a) the full time series (False) or
(b) clustered time series data (True).
|br| * the default value is False
:type timeSeriesAggregation: boolean
        Note: the segmentation setting is not passed as an argument to optimize; it is taken from the
            EnergySystemModel instance (self.segmentation) and determines whether the aggregated typical
            periods are used with
            (a) the original time step length or
            (b) further segmented time steps.
            It is reset to False if timeSeriesAggregation is False.
:param logFileName: logFileName is used for naming the log file of the optimization solver output
if gurobi is used as the optimization solver.
If the logFileName is given as an absolute path (e.g. logFileName = os.path.join(os.getcwd(),
'Results', 'logFileName.txt')) the log file will be stored in the specified directory. Otherwise,
it will be stored by default in the directory where the executing python script is called.
            |br| * the default value is an empty string ('')
:type logFileName: string
        :param threads: number of computational threads used for solving the optimization (solver dependent
            input) if gurobi is used as the solver. A value of 0 results in using all available threads. If
            a value larger than the number of available threads is chosen, the value is reset to the maximum
            number of threads.
|br| * the default value is 3
:type threads: positive integer
:param solver: specifies which solver should solve the optimization problem (which of course has to be
installed on the machine on which the model is run).
            |br| * the default value is 'None'; an available solver is then chosen automatically
            (priority: gurobi, coincbc, glpk)
:type solver: string
:param timeLimit: if not specified as None, indicates the maximum solve time of the optimization problem
in seconds (solver dependent input). The use of this parameter is suggested when running models in
runtime restricted environments (such as clusters with job submission systems). If the runtime
limitation is triggered before an optimal solution is available, the best solution obtained up
until then (if available) is processed.
|br| * the default value is None
:type timeLimit: strictly positive integer or None
:param optimizationSpecs: specifies parameters for the optimization solver (see the respective solver
documentation for more information). Example: 'LogToConsole=1 OptimalityTol=1e-6'
|br| * the default value is an empty string ('')
        :type optimizationSpecs: string
:param warmstart: specifies if a warm start of the optimization should be considered
(not always supported by the solvers).
|br| * the default value is False
:type warmstart: boolean
Last edited: March 26, 2020
|br| @author: FINE Developer Team (FZJ IEK-3)
"""
if not timeSeriesAggregation:
self.segmentation = False
if declaresOptimizationProblem:
self.declareOptimizationProblem(timeSeriesAggregation=timeSeriesAggregation, segmentation=self.segmentation,
relaxIsBuiltBinary=relaxIsBuiltBinary)
else:
if self.pyM is None:
                raise TypeError('The optimization problem is not declared yet. Set the argument '
                                'declaresOptimizationProblem to True or call the declareOptimizationProblem '
                                'function first.')
# Get starting time of the optimization to, later on, obtain the total run time of the optimize function call
timeStart = time.time()
# Check correctness of inputs
utils.checkOptimizeInput(timeSeriesAggregation, self.isTimeSeriesDataClustered, logFileName, threads, solver,
timeLimit, optimizationSpecs, warmstart)
# Store keyword arguments in the EnergySystemModel instance
self.solverSpecs['logFileName'], self.solverSpecs['threads'] = logFileName, threads
self.solverSpecs['solver'], self.solverSpecs['timeLimit'] = solver, timeLimit
self.solverSpecs['optimizationSpecs'], self.solverSpecs['hasTSA'] = optimizationSpecs, timeSeriesAggregation
        # Check which solvers are available and choose a default solver if no solver is specified explicitly.
        # The order of possible solvers in solverList defines the priority of the chosen default solver.
solverList = ['gurobi', 'coincbc', 'glpk']
        if solver != 'None':
            try:
                if not opt.SolverFactory(solver).available():
                    solver = 'None'
            except Exception:
                solver = 'None'
if solver == 'None':
for nSolver in solverList:
if solver == 'None':
try:
if opt.SolverFactory(nSolver).available():
solver = nSolver
                            utils.output('Either no solver was selected or the specified solver is not available. '
                                         + str(nSolver) + ' is set as the solver.', self.verbose, 0)
                    except Exception:
                        pass
if solver == 'None':
raise TypeError('At least one solver must be installed.'
' Have a look at the FINE documentation to see how to install possible solvers.'
' https://vsa-fine.readthedocs.io/en/latest/')
################################################################################################################
# Solve the specified optimization problem #
################################################################################################################
# Set which solver should solve the specified optimization problem
optimizer = opt.SolverFactory(solver)
# Set, if specified, the time limit
if self.solverSpecs['timeLimit'] is not None and solver == 'gurobi':
optimizer.options['timelimit'] = timeLimit
# Set the specified solver options
if 'LogToConsole=' not in optimizationSpecs and solver == "gurobi":
if self.verbose == 2:
optimizationSpecs += ' LogToConsole=0'
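        # (LogToConsole=0 is a gurobi parameter that disables console logging; it is only appended here when
        # verbose == 2, which presumably corresponds to suppressed console output, and the user has not set
        # the parameter explicitly in optimizationSpecs.)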
# Solve optimization problem. The optimization solve time is stored and the solver information is printed.
        if solver == 'gurobi':
            optimizer.set_options('Threads=' + str(threads) + ' logfile=' + logFileName + ' ' + optimizationSpecs)
            solver_info = optimizer.solve(self.pyM, warmstart=warmstart, tee=True)
        elif solver == 'glpk':
            optimizer.set_options(optimizationSpecs)
            solver_info = optimizer.solve(self.pyM, tee=True)
        else:
            solver_info = optimizer.solve(self.pyM, tee=True)
self.solverSpecs['solvetime'] = time.time() - timeStart
utils.output(solver_info.solver(), self.verbose, 0), utils.output(solver_info.problem(), self.verbose, 0)
utils.output('Solve time: ' + str(self.solverSpecs['solvetime']) + ' sec.', self.verbose, 0)
################################################################################################################
# Post-process optimization output #
################################################################################################################
_t = time.time()
# Post-process the optimization output by differentiating between different solver statuses and termination
# conditions. First, check if the status and termination_condition of the optimization are acceptable.
# If not, no output is generated.
# TODO check if this is still compatible with the latest pyomo version
status, termCondition = solver_info.solver.status, solver_info.solver.termination_condition
self.solverSpecs['status'] = str(status)
self.solverSpecs['terminationCondition'] = str(termCondition)
if status == opt.SolverStatus.error or status == opt.SolverStatus.aborted or status == opt.SolverStatus.unknown:
utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +
'. No output is generated.', self.verbose, 0)
elif solver_info.solver.termination_condition == opt.TerminationCondition.infeasibleOrUnbounded or \
solver_info.solver.termination_condition == opt.TerminationCondition.infeasible or \
solver_info.solver.termination_condition == opt.TerminationCondition.unbounded:
utils.output('Optimization problem is ' + str(solver_info.solver.termination_condition) +
'. No output is generated.', self.verbose, 0)
else:
            # If the termination condition is not optimal (but the solver status was acceptable), warn that
            # the generated output corresponds to a non-optimal solution.
if not solver_info.solver.termination_condition == opt.TerminationCondition.optimal and self.verbose < 2:
warnings.warn('Output is generated for a non-optimal solution.')
utils.output("\nProcessing optimization output...", self.verbose, 0)
            # Process the component-specific optimization output (set the optimal values in each modeling class)
            w = str(len(max(self.componentModelingDict.keys(), key=len)) + 6)
for key, mdl in self.componentModelingDict.items():
__t = time.time()
mdl.setOptimalValues(self, self.pyM)
outputString = ('for {:' + w + '}').format(key + ' ...') + "(%.4f" % (time.time() - __t) + "sec)"
utils.output(outputString, self.verbose, 0)
# Store the objective value in the EnergySystemModel instance.
self.objectiveValue = self.pyM.Obj()
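            # Calling pyM.Obj() evaluates the objective expression at the variable values loaded from the
            # solver, i.e. it yields the total annual cost of the returned (optimal or best found) solution.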
utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n', self.verbose, 0)
# Store the runtime of the optimize function call in the EnergySystemModel instance
self.solverSpecs['runtime'] = self.solverSpecs['buildtime'] + time.time() - timeStart