###
#
# Lenovo Redfish examples - Get metric inventory
#
# Copyright Notice:
#
# Copyright 2019 Lenovo Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
###
import sys
import redfish
import json
import lenovo_utils as utils
def get_metric_definition_report(ip, login_account, login_password):
"""Get metric inventory
:params ip: BMC IP address
:type ip: string
:params login_account: BMC user name
:type login_account: string
:params login_password: BMC user password
:type login_password: string
:returns: returns metric inventory when succeeded or error message when failed
"""
result = {}
try:
# Connect using the BMC address, account name, and password
# Create a REDFISH object
login_host = "https://" + ip
REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account,
password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)
# Login into the server and create a session
REDFISH_OBJ.login(auth=utils.g_AUTH)
except:
result = {'ret': False, 'msg': "Please check if the username, password, IP is correct."}
return result
# Get ServiceRoot resource
response_base_url = REDFISH_OBJ.get('/redfish/v1', None)
# Get response_telemetry_service_url
if response_base_url.status == 200:
if 'TelemetryService' in response_base_url.dict:
telemetry_service_url = response_base_url.dict['TelemetryService']['@odata.id']
else:
result = {'ret': False, 'msg': "TelemetryService is not supported"}
REDFISH_OBJ.logout()
return result
else:
result = {'ret': False, 'msg': "Access url /redfish/v1 failed. Error code %s" % response_base_url.status}
REDFISH_OBJ.logout()
return result
response_telemetry_service_url = REDFISH_OBJ.get(telemetry_service_url, None)
if response_telemetry_service_url.status != 200:
result = {'ret': False, 'msg': "Access url %s failed. Error code %s" % (telemetry_service_url, response_telemetry_service_url.status)}
REDFISH_OBJ.logout()
return result
metric_inventory = {}
# Get MetricDefinition collection
metric_collection_url = response_telemetry_service_url.dict['MetricDefinitions']['@odata.id']
response_metric_collection_url = REDFISH_OBJ.get(metric_collection_url, None)
if response_metric_collection_url.status != 200:
result = {'ret': False, 'msg': "Access url %s failed. Error code %s" % (metric_collection_url, response_metric_collection_url.status)}
REDFISH_OBJ.logout()
return result
# Get each MetricDefinition
    metric_definitions = []
for metric_member in response_metric_collection_url.dict["Members"]:
metric_url = metric_member['@odata.id']
metric_list = metric_url.split("/")
response_metric_url = REDFISH_OBJ.get(metric_url, None)
if response_metric_url.status == 200:
metric_detail = {}
for property in response_metric_url.dict:
if property not in ["Description","@odata.context","@odata.id","@odata.type","@odata.etag", "Links", "Actions", "RelatedItem"]:
metric_detail[property] = response_metric_url.dict[property]
metric_entry = {metric_list[-1]: metric_detail}
            metric_definitions.append(metric_entry)
else:
result = {'ret': False,
'msg': "Access url %s failed. Error code %s" %(metric_url, response_metric_url.status)}
REDFISH_OBJ.logout()
return result
# Get MetricReports collection
metric_collection_url = response_telemetry_service_url.dict['MetricReports']['@odata.id']
response_metric_collection_url = REDFISH_OBJ.get(metric_collection_url, None)
if response_metric_collection_url.status != 200:
result = {'ret': False, 'msg': "Access url %s failed. Error code %s" % (metric_collection_url, response_metric_collection_url.status)}
REDFISH_OBJ.logout()
return result
# Get each MetricReport
metric_reports = []
for metric_member in response_metric_collection_url.dict["Members"]:
metric_url = metric_member['@odata.id']
metric_list = metric_url.split("/")
response_metric_url = REDFISH_OBJ.get(metric_url, None)
if response_metric_url.status == 200:
metric_detail = {}
for property in response_metric_url.dict:
if property not in ["Description","@odata.context","@odata.id","@odata.type","@odata.etag", "Links", "Actions", "RelatedItem"]:
metric_detail[property] = response_metric_url.dict[property]
metric_entry = {metric_list[-1]: metric_detail}
metric_reports.append(metric_entry)
else:
result = {'ret': False,
'msg': "Access url %s failed. Error code %s" %(metric_url, response_metric_url.status)}
REDFISH_OBJ.logout()
return result
# Set result
    metric_inventory['MetricDefinitions'] = metric_definitions
metric_inventory['MetricReports'] = metric_reports
result['ret'] = True
result['metric_inventory'] = metric_inventory
try:
REDFISH_OBJ.logout()
except:
pass
return result
def add_parameter():
argget = utils.create_common_parameter_list()
args = argget.parse_args()
parameter_info = utils.parse_parameter(args)
return parameter_info
if __name__ == '__main__':
# Get parameters from config.ini and/or command line
parameter_info = add_parameter()
# Get connection info from the parameters user specified
ip = parameter_info['ip']
login_account = parameter_info["user"]
login_password = parameter_info["passwd"]
# Get metric inventory and check result
result = get_metric_definition_report(ip, login_account, login_password)
if result['ret'] is True:
del result['ret']
sys.stdout.write(json.dumps(result['metric_inventory'], sort_keys=True, indent=2) + '\n')
else:
sys.stderr.write(result['msg'] + '\n')
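# Hedged usage note (added, not in the original script): lenovo_utils.create_common_parameter_list()
# normally supplies the connection arguments, so a typical invocation looks like
#     python get_metric_inventory.py -i 10.10.10.10 -u USERID -p PASSW0RD
# where the script name, IP address, and credentials are made-up placeholders.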
| python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# List comprehensions (list generation expressions)
# 1. Extract the odd numbers between 1 and 20
# odd_list = []
# for i in range(21):
# if i % 2 == 1:
# odd_list.append(i)
# odd_list = [i for i in range(21) if i % 2 == 1]
# print(odd_list)
# 2. More complex logic: if the number is odd, square the result
# A list comprehension is faster than building the list with append
def handle_item(item):
return item * item
odd_list = [handle_item(i) for i in range(21) if i % 2 == 1]
print(odd_list)
# Generator expression
odd_gen = (i for i in range(21) if i % 2 == 1)
print(type(odd_gen))
for item in odd_gen:
print(item)
# Dict comprehension
my_dict = {"bobby1": 22, "bobby2": 23, "imooc.com": 5}
reversed_dict = {value:key for key, value in my_dict.items()}
print(reversed_dict)
# Set comprehension
my_set = set(my_dict.keys())
my_set = {key for key, value in my_dict.items()}
print(type(my_set))
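# A minimal sketch (added, not part of the original lesson) backing the claim above
# that a list comprehension is faster than building the list with append:
import timeit
loop_stmt = "l = []\nfor i in range(21):\n    if i % 2 == 1:\n        l.append(i)"
comp_stmt = "[i for i in range(21) if i % 2 == 1]"
print("append loop:   ", timeit.timeit(loop_stmt, number=100000))
print("comprehension: ", timeit.timeit(comp_stmt, number=100000))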
| python |
"""
Desenvolva uma lógica que leia o peso e a altura de uma pessoa,
calcule seu IMC e mostre seu status.
Rasgue as minhas cartas
E não me procure mais
Assim será melhor, meu bem
O retrato que eu te dei
Se ainda tens, não sei
Mas se tiver, devolva-me
Devolva-me - Adriana Calcanhotto ♪♫
"""
peso = float(input('Enter your weight: '))
altura = float(input('Enter your height: '))
imc = peso / altura ** 2
print('With a BMI of {:.2f} you are '.format(imc), end='')
if imc < 18.5:
    print('underweight!')
elif imc < 25:
    print('at your ideal weight!')
elif imc < 30:
    print('overweight!')
elif imc < 40:
    print('obese!')
else:
    print('morbidly obese!')
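# Worked example (added for clarity, not in the original exercise): for 70 kg and
# 1.75 m, imc = 70 / 1.75 ** 2 = 70 / 3.0625 ≈ 22.86, which falls in the
# 18.5 <= imc < 25 band and prints the "ideal weight" message.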
| python |
# -*- coding: utf-8 -*-
from flask import Flask
from peewee import MySQLDatabase
from celery import Celery
from config import config
db = MySQLDatabase(None)
def create_app(config_name):
"""
    Create the Flask application object
:param config_name:
:return:
"""
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.init(**app.config['MYSQL'])
from .models import models
db.create_tables(models, safe=True)
from .hooks import before_app_request, after_app_request
app.before_request(before_app_request)
app.teardown_request(after_app_request)
from .blueprints.cms_main import bp_cms_main
from .blueprints.cms_api import bp_cms_api
from .blueprints.open_main import bp_open_main
from .blueprints.open_api import bp_open_api
from .blueprints.sample_h5_main import bp_sample_h5_main
from .blueprints.sample_h5_api import bp_sample_h5_api
app.register_blueprint(bp_cms_main, subdomain=app.config['SUBDOMAIN'].get('cms_main'))
app.register_blueprint(bp_cms_api, subdomain=app.config['SUBDOMAIN'].get('cms_api'), url_prefix='/api')
app.register_blueprint(bp_open_main, subdomain=app.config['SUBDOMAIN'].get('open_main'))
app.register_blueprint(bp_open_api, subdomain=app.config['SUBDOMAIN'].get('open_api'), url_prefix='/api')
app.register_blueprint(bp_sample_h5_main, subdomain=app.config['SUBDOMAIN'].get('sample_h5_main'))
app.register_blueprint(bp_sample_h5_api, subdomain=app.config['SUBDOMAIN'].get('sample_h5_api'), url_prefix='/api')
return app
def create_celery_app(app=None):
"""
    Create the Celery application object
:param app:
:return:
"""
import os
app = app or create_app(os.getenv('FLASK_CONFIG') or 'default')
celery = Celery(app.import_name)
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
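# A minimal usage sketch (added, not in the original module), assuming a 'default'
# key exists in the config dict (create_celery_app above falls back to it too):
#
#     app = create_app('default')
#     celery = create_celery_app(app)
#     app.run()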
| python |
from __future__ import print_function, division
#
import sys,os
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
#
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import spinful_fermion_basis_1d # Hilbert space spinful fermion basis
import numpy as np # generic math functions
#
##### define model parameters #####
L=6 # system size
J=1.0 # hopping strength
U=np.sqrt(2) # onsite interaction strength
#
##### construct basis at half-filling in the 0-total momentum and +1-spin flip sector
basis=spinful_fermion_basis_1d(L=L,Nf=(L//2,L//2),a=1,kblock=0,sblock=1)
print(basis)
#
##### define PBC site-coupling lists for operators
# define site-coupling lists
hop_right = [[-J,i,(i+1)%L] for i in range(L)] # hopping to the right PBC
hop_left = [[J,i,(i+1)%L] for i in range(L)] # hopping to the left PBC
int_list = [[U,i,i] for i in range(L)] # onsite interaction
# static and dynamic lists
static= [
["+-|", hop_left], # up hop left
["-+|", hop_right], # up hop right
["|+-", hop_left], # down hop left
["|-+", hop_right], # down hop right
["n|n", int_list], # onsite interaction
]
dynamic=[]
###### construct Hamiltonian
H=hamiltonian(static,dynamic,dtype=np.float64,basis=basis)
| python |
"""
Sponge Knowledge Base
Action metadata Record type
"""
def createBookType(name):
return RecordType(name, [
IntegerType("id").withNullable().withLabel("Identifier"),
StringType("author").withLabel("Author"),
StringType("title").withLabel("Title")
])
BOOK = {"id":1, "author":"James Joyce", "title":"Ulysses"}
class RecordAsResultAction(Action):
def onConfigure(self):
self.withArg(IntegerType("bookId")).withResult(createBookType("book").withNullable())
def onCall(self, bookId):
global BOOK
return BOOK if bookId == BOOK["id"] else None
class RecordAsArgAction(Action):
def onConfigure(self):
self.withArg(createBookType("book")).withNoResult()
def onCall(self, book):
global BOOK
BOOK = {"id":1, "author":book["author"], "title":book["title"]}
| python |
"""
restriction generators representing sets of packages
"""
| python |
# http://www.geeksforgeeks.org/design-a-stack-that-supports-getmin-in-o1-time-and-o1-extra-space/
from sys import maxint
class MyStack:
def __init__(self):
self.minimum = -maxint-1
self.stack = []
def push(self,val):
if not self.stack:
self.minimum = val
self.stack.append(val)
else:
if val > self.minimum:
self.stack.append(val)
else:
self.stack.append(2*val - self.minimum)
self.minimum = val
def pop(self):
if self.stack:
val = self.stack.pop()
if val >= self.minimum:
return val
else:
self.minimum = 2*self.minimum - val
return self.minimum
else:
return None
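    def getMin(self):
        # Added sketch (not in the original snippet): the O(1) getMin that the
        # linked technique is designed around. self.minimum always holds the
        # current minimum of the stack, so it can be returned directly.
        if self.stack:
            return self.minimum
        return None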
if __name__ == "__main__":
s = MyStack()
print s.push(3), s.stack,s.minimum
print s.push(5), s.stack,s.minimum
print s.push(2), s.stack,s.minimum
print s.push(1), s.stack,s.minimum
print s.push(1), s.stack,s.minimum
print s.push(-1), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
| python |
import pygame
import math
pygame.font.init()
DEBUG_FONT = pygame.font.Font(None, 22)
def get_surface(obj):
""" Returns a Surface representing the parameter.
    If obj is the filename of an image, a surface containing the image will be returned.
    If obj is a Surface, it will be returned unchanged.
"""
if isinstance(obj, pygame.Surface):
return obj
return pygame.image.load(obj)
def get_anchor(obj, anchor):
""" Returns the point representing the anchor on the given Surface or Rect.
obj can be a Surface or Rect.
anchor should be a string of one of the point attributes (e.g. 'topleft',
'center', 'midbottom', etc.).
"""
if anchor not in ['topleft', 'bottomleft', 'topright', 'bottomright',
'midtop', 'midleft', 'midbottom', 'midright', 'center']:
raise ValueError('Invalid anchor')
try:
return getattr(obj.get_rect(), anchor)
except AttributeError:
return getattr(obj, anchor)
def blit_anchors(dest, dest_anchor, src, src_anchor):
""" Blits the source onto the destination such that their anchors align.
src_anchor and dest_anchor can be strings of one of the point attributes (topleft, center,
midbottom, etc.) or a position on their respective surfaces (e.g [100, 200]).
"""
try:
src_anchor = get_anchor(src, src_anchor)
except ValueError:
pass # Assume src_anchor is already a point. If not, it will fail in the map().
try:
dest_anchor = get_anchor(dest, dest_anchor)
except ValueError:
pass # Assume dest_anchor is already a point. If not, it will fail in the map().
topleft = list(map(lambda a,b,c: a - b + c, src.get_rect().topleft, src_anchor, dest_anchor))
dest.blit(src, topleft)
def get_color(obj):
""" Returns a Color object representing the parameter.
"""
try:
return pygame.Color(obj)
except ValueError:
if isinstance(obj, basestring): # Invalid color name
raise
elif len(obj) not in range(1, 5):
raise ValueError('Object does not represent a color')
else:
return obj
def draw_fps(surface, clock, anchor='topright', color='red'):
""" Draws an FPS counter on a surface at the given anchor.
"""
fps_surface = DEBUG_FONT.render(str(int(clock.get_fps())), True, get_color(color))
blit_anchors(surface, anchor, fps_surface, anchor)
def font_render_multiline(font, text, antialias, color, background=None, justify='left', line_spacing=0):
""" Returns a Surface containing the text in the given font.
The first five parameters are the ones used to render single line text.
justify can be 'left', 'right', or 'center'.
line_spacing is how much space to leave between lines in units of the font's height.
"""
anchors = {'left':'topleft', 'right':'topright', 'center':'center'}
lines = text.split('\n')
width = max([font.size(line)[0] for line in lines])
line_height = font.size('')[1]
height = math.ceil(line_height * (len(lines) + line_spacing * (len(lines) - 1)))
multiline = pygame.Surface((width, height))
if background is not None:
multiline.set_colorkey(background)
multiline.fill(background)
else:
multiline.convert_alpha()
multiline.fill([128, 128, 128, 0])
anchor_x = getattr(multiline.get_rect(), justify)
try:
anchor_x = anchor_x[0]
except:
pass
y = 0
while len(lines):
if background is None:
line = font.render(lines.pop(0), antialias, color)
else:
line = font.render(lines.pop(0), antialias, color, background)
dest_anchor = [anchor_x, int(y)]
blit_anchors(multiline, dest_anchor, line, anchors[justify])
y += (1 + line_spacing) * line_height
return multiline
def offset(point, offset):
""" Offsets a point by an amount.
Equivalent to adding vectors.
"""
return tuple(map(sum, zip(point, offset)))
def rect_largest_fit(inner, outer):
""" Moves and resizes a Rect to the largest it can be while still fitting in another Rect and maintaining its aspect ratio.
"""
# TODO: check behavior when inner is larger than outer in one or both dimensions
inner.topleft = outer.topleft
w_ratio = outer.w / inner.w
h_ratio = outer.h / inner.h
if w_ratio < h_ratio:
inner.w = outer.w
inner.h *= w_ratio
else:
inner.h = outer.h
inner.w *= h_ratio
class FloatRect(object):
def __init__(self, topleft, size):
self._left, self._top = map(float, topleft)
self._width, self._height = map(float, size)
        self._half_width, self._half_height = [a / 2.0 for a in size]
        self._centerx = self._left + self._half_width
        self._centery = self._top + self._half_height
self._right = self._left + self._width
self._bottom = self._top + self._height
def left():
doc = "The left property."
def fget(self):
return self._left
def fset(self, value):
flt = float(value)
self._right += flt - self._left
self._left = flt
self._centerx = flt + self._half_width
def fdel(self):
del self._left
return locals()
left = property(**left())
def right():
doc = "The right property."
def fget(self):
return self._right
def fset(self, value):
flt = float(value)
self._left += flt - self._right
self._right = flt
self._centerx = self._left + self._half_width
def fdel(self):
del self._right
return locals()
right = property(**right())
def top():
doc = "The top property."
def fget(self):
return self._top
def fset(self, value):
flt = float(value)
self._bottom += flt - self._top
self._top = flt
self._centery = flt + self._half_height
def fdel(self):
del self._top
return locals()
top = property(**top())
def bottom():
doc = "The bottom property."
def fget(self):
return self._bottom
def fset(self, value):
flt = float(value)
self._top += flt - self._bottom
self._bottom = flt
self._centery = self._top + self._half_height
def fdel(self):
del self._bottom
return locals()
bottom = property(**bottom())
def centerx():
doc = "The centerx property."
def fget(self):
return self._centerx
def fset(self, value):
flt = float(value)
self._left = flt - self._half_width
self._right = flt + self._half_width
self._centerx = flt
def fdel(self):
del self._centerx
return locals()
centerx = property(**centerx())
def centery():
doc = "The centery property."
def fget(self):
return self._centery
def fset(self, value):
flt = float(value)
self._top = flt - self._half_height
self._bottom = flt + self._half_height
self._centery = flt
def fdel(self):
del self._centery
return locals()
centery = property(**centery())
def width():
doc = "The width property."
def fget(self):
return self._width
def fset(self, value):
flt = float(value)
self._width = flt
self._half_width = flt / 2
self.centerx = self.centerx # Set left and right
def fdel(self):
del self._width
return locals()
w = width = property(**width())
def height():
doc = "The height property."
def fget(self):
return self._height
def fset(self, value):
flt = float(value)
self._height = flt
self._half_height = flt / 2
self.centery = self.centery # Set top and bottom
def fdel(self):
del self._height
return locals()
h = height = property(**height())
def size():
doc = "The size property."
def fget(self):
return [self.width, self.height]
def fset(self, value):
self.width, self.height = value
return locals()
size = property(**size())
def topleft():
doc = "The topleft property."
def fget(self):
return [self.left, self.top]
def fset(self, value):
self.left, self.top = value
return locals()
topleft = property(**topleft())
def bottomleft():
doc = "The bottomleft property."
def fget(self):
return [self.left, self.bottom]
def fset(self, value):
self.left, self.bottom = value
return locals()
bottomleft = property(**bottomleft())
def topright():
doc = "The topright property."
def fget(self):
return [self.right, self.top]
def fset(self, value):
self.right, self.top = value
return locals()
topright = property(**topright())
def bottomright():
doc = "The bottomright property."
def fget(self):
return [self.right, self.bottom]
def fset(self, value):
self.right, self.bottom = value
return locals()
bottomright = property(**bottomright())
def midtop():
doc = "The midtop property."
def fget(self):
return [self.centerx, self.top]
def fset(self, value):
self.centerx, self.top = value
return locals()
midtop = property(**midtop())
def midleft():
doc = "The midleft property."
def fget(self):
return [self.left, self.centery]
def fset(self, value):
self.left, self.centery = value
return locals()
midleft = property(**midleft())
def midbottom():
doc = "The midbottom property."
def fget(self):
return [self.centerx, self.bottom]
def fset(self, value):
self.centerx, self.bottom = value
return locals()
midbottom = property(**midbottom())
def midright():
doc = "The midright property."
def fget(self):
return [self.right, self.centery]
def fset(self, value):
self.right, self.centery = value
return locals()
midright = property(**midright())
def __repr__(self):
return 'FloatRect(%s, %s)' % (str(self.bottomleft), str(self.size))
class RectDivider(object):
""" Given a large Rect and a small one, allow iteration through non-overlapping locations of the small Rect
"""
returned_start = False
def __init__(self, outer, inner, direction='horizontal', horizontal='right', vertical='down', zigzag=False):
"""
outer is the outer Rect.
inner is the inner Rect and the first return value.
direction is whether to move 'vertically' or 'horizontally' first.
horizontal is whether to move 'left' or 'right' when moving horizontally.
vertical is whether to move 'up' or 'down' when moving vertically.
zigzag is whether to zigzag when reaching an edge rather than reset to the other side.
"""
self.outer = outer.copy()
self.inner = inner.copy()
self.zigzag = zigzag
# Resize self.outer so inner fits without any left over.
# This makes zigzagging simpler.
self.outer.w -= self.outer.w % self.inner.w
self.outer.h -= self.outer.h % self.inner.h
dir_err = ValueError('Invalid direction')
if direction == 'vertical':
self.d = 'v'
elif direction == 'horizontal':
self.d = 'h'
else:
raise dir_err
if horizontal == 'left':
self.h = -1
elif horizontal == 'right':
self.h = 1
else:
raise dir_err
if vertical == 'up':
self.v = -1
elif vertical == 'down':
self.v = 1
else:
raise dir_err
def __iter__(self): return self
def next(self):
if not self.returned_start:
self.returned_start = True
return self.inner
if self.d == 'h':
self.inner.left += self.h * self.inner.w
clamped = self.inner.clamp(self.outer)
if clamped != self.inner:
self.inner.top += self.v * self.inner.h
if self.zigzag:
self.h *= -1
if self.h == 1:
self.inner.left = self.outer.left
else:
self.inner.right = self.outer.right
else:
self.inner.top += self.v * self.inner.h
clamped = self.inner.clamp(self.outer)
if clamped != self.inner:
self.inner.left += self.h * self.inner.w
if self.zigzag:
self.v *= -1
if self.v == 1:
self.inner.top = self.outer.top
else:
self.inner.bottom = self.outer.bottom
clamped = self.inner.clamp(self.outer)
if clamped != self.inner:
raise StopIteration
return self.inner
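# A minimal usage sketch (added, not part of the original module): a small demo of
# the anchor-alignment helpers above, run only when this file is executed directly.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    clock = pygame.time.Clock()
    badge = pygame.Surface((60, 40))
    badge.fill(pygame.Color('dodgerblue'))
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        screen.fill(pygame.Color('black'))
        # Align the badge's center with the screen's center.
        blit_anchors(screen, 'center', badge, 'center')
        # Draw the FPS counter in the default top-right anchor.
        draw_fps(screen, clock)
        pygame.display.flip()
        clock.tick(60)
    pygame.quit()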
| python |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.experimental.abc.abc_infer import ApproximateBayesianComputation
class ApproximateBayesianComputationTest(unittest.TestCase):
def setUp(self):
torch.manual_seed(8665309)
class CoinTossModel:
def __init__(self, observation_shape):
self.observation_shape = observation_shape
@bm.random_variable
def bias(self):
return dist.Beta(0.5, 0.5)
@bm.random_variable
def coin_toss(self):
return dist.Bernoulli(self.bias().repeat(self.observation_shape))
def toss_head_count(self, toss_vals):
return torch.sum(toss_vals)
def toss_mean(self, toss_vals):
return torch.mean(toss_vals)
@bm.functional
def num_heads(self):
return self.toss_head_count(self.coin_toss())
@bm.functional
def mean_value(self):
return self.toss_mean(self.coin_toss())
def test_abc_inference(self):
model = self.CoinTossModel(observation_shape=10)
COIN_TOSS_DATA = dist.Bernoulli(0.9).sample([10])
num_heads_key = model.num_heads()
mean_value_key = model.mean_value()
abc = ApproximateBayesianComputation(
tolerance={num_heads_key: 1.0, mean_value_key: 0.1}
)
observations = {
num_heads_key: model.toss_head_count(COIN_TOSS_DATA),
mean_value_key: model.toss_mean(COIN_TOSS_DATA),
}
queries = [model.bias()]
samples = abc.infer(
queries, observations, num_samples=10, num_chains=1, verbose=None
)
mean = torch.mean(samples[model.bias()][0])
self.assertTrue(mean.item() > 0.65)
abc.reset()
def test_abc_inference_with_singleton_arguments(self):
model = self.CoinTossModel(observation_shape=10)
COIN_TOSS_DATA = dist.Bernoulli(0.95).sample([10])
abc = ApproximateBayesianComputation(
distance_function=torch.dist, tolerance=1.0
)
observations = {
model.num_heads(): model.toss_head_count(COIN_TOSS_DATA),
model.mean_value(): model.toss_mean(COIN_TOSS_DATA),
}
queries = [model.bias()]
samples = abc.infer(
queries, observations, num_samples=10, num_chains=1, verbose=None
)
mean = torch.mean(samples[model.bias()][0])
self.assertTrue(mean.item() > 0.65)
abc.reset()
def test_single_inference_step(self):
model = self.CoinTossModel(observation_shape=10)
abc = ApproximateBayesianComputation(tolerance={model.num_heads(): 1.0})
abc.observations_ = {model.num_heads(): torch.tensor(15.0)}
self.assertEqual(abc._single_inference_step(), 0.0)
abc.reset()
def test_max_attempts(self):
model = self.CoinTossModel(observation_shape=100)
COIN_TOSS_DATA = dist.Bernoulli(0.9).sample([100])
abc = ApproximateBayesianComputation(
tolerance={model.num_heads(): 0.1}, max_attempts_per_sample=2
)
observations = {model.num_heads(): model.toss_head_count(COIN_TOSS_DATA)}
queries = [model.bias()]
with self.assertRaises(RuntimeError):
abc.infer(
queries, observations, num_samples=100, num_chains=1, verbose=None
)
abc.reset()
def test_shape_mismatch(self):
model = self.CoinTossModel(observation_shape=100)
abc = ApproximateBayesianComputation(tolerance={model.num_heads(): 0.1})
observations = {model.num_heads(): torch.tensor([3, 4])}
queries = [model.bias()]
with self.assertRaises(ValueError):
abc.infer(
queries, observations, num_samples=100, num_chains=1, verbose=None
)
abc.reset()
def test_simulate_mode(self):
model = self.CoinTossModel(observation_shape=10)
COIN_TOSS_DATA = dist.Bernoulli(0.9).sample([10])
abc = ApproximateBayesianComputation(
tolerance={model.num_heads(): 1, model.mean_value(): 0.1}
)
observations = {
model.num_heads(): model.toss_head_count(COIN_TOSS_DATA),
model.mean_value(): model.toss_mean(COIN_TOSS_DATA),
}
queries = [model.bias()]
samples = abc.infer(
queries, observations, num_samples=1, num_chains=1, verbose=None
)
# simulate 10 coin tosses from accepted bias sample
sim_observations = {model.bias(): samples[model.bias()][0]}
sim_queries = [model.coin_toss()]
sim_abc = ApproximateBayesianComputation(simulate=True)
sim_samples = sim_abc.infer(
sim_queries, sim_observations, num_samples=10, num_chains=1, verbose=None
)
self.assertTrue(torch.sum(sim_samples[model.coin_toss()][0] == 1.0) > 5)
| python |
from .base import init
| python |
__author__ = 'zaxlct'
__date__ = '2017/4/6 12:14 PM'
import re
from django import forms
from operation.models import UserAsk
# class UserAskForm(forms.Form):
# name = forms.CharField(required=True, min_length=2, max_length=20)
# phone = forms.CharField(required=True, min_length=11, max_length=11)
# course_name = forms.CharField(required=True, min_length=5, max_length=50)
class UserAskForm(forms.ModelForm):
    # Additional fields can also be added
# price = forms.CharField(required=True, min_length=2, max_length=20)
class Meta:
model = UserAsk
fields = ['name', 'mobile', 'course_name']
# def clean_name(self):
# def clean_course_name(self):
def clean_mobile(self):
        # Mobile number validation
mobile = self.cleaned_data['mobile']
        p = re.compile(r'^0\d{2,3}\d{7,8}$|^1[358]\d{9}$|^147\d{8}')
if p.match(mobile):
            # A foreign key could also be returned here
return mobile
        raise forms.ValidationError('Invalid mobile number format', code='mobile_inval')
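def _example_usage():
    # A minimal usage sketch (added, not in the original module): how this form is
    # typically validated in a view. The field values are made-up examples and the
    # function is never called here.
    form = UserAskForm({'name': 'Tom', 'mobile': '13512345678', 'course_name': 'Python Basics'})
    if form.is_valid():
        return form.save(commit=True)  # persists a UserAsk row
    return form.errors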
| python |
from .libs import metadata
from .libs import utils
from .libs.athena import Athena
from .libs.s3 import S3
from .libs.csv_parser import single_column_csv_to_list, csv_to_list_of_dicts
from .libs.policy_generator import PolicyGenerator
import argparse
import logging
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument("metadata")
parser.add_argument("--setup", action="store_true")
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
return args
def initialize_classes(args):
""" Reading metadata, performing metadata validation, initializing required classes.
Classes / metadata stored in initc dictionary. """
initc = {}
meta = metadata.read(args.metadata)
initc['boto'] = utils.Boto(meta)
initc['meta'] = metadata.set_defaults(meta, initc['boto'])
initc['s3'] = S3(initc['meta'], initc['boto'].session)
initc['athena'] = Athena(initc['meta'], initc['boto'].session)
initc['policygen'] = PolicyGenerator()
return initc
def get_arns_from_athena_output(users_or_roles, initc):
""" Function to get list of arns of active users or roles. """
if users_or_roles == "users":
athena_output_files = initc['athena'].active_users_output_files
services_by_query = initc['athena'].services_by_user_query
elif users_or_roles == "roles":
athena_output_files = initc['athena'].active_roles_output_files
services_by_query = initc['athena'].services_by_role_query
for dictionary in athena_output_files:
obj = initc['s3'].get_object(initc['meta']["behold_bucket"], dictionary["path"])
list_of_arns = single_column_csv_to_list(obj)
initc['s3'].put_object(
bucket=initc['meta']['behold_bucket'],
key=f"behold_results/{dictionary['account']}/{users_or_roles}/active_{users_or_roles}_in"
f"_last_{initc['meta']['days_back']}_days.txt",
encoded_object="\n".join(list_of_arns).encode()
)
services_by_query(
account=dictionary["account"],
list_of_arns=list_of_arns
)
def build_behold_output_files(users_or_roles, initc):
""" Builds list of services/actions and IAM policy for each role or user. """
if users_or_roles == "users":
athena_services_by_output_files = initc['athena'].services_by_user_output_files
elif users_or_roles == "roles":
athena_services_by_output_files = initc['athena'].services_by_role_output_files
for dictionary in athena_services_by_output_files:
obj = initc['s3'].get_object(initc['meta']["behold_bucket"], dictionary["path"])
list_of_dicts = csv_to_list_of_dicts(obj)
path_to_output = f"behold_results/{dictionary['account']}/{users_or_roles}/{dictionary['name']}/{dictionary['name']}_"
supported_actions, unsupported_actions = initc['policygen'].generate_list_of_actions(list_of_dicts)
formatted_supported_actions = initc['policygen'].format_actions(supported_actions)
initc['s3'].put_object(
bucket=initc['meta']["behold_bucket"],
key=path_to_output + "iam_actions.txt",
encoded_object=formatted_supported_actions.encode()
)
policy = initc['policygen'].build_policy(supported_actions)
initc['s3'].put_object(
bucket=initc['meta']['behold_bucket'],
key=path_to_output + "iam_policy.json",
encoded_object=policy.encode()
)
if unsupported_actions:
initc['s3'].put_object(
bucket=initc['meta']['behold_bucket'],
key=path_to_output + "unsupported_actions.txt",
encoded_object="\n".join(unsupported_actions).encode()
)
def main():
args = arguments()
if args.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
initc = initialize_classes(args)
# If --setup flag is passed, the Athena table and partition tables are set up.
# Only needs to be done once unless metadata is updated to add more accounts, regions, or years.
if args.setup:
initc['athena'].set_up_table_and_partitions()
initc['athena'].active_resources()
get_arns_from_athena_output("users", initc)
get_arns_from_athena_output("roles", initc)
build_behold_output_files("users", initc)
build_behold_output_files("roles", initc)
if __name__ == '__main__':
main()
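# Hedged usage note (added, not in the original file): based on arguments() above,
# the script takes a metadata file path plus optional flags, e.g.
#     python main.py metadata.json --setup --debug
# where "main.py" and "metadata.json" are made-up placeholder names.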
| python |
import boto3
import json
from datetime import datetime
#to download, <bucket, obj name, file path to dl to>
# s3.download_file(
# "iot-fastgame-proj-ads","beard.jpg","downloads/beard.jpg"
# )
#to upload <file path to upload from, bucket, obj name>
# s3.upload_file('images/pokemon.jpg','iot-fastgame-proj-ads','pokemon.jpg')
#download_all_ads --> save img name and tags into a file, json?
#choose_ad --> check file, choose best match according to tags, display ad
#
def upload_images(viewerbucketname, imagepath, imagename):
# Declare
s3 = boto3.client("s3")
s3buckets = boto3.resource("s3")
adsbucket = s3buckets.Bucket(viewerbucketname)
name = datetime.now().strftime("%H:%M:%S") + ".png"
s3.upload_file(imagepath + imagename, viewerbucketname, name)
def download_images(adbucketname, download_path ,filter='all'):
# Declare
s3 = boto3.client("s3")
s3buckets = boto3.resource("s3")
adsbucket = s3buckets.Bucket(adbucketname)
object_summary_iterator = adsbucket.objects.all()
tosave=[]
for i in object_summary_iterator: #iterate thru all objs
print(i.key)
object = s3buckets.Object(adbucketname,i.key)
try:
objtopics = object.metadata['topics']
objtopiclist = [x.strip() for x in objtopics.split(',')]
print(objtopiclist)
            #maybe check whether it has already been downloaded
if filter == 'all':
s3.download_file(adbucketname,i.key,download_path+i.key)
elif filter in objtopiclist:
s3.download_file(adbucketname,i.key,download_path+i.key)
tofile={"name":i.key,"tags":objtopiclist}
tosave.append(tofile)
except:
pass
with open("tags.json", "w") as outfile:
json.dump(tosave, outfile)
def download_image(adbucketname, download_path, img_name):
s3 = boto3.client("s3")
s3buckets = boto3.resource("s3")
f = open("tags.json")
tosave = json.load(f)
print(tosave)
object = s3buckets.Object(adbucketname,img_name) # get the bucket :)
try:
objtopics = object.metadata['topics']
objtopiclist = [x.strip() for x in objtopics.split(',')]
tofile={"name":img_name,"tags":objtopiclist}
if tofile not in tosave:
print("Save file")
tosave.append(tofile)
s3.download_file(adbucketname,img_name,download_path+img_name)
except:
pass
with open("tags.json", "w") as outfile:
json.dump(tosave, outfile)
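# Hedged usage sketch (added, not in the original script); the bucket names and
# local paths below are made-up placeholders:
#     download_images("example-ads-bucket", "downloads/", filter="sports")
#     download_image("example-ads-bucket", "downloads/", "beard.jpg")
#     upload_images("example-viewer-bucket", "images/", "snapshot.png")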
| python |
import turtle as t  # import the library
from random import randint
from turtle import *
screen = t.getscreen()  # the window
t.title("Turtle")
my_turtle = t.Turtle()
my_turtle.shape("turtle") # square , triangle , classic
#my_turtle.color("green")
my_turtle.color("black","red")
my_turtle.shapesize(2,2,0)
#for i in range(0,50):
# print(randint(3,5))
#my_turtle.forward(1)
#for num in range(8):
# penup()
# forward(10)
# pendown()
# forward(10)
#my_turtle.goto(-100,-100)
#print(my_turtle.pos())
# forward - move forward
# backward - move backward
# left - turn left
# right - turn right
#my_turtle.forward(100)
#for i in range(0,180):
# my_turtle.left(1)
# my_turtle.forward(1)
#print(my_turtle.pos())
#my_turtle.circle(30)
#my_turtle.circle(40)
# (x,y) (0,0)
#my_turtle.goto(100,100)
#number = 0
#number2 = 40
#for i in range(1,20):
# my_turtle.shapesize(i,i,0)
# number2 = number2 - 1
# my_turtle.forward(5)
# my_turtle.shapesize(number2,number2,0)
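# A minimal sketch (added, not part of the original lesson): drawing a square with
# the forward/left commands listed above.
for _ in range(4):
    my_turtle.forward(100)
    my_turtle.left(90)
t.done()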
| python |
cisco_ios = "Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)"
a = cisco_ios.split()
print(a)
b = a.index('Version')
c = a[b+1]
d = c[:-1]
print(d)
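# A minimal alternative sketch (added, not in the original snippet): extracting the
# same version string with a regular expression instead of split/index.
import re
match = re.search(r'Version (\S+?),', cisco_ios)
if match:
    print(match.group(1))  # 15.0(1)M4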
# for i in a:
# if i=='Version':
# print(i)
| python |
from math import sqrt, ceil
def p1(num: int):
size = ceil(sqrt(num))
center = ceil((size - 1) / 2)
return max(0, center - 1 + abs(center - num % size))
assert p1(1) == 0
assert p1(12) == 3
assert p1(23) == 2
assert p1(1024) == 31
assert p1(347991) == 480
# p2 349975
# https://oeis.org/A141481
| python |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <[email protected]>
#
# Additional Authors:
# Mike Auty <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
from volatility import renderers
import volatility.plugins.procdump as procdump
from volatility.renderers.basic import Address
import volatility.win32.tasks as tasks
import volatility.debug as debug
import volatility.utils as utils
import volatility.cache as cache
class DLLDump(procdump.ProcDump):
"""Dump DLLs from a process address space"""
def __init__(self, config, *args, **kwargs):
procdump.ProcDump.__init__(self, config, *args, **kwargs)
config.remove_option("OFFSET")
config.add_option('REGEX', short_option = 'r',
help = 'Dump dlls matching REGEX',
action = 'store', type = 'string')
config.add_option('IGNORE-CASE', short_option = 'i',
help = 'Ignore case in pattern match',
action = 'store_true', default = False)
config.add_option('OFFSET', short_option = 'o', default = None,
help = 'Dump DLLs for Process with physical address OFFSET',
action = 'store', type = 'int')
config.add_option('BASE', short_option = 'b', default = None,
help = 'Dump DLLS at the specified BASE offset in the process address space',
action = 'store', type = 'int')
@cache.CacheDecorator(lambda self: "tests/dlldump/regex={0}/ignore_case={1}/offset={2}/base={3}".format(self._config.REGEX, self._config.IGNORE_CASE, self._config.OFFSET, self._config.BASE))
def calculate(self):
addr_space = utils.load_as(self._config)
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
if self._config.OFFSET != None:
data = [self.virtual_process_from_physical_offset(addr_space, self._config.OFFSET)]
else:
data = self.filter_tasks(tasks.pslist(addr_space))
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
mod_re = re.compile(self._config.REGEX, re.I)
else:
mod_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: %s' % e)
for proc in data:
ps_ad = proc.get_process_address_space()
if ps_ad == None:
continue
mods = dict((mod.DllBase.v(), mod) for mod in proc.get_load_modules())
if self._config.BASE:
if mods.has_key(self._config.BASE):
mod_name = mods[self._config.BASE].BaseDllName
else:
mod_name = "UNKNOWN"
yield proc, ps_ad, int(self._config.BASE), mod_name
else:
for mod in mods.values():
if self._config.REGEX:
if not mod_re.search(str(mod.FullDllName or '')) and not mod_re.search(str(mod.BaseDllName or '')):
continue
yield proc, ps_ad, mod.DllBase.v(), mod.BaseDllName
def generator(self, data):
for proc, ps_ad, mod_base, mod_name in data:
if not ps_ad.is_valid_address(mod_base):
result = "Error: DllBase is unavailable (possibly due to paging)"
else:
process_offset = ps_ad.vtop(proc.obj_offset)
dump_file = "module.{0}.{1:x}.{2:x}.dll".format(proc.UniqueProcessId, process_offset, mod_base)
result = self.dump_pe(ps_ad, mod_base, dump_file)
yield (0,
[Address(proc.obj_offset),
str(proc.ImageFileName),
Address(mod_base),
str(mod_name or ''),
str(result)])
def unified_output(self, data):
return renderers.TreeGrid(
[("Process(V)", Address),
("Name", str),
("Module Base", Address),
("Module Name", str),
("Result", str)], self.generator(data))
def render_text(self, outfd, data):
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
self.table_header(outfd,
[("Process(V)", "[addrpad]"),
("Name", "20"),
("Module Base", "[addrpad]"),
("Module Name", "20"),
("Result", "")])
for proc, ps_ad, mod_base, mod_name in data:
if not ps_ad.is_valid_address(mod_base):
result = "Error: DllBase is paged"
else:
process_offset = ps_ad.vtop(proc.obj_offset)
dump_file = "module.{0}.{1:x}.{2:x}.dll".format(proc.UniqueProcessId, process_offset, mod_base)
result = self.dump_pe(ps_ad, mod_base, dump_file)
self.table_row(outfd,
proc.obj_offset,
proc.ImageFileName,
mod_base, str(mod_name or ''), result)
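# Hedged usage note (added, not part of the plugin source): dlldump is normally run
# through the Volatility 2 command line, for example
#     python vol.py -f memory.img --profile=Win7SP1x64 dlldump -p 1234 --dump-dir=out/
# where the image name, profile, PID, and output directory are made-up examples.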
| python |
# -*- coding: utf-8 -*-
__author__ = 'abbot'
import requests
# response = requests.get("https://www.12306.cn/mormhweb/", verify = False)
# print(response.text)
response = requests.get("http://www.baidu.com")
print(response.content)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'a test module'
import sys
_author_ = 'tianmaolin'
def fun1(*a):
print(a)
def fun2(**b):
print(b)
# fun1(1, 2, 5)
# fun2(name='tianmlin', age=22)
def test():
args = sys.argv
if len(args) == 1:
print("Hello World!")
elif len(args) == 2:
print("Hello,%s!" % args[1])
else:
print("Too many arguments!")
if __name__ == '__main__':
test()
| python |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import closing
import mox
import requests
from six import StringIO
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_file
class FetcherTest(mox.MoxTestBase):
def setUp(self):
super(FetcherTest, self).setUp()
self.requests = self.mox.CreateMockAnything()
self.response = self.mox.CreateMock(requests.Response)
self.fetcher = Fetcher(requests_api=self.requests)
self.listener = self.mox.CreateMock(Fetcher.Listener)
def expect_get(self, url, chunk_size_bytes, timeout_secs, listener=True):
self.requests.get(url, stream=True, timeout=timeout_secs).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
if listener:
self.listener.status(200, content_length=11)
chunks = ['0123456789', 'a']
self.response.iter_content(chunk_size=chunk_size_bytes).AndReturn(chunks)
return chunks
def test_get(self):
for chunk in self.expect_get('http://bar', chunk_size_bytes=1024, timeout_secs=60):
self.listener.recv_chunk(chunk)
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
self.fetcher.fetch('http://bar',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_checksum_listener(self):
digest = self.mox.CreateMockAnything()
for chunk in self.expect_get('http://baz', chunk_size_bytes=1, timeout_secs=37):
self.listener.recv_chunk(chunk)
digest.update(chunk)
self.listener.finished()
digest.hexdigest().AndReturn('42')
self.response.close()
self.mox.ReplayAll()
checksum_listener = Fetcher.ChecksumListener(digest=digest)
self.fetcher.fetch('http://baz',
checksum_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assertEqual('42', checksum_listener.checksum)
def test_download_listener(self):
downloaded = ''
for chunk in self.expect_get('http://foo', chunk_size_bytes=1048576, timeout_secs=3600):
self.listener.recv_chunk(chunk)
downloaded += chunk
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
with closing(StringIO()) as fp:
self.fetcher.fetch('http://foo',
Fetcher.DownloadListener(fp).wrap(self.listener),
chunk_size_bytes=1024 * 1024,
timeout_secs=60 * 60)
self.assertEqual(downloaded, fp.getvalue())
def test_size_mismatch(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
self.listener.status(200, content_length=11)
self.response.iter_content(chunk_size=1024).AndReturn(['a', 'b'])
self.listener.recv_chunk('a')
self.listener.recv_chunk('b')
self.response.close()
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.Error):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_transient(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.ConnectionError)
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_permanent(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.TooManyRedirects)
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertTrue(e.exception.response_code is None)
def test_http_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 404
self.listener.status(404)
self.response.close()
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertEqual(404, e.exception.response_code)
def test_iter_content_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {}
self.listener.status(200, content_length=None)
self.response.iter_content(chunk_size=1024).AndRaise(requests.Timeout)
self.response.close()
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def expect_download(self, path_or_fd=None):
downloaded = ''
for chunk in self.expect_get('http://1', chunk_size_bytes=13, timeout_secs=13, listener=False):
downloaded += chunk
self.response.close()
self.mox.ReplayAll()
path = self.fetcher.download('http://1',
path_or_fd=path_or_fd,
chunk_size_bytes=13,
timeout_secs=13)
return downloaded, path
def test_download(self):
downloaded, path = self.expect_download()
try:
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
finally:
os.unlink(path)
def test_download_fd(self):
with temporary_file() as fd:
downloaded, path = self.expect_download(path_or_fd=fd)
self.assertEqual(path, fd.name)
fd.close()
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
def test_download_path(self):
with temporary_file() as fd:
fd.close()
downloaded, path = self.expect_download(path_or_fd=fd.name)
self.assertEqual(path, fd.name)
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
| python |
import os
from typing import Any
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
import cssutils
import requests
url_re = r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)'
def delete_duplicates(l: list) -> list:
new_l = []
for element in l:
if element not in new_l:
new_l.append(element)
return new_l
def parse_css(css: str) -> dict:
dct = {}
sheet = cssutils.parseString(css)
for rule in sheet:
selector = rule.selectorText
styles = rule.style.cssText
dct[selector] = styles
return dct
def delete_query(uri: str, query_name: str) -> str:
parsed_url = urlparse(uri)
url_query = parse_qs(parsed_url.query, keep_blank_values=True)
url_query.pop(query_name, None)
cleaned = urlunparse(parsed_url._replace(query=urlencode(url_query, True)))
return cleaned
def dump_html(uri: str) -> None:
    with open('dump.html', 'w', encoding='utf-8') as f:
f.write(requests.get(uri).text)
def get_env_var(var_name: str, default: Any = None, required: bool = False) -> Any:
value = os.environ.get(var_name, default=default)
if not value and required:
raise ValueError(
f'You must specify environment variable named {var_name}. '
'In Heroku go to App settings -> Config Vars -> Reveal Config Vars -> Add. '
f'In Bash type \"export {var_name}=your_value\".'
)
return value
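# A minimal usage sketch (added, not in the original module); the URL is a made-up example.
if __name__ == '__main__':
    print(delete_duplicates([1, 1, 2, 3, 2]))                      # [1, 2, 3]
    print(delete_query('https://example.com/page?a=1&b=2', 'a'))   # https://example.com/page?b=2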
| python |
# Generated by Django 2.2 on 2019-06-21 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0055_auto_20190620_1527'),
]
operations = [
migrations.AddField(
model_name='presentation',
name='is_keynote',
            field=models.BooleanField(default=False, help_text='Set to TRUE if the speaker is a keynote speaker.'),
),
]
| python |
# AUTOGENERATED FILE! PLEASE DON'T EDIT
from .callbacks import Callback, Callbacks, Cbs
import k1lib, os, torch
__all__ = ["Autosave", "DontTrainValid", "InspectLoss", "ModifyLoss", "Cpu", "Cuda",
"DType", "InspectBatch", "ModifyBatch", "InspectOutput", "ModifyOutput",
"Beep"]
@k1lib.patch(Cbs)
class Autosave(Callback):
"""Autosaves 3 versions of the network to disk"""
def __init__(self): super().__init__(); self.order = 23
def endRun(self):
os.system("mv autosave-1.pth autosave-0.pth")
os.system("mv autosave-2.pth autosave-1.pth")
self.l.save("autosave-2.pth")
@k1lib.patch(Cbs)
class DontTrainValid(Callback):
"""If is not training, then don't run m.backward() and opt.step().
The core training loop in k1lib.Learner don't specifically do this,
cause there may be some weird cases where you want to also train valid."""
def _common(self):
if not self.l.model.training: return True
def startBackward(self): return self._common()
def startStep(self): return self._common()
@k1lib.patch(Cbs)
class InspectLoss(Callback):
"""Expected `f` to take in 1 float."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 15
def endLoss(self): self.f(self.loss.detach())
@k1lib.patch(Cbs)
class ModifyLoss(Callback):
"""Expected `f` to take in 1 float and return 1 float."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def endLoss(self): self.l.loss = self.f(self.loss)
@k1lib.patch(Cbs)
class Cuda(Callback):
"""Moves batch and model to the default GPU"""
def startRun(self): self.l.model.cuda()
def startBatch(self):
self.l.xb = self.l.xb.cuda()
self.l.yb = self.l.yb.cuda()
@k1lib.patch(Cbs)
class Cpu(Callback):
"""Moves batch and model to CPU"""
def startRun(self): self.l.model.cpu()
def startBatch(self):
self.l.xb = self.l.xb.cpu()
self.l.yb = self.l.yb.cpu()
@k1lib.patch(Cbs)
class DType(Callback):
"""Moves batch and model to a specified data type"""
def __init__(self, dtype): super().__init__(); self.dtype = dtype
def startRun(self): self.l.model = self.l.model.to(self.dtype)
def startBatch(self):
self.l.xb = self.l.xb.to(self.dtype)
self.l.yb = self.l.yb.to(self.dtype)
@k1lib.patch(Cbs)
class InspectBatch(Callback):
"""Expected `f` to take in 2 tensors."""
def __init__(self, f:callable): super().__init__(); self.f = f; self.order = 15
def startBatch(self): self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class ModifyBatch(Callback):
"""Modifies xb and yb on the fly. Expected `f`
to take in 2 tensors and return 2 tensors."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def startBatch(self): self.l.xb, self.l.yb = self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class InspectOutput(Callback):
"""Expected `f` to take in 1 tensor."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 15
def endPass(self): self.f(self.y)
@k1lib.patch(Cbs)
class ModifyOutput(Callback):
"""Modifies output on the fly. Expected `f` to take
in 1 tensor and return 1 tensor"""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def endPass(self): self.l.y = self.f(self.y)
@k1lib.patch(Cbs)
class Beep(Callback):
"""Plays a beep sound when the run is over"""
    def endRun(self): k1lib.beep()
| python |
import cowsay
print(cowsay.get_output_string('trex', 'Hello (extinct) World'))
| python |
#!/usr/bin/env python
# coding=utf-8
#list:[]
bicycles = ['trek', 'cannodale', 'redline', 'speciakixrdd']
print(bicycles)
# Positive indices: 0, 1, 2, ..., n - 1; counting backwards: -1, -2, ..., -n
print(bicycles[0].title())
print(bicycles[-1])
motorcycles = ['honda', 'yamaha', 'suzyki']
print(motorcycles)
## Modify an element
motorcycles[0] = 'ducati'
print(motorcycles)
## Append to the end with append(str)
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
motorcycles.append('ducati')
print(motorcycles)
motorcycles = []
print(motorcycles)
motorcycles.append('honda')
motorcycles.append('yamaha')
motorcycles.append('suzuki')
print(motorcycles)
print("=============")
## Insert at index n with insert(n, str)
motorcycles = ['honda', 'yamaha', 'suzuhi']
print(motorcycles)
motorcycles.insert(0, 'ducati')
print(motorcycles)
print("====================")
## Delete with del
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
del motorcycles[0]
print(motorcycles)
## pop(i=n-1) removes the last element (or the element at index i) and returns it
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
pop_motorcycles = motorcycles.pop()
print(motorcycles)
print(pop_motorcycles)
motorcycles.pop(0)
print(motorcycles)
## When the index of the element to delete is unknown, use remove(str) to delete the first occurrence of str
day = ['mon', 'tue', 'wed', 'thu', 'fri']
print(day)
day.remove('wed')
print(day)
##sort()
day = ['mon', 'tue', 'wed', 'thu', 'fri']
print(day)
day.sort()
print(day)
day.sort(reverse=True)
print(day)
## sorted(list) returns a sorted copy; the original list order is unchanged
num = [1, 4, 7, 2, 0, 5]
print(num)
num2 = sorted(num)
print(num2)
print(num)
print("\n")
## reverse() reverses the list in place
print(day)
day.reverse()
print(day)
# len() determines the list length
l = len(day)
print(l)
| python |
import torch
class ModelPipeline:
def __init__(self, preprocessor, model, return_numpy=True):
self.preprocessor = preprocessor
self.model = model
self.return_numpy = return_numpy
def __call__(self, *args, **kwargs):
inputs = self.preprocessor(*args, **kwargs)
if isinstance(inputs, dict):
inputs = {key: torch.tensor(val) for key, val in inputs.items()}
else:
inputs = torch.tensor(inputs)
if isinstance(inputs, dict):
outputs = self.model(**inputs)
else:
outputs = self.model(inputs)
if self.return_numpy:
if isinstance(outputs, dict):
outputs = {key: val.detach().numpy() for key, val in outputs.items()}
else:
outputs = outputs.detach().numpy()
return outputs
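# A minimal usage sketch (added, not in the original module). FakeModel and
# fake_preprocessor are made-up stand-ins, not a real library API.
if __name__ == "__main__":
    class FakeModel(torch.nn.Module):
        def forward(self, input_ids):
            # average the token ids of each sequence
            return input_ids.float().mean(dim=-1)
    def fake_preprocessor(text):
        # "tokenize" a string into a batch of one integer id sequence
        return {"input_ids": [[ord(c) % 100 for c in text]]}
    pipeline = ModelPipeline(fake_preprocessor, FakeModel())
    print(pipeline("hello"))  # a one-element numpy array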
| python |
#!/Users/rblount/.pyenv/versions/AdOfCode/bin/python
import sys
import os
import numpy as np
from TerminalColors import BRED, BGREEN, ENDCOLOR
from AOC import AOC
testing = False
days = 100
def parse_input(data_input: list):
array = np.genfromtxt(data_input, dtype=int, delimiter=1)
return array
def print_octupuses(array: np.array):
_, x_size = array.shape
for (_, x), val in np.ndenumerate(array):
if val >= 10:
print(f"{BRED}", end="")
elif val == 0:
print(f"{BGREEN}", end="")
print(f"{val:>4}{ENDCOLOR}", end="")
if x == x_size - 1:
print()
print()
def process_cycle(array: np.array):
y_size, x_size = array.shape
count = 0
array += 1
array_map = [(y, x) for y, x in np.argwhere(array > 9)]
checked_locations = list()
while len(array_map) > 0:
for point in array_map:
y, x = point
y_min = y - 1 if y > 0 else 0
y_max = y + 1 if y < y_size - 1 else y_size - 1
x_min = x - 1 if x > 0 else 0
x_max = x + 1 if x < x_size - 1 else x_size - 1
array[y_min:y_max + 1, x_min:x_max + 1] += 1
count = np.sum(array >= 10)
checked_locations += array_map
new_array_map = [(y, x) for y, x in np.argwhere(array > 9)]
array_map = list(set(new_array_map).difference(set(checked_locations)))
array = array * (array < 10)
return array, count
def part1(array: np.array):
count = 0
for cycle in range(1, days + 1):
array, flash_count = process_cycle(array)
count += flash_count
print(f"After {cycle} Days: Total Flashes: {count}")
def part2(array: np.array):
all_syncd = array.shape[0] * array.shape[1]
count = 0
cycle = 1
while count < all_syncd:
array, count = process_cycle(array)
cycle += 1
print(f"After {cycle} Days: Total Flashes: {count}")
def main():
# Get the path name and strip to the last 1 or 2 characters
codePath = os.path.dirname(sys.argv[0])
codeDate = int(codePath.split("/")[-1][3:])
codeYear = int(codePath.split("/")[-2])
print(f"Running Advent of Code for Year: {codeYear} - Day {codeDate}")
# global data
code_data = AOC(codeDate, codeYear, test=testing)
data_input = code_data.read_lines()
data_input = parse_input(data_input)
part1(data_input)
part2(data_input)
if __name__ == "__main__":
main()
| python |
from .realtime import interface, urlib
################################################################
## Simulated robot implementation
################################################################
class SimConnection:
"""Implements functionality to read simulated robot state (arm and F/T sensor) and command the robot in real-time."""
def __init__(self, env):
self.env = env
urlib.sim = env
def connect(self):
print('System ready.')
def disconnect(self):
pass
def execute(self, cmd, state):
"""Sends the command to control layer and reads back the state, emulating the wire protocol used with the real robot."""
state[:] = interface.execute_arm_command(cmd, 0)
self.env.update() # note that the sim update is called twice, once here and once by the hand's sim_connection
| python |
import sys
import time
from networktables import NetworkTables
import logging
logging.basicConfig(level=logging.DEBUG)
NetworkTables.initialize(server = "localhost")
sd = NetworkTables.getTable("/vision")
while True:
try:
x = sd.getNumberArray('centerX')
width = sd.getNumberArray('width')
try:
firstEdge = x[1] - (width[1]/2)
secondEdge = x[0] + (width[0]/2)
edgeDiff = secondEdge - firstEdge
location = firstEdge + (edgeDiff/2)
locationError = location - 200
except IndexError:
locationError = 0
if (locationError == 0):
neededDirection = "Straight"
elif (locationError > 5):
neededDirection = "Right"
elif (locationError < -5):
neededDirection = "Left"
elif (-5 <= locationError <= 5):
neededDirection = "Stop"
else:
neededDirection = "Unknown"
print(neededDirection)
except KeyError:
print('Waiting for Connection...')
time.sleep(1)
| python |
# Generated by Django 3.0.5 on 2020-09-02 22:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0020_auto_20200903_0339'),
]
operations = [
migrations.AlterField(
model_name='questiontable',
name='count1',
field=models.DecimalField(decimal_places=2, default=0, max_digits=5),
),
migrations.AlterField(
model_name='questiontable',
name='count2',
field=models.DecimalField(decimal_places=2, default=0, max_digits=5),
),
migrations.AlterField(
model_name='questiontable',
name='count3',
field=models.DecimalField(decimal_places=2, default=0, max_digits=5),
),
migrations.AlterField(
model_name='questiontable',
name='count4',
field=models.DecimalField(decimal_places=2, default=0, max_digits=5),
),
]
| python |
from django.apps import AppConfig
class HarvesterConfig(AppConfig):
default_auto_field = 'django.db.models.AutoField'
name = 'harvester'
| python |
#!/usr/bin/env python3.7
"""
The copyrights of this software are owned by Duke University.
Please refer to the LICENSE and README.md files for licensing instructions.
The source code can be found on the following GitHub repository: https://github.com/wmglab-duke/ascent
"""
import json
import os
from typing import Union
from .enums import TemplateMode
class TemplateOutput:
@staticmethod
def read(mode: TemplateMode) -> Union[list, dict]:
path = os.path.join(TemplateMode.path.value, mode.value)
with open(path, "r") as handle:
return json.load(handle)
@staticmethod
def write(data: Union[list, dict], dest_path):
with open(dest_path, "w") as handle:
handle.write(json.dumps(data, indent=2))
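# Minimal usage sketch; the TemplateMode member name below is hypothetical and
# only illustrates the expected read/modify/write round trip:
#
#     data = TemplateOutput.read(TemplateMode.SOME_TEMPLATE)
#     data["value"] = 42
#     TemplateOutput.write(data, "out/some_template.json")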
| python |
"""ssoadmin module initialization; sets value for base decorator."""
from .models import ssoadmin_backends
from ..core.models import base_decorator
mock_ssoadmin = base_decorator(ssoadmin_backends)
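# Minimal usage sketch, assuming the mock behaves like other moto decorators;
# the test body below is illustrative only:
#
#     import boto3
#
#     @mock_ssoadmin
#     def test_list_instances():
#         client = boto3.client("sso-admin", region_name="us-east-1")
#         client.list_instances()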
| python |
import unittest
from page.thread_page import Page
import time
class threadDemo(unittest.TestCase):
def __repr__(self):
return 'appdemo'
@classmethod
def setUpClass(cls):
cls.page = Page()
def test_a_thread(self):
time.sleep(6)
self.page.login_btn()
time.sleep(2)
self.page.account()
time.sleep(2)
self.page.password()
time.sleep(2)
self.page.login()
self.assertTrue(self.page.check(self.test_a_thread.__name__), 'msg')
# self.page.url()
# time.sleep(2)
# self.page.enter()
# self.assertTrue(self.page.check(self.test_a_thread.__name__), 'msg')
@classmethod
def tearDownClass(cls):
cls.page.quit()
| python |
import falcon
from falcon.testing import TestResource as ResourceMock
from tests import RestTestBase
from monitorrent.rest import no_auth, AuthMiddleware
def is_auth_enabled():
return False
class TestAuthMiddleware(RestTestBase):
def setUp(self, disable_auth=False):
super(TestAuthMiddleware, self).setUp(disable_auth)
def test_auth_success(self):
self.api.add_route(self.test_route, ResourceMock())
self.simulate_request(self.test_route, headers={'Cookie': self.get_cookie()})
self.assertEqual(falcon.HTTP_OK, self.srmock.status)
def test_no_auth_success(self):
self.api.add_route(self.test_route, no_auth(ResourceMock()))
self.simulate_request(self.test_route)
self.assertEqual(falcon.HTTP_OK, self.srmock.status)
def test_authenticate(self):
resp = falcon.Response()
AuthMiddleware.authenticate(resp)
self.assertIsNotNone(resp._cookies)
jwt = resp._cookies[AuthMiddleware.cookie_name]
self.assertEqual(jwt.key, AuthMiddleware.cookie_name)
self.assertEqual(jwt.value, self.auth_token_verified)
self.assertEqual(jwt['path'], '/')
def test_auth_failed_without_cookie(self):
self.api.add_route(self.test_route, ResourceMock())
self.simulate_request(self.test_route)
self.assertEqual(falcon.HTTP_UNAUTHORIZED, self.srmock.status)
def test_auth_failed_with_modified_cookie(self):
self.api.add_route(self.test_route, ResourceMock())
self.simulate_request(self.test_route, headers={'Cookie': self.get_cookie(True)})
self.assertEqual(falcon.HTTP_UNAUTHORIZED, self.srmock.status)
def test_auth_failed_with_random_cookie(self):
self.api.add_route(self.test_route, ResourceMock())
self.simulate_request(self.test_route, headers={'Cookie': 'jwt=random; HttpOnly; Path=/'})
self.assertEqual(falcon.HTTP_UNAUTHORIZED, self.srmock.status)
def test_disabled_auth(self):
self.api.add_route(self.test_route, ResourceMock())
AuthMiddleware.init('secret!', 'monitorrent', is_auth_enabled)
self.simulate_request(self.test_route, headers={'Cookie': 'jwt=random; HttpOnly; Path=/'})
self.assertEqual(falcon.HTTP_OK, self.srmock.status)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import String, Bool
from burger_war_dev.msg import war_state
from actionlib_msgs.msg import GoalStatusArray
class StateControlBot():
def __init__(self):
self.pub = rospy.Publisher("main_state",String, queue_size=10)
self.sub = rospy.Subscriber("war_state_info", war_state, self.warStateCallback)
self.sub_navi_status = rospy.Subscriber('move_base/status', GoalStatusArray, self.navStateCallback)
self.sub_detectingEnemy = rospy.Subscriber('detect_enemy', Bool, self.detectEnemyCallback)
self.detecting_enemy = False
self.detected_time = None
self.state = "UNDEFINED"
self.navi_status = None
self.war_state = war_state()
def strategy(self):
self.publish_state("IDLING")
self.rate = rospy.Rate(1)
while not rospy.is_shutdown():
if self.state == "IDLING":
if self.war_state.state == "running":
self.publish_state("GO")
elif self.war_state.state == "stop":
if self.war_state.my_point < self.war_state.enemy_point:
self.publish_state("LOSE")
elif self.war_state.my_point > self.war_state.enemy_point:
self.publish_state("WIN")
else:
self.publish_state("EVEN")
elif self.state == "GO" and self.detecting_enemy:
self.publish_state("ESCAPE")
rospy.sleep(rospy.Duration(10))
self.publish_state("GO")
self.rate.sleep()
def navStateCallback(self, data):
if len(data.status_list) > 0:
status = data.status_list[0]
if status == self.navi_status:
return
self.navi_status = status
rospy.logdebug("Navi Status : {}".format(status))
def detectEnemyCallback(self,msg):
self.detecting_enemy = msg.data
def publish_state(self, state):
rospy.loginfo("STATE : {}".format(state))
self.state = state
msg = String(data=state)
self.pub.publish(msg)
def warStateCallback(self, msg):
self.war_state = msg
rospy.logdebug("msg.state {}".format(msg.state))
def main():
rospy.init_node('state_control')
bot = StateControlBot()
bot.strategy()
if __name__ == "__main__":
main() | python |
valor1 = 0
acumu1 = 0
valor2 = 10
acumu2 = 10
while valor1 <= 8:
    print(acumu1, valor1)
    acumu1 = acumu1 + valor1
    valor1 = valor1 + 1
else:
    print('loop finished')
from ._pyg_decoders import (
LogSoftmaxDecoderMaintainer,
SumPoolMLPDecoderMaintainer,
DiffPoolDecoderMaintainer,
DotProductLinkPredictionDecoderMaintainer
)
| python |
import random
def sort_by_length(words):
t = []
for word in words:
t.append((len(word), word))
    t.sort(reverse=True)
res = []
for length, word in t:
res.append(word)
return res
def sort_by_length_random(words):
"""Modify this example so that words with the same length appear in random order."""
t = []
for word in words:
t.append((len(word), word))
    random.shuffle(t)
    t.sort(key=lambda pair: pair[0], reverse=True)
res = []
for length, word in t:
res.append(word)
return res
print(sort_by_length(["milan", "jovan", "maksa"]))
print(sort_by_length_random(["milan", "jovan", "maksa"])) | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# fix sys path so we don't need to setup PYTHONPATH
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'userena.runtests.settings'
import django
if django.VERSION >= (1, 7, 0):
# starting from 1.7.0 we need to run setup() in order to populate
# app config
django.setup()
from django.conf import settings
from django.db.models import get_app
from django.test.utils import get_runner
def usage():
return """
Usage: python runtests.py [UnitTestClass].[method]
You can pass the Class name of the `UnitTestClass` you want to test.
Append a method name if you only want to test a specific method of that
class.
"""
def main():
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=2, failfast=False)
if len(sys.argv) > 1:
test_modules = sys.argv[1:]
elif len(sys.argv) == 1:
test_modules = []
else:
print(usage())
sys.exit(1)
if django.VERSION >= (1, 6, 0):
# this is a compat hack because in django>=1.6.0 you must provide
# module like "userena.contrib.umessages" not "umessages"
test_modules = [
# be more strict by adding .tests to not run umessages tests twice
# if both userena and umessages are tested
get_app(module_name).__name__[:-7] + ".tests"
for module_name
in test_modules
]
if django.VERSION < (1, 7, 0):
# starting from 1.7.0 built in django migrations are run
# for older releases this patch is required to enable testing with
# migrations
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
failures = test_runner.run_tests(test_modules or ['userena'])
sys.exit(failures)
if __name__ == '__main__':
main()
| python |
import pytest
from omniscient.utils.query_graph_utils import QueryGraphUtils
@pytest.fixture(scope="class")
def setup():
sparql = """
PREFIX ns: <http://rdf.freebase.com/ns/>
SELECT DISTINCT ?x
WHERE {
FILTER (?x != ?c)
FILTER (!isLiteral(?x) OR lang(?x) = '' OR langMatches(lang(?x), 'en'))
?c ns:location.country.administrative_divisions ns:m.010vz .
?c ns:government.governmental_jurisdiction.governing_officials ?y .
?y ns:government.government_position_held.office_holder ?x .
?y ns:government.government_position_held.basic_title ns:m.060c4 .
FILTER(NOT EXISTS {?y ns:government.government_position_held.from ?sk0} ||
EXISTS {?y ns:government.government_position_held.from ?sk1 .
FILTER(xsd:gYear(?sk1) <= \"1980\"^^xsd:gYear) })
FILTER(NOT EXISTS {?y ns:government.government_position_held.to ?sk2} ||
EXISTS {?y ns:government.government_position_held.to ?sk3 .
FILTER(xsd:gYear(?sk3) >= \"1980\"^^xsd:gYear) })}
"""
utils = QueryGraphUtils(use_tdb_query=True, kb_type="freebase", kb_index_path="/tuna1/indexes/d-freebase")
parse, graph = utils.sparql_to_graph(sparql=sparql, is_grounding=True)
return graph, utils
class TestQueryGraphUtils(object):
def test_sparql_to_graph(self, setup):
graph, utils = setup
assert len(graph.get_edges()) == 4
def test_query_graph_stage_generation(self, setup):
graph, utils = setup
query_graph_stages = utils.query_graph_stage_generation(
sentence="Who was the president in 1980 of the country that has Azad Kashmir?",
query_graph=graph)
assert len(query_graph_stages) == 4
for stage in query_graph_stages:
stage_testing_examples = stage.to_testing_example(utils=utils)
stage_training_examples = stage.to_training_example(utils=utils)
| python |
A = ['C', 'D', "E", "F", "G"]
B = [3, 0, 4, 1, 2]
def sort(A, B):
t = zip(A,B)
t = sorted(t, key=lambda x: x[1])
A, B = zip(*t)
return A
print(sort(A, B))
""""""
# Standard library modules.
import os
# Third party modules.
import pytest
import pyxray
# Local modules.
from pymontecarlo_penepma.importer import PenepmaImporter
# Globals and constants variables.
@pytest.fixture
def importer():
return PenepmaImporter()
@pytest.mark.asyncio
async def test_import(event_loop, importer, options, testdatadir):
dirpath = os.path.join(testdatadir, "sim1")
results = await importer.import_(options, dirpath)
assert len(results) == 2
result = results[0]
assert len(result) == 7 + 5
intensity = result[(29, "Ka1")]
assert intensity.n == pytest.approx(2.861705e-6, rel=1e-4)
assert intensity.s == pytest.approx(2.44e-6 / 3, rel=1e-4)
intensity = result[(29, "Ka")]
assert intensity.n == pytest.approx(2.861705e-6 + 1.040620e-6, rel=1e-4)
intensity = result[(29, "K")]
assert intensity.n == pytest.approx(
2.861705e-6 + 1.040620e-6 + 2.601550e-7, rel=1e-4
)
| python |
import os
import streamlit as st
import pandas as pd
import plotly.express as px
from PIL import Image
favicon = Image.open("media/favicon.ico")
st.set_page_config(
page_title = "AICS Results",
page_icon = favicon,
menu_items={
'Get Help': 'https://github.com/All-IISER-Cubing-Society/Results',
'Report a bug': "https://github.com/All-IISER-Cubing-Society/Results/issues",
'About': "AICS Results is a Streamlit app to visualize data of weekly event results. Contact Purva at AICS for any issues or help."
}
)
results = "results/"
@st.cache
def load_data():
# Get all files in the results directory
files = os.listdir("results")
frames = []
# Loop through all files and append dataframes to a list
for f in files:
df = pd.read_csv(os.path.join("results", f))
# Convert Date column to datetime field
df['Date'] = pd.to_datetime(df['Date'])
# Create an event column
        event = os.path.splitext(f)[0]
df['Event'] = [event for i in range(len(df))]
# Append to list
frames.append(df)
# Create combined data frame
cdf = pd.concat(frames)
return cdf
@st.cache
def load_event_data(data, name, events):
frames = []
for event in events:
df = data[data['Event'] == event]
frames.append(df)
combined_data = pd.concat(frames)
return combined_data
cdf = load_data()
st.sidebar.title("AICS - Results")
category = st.sidebar.radio(
'Select Category',
('Individual Results', 'Best Results', 'Institute-wise Results', 'Institute Leaderboard'))
if category == 'Individual Results':
# Get list of names in sorted order
names = sorted(cdf['Name'].unique())
# Sidebar name selector
name = st.sidebar.selectbox('Name', names)
# Person specific data
df = cdf[cdf['Name'] == name]
institute = df['Institute'].iloc[0]
st.header(name)
st.subheader(institute)
# Get events
events = df['Event'].unique()
selected_events = st.multiselect('Events', events, '3x3')
if len(selected_events) > 0:
selected_events_df = load_event_data(df, name, selected_events)
st.write("The graph is interactive. Feel free to play around with it.")
if 'FMC' in selected_events and len(selected_events) > 1:
st.write("FMC results are in Moves, and others in seconds. It would be better to plot FMC as a single graph.")
fig = px.line(selected_events_df, x='Date', y='Result', color='Event', markers=True)
st.plotly_chart(fig, use_container_width=True)
else:
st.write("Please select some events.")
st.write("If on mobile, select name from sidebar on top left.")
st.header("Event Participation")
participation_df = df['Event'].value_counts().reset_index()
participation_df.columns = ['Event', 'Count']
participation_df = participation_df.sort_values('Count', ascending=False)
st.dataframe(participation_df)
elif category == 'Best Results':
events = sorted(cdf['Event'].unique())
event = st.sidebar.selectbox('Event', events)
df = cdf[cdf['Event'] == event]
# First sort by Result, then do a stable sort on Name
df = df.sort_values('Result').sort_values('Name', kind='stable')
# Drop duplicates, then sort by result again
df = df.loc[df['Name'] != df['Name'].shift()].sort_values('Result')
df['Result'] = df['Result'].astype(str)
df['Date'] = df['Date'].astype(str)
df = df.reset_index().drop(columns=['index', 'Event'])
st.header(event)
st.write(df)
elif category == "Institute-wise Results":
institutes = sorted(cdf['Institute'].unique())
institute = st.sidebar.selectbox('Institute', institutes)
idf = cdf[cdf['Institute'] == institute]
st.header("Institute-wise Results")
st.subheader(institute)
events = sorted(idf['Event'].unique())
    event = st.selectbox('Event', events)
df = idf[idf['Event'] == event]
# First sort by Result, then do a stable sort on Name
df = df.sort_values('Result').sort_values('Name', kind='stable')
# Drop duplicates, then sort by result again
df = df.loc[df['Name'] != df['Name'].shift()].sort_values('Result')
df['Result'] = df['Result'].astype(str)
df['Date'] = df['Date'].astype(str)
df = df.reset_index().drop(columns=['index', 'Event'])
st.write(df)
elif category == "Institute Leaderboard":
events = sorted(cdf['Event'].unique())
event = st.sidebar.selectbox('Event', events)
df = cdf[cdf['Event'] == event]
# First sort by Result, then do a stable sort on Institute
df = df.sort_values('Result').sort_values('Institute', kind='stable')
# Drop duplicates, then sort by result again
df = df.loc[df['Institute'] != df['Institute'].shift()].sort_values('Result')
df['Result'] = df['Result'].astype(str)
df['Date'] = df['Date'].astype(str)
df = df.reset_index().drop(columns=['index', 'Event'])
st.header("Institute Leaderboard")
st.subheader(event)
st.write(df)
image = Image.open("media/AICS-Logo-Dark.png")
st.sidebar.image(image)
st.sidebar.markdown("[Website](https://all-iiser-cubing-society.github.io/#/) | [Instagram](https://www.instagram.com/all.iiser.cubing.society/) | [YouTube](https://www.youtube.com/channel/UCXOIh4FS48Dwy3BC9_FhprA)")
| python |
"""
Create a movie
==============
This example shows how to create a movie, which is only possible if `ffmpeg` is
installed in a standard location.
"""
from pde import UnitGrid, ScalarField, DiffusionPDE, MemoryStorage, movie_scalar
grid = UnitGrid([16, 16]) # generate grid
state = ScalarField.random_uniform(grid, 0.2, 0.3) # generate initial condition
storage = MemoryStorage() # create storage
tracker = storage.tracker(interval=1) # create associated tracker
eq = DiffusionPDE() # define the physics
eq.solve(state, t_range=2, dt=0.005, tracker=tracker)
# create movie from stored data
movie_scalar(storage, '/tmp/diffusion.mov')
| python |
import warnings
warnings.simplefilter('ignore')
import pytest
import numpy as np
import keras
from hand_classifier.hand_cnn import HandCNN
@pytest.mark.parametrize("img_shape, target_shape", [((512, 512, 3), (224, 224, 3)), ((820, 430, 3), (96, 96, 3)), ((400, 800, 3), (114, 114, 3))])
def test_preprocessing(img_shape, target_shape):
# Test size and normalization
warnings.simplefilter('ignore')
input_img = np.random.random_sample(img_shape) * 255
preprocessed_img = HandCNN.preprocess_input(input_img, target_shape[0], target_shape[1])
assert (np.asarray(preprocessed_img) < -1).sum() == 0, "preprocessed image contains values below 1"
assert (np.asarray(preprocessed_img) > 1).sum() == 0, "preprocessed image contains values above 1"
assert preprocessed_img.shape == target_shape, "preprocessed image doesn't have target shape"
@pytest.mark.parametrize("n_classes", [3, 6])
def test_model(n_classes):
warnings.simplefilter('ignore')
inputs = np.zeros((1, 224, 224, 3), dtype=np.float32)
targets = np.zeros((1, n_classes), np.float32)
model = HandCNN.get_model(n_classes, 224, 224)
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=1e-5))
model.fit(inputs, targets, batch_size=1)
@pytest.mark.parametrize("img_path", ["tests/hand_classifier/testdataset/fist/closeup1_0.jpg",
"tests/hand_classifier/testdataset/spok/closeup1_0.jpg",
"tests/hand_classifier/testdataset/palm/closeup1_0.jpg"])
def test_predictions(img_path):
warnings.simplefilter('ignore')
hand_cnn = HandCNN()
hand_cnn.LABELS = ["fist", "palm", "pointer", "spok", "thumb_down", "thumb_up"]
hand_cnn.train("tests/hand_classifier/testdataset/", batch_size=1, epochs=1, learning_rate=0.01,
checkpoints_callback=False)
res = hand_cnn.predict_img_path(img_path)
assert len(res[0]) == len(hand_cnn.LABELS)
np.testing.assert_almost_equal(np.sum(res[0]), 1, 5)
| python |
from flask import Blueprint
main = Blueprint("main", __name__)
from .views import * | python |
# # scan_test.py
# # Author: Thomas MINIER - MIT License 2017-2018
# from query_engine.sage_engine import SageEngine
# from query_engine.iterators.scan import ScanIterator
# from query_engine.iterators.union import BagUnionIterator, RandomBagUnionIterator
# from database.hdt_file_connector import HDTFileConnector
#
# hdtDoc = HDTFileConnector('tests/data/test.hdt')
# engine = SageEngine()
# triple1 = {
# 'subject': 'http://example.org/s1',
# 'predicate': '?p',
# 'object': '?o'
# }
# triple2 = {
# 'subject': 'http://example.org/s2',
# 'predicate': '?p',
# 'object': '?o'
# }
#
#
# def test_bag_union_read():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = BagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e7)
# assert len(results) == card1 + card2
# assert done
#
#
# def test_bag_union_interrupt():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = BagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e-4)
# assert len(results) < card1 + card2
# assert not done
#
#
# def test_random_union_read():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = RandomBagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e7)
# assert len(results) == card1 + card2
# assert done
#
#
# def test_random_union_interrupt():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = RandomBagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e-4)
# assert len(results) < card1 + card2
# assert not done
| python |
# Copyright 2018 Sabino Miranda Jimenez
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import ConceptModelling
version = ConceptModelling.__version__
setup(
name="ConceptModelling",
description="""ConceptModelling""",
version=version,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
'Programming Language :: Python :: 3',
"Topic :: Scientific/Engineering :: Artificial Intelligence"],
url='https://github.com/ingeotec/ConceptModelling',
author="Sabino Miranda Jimenez",
author_email="[email protected]",
packages=['ConceptModelling', 'ConceptModelling/tests'],
include_package_data=True,
zip_safe=False,
# package_data={'ConceptModelling/conf': ['default_parameters.json'],
# 'ConceptModelling/tests': ['tweets.json']},
# install_requires=['B4MSA', 'EvoDAG'],
# entry_points={
# 'console_scripts': ['ConceptModelling-train=ConceptModelling.command_line:train',
# 'ConceptModelling-predict=ConceptModelling.command_line:predict',
# 'ConceptModelling-utils=ConceptModelling.command_line:utils',
# 'ConceptModelling-performance=ConceptModelling.command_line:performance']
# }
)
| python |
from ._helpers import export_data, ExportScope
from . import orders, nested_orders
| python |
RESNET = "resnet"
XCEPTION = "xception"
INCEPTIONV3 = "inceptionv3"
VGG16 = "vgg16"
IMAGENET = "imagenet"
CONFIG_FILE = "config.json"
MODEL_INFO_FILE = "model_info.json"
SCORING = "scoring"
RETRAINING = "retraining"
BEFORE_TRAIN = "before_train"
RETRAINED_SUFFIX = "_retrained"
CUSTOM_TOP_SUFFIX = "_customtop"
RETRAINED = "retrained"
RETRAINED_PARAMS = "retrained_params"
TOP_PARAMS = "top_params"
NOTOP_SUFFIX = "_notop"
TENSORBOARD_LOGS = "tensorboard_logs"
LABEL = "__dku__image_label"
FILENAME = "__dku__image_filename"
MODEL_LABELS_FILE = "model_labels.csv"
PLUGIN_NAME = "dl-image-toolbox"
TENSORFLOW_VERSION_FOR_TENSORBOARD = "tensorflow==1.13.1"
| python |
import datetime
from dateutil import tz
def identity(x):
'''return the input value'''
return x
def local_timestamp(ts):
'''return a dst aware `datetime` object from `ts`'''
return datetime.datetime.fromtimestamp(ts, tz.tzlocal())
def strftime(ts):
if ts is None:
return 'None'
if isinstance(ts, int):
ts = local_timestamp(ts)
return ts.strftime('%F %T %z')
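# Minimal usage sketch; the exact strings depend on the local timezone:
#
#     ts = 1600000000
#     local_timestamp(ts)   # timezone-aware datetime for that epoch second
#     strftime(ts)          # e.g. '2020-09-13 12:26:40 +0000' on a UTC machine
#     strftime(None)        # 'None'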
| python |
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import pickle
from functools import partial
from collections import OrderedDict
import numpy as np
from ..base_evaluator import BaseEvaluator
from ..quantization_model_evaluator import create_dataset_attributes
from ...adapters import create_adapter
from ...config import ConfigError
from ...launcher import create_launcher
from ...utils import contains_all, contains_any, extract_image_representations, get_path
from ...progress_reporters import ProgressReporter
from ...logging import print_info
def generate_name(prefix, with_prefix, layer_name):
return prefix + layer_name if with_prefix else layer_name.split(prefix)[-1]
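# Illustration of generate_name (values are examples only):
#   generate_name('srmodel_', True, 'input')            -> 'srmodel_input'
#   generate_name('srmodel_', False, 'srmodel_input')   -> 'input'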
class SuperResolutionFeedbackEvaluator(BaseEvaluator):
def __init__(self, dataset_config, launcher, model):
self.dataset_config = dataset_config
self.preprocessing_executor = None
self.preprocessor = None
self.dataset = None
self.postprocessor = None
self.metric_executor = None
self.launcher = launcher
self.srmodel = model
self._metrics_results = []
@classmethod
def from_configs(cls, config, delayed_model_loading=False):
dataset_config = config['datasets']
launcher_config = config['launchers'][0]
if launcher_config['framework'] == 'dlsdk' and 'device' not in launcher_config:
launcher_config['device'] = 'CPU'
launcher = create_launcher(launcher_config, delayed_model_loading=True)
model = SRFModel(
config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob'),
delayed_model_loading
)
return cls(dataset_config, launcher, model)
def process_dataset(
self, subset=None,
num_images=None,
check_progress=False,
dataset_tag='',
output_callback=None,
allow_pairwise_subset=False,
dump_prediction_to_annotation=False,
calculate_metrics=True,
**kwargs):
if self.dataset is None or (dataset_tag and self.dataset.tag != dataset_tag):
self.select_dataset(dataset_tag)
self._annotations, self._predictions = [], []
self._create_subset(subset, num_images, allow_pairwise_subset)
metric_config = self.configure_intermediate_metrics_results(kwargs)
compute_intermediate_metric_res, metric_interval, ignore_results_formatting = metric_config
if 'progress_reporter' in kwargs:
_progress_reporter = kwargs['progress_reporter']
_progress_reporter.reset(self.dataset.size)
else:
_progress_reporter = None if not check_progress else self._create_progress_reporter(
check_progress, self.dataset.size
)
self.srmodel.init_feedback(self.dataset.data_reader)
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
self.srmodel.fill_feedback(batch_inputs)
batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)
batch_inputs_extr, _ = extract_image_representations(batch_inputs)
            callback = None
            if output_callback:
callback = partial(output_callback,
metrics_result=None,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids)
batch_raw_prediction, batch_prediction = self.srmodel.predict(
batch_identifiers, batch_inputs_extr, callback=callback
)
annotation, prediction = self.postprocessor.process_batch(batch_annotation, batch_prediction)
self.srmodel.feedback(prediction)
metrics_result = None
if self.metric_executor and calculate_metrics:
metrics_result, _ = self.metric_executor.update_metrics_on_batch(
batch_input_ids, annotation, prediction
)
if self.metric_executor.need_store_predictions:
self._annotations.extend(annotation)
self._predictions.extend(prediction)
if output_callback:
output_callback(
batch_raw_prediction[0],
metrics_result=metrics_result,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids
)
if _progress_reporter:
_progress_reporter.update(batch_id, len(prediction))
if compute_intermediate_metric_res and _progress_reporter.current % metric_interval == 0:
self.compute_metrics(
print_results=True, ignore_results_formatting=ignore_results_formatting
)
if _progress_reporter:
_progress_reporter.finish()
if self.srmodel.store_predictions:
self.srmodel.save_predictions()
def compute_metrics(self, print_results=True, ignore_results_formatting=False):
if self._metrics_results:
del self._metrics_results
self._metrics_results = []
for result_presenter, evaluated_metric in self.metric_executor.iterate_metrics(
self._annotations, self._predictions):
self._metrics_results.append(evaluated_metric)
if print_results:
result_presenter.write_result(evaluated_metric, ignore_results_formatting)
return self._metrics_results
def extract_metrics_results(self, print_results=True, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(False, ignore_results_formatting)
result_presenters = self.metric_executor.get_metric_presenters()
extracted_results, extracted_meta = [], []
for presenter, metric_result in zip(result_presenters, self._metrics_results):
result, metadata = presenter.extract_result(metric_result)
if isinstance(result, list):
extracted_results.extend(result)
extracted_meta.extend(metadata)
else:
extracted_results.append(result)
extracted_meta.append(metadata)
if print_results:
presenter.write_result(metric_result, ignore_results_formatting)
return extracted_results, extracted_meta
def print_metrics_results(self, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(True, ignore_results_formatting)
return
result_presenters = self.metric_executor.get_metric_presenters()
for presenter, metric_result in zip(result_presenters, self._metrics_results):
presenter.write_result(metric_result, ignore_results_formatting)
@property
def dataset_size(self):
return self.dataset.size
def release(self):
self.srmodel.release()
self.launcher.release()
def reset(self):
if self.metric_executor:
self.metric_executor.reset()
if hasattr(self, '_annotations'):
del self._annotations
del self._predictions
del self._input_ids
del self._metrics_results
self._annotations = []
self._predictions = []
self._input_ids = []
self._metrics_results = []
if self.dataset:
self.dataset.reset(self.postprocessor.has_processors)
@staticmethod
def get_processing_info(config):
module_specific_params = config.get('module_config')
model_name = config['name']
dataset_config = module_specific_params['datasets'][0]
launcher_config = module_specific_params['launchers'][0]
return (
model_name, launcher_config['framework'], launcher_config['device'], launcher_config.get('tags'),
dataset_config['name']
)
def _create_subset(self, subset=None, num_images=None, allow_pairwise=False):
if self.dataset.batch is None:
self.dataset.batch = 1
if subset is not None:
self.dataset.make_subset(ids=subset, accept_pairs=allow_pairwise)
elif num_images is not None:
self.dataset.make_subset(end=num_images, accept_pairs=allow_pairwise)
@staticmethod
def configure_intermediate_metrics_results(config):
compute_intermediate_metric_res = config.get('intermediate_metrics_results', False)
metric_interval, ignore_results_formatting = None, None
if compute_intermediate_metric_res:
metric_interval = config.get('metrics_interval', 1000)
ignore_results_formatting = config.get('ignore_results_formatting', False)
return compute_intermediate_metric_res, metric_interval, ignore_results_formatting
def load_network(self, network=None):
self.srmodel.load_network(network, self.launcher)
def load_network_from_ir(self, models_list):
self.srmodel.load_model(models_list, self.launcher)
def get_network(self):
return self.srmodel.get_network()
def get_metrics_attributes(self):
if not self.metric_executor:
return {}
return self.metric_executor.get_metrics_attributes()
def register_metric(self, metric_config):
if isinstance(metric_config, str):
self.metric_executor.register_metric({'type': metric_config})
elif isinstance(metric_config, dict):
self.metric_executor.register_metric(metric_config)
else:
raise ValueError('Unsupported metric configuration type {}'.format(type(metric_config)))
def register_postprocessor(self, postprocessing_config):
pass
def register_dumped_annotations(self):
pass
def select_dataset(self, dataset_tag):
if self.dataset is not None and isinstance(self.dataset_config, list):
return
dataset_attributes = create_dataset_attributes(self.dataset_config, dataset_tag)
self.dataset, self.metric_executor, self.preprocessor, self.postprocessor = dataset_attributes
@staticmethod
def _create_progress_reporter(check_progress, dataset_size):
pr_kwargs = {}
if isinstance(check_progress, int) and not isinstance(check_progress, bool):
pr_kwargs = {"print_interval": check_progress}
return ProgressReporter.provide('print', dataset_size, **pr_kwargs)
class BaseModel:
def __init__(self, network_info, launcher, delayed_model_loading=False):
self.network_info = network_info
self.launcher = launcher
def predict(self, identifiers, input_data):
raise NotImplementedError
def release(self):
pass
# pylint: disable=E0203
class BaseDLSDKModel:
def print_input_output_info(self):
print_info('{} - Input info:'.format(self.default_model_suffix))
has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info')
if self.network:
if has_info:
network_inputs = OrderedDict(
[(name, data.input_data) for name, data in self.network.input_info.items()]
)
else:
network_inputs = self.network.inputs
network_outputs = self.network.outputs
else:
if has_info:
network_inputs = OrderedDict([
(name, data.input_data) for name, data in self.exec_network.input_info.items()
])
else:
network_inputs = self.exec_network.inputs
network_outputs = self.exec_network.outputs
for name, input_info in network_inputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(input_info.precision))
print_info('\tshape {}\n'.format(input_info.shape))
print_info('{} - Output info'.format(self.default_model_suffix))
for name, output_info in network_outputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(output_info.precision))
print_info('\tshape: {}\n'.format(output_info.shape))
def automatic_model_search(self, network_info):
model = Path(network_info['srmodel'])
if model.is_dir():
is_blob = network_info.get('_model_is_blob')
if is_blob:
model_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
if not model_list:
model_list = list(model.glob('*.blob'))
else:
model_list = list(model.glob('*{}.xml'.format(self.default_model_suffix)))
blob_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
if not model_list and not blob_list:
model_list = list(model.glob('*.xml'))
blob_list = list(model.glob('*.blob'))
if not model_list:
model_list = blob_list
if not model_list:
raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
if len(model_list) > 1:
raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
model = model_list[0]
print_info('{} - Found model: {}'.format(self.default_model_suffix, model))
if model.suffix == '.blob':
return model, None
weights = get_path(network_info.get('weights', model.parent / model.name.replace('xml', 'bin')))
print_info('{} - Found weights: {}'.format(self.default_model_suffix, weights))
return model, weights
def load_network(self, network, launcher):
self.network = network
self.exec_network = launcher.ie_core.load_network(network, launcher.device)
def update_inputs_outputs_info(self):
raise NotImplementedError
def load_model(self, network_info, launcher, log=False):
model, weights = self.automatic_model_search(network_info)
if weights is not None:
self.network = launcher.read_network(str(model), str(weights))
self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
else:
self.exec_network = launcher.ie_core.import_network(str(model))
self.update_inputs_outputs_info()
if log:
self.print_input_output_info()
def create_model(model_config, launcher, delayed_model_loading=False):
launcher_model_mapping = {
'dlsdk': ModelDLSDKModel,
'tf': ModelTFModel,
}
framework = launcher.config['framework']
if 'predictions' in model_config and not model_config.get('store_predictions', False):
framework = 'dummy'
model_class = launcher_model_mapping.get(framework)
if not model_class:
raise ValueError('model for framework {} is not supported'.format(framework))
return model_class(model_config, launcher, delayed_model_loading)
class SRFModel(BaseModel):
def __init__(self, network_info, launcher, models_args, is_blob, delayed_model_loading=False):
super().__init__(network_info, launcher)
if models_args and not delayed_model_loading:
model = network_info.get('srmodel', {})
if not contains_any(model, ['model', 'onnx_model']) and models_args:
model['srmodel'] = models_args[0]
model['_model_is_blob'] = is_blob
            network_info.update({'srmodel': model})
if not contains_all(network_info, ['srmodel']) and not delayed_model_loading:
raise ConfigError('network_info should contain srmodel field')
self.srmodel = create_model(network_info['srmodel'], launcher, delayed_model_loading)
self.feedback = self.srmodel.feedback
self.init_feedback = self.srmodel.init_feedback
self.fill_feedback = self.srmodel.fill_feedback
self.store_predictions = network_info['srmodel'].get('store_predictions', False)
self._predictions = [] if self.store_predictions else None
self._part_by_name = {'srmodel': self.srmodel}
self._raw_outs = OrderedDict()
def predict(self, identifiers, input_data, callback=None):
predictions, raw_outputs = [], []
for data in input_data:
output, prediction = self.srmodel.predict(identifiers, data)
if self.store_predictions:
self._predictions.append(prediction)
raw_outputs.append(output)
predictions.append(prediction)
return raw_outputs, predictions
def reset(self):
self.processing_frames_buffer = []
if self._predictions is not None:
self._predictions = []
def release(self):
self.srmodel.release()
def save_predictions(self):
if self._predictions is not None:
prediction_file = Path(self.network_info['srmodel'].get('predictions', 'model_predictions.pickle'))
with prediction_file.open('wb') as file:
pickle.dump(self._predictions, file)
def load_network(self, network_list, launcher):
for network_dict in network_list:
self._part_by_name[network_dict['name']].load_network(network_dict['srmodel'], launcher)
self.update_inputs_outputs_info()
def load_model(self, network_list, launcher):
for network_dict in network_list:
self._part_by_name[network_dict['name']].load_model(network_dict, launcher)
self.update_inputs_outputs_info()
def _add_raw_predictions(self, prediction):
for key, output in prediction.items():
if key not in self._raw_outs:
self._raw_outs[key] = []
self._raw_outs[key].append(output)
def get_network(self):
return [{'name': 'srmodel', 'model': self.srmodel.network}]
class FeedbackMixin:
def configure_feedback(self):
self._idx_to_name = {}
self._name_to_idx = {}
self._feedback_name = self.network_info['feedback_input']
self._feedback_data = {self._feedback_name: None}
self._first_step = True
self._inputs = self.network_info['inputs']
self._feedback_inputs = {self._feedback_name: [t for t in self._inputs if t['name'] == self._feedback_name][0]}
for input_info in self._inputs:
idx = int(input_info['value'])
self._idx_to_name[idx] = input_info['name']
self._name_to_idx[input_info['name']] = idx
self._feedback_idx = self._name_to_idx[self._feedback_name]
def init_feedback(self, reader):
info = self._feedback_inputs[self._feedback_name]
self._feedback_data[self._feedback_name] = reader.read(info['initializer'])
def feedback(self, data):
data = data[0]
self._feedback_data[self._feedback_name] = data[0].value
def fill_feedback(self, data):
data[0].data[self._feedback_idx] = self._feedback_data[self._feedback_name]
return data
class ModelDLSDKModel(BaseModel, BaseDLSDKModel, FeedbackMixin):
default_model_suffix = 'srmodel'
def __init__(self, network_info, launcher, delayed_model_loading=False):
super().__init__(network_info, launcher)
self.input_blob, self.output_blob = None, None
self.with_prefix = None
if not delayed_model_loading:
self.load_model(network_info, launcher, log=True)
self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
self.configure_feedback()
def predict(self, identifiers, input_data):
input_data = self.fit_to_input(input_data)
raw_result = self.exec_network.infer(input_data)
result = self.adapter.process([raw_result], identifiers, [{}])
return raw_result, result
def release(self):
del self.exec_network
del self.launcher
def fit_to_input(self, input_data):
has_info = hasattr(self.exec_network, 'input_info')
if has_info:
input_info = self.exec_network.input_info
else:
input_info = self.exec_network.inputs
fitted = {}
for name, info in input_info.items():
data = input_data[self._name_to_idx[name]]
data = np.expand_dims(data, axis=0)
data = np.transpose(data, [0, 3, 1, 2])
assert tuple(info.input_data.shape) == np.shape(data)
fitted[name] = data
return fitted
def update_inputs_outputs_info(self):
has_info = hasattr(self.exec_network, 'input_info')
input_info = self.exec_network.input_info if has_info else self.exec_network.inputs
input_blob = next(iter(input_info))
with_prefix = input_blob.startswith(self.default_model_suffix + '_')
if (with_prefix != self.with_prefix) and with_prefix:
self.network_info['feedback_input'] = '_'.join([self.default_model_suffix,
self.network_info['feedback_input']])
for inp in self.network_info['inputs']:
inp['name'] = '_'.join([self.default_model_suffix, inp['name']])
if 'blob' in inp.keys():
inp['blob'] = '_'.join([self.default_model_suffix, inp['blob']])
self.network_info['adapter']['target_out'] = '_'.join([self.default_model_suffix,
self.network_info['adapter']['target_out']])
self.with_prefix = with_prefix
class ModelTFModel(BaseModel, FeedbackMixin):
default_model_suffix = 'srmodel'
def __init__(self, network_info, launcher, *args, **kwargs):
super().__init__(network_info, launcher)
model = self.automatic_model_search(network_info)
self.inference_session = launcher.create_inference_session(str(model))
self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
self.configure_feedback()
def predict(self, identifiers, input_data):
input_data = self.fit_to_input(input_data)
raw_result = self.inference_session.predict([input_data])
result = self.adapter.process(raw_result, identifiers, [{}])
return raw_result, result
def fit_to_input(self, input_data):
fitted = {}
for idx, data in enumerate(input_data):
name = self._idx_to_name[idx]
data = np.expand_dims(data, axis=0)
fitted[name] = data
return fitted
def release(self):
del self.inference_session
@staticmethod
def automatic_model_search(network_info):
model = Path(network_info['model'])
return model
| python |
"""
This module details user input api
"""
import time
from queue import Queue, Empty
from pubsub import pub
from fixate.config import RESOURCES
from collections import OrderedDict
USER_YES_NO = ("YES", "NO")
USER_RETRY_ABORT_FAIL = ("RETRY", "ABORT", "FAIL")
def _user_req_input(msg, target=None, attempts=5, **kwargs):
"""
A blocking function that waits for the user returned values
:param msg:
A message that will be shown to the user
:param target:
A function that will verify the user input
:param args:
Args for the target
:param kwargs:
Kwargs for the target
:return:
Returns the user response
"""
q = Queue()
pub.sendMessage("UI_block_start")
pub.sendMessage(
"UI_req_input", msg=msg, q=q, target=target, attempts=attempts, kwargs=kwargs
)
resp = q.get()
pub.sendMessage("UI_block_end")
return resp
def _user_req_choices(msg, choices, target=None, attempts=5):
"""
A blocking function that waits for the user returned values
:param msg:
A message that will be shown to the user
:param target:
A function that will verify the user input
:param args:
Args for the target
:param kwargs:
Kwargs for the target
:return:
Returns the user response
"""
if len(choices) < 2:
raise ValueError(
"Requires at least two choices to work, {} provided".format(choices)
)
q = Queue()
pub.sendMessage("UI_block_start")
pub.sendMessage(
"UI_req_choices",
msg=msg,
q=q,
choices=choices,
target=target,
attempts=attempts,
)
resp = q.get()
pub.sendMessage("UI_block_end")
return resp
def user_info(msg):
pub.sendMessage("UI_display", msg=msg)
def user_info_important(msg):
pub.sendMessage("UI_display_important", msg=msg)
def user_input(msg):
"""
Get information from the user
:param msg:
text string indicating the request to the user
:return:
user response
"""
# TODO - fix validation, bring it all into one method?? or move validation into target function for consistency
return _user_req_input(msg)
def _float_validate(entry):
try:
return float(entry)
except ValueError:
user_info("Please enter a number")
return False
def user_input_float(msg):
"""
Get information from the user
:param msg:
text string indicating the request to the user
:return:
user response if valid
"""
return _user_req_input(msg, target=_float_validate)
def user_action(msg, target):
"""
Prompts the user to complete an action.
Actively monitors the target infinitely until the event is detected or a user fail event occurs
:param msg:
Message to display to the user
:param target: A function that will be called until the user action is cancelled. The function
should return False if it hasn't completed. If the action is finished return True.
:return: True if target returns True to finish the loop, False if user
cancels vi the UserActionCallback
"""
class UserActionCallback:
def __init__(self):
# The UI implementation must provide queue.Queue object. We
# monitor that object. If it is non-empty, we get the message
# in the q and cancel the target call.
self.user_cancel_queue = None
        # In the case that the target exits the user action instead
# of the user, we need to tell the UI to do any clean up that
# might be required. (e.g. return GUI buttons to the default state
# Does not need to be implemented by the UI.
# Function takes no args and should return None.
self.target_finished_callback = lambda: None
def set_user_cancel_queue(self, cancel_queue):
self.user_cancel_queue = cancel_queue
def set_target_finished_callback(self, callback):
self.target_finished_callback = callback
callback_obj = UserActionCallback()
pub.sendMessage("UI_action", msg=msg, callback_obj=callback_obj)
try:
while True:
try:
callback_obj.user_cancel_queue.get_nowait()
return False
except Empty:
pass
if target():
return True
# Yield control for other threads but don't slow down target
time.sleep(0)
finally:
# No matter what, if we exit, we want to reset the UI
callback_obj.target_finished_callback()
def user_ok(msg):
"""
Display the provided message and waits for the user to acknowledge
:param msg:
A message that will be shown to the user
"""
q = Queue()
pub.sendMessage("UI_block_start")
pub.sendMessage("UI_req", msg=msg, q=q)
resp = q.get()
pub.sendMessage("UI_block_end")
return resp
def user_image(path):
pub.sendMessage("UI_image", path=path)
def user_image_clear():
pub.sendMessage("UI_image_clear")
# TODO: This is used by the sequencer. Should make internal. Doesn't makes
# sense that a test script would call this.
def user_retry_abort_fail(msg):
return _user_req_choices(msg, target=_user_choices, choices=USER_RETRY_ABORT_FAIL)
def user_yes_no(msg, attempts=1):
return _user_req_choices(
msg, attempts=attempts, target=_user_choices, choices=USER_YES_NO
)
def _user_choices(response, choices):
if len(response) > 0:
for choice in choices:
if choice.startswith(response.upper()):
return choice
return False
def _ten_digit_serial(response):
return (len(response) == 10) and int(response)
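# Illustration of the default serial validator (values are examples only):
#   _ten_digit_serial("0123456789") -> 123456789  (truthy, accepted)
#   _ten_digit_serial("12345")      -> False      (wrong length, rejected)
# A ten-character non-numeric string raises ValueError from int(); handling
# that is left to the UI-side validation loop.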
def user_serial(msg, target=_ten_digit_serial, attempts=5):
serial = _user_req_input(msg, attempts=attempts, target=target)
return serial
def user_post_sequence_info_pass(msg):
"""
Adds information to be displayed to the user at the end of the sequence passes
This information will be displayed in the order that post sequence info calls are made and will remove duplicates
:param msg: String as it should be displayed
:return:
"""
if "_post_sequence_info" not in RESOURCES["SEQUENCER"].context_data:
RESOURCES["SEQUENCER"].context_data["_post_sequence_info"] = OrderedDict()
RESOURCES["SEQUENCER"].context_data["_post_sequence_info"][msg] = "PASSED"
def user_post_sequence_info_fail(msg):
"""
Adds information to be displayed to the user at the end of the sequence if the tests fail or error.
This information will be displayed in the order that post sequence info calls are made and will remove duplicates
:param msg: String as it should be displayed
:return:
"""
if "_post_sequence_info" not in RESOURCES["SEQUENCER"].context_data:
RESOURCES["SEQUENCER"].context_data["_post_sequence_info"] = OrderedDict()
RESOURCES["SEQUENCER"].context_data["_post_sequence_info"][msg] = "FAILED"
def user_post_sequence_info(msg):
"""
Adds information to be displayed to the user at the end of the sequence
This information will be displayed in the order that post sequence info calls are made and will remove duplicates
:param msg: String as it should be displayed
:return:
"""
if "_post_sequence_info" not in RESOURCES["SEQUENCER"].context_data:
RESOURCES["SEQUENCER"].context_data["_post_sequence_info"] = OrderedDict()
RESOURCES["SEQUENCER"].context_data["_post_sequence_info"][msg] = "ALL"
| python |
"""
NOTE: Other aspects that run alongside the main usage can be described here as well.
If this gets too long, links to the official documentation are fine too.
"""
def example_1():
pass
if __name__ == "__main__":
example_1() | python |
import os.path
from data.base_dataset import BaseDataset, get_transforms_reid, get_transforms_LR_reid, get_transforms_norm_reid
from data.image_folder import make_reid_dataset
from PIL import Image
from scipy.io import loadmat
import numpy as np
class SingleMarketDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
# parser.add_argument('--dataset_type', type=str, default='A', help='the A set')
Market_attr_class_num = [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
parser.add_argument('--up_scale', type=int, default=4, help='up_scale of the image super-resolution')
parser.add_argument('--num_attr', type=int, default=27, help='the number of the attributes')
parser.add_argument('--resize_h', type=int, default=256, help='the size of the height should be resized')
parser.add_argument('--resize_w', type=int, default=128, help='the size of the width should be resized')
parser.add_argument('--num_classes', type=int, default=751, help='the total num of the id classes')
parser.add_argument('--attr_class_num', nargs='+', type=int, help='the number of classes of each attributes')
parser.set_defaults(attr_class_num=Market_attr_class_num)
return parser
def initialize(self, opt):
self.opt = opt
self.dataPath = '/home/share/jiening/dgd_datasets/raw'
# self.root = opt.dataroot # opt.dataroot = Market-1501-v15.09.15
if opt.dataroot == 'Market':
self.root = 'Market-1501-v15.09.15'
self.dataset_type = opt.dataset_type
# load the attributes from the formatted attributes file, total 27 attributes
self.attrFile = os.path.join(self.dataPath, self.root, 'Market_attributes.mat') # get the attributes mat file
self.total_attr = loadmat(self.attrFile)
self.train_attr = self.total_attr['train_attr'] # 751 * 27
self.test_attr = self.total_attr['test_attr'] # 750 * 27
# load the attributes index from the index file, total 27 attributes
self.attrIndexFile = os.path.join(self.dataPath, self.root, 'Market_index.mat')
self.total_attrIndex = loadmat(self.attrIndexFile)
self.train_attrIndex = self.total_attrIndex['train_index'][0] # 751
self.test_attrIndex = self.total_attrIndex['test_index'][0] # 750
# -----------------------------------------
# query (test B) LR
dir_query = os.path.join(self.dataPath, self.root, 'query') # images in the query
query_paths, query_labels = make_reid_dataset(dir_query)
query_num = len(query_paths) # 2228
print('total %d images in query' % query_num)
# -----------------------------------------
# gallery (test A) HR
dir_gallery = os.path.join(self.dataPath, self.root, 'bounding_box_test')
gallery_paths, gallery_labels = make_reid_dataset(dir_gallery)
gallery_num = len(gallery_paths) # 17661
print('total %d images in bounding_box_test' % gallery_num)
self.test_attr_map = {}
# the query_labels are included in the gallery_labels
for i, label in enumerate(self.test_attrIndex):
self.test_attr_map[label] = i
if self.dataset_type == 'A':
self.img_paths = gallery_paths
self.img_labels = gallery_labels
else:
self.img_paths = query_paths
self.img_labels = query_labels
self.img_attrs = []
for i in query_labels:
# obtain the according id
attr_id = self.test_attr_map[i]
self.img_attrs.append(self.test_attr[attr_id])
# A: high-resolution, B: low-resolution
self.transform = get_transforms_reid(opt)
self.transform_LR = get_transforms_LR_reid(opt)
self.transform_norm = get_transforms_norm_reid()
def __getitem__(self, index):
img_path = self.img_paths[index]
img = Image.open(img_path).convert('RGB')
# img = self.transform_A(img)
img_label = self.img_labels[index]
# A: high-resolution, B: low-resolution
if self.dataset_type == 'A':
# high-resolution image
img = self.transform(img)
GT_img = self.transform_LR(img) # ground-truth low-resolution image
img = self.transform_norm(img)
GT_img = self.transform_norm(GT_img)
# do not need the attributes, do not have the attributes
img_attr = img_label
else:
# low-resolution image
GT_img = self.transform(img) # ground-truth high-resolution image
img = self.transform_LR(GT_img)
GT_img = self.transform_norm(GT_img)
img = self.transform_norm(img)
img_attr = self.img_attrs[index]
if self.opt.direction == 'BtoA':
input_nc = self.opt.output_nc
else:
input_nc = self.opt.input_nc
if input_nc == 1: # RGB to gray
tmp = img[0, ...] * 0.299 + img[1, ...] * 0.587 + img[2, ...] * 0.114
img = tmp.unsqueeze(0)
return {'img': img, 'img_paths': img_path,
'GT_img': GT_img,
'img_attr': img_attr,
'img_label': img_label}
def __len__(self):
return len(self.img_paths)
def name(self):
return 'SingleMarketDataset'
| python |
from pm4pymdl.algo.mvp.utils import succint_mdl_to_exploded_mdl, clean_objtypes
import pandas as pd
def preprocess(df, parameters=None):
if parameters is None:
parameters = {}
conversion_needed = False
try:
if df.type == "succint":
conversion_needed = True
except:
pass
if len(df) == 0:
df = pd.DataFrame({"event_id": [], "event_activity": []})
if conversion_needed:
df = succint_mdl_to_exploded_mdl.apply(df)
#df = clean_objtypes.perfom_cleaning(df, parameters=parameters)
if len(df) == 0:
df = pd.DataFrame({"event_id": [], "event_activity": []})
return df
| python |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import torchvision
import torch
import matplotlib.pyplot as plt
from pathlib import Path
import logging
import time
import pickle
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import Dataset
from mingpt.utils import set_seed, sample
from mingpt.model import GPT, GPTConfig
from mingpt.trainer import Trainer, TrainerConfig
import os
logging.basicConfig(
format='%(asctime)s|%(levelname)s|%(name)s|%(message)s',
datefmt='%Y-%d-%d %H:%M:%S',
level=logging.INFO,
)
set_seed(42) # make deterministic
GPT_S = dict(
embd_pdrop=0.0,
resid_pdrop=0.0,
attn_pdrop=0.0,
n_layer=24,
n_head=8,
n_embd=512,
)
def now_utc():  # unix time in milliseconds
seconds = round(time.time())
millis = seconds * 1000
unix = int(millis)
return unix
def load_pickle(f_path):
with open(f_path, 'rb') as fp:
return pickle.load(fp)
def get_train_test_split(X, y, test_size, random_state=42, verbose=False):
X_train, X_test, y_train, y_test = train_test_split(
X, y,
test_size=test_size,
random_state=random_state # reproducible results
)
if verbose:
logging.getLogger(__name__).info('train data: X ~ {}, y ~ {}'.format(X_train.shape, y_train.shape))
logging.getLogger(__name__).info('test data: X ~ {}, y ~ {}'.format(X_test.shape, y_test.shape))
return X_train, X_test, y_train, y_test
def get_data(file_path, max_imgs=2000):
dataset = load_pickle(Path(file_path).expanduser())
if len(dataset) == 2: # (images, masks)
X = dataset[0] # list of images
y = dataset[1] # list of corresponding mask
else: # unsupervised list of images
X = np.array(dataset, dtype='float32')[:max_imgs]
y = np.zeros(len(X))
pixel_size = X.shape[1] # should be == X.shape[2] == 32
X = np.array(np.ceil(X * 255), dtype='float32') # convert pixels to [0, 255] range
y = np.array(np.ceil(y * 255), dtype='float32')
X_train, X_test, y_train, y_test = get_train_test_split(X, y, 0.3, verbose=True)
tensor_X_train = torch.Tensor(X_train) # tensors
tensor_y_train = torch.Tensor(y_train)
tensor_X_test = torch.Tensor(X_test)
tensor_y_test = torch.Tensor(y_test)
t_train_dataset = TensorDataset(tensor_X_train, tensor_y_train)
t_test_dataset = TensorDataset(tensor_X_test, tensor_y_test)
return t_train_dataset, t_test_dataset, X_train
class ImageDataset(Dataset):
def __init__(self, pt_dataset, perm=None):
self.pt_dataset = pt_dataset
flattened_image_size = 32 * 32
self.perm = torch.arange(flattened_image_size) if perm is None else perm
self.vocab_size = 256 # possible values for pixels
self.block_size = flattened_image_size - 1
def __len__(self):
return len(self.pt_dataset)
def __getitem__(self, idx):
image_channels = 1 # grayscale
x, y = self.pt_dataset[idx]
x = torch.from_numpy(np.array(x)).view(-1, image_channels) # flatten out all pixels
x = x[self.perm].float() # reshuffle pixels with any fixed permutation and -> float
a = x[:, 0]
return a[:-1], a[1:] # always just predict the next one in the sequence
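# Illustrative note (not in the original script): the two returned slices form the
# usual next-token shift, so the model learns to predict pixel t+1 from pixels
# 0..t. For a toy flattened sequence a = [5, 7, 9, 2]:
#   input  a[:-1] = [5, 7, 9]
#   target a[1:]  = [7, 9, 2]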
def get_model(mconf):
return GPT(mconf)
def train(model, n_epochs, train_dataset, test_dataset, checkpoint_path):
tokens_per_epoch = len(train_dataset) * train_dataset.block_size
# initialize a trainer instance and kick off training
tconf = TrainerConfig(
max_epochs=n_epochs,
batch_size=4,
learning_rate=3e-3,
betas=(0.9, 0.95),
weight_decay=0,
lr_decay=True,
warmup_tokens=tokens_per_epoch,
final_tokens=n_epochs * tokens_per_epoch,
ckpt_path=checkpoint_path,
num_workers=1
)
trainer = Trainer(model, train_dataset, test_dataset, tconf)
trainer.train()
return trainer
def model_first_token(dataset, X_train, n_clusters=256):
counts = torch.ones(n_clusters) # start counts as 1 not zero, this is called "smoothing"
rp = torch.randperm(len(dataset))
nest = X_train.shape[0] // 2 # how many images to use for the estimation
for i in range(nest):
a, _ = dataset[int(rp[i])]
t = a[0].item() # index of first token in the sequence
counts[int(t)] += 1
prob = counts / counts.sum() # normalize to have sum (prob) = 1
return prob
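# Illustrative note: because counts start at 1 (add-one smoothing), a pixel value
# that never occurs as the first token still keeps a small non-zero probability.
# E.g. with 5 sampled images, counts.sum() == 256 + 5 == 261, so an unseen value
# gets probability 1/261 instead of 0.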
def sample_some(trainer, model, dataset, X_train, n_samples=40, out_path='./samples.png'):
prob = model_first_token(dataset, X_train)
start_pixel = np.random.choice(np.arange(dataset.vocab_size), size=(n_samples, 1), replace=True, p=prob.numpy())
start_pixel = torch.from_numpy(start_pixel).to(trainer.device)
flattened_image_size = 32 * 32
pixels = sample(model, start_pixel, flattened_image_size - 1, temperature=1.0, sample=True, top_k=40)
# for visualization we have to invert the permutation used to produce the pixels
iperm = torch.argsort(dataset.perm)
pixel_size = 32
n_cols = 8
n_rows = n_samples // n_cols
fig, axis = plt.subplots(n_rows, n_cols, figsize=(16, 8))
for i, ax in enumerate(axis.ravel()):
pxi = pixels[i][iperm] # undo the encoding permutation
pxi = pxi.view(pixel_size, pixel_size).cpu().numpy().astype(np.uint8) # grayscale -> 2D
ax.imshow(pxi, cmap='magma')
ax.axis('off')
plt.savefig(out_path)
def fine_tune(model):
pass
def do_it(data_path, n_embd, use_embd, folder_out):
os.makedirs(folder_out)
filename = './{}/log_{}.log'.format(folder_out, now_utc())
fileh = logging.FileHandler(filename, 'a')
log = logging.getLogger() # root logger
for hdlr in log.handlers[:]: # remove all old handlers
log.removeHandler(hdlr)
log.addHandler(fileh) # set the new handler
t_train_dataset, t_test_dataset, X_train = get_data(data_path) # raw data
train_dataset = ImageDataset(t_train_dataset) # build dataset
test_dataset = ImageDataset(t_test_dataset)
MY_GPT = dict(
n_layer=16,
n_embd=n_embd
)
MY_GPT = {**GPT_S, **MY_GPT} # inherit all other params
mconf = GPTConfig(
train_dataset.vocab_size,
train_dataset.block_size,
**MY_GPT,
bert=False,
use_embd=use_embd,
)
model = get_model(mconf)
checkpoint_path = './{}/latest_model.pt'.format(folder_out)
trainer = train(model, 10, train_dataset, test_dataset, checkpoint_path)
    checkpoint = torch.load(checkpoint_path, map_location=torch.device('cuda:0'))  # use map_location='cpu' to load without a GPU
model.load_state_dict(checkpoint)
out_path='./{}/samples.png'.format(folder_out)
sample_some(trainer, model, train_dataset, X_train, out_path=out_path)
def do_them():
params = [
{
'data_path': './data/brain.pkl',
'n_embd': 256,
'use_embd': False,
'folder_out': './results/cremi/ll_256/',
}
]
for param in params:
do_it(**param)
if __name__ == "__main__":
do_them()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: Nelson Brochado
Created: 08/10/2017
Updated: 02/04/2018
# Description
Unit tests for the functions in the ands.algorithms.numerical.barycentric
module.
"""
import unittest
from ands.algorithms.numerical.barycentric import barycentric, compute_weights
from tests.algorithms.numerical.polynomial_interpolation_tests import *
class TestBarycentric(unittest.TestCase, PolynomialInterpolationTests):
def __init__(self, method_name="__init__"):
unittest.TestCase.__init__(self, method_name)
PolynomialInterpolationTests.__init__(self, barycentric)
def test_when_weights_are_provided(self):
# n points, so polynomial would be of degree n - 1.
xs = [8, 16, 64]
n = len(xs)
        # Since we want to call barycentric multiple times with different y
        # values and different evaluation points of the polynomial, i.e.
        # different x0's, we pre-compute the weights and pass them to the
        # function barycentric.
ws = compute_weights(xs)
# f and g are functions.
for h in [f, g]:
ys = [h(x) for x in xs] # Evaluate the function at all xs points.
for x0 in [-2, 2]:
y0 = barycentric(xs, ys, x0, ws)
bi0 = barycentric_interpolate(xs, ys, x0)
self.assertAlmostEqual(bi0, np.array(y0))
| python |
# -*- coding=utf-8 -*-
__all__ = [
'tiny_imagenet',
'imagewoof2',
'imagenette2'
]
import os
import torch
import torchvision
_default_batch_size = 32
_default_num_workers = 4
def _transform(train=True):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if train:
return torchvision.transforms.Compose([
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
else:
return torchvision.transforms.Compose([
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
def tiny_imagenet(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'tiny-imagenet-200', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagewoof2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagewoof2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagenette2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagenette2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
| python |
#!/usr/bin/python
import util
TAG_LIST_1 = ['keyspace', 'shard', 'type']
TAG_LIST_2 = ['type']
TAG_LIST_3 = ['method', 'keyspace', 'shard', 'type']
TAG_LIST_4 = ['method', 'keyspace', 'type']
def process_data(json_data):
epoch_time = util.get_epoch_time()
util.create_metric(epoch_time, "vitess.healthcheckConnections", json_data['HealthcheckConnections']
, TAG_LIST_1)
util.create_metric(epoch_time, "vitess.healthcheckErrors", json_data['HealthcheckErrors']
, TAG_LIST_1)
util.create_metric(epoch_time, "vitess.vtgateApiErrorCounts", json_data['VtgateApiErrorCounts']
, TAG_LIST_4)
util.create_metric(epoch_time, "vitess.vtgateApiRowsReturned", json_data['VtgateApiRowsReturned']
, TAG_LIST_4)
util.create_metric(epoch_time, "vitess.vtgateInfoErrorCounts", json_data['VtgateInfoErrorCounts']
, TAG_LIST_2)
util.create_metric(epoch_time, "vitess.vtgateInternalErrorCounts"
, json_data['VtgateInternalErrorCounts'], TAG_LIST_2)
util.create_metric(epoch_time, "vitess.vttabletCallErrorCount", json_data['VttabletCallErrorCount']
, TAG_LIST_3)
util.publish_metric(epoch_time, "vitess.vtgateApi.totalCount", json_data['VtgateApi']['TotalCount']
, None)
util.create_metric_histogram(epoch_time, "vitess.vtgateApi.count", json_data['VtgateApi']
, TAG_LIST_4)
util.publish_metric(epoch_time, "vitess.vttabletCall.totalCount"
, json_data['VttabletCall']['TotalCount'], None)
util.create_metric_histogram(epoch_time, "vitess.vttabletCall.count", json_data['VttabletCall']
, TAG_LIST_3)
def main():
url = util.get_url()
json_data = util.get_json_data(url)
process_data(json_data)
if __name__ == '__main__':
main()
| python |
import requests
import os
API_URL = 'http://127.0.0.1:8000/api/devices/devicetype/1/'
API_KEY = os.environ['TESTAUTH']
headers = {'Authorization': f'Token {API_KEY}'}
r = requests.delete(API_URL, headers=headers)
print(r.status_code)
| python |
from django.test import TestCase
class AnalyzerTasksTestCase(TestCase):
@classmethod
def setUpTestData(cls):
pass
| python |
#!/usr/bin/env python
import sys,argparse
import numpy
import os
import time, datetime
import h5py
import scipy.misc
import configobj
def get_valid_stacks(f_names):
f_names_valid = []
for fn in f_names:
with h5py.File(fn,"r") as f:
if "mean" in f.keys():
f_names_valid.append(fn)
return f_names_valid
def get_dims(f_name):
with h5py.File(f_name,"r") as f:
        s = list(numpy.shape(f["mean"]))
        s.pop(0)  # drop the leading stack dimension
        return tuple(s)
def get_max_mask(f_names, ds_name, threshold):
d = []
for fn in f_names:
with h5py.File(fn, "r") as f:
d.append(numpy.array(f[ds_name]))
return (numpy.mean(d,axis=0) < threshold)
def get_min_mask(f_names, ds_name, threshold):
d = []
for fn in f_names:
with h5py.File(fn, "r") as f:
d.append(numpy.array(f[ds_name]))
return (numpy.mean(d,axis=0) > threshold)
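# Illustrative sketch: both helpers average the chosen dataset ("mean", "std" or
# "median") over all valid stack files and threshold it into a boolean pixel
# mask, so, with hypothetical thresholds,
#   get_min_mask(files, "mean", 1.0) * get_max_mask(files, "mean", 1e4)
# keeps only pixels whose averaged mean lies strictly between 1.0 and 1e4.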
def get_badpixelmask(f_name):
if f_name[-3:] == ".h5":
        with h5py.File(f_name, "r") as f:
m = numpy.array(f["/data/data"])
elif f_name[-4:] == ".png":
m = scipy.misc.imread(f_name,flatten=True) / 255.
return m
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Hummingbird mask tool. Creates mask from stack files in current directory and given configuration file.')
parser.add_argument('config', type=str,
help="Configuration file")
parser.add_argument('-l', '--link', type=str, help="Creates symbolic link to the H5 mask from given path")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
args = parser.parse_args()
C = configobj.ConfigObj(args.config)
files = os.listdir(".")
files = [f for f in files if len(f) > 3]
files = [f for f in files if f[-3:] == ".h5"]
files = get_valid_stacks(files)
if len(files) == 0:
sys.exit(0)
s = get_dims(files[0])
mask = numpy.ones(shape=s, dtype="bool")
if C["mean_max"].lower() != 'none':
mask *= get_max_mask(files, "mean", float(C["mean_max"]))
if C["std_max"].lower() != 'none':
mask *= get_max_mask(files, "std", float(C["std_max"]))
if C["median_max"].lower() != 'none':
mask *= get_max_mask(files, "median", float(C["median_max"]))
if C["mean_min"].lower() != 'none':
mask *= get_min_mask(files, "mean", float(C["mean_min"]))
if C["std_min"].lower() != 'none':
mask *= get_min_mask(files, "std", float(C["std_min"]))
if C["median_min"].lower() != 'none':
mask *= get_min_mask(files, "median", float(C["median_min"]))
if C["badpixelmask"].lower() != 'none':
mask *= get_badpixelmask(C["badpixelmask"])
fn_root = files[-1].split("/")[-1][:-3]
outdir = C["outdir"]
os.system("mkdir -p %s" % outdir)
    if C["output_png"].lower() in ("true", "1", "yes"):
import matplotlib.pyplot as pypl
pypl.imsave("%s/mask_%s.png" % (outdir,fn_root), mask, cmap="binary_r", vmin=0, vmax=1)
with h5py.File("%s/mask_%s.h5" % (outdir,fn_root), "w") as f:
f["data/data"] = mask
os.system("cp %s %s/mask_%s.conf" % (args.config,outdir,fn_root))
if args.link:
os.system("ln -s -f %s/mask_%s.h5 %s" % (outdir, fn_root, args.link))
| python |
#coding=utf-8
# Copyright (c) 2018 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import solr_tools
if sys.argv[1] == "add_engine":
solr_tools.add_engine(sys.argv[2], sys.argv[3], sys.argv[4],
shard=1, replica=1, maxshardpernode=5, conf='myconf')
elif sys.argv[1] == "delete_engine":
solr_tools.delete_engine(sys.argv[2], sys.argv[3], sys.argv[4])
elif sys.argv[1] == "upload_doc":
solr_tools.upload_documents(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], num_thread=1)
elif sys.argv[1] == "clear_doc":
solr_tools.clear_documents(sys.argv[2], sys.argv[3], sys.argv[4])
| python |
import json
import requests
import code
class Demand():
def __init__(self, region='ap-southeast-1', instanceType='m4.large', operatingSystem='Linux'):
self.url = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/{}/index.json'.format(region)
self.instanceType = instanceType
self.operatingSystem = operatingSystem
pass
def get_price(self):
response = requests.get(self.url)
offers = json.loads(response.text)
# code.interact(local=locals())
SKU = [sku for sku in offers['products'] if offers['products'][sku]['attributes'].get('instanceType') == self.instanceType and offers['products'][sku]['attributes'].get('operatingSystem') == self.operatingSystem][0]
SKU_TERM = [sku_term for sku_term in offers['terms']['OnDemand'][SKU] if offers['terms']['OnDemand'][SKU][sku_term]['sku'] == SKU][0]
        priceDimensionKey = list(offers['terms']['OnDemand'][SKU][SKU_TERM]['priceDimensions'].keys())[0]
price = offers['terms']['OnDemand'][SKU][SKU_TERM]['priceDimensions'][priceDimensionKey]['pricePerUnit']['USD']
return price | python |
"""
Our exception hierarchy:
* HTTPError
x RequestError
+ TransportError
- TimeoutException
· ConnectTimeout
· ReadTimeout
· WriteTimeout
· PoolTimeout
- NetworkError
· ConnectError
· ReadError
· WriteError
· CloseError
- ProtocolError
· LocalProtocolError
· RemoteProtocolError
- ProxyError
- UnsupportedProtocol
+ DecodingError
+ TooManyRedirects
+ RequestBodyUnavailable
x HTTPStatusError
* NotRedirectResponse
* CookieConflict
* StreamError
x StreamConsumed
x ResponseNotRead
x RequestNotRead
x ResponseClosed
"""
import contextlib
import typing
import httpcore
if typing.TYPE_CHECKING:
from ._models import Request, Response # pragma: nocover
class HTTPError(Exception):
"""
Base class for `RequestError` and `HTTPStatusError`.
Useful for `try...except` blocks when issuing a request,
and then calling `.raise_for_status()`.
For example:
```
try:
response = httpx.get("https://www.example.com")
response.raise_for_status()
except httpx.HTTPError as exc:
print(f"HTTP Exception for {exc.request.url} - {exc.message}")
```
"""
def __init__(self, message: str, *, request: "Request") -> None:
super().__init__(message)
self.request = request
class RequestError(HTTPError):
"""
Base class for all exceptions that may occur when issuing a `.request()`.
"""
def __init__(self, message: str, *, request: "Request") -> None:
super().__init__(message, request=request)
class TransportError(RequestError):
"""
Base class for all exceptions that occur at the level of the Transport API.
    All of these exceptions also have an equivalent mapping in `httpcore`.
"""
# Timeout exceptions...
class TimeoutException(TransportError):
"""
The base class for timeout errors.
An operation has timed out.
"""
class ConnectTimeout(TimeoutException):
"""
Timed out while connecting to the host.
"""
class ReadTimeout(TimeoutException):
"""
Timed out while receiving data from the host.
"""
class WriteTimeout(TimeoutException):
"""
Timed out while sending data to the host.
"""
class PoolTimeout(TimeoutException):
"""
Timed out waiting to acquire a connection from the pool.
"""
# Core networking exceptions...
class NetworkError(TransportError):
"""
The base class for network-related errors.
An error occurred while interacting with the network.
"""
class ReadError(NetworkError):
"""
Failed to receive data from the network.
"""
class WriteError(NetworkError):
"""
Failed to send data through the network.
"""
class ConnectError(NetworkError):
"""
Failed to establish a connection.
"""
class CloseError(NetworkError):
"""
Failed to close a connection.
"""
# Other transport exceptions...
class ProxyError(TransportError):
"""
An error occurred while establishing a proxy connection.
"""
class UnsupportedProtocol(TransportError):
"""
Attempted to make a request to an unsupported protocol.
For example issuing a request to `ftp://www.example.com`.
"""
class ProtocolError(TransportError):
"""
The protocol was violated.
"""
class LocalProtocolError(ProtocolError):
"""
A protocol was violated by the client.
For example if the user instantiated a `Request` instance explicitly,
failed to include the mandatory `Host:` header, and then issued it directly
using `client.send()`.
"""
class RemoteProtocolError(ProtocolError):
"""
The protocol was violated by the server.
    For example, returning malformed HTTP.
"""
# Other request exceptions...
class DecodingError(RequestError):
"""
Decoding of the response failed, due to a malformed encoding.
"""
class TooManyRedirects(RequestError):
"""
Too many redirects.
"""
class RequestBodyUnavailable(RequestError):
"""
Had to send the request again, but the request body was streaming, and is
no longer available.
"""
# Client errors
class HTTPStatusError(HTTPError):
"""
The response had an error HTTP status of 4xx or 5xx.
May be raised when calling `response.raise_for_status()`
"""
def __init__(
self, message: str, *, request: "Request", response: "Response"
) -> None:
super().__init__(message, request=request)
self.response = response
class NotRedirectResponse(Exception):
"""
Response was not a redirect response.
May be raised if `response.next()` is called without first
properly checking `response.is_redirect`.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
class CookieConflict(Exception):
"""
Attempted to lookup a cookie by name, but multiple cookies existed.
Can occur when calling `response.cookies.get(...)`.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
# Stream exceptions...
# These may occur as the result of a programming error, by accessing
# the request/response stream in an invalid manner.
class StreamError(Exception):
"""
The base class for stream exceptions.
The developer made an error in accessing the request stream in
an invalid way.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
class StreamConsumed(StreamError):
"""
Attempted to read or stream response content, but the content has already
been streamed.
"""
def __init__(self) -> None:
message = (
"Attempted to read or stream response content, but the content has "
"already been streamed."
)
super().__init__(message)
class ResponseNotRead(StreamError):
"""
Attempted to access response content, without having called `read()`
after a streaming response.
"""
def __init__(self) -> None:
message = (
"Attempted to access response content, without having called `read()` "
"after a streaming response."
)
super().__init__(message)
class RequestNotRead(StreamError):
"""
Attempted to access request content, without having called `read()`.
"""
def __init__(self) -> None:
message = "Attempted to access request content, without having called `read()`."
super().__init__(message)
class ResponseClosed(StreamError):
"""
Attempted to read or stream response content, but the request has been
closed.
"""
def __init__(self) -> None:
message = (
"Attempted to read or stream response content, but the request has "
"been closed."
)
super().__init__(message)
# The `InvalidURL` class is no longer required. It was being used to enforce only
# 'http'/'https' URLs being requested, but is now treated instead at the
# transport layer using `UnsupportedProtocol()`.
# We are currently still exposing this class, but it will be removed in 1.0.
InvalidURL = UnsupportedProtocol
@contextlib.contextmanager
def map_exceptions(
mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],
**kwargs: typing.Any,
) -> typing.Iterator[None]:
try:
yield
except Exception as exc:
mapped_exc = None
for from_exc, to_exc in mapping.items():
if not isinstance(exc, from_exc):
continue
# We want to map to the most specific exception we can find.
# Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to
# `httpx.ReadTimeout`, not just `httpx.TimeoutException`.
if mapped_exc is None or issubclass(to_exc, mapped_exc):
mapped_exc = to_exc
if mapped_exc is None:
raise
message = str(exc)
raise mapped_exc(message, **kwargs) from None # type: ignore
HTTPCORE_EXC_MAP = {
httpcore.TimeoutException: TimeoutException,
httpcore.ConnectTimeout: ConnectTimeout,
httpcore.ReadTimeout: ReadTimeout,
httpcore.WriteTimeout: WriteTimeout,
httpcore.PoolTimeout: PoolTimeout,
httpcore.NetworkError: NetworkError,
httpcore.ConnectError: ConnectError,
httpcore.ReadError: ReadError,
httpcore.WriteError: WriteError,
httpcore.CloseError: CloseError,
httpcore.ProxyError: ProxyError,
httpcore.UnsupportedProtocol: UnsupportedProtocol,
httpcore.ProtocolError: ProtocolError,
httpcore.LocalProtocolError: LocalProtocolError,
httpcore.RemoteProtocolError: RemoteProtocolError,
}
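# Illustrative sketch (not part of the httpx source): HTTPCORE_EXC_MAP is meant
# to be used with map_exceptions() around calls into an httpcore transport, so
# that low-level errors resurface as their httpx equivalents, e.g.
#
#   with map_exceptions(HTTPCORE_EXC_MAP, request=request):
#       response = transport.request(...)  # hypothetical transport call
#
# An httpcore.ReadTimeout raised inside the block is re-raised as ReadTimeout
# (the most specific mapped class), constructed with the original message and
# the `request` keyword argument.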
| python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Juniper MS-MPC generator for capirca."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
from capirca.lib import aclgenerator
from capirca.lib import juniper
from capirca.lib import nacaddr
import six
MAX_IDENTIFIER_LEN = 55 # It is really 63, but leaving room for added chars
class Term(juniper.Term):
"""Representation of an individual Juniper MS-MPC term.
The __str__ method must be implemented.
Args: term policy.Term object
"""
_PLATFORM = 'msmpc'
_DEFAULT_INDENT = 20
_ACTIONS = {'accept': 'accept', 'deny': 'discard', 'reject': 'reject'}
def __init__(self, term, term_type, noverbose, filter_name):
self.term = term
self.term_type = term_type
self.noverbose = noverbose
self.filter_name = filter_name
def __str__(self):
# Verify platform specific terms. Skip whole term if platform does not
# match.
if self.term.platform:
if self._PLATFORM not in self.term.platform:
return ''
if self.term.platform_exclude:
if self._PLATFORM in self.term.platform_exclude:
return ''
ret_str = juniper.Config(indent=self._DEFAULT_INDENT)
# COMMENTS
# this deals just fine with multi line comments, but we could probably
# output them a little cleaner; do things like make sure the
# len(output) < 80, etc. Note, if 'noverbose' is set for the filter, skip
# all comment processing.
if not self.noverbose:
if self.term.owner:
self.term.comment.append('Owner: %s' % self.term.owner)
if self.term.comment:
ret_str.Append('/*')
for comment in self.term.comment:
for line in comment.split('\n'):
ret_str.Append('** ' + line)
ret_str.Append('*/')
# Term verbatim output - this will skip over normal term creation
# code. Warning generated from policy.py if appropriate.
if self.term.verbatim:
for next_term in self.term.verbatim:
if next_term[0] == self._PLATFORM:
ret_str.Append(str(next_term[1]), verbatim=True)
return str(ret_str)
# Determine whether there are any match conditions for the term.
has_match_criteria = (
self.term.address or self.term.dscp_except or self.term.dscp_match or
self.term.destination_address or self.term.destination_port or
self.term.destination_prefix or self.term.destination_prefix_except or
self.term.encapsulate or self.term.ether_type or
self.term.flexible_match_range or self.term.forwarding_class or
self.term.forwarding_class_except or self.term.fragment_offset or
self.term.hop_limit or self.term.next_ip or self.term.port or
self.term.precedence or self.term.protocol or
self.term.protocol_except or self.term.source_address or
self.term.source_port or self.term.source_prefix or
self.term.source_prefix_except or self.term.traffic_type or
self.term.ttl)
suffixes = []
duplicate_term = False
if self.term_type == 'mixed':
if not (self.term.GetAddressOfVersion('source_address',
self.AF_MAP.get('inet6')) or
self.term.GetAddressOfVersion('source_address_exclude',
self.AF_MAP.get('inet6')) or
self.term.GetAddressOfVersion('destination_address',
self.AF_MAP.get('inet6')) or
self.term.GetAddressOfVersion('destination_address_exclude',
self.AF_MAP.get('inet6'))):
suffixes = ['inet']
elif not (self.term.GetAddressOfVersion('source_address',
self.AF_MAP.get('inet')) or
self.term.GetAddressOfVersion('source_address_exclude',
self.AF_MAP.get('inet')) or
self.term.GetAddressOfVersion('destination_address',
self.AF_MAP.get('inet')) or
self.term.GetAddressOfVersion('destination_address_exclude',
self.AF_MAP.get('inet'))):
suffixes = ['inet6']
else:
suffixes = ['inet', 'inet6']
duplicate_term = True
if not suffixes:
suffixes = [self.term_type]
for suffix in suffixes:
source_address = self.term.GetAddressOfVersion('source_address',
self.AF_MAP.get(suffix))
source_address_exclude = self.term.GetAddressOfVersion(
'source_address_exclude', self.AF_MAP.get(suffix))
source_address, source_address_exclude = self._MinimizePrefixes(
source_address, source_address_exclude)
destination_address = self.term.GetAddressOfVersion(
'destination_address', self.AF_MAP.get(suffix))
destination_address_exclude = self.term.GetAddressOfVersion(
'destination_address_exclude', self.AF_MAP.get(suffix))
destination_address, destination_address_exclude = self._MinimizePrefixes(
destination_address, destination_address_exclude)
if ((not source_address) and self.term.GetAddressOfVersion(
'source_address', self.AF_MAP.get('mixed')) and
not source_address_exclude) or (
(not destination_address) and self.term.GetAddressOfVersion(
'destination_address', self.AF_MAP.get('mixed')) and
not destination_address_exclude):
continue
if ('icmp' in self.term.protocol and
suffix == 'inet6') or ('icmpv6' in self.term.protocol and
suffix == 'inet'):
logging.debug(
self.NO_AF_LOG_PROTO.substitute(
term=self.term.name,
proto=', '.join(self.term.protocol),
af=suffix))
continue
# NAME
# if the term is inactive we have to set the prefix
if self.term.inactive:
term_prefix = 'inactive:'
else:
term_prefix = ''
ret_str.Append(
'%s term %s%s {' %
(term_prefix, self.term.name, '-' + suffix if duplicate_term else ''))
# We only need a "from {" clause if there are any conditions to match.
if has_match_criteria:
ret_str.Append('from {')
# SOURCE ADDRESS
if source_address or source_address_exclude:
ret_str.Append('source-address {')
if source_address:
for saddr in source_address:
for comment in self._Comment(saddr):
ret_str.Append('%s' % comment)
if saddr.version == 6 and 0 < saddr.prefixlen < 16:
for saddr2 in saddr.subnets(new_prefix=16):
ret_str.Append('%s;' % saddr2)
else:
if saddr == nacaddr.IPv6('0::0/0'):
saddr = 'any-ipv6'
elif saddr == nacaddr.IPv4('0.0.0.0/0'):
saddr = 'any-ipv4'
ret_str.Append('%s;' % saddr)
# SOURCE ADDRESS EXCLUDE
if source_address_exclude:
for ex in source_address_exclude:
for comment in self._Comment(ex):
ret_str.Append('%s' % comment)
if ex.version == 6 and 0 < ex.prefixlen < 16:
for ex2 in ex.subnets(new_prefix=16):
ret_str.Append('%s except;' % ex2)
else:
if ex == nacaddr.IPv6('0::0/0'):
ex = 'any-ipv6'
elif ex == nacaddr.IPv4('0.0.0.0/0'):
ex = 'any-ipv4'
ret_str.Append('%s except;' % ex)
ret_str.Append('}') # source-address {...}
# DESTINATION ADDRESS
if destination_address or destination_address_exclude:
ret_str.Append('destination-address {')
if destination_address:
for daddr in destination_address:
for comment in self._Comment(daddr):
ret_str.Append('%s' % comment)
if daddr.version == 6 and 0 < daddr.prefixlen < 16:
for daddr2 in daddr.subnets(new_prefix=16):
ret_str.Append('%s;' % daddr2)
else:
if daddr == nacaddr.IPv6('0::0/0'):
daddr = 'any-ipv6'
elif daddr == nacaddr.IPv4('0.0.0.0/0'):
daddr = 'any-ipv4'
ret_str.Append('%s;' % daddr)
# DESTINATION ADDRESS EXCLUDE
if destination_address_exclude:
for ex in destination_address_exclude:
for comment in self._Comment(ex):
ret_str.Append('%s' % comment)
if ex.version == 6 and 0 < ex.prefixlen < 16:
for ex2 in ex.subnets(new_prefix=16):
ret_str.Append('%s except;' % ex2)
else:
if ex == nacaddr.IPv6('0::0/0'):
ex = 'any-ipv6'
elif ex == nacaddr.IPv4('0.0.0.0/0'):
ex = 'any-ipv4'
ret_str.Append('%s except;' % ex)
ret_str.Append('}') # destination-address {...}
# source prefix <except> list
if self.term.source_prefix or self.term.source_prefix_except:
for pfx in self.term.source_prefix:
ret_str.Append('source-prefix-list ' + pfx + ';')
for epfx in self.term.source_prefix_except:
ret_str.Append('source-prefix-list ' + epfx + ' except;')
# destination prefix <except> list
if self.term.destination_prefix or self.term.destination_prefix_except:
for pfx in self.term.destination_prefix:
ret_str.Append('destination-prefix-list ' + pfx + ';')
for epfx in self.term.destination_prefix_except:
ret_str.Append('destination-prefix-list ' + epfx + ' except;')
# APPLICATION
if (self.term.source_port or self.term.destination_port or
self.term.icmp_type or self.term.protocol):
if hasattr(self.term, 'replacement_application_name'):
ret_str.Append('application-sets ' +
self.term.replacement_application_name + '-app;')
else:
ret_str.Append('application-sets ' +
self.filter_name[:((MAX_IDENTIFIER_LEN) // 2)] +
self.term.name[-((MAX_IDENTIFIER_LEN) // 2):] +
'-app;')
ret_str.Append('}') # from {...}
ret_str.Append('then {')
# ACTION
for action in self.term.action:
ret_str.Append(self._ACTIONS.get(str(action)) + ';')
if self.term.logging and 'disable' not in [
x.value for x in self.term.logging
]:
ret_str.Append('syslog;')
ret_str.Append('}') # then {...}
ret_str.Append('}') # term {...}
return str(ret_str)
class JuniperMSMPC(aclgenerator.ACLGenerator):
"""Juniper MSMPC rendering class.
This class takes a policy object and renders output into
  a syntax which is understood by Juniper routers with MS-MPC cards.
Args:
pol: policy.Policy object
"""
_PLATFORM = 'msmpc'
SUFFIX = '.msmpc'
_SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed'))
_AF_MAP = {'inet': 4, 'inet6': 6, 'mixed': None}
_AF_ICMP_MAP = {'icmp': 'inet', 'icmpv6': 'inet6'}
_SUPPORTED_DIRECTION = {
'': 'input-output',
'ingress': 'input',
'egress': 'output',
}
_OPTIONAL_SUPPORTED_KEYWORDS = frozenset([
'expiration',
])
def __init__(self, pol, exp_info):
self.applications = {}
super(JuniperMSMPC, self).__init__(pol, exp_info)
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
tuple containing both supported tokens and sub tokens
"""
supported_tokens, supported_sub_tokens = super(JuniperMSMPC,
self)._BuildTokens()
supported_tokens |= {
'destination_prefix', 'destination_prefix_except', 'icmp_code',
'logging', 'owner', 'source_prefix', 'source_prefix_except'
}
supported_sub_tokens.update({
'option': {
'established',
# TODO(sneakywombat): add all options to lex.
'.*', # make ArbitraryOptions work, yolo.
'tcp-established',
'inactive'
}
})
return supported_tokens, supported_sub_tokens
def _BuildPort(self, ports):
"""Transform specified ports into list and ranges.
Args:
ports: a policy terms list of ports
Returns:
port_list: list of ports and port ranges
"""
port_list = []
for p in ports:
if p[0] == p[1]:
port_list.append(str(p[0]))
else:
port_list.append('%s-%s' % (str(p[0]), str(p[1])))
return port_list
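  # Illustrative example of the transformation described above:
  #   _BuildPort([(80, 80), (8080, 8090)]) -> ['80', '8080-8090']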
def _GenerateApplications(self, filter_name):
target = []
apps_set_list = []
target.append('applications {')
done_apps = []
for app in sorted(self.applications[filter_name], key=lambda x: x['name']):
app_list = []
if app in done_apps:
continue
if app['protocol'] or app['sport'] or app['dport'] or app['icmp-type']:
# generate ICMP statements
if app['icmp-type']:
if app['timeout']:
timeout = app['timeout']
else:
timeout = 60
num_terms = len(app['protocol']) * len(app['icmp-type'])
apps_set_list.append('application-set ' + app['name'] + '-app {')
for i in range(num_terms):
apps_set_list.append('application ' + app['name'] + '-app%d' %
(i + 1) + ';')
apps_set_list.append('}') # application-set {...}
term_counter = 0
for i, code in enumerate(app['icmp-type']):
for proto in app['protocol']:
target.append('application ' + app['name'] + '-app%d' %
(term_counter + 1) + ' {')
if proto == 'icmp':
target.append('application-protocol %s;' % proto)
target.append('protocol %s;' % proto)
target.append('%s-type %s;' % (proto, str(code)))
if app['icmp-code']:
target.append('%s-code %s;' %
(proto, self._Group(app['icmp-code'])))
if int(timeout):
target.append('inactivity-timeout %s;' % int(timeout))
target.append('}') # application {...}
term_counter += 1
# generate non-ICMP statements
else:
i = 1
apps_set_list.append('application-set ' + app['name'] + '-app {')
for proto in app['protocol'] or ['']:
for sport in app['sport'] or ['']:
for dport in app['dport'] or ['']:
chunks = []
if proto:
# MSMPC does not like proto vrrp
if proto == 'vrrp':
proto = '112'
chunks.append('protocol %s;' % proto)
if sport and ('udp' in proto or 'tcp' in proto):
chunks.append('source-port %s;' % sport)
if dport and ('udp' in proto or 'tcp' in proto):
chunks.append('destination-port %s;' % dport)
if app['timeout']:
chunks.append(' inactivity-timeout %d;' % int(app['timeout']))
if chunks:
apps_set_list.append('application ' + app['name'] +
'-app%d;' % i)
app_list.append('application ' + app['name'] + '-app%d {' % i)
for chunk in chunks:
app_list.append(chunk)
app_list.append('}')
i += 1
apps_set_list.append('}')
done_apps.append(app)
if app_list:
for item in app_list:
target.append(item)
for item in apps_set_list:
target.append(item)
target.append('}')
# Return the output only if there is content inside of
# the "applications {\n}" lines, otherwise return nothing.
if len(target) > 2:
return target
else:
return []
def _TranslatePolicy(self, pol, exp_info):
current_date = datetime.date.today()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
self.junipermsmpc_policies = []
for header, terms in pol.filters:
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)
filter_name = header.FilterName(self._PLATFORM)
filter_options.remove(filter_name)
filter_direction = None
filter_type = None
noverbose = 'noverbose' in filter_options
self.applications[filter_name] = []
if noverbose:
# noverbose is a strict boolean, remove it
# prior to iterating through the other options
# that require additional processing.
filter_options.remove('noverbose')
for filter_opt in filter_options:
# validate address families
if filter_opt in self._SUPPORTED_AF:
if not filter_type:
filter_type = filter_opt
continue
else:
raise ConflictingTargetOptionsError(
'only one address family can be '
'specified per header "%s"' % ' '.join(filter_options))
# validate direction
if filter_opt in self._SUPPORTED_DIRECTION.keys():
if not filter_direction:
filter_direction = self._SUPPORTED_DIRECTION.get(filter_opt)
continue
else:
raise ConflictingTargetOptionsError('only one direction can be '
'specified per header "%s"' %
' '.join(filter_options))
raise UnsupportedHeaderError(
'MSMPC Generator currently does not support '
'%s as a header option "%s"' %
(filter_opt, ' '.join(filter_options)))
if not filter_direction:
filter_direction = self._SUPPORTED_DIRECTION.get('')
if not filter_type:
filter_type = 'mixed'
term_names = set()
new_terms = []
for term in terms:
# Application sets need to be unique system-wide, so we construct
# a name from a combination of the filter and term names, shortening
# to the roughly half of the max identifier length for each part.
# When shortening, we take the start of the filter name and the end of
# the term name in a hope that we omit the most common bits
# like -inbound and accept-.
modified_term_name = filter_name[:(
(MAX_IDENTIFIER_LEN) // 2)] + term.name[-(
(MAX_IDENTIFIER_LEN) // 2):]
if term.stateless_reply:
logging.warning(
"WARNING: Term %s is a stateless reply term and will not be "
"rendered.", term.name)
continue
if set(['established', 'tcp-established']).intersection(term.option):
logging.debug(
'Skipping established term %s because MSMPC is stateful.',
term.name)
continue
# if inactive is set, deactivate the term and remove the option.
if 'inactive' in term.option:
term.inactive = True
term.option.remove('inactive')
if term.name in term_names:
raise JuniperMSMPCFilterError('Duplicate term name')
term_names.add(term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info(
'INFO: Term %s in policy %s expires '
'in less than two weeks.', term.name, filter_name)
if term.expiration <= current_date:
logging.warning(
'WARNING: Term %s in policy %s is expired and '
'will not be rendered.', term.name, filter_name)
continue
new_term = Term(term, filter_type, noverbose, filter_name)
new_terms.append(new_term)
        # Because MSMPC terms can contain both inet and inet6 addresses, we need
        # the ability to recover the proper AF for the ICMP type in use.
        # If protocol is empty or we cannot map to inet or inet6, we insert a bogus
        # af_type name, which will cause new_term.NormalizeIcmpTypes to fail.
if not term.protocol:
icmp_af_type = 'unknown_af_icmp'
else:
icmp_af_type = self._AF_ICMP_MAP.get(term.protocol[0],
'unknown_af_icmp')
tmp_icmptype = new_term.NormalizeIcmpTypes(term.icmp_type,
term.protocol, icmp_af_type)
# NormalizeIcmpTypes returns [''] for empty, convert to [] for eval
normalized_icmptype = tmp_icmptype if tmp_icmptype != [''] else []
# rewrites the protocol icmpv6 to icmp6
if 'icmpv6' in term.protocol:
protocol = list(term.protocol)
protocol[protocol.index('icmpv6')] = 'icmp6'
else:
protocol = term.protocol
# MSMPC requires tcp and udp to specify ports, rather than imply all
# ports
if 'udp' in term.protocol or 'tcp' in term.protocol:
if not term.source_port and not term.destination_port:
term.destination_port = [[1, 65535]]
new_application_set = {
'sport': self._BuildPort(term.source_port),
'dport': self._BuildPort(term.destination_port),
'protocol': protocol,
'icmp-type': normalized_icmptype,
'icmp-code': term.icmp_code,
'timeout': term.timeout
}
for application_set in self.applications[filter_name]:
if all(
item in list(application_set.items())
for item in new_application_set.items()):
new_application_set = ''
term.replacement_application_name = application_set['name']
break
if (modified_term_name == application_set['name'] and
new_application_set != application_set):
raise ConflictingApplicationSetsError(
'Application set %s has a conflicting entry' %
modified_term_name)
if new_application_set:
new_application_set['name'] = modified_term_name
self.applications[filter_name].append(new_application_set)
self.junipermsmpc_policies.append(
(header, filter_name, filter_direction, new_terms))
def _Group(self, group, lc=True):
"""If 1 item return it, else return [ item1 item2 ].
Args:
group: a list. could be a list of strings (protocols) or a list of tuples
(ports)
lc: return a lower cased result for text. Default is True.
Returns:
rval: a string surrounded by '[' and '];' if len(group) > 1
or with just ';' appended if len(group) == 1
"""
def _FormattedGroup(el, lc=True):
"""Return the actual formatting of an individual element.
Args:
el: either a string (protocol) or a tuple (ports)
lc: return lower cased result for text. Default is True.
Returns:
string: either the lower()'ed string or the ports, hyphenated
if they're a range, or by itself if it's not.
"""
if isinstance(el, str) or isinstance(el, six.text_type):
if not lc:
return el
else:
return el.lower()
elif isinstance(el, int):
return str(el)
# type is a tuple below here
elif el[0] == el[1]:
return '%d' % el[0]
else:
return '%d-%d' % (el[0], el[1])
if len(group) > 1:
rval = '[ ' + ' '.join([_FormattedGroup(x, lc=lc) for x in group]) + ' ];'
else:
rval = _FormattedGroup(group[0], lc=lc) + ';'
return rval
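  # Illustrative examples of the grouping rules above:
  #   _Group(['tcp'])          -> 'tcp;'
  #   _Group([(1024, 65535)])  -> '1024-65535;'
  #   _Group(['TCP', 'udp'])   -> '[ tcp udp ];'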
def __str__(self):
target = juniper.Config()
for (header, filter_name, filter_direction,
terms) in self.junipermsmpc_policies:
target.Append('groups {')
target.Append('replace:')
target.Append('/*')
# we want the acl to contain id and date tags, but p4 will expand
# the tags here when we submit the generator, so we have to trick
# p4 into not knowing these words. like taking c-a-n-d-y from a
# baby.
for line in aclgenerator.AddRepositoryTags('** '):
target.Append(line)
target.Append('**')
for comment in header.comment:
for line in comment.split('\n'):
target.Append('** ' + line)
target.Append('*/')
target.Append('%s {' % filter_name)
target.Append('services {')
target.Append('stateful-firewall {')
target.Append('rule %s {' % filter_name)
target.Append('match-direction %s;' % filter_direction)
for term in terms:
term_str = str(term)
if term_str:
target.Append(term_str, verbatim=True)
target.Append('}') # rule { ... }
target.Append('}') # stateful-firewall { ... }
target.Append('}') # services { ... }
for line in self._GenerateApplications(filter_name):
target.Append(line)
target.Append('}') # filter_name { ... }
target.Append('}') # groups { ... }
target.Append('apply-groups %s;' % filter_name)
return str(target) + '\n'
class Error(Exception):
pass
class JuniperMSMPCFilterError(Error):
pass
class ConflictingApplicationSetsError(Error):
pass
class ConflictingTargetOptionsError(Error):
pass
class UnsupportedHeaderError(Error):
pass
| python |
"""
GMail! Woo!
"""
__title__ = 'gmail'
__version__ = '0.1'
__author__ = 'Charlie Guo'
__build__ = 0x0001
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Charlie Guo'
from gmail import Gmail
from mailbox import Mailbox
from message import Message
from utils import login, authenticate
| python |
import pytest
from pype import *
from timeseries import *
__author__ = "Mynti207"
__copyright__ = "Mynti207"
__license__ = "mit"
def test_lexer():
# sample data
data = '''
3 + 4 * 10
+ -20 *2
'''
# pass data to lexer and tokenize
lexer.input(data)
for tok in lexer:
assert isinstance(tok, ply.lex.LexToken)
# sample data
data = '''
# sample comment
x := 3 + 42 * (s - t)
'''
# pass data to lexer and tokenize
lexer.input(data)
for tok in lexer:
assert isinstance(tok, ply.lex.LexToken)
# sample data
data = '''
# sample comment
sample_string = "bla"
'''
# pass data to lexer and tokenize
lexer.input(data)
for tok in lexer:
assert isinstance(tok, ply.lex.LexToken)
| python |
# -*- coding:utf-8; -*-
class SolutionV1:
def letterCombinations(self, digits):
        # 1. Define a list that collects the final strings
        result = []
        # 2. Then define a recursive function that builds the qualifying strings.
        #    How to choose the recursion parameters:
        #    i is the recursion depth (its exact meaning becomes clear as we go).
        #    digits is the digit string being translated, since generating the
        #    letter strings obviously depends on it.
        def helper(i, digits, s):
            # 3. Start from the recursion template
            # 1) Termination condition
            # 5. From the problem statement, we stop once the string length equals
            #    len(digits), so the partial string s must also be a recursion parameter
            if len(s) == len(digits):
                # 4. When the condition holds, record the string
                result.append(s)
                return
            # 2) Process the current level
            # 6. The current level appends each letter of digits[i] to s. Every digit
            #    maps to several letters, so we need a lookup table and iterate over
            #    the letters of the current digit
digitAlpha = {
"2": ["a", "b", "c"],
"3": ["d", "e", "f"],
"4": ["g", "h", "i"],
"5": ["j", "k", "l"],
"6": ["m", "n", "o"],
"7": ["p", "q", "r", "s"],
"8": ["t", "u", "v"],
"9": ["w", "x", "y", "z"],
}
newS = []
for c in digitAlpha[digits[i]]:
newS.append(s + c)
            # 3) Recurse into the next level: call the helper on every newly built s
            for s in newS:
                helper(i + 1, digits, s)
            # 4) Clean up the current level: nothing needs cleaning here
helper(0, digits, "")
return result
class Solution:
""" 从语言层面优化一下v1代码
"""
def letterCombinations(self, digits):
if not digits:
return []
digitAlpha = {
"2": ["a", "b", "c"],
"3": ["d", "e", "f"],
"4": ["g", "h", "i"],
"5": ["j", "k", "l"],
"6": ["m", "n", "o"],
"7": ["p", "q", "r", "s"],
"8": ["t", "u", "v"],
"9": ["w", "x", "y", "z"],
}
result = []
def helper(i, digits, s):
if len(s) == len(digits):
result.append(s)
return
for c in digitAlpha[digits[i]]:
helper(i + 1, digits, s + c)
helper(0, digits, "")
return result
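# Illustrative usage (hypothetical driver code, not part of the original file):
#   Solution().letterCombinations("23")
#   -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']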
| python |
##
## This file is part of the libsigrok project.
##
## Copyright (C) 2013 Martin Ling <[email protected]>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from functools import partial
from fractions import Fraction
from .lowlevel import *
from . import lowlevel
import itertools
__all__ = ['Error', 'Context', 'Driver', 'Device', 'Session', 'Packet', 'Log',
'LogLevel', 'PacketType', 'Quantity', 'Unit', 'QuantityFlag', 'ConfigKey',
'ProbeType', 'Probe', 'ProbeGroup']
class Error(Exception):
def __str__(self):
return sr_strerror(self.args[0])
def check(result):
if result != SR_OK:
raise Error(result)
def gvariant_to_python(value):
type_string = g_variant_get_type_string(value)
if type_string == 't':
return g_variant_get_uint64(value)
if type_string == 'b':
return g_variant_get_bool(value)
if type_string == 'd':
return g_variant_get_double(value)
if type_string == 's':
return g_variant_get_string(value, None)
if type_string == '(tt)':
return Fraction(
g_variant_get_uint64(g_variant_get_child_value(value, 0)),
g_variant_get_uint64(g_variant_get_child_value(value, 1)))
raise NotImplementedError(
"Can't convert GVariant type '%s' to a Python type." % type_string)
def python_to_gvariant(value):
    # bool must be checked before int, because bool is a subclass of int
    if isinstance(value, bool):
        return g_variant_new_boolean(value)
    if isinstance(value, int):
        return g_variant_new_uint64(value)
if isinstance(value, float):
return g_variant_new_double(value)
if isinstance(value, str):
return g_variant_new_string(value)
if isinstance(value, Fraction):
array = new_gvariant_ptr_array(2)
gvariant_ptr_array_setitem(array, 0,
g_variant_new_uint64(value.numerator))
gvariant_ptr_array_setitem(array, 1,
g_variant_new_uint64(value.denominator))
result = g_variant_new_tuple(array, 2)
delete_gvariant_ptr_array(array)
return result
raise NotImplementedError(
"Can't convert Python '%s' to a GVariant." % type(value))
def callback_wrapper(session, callback, device_ptr, packet_ptr):
device = session.context._devices[int(device_ptr.this)]
packet = Packet(session, packet_ptr)
callback(device, packet)
class Context(object):
def __init__(self):
context_ptr_ptr = new_sr_context_ptr_ptr()
check(sr_init(context_ptr_ptr))
self.struct = sr_context_ptr_ptr_value(context_ptr_ptr)
self._drivers = None
self._devices = {}
self.session = None
def __del__(self):
sr_exit(self.struct)
@property
def drivers(self):
if not self._drivers:
self._drivers = {}
driver_list = sr_driver_list()
for i in itertools.count():
driver_ptr = sr_dev_driver_ptr_array_getitem(driver_list, i)
if driver_ptr:
self._drivers[driver_ptr.name] = Driver(self, driver_ptr)
else:
break
return self._drivers
class Driver(object):
def __init__(self, context, struct):
self.context = context
self.struct = struct
self._initialized = False
@property
def name(self):
return self.struct.name
def scan(self, **kwargs):
if not self._initialized:
check(sr_driver_init(self.context.struct, self.struct))
self._initialized = True
options = []
for name, value in kwargs.items():
key = getattr(ConfigKey, name.upper())
src = sr_config()
src.key = key.id
src.data = python_to_gvariant(value)
options.append(src.this)
option_list = python_to_gslist(options)
device_list = sr_driver_scan(self.struct, option_list)
g_slist_free(option_list)
devices = [Device(self, gpointer_to_sr_dev_inst_ptr(ptr))
for ptr in gslist_to_python(device_list)]
g_slist_free(device_list)
return devices
class Device(object):
def __new__(cls, driver, struct):
address = int(struct.this)
if address not in driver.context._devices:
            device = super(Device, cls).__new__(cls)
driver.context._devices[address] = device
return driver.context._devices[address]
def __init__(self, driver, struct):
self.driver = driver
self.struct = struct
self._probes = None
self._probe_groups = None
def __getattr__(self, name):
key = getattr(ConfigKey, name.upper())
data = new_gvariant_ptr_ptr()
try:
check(sr_config_get(self.driver.struct, self.struct, None,
key, data))
except Error as error:
if error.errno == SR_ERR_NA:
raise NotImplementedError(
"Device does not implement %s" % name)
else:
raise AttributeError
value = gvariant_ptr_ptr_value(data)
return gvariant_to_python(value)
def __setattr__(self, name, value):
try:
key = getattr(ConfigKey, name.upper())
except AttributeError:
super(Device, self).__setattr__(name, value)
return
check(sr_config_set(self.struct, None, key, python_to_gvariant(value)))
@property
def vendor(self):
return self.struct.vendor
@property
def model(self):
return self.struct.model
@property
def version(self):
return self.struct.version
@property
def probes(self):
if self._probes is None:
self._probes = {}
probe_list = self.struct.probes
while (probe_list):
probe_ptr = void_ptr_to_sr_probe_ptr(probe_list.data)
self._probes[probe_ptr.name] = Probe(self, probe_ptr)
probe_list = probe_list.next
return self._probes
@property
def probe_groups(self):
if self._probe_groups is None:
self._probe_groups = {}
probe_group_list = self.struct.probe_groups
while (probe_group_list):
probe_group_ptr = void_ptr_to_sr_probe_group_ptr(
probe_group_list.data)
self._probe_groups[probe_group_ptr.name] = ProbeGroup(self,
probe_group_ptr)
probe_group_list = probe_group_list.next
return self._probe_groups
class Probe(object):
def __init__(self, device, struct):
self.device = device
self.struct = struct
@property
def type(self):
return ProbeType(self.struct.type)
@property
def enabled(self):
return self.struct.enabled
@property
def name(self):
return self.struct.name
class ProbeGroup(object):
def __init__(self, device, struct):
self.device = device
self.struct = struct
self._probes = None
def __iter__(self):
return iter(self.probes)
def __getattr__(self, name):
key = config_key(name)
data = new_gvariant_ptr_ptr()
try:
check(sr_config_get(self.device.driver.struct, self.device.struct,
self.struct, key, data))
except Error as error:
if error.errno == SR_ERR_NA:
raise NotImplementedError(
"Probe group does not implement %s" % name)
else:
raise AttributeError
value = gvariant_ptr_ptr_value(data)
return gvariant_to_python(value)
def __setattr__(self, name, value):
try:
key = config_key(name)
except AttributeError:
super(ProbeGroup, self).__setattr__(name, value)
return
check(sr_config_set(self.device.struct, self.struct,
key, python_to_gvariant(value)))
@property
def name(self):
return self.struct.name
@property
def probes(self):
if self._probes is None:
self._probes = []
probe_list = self.struct.probes
while (probe_list):
probe_ptr = void_ptr_to_sr_probe_ptr(probe_list.data)
self._probes.append(Probe(self, probe_ptr))
probe_list = probe_list.next
return self._probes
class Session(object):
def __init__(self, context):
assert context.session is None
self.context = context
self.struct = sr_session_new()
context.session = self
def __del__(self):
check(sr_session_destroy())
def add_device(self, device):
check(sr_session_dev_add(device.struct))
def open_device(self, device):
check(sr_dev_open(device.struct))
def add_callback(self, callback):
wrapper = partial(callback_wrapper, self, callback)
check(sr_session_datafeed_python_callback_add(wrapper))
def start(self):
check(sr_session_start())
def run(self):
check(sr_session_run())
def stop(self):
check(sr_session_stop())
class Packet(object):
def __init__(self, session, struct):
self.session = session
self.struct = struct
self._payload = None
@property
def type(self):
return PacketType(self.struct.type)
@property
def payload(self):
if self._payload is None:
pointer = self.struct.payload
if self.type == PacketType.LOGIC:
self._payload = Logic(self,
void_ptr_to_sr_datafeed_logic_ptr(pointer))
elif self.type == PacketType.ANALOG:
self._payload = Analog(self,
void_ptr_to_sr_datafeed_analog_ptr(pointer))
else:
raise NotImplementedError(
"No Python mapping for packet type %s" % self.struct.type)
return self._payload
class Logic(object):
def __init__(self, packet, struct):
self.packet = packet
self.struct = struct
self._data = None
@property
def data(self):
if self._data is None:
self._data = cdata(self.struct.data, self.struct.length)
return self._data
class Analog(object):
def __init__(self, packet, struct):
self.packet = packet
self.struct = struct
self._data = None
@property
def num_samples(self):
return self.struct.num_samples
@property
def mq(self):
return Quantity(self.struct.mq)
@property
def unit(self):
return Unit(self.struct.unit)
@property
def mqflags(self):
return QuantityFlag.set_from_mask(self.struct.mqflags)
@property
def data(self):
if self._data is None:
self._data = float_array.frompointer(self.struct.data)
return self._data
class Log(object):
@property
def level(self):
return LogLevel(sr_log_loglevel_get())
@level.setter
def level(self, l):
check(sr_log_loglevel_set(l.id))
@property
def domain(self):
return sr_log_logdomain_get()
@domain.setter
def domain(self, d):
check(sr_log_logdomain_set(d))
class EnumValue(object):
_enum_values = {}
def __new__(cls, id):
if cls not in cls._enum_values:
cls._enum_values[cls] = {}
if id not in cls._enum_values[cls]:
value = super(EnumValue, cls).__new__(cls)
value.id = id
cls._enum_values[cls][id] = value
return cls._enum_values[cls][id]
class LogLevel(EnumValue):
pass
class PacketType(EnumValue):
pass
class Quantity(EnumValue):
pass
class Unit(EnumValue):
pass
class QuantityFlag(EnumValue):
@classmethod
def set_from_mask(cls, mask):
result = set()
while mask:
new_mask = mask & (mask - 1)
result.add(cls(mask ^ new_mask))
mask = new_mask
return result
class ConfigKey(EnumValue):
pass
class ProbeType(EnumValue):
pass
for symbol_name in dir(lowlevel):
for prefix, cls in [
('SR_LOG_', LogLevel),
('SR_DF_', PacketType),
('SR_MQ_', Quantity),
('SR_UNIT_', Unit),
('SR_MQFLAG_', QuantityFlag),
('SR_CONF_', ConfigKey),
('SR_PROBE_', ProbeType)]:
if symbol_name.startswith(prefix):
name = symbol_name[len(prefix):]
value = getattr(lowlevel, symbol_name)
setattr(cls, name, cls(value))
| python |
import os
import shutil
import audeer
import audformat
import audiofile as af
import pandas as pd
src_dir = 'src'
build_dir = audeer.mkdir('build')
# Prepare functions for getting information from file names
def parse_names(names, from_i, to_i, is_number=False, mapping=None):
for name in names:
key = name[from_i:to_i]
if is_number:
key = int(key)
yield mapping[key] if mapping else key
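# emodb file names encode the metadata, e.g. '03a01Fa': speaker '03'
# (chars 0-2), transcription 'a01' (chars 2-5), emotion letter 'F' (char 5).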
# Gather metadata
description = (
'Berlin Database of Emotional Speech. '
'A German database of emotional utterances '
'spoken by actors '
'recorded as a part of the DFG funded research project '
'SE462/3-1 in 1997 and 1999. '
'Recordings took place in the anechoic chamber '
'of the Technical University Berlin, '
'department of Technical Acoustics. '
'It contains about 500 utterances '
'from ten different actors '
'expressing basic six emotions and neutral.'
)
files = sorted(
[os.path.join('wav', f) for f in os.listdir(os.path.join(src_dir, 'wav'))]
)
names = [audeer.basename_wo_ext(f) for f in files]
emotion_mapping = {
'W': 'anger',
'L': 'boredom',
'E': 'disgust',
'A': 'fear',
'F': 'happiness',
'T': 'sadness',
'N': 'neutral',
}
emotions = list(parse_names(names, from_i=5, to_i=6, mapping=emotion_mapping))
y = pd.read_csv(
os.path.join(src_dir, 'erkennung.txt'),
usecols=['Satz', 'erkannt'],
index_col='Satz',
delim_whitespace=True,
encoding='Latin-1',
decimal=',',
converters={'Satz': lambda x: os.path.join('wav', x)},
squeeze=True,
)
y = y.loc[files]
y = y.replace(to_replace=u'\xa0', value='', regex=True)
y = y.replace(to_replace=',', value='.', regex=True)
confidences = y.astype('float').values
male = audformat.define.Gender.MALE
female = audformat.define.Gender.FEMALE
language = audformat.utils.map_language('de')
speaker_mapping = {
3: {'gender': male, 'age': 31, 'language': language},
8: {'gender': female, 'age': 34, 'language': language},
9: {'gender': male, 'age': 21, 'language': language},
10: {'gender': female, 'age': 32, 'language': language},
11: {'gender': male, 'age': 26, 'language': language},
12: {'gender': female, 'age': 30, 'language': language},
13: {'gender': male, 'age': 32, 'language': language},
14: {'gender': female, 'age': 35, 'language': language},
15: {'gender': male, 'age': 25, 'language': language},
16: {'gender': female, 'age': 31, 'language': language},
}
speakers = list(parse_names(names, from_i=0, to_i=2, is_number=True))
transcription_mapping = {
'a01': 'Der Lappen liegt auf dem Eisschrank.',
'a02': 'Das will sie am Mittwoch abgeben.',
'a04': 'Heute abend könnte ich es ihm sagen.',
'a05': 'Das schwarze Stück Papier befindet sich da oben neben dem '
'Holzstück.',
'a07': 'In sieben Stunden wird es soweit sein.',
'b01': 'Was sind denn das für Tüten, die da unter dem Tisch '
'stehen.',
'b02': 'Sie haben es gerade hochgetragen und jetzt gehen sie '
'wieder runter.',
'b03': 'An den Wochenenden bin ich jetzt immer nach Hause '
'gefahren und habe Agnes besucht.',
'b09': 'Ich will das eben wegbringen und dann mit Karl was '
'trinken gehen.',
'b10': 'Die wird auf dem Platz sein, wo wir sie immer hinlegen.',
}
transcriptions = list(parse_names(names, from_i=2, to_i=5))
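# Read durations in parallel; each params entry is an (args, kwargs) pair
# passed to task_func, following the audeer.run_tasks convention.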
durations = audeer.run_tasks(
task_func=lambda x: pd.to_timedelta(
af.duration(os.path.join(src_dir, x)),
unit='s',
),
params=[([f], {}) for f in files],
num_workers=12,
)
# Convert to audformat
db = audformat.Database(
name='emodb',
author=(
'Felix Burkhardt, '
'Astrid Paeschke, '
'Miriam Rolfes, '
'Walter Sendlmeier, '
'Benjamin Weiss'
),
organization='audEERING',
license=audformat.define.License.CC0_1_0,
source='http://emodb.bilderbar.info/download/download.zip',
usage=audformat.define.Usage.UNRESTRICTED,
languages=[language],
description=description,
meta={
'pdf': (
'http://citeseerx.ist.psu.edu/viewdoc/'
'download?doi=10.1.1.130.8506&rep=rep1&type=pdf'
),
},
)
# Media
db.media['microphone'] = audformat.Media(
format='wav',
sampling_rate=16000,
channels=1,
)
# Raters
db.raters['gold'] = audformat.Rater()
# Schemes
db.schemes['emotion'] = audformat.Scheme(
labels=[str(x) for x in emotion_mapping.values()],
description='Six basic emotions and neutral.',
)
db.schemes['confidence'] = audformat.Scheme(
audformat.define.DataType.FLOAT,
minimum=0,
maximum=1,
description='Confidence of emotion ratings.',
)
db.schemes['speaker'] = audformat.Scheme(
labels=speaker_mapping,
description=(
'The actors could produce each sentence as often as '
'they liked and were asked to remember a real '
'situation from their past when they had felt this '
'emotion.'
),
)
db.schemes['transcription'] = audformat.Scheme(
labels=transcription_mapping,
description='Sentence produced by actor.',
)
db.schemes['duration'] = audformat.Scheme(dtype=audformat.define.DataType.TIME)
# Tables
index = audformat.filewise_index(files)
db['files'] = audformat.Table(index)
db['files']['duration'] = audformat.Column(scheme_id='duration')
db['files']['duration'].set(durations, index=index)
db['files']['speaker'] = audformat.Column(scheme_id='speaker')
db['files']['speaker'].set(speakers)
db['files']['transcription'] = audformat.Column(scheme_id='transcription')
db['files']['transcription'].set(transcriptions)
db['emotion'] = audformat.Table(index)
db['emotion']['emotion'] = audformat.Column(
scheme_id='emotion',
rater_id='gold',
)
db['emotion']['emotion'].set(emotions)
db['emotion']['emotion.confidence'] = audformat.Column(
scheme_id='confidence',
rater_id='gold',
)
db['emotion']['emotion.confidence'].set(confidences / 100.0)
# Save database to build folder
shutil.copytree(
os.path.join(src_dir, 'wav'),
os.path.join(build_dir, 'wav'),
)
db.save(build_dir)
| python |
X_raw_0 = [
[0, 0.1, 0, 0, 0],
[0.1, 0, 0.1, 0, 0.1],
[0, 0.1, 0, 0.1, 0],
[0, 0, 0.1, 0, 0],
[0, 0.1, 0, 0, 0]
]
node_info_0 = [
{
'min_load': 0,
'max_load': 30,
'min_power': 0,
'max_power': 15,
'load_coeff': 10,
'load_ref': 20,
'power_coeff_a': 0.1,
'power_coeff_b': 2.5,
'power_coeff_c': 0,
'gen_ramp_up': 50,
'gen_ramp_down': 50
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 0,
'load_ref': 0,
'power_coeff_a': 1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 0,
'load_ref': 1,
'power_coeff_a': 1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 10.1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 10.1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
] # 9 - 13 - 11 5 - 10
connection_info_0 = {
'connection_index': [1],
'connection_x': [0.1],
'connection_area': [1],
'connection_exchange_max': [100]
}
player0_info = {
'index': 0,
'X_raw': X_raw_0,
'node_info': node_info_0,
'connection_info': connection_info_0
}
X_raw_1 = [
[0, 0.1, 0, 0, 0],
[0.1, 0, 0.1, 0, 0.1],
[0, 0.1, 0, 0.1, 0],
[0, 0, 0.1, 0, 0],
[0, 0.1, 0, 0, 0]
]
node_info_1 = [
{
'min_load': 0,
'max_load': 25,
'min_power': 0,
'max_power': 40,
'load_coeff': 10,
'load_ref': 20,
'power_coeff_a': 0.1,
'power_coeff_b': 2,
'power_coeff_c': 0,
'gen_ramp_up': 50,
'gen_ramp_down': 50
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 4,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 2,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 1,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
] # 8 - 10 - 9 4 - 19
connection_info_1 = {
'connection_index': [0, 2, 3],
'connection_x': [0.1, 0.1, 0.1],
'connection_area': [0, 2, 3],
'connection_exchange_max': [100, 100, 100]
}
player1_info = {
'index': 1,
'X_raw': X_raw_1,
'node_info': node_info_1,
'connection_info': connection_info_1,
}
X_raw_2 = [
[0, 0.1, 0, 0, 0],
[0.1, 0, 0.1, 0, 0.1],
[0, 0.1, 0, 0.1, 0],
[0, 0, 0.1, 0, 0],
[0, 0.1, 0, 0, 0]
]
node_info_2 = [
{
'min_load': 0,
'max_load': 25,
'min_power': 0,
'max_power': 20,
'load_coeff': 5,
'load_ref': 15,
'power_coeff_a': 0.1,
'power_coeff_b': 3,
'power_coeff_c': 0,
'gen_ramp_up': 50,
'gen_ramp_down': 50
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 4,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 3,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
] # 9 - 13 - 11 5 - 8
connection_info_2 = {
'connection_index': [1, 4],
'connection_x': [0.1, 0.1],
'connection_area': [1, 4],
'connection_exchange_max': [100, 100]
}
player2_info = {
'index': 2,
'X_raw': X_raw_2,
'node_info': node_info_2,
'connection_info': connection_info_2
}
X_raw_3 = [
[0, 0.1, 0, 0, 0],
[0.1, 0, 0.1, 0, 0.1],
[0, 0.1, 0, 0.1, 0],
[0, 0, 0.1, 0, 0],
[0, 0.1, 0, 0, 0]
]
node_info_3 = [
{
'min_load': 0,
'max_load': 25,
'min_power': 0,
'max_power': 10,
'load_coeff': 5,
'load_ref': 20,
'power_coeff_a': 0.1,
'power_coeff_b': 2,
'power_coeff_c': 0,
'gen_ramp_up': 50,
'gen_ramp_down': 50
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 4,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 3,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
] # 9 - 13 - 11 5 - 8
connection_info_3 = {
'connection_index': [1],
'connection_x': [0.1],
'connection_area': [1],
'connection_exchange_max': [100]
}
player3_info = {
'index': 3,
'X_raw': X_raw_3,
'node_info': node_info_3,
'connection_info': connection_info_3
}
X_raw_4 = [
[0, 0.1, 0, 0, 0],
[0.1, 0, 0.1, 0, 0.1],
[0, 0.1, 0, 0.1, 0],
[0, 0, 0.1, 0, 0],
[0, 0.1, 0, 0, 0]
]
node_info_4 = [
{
'min_load': 0,
'max_load': 25,
'min_power': 0,
'max_power': 10,
'load_coeff': 5,
'load_ref': 20,
'power_coeff_a': 0.1,
'power_coeff_b': 3,
'power_coeff_c': 0,
'gen_ramp_up': 50,
'gen_ramp_down': 50
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 4,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 3,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
] # 9 - 13 - 11 5 - 8
connection_info_4 = {
'connection_index': [2],
'connection_x': [0.1],
'connection_area': [2],
'connection_exchange_max': [100]
}
player4_info = {
'index': 4,
'X_raw': X_raw_4,
'node_info': node_info_4,
'connection_info': connection_info_4
}
namejqy = 'jqy'
| python |
from typing import List
from pydantic import BaseModel, Field
__all__ = [
"ArticleRankDTO",
]
class ArticleRankDTO(BaseModel):
    articleTitle: str = Field(
        ...,
        description="文章标题"
    )
    viewCount: int = Field(
        ...,
        description="文章浏览量"
    )
| python |
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
"""
Created on 4 Dec 2013
@author: George
"""
"""
test script to test the generator
"""
from SimPy.Simulation import now, activate, simulate, infinity, initialize
from .EventGenerator import EventGenerator
from .Machine import Machine
from .Source import Source
from .Exit import Exit
from .Part import Part
from .Queue import Queue
from .Globals import G
from . import ExcelHandler
from . import Globals
G.trace = "Yes"
S = Source("S1", "Source", mean=1, item=Part)
M1 = Machine("M1", "Machine1", mean=0.75)
Q1 = Queue("Q1", "Queue1", capacity=infinity)
M2 = Machine("M2", "Machine2", mean=0.75)
Q2 = Queue("Q2", "Queue2", capacity=infinity)
E = Exit("E1", "Exit")
# define predecessors and successors for the objects
S.defineRouting([M1])
M1.defineRouting([S], [Q1])
Q1.defineRouting([M1], [M2])
M2.defineRouting([Q1], [Q2])
Q2.defineRouting([M2])
argumentDict = {"from": "Q2", "to": "E1", "safetyStock": 70, "consumption": 20}
EG = EventGenerator(
id="EV",
name="ExcessEntitiesMover",
start=60,
interval=60,
method=Globals.moveExcess,
argumentDict=argumentDict,
)
G.ObjList = [S, M1, M2, E, Q1, Q2, EG]
initialize() # initialize the simulation (SimPy method)
for object in G.ObjList:
object.initialize()
for object in G.ObjList:
activate(object, object.run())
G.maxSimTime = 400
simulate(until=G.maxSimTime) # run the simulation
# carry on the post processing operations for every object in the topology
for object in G.ObjList:
object.postProcessing()
ExcelHandler.outputTrace("TRACE")
print(("the system produced", E.numOfExits, "parts"))
print(
(
"the waiting ratio of",
M1.objName,
"is",
(M1.totalWaitingTime / G.maxSimTime) * 100,
"%",
)
)
print(
(
"the waiting ratio of",
M2.objName,
"is",
(M2.totalWaitingTime / G.maxSimTime) * 100,
"%",
)
)
| python |
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2015 Toshio Kuratomi <[email protected]>
# (c) 2017, Peter Sprygada <[email protected]>
# (c) 2017 Ansible Project
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import gettext
import os
import shlex
from abc import abstractmethod, abstractproperty
from functools import wraps
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins import AnsiblePlugin
from ansible.plugins.loader import shell_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['ConnectionBase', 'ensure_connect']
BUFSIZE = 65536
def ensure_connect(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not self._connected:
self._connect()
return func(self, *args, **kwargs)
return wrapped
class ConnectionBase(AnsiblePlugin):
'''
A base class for connections to contain common code.
'''
has_pipelining = False
has_native_async = False # eg, winrm
always_pipeline_modules = False # eg, winrm
become_methods = C.BECOME_METHODS
# When running over this connection type, prefer modules written in a certain language
# as discovered by the specified file extension. An empty string as the
# language means any language.
module_implementation_preferences = ('',)
allow_executable = True
# the following control whether or not the connection supports the
# persistent connection framework or not
supports_persistence = False
force_persistence = False
default_user = None
def __init__(self, play_context, new_stdin, shell=None, *args, **kwargs):
super(ConnectionBase, self).__init__()
# All these hasattrs allow subclasses to override these parameters
if not hasattr(self, '_play_context'):
self._play_context = play_context
if not hasattr(self, '_new_stdin'):
self._new_stdin = new_stdin
# Backwards compat: self._display isn't really needed, just import the global display and use that.
if not hasattr(self, '_display'):
self._display = display
if not hasattr(self, '_connected'):
self._connected = False
self.success_key = None
self.prompt = None
self._connected = False
self._socket_path = None
if shell is not None:
self._shell = shell
# load the shell plugin for this action/connection
if play_context.shell:
shell_type = play_context.shell
elif hasattr(self, '_shell_type'):
shell_type = getattr(self, '_shell_type')
else:
shell_type = 'sh'
shell_filename = os.path.basename(self._play_context.executable)
try:
shell = shell_loader.get(shell_filename)
except Exception:
shell = None
if shell is None:
for shell in shell_loader.all():
if shell_filename in shell.COMPATIBLE_SHELLS:
break
shell_type = shell.SHELL_FAMILY
self._shell = shell_loader.get(shell_type)
if not self._shell:
raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)
@property
def connected(self):
'''Read-only property holding whether the connection to the remote host is active or closed.'''
return self._connected
@property
def socket_path(self):
'''Read-only property holding the connection socket path for this remote host'''
return self._socket_path
def _become_method_supported(self):
''' Checks if the current class supports this privilege escalation method '''
if self._play_context.become_method in self.become_methods:
return True
raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % self._play_context.become_method)
@staticmethod
def _split_ssh_args(argstring):
"""
Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
the argument list. The list will not contain any empty elements.
"""
try:
# Python 2.6.x shlex doesn't handle unicode type so we have to
# convert args to byte string for that case. More efficient to
# try without conversion first but python2.6 doesn't throw an
# exception, it merely mangles the output:
# >>> shlex.split(u't e')
# ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00']
return [to_text(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
except AttributeError:
# In Python3, shlex.split doesn't work on a byte string.
return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
@abstractproperty
def transport(self):
"""String used to identify this Connection class from other classes"""
pass
@abstractmethod
def _connect(self):
"""Connect to the host we've been initialized with"""
# Check if PE is supported
if self._play_context.become:
self._become_method_supported()
@ensure_connect
@abstractmethod
def exec_command(self, cmd, in_data=None, sudoable=True):
"""Run a command on the remote host.
:arg cmd: byte string containing the command
:kwarg in_data: If set, this data is passed to the command's stdin.
This is used to implement pipelining. Currently not all
connection plugins implement pipelining.
:kwarg sudoable: Tell the connection plugin if we're executing
a command via a privilege escalation mechanism. This may affect
how the connection plugin returns data. Note that not all
connections can handle privilege escalation.
:returns: a tuple of (return code, stdout, stderr) The return code is
an int while stdout and stderr are both byte strings.
When a command is executed, it goes through multiple commands to get
there. It looks approximately like this::
[LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
:LocalShell: Is optional. It is run locally to invoke the
``Connection Command``. In most instances, the
``ConnectionCommand`` can be invoked directly instead. The ssh
connection plugin which can have values that need expanding
locally specified via ssh_args is the sole known exception to
this. Shell metacharacters in the command itself should be
processed on the remote machine, not on the local machine so no
shell is needed on the local machine. (Example, ``/bin/sh``)
:ConnectionCommand: This is the command that connects us to the remote
machine to run the rest of the command. ``ansible_ssh_user``,
``ansible_ssh_host`` and so forth are fed to this piece of the
command to connect to the correct host (Examples ``ssh``,
``chroot``)
:UsersLoginShell: This shell may or may not be created depending on
the ConnectionCommand used by the connection plugin. This is the
shell that the ``ansible_ssh_user`` has configured as their login
shell. In traditional UNIX parlance, this is the last field of
            a user's ``/etc/passwd`` entry. We do not specifically try to run
the ``UsersLoginShell`` when we connect. Instead it is implicit
in the actions that the ``ConnectionCommand`` takes when it
connects to a remote machine. ``ansible_shell_type`` may be set
to inform ansible of differences in how the ``UsersLoginShell``
handles things like quoting if a shell has different semantics
than the Bourne shell.
:ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
``ansible_shell_executable`` or via
``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
We explicitly invoke this shell so that we have predictable
quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only
settable by the user because some sudo setups may only allow
invoking a specific shell. (For instance, ``/bin/bash`` may be
allowed but ``/bin/sh``, our default, may not). We invoke this
twice, once after the ``ConnectionCommand`` and once after the
``BecomeCommand``. After the ConnectionCommand, this is run by
the ``UsersLoginShell``. After the ``BecomeCommand`` we specify
that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
        :BecomeCommand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
privilege escalation. Setting this up is performed by the action
plugin prior to running ``exec_command``. So we just get passed
:param:`cmd` which has the BecomeCommand already added.
(Examples: sudo, su) If we have a BecomeCommand then we will
invoke a ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we
have a consistent view of quoting.
:Command: Is the command we're actually trying to run remotely.
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
"""
pass
@ensure_connect
@abstractmethod
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
pass
@ensure_connect
@abstractmethod
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
pass
@abstractmethod
def close(self):
"""Terminate the connection"""
pass
def check_become_success(self, b_output):
b_success_key = to_bytes(self._play_context.success_key)
for b_line in b_output.splitlines(True):
if b_success_key == b_line.rstrip():
return True
return False
def check_password_prompt(self, b_output):
if self._play_context.prompt is None:
return False
elif isinstance(self._play_context.prompt, string_types):
b_prompt = to_bytes(self._play_context.prompt).strip()
b_lines = b_output.splitlines()
return any(l.strip().startswith(b_prompt) for l in b_lines)
else:
return self._play_context.prompt(b_output)
def check_incorrect_password(self, b_output):
b_incorrect_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_ERROR_STRINGS[self._play_context.become_method]))
return b_incorrect_password and b_incorrect_password in b_output
def check_missing_password(self, b_output):
b_missing_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_MISSING_STRINGS[self._play_context.become_method]))
return b_missing_password and b_missing_password in b_output
def connection_lock(self):
f = self._play_context.connection_lockfd
display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
fcntl.lockf(f, fcntl.LOCK_EX)
display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
def connection_unlock(self):
f = self._play_context.connection_lockfd
fcntl.lockf(f, fcntl.LOCK_UN)
display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
def reset(self):
display.warning("Reset is not implemented for this connection")
| python |
#!/usr/bin/env python3
#encoding=utf-8
#-----------------------------------------
# Usage: python3 4-getattr-builtins.py
# Description: compare __getattr__ and __getattribute__
#-----------------------------------------
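# __getattr__ is invoked only when normal lookup fails, while __getattribute__
# intercepts every attribute fetch, including ones that exist on the instance.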
class GetAttr:
eggs = 88 # eggs stored on class, spam on instance
def __init__(self):
self.spam = 77
def __len__(self): # len here, else __getattr__ called with __len__
print('__len__: 42')
return 42
def __getattr__(self, attr): # Provide __str__ if asked, else dummy func
print('getattr: ' + attr)
if attr == '__str__':
return lambda *args: '[Getattr str]'
else:
return lambda *args: None
class GetAttribute(object): # object required in 2.X, implied in 3.X
eggs = 88 # In 2.X all are isinstance(object) auto
def __init__(self): # But must derive to get new-style tools,
self.spam = 77 # incl __getattribute__, some __X__ defaults
def __len__(self):
print('__len__: 42')
return 42
def __getattribute__(self, attr):
print('getattribute: ' + attr)
if attr == '__str__':
return lambda *args: '[GetAttribute str]'
else:
return lambda *args: None
if __name__ == '__main__':
for Class in GetAttr, GetAttribute:
print('\n' + Class.__name__.ljust(50, '='))
X = Class()
X.eggs # Class attr
X.spam # Instance attr
X.other # Missing attr
len(X) # __len__ defined explicitly
# New-styles must support [], +, call directly: redefine
try:
X[0] # __getitem__?
except:
print('fail []')
try:
X + 99 # __add__?
except:
print('fail +')
try:
X() # __call__? (implicit via built-in)
except:
print('fail ()')
X.__call__() # __call__? (explicit, not inherited)
print(X.__str__()) # __str__? (explicit, inherited from type)
print(X) # __str__? (implicit via built-in)
| python |
# Python exercise 024
# Read the name of a city and check whether it starts with 'SANTO'.
cidade = str(input('Digite o nome de uma cidade: ')).strip()
minusculo = cidade.lower()
santo = 'santo' in minusculo[0:5]
print(santo)
# another way
print(cidade[:5].lower() == 'santo')
| python |
import numpy
import pytest
import helpers
import meshio
@pytest.mark.parametrize(
"mesh",
[
helpers.tri_mesh,
helpers.tri_mesh_2d,
helpers.tet_mesh,
helpers.add_cell_data(helpers.tri_mesh, 1, dtype=float),
helpers.add_cell_data(helpers.tri_mesh, 1, dtype=numpy.int32),
],
)
def test_dolfin(mesh):
helpers.write_read(meshio.dolfin.write, meshio.dolfin.read, mesh, 1.0e-15)
def test_generic_io():
helpers.generic_io("test.xml")
# With additional, insignificant suffix:
helpers.generic_io("test.0.xml")
| python |
"""
LINK: https://leetcode.com/problems/factorial-trailing-zeroes/
Given an integer n, return the number of trailing zeroes in n!.
Follow up: Could you write a solution that works in logarithmic time complexity?
Example 1:
Input: n = 3
Output: 0
Explanation: 3! = 6, no trailing zero.
Example 2:
Input: n = 5
Output: 1
Explanation: 5! = 120, one trailing zero.
Example 3:
Input: n = 0
Output: 0
Constraints:
0 <= n <= 10^4
"""
def trailingZeroes(n):
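    # Trailing zeros come from factors of 10 = 2 * 5; twos are plentiful, so
    # only factors of 5 matter: n//5 counts multiples of 5, the next pass
    # adds multiples of 25, then 125, and so on.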
fives = 0
while n:
fives += n//5
n //= 5
return fives
def trailingZeroes_recursive(n):
if not n:
return 0
else:
return n//5 + trailingZeroes_recursive(n//5) | python |
from generators import *
from laws import (monoid_laws, functor_laws, applicative_laws, monad_laws, trans_laws)
from fplib.maybe import Maybe
from fplib.transformer import trans
from fplib.ident_t import IdT
T = trans(IdT, Maybe)
def cmpidt(idt0, idt1):
return idt0.unwrap == idt1.unwrap
def test_idt_functor():
xs = map(T.unit, random_strings(10))
functor_laws(xs, 100, cmp_fun=cmpidt)
def test_idt_applicative():
xs = map(T.unit, random_strings(10))
applicative_laws(T, xs, 100, cmp_fun=cmpidt)
def test_idt_monad():
xs = map(T.unit, random_strings(10))
monad_laws(T, xs, 100, cmp_fun=cmpidt)
def test_idt_transformer():
xs = random_maybes(random_strings(10))
trans_laws(T, xs, 100, cmp_fun=cmpidt)
| python |
"""
Energy level and transitions classes
"""
import numpy as np
import astropy.units as u
import astropy.constants as const
from fiasco.util import vectorize_where
__all__ = ['Level', 'Transitions']
class Level(object):
def __init__(self, index, elvlc):
self._index = index
self._elvlc = elvlc
def __repr__(self):
return f"""Level: {self.level}
Configuration: {self.configuration}
Orbital Angular Momentum: {self.orbital_angular_momentum_label}
Energy: {self.energy.to(u.eV)}"""
@property
def level(self):
return self._elvlc['level'][self._index]
@property
def configuration(self):
return self._elvlc['config'][self._index]
@property
def multiplicity(self):
return self._elvlc['multiplicity'][self._index]
@property
def total_angular_momentum(self):
return self._elvlc['J'][self._index]
@property
def orbital_angular_momentum_label(self):
return self._elvlc['L_label'][self._index]
@property
@u.quantity_input
def energy(self) -> u.erg:
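        # A negative observed energy (typically -1 in the CHIANTI data) means
        # it is unavailable, so fall back to the theoretical value.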
key = 'E_th' if self._elvlc['E_obs'][self._index] < 0 else 'E_obs'
return self._elvlc[key][self._index]*const.h*const.c
class Transitions(object):
def __init__(self, elvlc, wgfa):
self._elvlc = elvlc
self._wgfa = wgfa
@property
def is_twophoton(self):
"""
True if the transition is a two-photon decay
"""
return self._wgfa['wavelength'] == 0.*u.angstrom
@property
def is_observed(self):
"""
True for transitions that connect two observed energy levels
"""
return self._wgfa['wavelength'] > 0.*u.angstrom
@property
@u.quantity_input
def A(self) -> u.s**(-1):
"""
Spontaneous transition probability due to radiative decay
"""
return self._wgfa['A']
@property
@u.quantity_input
def wavelength(self) -> u.angstrom:
return np.fabs(self._wgfa['wavelength'])
@property
def upper_level(self):
return self._wgfa['upper_level']
@property
def lower_level(self):
return self._wgfa['lower_level']
@property
@u.quantity_input
def delta_energy(self) -> u.erg:
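        # Use theoretical energies where the observed value is missing (-1),
        # then index lower/upper levels and take the energy difference.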
energy = u.Quantity(np.where(
self._elvlc['E_obs'].value == -1, self._elvlc['E_th'].value,
self._elvlc['E_obs'].value), self._elvlc['E_obs'].unit)
indices = np.vstack([vectorize_where(self._elvlc['level'], self.lower_level),
vectorize_where(self._elvlc['level'], self.upper_level)])
return np.diff(energy[indices], axis=0).flatten() * const.h * const.c
| python |
"""
# -*- coding: utf-8 -*-
__author__ = "Akash"
__email__ = "[email protected]"
__version__ = 1.0.0"
__copyright__ = "Copyright (c) 2004-2020 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
Description:
Py-Insta Is A Python Library
Created By Akash Pattnaik From
India..
Py-Insta Helps Users To Easily
Scrape Instagram Data
And Print It Or You Can Define It Into A Variable...
If You Find Bugs Then Please Report To
@AKASH_AM1 On Telegram...
Prerequisites:
from bs4 import BeautifulSoup
import requests
Documentation:
Github: https://github.com/BLUE-DEVIL1134/Py-Insta
PyPi: https://pypi.org/user/AkashPattnaik/
"""
__version__ = 1.0
import requests
from bs4 import BeautifulSoup
__url__ = "https://www.instagram.com/{}/"
def Insta(username):
try:
response = requests.get(__url__.format(username.replace('@','')),timeout=5) # InCase Someone Types @UserName
if '404' in str(response): # If The Username Is Invalid
data = 'No Such Username'
return data
else:
soup = BeautifulSoup(response.text, "html.parser")
meta = soup.find("meta", property="og:description")
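            # The og:description content is assumed to look like
            # "<followers> Followers, <following> Following, <posts> Posts - <name> ...",
            # so splitting on spaces exposes the counts at fixed positions.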
try:
s = meta.attrs['content'].split(' ')
data = {
'Followers': s[0],
'Following': s[2],
'Posts': s[4],
'Name': s[13]
}
return data
except requests.exceptions.InvalidURL:
return 'No Such Username'
except (requests.ConnectionError, requests.Timeout):
return 'No InterNet Connection'
| python |
import scrython
import time
query = input("Type the name of the set: ")
time.sleep(0.05)
sets = scrython.sets.Sets()
for i in range(sets.data_length()):
if sets.set_name(i) == query:
print("Set code:", sets.set_code(i).upper())
break
else:
continue
| python |
import json
import binascii
import struct
import random
from io import BytesIO
import sys
from operator import itemgetter
class Item():
def __init__(self, name, index, quantity, rate):
self.name = name
self.index = index
self.quantity = quantity
self.rate = rate
def __repr__(self):
return self.__class__.__name__ + "({}, index={}, quantity={}, rate={})".format(self.name, self.index, self.quantity, self.rate)
class CommonItem(Item):
def __init__(self, *args):
super().__init__(*args)
class RareItem(Item):
def __init__(self, *args):
super().__init__(*args)
class CommonIngredient(Item):
def __init__(self, *args):
super().__init__(*args)
class RareIngredient(Item):
def __init__(self, *args):
super().__init__(*args)
class Shard(Item):
def __init__(self, *args):
super().__init__(*args)
class Coin():
def __init__(self, name, index, rate, override):
self.name = name
self.index = index
self.rate = rate
self.override = override
def __repr__(self):
return "Coin({}, index={}, rate={}, override={})".format(self.name, self.index, self.rate, self.override)
def getNameFromEntry(entry):
return entry["Key"]["Value"]["Value"]
def getRareItemFromEntry(entry):
name = entry["Properties"]["RareItemId\x00"][1]["Value"]
index = entry["Properties"]["RareItemId\x00"][1]["Index"]
quantity = entry["Properties"]["RareItemQuantity\x00"][1]
rate = entry["Properties"]["RareItemRate\x00"][1]
return RareItem(name, index, quantity, rate)
def getCommonItemFromEntry(entry):
name = entry["Properties"]["CommonItemId\x00"][1]["Value"]
index = entry["Properties"]["CommonItemId\x00"][1]["Index"]
quantity = entry["Properties"]["CommonItemQuantity\x00"][1]
rate = entry["Properties"]["CommonRate\x00"][1]
return CommonItem(name, index, quantity, rate)
def getRareIngredientFromEntry(entry):
name = entry["Properties"]["RareIngredientId\x00"][1]["Value"]
index = entry["Properties"]["RareIngredientId\x00"][1]["Index"]
quantity = entry["Properties"]["RareIngredientQuantity\x00"][1]
rate = entry["Properties"]["RareIngredientRate\x00"][1]
return RareIngredient(name, index, quantity, rate)
def getCommonIngredientFromEntry(entry):
name = entry["Properties"]["CommonIngredientId\x00"][1]["Value"]
index = entry["Properties"]["CommonIngredientId\x00"][1]["Index"]
quantity = entry["Properties"]["CommonIngredientQuantity\x00"][1]
rate = entry["Properties"]["CommonIngredientRate\x00"][1]
return CommonIngredient(name, index, quantity, rate)
def getShardFromEntry(entry):
name = entry["Properties"]["ShardId\x00"][1]["Value"]
index = entry["Properties"]["ShardId\x00"][1]["Index"]
rate = entry["Properties"]["ShardRate\x00"][1]
return Shard(name, index, 1, rate)
def getCoinFromEntry(entry):
name = entry["Properties"]["CoinType\x00"][1]["Value"]
index = entry["Properties"]["CoinType\x00"][1]["Index"]
override = entry["Properties"]["CoinOverride\x00"][1]
rate = entry["Properties"]["CoinRate\x00"][1]
return Coin(name, index, rate, override)
def getAllFromEntry(entry):
name = getNameFromEntry(entry)
shard = getShardFromEntry(entry)
ritem = getRareItemFromEntry(entry)
citem = getCommonItemFromEntry(entry)
ring = getRareIngredientFromEntry(entry)
cing = getCommonIngredientFromEntry(entry)
coin = getCoinFromEntry(entry)
return (name, shard, ritem, citem, ring, cing, coin)
class DropLocation():
def __init__(self, name, shard, rare_item, common_item, rare_ingredient, common_ingredient, coin):
self.name = name
self.shard = shard
self.rare_item = rare_item
self.common_item = common_item
self.rare_ingredient = rare_ingredient
self.common_ingredient = common_ingredient
self.coin = coin
def __repr__(self):
return "DropLocation(\n\t{},\n\t{},\n\t{},\n\t{},\n\t{},\n\t{},\n\t{}\n)".format( \
self.name, \
self.shard, \
self.rare_item, \
self.common_item, \
self.rare_ingredient, \
self.common_ingredient, \
self.coin)
#Yield all chests
def allChests(locs):
for loc in locs:
if "Treasurebox" in loc.name and filterChests(loc):
yield loc
#True: accept item into randomizer logic
#False: reject item from randomizer logic
def filterChests(loc):
#Names to filter out
bad_item_names = [
"MaxHPUP", "MaxMPUP", "MaxBulletUP", #Max HP/MP/Bullet upgrades
"ChangeHP", #Dunno what this is
"Silverbromide", #Progression item
"SpikeBreast" #Spike Aegis needed for progression, lock for now
]
for name in bad_item_names:
if name in loc.rare_item.name["Value"]:
print("Rejecting chest item: {}".format(name))
return False
if name in loc.common_item.name["Value"]:
print("Rejecting chest item: {}".format(name))
return False
return True
#Yield all shard entries
def allMobs(locs):
for loc in locs:
if "_Shard" in loc.name and filterMobs(loc):
yield loc
other_good_names = [
"_1ST_Treasure", #Carpenter
"_2ND_Treasure" #Also Carpenter
]
for other in other_good_names:
if other in loc.name:
yield loc
#True/False whether to include this specific shard in random pool
def filterMobs(loc):
progression_shard_names = [
"Reflectionray", #Reflect Ray
"Dimensionshift", #Dimension Shift
"Invert", #Invert
"Doublejump", #Double Jump
"Demoniccapture", #Craftwork
"Aquastream", #Only to make sure water access is available
"Bloodsteel", #Blood Steal
"SwingTentacle", #Galleon Minerva boss drop, must be valid
"Ceruleansplash", #really just need to make sure N3006_OpeningDemo has valid shard drop. I think...
]
for shard_name in progression_shard_names:
if shard_name in loc.shard.name["Value"]:
print("Rejecting shard: {}".format(loc.shard.name))
return False
return True
def allWalls(locs):
for loc in locs:
if "Wall_" in loc.name and filterWalls(loc):
yield loc
def filterWalls(loc):
bad_item_names = [
"MaxHPUP", "MaxMPUp", "MaxBulletUP", #Max HP/MP/Bullet upgrades
"ChangeHP", #Dunno what this is
]
for name in bad_item_names:
if name in loc.rare_item.name["Value"]:
print("Rejecting item: {}".format(name))
return False
if name in loc.common_item.name["Value"]:
print("Rejecting item: {}".format(name))
return False
return True
class Patch():
def __init__(self, offset, value):
self.offset = offset
self.value = value
def __repr__(self):
return "Patch(offset={}, value={})".format(self.offset, self.value)
def clearAllDrops(locs):
patches = []
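    # Reset every drop slot to the sentinel empty item/coin so the shuffled
    # assignments below start from a clean slate.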
for loc in locs:
patches.append(Patch(loc.shard.index["offset"], empty_drop.index["Value"]))
patches.append(Patch(loc.shard.rate["offset"], 0.0))
patches.append(Patch(loc.rare_item.index["offset"], empty_drop.index["Value"]))
patches.append(Patch(loc.rare_item.quantity["offset"], 0))
patches.append(Patch(loc.rare_item.rate["offset"], 0.0))
patches.append(Patch(loc.common_item.index["offset"], empty_drop.index["Value"]))
patches.append(Patch(loc.common_item.quantity["offset"], 0))
patches.append(Patch(loc.common_item.rate["offset"], 0.0))
patches.append(Patch(loc.rare_ingredient.index["offset"], empty_drop.index["Value"]))
patches.append(Patch(loc.rare_ingredient.quantity["offset"], 0))
patches.append(Patch(loc.rare_ingredient.rate["offset"], 0.0))
patches.append(Patch(loc.common_ingredient.index["offset"], empty_drop.index["Value"]))
patches.append(Patch(loc.common_ingredient.quantity["offset"], 0))
patches.append(Patch(loc.common_ingredient.rate["offset"], 0.0))
patches.append(Patch(loc.coin.index["offset"], empty_coin.index["Value"]))
patches.append(Patch(loc.coin.override["offset"], empty_coin.override["Value"]))
patches.append(Patch(loc.coin.rate["offset"], 100.0))
return patches
def assignShards(origs, news):
patchset = []
for orig, new in zip(origs,news):
patchset.append( Patch(orig.shard.index["offset"], new.index["Value"]) )
patchset.append( Patch(orig.shard.rate["offset"], new.rate["Value"]))
return patchset
def assignRareItems(origs, news):
patchset = []
for orig, new in zip(origs, news):
patchset.append( Patch(orig.rare_item.index["offset"], new.index["Value"]))
patchset.append( Patch(orig.rare_item.quantity["offset"], new.quantity["Value"]))
patchset.append( Patch(orig.rare_item.rate["offset"], new.rate["Value"]))
return patchset
def assignCommonItems(origs, news):
patchset = []
for orig, new in zip(origs, news):
patchset.append( Patch(orig.common_item.index["offset"], new.index["Value"]))
patchset.append( Patch(orig.common_item.quantity["offset"], new.quantity["Value"]))
patchset.append( Patch(orig.common_item.rate["offset"], new.rate["Value"]))
return patchset
def assignRareIngredients(origs, news):
patchset = []
for orig, new in zip(origs, news):
patchset.append( Patch(orig.rare_ingredient.index["offset"], new.index["Value"]))
patchset.append( Patch(orig.rare_ingredient.quantity["offset"], new.quantity["Value"]))
patchset.append( Patch(orig.rare_ingredient.rate["offset"], new.rate["Value"]))
return patchset
def assignCommonIngredients(origs, news):
patchset = []
for orig, new in zip(origs, news):
patchset.append( Patch(orig.common_ingredient.index["offset"], new.index["Value"]))
patchset.append( Patch(orig.common_ingredient.quantity["offset"], new.quantity["Value"]))
patchset.append( Patch(orig.common_ingredient.rate["offset"], new.rate["Value"]))
return patchset
def assignCoins(origs, news):
patchset = []
for orig, new in zip(origs, news):
if new.rate["Value"] == 0.0:
continue
patchset.append( Patch(orig.coin.index["offset"], new.index["Value"]))
patchset.append( Patch(orig.coin.override["offset"], new.override["Value"]))
patchset.append( Patch(orig.coin.rate["offset"], new.rate["Value"]))
return patchset
def applyPatches(raw, patches):
stream = BytesIO(raw)
for patch in patches:
stream.seek(patch.offset)
if isinstance(patch.value, int):
stream.write(struct.pack("i", patch.value))
elif isinstance(patch.value, float):
stream.write(struct.pack("f", patch.value))
else:
            raise NotImplementedError(type(patch.value))
return stream.getbuffer()
#Set drop rates to 100% for mobs that can only be fought once
#TODO: Untested!
def handleNonRepeatableMobs(locs):
relevantMobs = ['N1001', 'N1011', 'N1003', 'N2004', 'N1005',
'N2001', 'N1006', 'N1012', 'N1002', 'N2014',
'N2007', 'N2006', 'N1004', 'N1008', 'N1009',
'N1013', 'N2012']
patchset = []
for loc in locs:
for mobnum in relevantMobs:
if mobnum in loc.name:
patchset.append( Patch( loc.shard.rate["offset"], 100.0) )
patchset.append( Patch( loc.common_item.rate["offset"], 100.0) )
patchset.append( Patch( loc.rare_item.rate["offset"], 100.0) )
patchset.append( Patch( loc.common_ingredient.rate["offset"], 100.0) )
patchset.append( Patch( loc.rare_ingredient.rate["offset"], 100.0) )
return patchset
if __name__ == "__main__":
import argparse
import os
from uasset_dt_to_json import dumper as udump
parser = argparse.ArgumentParser( \
description="Bloodstained drop randomizer",
usage="%(prog)s --input [infile]"
)
parser.add_argument("--debug", help="Enable debug output", action='store_true', default=False)
parser.add_argument("--input", help="Original 'PB_DT_DropRateMaster.uasset' file", \
action='store', required=True)
parser.add_argument("--seed", help="Seed for randomizer", action='store', default=random.random())
#Parse arguments
args = parser.parse_args()
#Create JSON from original input file
with open(args.input, "rb") as original_file:
uasset = udump.UAsset(original_file)
items = [udump.Item(obj) for obj in uasset.Summary.Exports[0].Object.ObjectData.Data]
drop_rate_master = json.loads(json.dumps(items, cls=udump.UAssetEncoder))
#Set random seed
random.seed(args.seed)
#get all possible locations with associated drops
all_locations = [DropLocation(*getAllFromEntry(entry)) for entry in drop_rate_master]
#get just chests
all_chests = [loc for loc in allChests(all_locations)]
#get just mobs
all_mobs = [loc for loc in allMobs(all_locations)]
#get just walls
all_walls = [loc for loc in allWalls(all_locations)]
#Find empty/low drops to use if needed.
#Since they can be copied endlessly without breaking anything it's a safe default drop. Usually.
#find empty coin to copy into all chests without a valid drop
#FIXME: empty coin still screws up, using low-value coin instead
empty_coin = [c.coin for c in all_chests if "D10\u0000" in c.coin.name["Value"]][0]
#find empty drop
empty_drop = [e.common_item for e in all_chests if "None" in e.common_item.name["Value"]][0]
#Get list of all locations to be entered into the randomization pool
combined = all_chests + all_mobs + all_walls
#list of patches to apply to the final file
patches = []
#Clear all drop slots
patches += clearAllDrops(combined)
#Get all items
shards = [loc.shard for loc in combined]
rare_items = [loc.rare_item for loc in combined]
common_items = [loc.common_item for loc in combined]
rare_ingredients = [loc.rare_ingredient for loc in combined]
common_ingredients = [loc.common_ingredient for loc in combined]
coins = [loc.coin for loc in combined]
#shuffle them all around
random.shuffle(shards)
random.shuffle(rare_items)
random.shuffle(common_items)
random.shuffle(rare_ingredients)
random.shuffle(common_ingredients)
random.shuffle(coins)
#shuffle locations
random.shuffle(combined)
#re-assign random shards to first len(shards) locations
patches += assignShards(combined[: len(shards)], shards)
#'' '' '' first len(rare_items) locations
patches += assignRareItems(combined[: len(rare_items)], rare_items)
#etc etc
patches += assignCommonItems(combined[: len(common_items)], common_items)
patches += assignRareIngredients(combined[: len(rare_ingredients)], rare_ingredients)
patches += assignCommonIngredients(combined[: len(common_ingredients)], common_ingredients)
patches += assignCoins(combined[: len(coins)], coins)
#Should result in all shards/items/coins being re-assigned to somewhere.
    #Does nothing to guarantee that things intended to be re-acquired, like ingredients, remain infinitely available.
#For mobs that are single-fight only, set drop rates to 100% for any none-None items/shards
#TODO: UNTESTED
patches += handleNonRepeatableMobs(combined)
#with open("PB_DT_DropRateMaster.uasset", "rb") as file:
with open(args.input, "rb") as file:
raw = file.read()
mod = applyPatches(raw, patches)
    outputfile = r"unrealpak\Randomizer\BloodstainedRotN\Content\Core\DataTable\PB_DT_DropRateMaster.uasset"
with open(outputfile, "wb") as file:
file.write(mod)
#create mod .pak file
os.system(r".\unrealpak\UnrealPak-With-Compression.bat Randomizer")
os.system(r"move .\unrealpak\Randomizer.pak .")
sys.exit() | python |
import os
from pytest import fixture
from zpz.filesys.path import relative_path
from zpz.spark import PySparkSession, ScalaSparkSession, SparkSession, SparkSessionError
livy_server_url = None
@fixture(scope='module')
def pysession():
return PySparkSession(livy_server_url)
@fixture(scope='module')
def scalasession():
return ScalaSparkSession(livy_server_url)
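# Monte Carlo pi estimate executed inside the remote Spark session: sample
# random points in the unit square and count those inside the quarter circle.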
pi_py = """\
import random
NUM_SAMPLES = 100000
def sample(p):
x, y = random.random(), random.random()
return 1 if x*x + y*y < 1 else 0
count = sc.parallelize(range(0, NUM_SAMPLES)).map(sample).reduce(lambda a, b: a + b)
pi = 4.0 * count / NUM_SAMPLES
mylist = [1, 3, 'abc']
mytuple = ('a', 'b', 'c', 1, 2, 3)
mydict = {'a': 13, 'b': 'usa'}
# spark 2.0
# from pyspark.sql import Row
# pi_df = spark.createDataFrame([Row(value=pi)])
# spark 1.6:
from pyspark.sql import SQLContext, Row
pi_df = SQLContext(sc).createDataFrame([Row(value=pi)])
"""
def test_py(pysession):
print()
pysession.run('z = 1 + 3')
z = pysession.read('z')
assert z == 4
pysession.run(pi_py)
pi = pysession.read('pi')
print('printing a number:')
print(pi)
assert 3.0 < pi < 3.2
code = '''pip2 = pi + 2'''
pysession.run(code)
pip2 = pysession.read('pip2')
assert 3.0 < pip2 - 2 < 3.2
mylist = pysession.read('mylist')
assert mylist == [1, 3, 'abc']
mytuple = pysession.read('mytuple')
assert mytuple == ('a', 'b', 'c', 1, 2, 3)
mydict = pysession.read('mydict')
assert mydict == {'a': 13, 'b': 'usa'}
local_df = pysession.read('pi_df')
print()
print('printing a {}:'.format(type(local_df)))
print(local_df)
pi = local_df.iloc[0, 0]
assert 3.0 < pi < 3.2
assert pysession.read('3 + 6') == 9
print()
print('printing in Spark session:')
z = pysession.run('''print(type(pip2))''')
# `run` does not print.
# printouts in Spark are collected in the return of `run`.
print(z)
# `str` comes out as `str`
print()
print(pysession.read('str(type(pi))'))
print(pysession.read('type(pi_df).__name__'))
# `bool` comes out as `bool`
z = pysession.read('''isinstance(pi, float)''')
print()
print('printing boolean:')
print(z)
print(type(z))
assert z is True
assert pysession.read('str(isinstance(pi, float))') == 'True'
# `bool` comes out as `numpy.bool_`
# assert session.read(
# '''isinstance(pi_df, pyspark.sql.dataframe.DataFrame)''')
py_error = """\
class MySparkError(Exception):
pass
a = 3
b = 4
raise MySparkError('something is so wrong!')
print('abcd')
"""
def test_py_error(pysession):
try:
z = pysession.run(py_error)
except SparkSessionError as e:
print(e)
def test_file(pysession):
pysession.run_file(relative_path('./spark_test_scripts/script_a.py'))
z = pysession.read('magic')
assert 6.0 < z < 7.0
def test_func(pysession):
f = '''\
def myfunc(a, b, names, squared=False):
assert len(a) == 3
assert len(b) == 3
assert len(names) == 3
c = [aa + bb for (aa, bb) in zip(a, b)]
if squared:
c = [x*x for x in c]
d = {k:v for (k,v) in zip(names, c)}
return d
'''
pysession.run(f)
z = pysession.run_function('myfunc', [1, 2, 3], [4, 6, 8], [
'first', 'second', 'third'])
assert {k: z[k]
for k in sorted(z)} == {'first': 5, 'second': 8, 'third': 11}
z = pysession.run_function('myfunc', [1, 2, 3], [4, 6, 8], squared=True, names=[
'first', 'second', 'third'])
assert {k: z[k]
for k in sorted(z)} == {'first': 25, 'second': 64, 'third': 121}
pi_scala = """
val NUM_SAMPLES = 100000;
val count = sc.parallelize(1 to NUM_SAMPLES).map { i =>
val x = Math.random();
val y = Math.random();
if (x*x + y*y < 1) 1 else 0
}.reduce(_ + _);
val pi = 4.0 * count / NUM_SAMPLES;
println(\"Pi is roughly \" + pi)
"""
def test_scala(scalasession):
z = scalasession.run('1 + 1')
assert z == 'res0: Int = 2'
z = scalasession.run(pi_scala)
assert 'Pi is roughly 3.1' in z
scala_error = """
val NUM = 1000
val count = abc.NUM
"""
def test_scala_error(scalasession):
try:
z = scalasession.run(scala_error)
except SparkSessionError as e:
print(e)
def test_pyspark():
sess = SparkSession(livy_server_url, kind='pyspark')
z = sess.run('1 + 1')
assert z == '2'
z = sess.run('import math; math.sqrt(2.0)')
assert z.startswith('1.4142')
| python |
from benchbuild.projects.benchbuild.group import BenchBuildGroup
from benchbuild.utils.wrapping import wrap
from benchbuild.settings import CFG
from benchbuild.utils.compiler import lt_clang, lt_clang_cxx
from benchbuild.utils.downloader import Git
from benchbuild.utils.run import run
from benchbuild.utils.versions import get_git_hash
from plumbum import local
from benchbuild.utils.cmd import make, mkdir, tar
from functools import partial
from os import path
class SpiderMonkey(BenchBuildGroup):
"""
SpiderMonkey requires a legacy version of autoconf: autoconf-2.13
"""
NAME = 'js'
DOMAIN = 'compilation'
src_uri = "https://github.com/mozilla/gecko-dev.git"
src_dir = "gecko-dev.git"
version = get_git_hash(src_uri)
    if version is None:
VERSION = None
elif len(version) <= 7:
VERSION = str(version)
else:
VERSION = str(version)[:7]
def download(self):
Git(self.SRC_FILE, self.src_dir)
def configure(self):
js_dir = path.join(self.src_dir, "js", "src")
clang = lt_clang(self.cflags, self.ldflags, self.compiler_extension)
clang_cxx = lt_clang_cxx(self.cflags, self.ldflags,
self.compiler_extension)
with local.cwd(js_dir):
make_src_pkg = local["./make-source-package.sh"]
with local.env(DIST=self.builddir,
MOZJS_MAJOR_VERSION=0,
MOZJS_MINOR_VERSION=0,
MOZJS_PATCH_VERSION=0):
make_src_pkg()
mozjs_dir = "mozjs-0.0.0"
tar("xfj", mozjs_dir + ".tar.bz2")
with local.cwd(path.join(mozjs_dir, "js", "src")):
mkdir("obj")
autoconf = local["autoconf-2.13"]
autoconf()
with local.cwd("obj"):
with local.env(CC=str(clang),
CXX=str(clang_cxx)):
configure = local["../configure"]
run(configure)
def build(self):
mozjs_dir = path.join("mozjs-0.0.0", "js", "src", "obj")
with local.cwd(mozjs_dir):
run(make["-j", CFG["jobs"].value()])
def run_tests(self, experiment, run):
mozjs_dir = path.join("mozjs-0.0.0", "js", "src", "obj")
wrap(path.join(mozjs_dir, "js", "src", "shell", "js"),
partial(experiment, may_wrap=False))
with local.cwd(mozjs_dir):
run(make["check-jstests"])
| python |
# Elaine Laguerta (github: @elaguerta)
# LBNL GIG
# File created: 28 May 2021
# Smell tests to verify Solution API functions
from gigpower.solution import Solution
from gigpower.solution_dss import SolutionDSS
from gigpower.solution_fbs import SolutionFBS
from gigpower.solution_nr3 import SolutionNR3
from gigpower.utils import get_nominal_bus_powers
import pytest
from pathlib import Path
import opendssdirect as dss
import pandas as pd
DSS_FILE_DIR = Path('./tests/test_feeders/')
@pytest.mark.parametrize(
"dss_file",
[
('IEEE_13_Bus_allwye.dss'),
('IEEE_13_Bus_allwye_noxfm_noreg.dss'),
('IEEE_34_Bus_allwye.dss'),
('IEEE_34_Bus_allwye_noxfm_noreg.dss'),
('IEEE_37_Bus_allwye.dss'),
('IEEE_37_Bus_allwye_noxfm_noreg.dss')
]
)
@pytest.mark.parametrize(
"algorithm",
[
(SolutionNR3),
(SolutionFBS),
(SolutionDSS)
]
)
class TestSolutionDFs:
def get_solution(self, dss_file, algorithm):
fp = str(Path(DSS_FILE_DIR, dss_file))
solution = algorithm(str(fp))
solution.solve()
return solution
def test_dfs(self, dss_file, algorithm):
"""
Run calls to get Solution.V, Solution.I, Solution.sV, Solution.VMag
as data frames
"""
solution = self.get_solution(dss_file, algorithm)
for param in Solution.SOLUTION_PARAMS:
df = solution.get_data_frame(param)
pytest.assume(not(df.empty)) # make sure df is not empty
def test_dfs_orient(self, dss_file, algorithm):
"""
Run calls to get solution params (Solution.V, Solution.I, Solution.sV,
Solution.VMag, Solution.Stx, Solution.Srx)
as data frames with both orientations (rows, columns) and make sure
that they have transposed shapes
"""
solution = self.get_solution(dss_file, algorithm)
for param in Solution.SOLUTION_PARAMS:
df_rows = solution.get_data_frame(param, orient='rows')
df_cols = solution.get_data_frame(param, orient='cols')
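            # shape[-1::-1] reverses the tuple, so (n, 3) rows must match (3, n) cols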
pytest.assume(df_rows.shape[-1::-1] == df_cols.shape)
# check that 3 phases are oriented correctly
pytest.assume(df_rows.shape[1] == 3)
pytest.assume(df_cols.shape[0] == 3)
def test_nominals(self, dss_file, algorithm):
"""
Make sure that Circuit class's nominal powers match those from
opendss' api
"""
solution = self.get_solution(dss_file, algorithm)
solution_nominals = solution.get_nominal_bus_powers(orient='rows')
# get a fresh dss object for each new dss file
fp = str(Path(DSS_FILE_DIR, dss_file))
dss.run_command('Redirect ' + fp)
dss.Solution.Solve()
dss_nominals = get_nominal_bus_powers(dss)
pd.testing.assert_frame_equal(solution_nominals, dss_nominals)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Django Models documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 29 06:50:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.insert(0, PROJECT_ROOT)
from django_models import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
"sphinx.ext.intersphinx",
"sphinx.ext.ifconfig",
"sphinx.ext.graphviz",
"sphinx.ext.githubpages",
"sphinxjp.themes.basicstrap",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Django Models'
copyright = '2021, Rafael Henter'
author = 'Rafael Henter'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'pt_BR'
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
pygments_style = None
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# if not on_rtd:
# import sphinx_rtd_theme
#
# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- Options for HTML output -------------------------------------------------
html_theme = 'basicstrap'
html_theme_options = {
# Set the lang attribute of the html tag. Defaults to 'en'
'lang': language,
# Disable showing the sidebar. Defaults to 'false'
'nosidebar': False,
    # Show header searchbox. Defaults to false. Works only when "nosidebar=True".
'header_searchbox': False,
# Put the sidebar on the right side. Defaults to false.
'rightsidebar': False,
# Set the width of the sidebar. Defaults to 3
'sidebar_span': 3,
# Fix navbar to top of screen. Defaults to true
'nav_fixed_top': True,
# Fix the width of the sidebar. Defaults to false
'nav_fixed': False,
# Set the width of the sidebar. Defaults to '900px'
'nav_width': '900px',
# Fix the width of the content area. Defaults to false
'content_fixed': False,
# Set the width of the content area. Defaults to '900px'
'content_width': '900px',
# Fix the width of the row. Defaults to false
'row_fixed': False,
# Disable the responsive design. Defaults to false
'noresponsive': False,
# Disable the responsive footer relbar. Defaults to false
'noresponsiverelbar': False,
# Disable flat design. Defaults to false.
# Works only "bootstrap_version = 3"
'noflatdesign': False,
# Enable Google Web Font. Defaults to false
'googlewebfont': False,
# Set the URL of Google Web Font's CSS.
# Defaults to 'http://fonts.googleapis.com/css?family=Text+Me+One'
'googlewebfont_url': 'http://fonts.googleapis.com/css?family=Lily+Script+One', # NOQA
# Set the Style of Google Web Font's CSS.
# Defaults to "font-family: 'Text Me One', sans-serif;"
    'googlewebfont_style': u"font-family: 'Lily Script One', cursive;",
# Set 'navbar-inverse' attribute to header navbar. Defaults to false.
'header_inverse': False,
# Set 'navbar-inverse' attribute to relbar navbar. Defaults to false.
'relbar_inverse': False,
# Enable inner theme by Bootswatch. Defaults to false
'inner_theme': False,
    # Set the name of the inner theme. Defaults to 'bootswatch-simplex'
'inner_theme_name': 'bootswatch-simplex',
# Select Twitter bootstrap version 2 or 3. Defaults to '3'
'bootstrap_version': '3',
# Show "theme preview" button in header navbar. Defaults to false.
'theme_preview': False,
# Set the Size of Heading text. Defaults to None
# 'h1_size': '3.0em',
# 'h2_size': '2.6em',
# 'h3_size': '2.2em',
# 'h4_size': '1.8em',
# 'h5_size': '1.4em',
# 'h6_size': '1.1em',
}
# html_sidebars = {"**": ["sidebar.html"]}
show_related = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': [
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# ]
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-api-client-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoApiClient.tex', 'Django Models Documentation',
'Rafael Henter', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'DjangoApiClient', 'Django Models Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoApiClient', 'Django Models Documentation',
author, 'DjangoApiClient',
     'Django Models is a client for APIs in general, which allows interacting with the API as if it were a local model in the project, through a client and custom CBVs (class-based views).',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| python |
load("@bazel_tools//tools/jdk:toolchain_utils.bzl", "find_java_runtime_toolchain", "find_java_toolchain")
def _proto_path(proto):
"""
The proto path is not really a file path
It's the path to the proto that was seen when the descriptor file was generated.
"""
path = proto.path
root = proto.root.path
ws = proto.owner.workspace_root
if path.startswith(root):
path = path[len(root):]
if path.startswith("/"):
path = path[1:]
if path.startswith(ws):
path = path[len(ws):]
if path.startswith("/"):
path = path[1:]
return path
def _protoc_cc_output_files(proto_file_sources):
cc_hdrs = []
cc_srcs = []
for p in proto_file_sources:
basename = p.basename[:-len(".proto")]
cc_hdrs.append(basename + ".pb.h")
cc_hdrs.append(basename + ".pb.validate.h")
cc_srcs.append(basename + ".pb.cc")
cc_srcs.append(basename + ".pb.validate.cc")
return cc_hdrs + cc_srcs
def _proto_sources(ctx):
protos = []
for dep in ctx.attr.deps:
protos += [f for f in dep[ProtoInfo].direct_sources]
return protos
def _output_dir(ctx):
dir_out = ctx.genfiles_dir.path
if ctx.label.workspace_root:
dir_out += "/" + ctx.label.workspace_root
return dir_out
def _protoc_gen_validate_cc_impl(ctx):
"""Generate C++ protos using protoc-gen-validate plugin"""
protos = _proto_sources(ctx)
cc_files = _protoc_cc_output_files(protos)
out_files = [ctx.actions.declare_file(out) for out in cc_files]
dir_out = _output_dir(ctx)
args = [
"--cpp_out=" + dir_out,
"--validate_out=lang=cc:" + dir_out,
]
return _protoc_gen_validate_impl(
ctx = ctx,
lang = "cc",
protos = protos,
out_files = out_files,
protoc_args = args,
package_command = "true",
)
def _protoc_python_output_files(proto_file_sources):
python_srcs = []
for p in proto_file_sources:
basename = p.basename[:-len(".proto")]
        python_srcs.append(basename.replace("-", "_") + "_pb2.py")
return python_srcs
def _protoc_gen_validate_python_impl(ctx):
"""Generate Python protos using protoc-gen-validate plugin"""
protos = _proto_sources(ctx)
python_files = _protoc_python_output_files(protos)
out_files = [ctx.actions.declare_file(out) for out in python_files]
dir_out = _output_dir(ctx)
args = [
"--python_out=" + dir_out,
]
return _protoc_gen_validate_impl(
ctx = ctx,
lang = "python",
protos = protos,
out_files = out_files,
protoc_args = args,
package_command = "true",
)
def _protoc_gen_validate_impl(ctx, lang, protos, out_files, protoc_args, package_command):
protoc_args.append("--plugin=protoc-gen-validate=" + ctx.executable._plugin.path)
dir_out = ctx.genfiles_dir.path
if ctx.label.workspace_root:
dir_out += "/" + ctx.label.workspace_root
tds = depset([], transitive = [dep[ProtoInfo].transitive_descriptor_sets for dep in ctx.attr.deps])
descriptor_args = [ds.path for ds in tds.to_list()]
if len(descriptor_args) != 0:
protoc_args += ["--descriptor_set_in=%s" % ctx.configuration.host_path_separator.join(descriptor_args)]
package_command = package_command.format(dir_out = dir_out)
ctx.actions.run_shell(
outputs = out_files,
inputs = protos + tds.to_list(),
tools = [ctx.executable._plugin, ctx.executable._protoc],
command = " && ".join([
ctx.executable._protoc.path + " $@",
package_command,
]),
arguments = protoc_args + [_proto_path(proto) for proto in protos],
mnemonic = "ProtoGenValidate" + lang.capitalize() + "Generate",
use_default_shell_env = True,
)
return struct(
files = depset(out_files),
)
cc_proto_gen_validate = rule(
attrs = {
"deps": attr.label_list(
mandatory = True,
providers = [ProtoInfo],
),
"_protoc": attr.label(
cfg = "host",
default = Label("@com_google_protobuf//:protoc"),
executable = True,
allow_single_file = True,
),
"_plugin": attr.label(
cfg = "host",
default = Label("@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate"),
allow_files = True,
executable = True,
),
},
output_to_genfiles = True,
implementation = _protoc_gen_validate_cc_impl,
)
_ProtoValidateSourceInfo = provider(
fields = {
"sources": "Depset of sources created by protoc with protoc-gen-validate plugin",
},
)
def _create_include_path(include):
return "--proto_path={0}={1}".format(_proto_path(include), include.path)
def _java_proto_gen_validate_aspect_impl(target, ctx):
proto_info = target[ProtoInfo]
includes = proto_info.transitive_imports
srcs = proto_info.direct_sources
options = ",".join(["lang=java"])
srcjar = ctx.actions.declare_file("%s-validate-gensrc.jar" % ctx.label.name)
args = ctx.actions.args()
args.add(ctx.executable._plugin.path, format = "--plugin=protoc-gen-validate=%s")
args.add("--validate_out={0}:{1}".format(options, srcjar.path))
args.add_all(includes, map_each = _create_include_path)
args.add_all(srcs, map_each = _proto_path)
ctx.actions.run(
inputs = depset(transitive = [proto_info.transitive_imports]),
outputs = [srcjar],
executable = ctx.executable._protoc,
arguments = [args],
tools = [ctx.executable._plugin],
progress_message = "Generating %s" % srcjar.path,
)
return [_ProtoValidateSourceInfo(
sources = depset(
[srcjar],
transitive = [dep[_ProtoValidateSourceInfo].sources for dep in ctx.rule.attr.deps],
),
)]
_java_proto_gen_validate_aspect = aspect(
_java_proto_gen_validate_aspect_impl,
provides = [_ProtoValidateSourceInfo],
attr_aspects = ["deps"],
attrs = {
"_protoc": attr.label(
cfg = "host",
default = Label("@com_google_protobuf//:protoc"),
executable = True,
allow_single_file = True,
),
"_plugin": attr.label(
cfg = "host",
default = Label("@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate"),
allow_files = True,
executable = True,
),
},
)
def _java_proto_gen_validate_impl(ctx):
source_jars = [source_jar for dep in ctx.attr.deps for source_jar in dep[_ProtoValidateSourceInfo].sources.to_list()]
deps = [java_common.make_non_strict(dep[JavaInfo]) for dep in ctx.attr.java_deps]
deps += [dep[JavaInfo] for dep in ctx.attr._validate_deps]
java_info = java_common.compile(
ctx,
source_jars = source_jars,
deps = deps,
output_source_jar = ctx.outputs.srcjar,
output = ctx.outputs.jar,
java_toolchain = find_java_toolchain(ctx, ctx.attr._java_toolchain),
host_javabase = find_java_runtime_toolchain(ctx, ctx.attr._host_javabase),
)
return [java_info]
"""Bazel rule to create a Java protobuf validation library from proto sources files.
Args:
deps: proto_library rules that contain the necessary .proto files
java_deps: the java_proto_library of the protos being compiled.
"""
java_proto_gen_validate = rule(
attrs = {
"deps": attr.label_list(
providers = [ProtoInfo],
aspects = [_java_proto_gen_validate_aspect],
mandatory = True,
),
"java_deps": attr.label_list(
providers = [JavaInfo],
mandatory = True,
),
"_validate_deps": attr.label_list(
default = [
Label("@com_envoyproxy_protoc_gen_validate//validate:validate_java"),
Label("@com_google_re2j//jar"),
Label("@com_google_protobuf//:protobuf_java"),
Label("@com_google_protobuf//:protobuf_java_util"),
Label("@com_envoyproxy_protoc_gen_validate//java/pgv-java-stub/src/main/java/io/envoyproxy/pgv"),
Label("@com_envoyproxy_protoc_gen_validate//java/pgv-java-validation/src/main/java/io/envoyproxy/pgv"),
],
),
"_java_toolchain": attr.label(default = Label("@bazel_tools//tools/jdk:current_java_toolchain")),
"_host_javabase": attr.label(
cfg = "host",
default = Label("@bazel_tools//tools/jdk:current_host_java_runtime"),
),
},
fragments = ["java"],
provides = [JavaInfo],
outputs = {
"jar": "lib%{name}.jar",
"srcjar": "lib%{name}-src.jar",
},
implementation = _java_proto_gen_validate_impl,
)
python_proto_gen_validate = rule(
attrs = {
"deps": attr.label_list(
mandatory = True,
providers = ["proto"],
),
"_protoc": attr.label(
cfg = "host",
default = Label("@com_google_protobuf//:protoc"),
executable = True,
allow_single_file = True,
),
"_plugin": attr.label(
cfg = "host",
default = Label("@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate"),
allow_files = True,
executable = True,
),
},
output_to_genfiles = True,
implementation = _protoc_gen_validate_python_impl,
)
| python |
"""
Methods for working with releases, including the releaseObject class
definition live here.
"""
# standard library imports
from datetime import datetime, timedelta
import json
# second party imports
from bson import json_util
from bson.objectid import ObjectId
import flask
import pymongo
# local imports
from app import API, models, utils
def public_router(action):
""" Our "broker" method for accepting public API requests to perform an
action. The endpoints we support here are relatively basic, but we do
support one that handles OIDs, so that gets kind of sticky. """
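    # Illustrative actions handled below (the URL prefix that routes here is
    # defined elsewhere and not shown in this module):
    #   'all' / 'releases' / 'dump' -> every release, newest first
    #   'latest' / 'current'        -> newest published release per platform
    #   'upcoming'                  -> unpublished releases per platform
    #   '<ObjectId>'                -> a single release record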
# set platforms first, since actions below depend on knowing what platforms
# we support
platforms = []
for key, app_dict in API.config['KEYS'].items():
platforms.append(
{
'app': app_dict['owner'],
'api_key': key
}
)
# 1.) first handle misc./public actions that return lists
output = None
if action in ['dump', 'releases','all']:
output = list(utils.mdb.releases.find().sort('created_on', -1))
elif action in ['latest', 'current']:
if flask.request.method == 'POST':
platform = flask.request.get_json().get('platform', None)
if platform is not None:
output = utils.mdb.releases.find_one(
{'platform': platform, 'published': True},
sort=[( 'published_on', pymongo.DESCENDING )]
)
else:
output = []
for platform in platforms:
latest = utils.mdb.releases.find_one(
{'platform': platform['app'], 'published': True},
sort=[( 'published_on', pymongo.DESCENDING )]
)
if latest is not None:
output.append(latest)
elif action in ['upcoming']:
output = []
for platform in platforms:
upcoming = utils.mdb.releases.find(
{
'platform': platform['app'],
'$or': [
{'published': False},
{'published': None}
],
},
sort=[( 'created_on', pymongo.DESCENDING )]
)
if upcoming is not None:
output.extend(upcoming)
elif action == 'platforms':
output = platforms
if output is not None:
return flask.Response(
json.dumps(output, default=json_util.default),
status=200,
mimetype="application/json"
)
# finally, check and see if we're looking for a specific release
    if ObjectId.is_valid(action):
        record = utils.mdb.releases.find_one({'_id': ObjectId(action)})
        if record is not None:
            return flask.Response('got it!', 200)
        return flask.Response('Release not found!', 404)
err = "'%s' method not allowed!" % action
return flask.Response(err, status=405)
def private_router(action):
""" The private version of the previous method. This one handles routes
where we require, at a minimum, a user that is recognized by the API as a
registered user. We also check to see if they're an admin. """
# we need to be an admin to get into here
if not flask.request.User.user.get('admin', False):
return utils.http_403
if action == 'new':
r_obj = releaseObject()
return flask.Response(
json.dumps(r_obj.record, default=json_util.default),
status=200,
mimetype="application/json"
)
# 3.) JSON is required below, so sanity check for it here:
if flask.request.get_json() is None:
err = (
"The '%s' action requires valid JSON in the POST (or is not a "
"valid endpoint)!"
)
raise utils.InvalidUsage(err % action, 422)
release_oid = flask.request.get_json().get('_id', None)
if release_oid is None:
raise utils.InvalidUsage('_id is required!', 422)
r_obj = releaseObject(_id=release_oid['$oid'])
if action == 'update':
r_obj.update()
return flask.Response(
json.dumps(r_obj.record, default=json_util.default),
status=200,
mimetype="application/json"
)
elif action == 'delete':
return flask.Response(
json.dumps(r_obj.delete().raw_result, default=json_util.default),
status=200,
mimetype="application/json"
)
# if we're still here, throw an error, because obviously we've got POST data
# to some oddball/unknown endpoint...
err = "'%s' method not allowed!" % action
return flask.Response(err, status=405)
class releaseObject(models.StructuredObject):
""" The releaseObject class definition. Initialize one of these to work
with a release. Initialize with no arguments to use the values in the
request.json. """
def __init__(self, *args, **kwargs):
""" Initialize with no args to create a new one. """
# first, execute the init of our base class method
        super().__init__(*args, **kwargs)
self.request = flask.request.get_json()
self.logger = utils.get_logger(log_name='admin')
self.mdb = utils.mdb.releases
self.data_model = {
'created_on': datetime,
'created_by': ObjectId,
'modified_on': datetime,
'platform': str,
'version': dict,
'summary': str,
'sections': list,
'items': list,
'details': list,
'published': bool,
'published_on': datetime,
}
self.load() # sets self._id if it isn't set
def __repr__(self):
""" A nice repr string that shows the platform and version. """
return "%s release (%s)" % (self.platform, self.get_version_string())
def load(self):
""" Load a release record. """
if getattr(self, '_id', None) is None:
self.new()
self.record = self.mdb.find_one({'_id': self._id})
if self.record is None:
err = "Release OID '%s' not found!" % self._id
raise utils.InvalidUsage(err, status_code=400)
for key, value in self.data_model.items():
setattr(self, key, self.record.get(key, None))
def new(self):
""" Create a new release record. """
platform = self.request.get('platform', None)
if platform is None:
raise utils.InvalidUsage(
'Platform must be specified when creating a new release!',
status_code=422
)
self.logger.info("Creating a new release for '%s'" % platform)
self._id = self.mdb.insert({})
self.created_on = datetime.now()
self.created_by = flask.request.User._id
self.platform = platform
self.set_latest_version()
self.save()
def update(self):
""" Updates attributes, saves. Uses the request JSON! """
published_pre_update = getattr(self, 'published', False)
# call the base class method; update attrs
super().update(source=flask.request.get_json(), verbose=True)
published_post_update = getattr(self, 'published', False)
# handle published_on logic
if not published_pre_update and published_post_update:
self.published_on = datetime.now()
elif published_pre_update and not published_post_update:
self.published_on = None
# sort things we want to sort
self.sections = sorted(self.sections)
self.modified_on = datetime.now()
self.save(verbose=True)
#
# gets/sets
#
def get_version_string(self):
""" Returns the version dict as a string. """
if self.version is None:
self.version = {}
return "%s.%s.%s" % (
self.version.get('major', 0),
self.version.get('minor', 0),
self.version.get('patch', 0),
)
def set_latest_version(self):
""" Uses self.platform to get the latest release for that platform and
set the current self.version to that release's version. """
# set default
self.version = {'major': 0, 'minor': 0, 'patch': 0}
# try to get latest
latest = self.mdb.find_one(
{'platform': self.platform},
sort=[( 'created_on', pymongo.DESCENDING )]
)
# if latest a.) exists and b.) has a version, use it:
if latest is not None and latest.get('version', None) is not None:
for bit in ['major', 'minor', 'patch']:
self.version[bit] = latest['version'].get(bit, 0)
| python |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import os
import sys
import signal
import time
from datetime import datetime
from datetime import timedelta
# import cv2 as cv
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt  # import matplotlib.pyplot, aliased as plt
import numpy as np  # import numpy, aliased as np
import csv
# Configure matplotlib fonts so Chinese labels render correctly
mpl.matplotlib_fname()
mpl.rcParams[u'font.sans-serif'] = ['simhei']
mpl.rcParams['axes.unicode_minus'] = False
statistics = [[] for col in range(14)]
class Show(object):
def __init__(self, data=None, code='', path='./stocks/', freq='D', name=''):
signal.signal(signal.SIGINT, self.signal_handler)
if path == '':
self.path = './'
else:
self.path = path + '/'
self.name = name
self.code = code
csv_data = pd.read_csv(self.path + self.code + '_price_' +
freq + '.csv', usecols=[2, 3, 10], header=None) # 读取数据
self.data = csv_data.values.tolist()
self.freq = freq
self.colors = {'ma4': 'gold', 'ma9': 'pink',
'ma18': 'blueviolet', 'ma60': 'cyan'}
def signal_handler(self, signal, frame):
sys.exit(0)
def get_position(self):
x = [i[0] for i in self.data]
x.reverse()
# print(x)
# print(len(x))
xs = [datetime.strptime(str(d)[0:-2], '%Y%m%d').date() for d in x]
# print(xs)
y = [i[1] for i in self.data]
y = [round(i, 2) for i in y]
y.reverse()
# print(y)
amount = [i[2] for i in self.data]
amount = [round(i, 2) for i in amount]
amount.reverse()
return xs, y, amount
def get_point(self, xs, y):
price_last = 0
price = 0
high_x = []
high_y = []
low_x = []
low_y = []
for i in range(len(y)):
if i == 1:
if price >= y[i]:
high_x.append(xs[i-1])
high_y.append(price)
elif price <= y[i]:
low_x.append(xs[i-1])
low_y.append(price)
if i == len(y) - 1:
if price <= y[i]:
high_x.append(xs[i])
high_y.append(y[i])
elif price >= y[i]:
low_x.append(xs[i])
low_y.append(y[i])
if price >= y[i] and price >= price_last and price_last != 0:
high_x.append(xs[i-1])
high_y.append(price)
if price <= y[i] and price <= price_last and price_last != 0:
low_x.append(xs[i-1])
low_y.append(price)
price_last = price
price = y[i]
return high_x, high_y, low_x, low_y
def draw_point(self, high_x, high_y, low_x, low_y):
        # Plot and annotate each local high point
        for i in range(len(high_y)):
            plt.scatter(high_x[i], high_y[i], s=25,
                        color='red')  # s is the marker size
            plt.annotate(str(high_y[i]), color='red', xy=(
                high_x[i], high_y[i]+0.003*high_y[i]), fontsize=10, xycoords='data')  # label just above the point
        # Plot and annotate each local low point
        for i in range(len(low_y)):
            plt.scatter(low_x[i], low_y[i], s=25,
                        color='green')  # s is the marker size
            plt.annotate(str(low_y[i]), color='green', xy=(
                low_x[i], low_y[i]-0.007*low_y[i]), fontsize=10, xycoords='data')  # label just below the point
        # plt.text(3.3, 5, "this point very important",
        #          fontdict={'size': 12, 'color': 'green'})  # xycoords='data' means positions are in data coordinates
def draw_high_line(self, high_x, high_y):
plt.plot(high_x, high_y, color='red',
linewidth=1.0, linestyle="--", label="y")
x = high_x
y = high_y
linewidth = 1.0
while len(y) >= 2:
high_x, high_y, temp_x, temp_y = self.get_point(x, y)
x = high_x
y = high_y
linewidth += 0.75
plt.plot(x, y, color='red', linewidth=linewidth,
linestyle="--", label="y")
def draw_low_line(self, low_x, low_y):
plt.plot(low_x, low_y, color='green',
linewidth=1.0, linestyle="--", label="y")
x = low_x
y = low_y
linewidth = 1.0
while len(x) >= 2:
temp_x, temp_y, low_x, low_y = self.get_point(x, y)
x = low_x
y = low_y
linewidth += 0.75
plt.plot(x, y, color='green', linewidth=linewidth,
linestyle="--", label="y")
def get_statistics(self, xs, ys, index, tag, meta):
if index+22 > len(ys)-1 or index < 22:
return
statistics[0].append(self.code)
statistics[1].append(self.name)
statistics[2].append(tag)
statistics[3].append(meta)
statistics[4].append(datetime.strftime(xs[index], "%Y%m%d"))
statistics[5].append(ys[index])
statistics[6].append(ys[index+1])
statistics[7].append(ys[index+2])
statistics[8].append(ys[index+3])
statistics[9].append(ys[index+4])
statistics[10].append(ys[index+5])
statistics[11].append(ys[index+10])
statistics[12].append(ys[index+15])
statistics[13].append(ys[index+22])
# print(statistics)
def amount_price_select(self, xs, ys, amount):
code = self.code + ':'
for i in range(5, len(ys)):
if(ys[i-4] < ys[i-5]) and amount[i-4] < amount[i-5]*0.9:
if(ys[i-3] < ys[i-4]) and amount[i-3] < amount[i-4]*0.9:
if(ys[i-2] < ys[i-3]) and amount[i-2] < amount[i-3]*0.9:
# self.get_statistics(xs, ys, i, 'amount0')
if(ys[i-1] < ys[i-2]) and amount[i-1] < amount[i-2]*0.9:
# self.get_statistics(xs, ys, i, 'amount1')
if(ys[i] > ys[i-1]) and amount[i] > amount[i-1]*1.2:
# self.get_statistics(xs, ys, i, 'amount2')
if (len(ys) - i - 1) < 2:
print(code, self.name,
xs[i], 'amount_price rush!!!')
def price_select(self, xs, ys):
code = self.code + ':'
max = 0
rush = False
rsi6 = self.get_rsi(ys, 6)
rsi12 = self.get_rsi(ys, 12)
for i in range(0, len(ys)):
if ys[i] >= max:
max = ys[i]
# if rush == False:
# rush = True
# self.get_statistics(xs, ys, i, 'price__', 'rush')
# if (len(ys) - i - 1) < 2:
# print(code, self.name, xs[i], 'price rush!!!')
if rsi6[i] > rsi12[i]:
if rush == False and rsi12[i] < 40 and rsi12[i] > 30:
rush = True
self.get_statistics(xs, ys, i, 'test', 'rush')
max = ys[i]
if (len(ys) - i - 1) < 2:
print(code, self.name, xs[i], 'rsi rush!!!')
if ys[i] < max*0.95 and rush == True:
rush = False
self.get_statistics(xs, ys, i, 'test', 'run')
if (len(ys) - i - 1) < 2:
print(code, self.name, xs[i], 'test run!!!')
def get_smooth(self, price, number):
smooth = [0]
for i in range(1, len(price)):
p = price[i]/number+smooth[i-1]*(number-1)/number
smooth.append(p)
return smooth
def get_rsi(self, price, number):
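        # Wilder-style RSI over `number` periods: split each day's change into a
        # gain (`up`) and a loss (`down`), smooth both series with get_smooth,
        # then RSI = 100 * smoothed_gain / (smoothed_gain + smoothed_loss).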
rsi = [0]
up = [0]
down = [0]
for i in range(1, len(price)):
temp = price[i] - price[i-1]
if temp >= 0:
up.append(temp)
down.append(0)
else:
down.append(abs(temp))
up.append(0)
up_smooth = self.get_smooth(up, number)
down_smooth = self.get_smooth(down, number)
for i in range(1, len(price)):
if up_smooth[i] == 0 and down_smooth[i] == 0:
r = rsi[i-1]
else:
r = up_smooth[i]/(up_smooth[i]+down_smooth[i])*100
rsi.append(round(r, 2))
return rsi
def rsi_select(self, xs, ys):
code = self.code + ':'
rsi6 = self.get_rsi(ys, 6)
rsi12 = self.get_rsi(ys, 12)
# rsi24 = self.get_rsi(ys, 24)
rush = False
run = False
for i in range(0, len(ys)):
if rsi6[i] > rsi12[i]:
run = False
if rush == False and rsi12[i] < 40 and rsi12[i] > 30:
rush = True
self.get_statistics(xs, ys, i, 'rsi6_12', 'rush')
max = ys[i]
if (len(ys) - i - 1) < 2:
print(code, self.name, xs[i], 'rsi rush!!!')
if rsi6[i] < rsi12[i]:
rush = False
if run == False and rsi6[i] > 60 and rsi6[i] < 70:
run = True
self.get_statistics(xs, ys, i, 'rsi6_12', 'run')
if (len(ys) - i - 1) < 2:
print(code, self.name, xs[i], 'rsi run!!!')
def get_average(self, price, number):
average = []
index = 0
for i in range(len(price)):
if i < number:
index = 0
else:
index = i-(number-1)
p = price[index:i+1]
average.append(round(np.mean(p), 2))
return average
def average_line_select(self, xs, ys):
ma4 = self.get_average(ys, 4)
ma9 = self.get_average(ys, 9)
ma18 = self.get_average(ys, 18)
# ma60 = self.get_average(ys, 60)
pre_rush = False
rush = False
pre_run = False
run = False
ret = False
code = self.code + ':'
for i in range(0, len(ys)):
# rush
if ma4[i] > ma9[i]:
if pre_rush == False:
pre_rush = True
self.get_statistics(xs, ys, i, 'ma4___9', 'rush')
# if (len(ys) - i - 1) < 2:
# print(code, self.name, xs[i], 'average pre_rush!')
if ma9[i] > ma18[i]:
if rush == False:
rush = True
self.get_statistics(xs, ys, i, 'ma9__18', 'rush')
plt.scatter(xs[i], ys[i], s=50,
                                    color='red')  # s is the marker size
if (len(ys) - i - 1) < 2:
print(code, self.name, xs[i], 'average rush!!!')
ret = True
if ma9[i] < ma18[i]:
rush = False
if ma4[i] < ma9[i]:
if rush == False:
pre_rush = False
# run
if ma4[i] < ma9[i]:
if pre_run == False:
pre_run = True
self.get_statistics(xs, ys, i, 'ma4___9', 'run')
# print(code, xs[i], 'pre_run!')
if ma9[i] < ma18[i]:
if run == False:
run = True
self.get_statistics(xs, ys, i, 'ma9__18', 'run')
plt.scatter(xs[i], ys[i], s=50,
                                    color='green')  # s is the marker size
if (len(ys) - i - 1) < 2:
print(code, self.name, xs[i], 'average run!!!')
if ma9[i] > ma18[i]:
run = False
if ma4[i] > ma9[i]:
if run == False:
pre_run = False
plt.plot(xs, ma4, color=self.colors['ma4'],
linewidth=1.5, linestyle="-", label='ma4')
plt.plot(xs, ma9, color=self.colors['ma9'],
linewidth=1.5, linestyle="-", label='ma9')
plt.plot(xs, ma18, color=self.colors['ma18'],
linewidth=1.5, linestyle="-", label='ma18')
# plt.plot(xs, ma60, color=self.colors['ma60'], linewidth=1.5, linestyle="-", label='ma60')
return ret
def show(self):
        # Create the figure window, 24 x 13.5 inches at 80 dpi
        plt.figure(figsize=(24, 13.5), dpi=80)
        # Add a single 1 x 1 subplot
plt.subplot(111)
# fig1, ax = plt.subplots()
plt.title(self.name)
xs, ys, amount = self.get_position()
flag = False
flag = self.average_line_select(xs, ys)
# self.rsi_select(xs, ys)
self.price_select(xs, ys)
self.amount_price_select(xs, ys, amount)
high_x, high_y, low_x, low_y = self.get_point(xs, ys)
self.draw_point(high_x, high_y, low_x, low_y)
# self.draw_high_line(high_x, high_y)
# self.draw_low_line(low_x, low_y)
plt.plot(xs, ys, color='blue', linewidth=1.0,
linestyle="-", label="price")
        plt.legend(loc='upper left', ncol=2)  # legend
        # Set the x-axis limits
        # plt.xlim(20160818, 20200901)
        # Set the y-axis limits
        # plt.ylim(30, 500)
        # Set the x-axis label
        plt.xlabel("X")
        # Set the y-axis label
        # plt.ylabel("Y")
        # Set exact x-axis ticks
        # plt.xticks([-1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5])
        # Set exact y-axis ticks
        # plt.yticks([-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        # Set exact x-axis ticks (one label every 5 data points, plus the last)
        xticks = list(range(0, len(xs), 5))
        xlabels = [xs[x] for x in xticks]
        xlabels.append(xs[-1])
        plt.xticks(xlabels, rotation=-90)
        # # Set exact y-axis ticks
        # plt.yticks([-2, 0, 2, 4, 6, 8, 10],
        #            ["-2m", "0m", "2m", "4m", "6m", "8m", "10m"])
if flag is True:
plt.savefig(self.path + self.code + '_' +
self.name + '_' + self.freq + '.png')
filename = './statistics.csv'
if not os.path.exists(filename):
with open(filename, 'w') as f:
f_csv = csv.writer(f)
ar2 = [[row[i] for row in statistics]
for i in range(len(statistics[0]))]
f_csv.writerows(ar2)
else:
with open(filename, 'a') as f:
f_csv = csv.writer(f)
ar2 = [[row[i] for row in statistics]
for i in range(len(statistics[0]))]
f_csv.writerows(ar2)
# plt.show(block=False)
# while plt.waitforbuttonpress() == False:
# time.sleep(0.1)
if __name__ == "__main__":
csv_file = sys.argv[1]
freq = 'D'
name = ''
path = './stocks/'
if len(sys.argv) == 5:
path = sys.argv[4]
freq = sys.argv[3]
name = sys.argv[2]
elif len(sys.argv) == 4:
freq = sys.argv[3]
name = sys.argv[2]
elif len(sys.argv) == 3:
name = sys.argv[2]
show = Show(code=csv_file, name=name, freq=freq, path=path)
show.show()
| python |
"""
Class to represent the results of a prediction.
"""
import codecs
import logging
import os
import warnings
from numpy import ndarray
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import \
confusion_matrix, \
recall_score, \
precision_score, \
f1_score, \
accuracy_score
from tabulate import tabulate
from .argmin_components import evaluate_argmin_components
from .argmin_post_processing import relative_2_absolute
from .argmin_relations import evaluate_argmin_relations
from .metrics import compute_f1, compute_precision, compute_recall, pre_process
from .seq_2_seq_metrics import word_accuracy, edit_distance
from ..config.TaskConfig import TaskConfig
from ..constants import ENCODING_NONE, METRIC_ACCURACY, METRIC_F1, METRIC_PRECISION, METRIC_RECALL, TASK_TYPE_AM, \
METRIC_WORD_ACCURACY, METRIC_F1_O, METRIC_F1_B, \
METRIC_PRECISION_O, METRIC_PRECISION_B, METRIC_RECALL_O, METRIC_RECALL_B, METRIC_AM_COMPONENTS_05, \
METRIC_AM_COMPONENTS_0999, METRIC_AM_RELATIONS_05, METRIC_AM_RELATIONS_0999, METRIC_AVG_EDIT_DISTANCE, \
METRIC_MEDIAN_EDIT_DISTANCE
from ..data.Sample import Sample
from ..util import swap_dict
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
class ResultList(list):
"""
Class to represent the results of a prediction.
"""
def __init__(self, result_tuples, label_2_idx, task=None):
"""
Initialize a result list.
Creates swapped mapping functions and populates the internal list.
The list contains tuples with the following entries:
* Sentence with actual tokens
* Predicted labels as strings
* Gold labels as strings
* Sentence with indices
* Predicted labels with indices
* Gold labels with indices
* Sample object
Args:
result_tuples (`list` of `tuple` of object): A list of results represented as tuples consisting of
(sentence, gold label, predicted label, sample object). The sample object can be used to restore the
original sentence (words).
label_2_idx (`dict` of int): A mapping from label names to indices.
task (TaskConfig): The task to which the results belong to
"""
assert isinstance(label_2_idx, dict)
assert isinstance(task, TaskConfig)
logger = logging.getLogger("shared.result_list.init")
list.__init__(self)
self.label_2_idx = label_2_idx
self.idx_2_label = swap_dict(label_2_idx)
self.task = task
logger.debug("Initializing a result list for %d sentences", len (result_tuples))
for sentence, gold_labels, predicted_labels, sample in result_tuples:
assert isinstance(sample, Sample)
assert len(sentence) == len(gold_labels) == len(predicted_labels)
word_sentence = sample.raw_tokens
word_gold_labels = sample.raw_labels
docid = sample.docid
word_predicted_labels = [self.idx_2_label[idx] for idx in predicted_labels]
# Removal of padding if necessary
if len(word_sentence) != len(sentence):
# logger.debug("There is a padded sentence. Remove padding.")
# The raw sentence as stored in the sample object has the true length
true_length = len(word_sentence)
sentence = sentence[:true_length]
gold_labels = gold_labels[:true_length]
predicted_labels = predicted_labels[:true_length]
self.append((
word_sentence,
word_predicted_labels,
word_gold_labels,
sentence,
predicted_labels,
gold_labels,
sample
))
def get_true_and_pred(self):
"""
From the unmasked data in the result list, create a list of predictions and a list of truths.
Returns:
`tuple` of `list` of str: A tuple consisting of the truths and the predictions (in this order).
"""
y_true = []
y_pred = []
for _, pred, gold, _, _, _, sample in self:
for pred_label, gold_label in zip(pred, gold):
y_true.append(gold_label)
y_pred.append(pred_label)
return y_true, y_pred
def get_true_and_pred_sentences(self, word=False):
"""
Retrieve all true and predicted sentence labels. If `word` is True, retrieve the word representation for labels.
Otherwise, retrieve the index representation. The latter is required for calculating metrics on BIO.
Args:
word (bool): Whether to use word or index representations for the labels.
Returns:
`tuple` of `list` of `list` of str or `tuple` of `list` of `list` of int: A tuple consisting of gold label
sentences and predictions (in this order).
"""
true_sentences = []
predicted_sentences = []
for entry in self:
if word:
predicted_sentences.append(entry[1])
true_sentences.append(entry[2])
else:
predicted_sentences.append(entry[4])
true_sentences.append(entry[5])
return true_sentences, predicted_sentences
def confusion_matrix(self):
"""
Compute the confusion matrix for the result list.
Returns:
Confusion matrix
"""
y_true, y_pred = self.get_true_and_pred()
return confusion_matrix(y_true, y_pred, labels=list(self.idx_2_label.values()))
def print_confusion_matrix(self, matrix=None):
"""
Generate a ASCII representation for the confusion matrix.
Args:
matrix: A confusion matrix.
Returns:
A well-formatted confusion matrix.
"""
if matrix is None:
matrix = self.confusion_matrix()
if isinstance(matrix, ndarray):
matrix = matrix.tolist()
labels = list(self.idx_2_label.values())
for row_idx in range(len(matrix)):
# Prepend label for rows
matrix[row_idx] = [labels[row_idx]] + matrix[row_idx]
        print(tabulate(matrix, headers=labels))
def accuracy(self):
"""
See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.accuracy_score
Returns:
float: accuracy score
"""
y_true, y_pred = self.get_true_and_pred()
return accuracy_score(y_true, y_pred)
def precision(self, correct_bio_errors="No"):
"""
Calculate the precision. If the task uses BIO, IOB or IOBES encoding, a special calculation method is used.
Otherwise, we fall back to the scikit learn implementation.
Args:
correct_bio_errors (str): If this is set to "O" or "B", a correction of incorrect "I-" labels is performed.
See `metrics.py` for further details.
Returns:
float: precision score
"""
if self.task is None or self.task.encoding == ENCODING_NONE:
# Not BIO, IOB or IOBES
y_true, y_pred = self.get_true_and_pred()
return precision_score(y_true, y_pred, labels=list(self.idx_2_label.values()), average="macro")
else:
y_true, y_pred = self.get_true_and_pred_sentences(word=False)
y_true, y_pred = pre_process(
y_pred,
y_true,
self.idx_2_label,
correct_bio_errors=correct_bio_errors,
encoding_scheme=self.task.encoding
)
return compute_precision(y_pred, y_true)
def recall(self, correct_bio_errors="No"):
"""
        Calculate the recall. If the task uses BIO, IOB or IOBES encoding, a special calculation method is used.
Otherwise, we fall back to the scikit learn implementation.
Args:
correct_bio_errors (str): If this is set to "O" or "B", a correction of incorrect "I-" labels is performed.
See `metrics.py` for further details.
Returns:
            float: recall score
"""
if self.task is None or self.task.encoding == ENCODING_NONE:
# Not BIO, IOB or IOBES
y_true, y_pred = self.get_true_and_pred()
return recall_score(y_true, y_pred, labels=list(self.idx_2_label.values()), average="macro")
else:
y_true, y_pred = self.get_true_and_pred_sentences(word=False)
y_true, y_pred = pre_process(
y_pred,
y_true,
self.idx_2_label,
correct_bio_errors=correct_bio_errors,
encoding_scheme=self.task.encoding
)
return compute_recall(y_pred, y_true)
def f1(self, correct_bio_errors="No"):
"""
        Calculate the F1 score. If the task uses BIO, IOB or IOBES encoding, a special calculation method is used.
Otherwise, we fall back to the scikit learn implementation.
Args:
correct_bio_errors (str): If this is set to "O" or "B", a correction of incorrect "I-" labels is performed.
See `metrics.py` for further details.
Returns:
            float: f1 score
"""
if self.task is None or self.task.encoding == ENCODING_NONE:
# Not BIO, IOB or IOBES
y_true, y_pred = self.get_true_and_pred()
return f1_score(y_true, y_pred, labels=list(self.idx_2_label.values()), average="macro")
else:
y_true, y_pred = self.get_true_and_pred_sentences(word=False)
y_true, y_pred = pre_process(
y_pred,
y_true,
self.idx_2_label,
correct_bio_errors=correct_bio_errors,
encoding_scheme=self.task.encoding
)
return compute_f1(y_pred, y_true)
def argmin_components(self, ratio=0.5):
"""
Calculate the AM components score at the specified ratio.
Args:
ratio (float): Ratio for score calculation.
Returns:
float: f1 score
"""
conll_list = self.as_conll_list()
prediction_list = relative_2_absolute(conll_list, 0, 2)
truth_list = relative_2_absolute(conll_list, 0, 1)
result = evaluate_argmin_components(prediction_list, truth_list, 2, 2, ratio=ratio)
return result[3]
def argmin_relations(self, ratio=0.5):
"""
Calculate the AM relations score at the specified ratio.
Args:
ratio (float): Ratio for score calculation.
Returns:
float: f1 score
"""
conll_list = self.as_conll_list()
prediction_list = relative_2_absolute(conll_list, 0, 2)
truth_list = relative_2_absolute(conll_list, 0, 1)
result = evaluate_argmin_relations(prediction_list, truth_list, 2, 2, ratio=ratio)
return result[3]
def word_accuracy(self):
"""
Calculate the word accuracy.
Use this only for seq2seq tasks.
Returns:
float: word accuracy
"""
y_true, y_pred = self.get_true_and_pred_sentences(word=True)
return word_accuracy(y_pred, y_true)
def edit_distance(self, mode="avg"):
"""
Calculate the edit distance.
Use this only for seq2seq tasks.
Args:
mode (str, optional): How to combine the edit distances of the words. Valid options are "avg" and "median".
Defaults to "avg".
Returns:
float: average edit distance
"""
assert mode in ["avg", "median"]
y_true, y_pred = self.get_true_and_pred_sentences(word=True)
return edit_distance(y_pred, y_true, mode)
def compute_metric_by_name(self, metric_name):
"""
Compute the metric identified by `metric_name`. If the metric name is unknown,
a value error is raised.
Args:
metric_name (str): The name of a metric.
Returns:
float: metric value
"""
if metric_name == METRIC_ACCURACY:
return self.accuracy()
elif metric_name == METRIC_F1:
return self.f1()
elif metric_name == METRIC_F1_O:
return self.f1(correct_bio_errors="O")
elif metric_name == METRIC_F1_B:
return self.f1(correct_bio_errors="B")
elif metric_name == METRIC_PRECISION:
return self.precision()
elif metric_name == METRIC_PRECISION_O:
return self.precision(correct_bio_errors="O")
elif metric_name == METRIC_PRECISION_B:
return self.precision(correct_bio_errors="B")
elif metric_name == METRIC_RECALL:
return self.recall()
elif metric_name == METRIC_RECALL_O:
return self.recall(correct_bio_errors="O")
elif metric_name == METRIC_RECALL_B:
return self.recall(correct_bio_errors="B")
elif metric_name == METRIC_AM_COMPONENTS_05:
return self.argmin_components(ratio=0.5)
elif metric_name == METRIC_AM_COMPONENTS_0999:
return self.argmin_components(ratio=0.999)
elif metric_name == METRIC_AM_RELATIONS_05:
return self.argmin_relations(ratio=0.5)
elif metric_name == METRIC_AM_RELATIONS_0999:
            return self.argmin_relations(ratio=0.999)
elif metric_name == METRIC_WORD_ACCURACY:
return self.word_accuracy()
elif metric_name == METRIC_AVG_EDIT_DISTANCE:
return self.edit_distance(mode="avg")
elif metric_name == METRIC_MEDIAN_EDIT_DISTANCE:
return self.edit_distance(mode="median")
else:
raise ValueError("Metric with name %s is not supported by this method." % metric_name)
def as_conll_list(self, delimiter="\t"):
"""
        Build a document in CoNLL format, but each line is a separate string within
a list.
Args:
delimiter (str, optional): Which character is used as a column separator. Defaults to tab (`\t`).
Returns:
`list` of str: A list of lines in CoNLL format (token truth prediction).
"""
output = []
for x, y, gold, _, _, _, sample in self:
#print(sample.docid)
docid = ""
            if sample.docid is not None:
docid = sample.docid
output.append(docid)
for i in range(len(x)):
output.append(delimiter.join([x[i], gold[i], y[i]]))
# Add empty line to separate sentences
output.append("")
return output
def __str__(self):
"""
Build a string representation for an instance of the result list class.
Returns:
            Data in CoNLL format with predicted labels in the last row.
"""
return "\n".join(self.as_conll_list())
def predictions_to_file(self, prediction_dir_path, filename):
"""
Write predictions to a file.
If the task is AM, two files are written that adhere to the format used by SE and JD.
Args:
prediction_dir_path (str): Path to prediction directory.
filename (str): Prediction filename
"""
assert os.path.exists(prediction_dir_path), "Expected that prediction directory path exists"
assert os.path.isdir(prediction_dir_path), "Expected that prediction directory path points to a directory"
logger = logging.getLogger("shared.result_list.predictions_to_file")
logger.debug("Writing predictions to file(s)")
if self.task and self.task.type == TASK_TYPE_AM:
pred_file_path = os.path.join(prediction_dir_path, filename + ".pred.corr.abs")
gold_file_path = os.path.join(prediction_dir_path, filename + ".truth.corr.abs")
logger.debug("Files: %s", [pred_file_path, gold_file_path])
conll_list = self.as_conll_list()
prediction_list = relative_2_absolute(conll_list, 0, 2)
truth_list = relative_2_absolute(conll_list, 0, 1)
with codecs.open(pred_file_path, mode="w", encoding="utf8") as f:
f.write("\n".join(prediction_list))
with codecs.open(gold_file_path, mode="w", encoding="utf8") as f:
f.write("\n".join(truth_list))
else:
file_path = os.path.join(prediction_dir_path, filename)
logger.debug("File: %s", file_path)
with codecs.open(file_path, mode="w", encoding="utf8") as f:
f.write(self.__str__())
def metrics_as_list(self):
"""
Provides the performance metrics for the result list as a list (useful for storing in CSV format).
Entries in the list:
* Number of performed predictions
* Number of correct predictions
* Number of incorrect predictions
* Accuracy
* Precision
* Recall
* F1 score
Returns:
`list` of int or `list` of float: List of metrics
"""
y_true, y_pred = self.get_true_and_pred()
num_total = len(y_true)
num_correct = len([1 for t, p in zip(y_true, y_pred) if t == p])
num_false = num_total - num_correct
return [
num_total,
num_correct,
num_false,
self.accuracy(),
self.precision(),
self.recall(),
self.f1()
]
| python |
import server_socket
import threading
class Microphone(object):
def __init__(self, host, port, steer):
self.steer = steer
self.socket = server_socket.Server(host, port)
self.client = self.socket.Get_Client()
def Recv(self) :
while True :
            # Loop in this thread and keep updating the steer object's microphone value
speech = self.client.recv(128).decode()
print('speech', speech)
self.steer.Set_Microphone(speech)
def Run(self) :
        # Declare a thread so this code runs in parallel
        # The thread executes the function given as `target`
        # If the target function needs input parameters, pass them via `args`
mic_thread = threading.Thread(target=self.Recv, args=())
mic_thread.start()
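# Minimal usage sketch (assumes a hypothetical `steer` object exposing
# Set_Microphone and a client that connects to the given host/port);
# not part of the original module:
#
#     mic = Microphone('0.0.0.0', 9999, steer)
#     mic.Run()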
| python |
#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""This simulates a real job by producing a lot of output."""
from __future__ import print_function
__author__ = '[email protected] (Ahmad Sharif)'
import time
def Main():
"""The main function."""
for j in range(10):
for i in range(10000):
print(str(j) + 'The quick brown fox jumped over the lazy dog.' + str(i))
time.sleep(60)
return 0
if __name__ == '__main__':
Main()
| python |
############## Configurator for command line programs
#### tests
# 2016 Portia Frances Limited for UBS
# Author: Thomas Haederle
import logging
logger = logging.getLogger(__name__)
import pytest
#from nose.tools import *
from configurator import Configurator
def test_configurator_initialize():
conf = Configurator("This is a test")
assert conf.description == "This is a test"
def test_configurator_standardparse():
conf = Configurator()
args = conf.parser.parse_args()
print(args)
assert args.cobdate
assert args.rundate
assert args.mode
def test_configurator_commandconfig():
conf = Configurator()
assert conf.commandconfig is None
args = conf.configureCommandline()
assert conf.commandconfig
def test_configurator_setupAppconfig():
conf = Configurator()
#assert conf.appconfig is None
args = conf.setupAppconfig()
assert conf.appconfig
def test_configurator_setuplogger():
conf = Configurator()
rootlogger = conf.setupLogger()
    assert type(rootlogger) == logging.RootLogger  # the returned logger should be a RootLogger
def test_cobdate():
conf = Configurator()
args = conf.configureCommandline()
assert conf.commandconfig["cobdate"]
assert conf.commandconfig["rundate"]
def test_cobdate_appconfig():
conf = Configurator()
args = conf.setupAppconfig()
assert conf.appconfig.cobdate
assert conf.appconfig.rundate | python |
def signed8(b):
if b > 127:
return -256 + b
else:
return b
def signed16(v):
v &= 0xFFFF
if v > 0x7FFF:
return - 0x10000 + v
else:
return v
def signed24(v):
v &= 0xFFFFFF
if v > 0x7FFFFF:
return - 0x1000000 + v
else:
return v
def read_signed(stream, n):
byte = bytearray(stream.read(n))
signed_bytes = []
for b in byte:
signed_bytes.append(signed8(b))
return signed_bytes
def read_sint_8(stream):
byte = bytearray(stream.read(1))
    if len(byte) == 1:
return signed8(byte[0])
return None
def read_int_8(stream):
byte = bytearray(stream.read(1))
    if len(byte) == 1:
return byte[0]
return None
def read_int_16le(stream):
byte = bytearray(stream.read(2))
    if len(byte) == 2:
return (byte[0] & 0xFF) + ((byte[1] & 0xFF) << 8)
return None
def read_int_16be(stream):
byte = bytearray(stream.read(2))
    if len(byte) == 2:
return (byte[1] & 0xFF) + ((byte[0] & 0xFF) << 8)
return None
def read_int_24le(stream):
b = bytearray(stream.read(3))
    if len(b) == 3:
return (b[0] & 0xFF) + ((b[1] & 0xFF) << 8) + \
((b[2] & 0xFF) << 16)
return None
def read_int_24be(stream):
b = bytearray(stream.read(3))
    if len(b) == 3:
return (b[2] & 0xFF) + ((b[1] & 0xFF) << 8) + \
((b[0] & 0xFF) << 16)
return None
def read_int_32le(stream):
b = bytearray(stream.read(4))
    if len(b) == 4:
return (b[0] & 0xFF) + ((b[1] & 0xFF) << 8) + \
((b[2] & 0xFF) << 16) + ((b[3] & 0xFF) << 24)
return None
def read_int_32be(stream):
b = bytearray(stream.read(4))
    if len(b) == 4:
return (b[3] & 0xFF) + ((b[2] & 0xFF) << 8) + \
((b[1] & 0xFF) << 16) + ((b[0] & 0xFF) << 24)
return None
def read_string_8(stream, length):
byte = stream.read(length)
try:
return byte.decode('utf8')
except UnicodeDecodeError:
        return None  # Contains bytes > 127 that are not valid UTF-8.
def read_string_16(stream, length):
byte = stream.read(length)
try:
return byte.decode('utf16')
except UnicodeDecodeError:
return None
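# A minimal usage sketch (not part of the original module): exercises the
# little-endian readers and the signed conversion helpers against an
# in-memory stream; the byte values below are purely illustrative.
if __name__ == '__main__':
    import io
    stream = io.BytesIO(bytes([0xFF, 0x7F, 0x01, 0x00, 0x00, 0x80]))
    print(read_int_16le(stream))            # 0x7FFF -> 32767
    print(read_int_16le(stream))            # 0x0001 -> 1
    print(signed16(read_int_16le(stream)))  # 0x8000 -> -32768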
| python |
# import geopandas
# from utils.common import load_shape
# from pathlib import Path
# import sys
# sys.path.append(str(Path(__file__).parent.parent))
# from configs import server_config
# # from shapely.geometry import shape
# from db_connection import DBConnection
# from alchemy import Eez
# import shapely.geometry as sh
# eez = load_shape(server_config.EEZ_GEOJSON)
# eez = geopandas.GeoDataFrame(eez)
# # eez['geometry'] = [shape(e) for e in eez['geometry']]
# db = DBConnection() # Database Object
# for row in eez.itertuples():
# sovs = [row.properties[sov] for sov in ['SOVEREIGN1', 'SOVEREIGN2', 'SOVEREIGN3'] if row.properties[sov] is not None]
# geom = row.geometry
# # geom = geom.update({"crs" : {"properties" : {"name" : "urn:ogc:def:crs:EPSG:8.8.1:4326"}}}) # This is equivalent to the existing projectionn, but is recognized by postgres as mappable, so slightly preferred.
# e = Eez(
# mrgid=int(row.properties['MRGID']),
# geoname=row.properties['GEONAME'],
# pol_type=row.properties['POL_TYPE'],
# sovereigns=sovs,
# geometry="SRID=4326;"+sh.shape(row.geometry).wkt
# )
# db.sess.add(e)
# db.sess.commit()
# db.sess.close() | python |