| content (stringlengths 0-894k) | type (stringclasses 2 values) |
|---|---|
# Write to a text file
# Open the file, write the message, and close the file
f = open("output.txt", "w")
message = "Hi all! Welcome from CEEO Innovations!"
chars_written = f.write(message)  # write() returns the number of characters written
f.close()
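# A safer variant of the same write, sketched with a context manager so the
# file is closed even if write() raises (same output.txt as above):
with open("output.txt", "w") as f:
    f.write("Hi all! Welcome from CEEO Innovations!")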
|
python
|
#!/usr/bin/env python3
import argparse
import os
cgroup = '/sys/fs/cgroup'
class Containers(object):
def __init__(self, glob_dir: str = 'devices/lxc') -> None:
self.containers = []
for name in filter(lambda d: os.path.isdir(os.path.join(cgroup, glob_dir, d)),
os.listdir(os.path.join(cgroup, glob_dir))):
self.containers.append(Container(name))
def cpu_usage(self) -> float:
"""Get sum of all containers cpu usage"""
return sum(map(lambda c: c.get_cpu, self.containers))
def print_stats(self, args: object) -> None:
"""Print container usage statistics"""
        def sort_by(method: str) -> callable:
            if method == 'percent':
                # get_percent is a regular method that needs the total cpu time,
                # unlike the get_* properties used below
                return lambda c: c.get_percent(cpu_usage)
            if method in ('name', 'cpu', 'memory', 'procs'):
                return lambda c: getattr(c, 'get_{0}'.format(method))
            return lambda c: c.get_cpu
cpu_usage = self.cpu_usage()
print('{0:26} {1:18} {2:5} {3} {4}'.format('name ', 'memory', 'cpu', 'cpu%', 'procs'))
print('-' * 62)
template = '{0.get_name:20} {0.get_memory:10.2f} M {0.get_cpu:15.2f} {1:6.2f} {0.get_procs}'
sort = getattr(args, 'sort')
for container in sorted(self.containers, key=sort_by(sort), reverse=(sort != 'name')):
print(template.format(container, container.get_percent(cpu_usage)))
class Container(object):
"""Define a container object with its related properties"""
def __init__(self, name: str) -> None:
"""Class constructor"""
self.name = name
self._cache = {}
@property
def get_name(self) -> str:
return self.name
@property
def get_memory(self) -> float:
"""Return memory usage in bytes"""
if 'memory' not in self._cache:
with open(os.path.join(cgroup, 'memory/lxc', self.name, 'memory.usage_in_bytes'), 'r') as fh:
self._cache['memory'] = round(int(fh.read().strip()) / 1024 / 1024, 2)
return self._cache.get('memory')
@property
def get_cpu(self) -> float:
"""Return cpu usage in seconds"""
if 'cpu' not in self._cache:
with open(os.path.join(cgroup, 'cpu,cpuacct/lxc', self.name, 'cpuacct.usage'), 'r') as fh:
self._cache['cpu'] = round(int(fh.read().strip()) / 10 ** 9, 2)
return self._cache.get('cpu')
def get_percent(self, total: float = 0.0) -> float:
"""Get cpu usage in percent"""
if 'percent' not in self._cache:
self._cache['percent'] = round(self.get_cpu * 100 / total, 2)
return self._cache.get('percent')
@property
def get_procs(self) -> int:
"""Get number of processes"""
if 'procs' not in self._cache:
with open(os.path.join(cgroup, 'pids/lxc', self.name, 'pids.current'), 'r') as fh:
self._cache['procs'] = int(fh.read().strip())
return self._cache.get('procs')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LXC 2.0 Statistics utility')
    parser.add_argument('--sort', type=str, default='cpu', help='Sort column (name, cpu, memory, percent or procs)')
Containers().print_stats(parser.parse_args())
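# Example invocation (hypothetical script name; assumes an LXC 2.x host with
# cgroup v1 hierarchies mounted under /sys/fs/cgroup):
#   ./lxc_stats.py --sort memory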
|
python
|
# encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QPaintEngine(__Shiboken.Object):
# no doc
def begin(self, *args, **kwargs): # real signature unknown
pass
def clearDirty(self, *args, **kwargs): # real signature unknown
pass
def coordinateOffset(self, *args, **kwargs): # real signature unknown
pass
def drawEllipse(self, *args, **kwargs): # real signature unknown
pass
def drawImage(self, *args, **kwargs): # real signature unknown
pass
def drawLines(self, *args, **kwargs): # real signature unknown
pass
def drawPath(self, *args, **kwargs): # real signature unknown
pass
def drawPixmap(self, *args, **kwargs): # real signature unknown
pass
def drawPoints(self, *args, **kwargs): # real signature unknown
pass
def drawPolygon(self, *args, **kwargs): # real signature unknown
pass
def drawRects(self, *args, **kwargs): # real signature unknown
pass
def drawTextItem(self, *args, **kwargs): # real signature unknown
pass
def drawTiledPixmap(self, *args, **kwargs): # real signature unknown
pass
def end(self, *args, **kwargs): # real signature unknown
pass
def hasFeature(self, *args, **kwargs): # real signature unknown
pass
def isActive(self, *args, **kwargs): # real signature unknown
pass
def isExtended(self, *args, **kwargs): # real signature unknown
pass
def paintDevice(self, *args, **kwargs): # real signature unknown
pass
def painter(self, *args, **kwargs): # real signature unknown
pass
def setActive(self, *args, **kwargs): # real signature unknown
pass
def setDirty(self, *args, **kwargs): # real signature unknown
pass
def setSystemClip(self, *args, **kwargs): # real signature unknown
pass
def setSystemRect(self, *args, **kwargs): # real signature unknown
pass
def syncState(self, *args, **kwargs): # real signature unknown
pass
def systemClip(self, *args, **kwargs): # real signature unknown
pass
def systemRect(self, *args, **kwargs): # real signature unknown
pass
def testDirty(self, *args, **kwargs): # real signature unknown
pass
def type(self, *args, **kwargs): # real signature unknown
pass
def updateState(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
active = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
extended = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
gccaps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
selfDestruct = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
state = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
AllDirty = PySide.QtGui.QPaintEngine.DirtyFlag.AllDirty
AllFeatures = PySide.QtGui.QPaintEngine.PaintEngineFeature.AllFeatures
AlphaBlend = PySide.QtGui.QPaintEngine.PaintEngineFeature.AlphaBlend
Antialiasing = PySide.QtGui.QPaintEngine.PaintEngineFeature.Antialiasing
BlendModes = PySide.QtGui.QPaintEngine.PaintEngineFeature.BlendModes
Blitter = PySide.QtGui.QPaintEngine.Type.Blitter
BrushStroke = PySide.QtGui.QPaintEngine.PaintEngineFeature.BrushStroke
ConicalGradientFill = PySide.QtGui.QPaintEngine.PaintEngineFeature.ConicalGradientFill
ConstantOpacity = PySide.QtGui.QPaintEngine.PaintEngineFeature.ConstantOpacity
ConvexMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.ConvexMode
CoreGraphics = PySide.QtGui.QPaintEngine.Type.CoreGraphics
Direct3D = PySide.QtGui.QPaintEngine.Type.Direct3D
DirtyBackground = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBackground
DirtyBackgroundMode = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBackgroundMode
DirtyBrush = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBrush
DirtyBrushOrigin = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyBrushOrigin
DirtyClipEnabled = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyClipEnabled
DirtyClipPath = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyClipPath
DirtyClipRegion = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyClipRegion
DirtyCompositionMode = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyCompositionMode
DirtyFlag = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.DirtyFlag'>"
DirtyFlags = None # (!) real value is "<type 'DirtyFlags'>"
DirtyFont = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyFont
DirtyHints = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyHints
DirtyOpacity = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyOpacity
DirtyPen = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyPen
DirtyTransform = PySide.QtGui.QPaintEngine.DirtyFlag.DirtyTransform
LinearGradientFill = PySide.QtGui.QPaintEngine.PaintEngineFeature.LinearGradientFill
MacPrinter = PySide.QtGui.QPaintEngine.Type.MacPrinter
MaskedBrush = PySide.QtGui.QPaintEngine.PaintEngineFeature.MaskedBrush
MaxUser = PySide.QtGui.QPaintEngine.Type.MaxUser
ObjectBoundingModeGradients = PySide.QtGui.QPaintEngine.PaintEngineFeature.ObjectBoundingModeGradients
OddEvenMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.OddEvenMode
OpenGL = PySide.QtGui.QPaintEngine.Type.OpenGL
OpenGL2 = PySide.QtGui.QPaintEngine.Type.OpenGL2
OpenVG = PySide.QtGui.QPaintEngine.Type.OpenVG
PaintBuffer = PySide.QtGui.QPaintEngine.Type.PaintBuffer
PaintEngineFeature = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.PaintEngineFeature'>"
PaintEngineFeatures = None # (!) real value is "<type 'PaintEngineFeatures'>"
PainterPaths = PySide.QtGui.QPaintEngine.PaintEngineFeature.PainterPaths
PaintOutsidePaintEvent = PySide.QtGui.QPaintEngine.PaintEngineFeature.PaintOutsidePaintEvent
PatternBrush = PySide.QtGui.QPaintEngine.PaintEngineFeature.PatternBrush
PatternTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PatternTransform
Pdf = PySide.QtGui.QPaintEngine.Type.Pdf
PerspectiveTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PerspectiveTransform
Picture = PySide.QtGui.QPaintEngine.Type.Picture
PixmapTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PixmapTransform
PolygonDrawMode = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.PolygonDrawMode'>"
PolylineMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.PolylineMode
PorterDuff = PySide.QtGui.QPaintEngine.PaintEngineFeature.PorterDuff
PostScript = PySide.QtGui.QPaintEngine.Type.PostScript
PrimitiveTransform = PySide.QtGui.QPaintEngine.PaintEngineFeature.PrimitiveTransform
QuickDraw = PySide.QtGui.QPaintEngine.Type.QuickDraw
QWindowSystem = PySide.QtGui.QPaintEngine.Type.QWindowSystem
RadialGradientFill = PySide.QtGui.QPaintEngine.PaintEngineFeature.RadialGradientFill
Raster = PySide.QtGui.QPaintEngine.Type.Raster
RasterOpModes = PySide.QtGui.QPaintEngine.PaintEngineFeature.RasterOpModes
SVG = PySide.QtGui.QPaintEngine.Type.SVG
Type = None # (!) real value is "<type 'PySide.QtGui.QPaintEngine.Type'>"
User = PySide.QtGui.QPaintEngine.Type.User
WindingMode = PySide.QtGui.QPaintEngine.PolygonDrawMode.WindingMode
Windows = PySide.QtGui.QPaintEngine.Type.Windows
X11 = PySide.QtGui.QPaintEngine.Type.X11
|
python
|
import asyncio
import aioredis
import jinja2
import peewee_async
import aiohttp_jinja2
import aiohttp_debugtoolbar
from aiohttp import web
from aiohttp_session import session_middleware
from aiohttp_session.redis_storage import RedisStorage
import settings
from settings import logger
from helpers.middlewares import request_user_middleware
from helpers.template_tags import tags
from helpers.models import database
async def create_app(loop):
""" Prepare application """
redis_pool = await aioredis.create_pool(settings.REDIS_CON, loop=loop)
middlewares = [session_middleware(RedisStorage(redis_pool)), request_user_middleware]
if settings.DEBUG:
middlewares.append(aiohttp_debugtoolbar.middleware)
# init application
app = web.Application(loop=loop, middlewares=middlewares)
app.redis_pool = redis_pool
app.wslist = {}
jinja_env = aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader(settings.TEMPLATE_DIR),
context_processors=[aiohttp_jinja2.request_processor], )
jinja_env.globals.update(tags)
if settings.DEBUG:
aiohttp_debugtoolbar.setup(app, intercept_redirects=False)
# db conn
database.init(**settings.DATABASE)
app.database = database
app.database.set_allow_sync(False)
app.objects = peewee_async.Manager(app.database)
# make routes
from urls import routes
for route in routes:
app.router.add_route(**route)
app.router.add_static('/static', settings.STATIC_DIR, name='static')
app.logger = logger
handler = app.make_handler(access_log=logger)
serv_generator = loop.create_server(handler, settings.HOST, settings.PORT)
return serv_generator, handler, app
async def shutdown(server, app, handler):
""" Safe close server """
for room in app.wslist.values():
for peer in room.values():
peer.send_json({'text': 'Server shutdown'})
server.close()
await server.wait_closed()
app.redis_pool.close()
await app.redis_pool.wait_closed()
await app.objects.close()
await app.shutdown()
await handler.finish_connections(10.0)
await app.cleanup()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
serv_generator, handler, app = loop.run_until_complete(create_app(loop))
server = loop.run_until_complete(serv_generator)
logger.debug(f'Start server {server.sockets[0].getsockname()}')
try:
loop.run_forever()
except KeyboardInterrupt:
logger.debug('Keyboard Interrupt ^C')
finally:
logger.debug('Stop server begin')
loop.run_until_complete(shutdown(server, app, handler))
loop.close()
logger.debug('Stop server end')
|
python
|
import os
from elasticsearch import Elasticsearch
from elasticsearch import helpers
class Pes:
def __init__(self):
self.client = Elasticsearch([
{"host": os.getenv("ES_GATEWAY"),
"port": os.getenv("ES_PORT") or 9200}
])
def create_index(self, index_name: str):
        # create the index
self.client.indices.create(index=index_name, body={
'settings': {
'index': {
'number_of_shards': 1,
'number_of_replicas': 0,
}
},
'mappings': {
'properties': {
'subnet': {'type': 'text'}
}
}
})
def drop_index(self, index_name: str):
        # delete the index
self.client.indices.delete(index=index_name, ignore=[400, 404])
def get_index_info(self, index_name: str):
        # fetch index metadata
index_info = self.client.indices.get(index=index_name)
return index_info
def do_bulk(self, index_name: str, op_type: str):
        # bulk write
action = [{
'_op_type': op_type,
'_index': index_name,
'_id': 'test',
'_source': {
'subnets': '218038272',
'mask': '16',
}
}]
helpers.bulk(self.client, action)
    def close(self):
        # close the connection
        self.client.close()
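# Usage sketch (hypothetical index name; assumes ES_GATEWAY/ES_PORT point at a
# reachable Elasticsearch cluster):
# pes = Pes()
# pes.create_index('subnet-index')
# pes.do_bulk('subnet-index', 'index')
# print(pes.get_index_info('subnet-index'))
# pes.close()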
|
python
|
import csv
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class Logger(object):
def __init__(self, path, header):
self.log_file = open(path, 'w')
self.logger = csv.writer(self.log_file, delimiter='\t')
self.logger.writerow(header)
self.header = header
    def __del__(self):
self.log_file.close()
def log(self, values):
write_values = []
for col in self.header:
assert col in values
write_values.append(values[col])
self.logger.writerow(write_values)
self.log_file.flush()
def load_value_file(file_path):
with open(file_path, 'r') as input_file:
value = float(input_file.read().rstrip('\n\r'))
return value
def calculate_accuracy(outputs, targets):
batch_size = targets.size(0)
_, pred = outputs.topk(1, 1, True)
pred = pred.t()
correct = pred.eq(targets.view(1, -1))
n_correct_elems = correct.float().sum().item()
return n_correct_elems / batch_size
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0).item()
res.append(correct_k / batch_size)
return res
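# Usage sketch for AverageMeter and accuracy(); assumes PyTorch, which the
# functions above already presume (illustrative values only):
# import torch
# outputs = torch.randn(8, 5)             # 8 samples, 5 classes
# targets = torch.randint(0, 5, (8,))
# top1 = accuracy(outputs, targets, topk=(1,))[0]
# meter = AverageMeter()
# meter.update(top1, n=targets.size(0))
# print(meter.avg)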
import os
from itertools import combinations, chain, product
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
def save_gif(frames, file_path, vmax=255, vmin=0, interval=3000/25):
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(
left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
ims = []
plt.xticks([])
plt.yticks([])
plt.grid(True)
for frame in frames:
m = plt.imshow(
(frame).reshape(*frame.shape[:-1]).astype(np.uint8),
cmap=plt.cm.gray, vmax=vmax, vmin=vmin)
plt.axis('off')
ims.append([m])
ani = animation.ArtistAnimation(fig, ims, interval=interval, repeat=False)
ani.save(file_path, writer="imagemagick")
plt.close()
def gen_hama_photo(h, w):
mask = np.zeros((8, 8, 1))
if w.size:
mask[:, w] = 1
if h.size:
mask[h, :] = 1
return mask
def gen_hama_photo_patterns():
index = list(chain(*[list(combinations(range(8), i)) for i in range(9)]))
g = (gen_hama_photo(np.array(h), np.array(w)) for (h, w) in product(index, repeat=2))
masks = np.array(list(g))
# assert masks.shape[0] == 2**16
mask_hama_photo = np.unique(masks, axis=0)
return mask_hama_photo[1:-1]
def fit_hama_photo(raw, mask=gen_hama_photo_patterns()):
res = np.array([
mask[np.argmin(np.mean(np.square(mask - raw[i]), axis=(1, 2, 3)))]
for i in range(16)])
return res
def fit_hitomi(raw):
agmx = raw.argmax(axis=0)
res = np.array([np.ones((8, 8, 1)) * i == agmx for i in range(16)]).astype(np.uint8)
return res
def fit_rand(raw, th=0):
res = (raw>=th).astype(np.uint8)
return res
# def fit_hama_photo_tensor(raw, mask=gen_hama_photo_patterns()):
# from keras import backend as K
# import tensorflow as tf
# mask_tensor = tf.reshape(
# tf.convert_to_tensor(mask, dtype=tf.float32), (-1, 8, 8, 1))
# n = 16
# i1 = tf.constant(1)
# res0 = tf.reshape(mask_tensor[K.argmin(K.mean(K.square(
# mask_tensor - raw[0]), axis=(1, 2, 3)))],
# (1, 8, 8, 1))
# c = lambda i, res: i < n
# b = lambda i, res: (
# i+1,
# tf.concat([res, tf.reshape(mask_tensor[K.argmin(K.mean(K.square(
# mask_tensor - raw[i]), axis=(1, 2, 3)))],
# (1, 8, 8, 1))], axis=0))
# _, res = tf.while_loop(
# c, b, loop_vars=[i1, res0],
# shape_invariants=[i1.get_shape(), tf.TensorShape((None, 8, 8, 1))])
# return res
#
#
# def fit_hitomi_tensor(raw):
# from keras import backend as K
# import tensorflow as tf
# i1 = tf.constant(1, dtype=tf.int64)
# agmx = K.argmax(raw, axis=0)
# res0 = tf.reshape(
# K.cast(K.equal(agmx, 0), dtype=K.floatx()), (1, 8, 8, 1))
# c = lambda i, res: i < 16
# b = lambda i, res: (
# i+1,
# tf.concat([
# res,
# tf.reshape(
# K.cast(K.equal(agmx, i), dtype=K.floatx()), (1, 8, 8, 1))
# ], axis=0)
# )
# _, res = tf.while_loop(
# c, b, loop_vars=[i1, res0],
# shape_invariants=[i1.get_shape(), tf.TensorShape((None, 8, 8, 1))])
# return res
#
#
# def fit_rand_tensor(raw, th=0):
# from keras import backend as K
# res = K.cast(K.greater_equal(raw, th), K.floatx())
# return res
|
python
|
import requests
from configparser import ConfigParser
import os
import json
import pandas as pd
lat_long_request_url= 'https://cdn-api.co-vin.in/api/v2/appointment/centers/public/findByLatLong?'
class DetailsAssigner:
def __init__(self, *args) -> None:
self.config_obj= args[0]
self.dose_type= 'available_capacity_dose1' if int(self.config_obj.items('dose_type')[0][1])==1 else 'available_capacity_dose2'
self.age_details= self.config_obj.items('age_details')[0][1]
        covishield = int(self.config_obj.get('vaccine_name', 'covishield'))
        covaxin = int(self.config_obj.get('vaccine_name', 'covaxin'))
        if covishield != 0 and covaxin == 0:
            self.vaccine_name = 'COVISHIELD'
        elif covaxin != 0 and covishield == 0:
            self.vaccine_name = 'COVAXIN'
        else:
            self.vaccine_name = 'any'
# print(self.vaccine_name)
print('checking for age {} and dose number {}'.format(self.age_details,self.dose_type))
class ObjectModifier:
def __init__(self, *args) -> None:
self.config_file= args[0]
    def get_config(self):
        self.config = ConfigParser()
        self.config.optionxform = str  # must be set before read() to take effect
        self.config.read(self.config_file)
class VaccineGenerator(DetailsAssigner, ObjectModifier):
def __init__(self, *args) -> None:
super(VaccineGenerator, self).__init__(*args)
def get_latitude_longitude(self):
lat_long_request_param= {}
my_ip_resp= requests.get('https://get.geojs.io/v1/ip.json').json()['ip']
lat_long_url= 'https://get.geojs.io/v1/ip/geo/'+my_ip_resp+'.json'
address_response= requests.get(lat_long_url).json()
lat_long_request_param['lat']=address_response['latitude']
lat_long_request_param['long']=address_response['longitude']
return lat_long_request_param
def get_nearby_centres(self):
self.centre_ids= set()
lat_long_request_param= self.get_latitude_longitude()
        resp = requests.get(lat_long_request_url, params=lat_long_request_param)
centre_dict= json.loads(resp.text)
for dict_elem in centre_dict['centers']:
self.centre_ids.add(dict_elem['center_id'])
def filter_specific_vaccine(self, vaccine_available_dict):
new_dict= {}
if self.vaccine_name!='any':
for key,val in vaccine_available_dict.items():
if val[0][5]==self.vaccine_name:
new_dict[key]=val[0]
return new_dict
return vaccine_available_dict
def parse_json(self, vaccine_available_centre_detailed_list):
dose_type= self.dose_type
age= int(self.age_details)
vaccine_available_dict={}
        for centre in vaccine_available_centre_detailed_list:
            centre_id = centre['center_id']
            # print(centre)
            centre_details = [centre['name'], centre['address'], centre['block_name'], centre['pincode']]
            for session in centre['sessions']:
                # check age and dose type (<= so an exact minimum-age match qualifies)
                if session[dose_type] > 0 and session['min_age_limit'] <= age:
                    slot_details_value = [session['date'], session['vaccine'], dose_type,
                                          session[dose_type], session['min_age_limit'], session['slots']]
slot_val=[]
slot_val.extend(centre_details)
slot_val.extend(slot_details_value)
if centre_id not in vaccine_available_dict:
vaccine_available_dict[centre_id]= list()
vaccine_available_dict[centre_id].append(slot_val)
return vaccine_available_dict
def check_availability(self):
availability_details_dict= {}
centre_id_api_params= {}
vaccine_available_centre_detailed_list= []
test_list=[]
cur_date= pd.to_datetime('now').strftime('%d-%m-%Y')
for centre_id in self.centre_ids:
centre_id_api_params['center_id']= centre_id
centre_id_api_params['date']= cur_date
response= requests.get('https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByCenter?', params= centre_id_api_params)
availability_details_dict= json.loads(response.text)
if len(availability_details_dict)>0:
vaccine_available_centre_detailed_list.append(availability_details_dict['centers'])
test_list.append(availability_details_dict)
vaccine_available_dict= self.parse_json(vaccine_available_centre_detailed_list)
# print(vaccine_available_dict)
if len(vaccine_available_dict)>=1:
specific_vaccine_available_dict= self.filter_specific_vaccine(vaccine_available_dict)
if len(specific_vaccine_available_dict)>=1:
print(specific_vaccine_available_dict)
            os.system("afplay " + 'vaccine_alert.WAV')  # macOS-only audio alert
if __name__ == '__main__':
config_file_abs_path= os.path.abspath(os.path.join(os.path.dirname(__file__),'config_details.ini'))
obj_modify= ObjectModifier(config_file_abs_path)
obj_modify.get_config()
vaccine_generator= VaccineGenerator(obj_modify.config)
vaccine_generator.get_nearby_centres()
vaccine_generator.check_availability()
|
python
|
from ftis.analyser.descriptor import Chroma
from ftis.analyser.audio import CollapseAudio
from ftis.world import World
from ftis.corpus import Corpus
import argparse
parser = argparse.ArgumentParser(description="Process input and output location")
parser.add_argument(
"-i",
"--input",
default="~/corpus-folder/corpus1",
type=str,
help="Folder for input. This should contain some audio files.",
)
parser.add_argument(
"-o",
"--output",
default="~/corpus-folder/chroma",
type=str,
help="Folder for output. This will be made if it doesnt exist.",
)
args = parser.parse_args()
src = Corpus(args.input)
out = args.output
process = World(source=src, sink=out)
process.add(CollapseAudio(), Chroma())
if __name__ == "__main__":
process.run()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2018 Daniel Koguciuk <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
'''
@author: Daniel Koguciuk <[email protected]>
@note: Created on 04.07.2018
'''
import os
import sys
import cv2
import time
import tqdm
import shutil
import argparse
import numpy as np
from utils import data_generator as gen
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FEAT_DIR = os.path.join(BASE_DIR, 'features')
HOGF_DIR = os.path.join(FEAT_DIR, 'hog')
if not os.path.exists(FEAT_DIR):
os.mkdir(FEAT_DIR)
if os.path.exists(HOGF_DIR):
shutil.rmtree(HOGF_DIR)
os.mkdir(HOGF_DIR)
def __HOG(img, cell_size=(8,8), nbins=9):
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
    bins = np.int32(nbins*ang/(2*np.pi))  # quantize gradient directions into nbins
    bin_cells = []
    mag_cells = []
    cellx, celly = cell_size
    for i in range(0, int(img.shape[0]/celly)):
        for j in range(0, int(img.shape[1]/cellx)):
            bin_cells.append(bins[i*celly : i*celly+celly, j*cellx : j*cellx+cellx])
            mag_cells.append(mag[i*celly : i*celly+celly, j*cellx : j*cellx+cellx])
hists = [np.bincount(b.ravel(), m.ravel(), nbins) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
# transform to Hellinger kernel
eps = 1e-7
hist /= hist.sum() + eps
hist = np.sqrt(hist)
hist /= cv2.norm(hist) + eps
return hist
def _HOG(images, image_size):
"""
Calc HOG features for grayscale images.
Args:
images (ndarray of size [images, some_size, some_size]): Grayscale images.
Returns:
(ndarray of size [images, features_no]): HOG features for each image.
"""
NBINS = 9
CELL_SIZE = (int(image_size/16), int(image_size/16))
hog_features = [__HOG(image, cell_size=CELL_SIZE, nbins=NBINS) for image in images]
return np.stack(hog_features, axis=0)
def extract_HOG(generator, category='species', image_size=256, train=True, verbose=True):
"""
Extract HOG features for specified dataset (train/test).
Args:
generator (Generator class object): Generator class object.
train (bool): Am I working with train or test data?
verbose (bool): Should I print some additional info?
category (str): What category do you want: species or breeds?
Returns:
(ndarray of size [images, features_no], ndarray of size [images]) Features and labels.
"""
all_featrs = []
all_labels = []
batch_size = 64
start_time = time.time()
    batches = int(np.ceil(generator.images_count(train=train) / batch_size))
    print("Calculating HOG features...")
for images, labels in tqdm.tqdm(generator.generate_batch(train=train, batch_size=batch_size, category=category, image_size=image_size), total=batches):
all_featrs.append(_HOG(images, image_size))
all_labels.append(labels)
all_featrs = np.concatenate(all_featrs, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
hog_time = time.time()
if verbose:
print ("Features calculated in ", hog_time - start_time, " seconds")
return all_featrs, all_labels
def main(argv):
# Parser ==================================================================
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--image_size", help="images size (defaults: 256)", type=int, default=256)
args = vars(parser.parse_args())
# Extract features ========================================================
generator = gen.OxfordIIITPets(colorspace='GRAY', train_size=0.8)
features_train, labels_train = extract_HOG(generator, category='species', image_size=args['image_size'], train=True, verbose=True)
features_test, labels_test = extract_HOG(generator, category='species', image_size=args['image_size'], train=False, verbose=True)
# Save ====================================================================
np.save(os.path.join(HOGF_DIR, "features_train.npy"), features_train)
np.save(os.path.join(HOGF_DIR, "labels_train.npy"), labels_train)
np.save(os.path.join(HOGF_DIR, "features_test.npy"), features_test)
np.save(os.path.join(HOGF_DIR, "labels_test.npy"), labels_test)
if __name__ == "__main__":
main(sys.argv[1:])
|
python
|
#!/usr/bin/python
import sys
sys.path.insert(0,"/var/www/janus/")
from janus import app as application
|
python
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="file to encode/decode using the provided key")
parser.add_argument("output_file", help="name under which the processed file should be saved")
parser.add_argument("key", help="cryptographic key to process file with")
args = parser.parse_args()
with open(args.input_file, 'rb') as in_file:
with open(args.output_file, 'wb') as out_file:
key = args.key.encode('utf-8')
key_length = len(key)
key_index = 0
in_data = in_file.read()
out_data = bytearray(b'')
for byte in in_data:
            out_data.append(byte ^ key[key_index])
            key_index = (key_index + 1) % key_length  # wrap around to the key start
out_file.write(out_data)
print("Operation completed successfully!")
|
python
|
# coding: utf-8
from fabkit import filer, sudo, env
from fablib.base import SimpleBase
# from fablib import git
from fablib.python import Python
from oslo_config import cfg
CONF = cfg.CONF
class FabClient(SimpleBase):
def __init__(self):
self.data_key = 'fabkit_tools'
self.data = {
'user': 'nobody',
'group': 'nobody',
'prefix': '/opt/fabkit',
'task_patterns': 'local.*,check.*',
}
self.services = [
'fabagent',
'fabagent-central',
]
def init_before(self):
self.python = Python(self.data['prefix'])
def init_after(self):
self.data['owner'] = '{0}:{1}'.format(self.data['user'], self.data['group'])
self.data['host'] = env.host
def setup(self):
data = self.init()
var_dir = CONF.client.package_var_dir
common_repo = '{0}/fabkit-repo-common'.format(var_dir)
client_repo = '{0}/fabkit-repo-client'.format(var_dir)
filer.template('{0}/fabfile.ini'.format(client_repo), data=data)
sudo('rm -rf {0}/fabfile && '
'cp -r {1}/fabfile {0}/fabfile && '
'chown -R {2}:{3} {0}/fabfile'.format(
client_repo, common_repo, data['user'], data['group']))
if env.host == env.hosts[0]:
sudo('/opt/fabkit/bin/fabclient sync_db')
self.start_services().enable_services()
self.restart_services()
sudo('/opt/fabkit/bin/fabclient -l')
|
python
|
class LinearElasticMaterialModel:
def __init__(self, youngs_modulus, poissons_ratio):
self.young_modulus = youngs_modulus
self.poissons_ratio = poissons_ratio
class LinearElasticPlaneMaterialModel(LinearElasticMaterialModel):
def __init__(self, youngs_modulus, poissons_ratio, thickness):
super().__init__(youngs_modulus, poissons_ratio)
self.thickness = thickness
class LinearElasticPlaneStressMaterialModel(LinearElasticPlaneMaterialModel):
def compute_stresses(self, ex, ey, gamma_xy):
factor = self.young_modulus * self.thickness / (1 - self.poissons_ratio**2)
sx = factor * (ex + self.poissons_ratio * ey)
sy = factor * (self.poissons_ratio * ex + ey)
tau_xy = factor * (1 - self.poissons_ratio) * gamma_xy / 2
return sx, sy, tau_xy
class LinearElasticPlaneStrainMaterialModel(LinearElasticPlaneMaterialModel):
def compute_stresses(self, ex, ey, gamma_xy):
factor = self.young_modulus * self.thickness / ((1 + self.poissons_ratio) * (1 - 2 * self.poissons_ratio))
sx = factor * ((1 - self.poissons_ratio) * ex + self.poissons_ratio * ey)
sy = factor * (self.poissons_ratio * ex + (1 - self.poissons_ratio) * ey)
tau_xy = factor * (1 - 2 * self.poissons_ratio) * gamma_xy / 2
return sx, sy, tau_xy
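# A short usage sketch with illustrative, steel-like values (SI units assumed):
if __name__ == "__main__":
    model = LinearElasticPlaneStressMaterialModel(
        youngs_modulus=210e9, poissons_ratio=0.3, thickness=0.01)
    sx, sy, tau_xy = model.compute_stresses(ex=1e-4, ey=-3e-5, gamma_xy=2e-5)
    print(sx, sy, tau_xy)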
|
python
|
"""
The purpose of this script is to train an AI agent to play the custom-built
Kuiper Escape game using the A2C reinforcement learning algorithm.
"""
# 3rd party imports
import gym
import gym_kuiper_escape
# from code.evaluation import evaluate_policy
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.callbacks import EvalCallback
from stable_baselines.common import make_vec_env
from stable_baselines import A2C
# Create environment instance(s)
env_eval = gym.make('kuiper-escape-base-v0')
env = make_vec_env('kuiper-escape-base-v0', n_envs=4)
# Define callback function
eval_callback = EvalCallback(
env_eval,
best_model_save_path='./logs/agent_best',
log_path='./logs/',
eval_freq=10000,
deterministic=True,
render=False
)
# Create agent model
agent = A2C('MlpPolicy', env, verbose=1, tensorboard_log='./tensorboard/')
# Train agent
agent.learn(5000000,
reset_num_timesteps=False,
callback=eval_callback
)
agent.save("agent")
|
python
|
from copy import deepcopy
from unyt import dimensions
from mosdef_cassandra.utils.units import validate_unit, validate_unit_list
import parmed
import warnings
import unyt as u
class MoveSet(object):
def __init__(self, ensemble, species_topologies):
"""A class to contain all the move probabilities and related
values required to perform a simulation in ``Cassandra``.
A MoveSet contains the move probabilities
and other related quantities (e.g., max translation/rotation)
that are required to run Cassandra. When the MoveSet
is created the specified ``ensemble`` and ``species_topologies``
are used to generate initial guesses for all required values.
Depending upon the specifics of your system, these guesses may
be very reasonable or downright terrible. Use the same
``species_topologies`` for your call to ``mosdef_cassandra.System()``
and ``mosdef_cassandra.MoveSet()``.
Parameters
----------
ensemble : str
            string describing the desired ensemble. Supported
values include ``'nvt'``, ``'npt'``, ``'gcmc'``,
``'gemc'``, ``'gemc_npt'``
species_topologies : list
list of ``parmed.Structures``, with one species per element
Returns
-------
``mosdef_cassandra.MoveSet``
"""
if not isinstance(species_topologies, list):
raise TypeError(
"species_topologies should be a " "list of species"
)
for species in species_topologies:
if not isinstance(species, parmed.Structure):
raise TypeError("each species should be a " "parmed.Structure")
# Extract self._n_species
self._n_species = len(species_topologies)
# Set the ensemble
self.ensemble = ensemble
# Infer the number of boxes
if (
self.ensemble == "nvt"
or self.ensemble == "npt"
or self.ensemble == "gcmc"
):
self._n_boxes = 1
else:
self._n_boxes = 2
        # Set '_restricted_type' and '_restricted_value'
self._restricted_type = None
self._restricted_value = None
# Define default probabilities
# Most are ensemble-dependent
self.prob_angle = 0.0
self.prob_dihedral = 0.0
if self.ensemble == "nvt":
self.prob_translate = 0.33
self.prob_rotate = 0.33
self.prob_regrow = 0.34
self.prob_volume = 0.0
self.prob_insert = 0.0
self.prob_swap = 0.0
elif self.ensemble == "npt":
self.prob_translate = 0.33
self.prob_rotate = 0.33
self.prob_regrow = 0.335
self.prob_volume = 0.005
self.prob_insert = 0.0
self.prob_swap = 0.0
# GCMC sums to 0.9 b/c symmetric prob_delete
elif self.ensemble == "gcmc":
self.prob_translate = 0.25
self.prob_rotate = 0.25
self.prob_regrow = 0.30
self.prob_volume = 0.0
self.prob_insert = 0.1
self.prob_swap = 0.0
elif self.ensemble == "gemc":
self.prob_translate = 0.30
self.prob_rotate = 0.30
self.prob_regrow = 0.295
self.prob_volume = 0.005
self.prob_insert = 0.0
self.prob_swap = 0.1
elif self.ensemble == "gemc_npt":
self.prob_translate = 0.30
self.prob_rotate = 0.30
self.prob_regrow = 0.295
self.prob_volume = 0.005
self.prob_insert = 0.0
self.prob_swap = 0.1
else:
raise ValueError("Uh oh, how did we end up here?")
# Max translation and rotations specified per-species-per-box
self.max_translate = [
[2.00 * u.angstrom] * self._n_species
] * self._n_boxes
self.max_rotate = [[30.0 * u.degree] * self._n_species] * self._n_boxes
# Prob swap and max vol are per-box
self.prob_swap_from_box = [1.0 / self._n_boxes] * self._n_boxes
# Default max deltas for volume moves
if self.ensemble == "npt" or self.ensemble == "gemc":
self.max_volume = [500.0 * (u.angstrom ** 3)]
elif self.ensemble == "gemc_npt":
self.max_volume = [
500.0 * (u.angstrom ** 3),
5000.0 * (u.angstrom ** 3),
]
else:
self.max_volume = [0.0 * (u.angstrom ** 3)]
# Set the default CBMC options
self.cbmc_n_insert = 10
self.cbmc_n_dihed = 10
self.cbmc_rcut = 6.0 * u.angstrom
# Remaining options are per-species
self.max_dihedral = [0.0 * u.degree] * self._n_species
self.prob_regrow_species = [1.0] * self._n_species
if self.ensemble in ["gcmc", "gemc", "gemc_npt"]:
self.insertable = [True] * self._n_species
else:
self.insertable = [False] * self._n_species
if self.ensemble in ["gemc", "gemc_npt"]:
self.prob_swap_species = [1.0] * self._n_species
else:
self.prob_swap_species = [0.0] * self._n_species
# Here we handle species-wise exceptions
for ispec, species in enumerate(species_topologies):
if len(species.atoms) == 1:
for ibox in range(self._n_boxes):
self.max_rotate[ibox][ispec] = 0.0 * u.degree
self.prob_regrow_species[ispec] = 0.0
elif len(species.bonds) == 0:
print(
"Treating {} as a non-insertable rigid species "
"since it has no bonds".format(species)
)
for ibox in range(self._n_boxes):
self.max_translate[ibox][ispec] = 0.0 * u.angstrom
self.max_rotate[ibox][ispec] = 0.0 * u.degree
self.prob_regrow_species[ispec] = 0.0
self.insertable[ispec] = False
self.prob_swap_species[ispec] = 0.0
# Correct species_prob_regrow
if sum(self.prob_regrow_species) > 0:
sp_regrowth_prob = 1.0 / sum(self.prob_regrow_species)
for i, prob in enumerate(self.prob_regrow_species):
if prob > 0.0:
self.prob_regrow_species[i] = sp_regrowth_prob
if sum(self.prob_swap_species) > 0:
# Correct species_prob_swap
prob_swap_species = 1.0 / sum(self.prob_swap_species)
for idx, insert in enumerate(self.insertable):
if insert:
self.prob_swap_species[idx] = prob_swap_species
# If all species have no prob regrowth, set prob_regrow to
# zero and redistribute prob to translate/rotate
if sum(self.prob_regrow_species) == 0.0:
self.prob_translate += self.prob_regrow / 2.0
self.prob_rotate += self.prob_regrow / 2.0
self.prob_regrow = 0.0
# If all species are not rotatable change prob rotation
# move to zero. Redistribute prob to translate
if self.ensemble == "gemc" or self.ensemble == "gemc_npt":
if (
sum(self.max_rotate[0]).to_value()
+ sum(self.max_rotate[1]).to_value()
== 0.0
):
self.prob_translate += self.prob_rotate
self.prob_rotate = 0.0
else:
if sum(self.max_rotate[0]).to_value() == 0.0:
self.prob_translate += self.prob_rotate
self.prob_rotate = 0.0
def add_restricted_insertions(
self, species_topologies, restricted_type, restricted_value
):
"""Add restricted insertions for specific species and boxes
Parameters
----------
species_topologies : list
list of ``parmed.Structures`` containing one list per box of species
restricted_type : list
list of restricted insertion types containing one list per box of species
restricted_value : list
list of restricted insertion values (unyt arrays) containing one list per box of species
"""
if self._restricted_type and self._restricted_value:
warnings.warn(
"Restricted insertion has been previously"
" added and will be replaced."
)
if self.ensemble not in ["gcmc", "gemc", "gemc_npt"]:
raise ValueError(
"Restricted insertions are only valid for"
" 'gcmc', 'gemc', and 'gemc_npt' ensembles."
)
if len(restricted_type) != len(restricted_value):
raise ValueError(
"Length of 'restricted_type' and "
" 'restricted_value' must match."
)
for box in restricted_type:
if isinstance(box, (str, int, float)):
raise TypeError(
"Restricted type must be passed as a list"
" of lists corresponding to each box."
)
if len(box) != len(species_topologies):
raise ValueError(
"Length of 'species' and "
" length of box list in 'restricted_type'"
" must match. `species` has a length of {}"
" and the box list in 'restricted_type' has a "
" length of {}".format(len(species_topologies), len(box))
)
for box in restricted_value:
if isinstance(box, (str, int, float)):
raise TypeError(
"Restricted value must be passed as a list"
" of lists corresponding to each box."
)
if len(box) != len(species_topologies):
raise ValueError(
"Length of 'species' and "
" length of species list in 'restricted_value'"
" must match. `species` has a length of {}"
" and the box list in 'restricted_value' has a "
" length of {}".format(len(species_topologies), len(box))
)
if self.ensemble == "gcmc" and len(restricted_type) != 1:
raise ValueError(
"GCMC ensemble contains 1 box but"
" `restricted_type` of length {}"
" was passed.".format(len(restricted_type))
)
if self.ensemble in ["gemc", "gemc_npt"] and len(restricted_type) != 2:
raise ValueError(
"GEMC ensembles contain 2 boxes but"
" `restricted_type` of length {}"
" was passed.".format(len(restricted_type))
)
for types, values in zip(restricted_type, restricted_value):
for typ, val in zip(types, values):
if not typ and not val:
pass
elif typ and not val:
                    raise ValueError(
                        "`restricted_type` {} was passed"
                        " but `restricted_value` is None.".format(typ)
                    )
elif val and not typ:
                    raise ValueError(
                        "`restricted_value` {} was passed"
                        " but `restricted_type` is None.".format(val)
                    )
else:
_check_restriction_type(typ, val)
# Check units of restricted value
if typ == "interface":
[validate_unit(i, dimensions.length) for i in val]
else:
validate_unit(val, dimensions.length)
self._restricted_type = restricted_type
self._restricted_value = restricted_value
@property
def ensemble(self):
return self._ensemble
@ensemble.setter
def ensemble(self, ensemble):
if hasattr(self, "_ensemble"):
raise AttributeError(
"Ensemble cannot be changed. Please create a new MoveSet instead."
)
valid_ensembles = ["nvt", "npt", "gcmc", "gemc", "gemc_npt"]
if ensemble not in valid_ensembles:
raise ValueError(
'Invalid ensemble "{}" Supported '
"ensembles include {}".format(ensemble, valid_ensembles)
)
self._ensemble = ensemble
@property
def prob_translate(self):
return self._prob_translate
@prob_translate.setter
def prob_translate(self, prob_translate):
prob_translate = self._validate_probability(
prob_translate,
"prob_translate",
)
self._prob_translate = prob_translate
@property
def prob_rotate(self):
return self._prob_rotate
@prob_rotate.setter
def prob_rotate(self, prob_rotate):
prob_rotate = self._validate_probability(
prob_rotate,
"prob_rotate",
)
self._prob_rotate = prob_rotate
@property
def prob_angle(self):
return self._prob_angle
@prob_angle.setter
def prob_angle(self, prob_angle):
prob_angle = self._validate_probability(
prob_angle,
"prob_angle",
)
self._prob_angle = prob_angle
@property
def prob_dihedral(self):
return self._prob_dihedral
@prob_dihedral.setter
def prob_dihedral(self, prob_dihedral):
prob_dihedral = self._validate_probability(
prob_dihedral,
"prob_dihedral",
)
self._prob_dihedral = prob_dihedral
@property
def prob_regrow(self):
return self._prob_regrow
@prob_regrow.setter
def prob_regrow(self, prob_regrow):
prob_regrow = self._validate_probability(
prob_regrow,
"prob_regrow",
)
self._prob_regrow = prob_regrow
@property
def prob_volume(self):
return self._prob_volume
@prob_volume.setter
def prob_volume(self, prob_volume):
prob_volume = self._validate_probability(
prob_volume,
"prob_volume",
)
if prob_volume > 0.0:
if self.ensemble == "nvt" or self.ensemble == "gcmc":
raise ValueError(
"Ensemble is {}. prob_volume cannot be "
"non-zero in the {} ensemble".format(
self._ensemble, self.ensemble
)
)
elif prob_volume == 0.0:
if (
self.ensemble == "npt"
or self.ensemble == "gemc"
or self.ensemble == "gemc_npt"
):
raise ValueError(
"Ensemble is {}. prob_volume must be "
"> 0.0 in this ensemble".format(self.ensemble)
)
# Pass all checks. Update prob_volume.
self._prob_volume = prob_volume
@property
def prob_insert(self):
return self._prob_insert
@prob_insert.setter
def prob_insert(self, prob_insert):
prob_insert = self._validate_probability(
prob_insert,
"prob_insert",
)
if self.ensemble != "gcmc" and prob_insert != 0.0:
raise ValueError(
"Ensemble is {}. Insertion probability "
"must be = 0.0".format(self.ensemble)
)
if self.ensemble == "gcmc" and prob_insert == 0.0:
raise ValueError(
"Ensemble is {}. Insertion probability "
"must be > 0.0".format(self.ensemble)
)
self._prob_insert = prob_insert
@property
def prob_swap(self):
return self._prob_swap
@prob_swap.setter
def prob_swap(self, prob_swap):
prob_swap = self._validate_probability(
prob_swap,
"prob_swap",
)
if self.ensemble != "gemc" and self.ensemble != "gemc_npt":
if prob_swap != 0.0:
raise ValueError(
"Ensemble is {}. Swapping probability "
"must be = 0.0".format(self.ensemble)
)
if self.ensemble == "gemc" or self.ensemble == "gemc_npt":
if prob_swap == 0.0:
raise ValueError(
"Ensemble is {}. Swapping probability "
"must be > 0.0".format(self.ensemble)
)
self._prob_swap = prob_swap
@property
def max_translate(self):
return self._max_translate
@max_translate.setter
def max_translate(self, max_translate):
max_translate = validate_unit_list(
max_translate,
(self._n_boxes, self._n_species),
dimensions.length,
"max_translate",
)
for max_val in max_translate.flatten():
if max_val.to_value() < 0.0:
raise ValueError(
"Max translation values cannot be less than zero"
)
self._max_translate = max_translate
@property
def max_rotate(self):
return self._max_rotate
@max_rotate.setter
def max_rotate(self, max_rotate):
max_rotate = validate_unit_list(
max_rotate,
(self._n_boxes, self._n_species),
dimensions.angle,
"max_rotate",
)
for max_val in max_rotate.flatten():
if (
max_val.to_value("degree") < 0.0
or max_val.to_value("degree") > 360.0
):
raise ValueError(
"Max rotation values must be between 0.0 and 360.0 degrees."
)
self._max_rotate = max_rotate
@property
def max_dihedral(self):
return self._max_dihedral
@max_dihedral.setter
def max_dihedral(self, max_dihedral):
max_dihedral = validate_unit_list(
max_dihedral,
(self._n_species,),
dimensions.angle,
"max_dihedral",
)
for max_val in max_dihedral:
if (
max_val.to_value("degree") < 0.0
or max_val.to_value("degree") > 360.0
):
raise ValueError(
"Max dihedral rotation values must be between 0.0 and 360.0 degrees."
)
self._max_dihedral = max_dihedral
@property
def prob_swap_from_box(self):
return self._prob_swap_from_box
@prob_swap_from_box.setter
def prob_swap_from_box(self, prob_swap_from_box):
if (
not isinstance(prob_swap_from_box, list)
or len(prob_swap_from_box) != self._n_boxes
):
raise TypeError(
"prob_swap_from_box must be a list with length "
"(number of boxes)"
)
validated_prob_swap_from_box = []
for prob_swap in prob_swap_from_box:
prob_swap = self._validate_probability(
prob_swap,
"prob_swap_from_box",
)
validated_prob_swap_from_box.append(prob_swap)
self._prob_swap_from_box = validated_prob_swap_from_box
@property
def max_volume(self):
return self._max_volume
@max_volume.setter
def max_volume(self, max_volume):
if type(max_volume) not in (list, u.unyt_array):
if self.ensemble == "gemc_npt":
max_volume = [max_volume] * self._n_boxes
else:
max_volume = [max_volume]
if self.ensemble == "gemc_npt":
shape = (self._n_boxes,)
else:
shape = (1,)
max_volume = validate_unit_list(
max_volume,
shape,
dimensions.length ** 3,
"max_volume",
)
for max_vol in max_volume.flatten():
if max_vol < 0.0:
raise ValueError("max_volume cannot be less than zero.")
self._max_volume = max_volume
@property
def insertable(self):
return self._insertable
@insertable.setter
def insertable(self, insertable):
if (
not isinstance(insertable, list)
or len(insertable) != self._n_species
):
raise TypeError(
"insertable must be a list with length " "(number of species)"
)
for insert in insertable:
if not isinstance(insert, bool):
raise TypeError(
"The insertability of each species "
"must be provided as a boolean type."
)
self._insertable = insertable
@property
def prob_swap_species(self):
return self._prob_swap_species
@prob_swap_species.setter
def prob_swap_species(self, prob_swap_species):
if (
not isinstance(prob_swap_species, list)
or len(prob_swap_species) != self._n_species
):
raise TypeError(
"prob_swap_species must be a list with length "
"(number of species)"
)
validated_prob_swap_species = []
for prob_swap in prob_swap_species:
prob_swap = self._validate_probability(
prob_swap,
"prob_swap_species",
)
validated_prob_swap_species.append(prob_swap)
self._prob_swap_species = validated_prob_swap_species
@property
def prob_regrow_species(self):
return self._prob_regrow_species
@prob_regrow_species.setter
def prob_regrow_species(self, prob_regrow_species):
if (
not isinstance(prob_regrow_species, list)
or len(prob_regrow_species) != self._n_species
):
raise TypeError(
"prob_regrow_species must be a list with length "
"(number of species)"
)
validated_prob_regrow_species = []
for prob_regrow in prob_regrow_species:
prob_regrow = self._validate_probability(
prob_regrow, "prob_regrow"
)
validated_prob_regrow_species.append(prob_regrow)
self._prob_regrow_species = validated_prob_regrow_species
@property
def cbmc_n_insert(self):
return self._cbmc_n_insert
@cbmc_n_insert.setter
def cbmc_n_insert(self, cbmc_n_insert):
if type(cbmc_n_insert) != int:
raise TypeError("cbmc_n_insert must be of type int")
if cbmc_n_insert <= 0:
raise ValueError("cbmc_n_insert must be greater than zero")
self._cbmc_n_insert = cbmc_n_insert
@property
def cbmc_n_dihed(self):
return self._cbmc_n_dihed
@cbmc_n_dihed.setter
def cbmc_n_dihed(self, cbmc_n_dihed):
if type(cbmc_n_dihed) != int:
raise TypeError("cbmc_n_dihed must be of type int")
if cbmc_n_dihed <= 0:
raise ValueError("cbmc_n_dihed must be greater than zero")
self._cbmc_n_dihed = cbmc_n_dihed
@property
def cbmc_rcut(self):
return self._cbmc_rcut
@cbmc_rcut.setter
def cbmc_rcut(self, cbmc_rcut):
if type(cbmc_rcut) not in (list, u.unyt_array):
cbmc_rcut = [cbmc_rcut] * self._n_boxes
cbmc_rcut = validate_unit_list(
cbmc_rcut,
(self._n_boxes,),
dimensions.length,
"cbmc_rcut",
)
for rcut in cbmc_rcut.flatten():
if rcut.to_value() < 0.0:
raise ValueError("cbmc_rcut cannot be less than zero.")
self._cbmc_rcut = cbmc_rcut
def print(self):
"""Print the current contents of the MoveSet"""
contents = """
Ensemble: {ensemble}
Probability of selecting each move type:
Translate: {prob_translate}
Rotate: {prob_rotate}
Regrow: {prob_regrow}
Volume: {prob_volume}
Insert: {prob_insert}
Delete: {prob_delete}
Swap: {prob_swap}
Angle: {prob_angle}
Dihedral: {prob_dihedral}
""".format(
ensemble=self.ensemble,
prob_translate=self.prob_translate,
prob_rotate=self.prob_rotate,
prob_regrow=self.prob_regrow,
prob_volume=self.prob_volume,
prob_insert=self.prob_insert,
prob_delete=self.prob_insert,
prob_swap=self.prob_swap,
prob_angle=self.prob_angle,
prob_dihedral=self.prob_dihedral,
)
contents += """
CBMC selections:
Number of trial positions: {n_insert}
Number of trial dihedral angles: {n_dihed}
CBMC cutoff(s):
""".format(
n_insert=self.cbmc_n_insert,
n_dihed=self.cbmc_n_dihed,
)
for idx, value in enumerate(self.cbmc_rcut):
contents += " Box {}: {}\n".format(idx + 1, value)
contents += "\n\nPer species quantities:\n\n"
contents += " "
for idx in range(self._n_species):
contents += "species{idx} ".format(idx=idx + 1)
contents += "\n"
contents += " "
for idx in range(self._n_species):
contents += "======== ".format(idx=idx + 1)
contents += "\n"
contents += " Max translate (Ang): "
for (box, max_translate_box) in enumerate(self.max_translate):
if box > 0:
contents += " "
for (idx, max_translate) in enumerate(max_translate_box):
contents += "{max_trans:4.2f} ".format(
max_trans=max_translate
)
contents += "(Box {box})".format(box=box + 1)
contents += "\n"
contents += " Max rotate (deg): "
for (box, max_rotate_box) in enumerate(self.max_rotate):
if box > 0:
contents += " "
for (idx, max_rotate) in enumerate(max_rotate_box):
contents += "{max_rot:4.2f} ".format(
max_rot=max_rotate
)
contents += "(Box {box})".format(box=box + 1)
contents += "\n"
contents += " Insertable: "
for (idx, insert) in enumerate(self.insertable):
contents += "{insert} ".format(insert=insert)
contents += "\n"
contents += " Max dihedral: "
for (idx, max_dih) in enumerate(self.max_dihedral):
contents += "{max_dih:4.2f} ".format(max_dih=max_dih)
contents += "\n"
contents += " Prob swap: "
for (idx, prob_swap) in enumerate(self.prob_swap_species):
contents += "{prob_swap:4.2f} ".format(
prob_swap=prob_swap
)
contents += "\n"
contents += " Prob regrow: "
for (idx, prob_regrow) in enumerate(self.prob_regrow_species):
contents += "{regrow:4.2f} ".format(regrow=prob_regrow)
contents += "\n"
contents += "\n\nMax volume (Ang^3):\n"
for (box, max_vol) in enumerate(self.max_volume):
contents += " Box {box}: {max_vol}\n".format(
box=box + 1, max_vol=max_vol
)
        if self._restricted_type is not None:
contents += "\nRestricted Insertions (Ang):\n"
for box in range(self._n_boxes):
for species, (typ, value) in enumerate(
zip(
self._restricted_type[box], self._restricted_value[box]
)
):
if typ == "sphere":
contents += "Box {box}, Species {species}: sphere, R = {r_value}\n".format(
box=box + 1, species=species + 1, r_value=value
)
elif typ == "cylinder":
contents += "Box {box}, Species {species}: cylinder, R = {r_value}\n".format(
box=box + 1, species=species + 1, r_value=value
)
elif typ == "slitpore":
contents += "Box {box}, Species {species}: slitpore, z_max = {z_max}\n".format(
box=box + 1, species=species + 1, z_max=value
)
elif typ == "interface":
contents += "Box {box}, Species {species}: interface, z_min = {z_min}, z_max = {z_max}\n".format(
box=box + 1,
species=species + 1,
z_min=value[0],
z_max=value[1],
)
else:
contents += (
"Box {box}, Species {species}: None\n".format(
box=box + 1, species=species + 1
)
)
print(contents)
def _validate_probability(self, probability, name):
if type(probability) not in (float, int):
raise TypeError(f"{name} must be of type float")
else:
probability = float(probability)
if probability < 0.0 or probability > 1.0:
raise ValueError(f"{name} must be between 0.0 and 1.0.")
return probability
def _check_restriction_type(restriction_type, restriction_value):
valid_restrict_types = ["sphere", "cylinder", "slitpore", "interface"]
# Check restriction insertion type
if restriction_type not in valid_restrict_types:
raise ValueError(
'Invalid restriction type "{}". Supported '
"restriction types include {}".format(
restriction_type, valid_restrict_types
)
)
# Check if correct number of arguments passed
if restriction_type == "interface":
if len(restriction_value) != 2:
            raise ValueError(
                "Invalid number of arguments passed. "
                "{} arguments for restriction type {} "
                "were passed. 2 are required".format(
                    len(restriction_value), restriction_type
                )
            )
else:
        if not isinstance(restriction_value, u.unyt_array):
            raise TypeError(
                "Invalid type for `restriction_value` passed. A"
                " single argument of type `unyt_array`"
                " should be passed"
            )
|
python
|
import unittest
import unittest.mock
import uuid
from g1.asyncs import kernels
from g1.asyncs.bases import tasks
from g1.messaging import reqrep
from g1.messaging.reqrep import clients
from g1.messaging.reqrep import servers
from g1.messaging.wiredata import jsons
class InvalidRequestError(Exception):
pass
class InternalServerError(Exception):
pass
@reqrep.raising(InvalidRequestError, InternalServerError)
class TestInterface:
@reqrep.raising(ValueError)
def greet(self, name: str) -> str:
raise NotImplementedError
def f(self):
raise NotImplementedError
def g(self):
raise NotImplementedError
def h(self):
raise NotImplementedError
@reqrep.raising(InternalServerError)
class TestOnlyOneError:
def f(self):
raise NotImplementedError
# Don't inherit from ``TestInterface`` because we intentionally leave
# ``f`` unimplemented.
class TestApplication:
async def greet(self, name):
return 'Hello, %s' % name
async def g(self):
return object()
async def h(self):
# Test error that is not declared in the interface.
raise RuntimeError
Request, Response = reqrep.generate_interface_types(TestInterface, 'Test')
WIRE_DATA = jsons.JsonWireData()
class ServerTest(unittest.TestCase):
def test_only_one_error(self):
request_type, response_type = \
reqrep.generate_interface_types(TestOnlyOneError)
server = servers.Server(
TestOnlyOneError(),
request_type,
response_type,
WIRE_DATA,
)
self.assertEqual(
server._declared_error_types,
{InternalServerError: 'internal_server_error'},
)
@kernels.with_kernel
def test_serve(self):
server = servers.Server(
TestApplication(),
Request,
Response,
WIRE_DATA,
invalid_request_error=InvalidRequestError(),
internal_server_error=InternalServerError(),
)
wire_request = WIRE_DATA.to_lower(
Request(args=Request.m.greet(name='world'))
)
self.assertEqual(
WIRE_DATA.to_upper(
Response,
kernels.run(server._serve(wire_request)),
),
Response(result=Response.Result(greet='Hello, world')),
)
with self.assertLogs(servers.__name__, level='DEBUG') as cm:
self.assertEqual(
kernels.run(server._serve(b'')),
server._invalid_request_error_wire,
)
self.assertRegex('\n'.join(cm.output), r'to_upper error: ')
wire_request = WIRE_DATA.to_lower(Request(args=Request.m.f()))
with self.assertLogs(servers.__name__, level='DEBUG') as cm:
self.assertEqual(
kernels.run(server._serve(wire_request)),
server._invalid_request_error_wire,
)
self.assertRegex('\n'.join(cm.output), r'unknown method: f: ')
wire_request = WIRE_DATA.to_lower(Request(args=Request.m.g()))
with self.assertLogs(servers.__name__, level='DEBUG') as cm:
self.assertEqual(
kernels.run(server._serve(wire_request)),
server._internal_server_error_wire,
)
self.assertRegex('\n'.join(cm.output), r'to_lower error: ')
wire_request = WIRE_DATA.to_lower(Request(args=Request.m.h()))
with self.assertLogs(servers.__name__, level='DEBUG') as cm:
self.assertEqual(
kernels.run(server._serve(wire_request)),
server._internal_server_error_wire,
)
self.assertRegex('\n'.join(cm.output), r'server error: ')
@kernels.with_kernel
def test_end_to_end(self):
def do_test(client, server, server_serve):
url = 'inproc://%s' % uuid.uuid4()
server.socket.listen(url)
client.socket.dial(url)
server_task = tasks.spawn(server_serve)
client_task = tasks.spawn(client.m.greet(name='world'))
with self.assertRaises(kernels.KernelTimeout):
kernels.run(timeout=0.005)
self.assertTrue(client_task.is_completed())
self.assertEqual(
client_task.get_result_nonblocking(), 'Hello, world'
)
self.assertFalse(server_task.is_completed())
server.socket.close()
kernels.run(timeout=1)
self.assertTrue(server_task.is_completed())
self.assertIsNone(server_task.get_result_nonblocking())
app = TestApplication()
with servers.Server(app, Request, Response, WIRE_DATA) as server:
with clients.Client(Request, Response, WIRE_DATA) as client:
do_test(client, server, server.serve)
app = TestApplication()
server = servers.Server(app, Request, Response, WIRE_DATA)
with clients.Client(Request, Response, WIRE_DATA) as client:
with server:
do_test(client, server, server.serve)
if __name__ == '__main__':
unittest.main()
|
python
|
GET_PACKAGE_ADT_XML='''<?xml version="1.0" encoding="utf-8"?>
<pak:package xmlns:pak="http://www.sap.com/adt/packages" xmlns:adtcore="http://www.sap.com/adt/core" adtcore:masterLanguage="EN" adtcore:name="$IAMTHEKING" adtcore:type="DEVC/K" adtcore:changedAt="2019-01-29T23:00:00Z" adtcore:version="active" adtcore:createdAt="2019-01-29T23:00:00Z" adtcore:changedBy="DEVELOPER" adtcore:description="This is a package" adtcore:descriptionTextLimit="60" adtcore:language="EN">
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/vit/wb/object_type/devck/object_name/%24IAMTHEKING" rel="self" type="application/vnd.sap.sapgui" title="Representation in SAP Gui"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/applicationcomponents" rel="applicationcomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Application Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/softwarecomponents" rel="softwarecomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Software Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/transportlayers" rel="transportlayers" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Layers Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/translationrelevances" rel="translationrelevances" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Relevances Value Help"/>
<pak:attributes pak:packageType="development" pak:isPackageTypeEditable="false" pak:isAddingObjectsAllowed="false" pak:isAddingObjectsAllowedEditable="true" pak:isEncapsulated="false" pak:isEncapsulationEditable="false" pak:recordChanges="false" pak:isRecordChangesEditable="false" pak:isSwitchVisible="false"/>
<pak:superPackage/>
<pak:applicationComponent pak:name="-" pak:description="No application component assigned" pak:isVisible="true" pak:isEditable="false"/>
<pak:transport>
<pak:softwareComponent pak:name="LOCAL" pak:description="" pak:isVisible="true" pak:isEditable="false"/>
<pak:transportLayer pak:name="" pak:description="" pak:isVisible="false" pak:isEditable="false"/>
</pak:transport>
<pak:useAccesses pak:isVisible="false"/>
<pak:packageInterfaces pak:isVisible="false"/>
<pak:subPackages>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_doc" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_DOC" adtcore:description="Documentation stuff"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_src" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_SRC" adtcore:description="Production source codes"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_tests" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_TESTS" adtcore:description="Package with Tests"/>
</pak:subPackages>
</pak:package>
'''
GET_PACKAGE_ADT_XML_NOT_FOUND='''<?xml version="1.0" encoding="utf-8"?>
<exc:exception xmlns:exc="http://www.sap.com/abapxml/types/communicationframework">
<namespace id="com.sap.adt"/>
<type id="ExceptionResourceNotFound"/>
<message lang="EN">Error while importing object PKG_NAME from the database.</message>
<localizedMessage lang="EN">Error while importing object PKG_NAME from the database.</localizedMessage>
<properties/>
</exc:exception>
'''.replace('\n', '').replace('\r', '')
|
python
|
#
# 13. Roman to Integer
#
# Roman numerals are represented by seven different symbols: I, V, X, L, C, D, M
#
# Symbols Value
#
# I 1
# V 5
# X 10
# L 50
# C 100
# D 500
# M 1000
#
# For example, two is written as II in Roman numerals, just two ones added
# together.
# Twelve is written as XII, which is simply X + II. The number twenty seven is
# written as XXVII, which is XX + V + II.
#
# Roman numerals are usually written largest to smallest from left to right.
# However, the numeral for four is not IIII. Instead, number four is written as
# IV. Because the one is before the five we subtract it making four. The same
# principle applies to the number nine, which is written as IX. There are six
# instances where subtraction is used:
#
# - I can be placed before V and X to make 4 and 9.
# - X can be placed before L and C to make 40 and 90.
# - C can be placed before D and M to make 400 and 900.
#
# Given a roman numeral, convert it to an integer. Input is guaranteed to be
# within the range from 1 to 3999.
#
# Example 1:
# Input: "III"
# Output: 3
#
# Example 2:
# Input: "IV"
# Output: 4
#
# Example 3:
# Input: "IX"
# Output: 9
#
# Example 4:
# Input: "LVIII"
# Output: 58
#
# Example 5:
# Input: "MCMXCIV"
# Output: 1994
#
class Solution(object):
    # Traverse the string, always accumulating; when the previous
    # character maps to a smaller value than the current one, subtract
    # twice the previous value from the running total.
def romanToInt(self, s):
mapping = {
'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000
}
res = 0
front = mapping[s[0]] if s else 0
for c in s:
res += mapping[c]
if front < mapping[c]:
res -= 2 * front
front = mapping[c]
return res
    # Handle two cases: single-symbol and two-symbol tokens.
def romanToInt2(self, s):
doubles = {'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}
singles = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
res, i, length = 0, 0, len(s)
while i < length:
if i < length - 1 and s[i:i + 2] in doubles:
res += doubles[s[i:i + 2]]
i += 2
else:
res += singles[s[i]]
i += 1
return res
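# Quick sanity checks (an added example, not part of the original solution):
if __name__ == '__main__':
    sol = Solution()
    for numeral, expected in [('III', 3), ('IV', 4), ('IX', 9),
                              ('LVIII', 58), ('MCMXCIV', 1994)]:
        assert sol.romanToInt(numeral) == expected
        assert sol.romanToInt2(numeral) == expected
    print('all examples pass')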
|
python
|
from factory import DjangoModelFactory, Sequence, SubFactory
from movie_planet.movies.models import Comment, Movie
class MovieFactory(DjangoModelFactory):
title = Sequence(lambda n: "Title %03d" % n)
class Meta:
model = Movie
class CommentFactory(DjangoModelFactory):
body = "test body"
movie = SubFactory(MovieFactory)
class Meta:
model = Comment
def create_movies_with_rank():
movie_1 = MovieFactory()
movie_2 = MovieFactory()
movie_3 = MovieFactory()
CommentFactory(movie=movie_1)
CommentFactory(movie=movie_2)
CommentFactory(movie=movie_2)
CommentFactory(movie=movie_3)
CommentFactory(movie=movie_3)
return movie_1, movie_2, movie_3
|
python
|
import uuid
import time
import pickle
from redis import Redis
class AcquireTimeoutError(Exception):
"""
在规定时间内,没有获取到到锁时,抛出的异常
"""
class RedisLock:
"""
redis 分布式锁
"""
@classmethod
def register_redis(cls, redis: Redis):
cls.redis = redis
def __init__(self, lock_key, acquire_time=10, lock_timeout=60):
"""
:param lock_key 锁名称
:param acquire_time 尝试获取锁的时间,如果在指定时间内没有获取到锁,则返回 False
:param lock_timeout 过期时间
"""
self.lock_key = 'lock:' + lock_key
self.acquire_time = acquire_time
self.lock_timeout = lock_timeout
self.identifier = ''
def acquire(self, blocking=True):
"""
:param blocking 是否阻塞
:return 如果获取到锁,则返回 True,否则 False
"""
identifier = str(uuid.uuid4())
end = time.time() + self.acquire_time
while time.time() < end:
if self.redis.set(self.lock_key, identifier, ex=self.lock_timeout, nx=True):
self.identifier = identifier
return True
if blocking:
time.sleep(0.01)
else:
return False
raise AcquireTimeoutError()
def release(self):
"""
删除锁
"""
if self.identifier == '':
return
pipe = self.redis.pipeline(True)
pipe.watch(self.lock_key)
if pipe.get(self.lock_key).decode(encoding='utf-8') == self.identifier:
pipe.multi()
pipe.delete(self.lock_key)
pipe.execute()
self.identifier = ''
@staticmethod
def lock(lockname, acquire_time=10, lock_timeout=60, blocking=True):
"""
使用方法:
@RedisLock.lock('test', acquire_time=1, lock_timeout=60, blocking=False)
def test():
pass
:param lockname: 锁名称
:param acquire_time: 阻塞获取锁的时间
:param lock_timeout: 锁的超时时间
:param blocking: 非阻塞获取锁,如果没有获取到锁,则不会执行修饰的方法
"""
def decorator(func):
def wrapper(*args, **kwargs):
lock = RedisLock(lockname, acquire_time=acquire_time, lock_timeout=lock_timeout)
if lock.acquire(blocking=blocking):
                    try:
                        return func(*args, **kwargs)
                    finally:
                        lock.release()
return wrapper
return decorator
class RedisCache:
"""
redis 分布式缓存
"""
@classmethod
def register_redis(cls, redis: Redis):
cls.redis = redis
@staticmethod
def __get_one_name(name):
"""
获取一级缓存名称
"""
return "cache:%s" % name
@staticmethod
def __get_two_name(name):
"""
获取二级缓存名称
"""
return "cache2:%s" % name
@classmethod
def cache(cls, cachename, timeout=60):
"""
使用方法:
@RedisCache.cache('test', timeout=60)
def test():
pass
:param cachename: 锁名称
:param timeout: 锁的超时时间
:return:
"""
def decorator(func):
def wrapper(*args, **kwargs):
val = cls.redis.get(cls.__get_one_name(cachename))
if val is not None:
                    # Serve the data from the cache
                    return pickle.loads(val)
                # Mitigating cache expiry (stampede): use a distributed lock
                # so that only one process fetches from the origin.
                lock = RedisLock('cachelock.' + cachename, lock_timeout=timeout)
                # Acquire non-blockingly so a miss falls through to the
                # level-2 cache instead of raising AcquireTimeoutError.
                if lock.acquire(blocking=False) is False:
                    # Lock not acquired: use the level-2 cache
                    val = cls.redis.get(cls.__get_two_name(cachename))
                    ret = pickle.loads(val)
                else:
                    # Lock acquired: fetch from the origin and refresh both
                    # the level-1 and level-2 caches
ret = func(*args, **kwargs)
val = pickle.dumps(ret)
cls.redis.set(cls.__get_one_name(cachename), val, ex=timeout)
cls.redis.set(cls.__get_two_name(cachename), val, ex=timeout * 10)
lock.release()
return ret
return wrapper
return decorator
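# Minimal usage sketch (an added illustration; assumes a Redis server on
# localhost:6379 -- the function and key names below are placeholders):
if __name__ == '__main__':
    r = Redis(host='localhost', port=6379)
    RedisLock.register_redis(r)
    RedisCache.register_redis(r)
    @RedisCache.cache('demo', timeout=30)
    def slow_query():
        time.sleep(1)  # stand-in for an expensive computation
        return {'answer': 42}
    print(slow_query())  # first call computes; repeats within 30s hit the cache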
|
python
|
from django.db import models
class MyPublicModel(models.Model):
name = models.CharField(max_length=32)
class MyPrivateModel(models.Model):
name = models.CharField(max_length=32)
class MyPresenceModel(models.Model):
name = models.CharField(max_length=32)
|
python
|
# -*- coding: utf-8 -*-
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
## Linear model of a Boeing 747
# Level flight at 40,000 ft elevation
# Velocity at 774 ft/sec (0.80 Mach)
# States
# u - uw (ft/sec) - horizontal velocity - horizontal wind
# w - ww (ft/sec) - vertical velocity - vertical wind
# q (crad/sec) - angular velocity
# theta (crad) - angle from horizontal
# note: crad = 0.01 rad
# Inputs
# e - elevator
# t - throttle
# Outputs
# u - uw (ft/sec) - horizontal airspeed
# hdot = -w + u0 * theta with u0 = 774 ft/sec
A = np.array([[-.003, 0.039, 0, -0.322],
[-0.065, -0.319, 7.74, 0],
[0.020, -0.101, -0.429, 0],
[0, 0, 1, 0]])
B = np.array([[0.01, 1],
[-0.18, -0.04],
[-1.16, 0.598],
[0, 0]])
C = np.array([[1, 0, 0, 0],
[0, -1, 0, 7.74]])
#%% Build model
m = GEKKO()
x,y,u = m.state_space(A,B,C)
# m.time = [0,0.1,0.2,0.4,1,1.5,2,3,4,5,6,7,8,10,12,15,20]  # (overridden below)
m.time = np.linspace(0,10,101)
m.options.IMODE = 6
m.options.NODES = 2
## MV tuning
for i in range(len(u)):
u[i].lower = -5
u[i].upper = 5
u[i].dcost = 1
u[i].status = 1
## CV tuning
# tau = first order time constant for trajectories
y[0].tau = 3
y[1].tau = 5
# tr_init = 2 (first order traj, re-center with each cycle)
y[0].tr_init = 2
y[1].tr_init = 2
# targets (dead-band needs upper and lower values)
y[0].sphi= -8.5
y[0].splo= -9.5
y[1].sphi= 5.4
y[1].splo= 4.6
y[0].status = 1
y[1].status = 1
m.solve()
#%% plot results
plt.figure(1)
plt.subplot(311)
plt.plot(m.time,u[0],'r-',linewidth=2.0)
plt.plot(m.time,u[1],'k:',linewidth=2.0)
plt.legend(['Elevator','Thrust'])
plt.ylabel('MV Action')
plt.subplot(312)
plt.plot(m.time,y[0],'b:',linewidth=2.0)
plt.legend(['Air Speed'])
plt.subplot(313)
plt.plot(m.time,y[1],'g--',linewidth=2.0)
plt.legend(['Climb Rate'])
plt.show()
|
python
|
#! /usr/bin/env python3.7
from modules.commands.helpers.textutil import add as quote_add
HELP_TEXT = ["!addquote <quote>", "Add the selected text for review (broadcasters bypass review)."]
def call(salty_inst, c_msg, **kwargs):
success, response = quote_add(salty_inst, c_msg, "quote", **kwargs)
return success, response
def test(salty_inst, c_msg, **kwargs):
assert True
|
python
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import time
import luigi
from servicecatalog_factory import constants
from servicecatalog_factory.workflow.portfolios.get_bucket_task import GetBucketTask
from servicecatalog_factory.workflow.tasks import FactoryTask, logger
class CreateProductTask(FactoryTask):
uid = luigi.Parameter()
region = luigi.Parameter()
name = luigi.Parameter()
owner = luigi.Parameter(significant=False)
description = luigi.Parameter(significant=False)
distributor = luigi.Parameter(significant=False)
support_description = luigi.Parameter(significant=False)
support_email = luigi.Parameter(significant=False)
support_url = luigi.Parameter(significant=False)
tags = luigi.ListParameter(default=[], significant=False)
def params_for_results_display(self):
return {
"region": self.region,
"uid": self.uid,
"name": self.name,
}
def requires(self):
return {"s3_bucket_url": GetBucketTask()}
def output(self):
return luigi.LocalTarget(
f"output/CreateProductTask/{self.region}-{self.name}.json"
)
def run(self):
logger_prefix = f"{self.region}-{self.name}"
with self.regional_client("servicecatalog") as service_catalog:
search_products_as_admin_response = service_catalog.search_products_as_admin_single_page(
Filters={"FullTextSearch": [self.name]}
)
found = False
product_view_summary = None
for product_view_details in search_products_as_admin_response.get(
"ProductViewDetails"
):
product_view_summary = product_view_details.get("ProductViewSummary")
if product_view_summary.get("Name") == self.name:
found = True
logger.info(f"Found product: {self.name}: {product_view_summary}")
things_to_change = dict()
if product_view_summary.get("Owner") != self.owner:
things_to_change["Owner"] = self.owner
if product_view_summary.get("ShortDescription") != self.description:
things_to_change["Description"] = self.description
if product_view_summary.get("Distributor") != self.distributor:
things_to_change["Distributor"] = self.distributor
if (
product_view_summary.get("SupportDescription")
!= self.support_description
):
things_to_change[
"SupportDescription"
] = self.support_description
if product_view_summary.get("SupportEmail") != self.support_email:
things_to_change["SupportEmail"] = self.support_email
if product_view_summary.get("SupportUrl") != self.support_url:
things_to_change["SupportUrl"] = self.support_url
if len(things_to_change.keys()) > 0:
service_catalog.update_product(
Id=product_view_summary.get("ProductId"), **things_to_change
)
break
if not found:
logger.info(f"Not found product: {self.name}, creating")
tags = [{"Key": "ServiceCatalogFactory:Actor", "Value": "Product"}] + [
{"Key": t.get("Key"), "Value": t.get("Value"),} for t in self.tags
]
create_product_args = {
"ProductType": "CLOUD_FORMATION_TEMPLATE",
"ProvisioningArtifactParameters": {
"Name": "-",
"Type": "CLOUD_FORMATION_TEMPLATE",
"Description": "Placeholder version, do not provision",
"Info": {
"LoadTemplateFromURL": "https://{}.s3.{}.amazonaws.com/{}".format(
self.load_from_input("s3_bucket_url").get(
"s3_bucket_url"
),
constants.HOME_REGION,
"empty.template.yaml",
)
},
},
"Name": self.name,
"Owner": self.owner,
"Description": self.description,
"Distributor": self.distributor,
"SupportDescription": self.support_description,
"SupportEmail": self.support_email,
"SupportUrl": self.support_url,
"Tags": tags,
}
product_view_summary = (
service_catalog.create_product(**create_product_args)
.get("ProductViewDetail")
.get("ProductViewSummary")
)
product_id = product_view_summary.get("ProductId")
logger.info(f"Created product {self.name}, waiting for completion")
while True:
time.sleep(2)
search_products_as_admin_response = (
service_catalog.search_products_as_admin_single_page()
)
products_ids = [
product_view_detail.get("ProductViewSummary").get("ProductId")
for product_view_detail in search_products_as_admin_response.get(
"ProductViewDetails"
)
]
logger.info(f"Looking for {product_id} in {products_ids}")
if product_id in products_ids:
logger.info(f"Found {product_id} ")
break
if product_view_summary is None:
raise Exception(f"{logger_prefix}: did not find or create a product")
product_view_summary["uid"] = self.uid
with self.output().open("w") as f:
logger.info(f"{logger_prefix}: about to write! {product_view_summary}")
f.write(json.dumps(product_view_summary, indent=4, default=str,))
|
python
|
"""Delta-v estimation for propulsive landing."""
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import fsolve
# Speed of sound in air at 290 K [units: meter second**-1].
a = 342
# Gravitational acceleration [units: meter second**-2].
g_0 = 9.81
# Atmosphere scale height [units: meter].
# At 290 K, near surface
H_0 = 8500.
# Atmosphere sea level density [units: kilogram meter**-3].
# At 290 K
rho_0 = 1.20
def drag(M):
"""Drag coefiicient of a cylinder in transonic flight.
Reference: S. F. Hoerner, "Fluid-Dynamic Drag" Ch 16.3
Arguments:
M (scalar): Mach number [units: dimensionless].
"""
# Hoerner says K_fore = 0.9. The formula below is a hack to
# make the curve match Hoerner ch 16 figure 14.
K_fore = 0.9 if M > 1 else 0.8
# Stagnation pressure increment / dynamic pressure
qq = 1 + M**2/4 + M**4/10 # Eqn 15.4
if M >= 1:
# Include pressure loss due to normal shock
qq = 1.84 - 0.76/M**2 + 0.166/M**4 + 0.035/M**6 # Eqn 16.4
C_D = K_fore * qq
return C_D
def terminal_velocity(m_A, H):
"""Terminal velocity of a falling cylinder.
Arguments:
m_A (scalar): mass/area ratio [units: kilogram meter**-2].
H (scalar): altitude [units: meter].
Returns:
Terminal velocity [units: meter second**-1].
"""
def root_fun(v):
M = v / a
v_t = (2 * m_A * g_0 / (drag(M) * rho_0))**0.5 * np.exp(H / (2 * H_0))
return v - v_t
v_t = fsolve(root_fun, 300.)[0]
return v_t
def landing_dv(m_A, accel):
"""Landing dv.
Arguments:
m_A (scalar): mass/area ratio [units: kilogram meter**-2].
        accel (scalar): landing acceleration [units: meter second**-2].
    Returns:
        Landing delta-v [units: meter second**-1].
"""
def root_fun(v):
M = v / a
t_b = v / accel
H = 0.5 * accel * t_b**2
v_t = (2 * m_A * g_0 / (drag(M) * rho_0))**0.5 * np.exp(H / (2 * H_0))
return v - v_t
v_t = fsolve(root_fun, 300.)[0]
return v_t * (1 + g_0 / accel)
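# Worked example (illustrative numbers): a stage with m_A = 1000 kg/m^2
# decelerating at 3*g_0 ignites at altitude H = 0.5 * accel * t_b**2; the
# returned delta-v is the terminal velocity there scaled by (1 + g_0/accel)
# to account for gravity losses during the burn, e.g.:
#
#     dv = landing_dv(1000., 3 * g_0)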
def main():
# Plot the drag model
M = np.linspace(0, 4)
C_D = [drag(M_) for M_ in M]
plt.plot(M, C_D)
plt.grid(True)
plt.xlabel('$M$')
plt.ylabel('$C_D$')
# Range of mass/area ratios to consider
m_A = np.linspace(300, 4000)
# Compute and plot delta-v for landing
plt.figure()
accels = [2*g_0, 3*g_0, 4*g_0]
colors = ['C0', 'C1', 'C2']
for accel, color in zip(accels, colors):
dv_land = np.array([landing_dv(m_A_, accel) for m_A_ in m_A])
v_t = dv_land / (1 + g_0 / accel)
plt.plot(m_A, dv_land,
                 label=r'$\Delta v_{{land}}, a={:.0f} g_0$'.format(accel / g_0),
color=color, linestyle='-')
plt.plot(m_A, v_t,
label='$v_t, a={:.0f} g_0$'.format(accel / g_0),
color=color, linestyle='--')
plt.axhline(y=a, color='grey', label='sonic')
plt.xlabel('Mass / frontal area ratio [kg/m^2]')
plt.ylabel('Velocity [m/s]')
plt.legend()
plt.savefig('landing_dv.png')
plt.show()
if __name__ == '__main__':
main()
|
python
|
"""Auto-generated file, do not edit by hand. EG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_EG = PhoneMetadata(id='EG', country_code=20, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:[189]\\d?|[24-6])\\d{8}|[13]\\d{7}', possible_length=(8, 9, 10), possible_length_local_only=(6, 7)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:1(?:3[23]|5\\d?)|(?:2[2-4]|3)\\d|4(?:0[2-5]|[578][23]|64)|5(?:0[2-7]|5\\d|7[23])|6[24-689]3|8(?:2[2-57]|4[26]|6[237]|8[2-4])|9(?:2[27]|3[24]|52|6[2356]|7[2-4]))\\d{6}', example_number='234567890', possible_length=(8, 9), possible_length_local_only=(6, 7)),
mobile=PhoneNumberDesc(national_number_pattern='1[0-25]\\d{8}', example_number='1001234567', possible_length=(10,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{7}', example_number='8001234567', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{7}', example_number='9001234567', possible_length=(10,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d)(\\d{7,8})', format='\\1 \\2', leading_digits_pattern=['[23]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{6,7})', format='\\1 \\2', leading_digits_pattern=['1[35]|[4-6]|8[2468]|9[235-7]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[189]'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)
|
python
|
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
#from utils_fn import *
from ops import *
import time
class InpaintModel():
def __init__(self, args):
self.model_name = "InpaintModel" # name for checkpoint
self.img_size = args.IMG_SHAPES
# yj
def build_inpaint_net(self, x, edge, grad, mask, args=None, reuse=False,
training=True, padding='SAME', name='inpaint_net'):
"""Inpaint network.
Args:
x: incomplete image[-1, 1] with shape of (batch_size, h, w, c)
edge: incomplete edge {0, 1} with shape of (batch_size, h, w)
grad map: incomplete grad with shape of (batch_size, h, w, 6)
mask: mask region {0, 1}
Returns:
            the completed image at full resolution (intermediate multi-scale
            predictions are computed below but not returned here)
"""
x = tf.reshape(x, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], args.IMG_SHAPES[2]])
mask = tf.reshape(mask, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
edge = tf.reshape(edge, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
# grad = tf.reshape(grad, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 6])
xin = x
ones_x = tf.ones_like(x)[:, :, :, 0:1]
        x = tf.concat([x, ones_x * edge, ones_x * mask, grad], axis=3)  # concat image, edge, mask and gradient channels
# encoder-decoder network: channel 64-128-256-128-64
cnum = 64 # initial channel
        # a decorator: arg_scope([op1, op2, ...], xx, ...) means the
        # attributes or parameters xx defined here become the defaults in op1, op2, ...
with tf.variable_scope(name, reuse=reuse), \
arg_scope([gen_conv, gen_deconv],
training=training, padding=padding):
# Encoder
# scale 256 channels activation: relu
x = gen_conv(x, cnum, 7, stride=1, activation=tf.nn.relu, name='en_conv1') # 9 -> 64, ksize=7x7, stride=1
# scale 128
x = gen_conv(x, 2 * cnum, 4, stride=2, activation=tf.nn.relu, name='en_conv2')
# scale 64
x = gen_conv(x, 4 * cnum, 4, stride=2, activation=tf.nn.relu, name='en_conv3')
# res block
x = resnet_blocks(x, 4 * cnum, 3, stride=1, rate=2, block_num=8, activation=tf.nn.relu, name='en_64_8')
# Decoder
# TODO: output scale 64 Down scale = 2 (origin) pool scale = 2 (origin)
# share attention
x = attention(x, 4 * cnum, down_scale=2, pool_scale=2, name='attention_pooling_64')
# out of predict grad map
x_64 = gen_conv(x, 4 * cnum, 5, stride=1, activation=tf.nn.relu, name='out64_grad_out')
x_grad_out_64 = gen_conv(x_64, 6, 1, stride=1, activation=None, name='grad64')
x_out_64 = gen_conv(x_64, 3, 1, stride=1, activation=tf.nn.tanh, name='out64')
# scale 64 - 128
x = tf.concat([x, x_64], axis=3)
x = gen_deconv(x, 2 * cnum, 4, method='deconv', activation=tf.nn.relu, name='de128_conv4_upsample')
# TODO: output scale 128
# share attention
x = attention(x, 2 * cnum, down_scale=2, pool_scale=2, name='attention_pooling_128')
# out of predict grad map
x_128 = gen_conv(x, 2 * cnum, 5, stride=1, activation=tf.nn.relu, name='out128_grad_out')
x_grad_out_128 = gen_conv(x_128, 6, 1, stride=1, activation=None, name='grad128')
x_out_128 = gen_conv(x_128, 3, 1, stride=1, activation=tf.nn.tanh, name='out128')
# scale 128 - 256
x = tf.concat([x, x_128], axis=3)
x = gen_deconv(x, cnum, 4, method='deconv', activation=tf.nn.relu, name='de256_conv5_upsample')
# TODO: output scale 256
# share attention
x = attention(x, cnum, down_scale=2, pool_scale=2, name='attention_pooling_256')
# out of predict grad map
x = gen_conv(x, cnum, 5, stride=1, activation=tf.nn.relu, name='out256_grad_out')
x_grad = gen_conv(x, 6, 1, stride=1, activation=None, name='grad256') # grad map no activation
x = gen_conv(x, 3, 1, stride=1, activation=tf.nn.tanh, name='out256')
return x
def evaluate(self, x, edge, mask, args, training=False, reuse=False):
# image, grad map
image = normalize(x)
grad = tf.image.sobel_edges(image) # normalization?
grad = tf.reshape(grad, [1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 6]) # 6 channel
# x for image
x = tf.reshape(image, [1, args.IMG_SHAPES[0], args.IMG_SHAPES[1],
args.IMG_SHAPES[2]]) # [1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], args.IMG_SHAPES[2]]
mask = tf.reshape(mask, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
edge = tf.reshape(edge, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
# incomplete image
x_incomplete = x * (1. - mask)
# incomplete edge at full scale
input_edge = 1 - edge
edge_incomplete = input_edge * (1 - mask) + mask # 0 (black) for edge when save and input, 1 (white) for non edge
# grad
grad_incomplete = (1. - mask) * grad
out_256 = self.build_inpaint_net(x_incomplete, edge_incomplete, grad_incomplete,
mask, args, reuse=reuse,training=training, padding=args.PADDING)
raw_x = inverse_transform(x)
raw_x_incomplete = raw_x * (1 - mask)
raw_x_complete = raw_x_incomplete + inverse_transform(out_256) * mask
return raw_x_complete
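# Hedged usage sketch (placeholder names; `args` must provide IMG_SHAPES and
# PADDING as used above):
#
#     model = InpaintModel(args)
#     completed = model.evaluate(image, edge, mask, args)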
|
python
|
import os
# determine n - the number of training observations
with open('CV_folds/training_set_0.txt', 'r') as nFile:
    n = sum(1 for _ in nFile)
# determine the number of folds
# nFolds = sum(os.path.isdir(i) for i in os.listdir('CV_decomp'))
nFolds = len(os.listdir('CV_decomp'))
print(nFolds)
# determine values of p
inList = os.listdir('CV_result')
numList = []
for i in inList:
    numList.append(float(i.split("_")[2]))
print("p SSD MARE RMSPE")
# for each value of p
for j in set(numList):
    i = 0
    SSDsum = MAREsum = RMSPEsum = 0
    # for each fold
    while i < nFolds:
        # compute the sum of errors across folds
        with open('CV_result/st_idw_' + str(j) + "_" + str(i) + ".txt", 'r') as inFile:
            r = inFile.readline().split(",")
        SSD, MARE, RMSPE = float(r[0]), float(r[1]), float(r[2])
        SSDsum += SSD
        MAREsum += MARE
        RMSPEsum += RMSPE
        i += 1
    print(j, ((SSDsum / n) ** 0.5) / 10.0, (MAREsum / n) / 10.0, (((RMSPEsum / n) ** 0.5) * 100.0) / 10.0)
|
python
|
import re
import random
import os
import pandas as pd
try:
import torch
except ImportError:
pass
from tqdm import tqdm
import spacy
from spacy import displacy
from visuUtils import train2myVisu, build_color_scheme
from visuUtils import conll2sent_list, sent_list2spacy, myScores
class visualizer(object):
''' Integrate spacy visualization for conll and spacy formats:'''
def __init__(self, data, predictions, verbose = False, column = -1):
''' Input: - data, conll file path,
- prediction, conll file path for predictions
- column, the column to be selected as annotation in the conll file (default is lasts column)
'''
self.path2conll = data
# convert to spaCy readable json format
self.data = sent_list2spacy(*conll2sent_list(data, column = column))
unique_entities = []
nb_sents = 0
nb_tokens = 0
for sent in self.data:
for ent in sent[1]['entities']:
if ent[2] not in unique_entities:
unique_entities.append(ent[2])
nb_sents += 1
nb_tokens += len(sent[0].split(' '))
# Set summary statistics
self.nb_sents = nb_sents
self.nb_tokens = nb_tokens
self.unique_ents = unique_entities
# Create a separate container for visualizable data
self.visu_gold = [train2myVisu(sent) for sent in self.data]
# Build options for the displayer
self.options = build_color_scheme([ent.upper() for ent in self.unique_ents])
self.path2predicted = predictions
self.pred_data = sent_list2spacy(*conll2sent_list(self.path2predicted))
self.visu_pred = [train2myVisu(sent) for sent in self.pred_data]
if verbose:
            print('Data contains {} sentences with an average of {:.2f} tokens per sentence, for a total of {} tokens.'.format(
                self.nb_sents, self.nb_tokens/self.nb_sents, self.nb_tokens))
print('There are {} entities plus the "O" label: {}'.format(len(self.unique_ents), self.unique_ents))
return
    def pre_annot_pred(self, column = 6):
        ''' Apply pre-annotation and get the predictions and scores'''
pred_data = sent_list2spacy(*conll2sent_list(self.path2conll, column = column))
self.pred_data = pred_data
self.visu_pred = [train2myVisu(sent) for sent in self.pred_data]
self.path2predicted = self.path2conll
return
def score_predictions(self, average = 'weighted', grouping = None, column = -1, punct_ignore = False, col_sep = '\t'):
''' Score the model on the predicted data
input: - average, [None, 'macro', 'micro', 'weighted']
                       see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html for an explanation
            - grouping, dictionary with entries of the form {'NAME':[FNAME, LNAME]} (one to many)
- column, integer, column from the prediction file to be selected as the prediction (in case of multiple prediction for example)
- punct_ignore, should the punctuation be removed from the evaluation
- col_sep, separator between word and label in the conll file
'''
self.scores = myScores(self.path2conll, self.path2predicted, average = average, grouping = grouping, column = column, punct_ignore = punct_ignore, col_sep = col_sep)
if column != -1:
print('Scoring using the {} column of the test set'.format(column))
# build false positive and false negative lists
self.gold_per_label = {}
self.pred_per_label = {}
self.FP_dic = {}
self.FN_dic = {}
for lab in self.unique_ents:
self.gold_per_label[lab] = []
self.pred_per_label[lab] = []
self.FP_dic[lab] = []
self.FN_dic[lab] = []
punct = ['_', '-', "'", ',', ';', '.', '(', ')', '/', '\\', ':']
# fill gold_ents entities dict
for i, sents in enumerate(self.data):
ents = sents[1]['entities']
for ent in ents:
if (sents[0][ent[0]:ent[1]] not in punct) & punct_ignore:
self.gold_per_label[ent[2]].append((i, *ent))
elif not punct_ignore:
self.gold_per_label[ent[2]].append((i, *ent))
# fill pred_ents entities dict
for i, sents in enumerate(self.pred_data):
ents = sents[1]['entities']
for ent in ents:
if (sents[0][ent[0]:ent[1]] not in punct) & punct_ignore:
self.pred_per_label[ent[2]].append((i, *ent))
elif not punct_ignore:
self.pred_per_label[ent[2]].append((i, *ent))
for lab in self.unique_ents:
for ent in self.pred_per_label[lab]:
if ent not in self.gold_per_label[lab]:
self.FP_dic[lab].append(ent)
for ent in self.gold_per_label[lab]:
if ent not in self.pred_per_label[lab]:
self.FN_dic[lab].append(ent)
return self.scores
def scores2pd(self):
''' Wrapper to return scores as dataframe'''
df_scores = pd.DataFrame(data = {'precision':self.scores[0],
'recall':self.scores[1],
'f1':self.scores[2],
'count':self.scores[3]}, index = self.unique_ents)
df_scores.sort_index(inplace=True)
return df_scores
def visu_gold_sample(self, ix = None, verbose = True, context = 0):
''' Visualize a random gold sample'''
if ix is None:
            ix = random.randint(0, len(self.data) - 1)
#elif(ix >= len(self.data)):
# print('ix out of bound, please selecte an index smaller than {}'.format(len(self.data)))
# return
if verbose:
print('sentence {}/{}'.format(ix, len(self.data)))
displacy.render(self.visu_gold[(ix-context):(ix+context+1)], style = 'ent', jupyter = True, manual = True, options = self.options)
return
def visu_pred_sample(self, ix = None, verbose = True, context = 0):
''' Visualize a random pred sample'''
if ix is None:
            ix = random.randint(0, len(self.data) - 1)
elif(ix >= len(self.data)):
            print('ix out of bound, please select an index smaller than {}'.format(len(self.data)))
return
if verbose:
print('sentence {}/{}'.format(ix, len(self.data)))
displacy.render(self.visu_pred[(ix-context):(ix+context+1)], style = 'ent', jupyter = True, manual = True, options = self.options)
return
def visu_compare(self, ix = None, context = 0):
''' Visualize the same sample from gold and pred'''
if ix is None:
            ix = random.randint(0, len(self.data) - 1)
print('Gold:')
self.visu_gold_sample(ix, verbose = True, context = context)
print('Predicted:')
self.visu_pred_sample(ix, verbose = False, context = context)
return
def visu_FP_sample(self, lab = None, i = None, context = 0, verbose = True):
''' Visualize one False Positive for a given category'''
if lab is None:
lab = random.choice(self.unique_ents)
nb_errors = len(self.FP_dic[lab])
if i is None:
            i = random.randint(0, nb_errors - 1)
ix = self.FP_dic[lab][i][0]
if verbose:
print('There are {} FP for the {} category.'.format(nb_errors, lab))
print('Displaying FP {}/{}'.format(i, nb_errors))
self.visu_compare(ix, context = context)
return
def visu_FPs(self, lab = None, context = 0):
''' Visualize all FPs of a given category'''
if lab is None:
lab = random.choice(self.unique_ents)
nb_errors = len(self.FP_dic[lab])
print('There are {} FP for the {} category.'.format(nb_errors, lab))
        # little trick to avoid showing the same sentence several times
ix_prec = -1
for i in range(0, nb_errors):
ix_tmp = self.FP_dic[lab][i][0]
if ix_prec != ix_tmp:
self.visu_compare(ix_tmp, context = context)
ix_prec = ix_tmp
print('----------------------------------------------\n')
def visu_FN_sample(self, lab = None, i = None, context = 0, verbose = True):
''' Visualize one False Negative for a given category'''
if lab is None:
lab = random.choice(self.unique_ents)
nb_errors = len(self.FN_dic[lab])
if i is None:
            i = random.randint(0, nb_errors - 1)
ix = self.FN_dic[lab][i][0]
if verbose:
print('There are {} FN for the {} category.'.format(nb_errors, lab))
print('Displaying FN {}/{}'.format(i, nb_errors))
self.visu_compare(ix, context = context)
return
def visu_FNs(self, lab = None, context = 0):
''' Visualize all FNs of a given category'''
if lab is None:
lab = random.choice(self.unique_ents)
nb_errors = len(self.FN_dic[lab])
print('There are {} FN for the {} category.'.format(nb_errors, lab))
        # little trick to avoid showing the same sentence several times
ix_prec = -1
for i in range(0, nb_errors):
ix_tmp = self.FN_dic[lab][i][0]
if ix_prec != ix_tmp:
self.visu_compare(ix_tmp, context = context)
ix_prec = ix_tmp
print('----------------------------------------------')
def group_labs(self, grouping):
''' Given a grouping of categories, change the categories of the visu object to perform new visualization and scoring based on these new labels.'''
# change the labels in gold_data
new_data = []
for sent in self.data:
ents = sent[1]['entities']
new_ents = []
for ent in ents:
for k, v in grouping.items():
if ent[2] in v:
ent = (ent[0], ent[1], k)
if ent[2]!='O':
new_ents.append(ent)
new_sent = (sent[0], {'entities':new_ents})
new_data.append(new_sent)
self.data = new_data
self.visu_gold = [train2myVisu(sent) for sent in self.data]
# change the label in pred_data
new_data = []
for sent in self.pred_data:
ents = sent[1]['entities']
new_ents = []
for ent in ents:
for k, v in grouping.items():
if ent[2] in v:
ent = (ent[0], ent[1], k)
if ent[2]!='O':
new_ents.append(ent)
new_sent = (sent[0], {'entities':new_ents})
new_data.append(new_sent)
self.pred_data = new_data
self.visu_pred = [train2myVisu(sent) for sent in self.pred_data]
# recompute the unique entities
unique_entities = []
for sent in self.data:
for ent in sent[1]['entities']:
if ent[2] not in unique_entities:
unique_entities.append(ent[2])
self.unique_ents = unique_entities
print('New unique entities after grouping:', self.unique_ents)
self.options = build_color_scheme(self.unique_ents)
return
# Decoding functions for the different frameworks
# For now on there are Yaset, NCRFpp and spaCy
try:
from ncrfpp.utils.myUtils import evaluate, load_model_decode
from ncrfpp.utils.data import Data
except ImportError:
pass
def ncrf_decoding(decode_conf_dict, verbose = False):
''' Perform ncrf decoding from a config file'''
data = Data()
data.read_config(decode_conf_dict)
status = data.status.lower()
data.HP_gpu = torch.cuda.is_available()
print("MODEL: decode")
data.load(data.dset_dir)
## needed after data.load(data.dset_dir)
data.read_config(decode_conf_dict)
print(data.raw_dir)
# exit(0)
data.show_data_summary()
data.generate_instance('raw')
print("nbest: %s"%(data.nbest))
decode_results, pred_scores = load_model_decode(data, 'raw')
if data.nbest:
data.write_nbest_decoded_results(decode_results, pred_scores, 'raw')
else:
data.write_decoded_results(decode_results, 'raw')
# convert to the same format as yaset
with open(decode_conf_dict['decode_dir'], 'r') as f:
predictions = f.read().splitlines()
new_preds = []
for l in predictions:
if l != '':
if l[0] != '#':
ll = '\t'.join(l.split(' ')) + '\n'
new_preds.append(ll)
elif l == '':
new_preds.append('\n')
with open(decode_conf_dict['decode_dir'], 'w') as f:
f.writelines(new_preds)
print('end')
return
def yaset_pred(path2model, pathgold, path2save):
''' Apply yaset and get the predictions and scores'''
print('Applying yaset model...(1 to 2 mins)')
apply_yaset = 'yaset APPLY --working-dir '+path2save+' --input-file '+pathgold+' --model-path '+ path2model
os.system(apply_yaset)
return
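# Hedged usage sketch (file paths are placeholders):
#
#     viz = visualizer('data/test.conll', 'data/predictions.conll', verbose=True)
#     viz.score_predictions(average=None)
#     viz.visu_compare(ix=3, context=1)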
|
python
|
from ._text import BufferText
__all__ = [
"BufferText",
]
|
python
|
#!/usr/bin/env python
import xml.etree.ElementTree as ET
import six
from leather import svg
from leather import theme
class Axis(object):
"""
A horizontal or vertical chart axis.
:param ticks:
Instead of inferring tick values from the data, use exactly this
sequence of ticks values. These will still be passed to the
:code:`tick_formatter`.
:param tick_formatter:
An optional :func:`.tick_format_function`.
"""
def __init__(self, ticks=None, tick_formatter=None, name=None):
self._ticks = ticks
self._tick_formatter = tick_formatter
self._name = six.text_type(name) if name is not None else None
def _estimate_left_tick_width(self, scale):
"""
Estimate the y axis space used by tick labels.
"""
tick_values = self._ticks or scale.ticks()
tick_count = len(tick_values)
tick_formatter = self._tick_formatter or scale.format_tick
max_len = 0
for i, value in enumerate(tick_values):
max_len = max(max_len, len(tick_formatter(value, i, tick_count)))
return max_len * theme.tick_font_char_width
def estimate_label_margin(self, scale, orient):
"""
Estimate the space needed for the tick labels.
"""
margin = 0
if orient == "left":
margin += self._estimate_left_tick_width(scale) + (theme.tick_size * 2)
elif orient == "bottom":
margin += theme.tick_font_char_height + (theme.tick_size * 2)
if self._name:
margin += theme.axis_title_font_char_height + theme.axis_title_gap
return margin
def to_svg(self, width, height, scale, orient):
"""
Render this axis to SVG elements.
"""
group = ET.Element("g")
group.set("class", "axis " + orient)
# Axis title
if self._name is not None:
if orient == "left":
title_x = -(
self._estimate_left_tick_width(scale) + theme.axis_title_gap
)
title_y = height / 2
dy = ""
transform = svg.rotate(270, title_x, title_y)
elif orient == "bottom":
title_x = width / 2
title_y = (
height
+ theme.tick_font_char_height
+ (theme.tick_size * 2)
+ theme.axis_title_gap
)
dy = "1em"
transform = ""
title = ET.Element(
"text",
x=six.text_type(title_x),
y=six.text_type(title_y),
dy=dy,
fill=theme.axis_title_color,
transform=transform,
)
title.set("text-anchor", "middle")
title.set("font-family", theme.axis_title_font_family)
title.text = self._name
group.append(title)
# Ticks
if orient == "left":
label_x = -(theme.tick_size * 2)
x1 = -theme.tick_size
x2 = width
range_min = height
range_max = 0
elif orient == "bottom":
label_y = height + (theme.tick_size * 2)
y1 = 0
y2 = height + theme.tick_size
range_min = 0
range_max = width
tick_values = self._ticks or scale.ticks()
tick_count = len(tick_values)
tick_formatter = self._tick_formatter or scale.format_tick
zero_tick_group = None
for i, value in enumerate(tick_values):
# Tick group
tick_group = ET.Element("g")
tick_group.set("class", "tick")
if value == 0:
zero_tick_group = tick_group
else:
group.append(tick_group)
# Tick line
projected_value = scale.project(value, range_min, range_max)
if value == 0:
tick_color = theme.zero_color
else:
tick_color = theme.tick_color
if orient == "left":
y1 = projected_value
y2 = projected_value
elif orient == "bottom":
x1 = projected_value
x2 = projected_value
tick = ET.Element(
"line",
x1=six.text_type(x1),
y1=six.text_type(y1),
x2=six.text_type(x2),
y2=six.text_type(y2),
stroke=tick_color,
)
tick.set("stroke-width", six.text_type(theme.tick_width))
tick_group.append(tick)
# Tick label
if orient == "left":
x = label_x
y = projected_value
dy = "0.32em"
text_anchor = "end"
elif orient == "bottom":
x = projected_value
y = label_y
dy = "1em"
text_anchor = "middle"
label = ET.Element(
"text",
x=six.text_type(x),
y=six.text_type(y),
dy=dy,
fill=theme.label_color,
)
label.set("text-anchor", text_anchor)
label.set("font-family", theme.tick_font_family)
value = tick_formatter(value, i, tick_count)
label.text = six.text_type(value)
tick_group.append(label)
if zero_tick_group is not None:
group.append(zero_tick_group)
return group
def tick_format_function(value, index, tick_count):
"""
This example shows how to define a function to format tick values for
display.
    :param value:
        The value to be formatted.
:param index:
The index of the tick.
:param tick_count:
The total number of ticks being displayed.
:returns:
A stringified tick value for display.
"""
return six.text_type(value)
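# A hedged example of supplying a custom formatter (percent_format is
# illustrative, not part of leather):
def percent_format(value, index, tick_count):
    """Render tick values such as 0.25 as '25%'."""
    return '{:.0%}'.format(value)
percent_axis = Axis(ticks=[0, 0.25, 0.5, 0.75, 1.0],
                    tick_formatter=percent_format, name='Share')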
|
python
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class GaussianMixture(nn.Module):
def __init__(self, n_mix, d_inp, learn_var=True, share_prior=False):
super(GaussianMixture, self).__init__()
"""
The current implementation is super simplified, treating each dim
of the target as a one-dimensional Gaussian mixture with separate
mixture weights if `share_prior == False` (default).
When `share_prior == True`, all `d_tgt` target dims share the same
mixture weights, which poses some inductive bias.
However, neither is the optimal case, as corellations between the
target dims is essentially ignored.
Input:
inp : [... x d_inp]
target : [... x d_tgt]
Return:
nll : [... x d_tgt]
"""
self.n_mix = n_mix
self.d_tgt = d_tgt = 1
self.d_inp = d_inp
self.learn_var = learn_var
self.share_prior = share_prior
self.mean = nn.Linear(d_inp, d_tgt * n_mix)
if learn_var:
self.var = nn.Linear(d_inp, d_tgt * n_mix, bias=False)
if n_mix > 1:
if share_prior:
self.prior = nn.Linear(d_inp, n_mix)
else:
self.prior = nn.Linear(d_inp, d_tgt * n_mix)
        else:
            assert n_mix == 1, '`n_mix` must be a positive integer'
self.const = -0.5 * math.log(2 * math.pi)
def log_likelihood(self, target, mean, log_std, log_prior=None):
"""
target : [... x d_tgt]
mean : [... x d_tgt x n_mix]
log_std : [... x d_tgt x n_mix]
log_prior : [... x d_tgt x n_mix] or None
"""
# Gaussian log-likelihood is not safe for half precision due to the
# `log_std.exp()` operation, especially in the backward pass.
# For simplicity, we use float32 for log-likelihood computation.
        tgt_ = target.unsqueeze(-1).float()
        mean = mean.float()
        log_std = log_std.float()
# [... x d_tgt x n_mix]
log_probs = self.const - log_std \
- 0.5 * (((tgt_ - mean) / log_std.exp()) ** 2)
if log_prior is None: # n_mix = 1
log_prob = log_probs.squeeze(-1)
else:
log_prior = log_prior.float()
# [... x d_tgt x dim]
w_log_probs = log_prior + log_probs
# [... x d_tgt x 1]
max_w_log_prob = w_log_probs.max(-1, keepdim=True)[0]
# [... x d_tgt]
log_prob = torch.logsumexp(w_log_probs - max_w_log_prob, dim=-1) \
+ max_w_log_prob.squeeze(-1)
return log_prob
def forward(self, inp, target, return_mean=False):
mean = self.mean(inp)
mean = mean.view(*mean.size()[:-1], self.d_tgt, self.n_mix)
if self.learn_var:
log_std = self.var(inp)
log_std = log_std.view(*log_std.size()[:-1], self.d_tgt, self.n_mix)
else:
log_std = torch.zeros(1, dtype=inp.dtype, device=inp.device)
if self.n_mix > 1:
prior = self.prior(inp)
if self.share_prior:
prior = prior.view(*prior.size()[:-1], 1, self.n_mix)
else:
prior = prior.view(*prior.size()[:-1], self.d_tgt, self.n_mix)
log_prior = F.log_softmax(prior, dim=-1)
else:
log_prior = None
log_prob = self.log_likelihood(target, mean, log_std, log_prior)
nll = - log_prob
if return_mean:
return nll, mean
else:
return nll
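# A minimal smoke test (an added example; shapes are assumptions, not from
# the original source):
if __name__ == '__main__':
    torch.manual_seed(0)
    gmm = GaussianMixture(n_mix=3, d_inp=16)
    inp = torch.randn(8, 16)     # [batch x d_inp]
    target = torch.randn(8, 1)   # [batch x d_tgt]; d_tgt is fixed to 1
    nll = gmm(inp, target)
    print(nll.shape)             # -> torch.Size([8, 1])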
|
python
|
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.data.rate_centers_list import RateCentersList
from iris_sdk.models.maps.rate_centers import RateCentersMap
class RateCentersData(RateCentersMap, BaseData):
@property
def total_count(self):
return self.result_count
@total_count.setter
def total_count(self, total_count):
self.result_count = total_count
def __init__(self):
self.rate_centers = RateCentersList()
|
python
|
from rest_framework import permissions
from rest_framework.generics import CreateAPIView
from django.contrib.auth.models import User
from rest_framework import viewsets
from usuarios.serializers import UserSerializer
class UserViewSet(viewsets.ModelViewSet):
serializer_class = UserSerializer
def get_queryset(self):
queryset = User.objects.filter(username=self.request.user)
return queryset
class CreateUserView(CreateAPIView):
model = User
permission_classes = [
permissions.AllowAny
]
serializer_class = UserSerializer
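# Hedged wiring sketch (URL prefix and basename are placeholders):
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'users', UserViewSet, basename='user')
#     # CreateUserView can be routed directly, e.g.
#     # path('register/', CreateUserView.as_view())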
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lpp_test', '0003_auto_20191024_1701'),
]
operations = [
migrations.AddField(
model_name='uhost',
name='name',
field=models.CharField(default=b'', max_length=128, verbose_name='\u4e3b\u673a\u540d\u79f0'),
),
]
|
python
|
import pglet
from pglet import Icon
def test_icon_add():
c = Icon(name="Mail", color="#FF7F50", size="tiny")
assert isinstance(c, pglet.Control)
assert isinstance(c, pglet.Icon)
# raise Exception(s.get_cmd_str())
assert c.get_cmd_str() == (
'icon color="#FF7F50" name="Mail" size="tiny"'
), "Test failed"
|
python
|
# CyberHeist Lab - Beginner Level
# Good luck :D
import hashlib
import binascii
import colorama
import cowsay
USERNAME = "Grubsy"
PASSWORD = "4aa765fdbe4bf83f7a51a1af53b170ad9e2aab35a9b9f0b066fd069952cffe44"
# PASSWORD HINT: Tristan really likes noodles
# In order he likes:
# 1) udonnoodles (not the password)
# 2) ********noodles (probably the password)
# 3) ramannoodles (probably not the password)
# ASK HIM!
# This has nothing to do with completing this challenge :P
# The only purpose of this list is for text output.
synonymsForBankButNotTheMoneyKindTheLandmassKindBecauseImHilariousAndLoveLongVariableNamesThatAreSuperDescriptive = [
"edge", "side", "embankment", "levee", "border", "verge", "boundary", "margin", "rim", "fringe", "fringes", "flank", "brink",
"perimeter", "circumference", "extremity", "periphery", "limit", "outer" "limit", "limits", "bound", "bounds", "literarymarge",
"bourn", "skirt"
]
def handleSelection(selection):
# If the user selected "View our products"
if selection == 1:
print(colorama.Style.RESET_ALL)
print("Wowee look at all these cool product names :D")
synonymList = ""
for synonym in synonymsForBankButNotTheMoneyKindTheLandmassKindBecauseImHilariousAndLoveLongVariableNamesThatAreSuperDescriptive:
synonymList += synonym + "\n"
cowsay.turtle(synonymList)
# If the user selected "Talk to a professional banker"
elif selection == 2:
print(colorama.Style.RESET_ALL)
cowsay.turtle("There's no such thing as a professional banker, hehe. :P")
# If the user selected "Secret Login Portal (FOR ADMINS ONLY)"
# This might be a place of interest...
#############################################################################
# !!!!!!!!!!!!!!!!!!!!!!!! SUPER SECURE CODE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
#############################################################################
elif selection == 3:
print(colorama.Style.RESET_ALL)
print(colorama.Back.BLUE + colorama.Fore.WHITE)
try:
username = input("Username: ")
password = input("Password: ")
hashedPassword = hashlib.sha256(password.encode()).hexdigest()
except ValueError:
print("WRONG")
print("Stop trying to rek my code ;__;")
else:
# NOTE: For future developer reference, USERNAME and PASSWORD are
# displayed at the top of this file. :D
print(colorama.Style.RESET_ALL)
if username != USERNAME:
print("WRONG USERNAME .____.")
if hashedPassword != PASSWORD:
print("WRONG PASSWORD ;__;")
if password == PASSWORD:
cowsay.turtle("You copy and pasted the PASSWORD variable as the password you silly goose :P" + "\nThat variable is a SHA256 hash of the REAL password so it wont work :/ sry")
if password == "ramannoodles":
print("I told you it PROBABLY wasnt raman noodles but you tried it anyway, ya bum.")
if password == "udonnoodles":
cowsay.turtle("I love these noodles, their THIQness makes them superior to all other noodles.")
if username == USERNAME and hashedPassword == PASSWORD:
cowsay.dragon("CHALLENGE 1 COMPLETE \n Remember that username and password ;)")
exit(1)
#############################################################################
#############################################################################
#############################################################################
def run():
while True:
print(colorama.Style.RESET_ALL)
print(colorama.Back.YELLOW + colorama.Fore.RED)
print("MAIN MENU")
print("1) View our products")
print("2) Talk to a professional banker")
print("3) Secret Login Portal (FOR ADMINS ONLY)")
print("4) Quit" + colorama.Style.RESET_ALL)
print(colorama.Back.RED + colorama.Fore.WHITE)
try:
selection = int(input("Enter your choice: "))
except ValueError:
print(colorama.Style.RESET_ALL)
cowsay.turtle("Plz be nice to my program ;__;")
continue
else:
if selection not in set([1,2,3,4]):
print(colorama.Style.RESET_ALL)
cowsay.turtle(";__; computer sad, why u give invalid input? ;__;")
continue
if selection == 4:
exit(1)
handleSelection(selection)
if __name__ == "__main__":
colorama.init()
print(colorama.Back.WHITE + '\033[31m' + 'Welcome to Grubsy Banks Inc. - Your embankment needs are our business' + '\033[0m')
print(colorama.Back.WHITE + '\033[31m' + "Banker - Someone who works on banks, not the money ones, the geographical ones :)" + '\033[0m')
run()
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import click
from ..data import EventList
from ..maps import Map
from ..cube import fill_map_counts
log = logging.getLogger(__name__)
@click.command("bin")
@click.argument("event_file", type=str)
@click.argument("reference_file", type=str)
@click.argument("out_file", type=str)
@click.option("--overwrite", is_flag=True, help="Overwrite existing files?")
def cli_image_bin(event_file, reference_file, out_file, overwrite):
"""Bin events into an image.
You have to give the event, reference and out FITS filename.
"""
log.info("Executing cli_image_bin")
log.info("Reading {}".format(event_file))
events = EventList.read(event_file)
log.info("Reading {}".format(reference_file))
m_ref = Map.read(reference_file)
counts_map = Map.from_geom(m_ref.geom)
fill_map_counts(counts_map, events)
log.info("Writing {}".format(out_file))
counts_map.write(out_file, overwrite=overwrite)
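# Example invocation (filenames are placeholders; assumes this command is
# registered under the `gammapy image` CLI group):
#
#     gammapy image bin events.fits reference.fits counts.fits --overwrite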
|
python
|
import json
from threading import Lock
from requests.exceptions import HTTPError
from py42.exceptions import Py42ChecksumNotFoundError
from py42.exceptions import Py42Error
from py42.exceptions import Py42HTTPError
from py42.exceptions import Py42SecurityPlanConnectionError
from py42.exceptions import raise_py42_error
from py42.sdk.queries.fileevents.file_event_query import FileEventQuery
from py42.sdk.queries.fileevents.filters.file_filter import MD5
from py42.sdk.queries.fileevents.filters.file_filter import SHA256
from py42.settings import debug
class SecurityModule(object):
def __init__(
self, security_client, storage_client_factory, microservices_client_factory
):
self._security_client = security_client
self._storage_client_factory = storage_client_factory
self._microservices_client_factory = microservices_client_factory
self._client_cache = {}
self._client_cache_lock = Lock()
@property
def savedsearches(self):
"""A collection of methods related to retrieving forensic search data.
Returns:
            :class:`py42._internal.clients.securitydata.SavedSearchClient`
"""
return self._microservices_client_factory.get_saved_search_client()
def get_security_plan_storage_info_list(self, user_uid):
"""Gets IDs (plan UID, node GUID, and destination GUID) for the storage nodes containing
the file activity event data for the user with the given UID.
`REST Documentation <https://console.us.code42.com/swagger/#/Feature/getStorageNode>`__
Args:
user_uid (str): The UID of the user to get plan storage information for.
Returns:
list[:class:`py42.modules.securitydata.PlanStorageInfo`]
"""
locations = None
try:
response = self._security_client.get_security_event_locations(user_uid)
locations = response[u"securityPlanLocationsByDestination"]
except HTTPError as err:
if err.response.status_code == 404:
pass
else:
raise_py42_error(err)
if locations:
plan_destination_map = _get_plan_destination_map(locations)
selected_plan_infos = self._get_plan_storage_infos(plan_destination_map)
if not selected_plan_infos:
raise Py42SecurityPlanConnectionError(
u"Could not establish a connection to retrieve "
u"security events for user {}".format(user_uid)
)
return selected_plan_infos
def get_all_plan_security_events(
self,
plan_storage_info,
cursor=None,
include_files=True,
event_types=None,
min_timestamp=None,
max_timestamp=None,
):
"""Gets events for legacy Endpoint Monitoring file activity on removable media, in cloud
sync folders, and browser uploads.
`Support Article <https://support.code42.com/Administrator/6/Configuring/Endpoint_monitoring>`__
Args:
plan_storage_info (:class:`py42.sdk.modules.securitydata.PlanStorageInfo`):
Information about storage nodes for a plan to get file event activity for.
cursor (str, optional): A cursor position for only getting file events you did not
previously get. Defaults to None.
            include_files (bool, optional): Whether to include the files related to the file events.
                Defaults to True.
event_types: (str, optional): A comma-separated list of event types to filter by.
Available options are:
- ``DEVICE_APPEARED``
- ``DEVICE_DISAPPEARED``
- ``DEVICE_FILE_ACTIVITY``
- ``PERSONAL_CLOUD_FILE_ACTIVITY``
- ``RESTORE_JOB``
- ``RESTORE_FILE``
- ``FILE_OPENED``
- ``RULE_MATCH``
- ``DEVICE_SCAN_RESULT``
- ``PERSONAL_CLOUD_SCAN_RESULT``
Defaults to None.
min_timestamp (float, optional): A POSIX timestamp representing the beginning of the
date range of events to get. Defaults to None.
max_timestamp (float, optional): A POSIX timestamp representing the end of the date
range of events to get. Defaults to None.
Returns:
generator: An object that iterates over :class:`py42.response.Py42Response` objects
that each contain a page of events.
"""
return self._get_security_detection_events(
plan_storage_info,
cursor,
include_files,
event_types,
min_timestamp,
max_timestamp,
)
def get_all_user_security_events(
self,
user_uid,
cursor=None,
include_files=True,
event_types=None,
min_timestamp=None,
max_timestamp=None,
):
"""Gets legacy Endpoint Monitoring file activity events for the user with the given UID.
Args:
user_uid (str): The UID of the user to get security events for.
cursor (str, optional): A cursor position for only getting events you did not
previously get. Defaults to None.
include_files (bool, optional): Whether to include the files related to the file
                activity events. Defaults to True.
            event_types (str, optional): A comma-separated list of event types to filter by.
Available options are:
- ``DEVICE_APPEARED``
- ``DEVICE_DISAPPEARED``
- ``DEVICE_FILE_ACTIVITY``
- ``PERSONAL_CLOUD_FILE_ACTIVITY``
- ``RESTORE_JOB``
- ``RESTORE_FILE``
- ``FILE_OPENED``
- ``RULE_MATCH``
- ``DEVICE_SCAN_RESULT``
- ``PERSONAL_CLOUD_SCAN_RESULT``
Defaults to None.
min_timestamp (float, optional): A POSIX timestamp representing the beginning of the
date range of events to get. Defaults to None.
max_timestamp (float, optional): A POSIX timestamp representing the end of the date
range of events to get. Defaults to None.
Returns:
            generator: An object that iterates over (:class:`py42.response.Py42Response`, cursor)
            tuples, where each response contains a page of events.
"""
security_plan_storage_infos = self.get_security_plan_storage_info_list(user_uid)
return self._get_security_detection_events(
security_plan_storage_infos,
cursor,
include_files,
event_types,
min_timestamp,
max_timestamp,
)
def search_file_events(self, query):
"""Searches for file events.
`REST Documentation <https://support.code42.com/Administrator/Cloud/Monitoring_and_managing/Forensic_File_Search_API>`__
Args:
query (:class:`py42.sdk.queries.fileevents.file_event_query.FileEventQuery`): Also
accepts a raw JSON str.
Returns:
:class:`py42.response.Py42Response`: A response containing the first 10,000
events.
"""
file_event_client = self._microservices_client_factory.get_file_event_client()
return file_event_client.search(query)
    def _search_by_hash(self, checksum, hash_type):
        query = FileEventQuery.all(hash_type.eq(checksum))
        response = self.search_file_events(query)
        return response[u"fileEvents"]
def _find_file_versions(self, md5_hash, sha256_hash):
file_event_client = self._microservices_client_factory.get_file_event_client()
pds_client = (
self._microservices_client_factory.get_preservation_data_service_client()
)
response = file_event_client.get_file_location_detail_by_sha256(sha256_hash)
if u"locations" not in response and not len(response[u"locations"]):
raise Py42Error(
u"PDS service can't find requested file "
u"with md5 hash {} and sha256 hash {}.".format(md5_hash, sha256_hash)
)
for device_id, paths in _parse_file_location_response(response):
try:
yield pds_client.find_file_versions(
md5_hash, sha256_hash, device_id, paths
)
except Py42HTTPError as err:
# API searches multiple paths to find the file to be streamed, as returned by
# 'get_file_location_detail_by_sha256', hence we keep looking until we find a stream
# to return
                debug.logger.warning(
                    u"Failed to find any file version for md5 hash {} / sha256 hash {}. "
                    u"Error: {}".format(md5_hash, sha256_hash, err)
                )
def _stream_file(self, file_generator, checksum):
for response in file_generator:
if response.status_code == 204:
continue
try:
storage_node_client = self._microservices_client_factory.create_storage_preservation_client(
response[u"storageNodeURL"]
)
token = storage_node_client.get_download_token(
response[u"archiveGuid"],
response[u"fileId"],
response[u"versionTimestamp"],
)
return storage_node_client.get_file(str(token))
except Py42HTTPError:
# API searches multiple paths to find the file to be streamed, as returned by
# 'get_file_location_detail_by_sha256', hence we keep looking until we find a stream
# to return
debug.logger.warning(
u"Failed to stream file with hash {}, info: {}.".format(
checksum, response.text
)
)
raise Py42Error(
u"No file with hash {} available for download on any storage node.".format(
checksum
)
)
def stream_file_by_sha256(self, checksum):
"""Stream file based on SHA256 checksum.
Args:
checksum (str): SHA256 hash of the file.
Returns:
            A stream of the requested file.
"""
events = self._search_by_hash(checksum, SHA256)
if not len(events):
raise Py42ChecksumNotFoundError(u"SHA256", checksum)
md5_hash = events[0][u"md5Checksum"]
return self._stream_file(self._find_file_versions(md5_hash, checksum), checksum)
def stream_file_by_md5(self, checksum):
"""Stream file based on MD5 checksum.
Args:
checksum (str): MD5 hash of the file.
Returns:
            A stream of the requested file.
"""
events = self._search_by_hash(checksum, MD5)
if not len(events):
raise Py42ChecksumNotFoundError(u"MD5", checksum)
sha256_hash = events[0][u"sha256Checksum"]
return self._stream_file(
self._find_file_versions(checksum, sha256_hash), checksum
)
def _get_plan_storage_infos(self, plan_destination_map):
plan_infos = []
for plan_uid in plan_destination_map:
destinations = plan_destination_map[plan_uid]
storage_info = self._get_storage_info_for_plan(plan_uid, destinations)
if storage_info:
plan_infos.append(storage_info)
return plan_infos
def _get_storage_info_for_plan(self, plan_uid, destinations):
for destination in destinations:
# try to connect to every storage node for this plan until one works
plan_storage_info = self._get_storage_info_for_plan_destination(
plan_uid, destination
)
if plan_storage_info:
return plan_storage_info
def _get_storage_info_for_plan_destination(self, plan_uid, destination):
try:
destination_guid = destination[u"destinationGuid"]
node_guid = destination[u"nodeGuid"]
plan_storage_info = PlanStorageInfo(plan_uid, destination_guid, node_guid)
self._try_get_security_detection_event_client(plan_storage_info)
return plan_storage_info
except HTTPError:
# This function is called in a loop until we get a result that is not None.
# If all return None, then the calling function raises Py42SecurityPlanConnectionError.
pass
def _try_get_security_detection_event_client(self, plan_storage_info):
# check if we have already created and stored this client
client = self._client_cache.get(plan_storage_info.node_guid)
# otherwise, create it
if client is None:
client = self._storage_client_factory.from_plan_info(
plan_storage_info.plan_uid, plan_storage_info.destination_guid
).securitydata
# store this client via its guid so that we don't have to call StorageAuthToken
# just to determine what storage client to use
with self._client_cache_lock:
self._client_cache.update({plan_storage_info.node_guid: client})
return client
def _get_security_detection_events(
self,
plan_storage_infos,
cursor,
include_files,
event_types,
min_timestamp,
max_timestamp,
):
if not isinstance(plan_storage_infos, (list, tuple)):
plan_storage_infos = [plan_storage_infos]
# get the storage node client for each plan
for plan_storage_info in plan_storage_infos:
client = self._try_get_security_detection_event_client(plan_storage_info)
started = False
# get all pages of events for this plan
while cursor or not started:
started = True
response = client.get_plan_security_events(
plan_storage_info.plan_uid,
cursor=cursor,
include_files=include_files,
event_types=event_types,
min_timestamp=min_timestamp,
max_timestamp=max_timestamp,
)
if response.text:
# we use json.loads here because the cursor prop doesn't appear
# on responses that have no results
cursor = json.loads(response.text).get(u"cursor")
# if there are no results, we don't get a cursor and have reached the end
if cursor:
yield response, cursor
def _get_plan_destination_map(locations_list):
plan_destination_map = {}
for plans in _get_destinations_in_locations_list(locations_list):
for plan_uid in plans:
plan_destination_map[plan_uid] = plans[plan_uid]
return plan_destination_map
def _get_destinations_in_locations_list(locations_list):
for destination in locations_list:
for node in destination[u"securityPlanLocationsByNode"]:
yield _get_plans_in_node(destination, node)
def _get_plans_in_node(destination, node):
return {
plan_uid: [
{
u"destinationGuid": destination[u"destinationGuid"],
u"nodeGuid": node[u"nodeGuid"],
}
]
for plan_uid in node[u"securityPlanUids"]
}
def _parse_file_location_response(response):
for location in response[u"locations"]:
paths = []
file_name = location[u"fileName"]
device_id = location[u"deviceUid"]
paths.append(u"{}{}".format(location[u"filePath"], file_name))
yield device_id, paths
class PlanStorageInfo(object):
def __init__(self, plan_uid, destination_guid, node_guid):
self._plan_uid = plan_uid
        self._destination_guid = destination_guid
self._node_guid = node_guid
@property
def plan_uid(self):
"""The UID of the storage plan."""
return self._plan_uid
@property
def destination_guid(self):
"""The GUID of the destination containing the storage archive."""
        return self._destination_guid
@property
def node_guid(self):
"""The GUID of the storage node containing the archive."""
return self._node_guid
|
python
|
from trame import state
from trame.html import vuetify, Element, simput
from ..engine.simput import KeyDatabase
def update_cycle_list(*args, **kwargs):
pxm = KeyDatabase().pxm
cycleIds = []
subCycleIds = {}
for cycle in pxm.get_instances_of_type("Cycle"):
cycleIds.append(cycle.id)
subCycleIds[cycle.id] = []
for subCycleId in cycle.own:
subCycleIds[cycle.id].append(subCycleId)
state.cycleIds = cycleIds
state.subCycleIds = subCycleIds
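    # resulting state shape (illustrative ids):
    #   state.cycleIds    -> ["Cycle_1", "Cycle_2"]
    #   state.subCycleIds -> {"Cycle_1": ["SubCycle_1"], "Cycle_2": []}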
def create_cycle(proxy_type, owner_id=None, **kwargs):
pxm = KeyDatabase().pxm
proxy = pxm.create(proxy_type, **kwargs)
if owner_id is not None:
owner = pxm.get(owner_id)
owner._own.add(proxy.id)
update_cycle_list()
return proxy
def delete_cycle(id, proxy_type, owner_id=None):
pxm = KeyDatabase().pxm
if owner_id is not None:
owner = pxm.get(owner_id)
owner._own.remove(id)
pxm.delete(id)
update_cycle_list()
def initialize():
state.update({
"cycleIds": [],
"subCycleIds": {},
})
cycle = create_cycle("Cycle", Name="constant", repeat=-1)
create_cycle("SubCycle", cycle.id, Name="alltime", Length=1)
cycle = create_cycle("Cycle", Name="rainrec", repeat=-1)
create_cycle("SubCycle", cycle.id, Name="rain")
create_cycle("SubCycle", cycle.id, Name="rec")
def create_ui():
Element("H1", "Timing")
simput.SimputItem(itemId=("timingId",))
Element("H1", "Cycles")
with vuetify.VContainer(v_for=("(cycleId, index) in cycleIds",), fluid=True):
with vuetify.VContainer(style="display: flex;", fluid=True):
simput.SimputItem(itemId=("cycleId",), style="flex-grow: 1;")
with vuetify.VBtn(click=(delete_cycle, "[cycleId, 'Cycle']"), small=True, icon=True):
vuetify.VIcon('mdi-delete')
with vuetify.VContainer(fluid=True, style="padding: 2rem;"):
with vuetify.VContainer(v_for=("(subId, subI) in subCycleIds[cycleId]",), fluid=True, style="display: flex;"):
simput.SimputItem(itemId=("subId",), style="flex-grow: 1;")
with vuetify.VBtn(click=(delete_cycle, "[subId, 'SubCycle', cycleId]"), small=True, icon=True):
vuetify.VIcon('mdi-delete')
with vuetify.VBtn(click=(create_cycle, "['SubCycle', cycleId]")):
vuetify.VIcon('mdi-plus')
Element("span", "Add Sub Cycle")
with vuetify.VBtn(click=(create_cycle, "['Cycle']")):
vuetify.VIcon('mdi-plus')
Element("span", "Add Cycle")
|
python
|
from typing import List
def pascal(N: int) -> List[int]:
"""
    Return the Nth row of Pascal's triangle.
    """
    if N == 0:
        return []
    triangle_rows = []
    for i in range(1, N+1):
        add_row = [None]*i
        add_row[0] = 1
        add_row[-1] = 1
        # interior entries are the sum of the two adjacent entries in the row above
        for j in range(1, i-1):
            add_row[j] = triangle_rows[i-2][j-1] + triangle_rows[i-2][j]
        triangle_rows.append(add_row)
return triangle_rows[N-1]
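# quick check: pascal(5) -> [1, 4, 6, 4, 1]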
|
python
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid.core as core
import paddle
import numpy as np
from paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_eager_mode
from paddle.fluid.data_feeder import convert_dtype
import unittest
import copy
import paddle.compat as cpt
class EagerScaleTestCase(unittest.TestCase):
def test_scale_base(self):
with _test_eager_guard():
paddle.set_device("cpu")
arr = np.ones([4, 16, 16, 32]).astype('float32')
tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
print(tensor)
tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
for i in range(0, 100):
tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)
print(tensor)
self.assertEqual(tensor.shape, [4, 16, 16, 32])
self.assertEqual(tensor.stop_gradient, True)
def test_retain_grad_and_run_backward(self):
with _test_eager_guard():
paddle.set_device("cpu")
input_data = np.ones([4, 16, 16, 32]).astype('float32')
data_eager = paddle.to_tensor(input_data, 'float32',
core.CPUPlace(), False)
grad_data = np.ones([4, 16, 16, 32]).astype('float32')
grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
data_eager.retain_grads()
out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
self.assertIsNone(data_eager.grad)
out_eager.backward(grad_eager, False)
self.assertIsNotNone(data_eager.grad)
self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data))
def test_retain_grad_and_run_backward_raises(self):
with _test_eager_guard():
paddle.set_device("cpu")
input_data = np.ones([4, 16, 16, 32]).astype('float32')
data_eager = paddle.to_tensor(input_data, 'float32',
core.CPUPlace(), False)
grad_data = np.ones([4, 16, 16, 32]).astype('float32')
grad_data2 = np.ones([4, 16]).astype('float32')
grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())
grad_eager2 = paddle.to_tensor(grad_data2, 'float32',
core.CPUPlace())
data_eager.retain_grads()
out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)
self.assertIsNone(data_eager.grad)
with self.assertRaisesRegexp(
AssertionError,
"The type of grad_tensor must be paddle.Tensor"):
out_eager.backward(grad_data, False)
with self.assertRaisesRegexp(
AssertionError,
"Tensor shape not match, Tensor of grad_tensor /*"):
out_eager.backward(grad_eager2, False)
class EagerDtypeTestCase(unittest.TestCase):
    def check_to_tensor_and_numpy(self, dtype, proto_dtype):
with _test_eager_guard():
arr = np.random.random([4, 16, 16, 32]).astype(dtype)
tensor = paddle.to_tensor(arr, dtype)
self.assertEqual(tensor.dtype, proto_dtype)
self.assertTrue(np.array_equal(arr, tensor.numpy()))
def test_dtype_base(self):
print("Test_dtype")
        self.check_to_tensor_and_numpy('bool', core.VarDesc.VarType.BOOL)
        self.check_to_tensor_and_numpy('int8', core.VarDesc.VarType.INT8)
        self.check_to_tensor_and_numpy('uint8', core.VarDesc.VarType.UINT8)
        self.check_to_tensor_and_numpy('int16', core.VarDesc.VarType.INT16)
        self.check_to_tensor_and_numpy('int32', core.VarDesc.VarType.INT32)
        self.check_to_tensor_and_numpy('int64', core.VarDesc.VarType.INT64)
        self.check_to_tensor_and_numpy('float16', core.VarDesc.VarType.FP16)
        self.check_to_tensor_and_numpy('float32', core.VarDesc.VarType.FP32)
        self.check_to_tensor_and_numpy('float64', core.VarDesc.VarType.FP64)
        self.check_to_tensor_and_numpy('complex64',
                                       core.VarDesc.VarType.COMPLEX64)
        self.check_to_tensor_and_numpy('complex128',
                                       core.VarDesc.VarType.COMPLEX128)
class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
def constructor(self, place):
egr_tensor = core.eager.Tensor()
self.assertEqual(egr_tensor.persistable, False)
self.assertTrue("generated" in egr_tensor.name)
self.assertEqual(egr_tensor.shape, [])
self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor.stop_gradient, True)
egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32,
[4, 16, 16, 32], "test_eager_tensor",
core.VarDesc.VarType.LOD_TENSOR, True)
self.assertEqual(egr_tensor0.persistable, True)
self.assertEqual(egr_tensor0.name, "test_eager_tensor")
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor1 = core.eager.Tensor(arr0, place, True, False,
"numpy_tensor1", False)
self.assertEqual(egr_tensor1.persistable, True)
self.assertEqual(egr_tensor1.name, "numpy_tensor1")
self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor1.stop_gradient, False)
self.assertTrue(egr_tensor1.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0))
arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)
egr_tensor2 = core.eager.Tensor(arr1, place, False, True,
"numpy_tensor2", True)
self.assertEqual(egr_tensor2.persistable, False)
self.assertEqual(egr_tensor2.name, "numpy_tensor2")
self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64)
self.assertEqual(egr_tensor2.stop_gradient, True)
self.assertTrue(egr_tensor2.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1))
arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')
egr_tensor3 = core.eager.Tensor(arr2)
self.assertEqual(egr_tensor3.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor3.name)
self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])
self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor3.stop_gradient, True)
self.assertTrue(
egr_tensor3.place._equals(
paddle.fluid.framework._current_expected_place()))
self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2))
egr_tensor3.stop_gradient = False
egr_tensor4 = core.eager.Tensor(egr_tensor3)
self.assertEqual(egr_tensor4.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor4.name)
self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)
self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype)
self.assertEqual(egr_tensor4.stop_gradient, True)
self.assertTrue(
egr_tensor4.place._equals(
paddle.fluid.framework._current_expected_place()))
self.assertTrue(
np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()))
arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor5 = core.eager.Tensor(arr4, place)
self.assertEqual(egr_tensor5.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor5.name)
self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor5.stop_gradient, True)
self.assertTrue(egr_tensor5.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4))
egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace())
self.assertEqual(egr_tensor6.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor6.name)
self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor6.stop_gradient, True)
self.assertEqual(egr_tensor6.place.is_cpu_place(), True)
self.assertTrue(
np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy()))
egr_tensor7 = core.eager.Tensor(arr4, place, True)
self.assertEqual(egr_tensor7.persistable, True)
self.assertTrue("generated_tensor" in egr_tensor7.name)
self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor7.stop_gradient, True)
self.assertTrue(egr_tensor7.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4))
egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8")
self.assertEqual(egr_tensor8.persistable, False)
self.assertEqual(egr_tensor8.name, "egr_tensor8")
self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor8.stop_gradient, True)
self.assertTrue(egr_tensor8.place._equals(place))
self.assertTrue(
np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy()))
egr_tensor9 = core.eager.Tensor(arr4, place, True, True)
self.assertEqual(egr_tensor9.persistable, True)
self.assertTrue("generated_tensor" in egr_tensor9.name)
self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor9.stop_gradient, True)
self.assertTrue(egr_tensor9.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))
x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor()
t.set(x, paddle.fluid.CPUPlace())
egr_tensor10 = core.eager.Tensor(t, place)
self.assertEqual(egr_tensor10.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor10.name)
self.assertEqual(egr_tensor10.shape, [3, 3])
self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor10.stop_gradient, True)
self.assertTrue(egr_tensor10.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))
egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed")
self.assertEqual(egr_tensor11.persistable, False)
self.assertTrue("framework_constructed" in egr_tensor11.name)
self.assertEqual(egr_tensor11.shape, [3, 3])
self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor11.stop_gradient, True)
self.assertTrue(egr_tensor11.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))
egr_tensor12 = core.eager.Tensor(t)
self.assertEqual(egr_tensor12.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor12.name)
self.assertEqual(egr_tensor12.shape, [3, 3])
self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor12.stop_gradient, True)
self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
self.assertTrue(np.array_equal(egr_tensor12.numpy(), x))
egr_tensor13 = paddle.randn([2, 2])
self.assertTrue("eager_tmp" in egr_tensor13.name)
with self.assertRaisesRegexp(
ValueError, "The shape of Parameter should not be None"):
eager_param = EagerParamBase(shape=None, dtype="float32")
with self.assertRaisesRegexp(
ValueError, "The dtype of Parameter should not be None"):
eager_param = EagerParamBase(shape=[1, 1], dtype=None)
with self.assertRaisesRegexp(
ValueError,
"The dimensions of shape for Parameter must be greater than 0"):
eager_param = EagerParamBase(shape=[], dtype="float32")
with self.assertRaisesRegexp(
ValueError,
"Each dimension of shape for Parameter must be greater than 0, but received /*"
):
eager_param = EagerParamBase(shape=[-1], dtype="float32")
eager_param = EagerParamBase(shape=[1, 1], dtype="float32")
self.assertTrue(eager_param.trainable)
eager_param.trainable = False
self.assertFalse(eager_param.trainable)
with self.assertRaisesRegexp(
ValueError,
"The type of trainable MUST be bool, but the type is /*"):
eager_param.trainable = "False"
def test_constructor(self):
print("Test_constructor")
paddle.set_device("cpu")
place_list = [core.CPUPlace()]
if core.is_compiled_with_cuda():
place_list.append(core.CUDAPlace(0))
with _test_eager_guard():
for p in place_list:
self.constructor(p)
def constructor_with_kwargs(self, place):
# init Tensor by Python array
arr = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor0 = core.eager.Tensor(value=arr)
self.assertEqual(egr_tensor0.persistable, False)
self.assertTrue("generated" in egr_tensor0.name)
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
self.assertTrue(
egr_tensor0.place._equals(
paddle.fluid.framework._current_expected_place()))
self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor0.stop_gradient, True)
egr_tensor1 = core.eager.Tensor(value=arr, place=place)
self.assertEqual(egr_tensor1.persistable, False)
self.assertTrue("generated" in egr_tensor1.name)
self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor1.place._equals(place))
self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor1.stop_gradient, True)
egr_tensor2 = core.eager.Tensor(arr, place=place)
self.assertEqual(egr_tensor2.persistable, False)
self.assertTrue("generated" in egr_tensor2.name)
self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor2.place._equals(place))
self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor2.stop_gradient, True)
egr_tensor3 = core.eager.Tensor(
arr, place=place, name="new_eager_tensor")
self.assertEqual(egr_tensor3.persistable, False)
self.assertTrue("new_eager_tensor" in egr_tensor3.name)
self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor3.place._equals(place))
self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor3.stop_gradient, True)
egr_tensor4 = core.eager.Tensor(
arr, place=place, persistable=True, name="new_eager_tensor")
self.assertEqual(egr_tensor4.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor4.name)
self.assertEqual(egr_tensor4.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor4.place._equals(place))
self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor4.stop_gradient, True)
egr_tensor5 = core.eager.Tensor(
arr,
core.CPUPlace(),
persistable=True,
name="new_eager_tensor",
zero_copy=True)
self.assertEqual(egr_tensor5.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor5.name)
self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor5.place.is_cpu_place())
self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor5.stop_gradient, True)
egr_tensor6 = core.eager.Tensor(
arr,
place=core.CPUPlace(),
persistable=True,
name="new_eager_tensor",
zero_copy=True)
self.assertEqual(egr_tensor6.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor6.name)
self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor6.place.is_cpu_place())
self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor6.stop_gradient, True)
egr_tensor7 = core.eager.Tensor(
arr,
place=place,
persistable=True,
name="new_eager_tensor",
zero_copy=True)
self.assertEqual(egr_tensor7.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor7.name)
self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor7.place._equals(place))
self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor7.stop_gradient, True)
egr_tensor8 = core.eager.Tensor(
arr,
place=place,
persistable=True,
name="new_eager_tensor",
zero_copy=True,
stop_gradient=False)
self.assertEqual(egr_tensor8.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor8.name)
self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor8.place._equals(place))
self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor8.stop_gradient, False)
egr_tensor9 = core.eager.Tensor(
arr, place, True, True, "new_eager_tensor", stop_gradient=False)
self.assertEqual(egr_tensor9.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor9.name)
self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor9.place._equals(place))
self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor9.stop_gradient, False)
egr_tensor10 = core.eager.Tensor(
arr,
place,
True,
True,
name="new_eager_tensor",
stop_gradient=False)
self.assertEqual(egr_tensor10.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor10.name)
self.assertEqual(egr_tensor10.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor10.place._equals(place))
self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor10.stop_gradient, False)
egr_tensor11 = core.eager.Tensor(
arr,
place,
True,
zero_copy=True,
name="new_eager_tensor",
stop_gradient=False)
self.assertEqual(egr_tensor11.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor11.name)
self.assertEqual(egr_tensor11.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor11.place._equals(place))
self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor11.stop_gradient, False)
egr_tensor12 = core.eager.Tensor(
arr,
place,
persistable=True,
zero_copy=True,
name="new_eager_tensor",
stop_gradient=False)
self.assertEqual(egr_tensor12.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor12.name)
self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor12.place._equals(place))
self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor12.stop_gradient, False)
egr_tensor13 = core.eager.Tensor(
value=arr,
place=place,
persistable=True,
zero_copy=True,
name="new_eager_tensor",
stop_gradient=False)
self.assertEqual(egr_tensor13.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor13.name)
self.assertEqual(egr_tensor13.shape, [4, 16, 16, 32])
self.assertTrue(egr_tensor13.place._equals(place))
self.assertEqual(egr_tensor13.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor13.stop_gradient, False)
# special case
egr_tensor14 = core.eager.Tensor(
dtype=core.VarDesc.VarType.FP32,
dims=[4, 16, 16, 32],
name="special_eager_tensor",
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=True)
self.assertEqual(egr_tensor14.persistable, True)
self.assertEqual(egr_tensor14.name, "special_eager_tensor")
self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32)
# init Tensor by Tensor
egr_tensor15 = core.eager.Tensor(value=egr_tensor4)
self.assertEqual(egr_tensor15.persistable, True)
self.assertTrue("generated" in egr_tensor15.name)
self.assertEqual(egr_tensor15.shape, egr_tensor4.shape)
self.assertEqual(egr_tensor15.dtype, egr_tensor4.dtype)
self.assertEqual(egr_tensor15.stop_gradient, True)
self.assertTrue(
egr_tensor15.place._equals(
paddle.fluid.framework._current_expected_place()))
self.assertTrue(
np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()))
egr_tensor16 = core.eager.Tensor(
value=egr_tensor4, name="new_eager_tensor")
self.assertEqual(egr_tensor16.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor16.name)
self.assertEqual(egr_tensor16.shape, egr_tensor4.shape)
self.assertEqual(egr_tensor16.dtype, egr_tensor4.dtype)
self.assertEqual(egr_tensor16.stop_gradient, True)
self.assertTrue(
egr_tensor16.place._equals(
paddle.fluid.framework._current_expected_place()))
self.assertTrue(
np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()))
egr_tensor17 = core.eager.Tensor(
value=egr_tensor4,
place=place,
name="new_eager_tensor", )
self.assertEqual(egr_tensor17.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor17.name)
self.assertEqual(egr_tensor17.shape, egr_tensor4.shape)
self.assertEqual(egr_tensor17.dtype, egr_tensor4.dtype)
self.assertEqual(egr_tensor17.stop_gradient, True)
self.assertTrue(egr_tensor17.place._equals(place))
self.assertTrue(
np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy()))
egr_tensor18 = core.eager.Tensor(
egr_tensor4,
place=place,
name="new_eager_tensor", )
self.assertEqual(egr_tensor18.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor18.name)
self.assertEqual(egr_tensor18.shape, egr_tensor4.shape)
self.assertEqual(egr_tensor18.dtype, egr_tensor4.dtype)
self.assertEqual(egr_tensor18.stop_gradient, True)
self.assertTrue(egr_tensor18.place._equals(place))
self.assertTrue(
np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy()))
egr_tensor19 = core.eager.Tensor(
egr_tensor4,
place,
name="new_eager_tensor", )
self.assertEqual(egr_tensor19.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor19.name)
self.assertEqual(egr_tensor19.shape, egr_tensor4.shape)
self.assertEqual(egr_tensor19.dtype, egr_tensor4.dtype)
self.assertEqual(egr_tensor19.stop_gradient, True)
self.assertTrue(egr_tensor19.place._equals(place))
self.assertTrue(
np.array_equal(egr_tensor19.numpy(), egr_tensor4.numpy()))
# init eager tensor by framework tensor
x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor()
t.set(x, paddle.fluid.CPUPlace())
egr_tensor20 = core.eager.Tensor(value=t)
self.assertEqual(egr_tensor20.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor20.name)
self.assertEqual(egr_tensor20.shape, [3, 3])
self.assertEqual(egr_tensor20.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor20.stop_gradient, True)
self.assertTrue(
egr_tensor20.place._equals(
paddle.fluid.framework._current_expected_place()))
self.assertTrue(np.array_equal(egr_tensor20.numpy(), x))
egr_tensor21 = core.eager.Tensor(value=t, place=place)
self.assertEqual(egr_tensor21.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor21.name)
self.assertEqual(egr_tensor21.shape, [3, 3])
self.assertEqual(egr_tensor21.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor21.stop_gradient, True)
self.assertTrue(egr_tensor21.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor21.numpy(), x))
egr_tensor22 = core.eager.Tensor(t, place=place)
self.assertEqual(egr_tensor22.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor22.name)
self.assertEqual(egr_tensor22.shape, [3, 3])
self.assertEqual(egr_tensor22.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor22.stop_gradient, True)
self.assertTrue(egr_tensor22.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor22.numpy(), x))
egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor")
self.assertEqual(egr_tensor23.persistable, False)
self.assertTrue("from_framework_tensor" in egr_tensor23.name)
self.assertEqual(egr_tensor23.shape, [3, 3])
self.assertEqual(egr_tensor23.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor23.stop_gradient, True)
self.assertTrue(egr_tensor23.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor23.numpy(), x))
egr_tensor24 = core.eager.Tensor(
value=t, place=place, name="from_framework_tensor")
self.assertEqual(egr_tensor24.persistable, False)
self.assertTrue("from_framework_tensor" in egr_tensor24.name)
self.assertEqual(egr_tensor24.shape, [3, 3])
self.assertEqual(egr_tensor24.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor24.stop_gradient, True)
self.assertTrue(egr_tensor24.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor24.numpy(), x))
# Bad usage
# SyntaxError: positional argument follows keyword argument
# egr_tensor25 = core.eager.Tensor(value=t, place)
def test_constructor_with_kwargs(self):
print("Test_constructor_with_kwargs")
paddle.set_device("cpu")
place_list = [core.CPUPlace()]
if core.is_compiled_with_cuda():
place_list.append(core.CUDAPlace(0))
with _test_eager_guard():
for p in place_list:
self.constructor_with_kwargs(p)
def test_copy_and_copy_to(self):
print("Test_copy_and_copy_to")
with _test_eager_guard():
paddle.set_device("cpu")
arr = np.ones([4, 16, 16, 32]).astype('float32')
arr1 = np.zeros([4, 16]).astype('float32')
arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
[4, 16, 16, 32]).astype('float32')
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace())
self.assertEqual(tensor.stop_gradient, True)
tensor.stop_gradient = False
print("Set persistable")
tensor.persistable = False
tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32,
core.CPUPlace())
tensor1.persistable = True
self.assertEqual(tensor1.stop_gradient, True)
self.assertTrue(np.array_equal(tensor.numpy(), arr))
print("Test copy_")
tensor.copy_(tensor1, True)
self.assertEqual(tensor.persistable, False)
self.assertEqual(tensor.shape, [4, 16])
self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)
self.assertTrue(np.array_equal(tensor.numpy(), arr1))
print("Test _copy_to")
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CPUPlace())
self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
self.assertTrue(tensor2.place.is_cpu_place())
tensor2.persistable = True
tensor2.stop_gradient = False
if core.is_compiled_with_cuda():
tensor3 = tensor2._copy_to(core.CUDAPlace(0), True)
self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
self.assertEqual(tensor3.persistable, True)
self.assertEqual(tensor3.stop_gradient, True)
self.assertTrue(tensor3.place.is_gpu_place())
tensor4 = tensor2.cuda(0, True)
self.assertTrue(np.array_equal(tensor4.numpy(), arr2))
self.assertEqual(tensor4.persistable, True)
self.assertEqual(tensor4.stop_gradient, False)
self.assertTrue(tensor4.place.is_gpu_place())
tensor5 = tensor4.cpu()
self.assertTrue(np.array_equal(tensor5.numpy(), arr2))
self.assertEqual(tensor5.persistable, True)
self.assertEqual(tensor5.stop_gradient, False)
self.assertTrue(tensor5.place.is_cpu_place())
tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned')
tensor11 = tensor10._copy_to(core.CUDAPlace(0), True)
self.assertTrue(
np.array_equal(tensor10.numpy(), tensor11.numpy()))
else:
tensor3 = tensor2._copy_to(core.CPUPlace(), True)
self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
self.assertEqual(tensor3.persistable, True)
self.assertEqual(tensor3.stop_gradient, True)
self.assertTrue(tensor3.place.is_cpu_place())
tensor4 = tensor2.cpu()
self.assertTrue(np.array_equal(tensor4.numpy(), arr2))
self.assertEqual(tensor4.persistable, True)
self.assertEqual(tensor4.stop_gradient, False)
self.assertTrue(tensor4.place.is_cpu_place())
def test_share_buffer_to(self):
with _test_eager_guard():
arr = np.ones([4, 16, 16, 32]).astype('float32')
arr1 = np.zeros([4, 16]).astype('float32')
arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
[4, 16, 16, 32]).astype('float32')
tensor = None
tensor2 = None
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace())
tensor3 = core.eager.Tensor()
if core.is_compiled_with_cuda():
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CUDAPlace(0))
else:
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CPUPlace())
self.assertTrue(np.array_equal(tensor.numpy(), arr))
self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
tensor2._share_buffer_to(tensor)
self.assertTrue(np.array_equal(tensor.numpy(), arr2))
self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
self.assertTrue(tensor._is_shared_buffer_with(tensor2))
self.assertTrue(tensor2._is_shared_buffer_with(tensor))
tensor._share_buffer_to(tensor3)
self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
self.assertTrue(tensor3._is_shared_buffer_with(tensor))
def test_share_underline_tensor_to(self):
with _test_eager_guard():
arr = np.ones([4, 16, 16, 32]).astype('float32')
arr1 = np.zeros([4, 16]).astype('float32')
arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(
[4, 16, 16, 32]).astype('float32')
tensor = None
tensor2 = None
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace())
tensor3 = core.eager.Tensor()
if core.is_compiled_with_cuda():
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CUDAPlace(0))
else:
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CPUPlace())
self.assertTrue(np.array_equal(tensor.numpy(), arr))
self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
tensor2._share_underline_tensor_to(tensor)
self.assertTrue(np.array_equal(tensor.numpy(), arr2))
self.assertTrue(np.array_equal(tensor2.numpy(), arr2))
self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2))
self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor))
tensor._share_underline_tensor_to(tensor3)
self.assertTrue(np.array_equal(tensor3.numpy(), arr2))
self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor))
def test_properties(self):
print("Test_properties")
with _test_eager_guard():
paddle.set_device("cpu")
arr = np.ones([4, 16, 16, 32]).astype('float32')
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace())
self.assertEqual(tensor.shape, [4, 16, 16, 32])
tensor.name = 'tensor_name_test'
self.assertEqual(tensor.name, 'tensor_name_test')
self.assertEqual(tensor.persistable, False)
tensor.persistable = True
self.assertEqual(tensor.persistable, True)
tensor.persistable = False
self.assertEqual(tensor.persistable, False)
self.assertTrue(tensor.place.is_cpu_place())
self.assertEqual(tensor._place_str, 'Place(cpu)')
self.assertEqual(tensor.stop_gradient, True)
tensor.stop_gradient = False
self.assertEqual(tensor.stop_gradient, False)
tensor.stop_gradient = True
self.assertEqual(tensor.stop_gradient, True)
self.assertEqual(tensor.type, core.VarDesc.VarType.LOD_TENSOR)
def test_global_properties(self):
print("Test_global_properties")
self.assertFalse(core._in_eager_mode())
with _test_eager_guard():
self.assertTrue(core._in_eager_mode())
self.assertFalse(core._in_eager_mode())
def test_place_guard(self):
core._enable_eager_mode()
if core.is_compiled_with_cuda():
paddle.set_device("gpu:0")
with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
self.assertTrue(core.eager._get_expected_place().is_cpu_place())
else:
paddle.set_device("cpu")
with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
self.assertTrue(core.eager._get_expected_place().is_cpu_place())
core._disable_eager_mode()
def test_value(self):
with _test_eager_guard():
arr = np.random.rand(4, 16, 16, 32).astype('float64')
egr_tensor0 = core.eager.Tensor(value=arr)
self.assertEqual(egr_tensor0.persistable, False)
self.assertTrue("generated" in egr_tensor0.name)
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
self.assertTrue(
egr_tensor0.place._equals(
paddle.fluid.framework._current_expected_place()))
self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64)
self.assertEqual(egr_tensor0.stop_gradient, True)
self.assertTrue(egr_tensor0.value().get_tensor()._dtype(),
core.VarDesc.VarType.FP64)
self.assertTrue(egr_tensor0.value().get_tensor()._place(),
paddle.fluid.framework._current_expected_place())
self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized())
def test_set_value(self):
with _test_eager_guard():
ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor = core.eager.Tensor(value=ori_arr)
self.assertEqual(egr_tensor.stop_gradient, True)
self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr))
ori_place = egr_tensor.place
new_arr = np.random.rand(4, 16, 16, 32).astype('float32')
self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr))
egr_tensor.set_value(new_arr)
self.assertEqual(egr_tensor.stop_gradient, True)
self.assertTrue(egr_tensor.place._equals(ori_place))
self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
self.assertTrue(np.array_equal(egr_tensor.numpy(), new_arr))
def test_sharding_related_api(self):
with _test_eager_guard():
arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor1 = core.eager.Tensor(arr0,
core.CPUPlace(), True, False,
"numpy_tensor1", False)
self.assertEqual(egr_tensor1._numel(), 32768)
self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384)
def test_copy_gradient_from(self):
with _test_eager_guard():
np_x = np.random.random((2, 2))
np_y = np.random.random((2, 2))
x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False)
y = paddle.to_tensor(np_y, dtype="float64")
out = x + x
out.backward()
x._copy_gradient_from(y)
self.assertTrue(np.array_equal(x.grad.numpy(), np_y))
def test_clear(self):
with _test_eager_guard():
np_x = np.random.random((3, 8, 8))
x = paddle.to_tensor(np_x, dtype="float64")
self.assertTrue(x._is_initialized())
x._clear()
self.assertFalse(x._is_initialized())
class EagerParamBaseUsageTestCase(unittest.TestCase):
def test_print(self):
with _test_eager_guard():
linear = paddle.nn.Linear(3, 3, bias_attr=False)
print(linear.weight)
def test_copy(self):
with _test_eager_guard():
linear = paddle.nn.Linear(1, 3)
linear_copy = copy.deepcopy(linear)
linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True)
self.assertTrue(
np.array_equal(linear.weight.numpy(),
linear_copy.weight.numpy()))
self.assertTrue(
np.array_equal(linear.weight.numpy(), linear_copy2.numpy()))
    def func_fp16_initializer(self):
paddle.set_default_dtype("float16")
linear1 = paddle.nn.Linear(1, 3, bias_attr=False)
linear2 = paddle.nn.Linear(
1,
3,
bias_attr=False,
weight_attr=paddle.fluid.initializer.Uniform())
linear3 = paddle.nn.Linear(
1,
3,
bias_attr=False,
weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer())
linear4 = paddle.nn.Linear(
1,
3,
bias_attr=False,
weight_attr=paddle.fluid.initializer.MSRAInitializer())
res = [
linear1.weight.numpy(), linear2.weight.numpy(),
linear3.weight.numpy(), linear4.weight.numpy()
]
paddle.set_default_dtype("float32")
return res
def test_fp16_initializer(self):
res1 = list()
res2 = list()
paddle.seed(102)
paddle.framework.random._manual_program_seed(102)
with _test_eager_guard():
            res1 = self.func_fp16_initializer()
        res2 = self.func_fp16_initializer()
for i in range(len(res1)):
self.assertTrue(np.array_equal(res1[i], res2[i]))
def func_layer_helper_base(self, value):
base = paddle.fluid.layer_helper_base.LayerHelperBase("test_layer",
"test_layer")
return base.to_variable(value).numpy()
def func_base_to_variable(self, value):
paddle.fluid.dygraph.base.to_variable(value)
def test_to_variable(self):
value = np.random.rand(4, 16, 16, 32).astype('float32')
res1 = None
res3 = None
with _test_eager_guard():
res1 = self.func_layer_helper_base(value)
res3 = self.func_base_to_variable(value)
res2 = self.func_layer_helper_base(value)
res4 = self.func_base_to_variable(value)
self.assertTrue(np.array_equal(res1, res2))
self.assertTrue(np.array_equal(res3, res4))
def test_backward_with_single_tensor(self):
with _test_eager_guard():
arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
egr_tensor12.retain_grads()
arr = np.ones([4, 16, 16, 32]).astype('float32')
self.assertEqual(egr_tensor12.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor12.name)
self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor12.stop_gradient, True)
self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
self.assertTrue(np.array_equal(egr_tensor12.numpy(), arr4))
self.assertTrue(np.array_equal(egr_tensor12.gradient(), None))
egr_tensor12.stop_gradient = False
egr_tensor12.backward()
self.assertTrue(np.array_equal(egr_tensor12.gradient(), arr))
def test_set_value(self):
with _test_eager_guard():
linear = paddle.nn.Linear(1, 3)
ori_place = linear.weight.place
new_weight = np.ones([1, 3]).astype('float32')
self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight))
linear.weight.set_value(new_weight)
self.assertTrue(np.array_equal(linear.weight.numpy(), new_weight))
self.assertTrue(linear.weight.place._equals(ori_place))
class EagerGuardTestCase(unittest.TestCase):
def test__test_eager_guard(self):
tracer = paddle.fluid.dygraph.tracer.Tracer()
with _test_eager_guard(tracer):
self.assertTrue(_in_eager_mode())
if __name__ == "__main__":
unittest.main()
|
python
|
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.core.indexes.timedeltas import timedelta_range
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestTimedeltaIndex(object):
def test_asfreq_bug(self):
import datetime as dt
df = DataFrame(data=[1, 3],
index=[dt.timedelta(), dt.timedelta(minutes=3)])
result = df.resample('1T').asfreq()
expected = DataFrame(data=[1, np.nan, np.nan, 3],
index=timedelta_range('0 day',
periods=4,
freq='1T'))
assert_frame_equal(result, expected)
def test_resample_with_nat(self):
# GH 13223
index = pd.to_timedelta(['0s', pd.NaT, '2s'])
result = DataFrame({'value': [2, 3, 5]}, index).resample('1s').mean()
expected = DataFrame({'value': [2.5, np.nan, 5.0]},
index=timedelta_range('0 day',
periods=3,
freq='1S'))
assert_frame_equal(result, expected)
def test_resample_as_freq_with_subperiod(self):
# GH 13022
index = timedelta_range('00:00:00', '00:10:00', freq='5T')
df = DataFrame(data={'value': [1, 5, 10]}, index=index)
result = df.resample('2T').asfreq()
expected_data = {'value': [1, np.nan, np.nan, np.nan, np.nan, 10]}
expected = DataFrame(data=expected_data,
index=timedelta_range('00:00:00',
'00:10:00', freq='2T'))
tm.assert_frame_equal(result, expected)
|
python
|
# -*- coding: utf-8 -*-
import os
import sys
package_path = '/user/specified/path/to/matsdp/'
sys.path.insert(0, os.path.abspath(package_path))
def test_plot_proxigram_csv():
from matsdp.apt import apt_plot
retn_val = apt_plot.plot_proxigram_csv(
proxigram_csv_file_path = './apt/profile-interface0.csv',
sysname = 'M2',
visible_elmt_list = ['Ni','Al'],
interplation_on = False,
fig_width = 6,
fig_height = 5,
fig_dpi = 600,
fig_format = 'png',
)
assert retn_val == 0
|
python
|
import os
import pandas as pd
import nltk
import gensim
from gensim import corpora, models, similarities
os.chdir(r"D:\semicolon\Deep Learning")
df = pd.read_csv('jokes.csv')
x=df['Question'].values.tolist()
y=df['Answer'].values.tolist()
corpus= x+y
tok_corp= [nltk.word_tokenize(sent.decode('utf-8')) for sent in corpus]
model = gensim.models.Word2Vec(tok_corp, min_count=1, size = 32)
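# note: `.decode('utf-8')` above assumes Python 2 byte strings (on Python 3 the
# cells are already str), and gensim >= 4.0 renames the `size` argument to `vector_size`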
#model.save('testmodel')
#model = gensim.models.Word2Vec.load('test_model')
#model.most_similar('word')
#model.most_similar([vector])
|
python
|
#!/usr/bin/env python
import subprocess
x = list(range(1,9))
print(x)
y = []
resultf = open('avg', 'a')
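# the loop rebinds i to 2**i (2, 4, ..., 256) and passes it as the second CLI argument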
for i in x:
i = 2 ** i
print(i)
out_bytes = subprocess.check_output(['../../build/bin/mapreduce_hand', '131072', str(i)])
out_text = out_bytes.decode('ascii')
value = out_text.split('\n')[-2]
value = value.split('\t')[1]
resultf.write(str(i) + '\t' + str(value) + '\n')
resultf.close()
|
python
|
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TelegrafPlugins(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'version': 'str',
'os': 'str',
'plugins': 'list[TelegrafPlugin]'
}
attribute_map = {
'version': 'version',
'os': 'os',
'plugins': 'plugins'
}
def __init__(self, version=None, os=None, plugins=None): # noqa: E501,D401,D403
"""TelegrafPlugins - a model defined in OpenAPI.""" # noqa: E501
self._version = None
self._os = None
self._plugins = None
self.discriminator = None
if version is not None:
self.version = version
if os is not None:
self.os = os
if plugins is not None:
self.plugins = plugins
@property
def version(self):
"""Get the version of this TelegrafPlugins.
:return: The version of this TelegrafPlugins.
:rtype: str
""" # noqa: E501
return self._version
@version.setter
def version(self, version):
"""Set the version of this TelegrafPlugins.
:param version: The version of this TelegrafPlugins.
:type: str
""" # noqa: E501
self._version = version
@property
def os(self):
"""Get the os of this TelegrafPlugins.
:return: The os of this TelegrafPlugins.
:rtype: str
""" # noqa: E501
return self._os
@os.setter
def os(self, os):
"""Set the os of this TelegrafPlugins.
:param os: The os of this TelegrafPlugins.
:type: str
""" # noqa: E501
self._os = os
@property
def plugins(self):
"""Get the plugins of this TelegrafPlugins.
:return: The plugins of this TelegrafPlugins.
:rtype: list[TelegrafPlugin]
""" # noqa: E501
return self._plugins
@plugins.setter
def plugins(self, plugins):
"""Set the plugins of this TelegrafPlugins.
:param plugins: The plugins of this TelegrafPlugins.
:type: list[TelegrafPlugin]
""" # noqa: E501
self._plugins = plugins
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, TelegrafPlugins):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
|
python
|
# flake8: noqa
"""
This is the local_settings file for Mezzanine's docs.
"""
from random import choice
from mezzanine.project_template.project_name.settings import *
DEBUG = False
ROOT_URLCONF = "mezzanine.project_template.project_name.urls"
characters = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
# Generate a SECRET_KEY for this build
SECRET_KEY = "".join([choice(characters) for i in range(50)])
if "mezzanine.accounts" not in INSTALLED_APPS:
INSTALLED_APPS = tuple(INSTALLED_APPS) + ("mezzanine.accounts",)
|
python
|
import json
import datetime as dt
import dateutil.parser
import backends.entities.Players as Players
import backends.database as db
import backends.trueskillWrapper as ts
## A comment on why the login-offset is necessary ##
## - losing teams tend to have players leaving and joining more rapidly
## - every time a new player joins he has to set up
## - new players are unfamiliar with the positions of the enemy team
## - new players have to run from spawn
## --> their impact factor (which is calculated from their active time) must account for that
loginoffset = dt.timedelta(seconds=60)
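# e.g. a player active for 300s of a 600s round is weighted (300-60)/600 = 0.4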
class Round:
def __init__(self, winnerTeam, loserTeam, _map, duration, startTime, winnerSide):
        if duration <= dt.timedelta(0):
            raise ValueError("Duration must be positive")
        if duration <= dt.timedelta(seconds=120):
            raise ValueError("Game was too short")
self.winners = winnerTeam
self.losers = loserTeam
self.winnerSide = winnerSide
self.map = _map
self.duration = duration
self.start = startTime
        ### Sync players from Database ###
for p in self.winners + self.losers:
playerInDB = db.getOrCreatePlayer(p, startTime)
p.rating = playerInDB.rating
self.prediction, self.confidence = ts.predictOutcome(self.winners, self.losers)
def normalized_playtimes(self):
        '''Return a dict mapping (team_id, player) to the fraction of the round the player was active.'''
np = dict()
for p in self.winners:
            if self.duration is None:
                d = 1.0
            else:
                d = (p.activeTime-loginoffset)/self.duration
            if d < -1:
                raise AssertionError("Normalized playtime was less than -1")
if d < 0:
d = 0.0
elif d > 1:
d = 1.0
np.update({(0,p):d})
for p in self.losers:
            if self.duration is None:
d = 1.0
else:
d = (p.activeTime-loginoffset)/self.duration
if d < 0:
d = 0.0
elif d > 1:
d = 1.0
np.update({(1,p):d})
return np
def pt_difference(self):
'''Used to check difference in playtimes per team'''
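        # returns max/min of the teams' summed effective playtimes,
        # e.g. 8.0 vs 4.0 player-rounds -> 2.0 (and 0 when a team's total is 0)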
        if self.duration is None:
return 1
w1 = w2 = 0
for p in self.winners:
if p.is_fake:
w1 += 1.0
continue
d = (p.activeTime-loginoffset)/self.duration
if d < 0:
d = 0.0
elif d > 1:
d = 1.0
w1 += d
for p in self.losers:
d = (p.activeTime-loginoffset)/self.duration
if p.is_fake:
w2 += 1.0
continue
if d < 0:
d = 0.0
elif d > 1:
d = 1.0
w2 += d
# no div0 plox
if min(w1,w2) <= 0:
return 0
return max(w1,w2)/min(w1,w2)
def toJson(self):
winnersList = []
losersList = []
for w in self.winners:
winnersList += [{ "playerId" : w.id,
"playerName" : w.name,
"isFake" : w.is_fake,
"activeTime" : w.activeTime.total_seconds() }]
for w in self.losers:
losersList += [{ "playerId" : w.id,
"playerName" : w.name,
"isFake" : w.is_fake,
"activeTime" : w.activeTime.total_seconds() }]
retDict = { "winners" : winnersList,
"losers" : losersList,
"startTime" : self.start.isoformat(),
"duration" : self.duration.total_seconds(),
"map" : self.map,
"winner-side" : self.winnerSide }
return json.dumps(retDict)
def fromJson(jsonDict):
winnersList = []
losersList = []
timestamp = dateutil.parser.isoparse(jsonDict["startTime"])
winnerTeam = jsonDict.get("winner-side")
if not winnerTeam:
winnerTeam = -1
loserTeam = -2
else:
loserTeam = (winnerTeam % 2) + 2
for p in jsonDict["winners"]:
pObj = Players.PlayerInRound(p["playerId"], p["playerName"], winnerTeam, timestamp)
        pObj.activeTime = dt.timedelta(seconds=int(p["activeTime"]))
winnersList += [pObj]
for p in jsonDict["losers"]:
pObj = Players.PlayerInRound(p["playerId"], p["playerName"], loserTeam, timestamp)
        pObj.activeTime = dt.timedelta(seconds=int(p["activeTime"]))
losersList += [pObj]
duration = dt.timedelta(seconds=int(jsonDict["duration"]))
return Round(winnersList, losersList, jsonDict["map"], duration, timestamp, winnerTeam)
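# round-trip sketch: Round.fromJson(json.loads(r.toJson())) rebuilds an equivalent
# Round, with activeTime serialized and parsed in whole seconds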
|
python
|
from .declare_a_mapping import User, create_table
from .connection import Session
session = Session()
def user_operator():
ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname')
session.add(ed_user)
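    # The query below autoflushes the pending ed_user, so the same
    # identity-mapped instance comes back from the database.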
our_user = session.query(User).filter_by(name='ed').first()
print(ed_user is our_user)
# Add more users.
session.add_all([
User(name='wendy', fullname='Wendy Williams', nickname='windy'),
User(name='mary', fullname='Mary Contrary', nickname='mary'),
User(name='fred', fullname='Fred Flintstone', nickname='freddy')])
# Dirty data.
ed_user.nickname = 'eddie'
dirty = session.dirty
print(f'session dirty data: {dirty}')
# New data.
new_data = session.new
print(f'session new data: {new_data}')
session.commit()
# Rolling back.
ed_user.name = 'Edwardo'
fake_user = User(name='fakeuser', fullname='Invalid', nickname='12345')
session.add(fake_user)
users = session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all()
print(f'users: {users}')
session.rollback()
print(f'ed_user.name: {ed_user.name}')
    fake_user_in_session = fake_user in session
    print(f'is fake user in session: {fake_user_in_session}')
def main():
create_table()
user_operator()
if __name__ == '__main__':
main()
|
python
|
import enchant
import re
import itertools
# derived from a google image search of an "old fashioned" phone
letters_from_numbers_lookup = {'2': ['A', 'B', 'C'],
'3': ['D', 'E', 'F'],
'4': ['G', 'H', 'I'],
'5': ['J', 'K', 'L'],
'6': ['M', 'N', 'O'],
'7': ['P', 'Q', 'R', 'S'],
'8': ['T', 'U', 'V'],
                               '9': ['W', 'X', 'Y', 'Z']}
numbers_from_letters_lookup = {'A': '2', 'B': '2', 'C': '2',
'D': '3', 'E': '3', 'F': '3',
'G': '4', 'H': '4', 'I': '4',
'J': '5', 'K': '5', 'L': '5',
'M': '6', 'N': '6', 'O': '6',
'P': '7', 'Q': '7', 'R': '7', 'S': '7',
'T': '8', 'U': '8', 'V': '8',
'W': '9', 'X': '9', 'Y': '9', 'Z': '9'}
english_word_lookup = enchant.Dict("en_US")
# TODO: it might make sense to allow 'I' and 'a' with the stipulation that they be followed by a valid word...
def is_valid_word(word_to_check: str,
                  min_length=2,
                  exceptions=()) -> bool:
if type(word_to_check) is not str:
raise ValueError("Non-string entered")
if (len(word_to_check) < min_length) and (word_to_check not in exceptions):
return False
else:
return english_word_lookup.check(word_to_check)
def format_phone_number(phone_digit_list: list) -> str:
#TODO: we should actually probably check that each 'digit' is a string rather than forcing it
out_str = ''
# length check
if (len(phone_digit_list) not in [10, 11]) or (type(phone_digit_list) is not list):
raise ValueError("not a valid phone number")
# country code
if len(phone_digit_list) == 11:
out_str = (phone_digit_list.pop(0) + '-')
    # area code
for digit in phone_digit_list[:3]:
out_str += str(digit)
out_str += '-'
    # exchange (central office) code
for digit in phone_digit_list[3:6]:
out_str += str(digit)
out_str += '-'
# and the last four
for digit in phone_digit_list[6:]:
out_str += str(digit)
return out_str
def get_character_list(phone_words: str) -> list:
if type(phone_words) is not str:
raise ValueError("Not a Valid Input")
    return list(re.sub(r'\W+', '', phone_words))
def all_values_from_number(num: str) -> list:
    # Copy the lookup entry so the shared table is never mutated in place
    # (letters += [num] would otherwise append the digit to the global lists).
    letters = list(letters_from_numbers_lookup.get(num, []))
    if num not in letters:
        letters.append(num)
    return letters
def all_combinations(number_list: list) -> list:
"""
:param number_list: array of strings representing digits between 0 and 9
:return: all possible number-letter combinations
"""
all_chars = [all_values_from_number(x) for x in number_list]
# note: I broke this out for ease of testing,
# but really we'd want this to return the iterable for efficiency
return list(itertools.product(*all_chars))
def has_valid_word(char_list: list) -> bool:
"""
:param char_list: array of strings, can be combination of digits and letters
:return: whether there is a valid English word in this array, based on the letters in order
note that this word must be surrounded on both sides by numbers (1800-PAINTX is not a valid word)
"""
phone_number = ''.join(char_list)
    only_letters = re.sub(r"\d", " ", phone_number).strip()
letters_split = only_letters.split(' ')
n_valid = 0
n_char = 0
has_preposition = False
for i in range(len(letters_split)):
sub_word = letters_split[i]
if sub_word != '':
if i == 0:
if (len(sub_word) < 3) and (sub_word not in ['A', 'AN', 'I']):
return False
elif sub_word in ['A', 'AN', 'I']:
n_valid += 1
n_char += 1
has_preposition = True
elif (len(sub_word) < 3) or (is_valid_word(''.join(sub_word)) is False):
return False
else:
n_valid += 1
n_char += 1
elif (len(sub_word) < 3) or (is_valid_word(''.join(sub_word)) is False):
return False
else:
n_valid += 1
n_char += 1
if has_preposition:
if len(letters_split) > 1:
return (n_valid == n_char) and (n_valid > 0)
else:
return False
else:
return (n_valid == n_char) and (n_valid > 0)
def format_wordification(char_list: list) -> str:
"""
:param char_list: letter-number combination in an array (all strings)
:return: valid wordification with dashes between any letter/number chunks
"""
out = ''
n = len(char_list)
char_str = ''.join(char_list)
    num_letter_list = re.split(r'(\d+)', char_str)
if len(num_letter_list) == 3:
out = format_phone_number(list(char_list))
else:
for chunk in num_letter_list:
if chunk in ['', ' ']:
pass
else:
out += chunk
out += '-'
out = out[:-1]
if n == 11:
        if (char_list[0] == '1') and (out[1] != '-'):
out = '1-' + out[1:]
if out[2:5].isdigit():
out = out[:5] + "-" + out[5:]
if (n == 10) and (out[:3].isdigit()):
out = out[:3] + "-" + out[3:]
out = re.sub(r'([A-Z])-([A-Z])', r'\1\2', out)
return out.replace('--', '-')
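# Minimal demo of the helpers above (illustrative; format_phone_number and
# all_combinations need no dictionary lookups, so this runs as long as the
# enchant import at the top succeeds):
if __name__ == '__main__':
    print(format_phone_number(list('18007246837')))  # 1-800-724-6837
    print(all_combinations(['2', '3'])[:4])          # first few letter/digit mixes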
|
python
|
# --depends-on config
# --depends-on format_activity
from src import ModuleManager, utils
from src.Logging import Logger as log
@utils.export("botset", utils.BoolSetting("print-motd", "Set whether I print /motd"))
@utils.export(
"botset",
utils.BoolSetting("pretty-activity", "Whether or not to pretty print activity"),
)
# Used to migrate word stats from prior to v1.19.0
@utils.export(
"channelset",
utils.BoolSetting("print", "Whether or not to print activity a channel to logs"),
)
class Module(ModuleManager.BaseModule):
def _print(self, event):
if event["channel"] and not event["channel"].get_setting("print", True):
return
line = event["line"]
if event["pretty"] and self.bot.get_setting("pretty-activity", False):
line = event["pretty"]
server = event["server"]
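        # Prefer the server's configured alias for display when one is set.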
server = server if not hasattr(server, "alias") else server.alias
context = (
event["context"]
if (event["context"] not in ["*", ""]) and (event["context"] is not None)
else "Server"
)
log.info(line, server=server, context=context, formatting=True)
@utils.hook("formatted.message.channel")
@utils.hook("formatted.notice.channel")
@utils.hook("formatted.notice.private")
@utils.hook("formatted.join")
@utils.hook("formatted.part")
@utils.hook("formatted.nick")
@utils.hook("formatted.invite")
@utils.hook("formatted.mode.channel")
@utils.hook("formatted.topic")
@utils.hook("formatted.topic-timestamp")
@utils.hook("formatted.kick")
@utils.hook("formatted.quit")
@utils.hook("formatted.rename")
@utils.hook("formatted.chghost")
@utils.hook("formatted.account")
@utils.hook("formatted.delete")
def formatted(self, event):
self._print(event)
@utils.hook("formatted.motd")
def motd(self, event):
if self.bot.get_setting("print-motd", True):
self._print(event)
|
python
|
import numpy as np
import dask.array as da
from napari.components import ViewerModel
from napari.util import colormaps
base_colormaps = colormaps.CYMRGB
two_colormaps = colormaps.MAGENTA_GREEN
def test_multichannel():
"""Test adding multichannel image."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 5))
viewer.add_image(data, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
assert viewer.layers[i].colormap[0] == base_colormaps[i]
def test_two_channel():
"""Test adding multichannel image with two channels."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 2))
viewer.add_image(data, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
assert viewer.layers[i].colormap[0] == two_colormaps[i]
def test_one_channel():
"""Test adding multichannel image with one channel."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 1))
viewer.add_image(data, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
assert viewer.layers[i].colormap[0] == two_colormaps[i]
def test_specified_multichannel():
"""Test adding multichannel image with color channel set."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((5, 10, 15))
viewer.add_image(data, channel_axis=0)
assert len(viewer.layers) == data.shape[0]
for i in range(data.shape[0]):
assert np.all(viewer.layers[i].data == data.take(i, axis=0))
def test_names():
"""Test adding multichannel image with custom names."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 5))
names = ['multi ' + str(i + 3) for i in range(data.shape[-1])]
viewer.add_image(data, name=names, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].name == names[i]
viewer = ViewerModel()
name = 'example'
names = [name] + [name + f' [{i + 1}]' for i in range(data.shape[-1] - 1)]
viewer.add_image(data, name=name, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].name == names[i]
def test_colormaps():
"""Test adding multichannel image with custom colormaps."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 5))
colormap = 'gray'
viewer.add_image(data, colormap=colormap, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].colormap[0] == colormap
viewer = ViewerModel()
colormaps = ['gray', 'blue', 'red', 'green', 'yellow']
viewer.add_image(data, colormap=colormaps, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].colormap[0] == colormaps[i]
def test_split_rgb_image():
"""Test adding multichannel image with custom colormaps."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 3))
colormaps = ['red', 'green', 'blue']
viewer.add_image(data, colormap=colormaps, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].colormap[0] == colormaps[i]
def test_contrast_limits():
"""Test adding multichannel image with custom contrast limits."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 5))
clims = [0.3, 0.7]
viewer.add_image(data, contrast_limits=clims, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].contrast_limits == clims
viewer = ViewerModel()
clims = [[0.3, 0.7], [0.1, 0.9], [0.3, 0.9], [0.4, 0.9], [0.2, 0.9]]
viewer.add_image(data, contrast_limits=clims, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].contrast_limits == clims[i]
def test_gamma():
"""Test adding multichannel image with custom gamma."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 5))
gamma = 0.7
viewer.add_image(data, gamma=gamma, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].gamma == gamma
viewer = ViewerModel()
gammas = [0.3, 0.4, 0.5, 0.6, 0.7]
viewer.add_image(data, gamma=gammas, channel_axis=-1)
assert len(viewer.layers) == data.shape[-1]
for i in range(data.shape[-1]):
assert viewer.layers[i].gamma == gammas[i]
def test_multichannel_pyramid():
"""Test adding multichannel pyramid."""
viewer = ViewerModel()
    np.random.seed(0)
    shapes = [(40, 20, 4), (20, 10, 4), (10, 5, 4)]
data = [np.random.random(s) for s in shapes]
viewer.add_image(data, channel_axis=-1, is_pyramid=True)
assert len(viewer.layers) == data[0].shape[-1]
for i in range(data[0].shape[-1]):
assert np.all(
[
np.all(l_d == d)
for l_d, d in zip(
viewer.layers[i].data,
[data[j].take(i, axis=-1) for j in range(len(data))],
)
]
)
assert viewer.layers[i].colormap[0] == base_colormaps[i]
def test_rgb_images():
"""Test adding multiple rgb images."""
viewer = ViewerModel()
np.random.seed(0)
data = np.random.random((15, 10, 5, 3))
viewer.add_image(data, channel_axis=2, rgb=True)
assert len(viewer.layers) == data.shape[2]
    for i in range(data.shape[2]):
assert viewer.layers[i].rgb is True
assert viewer.layers[i]._data_view.ndim == 3
def test_dask_array():
"""Test adding multichannel dask array."""
viewer = ViewerModel()
np.random.seed(0)
data = da.random.random((2, 10, 10, 5))
viewer.add_image(data, channel_axis=0)
assert len(viewer.layers) == data.shape[0]
for i in range(data.shape[0]):
assert viewer.layers[i].data.shape == data.shape[1:]
assert isinstance(viewer.layers[i].data, da.Array)
|
python
|
import argparse
import collections
import datetime
import os
import shutil
import time
import dataset
import mlconfig
import toolbox
import torch
import util
import madrys
import numpy as np
from evaluator import Evaluator
from tqdm import tqdm
from trainer import Trainer
mlconfig.register(madrys.MadrysLoss)
# General Options
parser = argparse.ArgumentParser(description='ClasswiseNoise')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--version', type=str, default="resnet18")
parser.add_argument('--exp_name', type=str, default="test_exp")
parser.add_argument('--config_path', type=str, default='configs/cifar10')
parser.add_argument('--load_model', action='store_true', default=False)
parser.add_argument('--data_parallel', action='store_true', default=False)
# Datasets Options
parser.add_argument('--train_batch_size', default=512, type=int, help='train batch size')
parser.add_argument('--eval_batch_size', default=512, type=int, help='eval batch size')
parser.add_argument('--num_of_workers', default=8, type=int, help='workers for loader')
parser.add_argument('--train_data_type', type=str, default='CIFAR10')
parser.add_argument('--train_data_path', type=str, default='../datasets')
parser.add_argument('--test_data_type', type=str, default='CIFAR10')
parser.add_argument('--test_data_path', type=str, default='../datasets')
# Perturbation Options
parser.add_argument('--universal_train_portion', default=0.2, type=float)
parser.add_argument('--universal_stop_error', default=0.5, type=float)
parser.add_argument('--universal_train_target', default='train_subset', type=str)
parser.add_argument('--train_step', default=10, type=int)
parser.add_argument('--use_subset', action='store_true', default=False)
parser.add_argument('--attack_type', default='min-min', type=str, choices=['min-min', 'min-max', 'random'], help='Attack type')
parser.add_argument('--perturb_type', default='classwise', type=str, choices=['classwise', 'samplewise'], help='Perturb type')
parser.add_argument('--patch_location', default='center', type=str, choices=['center', 'random'], help='Location of the noise')
parser.add_argument('--noise_shape', default=[10, 3, 32, 32], nargs='+', type=int, help='noise shape')
parser.add_argument('--epsilon', default=8, type=float, help='perturbation')
parser.add_argument('--num_steps', default=1, type=int, help='perturb number of steps')
parser.add_argument('--step_size', default=0.8, type=float, help='perturb step size')
parser.add_argument('--random_start', action='store_true', default=False)
args = parser.parse_args()
# Convert Eps
args.epsilon = args.epsilon / 255
args.step_size = args.step_size / 255
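# (budgets are given in 0-255 pixel units; the model sees inputs in [0, 1])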
# Set up Experiments
if args.exp_name == '':
    args.exp_name = 'exp_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
exp_path = os.path.join(args.exp_name, args.version)
log_file_path = os.path.join(exp_path, args.version)
checkpoint_path = os.path.join(exp_path, 'checkpoints')
checkpoint_path_file = os.path.join(checkpoint_path, args.version)
util.build_dirs(exp_path)
util.build_dirs(checkpoint_path)
logger = util.setup_logger(name=args.version, log_file=log_file_path + ".log")
# CUDA Options
logger.info("PyTorch Version: %s" % (torch.__version__))
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
device = torch.device('cuda')
device_list = [torch.cuda.get_device_name(i) for i in range(0, torch.cuda.device_count())]
logger.info("GPU List: %s" % (device_list))
else:
device = torch.device('cpu')
# Load Exp Configs
config_file = os.path.join(args.config_path, args.version)+'.yaml'
config = mlconfig.load(config_file)
config.set_immutable()
for key in config:
logger.info("%s: %s" % (key, config[key]))
shutil.copyfile(config_file, os.path.join(exp_path, args.version+'.yaml'))
def train(starting_epoch, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader):
for epoch in range(starting_epoch, config.epochs):
logger.info("")
logger.info("="*20 + "Training Epoch %d" % (epoch) + "="*20)
# Train
ENV['global_step'] = trainer.train(epoch, model, criterion, optimizer)
ENV['train_history'].append(trainer.acc_meters.avg*100)
scheduler.step()
# Eval
logger.info("="*20 + "Eval Epoch %d" % (epoch) + "="*20)
evaluator.eval(epoch, model)
payload = ('Eval Loss:%.4f\tEval acc: %.2f' % (evaluator.loss_meters.avg, evaluator.acc_meters.avg*100))
logger.info(payload)
ENV['eval_history'].append(evaluator.acc_meters.avg*100)
        ENV['current_acc'] = evaluator.acc_meters.avg*100
# Reset Stats
trainer._reset_stats()
evaluator._reset_stats()
# Save Model
target_model = model.module if args.data_parallel else model
util.save_model(ENV=ENV,
epoch=epoch,
model=target_model,
optimizer=optimizer,
scheduler=scheduler,
filename=checkpoint_path_file)
logger.info('Model Saved at %s', checkpoint_path_file)
return
def universal_perturbation_eval(noise_generator, random_noise, data_loader, model, eval_target=args.universal_train_target):
loss_meter = util.AverageMeter()
err_meter = util.AverageMeter()
random_noise = random_noise.to(device)
model = model.to(device)
for i, (images, labels) in enumerate(data_loader[eval_target]):
images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)
if random_noise is not None:
for i in range(len(labels)):
class_index = labels[i].item()
noise = random_noise[class_index]
mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=images[i].shape, patch_location=args.patch_location)
images[i] += class_noise
pred = model(images)
err = (pred.data.max(1)[1] != labels.data).float().sum()
loss = torch.nn.CrossEntropyLoss()(pred, labels)
loss_meter.update(loss.item(), len(labels))
err_meter.update(err / len(labels))
return loss_meter.avg, err_meter.avg
def universal_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV):
# Class-Wise perturbation
# Generate Data loader
datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
eval_batch_size=args.eval_batch_size,
train_data_type=args.train_data_type,
train_data_path=args.train_data_path,
test_data_type=args.test_data_type,
test_data_path=args.test_data_path,
num_of_workers=args.num_of_workers,
seed=args.seed, no_train_augments=True)
if args.use_subset:
data_loader = datasets_generator._split_validation_set(train_portion=args.universal_train_portion,
train_shuffle=True, train_drop_last=True)
else:
data_loader = datasets_generator.getDataLoader(train_shuffle=True, train_drop_last=True)
condition = True
data_iter = iter(data_loader['train_dataset'])
logger.info('=' * 20 + 'Searching Universal Perturbation' + '=' * 20)
if hasattr(model, 'classify'):
model.classify = True
while condition:
if args.attack_type == 'min-min' and not args.load_model:
# Train Batch for min-min noise
for j in range(0, args.train_step):
try:
(images, labels) = next(data_iter)
                except StopIteration:
data_iter = iter(data_loader['train_dataset'])
(images, labels) = next(data_iter)
images, labels = images.to(device), labels.to(device)
# Add Class-wise Noise to each sample
train_imgs = []
for i, (image, label) in enumerate(zip(images, labels)):
noise = random_noise[label.item()]
mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)
train_imgs.append(images[i]+class_noise)
# Train
model.train()
for param in model.parameters():
param.requires_grad = True
trainer.train_batch(torch.stack(train_imgs).to(device), labels, model, optimizer)
for i, (images, labels) in tqdm(enumerate(data_loader[args.universal_train_target]), total=len(data_loader[args.universal_train_target])):
images, labels, model = images.to(device), labels.to(device), model.to(device)
# Add Class-wise Noise to each sample
batch_noise, mask_cord_list = [], []
for i, (image, label) in enumerate(zip(images, labels)):
noise = random_noise[label.item()]
mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)
batch_noise.append(class_noise)
mask_cord_list.append(mask_cord)
# Update universal perturbation
model.eval()
for param in model.parameters():
param.requires_grad = False
batch_noise = torch.stack(batch_noise).to(device)
if args.attack_type == 'min-min':
perturb_img, eta = noise_generator.min_min_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
elif args.attack_type == 'min-max':
perturb_img, eta = noise_generator.min_max_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
else:
                raise ValueError('Invalid attack')
class_noise_eta = collections.defaultdict(list)
for i in range(len(eta)):
x1, x2, y1, y2 = mask_cord_list[i]
delta = eta[i][:, x1: x2, y1: y2]
class_noise_eta[labels[i].item()].append(delta.detach().cpu())
for key in class_noise_eta:
delta = torch.stack(class_noise_eta[key]).mean(dim=0) - random_noise[key]
class_noise = random_noise[key]
class_noise += delta
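            # Project the updated class noise back onto the L-inf ball of radius epsilon.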
random_noise[key] = torch.clamp(class_noise, -args.epsilon, args.epsilon)
# Eval termination conditions
loss_avg, error_rate = universal_perturbation_eval(noise_generator, random_noise, data_loader, model, eval_target=args.universal_train_target)
logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate*100))
random_noise = random_noise.detach()
ENV['random_noise'] = random_noise
if args.attack_type == 'min-min':
condition = error_rate > args.universal_stop_error
elif args.attack_type == 'min-max':
condition = error_rate < args.universal_stop_error
return random_noise
def samplewise_perturbation_eval(random_noise, data_loader, model, eval_target='train_dataset', mask_cord_list=[]):
loss_meter = util.AverageMeter()
err_meter = util.AverageMeter()
# random_noise = random_noise.to(device)
model = model.to(device)
idx = 0
for i, (images, labels) in enumerate(data_loader[eval_target]):
images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)
if random_noise is not None:
for i, (image, label) in enumerate(zip(images, labels)):
if not torch.is_tensor(random_noise):
sample_noise = torch.tensor(random_noise[idx]).to(device)
else:
sample_noise = random_noise[idx].to(device)
c, h, w = image.shape[0], image.shape[1], image.shape[2]
mask = np.zeros((c, h, w), np.float32)
x1, x2, y1, y2 = mask_cord_list[idx]
mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
sample_noise = torch.from_numpy(mask).to(device)
images[i] = images[i] + sample_noise
idx += 1
pred = model(images)
err = (pred.data.max(1)[1] != labels.data).float().sum()
loss = torch.nn.CrossEntropyLoss()(pred, labels)
loss_meter.update(loss.item(), len(labels))
err_meter.update(err / len(labels))
return loss_meter.avg, err_meter.avg
def sample_wise_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV):
datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
eval_batch_size=args.eval_batch_size,
train_data_type=args.train_data_type,
train_data_path=args.train_data_path,
test_data_type=args.test_data_type,
test_data_path=args.test_data_path,
num_of_workers=args.num_of_workers,
seed=args.seed, no_train_augments=True)
if args.train_data_type == 'ImageNetMini' and args.perturb_type == 'samplewise':
data_loader = datasets_generator._split_validation_set(0.2, train_shuffle=False, train_drop_last=False)
data_loader['train_dataset'] = data_loader['train_subset']
else:
data_loader = datasets_generator.getDataLoader(train_shuffle=False, train_drop_last=False)
mask_cord_list = []
idx = 0
for images, labels in data_loader['train_dataset']:
for i, (image, label) in enumerate(zip(images, labels)):
noise = random_noise[idx]
mask_cord, _ = noise_generator._patch_noise_extend_to_img(noise, image_size=image.shape, patch_location=args.patch_location)
mask_cord_list.append(mask_cord)
idx += 1
condition = True
train_idx = 0
data_iter = iter(data_loader['train_dataset'])
logger.info('=' * 20 + 'Searching Samplewise Perturbation' + '=' * 20)
while condition:
if args.attack_type == 'min-min' and not args.load_model:
# Train Batch for min-min noise
for j in tqdm(range(0, args.train_step), total=args.train_step):
try:
(images, labels) = next(data_iter)
                except StopIteration:
train_idx = 0
data_iter = iter(data_loader['train_dataset'])
(images, labels) = next(data_iter)
images, labels = images.to(device), labels.to(device)
# Add Sample-wise Noise to each sample
for i, (image, label) in enumerate(zip(images, labels)):
sample_noise = random_noise[train_idx]
c, h, w = image.shape[0], image.shape[1], image.shape[2]
mask = np.zeros((c, h, w), np.float32)
x1, x2, y1, y2 = mask_cord_list[train_idx]
if type(sample_noise) is np.ndarray:
mask[:, x1: x2, y1: y2] = sample_noise
else:
mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
# mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
sample_noise = torch.from_numpy(mask).to(device)
images[i] = images[i] + sample_noise
train_idx += 1
model.train()
for param in model.parameters():
param.requires_grad = True
trainer.train_batch(images, labels, model, optimizer)
# Search For Noise
idx = 0
for i, (images, labels) in tqdm(enumerate(data_loader['train_dataset']), total=len(data_loader['train_dataset'])):
images, labels, model = images.to(device), labels.to(device), model.to(device)
# Add Sample-wise Noise to each sample
batch_noise, batch_start_idx = [], idx
for i, (image, label) in enumerate(zip(images, labels)):
sample_noise = random_noise[idx]
c, h, w = image.shape[0], image.shape[1], image.shape[2]
mask = np.zeros((c, h, w), np.float32)
x1, x2, y1, y2 = mask_cord_list[idx]
if type(sample_noise) is np.ndarray:
mask[:, x1: x2, y1: y2] = sample_noise
else:
mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
# mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
sample_noise = torch.from_numpy(mask).to(device)
batch_noise.append(sample_noise)
idx += 1
# Update sample-wise perturbation
model.eval()
for param in model.parameters():
param.requires_grad = False
batch_noise = torch.stack(batch_noise).to(device)
if args.attack_type == 'min-min':
perturb_img, eta = noise_generator.min_min_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
elif args.attack_type == 'min-max':
perturb_img, eta = noise_generator.min_max_attack(images, labels, model, optimizer, criterion, random_noise=batch_noise)
else:
                raise ValueError('Invalid attack')
for i, delta in enumerate(eta):
x1, x2, y1, y2 = mask_cord_list[batch_start_idx+i]
delta = delta[:, x1: x2, y1: y2]
if torch.is_tensor(random_noise):
random_noise[batch_start_idx+i] = delta.detach().cpu().clone()
else:
random_noise[batch_start_idx+i] = delta.detach().cpu().numpy()
# Eval termination conditions
loss_avg, error_rate = samplewise_perturbation_eval(random_noise, data_loader, model, eval_target='train_dataset',
mask_cord_list=mask_cord_list)
logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate*100))
if torch.is_tensor(random_noise):
random_noise = random_noise.detach()
ENV['random_noise'] = random_noise
if args.attack_type == 'min-min':
condition = error_rate > args.universal_stop_error
elif args.attack_type == 'min-max':
condition = error_rate < args.universal_stop_error
# Update Random Noise to shape
if torch.is_tensor(random_noise):
new_random_noise = []
for idx in range(len(random_noise)):
sample_noise = random_noise[idx]
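            # Note: `image` is left over from the last data-loader batch; this
            # relies on every sample in the dataset sharing one image shape.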
c, h, w = image.shape[0], image.shape[1], image.shape[2]
mask = np.zeros((c, h, w), np.float32)
x1, x2, y1, y2 = mask_cord_list[idx]
mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
new_random_noise.append(torch.from_numpy(mask))
new_random_noise = torch.stack(new_random_noise)
return new_random_noise
else:
return random_noise
def main():
# Setup ENV
datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
eval_batch_size=args.eval_batch_size,
train_data_type=args.train_data_type,
train_data_path=args.train_data_path,
test_data_type=args.test_data_type,
test_data_path=args.test_data_path,
num_of_workers=args.num_of_workers,
seed=args.seed)
data_loader = datasets_generator.getDataLoader()
model = config.model().to(device)
logger.info("param size = %fMB", util.count_parameters_in_MB(model))
optimizer = config.optimizer(model.parameters())
scheduler = config.scheduler(optimizer)
criterion = config.criterion()
if args.perturb_type == 'samplewise':
train_target = 'train_dataset'
else:
if args.use_subset:
data_loader = datasets_generator._split_validation_set(train_portion=args.universal_train_portion,
train_shuffle=True, train_drop_last=True)
train_target = 'train_subset'
else:
data_loader = datasets_generator.getDataLoader(train_shuffle=True, train_drop_last=True)
train_target = 'train_dataset'
trainer = Trainer(criterion, data_loader, logger, config, target=train_target)
evaluator = Evaluator(data_loader, logger, config)
ENV = {'global_step': 0,
'best_acc': 0.0,
           'current_acc': 0.0,
'best_pgd_acc': 0.0,
'train_history': [],
'eval_history': [],
'pgd_eval_history': [],
'genotype_list': []}
if args.data_parallel:
model = torch.nn.DataParallel(model)
if args.load_model:
checkpoint = util.load_model(filename=checkpoint_path_file,
model=model,
optimizer=optimizer,
alpha_optimizer=None,
scheduler=scheduler)
ENV = checkpoint['ENV']
trainer.global_step = ENV['global_step']
logger.info("File %s loaded!" % (checkpoint_path_file))
noise_generator = toolbox.PerturbationTool(epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size)
if args.attack_type == 'random':
noise = noise_generator.random_noise(noise_shape=args.noise_shape)
torch.save(noise, os.path.join(args.exp_name, 'perturbation.pt'))
logger.info(noise)
logger.info(noise.shape)
logger.info('Noise saved at %s' % (os.path.join(args.exp_name, 'perturbation.pt')))
elif args.attack_type == 'min-min' or args.attack_type == 'min-max':
if args.attack_type == 'min-max':
# min-max noise need model to converge first
train(0, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader)
if args.random_start:
random_noise = noise_generator.random_noise(noise_shape=args.noise_shape)
else:
random_noise = torch.zeros(*args.noise_shape)
if args.perturb_type == 'samplewise':
noise = sample_wise_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV)
elif args.perturb_type == 'classwise':
noise = universal_perturbation(noise_generator, trainer, evaluator, model, criterion, optimizer, scheduler, random_noise, ENV)
torch.save(noise, os.path.join(args.exp_name, 'perturbation.pt'))
logger.info(noise)
logger.info(noise.shape)
logger.info('Noise saved at %s' % (os.path.join(args.exp_name, 'perturbation.pt')))
else:
        raise NotImplementedError('Unknown attack type: %s' % args.attack_type)
return
if __name__ == '__main__':
for arg in vars(args):
logger.info("%s: %s" % (arg, getattr(args, arg)))
start = time.time()
main()
end = time.time()
cost = (end - start) / 86400
payload = "Running Cost %.2f Days \n" % cost
logger.info(payload)
|
python
|
"""
"""
from jax import numpy as jnp
from jax import jit as jjit
@jjit
def _calc_weights(x, x_table):
n_table = x_table.size
lgt_interp = jnp.interp(x, x_table, jnp.arange(0, n_table))
it_lo = jnp.floor(lgt_interp).astype("i4")
it_hi = it_lo + 1
weight_hi = lgt_interp - it_lo
weight_lo = 1 - weight_hi
it_hi = jnp.where(it_hi > n_table - 1, n_table - 1, it_hi)
return (it_lo, weight_lo), (it_hi, weight_hi)
@jjit
def _calc_weighted_table(x, x_table, y_table):
(it_lo, weight_lo), (it_hi, weight_hi) = _calc_weights(x, x_table)
return weight_lo * y_table[it_lo] + weight_hi * y_table[it_hi]
@jjit
def _calc_2d_weighted_table(x, y, x_table, y_table, z_table):
(it_xlo, weight_xlo), (it_xhi, weight_xhi) = _calc_weights(x, x_table)
(it_ylo, weight_ylo), (it_yhi, weight_yhi) = _calc_weights(y, y_table)
z_xlo_ylo = z_table[it_xlo, it_ylo, :] * weight_xlo * weight_ylo
z_xlo_yhi = z_table[it_xlo, it_yhi, :] * weight_xlo * weight_yhi
z_xhi_ylo = z_table[it_xhi, it_ylo, :] * weight_xhi * weight_ylo
z_xhi_yhi = z_table[it_xhi, it_yhi, :] * weight_xhi * weight_yhi
    return z_xlo_ylo + z_xlo_yhi + z_xhi_ylo + z_xhi_yhi
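# Sanity check (illustrative, not part of the original module): interpolating
# halfway between table nodes should return the average of the neighbours.
if __name__ == "__main__":
    x_table = jnp.array([0.0, 1.0, 2.0])
    y_table = jnp.array([0.0, 10.0, 20.0])
    print(_calc_weighted_table(0.5, x_table, y_table))  # ~5.0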
|
python
|
import os
DEBUG = True
SECRET_KEY = os.getenv("APP_SECRET_KEY")
MYSQL_USERNAME = os.getenv("MYSQL_USERNAME")
MYSQL_PASSWORD = os.getenv("MYSQL_PASSWORD")
MYSQL_PORT = 3306
MYSQL_DB = os.getenv("MYSQL_DB")
LOGGING_LEVEL = "DEBUG"
LOGGING_FILE = "activity.log"
LOGGING_BACKUPS = 2
LOGGING_MAXBYTES = 1024
TIMEZONE = "America/Montreal"
STORE_SCREENSHOT_URI = "https://marketvault-bucket.s3.ca-central-1.amazonaws.com/screenshots/"
|
python
|
#
# PySNMP MIB module HP-ICF-OOBM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-OOBM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:34:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
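# Note: `mibBuilder` is not imported below; the pysnmp MIB loader injects it
# into this module's namespace when the compiled MIB is loaded.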
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
snmpTargetAddrEntry, = mibBuilder.importSymbols("SNMP-TARGET-MIB", "snmpTargetAddrEntry")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Gauge32, Counter32, Bits, NotificationType, ModuleIdentity, Integer32, TimeTicks, ObjectIdentity, iso, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, IpAddress, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter32", "Bits", "NotificationType", "ModuleIdentity", "Integer32", "TimeTicks", "ObjectIdentity", "iso", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "IpAddress", "Counter64")
DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention")
hpicfOobmMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58))
hpicfOobmMIB.setRevisions(('2010-03-26 00:00', '2009-02-13 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpicfOobmMIB.setRevisionsDescriptions(('Added oobm member tables', 'Initial Revision',))
if mibBuilder.loadTexts: hpicfOobmMIB.setLastUpdated('201003260000Z')
if mibBuilder.loadTexts: hpicfOobmMIB.setOrganization('HP Networking')
if mibBuilder.loadTexts: hpicfOobmMIB.setContactInfo('Hewlett-Packard Company 8000 Foothills Blvd. Roseville, CA 95747')
if mibBuilder.loadTexts: hpicfOobmMIB.setDescription('The MIB module is for representing Oobm entity')
hpicfOobmNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 0))
hpicfOobmObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1))
hpicfOobmConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3))
class HpicfOobmServerIndex(TextualConvention, Integer32):
    description = 'An enumerated value that indicates the server application type. The server application type is the index for this table.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("telnet", 1), ("ssh", 2), ("tftp", 3), ("http", 4), ("snmp", 5))
class HpicfOobmServerState(TextualConvention, Integer32):
description = "An enumerated value which provides an indication of the Application server's presence. Default value is oobm only. Application server can be run for oobm only, data only, or for both."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("oobm", 1), ("data", 2), ("both", 3))
hpicfOobmScalars = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 1))
hpicfOobmStatus = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmStatus.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmStatus.setDescription('Global Oobm (Out Of Band Management) status. By default oobm is globally enabled. On the stackable device, when stacking is enabled, this enables oobm on all the member switches.')
hpicfOobmServers = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2))
hpicfOobmServerTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1), )
if mibBuilder.loadTexts: hpicfOobmServerTable.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerTable.setDescription('This table contains one row for every server application in the switch entity. On a stackable device, when stacking is enabled, the server entry is created on all the member switches.')
hpicfOobmServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1, 1), ).setIndexNames((0, "HP-ICF-OOBM-MIB", "hpicfOobmServerType"))
if mibBuilder.loadTexts: hpicfOobmServerEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerEntry.setDescription('Information about Server Application table.')
hpicfOobmServerType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1, 1, 1), HpicfOobmServerIndex())
if mibBuilder.loadTexts: hpicfOobmServerType.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerType.setDescription('The index that is used to access the switch server application table.')
hpicfOobmServerListenMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1, 1, 2), HpicfOobmServerState()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmServerListenMode.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerListenMode.setDescription('The current state of the server application. Default value is Oobm. Depending on the interface on which server application is running, incoming queries will be listened by the switch.')
hpicfOobmSnmpTargetAddrIsOobm = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3))
hpicfSnmpTargetAddrIsOobmTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3, 1), )
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmTable.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmTable.setDescription('Adds an HpicfSnmpTargetAddrIsOobmEntry to snmpTargetAddrTable.')
hpicfSnmpTargetAddrIsOobmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3, 1, 1), )
snmpTargetAddrEntry.registerAugmentions(("HP-ICF-OOBM-MIB", "hpicfSnmpTargetAddrIsOobmEntry"))
hpicfSnmpTargetAddrIsOobmEntry.setIndexNames(*snmpTargetAddrEntry.getIndexNames())
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmEntry.setDescription('Adds an HpicfSnmpTargetAddrIsOobmEntry to snmpTargetAddrTable.')
hpicfSnmpTargetAddrIsOobm = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobm.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobm.setDescription('This object indicates if the target is reachable over OOBM (Out OF Band Management) interface or not. This mib object will be applicable only if there is a physical OOBM port on the device.')
hpicfOobmDefGateway = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4))
hpicfOobmDefGatewayTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1), )
if mibBuilder.loadTexts: hpicfOobmDefGatewayTable.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayTable.setDescription('This table contains one row for every default gateway configured for OOBM Interface.')
hpicfOobmDefGatewayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1, 1), ).setIndexNames((0, "HP-ICF-OOBM-MIB", "hpicfOobmDefGatewayType"))
if mibBuilder.loadTexts: hpicfOobmDefGatewayEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayEntry.setDescription('Information about Default Gateway table.')
hpicfOobmDefGatewayType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1, 1, 1), InetAddressType())
if mibBuilder.loadTexts: hpicfOobmDefGatewayType.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayType.setDescription('Address type of default gateway configured for OOBM Interface.')
hpicfOobmDefGatewayAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1, 1, 2), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmDefGatewayAddr.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayAddr.setDescription('The IP Address of the default gateway configured for OOBM interface.')
hpicfOobmStackMembers = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5))
hpicfOobmMemberDefGatewayTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3), )
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayTable.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayTable.setDescription('This table contains one row for every default gateway configured for OOBM Interface and for each member of the stack.')
hpicfOobmMemberDefGatewayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HP-ICF-OOBM-MIB", "hpicfOobmMemberDefGatewayType"))
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayEntry.setDescription('Information about Default Gateway table.')
hpicfOobmMemberDefGatewayType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3, 1, 1), InetAddressType())
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayType.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayType.setDescription('Address type of default gateway configured for OOBM Interface.')
hpicfOobmMemberDefGatewayAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3, 1, 2), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayAddr.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayAddr.setDescription('The IP Address of the default gateway configured for OOBM interface.')
hpicfOobmCompliance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 1))
hpicfOobmGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2))
hpicfOobmMibCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 1, 1)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmScalarsGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmServersGroup"), ("HP-ICF-OOBM-MIB", "hpicfSnmpTargetAddrIsOobmGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmDefGatewayGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmMemberGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmGroups"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfOobmMibCompliance = hpicfOobmMibCompliance.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMibCompliance.setDescription('The compliance statement for entries which implement the Oobm application servers MIB.')
hpicfOobmScalarsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 1)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfOobmScalarsGroup = hpicfOobmScalarsGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmScalarsGroup.setDescription('Basic Scalars required in Oobm MIB implementation.')
hpicfOobmServersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 2)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmServerListenMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfOobmServersGroup = hpicfOobmServersGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServersGroup.setDescription('Oobm Server MIB parameters.')
hpicfSnmpTargetAddrIsOobmGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 3)).setObjects(("HP-ICF-OOBM-MIB", "hpicfSnmpTargetAddrIsOobm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfSnmpTargetAddrIsOobmGroup = hpicfSnmpTargetAddrIsOobmGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmGroup.setDescription('A group of objects to add an HpicfSnmpTargetAddrIsOobmEntry to snmpTargetAddrTable.')
hpicfOobmDefGatewayGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 4)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmDefGatewayAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfOobmDefGatewayGroup = hpicfOobmDefGatewayGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayGroup.setDescription('OOBM Default Gateway MIB parameters')
hpicfOobmMemberGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 5)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmMemberDefGatewayAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfOobmMemberGroup = hpicfOobmMemberGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberGroup.setDescription('OOBM stack member parameters')
mibBuilder.exportSymbols("HP-ICF-OOBM-MIB", HpicfOobmServerState=HpicfOobmServerState, hpicfOobmDefGatewayGroup=hpicfOobmDefGatewayGroup, hpicfOobmServerEntry=hpicfOobmServerEntry, hpicfOobmDefGateway=hpicfOobmDefGateway, hpicfOobmStatus=hpicfOobmStatus, hpicfOobmDefGatewayEntry=hpicfOobmDefGatewayEntry, hpicfOobmServerTable=hpicfOobmServerTable, hpicfOobmCompliance=hpicfOobmCompliance, hpicfOobmObjects=hpicfOobmObjects, hpicfOobmDefGatewayType=hpicfOobmDefGatewayType, hpicfOobmMemberDefGatewayAddr=hpicfOobmMemberDefGatewayAddr, hpicfOobmMemberGroup=hpicfOobmMemberGroup, hpicfOobmDefGatewayAddr=hpicfOobmDefGatewayAddr, hpicfOobmSnmpTargetAddrIsOobm=hpicfOobmSnmpTargetAddrIsOobm, hpicfOobmServerType=hpicfOobmServerType, hpicfOobmServers=hpicfOobmServers, hpicfSnmpTargetAddrIsOobmEntry=hpicfSnmpTargetAddrIsOobmEntry, hpicfOobmMemberDefGatewayEntry=hpicfOobmMemberDefGatewayEntry, hpicfSnmpTargetAddrIsOobm=hpicfSnmpTargetAddrIsOobm, hpicfOobmMemberDefGatewayType=hpicfOobmMemberDefGatewayType, hpicfOobmServerListenMode=hpicfOobmServerListenMode, PYSNMP_MODULE_ID=hpicfOobmMIB, hpicfOobmGroups=hpicfOobmGroups, hpicfOobmScalars=hpicfOobmScalars, hpicfOobmConformance=hpicfOobmConformance, HpicfOobmServerIndex=HpicfOobmServerIndex, hpicfSnmpTargetAddrIsOobmTable=hpicfSnmpTargetAddrIsOobmTable, hpicfOobmNotifications=hpicfOobmNotifications, hpicfSnmpTargetAddrIsOobmGroup=hpicfSnmpTargetAddrIsOobmGroup, hpicfOobmScalarsGroup=hpicfOobmScalarsGroup, hpicfOobmMemberDefGatewayTable=hpicfOobmMemberDefGatewayTable, hpicfOobmMIB=hpicfOobmMIB, hpicfOobmMibCompliance=hpicfOobmMibCompliance, hpicfOobmDefGatewayTable=hpicfOobmDefGatewayTable, hpicfOobmServersGroup=hpicfOobmServersGroup, hpicfOobmStackMembers=hpicfOobmStackMembers)
|
python
|
"""
MARL environment for google football
"""
import numpy as np
import gym
import gfootball.env.football_env as football_env
from gfootball.env import _process_representation_wrappers
from gfootball.env import _process_reward_wrappers
from gfootball.env import config
from gfootball.env import wrappers
class GoogleFootballEnv(object):
def __init__(self,
num_of_left_agents,
num_of_right_agents=0,
env_name="test_example_multiagent",
stacked=False,
representation='extracted',
rewards='scoring',
write_goal_dumps=False,
write_full_episode_dumps=False,
render=False,
write_video=False,
dump_frequency=1,
extra_players=None,
channel_dimensions=(96, 72),
other_config_options={}) -> None:
assert num_of_left_agents >= 0
assert num_of_right_agents >= 0
assert num_of_left_agents + num_of_right_agents != 0
# config the environment
scenario_config = config.Config({'level': env_name}).ScenarioConfig()
players = [('agent:left_players=%d,right_players=%d' %
(num_of_left_agents, num_of_right_agents))]
if extra_players is not None:
players.extend(extra_players)
config_values = {
'dump_full_episodes': write_full_episode_dumps,
'dump_scores': write_goal_dumps,
'players': players,
'level': env_name,
'tracesdir': "/tmp/gfootball_log",
'write_video': write_video,
}
config_values.update(other_config_options)
c = config.Config(config_values)
self._env = football_env.FootballEnv(c)
if dump_frequency > 1:
self._env = wrappers.PeriodicDumpWriter(self._env, dump_frequency,
render)
elif render:
self._env.render()
        # _apply_output_wrappers insists on adding a wrapper when there is only one agent
self._env = _process_reward_wrappers(self._env, rewards)
self._env = _process_representation_wrappers(self._env, representation,
channel_dimensions)
if stacked:
self._env = wrappers.FrameStack(self._env, 4)
self._env = wrappers.GetStateWrapper(self._env)
self._action_space = gym.spaces.Discrete(
self._env.action_space.nvec[0])
self._observation_space = None if representation == "raw" else gym.spaces.Box(
low=self._env.observation_space.low[0],
high=self._env.observation_space.high[0],
dtype=self._env.observation_space.dtype)
self._num_left = num_of_left_agents
self._num_right = num_of_right_agents
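        # Centralized-critic style shared observation space: each agent sees
        # all agents' observations concatenated along the last axis.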
self._share_observation_space = gym.spaces.Box(
low=np.concatenate([
self._observation_space.low
for i in range(self._num_left + self._num_right)
],
axis=-1),
high=np.concatenate([
self._observation_space.high
for i in range(self._num_left + self._num_right)
],
axis=-1),
dtype=self._observation_space.dtype)
@property
def action_space(self):
return [
self._action_space for i in range(self._num_left + self._num_right)
]
@property
def observation_space(self):
return [
self._observation_space
for i in range(self._num_left + self._num_right)
]
@property
def share_observation_space(self):
return [
self._share_observation_space
for i in range(self._num_left + self._num_right)
]
def seed(self, seed=None):
return self._env.seed(seed)
def reset(self):
return self._env.reset()
def step(self, actions):
return self._env.step(actions)
@property
def num_of_left_agents(self):
return self._num_left
@property
def num_of_right_agents(self):
return self._num_right
def random_step(self):
return self._env.step([
self._action_space.sample()
for i in range(self._num_left + self._num_right)
])
if __name__ == "__main__":
e = GoogleFootballEnv(num_of_left_agents=2,
num_of_right_agents=2,
env_name='5_vs_5',
representation="simple115v2")
print(e.share_observation_space)
print(e.action_space)
|
python
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module provides factory functions for creating authentication providers"""
from .sk_authentication_provider import SymmetricKeyAuthenticationProvider
from .sas_authentication_provider import SharedAccessSignatureAuthenticationProvider
from .iotedge_authentication_provider import IotEdgeAuthenticationProvider
def from_connection_string(connection_string):
"""Provides an AuthenticationProvider object that can be created simply with a connection string.
    :param connection_string: The connection string.
:return: a Symmetric Key AuthenticationProvider.
"""
return SymmetricKeyAuthenticationProvider.parse(connection_string)
def from_shared_access_signature(sas_token_str):
"""Provides an `AuthenticationProvider` object that can be created simply with a shared access signature.
:param sas_token_str: The shared access signature.
:return: Shared Access Signature AuthenticationProvider.
"""
return SharedAccessSignatureAuthenticationProvider.parse(sas_token_str)
def from_environment():
"""Provides an `AuthenticationProvider` object that can be used inside of an Azure IoT Edge module.
This method does not need any parameters because all of the information necessary to connect
to Azure IoT Edge comes from the operating system of the module container and also from the
IoTEdge service.
:return: iotedge AuthenticationProvider.
"""
return IotEdgeAuthenticationProvider()
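# Illustrative usage (the connection string below is a placeholder, not a
# real credential):
#
#     auth = from_connection_string(
#         "HostName=<hub>.azure-devices.net;DeviceId=<device>;SharedAccessKey=<key>")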
|
python
|
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE.txt for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.9.dev'
__license__ = 'MIT'
import base64
import cgi
import email.utils
import functools
import hmac
import httplib
import itertools
import mimetypes
import os
import re
import subprocess
import sys
import tempfile
import thread
import threading
import time
import warnings
from Cookie import SimpleCookie
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from urllib import urlencode
from urlparse import urlunsplit, urljoin
from Queue import Empty
from multiprocessing import Process, Queue, Event
from multiprocessing import active_children
try: from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
from UserDict import DictMixin
try: from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try: import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try: from json import dumps as json_dumps
except ImportError: # pragma: no cover
try: from simplejson import dumps as json_dumps
except ImportError: # pragma: no cover
try: from django.utils.simplejson import dumps as json_dumps
except ImportError: # pragma: no cover
json_dumps = None
NCTextIOWrapper = None
if sys.version_info >= (3,0,0): # pragma: no cover
# See Request.POST
from io import BytesIO
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return str(x, enc, err) if isinstance(x, bytes) else str(x)
if sys.version_info < (3,2,0):
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
''' Garbage collecting an io.TextIOWrapper(buffer) instance closes
the wrapped buffer. This subclass keeps it open. '''
def close(self): pass
else:
from StringIO import StringIO as BytesIO
bytes = str
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return x if isinstance(x, unicode) else unicode(str(x), enc, err)
def tob(data, enc='utf8'):
""" Convert anything to bytes """
return data.encode(enc) if isinstance(data, unicode) else bytes(data)
# Convert strings and unicode to native strings
if sys.version_info >= (3,0,0):
tonat = touni
else:
tonat = tob
tonat.__doc__ = """ Convert anything to native strings """
class classinstancemethod(object):
"""
Acts like a class method when called from a class, like an
instance method when called by an instance. The method should
take two arguments, 'self' and 'cls'; one of these will be None
depending on how the method was called.
"""
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, obj, type=None):
return _methodwrapper(self.func, obj=obj, type=type)
class _methodwrapper(object):
def __init__(self, func, obj, type):
self.func = func
self.obj = obj
self.type = type
def __call__(self, *args, **kw):
        assert 'self' not in kw and 'cls' not in kw, (
            "You cannot use 'self' or 'cls' arguments to a "
            "classinstancemethod")
return self.func(*((self.obj, self.type) + args), **kw)
def __repr__(self):
if self.obj is None:
return ('<bound class method %s.%s>'
% (self.type.__name__, self.func.func_name))
else:
return ('<bound method %s.%s of %r>'
% (self.type.__name__, self.func.func_name, self.obj))
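# A small sketch of classinstancemethod in action: the decorated function
# always receives (instance-or-None, class) as its first two arguments.
class _ClassInstanceDemo(object):
    def describe(self, cls):
        # 'self' is None when the method is looked up on the class itself.
        return 'class call' if self is None else 'instance call'
    describe = classinstancemethod(describe)
# _ClassInstanceDemo.describe()   -> 'class call'
# _ClassInstanceDemo().describe() -> 'instance call'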
# Backward compatibility
def depr(message, critical=False):
if critical: raise DeprecationWarning(message)
warnings.warn(message, DeprecationWarning, stacklevel=3)
# Small helpers
def makelist(data):
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if not obj: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
def cached_property(func):
''' A property that, if accessed, replaces itself with the computed
value. Subsequent accesses won't call the getter again. '''
return DictProperty('__dict__')(func)
class lazy_attribute(object): # Does not need configuration -> lower-case name
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
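# A short sketch contrasting the two caching descriptors above:
# cached_property stores its value per instance (in the instance __dict__),
# while lazy_attribute replaces itself with a plain class attribute.
class _CacheDemo(object):
    @cached_property
    def per_instance(self):
        return object()  # computed once per instance, then reused
    @lazy_attribute
    def per_class(cls):
        return object()  # computed once, then shared by all instances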
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
class HTTPResponse(BottleException):
""" Used to break execution and immediately finish the response """
def __init__(self, output='', status=200, header=None):
super(BottleException, self).__init__("HTTP Response %d" % status)
self.status = int(status)
self.output = output
self.headers = HeaderDict(header) if header else None
def apply(self, response):
if self.headers:
for key, value in self.headers.iterallitems():
response.headers[key] = value
response.status = self.status
class HTTPError(HTTPResponse):
""" Used to generate an error page """
def __init__(self, code=500, output='Unknown Error', exception=None,
traceback=None, header=None):
super(HTTPError, self).__init__(output, code, header)
self.exception = exception
self.traceback = traceback
def __repr__(self):
return template(ERROR_PAGE_TEMPLATE, e=self)
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been built """
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/:page`). By default, wildcards
consume characters up to the next slash (`/`). To change that, you may
add a regular expression pattern (e.g. `/wiki/:page#[a-z]+#`).
For performance reasons, static routes (rules without wildcards) are
checked first. Dynamic routes are searched in order. Try to avoid
ambiguous or overlapping rules.
The HTTP method string matches only on equality, with two exceptions:
        * `GET` routes also match `HEAD` requests if there is no appropriate
          `HEAD` route installed.
        * `ANY` routes do match if there is no other suitable route installed.
An optional ``name`` parameter is used by :meth:`build` to identify
routes.
'''
default = '[^/]+'
@lazy_attribute
def syntax(cls):
return re.compile(r'(?<!\\):([a-zA-Z_][a-zA-Z_0-9]*)?(?:#(.*?)#)?')
def __init__(self):
self.routes = {} # A {rule: {method: target}} mapping
self.rules = [] # An ordered list of rules
self.named = {} # A name->(rule, build_info) mapping
self.static = {} # Cache for static routes: {path: {method: target}}
self.dynamic = [] # Cache for dynamic routes. See _compile()
def add(self, rule, method, target, name=None, static=False):
''' Add a new route or replace the target for an existing route. '''
if static:
depr("Use a backslash to escape ':' in routes.") # 0.9
rule = rule.replace(':','\\:')
if rule in self.routes:
self.routes[rule][method.upper()] = target
else:
self.routes[rule] = {method.upper(): target}
self.rules.append(rule)
if self.static or self.dynamic: # Clear precompiler cache.
self.static, self.dynamic = {}, {}
if name:
self.named[name] = (rule, None)
def build(self, _name, *anon, **args):
''' Return a string that matches a named route. Use keyword arguments
to fill out named wildcards. Remaining arguments are appended as a
query string. Raises RouteBuildError or KeyError.'''
if _name not in self.named:
raise RouteBuildError("No route with that name.", _name)
rule, pairs = self.named[_name]
if not pairs:
token = self.syntax.split(rule)
parts = [p.replace('\\:',':') for p in token[::3]]
names = token[1::3]
if len(parts) > len(names): names.append(None)
pairs = zip(parts, names)
self.named[_name] = (rule, pairs)
try:
anon = list(anon)
url = [s if k is None
else s+str(args.pop(k)) if k else s+str(anon.pop())
for s, k in pairs]
except IndexError:
msg = "Not enough arguments to fill out anonymous wildcards."
raise RouteBuildError(msg)
except KeyError, e:
raise RouteBuildError(*e.args)
if args: url += ['?', urlencode(args)]
return ''.join(url)
def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(404/405). '''
targets, urlargs = self._match_path(environ)
if not targets:
raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
method = environ['REQUEST_METHOD'].upper()
if method in targets:
return targets[method], urlargs
if method == 'HEAD' and 'GET' in targets:
return targets['GET'], urlargs
if 'ANY' in targets:
return targets['ANY'], urlargs
allowed = [verb for verb in targets if verb != 'ANY']
if 'GET' in allowed and 'HEAD' not in allowed:
allowed.append('HEAD')
raise HTTPError(405, "Method not allowed.",
header=[('Allow',",".join(allowed))])
def _match_path(self, environ):
''' Optimized PATH_INFO matcher. '''
path = environ['PATH_INFO'] or '/'
# Assume we are in a warm state. Search compiled rules first.
match = self.static.get(path)
if match: return match, {}
for combined, rules in self.dynamic:
match = combined.match(path)
if not match: continue
gpat, match = rules[match.lastindex - 1]
return match, gpat.match(path).groupdict() if gpat else {}
# Lazy-check if we are really in a warm state. If yes, stop here.
if self.static or self.dynamic or not self.routes: return None, {}
# Cold state: We have not compiled any rules yet. Do so and try again.
if not environ.get('wsgi.run_once'):
self._compile()
return self._match_path(environ)
# For run_once (CGI) environments, don't compile. Just check one by one.
epath = path.replace(':','\\:') # Turn path into its own static rule.
match = self.routes.get(epath) # This returns static rule only.
if match: return match, {}
for rule in self.rules:
#: Skip static routes to reduce re.compile() calls.
if rule.count(':') < rule.count('\\:'): continue
match = self._compile_pattern(rule).match(path)
if match: return self.routes[rule], match.groupdict()
return None, {}
def _compile(self):
''' Prepare static and dynamic search structures. '''
self.static = {}
self.dynamic = []
def fpat_sub(m):
return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
for rule in self.rules:
target = self.routes[rule]
if not self.syntax.search(rule):
self.static[rule.replace('\\:',':')] = target
continue
gpat = self._compile_pattern(rule)
fpat = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, gpat.pattern)
gpat = gpat if gpat.groupindex else None
try:
combined = '%s|(%s)' % (self.dynamic[-1][0].pattern, fpat)
self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
self.dynamic[-1][1].append((gpat, target))
except (AssertionError, IndexError), e: # AssertionError: Too many groups
self.dynamic.append((re.compile('(^%s$)'%fpat),
[(gpat, target)]))
except re.error, e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
def _compile_pattern(self, rule):
''' Return a regular expression with named groups for each wildcard. '''
out = ''
for i, part in enumerate(self.syntax.split(rule)):
if i%3 == 0: out += re.escape(part.replace('\\:',':'))
elif i%3 == 1: out += '(?P<%s>' % part if part else '(?:'
else: out += '%s)' % (part or '[^/]+')
return re.compile('^%s$'%out)
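# A minimal sketch of the Router API in isolation (targets are plain
# strings here for readability; any object works):
def _example_router():
    router = Router()
    router.add('/contact', 'GET', 'contact-target')
    router.add('/wiki/:page', 'GET', 'wiki-target', name='show_wiki')
    target, args = router.match({'PATH_INFO': '/wiki/Start',
                                 'REQUEST_METHOD': 'GET'})
    assert (target, args) == ('wiki-target', {'page': 'Start'})
    return router.build('show_wiki', page='Start')  # -> '/wiki/Start'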
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" WSGI application """
def __init__(self, catchall=True, autojson=True, config=None):
""" Create a new bottle instance.
You usually don't do that. Use `bottle.app.push()` instead.
"""
self.routes = [] # List of installed routes including metadata.
self.router = Router() # Maps requests to self.route indices.
self.ccache = {} # Cache for callbacks with plugins applied.
self.plugins = [] # List of installed plugins.
self.mounts = {}
self.error_handler = {}
        #: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
self.config = config or {}
self.serve = True
# Default plugins
self.hooks = self.install(HooksPlugin())
self.typefilter = self.install(TypeFilterPlugin())
if autojson:
self.install(JSONPlugin())
def optimize(self, *a, **ka):
depr("Bottle.optimize() is obsolete.")
def mount(self, app, prefix, **options):
        ''' Mount an application to a specific URL prefix. The prefix is added
            to SCRIPT_NAME and removed from PATH_INFO before the sub-application
            is called.
:param app: an instance of :class:`Bottle`.
:param prefix: path prefix used as a mount-point.
All other parameters are passed to the underlying :meth:`route` call.
'''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
prefix = '/'.join(filter(None, prefix.split('/')))
if not prefix:
raise TypeError('Empty prefix. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(prefix):
raise TypeError('Conflict with existing mount: %s' % other)
path_depth = prefix.count('/') + 1
options.setdefault('method', 'ANY')
self.mounts[prefix] = app
@self.route('/%s/:#.*#' % prefix, **options)
def mountpoint():
request.path_shift(path_depth)
return app.handle(request.environ)
def add_filter(self, ftype, func):
depr("Filters are deprecated. Replace any filters with plugins.") #0.9
self.typefilter.add(ftype, func)
def install(self, plugin):
        ''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin.
Pass a type object to remove all plugins that match that type.
Subclasses are not removed. Pass a string to remove all plugins with
a matching ``name`` attribute. Pass ``True`` to remove all plugins.
The list of affected plugins is returned. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, id=None):
''' Reset all routes (re-apply plugins) and clear all caches. If an ID
is given, only that specific route is affected. '''
if id is None: self.ccache.clear()
else: self.ccache.pop(id, None)
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def match(self, environ):
""" Search for a matching route and return a (callback, urlargs) tuple.
The first element is the associated route callback with plugins
applied. The second value is a dictionary with parameters extracted
from the URL. The :class:`Router` raises :exc:`HTTPError` (404/405)
on a non-match."""
handle, args = self.router.match(environ)
environ['route.handle'] = handle # TODO move to router?
environ['route.url_args'] = args
try:
return self.ccache[handle], args
except KeyError:
config = self.routes[handle]
callback = self.ccache[handle] = self._build_callback(config)
return callback, args
def _build_callback(self, config):
''' Apply plugins to a route and return a new callable. '''
wrapped = config['callback']
plugins = self.plugins + config['apply']
skip = config['skip']
try:
for plugin in reversed(plugins):
if True in skip: break
if plugin in skip or type(plugin) in skip: continue
if getattr(plugin, 'name', True) in skip: continue
if hasattr(plugin, 'apply'):
wrapped = plugin.apply(wrapped, config)
else:
wrapped = plugin(wrapped)
if not wrapped: break
functools.update_wrapper(wrapped, config['callback'])
return wrapped
except RouteReset: # A plugin may have changed the config dict inplace.
return self._build_callback(config) # Apply all plugins again.
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
if 'decorate' in config:
depr("The 'decorate' parameter was renamed to 'apply'") # 0.9
plugins += makelist(config.pop('decorate'))
if 'template' in config: # TODO Make plugin
depr("The 'template' parameter is no longer used. Add the view() "\
"decorator to the 'apply' parameter instead.") # 0.9
tpl, tplo = config.pop('template'), config.pop('template_opts', {})
plugins.insert(0, view(tpl, **tplo))
if config.pop('no_hooks', False):
depr("The no_hooks parameter is no longer used. Add 'hooks' to the"\
"list of skipped plugins instead.") # 0.9
skiplist.append('hooks')
static = config.get('static', False) # depr 0.9
def decorator(callback):
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
cfg = dict(rule=rule, method=verb, callback=callback,
name=name, app=self, config=config,
apply=plugins, skip=skiplist)
self.routes.append(cfg)
cfg['id'] = self.routes.index(cfg)
self.router.add(rule, verb, cfg['id'], name=name, static=static)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. """
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def add_hook(self, name, func):
depr("Call Bottle.hooks.add() instead.") #0.9
self.hooks.add(name, func)
def remove_hook(self, name, func):
depr("Call Bottle.hooks.remove() instead.") #0.9
self.hooks.remove(name, func)
def handle(self, environ, method='GET'):
""" Execute the first matching route callback and return the result.
:exc:`HTTPResponse` exceptions are catched and returned. If :attr:`Bottle.catchall` is true, other exceptions are catched as
well and returned as :exc:`HTTPError` instances (500).
"""
if isinstance(environ, str):
depr("Bottle.handle() takes an environ dictionary.") # v0.9
environ = {'PATH_INFO': environ, 'REQUEST_METHOD': method.upper()}
if not self.serve:
return HTTPError(503, "Server stopped")
try:
callback, args = self.match(environ)
return callback(**args)
except HTTPResponse, r:
return r
except RouteReset: # Route reset requested by the callback or a plugin.
            self.ccache.pop(environ['route.handle'], None)
return self.handle(environ) # Try again.
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
if not self.catchall: raise
return HTTPError(500, "Internal Server Error", e, format_exc(10))
def _cast(self, out, request, response, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
response.headers['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
response.headers['Content-Length'] = str(len(out))
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
if isinstance(out, HTTPError):
out.apply(response)
return self._cast(self.error_handler.get(out.status, repr)(out), request, response)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output, request, response)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = out.next()
while not first:
first = out.next()
except StopIteration:
return self._cast('', request, response)
except HTTPResponse, e:
first = e
except Exception, e:
first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
or not self.catchall:
raise
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first, request, response)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return itertools.imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'\
% type(first)), request, response)
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
out = self.handle(environ)
out = self._cast(out, request, response)
# rfc2616 section 4.3
if response.status in (100, 101, 204, 304) or request.method == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
status = '%d %s' % (response.status, HTTP_CODES[response.status])
start_response(status, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% environ.get('PATH_INFO', '/')
if DEBUG:
err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e)
err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10)
environ['wsgi.errors'].write(err) #TODO: wsgi.error should not get html
start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
return [tob(err)]
def __call__(self, environ, start_response):
return self.wsgi(environ, start_response)
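# A minimal application sketch for the Bottle class above (routes only;
# starting a server is handled by run() further down in this module):
def _example_app():
    myapp = Bottle()
    @myapp.route('/hello/:name')
    def hello(name):
        return 'Hello %s!' % name
    @myapp.error(404)
    def not_found(error):
        return 'Sorry, nothing here.'
    return myapp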
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class Request(threading.local, DictMixin):
""" Represents a single HTTP request using thread-local attributes.
The Request object wraps a WSGI environment and can be used as such.
"""
def __init__(self, environ=None):
""" Create a new Request instance.
You usually don't do this but use the global `bottle.request`
instance instead.
"""
        self.bind(environ or {})
def bind(self, environ):
""" Bind a new WSGI environment.
This is done automatically for the global `bottle.request`
instance on every request.
"""
self.environ = environ
# These attributes are used anyway, so it is ok to compute them here
self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/')
self.method = environ.get('REQUEST_METHOD', 'GET').upper()
@property
def _environ(self):
depr("Request._environ renamed to Request.environ")
return self.environ
def copy(self):
''' Returns a copy of self '''
return Request(self.environ.copy())
def path_shift(self, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:param shift: The number of path fragments to shift. May be negative
to change the shift direction. (default: 1)
'''
script_name = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift)
self['PATH_INFO'] = self.path
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Shortcut for Request.environ.__setitem__ """
self.environ[key] = value
todelete = []
if key in ('PATH_INFO','REQUEST_METHOD'):
self.bind(self.environ)
elif key == 'wsgi.input': todelete = ('body','forms','files','params')
elif key == 'QUERY_STRING': todelete = ('get','params')
elif key.startswith('HTTP_'): todelete = ('headers', 'cookies')
for key in todelete:
if 'bottle.' + key in self.environ:
del self.environ['bottle.' + key]
@property
def query_string(self):
""" The part of the URL following the '?'. """
return self.environ.get('QUERY_STRING', '')
@property
def fullpath(self):
""" Request path including SCRIPT_NAME (if present). """
spath = self.environ.get('SCRIPT_NAME','').rstrip('/') + '/'
rpath = self.path.lstrip('/')
return urljoin(spath, rpath)
@property
def url(self):
""" Full URL as requested by the client (computed).
This value is constructed out of different environment variables
and includes scheme, host, port, scriptname, path and query string.
Special characters are NOT escaped.
"""
scheme = self.environ.get('wsgi.url_scheme', 'http')
host = self.environ.get('HTTP_X_FORWARDED_HOST')
host = host or self.environ.get('HTTP_HOST', None)
if not host:
host = self.environ.get('SERVER_NAME')
port = self.environ.get('SERVER_PORT', '80')
if (scheme, port) not in (('https','443'), ('http','80')):
host += ':' + port
parts = (scheme, host, self.fullpath, self.query_string, '')
return urlunsplit(parts)
@property
def content_length(self):
""" Content-Length header as an integer, -1 if not specified """
return int(self.environ.get('CONTENT_LENGTH', '') or -1)
@property
def header(self):
depr("The Request.header property was renamed to Request.headers")
return self.headers
@DictProperty('environ', 'bottle.headers', read_only=True)
def headers(self):
''' Request HTTP Headers stored in a :class:`HeaderDict`. '''
return WSGIHeaderDict(self.environ)
@DictProperty('environ', 'bottle.get', read_only=True)
def GET(self):
""" The QUERY_STRING parsed into an instance of :class:`MultiDict`. """
data = parse_qs(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = MultiDict()
for key, values in data.iteritems():
for value in values:
get[key] = value
return get
@DictProperty('environ', 'bottle.post', read_only=True)
def POST(self):
""" The combined values from :attr:`forms` and :attr:`files`. Values are
either strings (form values) or instances of
:class:`cgi.FieldStorage` (file uploads).
"""
post = MultiDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
post[item.name] = item if item.filename else item.value
return post
@DictProperty('environ', 'bottle.forms', read_only=True)
def forms(self):
""" POST form values parsed into an instance of :class:`MultiDict`.
This property contains form values parsed from an `url-encoded`
            or `multipart/form-data` encoded POST request body. The values are
native strings.
"""
forms = MultiDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, 'filename'):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.files', read_only=True)
def files(self):
""" File uploads parsed into an instance of :class:`MultiDict`.
This property contains file uploads parsed from an
`multipart/form-data` encoded POST request body. The values are
instances of :class:`cgi.FieldStorage`.
"""
files = MultiDict()
for name, item in self.POST.iterallitems():
if hasattr(item, 'filename'):
files[name] = item
return files
@DictProperty('environ', 'bottle.params', read_only=True)
def params(self):
""" A combined :class:`MultiDict` with values from :attr:`forms` and
:attr:`GET`. File-uploads are not included. """
params = MultiDict(self.GET)
for key, value in self.forms.iterallitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.body', read_only=True)
def _body(self):
""" The HTTP request body as a seekable file-like object.
This property returns a copy of the `wsgi.input` stream and should
be used instead of `environ['wsgi.input']`.
"""
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, MEMFILE_MAX))
if not part: break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
body.seek(0)
return body
@property
def body(self):
self._body.seek(0)
return self._body
@property
def auth(self): #TODO: Tests and docs. Add support for digest. namedtuple?
""" HTTP authorization data as a (user, passwd) tuple. (experimental)
This implementation currently only supports basic auth and returns
None on errors.
"""
return parse_auth(self.headers.get('Authorization',''))
@DictProperty('environ', 'bottle.cookies', read_only=True)
def COOKIES(self):
""" Cookies parsed into a dictionary. Signed cookies are NOT decoded
automatically. See :meth:`get_cookie` for details.
"""
raw_dict = SimpleCookie(self.headers.get('Cookie',''))
cookies = {}
for cookie in raw_dict.itervalues():
cookies[cookie.key] = cookie.value
return cookies
def get_cookie(self, key, secret=None):
""" Return the content of a cookie. To read a `Signed Cookies`, use the
same `secret` as used to create the cookie (see
:meth:`Response.set_cookie`). If anything goes wrong, None is
returned.
"""
value = self.COOKIES.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else None
return value or None
@property
def is_ajax(self):
''' True if the request was generated using XMLHttpRequest '''
#TODO: write tests
        return self.headers.get('X-Requested-With') == 'XMLHttpRequest'
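# Inside a route callback, the module-global `request` instance (created
# near the end of this module) exposes the parsed data described above:
#
#     request.GET.get('q')           # query-string values
#     request.forms.get('name')      # POST form fields (strings)
#     request.files.get('upload')    # file uploads (cgi.FieldStorage)
#     request.get_cookie('token')    # cookie values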
class Response(threading.local):
""" Represents a single HTTP response using thread-local attributes.
"""
def __init__(self):
self.bind()
def bind(self):
""" Resets the Response object to its factory defaults. """
self._COOKIES = None
self.status = 200
self.headers = HeaderDict()
self.content_type = 'text/html; charset=UTF-8'
@property
def header(self):
depr("Response.header renamed to Response.headers")
return self.headers
def copy(self):
''' Returns a copy of self. '''
copy = Response()
copy.status = self.status
copy.headers = self.headers.copy()
copy.content_type = self.content_type
return copy
def wsgiheader(self):
        ''' Returns a WSGI-conformant list of (header, value) pairs. '''
for c in self.COOKIES.values():
if c.OutputString() not in self.headers.getall('Set-Cookie'):
self.headers.append('Set-Cookie', c.OutputString())
# rfc2616 section 10.2.3, 10.3.5
if self.status in (204, 304) and 'content-type' in self.headers:
del self.headers['content-type']
if self.status == 304:
for h in ('allow', 'content-encoding', 'content-language',
'content-length', 'content-md5', 'content-range',
'content-type', 'last-modified'): # + c-location, expires?
if h in self.headers:
del self.headers[h]
return list(self.headers.iterallitems())
headerlist = property(wsgiheader)
@property
def charset(self):
""" Return the charset specified in the content-type header.
This defaults to `UTF-8`.
"""
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. Use :meth:`set_cookie` instead. """
if not self._COOKIES:
self._COOKIES = SimpleCookie()
return self._COOKIES
def set_cookie(self, key, value, secret=None, **kargs):
''' Add a cookie or overwrite an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param key: the name of the cookie.
:param value: the value of the cookie.
:param secret: required for signed cookies. (default: None)
:param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: /)
If neither `expires` nor `max_age` are set (default), the cookie
lasts only as long as the browser is not closed.
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at client side.
'''
if secret:
value = touni(cookie_encode((key, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret missing for non-string Cookie.')
self.COOKIES[key] = value
for k, v in kargs.iteritems():
self.COOKIES[key][k.replace('_', '-')] = v
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def get_content_type(self):
""" Current 'Content-Type' header. """
return self.headers['Content-Type']
def set_content_type(self, value):
self.headers['Content-Type'] = value
content_type = property(get_content_type, set_content_type, None,
get_content_type.__doc__)
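# Sketch of the signed-cookie round trip described in set_cookie() and
# Request.get_cookie(); both sides must use the same secret:
#
#     response.set_cookie('account', u'alice', secret='s3cret')
#     # ...and in a later request:
#     request.get_cookie('account', secret='s3cret')  # -> u'alice'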
###############################################################################
# Plugins ######################################################################
###############################################################################
class JSONPlugin(object):
name = 'json'
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, context):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, dict):
response.content_type = 'application/json'
return dumps(rv)
return rv
return wrapper
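# With JSONPlugin installed (the default for Bottle(autojson=True)), a
# callback may simply return a dict; a short sketch:
def _example_json(app):
    @app.route('/status')
    def status():
        return {'ok': True}  # serialized and sent as application/json
    return status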
class HooksPlugin(object):
name = 'hooks'
def __init__(self):
self.hooks = {'before_request': [], 'after_request': []}
self.app = None
def _empty(self):
return not (self.hooks['before_request'] or self.hooks['after_request'])
def setup(self, app):
self.app = app
def add(self, name, func):
''' Attach a callback to a hook. '''
if name not in self.hooks:
raise ValueError("Unknown hook name %s" % name)
was_empty = self._empty()
self.hooks[name].append(func)
if self.app and was_empty and not self._empty(): self.app.reset()
def remove(self, name, func):
''' Remove a callback from a hook. '''
if name not in self.hooks:
raise ValueError("Unknown hook name %s" % name)
was_empty = self._empty()
self.hooks[name].remove(func)
if self.app and not was_empty and self._empty(): self.app.reset()
def apply(self, callback, context):
if self._empty(): return callback
before_request = self.hooks['before_request']
after_request = self.hooks['after_request']
def wrapper(*a, **ka):
for hook in before_request: hook()
rv = callback(*a, **ka)
for hook in after_request[::-1]: hook()
return rv
return wrapper
class TypeFilterPlugin(object):
def __init__(self):
self.filter = []
self.app = None
def setup(self, app):
self.app = app
def add(self, ftype, func):
        if not self.filter and self.app: self.app.reset()
if not isinstance(ftype, type):
raise TypeError("Expected type object, got %s" % type(ftype))
self.filter = [(t, f) for (t, f) in self.filter if t != ftype]
self.filter.append((ftype, func))
def apply(self, callback, context):
filter = self.filter
if not filter: return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
for testtype, filterfunc in filter:
if isinstance(rv, testtype):
rv = filterfunc(rv)
return rv
return wrapper
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" A dict that remembers old values for each key """
# collections.MutableMapping would be better for Python >= 2.6
def __init__(self, *a, **k):
self.dict = dict()
for k, v in dict(*a, **k).iteritems():
self[k] = v
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def keys(self): return self.dict.keys()
def __getitem__(self, key): return self.get(key, KeyError, -1)
def __setitem__(self, key, value): self.append(key, value)
def append(self, key, value): self.dict.setdefault(key, []).append(value)
def replace(self, key, value): self.dict[key] = [value]
def getall(self, key): return self.dict.get(key) or []
def get(self, key, default=None, index=-1):
if key not in self.dict and default != KeyError:
return [default][index]
return self.dict[key][index]
def iterallitems(self):
for key, values in self.dict.iteritems():
for value in values:
yield key, value
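# A short sketch of MultiDict semantics: item assignment appends, item
# access returns the most recent value, getall() returns the full history.
def _example_multidict():
    md = MultiDict()
    md['key'] = 'first'
    md['key'] = 'second'
    assert md['key'] == 'second'
    assert md.getall('key') == ['first', 'second']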
class HeaderDict(MultiDict):
""" Same as :class:`MultiDict`, but title()s the keys and overwrites. """
def __contains__(self, key):
return MultiDict.__contains__(self, self.httpkey(key))
def __getitem__(self, key):
return MultiDict.__getitem__(self, self.httpkey(key))
def __delitem__(self, key):
return MultiDict.__delitem__(self, self.httpkey(key))
def __setitem__(self, key, value): self.replace(key, value)
def get(self, key, default=None, index=-1):
return MultiDict.get(self, self.httpkey(key), default, index)
def append(self, key, value):
return MultiDict.append(self, self.httpkey(key), str(value))
def replace(self, key, value):
return MultiDict.replace(self, self.httpkey(key), str(value))
def getall(self, key): return MultiDict.getall(self, self.httpkey(key))
def httpkey(self, key): return str(key).replace('_','-').title()
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
def __init__(self, environ):
self.environ = environ
def _ekey(self, key): # Translate header field name to environ key.
return 'HTTP_' + key.replace('-','_').upper()
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
def keys(self): return list(self)
def __len__(self): return len(list(self))
def __contains__(self, key): return self._ekey(key) in self.environ
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
read, buff = self.fp.read, self.buffer_size
while True:
part = read(buff)
if not part: break
yield part
###############################################################################
# Application Helper ###########################################################
###############################################################################
def dict2json(d):
depr('JSONPlugin is the preferred way to return JSON.') #0.9
response.content_type = 'application/json'
return json_dumps(d)
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=303):
""" Aborts execution and causes a 303 redirect """
scriptname = request.environ.get('SCRIPT_NAME', '').rstrip('/') + '/'
location = urljoin(request.url, urljoin(scriptname, url))
raise HTTPResponse("", status=code, header=dict(Location=location))
def send_file(*a, **k): #BC 0.6.4
""" Raises the output of static_file(). (deprecated) """
raise static_file(*a, **k)
def static_file(filename, root, guessmime=True, mimetype=None, download=False):
""" Opens a file in a safe way and returns a HTTPError object with status
code 200, 305, 401 or 404. Sets Content-Type, Content-Length and
Last-Modified header. Obeys If-Modified-Since header and HEAD requests.
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
header = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if not mimetype and guessmime:
header['Content-Type'] = mimetypes.guess_type(filename)[0]
else:
header['Content-Type'] = mimetype if mimetype else 'text/plain'
if download == True:
download = os.path.basename(filename)
if download:
header['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
header['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = ims.split(";")[0].strip() # IE sends "<date>; length=146"
ims = parse_date(ims)
if ims is not None and ims >= int(stats.st_mtime):
header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, header=header)
header['Content-Length'] = stats.st_size
if request.method == 'HEAD':
return HTTPResponse('', header=header)
else:
return HTTPResponse(open(filename, 'rb'), header=header)
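# Typical usage sketch for static_file() (the './static' root directory is
# an assumption for this example):
def _example_static(app):
    @app.route('/static/:filename#.+#')
    def serve_static(filename):
        return static_file(filename, root='./static')
    return serve_static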
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
DEBUG = bool(mode)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
name, pwd = base64.b64decode(data).split(':', 1)
return name, pwd
except (KeyError, ValueError, TypeError):
return None
def _lscmp(a, b):
    ''' Compares two strings in a cryptographically safe way:
        Runtime is not affected by a common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(key, msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(key, msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
''' Return True if the argument looks like a encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
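# Round-trip sketch for the cookie-signing helpers above:
def _example_cookie_roundtrip():
    token = cookie_encode(('user', 'alice'), 'secret-key')
    assert cookie_is_encoded(token)
    assert cookie_decode(token, 'secret-key') == ('user', 'alice')
    assert cookie_decode(token, 'wrong-key') is None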
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/:x/:y'
c(x, y=5) -> '/c/:x' and '/c/:x/:y'
d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
"""
import inspect # Expensive module. Only import if necessary.
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
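# Worked examples for path_shift():
def _example_path_shift():
    assert path_shift('/a', '/b/c', 1) == ('/a/b', '/c')
    assert path_shift('/a/b', '/c', -1) == ('/a', '/b/c')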
# Decorators
#TODO: Replace default_app() with app()
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
def decorator(func):
def wrapper(**kargs):
for key, value in vkargs.iteritems():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(**kargs)
return wrapper
return decorator
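# Usage sketch for validate(); in practice it is stacked under a @route
# decorator so that URL wildcards arrive as keyword arguments:
def _example_validate():
    @validate(a=int, b=int)
    def add(a=None, b=None):
        return str(a + b)
    return add(a='1', b='2')  # -> '3'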
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return HTTPError(401, text)
return func(*a, **ka)
return wrapper
return decorator
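# Usage sketch for auth_basic(); `check` receives the decoded credentials
# from the Authorization header:
#
#     def check(user, password):
#         return user == 'admin' and password == 's3cret'
#
#     @route('/admin')
#     @auth_basic(check)
#     def admin_page():
#         return 'Hello admin!'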
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
for name in '''route get post put delete error mount
hook install uninstall'''.split():
globals()[name] = make_default_app_wrapper(name)
url = make_default_app_wrapper('get_url')
del name
def default():
depr("The default() decorator is deprecated. Use @error(404) instead.")
return error(404)
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **config):
self.options = config
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
CGIHandler().run(handler) # Just ignore host and port here
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
kwargs = {'bindAddress':(self.host, self.port)}
kwargs.update(self.options) # allow to override bindAddress and others
flup.server.fcgi.WSGIServer(handler, **kwargs).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
server.start()
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
if not self.quiet:
from paste.translogger import TransLogger
handler = TransLogger(handler)
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
print "WARNING: Auto-reloading does not work with Fapws3."
print " (Fapws3 breaks python thread support)"
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `monkey` (default: True) fixes the stdlib to use greenthreads.
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
"""
def run(self, handler):
from gevent import wsgi as wsgi_fast, pywsgi as wsgi, monkey
if self.options.get('monkey', True):
monkey.patch_all()
if self.options.get('fast', False):
wsgi = wsgi_fast
wsgi.WSGIServer((self.host, self.port), handler).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config
handler.cfg = Config({'bind': "%s:%d" % (self.host, self.port), 'workers': 4})
arbiter = Arbiter(handler)
arbiter.run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. As requested in issue 63
https://github.com/defnull/bottle/issues/#issue/63 """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Screamingly fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class Mongrel2Server(ServerAdapter):
def run(self, app_handler):
from mongrel2 import handler
from mongrel2_wsgi import wsgi_server
print "Starting 0MQ server."
wsgi_server(app_handler, handler.Connection("279a117-5be1-4da7-9c4e-702b3412baba", "tcp://127.0.0.1:9997", "tcp://127.0.0.1:9996"))
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [PasteServer, CherryPyServer, TwistedServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'mongrel2': Mongrel2Server,
'auto': AutoServer,
}
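# The table above maps the `server` strings accepted by run() to adapter
# classes, e.g.:
#
#     run(app, server='cherrypy', host='0.0.0.0', port=8080)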
########################################################
# Advanced reloading
########################################################
POLL_INTERVAL = 1 # check for changes every n seconds.
SPINUP_TIME = 10 # application must start within this time.
class ReloadingMonitor(object):
instances = []
global_extra_files = []
global_file_callbacks = []
def __init__(self, tx=None, rx=None, poll_interval=POLL_INTERVAL):
self.module_mtimes = {}
self.keep_running = True
self.poll_interval = poll_interval
self.extra_files = list(self.global_extra_files)
self.instances.append(self)
self.file_callbacks = list(self.global_file_callbacks)
self.state = 'RUN'
self.tx = tx
self.rx = rx
def periodic_reload(self):
while not self.rx.is_set():
if not self.check_reload():
self.state = 'STANDBY'
# inform code change
self.tx.put({'pid':os.getpid(), 'status':'changed'})
self.rx.wait(SPINUP_TIME)
if self.rx.is_set():
return
self.state = 'RUN'
self.module_mtimes = {}
time.sleep(self.poll_interval)
def check_reload(self):
filenames = list(self.extra_files)
for file_callback in self.file_callbacks:
try:
filenames.extend(file_callback())
            except Exception:
                print >> sys.stderr, "Error calling reloader callback %r:" % file_callback
                print_exc()
for module in sys.modules.values():
try:
filename = module.__file__
except (AttributeError, ImportError), exc:
continue
if filename is not None:
filenames.append(filename)
for filename in filenames:
try:
stat = os.stat(filename)
if stat:
mtime = stat.st_mtime
else:
mtime = 0
except (OSError, IOError):
continue
if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
elif filename.endswith('$py.class') and \
os.path.exists(filename[:-9] + '.py'):
mtime = max(os.stat(filename[:-9] + '.py').st_mtime, mtime)
if not self.module_mtimes.has_key(filename):
self.module_mtimes[filename] = mtime
elif self.module_mtimes[filename] < mtime:
print >> sys.stderr, (
"%s changed; reloading..." % filename)
return False
return True
def watch_file(self, cls, filename):
"""Watch the named file for changes"""
filename = os.path.abspath(filename)
if self is None:
for instance in cls.instances:
instance.watch_file(filename)
cls.global_extra_files.append(filename)
else:
self.extra_files.append(filename)
watch_file = classinstancemethod(watch_file)
def add_file_callback(self, cls, callback):
"""Add a callback -- a function that takes no parameters -- that will
return a list of filenames to watch for changes."""
if self is None:
for instance in cls.instances:
instance.add_file_callback(callback)
cls.global_file_callbacks.append(callback)
else:
self.file_callbacks.append(callback)
add_file_callback = classinstancemethod(add_file_callback)
def _reloader_new_serve(server, app, interval, tx, rx):
try:
tx.put({'pid':os.getpid(), 'status':'loaded'})
def go():
server.run(app)
t = threading.Thread(target=go)
t.setDaemon(True)
t.start()
monitor = ReloadingMonitor(tx=tx, rx=rx, poll_interval=interval)
monitor.periodic_reload()
except KeyboardInterrupt:
pass
###############################################################################
# Application Control ##########################################################
###############################################################################
def _load(target, **vars):
""" Fetch something from a module. The exact behaviour depends on the the
target string:
If the target is a valid python import path (e.g. `package.module`),
the rightmost part is returned as a module object.
If the target contains a colon (e.g. `package.module:var`) the module
variable specified after the colon is returned.
If the part after the colon contains any non-alphanumeric characters
(e.g. `package.module:func(var)`) the result of the expression
is returned. The expression has access to keyword arguments supplied
to this function.
Example::
>>> _load('bottle')
<module 'bottle' from 'bottle.py'>
>>> _load('bottle:Bottle')
<class 'bottle.Bottle'>
>>> _load('bottle:cookie_encode(v, secret)', v='foo', secret='bar')
'!F+hN4dQxaDJ4QxxaZ+Z3jw==?gAJVA2Zvb3EBLg=='
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules:
__import__(module)
if not target:
return sys.modules[module]
if target.isalnum():
return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
vars[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), vars)
def load_app(target):
""" Load a bottle application based on a target string and return the
application object.
If the target is an import path (e.g. package.module), the application
stack is used to isolate the routes defined in that module.
If the target contains a colon (e.g. package.module:myapp) the
module variable specified after the colon is returned instead.
"""
tmp = app.push() # Create a new "default application"
rv = _load(target) # Import the target module
app.remove(tmp) # Remove the temporary added default application
return rv if isinstance(rv, Bottle) else tmp
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
    :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if isinstance(server, basestring):
server = server_names.get(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise RuntimeError("Server must be a subclass of ServerAdapter")
server.quiet = server.quiet or quiet
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print "Bottle server starting up (using %s)..." % repr(server)
print "Listening on http://%s:%d/" % (server.host, server.port)
print "Use Ctrl-C to quit."
print
try:
        if reloader: # lockfile-based reloading
interval = min(interval, 1)
if os.environ.get('BOTTLE_CHILD'):
_reloader_child(server, app, interval)
else:
_reloader_observer(server, app, interval)
        elif False: # experimental multiprocessing reloader (disabled)
            interval = min(interval, 1)
# tx, rx from the subprocess' perspective.
tx = Queue()
def spinup():
rx = Event()
worker = Process(target=_reloader_new_serve, args=(server, app, interval, tx, rx))
worker.rx = rx
worker.start()
return worker
spinup()
while True:
try:
msg = tx.get(True, 1)
sys.stderr.write("%r\n" % msg)
if msg['status'] == 'changed':
spinup()
elif msg['status'] == 'loaded':
for worker in active_children():
if worker.ident != msg['pid']:
worker.rx.set()
except Empty:
if not active_children():
return
else:
server.run(app)
except KeyboardInterrupt:
pass
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print "Shutting down..."
class FileCheckerThread(threading.Thread):
''' Thread that periodically checks for changed module files. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
        #1: lockfile too old; 2: lockfile missing
#3: module file changed; 5: external exit
self.status = 0
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in sys.modules.values():
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
for path, lmtime in files.iteritems():
if not exists(path) or mtime(path) > lmtime:
self.status = 3
if not exists(self.lockfile):
self.status = 2
elif mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 1
if not self.status:
time.sleep(self.interval)
if self.status != 5:
thread.interrupt_main()
def _reloader_child(server, app, interval):
''' Start the server and check for modified files in a background thread.
As soon as an update is detected, KeyboardInterrupt is thrown in
        the main thread to exit the server loop. The process exits with status
        code 3 to request a reload by the observer process. If the lockfile
        is missing or not modified for more than 2*interval seconds, we assume
        that the observer process died and exit with status code 1 or 2.
'''
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
try:
bgcheck.start()
server.run(app)
except KeyboardInterrupt:
pass
bgcheck.status, status = 5, bgcheck.status
bgcheck.join() # bgcheck.status == 5 --> silent exit
if status: sys.exit(status)
def _reloader_observer(server, app, interval):
''' Start a child process with identical commandline arguments and restart
    it as long as it exits with status code 3. Also create a lockfile and
touch it (update mtime) every interval seconds.
'''
fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
try:
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
elif not server.quiet:
print "Reloading server..."
except KeyboardInterrupt:
pass
if os.path.exists(lockfile): os.unlink(lockfile)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extentions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = map(os.path.abspath, lookup)
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extentions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (*args)
or directly, as keywords (**kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
#TODO: This is a hack... https://github.com/defnull/bottle/issues#issue/8
mylookup = TemplateLookup(directories=['.']+self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=mylookup, **options)
        else: #mako cannot guess extensions. We can, but only at top level...
name = self.name
if not os.path.splitext(name)[1]:
name += os.path.splitext(self.filename)[1]
self.tpl = mylookup.get_template(name)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return [out]
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults).encode("utf-8")
def loader(self, name):
fname = self.search(name, self.lookup)
if fname:
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
''' Untested! '''
def prepare(self, **options):
from simpletal import simpleTAL
# TODO: add option to load METAL files during render
if self.source:
self.tpl = simpleTAL.compileHTMLTemplate(self.source)
else:
with open(self.filename, 'rb') as fp:
self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))
def render(self, *args, **kwargs):
from simpletal import simpleTALES
from StringIO import StringIO
for dictarg in args: kwargs.update(dictarg)
# TODO: maybe reuse a context instead of always creating one
context = simpleTALES.Context()
for k,v in self.defaults.items():
context.addGlobal(k, v)
for k,v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
return output.getvalue()
class SimpleTemplate(BaseTemplate):
blocks = ('if','elif','else','try','except','finally','for','while','with','def','class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
@lazy_attribute
def re_pytokens(cls):
''' This matches comments and all kinds of quoted strings but does
NOT match comments (#...) within quoted strings. (trust me) '''
return re.compile(r'''
(''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
|'(?:[^\\']|\\.)+?' # Single quotes (')
|"(?:[^\\"]|\\.)+?" # Double quotes (")
|'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
|"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
|\#.* # Comments
)''', re.VERBOSE)
def prepare(self, escape_func=cgi.escape, noescape=False):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
@classmethod
def split_comment(cls, code):
""" Removes comments (#...) from python code. """
if '#' not in code: return code
#: Remove comments only (leave quoted strings as they are)
subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
return re.sub(cls.re_pytokens, subf, code)
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
multiline = dedent = oneline = False
template = self.source if self.source else open(self.filename).read()
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'): yield 'RAW', part[1:]
else: yield 'CMD', part
else: yield 'TXT', part
def flush(): # Flush the ptrbuffer
if not ptrbuffer: return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT': cline += repr(value)
elif token == 'RAW': cline += '_str(%s)' % value
elif token == 'CMD': cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = line if isinstance(line, unicode)\
else unicode(line, encoding=self.encoding)
if lineno <= 2:
m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if line.strip()[:2].count('%') == 1:
line = line.split('%',1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
                flush() ## encoding (TODO: why?)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, kwargs)
def execute(self, _stdout, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
env = self.defaults.copy()
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'_include': self.subtemplate, '_str': self._str,
'_escape': self._escape})
env.update(kwargs)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
subtpl = self.__class__(name=subtpl, lookup=self.lookup)
rargs['_base'] = _stdout[:] #copy stdout
del _stdout[:] # clear stdout
return subtpl.execute(_stdout, rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
for dictarg in args: kwargs.update(dictarg)
stdout = []
self.execute(stdout, kwargs)
return ''.join(stdout)
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings: TEMPLATES[tpl].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tpl].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
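# A minimal usage sketch: a string containing '{{', '%', '$' or a newline is
# treated as inline template source rather than a template name (see the
# dispatch logic above), so
#
#     template('Hello {{name}}!', name='World')
#
# renders to 'Hello World!' with the default SimpleTemplate adapter.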
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
MEMFILE_MAX = 1024*100
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%try:
%from bottle import DEBUG, HTTP_CODES, request, touni
%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p>
<pre>{{e.output}}</pre>
%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%end
%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%end
</body>
</html>
%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to sys.path
%end
"""
#: A thread-safe instance of :class:`Request` representing the `current` request.
request = Request()
#: A thread-safe instance of :class:`Response` used to build the HTTP response.
response = Response()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
|
python
|
#!/usr/bin/env python
def getinput(): return open('day23.input.txt').read()
### PART 1
import re
def solve(program, a=0, hack=False):
instrs = [l.split() + [''] for l in program.strip().splitlines()]
tgl = { 'cpy': 'jnz', 'inc': 'dec', 'dec': 'inc', 'jnz': 'cpy', 'tgl': 'inc' }
ip, regs = 0, { 'a': a, 'b': 0, 'c': 0, 'd': 0 }
def get(v): return int(regs.get(v, v))
while ip < len(instrs):
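        # peephole shortcut: with this puzzle's input (assumed layout), the
        # loop entered at instruction 4 adds b*d to a one step at a time;
        # compute the product directly and jump past the loop to instruction 16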
if hack and ip == 4:
regs['a'] = get('a') + get('b') * get('d')
regs['b'] = get('b') - 1
regs['c'] = 2 * get('b')
ip = 16
continue
instr, ip = instrs[ip], ip + 1
op, x, y = instr[0], instr[1], instr[2]
if op == 'cpy': regs[y] = get(x)
elif op == 'inc': regs[x] += 1
elif op == 'dec': regs[x] -= 1
elif op == 'jnz': ip += get(y) - 1 if get(x) != 0 else 0
elif op == 'tgl':
ipx = ip + get(x) - 1
if 0 <= ipx < len(instrs):
instrs[ipx][0] = tgl[instrs[ipx][0]]
return regs['a']
# sample
assert solve('''
cpy 2 a
tgl a
tgl a
tgl a
cpy 1 a
dec a
dec a''') == 3
# problem
s1 = solve(getinput(), 7)
print(s1)
assert s1 == 11662 == solve(getinput(), 7, True)
### PART 2
s2 = solve(getinput(), 12, True)
print(s2)
assert s2 == 479008222
|
python
|
"""
2D arrays
"""
# use numpy
import numpy as np
import pandas as pd
sdarry = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
])
# different ways to index: a single element, a column, and a row
print(sdarry[1, 2])
print(sdarry[:, 2])
print(sdarry[2, :])
# get the overall mean, each row's mean, and each column's mean
# axis=1 averages across a row
# axis=0 averages down a column
print(sdarry.mean(), sdarry.mean(axis=1), sdarry.mean(axis=0))
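# for the array above this prints:
#   6.5  [ 2.5  6.5 10.5]  [5. 6. 7.]
# (overall mean, per-row means, per-column means)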
# use pandas
saleLog = {
'date': ['2018-01-01', '2018-01-02', '2018-01-03'],
'cardno': ['001', '002', '001'],
'name': ['vc银翘片', '清热解毒片', '感康'],
'num': [3, 2, 5],
'money': [18, 22.1, 61.6],
'actually': [16.1, 20.3, 52.1]
}
saleDf = pd.DataFrame(saleLog)
from collections import OrderedDict as od
saleLog = {
'date': ['2018-01-01', '2018-01-02', '2018-01-03'],
'cardno': ['001', '002', '001'],
'name': ['vc银翘片', '清热解毒片', '感康'],
'num': [3, 2, 5],
'money': [18, 22.1, 61.6],
'actually': [16.1, 20.3, 52.1]
}
saleOrderDict = od(saleLog)
saleDf = pd.DataFrame(saleOrderDict)
# iloc only accepts integer positions for the indexers
print(saleDf.iloc[0, 1], saleDf.iloc[:, 1])
# loc needs a label (string) for the column indexer
# print(saleDf.loc[0, 1]) raises an error: 1 is not a column label
print(saleDf.loc[0, 'name'])
# easy way: these two selections are equivalent
print((saleDf.loc[:, 'name'] == saleDf['name']).all())  # True
|
python
|
from .confirm import ShutdownConfirmationDialog
def load(manager, params):
"""Launch shutdown confirmation manager"""
pos = (100, 100)
if params and len(params) > 0:
pos = params[0]
ShutdownConfirmationDialog(pos, manager)
|
python
|
from handlers.chord import ChordHandler
from handlers.base import IndexHandler
url_patterns = [
(r"/index/", IndexHandler),
]
|
python
|
"""
Pytools
Server module
Run server
>> (base) C:\\Users\\ginanjar\\AppData\\Roaming\\Sublime Text 3\\Packages\\pythontools>python core\\server\\pytools
"""
|
python
|
import unittest
from operator import itemgetter
import tonos_ts4.ts4 as ts4
from utils.wallet import create_wallet, DEFAULT_WALLET_BALANCE
from utils.nft_root import create_nft_root, mint_nft, get_nft_addr, DEFAULT_NFT_ROOT_BALANCE
from utils.nft import restore_nft_by_addr, get_nft_info
from random import randint
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: randint(-1, 1)
ts4.init('test_build', verbose = False)
ZERO_ADDRESS = ts4.Address.zero_addr(0)
MINT_COMMISSION = 5 * ts4.GRAM
MIN_FOR_MINTING = 1_700_000_000
MIN_FOR_DATA_DEPLOY = 1_500_000_000
# error codes
NOT_ENOUGH_VALUE_TO_MINT = 107
def prepare_for_minting():
wallet_minter = create_wallet()
wallet_commission_agent = create_wallet()
wallet_royalty_agent = create_wallet()
nft_root = create_nft_root(wallet_commission_agent, wallet_royalty_agent, MINT_COMMISSION)
prepared_info = {
'minter': wallet_minter,
'root': nft_root,
'commission_agent': wallet_commission_agent,
}
return prepared_info
class TestNftMinting(unittest.TestCase):
def tearDown(self):
ts4.reset_all()
# checking the NFT minting correctness
def test_wallet_can_mint_nft(self):
wallet_minter, nft_root = itemgetter('minter', 'root')(prepare_for_minting())
MINT_PRICE = MINT_COMMISSION + MIN_FOR_MINTING
mint_nft(nft_root, wallet_minter, MINT_COMMISSION + MINT_PRICE)
nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
nft_info = get_nft_info(nft)
self.assertEqual(nft_info['addrOwner'], wallet_minter.address.str())
# checking the withdraw from minter's wallet with commission
def test_minter_wallet_withdraw_with_commission(self):
wallet_minter, wallet_commission_agent, nft_root = \
itemgetter('minter', 'commission_agent', 'root')(prepare_for_minting())
MINT_PRICE = 2*MINT_COMMISSION + MIN_FOR_MINTING
mint_nft(nft_root, wallet_minter, MINT_PRICE)
nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
nft_info = get_nft_info(nft)
self.assertEqual(nft_info['addrOwner'], wallet_minter.address.str())
self.assertEqual(wallet_minter.balance, DEFAULT_WALLET_BALANCE - MIN_FOR_DATA_DEPLOY - MINT_COMMISSION)
self.assertEqual(wallet_commission_agent.balance, MINT_COMMISSION + DEFAULT_WALLET_BALANCE)
self.assertEqual(nft_root.balance, DEFAULT_NFT_ROOT_BALANCE)
# checking the withdraw from commission agent's wallet without commission
def test_agent_wallet_withdraw_with_commission(self):
wallet_commission_agent, nft_root = itemgetter('commission_agent', 'root')(prepare_for_minting())
MINT_PRICE = 2*MINT_COMMISSION + MIN_FOR_MINTING
mint_nft(nft_root, wallet_commission_agent, MINT_PRICE)
nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
nft_info = get_nft_info(nft)
self.assertEqual(nft_info['addrOwner'], wallet_commission_agent.address.str())
self.assertEqual(wallet_commission_agent.balance, DEFAULT_WALLET_BALANCE - MIN_FOR_DATA_DEPLOY)
self.assertEqual(nft_root.balance, DEFAULT_NFT_ROOT_BALANCE)
    # checking the withdraw from nft root balance if the commission agent did not send enough
def test_agent_can_mint_using_nft_root_balance(self):
wallet_commission_agent, nft_root = itemgetter('commission_agent', 'root')(prepare_for_minting())
MINT_PRICE = MIN_FOR_MINTING - 1
mint_nft(nft_root, wallet_commission_agent, MINT_PRICE)
nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
nft_info = get_nft_info(nft)
self.assertEqual(nft_info['addrOwner'], wallet_commission_agent.address.str())
self.assertEqual(wallet_commission_agent.balance, DEFAULT_WALLET_BALANCE)
self.assertEqual(nft_root.balance, DEFAULT_NFT_ROOT_BALANCE - MIN_FOR_DATA_DEPLOY)
# checking error throw if minter tries to mint nft without enough money
def test_error_throw_if_minting_with_low_balance(self):
wallet_minter, nft_root = itemgetter('minter', 'root')(prepare_for_minting())
MINT_PRICE = MINT_COMMISSION + MIN_FOR_MINTING - 1
        mint_nft(nft_root, wallet_minter, MINT_PRICE, expected_err=NOT_ENOUGH_VALUE_TO_MINT)
if __name__ == '__main__':
print('\nNftMinting testing:')
unittest.main()
|
python
|
__author__ = 'akshay'
import socket
import time
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# create a socket and connect to the host
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('10.42.0.1', 8001))
buffer_size = 1024
def measure():
"""
measure distance
"""
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
start = time.time()
while GPIO.input(GPIO_ECHO)==0:
start = time.time()
while GPIO.input(GPIO_ECHO)==1:
stop = time.time()
elapsed = stop-start
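    # sound travels ~34300 cm/s; the echo time covers the round trip,
    # so halve it to get the one-way distance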
distance = (elapsed * 34300)/2
return distance
# referring to the pins by GPIO numbers
GPIO.setmode(GPIO.BCM)
# define pi GPIO
GPIO_TRIGGER = 23
GPIO_ECHO = 24
l1=21
l2=20
r1=16
r2=12
# output pin: Trigger
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
# input pin: Echo
GPIO.setup(GPIO_ECHO,GPIO.IN)
GPIO.setup(l1,GPIO.OUT)
GPIO.setup(l2,GPIO.OUT)
GPIO.setup(r1,GPIO.OUT)
GPIO.setup(r2,GPIO.OUT)
# initialize trigger pin to low
GPIO.output(GPIO_TRIGGER, False)
GPIO.output(l1, False)
GPIO.output(l2, False)
GPIO.output(r1, False)
GPIO.output(r2, False)
try:
while True:
distance = measure()
#print "Distance : %.1f cm" % distance
# send data to the host every 0.5 sec
#client_socket.send(str(distance))
time.sleep(0.01)
        data = client_socket.recv(buffer_size)
print 'prediction:',data[0],'distance:',distance
if distance >10:
if data[0]=='0':
GPIO.output(l1, False) #forward
GPIO.output(l2, True)
GPIO.output(r1, False)
GPIO.output(r2, True)
elif data[0]=='1': #right
GPIO.output(l1, True)
GPIO.output(l2, False)
GPIO.output(r1, False)
GPIO.output(r2, True)
elif data[0]=='2':
GPIO.output(l1, False) #left
GPIO.output(l2, True)
GPIO.output(r1, True)
GPIO.output(r2, False)
else:
GPIO.output(l1, False)
GPIO.output(l2, False)
GPIO.output(r1, False)
GPIO.output(r2, False)
else:
GPIO.output(l1, False)
GPIO.output(l2, False)
GPIO.output(r1, False)
GPIO.output(r2, False)
finally:
    client_socket.close()
    GPIO.cleanup()
|
python
|
"""
Function for matching delimiters in an arithmetic expression
"""
from ArrayStack import *
def is_matched(expr):
"""
    Return True if all delimiters are properly matched; False otherwise
"""
lefty = '({['
righty = ')}]'
S = ArrayStack()
for c in expr:
if c in lefty:
S.push(c)
elif c in righty:
if S.is_empty():
return False
if righty.index(c) != lefty.index(S.pop()):
return False
return S.is_empty()
es = is_matched('[(5+x)-(y+z)]')
print(es)
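# non-matching cases for contrast
print(is_matched('[(5+x)-(y+z])'))  # False: ']' closes before ')'
print(is_matched('((5+x)'))         # False: unclosed '('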
|
python
|
"""
File: caesar.py
Name: Max Chang
------------------------------
This program demonstrates the idea of caesar cipher.
Users will be asked to input a number to produce shifted
ALPHABET as the cipher table. After that, any strings typed
in will be encrypted.
"""
# This constant shows the original order of alphabetic sequence
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
"""
    Decipher a user-provided ciphered string using the Caesar cipher.
"""
secret = secret_input('Secret Number: ')
cipher_text = input('What\'s the ciphered string? ')
print('The deciphered string is: ' + decipher(cipher_text.upper(), secret))
def secret_input(display):
"""
Get the shift index for Caesar Cipher
:param display: (string) text to show in the console
:return: (int) the Caesar Cipher shift index
"""
while True:
secret = input(display)
        if secret.isdigit() or (secret.startswith('-') and secret[1:].isdigit()):
secret = int(secret)
while True:
if 0 <= secret < len(ALPHABET):
return secret
elif secret < 0: # Handle for minus
secret += len(ALPHABET)
else: # Handle for larger than 26
secret -= len(ALPHABET)
print('Not an integer number')
def decipher(cipher, key):
"""
Decipher the encrypted code from user using key
:param cipher: (string) the encrypted code
:param key: (int) Caesar Cipher key
:return: (string) deciphered string
"""
decipher_text = ''
for i in range(len(cipher)):
if cipher[i].isalpha():
position = ALPHABET.find(cipher[i])
if position + key < len(ALPHABET):
decipher_text += ALPHABET[position + key]
else: # Handle for index larger than 26
decipher_text += ALPHABET[position + key - len(ALPHABET)]
else: # Handle for non-alphabet
decipher_text += cipher[i]
return decipher_text
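# Example: 'KHOOR' is 'HELLO' shifted forward by 3, so the deciphering
# key is 26 - 3 = 23 and decipher('KHOOR', 23) returns 'HELLO'.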
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
|
python
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class StudentUser(AbstractUser):
matric_no = models.CharField(max_length=14, unique=True)
mac_add = models.CharField(max_length=17, unique=True)
|
python
|
from typing import Any, Dict
import attr
from targets.config import is_in_memory_cache_target_value
@attr.s(frozen=True)
class TargetCacheKey(object):
target = attr.ib(converter=str)
value_type = attr.ib(converter=str)
class TargetCache(object):
def __init__(self, cache=None):
self._cache = cache if cache is not None else {}
def get_cache_group(self):
# type: () -> Dict[TargetCacheKey, Any]
return self._cache
def set(self, value, key):
if not self.enabled:
return
cache = self.get_cache_group()
if cache is not None:
cache[key] = value
def get(self, key):
if not self.enabled:
return
cache = self.get_cache_group()
if cache is not None:
return cache.get(key)
def has(self, key):
cache = self.get_cache_group()
if cache is not None:
return key in cache
return False
def __getitem__(self, item):
return self.get(key=item)
def __setitem__(self, key, value):
return self.set(value=value, key=key)
def __contains__(self, item):
return self.has(key=item)
def clear_for_targets(self, targets_to_clear):
if not targets_to_clear:
return
cache = self.get_cache_group()
if not cache:
return
targets_to_clear = set(targets_to_clear)
for key in list(cache.keys()):
if key.target in targets_to_clear and key in cache:
del cache[key]
def clear(self):
cache = self.get_cache_group()
if cache:
cache.clear()
def clear_all(self):
self._cache.clear()
@property
def enabled(self):
return is_in_memory_cache_target_value()
TARGET_CACHE = TargetCache()
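# A minimal usage sketch (names and values hypothetical); every dict-style
# operation is a no-op while in-memory target caching is disabled:
#
#     key = TargetCacheKey(target="/tmp/out.parquet", value_type="DataFrame")
#     TARGET_CACHE[key] = dataframe     # stored only while caching is enabled
#     cached = TARGET_CACHE[key]        # None when disabled or missing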
|
python
|
import datetime
from Module.MainThread_Socket_Client import MainThread_Socket_Client
from Module.SocketServer_Client import SocketServer_Client
class Socket_Client_Core():
def __init__(self):
try:
self.MainThread_Socket_Client=MainThread_Socket_Client
self.SocketServer_Client=SocketServer_Client
print(datetime.datetime.now(),self.__class__,' Ready')
        except Exception:
            raise
|
python
|
from tqdm import tqdm
from src.reranker.lambdamart import LambdaMart
from src.interface.corpus import Corpus
from src.interface.iohandler import InputOutputHandler
from src.interface.features import FeatureEngineer
# import src.evaluation.validate_run as validate
OUT = f"./evaluation/fairRuns/submission_lambdamart-full{i}.json"
QUERIES_EVAL = "./evaluation/fair-TREC-evaluation-sample.json"
SEQUENCE_EVAL = "./evaluation/fair-TREC-evaluation-sequences.csv"
QUERIES_TRAIN = "./training/fair-TREC-training-sample-cleaned.json"
SEQUENCE_TRAIN = "./training/training-sequence-full.tsv"
corpus = Corpus()
ft = FeatureEngineer(corpus)
input_train = InputOutputHandler(corpus,
fsequence=SEQUENCE_TRAIN,
fquery=QUERIES_TRAIN)
input_test = InputOutputHandler(corpus,
fsequence=SEQUENCE_EVAL,
fquery=QUERIES_EVAL)
lambdamart = LambdaMart(ft)
lambdamart.train(input_train)
lambdamart.predict(input_test)
input_test.write_submission(lambdamart, outfile=OUT)
# args = validate.Args(queries=QUERIES_EVAL, query_sequence_file = SEQUENCE_EVAL, run_file=OUT)
# validate.main(args)
|
python
|
# coding: utf-8
import time
i=0
while(i<20):
print('-----------main--------[',i,']')
i+=1
time.sleep(1)
print("ok man yeaaaaahh!")
#send block --> input IP and message
#message='abcedf'
#IP='127.0.0.1'
#send_len=client.sendto(message.encode('utf-8'),(IP,recieve_port))
#recieve block -->None
#rx_message,addr=server.recvfrom(M_SIZE)
#print(rx_message.decode('utf-8'))
|
python
|
#main.py
# set up track structure
# this one loops over alpha and epsilon
import numpy as np
from placerg.funcs import *
from placerg.funcsrg import *
from placerg.objects import *
from placerg.runfunc import *
N0 = 2048 # number of cells
nstim = 10 # number of nonplace stimuli
percell= 1.0 # probability that each field is accepted
placeprob = 'None'
bothprob = 0.5
time = 0.1  # plain float; np.float was deprecated and later removed from NumPy
phi=1.0 # multiply by this constant to adjust overall activity of
# nonplace cells
eta = 6.0 # multiply by this constant to increase overall activity of
# network
epsilon= -16./6.
runsim(N0, nstim, percell, placeprob, bothprob, time, phi, eta, epsilon)
|
python
|
#Created by Oli MacPherson and Ben O'Sullivan!
#For use only with Python 2 at the moment, cause I can't be bothered changing all the raw_inputs.
import sys, os, random
import time
intro_phrases = ["The world is changing. \n",
"Humanity's voracious appetite for and consumption of electricity born of burning coal and oil is releasing harmful gas into the atmosphere. \n",
"You will now experience the effects of this upon the world's climate. \n",
"Your objective is to survive for as long as you can, in a world of violent climate change. \n",
"Good luck. \n"]
def intro ():
for phrase in intro_phrases:
print(phrase)
time.sleep(4)
extreme_rain_1()
def enterdaspace ():
answerrr = raw_input()
if answerrr == '1':
print("You enter the airlock, and enter a brand new world. The spacecraft rockets away into space, and you live happily ever after.")
print("Well done! You finished! Keep your eyes open for the second game, coming out in December 2014!")
print("Of course, none of this would have been neccessary if we just looked after the planet a bit more.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
    elif answerrr == '2':
print("You turn away from the airlock, unwilling to trust fate. You die of suffocation, many lonely days later.")
print("You have died. You have failed. Your score is 1000 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
else:
print("Invalid answer. Please type either 1 or 2.")
enterdaspace()
def spacecraftflying ():
print("The spacecraft is soaring overhead. Your head looks up at it's ceiling. You wonder whether you should go explore.")
answerr = raw_input("Quick! Do you remember. Did you open a window (1) or not (2)? No lying please.")
if answerr == '1':
print("You run out of air, and the vacuum sucks you into space. You manage to grab a spacesuit, and hurredly put it on. You look back at the spacecraft, and realise, Venus is too far away. But just as you begin to despair, a new spacecraft appears. It opens it's airlock. Do you enter (1), or not (2)?")
enterdaspace()
if answerr == '2':
print("The spacecraft rocks wildly, as it enters Venus's atmosphere. You run, but lose control of the craft. You crash. And. Burn.")
print("You have died. You have failed. Your score is 950 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
else:
print("Invalid answer. Please type either 1 or 2.")
spacecraftflying()
def spacecrafttakeoff ():
print("The spacecraft begins to shake. There are lizards about! Do you take off (1) or not (2)?")
spacecraft = raw_input()
if spacecraft == '1':
print("You strap yourself in, and key the ignition. The spacecraft rockets off into the atmosphere.")
spacecraftflying()
    elif spacecraft == '2':
print("The spacecraft topples over as the lizards begin to rip up the landing gear. The spacecraft ignites, exploding, melting the flesh from your bones.")
print("You have died. You have failed.Your score is 900 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
else:
print("Invalid answer. Please type either 1 or 2.")
spacecrafttakeoff()
def doyuodiewindow ():
windowopen = raw_input("Do you open a window (1) or not (2)?")
if windowopen == '1':
windowisopen = True
spacecrafttakeoff()
    elif windowopen == '2':
windowisopen = False
spacecrafttakeoff()
else:
print("Invalid answer. Plese type '1' or '2'.")
doyuodiewindow()
def campornot ():
camppls = raw_input("")
if camppls == '1':
print("You and Michael begin to retrieve the camping equipment from your bags, but before you can, the lizard's friends come roaring out of the desert, and ravage your camp. You are picked up and slowly ripped in two.")
time.sleep(2)
print("You have died. You have failed. Your score is 850 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
    elif camppls == '2':
print("You and Michael hustle up the ramp. Once inside, you realise it is extremely humid and hot. You are sweating very much.")
time.sleep(2)
doyuodiewindow()
else:
print("Invalid answer. Please type either '1' or '2'.")
campornot()
def whichway ():
theway = (raw_input("Do you go towards the spacecraft to leave immediately (1), or to the next settlement, for food and water (2)?").upper()).replace(" ", "")
if theway == '1':
print("You point at the very far off spacecraft, and beckon Michael towards you. The two of you begin to walk.")
time.sleep(2)
print("After two hours in the desert, you are hopelessly lost. You fall to the ground, weakened. Michael has already given up, and died. You crawl towards the ship, determined to make it. You don't even realise it when you slip into death.")
time.sleep(2)
print("You have died. You have failed. Your score is 800 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
    elif theway == '2':
        print("You set off towards the settlement, and within the hour, you arrive, and replenish your supplies. By the time you eventually reach the spacecraft, it is nightfall. Do you set up camp (1) or do you go inside immediately (2)?")
campornot()
else:
print("Invalid answer. Please type either '1' or '2'.")
whichway()
def backatthehousetrekstart ():
print("Back at the house, you ready yourself and the team for the journey. Your team runs out of water, and John dies of dehydration, but you and Michael continue on.")
time.sleep(3)
print("You and Michael leave the house, and Michael asks you which way we should go.")
whichway()
def whatthefdoyoudo ():
whatdoyoudo = (raw_input("Do you kill him (1), capture him (2), or run (3)?").upper()).replace(" ", "")
if whatdoyoudo == "1":
print("You move forwards, and slice down with the sword. The man's head rolls away.")
time.sleep(2)
print("There are some bullets on the counter. You grab them, then head back to the house.")
backatthehousetrekstart()
if whatdoyoudo == "2":
print("You lower your sword, and point it at the man. He snarls, and throws himself onto your sword, killing you in the process.")
time.sleep(2)
print("You have died. You have failed. Your score is 750 points.")
if whatdoyoudo == "3":
print("You turn to run, but are cut down in two strides.")
print("You have died. You have failed. Your score is 700 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
else:
print("Invalid answer. Please type '1', '2', or '3'.")
whatthefdoyoudo()
def abandonedhouse ():
print("Still grasping your bloody sword, you stride out towards the house. It does indeed appear empty.")
time.sleep(2)
print("You walk to the door, and enter slowly. There is a man inside that you did not hear. He turns, and hisses at you. What do you do?")
whatthefdoyoudo()
def thegun ():
gunanswer = (raw_input("Do you pick up the gun (1), or the sword that's lying beside it (2)?").upper()).replace(" ", "")
if gunanswer == "1":
print("You pick up the gun, load a bullet, and walk outside. The lizard is there, and he charges. You raise the gun and- \n")
time.sleep(5)
print("The gun misfires, and explodes in your hands. You are knocked backwards into the door. As you begin to fall unconscious, you witness the lizard begin to eat you.")
time.sleep(2)
print("You have died. You have failed. Your score is 650 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
if gunanswer == "2":
print("You grab the sword and stride outside. ")
print("The lizard charges, but you raise your sword and cut it's throat as it comes.")
time.sleep(2)
print("You skin the beast and take the skin in to Michael. He smiles and tanks you. Now he needs some bullets, he says. he points to an abandoned house ten metres from the door. There should be some in there.")
time.sleep(3)
abandonedhouse()
else:
print("Invalid answer. Please type either '1' or '2'.")
thegun()
def doyoustillhelp ():
doyoustillhelp1 = (raw_input("Do you still want to help? Yes (1) or no (2)").upper()).replace(" ", "")
if doyoustillhelp1 == "1":
print("Good. Michael says he needs some reptile skin first. He points to a gun by the door, and says to kill the one outside.")
thegun()
if doyoustillhelp1 == "2":
print("The stranger shakes his head regretfully, and turns away. 'You can leave, then.' he says.")
time.sleep(2)
print("You turn, and stalk out, outraged. You walk right into the giant lizard still waiting outside.")
time.sleep(2)
print("You are eaten. You have died. You have failed. your score is 600 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        doyoustillhelp()
def insidethehouse ():
print("You turn, and take in the room around you. It is a neatly furnished home, complete with two men, sitting frozen at a table, in the midst of an afternoon tea. They are staring at you.")
time.sleep(3)
print("They stand, and one walks over. He puts his arm on your shoulder, and says 'You have come at a great moment, stranger. Will you undertake some tasks for me and my friend here?'.")
time.sleep(3)
print("Do you undertake these tasks? Yes (1) or no (2)?")
taskchoice = (raw_input().upper()).replace(" ", "")
if taskchoice == "1":
print("The stranger smiles, and extends his hand. 'I'm Michael,' he says, 'and this is my friend, John. We are trying to get to venus.'.")
time.sleep(3)
print("John chimes in, says it's the only way to excape the now uncontrollable climate change threatening Earth. He says there's a spacecraft in the north, they only have to reach it.")
time.sleep(3)
print("They say they need you to find one or two things for them, then they can leave for the spacecraft.")
time.sleep(3)
doyoustillhelp()
if taskchoice == "2":
print("The stranger shakes his head regretfully, and turns away. 'You can leave, then.' he says.")
time.sleep(2)
print("You turn, and stalk out, outraged. You walk right into the giant lizard still waiting outside.")
time.sleep(2)
print("You are eaten. You have died. You have failed. your score is 600 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
else:
print("Invalid answer. Please type either '1' or '2'.")
insidethehouse()
def housedoorchoice1 ():
housedoorchoice = raw_input("Do you run around the side of the house (1), or do you open the door and enter (2)?").replace(" ","").upper()
if housedoorchoice == "1":
print("You attempt to run, but the lizard catches you before you go three steps. It eats you alive.")
time.sleep(3)
print("You have died. You have failed. Your score is 550 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
if housedoorchoice == "2":
print("You grab the handle and turn it, running in and slamming the door behind you.")
time.sleep(2)
insidethehouse()
else:
print("Invalid answer. Please type either '1' or '2'.")
housedoorchoice1()
def route_choice_1 ():
answer_routechoice1 = (raw_input("Which way do you go? To the house (1) or to the oasis (2)?").replace(" ","")).upper()
if answer_routechoice1 == "T2":
print("You run towards the glorious sight, splashing water from the bucket in your haste to reach it. Once you arrive, however, it fades. It was a mirage.")
time.sleep(3)
print("You gaze at your now empty bucket, then sit, and weep.")
print("You die of thirst. You have failed. Your score is 500 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
if answer_routechoice1 == "1":
print("You stroll towards the house, drinking leisurely from the bucket as you do. You have made it to the door when you hear thundering feet behind you. You turn, and see a massive lizard thundering towards you.")
time.sleep(4)
housedoorchoice1()
else:
print("Invalid answer. Please type either '1' or '2'.")
route_choice_1()
def stuck_in_room_2 ():
print("It seems you only have a few seconds until the door collapses. Do you leave the house through the back window (1), or climb through the trapdoor in the ceiling (2)?")
answer_stuckinroom = (raw_input().upper()).replace(" ", "")
if answer_stuckinroom == "2":
print("You scramble up to the trapdoor, and climb through. The roof, weakened by the downpour, cannot withstand your weight. You fall, and die in pain. You have failed. Your score is 450 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
if answer_stuckinroom == "1":
print("You leap out of the window.")
print("The sun shines down upon you. Suddenly thirsty, you drink from the bucket.")
time.sleep(2)
print("You begin to walk, and are faced with two routes. In the distance, their looms a beautiful oasis. To the left, however, their is a small house that seems uninhabited.")
time.sleep(3)
route_choice_1()
else:
print("Invalid answer. Please type either '1' or '2'.")
stuck_in_room_2()
def extreme_rain_choice_bucket ():
    stuck_in_room_2()
def extreme_rain_2 ():
print("Your door is blown off it's hinges with the force of the gale, and water pours in the door. Do you stay where you are (1), or barricade yourself in the other room (2)?")
extreme_rain_2_answer = (raw_input().replace(" ","")).upper()
if extreme_rain_2_answer == "1":
print("You stay in your chair, watching as the water floods the room.")
time.sleep(2)
print("You doze off, peacefully.")
time.sleep(2)
print("You do not wake up. You have drowned. You have failed. Your score is 400 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
if extreme_rain_2_answer == "2":
print("You run to the door, and dive inside, slamming it behind you.")
stuck_in_room_2()
else:
print("That is not a valid answer. Please type either '1' or '2'.")
extreme_rain_2()
def very_thirsty_rain_big_roof_leak ():
print("You are extremely thirsty. You stumble over to the stream of water dribbling down from the tiles, and drink until your thirst is sated.")
extreme_rain_2()
def rain_1_roof_leak_big ():
time.sleep(1)
print("The leak in the roof gets slowly larger. There are some wooden boards to the side of the room, and some tiles. Do you plug the hole, and if so, with what? Do you leave the hole (1), plug the hole with boards (2), or plug the hole with tiles (3)?")
rain_1_roof_leak_big_answer = (raw_input()).upper()
    if rain_1_roof_leak_big_answer.replace(" ","") == "2":
        print("You pick up the boards, and nail them in place. The leak stops completely.")
        time.sleep(2)
        print("An hour later, you are unbearably thirsty. You gaze at the empty bucket longingly, and at the plugged hole in the ceiling.")
        time.sleep(2)
        print("You open the door, and stumble outside, only to find the sun blinding you with its heat. \n You fall backwards.")
time.sleep(2)
print("You stay there until you die of thirst. You have failed. Your score is 350 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
if rain_1_roof_leak_big_answer.replace(" ","") == "3":
print("You pick up the tiles, and glue them to the roof. Some water still seeps through.")
very_thirsty_rain_big_roof_leak()
if rain_1_roof_leak_big_answer.replace(" ","") == "1":
time.sleep(1)
print("The roof, weakened by the downpour and the hole, collapses, crushing you. \n" + "You have failed. Your score is 300 points.")
print("Would you like to play again?")
answertoques = raw_input().upper()
if answertoques == "YES":
extreme_rain_1()
else:
sys.exit()
else:
print("Invalid answer. Please type '1', 'Plug the hole with tiles', or 'Leave the hole'.")
rain_1_roof_leak_big()
def rain_1_answer_function ():
rainanswer = raw_input("Do you fetch a bucket and catch the water (1), or do you let it drip(2)?").replace(" ","").upper()
if rainanswer == "1":
print("You get a bucket and place it under the dripping ceiling. Water begins to collect in the bottom of the bucket.")
time.sleep(2)
extreme_rain_2()
if rainanswer == "2":
print("You sit back in your chair, content to let the rain drip. It's only a small leak after all.")
time.sleep(2)
rain_1_roof_leak_big()
else:
print("Invalid response. Please type either '1' or '2'.")
rain_1_answer_function()
def extreme_rain_1 ():
print("The rain is bucketing down around you. You are sitting at home, snug inside your small, two-room house. The roof begins to leak, just a little. What do you do?")
rain_1_answer_function()
def introfunction ():
haveyouplayed = raw_input("Have you played this game before?").upper()
if haveyouplayed == "YES":
extreme_rain_1()
if haveyouplayed == "NO":
intro()
else:
print("Invalid answer. Please type either 'yes' or 'no'.")
introfunction()
introfunction()
|
python
|
HW_SOURCE_FILE = __file__
def num_eights(x):
"""Returns the number of times 8 appears as a digit of x.
>>> num_eights(3)
0
>>> num_eights(8)
1
>>> num_eights(88888888)
8
>>> num_eights(2638)
1
>>> num_eights(86380)
2
>>> num_eights(12345)
0
>>> from construct_check import check
>>> # ban all assignment statements
>>> check(HW_SOURCE_FILE, 'num_eights',
... ['Assign', 'AugAssign'])
True
"""
"*** YOUR CODE HERE ***"
if x==0:
return 0
if x%10==8:
return num_eights(x//10)+1
return num_eights(x//10)
def pingpong(n):
"""Return the nth element of the ping-pong sequence.
>>> pingpong(8)
8
>>> pingpong(10)
6
>>> pingpong(15)
1
>>> pingpong(21)
-1
>>> pingpong(22)
-2
>>> pingpong(30)
-2
>>> pingpong(68)
0
>>> pingpong(69)
-1
>>> pingpong(80)
0
>>> pingpong(81)
1
>>> pingpong(82)
0
>>> pingpong(100)
-6
>>> from construct_check import check
>>> # ban assignment statements
>>> check(HW_SOURCE_FILE, 'pingpong', ['Assign', 'AugAssign'])
True
"""
"*** YOUR CODE HERE ***"
def help(i,d,result):
if i==n:
return result
if num_eights(i)>0 or i%8==0:
return help(i+1,-d,result-d)
return help(i+1,d,result+d)
return help(1,1,1)
def missing_digits(n):
"""Given a number a that is in sorted, increasing order,
return the number of missing digits in n. A missing digit is
a number between the first and last digit of a that is not in n.
>>> missing_digits(1248) # 3, 5, 6, 7
4
>>> missing_digits(19) # 2, 3, 4, 5, 6, 7, 8
7
>>> missing_digits(1122) # No missing numbers
0
>>> missing_digits(123456) # No missing numbers
0
>>> missing_digits(3558) # 4, 6, 7
3
>>> missing_digits(35578) # 4, 6
2
>>> missing_digits(12456) # 3
1
>>> missing_digits(16789) # 2, 3, 4, 5
4
>>> missing_digits(4) # No missing numbers between 4 and 4
0
>>> from construct_check import check
>>> # ban while or for loops
>>> check(HW_SOURCE_FILE, 'missing_digits', ['While', 'For'])
True
"""
"*** YOUR CODE HERE ***"
def help(x,last):
if x==0:
return 0
if x%10==last:
return help(x//10,x%10)
return help(x//10,x%10)+last-x%10-1
return help(n//10,n%10)
def get_next_coin(coin):
"""Return the next coin.
>>> get_next_coin(1)
5
>>> get_next_coin(5)
10
>>> get_next_coin(10)
25
>>> get_next_coin(2) # Other values return None
"""
if coin == 1:
return 5
elif coin == 5:
return 10
elif coin == 10:
return 25
def count_coins(change):
"""Return the number of ways to make change using coins of value of 1, 5, 10, 25.
>>> count_coins(15)
6
>>> count_coins(10)
4
>>> count_coins(20)
9
>>> count_coins(100) # How many ways to make change for a dollar?
242
>>> from construct_check import check
>>> # ban iteration
>>> check(HW_SOURCE_FILE, 'count_coins', ['While', 'For'])
True
"""
"*** YOUR CODE HERE ***"
def help(change,smallest_coin):
if change == 0:
return 1
if change<0:
return 0
if smallest_coin ==None:
return 0
without_coin = help(change,get_next_coin(smallest_coin))
with_coin = help(change-smallest_coin,smallest_coin)
return without_coin+with_coin
return help(change,1)
from operator import sub, mul
def make_anonymous_factorial():
"""Return the value of an expression that computes factorial.
>>> make_anonymous_factorial()(5)
120
>>> from construct_check import check
>>> # ban any assignments or recursion
>>> check(HW_SOURCE_FILE, 'make_anonymous_factorial', ['Assign', 'AugAssign', 'FunctionDef', 'Recursion'])
True
"""
return lambda x: (lambda f: f(f, x))(lambda f, n: 1 if n == 1 else mul(n, f(f, sub(n, 1))))
def print_move(origin, destination):
"""Print instructions to move a disk."""
print("Move the top disk from rod", origin, "to rod", destination)
def move_stack(n, start, end):
"""Print the moves required to move n disks on the start pole to the end
pole without violating the rules of Towers of Hanoi.
n -- number of disks
start -- a pole position, either 1, 2, or 3
end -- a pole position, either 1, 2, or 3
There are exactly three poles, and start and end must be different. Assume
that the start pole has at least n disks of increasing size, and the end
pole is either empty or has a top disk larger than the top n start disks.
>>> move_stack(1, 1, 3)
Move the top disk from rod 1 to rod 3
>>> move_stack(2, 1, 3)
Move the top disk from rod 1 to rod 2
Move the top disk from rod 1 to rod 3
Move the top disk from rod 2 to rod 3
>>> move_stack(3, 1, 3)
Move the top disk from rod 1 to rod 3
Move the top disk from rod 1 to rod 2
Move the top disk from rod 3 to rod 2
Move the top disk from rod 1 to rod 3
Move the top disk from rod 2 to rod 1
Move the top disk from rod 2 to rod 3
Move the top disk from rod 1 to rod 3
"""
assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, "Bad start/end"
"*** YOUR CODE HERE ***"
if n==1:
print_move(start,end)
else:
other = 6-start-end
move_stack(n-1,start,other)
print_move(start,end)
move_stack(n-1,other,end)
|
python
|
from cacahuate.auth.base import BaseHierarchyProvider
class BackrefHierarchyProvider(BaseHierarchyProvider):
def find_users(self, **params):
return [
(params.get('identifier'), {
'identifier': params.get('identifier'),
'email': params.get('identifier'),
'fullname': params.get('identifier'),
}),
]
|
python
|
# coding: utf-8
# # Categorical VAE with Gumbel-Softmax
#
# Partial implementation of the paper [Categorical Reparameterization with Gumbel-Softmax](https://arxiv.org/abs/1611.01144)
# A categorical VAE with discrete latent variables. Tensorflow version is 0.10.0.
# # 1. Imports and Helper Functions
# In[1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(0, "./data")
from read_data import load_mnist, next_batch
# get_ipython().magic('matplotlib inline')
slim=tf.contrib.slim
Bernoulli = tf.contrib.distributions.Bernoulli
# In[2]:
def sample_gumbel(shape, eps=1e-20):
"""Sample from Gumbel(0, 1)"""
U = tf.random_uniform(shape,minval=0,maxval=1)
return -tf.log(-tf.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
y = logits + sample_gumbel(tf.shape(logits))
return tf.nn.softmax( y / temperature)
def gumbel_softmax(logits, temperature, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
k = tf.shape(logits)[-1]
#y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,1,keep_dims=True)),y.dtype)
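# Straight-through estimator: the forward pass emits the discrete y_hard,
# while gradients flow through the soft sample y via stop_gradient.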
y = tf.stop_gradient(y_hard - y) + y
return y
# # 2. Build Model
# In[3]:
K=10 # number of classes
N=30 # number of categorical distributions
# In[4]:
# input image x (shape=(batch_size,784))
x = tf.placeholder(tf.float32,[None,784])
# variational posterior q(y|x), i.e. the encoder (shape=(batch_size,200))
net = slim.stack(x,slim.fully_connected,[512,256])
# unnormalized logits for N separate K-categorical distributions (shape=(batch_size*N,K))
logits_y = tf.reshape(slim.fully_connected(net,K*N,activation_fn=None),[-1,K])
q_y = tf.nn.softmax(logits_y)
log_q_y = tf.log(q_y+1e-20)
# temperature
tau = tf.Variable(5.0,name="temperature")
# sample and reshape back (shape=(batch_size,N,K))
# set hard=True for ST Gumbel-Softmax
y = tf.reshape(gumbel_softmax(logits_y,tau,hard=False),[-1,N,K])
# generative model p(x|y), i.e. the decoder (shape=(batch_size,200))
net = slim.stack(slim.flatten(y),slim.fully_connected,[256,512])
logits_x = slim.fully_connected(net,784,activation_fn=None)
# (shape=(batch_size,784))
p_x = Bernoulli(logits=logits_x)
# In[5]:
# loss and train ops
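# KL(q(y|x) || uniform prior), summed over the N categorical distributions:
# sum_k q * (log q - log(1/K)) for each of the N groups.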
kl_tmp = tf.reshape(q_y*(log_q_y-tf.log(1.0/K)),[-1,N,K])
KL = tf.reduce_sum(kl_tmp,[1,2])
elbo=tf.reduce_sum(p_x.log_prob(x),1) - KL
# In[6]:
loss=tf.reduce_mean(-elbo)
lr=tf.constant(0.001)
train_op=tf.train.AdamOptimizer(learning_rate=lr).minimize(loss,var_list=slim.get_model_variables())
# init_op=tf.initialize_all_variables()
init_op = tf.global_variables_initializer()
print "Build model successfully!"
# # 3. Train
# In[7]:
# get data
data = load_mnist("./data/train-images.idx3-ubyte", "./data/train-labels.idx1-ubyte")
print(data[0][0])
print("Data loaded successfully!")
# In[8]:
BATCH_SIZE=100
NUM_ITERS=50000
tau0=1.0 # initial temperature
np_temp=tau0
np_lr=0.001
ANNEAL_RATE=0.00003
MIN_TEMP=0.5
# In[9]:
dat=[]
sess=tf.InteractiveSession()
sess.run(init_op)
for i in range(1,NUM_ITERS):
np_x,np_y=next_batch(data, BATCH_SIZE)
_,np_loss=sess.run([train_op,loss],{
x:np_x,
tau:np_temp,
lr:np_lr
})
if i % 100 == 1:
dat.append([i,np_temp,np_loss])
if i % 1000 == 1:
np_temp=np.maximum(tau0*np.exp(-ANNEAL_RATE*i),MIN_TEMP)
np_lr*=0.9
if i % 5000 == 1:
print('Step %d, ELBO: %0.3f' % (i,-np_loss))
'''
# ## save to animation
# In[10]:
np_x1,_=data.next_batch(100)
np_x2,np_y1 = sess.run([p_x.mean(),y],{x:np_x1})
# In[11]:
import matplotlib.animation as animation
# In[12]:
def save_anim(data,figsize,filename):
fig=plt.figure(figsize=(figsize[1]/10.0,figsize[0]/10.0))
im = plt.imshow(data[0].reshape(figsize),cmap=plt.cm.gray,interpolation='none')
plt.gca().set_axis_off()
#fig.tight_layout()
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
def updatefig(t):
im.set_array(data[t].reshape(figsize))
return im,
anim=animation.FuncAnimation(fig, updatefig, frames=100, interval=50, blit=True, repeat=True)
Writer = animation.writers['imagemagick']
writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=1800)
anim.save(filename, writer=writer)
return
# In[13]:
# save_anim(np_x1,(28,28),'x0.gif')
# save_anim(np_y1,(N,K),'y.gif')
# save_anim(np_x2,(28,28),'x1.gif')
# # 4. Plot Training Curves
# In[14]:
dat=np.array(dat).T
# In[15]:
f,axarr=plt.subplots(1,2)
axarr[0].plot(dat[0],dat[1])
axarr[0].set_ylabel('Temperature')
axarr[1].plot(dat[0],dat[2])
axarr[1].set_ylabel('-ELBO')
# # 5. Unconditional Generation
#
# This consists of sampling from the prior $p_\theta(y)$ and passing it through the generative model.
# In[16]:
M=100*N
np_y = np.zeros((M,K))
np_y[range(M),np.random.choice(K,M)] = 1
np_y = np.reshape(np_y,[100,N,K])
# In[17]:
x_p=p_x.mean()
np_x= sess.run(x_p,{y:np_y})
# In[18]:
np_y = np_y.reshape((10,10,N,K))
np_y = np.concatenate(np.split(np_y,10,axis=0),axis=3)
np_y = np.concatenate(np.split(np_y,10,axis=1),axis=2)
y_img = np.squeeze(np_y)
# In[19]:
np_x = np_x.reshape((10,10,28,28))
# split into 10 (1,10,28,28) images, concat along columns -> 1,10,28,280
np_x = np.concatenate(np.split(np_x,10,axis=0),axis=3)
# split into 10 (1,1,28,280) images, concat along rows -> 1,1,280,280
np_x = np.concatenate(np.split(np_x,10,axis=1),axis=2)
x_img = np.squeeze(np_x)
# In[26]:
f,axarr=plt.subplots(1,2,figsize=(15,15))
# samples
axarr[0].matshow(y_img,cmap=plt.cm.gray)
axarr[0].set_title('Z Samples')
# reconstruction
axarr[1].imshow(x_img,cmap=plt.cm.gray,interpolation='none')
axarr[1].set_title('Generated Images')
# In[31]:
f.tight_layout()
f.savefig('/Users/ericjang/Desktop/gumbel_softmax/code.png')
'''
|
python
|
#!/usr/bin/env python3
""" Checks if a new ts3 version is available """
import re
import smtplib
import json
import sys
from email.mime.text import MIMEText
from email import utils
import argparse
import requests
"""
CONFIG = {}
CONFIG['CHANGELOG'] = ''
CONFIG['URL'] = 'https://www.teamspeak.com/versions/server.json'
CONFIG['MAIL'] = {}
CONFIG['MAIL']['HOST'] = ''
CONFIG['MAIL']['PORT'] = ''
CONFIG['MAIL']['USER'] = ''
CONFIG['MAIL']['PASSWORD'] = ''
CONFIG['MAIL']['TARGET'] = ''
"""
PARSER = argparse.ArgumentParser()
PARSER.add_argument("-c", help="-c config_file")
ARGS = PARSER.parse_args()
class Ts3Notify():
"""docstring for Ts3Notify"""
def __init__(self, config):
super().__init__()
self.config = config
self.result_json = self.load_update_data()
def load_update_data(self, retries=0):
""" loads json from teamspeak.com and handles errors"""
try:
if retries < 5:
request = requests.get(self.config['URL'])
request.raise_for_status()
else:
print("Too many retries. {}".format(retries))
sys.exit(1)
return request.json()
except requests.exceptions.HTTPError as err:
print("HTTP Error. {}".format(err))
return self.load_update_data(retries + 1)
except requests.exceptions.Timeout:
print("Timeout.")
return self.load_update_data(retries + 1)
except requests.exceptions.TooManyRedirects:
print("Too many redirects. Please check the url.")
sys.exit(1)
except requests.exceptions.RequestException as err:
print("Something went wrong {}".format(err))
sys.exit(1)
def get_local_version(self):
""" parse and return the local server version """
pattern = re.compile(r"Server Release ((\d+\.)?(\d+\.)?(\*|\d+))")
versions = ""
with open(self.config['CHANGELOG'], 'r') as changelog:
versions = re.findall(pattern, str(changelog.read()))
return str(versions[0][0])
def get_current_version(self):
""" returns current version """
return str(self.result_json['linux']['x86_64']['version'])
def get_update_url(self):
""" returns current version """
return str(self.result_json['linux']['x86_64']['mirrors']['teamspeak.com'])
def get_checksum(self):
""" returns current version """
return str(self.result_json['linux']['x86_64']['checksum'])
def send_mail(self, message):
""" send mail according to config"""
msg = MIMEText(message)
msg['Subject'] = '[TS3] Your TS3 Server needs an update'
msg['From'] = self.config['MAIL']['USER']
msg['To'] = self.config['MAIL']['TARGET']
msg['Date'] = utils.formatdate(localtime=True)
server = smtplib.SMTP(host=self.config['MAIL']['HOST'], port=self.config['MAIL']['PORT'])
# server.set_debuglevel(1)
server.ehlo()
server.starttls()
server.ehlo()
server.login(self.config['MAIL']['USER'], self.config['MAIL']['PASSWORD'])
server.sendmail(self.config['MAIL']['USER'], self.config['MAIL']['TARGET'], msg.as_string())
def main():
""" load conifg file and create TS3 Notify object """
config = {}
try:
json_config = None
with open(ARGS.c, 'r') as config_file:
json_config = json.load(config_file)
config['CHANGELOG'] = str(json_config['CHANGELOG'])
config['URL'] = str(json_config['URL'])
config['MAIL'] = {}
config['MAIL']['HOST'] = str(json_config['MAIL']['HOST'])
config['MAIL']['PORT'] = str(json_config['MAIL']['PORT'])
config['MAIL']['USER'] = str(json_config['MAIL']['USER'])
config['MAIL']['PASSWORD'] = str(json_config['MAIL']['PASSWORD'])
config['MAIL']['TARGET'] = str(json_config['MAIL']['TARGET'])
except ValueError:
print("No config was found.", file=sys.stderr)
sys.exit()
except KeyError as key_error:
print("Setting not found: {}".format(key_error), file=sys.stderr)
sys.exit()
except FileNotFoundError:
print("No config was found.", file=sys.stderr)
sys.exit()
ts3_notify = Ts3Notify(config)
local_version = ts3_notify.get_local_version()
current_version = ts3_notify.get_current_version()
url = ts3_notify.get_update_url()
checksum = ts3_notify.get_checksum()
if current_version != local_version:
try:
ts3_notify.send_mail("Your Server has version: {}\nAvailable version is: {}\nURL: {}\nChecksum: {}".format(local_version, current_version, url, checksum))
except smtplib.SMTPException as smtp_exception:
print("Could not send an email: {}".format(smtp_exception), file=sys.stderr)
if __name__ == '__main__':
main()
|
python
|
import bsddb3
import struct
import json
import flask
import time
from threading import Thread
from Queue import Queue
from StringIO import StringIO
from sqlalchemy import text
from sqlalchemy.sql.elements import TextClause
from table_pb2 import *
from itertools import product, chain, combinations
from dct import *
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def decode_table(buf):
table = Table()
table.ParseFromString(buf)
return table
def encode_table(schema, rows):
"""
assume everything is uints
@schema list of attr names
@rows
"""
s = Table.Schema()
s.name.extend(schema)
table = Table(schema=s)
# for col in zip(*rows):
# # print "col:", col, len(col)
# for el in col:
# print el
table.cols.extend(Table.Col(val=col) for col in zip(*rows))
return table.SerializeToString()
class DS(object):
"""
Data structures are parameterized so that they are able to answer particular classes of queries.
The encoding number specifies the _type_ of data structure (the class)
The id specifies a particular instance of the data struture.
For example, a datacube data structure with encoding 2 may be instantiated
for queries grouped by (lat, lon, hour), and queries grouped by (hour, month)
Given a data structure, we can represent a query simply with set of parameter values.
Data structures expose methods for offline setup and online serving.
Offline:
setup()
Online:
cost_est(data)
__call__(data)
get_iter(data)
"""
def __init__(self):
self.id = None
self.encoding = None
def setup(self):
"""
setup any offline data structures
"""
pass
def __call__(self, data):
return None
def get_iter(self, data):
return None
@staticmethod
def can_answer(query_template):
"""
@query_template is the output of the client's QueryTemplate.toWire() method.
"""
return False
def cost_est(self, data):
"""
data is the "data" attribute in the output of the client's Query.toWire()
Currently, it's a dictionary mapping param names to their values
"""
return None
class Precompute(DS):
"""
Helper base class for data structures that pre-compute and cache results
"""
name = "precompute"
def __init__(self, db, *args, **kwargs):
super(Precompute, self).__init__()
self.name = Precompute.name
self.db = db
self.fname = kwargs.get("fname", "precompute.cache")
self.cache = bsddb3.hashopen(self.fname)
print "loaded Precompute file %s" % self.fname
print "%d items" % len(self.cache)
def __call__(self, data, block_size=50):
return self.lookup(self.key(data))
def get_iter(self, data, block_size=50):
"""
XXX: note that it ignores block_size right now
"""
block = self.lookup_bytes(self.key(data))
if block:
key = self.key(data)
buf = StringIO()
buf.write(struct.pack("2I", len(key), self.id))
buf.write(struct.pack("%ds" % len(key), key))
buf.write(block)
yield buf.getvalue()
buf.close()
def key(self, data):
"""
This maps the query data to a unique key.
The analogous function in javascript is js/datastruct.js:queryToKey()
THESE FUNCTIONS MUST MATCH!!
"""
return json.dumps(sorted(data.items())).replace(" ", "")
def setup_cache(self, query_iterable):
"""
This is called ahead of time to create data structures
query_iterable is an iterator that yields pairs of (key, db.exec data) to run
"""
for key, exec_args in query_iterable:
cur = self.db.execute(*exec_args)
schema = cur.keys()
rows = cur.fetchall()
self.cache[key] = encode_table(schema, rows)
def lookup(self, key):
s = self.lookup_bytes(key)
if not s: return None
return decode_table(s)
def lookup_bytes(self, key):
return self.cache.get(key, None)
def cost_est(self, data):
if self.key(data) in self.cache:
return 100
return None
class GBDataStruct(Precompute):
name = "gbquery"
def __init__(self, db, spec, *args, **kwargs):
"""
spec = {
select: { alias: expr },
fr: <tablename>,
groupby: [ "expr", ... ],
params: { attr: <data type "num" or "str"> }
}
"""
fname = "gb_%s.cache" % ",".join(spec["groupby"])
kwargs['fname'] = kwargs.get("fname", fname)
super(GBDataStruct, self).__init__(db, *args, **kwargs)
self.name = GBDataStruct.name
self.spec = spec
self.encoding = 1
def spec_to_sql(self, params):
qtemplate = """ SELECT %s FROM %s WHERE %s GROUP BY %s """
s = ["%s AS %s" % (expr, alias) for alias, expr in self.spec['select'].items()]
s = ", ".join(s)
g = ", ".join(self.spec["groupby"])
w = ["true"]
for attr, val in params.iteritems():
if attr in self.spec['params']:
if self.spec['params'][attr] == "num":
w.append("%s = %s" % (attr, val))
else:
w.append("%s = '%s'" % (attr, val))
w = w and " AND ".join(w)
q = text(qtemplate % (s, self.spec["fr"], w, g))
return q
def setup_cache(self, param_ranges):
def f():
all_names = param_ranges.keys()
for names in powerset(all_names):
print names
iters = map(param_ranges.get, names)
for i, vals in enumerate(product(*iters)):
data = dict(zip(names, vals))
key = self.key(data)
q = self.spec_to_sql(data)
yield key, [q]
Precompute.setup_cache(self, f())
@staticmethod
def can_answer(query_template):
"""
@query_template is the output of the client's QueryTemplate.toWire() method.
"""
return query_template.get('name') == GBDataStruct.name
class ProgressiveDataStruct(Precompute):
"""
Python version of js/progds.js
The signature and spec are the same as GBdataStruct, however it encodes data progressively
TODO: the data structure that you will implement and fill in
TODO: write a custom get_iter() in order to return blocks of partial results
"""
name = "progressive"
def __init__(self, db, spec, *args, **kwargs):
"""
spec = {
select: { alias: expr },
fr: <tablename>,
groupby: [ "expr", ... ],
params: { attr: <data type "num" or "str"> }
}
"""
# name of the file cache
fname = "prog_%s.cache" % ",".join(spec["groupby"])
kwargs['fname'] = kwargs.get("fname", fname)
super(ProgressiveDataStruct, self).__init__(db, *args, **kwargs)
self.name = ProgressiveDataStruct.name
self.spec = spec
self.encoding = 2
def cost_est(self, data):
"""
Force the cost estimate for progressive data structure to be
lower than group by data structure (10 vs 100)
"""
if self.key(data) in self.cache:
return 10
return None
def spec_to_sql(self, params):
"""
Translates query parameters into an actual SQL string
Identical to function in GBDataStruct
"""
qtemplate = """ SELECT %s FROM %s WHERE %s GROUP BY %s """
s = ["%s AS %s" % (expr, alias) for alias, expr in self.spec['select'].items()]
s = ", ".join(s)
g = ", ".join(self.spec["groupby"])
w = ["true"]
for attr, val in params.iteritems():
if attr in self.spec['params']:
if self.spec['params'][attr] == "num":
w.append("%s = %s" % (attr, val))
else:
w.append("%s = '%s'" % (attr, val))
w = w and " AND ".join(w)
q = text(qtemplate % (s, self.spec["fr"], w, g))
return q
def setup_cache(self, param_ranges):
def f():
"""
This generator yields all SQL queries that should be precomputed
"""
all_names = param_ranges.keys()
for names in powerset(all_names):
print names
iters = map(param_ranges.get, names)
for i, vals in enumerate(product(*iters)):
data = dict(zip(names, vals))
key = self.key(data)
q = self.spec_to_sql(data)
yield key, [q]
for key, exec_args in f():
cur = self.db.execute(*exec_args)
schema = cur.keys()
rows = cur.fetchall()
self.cache[key] = self.progressively_encode_table(schema, rows)
print "cache contains %d items" % len(self.cache)
def progressively_encode_table(self, schema, rows):
"""
You can byte encode the progressive data using protocol buffers, or something custom.
If you plan to do things custom, take a look at StringIO and struct.pack/unpack
There are examples above
"""
# TODO: implement me
#raise Exception("Implement Me!")
"""
assume everything is uints
@schema list of attr names
@rows
"""
data = []
index = []
s = Table.Schema()
s.name.extend(schema)
table = Table(schema=s)
cnt = 0
temp_tuple = rows
temp_tuple.sort(key=lambda t: t[1])
#print "temp", temp_tuple
for col in zip(*temp_tuple):
if cnt == 0:
for el in col:
data.append(el)
#print "list:", data
# DCT
dct = DCT(data)
encode = dct.encodeDct2()
dct.quantize(encode)
data = encode
else:
index = list(col)
cnt = cnt + 1
encodedRows = zip(data, index)
#print "tuple:", encodedRows
# get data & index lists which have no 0 element
dataNot0 = []
indexNot0 = []
length = len(data)
for x in xrange(0,length):
if data[x] != 0:
dataNot0.append(data[x])
indexNot0.append(index[x])
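# Append a sentinel pair (original length, -1) so a decoder can restore
# the dropped zero DCT coefficients to the full array length.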
dataNot0.append(length)
indexNot0.append(-1)
encodedRowsNot0 = zip(dataNot0, indexNot0)
#print encodedRowsNot0, len(encodedRowsNot0)
table.cols.extend(Table.Col(val=col) for col in zip(*encodedRowsNot0))
return table.SerializeToString()
@staticmethod
def can_answer(query_template):
"""
@query_template is the output of the client's QueryTemplate.toWire() method.
"""
return query_template.get('name') in (GBDataStruct.name, ProgressiveDataStruct.name)
# Currently deprecated
class SQLTemplates(Precompute):
"""
Precomputes templated queries
The query template is expresed as a SQL string with parameters
SELECT a - a%:a, avg(d)::int
FROM data
WHERE b = :b
GROUP BY a - a%:a
The above parameterized query can vary the filter condition
and the discretization of the group by attribute
TODO: use this data structure
"""
name = "templates"
def __init__(self, db, query_templates, *args, **kwargs):
super(SQLTemplates, self).__init__(db, *args, **kwargs)
def to_text(qstr):
if not isinstance(qstr, TextClause):
return text(qstr)
return qstr
self.name = SQLTemplates.name
self.query_template = to_text(query_templates)
self.encoding = 2
def key(self, data):
keys = tuple([c.key for c in self.query_template.get_children()])
return hash(tuple(map(data.get, keys)))
def setup_cache(self, param_ranges):
"""
This is called ahead of time to create data structures
@param_ranges dictionary of param name --> iterable of assignable values
"""
def f():
names = param_ranges.keys()
iters = map(param_ranges.get, names)
for i, vals in enumerate(product(*iters)):
data = dict(zip(names, vals))
yield self.key(data), [self.query_template, data]
print "cache contains %d items" % len(self.cache)
return Precompute.setup_cache(self, f())
# register relevant data structure classes
ds_klasses = [GBDataStruct, SQLTemplates, ProgressiveDataStruct]
|
python
|
from collections import deque
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
res = 0
def diameterOfBinaryTree(self, root: TreeNode) -> int:
self.dfs(root)
return self.res
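# dfs returns the height of a subtree in edges (-1 for an empty node);
# the longest path through a node spans left + right + 2 edges.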
def dfs(self, node: TreeNode) -> int:
if not node:
return -1
left = self.dfs(node.left)
right = self.dfs(node.right)
self.res = max(self.res, left + right + 2)
return max(left, right) + 1
# def diameterOfBinaryTree(self, root: TreeNode) -> int:
# # exception
# if not root:
# return 0
# # left deepest depth + right deepest depth = answer
# # divide subtree into twos (left-right)
# sub_tree =[]
# if root.left:
# left_sub = root.left
# sub_tree.append(left_sub)
# if root.right:
# right_sub = root.right
# sub_tree.append(right_sub)
# ans = []
# # BFS
# for sub in sub_tree:
# queue = deque()
# depth = 1
# queue.append((sub, depth))
# while queue:
# node, depth = queue.popleft()
# if node.left:
# queue.append((node.left, depth+1))
# if node.right:
# queue.append((node.right, depth+1))
# ans.append(depth)
# return sum(ans)
|
python
|
def f(<caret>x):
return 42
|
python
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import scriptcontext as sc
import compas_fofin
__commandname__ = "FoFin_init"
def RunCommand(is_interactive):
sc.sticky["FoFin"] = {
'cablenet' : None,
'settings' : {
'scale.reactions' : 0.1,
'layer' : "FoFin::Cablenet"
}
}
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
RunCommand(True)
|
python
|
import pytest
from geocodr.keys import APIKeys
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request
@pytest.fixture()
def key_file(tmpdir):
key_file = tmpdir.join("keys.csv")
key_file.write(
'key,domains\n'
'wildcard,\n'
'# comment,with,commas,\n'
'multi,example.org;example.com\n'
'single,test.local'
)
yield key_file.strpath
@pytest.mark.parametrize("key,referrer,permitted", [
["single", None, True],
["single", "", True],
["single", "--", True],
["single", "http", True],
["single", "http://", True],
["single", "test.local", True],
["single", "http://test.local", True],
["single", "https://test.local", True],
["single", "http://sub.test.local/path?arg=1", True],
["single", "http://sub.test.local", True],
["single", "https://subtest.local", False],
["single", "http://sub.test.local.com", False],
["single", "https://1.2.3.4", False],
["multi", "https://example.org", True],
["multi", "https://example.com", True],
["multi", "https://example.net", False],
["wildcard", "https://example.net", True],
["wildcard", "https://1.2.3.4", True],
])
def test_api_key(key_file, key, referrer, permitted):
a = APIKeys(key_file)
headers = []
if referrer:
headers.append(('Referer', referrer))
builder = EnvironBuilder(method='GET',
query_string={'key': key},
headers=headers)
req = Request(builder.get_environ())
assert a.is_permitted(req) == permitted
|
python
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from humanoid_league_msgs.msg import BallRelative, ObstaclesRelative, ObstacleRelative, Strategy, GameState, RobotControlState
from geometry_msgs.msg import Point, PoseWithCovarianceStamped, Pose2D
import math
import yaml
import rospkg
import os
import tf
# Dictionary for roles
actionDecoder = {'ROLE_IDLING': 0, 'ROLE_OTHER': 1, 'ROLE_STRIKER': 2, 'ROLE_SUPPORTER': 3, 'ROLE_DEFENDER': 4, 'ROLE_GOALIE': 5 }
# Loads the dictionary of coordinates from pathmaker
def getCoordinates(filename):
rp = rospkg.RosPack()
fname = os.path.join(rp.get_path('bitbots_live_tool_rqt'), 'resource', 'paths', filename)
with open(fname, "r") as file:
positions = yaml.load(file) # da ist ein Dictonary drin
file.close()
return positions.get("positions")
def vec_rotate(x, y, angle_rad):
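# standard 2D rotation of the vector (x, y) by angle_rad radians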
xneu = x * math.cos(angle_rad) - y * math.sin(angle_rad)
yneu = y * math.cos(angle_rad) + x * math.sin(angle_rad)
return [xneu, yneu]
def publisher_main():
# initialize the publisher node
rospy.init_node('publisher')
print('started publisher node')
pub = rospy.Publisher('ball_relative', BallRelative, queue_size = 10)
pubRobo = rospy.Publisher('amcl_pose', PoseWithCovarianceStamped, queue_size = 10)
pubTeam = rospy.Publisher('obstacles_relative', ObstaclesRelative, queue_size = 10)
pubStrategy = rospy.Publisher('strategy', Strategy, queue_size = 10)
pubGame = rospy.Publisher('gamestate', GameState, queue_size = 10)
pubState = rospy.Publisher('robot_state', RobotControlState, queue_size = 10)
pubTarget = rospy.Publisher('move_base_simple/goal', Pose2D, queue_size = 10)
rate = rospy.Rate(10)
timeCounter = 30
roboActionCounter = 30
firsthalf = True
durationHalfGame = 60
# Coordinates from pathMaker ========================================================================================
# robo1 with pathmaker
robo1 = getCoordinates("robo4.yaml")
robo1Length = len(robo1)
robo1Counter = 1
# teammates with pathmaker
teammate1 = getCoordinates('TeamClubMate1.yaml')
team1Length = len(teammate1) # number of entries
team1Counter = 1
teammate2 = getCoordinates('TeamClubMate2.yaml')
team2Length = len(teammate2)
team2Counter = 1
# opponents with pathmaker
opponent1 = getCoordinates('SuperScaryOpponent.yaml')
op1Length = len(opponent1)
op1Counter = 1
# undefined obstacles with pathmaker
undef = getCoordinates('undef.yaml')
undefLength = len(undef)
undefCounter = 1
# ball with pathmaker
ball = getCoordinates('HeartBall.yaml')
ballLength = len(ball)
ballCounter = 1
#teammate1[0 % length].get('x') # using a counter instead of 0 loops over the path
#teammate1[1].get('x') # x value at the first position
while not rospy.is_shutdown():
# Ball with pathmaker
msgBall = BallRelative()
msgBall.header.stamp = rospy.Time.now()
msgBall.header.frame_id = "base_link"
msgBall.ball_relative.y = ball[ballCounter % ballLength].get('x')
msgBall.ball_relative.x = ball[ballCounter % ballLength].get('y')
msgBall.confidence = 1.0
pub.publish(msgBall)
ballCounter += 1
# Robo1 with pathmaker
msgRobo = PoseWithCovarianceStamped()
msgRobo.header.stamp = rospy.Time.now()
msgRobo.pose.pose.position.x = robo1[int(robo1Counter) % robo1Length].get('x')
msgRobo.pose.pose.position.y = robo1[int(robo1Counter) % robo1Length].get('y')
# Angle of robot in quaternions
angle = robo1[int(robo1Counter) % robo1Length].get('ang')
quaternion = tf.transformations.quaternion_from_euler(0, 0, float(angle))
msgRobo.pose.pose.orientation.x = quaternion[0]
msgRobo.pose.pose.orientation.y = quaternion[1]
msgRobo.pose.pose.orientation.z = quaternion[2]
msgRobo.pose.pose.orientation.w = quaternion[3]
pubRobo.publish(msgRobo)
# Role of Robo1, gets information from pathMaker
msgStrategy = Strategy()
msgRoleString = robo1[int(robo1Counter) % robo1Length].get('action')
msgStrategy.role = actionDecoder.get(msgRoleString) # actionDecoder maps the role string to its int code via the dictionary
# Action of Robo1, changes after short time (roboActionCounter)
if roboActionCounter == 0:
msgStrategy.action = 3 # TRYING_TO_SCORE
else:
msgStrategy.action = 2 # GOING_TO_BALL
pubStrategy.publish(msgStrategy)
roboActionCounter -= 1
roboActionCounter = max(roboActionCounter, 0)
robo1Counter += 1
# Teammates with pathmaker, contains list of teammates
msgTeam = ObstaclesRelative()
msgTeam1 = ObstacleRelative()
msgTeam1.color = 2 # magenta
msgTeam1.position.x = teammate1[int(team1Counter) % team1Length].get('x')
msgTeam1.position.y = teammate1[int(team1Counter) % team1Length].get('y')
msgTeam2 = ObstacleRelative()
msgTeam2.color = 2 # magenta
msgTeam2.position.x = teammate2[int(team2Counter) % team2Length].get('x')
msgTeam2.position.y = teammate2[int(team2Counter) % team2Length].get('y')
# Opponents with pathmaker, contains list of opponents
msgOp = ObstaclesRelative()
msgUndef = ObstacleRelative()
msgUndef.color = 1 # undef
msgUndef.position.x = undef[int(undefCounter) % undefLength].get('x')
msgUndef.position.y = undef[int(undefCounter) % undefLength].get('y')
msgOp1 = ObstacleRelative()
msgOp1.color = 3 # cyan
msgOp1.position.x = opponent1[int(op1Counter) % op1Length].get('x')
msgOp1.position.y = opponent1[int(op1Counter) % op1Length].get('y')
# Publish all obstacles
msgTeam.obstacles = [msgTeam1, msgTeam2, msgOp1, msgUndef]
pubTeam.publish(msgTeam)
team1Counter += 1
team2Counter += 1
op1Counter += 1
undefCounter += 1
# GameState msgs ===========================================================================================
# Penalty: Seconds till unpenalized and boolean
msgGame = GameState()
msgGame.header.stamp = rospy.Time.now()
msgGame.secondsTillUnpenalized = timeCounter
# Penalty boolean
msgGame.penalized = timeCounter > 0
# Sets halftime and rest secs
msgGame.firstHalf = firsthalf
msgGame.secondsRemaining = durationHalfGame
# Sets Score
msgGame.ownScore = 7
msgGame.rivalScore = 1
# team colors
msgGame.teamColor = 1 # magenta
pubGame.publish(msgGame)
timeCounter -= 1
timeCounter = max(timeCounter, 0)
durationHalfGame -= 1
if durationHalfGame == 0:
durationHalfGame = 60
firsthalf = False
# Sets hardware state
msgState = RobotControlState()
msgState.state = 10
pubState.publish(msgState)
# Target
msgTarget = Pose2D()
if firsthalf:
msgTarget.x = 3.5
msgTarget.y = 2.0
else:
msgTarget.x = 2.0
msgTarget.y = 1.0
pubTarget.publish(msgTarget)
rate.sleep()
if __name__ == '__main__':
try:
publisher_main()
except rospy.ROSInterruptException:
pass
|
python
|
import os
import sys
sys.path.append('../')
import numpy as np
import convert_weights
import tensorflow as tf
############## REPRODUCIBILITY ############
tf.set_random_seed(0)
np.random.seed(0)
###########################################
from keras.models import load_model
from keras.models import Sequential, Model
from keras.utils.vis_utils import plot_model
from keras.layers import Dense, BatchNormalization, Input
input = x = Input((5,))
for i in range(3):
x = Dense(30)(x)
x = BatchNormalization()(x)
# MULTIPLE OUTPUTS
output1 = Dense(1)(x)
output2 = Dense(1)(x)
output3 = Dense(1)(x)
# CREATE THE MODEL
multi_output_model = Model(input,outputs=[output1, output2, output3])
multi_output_model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy']
)
# SAVE TO FILE FOR PARSING
multi_output_model.save('multi_output_model.h5')
# CONVERT TO TXT
convert_weights.h5_to_txt('multi_output_model.h5', 'single_output_model.txt')
# CONVERT TO H5
convert_weights.txt_to_h5('single_output_model.txt', 'single_output_model.h5')
single_output_model = load_model('single_output_model.h5')
# GRAPHIC PLOT OF MODEL
plot_model(multi_output_model, to_file='../../Figures/multi_output_model.png', show_shapes=True, show_layer_names=True)
plot_model(single_output_model, to_file='../../Figures/single_output_model.png', show_shapes=True, show_layer_names=True)
# TEST INPUT
input = np.array(
[[1,2,3,4,5]]
)
# COMPARE PREDICTIONS FROM MULTI OUTPUT AND SINGLE OUTPUT MODELS
multiple_output = np.array(multi_output_model.predict(input)).squeeze()
single_output = single_output_model.predict(input).squeeze()
assert np.allclose(multiple_output, single_output)
print('MULTI-OUTPUT:', multiple_output)
print('SINGLE-OUTPUT:', single_output)
|
python
|
from django.contrib import admin
# DJANGAE
from djangae.contrib.gauth.sql.models import GaeUser
admin.site.register(GaeUser)
|
python
|
from .handler import handler
|
python
|
"""OVK learning, unit tests.
The :mod:`sklearn.tests.test_learningrate` tests the different learning rates.
"""
import operalib as ovk
def test_constant():
"""Test whether constant learning rate."""
eta = ovk.Constant(1)
assert eta(10) == 1
def test_invscaling():
"""Test whether inverse scaling learning rate."""
eta = ovk.InvScaling(1., 2.)
assert eta(10) == 1. / 10. ** 2.
|
python
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Discovery
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from mongoengine.document import Document
from mongoengine.fields import (
StringField,
IntField,
ListField,
DictField,
DateTimeField,
FloatField,
)
# NOC modules
from noc.core.mongo.fields import ForeignKeyField
from noc.sa.models.managedobject import ManagedObject
@six.python_2_unicode_compatible
class Discovery(Document):
meta = {
"collection": "noc.schedules.inv.discovery",
"strict": False,
"auto_create_index": False,
}
job_class = StringField(db_field="jcls")
schedule = DictField()
ts = DateTimeField(db_field="ts")
last = DateTimeField()
last_success = DateTimeField(db_field="st")
last_duration = FloatField(db_field="ldur")
last_status = StringField(db_field="ls")
status = StringField(db_field="s")
managed_object = ForeignKeyField(ManagedObject, db_field="key")
data = DictField()
traceback = DictField()
runs = IntField()
faults = IntField(db_field="f")
log = ListField()
def __str__(self):
return "%s: %s" % (self.managed_object, self.job_class)
|
python
|
# coding: utf-8
from common.db import write_session_scope
from mall_spider.common.enums import TaobaoPageType, TaobaoTaskType
from mall_spider.dao.stream_handle_task_dao import get_stream_handle_task_dao
from mall_spider.dao.stream_opt_data_dao import get_stream_opt_data_dao
from mall_spider.dao.stream_unhandle_task_dao import get_stream_unhandle_task_dao
from mall_spider.model.cmm_sys_stream_unhandle_task import CmmSysStreamUnhandleTask
from mall_spider.spiders.actions.context import Context
from mall_spider.spiders.actions.default_action import DefaultAction
class TaobaoListPagePersistAction(DefaultAction):
def build_integrate_infos(self, integrate_result):
# listItem = integrate_result['listItem']
listItem = integrate_result.get('listItem', [])
return list(
{'itemId': item['item_id'], 'title': item['title'], 'userType': item['userType'],
'originalPrice': item['originalPrice'], 'price': item['price'], 'priceWap': item['priceWap'],
'category': item.get('category', '0')} for item in
listItem)
def build_sale_infos(self, sale_result):
# listItem = sale_result['listItem']
listItem = sale_result.get('listItem', [])
return list(
{'itemId': item['item_id'], 'title': item['title'], 'userType': item['userType'],
'originalPrice': item['originalPrice'], 'price': item['price'], 'priceWap': item['priceWap'],
'category': item['category']} for item in
listItem)
def do_execute(self, context):
integrate_result = context.get(Context.KEY_TAOBAO_INTERGRATE_RESULT)
sale_result = context.get(Context.KEY_TAOBAO_SALE_RESULT)
with write_session_scope() as session:
good = context.get(Context.KEY_GOOD_DICT)
stream_opt_data_dao = get_stream_opt_data_dao(session=session)
stream_unhandle_task_dao = get_stream_unhandle_task_dao(session=session)
stream_handle_task_dao = get_stream_handle_task_dao(session=session)
opt_data_entity = {
'raw_data': {
'integrateResult': integrate_result,
'saleResult': sale_result,
'goodResult': good
},
'type': int(TaobaoPageType.taobao_list)
}
entity = stream_opt_data_dao.insert(**opt_data_entity)
unhandle_task_entity = {
'raw_data': {
'integrateInfos': self.build_integrate_infos(integrate_result),
'saleInfos': self.build_sale_infos(sale_result),
'goodResult': good
},
'type': int(TaobaoTaskType.taobao_list),
'origin_id': entity.id,
'date': good['date']
}
stream_unhandle_task_dao.insert(**unhandle_task_entity)
task = context.get(Context.KEY_CURRENT_TASK)
stream_unhandle_task_dao.delete(_filter=[CmmSysStreamUnhandleTask.id == task.id])
stream_handle_task_dao.insert(**{
'type': task.type,
'raw_data': task.raw_data,
'origin_id': task.origin_id,
'date': good['date']
})
# stream_handle_task_dao.insert_entity(entity=task)
# stream_opt_data_entities = list()
# # stream_opt_data_entity = CmmSysStreamOptData()
# # stream_opt_data_entity.raw_data = integrate_result
#
# stream_opt_data_entities.append({'raw_data': integrate_result, 'type':})
# stream_opt_data_entities.append({'raw_data': sale_result})
#
# # stream_opt_data_dao.insert_entity()
# stream_opt_data_dao.bulk_insert(stream_opt_data_entities)
return True
def on_create(self, context):
pass
def on_start(self, context):
pass
def on_complete(self, context):
pass
def on_destroy(self, context):
pass
|
python
|
from qtpy import QtCore, QtGui
class DataTreeModel(QtCore.QAbstractItemModel):
def __init__(self, data_node, parent=None):
super(DataTreeModel, self).__init__(parent)
self.data_node = data_node
self.rootItem = DataTreeItem(self.data_node)
data_node.changed.connect(self.on_node_changed, sender=data_node)
def columnCount(self, parent):
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self.rootItem.columnCount()
def data(self, index, role):
"""
:type index: QtCore.QModelIndex
:type role: int
"""
if not index.isValid():
return None
item = index.internalPointer()
if role == QtCore.Qt.DisplayRole:
return item.data(index.column())
elif role == QtCore.Qt.DecorationRole:
if item.data_node.icon:
return item.data_node.icon
else:
if item.data_node.children:
return QtGui.QIcon.fromTheme("folder")
elif item.data_node.has_subtree():
return QtGui.QIcon.fromTheme("package-x-generic")
else:
return None # TODO: provide default
return None
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
if not index.isValid():
return QtCore.QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent
if parentItem == self.rootItem:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def headerData(self, section, orientation, role):
if (orientation == QtCore.Qt.Horizontal
and role == QtCore.Qt.DisplayRole):
return ["Name", "Type", "Shape"][section]
return None
def on_node_changed(self, sender):
self.reload()
def reload(self):
self.rootItem.reload_items()
self.modelReset.emit()
@property
def title(self):
return self.data_node.title
class DataTreeItem:
def __init__(self, data_node, parent=None, subtrees=True):
self.parent = parent
self.data_node = data_node
self.reload_items()
def data(self, column):
if column == 0:
return self.data_node.title
elif column == 1:
node_type = self.data_node.node_type
if self.data_node.has_subtree():
node_type += " (%s)" % self.data_node.subtree().node_type
return node_type
elif self.data_node.has_object():
if column == 2:
if self.data_node.data_object.shape:
return " x ".join(str(dim) for dim in self.data_node.data_object.shape)
return ""
def row(self):
if self.parent:
return self.parent.childItems.index(self)
return 0
def childCount(self):
return len(self.childItems)
def child(self, row):
return self.childItems[row]
def columnCount(self):
return 1
# return 3
def reload_items(self):
self.childItems = []
for node_child in self.data_node.children:
self.childItems.append(DataTreeItem(node_child, self))
if self.data_node.has_subtree():
subtree = self.data_node.subtree()
if subtree:
for tree_child in subtree.children:
self.childItems.append(DataTreeItem(tree_child, self))
|
python
|
import math
style_normal = "\033[0m"
style_great_success = "\033[1;32m"
style_success = "\033[32m"
style_error = "\033[31m"
style_warning = "\033[33m"
style_info = "\033[0m"
style_stealthy = "\033[1;37m"
def __generic_style(c):
def _x(s):
return c + s + style_normal
return _x
success = __generic_style(style_success)
error = __generic_style(style_error)
warning = __generic_style(style_warning)
great_success = __generic_style(style_great_success)
stealthy = __generic_style(style_stealthy)
info = __generic_style(style_info)
def base32hex(s):
ctable = '0123456789abcdefghijklmnopqrstuv'
return "".join([ ctable.index(c) for c in s])
def alphabet(tks):
return set("".join(tks))
def phi(z):
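# standard normal CDF, expressed through the error function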
return (1/2.) * (1 + math.erf(z/math.sqrt(2)))
|
python
|
# pip install feedparser
# pip install notify2
# [Python Desktop News Notifier in 20 lines](http://geeksforgeeks.org/python-desktop-news-notifier-in-20-lines/)
# [Desktop Notifier in Python](https://www.geeksforgeeks.org/desktop-notifier-python/)
import feedparser
import notify2
import os
import time
# https://www.espncricinfo.com/ci/content/rss/feeds_rss_cricket.html
LIVE_SCORES_RSS = 'http://static.cricinfo.com/rss/livescores.xml'
GLOBAL_NEWS_RSS = 'https://www.espncricinfo.com/rss/content/story/feeds/0.xml'
def parseFeed():
f = feedparser.parse(LIVE_SCORES_RSS)
ICON_PATH = os.getcwd() + "/icon.ico"
notify2.init('News Notify')
for item in f['items']:
n = notify2.Notification(item['title'], item['summary'], icon=ICON_PATH)
n.set_urgency(notify2.URGENCY_NORMAL)
n.set_timeout(15000)
n.show()
time.sleep(1200)
if __name__ == '__main__':
parseFeed()
|
python
|
import requests
from sniplink.utils import *
from sniplink.api import API
from sniplink.objects import ShortLinkData
class Client:
"""
The Backend Client
Sniplink-Py is powered by a back-end client/runner, this system is responsible for ensuring safety among API access.
Once you've registered a client, you can access all the Sniplink-Py API features available without worry.
It's best to declare your client in the global scope to ensure you only ever have one client active.
"""
def __init__(self):
pass
@staticmethod
def get_link(public_id):
"""
Fetches the data of shortlink with provided public ID.
:param public_id:
:returns ShortLinkData:
"""
resp = requests.get(API.link_endpoint + f"/{public_id}").json()
return ShortLinkData(resp['id'], resp['creationTime'], resp['expirationTime'], resp['value'], resp['shortUrl'])
@staticmethod
def create_link(expires_in, url):
"""
Creates a new shortlink with provided expires_in, url values.
Note: expires_in value represents a unix timestamp.
The maximum expiration time is 30 days.
:param expires_in:
:param url:
:returns ShortLinkData:
"""
body = {
"value": url
}
if isinstance(expires_in, (int, float)):
body["expirationTime"] = int(expires_in)
elif isinstance(expires_in, str):
body["expirationTime"] = int(expires_in)
else:
raise SnipLinkError("Invalid expires in value passed.")
resp = requests.post(API.link_endpoint, json=body, headers={'content-type': 'application/json'}).json()
return ShortLinkData(resp['id'], resp['creationTime'], resp['expirationTime'], resp['value'], resp['shortUrl'])
|
python
|
class AbilityChangeEvent:
"""Event that indicates an ability change"""
def __init__(self, data, type) -> None:
"""Init event"""
self.data = data
self.type = type
@property
def sphere_id(self) -> str:
return self.data['sphere']['id']
@property
def cloud_id(self) -> str:
return self.data['stone']['id']
@property
def unique_id(self) -> str:
return self.data['stone']['uid']
@property
def ability_type(self) -> str:
return self.data['ability']['type']
@property
def ability_enabled(self) -> bool:
return self.data['ability']['enabled']
@property
def ability_synced_to_crownstone(self) -> bool:
return self.data['ability']['syncedToCrownstone']
|
python
|
from tkinter import messagebox
import pandas as pd
import matplotlib.pyplot as plt
import tkinter as tk
import os
def get_chart_user(date):
if os.path.exists("re/drowsiness_files/"+date+".csv"):
data=pd.read_csv("re/drowsiness_files/"+date+".csv")
data.fillna("Unknown",inplace=True)
gb=data.groupby("name")
su=gb.sum()
l=list(su.index.values)
user=[]
for i in l:
grb = gb.get_group(i)
user.append(grb)
grb.plot(x="time", y="EAR", rot=45, title=i)
plt.show()
else:
messagebox.showinfo(title="sample", message="Report/data is not available")
def bar_chart():
root = tk.Toplevel()
title=tk.Label(root,text="Report Page(Line chart)")
title.pack()
hint_date=tk.Label(root,text="dd-mm-yyyy please fill in this format")
hint_date.pack()
date=tk.Entry(root)
date.pack()
btn=tk.Button(root,text="Genrate",command=lambda :get_chart_user(date.get()))
btn.pack()
root.mainloop()
|
python
|
"""User memberships in teams."""
import dataclasses
import kaptos.db
import roax.schema as s
from roax.resource import operation
@dataclasses.dataclass
class Member:
"""User membership in team."""
id: s.uuid(description="Identifies the membership.")
team_id: s.uuid(description="Identifies the team.")
user_id: s.uuid(description="Identifies the user.")
status: s.str(
description="Status of user's group membership.",
enum={"active", "suspended", "requested", "denied"},
)
roles: s.set(
description="User role(s) in team.",
items=s.str(enum={"read", "submit", "admin", "owner"}),
)
_required = "team_id user_id status roles"
schema = s.dataclass(Member)
class Members(kaptos.db.TableResource):
schema = schema
@operation
def create(self, _body: schema) -> s.dict({"id": schema.attrs.id}):
return super().create(_body)
@operation
def read(self, id: schema.attrs.id) -> schema:
return super().read(id)
@operation
def update(self, id: schema.attrs.id, _body: schema) -> None:
return super().update(id, _body)
@operation
def delete(self, id: schema.attrs.id) -> None:
return super().delete(id)
|
python
|
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from neptune.internal.common import NeptuneException, NeptuneIOException
class NeptuneReadConfigException(NeptuneIOException):
def __init__(self, io_error):
super(NeptuneReadConfigException, self).__init__(io_error)
self.message += " Failed to load job configuration file."
class ParameterYAMLInvalidDefinitionException(NeptuneException):
def __init__(self, parameter_name):
super(ParameterYAMLInvalidDefinitionException, self).__init__(
u'The provided definition in YAML configuration file for parameter \"{}\" is not supported!'. \
format(parameter_name))
class ParameterCLIDuplicatedDefinitionException(NeptuneException):
def __init__(self, parameter_name):
super(ParameterCLIDuplicatedDefinitionException, self).__init__(
u'The provided definition in -p/--parameter option for parameter \"{}\" is duplicated!'. \
format(parameter_name))
class JobConfigFileNotYAMLException(NeptuneException):
def __init__(self, job_config_path):
super(JobConfigFileNotYAMLException, self).__init__(
u'The provided job config file {} is not in YAML format!'.format(job_config_path))
class InvalidJobConfigException(NeptuneException):
def __init__(self, job_config_path, cause):
message = u'The provided job configuration {} is invalid! {}'.format(job_config_path, cause)
super(InvalidJobConfigException, self).__init__(message)
class MetricNotDeclaredException(NeptuneException):
def __init__(self, param_name):
cause = u"Parameter '{param_name}' is declared using hyper-parameter notation but "\
u"no metric is declared in the experiment configuration file."\
.format(param_name=param_name)
super(MetricNotDeclaredException, self).__init__(cause)
class NoReferenceParameterException(NeptuneException):
def __init__(self, param_name):
cause = u"Parameter '{param_name}' must reference to existing parameter definition"\
.format(param_name=param_name)
super(NoReferenceParameterException, self).__init__(cause)
self.param_name = param_name
class NoReferenceParameterInException(NeptuneException):
def __init__(self, param_name, arg, message=None):
cause = u"Parameter '{param_name}' in '{arg}' must reference to existing parameter definition.\n"\
u"{message}"\
.format(param_name=param_name, arg=arg, message=message or "")
super(NoReferenceParameterInException, self).__init__(cause)
class NoValueSetException(NeptuneException):
def __init__(self, param_name):
cause = u"Parameter '{param_name}' doesn't have a value".format(param_name=param_name)
super(NoValueSetException, self).__init__(cause)
class JobConfigValidationFailException(InvalidJobConfigException):
def __init__(self, job_config_path, validation_errors):
enumerated_validation_errors = [
u'{}. {}\n'.format(index + 1, validation_error)
for index, validation_error in enumerate(validation_errors)
]
cause = 'Validation errors: ' + ', '.join(enumerated_validation_errors)
super(JobConfigValidationFailException, self).__init__(job_config_path, cause)
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.