# coding:utf-8
from django.contrib.auth import authenticate,login,logout
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required, permission_required,user_passes_test
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.template.loader import get_template
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from wap.models import *
from django.core.mail import EmailMultiAlternatives
from django.core.mail import send_mail as core_send_mail
from django.utils import timezone
from django.core import serializers
from django.template import Context, loader
from SizeConverter.SizeConverter import *
from cyexl import xizhuang,chenshan
import json
import logging
import time
import datetime
import threading
def get_logger():
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)  # __debug__ is a bool, not a logging level
return logger
logging.basicConfig(level=logging.DEBUG)
def login_view(request):
user = authenticate(username=request.POST['username'], password=request.POST['password'])
if user is not None :
request.session['login_from'] = request.META.get('HTTP_REFERER', '/')
login(request, user)
print request.user
return HttpResponseRedirect('/order/order_manage')
else:
# Authentication failed; no further handling for now
return HttpResponseRedirect('/order/order_login')
def logout_view(request):
logout(request)
return HttpResponseRedirect('/order/order_login')
def order_login(request):
t = get_template('order/order_login.html')
c = RequestContext(request,locals())
return HttpResponse(t.render(c))
def order_denie(request):
t = get_template('order/order_denie.html')
c = RequestContext(request,locals())
return HttpResponse(t.render(c))
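# Group-membership checks used with @user_passes_test below. Note that despite
# the "not_in_" naming, each helper returns True when the user IS in the named group.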
def not_in_orders_group(user):
if user:
return user.groups.filter(name='orders').count() == 1
return False
def not_in_Factory_group(user):
if user:
return user.groups.filter(name='Factory').count() == 1
return False
def not_in_addorder_group(user):
if user:
return user.groups.filter(name='addorder').count() == 1
return False
@login_required
#@user_passes_test(not_in_addorder_group, login_url='/admin/')
def order_manage(request,tab):
mailtext = '已发送'
c = RequestContext(request,locals())
mailurl = '#'
if tab == 'order':
if request.user.groups.filter(name='addorder').count() ==1:
nav = '订单审核'
plant_update_issue = Plant_update.objects.filter(plant_status='退回订单',order__gh=request.user)
plant_update_list = Plant_update.objects.filter(plant_status='订单审查',order__gh=request.user)
mailtext = '提交复审'
mailurl = '/order/order_update_post'
return render_to_response('order/order_add.html', {'a': plant_update_issue,'b':plant_update_list,
'user':c,'nav':nav,'mailtext':mailtext,
'mailurl':mailurl})
if tab == 'wxd':
nav = '复审订单'
if request.user.groups.filter(name='orders').count() ==1:
plant_update_list = Plant_update.objects.filter(plant_status='复审中')
order_list = Order.objects.filter(order_status='复审中')
mailtext = '发送邮件'
mailurl = '/order/kmail'
return render_to_response('order/order_manage.html', {'a': plant_update_list,'user':c,'nav':nav,'mailtext':mailtext,'mailurl':mailurl})
return HttpResponseRedirect('/admin/')
elif tab == 'dzing':
nav = '定制中订单'
order_list = Order.objects.filter(order_status='定制中')
elif tab == 'dzwc':
nav = '制作完成订单'
order_list = Order.objects.filter(order_status='定制完成')
elif tab == 'psing':
nav = '配送中订单'
order_list = Order.objects.filter(order_status='配送中')
elif tab == 'ywc':
nav = '已完成订单'
order_list = Order.objects.filter(order_status='已收货')
return render_to_response('order/orderok.html', {'a': order_list,'user':c,'nav':nav,'mailtext':mailtext,'mailurl':mailurl})
@csrf_exempt
def manage_post(request):
response_data = {}
response_data['code'] = -1
if request.method == 'POST':
orderlist = request.POST.get('id')
orderlist_number = [str(i) for i in orderlist.split(',')]
for ordernumber in orderlist_number:
plant_update = get_object_or_404(Plant_update, order__order_number=ordernumber)
if plant_update.plant_status == '订单审查' or plant_update.plant_status == '退回订单' :
plant_update.plant_status = '复审中'
plant_update.save()
else:
#if plant_update.plant_status == '复审中':
issue = request.POST.get('issue')
plant_update.plant_status = '退回订单'
plant_update.issue = issue
plant_update.save()
return HttpResponseRedirect('/order/order_manage/wxd/')
return HttpResponseRedirect('/order/order_manage/order/')
@csrf_exempt
@user_passes_test(not_in_orders_group, login_url='/order/order_denie')
def kmail(request):
#send_mail(u'123', u'456789','[email protected]',['[email protected]'], fail_silently=False)
#order = Order.objects.get(order_number= orderid)
orderlist = request.POST.get('id')
orderlist_number = [str(i) for i in orderlist.split(',')]
get_logger().debug('---------------------%s'% orderlist_number)
response_data ={}
subject, from_email, to = '订单号'+orderlist +'下单表', '[email protected]', '[email protected]'
#subject, from_email, to = '订单号'+orderlist +'下单表', '[email protected]', '[email protected]'
text_content = '下单表'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
for ordernumber in orderlist_number:
oederlist = get_object_or_404(Order, order_number=ordernumber)
fir_name = unicode(oederlist.id) + unicode(oederlist.user.nickname) + '-' + unicode(oederlist.user.phonenumber) + '.xls'
try:
msg.attach_file('/home/Download/'+ fir_name)
except:
exl_download(request,ordernumber)
msg.attach_file('/home/Download/'+ fir_name)
order = Order.objects.get(order_number= ordernumber)
order.order_status = '定制中'
order.save()
plant_update = Plant_update.objects.get(order__order_number= ordernumber)
plant_update.plant_status = '等待制作'
plant_update.save()
pack = Pack.objects.all()
for packs in pack:
packs.volume = packs.volume -1
packs.save()
try:
fabric = Fabric.objects.get(id= order.fabric_id)
if order.product.type == 'suit':
fabric.volume = fabric.volume - 3.5
if order.product.type == 'shirt':
fabric.volume = fabric.volume - 1.7
fabric.save()
except:
pass
msg.send()
response_data['code'] = 0
return HttpResponse(json.dumps(response_data), content_type="application/json")
#return render_to_response('order/mailok.html', {'mailok': mailok,'orderid':orderid})
@login_required
#@user_passes_test(not_in_orders_group, login_url='/order/order_denie')
def exl_download(request,orderid):
oederlist = get_object_or_404(Order, order_number=orderid)
user_id = oederlist.user_id
userid = oederlist.user_id
if oederlist.is4friend:
userlist = get_object_or_404(User, phonenumber=oederlist.friend_phone)
user_id = userlist.id
userinfo = get_object_or_404(User, id=user_id)
users = get_object_or_404(User, id=userid)
sleeve_lefet = userinfo.sleeve_lefet
stomach = userinfo.stomach
favor = userinfo.favor
istie = userinfo.istie
iswatch = userinfo.iswatch
suit_shangyi = userinfo.suit_shangyi
majia_qianchang = userinfo.majia_qianchang
majia_houchang = userinfo.majia_houchang
if not sleeve_lefet or sleeve_lefet == '0':
sleeve_lefet = userinfo.sleeve_right
if not majia_qianchang or majia_qianchang == '0':
majia_qianchang = 0
if not majia_houchang or majia_houchang == '0':
majia_houchang = 0
if not stomach:
stomach = 0
if not favor:
favor = 1
if not istie:
istie = 0
if not iswatch:
iswatch = 2
if not suit_shangyi:
suit_shangyi = 1
#get_logger().debug('-------stomach-------%s'%stomach)
sizeList=[
float(userinfo.lingwei),# neck circumference
float(userinfo.chest),# chest circumference
float(userinfo.waist),# waist circumference
float(userinfo.shoulder),# shoulder width
float(userinfo.sleeve_right),# sleeve length (right)
float(sleeve_lefet),# sleeve length (left)
float(userinfo.back_cloth),# back jacket length
float(userinfo.hip),# hip circumference
float(userinfo.kuyao),# trouser waist
float(userinfo.kuchang),# trouser length
float(userinfo.hengdang),# crotch width
float(userinfo.xiwei),# knee circumference
float(userinfo.kukou),# trouser hem
float(majia_houchang),# vest back length
float(userinfo.xiulong),# armhole
float(userinfo.chougenfen),# upper-arm (sleeve root) width
float(userinfo.xiukou_right),# cuff (right cuff used for now)
float(stomach),# belly circumference
float(majia_qianchang),# vest front length
float(userinfo.height),# height
float(userinfo.weight),# weight
]
userChoice={
'm':int(favor),# m: 0 = slim fit, 1 = regular fit, 2 = loose fit
'i':int(istie),# i: 0 = wears a tie, 1 = no tie
'j':int(iswatch),# j: 0 = watch on left wrist, 1 = watch on right wrist, 2 = no watch
'q':int(suit_shangyi),# q: 0 = long cut, 1 = short cut
}
get_logger().debug('-------sizeList-%s',sizeList)
get_logger().debug('-------userChoice-%s',userChoice)
d1 = datetime.date.today()
timdata = time.strftime('%Y-%m-%d',time.localtime(time.time()))
d2 = d1 + datetime.timedelta(10)
if oederlist.add_xiuzi and oederlist.product.type == 'shirt':
d2 = d1 + datetime.timedelta(12)
get_logger().debug('-------sizes-'+ str(d2))
order_xx ={'user':users.name,'phone':users.phonenumber,'tmime':timdata,'d2':str(d2)}
bzcc ={}
#get_logger().debug('-------sizes-------%s'%order_xx)
xzks ={}
cycc = SizeConverter(sizeList,userChoice).convert()
get_logger().debug('-------sizes-:%s', cycc)
if oederlist.product.type == 'suit':
bcc = xizhuang(userinfo,users,cycc,oederlist,order_xx)
else:
bcc = chenshan(userinfo,users,cycc,oederlist,order_xx)
def file_iterator(file_name, chunk_size=512):
with open('/home/Download/'+file_name) as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
the_file_name = str(oederlist.id) + str(users.nickname) +'-'+ str(users.phonenumber) +".xls"
response = StreamingHttpResponse(file_iterator(the_file_name))
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="{0}"'.format(the_file_name)
return response
@login_required
@user_passes_test(not_in_Factory_group, login_url='/admin/')
def plant_statu(request,tab):
naotext = '已发送'
naourl = '#'
if tab == 'ddzz':
nao = '等待制作订单'
naotext = '提交制作'
naourl = '/order/order_update_post'
plant_update = Plant_update.objects.filter(plant_status ='等待制作')
elif tab == 'zzing':
nao = '制作中订单'
naotext = '提交完成'
naourl = '/order/order_update_post'
plant_update = Plant_update.objects.filter(plant_status ='制作中')
elif tab == 'zzwc':
nao = '定制中订单'
naotext = '提交发货'
naourl = '/order/order_update_post'
plant_update = Plant_update.objects.filter(plant_status ='制作完成')
elif tab == 'psing':
nao = '送货中订单'
naotext = '提交完成'
naourl = '/order/order_update_post'
plant_update = Plant_update.objects.filter(plant_status ='配送中')
elif tab == 'yjf':
nao = '已收货订单'
naotext = '已收货订单'
try:
numberid = request.GET['state']
orderid = request.GET['id']
order = Order.objects.get(id= orderid)
plant = Plant_update.objects.get(order_id= order.id)
order.huifang = numberid
order.save()
plant.plant_status = '订单完成'
#plant.jiaofu_time = time
plant.save()
except:
pass
plant_update = Plant_update.objects.filter(plant_status ='已收货')
c = RequestContext(request,locals())
return render_to_response('order/plant_statu_is.html', {'user':c,'naotext':naotext,'nao':nao,'naourl':naourl,'a':plant_update})
elif tab == 'ddwc':
nao = '订单已完成'
naotext = '订单已完成'
plant_update = Plant_update.objects.filter(plant_status ='订单完成')
c = RequestContext(request,locals())
return render_to_response('order/plant_statu.html', {'user':c,'naotext':naotext,'nao':nao,'naourl':naourl,'a':plant_update})
@csrf_exempt
def order_update_post(request,orderid):
order = Order.objects.get(order_number= orderid)
plant = Plant_update.objects.get(order_id= order.id)
time = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S")
plant_statu = plant.plant_status
order_statu = order.order_status
if plant_statu == '等待制作':
order.order_status = '定制中'
order.save()
plant.plant_status = '制作中'
plant.zhizuo_time = time
plant.save()
return HttpResponseRedirect('/order/plant_statu/ddzz/')
elif plant_statu == '制作中':
order.order_status = '定制完成'
order.save()
plant.plant_status = '制作完成'
plant.wancheng_time = time
plant.save()
return HttpResponseRedirect('/order/plant_statu/zzing/')
elif plant_statu == '制作完成':
order.order_status = '配送中'
order.save()
plant.plant_status = '配送中'
plant.peishong_time = time
plant.save()
return HttpResponseRedirect('/order/plant_statu/zzwc/')
elif plant_statu == '配送中':
order.order_status = '已收货'
order.save()
plant.plant_status = '已收货'
plant.jiaofu_time = time
plant.save()
return HttpResponseRedirect('/order/plant_statu/psing/')
return HttpResponseRedirect('/order/plant_statu/zzwc')
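# Status workflow implemented by order_update_post above (derived from its branches):
#   plant_status '等待制作' -> '制作中'   (order_status -> '定制中',   zhizuo_time set)
#   plant_status '制作中'   -> '制作完成' (order_status -> '定制完成', wancheng_time set)
#   plant_status '制作完成' -> '配送中'   (order_status -> '配送中',   peishong_time set)
#   plant_status '配送中'   -> '已收货'   (order_status -> '已收货',   jiaofu_time set)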
def order_post(request):
response_data ={}
nam =[]
fabricid = []
addressnam = []
addressid = []
product = request.POST.get('product')
if not product:
product = 1
user = request.POST.get('user')
response_list = Fabric.objects.filter(product=product)
address_list = Address4Order.objects.filter(user=user)
username = User.objects.get(id=user)
for i in response_list:
nam.append(i.name)
fabricid.append(str(i.id))
for i in address_list:
addressnam.append(str(i))
addressid.append(str(i.id))
response_data = {'fabricid':fabricid,'nam':nam,'addressnam':addressnam,'addressid':addressid,'username':str(username)}
return HttpResponse(json.dumps(response_data), content_type="application/json")
def get_user_name(request):
response_data ={}
user_id = request.GET['user_id']
username = User.objects.get(id=user_id)
response_data={'user_name':str(username)}
return HttpResponse(json.dumps(response_data), content_type="application/json")
def get_address_name(request):
response_data ={}
address_id = request.POST['user']
return HttpResponse(json.dumps(response_data), content_type="application/json")
def get_product_name(request):
response_data ={}
product_name = request.POST['product']
try:
products = Product.objects.get(id = product_name)
response_data['type'] = products.type
except:
response_data['type'] = 'suit'
return HttpResponse(json.dumps(response_data), content_type="application/json")
|
python
|
import random
import numpy as np
a = [5, -1, 0, -1, 2]
b=-9
if sum(a) > 3 and b < -1:
print(True)
|
python
|
from keras.callbacks import TensorBoard
import tensorflow as tf
from keras.callbacks import EarlyStopping, ModelCheckpoint
from exercise_performance_scorer.data_creator import ExercisePerformanceDataCreator
from exercise_performance_scorer.model import ExercisePerformanceModel
class ExercisePerformanceTrainer:
def __init__(self, config):
self._config = config
self._data_creator = ExercisePerformanceDataCreator(config)
self._session = None
self._train_features, self._train_labels, self._test_features, \
self._test_labels, self._val_features, self._val_labels = self._data_creator.get_feature_datasets()
self._ml_model = ExercisePerformanceModel(self._config)
self._ml_model.compile()
def train(self):
with tf.Session() as self._session:
self._session.run(tf.global_variables_initializer())
try:
self._ml_model.load(self._config.model.path)
except OSError:
print("Can't find model. Training from scratch.")
print('Starting training')
tensorboard_cb = TensorBoard(log_dir=self._config.training.log_path, histogram_freq=0,
write_graph=True, write_images=True)
self._ml_model.model.fit(
self._train_features, self._train_labels, validation_data=(self._val_features, self._val_labels),
epochs=self._config.training.epoch_num,verbose=1,
batch_size=self._config.training.batch_size,
callbacks=[ModelCheckpoint(self._config.model.path, 'val_loss', save_best_only=True,
save_weights_only=True),
EarlyStopping(monitor='val_loss', verbose=0,
patience=self._config.training.lr_decrease_patience,
restore_best_weights=True),
tensorboard_cb])
test_loss = self._ml_model.model.evaluate(self._test_features, self._test_labels,
batch_size=self._config.training.batch_size)
print(f'Test loss: {test_loss}')
if __name__ == '__main__':
from config import general_config
trainer = ExercisePerformanceTrainer(general_config.exercise_performance_pipeline)
trainer.train()
|
python
|
from urllib.parse import urlparse
from app import logger
import requests
class ControllerService:
def init_app(self, app):
return
def send_command(self, gate, command='open', conditional=False):
try:
if gate.type == 'gatekeeper':
if command == 'open' and not conditional:
requests.get(f'http://{gate.controller_ip}/?a=open', timeout=2)
elif command == 'close' and not conditional:
requests.get(f'http://{gate.controller_ip}/?a=close', timeout=2)
elif command == 'open' and conditional:
requests.get(f'http://{gate.controller_ip}/?a=grant', timeout=2)
elif command == 'close' and conditional:
requests.get(f'http://{gate.controller_ip}/?a=deny', timeout=2)
elif gate.type == 'generic':
if command == 'open':
requests.get(gate.uri_open, timeout=2)
if command == 'close':
requests.get(gate.uri_close, timeout=2)
return True
except:
logger.error("Could not send command to controller (gate: {})".format(gate.name))
return False
def get_status(self, gate):
if gate.type == 'gatekeeper':
try:
response = requests.get(f'http://{gate.controller_ip}/json', timeout=2)
except:
return {'is_alive': False, 'controller_ip': gate.controller_ip}
if not response.ok:
return {'is_alive': False, 'controller_ip': gate.controller_ip}
uptime = 0
try:
data = response.json()
uptime = data['uptime']
except:
pass
return {'is_alive': True, 'controller_ip': gate.controller_ip, 'uptime': uptime}
elif gate.type == 'generic':
# If there is no open uri then we can't do much
if gate.uri_open == '':
return {'is_alive': False}
try:
controller_ip = urlparse(gate.uri_open).hostname
response = requests.get(f'http://{controller_ip}/', timeout=2)
except:
return {'is_alive': False}
if not response.ok:
return {'is_alive': False, 'controller_ip': controller_ip}
return {'is_alive': True, 'controller_ip': controller_ip}
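# Hedged usage sketch (the gate object is hypothetical; only the attributes the
# class above actually reads are assumed: type, controller_ip, uri_open, uri_close, name):
#   svc = ControllerService()
#   ok = svc.send_command(gate, command='open')   # True if the HTTP call went through
#   status = svc.get_status(gate)                 # e.g. {'is_alive': True, 'controller_ip': ...}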
|
python
|
from quart import Blueprint
home = Blueprint("home", __name__)
@home.route("/")
def index():
"""Home view.
This view will return an empty JSON mapping.
"""
return {}
|
python
|
from django.contrib import admin
from froide.helper.admin_utils import ForeignKeyFilter
class FollowerAdmin(admin.ModelAdmin):
raw_id_fields = (
"user",
"content_object",
)
date_hierarchy = "timestamp"
list_display = ("user", "email", "content_object", "timestamp", "confirmed")
list_filter = (
"confirmed",
("content_object", ForeignKeyFilter),
("user", ForeignKeyFilter),
)
search_fields = ("email",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.prefetch_related("user", "content_object")
|
python
|
"""
@author: Viet Nguyen <[email protected]>
"""
import cv2
import numpy as np
from collections import OrderedDict
# https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/image_classification/quickdraw_labels.txt
# Rule: key of category = index -1, with index from the link above
CLASS_IDS = OrderedDict()
CLASS_IDS[8] = "apple"
CLASS_IDS[35] = "book"
CLASS_IDS[38] = "bowtie"
CLASS_IDS[58] = "candle"
CLASS_IDS[74] = "cloud"
CLASS_IDS[87] = "cup"
CLASS_IDS[94] = "door"
CLASS_IDS[104] = "envelope"
CLASS_IDS[107] = "eyeglasses"
CLASS_IDS[136] = "hammer"
CLASS_IDS[139] = "hat"
CLASS_IDS[156] = "ice cream"
CLASS_IDS[167] = "leaf"
CLASS_IDS[252] = "scissors"
CLASS_IDS[283] = "star"
CLASS_IDS[301] = "t-shirt"
CLASS_IDS[209] = "pants"
CLASS_IDS[323] = "tree"
def get_images(path, classes):
images = [cv2.imread("{}/{}.png".format(path, item), cv2.IMREAD_UNCHANGED) for item in classes]
return images
def get_overlay(bg_image, fg_image, sizes=(40, 40)):
fg_image = cv2.resize(fg_image, sizes)
fg_mask = fg_image[:, :, 3:]
fg_image = fg_image[:, :, :3]
bg_mask = 255 - fg_mask
bg_image = bg_image / 255
fg_image = fg_image / 255
fg_mask = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR) / 255
bg_mask = cv2.cvtColor(bg_mask, cv2.COLOR_GRAY2BGR) / 255
image = cv2.addWeighted(bg_image * bg_mask, 255, fg_image * fg_mask, 255, 0.).astype(np.uint8)
return image
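# Hedged usage sketch (the "images" path and the camera frame are hypothetical;
# icons are expected as 4-channel BGRA PNGs, matching cv2.IMREAD_UNCHANGED above):
#   icons = get_images("images", list(CLASS_IDS.values()))
#   frame[10:50, 10:50] = get_overlay(frame[10:50, 10:50], icons[0], sizes=(40, 40))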
|
python
|
import json
import os
TEST_FILE_BASE_PATH = 'tests'
def __test_pages(app):
testapp = app.test_client()
pages = ('/',)  # single-element tuple needs the trailing comma
for page in pages:
resp = testapp.get(page)
assert resp.status_code == 200
def test_nearest_stations_mapper():
from transporter.utils import NearestStationsMapper
source_dict = {
"arsId": "0",
"dist": "153",
"gpsX": "127.12347574483393",
"gpsY": "37.39985681895763",
"posX": "210931.81833",
"posY": "433403.53304",
"stationId": "52913",
"stationNm": "\uc544\ub984\ub9c8\uc744.\ubc29\uc544\ub2e4\ub9ac\uc0ac\uac70\ub9ac", # noqa
"stationTp": "0"
}
mapper = NearestStationsMapper()
target_dict = mapper.transform(source_dict)
assert 'latitude' in target_dict
assert 'longitude' in target_dict
assert 'ars_id' in target_dict
assert 'station_name' in target_dict
assert 'distance_from_current_location' in target_dict
assert isinstance(target_dict['latitude'], float)
assert isinstance(target_dict['longitude'], float)
assert isinstance(target_dict['ars_id'], int)
assert isinstance(target_dict['station_name'], str)
assert isinstance(target_dict['distance_from_current_location'], int)
def test_routes_for_station_mapper():
from transporter.utils import RoutesForStationMapper
path = os.path.join(TEST_FILE_BASE_PATH, 'get_station_by_uid.json')
with open(path) as fin:
source_dict = json.loads(fin.read())
mapper = RoutesForStationMapper()
target_dict = mapper.transform(source_dict)
assert 'latitude' in target_dict
assert 'longitude' in target_dict
assert 'entries' in target_dict
def test_route_mapper():
from transporter.utils import RouteMapper
path = os.path.join(TEST_FILE_BASE_PATH, 'get_route_and_pos.json')
with open(path) as fin:
source_dict = json.loads(fin.read())
mapper = RouteMapper()
target_dict = mapper.transform(source_dict)
assert 'route_type' in target_dict
|
python
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestPrintStreamOp(unittest.TestCase):
def test_printstreamop(self):
df = pd.DataFrame([
[0, "abcde", "aabce"],
[1, "aacedw", "aabbed"],
[2, "cdefa", "bbcefa"],
[3, "bdefh", "ddeac"],
[4, "acedm", "aeefbc"]
])
inOp = StreamOperator.fromDataframe(df, schemaStr='id long, text1 string, text2 string')
inOp.print()
StreamOperator.execute()
pass
|
python
|
from sqlalchemy import *
from sqlalchemy.orm import relationship, validates
from db import db
class ConfigTemplate(db.Model):
__tablename__ = "config_template"
id = Column(String, primary_key=True)
doc = Column(Text, nullable=False)
language_id = Column(String, ForeignKey('language.id'))
service_configs = relationship('ServiceConfig', back_populates='template')
deployment_configs = relationship('DeploymentConfig', back_populates='template')
language = relationship('Language', back_populates='config_templates')
def __repr__(self):
return self.id
|
python
|
import random
import logging
from . import scheme
__all__ = ('MTProtoSessionData',)
log = logging.getLogger(__package__)
class MTProtoSessionData:
def __init__(self, id):
if id is None:
id = random.SystemRandom().getrandbits(64)
log.debug('no session_id provided, generated new session_id: {}'.format(id))
self._id = scheme.int64_c(id)
self._auth_keys = dict()
|
python
|
import argparse
import torch
import os
from tqdm import tqdm
from datetime import datetime
import torch.nn as nn
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
import deepab
from deepab.models.PairedSeqLSTM import PairedSeqLSTM
from deepab.util.util import RawTextArgumentDefaultsHelpFormatter
from deepab.datasets.H5PairedSeqDataset import H5PairedSeqDataset
def train_epoch(model, train_loader, criterion, optimizer, device):
"""Trains a model for one epoch"""
model.train()
running_loss = 0
e_i = 0
for inputs, labels, _ in tqdm(train_loader, total=len(train_loader)):
inputs = inputs.to(device)
labels = labels.to(device)[:, 1:]
optimizer.zero_grad()
def handle_batch():
"""Function done to ensure variables immediately get dealloced"""
output = model(src=inputs, trg=inputs)
output = output[1:].permute(1, 2, 0)
loss = criterion(output, labels)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
return loss.item()
loss = handle_batch()
running_loss += loss
if e_i % 100 == 0:
print(loss)
e_i += 1
# running_loss += handle_batch()
return running_loss
def validate(model, validation_loader, criterion, device):
""""""
with torch.no_grad():
model.eval()
running_loss = 0
for inputs, labels, _ in tqdm(validation_loader,
total=len(validation_loader)):
inputs = inputs.to(device)
labels = labels.to(device)[:, 1:]
def handle_batch():
"""Function done to ensure variables immediately get dealloced"""
output = model(src=inputs, trg=inputs)
output = output[1:].permute(1, 2, 0)
loss = criterion(output, labels)
return loss.item()
running_loss += handle_batch()
return running_loss
def train(model,
train_loader,
validation_loader,
criterion,
optimizer,
epochs,
device,
lr_modifier,
writer,
save_file,
save_every,
properties=None):
""""""
properties = {} if properties is None else properties
print('Using {} as device'.format(str(device).upper()))
model = model.to(device)
for epoch in range(epochs):
train_loss = train_epoch(model, train_loader, criterion, optimizer,
device)
avg_train_loss = train_loss / len(train_loader)
train_loss_dict = {"cce": avg_train_loss}
writer.add_scalars('train_loss', train_loss_dict, global_step=epoch)
print('\nAverage training loss (epoch {}): {}'.format(
epoch, avg_train_loss))
val_loss = validate(model, validation_loader, criterion, device)
avg_val_loss = val_loss / len(validation_loader)
val_loss_dict = {"cce": avg_val_loss}
writer.add_scalars('validation_loss', val_loss_dict, global_step=epoch)
print('\nAverage validation loss (epoch {}): {}'.format(
epoch, avg_val_loss))
lr_modifier.step(val_loss)
if (epoch + 1) % save_every == 0:
properties.update({'model_state_dict': model.state_dict()})
properties.update({
'optimizer_state_dict': optimizer.state_dict(),
'train_loss': train_loss_dict,
'val_loss': val_loss_dict,
'epoch': epoch
})
torch.save(properties, save_file + ".e{}".format(epoch + 1))
properties.update({'model_state_dict': model.state_dict()})
properties.update({
'optimizer_state_dict': optimizer.state_dict(),
'train_loss': train_loss_dict,
'val_loss': val_loss_dict,
'epoch': epoch
})
torch.save(properties, save_file)
def init_weights(m: nn.Module):
for name, param in m.named_parameters():
if 'weight' in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
def _get_args():
"""Gets command line arguments"""
project_path = os.path.abspath(os.path.join(deepab.__file__, "../.."))
desc = ('''
Desc pending
''')
parser = argparse.ArgumentParser(
description=desc, formatter_class=RawTextArgumentDefaultsHelpFormatter)
# Model architecture arguments
parser.add_argument('--enc_hid_dim', type=int, default=64)
parser.add_argument('--dec_hid_dim', type=int, default=64)
# Training arguments
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--save_every', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--use_gpu',
default=False,
action="store_true")  # store_true flags must not also specify type=
parser.add_argument('--train_split', type=float, default=0.95)
default_h5_file = os.path.join(project_path, 'data/abSeq.h5')
parser.add_argument('--h5_file', type=str, default=default_h5_file)
now = str(datetime.now().strftime('%y-%m-%d %H:%M:%S'))
default_model_path = os.path.join(project_path,
'trained_models/model_{}/'.format(now))
parser.add_argument('--output_dir', type=str, default=default_model_path)
return parser.parse_args()
def _cli():
"""Command line interface for train.py when it is run as a script"""
args = _get_args()
device_type = 'cuda' if torch.cuda.is_available(
) and args.use_gpu else 'cpu'
device = torch.device(device_type)
properties = dict(seq_dim=23,
enc_hid_dim=args.enc_hid_dim,
dec_hid_dim=args.dec_hid_dim)
model = PairedSeqLSTM(**properties)
model.apply(init_weights)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
properties.update({'lr': args.lr})
# Load dataset loaders from h5 file
h5_file = args.h5_file
dataset = H5PairedSeqDataset(h5_file)
train_split_length = int(len(dataset) * args.train_split)
torch.manual_seed(0)
train_dataset, validation_dataset = data.random_split(
dataset, [train_split_length,
len(dataset) - train_split_length])
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
collate_fn=H5PairedSeqDataset.merge_samples_to_minibatch)
validation_loader = data.DataLoader(
validation_dataset,
batch_size=args.batch_size,
collate_fn=H5PairedSeqDataset.merge_samples_to_minibatch)
lr_modifier = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
verbose=True)
out_dir = args.output_dir
if not os.path.isdir(out_dir):
print('Making {} ...'.format(out_dir))
os.mkdir(out_dir)
writer = SummaryWriter(os.path.join(out_dir, 'tensorboard'))
print('Arguments:\n', args)
print('Model:\n', model)
train(model=model,
train_loader=train_loader,
validation_loader=validation_loader,
criterion=criterion,
optimizer=optimizer,
device=device,
epochs=args.epochs,
lr_modifier=lr_modifier,
writer=writer,
save_file=os.path.join(out_dir, 'model.p'),
save_every=args.save_every,
properties=properties)
if __name__ == '__main__':
_cli()
|
python
|
from flask_wtf import FlaskForm as Form
from wtforms import StringField
from wtforms.validators import DataRequired
class LoginForm(Form):
username = StringField('username', validators=[DataRequired()])
password = StringField('password', validators=[DataRequired()])
|
python
|
import os
import json
import numpy as np
import matplotlib.pyplot as plt
def compute_iou(box_1, box_2):
'''
This function takes a pair of bounding boxes and returns their
intersection-over-union (IoU).
'''
iou = np.random.random()
width = min(box_1[2], box_2[2]) - max(box_1[0], box_2[0]);
height = min(box_1[3], box_2[3]) - max(box_1[1], box_2[1]);
# Boxes don't intersect
if width<0 or height<0:
iou = 0
return iou
# Boxes intersect. Continue
intersect = width * height;
area_b1 = (box_1[2]-box_1[0]) * (box_1[3]-box_1[1])
area_b2 = (box_2[2]-box_2[0]) * (box_2[3]-box_2[1])
union = area_b1 + area_b2 - intersect
iou = intersect/union
if (iou < 0):
iou = 0
assert (iou >= 0) and (iou <= 1.0)
return iou
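# Quick sanity checks for compute_iou, assuming [x_min, y_min, x_max, y_max] boxes
# as implied by the min/max arithmetic above: identical boxes give IoU 1, disjoint boxes give 0.
assert compute_iou([0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 10.0, 10.0]) == 1.0
assert compute_iou([0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]) == 0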
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5):
'''
This function takes a pair of dictionaries (with our JSON format; see ex.)
corresponding to predicted and ground truth bounding boxes for a collection
of images and returns the number of true positives, false positives, and
false negatives.
<preds> is a dictionary containing predicted bounding boxes and confidence
scores for a collection of images.
<gts> is a dictionary containing ground truth bounding boxes for a
collection of images.
'''
TP = 0
FP = 0
FN = 0
'''
BEGIN YOUR CODE
'''
associated = [] # list of predictions that have already been associated
for pred_file, pred in preds.items():
gt = gts[pred_file]
for i in range(len(gt)):
iou_max = iou_thr
best_pred = -1
for j in range(len(pred)):
iou = compute_iou(pred[j][:4], gt[i])
conf = pred[j][4]
# Check if object can be associated, and is not already associated
# if iou greater than max, greater than thresh
if (iou > iou_max and conf > conf_thr and j not in associated):
iou_max = iou
best_pred = j
if best_pred != -1: # An object was correctly detected - true positive
TP = TP+1
associated.append(best_pred)  # record the matched prediction index, not the loop variable
else: # No detection made - false negative
FN = FN+1
# Count total number of predictions meeting threshold
P = 0
for pred_file, pred in preds.items():
for j in range(len(pred)):
conf = pred[j][4]
if conf > conf_thr:
P = P+1
# False positive: total positive - true positives
FP = P - TP
'''
END YOUR CODE
'''
return TP, FP, FN
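# Toy sanity check for compute_counts, using the formats assumed above:
#   preds maps filename -> [[x1, y1, x2, y2, confidence], ...]
#   gts   maps filename -> [[x1, y1, x2, y2], ...]
# The first prediction overlaps the ground truth well and clears conf_thr, the
# second falls below conf_thr, so the expected counts are TP=1, FP=0, FN=0.
_toy_preds = {'img1.jpg': [[10.0, 10.0, 50.0, 50.0, 0.9], [200.0, 200.0, 240.0, 240.0, 0.2]]}
_toy_gts = {'img1.jpg': [[12.0, 12.0, 48.0, 48.0]]}
assert compute_counts(_toy_preds, _toy_gts, iou_thr=0.5, conf_thr=0.5) == (1, 0, 0)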
global plot_PR_graph
def plot_PR_graph(use_train=True, use_weak=False, thresh=0.5):
''' Load in data to gts and preds '''
if use_weak:
with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
gts_train = json.load(f)
with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
gts_test = json.load(f)
gts = {**gts_train, **gts_test}
with open(os.path.join(preds_path,'preds_train_weak.json'),'r') as f:
preds_train = json.load(f)
with open(os.path.join(preds_path,'preds_test_weak.json'),'r') as f:
preds_test = json.load(f)
preds = {**preds_train, **preds_test}
else:
if use_train:
with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
gts = json.load(f)
with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
preds = json.load(f)
else:
with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
gts = json.load(f)
with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
preds = json.load(f)
# Load in confidence values
confidence_thrs = []
for fname in preds:
for i in range(len(preds[fname])):
pred = preds[fname][i]
confidence_thrs.append(np.array([pred[4]], dtype=float))
# Compute the counts
tp = np.zeros(len(confidence_thrs))
fp = np.zeros(len(confidence_thrs))
fn = np.zeros(len(confidence_thrs))
for i, conf_thr in enumerate(confidence_thrs):
tp[i], fp[i], fn[i] = compute_counts(preds, gts, iou_thr=thresh, conf_thr=conf_thr)
# Plot training set PR curves
precision = (tp / (fp + tp))# true/total predictions
recall = (tp / (fn + tp)) # detected/total objects
inds = np.lexsort((precision, recall))
plot = [recall[inds],precision[inds]]
plt.plot(plot[0][:], plot[1][:], label=thresh)
# set a path for predictions and annotations:
preds_path = '../data/hw02_preds'
gts_path = '../../data/hw02_annotations'
# load splits:
split_path = '../../data/hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))
plot_PR_graph(use_train=True, use_weak=True, thresh=0.75)
plot_PR_graph(use_train=True, use_weak=True, thresh=0.5)
plot_PR_graph(use_train=True, use_weak=True, thresh=0.25)
plot_PR_graph(use_train=True, use_weak=True, thresh=0.01)
plt.legend(loc="lower left")
plt.xlabel('R')
plt.ylabel('P')
plt.show()
# =============================================================================
# '''
# Load data.
# '''
# with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
# preds_train = json.load(f)
#
# with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
# gts_train = json.load(f)
#
# with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
# preds_test = json.load(f)
#
# with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
# gts_test = json.load(f)
#
# # For a fixed IoU threshold, vary the confidence thresholds.
# # The code below gives an example on the training set for one IoU threshold.
# confidence_thrs = []
# for fname in preds_train:
# for i in range(len(preds_train[fname])):
# pred = preds_train[fname][i]
# confidence_thrs.append(np.array([pred[4]], dtype=float))
# tp_train = np.zeros(len(confidence_thrs))
# fp_train = np.zeros(len(confidence_thrs))
# fn_train = np.zeros(len(confidence_thrs))
# for i, conf_thr in enumerate(confidence_thrs):
# tp_train[i], fp_train[i], fn_train[i] = compute_counts(preds_train, gts_train, iou_thr=0.01, conf_thr=conf_thr)
#
# # Plot training set PR curves
# precision = (tp_train/(fp_train+tp_train))# true/total predictions
# recall = (tp_train/(fn_train+tp_train)) # detected/total objects
# inds = np.lexsort((precision, recall))
# plot = [recall[inds],precision[inds]]
# plt.plot(plot[0][:], plot[1][:])
#
# =============================================================================
|
python
|
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements functions and classes
# to handle forking of processes
# and the collection of results
#
# Author:
# Burt Holzman and Igor Sfiligoi
#
import cPickle
import os
import time
import select
from pidSupport import register_sighandler, unregister_sighandler, termsignal
import logSupport
class ForkResultError(RuntimeError):
def __init__(self, nr_errors, good_results, failed=[]):
RuntimeError.__init__(self, "Found %i errors" % nr_errors)
self.nr_errors = nr_errors
self.good_results = good_results
self.failed = failed
################################################
# Low level fork and collect functions
def fork_in_bg(function, *args):
# fork and call a function with args
# return a dict with {'r': fd, 'pid': pid} where fd is the stdout from a pipe.
# example:
# def add(i, j): return i+j
# d = fork_in_bg(add, i, j)
r, w = os.pipe()
unregister_sighandler()
pid = os.fork()
if pid == 0:
logSupport.disable_rotate = True
os.close(r)
try:
out = function(*args)
os.write(w, cPickle.dumps(out))
finally:
os.close(w)
# Exit, immediately. Don't want any cleanup, since I was created
# just for performing the work
os._exit(0)
else:
register_sighandler()
os.close(w)
return {'r': r, 'pid': pid}
###############################
def fetch_fork_result(r, pid):
"""
Used with fork clients
@type r: pipe
@param r: Input pipe
@type pid: int
@param pid: pid of the child
@rtype: Object
@return: Unpickled object
"""
try:
rin = ""
s = os.read(r, 1024*1024)
while (s != ""): # "" means EOF
rin += s
s = os.read(r,1024*1024)
finally:
os.close(r)
os.waitpid(pid, 0)
out = cPickle.loads(rin)
return out
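# Hedged usage sketch for the two helpers above (not part of the original module):
#   def _add(i, j):
#       return i + j
#   child = fork_in_bg(_add, 1, 2)
#   result = fetch_fork_result(child['r'], child['pid'])   # unpickles to 3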
def fetch_fork_result_list(pipe_ids):
"""
Read the output pipe of the children, used after forking to perform work
and after forking to entry.writeStats()
@type pipe_ids: dict
@param pipe_ids: Dictionary of pipe and pid
@rtype: dict
@return: Dictionary of fork_results
"""
out = {}
failures = 0
failed = []
for key in pipe_ids:
try:
# Collect the results
out[key] = fetch_fork_result(pipe_ids[key]['r'],
pipe_ids[key]['pid'])
except Exception, e:
logSupport.log.warning("Failed to extract info from child '%s'" % key)
logSupport.log.exception("Failed to extract info from child '%s'" % key)
# Record failed keys
failed.append(key)
failures += 1
if failures>0:
raise ForkResultError(failures, out, failed=failed)
return out
def fetch_ready_fork_result_list(pipe_ids):
"""
Read the output pipe of the children, used after forking to perform work
and after forking to entry.writeStats(). If there is data on the pipes to
consume, read the data and close the pipe.
@type pipe_ids: dict
@param pipe_ids: Dictionary of pipe and pid
@rtype: dict
@return: Dictionary of work_done
"""
work_info = {}
failures = 0
failed = []
fds_to_entry = dict((pipe_ids[x]['r'], x) for x in pipe_ids)
readable_fds = select.select(fds_to_entry.keys(), [], [], 0)[0]
for fd in readable_fds:
try:
key = fds_to_entry[fd]
pid = pipe_ids[key]['pid']
out = fetch_fork_result(fd, pid)
work_info[key] = out
except Exception, e:
logSupport.log.warning("Failed to extract info from child '%s'" % str(key))
logSupport.log.exception("Failed to extract info from child '%s'" % str(key))
# Record failed keys
failed.append(key)
failures += 1
if failures>0:
raise ForkResultError(failures, work_info, failed=failed)
return work_info
def wait_for_pids(pid_list):
"""
Wait for all pids to finish.
Throw away any stdout or err
"""
for pidel in pid_list:
pid=pidel['pid']
r=pidel['r']
try:
#empty the read buffer first
s=os.read(r,1024)
while (s!=""): # "" means EOF
s=os.read(r,1024)
finally:
os.close(r)
os.waitpid(pid,0)
################################################
# Fork Class
class ForkManager:
def __init__(self):
self.functions_tofork = {}
# I need a separate list to keep the order
self.key_list = []
return
def __len__(self):
return len(self.functions_tofork)
def add_fork(self, key, function, *args):
if key in self.functions_tofork:
raise KeyError("Fork key '%s' already in use"%key)
self.functions_tofork[key] = ( (function, ) + args)
self.key_list.append(key)
def fork_and_wait(self):
pids=[]
for key in self.key_list:
pids.append(fork_in_bg(*self.functions_tofork[key]))
wait_for_pids(pids)
def fork_and_collect(self):
pipe_ids = {}
for key in self.key_list:
pipe_ids[key] = fork_in_bg(*self.functions_tofork[key])
results = fetch_fork_result_list(pipe_ids)
return results
def bounded_fork_and_collect(self, max_forks,
log_progress=True, sleep_time=0.01):
post_work_info = {}
nr_errors = 0
pipe_ids = {}
forks_remaining = max_forks
functions_remaining = len(self.functions_tofork)
# try to fork all the functions
for key in self.key_list:
# Check if we can fork more
if (forks_remaining == 0):
if log_progress:
# log here, since we will have to wait
logSupport.log.info("Active forks = %i, Forks to finish = %i"%(max_forks,functions_remaining))
while (forks_remaining == 0):
failed_keys = []
# Give some time for the processes to finish the work
# logSupport.log.debug("Reached parallel_workers limit of %s" % parallel_workers)
time.sleep(sleep_time)
# Wait and gather results for work done so far before forking more
try:
# logSupport.log.debug("Checking finished workers")
post_work_info_subset = fetch_ready_fork_result_list(pipe_ids)
except ForkResultError, e:
# Collect the partial result
post_work_info_subset = e.good_results
# Expect all errors logged already, just count
nr_errors += e.nr_errors
functions_remaining -= e.nr_errors
failed_keys = e.failed
post_work_info.update(post_work_info_subset)
forks_remaining += len(post_work_info_subset)
functions_remaining -= len(post_work_info_subset)
for i in (post_work_info_subset.keys() + failed_keys):
del pipe_ids[i]
#end for
#end while
# yes, we can, do it
pipe_ids[key] = fork_in_bg(*self.functions_tofork[key])
forks_remaining -= 1
#end for
if log_progress:
logSupport.log.info("Active forks = %i, Forks to finish = %i"%(max_forks-forks_remaining,functions_remaining))
# now we just have to wait for all to finish
while (functions_remaining>0):
failed_keys = []
# Give some time for the processes to finish the work
time.sleep(sleep_time)
# Wait and gather results for work done so far before forking more
try:
# logSupport.log.debug("Checking finished workers")
post_work_info_subset = fetch_ready_fork_result_list(pipe_ids)
except ForkResultError, e:
# Collect the partial result
post_work_info_subset = e.good_results
# Expect all errors logged already, just count
nr_errors += e.nr_errors
functions_remaining -= e.nr_errors
failed_keys = e.failed
post_work_info.update(post_work_info_subset)
forks_remaining += len(post_work_info_subset)
functions_remaining -= len(post_work_info_subset)
for i in (post_work_info_subset.keys() + failed_keys):
del pipe_ids[i]
if len(post_work_info_subset)>0:
if log_progress:
logSupport.log.info("Active forks = %i, Forks to finish = %i"%(max_forks-forks_remaining,functions_remaining))
#end while
if nr_errors>0:
raise ForkResultError(nr_errors, post_work_info)
return post_work_info
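# Hedged usage sketch for ForkManager (some_function and its keys are hypothetical):
#   mgr = ForkManager()
#   for key in ('a', 'b', 'c'):
#       mgr.add_fork(key, some_function, key)
#   results = mgr.bounded_fork_and_collect(max_forks=2)
#   # results maps each key to the unpickled return value of its forked function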
|
python
|
from click import Option
from preacher.app.cli.executor import PROCESS_POOL_FACTORY, THREAD_POOL_FACTORY
from preacher.app.cli.option import LevelType, ExecutorFactoryType
from preacher.core.status import Status
def test_level_type():
tp = LevelType()
param = Option(["--level"])
assert tp.get_metavar(param) == "[skipped|success|unstable|failure]"
assert tp.get_missing_message(param) == (
"Choose from:\n\tskipped,\n\tsuccess,\n\tunstable,\n\tfailure"
)
assert tp.convert("skipped", None, None) == Status.SKIPPED
assert tp.convert("SUCCESS", None, None) == Status.SUCCESS
assert tp.convert("UnStable", None, None) == Status.UNSTABLE
assert tp.convert("FAILURE", None, None) == Status.FAILURE
assert tp.convert(Status.SUCCESS, None, None) == Status.SUCCESS
def test_executor_factory_type():
tp = ExecutorFactoryType()
param = Option(["--executor"])
assert tp.get_metavar(param) == "[process|thread]"
assert tp.get_missing_message(param) == "Choose from:\n\tprocess,\n\tthread"
assert tp.convert("process", None, None) is PROCESS_POOL_FACTORY
assert tp.convert("Thread", None, None) is THREAD_POOL_FACTORY
assert tp.convert(PROCESS_POOL_FACTORY, None, None) is PROCESS_POOL_FACTORY
|
python
|
# Represents the smallest unit of a list, holding a value plus references to the succeeding and previous Nodes
class Node(object):
def __init__(self, value, succeeding=None, previous=None):
# Store the value and neighbour references (completing the stub)
self.value = value
self.succeeding = succeeding
self.previous = previous
class LinkedList(object):
def __init__(self):
# Completing the stub: an empty list starts with no head node (assumed attribute name)
self.head = None
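# Minimal usage sketch, assuming Node stores its constructor arguments as attributes:
#   a = Node(1)
#   b = Node(2, previous=a)
#   a.succeeding = b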
|
python
|
## @package csnListener
# Definition of observer pattern related classes.
class Event:
""" Generic event class. """
def __init__(self, code, source):
self.__code = code
self.__source = source
def GetCode(self):
return self.__code
def GetSource(self):
return self.__source
def ToString(self):
if self.IsNull():
return "null"
elif self.IsChange():
return "change"
elif self.IsProgress():
return "progress"
else:
return None
def GetNullCode(self):
return 0
def GetChangeCode(self):
return 1
def GetProgressCode(self):
return 2
def IsNull(self):
return self.__code == self.GetNullCode()
def IsChange(self):
return self.__code == self.GetChangeCode()
def IsProgress(self):
return self.__code == self.GetProgressCode()
class ChangeEvent(Event):
""" Change event class. """
def __init__(self, source):
Event.__init__(self, self.GetChangeCode(), source)
class ProgressEvent(Event):
""" Change event class. """
def __init__(self, source, progress, message = ""):
self.__progress = progress
self.__message = message
Event.__init__(self, self.GetProgressCode(), source)
def GetProgress(self):
return self.__progress
def GetMessage(self):
return self.__message
class Listener:
""" Generic listener class. """
def __init__(self, source):
self._source = source
def GetSource(self):
""" Get the listener source. """
return self._source
def Update(self):
""" Abstract. """
class ChangeListener(Listener):
""" Listener for ChangeEvent. The listener source needs to implement StateChanged(event). """
def Update(self, event):
""" Call the source to tell it the state has changed. """
if event.IsChange():
self._source.StateChanged(event)
class ProgressListener(Listener):
""" Listener for ProgressEvent. The listener source needs to implement ProgressChanged(event). """
def Update(self, event):
""" Call the source to tell it the state has changed. """
if event.IsProgress():
self._source.ProgressChanged(event)
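# Hedged usage sketch (the _Project class is hypothetical; the listener API above
# only requires that its source implements StateChanged(event)):
#   class _Project:
#       def __init__(self):
#           self.listener = ChangeListener(self)
#       def StateChanged(self, event):
#           print("state changed, source: %s" % event.GetSource())
#   project = _Project()
#   project.listener.Update(ChangeEvent(project))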
|
python
|
#code for feature1
|
python
|
import unittest
import pytest
from botocore.exceptions import ClientError
from localstack import config
from localstack.utils.aws import aws_stack
from localstack.utils.common import short_uid
from .lambdas import lambda_integration
from .test_integration import PARTITION_KEY, TEST_TABLE_NAME
TEST_STREAM_NAME = lambda_integration.KINESIS_STREAM_NAME
def should_run():
return config.is_env_true("TEST_ERROR_INJECTION")
class TestErrorInjection(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
if not should_run():
pytest.skip("skipping TestErrorInjection (TEST_ERROR_INJECTION not set or false)")
def test_kinesis_error_injection(self):
kinesis = aws_stack.connect_to_service("kinesis")
aws_stack.create_kinesis_stream(TEST_STREAM_NAME)
records = [{"Data": "0", "ExplicitHashKey": "0", "PartitionKey": "0"}]
# by default, no errors
test_no_errors = kinesis.put_records(StreamName=TEST_STREAM_NAME, Records=records)
assert test_no_errors["FailedRecordCount"] == 0
# with a probability of 1, always throw errors
config.KINESIS_ERROR_PROBABILITY = 1.0
test_all_errors = kinesis.put_records(StreamName=TEST_STREAM_NAME, Records=records)
assert test_all_errors["FailedRecordCount"] == 1
# reset probability to zero
config.KINESIS_ERROR_PROBABILITY = 0.0
def get_dynamodb_table(self):
dynamodb = aws_stack.connect_to_resource("dynamodb")
# create table with stream forwarding config
aws_stack.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY)
return dynamodb.Table(TEST_TABLE_NAME)
def assert_zero_probability_read_error_injection(self, table, partition_key):
# by default, no errors
test_no_errors = table.get_item(Key={PARTITION_KEY: partition_key})
assert test_no_errors["ResponseMetadata"]["HTTPStatusCode"] == 200
def test_dynamodb_error_injection(self):
table = self.get_dynamodb_table()
partition_key = short_uid()
self.assert_zero_probability_read_error_injection(table, partition_key)
# with a probability of 1, always throw errors
config.DYNAMODB_ERROR_PROBABILITY = 1.0
with self.assertRaises(ClientError):
table.get_item(Key={PARTITION_KEY: partition_key})
# reset probability to zero
config.DYNAMODB_ERROR_PROBABILITY = 0.0
def test_dynamodb_read_error_injection(self):
table = self.get_dynamodb_table()
partition_key = short_uid()
self.assert_zero_probability_read_error_injection(table, partition_key)
# with a probability of 1, always throw errors
config.DYNAMODB_READ_ERROR_PROBABILITY = 1.0
with self.assertRaises(ClientError):
table.get_item(Key={PARTITION_KEY: partition_key})
# reset probability to zero
config.DYNAMODB_READ_ERROR_PROBABILITY = 0.0
def test_dynamodb_write_error_injection(self):
table = self.get_dynamodb_table()
# by default, no errors
test_no_errors = table.put_item(Item={PARTITION_KEY: short_uid(), "data": "foobar123"})
self.assertEqual(200, test_no_errors["ResponseMetadata"]["HTTPStatusCode"])
# with a probability of 1, always throw errors
config.DYNAMODB_WRITE_ERROR_PROBABILITY = 1.0
with self.assertRaises(ClientError):
table.put_item(Item={PARTITION_KEY: short_uid(), "data": "foobar123"})
# BatchWriteItem throws ProvisionedThroughputExceededException if ALL items in Batch are Throttled
with self.assertRaises(ClientError):
table.batch_write_item(
RequestItems={
table.name: [  # RequestItems keys must be table name strings
{
"PutRequest": {
"Item": {
PARTITION_KEY: short_uid(),
"data": "foobar123",
}
}
}
]
}
)
# reset probability to zero
config.DYNAMODB_WRITE_ERROR_PROBABILITY = 0.0
|
python
|
import json
import os
import tkinter as tk
def cancelclick():
exit(0)
class GUI:
def okclick(self):
data = {"url": self.urlentry.get(),
"telegramAPIKEY": self.apikeyentry.get(),
"telegramCHATID": self.chatidentry.get(),
"databaseFile": self.databaseentry.get(),
"sleep": self.sleepeentry.get()
}
with open(self.file_name, 'w') as config_file:
json.dump(data, config_file)
config_file.close()
self.window.destroy()
pass
def __init__(self, file_name):
self.file_name = file_name
self.window = tk.Tk()
self.window.geometry('605x300')
urllabel = tk.Label(text="url")
self.urlentry = tk.Entry(width=100, justify=tk.CENTER)
databaselabel = tk.Label(text="databaseFile")
self.databaseentry = tk.Entry(width=100, justify="center")
apikeylabel = tk.Label(text="telegramAPIKEY")
self.apikeyentry = tk.Entry(width=100, justify="center")
chatidlabel = tk.Label(text="telegramCHATID")
self.chatidentry = tk.Entry(width=100, justify="center")
sleeplabel = tk.Label(text="sleep")
self.sleepeentry = tk.Entry(width=100, justify="center")
okbutton = tk.Button(self.window, text="OK", command=self.okclick)
cancelbutton = tk.Button(self.window, text="Cancel", command=cancelclick)
urllabel.grid(column=0, row=0)
self.urlentry.grid(column=0, row=1)
databaselabel.grid(column=0, row=2)
self.databaseentry.grid(column=0, row=3)
apikeylabel.grid(column=0, row=4)
self.apikeyentry.grid(column=0, row=5)
chatidlabel.grid(column=0, row=6)
self.chatidentry.grid(column=0, row=7)
sleeplabel.grid(column=0, row=8)
self.sleepeentry.grid(column=0, row=9)
okbutton.grid(column=0, row=10)
cancelbutton.grid(column=0, row=11)
if os.path.isfile(file_name):
with open(file_name) as config_file:
config = json.load(config_file)
self.urlentry.insert(0, config["url"])
self.apikeyentry.insert(0, config["telegramAPIKEY"])
self.chatidentry.insert(0, config["telegramCHATID"])
self.databaseentry.insert(0, config["databaseFile"])
self.sleepeentry.insert(0, config["sleep"])
config_file.close()
else:
self.databaseentry.insert(0, "database.db")
self.sleepeentry.insert(0, "10")
self.window.mainloop()
|
python
|
import io
from collections import deque
from concurrent.futures import ThreadPoolExecutor
import numba
from bampy.mt import CACHE_JIT, THREAD_NAME, DEFAULT_THREADS
from . import zlib
from ...bgzf import Block
from ...bgzf.reader import BufferReader, EmptyBlock, StreamReader, _Reader as __Reader
@numba.jit(nopython=True, nogil=True, cache=CACHE_JIT)
def inflate(data, buffer, offset=0):
zlib.raw_decompress(data, buffer[offset:])
return buffer, offset
class _Reader(__Reader):
"""
Base class for buffer and stream readers.
Provides Iterable interface to read in blocks.
"""
def __init__(self, input, threadpool: ThreadPoolExecutor):
"""
Constructor.
:param input: Block data source.
"""
super().__init__(input)
self.pool = threadpool
self.blockqueue = deque()
self.max_queued = 0
def __iter__(self):
return self
def __next__(self):
raise NotImplementedError()
def Reader(input, offset: int = 0, peek=None, threadpool: ThreadPoolExecutor = ThreadPoolExecutor(max_workers=DEFAULT_THREADS, thread_name_prefix=THREAD_NAME)) -> _Reader:
"""
Factory to provide a unified reader interface.
Resolves if input is randomly accessible and provides the appropriate _Reader implementation.
:param input: A stream or buffer object.
:param offset: If input is a buffer, the offset into the buffer to begin reading. Ignored otherwise.
:param peek: Data consumed from stream while peeking. Will be prepended to read data. Ignored if buffer passed as input.
:return: An instance of StreamReader or BufferReader.
"""
if isinstance(input, (io.RawIOBase, io.BufferedIOBase)):
return StreamReader(input, peek, threadpool)
else:
return BufferReader(input, offset, threadpool)
class StreamReader(_Reader):
"""
Implements _Reader to handle input data that is not accessible through a buffer interface.
"""
def __init__(self, input, peek=None, threadpool: ThreadPoolExecutor = ThreadPoolExecutor(max_workers=DEFAULT_THREADS, thread_name_prefix=THREAD_NAME)):
"""
Constructor.
:param input: Stream object to read from.
:param peek: Data consumed from stream while peeking. Will be prepended to read data.
"""
super().__init__(input, threadpool)
self._peek = peek
def __next__(self):
if not self.max_queued or not self.blockqueue[0].done(): self.max_queued += 1
try:
while len(self.blockqueue) < self.max_queued:
block, cdata = Block.from_stream(self._input, self._peek)
self._peek = None
self.total_in += len(block)
self.total_out += block.uncompressed_size
if block.uncompressed_size:
self.blockqueue.append(self.pool.submit(inflate, cdata, memoryview(bytearray(block.uncompressed_size)))) # TODO reuse buffers
else:
raise EmptyBlock()
except EOFError:
pass
if not len(self.blockqueue):
raise StopIteration()
self.buffer = self.blockqueue.popleft().result()
self.remaining = len(self.buffer)
return self.buffer
class BufferReader(_Reader):
"""
Implements _Reader to handle input data that is accessible through a buffer interface.
"""
def __init__(self, input, offset=0, threadpool: ThreadPoolExecutor = ThreadPoolExecutor(max_workers=DEFAULT_THREADS, thread_name_prefix=THREAD_NAME)):
"""
Constructor.
:param input: Buffer object to read from.
:param offset: The offset into the input buffer to begin reading from.
"""
super().__init__(input, threadpool)
self._len = len(input)
self.offset = offset
def __next__(self):
if not self.max_queued or not self.blockqueue[0].done(): self.max_queued += 1
while self.offset < self._len and len(self.blockqueue) < self.max_queued:
block, cdata = Block.from_buffer(self._input, self.offset)
block_len = len(block)
self.offset += block_len
self.total_in += block_len
self.total_out += block.uncompressed_size
if block.uncompressed_size:
self.blockqueue.append(self.pool.submit(inflate, cdata, memoryview(bytearray(block.uncompressed_size)))) # TODO reuse buffers
else:
raise EmptyBlock()
if not len(self.blockqueue):
raise StopIteration()
self.buffer = self.blockqueue.popleft().result()
self.remaining = len(self.buffer)
return self.buffer
|
python
|
"""test format
Revision ID: b45b1bf02a80
Revises: a980b74a499f
Create Date: 2022-05-11 22:31:19.613893
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b45b1bf02a80'
down_revision = 'a980b74a499f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
python
|
from myhdl import *
import random
from random import randrange
from .alu import def_alu
from .ctrl import def_ctrl
random.seed(4)
def test_ctrl():
"""Test bench for the ALU control.
"""
clk = Signal(bool(0))
reset = ResetSignal(0, active=1, async=True)
alu_op = Signal(intbv(0)[2:])
funct = Signal(intbv(0)[6:])
alu_ctrl = Signal(intbv(0)[4:])
alu_op1 = Signal(intbv(0, min=-2**32, max=2**32))
alu_op2 = Signal(intbv(0, min=-2**32, max=2**32))
alu_res = Signal(intbv(0, min=-2**32, max=2**32))
alu_z = Signal(bool(0))
ctrl_inst = def_ctrl(alu_op, funct, alu_ctrl)
alu_inst = def_alu(alu_op1, alu_op2, alu_ctrl, alu_res, alu_z)
@always(delay(10))
def tb_clk():
clk.next = not clk
@instance
def tb_ctrl():
oplist = [0,1,2] # 2bit : [00,01,10]
functlist = [32,34,36,37,42] #[100000,100010,100100,100101,101010]
for ii in range(100):
r_op = oplist[randrange(3)]
r_func = functlist[randrange(5)]
op1, op2 = randrange(-2**31, 2**31), randrange(-2**31, 2**31)
if (r_op == 0):
res = op1 + op2
elif r_op == 1:
res = op1 - op2
elif r_op == 2:
if r_func == 32:
res = op1 + op2
elif r_func == 34:
res = op1 - op2
elif r_func == 36:
res = op1 & op2
elif r_func == 37:
res = op1 | op2
elif r_func == 42:
if op1 < op2:
res = 1
else: res = 0
alu_op.next = r_op
funct.next = r_func
alu_op1.next,alu_op2.next = op1, op2
yield delay(10)
assert res == alu_res
if res == 0:
assert alu_z == 1
raise StopSimulation
# run simulation on test bench
sim = Simulation(ctrl_inst, alu_inst, tb_clk, tb_ctrl)
sim.run()
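# Reference for the encoding exercised above (the standard single-cycle MIPS ALU control
# assumed by this test bench):
#   alu_op 00 -> add (lw/sw address calculation), 01 -> subtract (beq),
#   alu_op 10 -> R-type, decoded from funct: 100000 add, 100010 sub,
#               100100 and, 100101 or, 101010 set-on-less-than.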
|
python
|
import asyncio
import logging
from datetime import datetime, timedelta
from .login import BiliUser
from .api import WebApi, medals, get_info, WebApiRequestError
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("dailyclockin")
class DailyClockIn:
def __init__(self, user: BiliUser):
self.user = user
async def do_work(self):
logger.info("开始每日弹幕打卡任务")
err_num = 0
try:
rooms = []
async for m in medals(self.user.session):
rooms.append(m)
try:
info = await get_info(self.user.session, m["roomid"])
except KeyError:
continue
try:
await WebApi.send_msg(self.user.session, info["room_id"], self.user.csrf)
logger.info(f"{m['uname']}({m['target_id']})直播间打卡成功")
except Exception as e:
message_err = f"{m['uname']}({m['target_id']})直播间打卡失败: {e}"
logger.error(message_err)
self.user.message_err.append(message_err)
err_num += 1
await asyncio.sleep(6)
except Exception as e:
logger.error(e)
self.user.message_err.append(e)
err_num += 1
self.user.message.append(
f"弹幕打卡成功: {len(rooms) - err_num}/{len(rooms)}"
)
if self.user.ruid:
medal_0 = (await WebApi.get_weared_medal(self.user.session, self.user.csrf))
if medal_0:
medal_0_id = medal_0['medal_id']
await asyncio.sleep(1)
await WebApi.wear_medal(
self.user.session, self.user.medal_id, self.user.csrf
) # wear medal
medal = await WebApi.get_weared_medal(self.user.session, self.user.csrf)
if medal["today_feed"] == 0 and medal['level'] > 20:
self.user.message_err.append(f"{medal['medal_name']}{medal['level']}级大于20级,打卡不加亲密度,只会点亮牌子")
return
if medal["today_feed"] == 0:
self.user.message_err.append(f"你设置的主播亲密度获取失败")
return
now = datetime.now()
now += timedelta(
days=(medal["next_intimacy"] - medal["intimacy"]) // medal["today_feed"]
+ 1
)
message = f"目前:{medal['medal_name']}{medal['level']}级\n今日亲密度:{medal['today_feed']}/{medal['day_limit']}\n当前等级上限:{medal['intimacy']}/{medal['next_intimacy']}\n预计还需要{(medal['next_intimacy'] - medal['intimacy']) // medal['today_feed'] + 1}天({now.strftime('%m.%d')})到达{medal['level'] + 1}级 "
self.user.message.append(message)
if medal_0:
await asyncio.sleep(1)
await WebApi.wear_medal(
self.user.session, medal_0_id, self.user.csrf
)
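# Hypothetical entry point, shown only to illustrate how DailyClockIn is driven; the
# BiliUser constructor and its login flow live elsewhere in this project, so the call
# below is an assumption, not the project's actual launcher.
#
#     async def main():
#         user = BiliUser(...)                 # credentials / cookies as BiliUser expects
#         await DailyClockIn(user).do_work()
#
#     asyncio.run(main())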
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from _mysql import result
from MySQLdb.constants.CR import IPSOCK_ERROR
__author__ = 'pzhang'
import tornado.web
import json
import time
from db_util import mysql_utils
ip2num = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])])
num2ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
def get_mask_int(mask):
sum=0
for i in range(mask):
sum = sum*2+1
sum = sum << (32-mask)
return sum
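# Worked examples for the helpers above (Python 2 integer division keeps num2ip exact):
#   ip2num('1.1.1.0')  -> 16843008
#   num2ip(16843008)   -> '1.1.1.0'
#   get_mask_int(24)   -> 4294967040   (0xFFFFFF00, the /24 netmask)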
class ms_customer_handler(tornado.web.RequestHandler):
def initialize(self):
super(ms_customer_handler, self).initialize()
self.resp_func = {'ms_cust_get_customer':self.get_customer,
'ms_cust_add_customer':self.add_customer,
'ms_cust_del_customer':self.del_customer,
'ms_cust_update_customer':self.update_customer,
'ms_cust_get_customer_by_ip':self.get_customer_by_ip,
'ms_cust_add_flow':self.add_flow,
'ms_cust_del_flow':self.del_flow,
'ms_cust_update_flow':self.update_flow,
'ms_cust_set_flow_speed':self.set_flow_speed
}
self.log = 0
self.ip_cust_map = {}
pass
def form_response(self, req):
resp = {}
resp['response'] = req['request']
#resp['ts'] = req['ts']
resp['ts'] = time.strftime("%Y%m%d%H%M%S")
resp['trans_id'] = req['trans_id']
resp['err_code'] = 0
resp['msg'] = ''
self.set_header('Content-Type', 'application/json')
return resp
def post(self):
ctnt = self.request.body
if self.log == 1:
print 'The request:'
print str(ctnt)
req = json.loads(str(ctnt))
resp = self.form_response(req)
result = self.resp_func[req['request']](req['args'])
resp['result'] = result
if self.log == 1:
print 'response:'
print json.dumps(resp)
self.write(json.dumps(resp))
pass
def array_to_inlist(self, arr):
lst = '(' + ",".join(arr) + ')'
return lst
def get_customer(self, args):
customers = {}
sql_str = 'select * from t_customer join t_customer_ip on t_customer_ip.customer_id = t_customer.id'
if 'uids' in args:
uids = args['uids']
lst = self.array_to_inlist(uids)
sql_str += ' where t_customer.id in' + lst
db = mysql_utils('customer')
results = db.exec_sql(sql_str)
db.close()
if results is None:
return {'customers':[]}
cs_map = {}
cs = []
for c in results:
uid = str(c[0])
if uid in cs_map:
one_c = cs_map[uid]
else:
one_c = {'uid':str(c[0]), 'name':c[1] }
cs_map[uid] = one_c
ip = str(c[6]) + '/' + str(c[7])
#FIXME: src and dst.
if 'ips' in one_c:
one_c['ips'].append({'dst':ip, 'src':ip, 'uid':str(c[3])})
else:
one_c['ips'] = [{'dst':ip, 'src':ip, 'uid':str(c[3])}]
pass
cs = [cs_map[c] for c in cs_map]
customers['customers'] = cs
return customers
def del_customer(self, args):
uids = args['uids']
lst = self.array_to_inlist(uids)
sql_str = 'delete from t_customer where t_customer.id in %s' % lst
# print sql_str
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return result
def add_customer(self, args):
customer = {}
customer['name'] = args['name']
#customer['uid'] = args['uid']
#print customer
#insert into t_customer values (1, 'Google');
#sql_str = 'insert into t_customer(id,name) values (%s, \'%s\')' % (customer['uid'], customer['name'])
sql_str = 'insert into t_customer(name) values (\'%s\')' % customer['name']
#print sql_str
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
if not result:
db.commit()
customer_id = db.exec_sql('SELECT LAST_INSERT_ID()')[0][0]
#print customer_id
#insert into t_customer_ip values (1, 1, 16843009, '1.1.1.0', 4294967040, '255.255.255.0');
if args.has_key('ips'):
ips = args['ips']
for ip in ips:
ip_addr = ip['src'].split('/')[0]
ip_mask = int(ip['src'].split('/')[1])
sql_str = 'insert into t_customer_ip(customer_id,netip,netip_str,mask_bit,mask_int) values (%s, %s, \'%s\', %s, %s)' \
% (customer_id, ip2num(ip_addr), ip_addr, ip_mask, get_mask_int(ip_mask))
print sql_str
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return {"cust_uid": customer_id}
def update_customer(self,args):
customer = {}
name = args['name']
uid = args['uid']
if args.has_key('ips'):
ips = args['ips']
#check if customer exist
sql_str = 'select * from t_customer where id = %s' % uid
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
#print result
#if not exist
if not result:
sql_str = 'insert into t_customer (id, name) VALUES (%s, \'%s\')' % (uid, name)
ret = db.exec_sql(sql_str)
db.commit()
#if exist
else:
sql_str = 'update t_customer set name = \'%s\' where id = %s' % (name, uid)
print sql_str
db.exec_sql(sql_str)
db.commit()
pass
db.close()
# To pzhang: Are you crazy?
# self.del_customer(args)
# self.add_customer(args)
pass
def get_customer_by_ip(self, args):
ips = args['ips']
cs = {}
for ip in ips:
# sql_str = 'select * from t_customer_ip inner join t_customer on t_customer_ip.customer_id = t_customer.id ' + \
# 'and t_customer_ip.netip & t_customer_ip.mask_int = %s & t_customer_ip.mask_int' % ip2num(ip)
# results = self.db.exec_sql(sql_str)
# if results:
# # print results
# cs[ip] = {'name':results[0][7], 'cust_uid':results[8]}
times = self.application.split_bits
mask = 0xFFFFFFFF
match = 0
nets = self.application.ip_cust_map
sub_ip = ip2num(ip)
while times > 0:
if sub_ip in nets:
match = 1
break
mask <<= 1
sub_ip &= mask
times -= 1
if match:
cs[ip] = nets[sub_ip]
pass
return cs
def add_flow(self,args):
customer = {}
customer_id = args['cust_uid']
#check if customer exist
sql_str = 'select * from t_customer where id=%s' % customer_id
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
if not result:
return
#insert into t_customer_ip values (1, 1, 16843009, '1.1.1.0', 4294967040, '255.255.255.0');
flows = []
if args.has_key('flows'):
ips = args['flows']
for ip in ips:
one_flow = {}
one_flow['src'] = ip['src']
ip_addr = ip['src'].split('/')[0]
ip_mask = int(ip['src'].split('/')[1])
sql_str = 'insert into t_customer_ip(customer_id,netip,netip_str,mask_bit,mask_int) values (%s, %s, \'%s\', %s, %s)' \
% (customer_id, ip2num(ip_addr), ip_addr, ip_mask, get_mask_int(ip_mask))
print sql_str
result = db.exec_sql(sql_str)
if not result:
db.commit()
flow_id = db.exec_sql('SELECT LAST_INSERT_ID()')[0][0]
ip['uid'] = str(flow_id)
db.close()
#return the request object. the only difference is each added flow has 'uid' attrib
return args
def del_flow(self,args):
uids = args['flow_uids']
lst = self.array_to_inlist(uids)
sql_str = 'delete from t_customer_ip where t_customer_ip.id in %s' % lst
# print sql_str
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return result
def update_flow(self,args):
flows = args['flows']
db = mysql_utils('customer')
for flow in flows:
flow_id = flow['uid']
if 'src' in flow:
ip_addr = flow['src'].split('/')[0]
ip_mask = int(flow['src'].split('/')[1])
sql_str = 'update t_customer_ip set netip=%s,netip_str=\'%s\',mask_bit=%s,mask_int=%s where t_customer_ip.id=%s' \
% (ip2num(ip_addr), ip_addr, ip_mask, get_mask_int(ip_mask), flow_id)
print sql_str
result = db.exec_sql(sql_str)
if not result:
db.commit()
pass
db.close()
pass
def set_flow_speed(self,args):
pass
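# Hedged wiring sketch: how a handler class like this is normally mounted in a Tornado
# application. The URL pattern and port are assumptions, not taken from this file.
#
#     import tornado.ioloop
#     application = tornado.web.Application([(r'/ms_customer', ms_customer_handler)])
#     application.listen(8600)
#     tornado.ioloop.IOLoop.instance().start()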
|
python
|
import os
from awacs.aws import Policy, Allow, Statement, Principal, Action
from cfn_encrypt import Encrypt, EncryptionContext, SecureParameter, GetSsmValue
from troposphere import (Template, iam, GetAtt, Join, Ref, logs, Output, Sub, Parameter, awslambda,
Base64, Export)
from sys import argv
do_example = False
for arg in argv:
if '-we' in arg:
do_example = True
t = Template()
kms_key_arn = t.add_parameter(Parameter(
"KmsKeyArn",
Type="String",
Description="KMS alias ARN for lambda",
))
if do_example:
plain_text = t.add_parameter(Parameter(
"PlainText",
Type="String",
Description="Text that you want to encrypt ( Hello World )",
Default="Hello World",
NoEcho=True
))
# Create loggroup
log_group_ssm = t.add_resource(logs.LogGroup(
"LogGroupSsm",
LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "ssm"])]),
RetentionInDays=14
))
log_group_get_ssm_value = t.add_resource(logs.LogGroup(
"LogGroupGetSsmValue",
LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "get-ssm-value"])]),
RetentionInDays=14
))
log_group_simple = t.add_resource(logs.LogGroup(
"LogGroupSimple",
LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "simple"])]),
RetentionInDays=14
))
def lambda_from_file(python_file):
"""
Reads a python file and returns a awslambda.Code object
:param python_file:
:return:
"""
lambda_function = []
with open(python_file, 'r') as f:
lambda_function.extend(f.read().splitlines())
return awslambda.Code(ZipFile=(Join('\n', lambda_function)))
kms_policy = iam.Policy(
PolicyName="encrypt",
PolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Action=[
Action("kms", "Encrypt"),
],
Resource=[Ref(kms_key_arn)]
)
],
)
)
ssm_policy = iam.Policy(
PolicyName="ssm",
PolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Action=[
Action("ssm", "PutParameter"),
Action("ssm", "DeleteParameter"),
],
Resource=[Join("", ["arn:aws:ssm:", Ref("AWS::Region"), ":", Ref("AWS::AccountId"), ":parameter/*"])]
),
Statement(
Effect=Allow,
Action=[
Action("ssm", "DescribeParameters")
],
Resource=["*"]
)
],
)
)
encrypt_lambda_role = t.add_resource(iam.Role(
"EncryptLambdaRole",
AssumeRolePolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Principal=Principal("Service", "lambda.amazonaws.com"),
Action=[Action("sts", "AssumeRole")]
)
]),
Path="/",
ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"],
Policies=[
kms_policy
]
))
ssm_lambda_role = t.add_resource(iam.Role(
"SsmLambdaRole",
AssumeRolePolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Principal=Principal("Service", "lambda.amazonaws.com"),
Action=[Action("sts", "AssumeRole")]
)
]),
Path="/",
ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"],
Policies=[
kms_policy,
ssm_policy
]
))
get_ssm_value_role = t.add_resource(iam.Role(
"GetSsmValueRole",
AssumeRolePolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Principal=Principal("Service", "lambda.amazonaws.com"),
Action=[Action("sts", "AssumeRole")]
)
]),
Path="/",
ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"],
Policies=[
iam.Policy(
PolicyName="decrypt",
PolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Action=[
Action("kms", "Decrypt"),
],
Resource=[Ref(kms_key_arn)]
)
],
)
),
iam.Policy(
PolicyName="ssm",
PolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Action=[
Action("ssm", "GetParameterHistory"),
],
Resource=[
Join("", ["arn:aws:ssm:", Ref("AWS::Region"), ":", Ref("AWS::AccountId"), ":parameter/*"])]
),
Statement(
Effect=Allow,
Action=[
Action("ssm", "DescribeParameters")
],
Resource=["*"]
)
],
)
)
]
))
simple_encrypt_filename = os.path.join(os.path.dirname(__file__), "cfn_encrypt/simple_encrypt.py")
ssm_parameter_filename = os.path.join(os.path.dirname(__file__), "cfn_encrypt/ssm_parameter.py")
get_ssm_value_filename = os.path.join(os.path.dirname(__file__), "cfn_encrypt/get_ssm_value.py")
encrypt_lambda = t.add_resource(awslambda.Function(
"EncryptLambda",
FunctionName=Join("-", [Ref("AWS::StackName"), "simple"]),
DependsOn=[log_group_simple.title],
Handler="index.handler",
Role=GetAtt(encrypt_lambda_role, "Arn"),
Runtime="python3.7",
Timeout=300,
MemorySize=1536,
Code=lambda_from_file(simple_encrypt_filename),
))
ssm_parameter_lambda = t.add_resource(awslambda.Function(
"SsmParameterLambda",
FunctionName=Join("-", [Ref("AWS::StackName"), "ssm"]),
DependsOn=[log_group_ssm.title],
Handler="index.handler",
Role=GetAtt(ssm_lambda_role, "Arn"),
Runtime="python3.7",
Timeout=300,
MemorySize=1536,
Code=lambda_from_file(ssm_parameter_filename),
))
get_ssm_value_lambda = t.add_resource(awslambda.Function(
"GetSsmValueLambda",
FunctionName=Join("-", [Ref("AWS::StackName"), "get-ssm-value"]),
DependsOn=[log_group_get_ssm_value.title],
Handler="index.handler",
Role=GetAtt(get_ssm_value_role, "Arn"),
Runtime="python3.7",
Timeout=300,
MemorySize=1536,
Code=lambda_from_file(get_ssm_value_filename),
))
t.add_output(Output(
"EncryptLambdaArn",
Description="Encrypt lambda arn",
Value=GetAtt(encrypt_lambda, "Arn"),
Export=Export(
Sub(
"${AWS::StackName}-EncryptLambdaArn"
)
)
))
t.add_output(Output(
"KmsKeyArn",
Description="My secure parameter name",
Value=Ref(kms_key_arn),
Export=Export(
Sub(
"${AWS::StackName}-KmsKeyArn"
)
)
))
t.add_output(Output(
"SsmParameterLambdaArn",
Description="Ssm parameter lambda arn",
Value=GetAtt(ssm_parameter_lambda, "Arn"),
Export=Export(
Sub(
"${AWS::StackName}-SsmParameterLambdaArn"
)
)
))
t.add_output(Output(
get_ssm_value_lambda.title + "Arn",
Description="get ssm value lambda arn",
Value=GetAtt(get_ssm_value_lambda, "Arn"),
Export=Export(
Sub(
"${AWS::StackName}-" + get_ssm_value_lambda.title + "Arn",
)
)
))
if do_example:
my_encrypted_value = t.add_resource(Encrypt(
"MyEncryptedValue",
ServiceToken=GetAtt(encrypt_lambda, "Arn"),
Base64Data=Base64(Ref(plain_text)),
KmsKeyArn=Ref(kms_key_arn)
))
my_encrypted_value_with_context = t.add_resource(Encrypt(
"MyEncryptedValueWithContext",
ServiceToken=GetAtt(encrypt_lambda, "Arn"),
Base64Data=Base64(Ref(plain_text)),
KmsKeyArn=Ref(kms_key_arn),
EncryptionContext=EncryptionContext(
Name="Test",
Value="Test"
)
))
my_secure_parameter = t.add_resource(SecureParameter(
"MySecureParameter",
ServiceToken=GetAtt(ssm_parameter_lambda, "Arn"),
Name="MySecureParameter",
Description="Testing secure parameter",
Value=Ref(plain_text),
KeyId=Ref(kms_key_arn)
))
my_decrypted_value = t.add_resource(GetSsmValue(
"MyDecryptedValue",
ServiceToken=GetAtt(get_ssm_value_lambda, "Arn"),
Name=Ref(my_secure_parameter),
KeyId=Ref(kms_key_arn),
Version=GetAtt(my_secure_parameter,"Version")
))
t.add_output(Output(
"MySecureParameter",
Description="My secure parameter name",
Value=Ref(my_secure_parameter)
))
t.add_output(Output(
"EncryptedValue",
Description="Encrypted value, base64 encoded",
Value=GetAtt(my_encrypted_value, "CiphertextBase64"),
))
t.add_output(Output(
"EncryptedValueWithContext",
Description="Encrypted value, base64 encoded",
Value=GetAtt(my_encrypted_value_with_context, "CiphertextBase64"),
))
t.add_output(Output(
my_decrypted_value.title + "Value",
Value=GetAtt(my_decrypted_value, "Value")
))
t.add_output(Output(
my_decrypted_value.title + "Version",
Value=GetAtt(my_decrypted_value, "Version")
))
print(t.to_json())
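# Usage note (file and stack names below are illustrative): the script prints the
# CloudFormation template to stdout, so a typical round trip is
#   python generate_template.py -we > stack.json
#   aws cloudformation deploy --template-file stack.json --stack-name cfn-encrypt-demo \
#       --capabilities CAPABILITY_IAM --parameter-overrides KmsKeyArn=<your-kms-key-arn>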
|
python
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import UpdateMethod
NOVA_INSTANCE_DATASOURCE = 'nova.instance'
OPTS = [
cfg.StrOpt('transformer',
default='vitrage.datasources.nova.instance.transformer.'
'InstanceTransformer',
help='Nova instance transformer class path',
required=True),
cfg.StrOpt('driver',
default='vitrage.datasources.nova.instance.driver.'
'InstanceDriver',
help='Nova instance driver class path',
required=True),
cfg.StrOpt('update_method',
default=UpdateMethod.PUSH,
help='None: updates only via Vitrage periodic snapshots.'
'Pull: updates every [changes_interval] seconds.'
'Push: updates by getting notifications from the'
' datasource itself.',
required=True),
]
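# Minimal sketch of how such an option list is consumed; the registration call is an
# assumption about the calling code, not part of this module.
#
#     conf = cfg.ConfigOpts()
#     conf.register_opts(OPTS, group=NOVA_INSTANCE_DATASOURCE)
#     assert conf[NOVA_INSTANCE_DATASOURCE].update_method == UpdateMethod.PUSH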
|
python
|
import os
import pandas as pd
import glob
import random
import shutil
INDEX = {
'Shift_png':0,
'Random_png':1,
'Original_png':2,
'Expand_png':3,
'Contract_png':4
}
def make_label_csv(input_path,csv_path,mid_dir=None):
info = []
for subdir in os.scandir(input_path):
index = INDEX[subdir.name]
if mid_dir is None:
path_list = glob.glob(os.path.join(subdir.path,"*.*g"))
else:
mid_path = os.path.join(subdir.path,mid_dir)
# print(mid_path)
path_list = glob.glob(os.path.join(mid_path,"*.*g"))
sub_info = [[item,index] for item in path_list]
info.extend(sub_info)
random.shuffle(info)
# print(len(info))
col = ['id','label']
info_data = pd.DataFrame(columns=col,data=info)
info_data.to_csv(csv_path,index=False)
def make_csv(input_path,csv_path):
id_list = glob.glob(os.path.join(input_path,'*.*g'))
print(len(id_list))
info = {'id':[]}
info['id'] = id_list
df = pd.DataFrame(data=info)
df.to_csv(csv_path,index=False)
if __name__ == "__main__":
# input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/MLC/train'
# csv_path = './csv_file/MLC.csv'
# input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/MLC/test'
# csv_path = './csv_file/MLC_test.csv'
# make_label_csv(input_path,csv_path)
input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/raw_data/train'
csv_path = './csv_file/MLC_gamma2mm.csv'
# input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/raw_data/test'
# csv_path = './csv_file/MLC_gamma2mm_test.csv'
make_label_csv(input_path,csv_path,mid_dir='gamma2mm')
|
python
|
# coding=utf-8
import numpy as np
from time import time
from aux import *
'''
Opens a file from its name and stores each of its lines in a position of a list.
Parameters:
    - fileName: Name of the file to open.
Return:
    - list with the words that make up the Swadesh list of a given language
'''
def openFile(fileName):
f = open(fileName, 'r')
llistaParaules = list()
lines = f.readlines()
for line in lines:
llistaParaules.append(line) # afegim a cada posició de la llista una paraula
return llistaParaules
'''
Determines whether a phoneme is a vowel or not.
Parameters:
    - char: Phoneme
Return:
    - True if char is a vowel
    - False if char is not a vowel (and therefore is a consonant)
'''
def esVocal(char):
if char in vocals_transcr:
return True # és vocal
return False # no és vocal
'''
Computes the distance between two words. To do so it builds a matrix out of a dictionary and computes the minimum
number of edits needed to get from one word to the other. The distance is normalised by the length of the longer word.
The total distance between the two words is found in the bottom-right position of the matrix.
Parameters:
    - str1: one of the words to compare
    - str2: the other word to compare
Return:
    - The normalised distance between the two words
'''
def distancia_paraula_lexic(str1, str2):
d = dict() # declarem un diccionari que utilitzarem de matriu
# numerem les lletres de cada paraula
for i in range(len(str1) + 1):
d[i] = dict()
d[i][0] = i
for i in range(len(str2) + 1):
d[0][i] = i
for i in range(1, len(str1) + 1):
for j in range(1, len(str2) + 1):
# calculem tots els elements de la matriu
d[i][j] = min(d[i][j - 1] + 1, d[i - 1][j] + 1, d[i - 1][j - 1] + (not str1[i - 1] == str2[j - 1]))
#print ('distància entre ', str1, " i ", str2, '=', d[len(str1)][len(str2)])
distancia = d[len(str1)][len(str2)] ##retornem el valor de la última posició de la matriu
normalitzador = max(len(str1), len(str2)) # calculem el nombre de lletres de la paraula més llarga
res = distancia / normalitzador # normalitzem la distància
return res
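# Worked example: distancia_paraula_lexic('casa', 'cassa') needs a single insertion, so
# the raw edit distance is 1 and, with true division, the normalised result is
# 1 / max(4, 5) = 0.2.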
'''
Computes the distance between the phonetic transcriptions of two words. To do so it builds a matrix out of a
dictionary and adapts the Levenshtein distance so that the substitution cost is the distance between the two
phonemes involved (computed by distanciaFonemes(fonema1, fonema2)).
The total distance between the two transcriptions is found in the bottom-right position of the matrix.
Parameters:
    - str1: one of the transcriptions to compare
    - str2: the other transcription to compare
Return:
    - The normalised distance between the two transcriptions
'''
def distancia_paraula_fonema(str1, str2):
d = dict() # declarem un diccionari que utilitzarem de matriu
# numerem les lletres de cada paraula
for i in range(len(str1) + 1):
d[i] = dict()
d[i][0] = i
for i in range(len(str2) + 1):
d[0][i] = i
for i in range(1, len(str1) + 1):
for j in range(1, len(str2) + 1):
# calculem tots els elements de la matriu
d[i][j] = min(d[i][j - 1] + 1, d[i - 1][j] + 1,
d[i - 1][j - 1] + distanciaFonemes(str1[i - 1], str2[j - 1]))
#print ('distància entre ', str1, ' i ', str2, ' = ', d[len(str1)][len(str2)])
distancia = d[len(str1)][len(str2)] ##retornem el valor de la última posició de la matriu
normalitzador = max(len(str1), len(str2)) # calculem el nombre de lletres de la paraula més llarga
res = distancia / normalitzador # normalitzem la distància
return res
'''
Computes the distance between two phonemes. It first checks whether the two phonemes being compared are both vowels
or both consonants. If both are vowels it calls distanciaVocals(fonema1, fonema2), which computes the distance
between two vowel sounds. If both are consonants it calls distanciaConsonants(fonema1, fonema2), which computes the
distance between two consonant sounds. If one phoneme is a vowel and the other a consonant, the operation is given
a distance of 1.
Parameters:
    - fonema1
    - fonema2
Return:
    - The distance between the two phonemes
'''
def distanciaFonemes(fonema1, fonema2):
if esVocal(fonema1) and esVocal(fonema2): # mirem si els dos fonemes són vocals
distancia = distanciaVocals(fonema1, fonema2) # calculem la distància entre els fonemes
elif not (esVocal(fonema1)) and not (esVocal(fonema2)): # mirem si els dos fonemes són consonants
distancia = distanciaConsonants(fonema1, fonema2) # calculem la distància entre els fonemes
else:
#print("Vocal i consonant!")
distancia = 1 # si tractem amb una vocal i una consonant, la distància és màxima
#print ("Distància entre ", fonema1, " i ", fonema2, " = ", distancia)
return distancia
'''
Computes the accumulated lexical distance between two languages. It adds up the distances of all the words of the
two languages and normalises the sum by dividing it by 207, the number of words in each list.
Parameters:
    - idioma1: list of words of a language, written from a lexical point of view
    - idioma2: list of words of a language, written from a lexical point of view
Return:
    - The total distance between the two languages. It is a value between 0 and 1.
'''
def distanciaIdioma_lexic(idioma1, idioma2):
distanciaAcumulada = 0
for x in range(1, 207):
distanciaAcumulada += distancia_paraula_lexic(idioma1[x], idioma2[x]) # calculem la distància entre els idiomes
distanciaAcumulada = distanciaAcumulada / 207 # dividim pel nombre de paraules per normalitzar
#print ("La distància acumulada entre ", idioma1[0], " i ", idioma2[0], "és de ", distanciaAcumulada)
return distanciaAcumulada
'''
Computes the accumulated phonetic distance between two languages. It adds up the distances of all the words of the
two languages and normalises the sum by dividing it by 207, the number of words in each list.
Parameters:
    - idioma1: list of phonetic transcriptions of the words of a language
    - idioma2: list of phonetic transcriptions of the words of a language
Return:
    - The total distance between the two languages. It is a value between 0 and 1.
'''
def distanciaIdioma_fonetic(idioma1, idioma2):
distanciaAcumulada = 0
for x in range(1, 207):
distanciaAcumulada += distancia_paraula_fonema(idioma1[x],
idioma2[x]) # calculem la distància entre els idiomes
distanciaAcumulada = distanciaAcumulada / 207 # dividim pel nombre de paraules per normalitzar
#print ("La distància acumulada entre ", idioma1[0], " i ", idioma2[0], "és de ", distanciaAcumulada)
return distanciaAcumulada
'''
Computes the phonetic distance between two vowels. It looks up the position each vowel occupies in the table of
vowel phonemes, obtains its indices, and compares them to see which features they share (that is, whether the
phonemes match in row, column and column side of the matrix that models the vowel sounds). If they share all three
features the distance is 0; if they share 2 features the distance is 1/3; if they share 1 the distance is 2/3; and
if they share no feature the distance is 1.
The phoneme w gets special treatment because it occupies the same position as the phoneme u: they express the same
sound in different situations. For simplicity the matrix only holds the u sound and, to account for the w sound, a
few ifs decide which position it should occupy.
Parameters:
    - v1: phoneme to compare
    - v2: phoneme to compare
Return:
    - The distance between the two compared phonemes, which can be 0, 1/3, 2/3 or 1
'''
def distanciaVocals(v1, v2):
denominador = 3
numerador = 3
v1_pos = (0, 0, 0)
v2_pos = (0, 0, 0)
# calculem els índexs de la primera vocal
for i in range(0, 7):
for j in range(0, 3):
for k in range(0, 2):
if v1 == vocals_transcr_mat[i][j][k]:
v1_pos = (i, j, k)
#print(v1_pos)
# calculem els índexs de la segona vocal
for i in range(0, 7):
for j in range(0, 3):
for k in range(0, 2):
if v2 == vocals_transcr_mat[i][j][k]:
v2_pos = (i, j, k)
#print(v2_pos)
# si el fonema és w, calculem la seva posició directament perquè no es troba a la matriu
if v1 == 'w':
v1_pos = (0, 2, 1)
if v2 == 'w':
v2_pos = (0, 2, 1)
for i in range(0, 3):
if v1_pos[i] == v2_pos[i]:
numerador -= 1
distancia = numerador / denominador
#print ("Distància entre les vocals ", v1, " i ", v2, " = ", distancia)
return distancia
'''
Computes the phonetic distance between two consonants. It looks up the position each consonant occupies in the
table of consonant phonemes, obtains its indices, and compares them to see which features they share (that is,
whether the phonemes match in row, column and column side of the matrix that models the consonant sounds). If they
share all three features the distance is 0; if they share 2 features the distance is 1/3; if they share 1 the
distance is 2/3; and if they share no feature the distance is 1.
Parameters:
    - c1: phoneme to compare
    - c2: phoneme to compare
Return:
    - The distance between the two compared phonemes, which can be 0, 1/3, 2/3 or 1
'''
def distanciaConsonants(c1, c2):
denominador = 3
numerador = 3
c1_pos = (0, 0, 0)
c2_pos = (0, 0, 0)
# calculem els índexs de la primera consonant
for i in range(0, 8):
for j in range(0, 9):
for k in range(0, 2):
if c1 == conson_transcr_mat[i][j][k]:
c1_pos = (i, j, k)
#print(c1_pos)
# calculem els índexs de la segona consonant
for i in range(0, 8):
for j in range(0, 9):
for k in range(0, 2):
if c2 == conson_transcr_mat[i][j][k]:
c2_pos = (i, j, k)
#print(c2_pos)
for i in range(0, 3):
if c1_pos[i] == c2_pos[i]:
numerador -= 1
distancia = numerador / denominador
#print ("Distància entre les consonants ", c1, " i ", c2, " = ", distancia)
return distancia
'''
Calls the other functions and prints the results obtained to the screen
'''
if __name__ == '__main__':
start = time()
# obrim els fitxer per a cada parella d'idiomes possibles
for i in range(0, 8):
for j in range(0, 8):
idioma1 = openFile("Llistes/" + llista_idiomes[i])
idioma2 = openFile("Llistes/" + llista_idiomes[j])
# calculem la distància lèxica entre cada aprella d'idiomes possible
matriu_resultant_lexic[i][j] = distanciaIdioma_lexic(idioma1, idioma2)
#print ("\n")
# transformem la llista en un np.array per simplificar la impressió per pantalla
matriu_resultant_array_lexic = np.array(matriu_resultant_lexic)
# obrim els fitxer per a cada parella de transcripcions fonètiques d'idiomes possibles
for i in range(0, 9):
for j in range(0, 9):
idioma3 = openFile("Llistes/" + llista_idiomes_fonetic[i])
idioma4 = openFile("Llistes/" + llista_idiomes_fonetic[j])
# calculem la distància fonètica els fitxer per a cada parella de transcripcions fonètiques d'idiomes possible
matriu_resultant_fonetica[i][j] = distanciaIdioma_fonetic(idioma3, idioma4)
#print ("\n")
matriu_resultant_array_fonetic = np.array(
matriu_resultant_fonetica) # transformem la llista en un np.array per simplificar la impressió per pantalla
np.set_printoptions(precision=3) # determinem que el nombre de decimals a mostrar és 3
# imprimim per pantalla les matrius de distàncies
print("Matriu de distàncies lèxiques: \n")
print(matriu_resultant_array_lexic)
print ("\n")
print("Matriu de distàncies fonètiques: \n")
print (matriu_resultant_array_fonetic)
print ("\n")
end = time()
time_elapsed = end - start #calculem el que triga l'execució
print("Time elapsed: ")
print(time_elapsed)
|
python
|
def swap_case(s):
swapped_s = ''
for letter in s:
swapped_s += letter.lower() if letter.isupper() else letter.upper()
return swapped_s
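# For example: swap_case('Www.HackerRank.com') -> 'wWW.hACKERrANK.COM'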
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result)
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class PutCustomEventRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'PutCustomEventRule','cms')
self.set_method('POST')
def get_Webhook(self):
return self.get_query_params().get('Webhook')
def set_Webhook(self,Webhook):
self.add_query_param('Webhook',Webhook)
def get_RuleName(self):
return self.get_query_params().get('RuleName')
def set_RuleName(self,RuleName):
self.add_query_param('RuleName',RuleName)
def get_Threshold(self):
return self.get_query_params().get('Threshold')
def set_Threshold(self,Threshold):
self.add_query_param('Threshold',Threshold)
def get_EffectiveInterval(self):
return self.get_query_params().get('EffectiveInterval')
def set_EffectiveInterval(self,EffectiveInterval):
self.add_query_param('EffectiveInterval',EffectiveInterval)
def get_EventName(self):
return self.get_query_params().get('EventName')
def set_EventName(self,EventName):
self.add_query_param('EventName',EventName)
def get_EmailSubject(self):
return self.get_query_params().get('EmailSubject')
def set_EmailSubject(self,EmailSubject):
self.add_query_param('EmailSubject',EmailSubject)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_ContactGroups(self):
return self.get_query_params().get('ContactGroups')
def set_ContactGroups(self,ContactGroups):
self.add_query_param('ContactGroups',ContactGroups)
def get_Level(self):
return self.get_query_params().get('Level')
def set_Level(self,Level):
self.add_query_param('Level',Level)
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_RuleId(self):
return self.get_query_params().get('RuleId')
def set_RuleId(self,RuleId):
self.add_query_param('RuleId',RuleId)
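# Hedged usage sketch; the region and credentials below are placeholders, not values
# taken from this SDK module.
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = PutCustomEventRuleRequest()
#     request.set_RuleName('my-rule')
#     request.set_EventName('my-event')
#     print(client.do_action_with_exception(request))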
|
python
|
from os.path import join
import matplotlib.pylab as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.metrics import roc_curve, auc
import src.utils
from src.methods.methods import good_methods as methods
from src.methods.methods import mutliple_time_series_combiner
from src.readers.json_dataset_reader import JsonDatasetReader
from src.utils import debug, PROJECT_DIR, s_timestamp, getFeedbackLinks, getForwardLinks, getSubplots, \
plotDiGraphViaGraphViz
def normalize_rowwise(x):
return np.absolute(x) / np.max(np.absolute(x), axis=1, keepdims=True) # /np.std(x,axis=1,keepdims=True)
def evaluateMethodOnInstance(i, method, normalize=False):
cc = mutliple_time_series_combiner(method, i)
for idx_node in range(i.n_nodes):
if normalize:
if cc[idx_node][idx_node] != 0:
cc[idx_node] /= np.absolute(cc[idx_node][idx_node])
cc[idx_node][idx_node] = 0
if normalize:
cc = normalize_rowwise(cc)
y_true = np.reshape(1.0 * (np.absolute(i.y) > 0.01), (i.n_nodes * i.n_nodes, 1))
cc = np.absolute(cc)
cc_flat = cc.flatten()
if debug:
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(np.reshape(y_true, (i.n_nodes, i.n_nodes)))
plt.subplot(1, 2, 2)
plt.imshow(np.reshape(cc_flat, (i.n_nodes, i.n_nodes)))
plt.title(method.__name__)
plt.show()
return np.reshape(y_true, (i.n_nodes * i.n_nodes,)), np.reshape(cc_flat, (i.n_nodes * i.n_nodes,))
def evaluateMethod(dataset, method, normalize=False):
n = dataset.n_instances
combined_y_true = np.empty((n, dataset.n_nodes * dataset.n_nodes), dtype=np.float64)
combined_y_pred = np.empty((n, dataset.n_nodes * dataset.n_nodes), dtype=np.float64)
result = [evaluateMethodOnInstance(dataset.get(idx_instance), method, normalize) for idx_instance in range(n)]
for i in range(n):
y_true, y_pred = result[i]
combined_y_true[i, :] = y_true
combined_y_pred[i, :] = y_pred
return combined_y_true, combined_y_pred
def evaluateCombinedTotalRocCurves(predictions, true, methods):
res = "{0: <20} {1: <19} {2: <19} {3: <19}\n".format(" ", "auc", "auc forward", "auc feedbacks")
plt.figure(figsize=(35, 12))
best_auc = 0
subplot_i, subplot_k = getSubplots(3)
for f in methods:
y_true, y_pred = true[f], predictions[f]
feedbacks_y_true = np.reshape([getFeedbackLinks(temp) for temp in y_true], (-1, 1))
feedbacks_y_pred = np.reshape([getFeedbackLinks(temp) for temp in y_pred], (-1, 1))
forward_y_true = np.reshape([getForwardLinks(temp) for temp in y_true], (-1, 1))
forward_y_pred = np.reshape([getForwardLinks(temp) for temp in y_pred], (-1, 1))
combined_y_pred = np.reshape(y_pred, (-1, 1))
combined_y_true = np.reshape(y_true, (-1, 1))
plt.subplot(subplot_i, subplot_k, 1)
roc_auc = plotROC(combined_y_true, combined_y_pred, f)
if roc_auc > best_auc and roc_auc < 0.99:
best_auc = roc_auc
plt.subplot(subplot_i, subplot_k, 2)
roc_auc_forward = plotROC(forward_y_true, forward_y_pred, f)
plt.title('ROC for forward only')
plt.subplot(subplot_i, subplot_k, 3)
roc_auc_feedbacks = plotROC(feedbacks_y_true, feedbacks_y_pred, f)
plt.title('ROC for feedbacks only')
res += "{0: <20} {1:16.3f} {2:16.3f} {3:16.3f}\n".format(f, roc_auc, roc_auc_forward, roc_auc_feedbacks)
plt.savefig(join(PROJECT_DIR, 'output', 'evaluation', s_timestamp() + '.pdf'))
with open(join(PROJECT_DIR, 'output', 'evaluation', s_timestamp() + '.txt'), 'w') as pout:
pout.write(res)
return best_auc
def plotROC(y_true, y_pred, label):
fpr, tpr, _ = roc_curve(y_true, y_pred)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label=label + ' (auc = %0.2f)' % roc_auc)
plt.legend(loc="lower right")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
return roc_auc
def plotPredicted(y_pred, label, predict_n, cmap, n_nodes, pos, node_labels):
y_pred[np.argsort(np.absolute(y_pred))[:-predict_n]] = 0
ebunch = [(k, i, y_pred[n_nodes * i + k]) for i in range(n_nodes) for k in range(n_nodes) if
y_pred[n_nodes * i + k] != 0]
plotDiGraphViaGraphViz(n_nodes, ebunch, cmap, pos, node_labels=node_labels)
plt.title(label)
def evaluateIndividualRocCurvesAndPredictions(d, predictions, true, predict_n, methods):
with PdfPages(join(PROJECT_DIR, 'output', 'visualization_graph_predictions', s_timestamp() + '.pdf')) as pdf:
cmap = plt.cm.Accent
for idx_instance in range(d.n_instances):
instance = d.get(idx_instance)
n_nodes = instance.n_nodes
node_labels = instance.labels
plt.figure(figsize=(40, 20))
subplot_i, subplot_k = getSubplots(len(predictions) + 4)
for subplot_idx, f in enumerate(methods):
y_true = true[f][idx_instance][:]
y_pred = predictions[f][idx_instance][:]
# plot the roc curve for the instance
plt.subplot(subplot_i, subplot_k, len(predictions) + 1)
plotROC(y_true, y_pred, f)
# plot the roc curve for the feedbacks only
plt.subplot(subplot_i, subplot_k, len(predictions) + 2)
plotROC(getFeedbackLinks(y_true), getFeedbackLinks(y_pred), f)
plt.title('ROC for feedbacks only')
# plot the roc curve for the feedbacks only
plt.subplot(subplot_i, subplot_k, len(predictions) + 3)
plotROC(getForwardLinks(y_true), getForwardLinks(y_pred), f)
plt.title('ROC for forward only')
# plot the predicted networks
plt.subplot(subplot_i, subplot_k, subplot_idx + 1)
plotPredicted(y_pred, f, predict_n, cmap, n_nodes, instance.pos, node_labels)
plt.subplot(subplot_i, subplot_k, len(predictions) + 4)
instance.plotTimeSeries_(cmap)
pdf.savefig() # saves the current figure into a pdf page
plt.close()
def evaluateAll(d, normalize=False, predict_n=18, methods=methods):
predictions = {}
true = {}
for f in methods:
y_true, y_pred = evaluateMethod(d, methods[f], normalize=normalize)
predictions[f] = y_pred
true[f] = y_true
res = evaluateCombinedTotalRocCurves(predictions, true, methods)
evaluateIndividualRocCurvesAndPredictions(d, predictions, true, predict_n, methods)
return res
def plotPredictions(dataset, method, predict_n):
cmap = plt.cm.Accent
with PdfPages(join(PROJECT_DIR, 'output', 'visualization_graph_predictions_really', s_timestamp() + '.pdf')) as pdf:
for idx_instance in range(dataset.n_instances):
instance = dataset.get(idx_instance)
plt.figure(figsize=(20, 14))
subplot_i, subplot_k = getSubplots(instance.n_time_series + 1)
            labels = ['Pulse', '1 Inhibition', '2 Inhibitions', 'Oscillatory', 'Oscillatory+1 Inhibition']
plt.subplot(subplot_i, subplot_k, 1)
instance.plotDiGraphViaGraphViz_(cmap)
for idx_time_series in range(instance.n_time_series):
plt.subplot(subplot_i, subplot_k, idx_time_series + 2)
instance.setx(idx_time_series)
y_pred = method(instance)
for idx_node in range(instance.n_nodes):
y_pred[idx_node][idx_node] = 0
y_pred = y_pred.reshape(-1, )
plotPredicted(y_pred, labels[idx_time_series], predict_n, cmap, instance.n_nodes, instance.pos,
instance.labels)
pdf.savefig() # saves the current figure into a pdf page
plt.close()
def plotRocForDataset(d, methods=methods):
plt.figure(figsize=(6, 5))
for f in methods:
y_true, y_pred = evaluateMethod(d, methods[f], normalize=False)
combined_y_pred = np.reshape(y_pred, (-1, 1))
combined_y_true = np.reshape(y_true, (-1, 1))
roc_auc = plotROC(combined_y_true, combined_y_pred, f)
plt.grid()
plt.savefig(join(PROJECT_DIR, 'output', 'roc', s_timestamp() + '.pdf'))
def main():
src.utils.s_timestamp_prefix = '50_nodes'
reader = JsonDatasetReader('50_nodes.json.zip')
d = reader.getDataset(n_instances=1, n_nodes=14 * 3, n_time_series=1)
plotPredictions(d, methods["partial_corr"], 70)
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Defines functionality for pipelined execution of interfaces
The `EngineBase` class implements the more general view of a task.
.. testsetup::
# Change directory to provide relative paths for doctests
import os
filepath = os.path.dirname(os.path.realpath( __file__ ))
datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
os.chdir(datadir)
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import object
from future import standard_library
standard_library.install_aliases()
from copy import deepcopy
import re
import warnings
import numpy as np
from ... import logging
from ...interfaces.base import DynamicTraitedSpec
from ...utils.filemanip import loadpkl, savepkl
logger = logging.getLogger('workflow')
class EngineBase(object):
"""Defines common attributes and functions for workflows and nodes."""
def __init__(self, name=None, base_dir=None):
""" Initialize base parameters of a workflow or node
Parameters
----------
name : string (mandatory)
Name of this node. Name must be alphanumeric and not contain any
special characters (e.g., '.', '@').
base_dir : string
base output directory (will be hashed before creations)
default=None, which results in the use of mkdtemp
"""
self.base_dir = base_dir
self.config = None
self._verify_name(name)
self.name = name
# for compatibility with node expansion using iterables
self._id = self.name
self._hierarchy = None
@property
def inputs(self):
raise NotImplementedError
@property
def outputs(self):
raise NotImplementedError
@property
def fullname(self):
fullname = self.name
if self._hierarchy:
fullname = self._hierarchy + '.' + self.name
return fullname
@property
def itername(self):
itername = self._id
if self._hierarchy:
itername = self._hierarchy + '.' + self._id
return itername
def clone(self, name):
"""Clone an EngineBase object
Parameters
----------
name : string (mandatory)
A clone of node or workflow must have a new name
"""
if (name is None) or (name == self.name):
raise Exception('Cloning requires a new name')
self._verify_name(name)
clone = deepcopy(self)
clone.name = name
clone._id = name
clone._hierarchy = None
return clone
def _check_outputs(self, parameter):
return hasattr(self.outputs, parameter)
def _check_inputs(self, parameter):
if isinstance(self.inputs, DynamicTraitedSpec):
return True
return hasattr(self.inputs, parameter)
def _verify_name(self, name):
        valid_name = bool(re.match(r'^[\w-]+$', name))
if not valid_name:
raise ValueError('[Workflow|Node] name \'%s\' contains'
' special characters' % name)
def __repr__(self):
if self._hierarchy:
return '.'.join((self._hierarchy, self._id))
else:
return '{}'.format(self._id)
def save(self, filename=None):
if filename is None:
filename = 'temp.pklz'
savepkl(filename, self)
def load(self, filename):
if '.npz' in filename:
            warnings.warn('npz files will be deprecated in the next '
                          'release. you can use numpy to open them.',
                          DeprecationWarning)
return np.load(filename)
return loadpkl(filename)
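# Illustrative subclass, not from nipype itself, showing how the abstract properties are
# satisfied and how clone() enforces renaming:
#
#     class _DummyNode(EngineBase):
#         @property
#         def inputs(self):
#             return None
#         @property
#         def outputs(self):
#             return None
#
#     node = _DummyNode(name='smooth', base_dir='/tmp')
#     copy = node.clone('smooth2')    # reusing the old name would raise an Exception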
|
python
|
from netCDF4 import Dataset
#-------------------------------------------------------------------------------
def set_difference_fields():
files = {"./output_hex_wachspress_0082x0094_120/output.2000.nc":
["./output_hex_pwl_0082x0094_120/output.2000.nc",
"./output_hex_weak_0082x0094_120/output.2000.nc"],
"./output_quad_wachspress_0080x0080_120/output.2000.nc":
["./output_quad_pwl_0080x0080_120/output.2000.nc",
"./output_quad_weak_0080x0080_120/output.2000.nc"]}
#files = {"./output_hex_wachspress_0082x0094_120/output.2000.nc":
# ["./output/output.2000.nc"]}
fieldnames = ["uVelocity","vVelocity","stressDivergenceU","stressDivergenceV"]
#fieldnames = ["stressDivergenceU"]
#fieldnames = ["uVelocity","vVelocity","stressDivergenceU","stressDivergenceV","strain11var","strain22var","strain12var"]
for filenameBase in files:
for filenameDiff in files[filenameBase]:
for fieldname in fieldnames:
print(fieldname)
filein = Dataset(filenameBase,"r")
field1 = filein.variables[fieldname][:]
dimensionsBase = filein.variables[fieldname].dimensions
filein.close()
fileDiff = Dataset(filenameDiff,"a")
field2 = fileDiff.variables[fieldname][:]
try:
fieldDiff = fileDiff.createVariable(fieldname+"Diff", "d", dimensions=dimensionsBase)
except:
fieldDiff = fileDiff.variables[fieldname+"Diff"]
fieldDiff[:] = field2[:] - field1[:]
fileDiff.close()
#-------------------------------------------------------------------------------
if __name__ == "__main__":
set_difference_fields()
|
python
|
import imp
import librosa
import numpy as np
from keras.models import load_model
genres = {0: "metal", 1: "disco", 2: "classical", 3: "hiphop", 4: "jazz",
5: "country", 6: "pop", 7: "blues", 8: "reggae", 9: "rock"}
song_samples = 660000
def load_song(filepath):
y, sr = librosa.load(filepath)
y = y[:song_samples]
return y, sr
def splitsongs(X, window = 0.1, overlap = 0.5):
temp_X = []
xshape = X.shape[0]
chunk = int(xshape*window)
offset = int(chunk*(1.-overlap))
spsong = [X[i:i+chunk] for i in range(0, xshape - chunk + offset, offset)]
for s in spsong:
temp_X.append(s)
return np.array(temp_X)
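# With the defaults above and a full-length clip (song_samples = 660000): chunk = 66000
# samples and the hop is 33000 samples, so splitsongs yields 19 half-overlapping windows.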
def to_melspec(signals):
melspec = lambda x : librosa.feature.melspectrogram(x, n_fft=1024, hop_length=512)[:, :, np.newaxis]
spec_array = map(melspec, signals)
return np.array(list(spec_array))
def get_genre(path, debug=False):
model = load_model('./weights/genres_full_vgg16.h5')
y = load_song(path)[0]
predictions = []
spectro = []
signals = splitsongs(y)
spec_array = to_melspec(signals)
spectro.extend(spec_array)
spectro = np.array(spectro)
spectro = np.squeeze(np.stack((spectro,)*3,-1))
pr = np.array(model.predict(spectro))
predictions = np.argmax(pr, axis=1)
if debug:
print('Load audio:', path)
print("\nFull Predictions:")
for p in pr: print(list(p))
print("\nPredictions:\n{}".format(predictions))
print("Confidences:\n{}".format([round(x, 2) for x in np.amax(pr, axis=1)]))
print("\nOutput Predictions:\n{}\nPredicted class:".format(np.mean(pr, axis=0)))
return genres[np.bincount(predictions).argmax()] # list(np.mean(pr, axis=0))
if __name__ == '__main__':
print(get_genre('./audios/classical_music.mp3', True))
|
python
|
""" Tests for the StarCluster Job """
import sure
from mock import Mock
from .fixtures import *
from metapipe.models import sge_job
def test_qstat_queued():
j = sge_job.SGEJob('', None)
sge_job.call = Mock(return_value=sge_job_qstat_queued)
j.is_queued().should.equal(True)
def test_qstat_running():
j = sge_job.SGEJob('', None)
sge_job.call = Mock(return_value=sge_job_qstat_running)
j.is_running().should.equal(True)
def test_submit():
j = sge_job.SGEJob('', None)
sge_job.call = Mock(return_value=sge_job_qsub)
j.make = Mock()
j.submit()
j.id.should.equal('1')
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def initial_log_action(apps, schema_editor):
LogAction = apps.get_model('logs', 'LogAction')
action, created = LogAction.objects.get_or_create(
name='USER_CREATE',
)
action.template = 'User `{{ log_item.object1.username }}` was created.'
action.save()
action, created = LogAction.objects.get_or_create(
name='USER_EDIT',
)
action.template = 'User `{{ log_item.object1.username }}` field `{{ log_item.data.field }}` \
was changed from {{ log_item.data.old }} to {{ log_item.data.new }}.'
action.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('logs', '0002_auto_20150128_0257'),
]
operations = [
migrations.RunPython(initial_log_action, backwards),
]
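# Applied like any other data migration, e.g.:  python manage.py migrate logs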
|
python
|
__author__ = 'Neil Butcher'
from Institution import InstitutionSavingObject
|
python
|
import sys
import os.path
def main():
sys.argv[:] = sys.argv[1:] # Must rewrite the command line arguments
progname = sys.argv[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__' : progname,
'__name__' : '__main__',
'__package__' : None,
'__cached__' : None
}
exec(code, globs)
|
python
|
from __future__ import unicode_literals
import os
import shutil
import subprocess
import sys
import optparse
import datetime
import time
sys.path[:0] = ['.']
from yt_dlp.utils import check_executable
try:
iterations = str(int(os.environ['ZOPFLI_ITERATIONS']))
except BaseException:
iterations = '30'
parser = optparse.OptionParser(usage='%prog PYTHON')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Expected python executable name for shebang')
PYTHON = args[0]
# 200001010101
date = datetime.datetime(year=2000, month=1, day=1, hour=1, minute=1, second=1)
modTime = time.mktime(date.timetuple())
try:
shutil.rmtree('zip/')
except FileNotFoundError:
pass
os.makedirs('zip/', exist_ok=True)
files = [(dir, file) for (dir, _, c) in os.walk('yt_dlp') for file in c if file.endswith('.py')]
for (dir, file) in files:
joined = os.path.join(dir, file)
dest = os.path.join('zip', joined)
os.makedirs(os.path.join('zip', dir), exist_ok=True)
shutil.copy(joined, dest)
os.utime(dest, (modTime, modTime))
os.rename('zip/yt_dlp/__main__.py', 'zip/__main__.py')
files.remove(('yt_dlp', '__main__.py'))
files[:0] = [('', '__main__.py')]
all_paths = [os.path.join(dir, file) for (dir, file) in files]
if check_executable('7z', []):
ret = subprocess.Popen(
['7z', 'a', '-mm=Deflate', '-mfb=258', '-mpass=15', '-mtc-', '../youtube-dl.zip'] + all_paths,
cwd='zip/').wait()
elif check_executable('zip', ['-h']):
ret = subprocess.Popen(
['zip', '-9', '../youtube-dl.zip'] + all_paths,
cwd='zip/').wait()
else:
raise Exception('Cannot find ZIP archiver')
if ret != 0:
raise Exception('ZIP archiver returned error: %d' % ret)
if check_executable('advzip', []):
subprocess.Popen(
['advzip', '-z', '-4', '-i', iterations, 'youtube-dl.zip']).wait()
shutil.rmtree('zip/')
with open('youtube-dl', 'wb') as ytdl:
ytdl.write(b'#!%s\n' % PYTHON.encode('utf8'))
with open('youtube-dl.zip', 'rb') as zip:
ytdl.write(zip.read())
os.remove('youtube-dl.zip')
os.chmod('youtube-dl', 0o755)
|
python
|
import json
from django.http.response import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.urls.base import reverse
from lykops.views import Base
class Report(Base):
def summary(self,request):
result = self._is_login(request)
if result[0] :
username = result[1]
else :
return HttpResponseRedirect(reverse('login'))
http_referer = self.uri_api.get_httpreferer(username, no=-1)
force = request.GET.get('force', False)
result = self.ansible_report_api.get_date_list(username, force=force)
if not result[0] :
            return render_to_response('report_list.html', {'login_user':username, 'error_message':result[1], 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
else :
date_list = result[1]
create_date = request.GET.get('create_date', None)
if create_date == 'None' :
create_date = None
mode = request.GET.get('mode', 'all')
result = self.ansible_report_api.summary(username, dt=create_date, mode=mode)
if not result[0] :
error_message = self.username + ' 查看用户' + username + '的ansible任务执行报告列表失败,提交保存时发生错误,原因:' + result[1]
self.logger.error(error_message)
            return render_to_response('report_list.html', {'login_user':username, 'error_message':error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
else :
work_list = result[1]
self.logger.info(self.username + ' 查看用户' + username + '的ansible任务执行报告列表成功')
            return render_to_response('report_list.html', {'login_user':username, 'error_message':{}, 'http_referer':http_referer, 'date_list':date_list, 'work_list':work_list, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
def detail(self, request):
result = self._is_login(request)
if result[0] :
username = result[1]
else :
return HttpResponseRedirect(reverse('login'))
http_referer = self.uri_api.get_httpreferer(username, no=-1)
force = request.GET.get('force', False)
force = bool(force)
uuid_str = request.GET.get('uuid', False)
exec_mode = request.GET.get('mode', False)
orig_content = request.GET.get('orig_content', False)
orig_content = bool(orig_content)
result = self.ansible_report_api.detail(username, uuid_str, force=force, orig_content=orig_content)
if result[0] :
report_data = result[1]
if orig_content :
self.logger.info(self.username + ' 查看用户' + username + '的uuid为' + uuid_str + '的ansible任务执行报告成功(原始数据)')
return HttpResponse(json.dumps(report_data))
            return render_to_response('result.html', {'login_user':username, 'content':report_data, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
self.logger.info(self.username + ' 查看用户' + username + '的uuid为' + uuid_str + '的ansible任务执行报告成功(格式化数据)')
if exec_mode == 'adhoc' :
                return render_to_response('report_adhoc.html', {'login_user':username, 'report_data':report_data, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
else :
                return render_to_response('report_playbook.html', {'login_user':username, 'report_data':report_data, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
else :
error_message = self.username + ' 查看用户' + username + '的uuid为' + uuid_str + '的ansible任务执行报告失败,查询时发生错误,原因:' + result[1]
self.logger.error(error_message)
            return render_to_response('result.html', {'login_user':username, 'content':error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
|
python
|
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import pandas as pd
from pycylon.index import Index, RangeIndex, NumericIndex, CategoricalIndex, ColumnIndex, \
range_calculator
from pycylon import Table
from pycylon import CylonContext
import pyarrow as pa
import numpy as np
from pycylon.io import CSVReadOptions
from pycylon.io import read_csv
def test_with_pandas():
pdf = pd.DataFrame([[1, 2, 3, 4, 5, 'a'], [6, 7, 8, 9, 10, 'b'], [11, 12, 13, 14, 15, 'c'],
[16, 17, 18, 19, 20, 'a'], [16, 17, 18, 19, 20, 'd'],
[111, 112, 113, 114, 5,
'a']])
# print(pdf)
pdf1 = pdf.set_index([1, 2])
# print(pdf1)
print(pdf1.index)
def test_numeric_index():
rg = range(0, 10, 1)
rg1 = range(0, 10, 2)
r = NumericIndex(data=rg)
assert r.index_values == rg
assert r.index_values != rg1
def test_range_index():
rg = range(0, 10, 1)
rg1 = range(0, 10, 2)
r = RangeIndex(start=rg.start, stop=rg.stop, step=rg.step)
assert r.index_values == rg
assert r.index_values != rg1
r1 = RangeIndex(rg)
r2 = RangeIndex(rg)
assert r1.index_values == rg
assert r2.index_values != rg1
def calculate_range_size_manual(rg: range):
size = 0
for _ in rg:
size += 1
return size
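# Note: this manual counter is only a reference implementation for the assertions
# below; for a Python range object it is equivalent to len(rg).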
def test_range_count():
rg_1 = range(0, 10)
rg_2 = range(0, 10, 2)
rg_3 = range(0, 10, 3)
rg_4 = range(0, 11, 2)
rg_5 = range(0, 14, 3)
rgs = [rg_1, rg_2, rg_3, rg_4, rg_5]
for rg in rgs:
assert range_calculator(rg) == calculate_range_size_manual(rg)
def test_cylon_set_index_from_column():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
# cn_tb.set_index('a', indexing_schema, drop_index)
cn_tb.set_index('a', indexing_type, drop_index)
print("After Indexing")
assert cn_tb.column_names == ['b']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
def test_reset_index():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
# cn_tb.set_index('a', indexing_schema, drop_index)
cn_tb.set_index('a', indexing_type, drop_index)
# assert cn_tb.get_index().get_type() == IndexingSchema.LINEAR
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
rest_drop_index = False
# cn_tb.reset_index(rest_drop_index)
cn_tb.reset_index(rest_drop_index)
assert cn_tb.column_names == ['index', 'b']
# assert cn_tb.get_index().get_schema() == IndexingSchema.RANGE
assert cn_tb.get_index().get_type() == IndexingType.RANGE
def test_cylon_cpp_single_column_indexing():
# TODO: REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_float)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = 0
#
# loc_out = loc_ix.loc_with_single_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_single_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_single_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_multi_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([11, 12, 14, 15, 16, 17, 18], dtype='int')
# })
# pdf = pd.DataFrame([[1, 2, 11], [4, 5, 12], [7, 8, 14], [10, 11, 15], [20, 22, 16], [23, 25,
# 17],
# [10, 12, 18]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = [0, 1]
#
# loc_out = loc_ix.loc_with_multi_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_multi_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_multi_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_single_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2], ["4", 5], ["7", 8], ["10", 11], ["20", 22], ["23", 25], ["10",
# 12]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = 0
#
# loc_out = loc_ix.loc_with_single_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_single_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_single_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_multi_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2, 3], ["4", 5, 4], ["7", 8, 10], ["10", 11, 12], ["20", 22, 20],
# ["23", 25, 20], ["10", 12, 35]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([3, 4, 10, 12, 20, 20, 35], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = [0, 1]
#
# loc_out = loc_ix.loc_with_multi_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_multi_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_multi_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_range_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([11, 12, 14, 15, 16, 17, 18], dtype='int')
# })
# pdf = pd.DataFrame([[1, 2, 11], [4, 5, 12], [7, 8, 14], [10, 11, 15], [20, 22, 16], [23, 25,
# 17],
# [10, 12, 18]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = slice(0, 1)
#
# loc_out = loc_ix.loc_with_range_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_range_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_range_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_range_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2, 3], ["4", 5, 4], ["7", 8, 10], ["10", 11, 12], ["20", 22, 20],
# ["23", 25, 20], ["10", 12, 35]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([3, 4, 10, 12, 20, 20, 35], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = slice(0, 1)
#
# loc_out = loc_ix.loc_with_range_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_range_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_range_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_loc_op_mode_1():
from pycylon.indexing.cyindex import IndexingType
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 11], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
# assert cn_tb.get_index().get_schema() == IndexingSchema.LINEAR
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
loc_cn_1 = cn_tb.loc[7:20, 'c':'e']
loc_pd_1 = pdf_float.loc[7:20, 'c':'e']
print(loc_cn_1.get_index().values)
print(loc_pd_1.index.values)
assert loc_pd_1.values.tolist() == loc_cn_1.to_pandas().values.tolist()
assert loc_cn_1.get_index().get_index_array() == pa.array(loc_pd_1.index)
# assert loc_cn_1.get_arrow_index().get_index_array() == pa.array(loc_pd_1.index)
loc_cn_2 = cn_tb.loc[7:20, 'd':]
loc_pd_2 = pdf_float.loc[7:20, 'd':]
assert loc_pd_2.values.tolist() == loc_cn_2.to_pandas().values.tolist()
assert loc_cn_2.get_index().get_index_array() == pa.array(loc_pd_2.index)
# assert loc_cn_2.get_arrow_index().get_index_array() == pa.array(loc_pd_2.index)
loc_cn_3 = cn_tb.loc[7:, 'd':]
loc_pd_3 = pdf_float.loc[7:, 'd':]
assert loc_pd_3.values.tolist() == loc_cn_3.to_pandas().values.tolist()
assert loc_cn_3.get_index().get_index_array() == pa.array(loc_pd_3.index)
# assert loc_cn_3.get_arrow_index().get_index_array() == pa.array(loc_pd_3.index)
loc_cn_4 = cn_tb.loc[:7, 'd':]
loc_pd_4 = pdf_float.loc[:7, 'd':]
assert loc_pd_4.values.tolist() == loc_cn_4.to_pandas().values.tolist()
assert loc_cn_4.get_index().get_index_array() == pa.array(loc_pd_4.index)
# assert loc_cn_4.get_arrow_index().get_index_array() == pa.array(loc_pd_4.index)
loc_cn_5 = cn_tb.loc[:, 'd':]
loc_pd_5 = pdf_float.loc[:, 'd':]
assert loc_pd_5.values.tolist() == loc_cn_5.to_pandas().values.tolist()
assert loc_cn_5.get_index().get_index_array() == pa.array(loc_pd_5.index)
# assert loc_cn_5.get_arrow_index().get_index_array() == pa.array(loc_pd_5.index)
loc_cn_6 = cn_tb.loc[[7, 20], 'd':]
loc_pd_6 = pdf_float.loc[[7, 20], 'd':]
assert loc_pd_6.values.tolist() == loc_cn_6.to_pandas().values.tolist()
assert loc_cn_6.get_index().get_index_array() == pa.array(loc_pd_6.index)
def test_loc_op_mode_2():
from pycylon.indexing.cyindex import IndexingType
pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
loc_cn_1 = cn_tb.loc["7":"20", 'c':'e']
loc_pd_1 = pdf_float.loc["7":"20", 'c':'e']
assert loc_pd_1.values.tolist() == loc_cn_1.to_pandas().values.tolist()
assert loc_cn_1.get_index().get_index_array() == pa.array(loc_pd_1.index)
# assert loc_cn_1.get_arrow_index().get_index_array() == pa.array(loc_pd_1.index)
loc_cn_2 = cn_tb.loc["7":"20", 'd':]
loc_pd_2 = pdf_float.loc["7":"20", 'd':]
assert loc_pd_2.values.tolist() == loc_cn_2.to_pandas().values.tolist()
assert loc_cn_2.get_index().get_index_array() == pa.array(loc_pd_2.index)
# assert loc_cn_2.get_arrow_index().get_index_array() == pa.array(loc_pd_2.index)
loc_cn_3 = cn_tb.loc["7":, 'd':]
loc_pd_3 = pdf_float.loc["7":, 'd':]
assert loc_pd_3.values.tolist() == loc_cn_3.to_pandas().values.tolist()
assert loc_cn_3.get_index().get_index_array() == pa.array(loc_pd_3.index)
# assert loc_cn_3.get_arrow_index().get_index_array() == pa.array(loc_pd_3.index)
loc_cn_4 = cn_tb.loc[:"7", 'd':]
loc_pd_4 = pdf_float.loc[:"7", 'd':]
assert loc_pd_4.values.tolist() == loc_cn_4.to_pandas().values.tolist()
assert loc_cn_4.get_index().get_index_array() == pa.array(loc_pd_4.index)
# assert loc_cn_4.get_arrow_index().get_index_array() == pa.array(loc_pd_4.index)
loc_cn_5 = cn_tb.loc[:, 'd':]
loc_pd_5 = pdf_float.loc[:, 'd':]
assert loc_pd_5.values.tolist() == loc_cn_5.to_pandas().values.tolist()
assert loc_cn_5.get_index().get_index_array() == pa.array(loc_pd_5.index)
# assert loc_cn_5.get_arrow_index().get_index_array() == pa.array(loc_pd_5.index)
loc_cn_6 = cn_tb.loc[["7", "20"], 'd':]
loc_pd_6 = pdf_float.loc[["7", "20"], 'd':]
assert loc_pd_6.values.tolist() == loc_cn_6.to_pandas().values.tolist()
assert loc_cn_6.get_index().get_index_array() == pa.array(loc_pd_6.index)
def test_loc_op_mode_3():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
loc_cn_1 = cn_tb.loc["7":"20"]
loc_pd_1 = pdf_float.loc["7":"20"]
print(loc_cn_1.get_index().get_index_array())
print(loc_pd_1.index.values)
assert loc_pd_1.values.tolist() == loc_cn_1.to_pandas().values.tolist()
assert loc_cn_1.get_index().get_index_array() == pa.array(loc_pd_1.index)
loc_cn_2 = cn_tb.loc["7":]
loc_pd_2 = pdf_float.loc["7":]
assert loc_pd_2.values.tolist() == loc_cn_2.to_pandas().values.tolist()
assert loc_cn_2.get_index().get_index_array() == pa.array(loc_pd_2.index)
loc_cn_3 = cn_tb.loc[:"7"]
loc_pd_3 = pdf_float.loc[:"7"]
assert loc_pd_3.values.tolist() == loc_cn_3.to_pandas().values.tolist()
assert loc_cn_3.get_index().get_index_array() == pa.array(loc_pd_3.index)
loc_cn_4 = cn_tb.loc[:]
loc_pd_4 = pdf_float.loc[:]
assert loc_pd_4.values.tolist() == loc_cn_4.to_pandas().values.tolist()
assert loc_cn_4.get_index().get_index_array() == pa.array(loc_pd_4.index)
loc_cn_5 = cn_tb.loc[["7", "20"], :]
loc_pd_5 = pdf_float.loc[["7", "20"], :]
assert loc_pd_5.values.tolist() == loc_cn_5.to_pandas().values.tolist()
assert loc_cn_5.get_index().get_index_array() == pa.array(loc_pd_5.index)
def test_iloc_op_mode_1():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
iloc_cn_1 = cn_tb.iloc[3:5, 1:3]
iloc_pd_1 = pdf_float.iloc[3:5, 1:3]
print(iloc_cn_1)
print(iloc_pd_1)
assert iloc_pd_1.values.tolist() == iloc_cn_1.to_pandas().values.tolist()
iloc_cn_2 = cn_tb.iloc[3:5, 1:]
iloc_pd_2 = pdf_float.iloc[3:5, 1:]
print(iloc_cn_2)
print(iloc_pd_2)
assert iloc_pd_2.values.tolist() == iloc_cn_2.to_pandas().values.tolist()
iloc_cn_3 = cn_tb.iloc[3:, 1:]
iloc_pd_3 = pdf_float.iloc[3:, 1:]
assert iloc_pd_3.values.tolist() == iloc_cn_3.to_pandas().values.tolist()
iloc_cn_4 = cn_tb.iloc[:3, 1:]
iloc_pd_4 = pdf_float.iloc[:3, 1:]
print(iloc_cn_4)
print(iloc_pd_4)
assert iloc_pd_4.values.tolist() == iloc_cn_4.to_pandas().values.tolist()
iloc_cn_5 = cn_tb.iloc[:, :]
iloc_pd_5 = pdf_float.iloc[:, :]
assert iloc_pd_5.values.tolist() == iloc_cn_5.to_pandas().values.tolist()
iloc_cn_6 = cn_tb.iloc[[0, 2, 3], :]
iloc_pd_6 = pdf_float.iloc[[0, 2, 3], :]
assert iloc_pd_6.values.tolist() == iloc_cn_6.to_pandas().values.tolist()
def test_isin():
ctx = CylonContext(config=None, distributed=False)
csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30)
table_path = 'data/input/duplicate_data_0.csv'
tb: Table = read_csv(ctx, table_path, csv_read_options)
pdf: pd.DataFrame = tb.to_pandas()
tb.set_index(tb.column_names[0], drop=True)
pdf.set_index(pdf.columns[0], drop=True, inplace=True)
assert tb.index.values.tolist() == pdf.index.values.tolist()
compare_values = [4, 1, 10, 100, 150]
tb_res_isin = tb.index.isin(compare_values)
pdf_res_isin = pdf.index.isin(compare_values)
assert tb_res_isin.tolist() == pdf_res_isin.tolist()
def test_isin_with_getitem():
ctx = CylonContext(config=None, distributed=False)
csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30)
table_path = 'data/input/duplicate_data_0.csv'
tb: Table = read_csv(ctx, table_path, csv_read_options)
pdf: pd.DataFrame = tb.to_pandas()
tb.set_index(tb.column_names[0], drop=True)
pdf.set_index(pdf.columns[0], drop=True, inplace=True)
assert tb.index.values.tolist() == pdf.index.values.tolist()
compare_values = [4, 1, 10, 100, 150]
tb_res_isin = tb.index.isin(compare_values)
pdf_res_isin = pdf.index.isin(compare_values)
assert tb_res_isin.tolist() == pdf_res_isin.tolist()
print(tb_res_isin)
print(pdf_res_isin)
pdf1 = pdf[pdf_res_isin]
print("Pandas Output")
print(pdf1)
print(pdf1.index.values)
tb_filter = Table.from_list(ctx, ['filter'], [tb_res_isin.tolist()])
tb1 = tb[tb_filter]
resultant_index = tb.index.values[tb_res_isin].tolist()
print(resultant_index)
tb1.set_index(resultant_index)
print("PyCylon Output")
print(tb1)
print(tb1.index.values)
assert pdf1.values.tolist() == tb1.to_pandas().values.tolist()
print(tb1.index.values)
print(pdf1.index.values)
assert tb1.index.values.tolist() == pdf1.index.values.tolist()
def test_arrow_index():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.cyindex import ArrowLocIndexer
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 11]),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
print(cn_tb.get_index().values)
index_array = cn_tb.get_index().get_index_array()
print(index_array)
print(index_array.type)
scalar_value = pa.scalar(10, index_array.type)
print(scalar_value)
arrow_loc_indexer = ArrowLocIndexer(IndexingType.LINEAR)
output1 = arrow_loc_indexer.loc_with_index_range(4, 20, 0, cn_tb)
print(output1)
print(output1.get_index().values)
output2 = arrow_loc_indexer.loc_with_index_range(4, 20, slice(0, 1), cn_tb)
print(output2)
print(output2.get_index().values)
output3 = arrow_loc_indexer.loc_with_index_range(4, 20, [0, 1, 2], cn_tb)
print(output3)
print(output3.get_index().values)
output4 = arrow_loc_indexer.loc_with_indices([4], 0, cn_tb)
print(output4)
print(output4.get_index().values)
output5 = arrow_loc_indexer.loc_with_indices([4, 20], slice(0, 1), cn_tb)
print(output5)
print(output5.get_index().values)
output6 = arrow_loc_indexer.loc_with_indices([4, 20], [0, 1, 2], cn_tb)
print(output6)
print(output6.get_index().values)
def test_index_set_index():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
# pdf_float = pdf_float.set_index('a')
# pdf_float = pdf_float.reset_index()
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
print("PyCylon Orignal Table")
print(cn_tb)
artb = cn_tb.to_arrow()
print("Arrow Table")
print(artb)
indexing_type = IndexingType.HASH
drop_index = True
print("Before Indexing : ", cn_tb.column_names)
print("index values", cn_tb.index.values)
print(cn_tb)
cn_tb.set_index(key='a', indexing_type=indexing_type, drop=drop_index)
print("After Indexing : ", cn_tb.column_names)
print(cn_tb)
print(cn_tb.index.values)
print(pdf_float.index.values)
filter = [False, True, False, True, False, False, False]
pdf_loc = pdf_float.loc[filter]
res = cn_tb.isin([10, 20, 30])
print(res)
print(pdf_loc)
# test_isin_with_getitem()
# test_loc_op_mode_1()
# test_loc_op_mode_2()
# test_loc_op_mode_3()
#
# test_iloc_op_mode_1()
test_index_set_index()
|
python
|
""" Loader for Maya api sub-package """
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# this can be imported without having maya fully initialized
from .allapi import *
|
python
|
#!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import copy
import collections
from pypeline.node import Node
from pypeline.common.fileutils import move_file, reroot_path
from pypeline.common.formats.msa import MSA
from pypeline.common.formats.phylip import interleaved_phy, sequential_phy
from pypeline.common.utilities import \
safe_coerce_to_frozenset, \
safe_coerce_to_tuple
_VALID_KEYS = frozenset(["partitions", "filenames"])
class FastaToPartitionedInterleavedPhyNode(Node):
def __init__(self, infiles, out_prefix, exclude_groups=(), reduce=False,
dependencies=(), file_dependencies=()):
"""
infiles = {names : {"partitions" : ..., "filenames" : [...]}}
"""
if not (isinstance(infiles, dict)
and all(isinstance(dd, dict) for dd in infiles.values())):
raise TypeError("'infiles' must be a dictionary of dictionaries")
input_filenames = []
for (name, subdd) in infiles.iteritems():
if set(subdd) - _VALID_KEYS:
raise ValueError("Invalid keys found for %r: %s"
% (name, ", ".join(set(subdd) - _VALID_KEYS)))
elif not isinstance(subdd["filenames"], list):
raise ValueError("filenames must be a list of strings")
input_filenames.extend(subdd["filenames"])
# Optional file dependencies; used to depend on the list of sequences
input_filenames.extend(safe_coerce_to_tuple(file_dependencies))
self._reduce = bool(reduce)
self._infiles = copy.deepcopy(infiles)
self._out_prefix = out_prefix
self._excluded = safe_coerce_to_frozenset(exclude_groups)
description = "<FastaToPartitionedPhy%s: %i file(s) -> '%s.*'>" % \
(" (reducing)" if reduce else "", len(infiles), out_prefix)
Node.__init__(self,
description=description,
input_files=input_filenames,
output_files=[out_prefix + ".phy",
out_prefix + ".partitions"],
dependencies=dependencies)
def _run(self, _config, temp):
merged_msas = []
for (name, files_dd) in sorted(self._infiles.iteritems()):
partitions = files_dd["partitions"]
msas = dict((key, []) for key in partitions)
for filename in files_dd["filenames"]:
msa = MSA.from_file(filename)
if self._excluded:
msa = msa.exclude(self._excluded)
for (key, msa_part) in msa.split(partitions).iteritems():
msas[key].append(msa_part)
msas.pop("X", None)
for (key, msa_parts) in sorted(msas.iteritems()):
merged_msa = MSA.join(*msa_parts)
if self._reduce:
merged_msa = merged_msa.reduce()
if merged_msa is not None:
merged_msas.append(("%s_%s" % (name, key),
merged_msa))
out_fname_phy = reroot_path(temp, self._out_prefix + ".phy")
with open(out_fname_phy, "w") as output_phy:
final_msa = MSA.join(*(msa for (_, msa) in merged_msas))
output_phy.write(interleaved_phy(final_msa))
partition_end = 0
out_fname_parts = reroot_path(temp, self._out_prefix + ".partitions")
with open(out_fname_parts, "w") as output_part:
for (name, msa) in merged_msas:
length = msa.seqlen()
output_part.write("DNA, %s = %i-%i\n"
% (name,
partition_end + 1,
partition_end + length))
partition_end += length
def _teardown(self, _config, temp):
move_file(reroot_path(temp, self._out_prefix + ".phy"),
self._out_prefix + ".phy")
move_file(reroot_path(temp, self._out_prefix + ".partitions"),
self._out_prefix + ".partitions")
class FastaToPartitionsNode(Node):
def __init__(self, infiles, out_partitions, partition_by = "123", dependencies = ()):
if (len(partition_by) != 3):
raise ValueError("Default 'partition_by' must be 3 entires long!")
elif not isinstance(infiles, dict):
raise TypeError("'infiles' must be a dictionary")
elif any(len(dd.get("partition_by", "123")) != 3 for dd in infiles.itervalues()):
raise ValueError("'partition_by' must be 3 entires long!")
elif not all(isinstance(dd, dict) for dd in infiles.values()):
raise TypeError("'infiles' must be a dictionary of dictionaries")
elif not any(("name" in dd) for dd in infiles.values()):
raise ValueError("'name' must be specified for all input files")
elif any((set(dd) - _VALID_KEYS) for dd in infiles.values()):
invalid_keys = set().union(*(set(dd) - _VALID_KEYS for dd in infiles.values()))
raise ValueError("Invalid keys found: %s" % ", ".join(invalid_keys))
self._infiles = infiles
self._out_part = out_partitions
self._part_by = partition_by
description = "<FastaToPartitions (default: %s): %i file(s) -> '%s'>" % \
(partition_by, len(infiles), out_partitions)
Node.__init__(self,
description = description,
input_files = infiles.keys(),
output_files = out_partitions,
dependencies = dependencies)
def _run(self, _config, temp):
end = 0
partitions = collections.defaultdict(list)
for (filename, msa) in _read_sequences(self._infiles):
length = msa.seqlen()
start, end = end + 1, end + length
for (group, offsets) in self._get_partition_by(filename):
if len(offsets) != 3:
parts = [("%i-%i\\3" % (start + offset, end)) for offset in offsets]
else:
parts = ["%i-%i" % (start, end)]
name = "%s_%s" % (self._infiles[filename]["name"], group)
partitions[name].extend(parts)
with open(reroot_path(temp, self._out_part), "w") as part_file:
for (name, parts) in sorted(partitions.items()):
part_file.writelines("DNA, %s = %s\n" % (name, ", ".join(parts)))
def _teardown(self, _config, temp):
move_file(reroot_path(temp, self._out_part), self._out_part)
def _get_partition_by(self, filename):
groups = self._infiles[filename].get("partition_by", self._part_by)
partition_by = {}
for (group, offset) in zip(groups, range(3)):
partition_by.setdefault(group, []).append(offset)
return list(sorted(partition_by.items()))
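# Worked example (derived from the code above): a per-file "partition_by" value of
# "112" maps to [('1', [0, 1]), ('2', [2])], i.e. codon positions 1 and 2 share a
# partition while position 3 gets its own.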
class FastaToInterleavedPhyNode(Node):
def __init__(self, infiles, out_phy, add_flag = False, dependencies = ()):
self._add_flag = add_flag
self._out_phy = out_phy
description = "<FastaToInterleavedPhy: %i file(s) -> '%s'%s>" % \
(len(infiles), out_phy, (" (w/ flag)" if add_flag else ""))
Node.__init__(self,
description = description,
input_files = infiles,
output_files = [out_phy],
dependencies = dependencies)
def _run(self, _config, temp):
msa = MSA.join(*(MSA.from_file(filename) for filename in sorted(self.input_files)))
with open(reroot_path(temp, self._out_phy), "w") as output:
output.write(interleaved_phy(msa, add_flag = self._add_flag))
def _teardown(self, _config, temp):
move_file(reroot_path(temp, self._out_phy), self._out_phy)
class FastaToSequentialPhyNode(Node):
def __init__(self, infiles, out_phy, add_flag = False, dependencies = ()):
self._add_flag = add_flag
self._out_phy = out_phy
description = "<FastaToInterleavedPhy: %i file(s) -> '%s'%s>" % \
(len(infiles), out_phy, (" (w/ flag)" if add_flag else ""))
Node.__init__(self,
description = description,
input_files = infiles,
output_files = [out_phy],
dependencies = dependencies)
def _run(self, _config, temp):
# Read and check that MSAs share groups
msas = [MSA.from_file(filename) for filename in sorted(self.input_files)]
MSA.validate(*msas)
blocks = []
for msa in msas:
blocks.append(sequential_phy(msa, add_flag = self._add_flag))
with open(reroot_path(temp, self._out_phy), "w") as output:
output.write("\n\n".join(blocks))
def _teardown(self, _config, temp):
move_file(reroot_path(temp, self._out_phy), self._out_phy)
def _read_sequences(filenames):
results = {}
for filename in filenames:
results[filename] = MSA.from_file(filename)
MSA.validate(*results.values())
return results.iteritems()
|
python
|
import sys
import srvdb
db = srvdb.SrvDb("./pdb-aggregator.db")
file = sys.argv[1]
print(file)
with open(file) as f:
content = f.read().splitlines()
print(content)
ips = []
for ip in content:
if ip not in ips:
print("Adding node {}".format(ip))
db.add_node(ip, False, 0, "")
ips.append(ip)
file = sys.argv[2]
print(file)
with open(file) as f:
content = f.read().splitlines()
print(content)
# routes = []
# for ip in content:
# if ip not in routes:
# print("Adding routes {}".format(ip))
# db.add_route(ip, route)
# ips.append(routes)
#
# ips = db.get_node_ips()
# for ip in ips:
# print("Found IP: {}".format(ip)
|
python
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import time
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
import paddlenlp as ppnlp
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics import ChunkEvaluator
from datasets import load_dataset
from paddlenlp.transformers import BertForTokenClassification, BertTokenizer
from paddlenlp.transformers import ErnieForTokenClassification, ErnieTokenizer
from paddlenlp.transformers import ErnieCtmForTokenClassification, ErnieCtmTokenizer
from paddlenlp.data import DataCollatorForTokenClassification
from paddlenlp.utils.log import logger
MODEL_CLASSES = {
"bert": (BertForTokenClassification, BertTokenizer),
"ernie": (ErnieForTokenClassification, ErnieTokenizer),
"ernie-ctm": (ErnieCtmForTokenClassification, ErnieCtmTokenizer)
}
parser = argparse.ArgumentParser()
# yapf: disable
parser.add_argument("--model_type", default="bert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), )
parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join( sum([ list(classes[-1].pretrained_init_configuration.keys()) for classes in MODEL_CLASSES.values() ], [])), )
parser.add_argument("--dataset", default="msra_ner", type=str, choices=["msra_ner", "peoples_daily_ner"] ,help="The named entity recognition datasets.")
parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform.", )
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.",)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=1, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=100, help="Save checkpoint every X updates steps.")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--device", default="gpu", type=str, choices=["cpu", "gpu", "xpu"] ,help="The device to select to train the model, is must be cpu/gpu/xpu.")
# yapf: enable
@paddle.no_grad()
def evaluate(model, loss_fct, metric, data_loader, label_num, mode="valid"):
model.eval()
metric.reset()
avg_loss, precision, recall, f1_score = 0, 0, 0, 0
for batch in data_loader:
logits = model(batch['input_ids'], batch['token_type_ids'])
loss = loss_fct(logits, batch['labels'])
avg_loss = paddle.mean(loss)
preds = logits.argmax(axis=2)
num_infer_chunks, num_label_chunks, num_correct_chunks = metric.compute(
batch['seq_len'], preds, batch['labels'])
metric.update(num_infer_chunks.numpy(),
num_label_chunks.numpy(), num_correct_chunks.numpy())
precision, recall, f1_score = metric.accumulate()
print("%s: eval loss: %f, precision: %f, recall: %f, f1: %f" %
(mode, avg_loss, precision, recall, f1_score))
model.train()
def do_train(args):
paddle.set_device(args.device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
# Create dataset, tokenizer and dataloader.
if args.dataset == "peoples_daily_ner":
raw_datasets = load_dataset(args.dataset)
else:
raw_datasets = load_dataset(args.dataset)
AutoForTokenClassification, AutoTokenizer = MODEL_CLASSES[args.model_type]
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
train_ds = raw_datasets['train']
label_list = train_ds.features['ner_tags'].feature.names
label_num = len(label_list)
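# For reference (not taken from this script): the msra_ner dataset typically uses
# the BIO tag set ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'], so
# label_num is 7 and index 0 ('O') lines up with the no_entity_id default below.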
no_entity_id = 0
def tokenize_and_align_labels(examples):
tokenized_inputs = tokenizer(
examples['tokens'],
max_seq_len=args.max_seq_length,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_split_into_words=True,
return_length=True)
labels = []
for i, label in enumerate(examples['ner_tags']):
label_ids = label
if len(tokenized_inputs['input_ids'][i]) - 2 < len(label_ids):
label_ids = label_ids[:len(tokenized_inputs['input_ids'][i]) -
2]
label_ids = [no_entity_id] + label_ids + [no_entity_id]
label_ids += [no_entity_id] * (
len(tokenized_inputs['input_ids'][i]) - len(label_ids))
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
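# Illustration of the alignment above (hypothetical values): a 3-token example with
# ner_tags [1, 2, 0] whose tokenized input_ids have length 5 ([CLS] t1 t2 t3 [SEP])
# ends up with labels [0, 1, 2, 0, 0]; no_entity_id fills the special-token slots
# and any remaining positions up to the input length.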
train_ds = train_ds.select(range(len(train_ds) - 1))
column_names = train_ds.column_names
train_ds = train_ds.map(tokenize_and_align_labels,
batched=True,
remove_columns=column_names)
ignore_label = -100
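# -100 matches the ignore_index passed to CrossEntropyLoss below, so label positions
# padded by the collator are excluded from the loss.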
batchify_fn = DataCollatorForTokenClassification(
tokenizer=tokenizer, label_pad_token_id=ignore_label)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=args.batch_size, shuffle=True, drop_last=True)
train_data_loader = DataLoader(
dataset=train_ds,
collate_fn=batchify_fn,
num_workers=0,
batch_sampler=train_batch_sampler,
return_list=True)
test_ds = raw_datasets['test']
test_ds = test_ds.select(range(len(test_ds) - 1))
test_ds = test_ds.map(tokenize_and_align_labels,
batched=True,
remove_columns=column_names)
test_data_loader = DataLoader(
dataset=test_ds,
collate_fn=batchify_fn,
num_workers=0,
batch_size=args.batch_size,
return_list=True)
if args.dataset == "peoples_daily_ner":
dev_ds = raw_datasets['validation']
dev_ds = dev_ds.select(range(len(dev_ds) - 1))
dev_ds = dev_ds.map(tokenize_and_align_labels,
batched=True,
remove_columns=column_names)
dev_data_loader = DataLoader(
dataset=dev_ds,
collate_fn=batchify_fn,
num_workers=0,
batch_size=args.batch_size,
return_list=True)
# Define the model network and its loss
model = AutoForTokenClassification.from_pretrained(
args.model_name_or_path, num_classes=label_num)
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
num_training_steps = args.max_steps if args.max_steps > 0 else len(
train_data_loader) * args.num_train_epochs
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
args.warmup_steps)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
metric = ChunkEvaluator(label_list=label_list)
global_step = 0
last_step = args.num_train_epochs * len(train_data_loader)
tic_train = time.time()
for epoch in range(args.num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
logits = model(batch['input_ids'], batch['token_type_ids'])
loss = loss_fct(logits, batch['labels'])
avg_loss = paddle.mean(loss)
if global_step % args.logging_steps == 0:
print(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (global_step, epoch, step, avg_loss,
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
avg_loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
if global_step % args.save_steps == 0 or global_step == num_training_steps:
if paddle.distributed.get_rank() == 0:
if args.dataset == "peoples_daily_ner":
evaluate(model, loss_fct, metric, dev_data_loader,
label_num, "valid")
evaluate(model, loss_fct, metric, test_data_loader,
label_num, "test")
paddle.save(model.state_dict(),
os.path.join(args.output_dir,
"model_%d.pdparams" % global_step))
if global_step >= num_training_steps:
return
if __name__ == "__main__":
args = parser.parse_args()
for arg in vars(args):
logger.info('{:20}:{}'.format(arg, getattr(args, arg)))
do_train(args)
|
python
|
from output.models.nist_data.list_pkg.qname.schema_instance.nistschema_sv_iv_list_qname_pattern_2_xsd.nistschema_sv_iv_list_qname_pattern_2 import NistschemaSvIvListQnamePattern2
__all__ = [
"NistschemaSvIvListQnamePattern2",
]
|
python
|
#!/usr/bin/env python3
from datetime import datetime, timedelta
from math import floor
import sys
from time import perf_counter
from betterprint.betterprint import bp, bp_dict
from modules.notations import byte_notation
from modules.createfolder import folder_logic, folder_stat_reset
from betterprint.colortext import Ct
from modules.freespace import free_space
from modules.multifile import file_logic
import modules.options as options
import modules.treewalk
START_PROG_TIME = perf_counter()
start_time = datetime.now()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def main():
try:
# ~~~ # -init display-
bp([f'\t{options.copyright}\n\t{options.license_info}\n{"━" * 40}',
Ct.A], veb=2)
bp([f'Program start: {start_time}\nSource: ', Ct.A, f'{args.source}',
Ct.GREEN, '\nTarget: ', Ct.A, f'{args.target}\n', Ct.GREEN,
'Excluded Folders: ', Ct.A, f'{args.exdir}\n', Ct.GREEN, 'Excluded'
' Files: ', Ct.A, f'{args.exfile}', Ct.GREEN])
bp(['Args: ', Ct.A], inl=1)
for k, v in vars(args).items():
if k != 'source' and k != 'target' and k != 'exdir' and k != \
'exfile' and k != 'available':
if k == 'hash':
bp([f' {k}: ', Ct.A, f'{v}', Ct.RED, ' |', Ct.A], num=0,
inl=1, log=0)
else:
bp([f' {k}: {v} |', Ct.A], inl=1, log=0)
bp([f'\n\n{"━" * 40}\n', Ct.A], log=0)
# ~~~ # -tree walk-
tree_return = modules.treewalk.tree_walk()
tw_tup = tree_return[2]
folder_total = f'{tw_tup[2]["num_dirs"]:,}'
file_total = f'{tw_tup[2]["num_files"]:,}'
file_size_total = byte_notation(tw_tup[2]["file_size"], ntn=1)
# ~~~ # -free space-
target_space = free_space(args.target)
target_space_bytenote = byte_notation(target_space['free_bytes'],
ntn=1)
# print out the tree walk data
bp([f'Source - Size: {file_size_total[1]:>10} | Folders: '
f'{folder_total} | Files: {file_total}\nTarget - Free: '
f'{target_space_bytenote[1]:>10}', Ct.A])
if tw_tup[2]["file_size"] >= target_space['free_bytes']:
bp(['not enough free space to copy all the data.', Ct.RED], err=2)
sys.exit(1)
bp([f'\n{"━" * 40}\n', Ct.A], log=0)
# ~~~ # -folder creation-
bp(['Create folders...', Ct.A])
folder_return = folder_logic(tw_tup[0])
f_time = folder_return[1]
folder_time = f'{f_time:,.4f}'
folder_success = folder_return[2]['success']
folder_failure = folder_return[2]['failure']
bp([f'Success: {folder_success}/{folder_total}\nFailure: '
f'{folder_failure}/{folder_total}\nDuration: '
f'{timedelta(seconds=floor(f_time))}', Ct.A])
bp([f'\n{"━" * 40}\n', Ct.A], log=0)
# ~~~ # -file creation-
file_return = file_logic(tw_tup[1], tw_tup[2])
file_size_success = byte_notation(file_return["val_size"], ntn=1)
file_size_failure = byte_notation(tw_tup[2]["file_size"] -
file_return["val_size"], ntn=1)
hex_tot = file_return["hash_time"] + file_return["val_hash_time"]
file_tot = int(file_return['read_time'] + file_return["write_time"])
bp([f'\n{"━" * 40}\n', Ct.A], log=0)
# ~~~ # -folder stat reset-
folder_reset = folder_stat_reset(folder_return[2]['success_dict'])
f_time += folder_reset[1]
# ~~~ # -final display-
bp([f'\n{" " * 16}Source Target FAILED TIME', Ct.A])
bp([f' Folders: {folder_total:>10}{folder_success:>10,}'
f'{folder_failure:>10,}{folder_time:>12s}s', Ct.A])
bp([f' Files: {file_total:>10}{file_return["success"]:>10,}'
f'{file_return["failure"]:>10,}{file_tot:>12,.4f}s', Ct.A])
bp([f' Bytes: {file_size_total[1]:>10}{file_size_success[1]:>10}'
f'{file_size_failure[1]:>10}', Ct.A])
bp([f'Validation: {file_total:>10}{file_return["val_success"]:>10,}'
f'{file_return["val_failure"]:>10,}{hex_tot:>12,.4f}s (+'
f'{file_return["val_read_time"]:,.4f}s)', Ct.A])
bp([f'\n\n{"━" * 40}\n', Ct.A], log=0)
end_time = perf_counter()
total_time = end_time - START_PROG_TIME
tft = (tree_return[1] + f_time + file_return["read_time"] +
file_return["hash_time"] + file_return["write_time"] +
file_return["val_read_time"] + file_return["val_hash_time"])
bp([f'\n{total_time:,.4f}s - Total Time\n{tree_return[1]:,.4f}s - Tree'
f' Walk Time\n{folder_time:}s - FolderCreation Time\n'
f'{file_return["read_time"]:,.4f}s - Source Read Time\n'
f'{file_return["hash_time"]:,.4f}s - Source Hash Validation Time\n'
f'{file_return["write_time"]:,.4f}s - Target Write Time\n'
f'{file_return["val_read_time"]:,.4f}s - Target Read Time\n'
f'{file_return["val_hash_time"]:,.4f}s - Target Hash Validation '
f'Time\n{tft:,.4f}s - Total Function Time\n{"━" * 40}\n'
f'{total_time - tft:,.4f}s - Program Overhead Time', Ct.A])
except KeyboardInterrupt:
bp(['Ctrl+C pressed...\n', Ct.RED], err=2)
sys.exit(1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
if __name__ == '__main__':
# ~~~ # -title-
bp([f'{options.ver} - {options.purpose}\n', Ct.BBLUE])
# ~~~ # -args-
args = options.args
# ~~~ # -variables-
bp_dict['verbose'] = args.verbose
bp_dict['date_log'] = args.date_log
bp_dict['log_file'] = args.log_file
bp_dict['error_log_file'] = args.error_log_file
bp_dict['color'] = 0 if args.no_color else 1
bp_dict['quiet'] = args.quiet
# ~~~ # -main-
bp(['calling main().', Ct.BMAGENTA], veb=2, num=0)
main()
|
python
|
# https://www.hackerrank.com/challenges/s10-geometric-distribution-2/problem
# Enter your code here. Read input from STDIN. Print output to STDOUT
x,y = map(int, input().split())
p = x/y
n = int(input())
answer = 0
for z in range(1,n+1):
temp = (1-p)**(z-1) * p
answer = answer + temp
print(round(answer,3))
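# Sanity check (not part of the original solution): the loop sums the geometric pmf,
# which telescopes to the closed form 1 - (1 - p)**n; e.g. for p = 1/3 and n = 5 both
# give roughly 0.868.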
|
python
|
# Generated by Django 3.0 on 2021-07-21 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wwwrate', '0007_auto_20210720_1450'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['posted_at']},
),
migrations.AlterField(
model_name='review',
name='content_rating',
field=models.PositiveIntegerField(default=10),
),
migrations.AlterField(
model_name='review',
name='design_rating',
field=models.PositiveIntegerField(default=10),
),
migrations.AlterField(
model_name='review',
name='usability_rating',
field=models.PositiveIntegerField(default=10),
),
]
|
python
|
import subprocess
from vendor.addressfetcher.addressfetcher import fetcher
# constant
ADDRESS_PATH = ''
def input_number(prompt):
while True:
try:
num = float(input(prompt))
break
except ValueError:
pass
return num
def display_menu(options):
for i in range(len(options)):
print("{:d}. {:s}".format(i+1, options[i]))
choice = 0
possible = list(range(1, len(options)+1))
while choice not in possible:
choice = input_number("Select an option:\n> ")
return choice
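# Usage sketch (illustrative only): display_menu(["Fetch", "Distribute"]) prints a
# 1-based menu, keeps prompting until the entered number matches an option, and
# returns that number (as a float, since input_number parses floats).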
def fetch():
global ADDRESS_PATH
whitelist_path = input(
"\nEnter the path of address whitelist file (Leave blank if not exist):\n> ")
blacklist_path = input(
"\nEnter the path of address blacklist file (Leave blank if not exist):\n> ")
# call address module
address = fetcher(whitelist_path, blacklist_path)
ADDRESS_PATH = address
def distri():
global ADDRESS_PATH
confirm_distri = input("\nDo distribution now? [Y/N]\n> ")
if confirm_distri == 'Y' or confirm_distri == 'y':
modelist = [
f'Interactive mode (do confirm per-transaction)',
f"Non-Interactive mode (doesn't confirm per-transaction)"
]
unfundlist = [
f'Skip',
f'Force send'
]
amount = input("\nEnter airdrop amount for each address:\n> ")
print("\nChoose distribution mode:\n")
mode = display_menu(modelist)
print("\nWhat to do if recipient don't have SOL in their account:\n")
unfund = display_menu(unfundlist)
if mode == 1:
if unfund == 1:
subprocess.run("python3 vendor/flatdistributor/flatdistributor.py transfer -a " +
ADDRESS_PATH + " --drop " + amount + "", shell=True)
if unfund == 2:
subprocess.run("python3 vendor/flatdistributor/flatdistributor.py transfer -a " +
ADDRESS_PATH + " --drop " + amount + " --allow-unfunded-recipient", shell=True)
if mode == 2:
if unfund == 1:
subprocess.run("python3 vendor/flatdistributor/flatdistributor.py transfer -a " +
ADDRESS_PATH + " --drop " + amount + " --non-interactive", shell=True)
if unfund == 2:
subprocess.run("python3 vendor/flatdistributor/flatdistributor.py transfer -a " + ADDRESS_PATH +
" --drop " + amount + " --non-interactive --allow-unfunded-recipient", shell=True)
else:
exit()
def main():
fetch()
distri()
if __name__ == "__main__":
main()
|
python
|
import os
import shutil
import tarfile
import urllib.parse
import urllib.request
import zipfile
from .constants import RU_VOWELS, DEFAULT_DATA_DIR
from pathlib import Path
from typing import Union
def count_syllables(word: str) -> int:
"""
Count the number of syllables in a word
Arguments:
word (str): The word string
Returns:
int: Number of syllables
"""
return sum((1 for char in word if char in RU_VOWELS))
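# Example (assuming RU_VOWELS holds the lowercase Russian vowel letters):
# count_syllables("молоко") returns 3, one per vowel.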
def to_path(path: str) -> Path:
"""
Convert a string representation of a path into a Path object
Arguments:
path (str): String representation of the path
Returns:
Path: A Path object
Raises:
TypeError: If the supplied value is neither a string nor a Path object
"""
if isinstance(path, str):
return Path(path)
elif isinstance(path, Path):
return path
else:
raise TypeError("Некорректно указан путь")
def download_file(
url: str,
filename: str = None,
dirpath: Union[str, Path] = DEFAULT_DATA_DIR,
force: bool = False
) -> str:
"""
Download a file from the network
Arguments:
url (str): URL of the file to download
filename (str): Name to give the file after download
dirpath (str|Path): Directory for the downloaded file
force (bool): Download the file even if it has already been downloaded
Returns:
str: Path to the downloaded file
Raises:
RuntimeError: If the file could not be downloaded
"""
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if not filename:
filename = os.path.basename(urllib.parse.urlparse(urllib.parse.unquote_plus(url)).path)
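# Illustrative example: for "https://example.com/data%20set.zip" the derived
# filename is "data set.zip" (percent-encoding is decoded, then the URL path's
# basename is taken).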
filepath = to_path(dirpath).resolve() / filename
if filepath.is_file() and force is False:
print(f"Файл {filepath} уже загружен")
return None
else:
try:
print(f"Загрузка файла {url}...")
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as response, open(filepath, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
except Exception:
raise RuntimeError("Failed to download the file")
else:
print(f"Файл успешно загружен: {filepath}")
return str(filepath)
def extract_archive(
archive_file: Union[str, Path],
extract_dir: Union[str, Path] = None
) -> str:
"""
Extract files from a ZIP or TAR archive
Arguments:
archive_file (str|Path): Path to the archive file
extract_dir (str|Path): Directory for the extracted files
Returns:
str: Path to the directory with the extracted files
"""
archive_file = to_path(archive_file).resolve()
if not extract_dir:
extract_dir = str(archive_file.parent)
archive_file = str(archive_file)
os.makedirs(extract_dir, exist_ok=True)
is_zip = zipfile.is_zipfile(archive_file)
is_tar = tarfile.is_tarfile(archive_file)
if not is_zip and not is_tar:
print(f"Файл {archive_file} не является архивом в формате ZIP или TAR")
return extract_dir
else:
print(f"Извлечение файлов из архива {archive_file}...")
shutil.unpack_archive(archive_file, extract_dir=extract_dir, format=None)
if is_zip:
with zipfile.ZipFile(archive_file, mode='r') as f:
members = f.namelist()
else:
with tarfile.open(archive_file, mode='r') as f:
members = f.getnames()
src_basename = os.path.commonpath(members)
dest_basename = os.path.basename(archive_file)
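# The loop below repeatedly strips extensions from the archive name, so e.g.
# "corpus.tar.xz" becomes "corpus"; if the archive's top-level directory has a
# different name, the extracted tree is moved under the stripped name.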
if src_basename:
while True:
tmp, _ = os.path.splitext(dest_basename)
if tmp == dest_basename:
break
else:
dest_basename = tmp
if src_basename != dest_basename:
return shutil.move(
os.path.join(extract_dir, src_basename),
os.path.join(extract_dir, dest_basename),
)
else:
return os.path.join(extract_dir, src_basename)
else:
return extract_dir
if __name__ == "__main__":
text = "самооборона"
print(count_syllables(text))
print(extract_archive('test.tar.xz'))
|
python
|
# -*- coding: utf-8 -*-
'''
Copyright © 2014 by Virginia Polytechnic Institute and State University
All rights reserved
Virginia Polytechnic Institute and State University (Virginia Tech) owns the copyright for the BEMOSS software and its
associated documentation (“Software”) and retains rights to grant research rights under patents related to
the BEMOSS software to other academic institutions or non-profit research institutions.
You should carefully read the following terms and conditions before using this software.
Your use of this Software indicates your acceptance of this license agreement and all terms and conditions.
You are hereby licensed to use the Software for Non-Commercial Purpose only. Non-Commercial Purpose means the
use of the Software solely for research. Non-Commercial Purpose excludes, without limitation, any use of
the Software, as part of, or in any way in connection with a product or service which is sold, offered for sale,
licensed, leased, loaned, or rented. Permission to use, copy, modify, and distribute this compilation
for Non-Commercial Purpose to other academic institutions or non-profit research institutions is hereby granted
without fee, subject to the following terms of this license.
Commercial Use If you desire to use the software for profit-making or commercial purposes,
you agree to negotiate in good faith a license with Virginia Tech prior to such profit-making or commercial use.
Virginia Tech shall have no obligation to grant such license to you, and may grant exclusive or non-exclusive
licenses to others. You may contact the following by email to discuss commercial use: [email protected]
Limitation of Liability IN NO EVENT WILL VIRGINIA TECH, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE
OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF VIRGINIA TECH OR OTHER PARTY HAS BEEN ADVISED
OF THE POSSIBILITY OF SUCH DAMAGES.
For full terms and conditions, please visit https://bitbucket.org/bemoss/bemoss_os.
Address all correspondence regarding this license to Virginia Tech’s electronic mail address: [email protected]
__author__ = "Warodom Khamphanchai"
__credits__ = ""
__version__ = "1.2.1"
__maintainer__ = "Warodom Khamphanchai"
__email__ = "[email protected]"
__website__ = "kwarodom.wordpress.com"
__status__ = "Prototype"
__created__ = "2014-8-28 16:19:00"
__lastUpdated__ = "2015-02-11 17:12:03"
'''
import logging
import sys
import datetime
import json
import os
from volttron.lite.agent import BaseAgent, PublishMixin, periodic
from volttron.lite.agent import utils, matching
from volttron.lite.messaging import headers as headers_mod
import settings
import psycopg2 # PostgresQL database adapter
import re
utils.setup_logging()
_log = logging.getLogger(__name__)
app_name = "appLauncher"
debug_agent = False
clock_time = 1
time_to_start_previous_apps = 30 # sec
#@params agent & DB interfaces
db_host = settings.DATABASES['default']['HOST']
db_port = settings.DATABASES['default']['PORT']
db_database = settings.DATABASES['default']['NAME']
db_user = settings.DATABASES['default']['USER']
db_password = settings.DATABASES['default']['PASSWORD']
db_table_application_registered = settings.DATABASES['default']['TABLE_application_registered']
db_table_application_running = settings.DATABASES['default']['TABLE_application_running']
class AppLauncherAgent(PublishMixin, BaseAgent):
'''Listens to UI to launch new APP in the BEMOSS APP Store'''
def __init__(self, config_path, **kwargs):
super(AppLauncherAgent, self).__init__(**kwargs)
self.config = utils.load_config(config_path)
# self.app_number = 0
#connect to the database
try:
self.con = psycopg2.connect(host=db_host, port=db_port, database=db_database, user=db_user,
password=db_password)
self.cur = self.con.cursor() # open a cursor to perform database operations
print("AppLauncher Agent connects to the database name {} successfully".format(db_database))
        except Exception:
print("ERROR: {} fails to connect to the database name {}".format(app_name, db_database))
self.time_applauncher_start = datetime.datetime.now()
self.already_started_previous_apps = False
def setup(self):
# Demonstrate accessing a value from the config file
_log.info(self.config['message'])
self._agent_id = self.config['agentid']
# Always call the base class setup()
super(AppLauncherAgent, self).setup()
# self.appLauncherInitiator()
print "AppLauncher Agent is waiting for UI to activate/disable APPs"
# clockBehavior (CyclicBehavior)
@periodic(clock_time)
def clockBehavior(self):
#1. check current time
self.time_applauncher_now = datetime.datetime.now()
if self.already_started_previous_apps:
# print "AppLauncher Agent >> appLauncherInitiator has already run"
pass
else:
# print "AppLauncher Agent >> appLauncherInitiator has not run yet"
if (self.time_applauncher_now - self.time_applauncher_start).seconds > time_to_start_previous_apps:
print "AppLauncher Agent is starting previously running Apps"
self.appLauncherInitiator()
self.already_started_previous_apps = True
else:
pass
# Add Cyclic behavior to track current status of app then update DB
def appLauncherInitiator(self):
try:
self.cur.execute("SELECT * FROM "+db_table_application_running)
# self.cur.execute("SELECT status FROM applications_running WHERE app_name=%s", (ui_app_name,))
print self.cur.rowcount
if self.cur.rowcount != 0:
all_row = self.cur.fetchall()
for row in all_row:
if row[3] == 'running': # rerun app for the agent
# To launch agent: 1.get app_name, 2.get agent_id, 3.get auth_token
print "This {} is {}".format(row[1], row[3])
_temp_app_agent_id = str(row[1]).split('_')
app_name = _temp_app_agent_id[0]+'_'+_temp_app_agent_id[1]
agent_id = _temp_app_agent_id[2]
self.cur.execute("SELECT auth_token FROM "+db_table_application_registered+" WHERE app_name=%s",
(app_name,))
if self.cur.rowcount != 0:
auth_token = str(self.cur.fetchone()[0])
app_setting = row[4]
print "AppLauncher >> is trying the previous run App {} for agent {} with auth_token {} and " \
"app_setting {}".format(app_name, agent_id, auth_token, app_setting)
self.app_has_already_launched = False
self.launch_app(app_name, agent_id, auth_token)
else: # do nothing
print "This {} is {}".format(row[1], row[3])
else:
print "AppLauncher >> no App was running"
        except Exception:
            print "AppLauncher >> failed to launch the previously running Apps"
# on_match (Cyclic Behavior) to filter message from the UI to launch new APP
@matching.match_start('/ui/appLauncher/')
def on_match(self, topic, headers, message, match):
print "AppLauncher Agent got Topic: {topic}".format(topic=topic)
_sub_topic = str(topic).split('/')
app_name = _sub_topic[3]
agent_id = _sub_topic[4]
        _data = json.loads(message[0])
auth_token = _data.get('auth_token')
if _sub_topic[5] == 'launch':
self.app_has_already_launched = False
self.launch_app(app_name, agent_id, auth_token)
elif _sub_topic[5] == 'disable':
self.app_has_already_launched = False
self.disable_app(app_name, agent_id, auth_token)
else:
"AppLauncher Agent does not understand this message"
def launch_app(self, ui_app_name, ui_agent_id, ui_auth_token):
#1. query database whether the app_name is verified and registered
#if app_name is in database with the valid authorization_token, then launch agent
self.cur.execute("SELECT auth_token FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
if self.cur.rowcount != 0:
app_auth_token = self.cur.fetchone()[0]
if ui_auth_token == app_auth_token:
# 1. launch app
PROJECT_DIR = settings.PROJECT_DIR
sys.path.append(PROJECT_DIR)
os.system("bin/volttron-ctrl list-agent > app_running_agent.txt")
infile = open('app_running_agent.txt', 'r')
for line in infile:
#print(line, end='') #write to a next file name outfile
match = re.search(ui_app_name+'_'+ui_agent_id+'.launch.json', line) \
and re.search('running', line) # have results in match
if match: # The app that ui requested has already launched
self.app_has_already_launched = True
print "AppLauncher failed to launch APP: {}, APP has actually been launched"\
.format(ui_app_name)
print "AppLauncher >> {}".format(line)
if self.app_has_already_launched:
_launch_file_to_check = str(ui_app_name) + "_" + str(ui_agent_id)
self.cur.execute("SELECT status FROM "+db_table_application_running+" WHERE app_agent_id=%s",
(_launch_file_to_check,))
if self.cur.rowcount != 0: # this APP used to be launched before
_app_status = str(self.cur.fetchone()[0])
if _app_status == "running": # no need to launch new app
pass
else:
self.cur.execute("UPDATE application_running SET status=%s WHERE app_agent_id=%s",
("running", _launch_file_to_check,))
self.con.commit()
else:
# 2. log app that has been launched to the database
_launch_file_name = str(ui_app_name) + "_" + str(ui_agent_id)
_start_time = str(datetime.datetime.now())
_app_status = "running"
self.cur.execute("SELECT application_id FROM "+db_table_application_running)
if self.cur.rowcount != 0:
# print 'cur.fetchall()' + str(max(cur.fetchall())[0])
app_no = max(self.cur.fetchall())[0] + 1
else: #default no_app
app_no = 1
self.cur.execute("INSERT INTO application_running(application_id, app_agent_id, start_time, status) "
"VALUES(%s,%s,%s,%s)",
(app_no, _launch_file_name, _start_time, _app_status))
self.con.commit()
print "AppLauncher >> the requested APP {} for {} is running but not in db, " \
"now it is added to db".format(ui_app_name, ui_agent_id)
print "AppLauncher >> NOTE Date and Time launch APP is the current time not actual time"
_topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' \
+ 'launch/response'
_headers = {
headers_mod.FROM: app_name,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
}
_message = "failure"
self.publish(_topic_appLauncher_ui, _headers, _message)
else: # APP has not launched yet
_launch_file_to_check = str(ui_app_name) + "_" + str(ui_agent_id)
self.cur.execute("SELECT status FROM "+db_table_application_running+" WHERE app_agent_id=%s",
(_launch_file_to_check,))
if self.cur.rowcount != 0: # delete existing row from the table before launching new app
# self.cur.execute("DELETE FROM "+db_table_application_running+" WHERE app_agent_id=%s",
# (_launch_file_to_check,))
# self.con.commit()
self.launch_existing_app(ui_app_name, ui_agent_id)
else: #this APP has never been launched and not in db launch new app
self.launch_new_app(ui_app_name, ui_agent_id)
else:
print "UI failed to authorize with AppLauncher Agent before launching the requested APP"
else:
print "The APP that UI requested is neither REGISTERED nor AVAILABLE"
def launch_existing_app(self, ui_app_name, ui_agent_id):
self.cur.execute("SELECT executable FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
# 1. launch app for an agent based on the exec file and agent_id
if self.cur.rowcount != 0:
_exec_name = str(self.cur.fetchone()[0])
_exec = _exec_name+"-0.1-py2.7.egg --config \"%c\" --sub \"%s\" --pub \"%p\""
data = {
"agent": {
"exec": _exec
},
"agent_id": ui_agent_id
}
PROJECT_DIR = settings.PROJECT_DIR
_launch_file = os.path.join(PROJECT_DIR, "bemoss/Applications/launch/"
+ str(ui_app_name) + "_" + str(ui_agent_id) +".launch.json")
if debug_agent: print(_launch_file)
with open(_launch_file, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
if debug_agent: print(os.path.basename(_launch_file))
os.system("bin/volttron-ctrl load-agent "+_launch_file)
os.system("bin/volttron-ctrl start-agent "+os.path.basename(_launch_file))
os.system("bin/volttron-ctrl list-agent")
print "AppLauncher has successfully launched APP: {} for Agent: {}"\
.format(ui_app_name, ui_agent_id)
# send reply back to UI
_topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' + 'launch/response'
_headers = {
headers_mod.FROM: app_name,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
}
_message = "success"
self.publish(_topic_appLauncher_ui, _headers, _message)
def launch_new_app(self, ui_app_name, ui_agent_id):
self.cur.execute("SELECT executable FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
# 1. launch app for an agent based on the exec file and agent_id
if self.cur.rowcount != 0:
_exec_name = str(self.cur.fetchone()[0])
_exec = _exec_name+"-0.1-py2.7.egg --config \"%c\" --sub \"%s\" --pub \"%p\""
data = {
"agent": {
"exec": _exec
},
"agent_id": ui_agent_id
}
PROJECT_DIR = settings.PROJECT_DIR
_launch_file = os.path.join(PROJECT_DIR, "bemoss/Applications/launch/"
+ str(ui_app_name) + "_" + str(ui_agent_id) +".launch.json")
if debug_agent: print(_launch_file)
with open(_launch_file, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
if debug_agent: print(os.path.basename(_launch_file))
os.system("bin/volttron-ctrl load-agent "+_launch_file)
os.system("bin/volttron-ctrl start-agent "+os.path.basename(_launch_file))
os.system("bin/volttron-ctrl list-agent")
print "AppLauncher has successfully launched APP: {} for Agent: {}"\
.format(ui_app_name, ui_agent_id)
# send reply back to UI
_topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' + 'launch/response'
_headers = {
headers_mod.FROM: app_name,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
}
_message = "success"
self.publish(_topic_appLauncher_ui, _headers, _message)
# self.app_number += 1
self.cur.execute("SELECT description FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
if self.cur.rowcount != 0:
_app_description = str(self.cur.fetchone()[0])
print "The description of APP: {} is {}".format(ui_app_name, _app_description)
else:
print "AppLauncher failed to get APP: {} description".format(ui_app_name)
# 2. log app that has been launched to the database
_launch_file_name = str(ui_app_name) + "_" + str(ui_agent_id)
_start_time = str(datetime.datetime.now())
_app_status = "running"
self.cur.execute("SELECT application_id FROM "+db_table_application_running)
if self.cur.rowcount != 0:
# print 'cur.fetchall()' + str(max(cur.fetchall())[0])
app_no = max(self.cur.fetchall())[0] + 1
else: #default no_app
app_no = 1
self.cur.execute("INSERT INTO application_running(application_id, app_agent_id, start_time, status) "
"VALUES(%s,%s,%s,%s)",
(app_no, _launch_file_name, _start_time, _app_status))
self.con.commit()
print "AppLauncher finished update table applications_running of APP: {}".format(ui_app_name)
print "with launch_file: {}, at timestamp {}".format(_launch_file, _start_time)
else:
print "AppLauncher failed to launch APP: {} for Agent: {}".format(ui_app_name, ui_agent_id)
def disable_app(self, ui_app_name, ui_agent_id, ui_auth_token):
#1. query database whether the ui_app_name is verified and registered
self.cur.execute("SELECT auth_token FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
if self.cur.rowcount != 0:
app_auth_token = self.cur.fetchone()[0]
if ui_auth_token == app_auth_token:
#check whether the ui_app_name and ui_agent_id is actually running
PROJECT_DIR = settings.PROJECT_DIR
sys.path.append(PROJECT_DIR)
os.system("bin/volttron-ctrl list-agent > app_running_agent.txt")
infile = open('app_running_agent.txt', 'r')
for line in infile:
#print(line, end='') #write to a next file name outfile
match = re.search(ui_app_name+'_'+ui_agent_id+'.launch.json', line) \
and re.search('running', line) # have results in match
if match: # The app that ui requested has already launched
self.app_has_already_launched = True
else:
pass
if self.app_has_already_launched:
_launch_file_to_check = str(ui_app_name) + "_" + str(ui_agent_id)
self.cur.execute("SELECT status FROM "+db_table_application_running+" WHERE app_agent_id=%s",
(_launch_file_to_check,))
if self.cur.rowcount != 0:
_app_status = str(self.cur.fetchone()[0])
#if it's running disable app
if _app_status == "running":
_lauch_file_to_disable = _launch_file_to_check+".launch.json"
os.system("bin/volttron-ctrl stop-agent "+_lauch_file_to_disable)
os.system("bin/volttron-ctrl list-agent")
print "AppLauncher has successfully disabled APP: {} ".format(ui_app_name)
self.cur.execute("UPDATE application_running SET status=%s WHERE app_agent_id=%s"
, ('disabled', _launch_file_to_check))
self.con.commit()
# send reply back to UI
topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' \
+ 'disable/response'
headers = {
headers_mod.FROM: app_name,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
}
message = "success"
self.publish(topic_appLauncher_ui, headers, message)
elif _app_status == "disabled":
print "AppLauncher: the requested APP: {} for Agent: {} has already disabled"\
.format(ui_app_name, ui_agent_id)
else:
print "AppLauncher: the requested APP: {} for Agent: {} has unknown status"\
.format(ui_app_name, ui_agent_id)
else:
print "AppLauncher: APP {} for Agent: {} is not running".format(ui_app_name, ui_agent_id)
                else:  # the app is actually not running, no need to take action
                    print "AppLauncher: discard request to disable APP: {} for Agent: {} since it's not running"\
                        .format(ui_app_name, ui_agent_id)
else:
print "UI failed to authorize with AppLauncher Agent before disabling the requested APP"
else:
print "The APP that UI requested is neither REGISTERED nor AVAILABLE"
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
try:
utils.default_main(AppLauncherAgent,
description='this is an AppLauncher agent',
argv=argv)
except Exception as e:
_log.exception('unhandled exception')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
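# Illustrative only (not part of the original agent): the message shape that
# on_match() above expects from the UI. The topic segments map to
# _sub_topic[3] == app_name, _sub_topic[4] == agent_id and _sub_topic[5] == the
# requested action, and the first message element is a JSON string carrying
# 'auth_token'. The app/agent names and token below are made-up examples.
#
#   topic   = '/ui/appLauncher/lighting_scheduler_app/RTH1A1/launch'
#   headers = {headers_mod.FROM: 'ui',
#              headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON}
#   message = [json.dumps({'auth_token': '<token from application_registered>'})]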
|
python
|
import webbrowser
import cv2
import numpy as np
import pyautogui
import PIL.ImageGrab  # a bare "import PIL" does not make PIL.ImageGrab available
import time
def init():
# TODO: Add more example websites with recaptcha
webbrowser.open(
'''https://jsso.indiatimes.com/sso/identity/register?channel=businessinsider&[email protected]'''
)
# Move to a temporary location and wait for window to open
pyautogui.moveTo(1200, 200)
time.sleep(5)
def get_coords():
# Grab a screenshot and save it
screenshot = PIL.ImageGrab.grab()
screenshot.save("hay.png")
# Convert the PIL image to an OpenCV one and read in the needle
haystack = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
needle = cv2.imread('needle.png')
# Find the coordinates of the ReCaptcha logo
diff = cv2.matchTemplate(haystack, needle, cv2.TM_CCORR_NORMED)
x, y = np.unravel_index(np.argmax(diff), diff.shape)
# Subtract offset of Checkbox from logo
return x - 230, y + 60
def click_captcha(x, y):
# Move to the captcha, but overshoot and then fine-tune
pyautogui.moveTo(x - 28, y + 50, duration=0.5)
pyautogui.moveTo(x + 3, y - 51, duration=0.20)
pyautogui.moveTo(x, y, duration=0.2)
# Pause momentarily before clicking
time.sleep(0.2)
pyautogui.click()
# Once click has been registered, move away
time.sleep(0.5)
pyautogui.moveTo(x - 12, y + 42, duration=0.1)
def main():
print("Starting...")
init()
print("Finding Captcha...")
x, y = get_coords()
print("Coords: (%d, %d)" % (x, y))
click_captcha(x, y)
print("Done!")
if __name__ == '__main__':
main()
|
python
|
"""
A class/method/function should have only 1 reason to change!!!
It should have single responsibility
eg. book movie for a theatre
"""
class BookMovie(object):
"""
Bad code
"""
def book_movie_seat(self, movie, seat):
        if not self.is_seat_available(seat):
            return False
self.book_seat()
def is_seat_available(self, seat):
pass
def book_seat(self):
pass
"""
In above class if we change how the seat availability is defined, it will change,
if booking seat process changes it will be changed so, move it to somthing like SeatValidator class
give it seat and ask if the seat is available
"""
|
python
|
import unittest
from Credentials import Credentials
class TestCredentials(unittest.TestCase):
"""
Test class that defines test cases for the Credentials class behaviours
"""
def setUp(self):
"""
        Set up method to run before each test case
"""
self.new_credentials = Credentials("Instagram", "123654")
def test_credentials_instance(self):
"""
Method that tests if the new_credentials have been instantiated correctly
"""
self.assertEqual(self.new_credentials.account_name, "Instagram")
self.assertEqual(self.new_credentials.account_password, "123654")
def test_save_credentials(self):
"""
Method that tests if the new credentials have been saved
"""
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list), 1)
def test_save_multiple_credentials(self):
"""
Method that saves multiple credentials to credentials_list test
"""
self.new_credentials.save_credentials()
new_test_credential = Credentials("Twitter", "741258963")
new_test_credential.save_credentials()
self.assertEqual(len(Credentials.credentials_list), 2)
def tearDown(self):
"""
Method that clears the credentials_list after every test to ensure that there is no error
"""
Credentials.credentials_list = []
def test_find_credential_by_name(self):
"""
Test to check if we can find credentials and display them
"""
self.new_credentials.save_credentials()
new_test_credential = Credentials("Twitter", "741258963")
new_test_credential.save_credentials()
found_credential = Credentials.find_by_name("Twitter")
self.assertEqual(found_credential.account_name, new_test_credential.account_name)
def test_display_all_credentials(self):
"""
TestCase to test if all credentials are displayed
"""
self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)
if __name__ == '__main__':
unittest.main()
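# A sketch (an assumption, not the project's actual Credentials.py, which is imported
# above) of the class these tests imply: a class-level credentials_list plus
# save_credentials(), find_by_name() and display_credentials(). Shown here for
# reference only; it would live in Credentials.py, not in this test module.
#
# class Credentials(object):
#     credentials_list = []
#
#     def __init__(self, account_name, account_password):
#         self.account_name = account_name
#         self.account_password = account_password
#
#     def save_credentials(self):
#         Credentials.credentials_list.append(self)
#
#     @classmethod
#     def find_by_name(cls, name):
#         for credential in cls.credentials_list:
#             if credential.account_name == name:
#                 return credential
#
#     @classmethod
#     def display_credentials(cls):
#         return cls.credentials_list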
|
python
|
"""Module for handling buses."""
from sinfactory.component import Component
from sinfactory.load import Load
from sinfactory.generator import Generator
class Bus(Component):
"""Node class"""
def __init__(self, pf_object):
"""Constructor for the Bus class.
Args:
pf_object: The power factory object we will store.
"""
super().__init__(pf_object)
elms = pf_object.GetConnectedElements()
self.loads = {}
self.gens = {}
# If this makes initialisation too slow, only calculate this on
# request.
for elm in elms:
elm_name = elm.GetFullName()
if "ElmLod" in elm_name:
self.loads[elm.cDisplayName] = Load(elm)
if "ElmSym" in elm_name:
self.gens[elm.cDisplayName] = Generator(elm)
self.cubs = []
for elm in pf_object.GetConnectedCubicles():
self.cubs.append(elm)
@property
def u(self):
"""The voltage magnitude of the bus in p.u."""
return self.get_attribute("m:u")
@property
def island_id(self):
"""The island id of the bus.
In case the system has been split up into different islands
the buses in the same island will have the same id."""
return self.get_attribute("b:ipat")
|
python
|
#!/usr/bin/python
# coding:utf-8
# ServerSan - ss-agent.py
# 2018/3/14 15:03
#
__author__ = 'Benny <[email protected]>'
__version__ = '1.0.0'
import os
import platform
import socket
import sys
import time
import cpuinfo
import psutil
import requests
# API = 'http://127.0.0.1:5000/'
API = 'https://api.serversan.date:5000/'
def get_uptime():
return psutil.boot_time()
def get_os():
if platform.system() == 'Windows':
uname = platform.uname()
return '%s %s %s' % (uname[0], uname[2], uname[4])
else:
        uname = platform.dist()  # note: removed in Python 3.8+; the third-party "distro" package is the replacement
return '%s %s %s %s' % (uname[0], uname[1], uname[2], platform.machine())
def get_kernel():
info = platform.version() if platform.system() == 'Windows' else platform.release()
return info
def get_process_count():
return len(psutil.pids())
def get_sessions():
info = '%d user(s) in Total' % len(psutil.users())
for user in psutil.users():
info += '\n%s on %s from %s at %s' % (
user[0], user[1], user[2], time.strftime("%Y-%m-%d %H:%M", time.localtime(user[3])))
return info
def get_cpu_model():
return cpuinfo.get_cpu_info()['brand']
def get_cpu_count():
return psutil.cpu_count()
def get_cpu_freq():
    # psutil won't return the current cpu freq inside virtualized environments.
# return psutil.cpu_freq()[0]
return round(float(cpuinfo.get_cpu_info()['hz_actual'].split(' ')[0]), 2)
def get_host_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
s.close()
return ip
def network_activity():
old_value = old_value2 = 0
while True:
new_value = psutil.net_io_counters().bytes_recv
new_value2 = psutil.net_io_counters().bytes_sent
if old_value:
rx = round((new_value - old_value) / 1024.0, 2)
tx = round((new_value2 - old_value2) / 1024.0, 2)
rx_tx = round((new_value - old_value + new_value2 - old_value2) / 1024.0, 2)
break
old_value = new_value
old_value2 = new_value2
time.sleep(1)
return [tx, rx, rx_tx]
def current_network_flow():
rx = round(psutil.net_io_counters().bytes_recv / 1024.0 / 1024 / 1024, 2)
tx = round(psutil.net_io_counters().bytes_sent / 1024.0 / 1024 / 1024, 2)
return [tx, rx]
def average_load():
return psutil.cpu_percent()
def mem():
used = round(psutil.virtual_memory().used / 1024.0 / 1024, 2)
total = round(psutil.virtual_memory().total / 1024.0 / 1024, 2)
percent = psutil.virtual_memory().percent
return [used, total, percent]
def swap():
used = round(psutil.swap_memory().used / 1024.0 / 1024, 2)
total = round(psutil.swap_memory().total / 1024.0 / 1024, 2)
percent = psutil.swap_memory().percent
return [used, total, percent]
def disk():
used = round(psutil.disk_usage('/').used / 1024.0 / 1024 / 1024, 2)
total = round(psutil.disk_usage('/').total / 1024.0 / 1024 / 1024, 2)
percent = psutil.disk_usage('/').percent
return [used, total, percent]
def top_process():
cmd = 'ps axc -o uname:12,pcpu,rss,cmd --sort=-pcpu,-rss --noheaders --width 120|head'
with os.popen(cmd) as p:
pro = p.read()
info = pro if pro else 'Windows is not supported.'
return info
def get_hostname():
return platform.node()
def build():
message = dict(auth=get_auth_token().rstrip('\n'), hostname=get_hostname(),
uptime=get_uptime(), os=[get_os(), get_kernel()], pro=get_process_count(),
session=get_sessions(), cpu=[get_cpu_model(), get_cpu_count(), get_cpu_freq()],
ip=get_host_ip(), network=network_activity(), flow=current_network_flow(),
percent=average_load(), mem=mem(), swap=swap(), disk=disk(), top=top_process()
)
return message
def send_request(dic):
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
print(requests.post(API + 'v1/create', json=dic, headers=headers).text)
def get_auth_token():
path = os.environ.get('HOMEPATH') + '/ss-auth.log' if platform.system() == 'Windows' \
else '/etc/serversan/ss-auth.log'
with open(path) as f:
return f.read()
# TODO: upgrade client agent: shell scripts or ...?
def upgrade():
pass
def main():
json_result = build()
send_request(json_result)
if __name__ == '__main__':
if len(sys.argv) == 1:
main()
elif sys.argv[1] == 'version':
        print('The current ServerSan agent version is %s' % __version__)
else:
print('Wrong parameters.')
|
python
|
import pyautogui
import time
# time to change tabs from editor to paint;
time.sleep(10)
# click once to give the paint canvas focus before drawing starts
pyautogui.click()
# can be varied according to convenience
distance = 250
while distance > 0:
# right
pyautogui.dragRel(distance, 0, duration = 0.1)
distance -= 5
# down
pyautogui.dragRel(0, distance, duration = 0.1)
# left
pyautogui.dragRel(-distance, 0, duration = 0.1)
distance -= 5
#up
pyautogui.dragRel(0, -distance, duration = 0.1)
|
python
|
from flask import (
Flask, render_template, send_from_directory, redirect, url_for, request)
from . import settings as st
from .persistency import PersistencyManager
import markdown
from pygments.formatters import HtmlFormatter
from flask_wtf import FlaskForm
from flask_pagedown.fields import PageDownField
from wtforms.fields import SubmitField
from flask_pagedown import PageDown
import os
app = Flask(
__name__, template_folder=st.TEMPLATE_FOLDER,
static_folder=st.STATIC_FOLDER)
pagedown = PageDown(app)
class PageDownForm(FlaskForm):
pagedown = PageDownField('Enter your markdown')
submit = SubmitField('Submit')
def run_flask_server():
"""Run the flask server"""
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
app.debug = True
app.run()
@app.route('/')
def index():
persistencyManager = PersistencyManager(
st.ZETTELKASTEN)
zettelkasten_list = persistencyManager.get_list_of_filenames()
zettelkasten_list.sort()
return render_template('startpage.html', zettelkasten=zettelkasten_list)
@app.route('/<file>')
def show_md_file(file):
persistencyManager = PersistencyManager(
st.ZETTELKASTEN)
filename = file
input_file = persistencyManager.get_string_from_file_content(filename)
htmlString = markdown.markdown(
input_file, output_format='html5',
extensions=[
"fenced_code",
'codehilite',
'attr_list',
'pymdownx.arithmatex'],
extension_configs={'pymdownx.arithmatex': {'generic': True}}
)
formatter = HtmlFormatter(style="emacs", full=True, cssclass="codehilite")
css_string = formatter.get_style_defs()
return render_template(
"mainpage.html",
codeCSSString="<style>" + css_string + "</style>",
htmlString=htmlString,
filename=filename)
@app.route('/edit/<filename>', methods=['GET', 'POST'])
def edit(filename):
persistencyManager = PersistencyManager(
st.ZETTELKASTEN)
input_file = persistencyManager.get_string_from_file_content(filename)
markdown_string = input_file
form = PageDownForm()
form.pagedown.data = markdown_string
if form.validate_on_submit():
if request.method == 'POST':
new_markdown_string = request.form['pagedown']
form.pagedown.data = new_markdown_string
persistencyManager.overwrite_file_content(
filename, new_markdown_string)
return redirect(url_for('show_md_file', file=filename))
return render_template('edit.html', form=form)
@app.route('/images/<path:filename>')
def send_image(filename):
return send_from_directory(
st.ABSOLUTE_PATH_IMAGES,
filename)
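# A possible entry point, assuming this module is also meant to be executed directly
# (the package may normally start the server from its own CLI instead).
if __name__ == '__main__':
    run_flask_server()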
|
python
|
from pwn import *  # NOQA
import binascii  # imported explicitly rather than relying on pwn's star import
flag = b"flag{AAAAAAAAAAA}"
flag = bytes(bin(int(binascii.hexlify(flag), 16)), 'utf8')
class GuessIterator:
def __init__(self):
self.known_part = b""
self.first_block = True
self.i = -1
def know_guess(self):
self.known_part = self.current_guess()
self.first_block = False
self.i = -1
def current_guess(self):
if self.first_block:
guess = bytes(bin(self.i).rjust(16, ' '), 'utf8')
else:
guess = bytes(bin(self.i)[2:].rjust(16, '0'), 'utf8')
return self.known_part + guess
def __iter__(self):
return self
def __next__(self):
self.i += 1
guess = self.current_guess()
return guess
guessing = GuessIterator()
best_index = 0
def take_guess():
return next(guessing)
def wrong_byte_feedback(index):
global best_index
if index is None: # No wrong byte
guessing.know_guess()
best_index += 16
elif index % 16 == 0 and index > best_index:
guessing.know_guess()
best_index += 16
# GAME ########################
p = process(['python3', './remote.py'])
try:
while True:
p.sendline(take_guess())
result = p.recvline()
i = 0
for c in result:
if c == ord('0'):
i += 1
else:
break
i = i // 2
if i % 16 == 0 and i > best_index:
print(guessing.current_guess())
wrong_byte_feedback(i)
except Exception as e:
print(guessing.current_guess())
print(binascii.unhexlify(hex(int(str(guessing.current_guess(), 'utf8').strip()[2:], 2))[:2]))
|
python
|
from setuptools import setup
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='SimpleHTTPSAuthServer',
version='1.1.0',
description='HTTPS server with Basic authentication and client certificate authentication.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/oza6ut0ne/SimpleHTTPSAuthServer',
license='MIT',
author='Ryota Okimatsu',
author_email='[email protected]',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
py_modules=['SimpleHTTPSAuthServer']
)
|
python
|
""" A simple example which prints out parsed streaming responses.
Python version: 3.6+
Dependencies (use `pip install X` to install a dependency):
- websockets
Usage:
python deepgram_streaming.py -k 'YOUR_DEEPGRAM_API_KEY' /path/to/audio.wav
Limitations:
- Only parses signed, 16-bit little-endian encoded WAV files.
"""
import argparse
import asyncio
import json
import sys
import wave
import websockets
# Mimic sending a real-time stream by sending this many seconds of audio at a time.
REALTIME_RESOLUTION = 0.100
async def run(data, key, channels, sample_width, sample_rate, filepath):
# How many bytes are contained in one second of audio.
byte_rate = sample_width * sample_rate * channels
print('This demonstration will print all finalized results, not interim results.')
# Connect to the real-time streaming endpoint, attaching our credentials.
async with websockets.connect(
# Alter the protocol and base URL below.
f'wss://api.deepgram.com/v1/listen?punctuate=true&channels={channels}&sample_rate={sample_rate}&encoding=linear16',
extra_headers={
'Authorization': 'Token {}'.format(key)
}
) as ws:
async def sender(ws):
""" Sends the data, mimicking a real-time connection.
"""
nonlocal data
try:
total = len(data)
while len(data):
# How many bytes are in `REALTIME_RESOLUTION` seconds of audio?
i = int(byte_rate * REALTIME_RESOLUTION)
chunk, data = data[:i], data[i:]
# Send the data
await ws.send(chunk)
# Mimic real-time by waiting `REALTIME_RESOLUTION` seconds
# before the next packet.
await asyncio.sleep(REALTIME_RESOLUTION)
# An empty binary message tells Deepgram that no more audio
# will be sent. Deepgram will close the connection once all
# audio has finished processing.
await ws.send(b'')
except Exception as e:
print(f'Error while sending: {e}')
raise
async def receiver(ws):
""" Print out the messages received from the server.
"""
async for msg in ws:
res = json.loads(msg)
try:
# To see interim results in this demo, remove the conditional `if res['is_final']:`.
if res['is_final']:
transcript = res['channel']['alternatives'][0]['transcript']
start = res['start']
print(f'{transcript}')
except KeyError:
print(msg)
await asyncio.wait([
asyncio.ensure_future(sender(ws)),
asyncio.ensure_future(receiver(ws))
])
print()
def parse_args():
""" Parses the command-line arguments.
"""
parser = argparse.ArgumentParser(description='Submits data to the real-time streaming endpoint.')
parser.add_argument('-k', '--key', required=True, help='YOUR_DEEPGRAM_API_KEY (authorization)')
parser.add_argument('input', help='Input file.')
return parser.parse_args()
def main():
""" Entrypoint for the example.
"""
# Parse the command-line arguments.
args = parse_args()
# Open the audio file.
with wave.open(args.input, 'rb') as fh:
(channels, sample_width, sample_rate, num_samples, _, _) = fh.getparams()
assert sample_width == 2, 'WAV data must be 16-bit.'
data = fh.readframes(num_samples)
print(f'Channels = {channels}, Sample Rate = {sample_rate} Hz, Sample width = {sample_width} bytes, Size = {len(data)} bytes', file=sys.stderr)
# Run the example.
asyncio.get_event_loop().run_until_complete(run(data, args.key, channels, sample_width, sample_rate, args.input))
if __name__ == '__main__':
sys.exit(main() or 0)
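# Illustrative only: the fields receiver() relies on in each streaming response
# (shape abridged; consult Deepgram's documentation for the full schema):
#
#   {
#     "is_final": true,
#     "start": 0.0,
#     "channel": {"alternatives": [{"transcript": "hello world"}]}
#   }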
|
python
|
# This file is part of pure-dispatch.
# https://github.com/SeedyROM/pure-dispatch
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2017, Zack Kollar <[email protected]>
'''Test our database module.
'''
from tests.base import TestCase
from pure_dispatch.database import DatabaseEngine, DatabaseSession
from preggy import expect
class DatabaseTestCase(TestCase):
'''Create a generic test case for our suite.
'''
@classmethod
def setUpClass(cls):
cls.engine = DatabaseEngine()
cls.session = DatabaseSession()
class TestDatabaseBase(DatabaseTestCase):
'''Test the basic functionality of our module.
'''
def test_globals_initialize(self):
        '''Test that our singletons initialize at all.
'''
expect(DatabaseEngine).error_not_to_happen()
expect(DatabaseSession).error_not_to_happen()
def test_database_engine(self):
'''Test if our singleton initializes the engine property.
'''
expect(self.engine).error_not_to_happen()
def test_database_session(self):
'''Test if our singleton initializes the session property.
'''
expect(self.session).error_not_to_happen()
|
python
|
# By Justin Walgran
# Copyright (c) 2012 Azavea, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import unittest
from blend import Result
class TestResult(unittest.TestCase):
"""Asserts that the properties and methods of the Result class behave correctly."""
def setUp(self):
self.result = Result()
def tearDown(self):
pass
def test_adding_none_to_messages_does_not_create_a_message(self):
self.result.add_message(None)
self.result.add_warning("warning")
self.result.add_error("error")
self.assertIsNone(self.result.messages, "Expected adding a None message to not add an item to Result.messages")
def test_adding_none_to_warnings_does_not_create_a_warning(self):
self.result.add_message("message")
self.result.add_warning(None)
self.result.add_error("error")
self.assertIsNone(self.result.warnings, "Expected adding a None warning to not add an item to Result.warnings")
def test_adding_none_to_error_does_not_create_a_message(self):
self.result.add_message("message")
self.result.add_warning("warning")
self.result.add_error(None)
self.assertIsNone(self.result.errors, "Expected adding a None error to not add an item to Result.errors")
def test_errors_warnings_and_messages_as_string_with_one_of_each(self):
self.result.add_message("message")
self.result.add_warning("warning")
self.result.add_error("error")
self.assertEqual("error\nwarning\nmessage", self.result.errors_warnings_and_messages_as_string)
def test_errors_warnings_and_messages_as_string_with_message_and_warning(self):
self.result.add_message("message")
self.result.add_warning("warning")
self.assertEqual("warning\nmessage", self.result.errors_warnings_and_messages_as_string)
|
python
|
# Copyright (c) 2017 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutron_lib import constants as lib_constants
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.utils import test
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
import testscenarios
from testscenarios.scenarios import multiply_scenarios
from neutron_tempest_plugin.api import base as base_api
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils as common_utils
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants
from neutron_tempest_plugin.scenario import test_qos
CONF = config.CONF
load_tests = testscenarios.load_tests_apply_scenarios
class FloatingIpTestCasesMixin(object):
credentials = ['primary', 'admin']
@classmethod
@utils.requires_ext(extension="router", service="network")
def resource_setup(cls):
super(FloatingIpTestCasesMixin, cls).resource_setup()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router_by_client()
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.keypair = cls.create_keypair()
cls.secgroup = cls.os_primary.network_client.create_security_group(
name=data_utils.rand_name('secgroup'))['security_group']
cls.security_groups.append(cls.secgroup)
cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
if cls.same_network:
cls._dest_network = cls.network
else:
cls._dest_network = cls._create_dest_network()
@classmethod
def _get_external_gateway(cls):
if CONF.network.public_network_id:
subnets = cls.os_admin.network_client.list_subnets(
network_id=CONF.network.public_network_id)
for subnet in subnets['subnets']:
if (subnet['gateway_ip'] and
subnet['ip_version'] == lib_constants.IP_VERSION_4):
return subnet['gateway_ip']
@classmethod
def _create_dest_network(cls):
network = cls.create_network()
subnet = cls.create_subnet(network)
cls.create_router_interface(cls.router['id'], subnet['id'])
return network
def _create_server(self, create_floating_ip=True, network=None):
if network is None:
network = self.network
port = self.create_port(network, security_groups=[self.secgroup['id']])
if create_floating_ip:
fip = self.create_floatingip(port=port)
else:
fip = None
server = self.create_server(
flavor_ref=CONF.compute.flavor_ref,
image_ref=CONF.compute.image_ref,
key_name=self.keypair['name'],
networks=[{'port': port['id']}])['server']
waiters.wait_for_server_status(self.os_primary.servers_client,
server['id'],
constants.SERVER_STATUS_ACTIVE)
return {'port': port, 'fip': fip, 'server': server}
def _test_east_west(self):
# The proxy VM is used to control the source VM when it doesn't
# have a floating-ip.
if self.src_has_fip:
proxy = None
proxy_client = None
else:
proxy = self._create_server()
proxy_client = ssh.Client(proxy['fip']['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
# Source VM
if self.src_has_fip:
src_server = self._create_server()
src_server_ip = src_server['fip']['floating_ip_address']
else:
src_server = self._create_server(create_floating_ip=False)
src_server_ip = src_server['port']['fixed_ips'][0]['ip_address']
ssh_client = ssh.Client(src_server_ip,
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'],
proxy_client=proxy_client)
# Destination VM
if self.dest_has_fip:
dest_server = self._create_server(network=self._dest_network)
else:
dest_server = self._create_server(create_floating_ip=False,
network=self._dest_network)
# Check connectivity
self.check_remote_connectivity(ssh_client,
dest_server['port']['fixed_ips'][0]['ip_address'],
servers=[src_server, dest_server])
if self.dest_has_fip:
self.check_remote_connectivity(ssh_client,
dest_server['fip']['floating_ip_address'],
servers=[src_server, dest_server])
class FloatingIpSameNetwork(FloatingIpTestCasesMixin,
base.BaseTempestTestCase):
scenarios = multiply_scenarios([
('SRC with FIP', dict(src_has_fip=True)),
('SRC without FIP', dict(src_has_fip=False)),
], [
('DEST with FIP', dict(dest_has_fip=True)),
('DEST without FIP', dict(dest_has_fip=False)),
])
same_network = True
@test.unstable_test("bug 1717302")
@decorators.idempotent_id('05c4e3b3-7319-4052-90ad-e8916436c23b')
def test_east_west(self):
self._test_east_west()
class FloatingIpSeparateNetwork(FloatingIpTestCasesMixin,
base.BaseTempestTestCase):
scenarios = multiply_scenarios([
('SRC with FIP', dict(src_has_fip=True)),
('SRC without FIP', dict(src_has_fip=False)),
], [
('DEST with FIP', dict(dest_has_fip=True)),
('DEST without FIP', dict(dest_has_fip=False)),
])
same_network = False
@test.unstable_test("bug 1717302")
@decorators.idempotent_id('f18f0090-3289-4783-b956-a0f8ac511e8b')
def test_east_west(self):
self._test_east_west()
class DefaultSnatToExternal(FloatingIpTestCasesMixin,
base.BaseTempestTestCase):
same_network = True
@decorators.idempotent_id('3d73ea1a-27c6-45a9-b0f8-04a283d9d764')
def test_snat_external_ip(self):
"""Check connectivity to an external IP"""
gateway_external_ip = self._get_external_gateway()
if not gateway_external_ip:
raise self.skipTest("IPv4 gateway is not configured for public "
"network or public_network_id is not "
"configured")
proxy = self._create_server()
proxy_client = ssh.Client(proxy['fip']['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
src_server = self._create_server(create_floating_ip=False)
src_server_ip = src_server['port']['fixed_ips'][0]['ip_address']
ssh_client = ssh.Client(src_server_ip,
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'],
proxy_client=proxy_client)
self.check_remote_connectivity(ssh_client,
gateway_external_ip,
servers=[proxy, src_server])
class FloatingIPPortDetailsTest(FloatingIpTestCasesMixin,
base.BaseTempestTestCase):
same_network = True
@classmethod
@utils.requires_ext(extension="router", service="network")
@utils.requires_ext(extension="fip-port-details", service="network")
def resource_setup(cls):
super(FloatingIPPortDetailsTest, cls).resource_setup()
@test.unstable_test("bug 1815585")
@decorators.idempotent_id('a663aeee-dd81-492b-a207-354fd6284dbe')
def test_floatingip_port_details(self):
"""Tests the following:
1. Create a port with floating ip in Neutron.
2. Create two servers in Nova.
3. Attach the port to the server.
4. Detach the port from the server.
5. Attach the port to the second server.
6. Detach the port from the second server.
"""
port = self.create_port(self.network)
fip = self.create_and_associate_floatingip(port['id'])
server1 = self._create_server(create_floating_ip=False)
server2 = self._create_server(create_floating_ip=False)
for server in [server1, server2]:
# attach the port to the server
self.create_interface(
server['server']['id'], port_id=port['id'])
waiters.wait_for_interface_status(
self.os_primary.interfaces_client, server['server']['id'],
port['id'], lib_constants.PORT_STATUS_ACTIVE)
fip = self.client.show_floatingip(fip['id'])['floatingip']
self._check_port_details(
fip, port, status=lib_constants.PORT_STATUS_ACTIVE,
device_id=server['server']['id'], device_owner='compute:nova')
# detach the port from the server; this is a cast in the compute
# API so we have to poll the port until the device_id is unset.
self.delete_interface(server['server']['id'], port['id'])
port = self._wait_for_port_detach(port['id'])
fip = self._wait_for_fip_port_down(fip['id'])
self._check_port_details(
fip, port, status=lib_constants.PORT_STATUS_DOWN,
device_id='', device_owner='')
def _check_port_details(self, fip, port, status, device_id, device_owner):
self.assertIn('port_details', fip)
port_details = fip['port_details']
self.assertEqual(port['name'], port_details['name'])
self.assertEqual(port['network_id'], port_details['network_id'])
self.assertEqual(port['mac_address'], port_details['mac_address'])
self.assertEqual(port['admin_state_up'],
port_details['admin_state_up'])
self.assertEqual(status, port_details['status'])
self.assertEqual(device_id, port_details['device_id'])
self.assertEqual(device_owner, port_details['device_owner'])
def _wait_for_port_detach(self, port_id, timeout=120, interval=10):
"""Waits for the port's device_id to be unset.
:param port_id: The id of the port being detached.
:returns: The final port dict from the show_port response.
"""
port = self.client.show_port(port_id)['port']
device_id = port['device_id']
start = int(time.time())
# NOTE(mriedem): Nova updates the port's device_id to '' rather than
# None, but it's not contractual so handle Falsey either way.
while device_id:
time.sleep(interval)
port = self.client.show_port(port_id)['port']
device_id = port['device_id']
timed_out = int(time.time()) - start >= timeout
if device_id and timed_out:
message = ('Port %s failed to detach (device_id %s) within '
'the required time (%s s).' %
(port_id, device_id, timeout))
raise exceptions.TimeoutException(message)
return port
def _wait_for_fip_port_down(self, fip_id, timeout=120, interval=10):
"""Waits for the fip's attached port status to be 'DOWN'.
:param fip_id: The id of the floating IP.
:returns: The final fip dict from the show_floatingip response.
"""
fip = self.client.show_floatingip(fip_id)['floatingip']
self.assertIn('port_details', fip)
port_details = fip['port_details']
status = port_details['status']
start = int(time.time())
while status != lib_constants.PORT_STATUS_DOWN:
time.sleep(interval)
fip = self.client.show_floatingip(fip_id)['floatingip']
self.assertIn('port_details', fip)
port_details = fip['port_details']
status = port_details['status']
timed_out = int(time.time()) - start >= timeout
if status != lib_constants.PORT_STATUS_DOWN and timed_out:
port_id = fip.get("port_id")
port = self.os_admin.network_client.show_port(port_id)['port']
message = ('Floating IP %s attached port status failed to '
'transition to DOWN (current status %s) within '
'the required time (%s s). Port details: %s' %
(fip_id, status, timeout, port))
raise exceptions.TimeoutException(message)
return fip
class FloatingIPQosTest(FloatingIpTestCasesMixin,
test_qos.QoSTestMixin,
base.BaseTempestTestCase):
same_network = True
@classmethod
@utils.requires_ext(extension="router", service="network")
@utils.requires_ext(extension="qos", service="network")
@utils.requires_ext(extension="qos-fip", service="network")
@base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def resource_setup(cls):
super(FloatingIPQosTest, cls).resource_setup()
@decorators.idempotent_id('5eb48aea-eaba-4c20-8a6f-7740070a0aa3')
def test_qos(self):
"""Test floating IP is binding to a QoS policy with
ingress and egress bandwidth limit rules. And it applied correctly
by sending a file from the instance to the test node.
Then calculating the bandwidth every ~1 sec by the number of bits
received / elapsed time.
"""
self._test_basic_resources()
policy_id = self._create_qos_policy()
ssh_client = self._create_ssh_client()
self.os_admin.network_client.create_bandwidth_limit_rule(
policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
max_burst_kbps=constants.LIMIT_KILO_BYTES,
direction=lib_constants.INGRESS_DIRECTION)
self.os_admin.network_client.create_bandwidth_limit_rule(
policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
max_burst_kbps=constants.LIMIT_KILO_BYTES,
direction=lib_constants.EGRESS_DIRECTION)
rules = self.os_admin.network_client.list_bandwidth_limit_rules(
policy_id)
self.assertEqual(2, len(rules['bandwidth_limit_rules']))
fip = self.os_admin.network_client.get_floatingip(
self.fip['id'])['floatingip']
self.assertEqual(self.port['id'], fip['port_id'])
self.os_admin.network_client.update_floatingip(
self.fip['id'],
qos_policy_id=policy_id)
fip = self.os_admin.network_client.get_floatingip(
self.fip['id'])['floatingip']
self.assertEqual(policy_id, fip['qos_policy_id'])
self._create_file_for_bw_tests(ssh_client)
common_utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=120,
sleep=1)
class TestFloatingIPUpdate(FloatingIpTestCasesMixin,
base.BaseTempestTestCase):
same_network = None
@decorators.idempotent_id('1bdd849b-03dd-4b8f-994f-457cf8a36f93')
def test_floating_ip_update(self):
"""Test updating FIP with another port.
The test creates two servers and attaches floating ip to first server.
        Then it checks that the server is accessible using the FIP. The FIP is then
associated with the second server and connectivity is checked again.
"""
ports = [self.create_port(
self.network, security_groups=[self.secgroup['id']])
for i in range(2)]
servers = []
for port in ports:
name = data_utils.rand_name("server-%s" % port['id'][:8])
server = self.create_server(
name=name,
flavor_ref=CONF.compute.flavor_ref,
key_name=self.keypair['name'],
image_ref=CONF.compute.image_ref,
networks=[{'port': port['id']}])['server']
server['name'] = name
servers.append(server)
for server in servers:
self.wait_for_server_active(server)
self.fip = self.create_floatingip(port=ports[0])
self.check_connectivity(self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
self.keypair['private_key'],
servers=servers)
self.client.update_floatingip(self.fip['id'], port_id=ports[1]['id'])
def _wait_for_fip_associated():
try:
self.check_servers_hostnames(servers[-1:], log_errors=False)
except (AssertionError, exceptions.SSHTimeout):
return False
return True
# The FIP is now associated with the port of the second server.
try:
common_utils.wait_until_true(_wait_for_fip_associated,
timeout=15, sleep=3)
except common_utils.WaitTimeout:
self._log_console_output(servers[-1:])
self.fail(
"Server %s is not accessible via its floating ip %s" % (
servers[-1]['id'], self.fip['id']))
|
python
|
from django.template.response import TemplateResponse
from django.contrib.sites.models import Site
from django.urls import reverse
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.cache import cache_page
from django.utils.safestring import mark_safe
from django.conf import settings
from embed9.utils import get_params, common_view, get_encoded_params
WIDGET_CACHE_TIME = getattr(settings, 'WIDGET_CACHE_TIME', 60*60)
@cache_page(WIDGET_CACHE_TIME)
@xframe_options_exempt
def widget(request, app, model, pk):
""" Renders an iframe with the widget. """
embed, obj = common_view(app, model, pk)
params = get_params(embed.get_form_class(), request.GET)
template = embed.get_widget_template()
return TemplateResponse(request, template, {
model: obj,
'params': params,
},)
@cache_page(WIDGET_CACHE_TIME)
@xframe_options_exempt
def loader(request, app, model, pk):
""" Renders JavaScript loader of the widget. """
embed, obj = common_view(app, model, pk)
params = get_params(embed.get_form_class(), request.GET)
template = embed.get_loader_template()
return TemplateResponse(request, template, {
model: obj,
'widget_name': 'widget_' + model + str(pk),
'domain': Site.objects.get_current().domain,
'iframe_url': mark_safe(reverse('embed9:widget', kwargs={'app': app, 'model': model, 'pk': pk}) + get_encoded_params(params)),
'params': params,
})
def preview(request, app, model, pk):
#print("this function is being called")
""" Handles previewing and adjusting the widget. """
embed, obj = common_view(app, model, pk)
template = embed.get_form_template()
show_preview = True
params = {}
if request.method == 'POST':
form = embed.get_form_class()(request.POST)
if form.is_valid():
for n, v in form.cleaned_data.items():
params[n] = v
else:
show_preview = False
else:
form = embed.get_form_class()()
return TemplateResponse(request, template, {
'obj': obj,
'form': form,
'params': params,
'show_preview': show_preview,
},)
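# A sketch (an assumption, not embed9's shipped URLconf) of the routing these views
# imply: an "embed9" namespace with widget, loader and preview routes keyed by app
# label, model name and primary key, matching the reverse('embed9:widget', ...) call
# above. The exact path strings are illustrative.
#
# from django.urls import path
# from . import views
#
# app_name = 'embed9'
#
# urlpatterns = [
#     path('widget/<str:app>/<str:model>/<int:pk>/', views.widget, name='widget'),
#     path('loader/<str:app>/<str:model>/<int:pk>/', views.loader, name='loader'),
#     path('preview/<str:app>/<str:model>/<int:pk>/', views.preview, name='preview'),
# ]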
|
python
|
from rdflib.graph import Graph
from rdflib.namespace import Namespace, RDFS, RDF
from owmeta_core.rdf_query_modifiers import (ZeroOrMoreTQLayer,
rdfs_subclassof_subclassof_zom_creator as mod,
rdfs_subclassof_zom,
rdfs_subclassof_zom_creator)
ex = Namespace('http://example.org/')
def test_zom_triples_choices():
g = Graph()
g.add((ex.a, RDFS.subClassOf, ex.b))
g.add((ex.b, RDFS.subClassOf, ex.c))
g.add((ex.c, RDFS.subClassOf, ex.d))
g.add((ex.d, RDFS.subClassOf, ex.e))
g.add((ex.e, RDFS.subClassOf, ex.f))
g.add((ex.f, RDFS.subClassOf, ex.g))
g = ZeroOrMoreTQLayer(mod(ex.c), g)
choices = set(g.triples_choices((None, RDFS.subClassOf, [ex.f, ex.c])))
expected = [(ex.a, RDFS.subClassOf, ex.c),
(ex.a, RDFS.subClassOf, ex.b),
(ex.a, RDFS.subClassOf, ex.d),
(ex.a, RDFS.subClassOf, ex.e),
(ex.a, RDFS.subClassOf, ex.f),
(ex.b, RDFS.subClassOf, ex.c),
(ex.b, RDFS.subClassOf, ex.d),
(ex.b, RDFS.subClassOf, ex.e),
(ex.b, RDFS.subClassOf, ex.f),
(ex.c, RDFS.subClassOf, ex.d),
(ex.c, RDFS.subClassOf, ex.e),
(ex.c, RDFS.subClassOf, ex.f),
(ex.d, RDFS.subClassOf, ex.e),
(ex.d, RDFS.subClassOf, ex.f),
(ex.e, RDFS.subClassOf, ex.f)]
assert choices == set(expected)
def test_zom_triples_choices_1():
g = Graph()
g.add((ex.a, RDFS.subClassOf, ex.b))
g.add((ex.b, RDFS.subClassOf, ex.c))
g.add((ex.c, RDFS.subClassOf, ex.d))
g.add((ex.d, RDFS.subClassOf, ex.e))
g.add((ex.e, RDFS.subClassOf, ex.f))
g.add((ex.f, RDFS.subClassOf, ex.g))
g.add((ex.obj, RDF.type, ex.c))
g = ZeroOrMoreTQLayer(rdfs_subclassof_zom, g)
choices = set(g.triples_choices(([ex.obj], RDF.type, ex.g)))
expected = [(ex.obj, RDF.type, ex.c),
(ex.obj, RDF.type, ex.d),
(ex.obj, RDF.type, ex.e),
(ex.obj, RDF.type, ex.f),
(ex.obj, RDF.type, ex.g)]
assert choices == set(expected)
def test_zom_triples_choices_2():
g = Graph()
g.add((ex.a, RDFS.subClassOf, ex.b))
g.add((ex.b, RDFS.subClassOf, ex.c))
g.add((ex.c, RDFS.subClassOf, ex.d))
g.add((ex.d, RDFS.subClassOf, ex.e))
g.add((ex.e, RDFS.subClassOf, ex.f))
g.add((ex.f, RDFS.subClassOf, ex.g))
g.add((ex.obj, RDF.type, ex.c))
g = ZeroOrMoreTQLayer(rdfs_subclassof_zom_creator(ex.g), g)
choices = set(g.triples_choices(([ex.obj], RDF.type, ex.g)))
expected = [(ex.obj, RDF.type, ex.c),
(ex.obj, RDF.type, ex.d),
(ex.obj, RDF.type, ex.e),
(ex.obj, RDF.type, ex.f),
(ex.obj, RDF.type, ex.g)]
assert choices == set(expected)
def test_zom_triples():
g = Graph()
g.add((ex.a, RDFS.subClassOf, ex.b))
g.add((ex.b, RDFS.subClassOf, ex.c))
g.add((ex.c, RDFS.subClassOf, ex.d))
g.add((ex.d, RDFS.subClassOf, ex.e))
g.add((ex.e, RDFS.subClassOf, ex.f))
g.add((ex.f, RDFS.subClassOf, ex.g))
g.add((ex.obj, RDF.type, ex.c))
g = ZeroOrMoreTQLayer(rdfs_subclassof_zom_creator(ex.g), g)
choices = set(g.triples((None, RDF.type, ex.g)))
expected = [(ex.obj, RDF.type, ex.c),
(ex.obj, RDF.type, ex.d),
(ex.obj, RDF.type, ex.e),
(ex.obj, RDF.type, ex.f),
(ex.obj, RDF.type, ex.g)]
assert choices == set(expected)
|
python
|
#!/usr/bin/env python
'''This script will compile 1D spectra into cubes, and visualize them.'''
# create (and save!) a cube, and visualize it
from mosasaurus.Cube import Cube
import sys
try:
c = Cube(sys.argv[1])
c.populate(remake=False, visualize=False)
c.movieCube(stride=1)
except IndexError:
print '''
Example usage:
./show.py gj1132_0227.obs
'''
|
python
|
# requires:
# - phantomjs or geckodriver installed
# - selenium from pip
import sys
import unittest
import json
import argparse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
class SmokeTest(unittest.TestCase):
def __init__(self, testName, config):
super(SmokeTest, self).__init__(testName)
self.config = json.loads(config.read())
def setUp(self):
        driver = self.config.get("driver", "PhantomJS")
if driver == "PhantomJS":
self.driver = webdriver.PhantomJS()
self.driver.delete_all_cookies()
elif driver == "Firefox":
self.driver = webdriver.Firefox()
else:
raise SystemExit("unknown driver")
self.img_path = self.config.get("img_path", "/tmp/")
self.driver.set_window_size(1120, 550)
def _login(self):
"""
Do login and wait for the user dashboard to be visible
Could be made more generic by permitting admin dashboard visible
:return:
"""
self.driver.get(self.config["url"])
self.driver.find_element_by_name(
'click-show-login').click()
self.driver.find_element_by_id("email").send_keys(self.config["email"])
elem = self.driver.find_element_by_id("password")
elem.send_keys(self.config["password"])
elem.submit()
WebDriverWait(self.driver, 3).until(
expected_conditions.presence_of_element_located((By.ID,
"user-dashboard"))
)
def _logout(self):
self.driver.get(self.config["url"])
self.driver.find_element_by_id('logout').click()
def _test_blueprint_start(self, elem, wait_for_open=False):
launch_button = elem.find_element_by_css_selector(".panel-footer "
"span").click()
start_timeout = self.config.get("timeouts", {}).get(
"start", 60)
if not wait_for_open:
WebDriverWait(self.driver, start_timeout).until(
expected_conditions.visibility_of_element_located(
(By.PARTIAL_LINK_TEXT, "pb-") # object
)
)
else:
WebDriverWait(self.driver, start_timeout).until(
expected_conditions.visibility_of_element_located(
(By.PARTIAL_LINK_TEXT, "Open in") # object
)
)
# have to check non-dummies differently
# they create a "Click to open" link
def _test_blueprint_shutdown(self, elem):
shutdown_button = elem.find_element_by_css_selector("table "
"button.btn-danger")
id_ = shutdown_button.id
shutdown_button.click()
self._dismiss_shutdown_modal()
shutdown_timeout = self.config.get("timeouts", {}).get(
"shutdown", 60)
WebDriverWait(self.driver, shutdown_timeout).until(
expected_conditions.invisibility_of_element_located(
(By.CLASS_NAME, "btn-danger"))
)
def _dismiss_shutdown_modal(self):
"""
Attempts to dismiss a modal by clicking on the first btn-primary
inside the modal
:return:
"""
WebDriverWait(self.driver, 10).until(
expected_conditions.visibility_of_element_located(
(By.CLASS_NAME, "modal"))
)
yes_button = self.driver.find_element_by_css_selector(
".modal .btn-primary").click()
WebDriverWait(self.driver, 10).until(
expected_conditions.invisibility_of_element_located(
(By.CLASS_NAME, "modal"))
)
def smoke_test(self):
try:
self._login()
elem = self.driver.find_element_by_xpath(
'//*[@id="user-dashboard"]/div')
elements = elem.find_elements_by_css_selector("div.panel")
for child in elem.find_elements_by_css_selector("div.panel"):
cur_element = child.find_element_by_css_selector(
"h3.panel-title")
blueprint_name = cur_element.text
for bp in self.config["blueprints"]:
if bp in blueprint_name:
if "dummy" in blueprint_name.lower():
self._test_blueprint_start(child, wait_for_open=False)
else:
self._test_blueprint_start(child, wait_for_open=True)
self._test_blueprint_shutdown(child)
self._logout()
        except Exception as e:
import datetime
fname = datetime.datetime.now().isoformat()+ "_screenshot.png"
self.driver.save_screenshot(self.img_path + fname)
sys.stderr.write("failed: " + str(e))
self._logout()
def tearDown(self):
self.driver.quit()
def main(args=None):
parser = argparse.ArgumentParser(description="Pebbles smoke tester",
usage=("Run with configs to smoke test "
"a running Pebbles instance. "
"Outputs a string (OK/FAIL) that "
"can be "
"redirected to a file. Also "
"returns 0 or nonzero for Posix "
"compliance"))
parser.add_argument("-c", "--config", type=argparse.FileType("r"),
default=sys.stdin, help=("config file in JSON "
"format. see "
"example in "
"example.config.json for "
"defaults"
))
parser.add_argument("-o", "--output", default=sys.stdout,
type=argparse.FileType("w"),
help=("file to print test status string"))
parser.add_argument("--success", default="OK",
help=("text to display if tests run ok"))
parser.add_argument("--fail", default="FAIL", help=("text to display if "
"tests do not run ok"))
args = parser.parse_args()
suite = unittest.TestSuite()
suite.addTest(SmokeTest("smoke_test", args.config))
res = unittest.TextTestRunner(verbosity=0).run(suite)
if res.wasSuccessful():
args.output.write(args.success)
else:
args.output.write(args.fail)
if __name__ == '__main__':
main()
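# The keys read above (url, email, password, driver, img_path, blueprints,
# timeouts.start, timeouts.shutdown) suggest a config file roughly like the
# following. This is an illustrative sketch only; see example.config.json in
# the repository for the real defaults.
#
#   {
#       "url": "https://pebbles.example.org",
#       "email": "[email protected]",
#       "password": "secret",
#       "driver": "PhantomJS",
#       "img_path": "/tmp/",
#       "blueprints": ["Dummy Blueprint"],
#       "timeouts": {"start": 60, "shutdown": 60}
#   }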
|
python
|
#102
# Time: O(n)
# Space: O(n)
# Given a binary tree, return the level order traversal
# of its nodes' values. (ie, from left to right, level by level).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its level order traversal as:
# [
# [3],
# [9,20],
# [15,7]
#]
class TreeNode():
def __init__(self,val):
self.val=val;
self.right=None
self.left=None
class BFSSol():
def levelOrderTraversalBTI(self,root):
if not root:
return None
next_level_node,levelOrder_Traversal=[root],[]
while next_level_node:
cur_level_node=next_level_node
next_level_node,cur_level_val=[],[]
for cur_node in cur_level_node:
cur_level_val.append(cur_node.val)
if cur_node.left:
next_level_node.append(cur_node.left)
if cur_node.right:
next_level_node.append(cur_node.right)
levelOrder_Traversal.append(cur_level_val)
return levelOrder_Traversal
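# Quick self-check using the example tree above, [3,9,20,null,null,15,7]:
if __name__ == '__main__':
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    assert BFSSol().levelOrderTraversalBTI(root) == [[3], [9, 20], [15, 7]]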
|
python
|
"""Some useful data structures and functions"""
import datetime
espa_env = {
"dev": "https://espa-dev.cr.usgs.gov",
"tst": "https://espa-tst.cr.usgs.gov",
"ops": "https://espa.cr.usgs.gov"
}
api_urls = {
"status": "/api/v1/item-status/",
"order": "/api/v1/order/"
}
def timestamp() -> str:
"""
Get system timestamp for output text file name in the format YYYYMMDDhhmmss
:return:
"""
return str(int(float(str(datetime.datetime.now()).replace('-', '')
.replace(':', '').replace(' ', ''))))
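# Illustrative usage only (not part of the original module): compose an API URL
# from the structures above and name an output file with the timestamp helper.
if __name__ == "__main__":
    status_url = espa_env["ops"] + api_urls["status"]
    print(status_url)                    # https://espa.cr.usgs.gov/api/v1/item-status/
    print(f"results-{timestamp()}.txt")  # e.g. results-20240101123000.txt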
|
python
|
import os
import glob
import shutil
# dictionary mapping each extension with its corresponding folder
# For example, 'jpg', 'png', 'ico', 'gif', 'svg' files will be moved to 'images' folder
# feel free to change based on your needs
extensions = {
"jpg": "images",
"png": "images",
"ico": "images",
"gif": "images",
"svg": "images",
"sql": "sql",
"exe": "programs",
"msi": "programs",
"pdf": "pdf",
"xlsx": "excel",
"csv": "excel",
"rar": "archive",
"zip": "archive",
"gz": "archive",
"tar": "archive",
"docx": "word",
"torrent": "torrent",
"txt": "text",
"ipynb": "python",
"py": "python",
"pptx": "powerpoint",
"ppt": "powerpoint",
"mp3": "audio",
"wav": "audio",
"mp4": "video",
"m3u8": "video",
"webm": "video",
"ts": "video",
"json": "json",
"css": "web",
"js": "web",
"html": "web",
"apk": "apk",
"sqlite3": "sqlite3",
}
if __name__ == "__main__":
path = r"E:\Downloads"
# setting verbose to 1 (or True) will show all file moves
# setting verbose to 0 (or False) will show basic necessary info
verbose = 0
for extension, folder_name in extensions.items():
# get all the files matching the extension
files = glob.glob(os.path.join(path, f"*.{extension}"))
print(f"[*] Found {len(files)} files with {extension} extension")
if not os.path.isdir(os.path.join(path, folder_name)) and files:
# create the folder if it does not exist before
print(f"[+] Making {folder_name} folder")
os.mkdir(os.path.join(path, folder_name))
for file in files:
            # for each file in that extension, move it to the corresponding folder
basename = os.path.basename(file)
dst = os.path.join(path, folder_name, basename)
if verbose:
print(f"[*] Moving {file} to {dst}")
shutil.move(file, dst)
|
python
|
#! /usr/bin/env python3
from sklearn import svm
import numpy as np
import json
import random
import sys
import os
import argparse
def estimate(data, target, trainingsubset, testingsubset, gamma='auto', C=1):
if(len(set(target)) < 2): # only one class
return 0;
clf = svm.SVC(gamma=gamma, C=C)
clf.fit(data[trainingsubset], target[trainingsubset])
prediction = clf.predict(data[testingsubset])
results = zip(prediction, target[testingsubset])
matches = 0
total = 0
for p, t in results:
total += 1
if p == t:
matches += 1
return matches/total
def successrate_stats(successrates):
successrate_avg = 0
successrate_min = successrates[0]
successrate_max = successrates[0]
for rate in successrates:
successrate_avg += rate
successrate_min = min(rate, successrate_min)
successrate_max = max(rate, successrate_max)
successrate_avg /= len(successrates)
return (successrate_min, successrate_max, successrate_avg)
def successrate_cdf(successrates):
# create dict with x:0 for x in {0..100}
cdf = dict(enumerate([0 for i in range(101)]))
for x in successrates:
cdf[int(x*100)] += 1
for key in range(1,101):
cdf[key] += cdf[key-1]
return cdf
def main():
parser = argparse.ArgumentParser(prog='testData')
parser.add_argument('--default-training-size', help='set the default size of the training set. True size will be min(default_training_size, len(data)-1).', action='store', default=5)
parser.add_argument('--limittraining', help='limit where training data may come from, e.g. a value of 0.5 limits training data to the first half of the dataset.', action='store', default=1.0)
parser.add_argument('--limitfeatures', help='the features used for training and testing', action='store', default=None)
parser.add_argument('--csv', help='write data to csv file', action='store')
parser.add_argument('--cdf', help='write cdf data to file', action='store')
parser.add_argument('-s', '--silent', help='do not output anything except errors', action='store_true')
parser.add_argument('target', help='target directory')
args = parser.parse_args()
silent = args.silent
if(not os.path.isdir(args.target)):
print("Error: Target '{}' does not exist".format(args.target))
sys.exit()
limittraining = float(args.limittraining)
if (limittraining < 0) or (limittraining > 1):
print("Error: limittraining must be between 0 and 1")
sys.exit()
default_training_size = float(args.default_training_size)
if default_training_size < 0:
print("Error: default_training_size must be >= 0")
sys.exit()
limitfeatures = None
if(args.limitfeatures):
limitfeatures = int(args.limitfeatures)
if(limitfeatures < 1):
print("Error: limitfeatures must be > 0")
sys.exit()
normalizeddata = {}
sensortypes = {}
with open(os.path.join(args.target, "normalizeddata.json"), "r") as f:
normalizeddata = json.load(f)
with open(os.path.join(args.target, "enhancedsensors.json"), "r") as f:
sensortypes = json.load(f)
f = None
if(args.csv):
f = open(args.csv, "w")
print("sensor type\t# sensors\t# devices\tmin\tmax\tavg", file=f)
cdf_data = {}
for selected_sensor_type in sorted(sensortypes):
if not silent:
print("Selected sensor type: {0}".format(selected_sensor_type))
print("Contains {0} sensors: ".format(len(sensortypes[selected_sensor_type])), end="")
for sensor in sensortypes[selected_sensor_type]:
print("{0} ({1}), ".format(sensor, sensortypes[selected_sensor_type][sensor]), end="")
print("")
data = np.matrix(normalizeddata[selected_sensor_type]['data'])
if(limitfeatures and data.shape[1] >= limitfeatures):
if not silent:
print("Cutting feature vector at {}".format(limitfeatures))
data = data[:,[i for i in range(limitfeatures)]]
target_sensor_name = np.array(normalizeddata[selected_sensor_type]['target_sensor_name'])
target_device_id = np.array(normalizeddata[selected_sensor_type]['target_device_id'])
if not silent:
print("Contains {0} devices: {1}".format(len(set(target_device_id)), ", ".join(set(target_device_id))))
if(len(set(target_device_id)) < 1):
if not silent:
print("\n")
continue
#print(data.shape)
#print(target_sensor_name.shape)
#print(target_device_id.shape)
#successrates_sensor = []
successrates_device = []
#successrates_device2 = []
upper_training_limit = limittraining * data.shape[0]
        training_size = int(min(default_training_size, upper_training_limit, (data.shape[0] - 1) / len(set(target_device_id))))
if(default_training_size < 1):
training_size = int(min(data.shape[0] * default_training_size, upper_training_limit, data.shape[0]-1))
for i in range(100):
completeset = set(range(data.shape[0]))
trainingsubset = set()
for deviceid in set(target_device_id):
device_training_set = set()
while(len(device_training_set) < training_size):
index = random.randrange(0, upper_training_limit)
if(target_device_id[index] == deviceid):
device_training_set.add(index)
trainingsubset |= device_training_set
testingsubset = completeset - trainingsubset
trainingsubset = list(trainingsubset)
testingsubset = list(testingsubset)
#print(target_device_id[trainingsubset])
#successrates_sensor.append(estimate(data, target_sensor_name, trainingsubset, testingsubset))
successrates_device.append(estimate(data, target_device_id, trainingsubset, testingsubset))
#successrates_device2.append(estimate(data, target_device_id, trainingsubset, testingsubset, gamma=0.001, C=100.))
#sr_min, sr_max, sr_avg = successrate_stats(successrates_sensor)
#print("Success rate (Sensor): min {0:.2f} / max {1:.2f} / avg {2:.2f}".format(sr_min, sr_max, sr_avg))
if not silent:
print("Training: {} values per device; Testing: {} values per device".format(training_size, (data.shape[0]-(training_size * len(set(target_device_id))) / len(set(target_device_id)))))
sr_min, sr_max, sr_avg = successrate_stats(successrates_device)
cdf = successrate_cdf(successrates_device)
cdf_data[selected_sensor_type] = cdf
if not silent:
print("Success rate (Device): min {0:.2f} / max {1:.2f} / avg {2:.2f}".format(sr_min, sr_max, sr_avg))
print("\n")
if(args.csv):
print("{}\t{}\t{}\t{}\t{}\t{}".format(
selected_sensor_type,
len(sensortypes[selected_sensor_type]),
len(set(target_device_id)),
sr_min,
sr_max,
sr_avg,
), file=f)
if(args.cdf):
with open(args.cdf, "w") as f:
for index in sorted(cdf_data):
print(index, end="\t", file=f)
print("", file=f)
for i in range(101):
for index in sorted(cdf_data):
print(cdf_data[index][i], end="\t", file=f)
print("", file=f)
if __name__ == "__main__":
main()
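# Illustrative invocation (paths and values are placeholders); the target
# directory must contain normalizeddata.json and enhancedsensors.json:
#
#   ./testData.py --csv results.tsv --cdf cdf.tsv --limitfeatures 10 path/to/dataset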
|
python
|
# Initializes the datastore with sample data.
#
# Possibilities to execute the code in this file:
#
# * GAE SDK 1.5 or compatible: Paste the code in the interactive console and
# execute it.
#
# * GAE (production):
#
# a) Enter the directory of the Reality Builder.
#
# b) Connect to the remote API shell.
#
# On Windows XP's "cmd.exe" (substitute %-placeholders):
#
# %PYTHON_PATH%\python.exe %GAE_PATH%\remote_api_shell.py -s ^
# %VERSION%.%APPLICATION%.appspot.com
#
# Note that, despite specifying a version above, the same datastore as for
# all other versions is used: There is only one.
#
# c) Paste the code and press enter. It will execute automatically.
# Copyright 2010-2012 Felix E. Klee <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import optparse
import os.path
import sys
sys.path.append(os.path.realpath('.'))
from google.appengine.dist import use_library
use_library('django', '0.96')
from main import Construction
from main import Block
from main import BlockProperties
from main import NewBlock
from main import NewBlockEmail
from django.utils import simplejson
from google.appengine.api import namespace_manager
if 'CURRENT_VERSION_ID' in os.environ:
# Works in the SDK's interactive console.
app_version = os.environ['CURRENT_VERSION_ID'].split('.')[0]
else:
# Takes the version from the command line:
parser = optparse.OptionParser()
parser.add_option('-s', '--server', dest='server')
(options, args) = parser.parse_args()
app_version = options.server.split('.')[0]
namespace_manager.set_namespace('demo')
# Deletes all construction entries:
queries = [Construction.all()]
for query in queries:
for result in query:
result.delete()
# Creates the construction configuration.
construction = Construction(key_name = 'main')
construction.update_interval_client = 2000
construction.validator_version = '0'
construction.validator_src = 'scene/validator.js'
construction.validator_function_name = 'validator' # attached to "window"!
construction.blocks_data_version = '0'
construction.camera_data_version = '0'
construction.camera_pos = [189.57, -159.16, 140.11]
construction.camera_a_x = 2.1589
construction.camera_a_y = -0.46583
construction.camera_a_z = 0.29
construction.camera_fl = 40.
construction.camera_sensor_resolution = 19.9
construction.put()
# Deletes all block properties entries:
queries = [BlockProperties.all()]
for query in queries:
for result in query:
result.delete()
# Sets up the block properties (construction as parent is important so
# that the properties form one entity group with the construction,
# which is necessary when doing transactions):
blockProperties = BlockProperties(parent=construction)
blockProperties.data_version = '0'
blockProperties.has_2_fold_symmetry = False
blockProperties.pos_spacing_xy = 20.
blockProperties.pos_spacing_z = 10.
blockProperties.outline_bxy = '[[0, 0], [1, 0], [2, 1], [0, 1]]'
blockProperties.collision_offsets_list_bxy = \
['[[-1, 0], [0, 0], [1, 0]]',
'[[0, 0], [1, 0], [0, -1], [1, -1]]',
'[[0, 0], [1, 0]]',
'[[0, 1], [1, 1], [0, 0], [1, 0]]']
blockProperties.attachment_offsets_list_b = \
['[[0, 0, -1], [0, 0, 1]]',
'[[0, 0, -1], [0, 0, 1]]',
'[[0, 0, -1], [0, 0, 1], [1, 0, -1], [1, 0, 1]]',
'[[0, 0, -1], [0, 0, 1]]']
blockProperties.rot_center_bxy = [0.5, 0.5]
blockProperties.put()
# Deletes all new block entries:
queries = [NewBlock.all()]
for query in queries:
for result in query:
result.delete()
# Sets up the new block:
newBlock = NewBlock(parent=construction)
newBlock.data_version = '0'
newBlock.init_pos_b = [4, 0, 4]
newBlock.init_a = 0
newBlock.put()
# Deletes all block entries:
queries = [Block.all()]
for query in queries:
for result in query:
result.delete()
# Creates block entries:
cs = [[1, 4, 3, 1], [1, 4, 2, 0], [1, 4, 1, 3], [1, 4, 0, 2],
[5, 5, 1, 2], [5, 5, 0, 2], [0, 1, 0, 3], [3, 0, 0, 2],
[4, 0, 0, 0], [1, 0, 0, 0], [4, 4, 0, 0]]
for c in cs:
x_b = c[0]
y_b = c[1]
z_b = c[2]
a = c[3]
block = Block.insert_at(construction, [x_b, y_b, z_b], a)
block.state = 2
block.put()
# Deletes all new block email entries:
queries = [NewBlockEmail.all()]
for query in queries:
for result in query:
result.delete()
# Creates new block email entries:
newBlockEmail = NewBlockEmail(parent=construction)
newBlockEmail.sender_address = 'Admin <[email protected]>'
newBlockEmail.recipient_address = 'Block Builders <[email protected]>'
newBlockEmail.put()
print 'Done.'
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
lvmmodel.io
============
I/O utility functions for files in lvmmodel.
"""
import os
from astropy.io import fits
import yaml
import numpy as np
import warnings
from lvmutil.log import get_logger
log = get_logger()
_thru = dict()
def load_throughput(channel):
"""Returns specter Throughput object for the given channel 'b', 'r', or 'z'.
Parameters
----------
channel : {'b', 'r', 'z'}
Spectrograph channel.
"""
import specter.throughput
channel = channel.lower()
global _thru
if channel not in _thru:
thrufile = os.path.join(os.environ['LVMMODEL'], 'data', 'throughput', 'thru-{0}.fits'.format(channel))
_thru[channel] = specter.throughput.load_throughput(thrufile)
return _thru[channel]
_psf = dict()
def load_psf(channel):
"""Returns specter PSF object for the given channel 'b', 'r', or 'z'.
Parameters
----------
channel : {'b', 'r', 'z'}
Spectrograph channel.
"""
import specter.psf
channel = channel.lower()
global _psf
if channel not in _psf:
psffile = os.path.join(os.environ['LVMMODEL'], 'data', 'specpsf', 'psf-{0}.fits'.format(channel))
_psf[channel] = specter.psf.load_psf(psffile)
return _psf[channel]
_params = None
def load_lvmparams(config='lvm', telescope='1m'):
"""Returns LVM parameter dictionary loaded from lvmmodel/data/lvm.yaml.
Parameters:
config (str):
Which config yaml to load
telescope (str):
Which telescope config to load.
"""
# build param name
if config == 'lvm':
config_name = '{0}_{1}.yaml'.format(config, telescope)
else:
config_name = '{0}.yaml'.format(config)
global _params
sametele = _params is not None and 'telescope' in _params and telescope == _params['telescope']
if _params is None or not sametele:
lvmparamsfile = os.path.join(os.environ['LVMMODEL'], 'data', config_name)
with open(lvmparamsfile) as par:
_params = yaml.load(par)
# - add config and telescope name
_params['config_name'] = config_name
_params['telescope'] = telescope
        # - for temporary backwards compatibility after 'exptime' -> 'exptime_dark'
if ('exptime' not in _params) and ('exptime_dark' in _params):
_params['exptime'] = _params['exptime_dark']
# - Augment params with wavelength coverage from specpsf files
# - wavemin/max = min/max wavelength covered by *any* fiber on the CCD
# - wavemin/max_all = min/max wavelength covered by *all* fibers
for channel in ['b', 'r', 'z']:
hdr = fits.getheader(findfile('specpsf/psf-{}.fits'.format(channel)), 0)
_params['ccd'][channel]['wavemin'] = hdr['WAVEMIN']
_params['ccd'][channel]['wavemax'] = hdr['WAVEMAX']
_params['ccd'][channel]['wavemin_all'] = hdr['WMIN_ALL']
_params['ccd'][channel]['wavemax_all'] = hdr['WMAX_ALL']
return _params
# Added and still needs to be committed and pushed to desihub
_gfa = None
def load_gfa():
"""Returns GFA table from lvmmodel/data/focalplane/gfa.ecsv"""
global _gfa
from astropy.table import Table
# os is imported already in the lvmmodel io.py
import os
if _gfa is None:
gfaFile = os.path.join(os.environ['LVMMODEL'], 'data', 'focalplane', 'gfa.ecsv')
_gfa = Table.read(gfaFile, format='ascii.ecsv')
return _gfa
_fiberpos = None
def load_fiberpos():
"""Returns fiberpos table from lvmmodel/data/focalplane/fiberpos.fits.
"""
global _fiberpos
from astropy.table import Table
if _fiberpos is None:
fiberposfile = os.path.join(os.environ['LVMMODEL'], 'data', 'focalplane', 'fiberpos.fits')
_fiberpos = Table.read(fiberposfile)
# - Convert to upper case if needed
# - Make copy of colnames b/c they are updated during iteration
for col in list(_fiberpos.colnames):
if col.islower():
_fiberpos.rename_column(col, col.upper())
# - Temporary backwards compatibility for renamed columns
if 'POSITIONER' in _fiberpos.colnames:
import warnings
warnings.warn('old fiberpos.fits with POSITIONER column instead of LOCATION; please update your $LVMMODEL checkout', DeprecationWarning)
_fiberpos['LOCATION'] = _fiberpos['POSITIONER']
else:
_fiberpos['POSITIONER'] = _fiberpos['LOCATION']
if 'SPECTROGRAPH' in _fiberpos.colnames:
import warnings
warnings.warn('old fiberpos.fits with SPECTROGRAPH column instead of SPECTRO; please update your $LVMMODEL checkout', DeprecationWarning)
_fiberpos['SPECTRO'] = _fiberpos['SPECTROGRAPH']
else:
_fiberpos['SPECTROGRAPH'] = _fiberpos['SPECTRO']
return _fiberpos
_tiles = dict()
def load_tiles(onlydesi=True, extra=False, tilesfile=None, cache=True):
"""Return DESI tiles structure from lvmmodel/data/footprint/desi-tiles.fits.
Parameters
----------
onlydesi : :class:`bool` (default True)
If ``True``, trim to just the tiles in the DESI footprint.
extra : :class:`bool`, (default False)
If ``True``, include extra layers with PROGRAM='EXTRA'.
tilesfile : (str)
Name of tiles file to load; or None for default.
Without path, look in $LVMMODEL/data/footprint, otherwise load file.
cache : :class:`bool`, (default True)
Use cache of tiles data.
"""
global _tiles
if tilesfile is None:
tilesfile = 'desi-tiles.fits'
# - Check if tilesfile includes a path (absolute or relative)
tilespath, filename = os.path.split(tilesfile)
if tilespath == '':
tilesfile = os.path.join(os.environ['LVMMODEL'], 'data', 'footprint', filename)
    # - standardize path location
tilesfile = os.path.abspath(tilesfile)
if cache and tilesfile in _tiles:
tiledata = _tiles[tilesfile]
else:
with fits.open(tilesfile, memmap=False) as hdulist:
tiledata = hdulist[1].data
#
# Temporary workaround for problem identified in
# https://github.com/desihub/lvmmodel/issues/30
#
if any([c.bzero is not None for c in tiledata.columns]):
                foo = [tiledata[k].dtype for k in tiledata.dtype.names]
# - Check for out-of-date tiles file
    if np.issubdtype(tiledata['OBSCONDITIONS'].dtype, np.uint16):
import warnings
warnings.warn('old desi-tiles.fits with uint16 OBSCONDITIONS; please update your $LVMMODEL checkout', DeprecationWarning)
# - load cache for next time
if cache:
_tiles[tilesfile] = tiledata
# - Filter to only the DESI footprint if requested
subset = np.ones(len(tiledata), dtype=bool)
if onlydesi:
subset &= tiledata['IN_DESI'] > 0
# - Filter out PROGRAM=EXTRA tiles if requested
if not extra:
subset &= ~np.char.startswith(tiledata['PROGRAM'], 'EXTRA')
if np.all(subset):
return tiledata
else:
return tiledata[subset]
_platescale = None
def load_platescale():
'''
Loads platescale.txt, returning structured array with columns
radius: radius from center of focal plane [mm]
theta: radial angle that has a centroid at this radius [deg]
radial_platescale: Meridional (radial) plate scale [um/arcsec]
az_platescale: Sagittal (azimuthal) plate scale [um/arcsec]
'''
global _platescale
if _platescale is not None:
return _platescale
infile = findfile('focalplane/platescale.txt')
columns = [
('radius', 'f8'),
('theta', 'f8'),
('radial_platescale', 'f8'),
('az_platescale', 'f8'),
]
_platescale = np.loadtxt(infile, usecols=[0, 1, 6, 7], dtype=columns)
return _platescale
def reset_cache():
'''Reset I/O cache'''
global _thru, _psf, _params, _gfa, _fiberpos, _tiles, _platescale
_thru = dict()
_psf = dict()
_params = None
_gfa = None
_fiberpos = None
_tiles = dict()
_platescale = None
def load_target_info():
'''
Loads data/targets/targets.yaml and returns the nested dictionary
This is primarily syntactic sugar to avoid end users constructing
paths and filenames by hand (which e.g. broke when targets.dat was
renamed to targets.yaml)
'''
targetsfile = os.path.join(datadir(), 'targets', 'targets.yaml')
if not os.path.exists(targetsfile):
targetsfile = os.path.join(datadir(), 'targets', 'targets.dat')
with open(targetsfile) as fx:
data = yaml.load(fx)
return data
def load_pixweight(nside):
'''
Loads lvmmodel/data/footprint/desi-healpix-weights.fits
nside: after loading, the array will be resampled to the
passed HEALPix nside
'''
import healpy as hp
# ADM read in the standard pixel weights file
pixfile = os.path.join(os.environ['LVMMODEL'], 'data', 'footprint', 'desi-healpix-weights.fits')
with fits.open(pixfile) as hdulist:
pix = hdulist[0].data
# ADM determine the file's nside, and flag a warning if the passed nside exceeds it
npix = len(pix)
truenside = hp.npix2nside(len(pix))
if truenside < nside:
log.warning("downsampling is fuzzy...Passed nside={}, "
"but file {} is stored at nside={}".format(nside, pixfile, truenside))
# ADM resample the map
return hp.pixelfunc.ud_grade(pix, nside, order_in='NESTED', order_out='NESTED')
def findfile(filename):
'''
Return full path to data file $LVMMODEL/data/filename
Note: this is a precursor for a potential future refactor where
lvmmodel data would be installed with the package and $LVMMODEL
would become an optional override.
'''
return os.path.join(datadir(), filename)
def datadir():
'''
Returns location to lvmmodel data
if set, $LVMMODEL overrides data installed with the package
'''
if 'LVMMODEL' in os.environ:
return os.path.abspath(os.path.join(os.environ['LVMMODEL'], 'data'))
else:
import pkg_resources
return pkg_resources.resource_filename('lvmmodel', 'data')
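# Illustrative usage sketch (assumes $LVMMODEL points at a data checkout with
# the throughput, specpsf, footprint and yaml files referenced above):
#
#   from lvmmodel import io
#   params = io.load_lvmparams(config='lvm', telescope='1m')
#   tiles = io.load_tiles(onlydesi=True, extra=False)
#   thru_b = io.load_throughput('b')
#   io.reset_cache()  # drop the module-level caches if the files on disk change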
|
python
|
import glob
import bz2
with open('all_data.ndjson','w') as out:
for div in glob.glob('./OpenAccess-master/metadata/objects/*'):
print('Working on: ',div)
for file in glob.glob(f'{div}/*'):
with bz2.open(file, "rb") as f:
out.write(f.read().decode())
|
python
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import mars.oscar as mo
from mars.tests.core import require_ray
from mars.tests.conftest import * # noqa
from mars.utils import lazy_import
from mars.oscar.backends.ray.utils import placement_group_info_to_addresses
from mars.services.task.supervisor.task_manager import \
TaskConfigurationActor
ray = lazy_import('ray')
@require_ray
@pytest.mark.asyncio
async def test_task_manager_creation(ray_start_regular):
mo.setup_cluster(address_to_resources=placement_group_info_to_addresses('test_cluster', [{'CPU': 2}]))
# the pool is an ActorHandle, it does not have an async context.
pool = await mo.create_actor_pool('ray://test_cluster/0/0', n_process=2,
labels=[None] + ['numa-0'] * 2)
assert pool
# create configuration
await mo.create_actor(TaskConfigurationActor, dict(),
uid=TaskConfigurationActor.default_uid(),
address='ray://test_cluster/0/0')
configuration_ref = await mo.actor_ref(
TaskConfigurationActor.default_uid(),
address='ray://test_cluster/0/0')
await configuration_ref.get_config()
|
python
|
from django.urls import path,include
from . import views
urlpatterns = [
path('api/projects/', views.ProjectList.as_view()),
path('api/projects/profile', views.ProfileList.as_view()),
path('api/projects/ratings', views.RatingList.as_view()),
]
|
python
|
from output.models.ms_data.attribute.att_j004_xsd.att_j004 import Test
__all__ = [
"Test",
]
|
python
|
import param
from . import API1TestCase
# TODO: I copied the tests from testobjectselector, although I
# struggled to understand some of them. Both files should be reviewed
# and cleaned up together.
# TODO: tests copied from testobjectselector could use assertRaises
# context manager (and could be updated in testobjectselector too).
class TestListParameters(API1TestCase):
def setUp(self):
super(TestListParameters, self).setUp()
class P(param.Parameterized):
e = param.List([5,6,7], item_type=int)
l = param.List(["red","green","blue"], item_type=str, bounds=(0,10))
self.P = P
def test_default_None(self):
class Q(param.Parameterized):
            r = param.List(default=[])  # Also check None
def test_set_object_constructor(self):
p = self.P(e=[6])
self.assertEqual(p.e, [6])
def test_set_object_outside_bounds(self):
p = self.P()
try:
p.l=[6]*11
except ValueError:
pass
else:
raise AssertionError("Object set outside range.")
def test_set_object_wrong_type(self):
p = self.P()
try:
p.e=['s']
except TypeError:
pass
else:
raise AssertionError("Object allowed of wrong type.")
def test_set_object_not_None(self):
p = self.P(e=[6])
try:
p.e = None
except ValueError:
pass
else:
raise AssertionError("Object set outside range.")
if __name__ == "__main__":
import nose
nose.runmodule()
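# As the TODO above notes, the try/except/else pattern could be tightened with
# the assertRaises context manager; an equivalent sketch of one test:
#
#   def test_set_object_outside_bounds(self):
#       p = self.P()
#       with self.assertRaises(ValueError):
#           p.l = [6] * 11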
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import url
from .views import create_view
from .views import update_view
from .views import delete_view
from .views import list_view
stagesetting_create = url(regex=r'^add/$',
view=create_view,
name='stagesetting_create',
kwargs={})
stagesetting_update = url(regex=r'^(?P<pk>\d+)/update/$',
view=update_view,
name='stagesetting_update',
kwargs={})
stagesetting_delete = url(regex=r'^(?P<pk>\d+)/delete/$',
view=delete_view,
name='stagesetting_delete',
kwargs={})
stagesetting_list = url(regex=r'^$',
view=list_view,
name='stagesetting_list',
kwargs={})
urlpatterns = [
stagesetting_create,
stagesetting_update,
stagesetting_delete,
stagesetting_list,
]
|
python
|
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from ..query_constraints_factory import QueryConstraintsFactory
from ..query_constraints import QueryConstraints
from .spatial_temporal_constraints_builder import SpatialTemporalConstraintsBuilder
class VectorQueryConstraintsFactory(QueryConstraintsFactory):
"""
A query constraints factory with additional methods for creating spatial and/or
    temporal constraints for vector data. Do not construct this class manually; instead,
    get the constraints factory by using the `constraints_factory()` method of the
query builder.
"""
def spatial_temporal_constraints(self):
"""
Creates a spatial temporal constraints builder that can be used to construct
spatial and/or temporal constraints.
Returns:
A new `pygw.query.vector.spatial_temporal_constraints_builder.SpatialTemporalConstraintsBuilder`.
"""
return SpatialTemporalConstraintsBuilder(self._java_ref.spatialTemporalConstraints())
def filter_constraints(self, filter_constraint):
"""
Constrain a query using a filter created by pygw.query.FilterFactory.
Args:
filter_constraint (filter): The filter to constrain the query by.
Returns:
A `pygw.query.query_constraints.QueryConstraints` with the given filter.
"""
return QueryConstraints(self._java_ref.filterConstraints(filter_constraint))
def cql_constraints(self, cql_expression):
"""
Constrain a query using a CQL expression.
Args:
cql_expression (str): The CQL expression to constrain the query by.
Returns:
A `pygw.query.query_constraints.QueryConstraints` with the given CQL expression.
"""
return QueryConstraints(self._java_ref.cqlConstraints(cql_expression))
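# Illustrative usage sketch. The query-builder variable below is an assumption;
# the docstrings above only state that this factory is obtained from a query
# builder's `constraints_factory()` method:
#
#   factory = query_builder.constraints_factory()
#   constraints = factory.cql_constraints("BBOX(geom, -90, 40, -60, 45)")
#   # spatial/temporal filters can instead be built up via
#   # factory.spatial_temporal_constraints()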
|
python
|
from insights.parsers.route import Route
from insights.tests import context_wrap
ROUTE = '''
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.66.208.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
0.0.0.0 10.66.208.254 0.0.0.0 UG 0 0 0 eth0
'''
def test_route():
route_info = Route(context_wrap(ROUTE))
for route in route_info:
assert route == {'Destination': '10.66.208.0',
'Gateway': '0.0.0.0',
'Genmask': '255.255.255.0',
'Flags': 'U',
'Metric': '0',
'Ref': '0',
'Use': '0',
'Iface': 'eth0'}
break
assert '169.254.0.0' in route_info
|
python
|
#!/usr/bin/env python3.7
# Copyright: Ismael Narváez Berenjeno
from datetime import datetime
def get_time_isoformat():
"""
Get timestamp with ISO format.
:return: ISO timestamp
:rtype: str
"""
return datetime.now().isoformat()
|
python
|
"""
Interview Question 21: A stack with a min function
Problem: Define a stack data structure and implement a min function that returns the smallest element in the stack. In this stack, min, push, and pop must all run in O(1) time.
https://leetcode.com/problems/min-stack/
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example:
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); --> Returns -3.
minStack.pop();
minStack.top(); --> Returns 0.
minStack.getMin(); --> Returns -2.
"""
from collections import deque
class Stack:
def __init__(self):
self.items = deque()
def push(self, val):
return self.items.append(val)
def pop(self):
return self.items.pop()
def empty(self):
return len(self.items) == 0
def top(self):
return self.items[-1]
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.s = Stack()
self.mins = Stack()
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.s.push(x)
if self.mins.empty():
self.mins.push(x)
else:
min_val = self.mins.top()
if x < min_val:
self.mins.push(x)
else:
self.mins.push(min_val)
def pop(self):
"""
:rtype: void
"""
self.mins.pop()
return self.s.pop()
def top(self):
"""
:rtype: int
"""
return self.s.top()
def getMin(self):
"""Retrieve the minimum element in the stack.
:rtype: int
"""
return self.mins.top()
def test():
minStack = MinStack()
minStack.push(-2)
minStack.push(0)
minStack.push(-3)
assert minStack.getMin() == -3 # --> Returns -3.
minStack.pop()
assert minStack.top() == 0 # --> Returns 0.
assert minStack.getMin() == -2 # --> Returns -2.
if __name__ == '__main__':
test()
|
python
|
#!/usr/bin/python3
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QFrame, QPushButton, QLabel
class LaunchScreen(QFrame):
def __init__(self, parent): # constructor
super().__init__(parent)
self.windowClass = parent # allows calling of parent class methods
self.setStyleSheet(open('css/window.css').read())
self.initScreen()
def initScreen(self): # gui
        QtGui.QFontDatabase.addApplicationFont(r"fonts\Lora\static\Lora-Regular.ttf")
self.verticalBox = QVBoxLayout()
self.verticalBox.setAlignment(QtCore.Qt.AlignTop)
self.upperHBox = QHBoxLayout()
self.upperHBox.setAlignment(QtCore.Qt.AlignCenter)
self.lowerHBox = QHBoxLayout()
self.lowerHBox.setAlignment(QtCore.Qt.AlignCenter)
self.lowerHBox.setSpacing(50)
self.lowerHBox.setContentsMargins(50, 50, 50, 50)
self.upperHBox.addSpacing(35)
self.titleLabel = QLabel('Inventory', self)
self.titleLabel.setStyleSheet(open("css/titleLabels.css").read())
self.titleLabel.setFixedSize(650,195)
self.upperHBox.addWidget(self.titleLabel)
self.collectionsButton = QPushButton('Access Your\nCollections', self)
self.collectionsButton.setFixedSize(350,350)
self.collectionsButton.setStyleSheet(open('css/bigButtons.css').read())
self.collectionsButton.clicked.connect(self.collectionEvent)
self.lowerHBox.addWidget(self.collectionsButton)
self.newButton = QPushButton('Make A New\nCollection', self)
self.newButton.setFixedSize(350,350)
self.newButton.setStyleSheet(open('css/bigButtons.css').read())
self.newButton.clicked.connect(self.newEvent)
self.lowerHBox.addWidget(self.newButton)
self.verticalBox.addLayout(self.upperHBox)
self.verticalBox.addLayout(self.lowerHBox)
self.setLayout(self.verticalBox)
# navigation events
def collectionEvent(self):
self.windowClass.collectionScreen()
def newEvent(self):
self.windowClass.chooseScreen()
|
python
|
"""Conditional Grammar."""
from sqlfluff.core.parser.segments import Indent
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.match_wrapper import match_wrapper
from sqlfluff.core.parser.grammar.base import (
BaseGrammar,
)
class Conditional(BaseGrammar):
"""A grammar which is conditional on the parse context.
| NOTE: The Conditional grammar is assumed to be operating
| within a Sequence grammar, and some of the functionality
| may not function within a different context.
Args:
*args: A meta segment which is instantiated
conditionally upon the rules set.
config_type: The area of the config that is used
when evaluating the status of the given rules.
rules: A set of `rule=boolean` pairs, which are
evaluated when understanding whether conditions
are met for this grammar to be enabled.
Example:
.. code-block::
Conditional(Dedent, config_type="indent", indented_joins=False)
This effectively says that if `indented_joins` in the "indent" section
of the current config is set to `True`, then this grammar will allow
a `Dedent` segment to be matched here. If `indented_joins` is set to
`False`, it will be as though there was no `Dedent` in this sequence.
| NOTE: While the Conditional grammar is set up to allow different
| sources of configuration, it relies on configuration keys being
| available within the ParseContext. Practically speaking only the
| "indentation" keys are currently set up.
"""
def __init__(self, *args, config_type: str = "indentation", **rules):
if not all(issubclass(arg, Indent) for arg in args):
raise ValueError(
"Conditional is only designed to work with Indent segments."
)
if len(args) != 1:
raise ValueError(
"Conditional is only designed to work with a single element."
)
if not config_type:
raise ValueError("Conditional config_type must be set.")
        elif config_type != "indentation":
raise ValueError(
"Only 'indentation' is supported as a Conditional config_type."
)
if not rules:
raise ValueError("Conditional requires rules to be set.")
self._config_type = config_type
self._config_rules = rules
super().__init__(*args)
def is_enabled(self, parse_context):
"""Evaluate conditionals and return whether enabled."""
        # NOTE: Because "indentation" is the only config_type currently
        # supported, this code is much simpler than would be required in
        # future if multiple options become available.
if self._config_type != "indentation":
raise ValueError(
"Only 'indentation' is supported as a Conditional config_type."
)
config_section = parse_context.indentation_config
# If any rules fail, return no match.
for rule, val in self._config_rules.items():
# Assume False if not set.
conf_val = config_section.get(rule, False)
# Coerce to boolean.
if val != bool(conf_val):
return False
return True
@match_wrapper()
def match(self, segments, parse_context):
"""Evaluate conditionals and return content."""
if not self.is_enabled(parse_context):
return MatchResult.from_unmatched(segments)
# Instantiate the new element and return
new_seg = self._elements[0]()
return MatchResult((new_seg,), segments)
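# Typical placement is inside a Sequence grammar in a dialect definition, e.g.
# (sketch only; the segment names here are illustrative, not taken from a real
# dialect):
#
#   Sequence(
#       Conditional(Indent, indented_joins=True),
#       Ref("JoinClauseSegment"),
#       Conditional(Dedent, indented_joins=True),
#   )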
|
python
|
from textformer.models import Seq2Seq
# Creating the Seq2Seq model
seq2seq = Seq2Seq(n_input=1, n_output=1, n_hidden=512, n_embedding=256, n_layers=2,
ignore_token=None, init_weights=None, device='cpu')
|
python
|
from django.conf.urls import include, url
import django.contrib.auth.views
from work_evid import views
urlpatterns = [
url(r'^overviews/$', views.overviews, name='overviews'),
url(r'^delete_work/$', views.delete_work, name='delete_work'),
url(r'^work/$', views.WorkList.as_view(), name='work_list'),
url(r'^work/add/$', views.WorkCreate.as_view(), name='work_create'),
url(r'^work/detail/(?P<pk>\d+)/$', views.WorkDetail.as_view(), name='work_detail'),
url(r'^work/update/(?P<pk>\d+)/$', views.WorkUpdate.as_view(), name='work_update'),
url(r'^work/delete/(?P<pk>\d+)/$', views.WorkDelete.as_view(), name='work_delete'),
url(r'^firm/$', views.FirmList.as_view(), name='firm_list'),
url(r'^firm/add/$', views.FirmCreate.as_view(), name='firm_create'),
url(r'^firm/detail/(?P<pk>\d+)/$', views.FirmDetail.as_view(), name='firm_detail'),
url(r'^firm/update/(?P<pk>\d+)/$', views.FirmUpdate.as_view(), name='firm_update'),
url(r'^firm/delete/(?P<pk>\d+)/$', views.FirmDelete.as_view(), name='firm_delete'),
url(r'^todo/$', views.TodoList.as_view(), name='todo_list'),
url(r'^todo/(?P<firm>\d+)/$', views.TodoList.as_view(), name='todo_list_firm'),
url(r'^todo/add/$', views.TodoCreate.as_view(), name='todo_create'),
url(r'^todo/detail/(?P<pk>\d+)/$', views.TodoDetail.as_view(), name='todo_detail'),
url(r'^todo/update/(?P<pk>\d+)/$', views.TodoUpdate.as_view(), name='todo_update'),
url(r'^todo/delete/(?P<pk>\d+)/$', views.TodoDelete.as_view(), name='todo_delete'),
url(r'^accounts/login/$', django.contrib.auth.views.login, name='login'),
url(r'^accounts/logout/$', django.contrib.auth.views.logout, name='logout'),
url(r'^$', views.WorkList.as_view(), name='index'),
]
|
python
|
#!/usr/bin/env python3
import tensorflow as tf
import pickle
import cv2
import os
import os.path as path
from utils import predict, predict_no_tiles
from model import dilation_model_pretrained
from datasets import CONFIG
if __name__ == '__main__':
test = True
# Choose between 'cityscapes' and 'camvid'
dataset = 'cityscapes'
# Load dict of pretrained weights
print('Loading pre-trained weights...')
with open(CONFIG[dataset]['weights_file'], 'rb') as f:
w_pretrained = pickle.load(f)
print('Done.')
# Create checkpoint directory
checkpoint_dir = path.join('data/checkpoint', 'dilation_' + dataset)
if not path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Image in / out parameters
input_image_path = path.join('data', dataset + '.png')
output_image_path = path.join('data', dataset + '_out.png')
# Build pretrained model and save it as TF checkpoint
with tf.Session() as sess:
# Choose input shape according to dataset characteristics
if not test:
input_h, input_w, input_c = CONFIG[dataset]['input_shape']
else:
input_h, input_w, input_c = (1452, 2292, 3) # REVIEW: dr-eye-ve size.
input_tensor = tf.placeholder(tf.float32, shape=(None, input_h, input_w, input_c), name='input_placeholder')
# Create pretrained model
model = dilation_model_pretrained(dataset, input_tensor, w_pretrained, trainable=False)
sess.run(tf.global_variables_initializer())
# Save both graph and weights
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
saver.save(sess, path.join(checkpoint_dir, 'dilation'))
asdf = saver.save(sess, path.join(checkpoint_dir, 'dilation.ckpt'))
print("saved asdf:", asdf)
# Restore both graph and weights from TF checkpoint
with tf.Session() as sess:
saver = tf.train.import_meta_graph(path.join(checkpoint_dir, 'dilation.meta'))
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
graph = tf.get_default_graph()
output = 'softmax:0'
model = graph.get_tensor_by_name(output)
model = tf.reshape(model, shape=(1,)+CONFIG[dataset]['output_shape'])
# Read and predict on a test image
input_image = cv2.imread(input_image_path)
# import matplotlib.pyplot as plt
# plt.imshow(input_image)
# plt.show()
input_tensor = graph.get_tensor_by_name('input_placeholder:0')
if test:
tensors = [n.name for n in tf.get_default_graph().as_graph_def().node]
for tensor in tensors:
print(tensor)
import numpy as np
import os
path = '/home/josephz/tmp/data/dr-eyeve/35/frames/0057.png'
image = cv2.imread(path)
# output = 'input_placeholder:0'
outputs = ('conv1_1/Relu:0', 'conv1_2/Relu:0',
# 'pool1/MaxPool:0',
'conv2_1/Relu:0', 'conv2_2/Relu:0',
# 'conv3_1/Relu:0', 'conv3_2/Relu:0', 'conv3_3/Relu:0',
'conv5_3/Relu:0',
'fc6/Relu:0',
'fc7/Relu:0',
'final/Relu:0',
'ctx_pad1_1:0',
'ctx_conv1_1/Relu:0',
'ctx_conv7_1/Relu:0',
'ctx_fc1/Relu:0',
'ctx_final/BiasAdd:0',
'ctx_upsample/Relu:0',
)
for output in outputs:
print("Checking", output)
import pdb
pdb.set_trace()
model = graph.get_tensor_by_name(output)
outp = os.path.join('/home/josephz/ws/git/ml/framework/scripts/dilation/outs/tf', output.split('/')[0])
if not os.path.isfile(outp + '.npy'):
print("Saving to ", outp)
y = predict_no_tiles(image, input_tensor, model, dataset, sess, test=test)
np.save(outp, y)
out_tensor = graph.get_tensor_by_name('softmax:0')
out_tensor = tf.reshape(out_tensor, shape=(1,) + (1080, 1920, 19))
y = predict_no_tiles(image, input_tensor, out_tensor, dataset, sess, test=False)
else:
# Convert colorspace (palette is in RGB) and save prediction result
predicted_image = predict(input_image, input_tensor, model, dataset, sess, test=test)
predicted_image = cv2.cvtColor(predicted_image, cv2.COLOR_BGR2RGB)
cv2.imwrite(output_image_path, predicted_image)
|
python
|
# -*- coding: utf-8 -*-
"""Unit test package for tatortot."""
|
python
|
import requests
import re
from bs4 import BeautifulSoup
import logging
import os
import json
base_url = "https://vulncat.fortify.com/en/weakness?q="
logpath=f'{os.getcwd()}/log'
def scrape_url(url):
soup:BeautifulSoup=None
try:
r=requests.get(url)
soup=BeautifulSoup(r.text, 'html.parser')
except requests.exceptions.RequestException as ex:
logging.warning("There was an error with the request")
logging.error(ex)
except Exception as ex:
logging.warning("An unknown exception has occured")
logging.error(ex)
finally:
return soup
def get_filter_list(html):
""""""
soup = BeautifulSoup(html, 'html.parser')
return soup.find("input", attrs={"data-filtername":"category"})
def scrape_filters(filtername):
soup = scrape_url(base_url)
logfile=f'{logpath}/{filtername}.json'
open(logfile, 'w').close()
filter_list={}
try:
for data in soup.find_all("input", attrs={"data-filtername":filtername}):
#print(data["data-name"].replace("+"," "))
key = data["data-name"].replace("+"," ")
logging.info(f"found category '{key}'")
link = f"https://vulncat.fortify.com/en/weakness?{filtername}={data['data-name']}"
#print(f"{category}\nHyper-Link:{link}")
filter_list[key]=link
with open(logfile, 'w+') as f:
f.write(json.dumps(filter_list))
except Exception as ex:
logging.error(ex)
finally:
return filter_list
def get_issue_detail(url, soup:BeautifulSoup):
links = soup.find_all(class_="external-link")
for link in links:
parse_issue_data(f"{url}{link['href']}")
def parse_issue_data(url):
try:
soup=scrape_url(url)
title = soup.find(class_="detail-title")
print(title.text)
content = soup.find(class_="tab-content")
sections = content.find_all(class_="sub-title")
if sections:
for s in sections:
print(s.text + "\n")
metadata = s.findNext()
print(metadata.text.replace("[", "\n[").replace(". ", ".\n\n") +"\n\n")
except Exception as err:
print("--------ERROR!!! Unable to get explanation of vulnerability")
print(err)
def navigatePages(soup, base_url):
if soup is None: return
get_issue_detail(base_url, soup)
pagination = soup.find(class_="pagination")
if pagination is None:
print("Unable to find location of page navigation links")
return
link = pagination.find("li", class_="active")
if link and link.text !=">":
next_link = link.findNext("li")
if next_link:
next_url = next_link.find("a")
target_url = f"{base_url}{next_url['href']}"
print(target_url + "\n")
r = requests.get(url=target_url)
soup = BeautifulSoup(r.text,"html.parser")
if soup:
navigatePages(soup, base_url)
else:
print("No more links")
|
python
|
from fastai.vision.all import *
import fastai
from fastai.tabular.all import *
from fastai.data.load import _FakeLoader, _loaders
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import random
# CUSTOM VIS DATABLOCK FUNCTIONS
def get_npy(dataframe):
"Get the images (.npy) that will be used as input for the model"
# get sample names from the dataframe
samples = dataframe['Barcode']
fnames = []
# for each sample in the dataframe
for sp in samples:
        # build the path to each sample's image (assumes a module-level `path`)
        fnames.append(path/f'images/{sp}.npy')
# returns a list of the image paths
return fnames
def get_y(fname):
"Get the target yield value"
fname = str(fname)
fname = fname.split(sep='/')[-1]
fname = fname.replace('.npy', '')
y_target = mixed_df[mixed_df["Barcode"] == fname]
y_target = float(y_target['Yield'])
return y_target
def mix_npy_blocks(img):
"This function will be used to build the plot image and add transforms"
# Cut the image in half and stack the chunks side-by-side
chunk0 = img[:40, :20, :]
chunk1 = img[40:80, :20, :]
if random.choice([True,False]):
chunk0 = np.flip(chunk0[:,:,:], axis=0) # Flip vertically equals img[X,:,:]
if random.choice([True,False]):
chunk1 = np.flip(chunk1[:,:,:], axis=0) # Flip vertically equals img[X,:,:]
if random.choice([True,False]):
chunk0 = np.flip(chunk0[:,:,:], axis=1) # Flip horizontally equals img[:,X,:]
if random.choice([True,False]):
chunk1 = np.flip(chunk1[:,:,:], axis=1) # Flip horizontally equals img[:,X,:]
if random.choice([True,False]):
new_img = np.hstack((chunk0, chunk1))
else:
new_img =np.hstack((chunk1, chunk0))
return new_img
def vegetation_idxs(img):
"Calculate VI and add as new bands"
e = 0.00015 # Add a small value to avoid division by zero
im = img
# Calculate the VIs - change to np functions
ndvi = np.divide(np.subtract(im[:,:,4], im[:,:,2]), (np.add(im[:,:,4], im[:,:,2])+e))
ndvi_re = (im[:,:,4] - im[:,:,3]) / ((im[:,:,4] + im[:,:,3]) + e)
ndre = (im[:,:,3] - im[:,:,2]) / ((im[:,:,3] + im[:,:,3]) + e)
envi = ((im[:,:,4] + im[:,:,1]) - (2 * im[:,:,0])) / (((im[:,:,4] - im[:,:,1]) + (2 * im[:,:,0])) + e)
ccci = ndvi_re / (ndvi + e)
gndvi = (im[:,:,4] - im[:,:,1])/ ((im[:,:,4] + im[:,:,1]) + e)
gli = ((2* im[:,:,1]) - im[:,:,0] - im[:,:,2]) / (((2* im[:,:,1]) + im[:,:,0] + im[:,:,2]) + e)
osavi = ((im[:,:,4] - im[:,:,3])/ ((im[:,:,4] + im[:,:,3] + 0.16)) *(1 + 0.16) + e)
vi_list = [ndvi, ndvi_re, ndre, envi, ccci, gndvi , gli, osavi]
vis = np.zeros((40,40,13))
vis_stacked = np.stack(vi_list, axis=2)
vis[:,:,:5] = im
vis[:,:,5:] = vis_stacked
return vis
def load_npy(fn):
im = np.load(str(fn), allow_pickle=True)
im = im*3 # increase image signal
# Padding with zeros
w, h , c = im.shape
im = np.pad(im, ((0, 100-w), (0, 100-h), (0,0)),mode='constant', constant_values=0)
im = mix_npy_blocks(im) # Add transforms and stacking
im = vegetation_idxs(im) # Add vegetation indexes bands
# Normalise bands by deleting no-data values
for band in range(13):
im[:,:,band] = np.clip(im[:,:,band], 0, 1)
# Swap axes because np is: width, height, channels
# and torch wants : channel, width , height
im = np.swapaxes(im, 2, 0)
im = np.swapaxes(im, 1, 2)
im = np.nan_to_num(im)
return torch.from_numpy(im)
class MSITensorImage(TensorImage):
_show_args = {'cmap':'Rdb'}
def show(self, channels=3, ctx=None, vmin=None, vmax=None, **kwargs):
"Visualise the images"
if channels == 3 :
return show_composite(self, 3, ctx=ctx, **{**self._show_args, **kwargs})
else:
return show_single_channel(self, channels, ctx=ctx, **{**self._show_args, **kwargs} )
@classmethod
def create(cls, fn:(Path, str), **kwargs) -> None:
" Uses the load fn the array and turn into tensor"
return cls(load_npy(fn))
def __repr__(self): return f'{self.__class__.__name__} size={"x".join([str(d) for d in self.shape])}'
def MSITensorBlock(cls=MSITensorImage):
" A `TransformBlock` for numpy array images"
# Calls the class create function to transform the x input using custom functions
return TransformBlock(type_tfms=cls.create, batch_tfms=None)
def root_mean_squared_error(p, y):
return torch.sqrt(F.mse_loss(p.view(-1), y.view(-1)))
def create_rgb(img):
# make RGB plot to visualise the "show batch"
RGB = np.zeros((3, 40, 40))
RGB[0] = img[2]
RGB[2] = img[0]
RGB[1] = img[1]
#Change from tensor format to pyplot
RGB = np.swapaxes(RGB, 0, 2)
RGB = np.swapaxes(RGB, 1, 0)
RGB = RGB
return RGB
def show_composite(img, channels, ax=None,figsize=(3,3), title=None, scale=True,
ctx=None, vmin=0, vmax=1, scale_axis=(0,1), **kwargs)->plt.Axes:
"Show three channel composite"
ax = ifnone(ax, ctx)
dims = img.shape[0]
RGBim = create_rgb(img)
ax.imshow(RGBim)
ax.axis('off')
if title is not None: ax.set_title(title)
return ax
def show_single_channel(img, channel, ax=None, figsize=(3,3), ctx=None,
title=None, **kwargs) -> plt.Axes:
ax = ifnone(ax, ctx)
if ax is None: _, ax = plt.subplots(figsize=figsize)
tempim = img.data.cpu().numpy()
if tempim.ndim >2:
ax.imshow(tempim[channel,:,:])
ax.axis('off')
        if title is not None: ax.set_title(title)
else:
ax.imshow(tempim)
ax.axis('off')
        if title is not None: ax.set_title(title)
return ax
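# A sketch of how these pieces might be assembled into a DataBlock (illustrative
# only; `mixed_df` and `path` are module-level globals that get_npy/get_y above
# already assume exist):
#
#   yield_block = DataBlock(
#       blocks=(MSITensorBlock, RegressionBlock),
#       get_items=get_npy,   # dataframe -> list of .npy paths
#       get_y=get_y,         # .npy path -> yield value looked up in mixed_df
#       splitter=RandomSplitter(valid_pct=0.2, seed=42),
#   )
#   dls = yield_block.dataloaders(mixed_df, bs=16)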
|
python
|
from __future__ import absolute_import
import re
from cStringIO import StringIO
from datetime import date, datetime, timedelta
from psycopg2.extensions import AsIs, Binary, QuotedString
from pytz import timezone
class PostgresWriter(object):
"""Base class for :py:class:`mysql2pgsql.lib.postgres_file_writer.PostgresFileWriter`
and :py:class:`mysql2pgsql.lib.postgres_db_writer.PostgresDbWriter`.
"""
def __init__(self, file_options, tz=False):
index_prefix = file_options.get("index_prefix")
self.column_types = {}
self.log_detail = '\n%s\n'%(file_options['destination']['postgres']['database'])
self.is_gpdb = file_options.get("is_gpdb")
self.index_prefix = index_prefix if index_prefix else ''
if tz:
self.tz = timezone('UTC')
self.tz_offset = '+00:00'
else:
self.tz = None
self.tz_offset = ''
""" 'UPPER_ID' is different with '"column"' in CREATE statement:
'UPPER_ID' will create column with name 'upper_id'
'"UPPER_ID"' will create column with name 'UPPER_ID'
"""
def column_description(self, column):
return '"%s" %s' % (column['name'], self.column_type_info(column))
def column_type(self, column):
hash_key = hash(frozenset(column.items()))
self.column_types[hash_key] = self.column_type_info(column).split(" ")[0]
return self.column_types[hash_key]
    def column_type_info(self, column):
        """Return the full PostgreSQL column definition for a MySQL column:
        data type, DEFAULT clause and NOT NULL constraint."""
null = "" if column['null'] else " NOT NULL"
def get_type(column):
"""This in conjunction with :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader._convert_type`
determines the PostgreSQL data type. In my opinion this is way too fugly, will need
to refactor one day.
"""
t = lambda v: not v == None
default = (' DEFAULT %s' % QuotedString(column['default']).getquoted()) if t(column['default']) else None
if column['type'] == 'char':
default = ('%s::char' % default) if t(default) else None
return default, 'character(%s)' % column['length']
elif column['type'] == 'varchar':
default = ('%s::character varying' % default) if t(default) else None
return default, 'character varying(%s)' % column['length']
elif column['type'] == 'json':
default = None
return default, 'json'
elif column['type'] == 'integer':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'integer'
elif column['type'] == 'bigint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'bigint'
elif column['type'] == 'tinyint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'smallint'
elif column['type'] == 'boolean':
default = (" DEFAULT %s" % ('true' if int(column['default']) == 1 else 'false')) if t(default) else None
return default, 'boolean'
elif column['type'] == 'float':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] == 'float unsigned':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] in ('numeric', 'decimal'):
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'numeric(%s, %s)' % (column['length'] or 20, column['decimals'] or 0)
elif column['type'] == 'double precision':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'double precision'
elif column['type'] == 'datetime' or column['type'].startswith('datetime('):
default = None
if self.tz:
return default, 'timestamp with time zone'
else:
return default, 'timestamp without time zone'
elif column['type'] == 'date':
default = None
return default, 'date'
elif column['type'] == 'timestamp':
if column['default'] == None:
default = None
elif "current_timestamp()" in column['default']:
default = ' DEFAULT CURRENT_TIMESTAMP'
elif "CURRENT_TIMESTAMP" in column['default']:
default = ' DEFAULT CURRENT_TIMESTAMP'
elif "0000-00-00 00:00" in column['default']:
if self.tz:
default = " DEFAULT '1970-01-01T00:00:00.000000%s'" % self.tz_offset
elif "0000-00-00 00:00:00" in column['default']:
default = " DEFAULT '1970-01-01 00:00:00'"
else:
default = " DEFAULT '1970-01-01 00:00'"
if self.tz:
return default, 'timestamp with time zone'
else:
return default, 'timestamp without time zone'
elif column['type'] == 'time' or column['type'].startswith('time('):
default = " DEFAULT NOW()" if t(default) else None
if self.tz:
return default, 'time with time zone'
else:
return default, 'time without time zone'
elif column['type'] in ('blob', 'binary', 'longblob', 'mediumblob', 'tinyblob', 'varbinary'):
return default, 'bytea'
elif column['type'].startswith('binary(') or column['type'].startswith('varbinary('):
return default, 'bytea'
elif column['type'] in ('tinytext', 'mediumtext', 'longtext', 'text'):
return default, 'text'
elif column['type'].startswith('enum'):
default = (' %s::character varying' % default) if t(default) else None
enum = re.sub(r'^enum\(|\)$', '', column['type'])
# TODO: will work for "'.',',',''''" but will fail for "'.'',','.'"
max_enum_size = max([len(e.replace("''", "'")) for e in enum.split("','")])
return default, ' character varying(%s) check("%s" in (%s))' % (max_enum_size, column['name'], enum)
elif column['type'].startswith('bit('):
return ' DEFAULT %s' % column['default'].upper() if column['default'] else column['default'], 'varbit(%s)' % re.search(r'\((\d+)\)', column['type']).group(1)
elif column['type'].startswith('set('):
if default:
default = ' DEFAULT ARRAY[%s]::text[]' % ','.join(QuotedString(
v).getquoted() for v in re.search(r"'(.*)'", default).group(1).split(','))
return default, 'text[]'
else:
raise Exception('unknown %s' % column['type'])
default, column_type = get_type(column)
"""Refactor for GPDB."""
if not self.is_gpdb and column.get('auto_increment', None):
return '%s DEFAULT nextval(\'"%s_%s_seq"\'::regclass) NOT NULL' % (
column_type, column['table_name'], column['name'])
return '%s%s%s' % (column_type, (default if not default == None else ''), null)
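    # Hedged illustration (assumed column dict shape, inferred from the keys read above; not
    # part of the original code). A MySQL varchar column rendered as a PostgreSQL definition:
    #   writer.column_type_info({'name': 'title', 'type': 'varchar', 'length': 255,
    #                            'null': False, 'default': 'untitled'})
    #   -> "character varying(255) DEFAULT 'untitled'::character varying NOT NULL"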
"""QuotedString API: http://initd.org/psycopg/docs/extensions.html?highlight=quotedstring#psycopg2.extensions.QuotedString
ERROR:
UnicodeEncodeError: 'latin-1' codec can't encode characters in position 18-19: ordinal not in range(256)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe5 in position 16: ordinal not in range(128)
"""
def table_comments(self, table):
comments = []
if table.comment:
"""comments.append('COMMENT ON TABLE %s is %s;' % (table.name, QuotedString(table.comment).getquoted()))
comments.append('COMMENT ON TABLE %s is %s;' % (table.name, "'"+table.comment+"'"))"""
table_comment = QuotedString(table.comment.encode('utf8')).getquoted()
comments.append('COMMENT ON TABLE {} is {};'.format(table.name, table_comment))
for column in table.columns:
if column['comment']:
"""comments.append('COMMENT ON COLUMN %s.%s is %s;' % (table.name, column['name'], QuotedString(column['comment']).getquoted()))
comments.append('COMMENT ON COLUMN %s.%s is %s;' % (table.name, column['name'], "'"+column['comment'].decode('utf8')+"'"))"""
                comments.append('COMMENT ON COLUMN {}.{} is {};'.format(table.name, column['name'], QuotedString(column['comment'].encode('utf8')).getquoted()))
return comments
def process_row(self, table, row):
"""Examines row data from MySQL and alters
the values when necessary to be compatible with
sending to PostgreSQL via the copy command
"""
for index, column in enumerate(table.columns):
hash_key = hash(frozenset(column.items()))
column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)
if row[index] == None and ('timestamp' not in column_type or not column['default']):
                row[index] = r'\N'  # PostgreSQL COPY marker for NULL
elif row[index] == None and column['default']:
if self.tz:
row[index] = '1970-01-01T00:00:00.000000' + self.tz_offset
else:
row[index] = '1970-01-01 00:00:00'
elif 'bit' in column_type:
row[index] = bin(ord(row[index]))[2:]
elif isinstance(row[index], (str, unicode, basestring)):
if column_type == 'bytea':
row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]
elif 'text[' in column_type:
row[index] = '{%s}' % ','.join('"%s"' % v.replace('"', r'\"') for v in row[index].split(','))
else:
row[index] = row[index].replace('\\', r'\\').replace('\n', r'\n').replace(
'\t', r'\t').replace('\r', r'\r').replace('\0', '')
elif column_type == 'boolean':
# We got here because you used a tinyint(1), if you didn't want a bool, don't use that type
row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]
elif isinstance(row[index], (date, datetime)):
if isinstance(row[index], datetime) and self.tz:
try:
if row[index].tzinfo:
row[index] = row[index].astimezone(self.tz).isoformat()
else:
row[index] = datetime(*row[index].timetuple()[:6], tzinfo=self.tz).isoformat()
except Exception as e:
print e.message
else:
row[index] = row[index].isoformat()
elif isinstance(row[index], timedelta):
row[index] = datetime.utcfromtimestamp(_get_total_seconds(row[index])).time().isoformat()
else:
row[index] = AsIs(row[index]).getquoted()
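    # Hedged illustration (assumed inputs, not part of the original source): for a table with
    # columns (id integer, body text), process_row mutates the MySQL row in place so it is safe
    # for COPY: a None id becomes the \N null marker, and embedded backslashes, newlines and
    # tabs in the text column are escaped as \\, \n and \t.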
def table_attributes(self, table):
primary_keys = []
serial_key = None
maxval = None
columns = StringIO()
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
if column['primary_key']:
primary_keys.append(column['name'])
columns.write(' %s,\n' % self.column_description(column))
return primary_keys, serial_key, maxval, columns.getvalue()[:-2]
def truncate(self, table):
serial_key = None
maxval = None
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
truncate_sql = 'TRUNCATE "%s" CASCADE;' % table.name
serial_key_sql = None
if serial_key:
serial_key_sql = "SELECT pg_catalog.setval(pg_get_serial_sequence(%(table_name)s, %(serial_key)s), %(maxval)s, true);" % {
'table_name': QuotedString('"%s"' % table.name).getquoted(),
'serial_key': QuotedString(serial_key).getquoted(),
'maxval': maxval}
return (truncate_sql, serial_key_sql)
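    # Hedged illustration (assumed table "posts" with serial column "id" and maxval 42; not
    # part of the original code): truncate() returns roughly
    #   ('TRUNCATE "posts" CASCADE;',
    #    'SELECT pg_catalog.setval(pg_get_serial_sequence(\'"posts"\', \'id\'), 43, true);')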
"""Exclude PRIMARY KEY, create with write_indexes"""
def write_table(self, table):
primary_keys, serial_key, maxval, columns = self.table_attributes(table)
serial_key_sql = []
table_sql = []
table_comment_sql = []
if serial_key:
serial_key_seq = '%s_%s_seq' % (table.name, serial_key)
serial_key_sql.append('DROP SEQUENCE IF EXISTS "%s" CASCADE;' % serial_key_seq)
serial_key_sql.append("""CREATE SEQUENCE "%s" INCREMENT BY 1
NO MAXVALUE NO MINVALUE CACHE 1;""" % serial_key_seq)
serial_key_sql.append('SELECT pg_catalog.setval(\'"%s"\', %s, true);' % (serial_key_seq, maxval))
""" 'CREATE TABLE schema.table' is different with 'CREATE TABLE "schema.table"':
'CREATE TABLE schema1.table1' will create table in schema1
'CREATE TABLE "schema1.table1"' will create 'schema1.table1' in selected or public schema
If use SQL Key Word in scripts, necessarily with double quate, like "user".
"""
table_sql.append('DROP TABLE IF EXISTS "%s" CASCADE;' % table.name)
table_sql.append('CREATE TABLE "%s" (\n%s\n)\nWITHOUT OIDS;' % (table.name.encode('utf8'), columns))
if not self.is_gpdb:
table_comment_sql.extend(self.table_comments(table))
return (table_sql, serial_key_sql, table_comment_sql)
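    # Hedged illustration (assumed table "posts" with a serial "id" column; not part of the
    # original code): write_table() returns roughly
    #   table_sql         -> DROP TABLE IF EXISTS "posts" CASCADE; CREATE TABLE "posts" (...) WITHOUT OIDS;
    #   serial_key_sql    -> DROP/CREATE SEQUENCE "posts_id_seq" plus a pg_catalog.setval() call
    #   table_comment_sql -> COMMENT ON TABLE/COLUMN statements (skipped for Greenplum)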
def write_indexes(self, table):
index_sql = []
primary_index = [idx for idx in table.indexes if idx.get('primary', None)]
index_prefix = self.index_prefix
if primary_index:
index_sql.append('ALTER TABLE "%(table_name)s" ADD CONSTRAINT "%(index_name)s_pkey" PRIMARY KEY(%(column_names)s);' % {
'table_name': table.name,
'index_name': '%s%s_%s' % (index_prefix, table.name,
'_'.join(primary_index[0]['columns'])),
'column_names': ', '.join('"%s"' % col for col in primary_index[0]['columns']),
})
self.process_log(' create index: '+table.name+'|'+','.join(primary_index[0]['columns'])+'|PRIMARY')
if self.is_gpdb:
for index in table.indexes:
if 'primary' in index:
continue
unique = 'UNIQUE ' if index.get('unique', None) else ''
self.process_log(' ignore index: '+table.name+'|'+','.join(index['columns'])+ ('|UNIQUE' if unique else ''))
return index_sql
        '''For Greenplum Database (based on PostgreSQL):
        psycopg2.ProgrammingError: UNIQUE index must contain all columns in the distribution key
        For details, refer to:
https://stackoverflow.com/questions/40987460/how-should-i-deal-with-my-unique-constraints-during-my-data-migration-from-postg
http://gpdb.docs.pivotal.io/4320/ref_guide/sql_commands/CREATE_INDEX.html
EXCERPT: In Greenplum Database, unique indexes are allowed only if the columns of the index key are the same as (or a superset of)
the Greenplum distribution key. On partitioned tables, a unique index is only supported within an individual partition
- not across all partitions.
'''
for index in table.indexes:
if 'primary' in index:
continue
unique = 'UNIQUE ' if index.get('unique', None) else ''
index_name = '%s%s_%s' % (index_prefix, table.name, '_'.join(index['columns']))
index_sql.append('DROP INDEX IF EXISTS "%s" CASCADE;' % index_name)
index_sql.append('CREATE %(unique)sINDEX "%(index_name)s" ON "%(table_name)s" (%(column_names)s);' % {
'unique': unique,
'index_name': index_name,
'table_name': table.name,
'column_names': ', '.join('"%s"' % col for col in index['columns']),
})
self.process_log(' create index: '+table.name+'|'+','.join(index['columns'])+ ('|UNIQUE' if unique else ''))
return index_sql
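    # Hedged illustration (assumed non-Greenplum target and a table "posts" with a unique index
    # on "slug"; not part of the original code): write_indexes() emits roughly
    #   DROP INDEX IF EXISTS "posts_slug" CASCADE;
    #   CREATE UNIQUE INDEX "posts_slug" ON "posts" ("slug");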
def write_constraints(self, table):
constraint_sql = []
if self.is_gpdb:
for key in table.foreign_keys:
self.process_log(' ignore constraints: '+table.name+'|'+key['column']+'| ref:'+key['ref_table']+'.'+key['ref_column'])
return constraint_sql
for key in table.foreign_keys:
constraint_sql.append("""ALTER TABLE "%(table_name)s" ADD FOREIGN KEY ("%(column_name)s")
REFERENCES "%(ref_table_name)s"(%(ref_column_name)s);""" % {
'table_name': table.name,
'column_name': key['column'],
'ref_table_name': key['ref_table'],
'ref_column_name': key['ref_column']})
self.process_log(' create constraints: '+table.name+'|'+key['column']+'| ref:'+key['ref_table']+'.'+key['ref_column'])
return constraint_sql
def write_triggers(self, table):
trigger_sql = []
if self.is_gpdb:
for key in table.triggers:
self.process_log(' ignore triggers: '+table.name+'|'+key['name']+'|'+key['event']+'|'+key['timing'])
return trigger_sql
for key in table.triggers:
trigger_sql.append("""CREATE OR REPLACE FUNCTION %(fn_trigger_name)s RETURNS TRIGGER AS $%(trigger_name)s$
BEGIN
%(trigger_statement)s
RETURN NULL;
END;
$%(trigger_name)s$ LANGUAGE plpgsql;""" % {
'table_name': table.name,
'trigger_time': key['timing'],
'trigger_event': key['event'],
'trigger_name': key['name'],
'fn_trigger_name': 'fn_' + key['name'] + '()',
'trigger_statement': key['statement']})
trigger_sql.append("""CREATE TRIGGER %(trigger_name)s %(trigger_time)s %(trigger_event)s ON %(table_name)s
FOR EACH ROW
EXECUTE PROCEDURE fn_%(trigger_name)s();""" % {
'table_name': table.name,
'trigger_time': key['timing'],
'trigger_event': key['event'],
'trigger_name': key['name']})
self.process_log(' create triggers: '+table.name+'|'+key['name']+'|'+key['event']+'|'+key['timing'])
return trigger_sql
def process_log(self, log):
print(log)
self.log_detail += log+'\n'
def close(self):
raise NotImplementedError
def write_contents(self, table, reader):
raise NotImplementedError
# Original fix for Py2.6: https://github.com/mozilla/mozdownload/issues/73
def _get_total_seconds(dt):
# Keep backward compatibility with Python 2.6 which doesn't have this method
    if hasattr(dt, 'total_seconds'):
return dt.total_seconds()
else:
return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6
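# Hedged illustration (assumed input, not part of the original code):
#   _get_total_seconds(timedelta(hours=1, seconds=30)) -> 3630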
|
python
|
from app import server as user
if __name__ == "__main__":
user.run()
|
python
|
"""
Faça um programa que possua um vetor denominado 'A' que armazene 6 números inteiros. O programa deve executar
os seguintes passos.
(a) Atribua os seguintes valores a esse vetor: 1, 0, 5, -2, -5, 7.
(b) Armezene em uma variável inteira (simples) a soma entre os valores das posições A[0], A[1], e A[5] do vetor
e mostre na tela esta soma
(c) Modifique o vetor da posição 4, atribuindo a esta posição o valor 100
(d) Mostre na tela cada valor do vetor A, um em cada linha
"""
A = [1, 0, 5, -2, -5, 7]
soma = A[0] + A[1] + A[5]
print(soma)
A[4] = 100  # (c) assign 100 to position 4
for num in A:
print(num)
|
python
|
"""
Django settings for coralcity project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+=mb2q!t+yg7(m$!_$iki#2*z(+ub^lcas0jx$l2-dp%bp8pt)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['koralcity.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pages.apps.PagesConfig',
'Ages.apps.AgesConfig',
'listings.apps.ListingsConfig',
'realtors.apps.RealtorsConfig',
'accounts.apps.AccountsConfig',
'contacts.apps.ContactsConfig',
'django.contrib.humanize'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'coralcity.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR , 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'coralcity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR , 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'coralcity/static')]
# Media Folder Settings
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
if 'DATABASE_URL' in os.environ:
    DATABASES = {'default': dj_database_url.config()}
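# Hedged illustration (assumed value, not part of the project settings): on Heroku an environment
# variable such as DATABASE_URL=postgres://user:pass@host:5432/coralcity would make
# dj_database_url.config() build the DATABASES['default'] dict (engine, name, host, credentials)
# from that URL, overriding the local SQLite configuration above.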
|
python
|
# https://jsonapi.org/format/#document-resource-identifier-objects
def build_resource_identifier(type, id):
return {"type": type, "id": id}
#https://jsonapi.org/format/#document-meta
def build_meta(meta):
return meta
# https://jsonapi.org/format/#document-links
def build_links_object(links):
links_object = {}
#links is a dict, loop through it and build_link
# {
# 'key': {
# 'responder': ResponderClass,
# 'href': 'http://example.com/comments/{posts.comments}',
# 'meta': {"whatever": "data", "here": true}
# },
# "more keys" : {...},
# ...
# }
for key, value in links.items():
try:
meta_info = value['meta']
except KeyError:
meta_info = None
links_object[key] = build_link(
value['href'],
meta = meta_info
)
return links_object
#builds an individual link inside a links object
#returns either a string or a "link object"
# see https://jsonapi.org/format/#document-links
def build_link(url, meta=None):
if meta is not None:
link = {}
link['href'] = url
link['meta'] = build_meta(meta)
return link
else:
return url
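# Hedged usage sketch (assumed input, not part of the original code):
#   build_links_object({
#       'self': {'href': 'http://example.com/posts/1'},
#       'comments': {'href': 'http://example.com/posts/1/comments', 'meta': {'count': 10}},
#   })
# returns
#   {'self': 'http://example.com/posts/1',
#    'comments': {'href': 'http://example.com/posts/1/comments', 'meta': {'count': 10}}}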
|