from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from rest_framework import status
from perm.models import PerMisson
from perm.filters import PerMissonFilter
from application.models import Application
from users.models import tGroup
from django.contrib.auth import get_user_model
from perm.serializers import PermListSerializer, \
PermDetailSerializer, \
PermCreateSerializer, \
PermListSimpleSerializer, \
PermUserSerializer, \
PermAppSerializer, \
PermtGroupSerializer, \
PermUpdateSerializer
User = get_user_model()
# Permission pagination
class PermissonPagination(PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
page_query_param = 'page'
max_page_size = 100
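# Example request using the settings above (illustrative URL, not from this
# project): GET /api/perms/?page=2&page_size=20 returns the second page of 20
# items; page_size is capped at max_page_size (100).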
# Permission viewset
class PermissonViewSet(viewsets.ModelViewSet):
queryset = PerMisson.objects.all()
serializer_class = PermDetailSerializer
pagination_class = PermissonPagination
filter_backends = (DjangoFilterBackend,)
filter_class = PerMissonFilter
def get_serializer_class(self):
if self.action == 'list':
return PermListSerializer
if self.action == 'create':
return PermCreateSerializer
if self.action == 'update':
return PermUpdateSerializer
return PermDetailSerializer
@action(detail=False, methods=['get'], name="get all permisson", url_path="getall")
def get_perm_all(self, request, pk=None):
permsqs = PerMisson.objects.all()
serializer = PermListSimpleSerializer(permsqs, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=['get'], name="get all users", url_path="getusers")
def get_all_users(self, request, pk=None):
users = User.objects.all()
serializer = PermUserSerializer(users, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=['get'], name="get all apps", url_path="getapps")
def get_all_apps(self, request, pk=None):
apps = Application.objects.all()
serializer = PermAppSerializer(apps, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=['get'], name="get all groups", url_path="getgroups")
def get_all_tgroups(self, request, pk=None):
tgroups = tGroup.objects.all()
serializer = PermtGroupSerializer(tgroups, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True, methods=['get'], name='group outside user', url_path="getusers_out")
def get_outside_user(self, request, pk=None):
users = User.objects.exclude(granted_by_permissions__id=pk)
serializer = PermUserSerializer(users, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@action(detail=True, methods=['get'], name='group outside apps', url_path="getapps_out")
def get_outside_apps(self, request, pk=None):
apps = Application.objects.exclude(granted_by_permissions__id=pk)
serializer = PermAppSerializer(apps, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@action(detail=True, methods=['get'], name='group outside groups', url_path="getgroups_out")
def get_outside_tgroup(self, request, pk=None):
groups = tGroup.objects.exclude(granted_by_permissions__id=pk)
serializer = PermtGroupSerializer(groups, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
| python |
import gc
import json
import warnings
import flask_restful
from eventlet import greenthread
from injector import CallableProvider, inject
from flask import Blueprint, Flask
from flask.templating import render_template_string
from flask.views import View
from nose.tools import eq_
from flask_injector import request, FlaskInjector
def test_injections():
l = [1, 2, 3]
counter = [0]
def inc():
counter[0] += 1
def conf(binder):
binder.bind(str, to="something")
binder.bind(list, to=l)
app = Flask(__name__)
@app.route('/view1')
@inject(content=str)
def view1(content):
inc()
return render_template_string(content)
@inject(content=list)
class View2(View):
def dispatch_request(self):
inc()
return render_template_string('%s' % self.content)
@app.before_request
@inject(c=list)
def br(c):
inc()
eq_(c, l)
@app.after_request
@inject(c=list)
def ar(response_class, c):
inc()
eq_(c, l)
return response_class
@app.context_processor
@inject(c=list)
def cp(c):
inc()
eq_(c, l)
return {}
@app.teardown_request
@inject(c=list)
def tr(sender, exc=None, c=None):
inc()
eq_(c, l)
app.add_url_rule('/view2', view_func=View2.as_view('view2'))
FlaskInjector(app=app, modules=[conf])
with app.test_client() as c:
response = c.get('/view1')
eq_(response.get_data(as_text=True), "something")
with app.test_client() as c:
response = c.get('/view2')
eq_(response.get_data(as_text=True), '%s' % (l,))
eq_(counter[0], 10)
def test_resets():
app = Flask(__name__)
counter = [0]
class Scope(object):
def __init__(self, injector):
pass
def prepare(self):
pass
def cleanup(self):
counter[0] += 1
@app.route('/')
def index():
eq_(counter[0], 1)
return 'asd'
FlaskInjector(app, request_scope_class=Scope)
eq_(counter[0], 0)
with app.test_client() as c:
c.get('/')
eq_(counter[0], 1)
def test_memory_leak():
# The RequestScope holds references to GreenThread objects which would
# cause memory leak
app = Flask(__name__)
FlaskInjector(app)
@app.route('/')
def index():
return 'test'
def get_request():
with app.test_client() as c:
c.get('/')
green_thread = greenthread.spawn(get_request)
green_thread.wait()
# Delete green_thread so the GreenThread object is dereferenced
del green_thread
# Force run garbage collect to make sure GreenThread object is collected if
# there is no memory leak
gc.collect()
greenthread_count = len([
obj for obj in gc.get_objects()
if type(obj) is greenthread.GreenThread])
eq_(greenthread_count, 0)
def test_doesnt_raise_deprecation_warning():
app = Flask(__name__)
def provide_str():
return 'this is string'
def configure(binder):
binder.bind(str, to=CallableProvider(provide_str), scope=request)
@app.route('/')
@inject(s=str)
def index(s):
return s
FlaskInjector(app=app, modules=[configure])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with app.test_client() as c:
c.get('/')
eq_(len(w), 0, map(str, w))
def test_jinja_env_globals_support_injection():
app = Flask(__name__)
def configure(binder):
binder.bind(str, to='xyz')
@inject(s=str)
def do_something_helper(s):
return s
app.jinja_env.globals['do_something'] = do_something_helper
@app.route('/')
def index():
return render_template_string('{{ do_something() }}')
FlaskInjector(app=app, modules=[configure])
with app.test_client() as c:
eq_(c.get('/').get_data(as_text=True), 'xyz')
def test_error_handlers_support_injection():
app = Flask(__name__)
class CustomException(Exception):
pass
@app.route('/custom-exception')
def custom_exception():
raise CustomException()
@app.errorhandler(404)
@inject(s=str)
def handle_404(error, s):
return s, 404
@app.errorhandler(CustomException)
@inject(s=str)
def handle_custom_exception(error, s):
return s, 500
def configure(binder):
binder.bind(str, to='injected content')
FlaskInjector(app=app, modules=[configure])
with app.test_client() as c:
response = c.get('/this-page-does-not-exist')
eq_((response.status_code, response.get_data(as_text=True)),
(404, 'injected content'))
response = c.get('/custom-exception')
eq_((response.status_code, response.get_data(as_text=True)),
(500, 'injected content'))
def test_view_functions_arent_modified_globally():
# Connected to GH #6 "Doing multiple requests on a flask test client on an injected route
# fails for all but the first request"
# The code would modify view functions generated by View.as_view(), it wasn't an issue with
# views added directly to an application but if function was added to a blueprint and
# that blueprint was used in multiple applications it'd raise an error
class MyView(View):
pass
blueprint = Blueprint('test', __name__)
blueprint.add_url_rule('/', view_func=MyView.as_view('view'))
app = Flask(__name__)
app.register_blueprint(blueprint)
FlaskInjector(app=app)
app2 = Flask(__name__)
app2.register_blueprint(blueprint)
# it'd fail here
FlaskInjector(app=app2)
def test_view_args_and_class_args_are_passed_to_class_based_views():
class MyView(View):
def __init__(self, class_arg):
self.class_arg = class_arg
def dispatch_request(self, dispatch_arg):
return '%s %s' % (self.class_arg, dispatch_arg)
app = Flask(__name__)
app.add_url_rule('/<dispatch_arg>', view_func=MyView.as_view('view', class_arg='aaa'))
FlaskInjector(app=app)
client = app.test_client()
response = client.get('/bbb')
print(response.data)
eq_(response.data, b'aaa bbb')
def test_flask_restful_integration_works():
@inject(_int=int)
class HelloWorld(flask_restful.Resource):
def get(self):
return {'int': self._int}
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(HelloWorld, '/')
FlaskInjector(app=app)
client = app.test_client()
response = client.get('/')
data = json.loads(response.data.decode('utf-8'))
eq_(data, {'int': 0})
| python |
# -*- coding: utf-8 -*-
from validator import Validator
class VimLParserLint(Validator):
__filetype__ = 'vim'
checker = 'vimlparser'
args = ''
regex = r"""
.+?:
(?P<lnum>\d+):
(?P<col>\d+):
\svimlparser:\s
(?P<text>
(
(
(?P<error>E)
|
(?P<warning>W)
)
(?P<code>\d+):\s
)?
.+
)"""
| python |
a = 4.9
b = 9.8
sum1 = a + b
print('result:', sum1)
| python |
from functools import reduce
from itertools import combinations
from operator import mul
from aocd import data as expense_report
entries = list(map(int, expense_report.splitlines()))
for part in (1, 2):
for combo in combinations(entries, part+1):
if sum(combo) == 2020:
print(f'Part {part}:', reduce(mul, combo))
break
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 8 18:50:45 2021
@author: patrick
"""
from .Facebook_Chat_Analysis import *
| python |
"""Module for the base objects of the abstract argumentation frameworks."""
from .relation import RelationType
from .relation import Relation
from .premise import FallacyType
from .premise import Premise
from .graph import Graph
from .extension import Extension
| python |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped, Quaternion
from mavros_msgs.srv import CommandBool, CommandTOL, SetMode, SetModeRequest
from mavros_msgs.msg import State
import time
from tf.transformations import quaternion_from_euler
flight_alt = 1.0 # (m)
class TakeOffLand():
def __init__(self, altitude = flight_alt):
rospy.init_node('takeoff_land') # creates the node
# Subscribers
self.state_sub = rospy.Subscriber("uav1/mavros/state", State, self.state_cb)
# Publishers
self.local_pose_pub = rospy.Publisher("uav1/mavros/setpoint_position/local", PoseStamped, queue_size=10)
# Clients
self.arm_client = rospy.ServiceProxy("uav1/mavros/cmd/arming", CommandBool)
self.land_client = rospy.ServiceProxy("uav1/mavros/cmd/land", CommandTOL)
self.current_state = None
self.des_z = altitude
self.rate = rospy.Rate(20)
self.arm()
def state_cb(self, msg):
self.current_state = msg
def arm(self):
# wait for connect
while not rospy.is_shutdown() and self.current_state == None:
rospy.loginfo("waiting for connection")
self.rate.sleep()
# must be streaming points before allowed to switch to offboard
pose = PoseStamped()
pose.pose.position.x = 0
pose.pose.position.y = 0
pose.pose.position.z = self.des_z
for i in range(100):
self.local_pose_pub.publish(pose)
self.rate.sleep()
# enable offboard mode and arm
last_request = rospy.get_time()
set_mode = rospy.ServiceProxy("uav1/mavros/set_mode", SetMode)
req = SetModeRequest()
req.custom_mode = "OFFBOARD"
while not rospy.is_shutdown() and (self.current_state.mode != req.custom_mode):
self.local_pose_pub.publish(pose)
if rospy.get_time() - last_request > 5.0: # check every 5 seconds
try:
set_mode.call(req)
except rospy.ServiceException as e:
print("Service did not process request: %s" % str(e))
last_request = rospy.get_time()
self.rate.sleep()
rospy.loginfo("Switched to offboard mode")
while not rospy.is_shutdown() and not self.current_state.armed:
if not self.current_state.armed and rospy.get_time() - last_request > 5.0:
if self.arm_client(True):
rospy.loginfo("Vehicle armed")
last_request = rospy.get_time()
self.rate.sleep()
def take_off_hover(self):
# define hover pose (set point)
pose = PoseStamped()
pose.header.stamp = rospy.get_rostime()
pose.header.frame_id = 'mavsetp'
pose.pose.position.x = 0
pose.pose.position.y = 0
pose.pose.position.z = self.des_z
q = quaternion_from_euler(0, 0, 0)
pose.pose.orientation = Quaternion(*q)
rospy.loginfo("Vehicle taking off")
# publish pose for however long we want to hover
while not rospy.is_shutdown():
self.local_pose_pub.publish(pose)
self.rate.sleep()
rospy.loginfo("Vehicle hovering")
if __name__ == "__main__":
takeoff_land = TakeOffLand()
takeoff_land.take_off_hover()
rospy.spin()
| python |
from flask import ( g, redirect, url_for )
from tmc.db import get_db, make_dicts
# Get list of all industries available in the database.
def get_industries():
db = get_db()
try:
db.row_factory = make_dicts
query = db.execute(
'SELECT id as db_id, industry_name as Industry FROM industries ORDER BY industry_name ASC').fetchall()
return query
except TypeError:
#embed()
return False # Change this for something more meaningful -- warning/alert
| python |
'''
Miscellaneous math functions.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
def matrix_sqrt(X=None, symmetric=False, inverse=False, eigs=None):
'''Returns the matrix square root of X.
Arguments:
`X` (square class::`numpy.ndarray`)
`symmetric` (bool, default False):
If True, `X` is assumed to be symmetric, which speeds up
calculation of the square root.
`inverse` (bool, default False):
If True, computes the matrix square root of inv(X).
`eigs` (2-tuple):
`eigs` must be a 2-tuple whose first element is an array of
eigenvalues and whose second element is an ndarray of eigenvectors
(individual eigenvectors are in columns). If this argument is
provided, computation of the matrix square root is much faster. If
this argument is provided, the `X` argument is ignored (in this
case, it can be set to None).
Returns a class::`numpy.ndarray` `S`, such that S.dot(S) = X
'''
if eigs is not None:
(vals, V) = eigs
else:
(vals, V) = np.linalg.eig(X)
k = len(vals)
if inverse is False:
SRV = np.diag(np.sqrt(vals))
else:
SRV = np.diag(1. / np.sqrt(vals))
if symmetric:
return V.dot(SRV).dot(V.T)
else:
return V.dot(SRV).dot(np.linalg.inv(V))
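# Illustrative usage sketch (hypothetical matrix, not part of the original module):
#   X = np.array([[4.0, 1.0], [1.0, 3.0]])
#   S = matrix_sqrt(X, symmetric=True)
#   np.allclose(S.dot(S), X)                      # -> True
#   Si = matrix_sqrt(X, symmetric=True, inverse=True)
#   np.allclose(Si.dot(Si), np.linalg.inv(X))     # -> True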
def get_histogram_cdf_points(data, cdf_vals, ignore=None, mask=None):
'''Returns input values corresponding to the data's CDF values.
Arguments:
`data` (ndarray):
The data for which to determine the CDF values
`cdf_vals` (sequence of floats):
A sequence defining the CDF values for which the values of `data`
should be returned. Each value should be in the range [0, 1]. For
example, to get the values of `data` corresponding to the 1% lower
tail and 5% upper tail, this argument would be (0.01, 0.95).
`ignore` (numeric, default `None`):
A scalar value that should be ignored when computing histogram
points (e.g., a value that indicates bad data). If this value is
not specified, all data are used.
Return value:
A list specifying the values in `data` that correspond to the
associated CDF values in `cdf_vals`.
'''
data = data.ravel()
if mask is not None:
data = data[mask.ravel() != 0]
if len(data) == 0:
raise Exception('All pixels are masked.')
if ignore is not None and ignore in data:
data = data[np.where(data != ignore)]
if len(data) == 0:
raise Exception('No data to display after masking and ignoring.')
isort = np.argsort(data)
N = len(data)
return [data[isort[int(x * (N - 1))]] for x in cdf_vals]
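# Illustrative usage sketch (hypothetical data, not part of the original module):
#   img = np.random.rand(512, 512)
#   lo, hi = get_histogram_cdf_points(img, (0.02, 0.98))
#   # lo and hi bound the central 96% of the values, e.g. for a linear
#   # contrast stretch that clips the 2% tails on each side.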
| python |
import tkinter as tk
def get_line_numbers():
output = ''
row, col = text_editor.index("end").split('.') # row gives the number of rows in the text
#print(int(row)-1)
for i in range(1, int(row)):
output += str(i) + '\n' # build a string of line numbers, one per line
#print(output)
return output
def update_line_numbers(event=None):
line_numbers = get_line_numbers()
line_number_bar.config(state='normal')
line_number_bar.delete('1.0', 'end')
line_number_bar.insert('1.0', line_numbers)
line_number_bar.config(state='disabled')
def on_content_changed(event=None):
if text_editor.edit_modified():
update_line_numbers()
text_editor.edit_modified(False)
root = tk.Tk()
line_number_bar = tk.Text(root, width=2, padx=3, takefocus=1,font=('Arial',14,'normal'), border=0,background='DarkOliveGreen1', state='disabled', wrap='none')
line_number_bar.pack(side='left', fill='y')
text_editor = tk.Text(root,font=('Arial',14,'normal'))
text_editor.config(wrap='word', relief=tk.FLAT)
text_editor.pack(fill=tk.BOTH, expand=True)
text_editor.bind('<<Modified>>',on_content_changed)
#text_editor.edit_modified(False)
root.mainloop()
| python |
# -*- encoding=utf8 -*-
__author__ = "srz_zumix"
sys.path.append(r"../pmbase")
from airtest.core.api import *
from pmbase import PmBase
auto_setup(__file__)
# adb = ADB()
# def update():
# print adb.shell('dumpsys battery')
sleep_mul = 1
pm = PmBase(sleep_mul)
pm.setup()
def pm_sleep(s):
pm.pm_sleep(s)
def touch_positive_button():
return pm.touch_positive_button()
def touch_oncemore_button():
return pm.touch_oncemore_button()
def touch_next_button():
if touch_positive_button():
return True
return touch_oncemore_button()
def is_quest_select():
return pm.is_quest_select()
def touch_quest_banner(lv):
return pm.touch_quest_banner(lv)
def touch_result():
return pm.touch_result()
def check_bar():
im = pm.exists_battle_symbol()
if im:
pos = (im[0], im[1])
touch(pos)
pm_sleep(10)
return True
return False
def is_wait_bar():
if check_bar():
if check_bar():
check_bar()
return True
return False
def wait_battle():
if not pm.is_result_bg():
if not is_wait_bar():
return
pm.step_result()
def auto_battle(lv):
# once
if touch_quest_banner(lv):
touch_positive_button()
pm_sleep(10)
else:
touch_next_button()
while True:
wait_battle()
if is_quest_select():
break
else:
touch_next_button()
def auto_select_battle(lv):
while True:
auto_battle(lv)
def main():
auto_select_battle(4)
main()
| python |
# coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
"""
This is the SpiceBot AI system. Based On Chatty cathy
"""
from sopel.tools import Identifier
from sopel.config.types import StaticSection, ListAttribute, ValidatedAttribute
import os
import tempfile
import aiml
from .Database import db as botdb
from .Config import config as botconfig
from .Read import read as botread
from .Users import users as botusers
from sopel_modules.spicemanip import spicemanip
class SpiceBot_AI_MainSection(StaticSection):
extra = ListAttribute('extra')
gender = ValidatedAttribute('gender', default='female')
class SpiceBot_AI():
def __init__(self):
self.setup_ai()
self.braindirs = []
self.dict = {
"patterncounts": 0,
"filecounts": 0,
"sessioncache": {},
"files": {}
}
# Load AIML kernel
self.aiml_kernel = aiml.Kernel()
# aiml parser
self.aiml_parser = aiml.AimlParser.create_parser()
# Don't warn for no matches
self.aiml_kernel._verboseMode = False
# Learn responses
self.load_saved_brain()
self.load_brain()
# Load bot values
self.load_bot_values()
def setup_ai(self):
botconfig.define_section("SpiceBot_AI", SpiceBot_AI_MainSection, validate=False)
botconfig.config.aibrain = os.path.join(botconfig.homedir, botconfig.config.core.basename + '.aibrain.brn')
def load_saved_brain(self):
if os.path.isfile(botconfig.config.aibrain):
self.aiml_kernel.bootstrap(brainFile=botconfig.config.aibrain)
self.save_brain()
def load_brain(self):
braindirs = botread.get_config_dirs("SpiceBot_AI")
# learn directories
self.learn(braindirs)
self.save_brain()
def load_bot_values(self):
current_bot_db = botdb.get_plugin_value('SpiceBot_AI', 'brain') or None
if current_bot_db:
for predicate in list(current_bot_db.keys()):
predval = current_bot_db[predicate]
self.aiml_kernel.setBotPredicate(predicate, predval)
# sopel nick
self.aiml_kernel.setBotPredicate("nick", botconfig.nick)
# gender
self.aiml_kernel.setBotPredicate("gender", botconfig.SpiceBot_AI.gender.lower())
if botconfig.SpiceBot_AI.gender.lower() not in ["male", "female"]:
self.aiml_kernel.setBotPredicate("gendertype", "item")
else:
self.aiml_kernel.setBotPredicate("gendertype", botconfig.SpiceBot_AI.gender.lower())
def learn(self, braindirs):
for braindir in braindirs:
if braindir not in self.braindirs:
self.braindirs.append(braindir)
# Count matches
for pathname in os.listdir(braindir):
self.dict["filecounts"] += 1
aimlfile = os.path.join(braindir, pathname)
data = open(aimlfile).read()
count = data.count('pattern')
count = count / 2
self.dict["patterncounts"] += int(count)
tempbrain = tempfile.mkstemp()[1]
with open(tempbrain, 'w') as fileo:
fileo.write(
"<aiml version='1.0.1' encoding='UTF-8'>"
" <!-- std-startup.xml -->\n"
" <category>\n"
" <pattern>LOAD AIML B</pattern>\n"
" <template>\n"
" <learn>{}</learn>\n"
" </template>\n"
" </category>\n"
"</aiml>".format(os.path.join(braindir, "*.aiml"))
)
self.aiml_kernel.learn(tempbrain)
self.aiml_kernel.respond("LOAD AIML B")
def on_message(self, bot, trigger, message):
nick = Identifier(trigger.nick)
nick_id = botusers.get_nick_id(nick, True)
self.check_user_import(nick, nick_id)
message = self.bot_message_precipher(bot, trigger, message)
aiml_response = self.aiml_kernel.respond(message, nick_id)
if aiml_response:
aiml_response = self.bot_message_decipher(bot, trigger, aiml_response)
self.save_nick_session(nick, nick_id)
self.save_brain()
return aiml_response
def bot_message_precipher(self, bot, trigger, message):
# punctuation
puct_dict = {"!": "exclamationmark", ".": "period", "?": "questionmark", ",": "comma"}
for puctuation in list(puct_dict.keys()):
message = message.replace(puctuation, puct_dict[puctuation])
# bot items
for botitem in ["nick"]:
messagelist = spicemanip(message, "create")
for i in range(len(messagelist)):
if messagelist[i].upper() == str(eval("bot." + botitem)).upper():
messagelist[i] = str("bot" + botitem).upper()
message = spicemanip(messagelist, 0)
for triggeritem in ["nick", "sender"]:
messagelist = spicemanip(message, "create")
for i in range(len(messagelist)):
if messagelist[i].upper() == str(eval("trigger." + botitem)).upper():
messagelist[i] = str("trigger" + botitem).upper()
message = spicemanip(messagelist, 0)
return message
def bot_message_decipher(self, bot, trigger, aiml_response):
# bot items
for botitem in ["nick"]:
aiml_response = aiml_response.replace("bot" + botitem, str(eval("bot." + botitem)))
# trigger items
for triggeritem in ["nick", "sender"]:
aiml_response = aiml_response.replace("trigger" + triggeritem, str(eval("trigger." + triggeritem)))
# pronouns
botgendertype = self.aiml_kernel.getBotPredicate("gendertype")
pronounsdict = {
"male": {
"main": "he",
"possess": "his",
"self": "himself",
},
"female": {
"main": "her",
"possess": "hers",
"self": "herself",
},
"item": {
"main": "it",
"possess": "its",
"self": "itself",
},
"point": {
"main": "you",
"possess": "yours",
"self": "yourself",
},
"group": {
"main": "them",
"possess": "theirs",
"self": "themselves",
},
}
for pronounitem in list(pronounsdict[botgendertype].keys()):
aiml_response = aiml_response.replace("BOTPRONOUN" + pronounitem, pronounsdict[botgendertype][pronounitem])
triggergendertype = self.getPredicate("gender", trigger.nick)
if not triggergendertype or triggergendertype == "":
triggergendertype = "point"
for pronounitem in list(pronounsdict[triggergendertype].keys()):
aiml_response = aiml_response.replace("TRIGGERPRONOUN" + pronounitem, pronounsdict[triggergendertype][pronounitem])
aiml_response = "\x0315" + aiml_response + "\x03"
return aiml_response
def getPredicate(self, predicate, nick, nick_id=None):
if not nick_id:
nick = Identifier(nick)
nick_id = botusers.get_nick_id(nick, True)
return self.aiml_kernel.getPredicate(predicate, nick_id)
def check_user_import(self, nick, nick_id=None):
if not nick_id:
nick = Identifier(nick)
nick_id = botusers.get_nick_id(nick, True)
if nick_id not in list(self.dict["sessioncache"].keys()):
self.dict["sessioncache"][nick_id] = botdb.get_nick_value(nick, 'botai') or {}
for predicate in list(self.dict["sessioncache"][nick_id].keys()):
predval = self.dict["sessioncache"][nick_id][predicate]
self.aiml_kernel.setPredicate(predicate, predval, nick_id)
# defaults
if "nick" not in list(self.dict["sessioncache"][nick_id].keys()):
self.dict["sessioncache"][nick_id]["nick"] = nick
self.aiml_kernel.setPredicate("nick", nick, nick_id)
def save_nick_session(self, nick, nick_id=None):
if not nick_id:
nick = Identifier(nick)
nick_id = botusers.get_nick_id(nick, True)
sessionData = self.aiml_kernel.getSessionData(nick_id)
botdb.set_nick_value(nick, 'botai', sessionData)
def save_brain(self):
self.aiml_kernel.saveBrain(botconfig.config.aibrain)
botsessiondata = self.aiml_kernel._botPredicates
botdb.set_plugin_value('SpiceBot_AI', 'brain', botsessiondata)
botai = SpiceBot_AI()
| python |
"""
TODO TESTS:
- Syntax errors,
- general tests
"""
from helper import (
ValueChecker,
FlaskValueCheckerSyntaxError,
FlaskValueCheckerValueError,
)
import random
import string
import pytest
import io
test_restriction_code = """
# some simple data for tests here
firstName : str/lenlim(5, 15) # a random comment
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
password : str/lenlim(8, 15)
phone : str/lenlim(8, 15)
age : int/lim(18, 99)
height : float/lim(1, inf)/optional
someNegativeFloat : float/optional/lim(-inf, 0)
team : str/accept(["red", "blue", "yellow", "green", "orange"])
acceptTermsAndConditions : str/accept(['on'])/optional
someEdgeCase : str/accept(['on'])
"""
checker = ValueChecker(test_restriction_code)
sample_test_dict = {
"firstName": "Garyashver",
"email": "[email protected]",
"phone": "9120921022",
"age": "76",
"password": "12345678",
"team": "red",
"someEdgeCase": "on",
"needed_file": (io.BytesIO(b"something"), "file.txt"),
"optional_file": (io.BytesIO(b"something"), "other_file.txt"),
}
def create_sample_dict(modifications=None):
modifications = {} if modifications is None else modifications
test_dict = sample_test_dict.copy()
for key, value in modifications.items():
if value is None:
if key in test_dict:
del test_dict[key]
else:
test_dict[key] = value
return test_dict
def run_tests_for_param(param, tests, pre_func=None):
for test in tests:
pre_value, expected_output = test
if pre_func:
value = pre_func(pre_value)
else:
value = pre_value
test_dict = create_sample_dict({param: value})
errs = checker.check_for(test_dict)
bad_err_text = f"""
param : {param},
pre_value : {pre_value},
value : {value},
expected_output : {expected_output},
"""
if expected_output is None:
assert errs is None, bad_err_text
else:
assert errs[param] == expected_output, bad_err_text
def create_rand_text(length, max_len=None):
"""
Create random text of the given length; if max_len is specified,
the text has a random length between length and max_len.
"""
if max_len is not None:
length = random.randint(length, max_len)
to_ret_string = ""
for _ in range(length):
to_ret_string += random.choice(string.printable)
return to_ret_string
def test_simple_pass():
error = checker.check_for(sample_test_dict)
assert error is None
def test_simple_fail():
test_dict = create_sample_dict({"age": None})
errors = checker.check_for(test_dict)
assert errors is not None
fields = errors
assert "age" in fields
assert len(fields.items()) == 1
def test_optional_field():
test_dict = create_sample_dict({"middleName": "sarah"})
errors = checker.check_for(test_dict)
assert errors is None
test_dict = create_sample_dict({})
errors = checker.check_for(test_dict)
assert errors is None
def test_string_length_limits():
def pre_func(val):
if type(val) != tuple:
val = (val,)
return create_rand_text(*val)
# tests are run on the modif_param
modif_param = "firstName"
invalid_range_err = "string length must be between 5 and 15"
# tests represent parameters, text_len, expected_output_error
tests = [
[2, invalid_range_err],
[3, invalid_range_err],
[(0, 4), invalid_range_err],
[5, None],
[(5, 15), None],
[(5, 15), None],
[(16, 1000), invalid_range_err],
[(16, 1000), invalid_range_err],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "middleName"
invalid_range_err = "string length must be between 5 and inf"
# tests represent parameters, text_len, expected_output_error
tests = [
[2, invalid_range_err],
[3, invalid_range_err],
[(0, 4), invalid_range_err],
[5, None],
[(5, 15), None],
[(5, 15), None],
[(15, 1000), None],
[(15, 1000), None],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "lastName"
invalid_range_err = ""
# tests represent parameters, text_len, expected_output_error
tests = [
[2, None],
[3, None],
[(0, 4), None],
[5, None],
[(5, 15), None],
[(5, 15), None],
[(15, 1000), None],
[(15, 1000), None],
]
run_tests_for_param(modif_param, tests, pre_func)
def test_string_accept():
modif_param = "team"
invalid_value_error = (
"value must be one from the list ['red', 'blue', 'yellow', 'green', 'orange']"
)
tests = [
["red", None],
["blue", None],
["Green", invalid_value_error],
["iojoidas", invalid_value_error],
["", invalid_value_error],
]
run_tests_for_param(modif_param, tests)
modif_param = "acceptTermsAndConditions"
invalid_value_error = "value should be 'on', or the field should not be submitted"
tests = [
["on", None],
[None, None],
["avcdscs", invalid_value_error],
["", invalid_value_error],
]
run_tests_for_param(modif_param, tests)
modif_param = "someEdgeCase"
invalid_value_error = "value should be 'on'"
tests = [
["on", None],
["avcdscs", invalid_value_error],
["", invalid_value_error],
[None, invalid_value_error],
]
run_tests_for_param(modif_param, tests)
def test_int_limits():
def pre_func(val):
if type(val) != tuple:
return val
return random.randint(*val)
# tests are run on the modif_param
modif_param = "age"
invalid_range_err = "value must be between 18.0 and 99.0"
# tests represent parameters, text_len, expected_output_error
tests = [
[2, invalid_range_err],
[3, invalid_range_err],
[-4, invalid_range_err],
[-7, invalid_range_err],
[(-1000, 17), invalid_range_err],
[18, None], # edge case
[(18, 99), None],
[(18, 99), None],
[99, None], # edge case
[(100, 1000), invalid_range_err],
[(100, 1000), invalid_range_err],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "height"
invalid_range_err = "value must be between 1.0 and inf"
# tests represent parameters, text_len, expected_output_error
tests = [
[1, None], # edge case
[2, None],
[3, None],
[-4, invalid_range_err],
[-7, invalid_range_err],
[(-10000, 0), invalid_range_err],
[(15, 99), None],
[(15, 99), None],
[99, None],
[(100, 10000), None],
[(100, 10000), None],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "someNegativeFloat"
invalid_range_err = "value must be between -inf and 0.0"
# tests represent parameters, text_len, expected_output_error
tests = [
[0, None], # edge case
[(-10000, 0), None],
[(-10000, 0), None],
[(100, 10000), invalid_range_err],
[(100, 10000), invalid_range_err],
]
run_tests_for_param(modif_param, tests, pre_func)
def test_bad_syntax():
bad_syntax_1 = """
middleName : str/lenlim(5, inf)/optional
# bad syntax over here, end bracket is missing
firstName : str/lenlim(5, 15
lastName : str/optional
email : str
password : str/lenlim(8, 15)
phone : str/lenlim(8, 15)
age : int/lim(18, 99)
height : float/lim(1, inf)/optional
someNegativeFloat : float/optional/lim(-inf, 0)
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_1)
bad_syntax_2 = """
# bad syntax over here, 3 parameters instead of 2
firstName : str/lenlim(5, 15, 56)
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
password : str/lenlim(8, 15)
phone : str/lenlim(8, 15)
age : int/lim(18, 99)
height : float/lim(1, inf)/optional
someNegativeFloat : float/optional/lim(-inf, 0)
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_2)
bad_syntax_3 = """
# bad syntax over here, 1 parameter instead of 2
firstName : str/lenlim(5)
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_3)
bad_syntax_4 = """
# bad parameter name here
firstName : str/blablabla
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_4)
bad_syntax_5 = """
# bad parameter name here
firstName : str/accept([,])
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_5)
bad_syntax_6 = """
# bad parameter name here
firstName : str/accept([abc)
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_6)
bad_syntax_7 = """
# bad parameter name here
firstName : str/accept(["abc'])
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_7)
bad_syntax_8 = """
# bad parameter name here
firstName : str/accept(["abc", 124])
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_8)
| python |
import hashlib
string1 = 'Teste inicial'.encode('utf-8')
string2 = 'Teste inicial'.encode('utf-8')
hash1 = hashlib.new('ripemd160')
hash1.update(string1)
hash2 = hashlib.new('ripemd160')
hash2.update(string2)
print("-" * 60)
print(hash1.hexdigest())
print(hash2.hexdigest())
if hash1.digest() == hash2.digest():
print("\nA string 1 é igual a string 2")
else:
print("\nA string 1 é diferente a string 2") | python |
from django.conf.urls import url
import lessons.views
urlpatterns = (
url(r'^create/(?P<course_id>\d+)$', lessons.views.schedule_create_page,
name="lessons.views.schedule_create_page"),
url(r'^edit/(?P<lesson_id>\d+)$', lessons.views.schedule_edit_page,
name="lessons.views.schedule_edit_page"),
)
| python |
import warnings
import numpy as np
from skimage.restoration import denoise_wavelet
def apply_rolling_window(mainchunk: np.array, meterchunk: np.array, window_size: int):
if not window_size:
raise Warning('Window size is not defined.')
indexer = np.arange(window_size)[None, :] + np.arange(len(mainchunk) - window_size + 1)[:, None]
mainchunk = mainchunk[indexer]
meterchunk = meterchunk[window_size - 1:]
return mainchunk, meterchunk
def apply_midpoint_window(mainchunk: np.array, meterchunk: np.array, window_size: int):
if not window_size:
raise Warning('Window size is not defined.')
indexer = np.arange(window_size)[None, :] + np.arange(len(mainchunk) - window_size + 1)[:, None]
mainchunk = mainchunk[indexer]
midpoint = window_size // 2
meterchunk = meterchunk[midpoint: len(mainchunk) + midpoint]
return mainchunk, meterchunk
def apply_sequence_to_subsequence(mainchunk: np.array, meterchunk: np.array, sequence_window: int,
subsequence_window: int):
if not sequence_window:
raise Warning('Sequence window is not defined.')
if not subsequence_window:
warnings.warn('Subsequence window is not defined, so 20% of the sequence window is used.')
subsequence_window = int(sequence_window * 0.2)
upper_limit = (sequence_window + subsequence_window) // 2
lower_limit = (sequence_window - subsequence_window) // 2
sequence_indexer = np.arange(sequence_window)[None, :] + np.arange(len(mainchunk) - sequence_window + 1)[:, None]
mainchunk = mainchunk[sequence_indexer]
subsequence_indexer = np.arange(sequence_window)[lower_limit: upper_limit] + np.arange(len(mainchunk))[:, None]
meterchunk = meterchunk[subsequence_indexer]
return mainchunk, meterchunk
def apply_sequence_to_sequence(mainchunk: np.array, meterchunk: np.array, sequence_window: int):
if not sequence_window:
raise Warning('Sequence window is not defined.')
sequence_indexer = np.arange(sequence_window)[None, :] + np.arange(len(mainchunk) - sequence_window + 1)[:, None]
mainchunk = mainchunk[sequence_indexer]
meterchunk = meterchunk[sequence_indexer]
return mainchunk, meterchunk
def create_batches(mainchunk: np.array, meterchunk: np.array, seq_len: int):
ix = mainchunk.index
additional = seq_len - (len(ix) % seq_len)
mainchunk = np.append(mainchunk, np.zeros(additional))
meterchunk = np.append(meterchunk, np.zeros(additional))
mainchunk = np.reshape(mainchunk, (int(len(mainchunk) / seq_len), seq_len, 1))
meterchunk = np.reshape(meterchunk, (int(len(meterchunk) / seq_len), seq_len, 1))
mainchunk = np.transpose(mainchunk, (0, 2, 1))
meterchunk = np.transpose(meterchunk, (0, 2, 1))
return mainchunk, meterchunk
def replace_nans(mainchunk: np.array, meterchunk: np.array):
mainchunk.fillna(0, inplace=True)
meterchunk.fillna(0, inplace=True)
return mainchunk, meterchunk
def replace_nans_interpolation(mainchunk: np.array, meterchunk: np.array):
mainchunk.interpolate(method='linear', limit_direction='forward', inplace=True)
meterchunk.interpolate(method='linear', limit_direction='forward', inplace=True)
return mainchunk, meterchunk
def normalize_chunks(mainchunk: np.array, meterchunk: np.array, mmax: float):
if mmax is None:
mmax = mainchunk.max()
mainchunk = mainchunk / mmax
meterchunk = meterchunk / mmax
return mainchunk, meterchunk
def standardize_chunks(mainchunk: np.array, meterchunk: np.array, mains_mean: float,
mains_std: float, meter_mean: float, meter_std: float):
if mains_mean is None and mains_std is None:
mains_mean = mainchunk.mean()
mains_std = mainchunk.std()
if meter_mean is None and meter_std is None:
meter_mean = meterchunk.mean()
meter_std = meterchunk.std()
mainchunk = (mainchunk - mains_mean) / mains_std
meterchunk = (meterchunk - meter_mean) / meter_std
return mainchunk, meterchunk
def is_bad_chunk(chunk: np.array):
return (chunk == 0).all()
def align_chunks(mainchunk: np.array, meterchunk: np.array):
mainchunk = mainchunk[~mainchunk.index.duplicated()]
meterchunk = meterchunk[~meterchunk.index.duplicated()]
ix = mainchunk.index.intersection(meterchunk.index)
mainchunk = mainchunk[ix]
meterchunk = meterchunk[ix]
return mainchunk, meterchunk
def replace_with_zero_small_values(mainchunk: np.array, meterchunk: np.array, threshold: int):
mainchunk[mainchunk < threshold] = 0
meterchunk[meterchunk < threshold] = 0
return mainchunk, meterchunk
def denoise(mainchunk: np.array, meterchunk: np.array):
mainchunk = denoise_wavelet(mainchunk, wavelet='haar', wavelet_levels=3)
meterchunk = denoise_wavelet(meterchunk, wavelet='haar', wavelet_levels=3)
return mainchunk, meterchunk
def add_gaussian_noise(mainchunk: np.array, noise_factor: float = 0.1):
noise = noise_factor * np.random.normal(0, 1, mainchunk.shape)
mainchunk = mainchunk + noise
return mainchunk
| python |
#pylint: disable=line-too-long,broad-except
"""Calculates total time from calendar events, grouped by an event attribute.
Usage:
calcatime -c <calendar_uri> [-d <domain>] -u <username> -p <password> <timespan>... [--by <event_attr>] [--include-zero] [--json] [--debug]
Options:
-h, --help Show this help
-V, --version Show command version
-c <calendar_uri> Calendar provider:server uri
↓ See Calendar Providers
-d <domain> Domain name
-u <username> User name
-p <password> Password
<timespan> Only include events in given time span
↓ See Timespan Options
--by=<event_attr> Group total times by given event attribute
↓ See Event Attributes
--include-zero Include zero totals in output
--json Output data to json; default is csv
--debug Extended debug logging
Examples:
$ calcatime -c "office365" -u "[email protected]" -p $password last week --json
Calendar Providers:
Microsoft Exchange: exchange:<server url>
Office365: office365[:<server url>]
default server url = outlook.office365.com
Timespan Options:
today
yesterday
week (current)
month (current)
year (current)
monday | mon
tuesday | tue
wednesday | wed
thursday | thu
friday | fri
saturday | sat
sunday | sun
last (can be used multiple times e.g. last last week)
next (can be used multiple times e.g. next next week)
Event Grouping Attributes:
category[:<regex_pattern>]
title[:<regex_pattern>]
"""
# python native modules
import sys
import re
import json
import calendar
from enum import Enum
from datetime import datetime, timedelta
from collections import namedtuple
from typing import Dict, List, Optional, Tuple, Iterator
# third-party modules
from docopt import docopt
__version__ = '0.5'
# Configs ---------------------------------------------------------------------
# default format used for outputting datetime values
DATETIME_FORMAT = '%Y-%m-%d'
# Data types ------------------------------------------------------------------
# tuple for command line arguments
Configs = namedtuple('Configs', [
'calendar_provider',
'username',
'password',
'range_start',
'range_end',
'domain',
'grouping_attr',
'include_zero',
'output_type'
])
# tuple for holding calendar event properties
# irrelevant of the calendar provider
CalendarEvent = namedtuple('CalendarEvent', [
'title',
'start',
'end',
'duration',
'categories'
])
# tuple for calendar provider configs
CalendarProvider = namedtuple('CalendarProvider', [
'name',
'prefix',
'server',
'supports_categories'
])
# calendar providers enum
class CalendarProviders(Enum):
"""Supported calendar providers"""
# microsoft exchange server, server url must be provided
Exchange: CalendarProvider = \
CalendarProvider(name='Microsoft Exchange',
prefix='exchange',
server='',
supports_categories=True)
# microsoft Office365, default url is provided
Office365: CalendarProvider = \
CalendarProvider(name='Office365',
prefix='office365',
server='outlook.office365.com',
supports_categories=True)
# Functions -------------------------------------------------------------------
def get_providers() -> List[CalendarProvider]:
"""Get list of supported providers."""
return [x.value for x in CalendarProviders]
def get_provider(connection_string: str) -> CalendarProvider:
"""Get provider configs from connection string."""
# determine calendar provider
if connection_string:
connstr = connection_string.lower()
for calprov in get_providers():
if calprov.prefix in connstr:
# grab server url from connection string
calserver = None
match = \
re.search(f'{calprov.prefix}:(.+)?', connstr, re.IGNORECASE)
if match:
calserver = match.group(1)
if not calprov.server and not calserver:
raise Exception('Calendar provider server url is required.')
# create provider configs
return CalendarProvider(
name=calprov.name,
prefix=calprov.prefix,
server=calserver or calprov.server,
supports_categories=calprov.supports_categories
)
raise Exception('Calendar provider is not supported.')
def parse_configs() -> Configs:
"""Parse command line arguments and return configs"""
# process command line args
args = docopt(__doc__, version='calcatime {}'.format(__version__))
# extended debug?
if args.get('--debug'):
import logging
from exchangelib.util import PrettyXmlHandler
logging.basicConfig(level=logging.DEBUG, handlers=[PrettyXmlHandler()])
# determine calendar provider
calprovider = get_provider(args.get('-c', None))
# determine credentials
username = args.get('-u', None)
password = args.get('-p', None)
if not username or not password:
raise Exception('Calendar access credentials are required.')
# get domain if provided
domain = args.get('-d', None)
# determine grouping attribute, set defaults if not provided
grouping_attr = args.get('--by', None)
if not grouping_attr:
if calprovider.supports_categories:
grouping_attr = 'category'
else:
grouping_attr = 'title'
# determine if zeros need to be included
include_zero = args.get('--include-zero', False)
# determine output type, defaults to csv
json_out = args.get('--json', False)
# determine requested time span
start, end = parse_timerange_tokens(
args.get('<timespan>', [])
)
return Configs(
calendar_provider=calprovider,
username=username,
password=password,
range_start=start,
range_end=end,
domain=domain,
grouping_attr=grouping_attr,
include_zero=include_zero,
output_type='json' if json_out else 'csv'
)
def parse_timerange_tokens(timespan_tokens: List[str]) -> Tuple[datetime, datetime]:
"""Return start and end of the range specified by tokens."""
# collect today info
today = datetime.today()
today_start = datetime(today.year, today.month, today.day, 0, 0)
today_end = today_start + timedelta(days=1)
# calculate this week start date
week_start = today_start - timedelta(days=today_start.weekday())
# count the number of times 'last' token is provided
# remove 7 days for each count
last_count = timespan_tokens.count('last')
last_offset = -7 * last_count
# count the number of times 'next' token is provided
# add 7 days for each count
next_count = timespan_tokens.count('next')
next_offset = 7 * next_count
offset = last_offset + next_offset
# now process the known tokens
if 'today' in timespan_tokens:
return (today_start + timedelta(days=offset),
today_end + timedelta(days=offset))
elif 'yesterday' in timespan_tokens:
return (today_start + timedelta(days=-1 + offset),
today_end + timedelta(days=-1 + offset))
elif 'week' in timespan_tokens:
return (week_start + timedelta(days=offset),
week_start + timedelta(days=7 + offset))
elif 'month' in timespan_tokens:
# wrap the month index across year boundaries so December does not overflow
month_offset = today.month - 1 + (-last_count + next_count)
year_number = today.year + month_offset // 12
month_index = month_offset % 12 + 1
month_start = datetime(year_number, month_index, 1)
if month_index == 12:
month_end = datetime(year_number + 1, 1, 1) + timedelta(-1)
else:
month_end = datetime(year_number, month_index + 1, 1) + timedelta(-1)
return (month_start, month_end)
elif 'year' in timespan_tokens:
year_number = today.year + (-last_count + next_count)
year_start = datetime(year_number, 1, 1)
year_end = datetime(year_number + 1, 1, 1) + timedelta(-1)
return (year_start, year_end)
elif 'decade' in timespan_tokens:
raise NotImplementedError()
elif 'century' in timespan_tokens:
raise NotImplementedError()
elif 'millennium' in timespan_tokens:
raise NotImplementedError()
# process week days
for idx, day_names in enumerate(
zip(map(str.lower, list(calendar.day_name)),
map(str.lower, list(calendar.day_abbr)))):
if any(x in timespan_tokens for x in day_names):
range_start = week_start + timedelta(days=idx + offset)
range_end = week_start + timedelta(days=idx + 1 + offset)
return (range_start, range_end)
raise Exception('Can not determine time span.')
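# Illustrative token-to-range mapping (examples assumed, not from the original help text):
#   ['today']                -> midnight today .. midnight tomorrow
#   ['yesterday']            -> the same one-day span shifted back one day
#   ['last', 'week']         -> Monday .. Monday of the previous calendar week
#   ['next', 'next', 'mon']  -> the Monday two weeks ahead, as a one-day range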
def collect_events(configs: Configs) -> List[CalendarEvent]:
"""Use calendar provider API to collect events within given range."""
# collect events from calendar
events: List[CalendarEvent] = []
provider = configs.calendar_provider
# if provider uses exchange api:
if provider.name == CalendarProviders.Exchange.name \
or provider.name == CalendarProviders.Office365.name:
events = get_exchange_events(
server=provider.server,
domain=configs.domain,
username=configs.username,
password=configs.password,
range_start=configs.range_start,
range_end=configs.range_end
)
# otherwise the api is not implemented
else:
raise Exception('Calendar provider API is not yet implemented.')
return events
def get_exchange_events(server: str,
domain: Optional[str],
username: str,
password: str,
range_start: datetime,
range_end: datetime) -> List[CalendarEvent]:
"""Connect to exchange calendar server and get events within range."""
# load exchange module if necessary
from exchangelib import Credentials, Configuration, Account, DELEGATE
from exchangelib import EWSDateTime, EWSTimeZone
# setup access
full_username = r'{}\{}'.format(domain, username) if domain else username
account = Account(
primary_smtp_address=username,
config=Configuration(server=server,
credentials=Credentials(full_username, password)),
autodiscover=False,
access_type=DELEGATE
)
# collect event information within given time range
events: List[CalendarEvent] = []
localzone = EWSTimeZone.localzone()
local_start = localzone.localize(EWSDateTime.from_datetime(range_start))
local_end = localzone.localize(EWSDateTime.from_datetime(range_end))
for item in account.calendar.filter( ##pylint: disable=no-member
start__range=(local_start, local_end)).order_by('start'):
events.append(
CalendarEvent(
title=item.subject,
start=item.start,
end=item.end,
duration=(item.end - item.start).seconds / 3600,
categories=item.categories
))
return events
def group_events(events: List[CalendarEvent],
configs: Configs)-> Dict[str, List[CalendarEvent]]:
"""Group events by given attribute."""
# group events
grouped_events: Dict[str, List[CalendarEvent]] = {}
group_attr = configs.grouping_attr
if events:
if group_attr.startswith('category:'):
_, pattern = group_attr.split(':')
if pattern:
grouped_events = \
group_by_pattern(events, pattern, attr='category')
elif group_attr == 'category':
grouped_events = \
group_by_category(events)
elif group_attr.startswith('title:'):
_, pattern = group_attr.split(':')
if pattern:
grouped_events = \
group_by_pattern(events, pattern, attr='title')
elif group_attr == 'title':
grouped_events = \
group_by_title(events)
return grouped_events
def group_by_title(
events: List[CalendarEvent]) -> Dict[str, List[CalendarEvent]]:
"""Group given events by event title."""
grouped_events: Dict[str, List[CalendarEvent]] = {}
for event in events:
if event.title in grouped_events:
grouped_events[event.title].append(event)
else:
grouped_events[event.title] = [event]
return grouped_events
def group_by_category(events: List[CalendarEvent],
unknown_group='---') -> Dict[str, List[CalendarEvent]]:
"""Group given events by event category."""
grouped_events: Dict[str, List[CalendarEvent]] = {}
for event in events:
if event.categories:
for cat in event.categories:
if cat in grouped_events:
grouped_events[cat].append(event)
else:
grouped_events[cat] = [event]
else:
if unknown_group in grouped_events:
grouped_events[unknown_group].append(event)
else:
grouped_events[unknown_group] = [event]
return grouped_events
def group_by_pattern(events: List[CalendarEvent],
pattern: str,
attr: str = 'title') -> Dict[str, List[CalendarEvent]]:
"""Group given events by given regex pattern and target attribute."""
grouped_events: Dict[str, List[CalendarEvent]] = {}
for event in events:
target_tokens = []
if attr == 'title':
target_tokens.append(event.title)
elif attr == 'category':
target_tokens = event.categories
if target_tokens:
for token in target_tokens or []:
match = re.search(pattern, token, flags=re.IGNORECASE)
if match:
matched_token = match.group()
if matched_token in grouped_events:
grouped_events[matched_token].append(event)
else:
grouped_events[matched_token] = [event]
break
return grouped_events
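# Illustrative sketch (hypothetical events, not part of the original module):
#   for events titled 'PROJ-12 design', 'PROJ-7 review' and 'lunch',
#   group_by_pattern(events, r'PROJ-\d+', attr='title') returns
#   {'PROJ-12': [...], 'PROJ-7': [...]}; 'lunch' never matches and is dropped.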
def cal_total_duration(
grouped_events: Dict[str, List[CalendarEvent]]) -> Dict[str, float]:
"""Calculate total duration of events in each group."""
hours_per_group: Dict[str, float] = {}
for event_group, events in grouped_events.items():
total_duration = 0
for event in events:
total_duration += event.duration
hours_per_group[event_group] = total_duration
return hours_per_group
def calculate_and_dump(grouped_events: Dict[str, List[CalendarEvent]],
configs: Configs):
"""Calculate totals and dump event data."""
total_durations = cal_total_duration(grouped_events)
calculated_data: List[Dict] = []
for event_group in grouped_events:
if not configs.include_zero and total_durations[event_group] == 0:
continue
calculated_data.append({
'start': configs.range_start.strftime(DATETIME_FORMAT),
'end': configs.range_end.strftime(DATETIME_FORMAT),
'group': event_group,
'duration': total_durations[event_group]
})
if configs.output_type == 'json':
print(json.dumps(calculated_data))
elif configs.output_type == 'csv':
print('"start","end","group","duration"')
for data in calculated_data:
print(','.join([
'"{}"'.format(data['start']),
'"{}"'.format(data['end']),
'"{}"'.format(data['group']),
str(data['duration'])
]))
# Main ------------------------------------------------------------------------
def main():
"""Parse arguments, parse time span, get and organize events, dump data."""
# get configs
configs = parse_configs()
# collect events
events = collect_events(configs)
# groups events by attribute
grouped_events = group_events(events, configs)
# prepare and dump data
calculate_and_dump(grouped_events, configs)
if __name__ == '__main__':
main()
| python |
"""
db_fun.py
This module contains helper functions for database entry creation.
"""
from models import Resource, Category
from datetime import datetime
def get_or_create(session, model, **kwargs):
"""
Determines if a given record already exists in the database.
Args:
session: The database session.
model: The model for the record.
**kwargs: The properties to set on the model. The specified
properties are used to determine whether
the model already exists.
Returns:
Two values. The first value is a boolean
indicating if this item is a new record. The second
value will be the created/retrieved model.
"""
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return False, instance
else:
instance = model(**kwargs)
return True, instance
def add_get_or_create(session, model, **kwargs):
"""
Gets or creates an record based on if it already exists.
If it does not already exist, it will be created.
Args:
session: The database session.
model: The model to get or create.
**kwargs: The properties to set on the model. The specified
properties are used to determine whether
the model already exists.
Returns:
Two values. The first value is a boolean
indicating if this item is a new record. The second
value will be the created/retrieved model.
"""
new_record, record = get_or_create(session, model, **kwargs)
if new_record:
session.add(record)
return new_record, record
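# Illustrative usage sketch (hypothetical session and category name, not from
# the original module):
#   is_new, category = add_get_or_create(session, Category, name='Housing')
#   # is_new is True only when no Category named 'Housing' existed yet;
#   # either way `category` is tracked by the session and safe to use.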
def try_add_categories(session, record, category_names, create_categories=True):
"""
Attempts to add the list of provided categories to the resource.
Args:
session: The current database context.
record: The resource to update.
category_names: The list of category names to add
create_categories: If true, will create categories if they don't already exist.
If false, will skip over listed categories that don't already exist.
Defaults to true.
"""
for category_name in category_names:
normalized_name = category_name.strip()
# Are we allowing categories to be created?
if create_categories:
# Try to look up the name of the provided category,
# get/create as necessary
new_category, category_record = add_get_or_create(session,
Category,
name=normalized_name)
else:
# Only look up the category - return None
# if we don't have one
category_record = session.query(Category). \
filter(Category.name == normalized_name). \
first()
# Make sure we got something back and we're not double-adding
if category_record and not category_record in record.categories:
record.categories.append(category_record)
def get_or_create_resource(session, rad_record, lazy=True, create_categories=True):
"""
Checks to see if a resource already exists in the database
and adds it if it does not exist (or is forced to by use of
the lazy argument).
Args:
session: The current database session.
rad_record: The RadRecord to be added.
lazy: If false, forces the record to be added even if it is a duplicate.
Defaults to true.
create_categories: If true, will create categories if they don't already exist.
If false, will skip over listed categories that don't already exist.
Defaults to true.
Returns:
Two values. The first value is a boolean
indicating if a new record was created. The second
value will be the created/updated model.
"""
# Just create a new record always if we're lazy-loading. This avoids
# weirdness in which we're partially updating an item.
if lazy:
new_record = True
record = Resource(name=rad_record.name.strip())
session.add(record)
else:
new_record, record = get_or_create(session, Resource, name=rad_record.name.strip())
record.last_updated = datetime.utcnow()
if new_record:
record.date_created = datetime.utcnow()
if new_record or not lazy:
# See if we have just a normal address field - if not,
# manually construct one by joining all available
# fields with commas
new_address = ''
if hasattr(rad_record, 'address') and \
rad_record.address is not None and \
rad_record.address != '' and \
not rad_record.address.isspace():
new_address = rad_record.address.strip()
else:
new_address = ", ".join(a.strip() for a in [rad_record.street,
rad_record.city, rad_record.state,
rad_record.zipcode, rad_record.country]
if a is not None and a != '' and not a.isspace())
# Address issue 131 - if we're updating an existing
# record, and are changing the address (using a lowercase comparison),
# invalidate the existing geocoding information.
if not new_record and \
record.address is not None and \
record.address.lower() != new_address.lower():
record.latitude = None
record.longitude = None
record.location = None
# Now set the new address
if new_address != '' and not new_address.isspace():
record.address = new_address
else:
record.address = None
# Try to parse out the date_verified field if it's provided
if rad_record.date_verified is not None and \
len(rad_record.date_verified) > 0 and \
not rad_record.date_verified.isspace():
# Try to parse it out using 'YYYY-MM-DD'
try:
record.date_verified = datetime.strptime(rad_record.date_verified,
'%Y-%m-%d').date()
except ValueError:
# Parsing error, clear it out
record.date_verified = None
else:
# Not provided - clear it out
record.date_verified = None
# Copy over all the other fields verbatim
record.organization = rad_record.organization
record.description = rad_record.description
record.email = rad_record.email
record.phone = rad_record.phone
record.fax = rad_record.fax
record.url = rad_record.url
record.hours = rad_record.hours
record.source = rad_record.source
record.npi = rad_record.npi
record.notes = rad_record.notes
record.visible = rad_record.visible
# Do we have a list of category names?
# Failing that, do we have a single category name?
if hasattr(rad_record, 'category_names') and \
rad_record.category_names is not None and \
len(rad_record.category_names) > 0:
# Use the list of category names
try_add_categories(session, record, rad_record.category_names, create_categories)
elif hasattr(rad_record, 'category_name') and \
rad_record.category_name is not None and \
not rad_record.category_name.isspace():
# Use the single category name
try_add_categories(session, record, [rad_record.category_name], create_categories)
session.add(record)
# Flush the session because otherwise we won't pick up
# duplicates with UNIQUE constraints (such as in category names)
# until we get an error trying to commit such duplicates
# (which is bad)
session.flush()
return new_record, record
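# Illustrative usage sketch (assumptions: "session" is an open database session and
# "rad_record" is a populated RadRecord; neither is defined in this snippet):
#
#     is_new, resource = get_or_create_resource(session, rad_record, lazy=False)
#     if is_new:
#         session.commit()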
| python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import math
import re
import signal
from typing import Dict, List, Tuple
import numpy as np
import psutil
MEMORY_SIZE_UNITS = {"K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40}
# we use 4 bytes for block size, this means each block can contain
# 4294967296 records
BLOCK_SIZE_BIT = 32
def get_node_address() -> str:
"""
Get the ip address used in ray.
"""
pids = psutil.pids()
for pid in pids:
try:
proc = psutil.Process(pid)
# HACK: Workaround for UNIX idiosyncrasy
# Normally, cmdline() is supposed to return the argument list.
# But in some cases (such as when setproctitle is called),
# an arbitrary string resembling a command-line is stored in
# the first argument.
# Explanation: https://unix.stackexchange.com/a/432681
# More info: https://github.com/giampaolo/psutil/issues/1179
for arglist in proc.cmdline():
for arg in arglist.split(" "):
if arg.startswith("--node-ip-address"):
addr = arg.split("=")[1]
return addr
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
raise Exception("can't find any ray process")
def register_exit_handler(func):
atexit.register(func)
signal.signal(signal.SIGTERM, func)
signal.signal(signal.SIGINT, func)
def random_split(df, weights, seed=None):
"""
Random split the spark DataFrame or koalas DataFrame into given part
:param df: the spark DataFrame or koalas DataFrame
:param weights: list of doubles as weights with which to split the df.
Weights will be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
"""
# convert to Spark DataFrame
df, is_spark_df = convert_to_spark(df)
splits = df.randomSplit(weights, seed)
if is_spark_df:
return splits
else:
# convert back to koalas DataFrame
import databricks.koalas as ks # pylint: disable=C0415
return [ks.DataFrame(split) for split in splits]
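# Illustrative usage sketch (assumes "df" is an existing Spark or koalas DataFrame;
# the 80/20 weights and the seed are made up):
#
#     train_df, test_df = random_split(df, [0.8, 0.2], seed=42)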
def _df_helper(df, spark_callback, koalas_callback):
try:
import pyspark # pylint: disable=C0415
except Exception:
pass
else:
if isinstance(df, pyspark.sql.DataFrame):
return spark_callback(df)
try:
import databricks.koalas as ks # pylint: disable=C0415
except Exception:
pass
else:
if isinstance(df, ks.DataFrame):
return koalas_callback(df)
raise Exception(f"The type: {type(df)} is not supported, only support "
"pyspark.sql.DataFrame and databricks.koalas.DataFrame")
def df_type_check(df):
"""
Check whether the df is spark DataFrame or koalas DataFrame.
:return True for spark DataFrame or Koalas DataFrame.
:raise Exception when it is neither spark DataFrame nor Koalas DataFrame.
"""
return _df_helper(df, lambda d: True, lambda d: True)
def convert_to_spark(df):
"""
Do nothing if the df is spark DataFrame, convert to spark DataFrame if it is
koalas DataFrame. Raise Exception otherwise.
:return: a pair of (converted df, whether it is spark DataFrame)
"""
return _df_helper(df, lambda d: (d, True), lambda d: (d.to_spark(), False))
def parse_memory_size(memory_size: str) -> int:
"""
Parse the human readable memory size into bytes.
Adapt from: https://stackoverflow.com/a/60708339
:param memory_size: human readable memory size
:return: convert to int size
"""
memory_size = memory_size.strip().upper()
if re.search(r"B", memory_size):
# discard "B"
memory_size = re.sub(r"B", "", memory_size)
try:
return int(memory_size)
except ValueError:
pass
global MEMORY_SIZE_UNITS
if not re.search(r" ", memory_size):
memory_size = re.sub(r"([KMGT]+)", r" \1", memory_size)
number, unit_index = [item.strip() for item in memory_size.split()]
return int(float(number) * MEMORY_SIZE_UNITS[unit_index])
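# Illustrative examples of the accepted formats (results follow the rules above):
#
#     parse_memory_size("512B")  # -> 512
#     parse_memory_size("10KB")  # -> 10 * 2**10 = 10240
#     parse_memory_size("1.5G")  # -> int(1.5 * 2**30)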
def divide_blocks(
blocks: List[int],
world_size: int,
shuffle: bool = False,
shuffle_seed: int = None) -> Dict[int, List[int]]:
"""
Divide the blocks into world_size partitions, and return the divided block indexes for the
given work_rank
:param blocks: the blocks and each item is the given block size
:param world_size: total world size
:param shuffle: whether shuffle the blocks before divide
:param shuffle_seed: the shuffle seed
:return: a dict, the key is the world rank, and the value is a list of pair of block index
and the samples selected in that block
"""
if len(blocks) < world_size:
raise Exception("do not have enough blocks to divide")
results = {}
# number of blocks per rank
num_blocks_per_rank = int(math.ceil(len(blocks) * 1.0 / world_size))
# number of samples per rank
num_samples_per_rank = int(math.ceil(sum(blocks) * 1.0 / world_size))
# total number of blocks
total_num_blocks = num_blocks_per_rank * world_size
# global block indexes
global_indexes = list(range(len(blocks)))
# add extra blocks to make it evenly divisible
if len(global_indexes) != total_num_blocks:
global_indexes += global_indexes[: (total_num_blocks - len(global_indexes))]
assert len(global_indexes) == total_num_blocks
if shuffle_seed:
np.random.seed(shuffle_seed)
else:
np.random.seed(0)
if shuffle:
np.random.shuffle(global_indexes)
def select(index: int, current_size: int, selected: List[Tuple[int, int]]) -> int:
block_size = blocks[index]
tmp = current_size + block_size
if tmp < num_samples_per_rank:
selected.append((index, block_size))
current_size = tmp
elif tmp >= num_samples_per_rank:
selected.append((index, (num_samples_per_rank - current_size)))
current_size = num_samples_per_rank
return current_size
for rank in range(world_size):
indexes = global_indexes[rank: total_num_blocks: world_size]
assert len(indexes) == num_blocks_per_rank
samples_cur_rank = 0
selected_indexes = []
for i in indexes:
samples_cur_rank = select(i, samples_cur_rank, selected_indexes)
if samples_cur_rank == num_samples_per_rank:
break
while samples_cur_rank < num_samples_per_rank:
index = np.random.choice(global_indexes, size=1)[0]
samples_cur_rank = select(index, samples_cur_rank, selected_indexes)
assert samples_cur_rank == num_samples_per_rank
results[rank] = selected_indexes
return results
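# Illustrative sketch of the result shape (block sizes and world size are made up):
#
#     divide_blocks(blocks=[4, 3, 5, 2], world_size=2, shuffle=False)
#
# returns a dict mapping each rank to a list of (block_index, samples_taken) pairs
# whose sample counts add up to ceil(sum(blocks) / world_size) = 7 per rank.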
| python |
#!/usr/bin/env python3
import sys
from setuptools import setup, find_packages
from urllib.parse import urlparse
with open('requirements.txt', 'r') as f:
install_requires = []
dependency_links = []
append_version = '-' + str(sys.maxsize)
requirements = [ line.strip() for line in f ]
for requirement in requirements:
name = urlparse(requirement)
if name.scheme and name.netloc:
install_requires.append(name.fragment.replace('egg=', ''))
dependency_links.append(requirement + append_version)
else:
install_requires.append(requirement)
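# Illustrative example of the parsing above (URL and package name are made up):
# a requirements.txt line such as
#     git+https://example.com/repo.git#egg=somepkg
# adds "somepkg" to install_requires and
# "git+https://example.com/repo.git#egg=somepkg-<sys.maxsize>" to dependency_links,
# while a plain line such as "requests" is passed through unchanged.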
setup(name="kochira",
version="0.0",
description="kochira",
author="",
author_email="",
url="",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite="kochira",
install_requires=install_requires,
dependency_links=dependency_links,
entry_points="""\
[console_scripts]
kochira = kochira:main
"""
)
| python |
from flask import session, request
from flask_restful import Resource, reqparse, inputs, abort
from api.common.database import database
from api.common.utils import checkTag, checkTime, checkTel
import json
import requests
'''
### sendOfflineCapsule
Use this method to send offline capsule.
HTTP Request Method: **POST**
| Field | Type | Required | Description |
|---------------|---------|----------|----------------------------------------------------------------|
| sender_name | String | Yes | Sender's name. |
| sender_tel | String | Yes | Sender's telephone number. |
| receiver_name | String | Yes | Receiver's name. |
| receiver_tel | String | Yes | Receiver's telephone number. |
| receiver_addr | String | Yes | Receiver's address. |
| capsule_tag | String | Yes | The tag ID attached on the envelope. |
| period | String | Yes | The period of time capsule. Must be `half-year` or `one-year`. |
| seal | Boolean | Yes | Whether the seal is required. |
'''
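# Illustrative request body for this endpoint (all field values below are made up):
#
#     {
#         "sender_name": "Alice", "sender_tel": "13800000000",
#         "receiver_name": "Bob", "receiver_tel": "13900000000",
#         "receiver_addr": "No. 1 Example Road",
#         "capsule_tag": "TAG-0001",
#         "period": "half-year",
#         "seal": true
#     }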
parser = reqparse.RequestParser()
parser.add_argument('sender_name', type = str, required = True)
parser.add_argument('sender_tel', type = str, required = True)
parser.add_argument('receiver_name', type = str, required = True)
parser.add_argument('receiver_tel', type = str, required = True)
parser.add_argument('receiver_addr', type = str, required = True)
parser.add_argument('capsule_tag', type = str, required = True)
parser.add_argument('period', type = str, required = True, choices = ('half-year', 'one-year'))
parser.add_argument('seal', type = inputs.boolean, required = True)
class sendOfflineCapsule(Resource):
def post(self):
if checkTime() != 0:
abort(416, message = "Event is not ongoing.")
args = parser.parse_args()
if not checkTel(args["sender_tel"]) or not checkTel(args["receiver_tel"]):
abort(400, message = "Invalid telephone number.")
if checkTag(args["capsule_tag"]) == False:
abort(400, message = "Invalid capsule tag.")
if not database.getTagStatus(args["capsule_tag"]):
abort(409, message = "The capsule tag already exists.")
database.addOfflineCapsule(args["sender_name"], args["sender_tel"], args["receiver_name"], args["receiver_tel"], args["receiver_addr"], args["capsule_tag"], args["period"], args["seal"])
return {
"receiver_name": args["receiver_name"],
"count": database.getStatisticsByTel(args["receiver_tel"])
} | python |
""" Read a set of input files for the child oids
and generate a SQL file that queries for the master
records changed by those OID. This one uses an IN
clause instead of the simple query to test relative
performance of the two
I am using the theory that running the commands
directly from psql should yield
the highest achievable performance, since the
command line client should be well optimized.
"""
import sys
import os
def quote(str):
return "\'" + str + "\'"
MaxInItems = 500
# Process the input file line by line.
# Break it up into chunks and generate
# a psql file with a separate SELECT ... IN query
# for each chunk
def processFile(fname, fout):
fin = open(fname)
hdr = fin.readline()
buf = []
insStr = "INSERT INTO omap(chiloid, chiltbl, paroid, partbl) VALUES"
while True:
dline = fin.readline().strip()
if dline:
flds = dline.split(",")
#print("flds=", flds)
partbl = flds[0]
paroid = flds[1]
chiltbl = flds[2]
chiloid = flds[3]
buf.append(quote(chiloid))
if (len(buf) > MaxInItems) or (not dline):
if len(buf) > 0:
fout.write("SELECT DISTINCT paroid, partbl FROM omap WHERE omap.chiloid IN ( ");
sout = ", ".join(buf)
fout.write(sout)
fout.write(" );\n")
buf = []
else:
break
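# Each flushed chunk produces one statement of the form (OIDs are illustrative):
#
#     SELECT DISTINCT paroid, partbl FROM omap WHERE omap.chiloid IN ( 'oid-1', 'oid-2', ... );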
def printMsg():
print("Usage: python generateInQueries.py inFiName outFiName")
# MAIN
if len(sys.argv) < 3:
raise ValueError('not enough parameters')
foutName = sys.argv[2]
fout = open(foutName, "w")
fout.write("\\c oidmap\n\\o data/log/in_query.RESULTS.txt\n")
fnameIn = sys.argv[1]
print ("fnameIn=", fnameIn, "foutName=", foutName)
if not os.path.isfile(fnameIn):
printMsg()
raise ValueError("Could not find file " + str(fnameIn))
processFile(fnameIn, fout)
| python |
""""""
import os
import sys
import uuid
import bz2
import pickle
import traceback
import zlib
import json
from abc import ABC
from copy import copy
from typing import Any, Callable
from logging import INFO, ERROR
from datetime import datetime
from vnpy.trader.constant import Interval, Direction, Offset, Status, OrderType
from vnpy.trader.object import BarData, TickData, OrderData, TradeData
from vnpy.trader.utility import virtual, append_data, extract_vt_symbol, get_underlying_symbol
from .base import StopOrder
from vnpy.component.cta_grid_trade import CtaGrid, CtaGridTrade
from vnpy.component.cta_position import CtaPosition
from vnpy.component.cta_policy import CtaPolicy
class CtaTemplate(ABC):
"""CTA策略模板"""
author = ""
parameters = []
variables = []
# 保存委托单编号和相关委托单的字典
# key为委托单编号
# value为该合约相关的委托单
active_orders = {}
def __init__(
self,
cta_engine: Any,
strategy_name: str,
vt_symbol: str,
setting: dict,
):
""""""
self.cta_engine = cta_engine
self.strategy_name = strategy_name
self.vt_symbol = vt_symbol
self.inited = False # 是否初始化完毕
self.trading = False # 是否开始交易
self.pos = 0 # 持仓/仓差
self.entrust = 0 # 是否正在委托, 0, 无委托 , 1, 委托方向是LONG, -1, 委托方向是SHORT
self.tick_dict = {} # 记录所有on_tick传入最新tick
self.active_orders = {}
# Copy a new variables list here to avoid duplicate insert when multiple
# strategy instances are created with the same strategy class.
self.variables = copy(self.variables)
self.variables.insert(0, "inited")
self.variables.insert(1, "trading")
self.variables.insert(2, "pos")
self.variables.insert(3, "entrust")
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
@classmethod
def get_class_parameters(cls):
"""
Get default parameters dict of strategy class.
"""
class_parameters = {}
for name in cls.parameters:
class_parameters[name] = getattr(cls, name)
return class_parameters
def get_parameters(self):
"""
Get strategy parameters dict.
"""
strategy_parameters = {}
for name in self.parameters:
strategy_parameters[name] = getattr(self, name)
return strategy_parameters
def get_variables(self):
"""
Get strategy variables dict.
"""
strategy_variables = {}
for name in self.variables:
strategy_variables[name] = getattr(self, name)
return strategy_variables
def get_data(self):
"""
Get strategy data.
"""
strategy_data = {
"strategy_name": self.strategy_name,
"vt_symbol": self.vt_symbol,
"class_name": self.__class__.__name__,
"author": self.author,
"parameters": self.get_parameters(),
"variables": self.get_variables(),
}
return strategy_data
def get_positions(self):
""" 返回持仓数量"""
pos_list = []
if self.pos > 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "long",
"volume": self.pos
})
elif self.pos < 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "short",
"volume": abs(self.pos)
})
return pos_list
@virtual
def on_timer(self):
pass
@virtual
def on_init(self):
"""
Callback when strategy is inited.
"""
pass
@virtual
def on_start(self):
"""
Callback when strategy is started.
"""
pass
@virtual
def on_stop(self):
"""
Callback when strategy is stopped.
"""
pass
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
pass
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
@virtual
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
pass
@virtual
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
@virtual
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
def buy(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send buy order to open a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
def sell(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send sell order to close a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK sell委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
def short(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send short order to open a short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK short委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
def cover(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send cover order to close a short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK cover委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
def send_order(
self,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool = False,
order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None,
grid: CtaGrid = None
):
"""
Send a new order.
"""
# 兼容cta_strategy的模板,缺省不指定vt_symbol时,使用策略配置的vt_symbol
if vt_symbol == '':
vt_symbol = self.vt_symbol
if not self.trading:
return []
vt_orderids = self.cta_engine.send_order(
strategy=self,
vt_symbol=vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop=stop,
order_type=order_type
)
if order_time is None:
order_time = datetime.now()
for vt_orderid in vt_orderids:
d = {
'direction': direction,
'offset': offset,
'vt_symbol': vt_symbol,
'price': price,
'volume': volume,
'order_type': order_type,
'traded': 0,
'order_time': order_time,
'status': Status.SUBMITTING
}
if grid:
d.update({'grid': grid})
grid.order_ids.append(vt_orderid)
self.active_orders.update({vt_orderid: d})
if direction == Direction.LONG:
self.entrust = 1
elif direction == Direction.SHORT:
self.entrust = -1
return vt_orderids
def cancel_order(self, vt_orderid: str):
"""
Cancel an existing order.
"""
if self.trading:
return self.cta_engine.cancel_order(self, vt_orderid)
return False
def cancel_all(self):
"""
Cancel all orders sent by strategy.
"""
if self.trading:
self.cta_engine.cancel_all(self)
def is_upper_limit(self, symbol):
"""是否涨停"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_up is None or tick.limit_up == 0:
return False
if tick.bid_price_1 == tick.limit_up:
return True
def is_lower_limit(self, symbol):
"""是否跌停"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_down is None or tick.limit_down == 0:
return False
if tick.ask_price_1 == tick.limit_down:
return True
def write_log(self, msg: str, level: int = INFO):
"""
Write a log message.
"""
self.cta_engine.write_log(msg=msg, strategy_name=self.strategy_name, level=level)
def write_error(self, msg: str):
"""write error log message"""
self.write_log(msg=msg, level=ERROR)
def get_engine_type(self):
"""
Return whether the cta_engine is backtesting or live trading.
"""
return self.cta_engine.get_engine_type()
def load_bar(
self,
days: int,
interval: Interval = Interval.MINUTE,
callback: Callable = None,
):
"""
Load historical bar data for initializing strategy.
"""
if not callback:
callback = self.on_bar
self.cta_engine.load_bar(self.vt_symbol, days, interval, callback)
def load_tick(self, days: int):
"""
Load historical tick data for initializing strategy.
"""
self.cta_engine.load_tick(self.vt_symbol, days, self.on_tick)
def put_event(self):
"""
Put a strategy data event for UI update.
"""
if self.inited:
self.cta_engine.put_strategy_event(self)
def send_email(self, msg):
"""
Send email to default receiver.
"""
if self.inited:
self.cta_engine.send_email(msg, self)
def sync_data(self):
"""
Sync strategy variables value into disk storage.
"""
if self.trading:
self.cta_engine.sync_strategy_data(self)
class CtaFutureTemplate(CtaTemplate):
"""
合约期货模板
"""
price_tick = 1 # 商品的最小价格跳动
symbol_size = 10 # 商品得合约乘数
margin_rate = 0.1 # 商品的保证金
volumn_tick = 1 # 商品最小成交数量
# 委托类型
order_type = OrderType.LIMIT
cancel_seconds = 120 # 撤单时间(秒)
activate_market = False
# 资金相关
max_invest_rate = 0.1 # 最大仓位(0~1)
max_invest_margin = 0 # 资金上限 0,不限制
max_invest_pos = 0 # 单向头寸数量上限 0,不限制
# 是否回测状态
backtesting = False
# 逻辑过程日志
dist_fieldnames = ['datetime', 'symbol', 'volume', 'price', 'margin',
'operation', 'signal', 'stop_price', 'target_price',
'long_pos', 'short_pos']
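# Illustrative setting dict a concrete strategy built on this template might receive
# (only keys listed in that strategy's "parameters" are applied by update_setting;
# all values below are made up):
#     {"max_invest_rate": 0.2, "max_invest_margin": 100000, "backtesting": True}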
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
self.position = None # 仓位组件
self.policy = None # 事务执行组件
self.gt = None # 网格交易组件
self.klines = {} # K线组件字典: kline_name: kline
self.price_tick = 1 # 商品的最小价格跳动
self.symbol_size = 10 # 商品得合约乘数
self.margin_rate = 0.1 # 商品的保证金
self.volumn_tick = 1 # 商品最小成交数量
self.cancel_seconds = 120 # 撤单时间(秒)
self.activate_market = False
self.order_type = OrderType.LIMIT
self.backtesting = False
self.cur_datetime: datetime = None # 当前Tick时间
self.cur_tick: TickData = None # 最新的合约tick( vt_symbol)
self.cur_price = None # 当前价(主力合约 vt_symbol)
self.account_pos = None # 当前账号vt_symbol持仓信息
self.last_minute = None # 最后的分钟,用于on_tick内每分钟处理的逻辑
self.display_bars = True
super().__init__(
cta_engine, strategy_name, vt_symbol, setting
)
# 增加仓位管理模块
self.position = CtaPosition(strategy=self)
self.position.maxPos = sys.maxsize
# 增加网格持久化模块
self.gt = CtaGridTrade(strategy=self)
if 'backtesting' not in self.parameters:
self.parameters.append('backtesting')
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
self.price_tick = self.cta_engine.get_price_tick(self.vt_symbol)
self.symbol_size = self.cta_engine.get_size(self.vt_symbol)
self.margin_rate = self.cta_engine.get_margin_rate(self.vt_symbol)
self.volumn_tick = self.cta_engine.get_volume_tick(self.vt_symbol)
if self.activate_market:
self.write_log(f'{self.strategy_name}使用市价单委托方式')
self.order_type = OrderType.MARKET
else:
if not self.backtesting:
self.cancel_seconds = 10
self.write_log(f'实盘撤单时间10秒')
def sync_data(self):
"""同步更新数据"""
if not self.backtesting:
self.write_log(u'保存k线缓存数据')
self.save_klines_to_cache()
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def save_klines_to_cache(self, kline_names: list = []):
"""
保存K线数据到缓存
:param kline_names: 一般为self.klines的keys
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
# 获取保存路径
save_path = self.cta_engine.get_data_path()
# 保存缓存的文件名
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
with bz2.BZ2File(file_name, 'wb') as f:
klines = {}
for kline_name in kline_names:
kline = self.klines.get(kline_name, None)
# if kline:
# kline.strategy = None
# kline.cb_on_bar = None
klines.update({kline_name: kline})
pickle.dump(klines, f)
def load_klines_from_cache(self, kline_names: list = []):
"""
从缓存加载K线数据
:param kline_names:
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
save_path = self.cta_engine.get_data_path()
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
try:
last_bar_dt = None
with bz2.BZ2File(file_name, 'rb') as f:
klines = pickle.load(f)
# 逐一恢复K线
for kline_name in kline_names:
# 缓存的k线实例
cache_kline = klines.get(kline_name, None)
# 当前策略实例的K线实例
strategy_kline = self.klines.get(kline_name, None)
if cache_kline and strategy_kline:
# 临时保存当前的回调函数
cb_on_bar = strategy_kline.cb_on_bar
# 缓存实例数据 =》 当前实例数据
strategy_kline.__dict__.update(cache_kline.__dict__)
# 所有K线的最后时间
if last_bar_dt and strategy_kline.cur_datetime:
last_bar_dt = max(last_bar_dt, strategy_kline.cur_datetime)
else:
last_bar_dt = strategy_kline.cur_datetime
# 重新绑定k线策略与on_bar回调函数
strategy_kline.strategy = self
strategy_kline.cb_on_bar = cb_on_bar
self.write_log(f'恢复{kline_name}缓存数据,最新bar结束时间:{last_bar_dt}')
self.write_log(u'加载缓存k线数据完毕')
return last_bar_dt
except Exception as ex:
self.write_error(f'加载缓存K线数据失败:{str(ex)}')
return None
def get_klines_snapshot(self):
"""返回当前klines的切片数据"""
try:
d = {
'strategy': self.strategy_name,
'datetime': datetime.now()}
klines = {}
for kline_name in sorted(self.klines.keys()):
klines.update({kline_name: self.klines.get(kline_name).get_data()})
kline_names = list(klines.keys())
binary_data = zlib.compress(pickle.dumps(klines))
d.update({'kline_names': kline_names, 'klines': binary_data, 'zlib': True})
return d
except Exception as ex:
self.write_error(f'获取klines切片数据失败:{str(ex)}')
return {}
def init_policy(self):
self.write_log(u'init_policy(),初始化执行逻辑')
if self.policy:
self.policy.load()
def init_position(self):
"""
初始化Position
使用网格的持久化,获取开仓状态的多空单,更新
:return:
"""
self.write_log(u'init_position(),初始化持仓')
changed = False
if len(self.gt.up_grids) <= 0:
self.position.short_pos = 0
# 加载已开仓的空单数据,网格JSON
short_grids = self.gt.load(direction=Direction.SHORT, open_status_filter=[True])
if len(short_grids) == 0:
self.write_log(u'没有持久化的空单数据')
self.gt.up_grids = []
else:
self.gt.up_grids = short_grids
for sg in short_grids:
if len(sg.order_ids) > 0 or sg.order_status:
self.write_log(f'重置委托状态:{sg.order_status},清除委托单:{sg.order_ids}')
sg.order_status = False
[self.cancel_order(vt_orderid) for vt_orderid in sg.order_ids]
sg.order_ids = []
changed = True
self.write_log(u'加载持仓空单[{},价格:{},数量:{}手,开仓时间:{}'
.format(self.vt_symbol, sg.open_price,
sg.volume, sg.open_time))
self.position.short_pos = round(self.position.short_pos - sg.volume, 7)
self.write_log(u'持久化空单,共持仓:{}手'.format(abs(self.position.short_pos)))
if len(self.gt.dn_grids) <= 0:
# 加载已开仓的多数据,网格JSON
self.position.long_pos = 0
long_grids = self.gt.load(direction=Direction.LONG, open_status_filter=[True])
if len(long_grids) == 0:
self.write_log(u'没有持久化的多单数据')
self.gt.dn_grids = []
else:
self.gt.dn_grids = long_grids
for lg in long_grids:
if len(lg.order_ids) > 0 or lg.order_status:
self.write_log(f'重置委托状态:{lg.order_status},清除委托单:{lg.order_ids}')
lg.order_status = False
[self.cancel_order(vt_orderid) for vt_orderid in lg.order_ids]
lg.order_ids = []
changed = True
self.write_log(u'加载持仓多单[{},价格:{},数量:{}手, 开仓时间:{}'
.format(self.vt_symbol, lg.open_price, lg.volume, lg.open_time))
self.position.long_pos = round(self.position.long_pos + lg.volume, 7)
self.write_log(f'持久化多单,共持仓:{self.position.long_pos}手')
self.position.pos = round(self.position.long_pos + self.position.short_pos, 7)
self.write_log(u'{}加载持久化数据完成,多单:{},空单:{},共:{}手'
.format(self.strategy_name,
self.position.long_pos,
abs(self.position.short_pos),
self.position.pos))
self.pos = self.position.pos
if changed:
self.gt.save()
self.display_grids()
def get_positions(self):
"""
获取策略当前持仓(重构,使用主力合约)
:return: [{'vt_symbol':symbol,'direction':direction,'volume':volume]
"""
if not self.position:
return []
pos_list = []
if self.position.long_pos > 0:
for g in self.gt.get_opened_grids(direction=Direction.LONG):
pos_list.append({'vt_symbol': self.vt_symbol,
'direction': 'long',
'volume': g.volume - g.traded_volume,
'price': g.open_price})
if abs(self.position.short_pos) > 0:
for g in self.gt.get_opened_grids(direction=Direction.SHORT):
pos_list.append({'vt_symbol': self.vt_symbol,
'direction': 'short',
'volume': abs(g.volume - g.traded_volume),
'price': g.open_price})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'{}当前持仓:{}'.format(self.strategy_name, pos_list))
return pos_list
def on_trade(self, trade: TradeData):
"""交易更新"""
self.write_log(u'{},交易更新:{},当前持仓:{} '
.format(self.cur_datetime,
trade.__dict__,
self.position.pos))
dist_record = dict()
if self.backtesting:
dist_record['datetime'] = trade.time
else:
dist_record['datetime'] = ' '.join([self.cur_datetime.strftime('%Y-%m-%d'), trade.time])
dist_record['volume'] = trade.volume
dist_record['price'] = trade.price
dist_record['margin'] = trade.price * trade.volume * self.cta_engine.get_margin_rate(trade.vt_symbol)
dist_record['symbol'] = trade.vt_symbol
if trade.direction == Direction.LONG and trade.offset == Offset.OPEN:
dist_record['operation'] = 'buy'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset == Offset.OPEN:
dist_record['operation'] = 'short'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.LONG and trade.offset != Offset.OPEN:
dist_record['operation'] = 'cover'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset != Offset.OPEN:
dist_record['operation'] = 'sell'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
self.save_dist(dist_record)
self.pos = self.position.pos
def on_order(self, order: OrderData):
"""报单更新"""
# 未执行的订单中,存在是异常,删除
self.write_log(u'{}报单更新,{}'.format(self.cur_datetime, order.__dict__))
if order.vt_orderid in self.active_orders:
if order.volume == order.traded and order.status in [Status.ALLTRADED]:
self.on_order_all_traded(order)
elif order.offset == Offset.OPEN and order.status in [Status.CANCELLED]:
# 开仓委托单被撤销
self.on_order_open_canceled(order)
elif order.offset != Offset.OPEN and order.status in [Status.CANCELLED]:
# 平仓委托单被撤销
self.on_order_close_canceled(order)
elif order.status == Status.REJECTED:
if order.offset == Offset.OPEN:
self.write_error(u'{}委托单开{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_open_canceled(order)
else:
self.write_error(u'OnOrder({})委托单平{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_close_canceled(order)
else:
self.write_log(u'委托单未完成,total:{},traded:{},tradeStatus:{}'
.format(order.volume, order.traded, order.status))
else:
self.write_error(u'委托单{}不在策略的未完成订单列表中:{}'.format(order.vt_orderid, self.active_orders))
def on_order_all_traded(self, order: OrderData):
"""
订单全部成交
:param order:
:return:
"""
self.write_log(u'{},委托单:{}全部完成'.format(order.time, order.vt_orderid))
order_info = self.active_orders[order.vt_orderid]
# 通过vt_orderid,找到对应的网格
grid = order_info.get('grid', None)
if grid is not None:
# 移除当前委托单
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
# 网格的所有委托单已经执行完毕
if len(grid.order_ids) == 0:
grid.order_status = False
grid.traded_volume = 0
# 平仓完毕(cover, sell)
if order.offset != Offset.OPEN:
grid.open_status = False
grid.close_status = True
if grid.volume < order.traded:
self.write_log(f'网格平仓数量{grid.volume},小于委托单成交数量:{order.volume},修正为:{order.volume}')
grid.volume = order.traded
self.write_log(f'{grid.direction.value}单已平仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
self.write_log(f'移除网格:{grid.to_json()}')
self.gt.remove_grids_by_ids(direction=grid.direction, ids=[grid.id])
# 开仓完毕( buy, short)
else:
grid.open_status = True
grid.open_time = self.cur_datetime
self.write_log(f'{grid.direction.value}单已开仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
# 网格的所有委托单部分执行完毕
else:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.volume
grid.traded_volume = round(grid.traded_volume, 7)
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(f'剩余委托单号:{grid.order_ids}')
self.gt.save()
# 在策略得活动订单中,移除
self.write_log(f'委托单{order.vt_orderid}完成,从活动订单中移除')
self.active_orders.pop(order.vt_orderid, None)
def on_order_open_canceled(self, order: OrderData):
"""
委托开仓单撤销
:param order:
:return:
"""
self.write_log(u'委托开仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中{}。'.format(order.vt_orderid, self.active_orders))
return
# 直接更新“未完成委托单”,更新volume,retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 委托信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分开仓:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = grid.traded_volume
grid.traded_volume = 0
grid.open_status = True
self.write_log(f'开仓完成,grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_order_close_canceled(self, order: OrderData):
"""委托平仓单撤销"""
self.write_log(u'委托平仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中:{}。'.format(order.vt_orderid, self.active_orders))
return
# 直接更新“未完成委托单”,更新volume,Retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分平仓成交:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = round(grid.volume - grid.traded_volume, 7)
grid.traded_volume = 0
if grid.volume <= 0:
grid.volume = 0
grid.open_status = False
self.write_log(f'强制全部平仓完成')
else:
self.write_log(f'平仓委托中,撤单完成,部分成交,减少持仓grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_stop_order(self, stop_order: StopOrder):
self.write_log(f'停止单触发:{stop_order.__dict__}')
def grid_check_stop(self):
"""
网格逐一止损/止盈检查 (根据指数价格进行止损止盈)
:return:
"""
if self.entrust != 0:
return
if not self.trading and not self.inited:
self.write_error(u'当前不允许交易')
return
# 多单网格逐一止损/止盈检查:
long_grids = self.gt.get_opened_grids(direction=Direction.LONG)
for g in long_grids:
if g.stop_price > 0 and g.stop_price > self.cur_price and g.open_status and not g.order_status:
# 调用平仓模块
self.write_log(u'{} {}当前价:{} 触发多单止损线{},开仓价:{},v:{}'.
format(self.cur_datetime,
self.vt_symbol,
self.cur_price,
g.stop_price,
g.open_price,
g.volume))
if self.grid_sell(g):
self.write_log(u'多单止盈/止损委托成功')
else:
self.write_error(u'多单止损委托失败')
# 空单网格止损检查
short_grids = self.gt.get_opened_grids(direction=Direction.SHORT)
for g in short_grids:
if g.stop_price > 0 and g.stop_price < self.cur_price and g.open_status and not g.order_status:
# 网格止损
self.write_log(u'{} {}当前价:{} 触发空单止损线:{}, 开仓价:{},v:{}'.
format(self.cur_datetime, self.vt_symbol, self.cur_price, g.stop_price,
g.open_price, g.volume))
if self.grid_cover(g):
self.write_log(u'空单止盈/止损委托成功')
else:
self.write_error(u'委托空单平仓失败')
def grid_buy(self, grid):
"""
事务开多仓
:return:
"""
if self.backtesting:
buy_price = self.cur_price + self.price_tick
else:
buy_price = self.cur_tick.ask_price_1
vt_orderids = self.buy(vt_symbol=self.vt_symbol,
price=buy_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'创建{}事务多单,开仓价:{},数量:{},止盈价:{},止损价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price, grid.stop_price))
self.gt.dn_grids.append(grid)
self.gt.save()
return True
else:
self.write_error(u'创建{}事务多单,委托失败,开仓价:{},数量:{},止盈价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price))
return False
def grid_short(self, grid):
"""
事务开空仓
:return:
"""
if self.backtesting:
short_price = self.cur_price - self.price_tick
else:
short_price = self.cur_tick.bid_price_1
vt_orderids = self.short(vt_symbol=self.vt_symbol,
price=short_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'创建{}事务空单,事务开空价:{},当前价:{},数量:{},止盈价:{},止损价:{}'
.format(grid.type, grid.open_price, self.cur_price, grid.volume, grid.close_price,
grid.stop_price))
self.gt.up_grids.append(grid)
self.gt.save()
return True
else:
self.write_error(u'创建{}事务空单,委托失败,开仓价:{},数量:{},止盈价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price))
return False
def grid_sell(self, grid):
"""
事务平多单仓位
1.来源自止损止盈平仓
:param 平仓网格
:return:
"""
self.write_log(u'执行事务平多仓位:{}'.format(grid.to_json()))
"""
self.account_pos = self.cta_engine.get_position(
vt_symbol=self.vt_symbol,
direction=Direction.NET)
if self.account_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.vt_symbol))
return False
"""
# 发出委托卖出单
if self.backtesting:
sell_price = self.cur_price - self.price_tick
else:
sell_price = self.cur_tick.bid_price_1
# 发出平多委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.volume = round(grid.volume, 7)
grid.traded_volume = 0
"""
if self.account_pos.volume <= 0:
self.write_error(u'当前{}的净持仓:{},不能平多单'
.format(self.vt_symbol,
self.account_pos.volume))
return False
if self.account_pos.volume < grid.volume:
self.write_error(u'当前{}的净持仓:{},不满足平仓目标:{}, 强制降低'
.format(self.vt_symbol,
self.account_pos.volume,
grid.volume))
grid.volume = self.account_pos.volume
"""
vt_orderids = self.sell(
vt_symbol=self.vt_symbol,
price=sell_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) == 0:
if self.backtesting:
self.write_error(u'多单平仓委托失败')
else:
self.write_error(u'多单平仓委托失败')
return False
else:
self.write_log(u'多单平仓委托成功,编号:{}'.format(vt_orderids))
return True
def grid_cover(self, grid):
"""
事务平空单仓位
1.来源自止损止盈平仓
:param 平仓网格
:return:
"""
self.write_log(u'执行事务平空仓位:{}'.format(grid.to_json()))
"""
self.account_pos = self.cta_engine.get_position(
vt_symbol=self.vt_symbol,
direction=Direction.NET)
if self.account_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.vt_symbol))
return False
"""
# 发出委托单
if self.backtesting:
cover_price = self.cur_price + self.price_tick
else:
cover_price = self.cur_tick.ask_price_1
# 发出cover委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.volume = round(grid.volume, 7)
grid.traded_volume = 0
"""
if self.account_pos.volume >= 0:
self.write_error(u'当前{}的净持仓:{},不能平空单'
.format(self.vt_symbol,
self.account_pos.volume))
return False
if abs(self.account_pos.volume) < grid.volume:
self.write_error(u'当前{}的净持仓:{},不满足平仓目标:{}, 强制降低'
.format(self.vt_symbol,
self.account_pos.volume,
grid.volume))
grid.volume = abs(self.account_pos.volume)
"""
vt_orderids = self.cover(
price=cover_price,
vt_symbol=self.vt_symbol,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) == 0:
if self.backtesting:
self.write_error(u'空单平仓委托失败')
else:
self.write_error(u'空单平仓委托失败')
return False
else:
self.write_log(u'空单平仓委托成功,编号:{}'.format(vt_orderids))
return True
def cancel_all_orders(self):
"""
重载撤销所有正在进行得委托
:return:
"""
self.write_log(u'撤销所有正在进行得委托')
self.tns_cancel_logic(dt=datetime.now(), force=True, reopen=False)
def tns_cancel_logic(self, dt, force=False, reopen=False):
"""撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
canceled_ids = []
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders[vt_orderid]
order_vt_symbol = order_info.get('vt_symbol', self.vt_symbol)
order_time = order_info['order_time']
order_volume = order_info['volume'] - order_info['traded']
order_grid = order_info['grid']
order_status = order_info.get('status', Status.NOTTRADED)
order_type = order_info.get('order_type', OrderType.LIMIT)
over_seconds = (dt - order_time).total_seconds()
# 只处理未成交的限价委托单
if order_status in [Status.SUBMITTING, Status.NOTTRADED] and order_type == OrderType.LIMIT:
if over_seconds > self.cancel_seconds or force: # 超过设置的时间还未成交
self.write_log(u'超时{}秒未成交,取消委托单:vt_orderid:{},order:{}'
.format(over_seconds, vt_orderid, order_info))
order_info.update({'status': Status.CANCELLING})
self.active_orders.update({vt_orderid: order_info})
ret = self.cancel_order(str(vt_orderid))
if not ret:
self.write_log(u'撤单失败,更新状态为撤单成功')
order_info.update({'status': Status.CANCELLED})
self.active_orders.update({vt_orderid: order_info})
if order_grid and vt_orderid in order_grid.order_ids:
order_grid.order_ids.remove(vt_orderid)
continue
# 处理状态为‘撤销’的委托单
elif order_status == Status.CANCELLED:
self.write_log(u'委托单{}已成功撤单,删除{}'.format(vt_orderid, order_info))
canceled_ids.append(vt_orderid)
if reopen:
# 撤销的委托单,属于开仓类,需要重新委托
if order_info['offset'] == Offset.OPEN:
self.write_log(u'超时撤单后,重新开仓')
# 开空委托单
if order_info['direction'] == Direction.SHORT:
if self.backtesting:
short_price = self.cur_price - self.price_tick
else:
short_price = self.cur_tick.bid_price_1
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开空委托,开空价{},v:{}'.format(order_vt_symbol, short_price, order_volume))
vt_orderids = self.short(price=short_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderid:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': short_price})
else:
self.write_error(u'撤单后,重新委托开空仓失败')
else:
if self.backtesting:
buy_price = self.cur_price + self.price_tick
else:
buy_price = self.cur_tick.ask_price_1
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开多委托,开多价{},v:{}'.format(order_vt_symbol, buy_price, order_volume))
vt_orderids = self.buy(price=buy_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': buy_price})
else:
self.write_error(u'撤单后,重新委托开多仓失败')
else:
# 属于平多委托单
if order_info['direction'] == Direction.SHORT:
if self.backtesting:
sell_price = self.cur_price - self.price_tick
else:
sell_price = self.cur_tick.bid_price_1
self.write_log(u'重新提交{}平多委托,{},v:{}'.format(order_vt_symbol, sell_price, order_volume))
vt_orderids = self.sell(price=sell_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平多仓失败')
# 属于平空委托单
else:
if self.backtesting:
cover_price = self.cur_price + self.price_tick
else:
cover_price = self.cur_tick.ask_price_1
self.write_log(u'重新提交{}平空委托,委托价{},v:{}'.format(order_vt_symbol, cover_price, order_volume))
vt_orderids = self.cover(price=cover_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平空仓失败')
else:
if order_info['offset'] == Offset.OPEN \
and order_grid \
and len(order_grid.order_ids) == 0 \
and not order_grid.open_status \
and not order_grid.order_status \
and order_grid.traded_volume == 0:
self.write_log(u'移除从未开仓成功的委托网格{}'.format(order_grid.__dict__))
order_info['grid'] = None
self.gt.remove_grids_by_ids(direction=order_grid.direction, ids=[order_grid.id])
# 删除撤单的订单
for vt_orderid in canceled_ids:
self.write_log(f'活动订单撤单成功,移除{vt_orderid}')
self.active_orders.pop(vt_orderid, None)
if len(self.active_orders) == 0:
self.entrust = 0
def display_grids(self):
"""更新网格显示信息"""
if not self.inited:
return
self.account_pos = self.cta_engine.get_position(vt_symbol=self.vt_symbol, direction=Direction.NET)
if self.account_pos:
self.write_log(
f'账号{self.vt_symbol}持仓:{self.account_pos.volume}, 冻结:{self.account_pos.frozen}, 盈亏:{self.account_pos.pnl}')
up_grids_info = ""
for grid in list(self.gt.up_grids):
if not grid.open_status and grid.order_status:
up_grids_info += f'平空中: [已平:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
up_grids_info += f'委托单号:{grid.order_ids}'
continue
if grid.open_status and not grid.order_status:
up_grids_info += f'持空中: [数量:{grid.volume}, 开仓时间:{grid.open_time}]\n'
continue
if not grid.open_status and grid.order_status:
up_grids_info += f'开空中: [已开:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
up_grids_info += f'委托单号:{grid.order_ids}'
dn_grids_info = ""
for grid in list(self.gt.dn_grids):
if not grid.open_status and grid.order_status:
dn_grids_info += f'平多中: [已平:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
dn_grids_info += f'委托单号:{grid.order_ids}'
continue
if grid.open_status and not grid.order_status:
dn_grids_info += f'持多中: [数量:{grid.volume}, 开仓价:{grid.open_price},开仓时间:{grid.open_time}]\n'
continue
if not grid.open_status and grid.order_status:
dn_grids_info += f'开多中: [已开:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
dn_grids_info += f'委托单号:{grid.order_ids}'
if len(up_grids_info) > 0:
self.write_log(up_grids_info)
if len(dn_grids_info) > 0:
self.write_log(dn_grids_info)
def display_tns(self):
"""显示事务的过程记录=》 log"""
if not self.inited:
return
self.write_log(u'{} 当前 {}价格:{}, 委托状态:{}'
.format(self.cur_datetime, self.vt_symbol, self.cur_price, self.entrust))
if len(self.active_orders) > 0:
self.write_log('当前活动订单:{}'.format(self.active_orders))
if hasattr(self, 'policy'):
policy = getattr(self, 'policy')
if policy:
op = getattr(policy, 'to_json', None)
if callable(op):
self.write_log(u'当前Policy:{}'.format(json.dumps(policy.to_json(), indent=2, ensure_ascii=False)))
def save_dist(self, dist_data):
"""
保存策略逻辑过程记录=》 csv文件
:param dist_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
if 'margin' not in dist_data:
dist_data.update({'margin': dist_data.get('price', 0) * dist_data.get('volume',
0) * self.cta_engine.get_margin_rate(
dist_data.get('symbol', self.vt_symbol))})
if 'datetime' not in dist_data:
dist_data.update({'datetime': self.cur_datetime})
if self.position and 'long_pos' not in dist_data:
dist_data.update({'long_pos': self.position.long_pos})
if self.position and 'short_pos' not in dist_data:
dist_data.update({'short_pos': self.position.short_pos})
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_dist.csv'))
append_data(file_name=file_name, dict_data=dist_data, field_names=self.dist_fieldnames)
except Exception as ex:
self.write_error(u'save_dist 异常:{} {}'.format(str(ex), traceback.format_exc()))
def save_tns(self, tns_data):
"""
保存多空事务记录=》csv文件,便于后续分析
:param tns_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_tns.csv'))
append_data(file_name=file_name, dict_data=tns_data)
except Exception as ex:
self.write_error(u'save_tns 异常:{} {}'.format(str(ex), traceback.format_exc()))
def send_wechat(self, msg: str):
"""实盘时才发送微信"""
if self.backtesting:
return
self.cta_engine.send_wechat(msg=msg, strategy=self)
class CtaSpotTemplate(CtaTemplate):
"""
现货模板
"""
asset_symbol = "" # 资产币 BTCUSDT => BTC
quote_symbol = "" # 定价币 BTCUSDT => USDT
price_tick = 0.01 # 商品的最小价格跳动
symbol_size = 1 # 商品得合约乘数
margin_rate = 1 # 商品的保证金
volumn_tick = 0.01 # 商品最小成交数量
# 委托类型
order_type = OrderType.LIMIT
cancel_seconds = 120 # 撤单时间(秒)
activate_market = False
# 资金相关
max_invest_rate = 0.1 # 最大仓位(0~1) asset / virtual_quote
max_invest_margin = 0 # 资金上限 0,不限制 virtual_quote
max_invest_pos = 0 # 单向头寸数量上限 0,不限制 asset
# 是否回测状态
backtesting = False
# 逻辑过程日志
dist_fieldnames = ['datetime', 'symbol', 'volume', 'price', 'margin',
'operation', 'signal', 'stop_price', 'target_price',
'long_pos']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
# vt_symbol => symbol, exchange
self.symbol, self.exchange = extract_vt_symbol(vt_symbol)
self.position = None # 仓位组件
self.policy = None # 事务执行组件
self.gt = None # 网格交易组件
self.klines = {} # K线组件字典: kline_name: kline
self.price_tick = 0.01 # 商品的最小价格跳动
self.symbol_size = 1 # 商品得合约乘数
self.margin_rate = 1 # 商品的保证金
self.volumn_tick = 0.01 # 商品最小成交数量
self.cancel_seconds = 120 # 撤单时间(秒)
self.activate_market = False
self.order_type = OrderType.LIMIT
self.backtesting = False
self.cur_datetime: datetime = None # 当前Tick时间
self.cur_tick: TickData = None # 最新的合约tick( vt_symbol)
self.cur_price = None # 当前价(主力合约 vt_symbol)
self.asset_pos = None # 当前asset_symbol持仓信息
self.quote_pos = None # 当前quote_symbol的持仓信息
self.last_minute = None # 最后的分钟,用于on_tick内每分钟处理的逻辑
self.display_bars = True
super().__init__(
cta_engine, strategy_name, vt_symbol, setting
)
# 增加仓位管理模块
self.position = CtaPosition(strategy=self)
self.position.maxPos = sys.maxsize
# 增加网格持久化模块
self.gt = CtaGridTrade(strategy=self)
if 'backtesting' not in self.parameters:
self.parameters.append('backtesting')
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
self.price_tick = self.cta_engine.get_price_tick(self.vt_symbol)
self.symbol_size = self.cta_engine.get_size(self.vt_symbol)
self.margin_rate = self.cta_engine.get_margin_rate(self.vt_symbol)
self.volumn_tick = self.cta_engine.get_volume_tick(self.vt_symbol)
# 检查资产币+定价币是否与vt_symbol一致
if self.symbol != f'{self.asset_symbol}{self.quote_symbol}':
raise Exception(f'{self.vt_symbol}与{self.asset_symbol}+{self.quote_symbol}不匹配')
if self.activate_market:
self.write_log(f'{self.strategy_name}使用市价单委托方式')
self.order_type = OrderType.MARKET
def sync_data(self):
"""同步更新数据"""
if not self.backtesting:
self.write_log(u'保存k线缓存数据')
self.save_klines_to_cache()
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def save_klines_to_cache(self, kline_names: list = []):
"""
保存K线数据到缓存
:param kline_names: 一般为self.klines的keys
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
# 获取保存路径
save_path = self.cta_engine.get_data_path()
# 保存缓存的文件名
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
with bz2.BZ2File(file_name, 'wb') as f:
klines = {}
for kline_name in kline_names:
kline = self.klines.get(kline_name, None)
# if kline:
# kline.strategy = None
# kline.cb_on_bar = None
klines.update({kline_name: kline})
pickle.dump(klines, f)
def load_klines_from_cache(self, kline_names: list = []):
"""
从缓存加载K线数据
:param kline_names:
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
save_path = self.cta_engine.get_data_path()
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
try:
last_bar_dt = None
with bz2.BZ2File(file_name, 'rb') as f:
klines = pickle.load(f)
# 逐一恢复K线
for kline_name in kline_names:
# 缓存的k线实例
cache_kline = klines.get(kline_name, None)
# 当前策略实例的K线实例
strategy_kline = self.klines.get(kline_name, None)
if cache_kline and strategy_kline:
# 临时保存当前的回调函数
cb_on_bar = strategy_kline.cb_on_bar
# 缓存实例数据 =》 当前实例数据
strategy_kline.__dict__.update(cache_kline.__dict__)
# 所有K线的最后时间
if last_bar_dt and strategy_kline.cur_datetime:
last_bar_dt = max(last_bar_dt, strategy_kline.cur_datetime)
else:
last_bar_dt = strategy_kline.cur_datetime
# 重新绑定k线策略与on_bar回调函数
strategy_kline.strategy = self
strategy_kline.cb_on_bar = cb_on_bar
self.write_log(f'恢复{kline_name}缓存数据,最新bar结束时间:{last_bar_dt}')
self.write_log(u'加载缓存k线数据完毕')
return last_bar_dt
except Exception as ex:
self.write_error(f'加载缓存K线数据失败:{str(ex)}')
return None
def get_klines_snapshot(self):
"""返回当前klines的切片数据"""
try:
d = {
'strategy': self.strategy_name,
'datetime': datetime.now()}
klines = {}
for kline_name in sorted(self.klines.keys()):
klines.update({kline_name: self.klines.get(kline_name).get_data()})
kline_names = list(klines.keys())
binary_data = zlib.compress(pickle.dumps(klines))
d.update({'kline_names': kline_names, 'klines': binary_data, 'zlib': True})
return d
except Exception as ex:
self.write_error(f'获取klines切片数据失败:{str(ex)}')
return {}
def init_policy(self):
self.write_log(u'init_policy(),初始化执行逻辑')
if self.policy:
self.policy.load()
def init_position(self):
"""
初始化Position
使用网格的持久化,获取开仓状态的多空单,更新
:return:
"""
self.write_log(u'init_position(),初始化持仓')
changed = False
if len(self.gt.dn_grids) <= 0:
# 加载已开仓的多数据,网格JSON
self.position.long_pos = 0
long_grids = self.gt.load(direction=Direction.LONG, open_status_filter=[True])
if len(long_grids) == 0:
self.write_log(u'没有持久化的多单数据')
self.gt.dn_grids = []
else:
self.gt.dn_grids = long_grids
for lg in long_grids:
if len(lg.order_ids) > 0 or lg.order_status:
self.write_log(f'重置委托状态:{lg.order_status},清除委托单:{lg.order_ids}')
lg.order_status = False
[self.cancel_order(vt_orderid) for vt_orderid in lg.order_ids]
lg.order_ids = []
changed = True
self.write_log(u'加载持仓多单[{},价格:{},数量:{}手, 开仓时间:{}'
.format(lg.vt_symbol, lg.open_price, lg.volume, lg.open_time))
self.position.long_pos = round(self.position.long_pos + lg.volume, 7)
self.write_log(f'持久化多单,共持仓:{self.position.long_pos}手')
self.position.pos = round(self.position.long_pos + self.position.short_pos, 7)
self.write_log(u'{}加载持久化数据完成,多单:{},空单:{},共:{}手'
.format(self.strategy_name,
self.position.long_pos,
abs(self.position.short_pos),
self.position.pos))
self.pos = self.position.pos
if changed:
self.gt.save()
self.display_grids()
def get_positions(self):
"""
获取策略当前持仓(重构,使用主力合约)
:return: [{'vt_symbol':symbol,'direction':direction,'volume':volume]
"""
if not self.position:
return []
pos_list = []
if self.position.long_pos > 0:
for g in self.gt.get_opened_grids(direction=Direction.LONG):
pos_list.append({'vt_symbol': f'{self.asset_symbol}.{self.exchange.value}',
'direction': 'long',
'volume': g.volume - g.traded_volume,
'price': g.open_price})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'{}当前持仓:{}'.format(self.strategy_name, pos_list))
return pos_list
def on_trade(self, trade: TradeData):
"""交易更新"""
self.write_log(u'{},交易更新:{},当前持仓:{} '
.format(self.cur_datetime,
trade.__dict__,
self.position.pos))
dist_record = dict()
if self.backtesting:
dist_record['datetime'] = trade.time
else:
dist_record['datetime'] = ' '.join([self.cur_datetime.strftime('%Y-%m-%d'), trade.time])
dist_record['volume'] = trade.volume
dist_record['price'] = trade.price
dist_record['margin'] = trade.price * trade.volume * self.cta_engine.get_margin_rate(trade.vt_symbol)
dist_record['symbol'] = trade.vt_symbol
if trade.direction == Direction.LONG and trade.offset == Offset.OPEN:
dist_record['operation'] = 'buy'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset == Offset.OPEN:
dist_record['operation'] = 'short'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.LONG and trade.offset != Offset.OPEN:
dist_record['operation'] = 'cover'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset != Offset.OPEN:
dist_record['operation'] = 'sell'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
self.save_dist(dist_record)
self.pos = self.position.pos
def on_order(self, order: OrderData):
"""报单更新"""
# 未执行的订单中,存在是异常,删除
self.write_log(u'{}报单更新,{}'.format(self.cur_datetime, order.__dict__))
if order.vt_orderid in self.active_orders:
if order.volume == order.traded and order.status in [Status.ALLTRADED]:
self.on_order_all_traded(order)
elif order.offset == Offset.OPEN and order.status in [Status.CANCELLED]:
# 开仓委托单被撤销
self.on_order_open_canceled(order)
elif order.offset != Offset.OPEN and order.status in [Status.CANCELLED]:
# 平仓委托单被撤销
self.on_order_close_canceled(order)
elif order.status == Status.REJECTED:
if order.offset == Offset.OPEN:
self.write_error(u'{}委托单开{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_open_canceled(order)
else:
self.write_error(u'OnOrder({})委托单平{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_close_canceled(order)
else:
self.write_log(u'委托单未完成,total:{},traded:{},tradeStatus:{}'
.format(order.volume, order.traded, order.status))
else:
self.write_error(u'委托单{}不在策略的未完成订单列表中:{}'.format(order.vt_orderid, self.active_orders))
def on_order_all_traded(self, order: OrderData):
"""
订单全部成交
:param order:
:return:
"""
self.write_log(u'{},委托单:{}全部完成'.format(order.time, order.vt_orderid))
order_info = self.active_orders[order.vt_orderid]
# 通过vt_orderid,找到对应的网格
grid = order_info.get('grid', None)
if grid is not None:
# 移除当前委托单
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
# 网格的所有委托单已经执行完毕
if len(grid.order_ids) == 0:
grid.order_status = False
grid.traded_volume = 0
# 平仓完毕(cover, sell)
if order.offset != Offset.OPEN:
grid.open_status = False
grid.close_status = True
                    if grid.volume < order.traded:
                        self.write_log(f'网格平仓数量{grid.volume},小于委托单成交数量:{order.traded},修正为:{order.traded}')
                        grid.volume = order.traded
self.write_log(f'{grid.direction.value}单已平仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
self.write_log(f'移除网格:{grid.to_json()}')
self.gt.remove_grids_by_ids(direction=grid.direction, ids=[grid.id])
# 开仓完毕( buy, short)
else:
grid.open_status = True
grid.open_time = self.cur_datetime
self.write_log(f'{grid.direction.value}单已开仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
# 网格的所有委托单部分执行完毕
else:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.volume
grid.traded_volume = round(grid.traded_volume, 7)
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(f'剩余委托单号:{grid.order_ids}')
self.gt.save()
# 在策略得活动订单中,移除
self.write_log(f'委托单{order.vt_orderid}完成,从活动订单中移除')
self.active_orders.pop(order.vt_orderid, None)
def on_order_open_canceled(self, order: OrderData):
"""
委托开仓单撤销
:param order:
:return:
"""
self.write_log(u'委托开仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中{}。'.format(order.vt_orderid, self.active_orders))
return
# 直接更新“未完成委托单”,更新volume,retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 委托信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分开仓:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = grid.traded_volume
grid.traded_volume = 0
grid.open_status = True
self.write_log(f'开仓完成,grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_order_close_canceled(self, order: OrderData):
"""委托平仓单撤销"""
self.write_log(u'委托平仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中:{}。'.format(order.vt_orderid, self.active_orders))
return
# 直接更新“未完成委托单”,更新volume,Retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分平仓成交:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = round(grid.volume - grid.traded_volume, 7)
grid.traded_volume = 0
if grid.volume <= 0:
grid.volume = 0
grid.open_status = False
self.write_log(f'强制全部平仓完成')
else:
self.write_log(f'平仓委托中,撤单完成,部分成交,减少持仓grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_stop_order(self, stop_order: StopOrder):
self.write_log(f'停止单触发:{stop_order.__dict__}')
def grid_check_stop(self):
"""
网格逐一止损/止盈检查 (根据指数价格进行止损止盈)
:return:
"""
if self.entrust != 0:
return
if not self.trading and not self.inited:
self.write_error(u'当前不允许交易')
return
# 多单网格逐一止损/止盈检查:
long_grids = self.gt.get_opened_grids(direction=Direction.LONG)
for g in long_grids:
if g.stop_price > 0 and g.stop_price > self.cur_price and g.open_status and not g.order_status:
# 调用平仓模块
self.write_log(u'{} {}当前价:{} 触发多单止损线{},开仓价:{},v:{}'.
format(self.cur_datetime,
g.vt_symbol,
self.cur_price,
g.stop_price,
g.open_price,
g.volume))
if self.grid_sell(g):
self.write_log(u'多单止盈/止损委托成功')
else:
self.write_error(u'多单止损委托失败')
def grid_buy(self, grid):
"""
事务开多仓
:return:
"""
if self.backtesting:
buy_price = self.cur_price + self.price_tick
else:
buy_price = self.cur_tick.ask_price_1
if self.quote_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.quote_symbol))
return False
vt_orderids = self.buy(vt_symbol=self.vt_symbol,
price=buy_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'创建{}事务多单,开仓价:{},数量:{},止盈价:{},止损价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price, grid.stop_price))
self.gt.save()
return True
else:
self.write_error(u'创建{}事务多单,委托失败,开仓价:{},数量:{},止盈价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price))
return False
def grid_sell(self, grid):
"""
事务平多单仓位
1.来源自止损止盈平仓
:param 平仓网格
:return:
"""
self.write_log(u'执行事务平多仓位:{}'.format(grid.to_json()))
if self.asset_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.asset_symbol))
return False
# 发出委托卖出单
if self.backtesting:
sell_price = self.cur_price - self.price_tick
else:
sell_price = self.cur_tick.bid_price_1
# 发出平多委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.volume = round(grid.volume, 7)
grid.traded_volume = 0
if self.asset_pos.volume <= 0:
self.write_error(u'当前{}的净持仓:{},不能平多单'
.format(self.asset_symbol,
self.asset_pos.volume))
return False
if self.asset_pos.volume < grid.volume:
self.write_error(u'当前{}的净持仓:{},不满足平仓目标:{}, 强制降低'
.format(self.asset_symbol,
self.asset_pos.volume,
grid.volume))
grid.volume = self.asset_pos.volume
vt_orderids = self.sell(
vt_symbol=self.vt_symbol,
price=sell_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) == 0:
            self.write_error(u'多单平仓委托失败')
return False
else:
self.write_log(u'多单平仓委托成功,编号:{}'.format(vt_orderids))
return True
def cancel_all_orders(self):
"""
重载撤销所有正在进行得委托
:return:
"""
self.write_log(u'撤销所有正在进行得委托')
self.tns_cancel_logic(dt=datetime.now(), force=True, reopen=False)
def tns_cancel_logic(self, dt, force=False, reopen=False):
"撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
canceled_ids = []
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders[vt_orderid]
order_vt_symbol = order_info.get('vt_symbol', self.vt_symbol)
order_time = order_info['order_time']
order_volume = order_info['volume'] - order_info['traded']
order_grid = order_info['grid']
order_status = order_info.get('status', Status.NOTTRADED)
order_type = order_info.get('order_type', OrderType.LIMIT)
over_seconds = (dt - order_time).total_seconds()
# 只处理未成交的限价委托单
if order_status in [Status.SUBMITTING, Status.NOTTRADED] and order_type == OrderType.LIMIT:
if over_seconds > self.cancel_seconds or force: # 超过设置的时间还未成交
self.write_log(u'超时{}秒未成交,取消委托单:vt_orderid:{},order:{}'
.format(over_seconds, vt_orderid, order_info))
order_info.update({'status': Status.CANCELLING})
self.active_orders.update({vt_orderid: order_info})
ret = self.cancel_order(str(vt_orderid))
if not ret:
self.write_log(u'撤单失败,更新状态为撤单成功')
order_info.update({'status': Status.CANCELLED})
self.active_orders.update({vt_orderid: order_info})
if order_grid and vt_orderid in order_grid.order_ids:
order_grid.order_ids.remove(vt_orderid)
continue
# 处理状态为‘撤销’的委托单
elif order_status == Status.CANCELLED:
self.write_log(u'委托单{}已成功撤单,删除{}'.format(vt_orderid, order_info))
canceled_ids.append(vt_orderid)
if order_info['offset'] == Offset.OPEN \
and order_grid \
and len(order_grid.order_ids) == 0 \
and not order_grid.open_status \
and not order_grid.order_status \
and order_grid.traded_volume == 0:
self.write_log(u'移除从未开仓成功的委托网格{}'.format(order_grid.__dict__))
order_info['grid'] = None
self.gt.remove_grids_by_ids(direction=order_grid.direction, ids=[order_grid.id])
# 删除撤单的订单
for vt_orderid in canceled_ids:
self.write_log(f'活动订单撤单成功,移除{vt_orderid}')
self.active_orders.pop(vt_orderid, None)
if len(self.active_orders) == 0:
self.entrust = 0
def display_grids(self):
"""更新网格显示信息"""
if not self.inited:
return
        self.asset_pos = self.cta_engine.get_position(vt_symbol=f'{self.asset_symbol}.{self.exchange.value}', direction=Direction.NET)
if self.asset_pos:
self.write_log(
f'账号{self.asset_symbol}持仓:{self.asset_pos.volume}, 冻结:{self.asset_pos.frozen}')
self.quote_pos = self.cta_engine.get_position(vt_symbol=f'{self.quote_symbol}.{self.exchange.value}', direction=Direction.NET)
if self.quote_pos:
self.write_log(
f'账号{self.quote_symbol}持仓:{self.quote_pos.volume}, 冻结:{self.quote_pos.frozen}')
dn_grids_info = ""
for grid in list(self.gt.dn_grids):
if grid.close_status and not grid.open_status and grid.order_status:
dn_grids_info += f'平多中: {grid.vt_symbol}[已平:{grid.traded_volume} => 目标:{grid.volume}, 平仓价格:{grid.close_price},委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
dn_grids_info += f'委托单号:{grid.order_ids}'
continue
if grid.open_status and not grid.order_status and not grid.close_status:
dn_grids_info += f'持多中: {grid.vt_symbol}[数量:{grid.volume}, 开仓价格:{grid.open_price},开仓时间:{grid.open_time}]\n'
continue
if not grid.open_status and grid.order_status and not grid.close_status:
dn_grids_info += f'开多中: {grid.vt_symbol}[已开:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
dn_grids_info += f'委托单号:{grid.order_ids}'
if len(dn_grids_info) > 0:
self.write_log(dn_grids_info)
def display_tns(self):
"""显示事务的过程记录=》 log"""
if not self.inited:
return
self.write_log(u'{} 当前 {}价格:{}, 委托状态:{}'
.format(self.cur_datetime, self.vt_symbol, self.cur_price, self.entrust))
if len(self.active_orders) > 0:
self.write_log('当前活动订单数:{}'.format(len(self.active_orders))) #json.dumps(self.active_orders, indent=2, ensure_ascii=False)))
if hasattr(self, 'policy'):
policy = getattr(self, 'policy')
if policy:
op = getattr(policy, 'to_json', None)
if callable(op):
self.write_log(u'当前Policy:{}'.format(json.dumps(policy.to_json(), indent=2, ensure_ascii=False)))
def save_dist(self, dist_data):
"""
保存策略逻辑过程记录=》 csv文件按
:param dist_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
if 'margin' not in dist_data:
dist_data.update({'margin': dist_data.get('price', 0) * dist_data.get('volume',
0) * self.cta_engine.get_margin_rate(
dist_data.get('symbol', self.vt_symbol))})
if 'datetime' not in dist_data:
dist_data.update({'datetime': self.cur_datetime})
if self.position and 'long_pos' not in dist_data:
dist_data.update({'long_pos': self.position.long_pos})
if self.position and 'short_pos' not in dist_data:
dist_data.update({'short_pos': self.position.short_pos})
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_dist.csv'))
append_data(file_name=file_name, dict_data=dist_data, field_names=self.dist_fieldnames)
except Exception as ex:
self.write_error(u'save_dist 异常:{} {}'.format(str(ex), traceback.format_exc()))
def save_tns(self, tns_data):
"""
保存多空事务记录=》csv文件,便于后续分析
:param tns_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_tns.csv'))
append_data(file_name=file_name, dict_data=tns_data)
except Exception as ex:
self.write_error(u'save_tns 异常:{} {}'.format(str(ex), traceback.format_exc()))
def send_wechat(self, msg: str):
"""实盘时才发送微信"""
if self.backtesting:
return
self.cta_engine.send_wechat(msg=msg, strategy=self)
class MultiContractPolicy(CtaPolicy):
"""多合约Policy,记录持仓"""
def __init__(self, strategy=None, **kwargs):
super().__init__(strategy, **kwargs)
self.debug = kwargs.get('debug', False)
self.positions = {} # vt_symbol: net_pos
def from_json(self, json_data):
"""将数据从json_data中恢复"""
super().from_json(json_data)
self.positions = json_data.get('positions')
def to_json(self):
"""转换至json文件"""
j = super().to_json()
j['positions'] = self.positions
return j
def on_trade(self, trade: TradeData):
"""更新交易"""
pos = self.positions.get(trade.vt_symbol)
if pos is None:
pos = 0
pre_pos = pos
if trade.direction == Direction.LONG:
pos = round(pos + trade.volume, 7)
elif trade.direction == Direction.SHORT:
pos = round(pos - trade.volume, 7)
self.positions.update({trade.vt_symbol: pos})
if self.debug and self.strategy:
self.strategy.write_log(f'{trade.vt_symbol} pos:{pre_pos}=>{pos}')
self.save()
class MultiContractTemplate(CtaTemplate):
"""多合约交易模板"""
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
self.policy = None
self.cur_datetime = None
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.policy = MultiContractPolicy(strategy=self, debug=True)
def sync_data(self):
"""同步更新数据"""
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def on_trade(self, trade: TradeData):
"""成交回报事件处理"""
self.policy.on_trade(trade)
def get_positions(self):
""" 获取策略所有持仓详细"""
pos_list = []
for vt_symbol, pos in self.policy.positions.items():
pos_list.append({'vt_symbol': vt_symbol,
'direction': 'long' if pos >= 0 else 'short',
'volume': pos})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'{}当前持仓:{}'.format(self.strategy_name, pos_list))
return pos_list
def on_order(self, order: OrderData):
pass
def on_init(self):
self.inited = True
def on_start(self):
self.trading = True
def on_stop(self):
self.trading = False
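# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a concrete strategy would
# subclass MultiContractTemplate and keep its signal logic in the data callbacks;
# fills flow through on_trade() into MultiContractPolicy, which persists the
# per-symbol net positions. The class below is hypothetical.
class DemoMultiContractStrategy(MultiContractTemplate):
    """Hypothetical subclass showing where signal logic would live."""

    def on_tick(self, tick):
        # Track the latest tick time; real entry/exit orders would be sent here.
        self.cur_datetime = getattr(tick, 'datetime', self.cur_datetime)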
| python |
# Copyright 2017, Wenjia Bai. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
testDir = '/home/pcorrado/Cardiac-DL-Segmentation-Paper/test'
modelBasePath = '/home/pcorrado/Cardiac-DL-Segmentation-Paper/Cardiac-Segmentation-4D-Flow/TrainedModels'
modelPaths = ['model_{}_layers_frozen'.format(l) for l in [4,8,12,14,15]]
modelPaths.append('modelUnfrozen')
modelName = 'FCN_sa_level5_filter16_22333_batch20_iter10000_lr0.001'
numLayers = [4,8,12,14,15,0]
if __name__ == '__main__':
for ii in range(len(modelPaths)):
os.system('python3 common/deploy_network.py --data_dir {0} '
'--model_path {1}/{2}/{3}/{3}.ckpt-10000'.format(testDir, modelBasePath, modelPaths[ii], modelName))
for data in sorted(os.listdir(testDir)):
data_dir = os.path.join(testDir, data)
os.system('mv {0}/seg_sa.nii.gz {0}/sa_label_{1}.nii.gz'.format(data_dir, numLayers[ii]))
| python |
# coding:utf-8
from lxml import etree
import requests
import config
def checkProxyType(selfip, proxies):
    '''
    Detect the anonymity type of a proxy. The type advertised by free proxy sites is
    unreliable, so it is checked directly here.
    :param proxies: the proxy to check (return codes: 0 elite/high-anonymity, 1 anonymous, 2 transparent, 3 invalid)
    :return:
    '''
try:
r = requests.get(url='https://incloak.com/ip/', headers=config.get_header(), timeout=config.TIMEOUT, proxies=proxies)
print(r.text)
# if r.ok:
# root = etree.HTML(r.text)
# ip = root.xpath('.//center[2]/table/tr[3]/td[2]')[0].text
# http_x_forwared_for = root.xpath('.//center[2]/table/tr[8]/td[2]')[0].text
# http_via = root.xpath('.//center[2]/table/tr[9]/td[2]')[0].text
# # print ip,http_x_forwared_for,http_via,type(http_via),type(http_x_forwared_for)
# if ip==selfip:
# return 3
# if http_x_forwared_for is None and http_via is None:
# return 0
# if http_via != None and http_x_forwared_for.find(selfip)== -1:
# return 1
#
# if http_via != None and http_x_forwared_for.find(selfip)!= -1:
# return 2
# return 3
except Exception as e:
print(str(e))
return 3
if __name__ == '__main__':
ip = '61.132.241.109'
port = '808'
proxies = {"http": "http://%s:%s" % (ip, port), "https": "http://%s:%s" % (ip, port)}
checkProxyType(None, proxies) | python |
import torch
import torch.nn as nn
class FocalLoss(nn.Module):
def __init__(self,gamma=2,eps=1e-7,size_average=True):
super(FocalLoss,self).__init__()
self.gamma = gamma
self.eps = eps
self.size_average = size_average
def forward(self,prob,labels):
p_t = prob*labels + (1-prob)*(1-labels)
loss = -((1.0-p_t)**self.gamma)*torch.log(p_t+self.eps)
if self.size_average:
loss = torch.mean(loss)
return loss | python |
# Create a program that has a single tuple with product names and their respective prices,
# in sequence. At the end, show a price list, organizing the data in tabular form.
Tabela = ('Lapís', 1.75,
'Borracha', 2,
'Caderno', 15.90,
'Estojo', 25,
'Transferidor', 4.20,
'Compasso', 9.99,
'Mochila', 120.32,
'Canetas', 22.30,
'Livro', 34.90)
print('='*45)
print(f'{"TABELA DE PREÇOS":^40}')
print('='*45)
for pos in range(0,len(Tabela)):
if pos % 2 == 0:
        print(f'{Tabela[pos]:.<30}', end=' ')  # '<' left-aligns the text, padded with '.'
if pos % 2 == 1:
        print(f'R${Tabela[pos]:>7.2f}')  # .2f formats the value as currency; '>' right-aligns the text
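# A minimal alternative sketch (not from the original exercise): pairing names and
# prices with zip() and slicing avoids the manual index checks above.
for nome, preco in zip(Tabela[::2], Tabela[1::2]):
    print(f'{nome:.<30} R${preco:>7.2f}')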
| python |
import maya.cmds as mc
import copy
def setDrivenKeyToRemapValue(animCurve,remapValueNode='',interpType=3,deleteAnimCurve=True,lockPosition=True,lockValue=False):
'''
Convert a set driven key setup to a remapValue node.
Each key on the animCurve node is represented as widget on the remapValue ramp control.
Incoming and outgoing curve connections will be replaced with equivalent remapValue connections.
@param animCurve: The animCurve to convert to a remapValue node
@type animCurve: str
    @param remapValueNode: Name of an existing remapValue node to use instead of creating a new one.
@type remapValueNode: str
@param interpType: Default ramp interpolation type.
@type interpType: int
@param deleteAnimCurve: Delete animCurve node after disconnection
@type deleteAnimCurve: bool
@param lockPosition: Lock ramp widget position values
@type lockPosition: bool
@param lockValue: Lock ramp widget float values
@type lockValue: bool
'''
# Checks
if not mc.objExists(animCurve):
raise Exception('AnimCurve node "'+animCurve+'" does not exist!!')
if remapValueNode and not mc.objExists(remapValueNode):
raise Exception('RemapValue node "'+remapValueNode+'" does not exist!!')
# Get connections to animCurve
inConn = mc.listConnections(animCurve+'.input',s=True,d=False,p=True)
outConn = mc.listConnections(animCurve+'.output',s=False,d=True,p=True)
# Get keyframe data
valList = mc.keyframe(animCurve,q=True,vc=True)
floatList = mc.keyframe(animCurve,q=True,fc=True)
# Get min/max input and output values
orderValList = copy.deepcopy(valList)
orderFloatList = copy.deepcopy(floatList)
orderValList.sort()
orderFloatList.sort()
minVal = orderValList[0]
maxVal = orderValList[-1]
minFloat = orderFloatList[0]
maxFloat = orderFloatList[-1]
# Create remapValue node
if not remapValueNode:
remapValueNode = mc.createNode('remapValue',n=animCurve+'_remapValue')
# Set Remap attribute values
mc.setAttr(remapValueNode+'.inputMin',minFloat)
mc.setAttr(remapValueNode+'.inputMax',maxFloat)
mc.setAttr(remapValueNode+'.outputMin',minVal)
mc.setAttr(remapValueNode+'.outputMax',maxVal)
# Remove existing ramp widgets
    indexList = list(range(mc.getAttr(remapValueNode+'.value',s=True)))  # list() so reverse() also works on Python 3
    indexList.reverse()
for i in indexList:
mc.removeMultiInstance(remapValueNode+'.value['+str(i)+']',b=True)
# Set ramp widgets based on keys
valRange = maxVal - minVal
floatRange = maxFloat - minFloat
# Check zero values
if valRange < 0.0001: valRange = 0.0001
if floatRange < 0.0001: floatRange = 0.0001
# Iterate through keys
for i in range(len(valList)):
val = (valList[i] - minVal)/valRange
flt = (floatList[i] - minFloat)/floatRange
mc.setAttr(remapValueNode+'.value['+str(i)+'].value_Position',flt)
mc.setAttr(remapValueNode+'.value['+str(i)+'].value_FloatValue',val)
mc.setAttr(remapValueNode+'.value['+str(i)+'].value_Interp',interpType)
if lockPosition:
mc.setAttr(remapValueNode+'.value['+str(i)+'].value_Position',l=True)
if lockValue:
mc.setAttr(remapValueNode+'.value['+str(i)+'].value_FloatValue',l=True)
# Replace animCurve connections
mc.connectAttr(inConn[0],remapValueNode+'.inputValue',f=True)
mc.connectAttr(remapValueNode+'.outValue',outConn[0],f=True)
# Delete unused animCurve
if deleteAnimCurve: mc.delete(animCurve)
# Return result
return remapValueNode
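# Hypothetical usage sketch (the animCurve node name below is a placeholder, not
# from the original script): convert an existing set-driven-key curve into a
# remapValue node, keeping the original animCurve for comparison.
if __name__ == '__main__':
    remap_node = setDrivenKeyToRemapValue('pCube1_translateY', deleteAnimCurve=False)
    print('Created remapValue node: ' + remap_node)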
| python |
import numpy as np
from keras.models import Sequential
from keras.layers import Dense,Activation,Flatten,Dropout
from keras.layers import Conv2D,MaxPooling2D
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
data=np.load('data.npy')
target=np.load('target.npy')
# loading the saved numpy arrays from the previous code
model=Sequential()
model.add(Conv2D(200,(3,3),input_shape=data.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
#The first CNN layer followed by Relu and MaxPooling layers
model.add(Conv2D(100,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
#The second convolution layer followed by Relu and MaxPooling layers
model.add(Flatten())
model.add(Dropout(0.5))
#Flatten layer to stack the output convolutions from second convolution layer
model.add(Dense(50,activation='relu'))
# Dense layer of 50 neurons
model.add(Dense(2,activation='softmax'))
#The Final layer with two outputs for two categories
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
train_data,test_data,train_target,test_target=train_test_split(data,target,test_size=0.1)
checkpoint = ModelCheckpoint('model-{epoch:03d}.model',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')
history=model.fit(train_data,train_target,epochs=20,callbacks=[checkpoint],validation_split=0.2)
plt.plot(history.history['loss'],'r',label='training loss')
plt.plot(history.history['val_loss'],label='validation loss')
plt.xlabel('# epochs')
plt.ylabel('loss')
plt.legend()
plt.plot(history.history['accuracy'],'r',label='training accuracy')
plt.plot(history.history['val_accuracy'],label='validation accuracy')
plt.xlabel('# epochs')
plt.ylabel('accuracy')
plt.legend() | python |
import zipfile
import os
from time import gmtime, strftime
from helper import utility
from lxml import etree
"""
MIT License
Copyright (c) 2018 Chapin Bryce, Preston Miller
Please share comments and questions at:
https://github.com/PythonForensics/Learning-Python-for-Forensics
or email [email protected]
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
def main(filename):
"""
The main function confirms the file type and sends it
to be processed.
:param filename: name of the file potentially containing
embedded metadata.
:return: A dictionary from getTags, containing the embedded
metadata.
"""
# DOCX, XLSX, and PPTX signatures
signatures = ['504b030414000600']
if utility.check_header(filename, signatures, 8) is True:
return get_tags(filename)
else:
raise TypeError
def get_tags(filename):
"""
The get_tags function extracts the office metadata from the
data object.
:param filename: the path and name to the data object.
:return: tags and headers, tags is a dictionary containing
office metadata and headers are the order of keys for the CSV
output.
"""
# Set up CSV headers
headers = ['Path', 'Name', 'Size', 'Filesystem CTime',
'Filesystem MTime', 'Title', 'Author(s)','Create Date',
'Modify Date', 'Last Modified By Date', 'Subject', 'Keywords',
'Description', 'Category', 'Status', 'Revision',
'Edit Time (Min)', 'Page Count', 'Word Count',
'Character Count', 'Line Count',
'Paragraph Count', 'Slide Count', 'Note Count',
'Hidden Slide Count', 'Company', 'Hyperlink Base']
# Create a ZipFile class from the input object
# This allows us to read or write to the 'Zip archive'
try:
zf = zipfile.ZipFile(filename)
except zipfile.BadZipfile:
return {}, headers
# These two XML files contain the embedded metadata of
# interest
try:
core = etree.fromstring(zf.read('docProps/core.xml'))
app = etree.fromstring(zf.read('docProps/app.xml'))
except KeyError as e:
assert Warning(e)
return {}, headers
tags = {}
tags['Path'] = filename
tags['Name'] = os.path.basename(filename)
tags['Size'] = utility.convert_size(
os.path.getsize(filename))
tags['Filesystem CTime'] = strftime('%m/%d/%Y %H:%M:%S',
gmtime(os.path.getctime(filename)))
tags['Filesystem MTime'] = strftime('%m/%d/%Y %H:%M:%S',
gmtime(os.path.getmtime(filename)))
# Core Tags
for child in core.iterchildren():
if 'title' in child.tag:
tags['Title'] = child.text
if 'subject' in child.tag:
tags['Subject'] = child.text
if 'creator' in child.tag:
tags['Author(s)'] = child.text
if 'keywords' in child.tag:
tags['Keywords'] = child.text
if 'description' in child.tag:
tags['Description'] = child.text
if 'lastModifiedBy' in child.tag:
tags['Last Modified By Date'] = child.text
if 'created' in child.tag:
tags['Create Date'] = child.text
if 'modified' in child.tag:
tags['Modify Date'] = child.text
if 'category' in child.tag:
tags['Category'] = child.text
if 'contentStatus' in child.tag:
tags['Status'] = child.text
if (filename.endswith('.docx') or
filename.endswith('.pptx')):
if 'revision' in child.tag:
tags['Revision'] = child.text
# App Tags
for child in app.iterchildren():
if filename.endswith('.docx'):
if 'TotalTime' in child.tag:
tags['Edit Time (Min)'] = child.text
if 'Pages' in child.tag:
tags['Page Count'] = child.text
if 'Words' in child.tag:
tags['Word Count'] = child.text
if 'Characters' in child.tag:
tags['Character Count'] = child.text
if 'Lines' in child.tag:
tags['Line Count'] = child.text
if 'Paragraphs' in child.tag:
tags['Paragraph Count'] = child.text
if 'Company' in child.tag:
tags['Company'] = child.text
if 'HyperlinkBase' in child.tag:
tags['Hyperlink Base'] = child.text
elif filename.endswith('.pptx'):
if 'TotalTime' in child.tag:
tags['Edit Time (Min)'] = child.text
if 'Words' in child.tag:
tags['Word Count'] = child.text
if 'Paragraphs' in child.tag:
tags['Paragraph Count'] = child.text
if 'Slides' in child.tag:
tags['Slide Count'] = child.text
if 'Notes' in child.tag:
tags['Note Count'] = child.text
if 'HiddenSlides' in child.tag:
tags['Hidden Slide Count'] = child.text
if 'Company' in child.tag:
tags['Company'] = child.text
if 'HyperlinkBase' in child.tag:
tags['Hyperlink Base'] = child.text
else:
if 'Company' in child.tag:
tags['Company'] = child.text
if 'HyperlinkBase' in child.tag:
tags['Hyperlink Base'] = child.text
return tags, headers
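# Hypothetical usage sketch (the file path is a placeholder): extract the embedded
# metadata from one Office document and print whichever fields were populated.
if __name__ == '__main__':
    meta, columns = main('example.docx')
    for column in columns:
        if column in meta:
            print('{}: {}'.format(column, meta[column]))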
| python |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from app import db
age_func = db.Table('age_func',
db.Column('id', db.Integer, primary_key=True, autoincrement=True),
db.Column('age_id', db.Integer, db.ForeignKey('age_group.id'), nullable=False),
db.Column('func_id', db.Integer, db.ForeignKey('function.id'), nullable=False)
)
class AgeGroup(db.Model):
__tablename__ = 'age_group'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(10), nullable=False, unique=True)
functions = db.relationship('Function',
secondary=age_func,
backref=db.backref('age_set', lazy='dynamic')
)
books = db.relationship('Book', backref='age_bk', lazy='dynamic')
def __init__(self, name):
self.name = name
def __str__(self):
return '<AgeGroup: {}>'.format(self.name)
def model_to_dict(self, query_relation=False):
ag_dict = {
'id': self.id,
'name': self.name
}
if query_relation:
funcs = []
if self.functions is not None:
for func in self.functions:
funcs.append(func.model_to_dict())
ag_dict['functions'] = funcs
return ag_dict
def save(self):
db.session.add(self)
db.session.commit()
class Function(db.Model):
__tablename__ = 'function'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(10), nullable=False, unique=True)
books = db.relationship('Book', backref='function_set', lazy='dynamic')
age_groups = db.relationship('AgeGroup',
secondary=age_func,
backref=db.backref('function_ag', lazy='dynamic')
)
def __init__(self, name):
self.name = name
def __str__(self):
return '<Function: {}>'.format(self.name)
def model_to_dict(self, query_relation=False):
fun_dict = {
'id': self.id,
'name': self.name
}
if query_relation:
ags = []
if self.age_groups is not None:
for ag in self.age_groups:
ags.append(ag.model_to_dict())
fun_dict['age_groups'] = ags
return fun_dict
def save(self):
db.session.add(self)
db.session.commit() | python |
# Tool Imports
from bph.tools.windows.nircmd import BphNirCmd as NirCmd
# Core Imports
from bph.core.server.template import BphTemplateServer as TemplateServer
from bph.core.session import BphSession as Session
session = Session(project_name='blackhat_arsenal_2019')
session.start()
templateserver = TemplateServer()
templateserver.start()
nircmd = NirCmd()
nircmd.start_process(program=r'calc.exe')
nircmd.execute(delay=3)
nircmd = NirCmd()
nircmd.kill_process(program=r'calc.exe')
nircmd.execute(delay=3)
| python |
from sudachipy import dictionary
from sudachipy import tokenizer
from sudachipy.plugin import oov
from kuro2sudachi.normalizer import SudachiCharNormalizer
import jaconv
import fileinput
import argparse
import json
import os
import re
mode = tokenizer.Tokenizer.SplitMode.C
parser = argparse.ArgumentParser(
description="convert kuromoji user dict to sudacchi user dict"
)
parser.add_argument("file", help="kuromoji dict file path")
parser.add_argument(
"-c",
"--config",
help="convert config file (json format file)",
)
parser.add_argument("-o", "--out", help="output path")
parser.add_argument(
"-d",
"--rewrite_def",
default=os.path.dirname(os.path.abspath(__file__)) + "/rewrite.def",
help="rewrite text file path",
)
parser.add_argument(
"--rm_already_exist",
action="store_true",
help="remove words system dict already exist"
)
parser.add_argument("-r", "--sudachi_setting", help="the setting file in JSON format")
parser.add_argument("-s", "--sudachi_dict_type", help="sudachidict type")
parser.add_argument("-m", "--merge_dict", help="A dictionary for split registration of words that are not in the system dictionary. Must be specified as a user dictionary in sudachi's configuration file (json).")
parser.add_argument(
"--ignore",
action="store_true",
help="ignore invalid format line / unsupported pos error / oov error in splitted word",
)
default_setting = {
"固有名詞": {
"sudachi_pos": "名詞,固有名詞,一般,*,*,*",
"left_id": 4786,
"right_id": 4786,
"cost": 7000,
},
"名詞": {
"sudachi_pos": "名詞,普通名詞,一般,*,*,*",
"left_id": 5146,
"right_id": 5146,
"cost": 7000,
},
}
p = re.compile("[\u30A1-\u30FC]*")
class Error(Exception):
pass
class UnSupportedPosError(Error):
pass
class DictFormatError(Error):
pass
class OOVError(Error):
pass
class Converter:
def __init__(
self,
rewrite_file,
config=None,
sudachi_setting=None,
dict_type="core",
rm=False,
):
if rewrite_file == "":
raise DictFormatError("rewrite.def file path is required")
self.tokenizer = dictionary.Dictionary(
dict_type=dict_type, config_path=sudachi_setting
).create()
if config is not None:
with open(config) as f:
s = json.load(f)
else:
s = default_setting
self.rewrite = rewrite_file
self.setting = s
self.rm = rm
self.normalizer = SudachiCharNormalizer(rewrite_def_path=self.rewrite)
def convert(self, line: str) -> str:
data = line.split(",")
try:
word = data[0]
# splited = data[1]
yomi = self.nomlized_yomi(data[2].replace(" ", ""))
pos = self.pos_convert(data[3].replace(" ", ""))
except IndexError:
raise DictFormatError(f"'{line}' is invalid format")
words = [m.surface() for m in self.tokenizer.tokenize(word, mode)]
        # already exists in the system dictionary
if self.rm and len(words) == 1:
return ""
normalized = self.normalizer.rewrite(word)
unit_div_info = "*,*"
try:
if (udm := pos.get("unit_div_mode")) != None:
unit_div_info = self.split(normalized, udm)
except OOVError as e:
print(e)
raise e
split_mode = pos.get("split_mode", "*")
return f"{normalized},{pos['left_id']},{pos['right_id']},{pos['cost']},{word},{pos['sudachi_pos']},{yomi},{word},*,{split_mode},{unit_div_info},*"
def pos_convert(self, pos: str):
try:
spos = self.setting[pos]
return spos
except KeyError:
raise UnSupportedPosError(f"{pos} is not supported pos")
def nomlized_yomi(self, yomi: str) -> str:
yomi = jaconv.hira2kata(yomi)
if p.fullmatch(yomi):
return yomi
return ""
def split_info(self, normalized: str, udm: list[str], mode: any) -> str:
word_ids = []
oov = []
for m in self.tokenizer.tokenize(normalized, mode):
if ",".join(m.part_of_speech()) == "名詞,数詞,*,*,*,*":
return "*"
if m.is_oov() or m.dictionary_id()==-1:
oov.append(m.surface())
continue
word_ids.append(str(m.word_id()))
if len(oov) > 0:
raise OOVError(f"split word has out of vocab: {oov} in {normalized}")
return "/".join(word_ids)
def split(self, normalized: str, udm: list[str]) -> str:
try:
unit_div_info = []
if "A" in udm:
info = self.split_info(normalized, udm, tokenizer.Tokenizer.SplitMode.A)
unit_div_info.append(info)
else:
unit_div_info.append("*")
if "B" in udm:
info = self.split_info(normalized, udm, tokenizer.Tokenizer.SplitMode.B)
unit_div_info.append(info)
else:
unit_div_info.append("*")
return ",".join(unit_div_info)
except OOVError as e:
raise e
def cli() -> str:
args = parser.parse_args()
out = open(args.out, "wt")
rewrite = args.rewrite_def
rm = args.rm_already_exist
config = args.config
sudachi_setting = args.sudachi_setting
sudachi_dict_type = args.sudachi_dict_type
merge_dict = args.merge_dict
c = Converter(
rewrite,
config,
sudachi_setting=sudachi_setting,
dict_type=sudachi_dict_type,
rm=rm,
)
with fileinput.input(files=merge_dict) as merged:
for line in merged:
line = line.replace("\n" , "")
out.write(f"{line}\n")
with fileinput.input(files=args.file) as input:
for line in input:
line = line.strip()
if line == "":
continue
if line[0] == "#":
continue
converted = ""
try:
converted = c.convert(line)
if converted == "":
continue
except (UnSupportedPosError, DictFormatError, OOVError) as e:
if args.ignore:
continue
else:
raise e
out.write(f"{converted}\n")
| python |
###############################################################################
#
# Copyright (c) 2018, Henrique Morimitsu,
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# #############################################################################
from datetime import datetime
import numpy as np
import os
import os.path as osp
import shutil
import tensorflow as tf
import time
tfe = tf.contrib.eager
class LearningRate(object):
""" Helper class for managing the learning rate. It current implements
only learning rate decay at fixed step numbers.
Arguments:
global_step: tfe.Variable: the current step (iteration) number.
initial_lr: float: initial value of learning rate.
lr_decay: float: decay value to multiply at each decay step.
lr_decay_steps: list: the step numbers at which the decay is applied.
"""
def __init__(self, global_step, initial_lr, lr_decay, lr_decay_steps):
self.global_step = global_step
self.current_lr = tfe.Variable(initial_lr, dtype=tf.float32, name='lr')
self.initial_lr = tf.constant(initial_lr, tf.float32)
self.lr_decay = tf.constant(lr_decay, tf.float32)
self.lr_decay_steps = lr_decay_steps
self.last_lr_update = tfe.Variable(
global_step, dtype=tf.int64, name='last_lr_update')
def get_lr(self):
""" Returns the current learning rate.
Note that this call will activate the decay, if global_step is at a
decay step value.
Returns:
            tfe.Variable: the learning rate at the current global_step
"""
if self.global_step > self.last_lr_update and \
int(self.global_step) in self.lr_decay_steps:
tf.assign(self.current_lr, self.current_lr * self.lr_decay)
tf.assign(self.last_lr_update, self.global_step)
return self.current_lr
def trainer(tr_manager_trainer_queue,
trainer_tr_manager_queue,
train_dir, batch_size,
save_ckpt_interval,
max_train_iters,
initial_lr,
lr_decay,
lr_decay_steps,
log_interval,
backpropagate_losing_policies,
keep_checkpoint_every_n_hours,
game_config_string,
game_manager_module,
game_manager_kwargs):
""" Starts the training process. The network parameters will be restored
from a checkpoint, if it exists.
Args:
tr_manager_trainer_queue: Queue: to get training batch samples from
trainer_manager.
trainer_tr_manager_queue: Queue: to put checkpoint file names to
trainer_manager.
train_dir: string: path to the directory where training files are
stored.
batch_size: int: batch size to use during training.
save_ckpt_interval: int: number of training steps to save a new
checkpoint.
max_train_iters: int: number of training steps before concluding.
initial_lr: float: initial value of learning rate.
lr_decay: float: decay value to multiply at each decay step.
lr_decay_steps: list: the step numbers at which the decay is applied.
log_interval: int: number of steps to print a training log message.
backpropagate_losing_policies: boolean: if False, ignore policy losses
coming from the losing player.
keep_checkpoint_every_n_hours: float: interval in hours at which a
checkpoint is kept on disk permanently.
game_config_string: string: a name for the current game.
game_manager_module: list: a list with two string containing the name
of the game manager module (file) and the name of the class inside of
the module.
game_manager_kwargs: dict: a dictionary of arguments and its respective
values.
"""
np.random.seed()
ckpt_path = game_manager_kwargs['ckpt_path']
game_manager_kwargs['replace_unloaded_resnet_by_naivenet'] = False
gm_module = __import__(game_manager_module[0])
gm_class = getattr(gm_module, game_manager_module[1])
game_manager = gm_class(**game_manager_kwargs)
global_step = tf.train.get_or_create_global_step()
lr = LearningRate(global_step, initial_lr, lr_decay, lr_decay_steps)
start_time = time.time()
net = game_manager.net
optimizer = tf.train.MomentumOptimizer(
lr.get_lr(), momentum=0.9, use_nesterov=True)
checkpoint = tfe.Checkpoint(
net=net, optimizer=optimizer, global_step=global_step,
current_lr=lr.current_lr)
if ckpt_path is not None:
print('Loading training params from: ' + ckpt_path)
checkpoint.restore(ckpt_path)
ckpt_name = None
if ckpt_path is not None:
ckpt_name = osp.split(ckpt_path)[1]
trainer_tr_manager_queue.put(ckpt_name)
writer = tf.contrib.summary.create_file_writer(train_dir)
writer.set_as_default()
total_loss = 0.0
total_policy_loss = 0.0
total_value_loss = 0.0
total_reg_loss = 0.0
exp_decay = 1.0 - 1.0/log_interval
exp_moving_loss = -1.0
exp_moving_policy_loss = -1.0
exp_moving_value_loss = -1.0
exp_moving_reg_loss = -1.0
keep_checkpoint_every_n_seconds = keep_checkpoint_every_n_hours * 3600.0
last_kept_checkpoint_time = time.time()
while global_step <= max_train_iters:
# Workaround for memory leak when using loss in Eager Execution
# See tensorflow issue #20062
tf.reset_default_graph()
with tf.contrib.summary.always_record_summaries():
states_batch, policy_batch, value_prior_batch = \
tr_manager_trainer_queue.get()
with tf.device(game_manager_kwargs['tf_device']):
states_batch_tf = tf.constant(states_batch, tf.float32)
policy_batch_tf = tf.constant(policy_batch, tf.int32)
value_prior_batch_tf = \
tf.constant(value_prior_batch, tf.float32)
with tfe.GradientTape() as tape:
policy_pred, value_pred = \
net(states_batch_tf, training=True)
policy_loss = tf.losses.sparse_softmax_cross_entropy(
policy_batch_tf, policy_pred,
reduction=tf.losses.Reduction.NONE)
if not backpropagate_losing_policies:
policy_loss = tf.where(
tf.less(value_prior_batch_tf, 0.0),
tf.zeros_like(policy_loss),
policy_loss)
policy_loss = tf.reduce_mean(policy_loss)
value_loss = tf.square(
value_pred[:, 0] - value_prior_batch_tf)
value_loss = tf.reduce_mean(value_loss)
reg_loss = tf.reduce_sum(net.losses)
loss = policy_loss + value_loss + reg_loss
grads = tape.gradient(loss, net.variables)
optimizer.apply_gradients(
zip(grads, net.variables),
global_step=global_step)
total_loss += loss
total_policy_loss += policy_loss
total_value_loss += value_loss
total_reg_loss += reg_loss
if exp_moving_loss < 0.0:
exp_moving_loss = loss
exp_moving_policy_loss = policy_loss
exp_moving_value_loss = value_loss
exp_moving_reg_loss = reg_loss
else:
exp_moving_loss = \
exp_decay * exp_moving_loss + (1.0-exp_decay) * loss
exp_moving_policy_loss = \
exp_decay * exp_moving_policy_loss + \
(1.0-exp_decay) * policy_loss
exp_moving_value_loss = \
exp_decay * exp_moving_value_loss + \
(1.0-exp_decay) * value_loss
exp_moving_reg_loss = \
exp_decay * exp_moving_reg_loss + \
(1.0-exp_decay) * reg_loss
if int(global_step) % log_interval == 0:
tf.contrib.summary.scalar(
'policy_loss', exp_moving_policy_loss,
step=global_step)
tf.contrib.summary.scalar(
'value_loss', exp_moving_value_loss, step=global_step)
tf.contrib.summary.scalar(
'regularization_loss', exp_moving_reg_loss,
step=global_step)
tf.contrib.summary.scalar(
'total_loss', exp_moving_loss, step=global_step)
tf.contrib.summary.scalar('lr', lr.get_lr(),
step=global_step)
total_loss /= log_interval
total_policy_loss /= log_interval
total_value_loss /= log_interval
total_reg_loss /= log_interval
elapsed_time = time.time() - start_time
examples_per_second = \
(states_batch.shape[0] * float(log_interval)) / \
elapsed_time
print(
('%s: Train iter: %d, loss %.04f, ' +
'policy-loss %.04f, value-loss %.04f, ' +
'regul-loss %.04f, lr %.1e, ' +
'%.01f examples per sec.') %
(datetime.now().strftime('%Y_%m_%d_%H_%M_%S'),
global_step, total_loss, total_policy_loss,
total_value_loss, total_reg_loss,
float(lr.get_lr().value()), examples_per_second))
                    total_loss = 0.0
                    total_policy_loss = 0.0
                    total_value_loss = 0.0
                    total_reg_loss = 0.0
                    start_time = time.time()
if int(global_step) % save_ckpt_interval == 0:
ckpt_name = '%s-%d.ckpt' % \
(game_config_string, global_step)
ckpt_path = osp.join(train_dir, ckpt_name)
checkpoint.save(ckpt_path)
ckpt_path = tf.train.get_checkpoint_state(train_dir)\
.model_checkpoint_path
# This could be done automatically if tfe.Checkpoint
# supported the keep_checkpoint_every_n_hours argument
# like tf.train.Saver does
ckpt_interval = time.time() - last_kept_checkpoint_time
if ckpt_interval > keep_checkpoint_every_n_seconds:
last_ckpt_files = [f for f in os.listdir(train_dir)
if f.startswith(ckpt_name)]
for lcf in last_ckpt_files:
shutil.copy(
osp.join(train_dir, lcf),
osp.join(train_dir, lcf.replace(
'.ckpt', '.ckpt-keep')))
last_kept_checkpoint_time = time.time()
print('%s: saved model %s' %
(datetime.now().strftime('%Y_%m_%d_%H_%M_%S'),
osp.join(train_dir,
'%s-%d.ckpt' %
(game_config_string, global_step))))
if global_step < max_train_iters:
ckpt_name = osp.split(ckpt_path)[1]
trainer_tr_manager_queue.put(ckpt_name)
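# Minimal usage sketch for LearningRate (illustrative values; assumes TF 1.x with
# eager execution enabled, as the rest of this file does):
#
#     step = tf.train.get_or_create_global_step()
#     lr = LearningRate(step, initial_lr=0.01, lr_decay=0.1, lr_decay_steps=[1000, 2000])
#     optimizer = tf.train.MomentumOptimizer(lr.get_lr(), momentum=0.9)
#     # lr.get_lr() returns 0.01 until step reaches 1000, then 0.001, then 1e-4 after 2000.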
| python |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='dynamicmultithreadedexecutor',
version='1.0.2',
description='Dynamic Multi-threaded Executor',
author='Kevin McCabe',
author_email='[email protected]',
url='https://github.com/gumpcraca/dynamicmultithreadedexecutor',
keywords = [],
packages=find_packages(),
install_requires=['six','sentinels'],
py_modules=["dynamicmultithreadedexecutor"],
classifiers = [],
)
| python |
def EscreverArquivoRelatorio(tabelaDados, somaMegaBytes, dadosMedio):
    '''Function to write the final report file for the problem (disk usage per user).'''
    arquivo_final = open('relatório.txt', 'w')
    arquivo_final.write('ACME Inc. Uso do espaço em disco pelos usuários')
    arquivo_final.write('\n')
    arquivo_final.write('-' * 70)
    arquivo_final.write('\n')
    arquivo_final.write(tabelaDados.to_string())
    arquivo_final.write('\n')
    arquivo_final.write('\n')
    arquivo_final.write('Espaço total ocupado: {:.2f} MB'.format(somaMegaBytes))
    arquivo_final.write('\n')
    arquivo_final.write('Espaço médio ocupado: {:.2f} MB'.format(dadosMedio))
    arquivo_final.close()
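# Hypothetical usage sketch (the data below is made up): the first argument is
# expected to be a pandas DataFrame, since the function calls .to_string() on it.
if __name__ == '__main__':
    import pandas as pd
    usuarios = pd.DataFrame({'Usuário': ['alice', 'bob'], 'Espaço usado (MB)': [1500.0, 2500.0]})
    EscreverArquivoRelatorio(usuarios, somaMegaBytes=4000.0, dadosMedio=2000.0)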
| python |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
class TestPointOfSale(TransactionCase):
def setUp(self):
super(TestPointOfSale, self).setUp()
# ignore pre-existing pricelists for the purpose of this test
self.env["product.pricelist"].search([]).write({"active": False})
self.currency = self.env.ref("base.USD")
self.company1 = self.env["res.company"].create({
"name": "company 1",
"currency_id": self.currency.id
})
self.company2 = self.env["res.company"].create({
"name": "company 2",
"currency_id": self.currency.id
})
self.company2_pricelist = self.env["product.pricelist"].create({
"name": "company 2 pricelist",
"currency_id": self.currency.id,
"company_id": self.company2.id,
"sequence": 1, # force this pricelist to be first
})
self.env.user.company_id = self.company1
def test_default_pricelist_with_company(self):
""" Verify that the default pricelist belongs to the same company as the config """
company1_pricelist = self.env["product.pricelist"].create({
"name": "company 1 pricelist",
"currency_id": self.currency.id,
"company_id": self.company1.id,
"sequence": 2,
})
# make sure this doesn't pick the company2 pricelist
new_config = self.env["pos.config"].create({
"name": "usd config"
})
self.assertEqual(new_config.pricelist_id, company1_pricelist,
"POS config incorrectly has pricelist %s" % new_config.pricelist_id.display_name)
def test_default_pricelist_without_company(self):
""" Verify that a default pricelist without a company works """
universal_pricelist = self.env["product.pricelist"].create({
"name": "universal pricelist",
"currency_id": self.currency.id,
"sequence": 2,
})
# make sure this doesn't pick the company2 pricelist
new_config = self.env["pos.config"].create({
"name": "usd config"
})
self.assertEqual(new_config.pricelist_id, universal_pricelist,
"POS config incorrectly has pricelist %s" % new_config.pricelist_id.display_name)
| python |
class TensorflowModelWrapper:
def __init__(self):
self._model = None
def set_model(self, model):
self._model = model
def forward(self, input_):
return self._model.predict(input_)
def __call__(self, *args, **kwargs):
return self._model.predict(*args, **kwargs)
| python |
"""
Partial Entropy Decomposition with the Hcs measure from Ince (2017)
https://arxiv.org/abs/1702.01591
"""
from __future__ import division
import numpy as np
from itertools import combinations
from .pid import BasePID
from .lattice import pid_lattice
from .. import modify_outcomes
from ..algorithms import maxent_dist
from ..multivariate import entropy
from ..utils import flatten, powerset
def h_cs(d, inputs, output=None):
"""
Compute H_cs, the average of positive pointwise co-information values
Parameters
----------
d : Distribution
        The distribution to compute h_cs for.
inputs : iterable of iterables
The input variables.
Returns
-------
hcs : float
The value of H_cs.
"""
var_map = {var: i for i, var in enumerate(inputs)}
vars = list(sorted(var_map.values()))
d = d.coalesce(inputs)
n_variables = d.outcome_length()
# pairwise marginal maxent
if n_variables > 2:
marginals = list(combinations(range(n_variables), 2))
d = maxent_dist(d, marginals)
d = modify_outcomes(d, lambda o: tuple(o))
# calculate pointwise co-information
sub_vars = [var for var in powerset(vars) if var]
sub_dists = {var: d.marginal(var) for var in sub_vars}
coinfos = {}
for e in d.outcomes:
coinfos[e] = 0.0
for sub_var in sub_vars:
P = sub_dists[sub_var][tuple([e[i] for i in flatten(sub_var)])]
coinfos[e] = coinfos[e] + np.log2(P)*((-1) ** (len(sub_var)))
# sum positive pointwise terms
hcs = sum(d[e] * coinfos[e] for e in d.outcomes if coinfos[e] > 0.0)
return hcs
class PED_CS(BasePID):
"""
The change in surprisal partial entropy decomposition, as defined by Ince (2017).
https://arxiv.org/abs/1702.01591
"""
_name = "H_cs"
_measure = staticmethod(h_cs)
_red_string = "H_r"
_pi_string = "H_d"
def __init__(self, dist, inputs=None, **kwargs):
"""
Parameters
----------
dist : Distribution
The distribution to compute the decomposition on.
inputs : iter of iters, None
The set of variables to include. If None, `dist.rvs` is used.
"""
self._dist = dist
if inputs is None:
inputs = dist.rvs
self._kwargs = kwargs
self._inputs = tuple(map(tuple, inputs))
self._output = None
self._lattice = pid_lattice(self._inputs)
self._total = entropy(self._dist, rvs=self._inputs)
self._compute()
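# Hypothetical usage sketch (assumes the parent dit package is installed; the
# distribution below is the XOR example often used for PID/PED illustrations):
#
#     from dit import Distribution
#     d = Distribution(['000', '011', '101', '110'], [0.25] * 4)
#     ped = PED_CS(d, inputs=((0,), (1,), (2,)))
#     print(ped)  # partial entropy lattice evaluated with the H_cs measure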
| python |
from unittest import TestCase
import numpy as np
from pyfibre.tests.probe_classes.utilities import generate_probe_graph
from pyfibre.tests.dummy_classes import DummyGraphSegment
from pyfibre.tests.probe_classes.objects import ProbeGraphSegment
class TestBaseGraphSegment(TestCase):
def setUp(self):
self.graph = generate_probe_graph()
self.graph_segment = ProbeGraphSegment()
def test__getstate__(self):
status = self.graph_segment.to_json()
self.assertIn('shape', status)
self.assertDictEqual(
status['graph'],
{'directed': False,
'graph': {},
'links': [{'r': 1.4142135623730951,
'source': 2, 'target': 3},
{'r': 1.4142135623730951,
'source': 3, 'target': 4},
{'r': 1, 'source': 4, 'target': 5}],
'multigraph': False,
'nodes': [{'xy': [0, 0], 'id': 2},
{'xy': [1, 1], 'id': 3},
{'xy': [2, 2], 'id': 4},
{'xy': [2, 3], 'id': 5}]
}
)
def test_deserialise(self):
status = self.graph_segment.to_json()
new_graph_segment = DummyGraphSegment.from_json(status)
status = new_graph_segment.to_json()
self.assertDictEqual(
status['graph'],
{'directed': False,
'graph': {},
'links': [{'r': 1.4142135623730951,
'source': 2, 'target': 3},
{'r': 1.4142135623730951,
'source': 3, 'target': 4},
{'r': 1, 'source': 4, 'target': 5}],
'multigraph': False,
'nodes': [{'xy': [0, 0], 'id': 2},
{'xy': [1, 1], 'id': 3},
{'xy': [2, 2], 'id': 4},
{'xy': [2, 3], 'id': 5}]
}
)
def test_network_init(self):
self.assertEqual(4, self.graph_segment.number_of_nodes)
self.assertListEqual(
[2, 3, 4, 5], self.graph_segment.node_list)
self.assertEqual(3, self.graph_segment.graph.size())
self.assertTrue(
np.allclose(np.array([1, 1]),
self.graph_segment.graph.nodes[3]['xy']))
self.assertAlmostEqual(
np.sqrt(2), self.graph_segment.graph.edges[3, 4]['r'])
self.assertTrue(np.allclose(
np.array([[0, 0],
[1, 1],
[2, 2],
[2, 3]]),
self.graph_segment.node_coord))
def test_network_segment(self):
segment = self.graph_segment.region
self.assertEqual(
(3, 4), self.graph_segment.region.image.shape)
self.assertEqual(12, segment.area)
with self.assertRaises(AttributeError):
_ = segment.intensity_image
self.graph_segment._iterations = 0
self.graph_segment._area_threshold = 0
self.graph_segment._sigma = None
segment = self.graph_segment.region
self.assertEqual((3, 4), segment.image.shape)
self.assertEqual(4, segment.area)
self.graph_segment.image = np.ones((5, 5)) * 2
segment = self.graph_segment.region
self.assertEqual((3, 4), segment.image.shape)
self.assertEqual((3, 4), segment.intensity_image.shape)
def test_add_node_edge(self):
self.graph_segment.add_node(6)
self.assertEqual(5, self.graph_segment.number_of_nodes)
self.graph_segment.add_edge(6, 2)
self.assertEqual(4, self.graph_segment.graph.size())
| python |
"""Hack route cipher sent by Abraham Lincoln."""
from itertools import combinations
from src.ch03.c1_anagram_generator import split
def get_factors(integer: int) -> list:
"""Get factors of integer.
Calculate factors of a given integer.
Args:
integer (int): Number to get factors of.
Returns:
List of integer factors of **integer**.
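    Example:
        >>> from src.ch04.practice.p1_hack_lincoln import get_factors
        >>> get_factors(28)
        [1, 2, 4, 7, 14, 28]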
"""
result = []
# A factor will always be less than or equal to sqrt(integer).
for i in range(1, int(integer ** 0.5) + 1):
if integer % i == 0:
result.append(i)
# If you have one factor, the other is integer / factor
result.append(integer // i)
    return sorted(list(set(result)))  # de-duplicate factors (perfect squares add the same factor twice)
def keygen(length: int) -> list:
"""Generate all possible route cipher keys.
Generates a list of all possible route cipher keys of **length**.
Args:
length (int): Length of route cipher key.
Returns:
List of lists of integers representing all possible route cipher keys
of **length**.
Example:
>>> from src.ch04.practice.p1_hack_lincoln import keygen
>>> keygen(2)
[[-1, -2], [-1, 2], [1, -2], [1, 2]]
"""
result = []
master_key = range(1, length + 1)
# Get all possible combinations of direction (pos/neg) of length
combs = set(combinations([-1, 1] * length, length)) # Remove repeats
for comb in combs:
result.append([sign * key for sign, key in zip(comb, master_key)])
result.sort() # Sort for test consistency.
return result
def decode_route(keys: list, cipherlist: list) -> list:
"""Decode route cipher.
Decode **cipherlist** encoded with a route cipher using **keys**.
Args:
keys (list): List of signed, integer keys.
cipherlist (list): List of strings representing encoded message.
Returns:
List of strings representing plaintext message.
Note:
Assumes vertical encoding route.
"""
table, message = [], []
split_list = split(cipherlist, len(keys))
rows = len(split_list[0])
# Build translation table.
for key in keys:
if key < 0:
# If negative, reverse direction
split_list[0].reverse()
table.append(split_list[0])
del split_list[0]
# For each column in the table, copy the relevant row.
for row in range(rows):
for column in table:
message.append(column[row])
return message
def hack_route(ciphertext: str) -> None:
"""Hack route cipher.
Hack route cipher by using :func:`get_factors` to find all possible key
lengths. Then use :func:`keygen` to generate all possible keys and pass
each one through :func:`decode_route`.
Args:
ciphertext (str): Message encoded with route cipher.
Returns:
None. Prints all possible decoded messages.
"""
cipherlist = ciphertext.split()
# Get all possible key lengths.
factors = get_factors(len(cipherlist))
for factor in factors:
# Get all possible keys.
if any([factor == 1, factor == len(cipherlist)]):
# Key length of 1 is the full cipherlist and key length of
# cipherlist length is one word per column.
continue
keys = keygen(factor)
for key in keys:
# Use each key to decode route cipher.
message = ' '.join(decode_route(key, cipherlist))
print(f'Key: {key}\nDecoded message: {message}\n')
def main():
"""Demonstrate hack of Lincoln's route cipher."""
print('I can do a brute-force hack of a route cipher sent by '
'Abraham Lincoln,\nand I do a better job than he did in that dumb '
'zombie movie.')
print('\nNote: I only hack the route cipher. I leave the '
'word-transposition\ncipher to you and your biochemical brain.\n')
ciphertext = """THIS OFF DETAINED ASCERTAIN WAYLAND CORRESPONDENTS OF AT
WHY AND IF FILLS IT YOU GET THEY NEPTUNE THE TRIBUNE PLEASE ARE THEM CAN
UP"""
print(f'Hacking: {ciphertext}\n')
hack_route(ciphertext)
if __name__ == '__main__':
main()
| python |
from django.core.files.storage import FileSystemStorage
class MediaStorage(FileSystemStorage):
pass
class ZarrStorage(FileSystemStorage):
pass
class FilesStorage(FileSystemStorage):
pass
class LocalStorage():
media = MediaStorage
zarr = ZarrStorage
files = FilesStorage | python |
from django.urls import path, include
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.Home.as_view()),
path('posts/', include([
path('create/', views.CriarPost.as_view(), name='criar-post'),
path('<slug:titulo>/', views.VerPost.as_view(), name="ver-post"),
])),
path('tags/', include([
path('<str:nome>/', views.VerPostsTag.as_view(), name="ver-posts-tag")
]))
] | python |
import keras
import keras.backend as K
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential, Model
from keras.layers import \
Dense, Activation, Conv2D, MaxPool2D, Dropout, Flatten, Input, Reshape, LSTM, Embedding, RepeatVector,\
TimeDistributed, Bidirectional, Concatenate, Lambda, SpatialDropout1D, Softmax
from keras.optimizers import Adam
from tensorflow.python.client import device_lib
from keras.utils import multi_gpu_model
import tensorflow as tf
from sklearn import datasets
from tqdm import tqdm
import math, sys, os, random
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from keras.layers import Conv2DTranspose, MaxPooling2D, UpSampling2D, Cropping2D
from keras.engine.topology import Layer
from keras.utils import to_categorical
from tensorboardX import SummaryWriter
import util
INDEX_FROM = 3
CHECK = 5
def generate_seq(model : Model, seed, size, temperature=1.0):
ls = seed.shape[0]
# Due to the way Keras RNNs work, we feed the model the
    # whole sequence each time, constantly sampling the next word.
# It's a little bit inefficient, but that doesn't matter much when generating
tokens = np.concatenate([seed, np.zeros(size - ls)])
for i in range(ls, size):
probs = model.predict(tokens[None,:])
# Extract the i-th probability vector and sample an index from it
next_token = util.sample_logits(probs[0, i-1, :], temperature=temperature)
tokens[i] = next_token
return [int(t) for t in tokens]
def sparse_loss(y_true, y_pred):
return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
def go(options):
slength = options.max_length
top_words = options.top_words
lstm_hidden = options.lstm_capacity
print('devices', device_lib.list_local_devices())
tbw = SummaryWriter(log_dir=options.tb_dir)
if options.seed < 0:
seed = random.randint(0, 1000000)
print('random seed: ', seed)
np.random.seed(seed)
else:
np.random.seed(options.seed)
if options.task == 'file':
dir = options.data_dir
x, x_vocab_len, x_word_to_ix, x_ix_to_word = \
util.load_sentences(options.data_dir, vocab_size=top_words, limit=options.limit)
# Finding the length of the longest sequence
x_max_len = max([len(sentence) for sentence in x])
print('max sequence length ', x_max_len)
print(len(x_ix_to_word), 'distinct words')
x = util.batch_pad(x, options.batch)
def decode(seq):
return ' '.join(x_ix_to_word[id] for id in seq)
elif options.task == 'europarl':
dir = options.data_dir
x, x_vocab_len, x_word_to_ix, x_ix_to_word, _, _, _, _ = \
util.load_data(dir+os.sep+'europarl-v8.fi-en.en', dir+os.sep+'europarl-v8.fi-en.fi', vocab_size=top_words, limit=options.limit)
# Finding the length of the longest sequence
x_max_len = max([len(sentence) for sentence in x])
print('max sequence length ', x_max_len)
print(len(x_ix_to_word), 'distinct words')
x = util.batch_pad(x, options.batch)
def decode(seq):
return ' '.join(x_ix_to_word[id] for id in seq)
elif options.task == 'imdb':
# Load only training sequences
(x, _), _ = imdb.load_data(num_words=top_words)
# rm start symbol
x = [l[1:] for l in x]
# x = sequence.pad_sequences(x, maxlen=slength+1, padding='post', truncating='post')
# x = x[:, 1:] # rm start symbol
x = util.batch_pad(x, options.batch)
word_to_id = keras.datasets.imdb.get_word_index()
word_to_id = {k: (v + INDEX_FROM) for k, v in word_to_id.items()}
word_to_id["<PAD>"] = 0
word_to_id["<START>"] = 1
word_to_id["<UNK>"] = 2
word_to_id["???"] = 3
id_to_word = {value: key for key, value in word_to_id.items()}
def decode(seq):
return ' '.join(id_to_word[id] for id in seq)
else:
raise Exception('Task {} not recognized.'.format(options.task))
print('Data Loaded.')
print(sum([b.shape[0] for b in x]), ' sentences loaded')
# for i in range(3):
# print(x[i, :])
# print(decode(x[i, :]))
## Define model
input = Input(shape=(None, ))
embedding = Embedding(top_words, lstm_hidden, input_length=None)
embedded = embedding(input)
decoder_lstm = LSTM(lstm_hidden, return_sequences=True)
h = decoder_lstm(embedded)
if options.extra is not None:
for _ in range(options.extra):
h = LSTM(lstm_hidden, return_sequences=True)(h)
fromhidden = Dense(top_words, activation='linear')
out = TimeDistributed(fromhidden)(h)
model = Model(input, out)
opt = keras.optimizers.Adam(lr=options.lr)
lss = sparse_loss
model.compile(opt, lss)
model.summary()
epochs = 0
instances_seen = 0
while epochs < options.epochs:
for batch in tqdm(x):
n, l = batch.shape
batch_shifted = np.concatenate([np.ones((n, 1)), batch], axis=1) # prepend start symbol
batch_out = np.concatenate([batch, np.zeros((n, 1))], axis=1) # append pad symbol
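            # Teacher forcing: for a row [w1, w2, w3] the input becomes
            # [1, w1, w2, w3] (1 = start symbol) and the target becomes
            # [w1, w2, w3, 0] (0 = pad), so the model learns to predict one step ahead.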
loss = model.train_on_batch(batch_shifted, batch_out[:, :, None])
instances_seen += n
tbw.add_scalar('lm/batch-loss', float(loss), instances_seen)
epochs += options.out_every
# Show samples for some sentences from random batches
for temp in [0.0, 0.7, 1, 1.3, 1.5]:
print('### TEMP ', temp)
for i in range(CHECK):
b = random.choice(x)
if b.shape[1] > 20:
seed = b[0,:20]
else:
seed = b[0, :]
seed = np.insert(seed, 0, 1)
gen = generate_seq(model, seed, 60, temperature=temp)
print('*** [', decode(seed), '] ', decode(gen[len(seed):]))
if __name__ == "__main__":
## Parse the command line options
parser = ArgumentParser()
parser.add_argument("-e", "--epochs",
dest="epochs",
help="Number of epochs.",
default=150, type=int)
parser.add_argument("-E", "--embedding-size",
dest="embedding_size",
help="Size of the word embeddings on the input layer.",
default=300, type=int)
parser.add_argument("-o", "--output-every",
dest="out_every",
help="Output every n epochs.",
default=1, type=int)
parser.add_argument("-l", "--learn-rate",
dest="lr",
help="Learning rate",
default=0.00001, type=float)
parser.add_argument("-b", "--batch-size",
dest="batch",
help="Batch size",
default=32, type=int)
parser.add_argument("-t", "--task",
dest="task",
help="Task",
default='imdb', type=str)
parser.add_argument("-D", "--data-directory",
dest="data_dir",
help="Data directory",
default='./data', type=str)
parser.add_argument("-L", "--lstm-hidden-size",
dest="lstm_capacity",
help="LSTM capacity",
default=256, type=int)
parser.add_argument("-m", "--max_length",
dest="max_length",
help="Max length",
default=None, type=int)
parser.add_argument("-w", "--top_words",
dest="top_words",
help="Top words",
default=10000, type=int)
parser.add_argument("-I", "--limit",
dest="limit",
help="Character cap for the corpus",
default=None, type=int)
parser.add_argument("-T", "--tb-directory",
dest="tb_dir",
help="Tensorboard directory",
default='./runs/lm', type=str)
parser.add_argument("-r", "--random-seed",
dest="seed",
help="RNG seed. Negative for random",
default=1, type=int)
parser.add_argument("-x", "--extra-layers",
dest="extra",
help="Number of extra LSTM layers",
default=None, type=int)
options = parser.parse_args()
print('OPTIONS', options)
go(options) | python |
import asyncio
import ffmpeg
# Reason: Following export method in __init__.py from Effective Python 2nd Edition item 85
from asynccpu import ProcessTaskPoolExecutor # type: ignore
# Reason: Following export method in __init__.py from Effective Python 2nd Edition item 85
from asyncffmpeg import FFmpegCoroutineFactory, StreamSpec # type: ignore
async def create_stream_spec_copy() -> StreamSpec:
stream = ffmpeg.input("input.mp4")
return ffmpeg.output(stream, "output1.mp4", c="copy")
async def create_stream_spec_filter() -> StreamSpec:
stream = ffmpeg.input("input.mp4")
stream = ffmpeg.filter(stream, "scale", 768, -1)
return ffmpeg.output(stream, "output2.mp4")
async def main() -> None:
ffmpeg_coroutine = FFmpegCoroutineFactory.create()
with ProcessTaskPoolExecutor(max_workers=3, cancel_tasks_when_shutdown=True) as executor:
awaitables = (
executor.create_process_task(ffmpeg_coroutine.execute, create_stream_spec)
for create_stream_spec in [create_stream_spec_copy, create_stream_spec_filter]
)
await asyncio.gather(*awaitables)
if __name__ == "__main__":
asyncio.run(main())
| python |
import argparse
import csv
import inspect
import os
import re
import warnings
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from pathlib import Path
from time import time
import pandas as pd
warnings.filterwarnings("ignore")
REPO = Path(__file__).resolve().parents[2]
@contextmanager
def timer(name):
t0 = time()
print(f'[{name}] start')
yield
print(f'[{name}] done in {time() - t0:.0f} s')
def get_arguments(description):
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--force', '-f', action='store_true', help='Overwrite existing files')
return parser.parse_args()
def get_features(namespace):
for k, v in ({k: v for k, v in namespace.items()}).items():
if inspect.isclass(v) and issubclass(v, Feature) and not inspect.isabstract(v):
yield v()
def generate_features(namespace, overwrite):
for f in get_features(namespace):
if f.data_path.exists() and not overwrite:
print(f.name, 'was skipped')
else:
f.run().save()
class Feature(metaclass=ABCMeta):
prefix = ""
suffix = ""
dir = REPO / "data" / "processed"
def __init__(self):
if self.__class__.__name__.isupper():
self.name = self.__class__.__name__.lower()
else:
self.name = re.sub("([A-Z])", lambda x: "_" + x.group(1).lower(), self.__class__.__name__).lstrip('_')
        # To be filled in by the user (i.e. by each subclass's create_features)
self.data = pd.DataFrame()
self.data_path = self.dir / f"{self.name}.pkl"
def run(self):
with timer(self.name):
self.create_features()
prefix = self.prefix + '_' if self.prefix else ''
suffix = '_' + self.suffix if self.suffix else ''
self.data.columns = prefix + self.data.columns + suffix
return self
@abstractmethod
def create_features(self):
raise NotImplementedError
def save(self):
        # Work around "LightGBMError: Do not support special JSON characters in feature name."
self.data.columns = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in self.data.columns]
self.data.to_pickle(str(self.data_path))
def load(self):
self.data = pd.read_pickle(str(self.data_path))
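

# A minimal sketch of how Feature is meant to be subclassed (hypothetical column
# names and input path; adapt to your own data):
#
# class FamilySize(Feature):
#     def create_features(self):
#         train = pd.read_pickle(REPO / "data" / "interim" / "train.pkl")
#         self.data["family_size"] = train["SibSp"] + train["Parch"] + 1
#         create_memo("family_size", "Total number of family members aboard")
#
# generate_features(globals(), overwrite=False) would then build and cache it as
# data/processed/family_size.pkl, skipping the work once the pickle exists.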
def create_memo(col_name, desc):
file_path = Feature.dir / "_features_memo.csv"
    # Hydra changes the current working directory to its log path, so reset it here
os.chdir(os.path.dirname(os.path.abspath(__file__)))
file_path.touch()
with open(file_path, "r+") as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
col = [line for line in lines if line.split(",")[0] == col_name]
if len(col) != 0:
return
writer = csv.writer(f)
writer.writerow([col_name, desc])
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    # Highly correlated variables are dropped later in the correlation analysis, so drop_first=False is fine here
df = pd.get_dummies(
df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
| python |
# -*- coding: utf8 -*-
from ..core.Oracle import Oracle
from ..utils.ColorString import ColorString
from ..utils.utils import *
from .Config import Config
import argparse
import re
import os
def install_jdk():
Oracle.install_jdk()
def uninstall_jdk():
Oracle.execute_uninstall_jdk()
def rsync_server_core_data():
    '''Used to migrate the server core data when moving the server to another host'''
    # Command-line argument parsing; argparse docs: https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser(description='use rsync command to sync the minecraft server core data to other location or host')
# Tool
parser.add_argument('-s', metavar='source', dest='source', help='Specified the source file or dir to sync')
parser.add_argument('-d', metavar='destination', dest='destination', help='Specified the destination dir to sync')
parser.add_argument('-y', '--yes', default=False, action='store_true', help='ask yes when require user select')
args = parser.parse_args()
source = args.source
destination = args.destination
ftp_server_base_dir_name = os.path.basename(Config.game_ftp_server_base_dir())
server_core_data_dir_name = os.path.basename(Config.game_ftp_server_core_data_backup_dir())
server_core_data_dir_path = os.path.join(os.path.expanduser('~'),"%s/%s" % (ftp_server_base_dir_name, server_core_data_dir_name))
if not source and os.path.exists(server_core_data_dir_path):
source = server_core_data_dir_path
if os.path.isdir(server_core_data_dir_path):
source += '/*'
def check_args(source, destination):
if not destination or not source:
print(ColorString.warn('You should provide both source and destination argument for this command, destination can be a (local dir/file) remote host (example: [email protected])'))
exit(-1)
def execute_sync(source, destination, test = True):
check_args(source, destination)
pattern = re.compile(r'\w+@\w+')
dest = destination.strip()
source = source.strip()
match = re.match(pattern, dest)
if match:
ftp_server_base_dir_name = os.path.basename(Config.game_ftp_server_base_dir())
sync_file_dir_name = os.path.basename(source)
if not os.path.exists(source):
segments = list(os.path.split(source))[0:-1]
sync_file_dir_name = os.path.basename(os.path.join(*segments))
dest += ':~/%s/%s' % (ftp_server_base_dir_name,sync_file_dir_name)
else:
dest += ':~/%s/' % (ftp_server_base_dir_name)
rsync_cmd = 'rsync -zarvh %s %s ' % (source, dest)
rsync_cmd += "--exclude 'plugins/dynmap/*'"
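        # The resulting command looks roughly like this (hypothetical paths/host):
        #   rsync -zarvh ~/ftp_base/server_core_data/* user@newhost:~/ftp_base/server_core_data --exclude 'plugins/dynmap/*'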
if test:
rsync_cmd += ' -n'
os.system(rsync_cmd)
if test:
print('\ncommand: %s' % ColorString.confirm(rsync_cmd))
print(ColorString.hint("Run in Fake Mode!"))
check_args(source = source, destination = destination)
execute_sync(source = source, destination = destination, test = True)
confirm = ['Y','y','Yes','yes']
cancel = ['N','n','No','no']
while True:
a = hint(ColorString.confirm('\nAre you confirm want to execute this operation? [%s] ' % ('/'.join(confirm) + '|' + '/'.join(cancel))))
if a in confirm:
execute_sync(source=source, destination = destination, test = False)
break
elif a in cancel:
break
else:
print(ColorString.warn('Your input is invalid, Please try again!'))
| python |
import unittest
import numpy as np
from nptest import nptest
class Test_ShapeBaseTests(unittest.TestCase):
def test_atleast_1d(self):
a = np.atleast_1d(1.0)
print(a)
print("**************")
x = np.arange(9.0).reshape(3,3)
b = np.atleast_1d(x)
print(b)
print("**************")
c = np.atleast_1d(1, [3,4])
print(c)
def test_atleast_2d(self):
a = np.atleast_2d(1.0)
print(a)
print("**************")
x = np.arange(9.0).reshape(3,3)
b = np.atleast_2d(x)
print(b)
print("**************")
c = np.atleast_2d(1, [3,4], [5.6])
print(c)
def test_atleast_3d(self):
a = np.atleast_3d(1.0)
print(a)
print("**************")
x = np.arange(9.0).reshape(3,3)
b = np.atleast_3d(x)
print(b)
print("**************")
c = np.atleast_3d([1,2], [[3,4]], [[5,6]])
print(c)
for arr in c:
print(arr, arr.shape)
def test_vstack_1(self):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
c = np.vstack((a,b))
print(c)
def test_vstack_2(self):
a = np.array([[1], [2], [3]])
b = np.array([[2], [3], [4]])
c = np.vstack((a,b))
print(c)
def test_hstack_1(self):
a = np.array((1, 2, 3))
b = np.array((2, 3, 4))
c = np.hstack((a,b))
print(c)
def test_hstack_2(self):
a = np.array([[1],[2],[3]])
b = np.array([[2],[3],[4]])
c = np.hstack((a,b))
print(c)
def test_stack_1(self):
a = np.array([[1],[2],[3]])
b = np.array([[2],[3],[4]])
c = np.stack((a,b), axis=0)
print(c)
print("**************")
d = np.stack((a,b), axis=1)
print(d)
print("**************")
e = np.stack((a,b), axis=2)
print(e)
def test_block_1(self):
A = np.eye(2) * 2
B = np.eye(3) * 3
C = np.block([[A, np.zeros((2, 3))], [np.ones((3, 2)), B]])
print(C)
def test_block_2(self):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
c = np.block([a, b, 10]) # hstack([a, b, 10])
print(c)
print("**************")
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
c = np.block([[a], [b]]) # vstack([a, b])
print(c)
def test_expand_dims_1(self):
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]).reshape(2,-1, 2)
b = np.expand_dims(a, axis=0)
print(b)
print("**************")
c = np.expand_dims(a, axis=1)
print(c)
print("**************")
d = np.expand_dims(a, axis=2)
print(d)
def test_column_stack_1(self):
a = np.array((1, 2, 3))
b = np.array((2, 3, 4))
c = np.column_stack((a, b))
print(c)
print("**************")
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
c = np.column_stack([a, b])
print(c)
def test_row_stack_1(self):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
c = np.row_stack((a,b))
print(c)
def test_dstack_1(self):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
c = np.dstack((a,b))
print(c)
print("**************")
a = np.array([[1], [2], [3]])
b = np.array([[2], [3], [4]])
c = np.dstack((a,b))
print(c)
def test_array_split_1(self):
x = np.arange(8.0)
y = np.array_split(x, 3)
print(y)
print("**************")
x = np.arange(7.0)
y = np.array_split(x, 3)
print(y)
def test_array_split_2(self):
x = np.arange(16.0).reshape(2,8,1)
y = np.array_split(x, 3, axis=0)
print(y)
print("**************")
x = np.arange(16.0).reshape(2,8,1)
y = np.array_split(x, 3, axis=1)
print(y)
print("**************")
x = np.arange(16.0).reshape(2,8,1)
y = np.array_split(x, 3, axis=2)
print(y)
def test_split_1(self):
x = np.arange(9.0)
y = np.split(x, 3)
print(y)
print("**************")
x = np.arange(8.0)
y = np.split(x, [3,5,6,10])
print(y)
def test_split_2(self):
x = np.arange(16.0).reshape(8,2,1)
y = np.split(x, [2,3], axis=0)
print(y)
print("**************")
x = np.arange(16.0).reshape(8,2,1)
y = np.split(x, [2,3], axis=1)
print(y)
print("**************")
x = np.arange(16.0).reshape(8,2,1)
y = np.split(x, [2,3], axis=2)
print(y)
def test_hsplit_1(self):
x = np.arange(16).reshape(4,4)
y = np.hsplit(x, 2)
print(y)
print("**************")
x = np.arange(16).reshape(4,4)
y = np.hsplit(x, [3,6])
print(y)
def test_hsplit_2(self):
x = np.arange(8).reshape(2,2,2)
y = np.hsplit(x, 2)
print(y)
print("**************")
x = np.arange(8).reshape(2,2,2)
y = np.hsplit(x, [3,6])
print(y)
def test_vsplit_1(self):
x = np.arange(16).reshape(4,4)
y = np.vsplit(x, 2)
print(y)
print("**************")
x = np.arange(16).reshape(4,4)
y = np.vsplit(x, [3,6])
print(y)
def test_vsplit_2(self):
x = np.arange(8).reshape(2,2,2)
y = np.vsplit(x, 2)
print(y)
print("**************")
x = np.arange(8).reshape(2,2,2)
y = np.vsplit(x, [3,6])
print(y)
def test_dsplit_1(self):
x = np.arange(16).reshape(2,2,4)
y = np.dsplit(x, 2)
print(y)
print("**************")
x = np.arange(16).reshape(2,2,4)
y = np.dsplit(x, [3,6])
print(y)
def test_kron_1(self):
a = np.kron([1,10,100], [5,6,7])
print(a)
print("*******")
b = np.kron([5,6,7], [1,10,100])
print(b)
print("*******")
x = np.array([[2,3],[4,5]])
y = np.array([[5,6],[7,8]])
c = np.kron(x,y)
print(c)
print(c.shape)
print("*******")
c = np.kron(np.eye(2, dtype=np.int32), np.ones((2,2), dtype=np.int32))
print(c)
print(c.shape)
print("*******")
x = np.array([[[2,3,3],[4,5,3]]])
y = np.array([[[5,6,6,6],[7,8,6,6]]])
c = np.kron(x,y)
print(c)
print(c.shape)
print("*******")
d = np.kron(np.ones((5,7,9, 11), dtype=np.int32), np.ones((3,4, 6, 8), dtype=np.int32))
print(d.shape)
def test_kron_2(self):
a = np.arange(100).reshape((2,5,2,5))
b = np.arange(24).reshape((2,3,4))
c = np.kron(a,b)
print(c.shape)
d = c.sum()
print(d)
def test_tile_1(self):
a = np.array([0, 1, 2])
b = np.tile(a, 2)
print(b)
print("**************")
c = np.tile(a, (2,2))
print(c)
print("**************")
d = np.tile(a, (2,1,2))
print(d)
e = np.arange(100).reshape((2,5,2,5))
f = np.tile(e, (2,1,2))
print(f.shape)
def test_tile_2(self):
a = np.array([[1, 2], [3, 4]])
b = np.tile(a, 2)
print(b)
print("**************")
c = np.tile(a, (2, 1))
print(c)
print("**************")
d = np.array([1,2,3,4])
e = np.tile(d,(4,1))
print(e)
def test_apply_along_axis_1(self):
def my_func(a):
            # Average first and last element of a 1-D array
return (a[0] + a[-1]) * 0.5
def my_func2(a):
            # Multiply the first element of a 1-D array by 10
return (a[0] * 10)
b = np.array([[1,2,3], [4,5,6], [7,8,9]])
c = np.apply_along_axis(my_func2, 0, b)
print(c)
d = np.apply_along_axis(my_func, 1, b);
print(d)
print(b)
def test_apply_along_axis_2(self):
b = np.array([[[8,1,7], [4,3,9], [5,2,6]]])
c = np.apply_along_axis(sorted, 1, b)
print(c)
c = np.apply_along_axis(sorted, 0, b[:,0,0])
print(c)
c = np.apply_along_axis(sorted, 0, b[0,:,0])
print(c)
c = np.apply_along_axis(sorted, 0, b[0,0,:])
print(c)
def test_apply_along_axis_3(self):
b = np.array([[1,2,3], [4,5,6], [7,8,9]])
c = np.diag(b)
c = np.apply_along_axis(np.diag, 1, b)
print(c)
def test_apply_over_axes_1(self):
a = np.arange(24).reshape(2,3,4)
# print(a)
        # Sum over axes 0 and 2. The result has the same number of dimensions as the original array:
b = np.apply_over_axes(np.sum, a, [0,2])
print(b)
print("");
print("*******")
print("");
# Tuple axis arguments to ufuncs are equivalent:
c = np.sum(a, axis=(0,2), keepdims=True)
print(c)
print("");
print("*******")
print("");
d = np.sum(a, axis=0, keepdims=True)
print(d)
print("");
print("*******")
print("");
e = np.sum(a, axis=2, keepdims=True)
print(e)
if __name__ == '__main__':
unittest.main()
| python |
import os
from behave import *
from copy import deepcopy
from lxml import etree
import tempfile
import uuid
import logging
from pds_doi_service.core.entities.exceptions import InputFormatException, CriticalDOIException
from pds_doi_service.core.util.doi_xml_differ import DOIDiffer
from pds_doi_service.core.actions.draft import DOICoreActionDraft
from pds_doi_service.core.actions.reserve import DOICoreActionReserve
from pds_doi_service.core.actions.release import DOICoreActionRelease
from pds_doi_service.core.outputs.osti.osti_web_client import DOIOstiWebClient
from pds_doi_service.core.util.config_parser import DOIConfigUtil
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global flag to submit the DOI to OSTI or not after it has been built.
g_submit_flag = True
g_submit_flag = False
def get_temporary_output_filename(extension='xml'):
return os.path.join(tempfile.gettempdir(), f'{str(uuid.uuid4())}.{extension}')
def save_doi_to_temporary_file(doi_label):
# Save doi_label to disk so it can be compared to reference in next step.
temporary_file_name = get_temporary_output_filename()
with open(temporary_file_name,"w+") as f:
f.write(doi_label + "\n")
return temporary_file_name
def replace_lidvid_in_file(input_file, lid, extension='csv'):
input_value_with_random_lidvid = get_temporary_output_filename(extension=extension)
with open(input_file, 'r') as f_in:
with open(input_value_with_random_lidvid, 'w') as f_out:
for line in f_in.readlines():
f_out.write(line.replace('{{random_lid}}', lid))
return input_value_with_random_lidvid
def draft_action_run(node_value,input_value, lid=None):
# Helper function to 'draft' a given input_value and write the DOI to a temporary file.
# This file will be available for other validation functions.
db_name = 'doi_temp.db'
_action = DOICoreActionDraft(db_name=db_name)
logger.info(f"input_value {input_value}")
if lid:
input_value = replace_lidvid_in_file(input_value, lid, extension='xml')
o_doi_label = _action.run(input=input_value,
node=node_value,
submitter='my_user@my_node.gov',force=True)
# Save o_doi_label to disk so it can be compared to reference in next step
logger.info(f"success input_value {input_value}")
return save_doi_to_temporary_file(o_doi_label)
def reserve_action_run(node_value,input_value, lid=None):
# Helper function to 'reserve' a given input_value.
logger.info(f"when node_value,input_value {node_value,input_value}")
db_name = 'doi_temp.db'
_action = DOICoreActionReserve(db_name=db_name)
if lid:
input_value = replace_lidvid_in_file(input_value, lid, extension='csv')
o_doi_label = _action.run(
input=input_value,
node=node_value, submitter='my_user@my_node.gov',
dry_run=True, force=True)
return save_doi_to_temporary_file(o_doi_label)
def release_action_run(node_value, input_value):
try:
db_name = 'doi_temp.db'
release_action = DOICoreActionRelease(db_name=db_name)
released_doi_str = release_action.run(input=input_value, node=node_value,
submitter='my_user@my_node.gov', force=True)
return save_doi_to_temporary_file(released_doi_str)
except Exception as e:
raise
def file_output_compare(output_file, ref_output_value):
# Function compare two XML files created from 'draft' or 'reserve' actions.
# Assumption(s):
# 1. The name of the new XML file is defined in get_temporary_output_filename().
# 2. The name of the reference name is ref_output_value
logger.info(f"output_file,ref_output_value {output_file},{ref_output_value}")
o_fields_differ_list, o_values_differ_list, o_record_index_differ_list = DOIDiffer.doi_xml_differ(ref_output_value,
output_file)
logger.info(f'different fields are {o_fields_differ_list}')
logger.info(f'o_fields_differ_list {o_fields_differ_list}')
logger.info(f'o_values_differ_list {o_values_differ_list}')
logger.info(f'o_record_index_differ_list {o_record_index_differ_list}')
    assert len(o_fields_differ_list) == 0
return 1
@given('a valid input at {input_value}')
def given_valid_action_input(context, input_value):
logger.info(f"given {input_value}")
context.input_value = input_value # Don't forget to set the input_value in context to be available for other functions.
@given('an invalid PDS4 label at {input_value}')
def given_invalid_pds4(context, input_value):
logger.info(f'an invalid reserve PDS4 label at input_value {input_value}')
context.input_value = input_value # Don't forget to set the input_value in context to be available for other functions.
@given('random new lid')
def given_random_new_lid(context):
context.random_lid = f'urn:nasa:pds:{uuid.uuid4()}'
@when('create draft DOI for node {node_value}')
def when_create_draft_impl(context, node_value):
logger.info(f"when create DOI draft ")
logger.info(f"input_value {context.input_value}")
try:
if not hasattr(context, 'output_files'):
context.output_files = []
new_draft_output = draft_action_run(node_value,
context.input_value,
lid=context.random_lid if hasattr(context, 'random_lid') else None)
context.output_files.append(new_draft_output)
except CriticalDOIException as e:
logger.info(str(e))
context.exception_msg = str(e)
@then('a reading error report is generated for {input_value}')
def step_an_error_report_is_generated_impl(context, input_value):
assert hasattr(context, 'exception_msg')
assert context.exception_msg == f'Error reading file {input_value}'
@when('reserve DOI in OSTI format at {node_value}')
def step_when_reserve_doi_in_osti_format_impl(context, node_value):
input_value = context.input_value
logger.info(f"when context {context}")
logger.info(f"when input_value {input_value}")
try:
if not hasattr(context, 'output_files'):
context.output_files = []
new_reserve_file = reserve_action_run(node_value,input_value)
context.output_files.append(new_reserve_file)
except InputFormatException as e:
# Save the error message to context.exception_msg so the function step_an_error_report_is_generated_impl has something to check
logger.info(f"Expecting InputFormatException from input_value {input_value}")
context.exception_msg = str(e)
logger.error(e)
except CriticalDOIException as e:
logger.info(f"CRITICAL {e}")
logger.info(f"Expecting CriticalDOIException from input_value {input_value}")
logger.info(str(e))
# Save the error message to context.exception_msg so the function step_an_error_report_is_generated_impl has something to check
context.exception_msg = str(e)
logger.info(f"context.failed {context.failed}")
@then('OSTI DOI label is created from {input_value} for node {node_value}')
def step_then_osti_doi_label_is_created_impl(context,node_value,input_value):
logger.info(f"when context {context}")
logger.info(f"when input_value {input_value}")
try:
if not hasattr(context, 'output_files'):
context.output_files = []
        reserve_output_file = reserve_action_run(node_value,
                                                 input_value,
                                                 lid=context.random_lid if hasattr(context, 'random_lid') else None)
        context.output_files.append(reserve_output_file)
except InputFormatException as e:
logger.error(e)
except CriticalDOIException as e:
logger.info(f"CRITICAL {e}")
logger.info(f"Expecting CriticalDOIException from input_value {input_value}")
logger.info(f"context.failed {context.failed}")
@then(u'The OSTI DOI is submitted to the OSTI server')
def step_doi_label_is_submitted_impl(context):
doi_config_util = DOIConfigUtil()
m_config = doi_config_util.get_config()
# Fetch the content of payload_filename into memory and change the status from status="reserved_not_submitted"
# to status="Reserved".
payload_doc = etree.parse(context.output_files[0])
payload_root = payload_doc.getroot()
# Make a new root with modified 'status' attribute to 'Reserved'
out_root = etree.Element("records")
for element in payload_root.iter():
if element.tag == 'record':
new_element = deepcopy(element)
new_element.attrib['status'] = 'Reserved'
out_root.append(new_element)
etree.indent(out_root,space=" ")
# The payload is now ready to be submitted to OSTI.
if g_submit_flag:
doi, response_str = DOIOstiWebClient().submit_content(
payload=etree.tostring(out_root)
)
else:
logger.info(f"g_submit_flag is False")
@when('reference record is drafted for node {node_value} from {input_subdir}')
def when_reference_is_drafted_from_impl(context, node_value, input_subdir):
input_dir = os.path.join(context.transaction_dir, input_subdir)
if not hasattr(context, 'output_files'):
context.output_files = []
new_draft_file = draft_action_run(node_value,
input_dir,
lid=context.random_lid if hasattr(context, 'random_lid') else None)
context.output_files.append(new_draft_file)
@given('reference transactions in {transaction_dir}')
def given_reference_dir_impl(context,transaction_dir):
context.transaction_dir = transaction_dir
@when('reference record is reserved for node {node_value} with {input_value}')
def step_reference_is_reserved_at_input_impl(context, node_value, input_value):
transaction_dir = context.transaction_dir
input_dir = os.path.join(transaction_dir,input_value)
if not hasattr(context, 'output_files'):
context.output_files = []
context.output_files.append(reserve_action_run(node_value,input_dir,
lid=context.random_lid if hasattr(context, 'random_lid') else None))
@then('produced osti record is similar to reference osti {ref_output_value}')
def step_produced_osti_record_is_similiar_to_reference_osti_impl(context, ref_output_value):
if hasattr(context, 'transaction_dir'):
ref_output_value = os.path.join(context.transaction_dir, ref_output_value)
logger.info(f"context.transaction_dir {context.transaction_dir}")
logger.info(f"context.output_files {context.output_files}")
logger.info(f"ref_output_value {ref_output_value}")
file_output_compare(context.output_files[0], ref_output_value)
@when('submit osti record for {node_value}')
def submit_osti_record(context, node_value):
try:
context.output_files[-1] = release_action_run(node_value, context.output_files[-1])
logger.info(f'record in file {context.output_files[-1]} submitted from output index {len(context.output_files)}')
except CriticalDOIException as e:
context.exception_msg = str(e)
@then('lidvid already submitted exception is raised')
def step_lidvid_already_submitted_exception_is_raised(context):
assert hasattr(context, 'exception_msg')
logger.info(f'grab first created doi from file {context.output_files}')
reserved_xml = etree.parse(context.output_files[0])
reserved_doi = reserved_xml.xpath('/records/record/doi')[0].text
    expected_exception_msg = f'There is already a DOI {reserved_doi} submitted for this lidvid {context.random_lid}::1.0 (status=Pending). You cannot submit a new DOI for the same lidvid.'
    logger.info(f'expected message {expected_exception_msg}')
    logger.info(f'found msg is {context.exception_msg}')
    assert context.exception_msg == expected_exception_msg
| python |
#!/usr/bin/python
# Copyright 2018 Blade M. Doyle
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Watches the blockchain for new blocks
# Request chain height from grin core every x seconds.
# If the height increased request each block from grin core.
# Adds them to the database.
# This keeps a record of each block *as we see it* (before any chain reorgs).
import sys
import traceback
import requests
import json
import atexit
from time import sleep
from datetime import datetime
import pymysql
import sqlalchemy
from grinlib import lib
from grinlib import grin
from grinbase.model.blocks import Blocks
PROCESS = "blockWatcher"
LOGGER = None
CONFIG = None
BATCHSZ = 100
def main():
global CONFIG
global LOGGER
CONFIG = lib.get_config()
LOGGER = lib.get_logger(PROCESS)
LOGGER.warn("=== Starting {}".format(PROCESS))
# Connect to DB
database = lib.get_db()
atexit.register(lib.teardown_db)
# Get Config
check_interval = float(CONFIG[PROCESS]["check_interval"])
# Find the height of the latest block
current_height = grin.blocking_get_current_height()
while current_height <= 0:
LOGGER.warn("Waiting for first block height")
sleep(10)
current_height = grin.blocking_get_current_height()
LOGGER.warn("current_height = {}".format(current_height))
latest_block = Blocks.get_latest()
if latest_block is None:
last_height = current_height - 1500
else:
last_height = latest_block.height
LOGGER.warn("last_height = {}".format(last_height))
height = last_height + 1
height = max(current_height-1500, height)
LOGGER.warn("Starting at block height: {}".format(height))
while True:
try:
latest = grin.blocking_get_current_height()
LOGGER.warn("latest: {}, height: {}".format(latest, height))
while latest >= height:
response = grin.blocking_get_block_by_height(height)
LOGGER.warn("New Block: {} at {}".format(response["header"]["hash"],
response["header"]["height"]))
try:
new_block = Blocks(hash = response["header"]["hash"],
version = response["header"]["version"],
height = response["header"]["height"],
previous = response["header"]["previous"],
timestamp = datetime.strptime(response["header"]["timestamp"][:-1], "%Y-%m-%dT%H:%M:%S+00:0"),
output_root = response["header"]["output_root"],
range_proof_root = response["header"]["range_proof_root"],
kernel_root = response["header"]["kernel_root"],
nonce = response["header"]["nonce"],
edge_bits = response["header"]["edge_bits"],
total_difficulty = response["header"]["total_difficulty"],
secondary_scaling = response["header"]["secondary_scaling"],
num_inputs = len(response["inputs"]),
num_outputs = len(response["outputs"]),
num_kernels = len(response["kernels"]),
fee = sum(k["fee"] for k in response["kernels"]),
lock_height = response["kernels"][0]["lock_height"] if(len(response["kernels"])>0) else 0,
total_kernel_offset = response["header"]["total_kernel_offset"],
state = "new")
# Batch inserts when catching up
database.db.getSession().add(new_block)
if( (height % BATCHSZ == 0) or (height >= (latest-10)) ):
database.db.getSession().commit()
height = height + 1
except (sqlalchemy.exc.IntegrityError, pymysql.err.IntegrityError):
LOGGER.warn("Attempted to re-add block: {}".format(response["header"]["height"]))
database.db.getSession().rollback()
latest_block = Blocks.get_latest()
height = latest_block.height + 1
sleep(check_interval)
sys.stdout.flush()
sleep(check_interval)
except Exception as e:
LOGGER.exception("Something went wrong: {}".format(repr(e)))
database.db.getSession().rollback()
sys.stdout.flush()
sleep(check_interval)
# Should never get here, but....
LOGGER.warn("=== Completed {}".format(PROCESS))
if __name__ == "__main__":
main()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 19:05:19 2018
@author: myoussef
"""
import ringity
import unittest
import numpy as np
import networkx as nx
class TestConversions(unittest.TestCase):
def test_ddict2dict2ddict_unweighted(self):
E = nx.erdos_renyi_graph(100,0.17)
d = dict(E.edges)
dd = ringity.methods.dict2ddict(d)
ddd = ringity.methods.ddict2dict(dd)
dddd = ringity.methods.dict2ddict(ddd)
self.assertEqual(dd, dddd)
def test_ddict2dict2ddict_weighted(self):
E = nx.erdos_renyi_graph(100,0.17)
for (u, v) in E.edges():
E[u][v]['weight'] = np.random.uniform(-1,1)
d = dict(E.edges)
dd = ringity.methods.dict2ddict(d)
ddd = ringity.methods.ddict2dict(dd)
dddd = ringity.methods.dict2ddict(ddd)
self.assertEqual(dd, dddd)
if __name__ == '__main__':
unittest.main()
| python |
ballresponse = [
'Yes', 'No', 'Take a wild guess...', 'Very doubtful',
'Sure', 'Without a doubt', 'Most likely', 'Might be possible',
"You'll be the judge", 'no... (╯°□°)╯︵ ┻━┻', 'no... baka',
'senpai, pls no ;-;'
]
owos = [
"✪w✪", "¤w¤", "∅w∅", "⊗w⊗", "⊕w⊕", "∞w∞", "∆w∆", "θwθ", "δwδ", "①w①", "②w②", "③w③", "④w④", "⑤w⑤", "⑥w⑥", "⑦w⑦", "⑧w⑧", "⑨w⑨",
"⑩w⑩", "⑴w⑴", "⑵w⑵", "⑶w⑶", "⑷w⑷", "⑸w⑸", "⑹w⑹", "⑺w⑺", "⑻w⑻", "⑼w⑼", "⑽w⑽", "●w●", "○w○",
"■w■", "□w□", "★w★", "☆w☆", "◆w◆", "◇w◇", "▷w◁", "◐w◐", "◑w◑", "◐w◑", "◐w◑", "♀w♀", "♂w♂", "♡w♡", "❖w❖", "✞w✞", "©w©", "®w®"
"✧w✧", "✦w✦", "✩w✩", "✫w✫", "✬w✬", "✭w✭", "✮w✮", "✯w✯", "✰w✰", "✱w✱", "✲w✲", "✵w✵", "✶w✶", "✷w✷", ">w0",
"✸w✸", "※w※","↻w↻", "σwσ", "✹w✹", "✺w✺", "✻w✻", "✼w✼", "✽w✽", "✾w✾", "✿w✿", "❀w❀", "❁w❁", "❂w❂", "❃w❃", "❅w❅",
"❆w❆", "❈w❈", "❉w❉", "❊w❊", "❋w❋", "❍w❍", "❏w❏", "❐w❐", "❑w❑", "❒w❒", "◈w◈", "◉w◉", "◊w◊", "○w○", "ФwФ", "фwф", "юwю", "ЮwЮ"
"#w#", "@w@", "0w0", ";w;", "¢w¢", "×w×", "°w°", "OwO", "owo", "uwu", "UwU", "QwQ", "ОмО", "ОпО", "ОшО", "OnO", "ДwД", "ЖwЖ", "XwX", "qwq", "dwd", "DwD" "ИwИ", "ーwー"
]
randomPlayings = [
"with OwOs", "with a ball of String", "innocent", "in her burrow!", "with her friends", "in the fields"
]
| python |
import unicodedata
from collections import defaultdict
from itertools import zip_longest
from .porter import Stemmer
def _normalize(s):
return unicodedata.normalize("NFKD", s)
def _check_type(s):
if not isinstance(s, str):
raise TypeError("expected str or unicode, got %s" % type(s).__name__)
def levenshtein_distance(s1, s2):
_check_type(s1)
_check_type(s2)
if s1 == s2:
return 0
rows = len(s1) + 1
cols = len(s2) + 1
if not s1:
return cols - 1
if not s2:
return rows - 1
prev = None
cur = range(cols)
for r in range(1, rows):
prev, cur = cur, [r] + [0] * (cols - 1)
for c in range(1, cols):
deletion = prev[c] + 1
insertion = cur[c - 1] + 1
edit = prev[c - 1] + (0 if s1[r - 1] == s2[c - 1] else 1)
cur[c] = min(edit, deletion, insertion)
return cur[-1]
def _jaro_winkler(s1, s2, long_tolerance, winklerize):
_check_type(s1)
_check_type(s2)
s1_len = len(s1)
s2_len = len(s2)
if not s1_len or not s2_len:
return 0.0
min_len = min(s1_len, s2_len)
search_range = max(s1_len, s2_len)
search_range = (search_range // 2) - 1
if search_range < 0:
search_range = 0
s1_flags = [False] * s1_len
s2_flags = [False] * s2_len
# looking only within search range, count & flag matched pairs
common_chars = 0
for i, s1_ch in enumerate(s1):
low = max(0, i - search_range)
hi = min(i + search_range, s2_len - 1)
for j in range(low, hi + 1):
if not s2_flags[j] and s2[j] == s1_ch:
s1_flags[i] = s2_flags[j] = True
common_chars += 1
break
# short circuit if no characters match
if not common_chars:
return 0.0
# count transpositions
k = trans_count = 0
for i, s1_f in enumerate(s1_flags):
if s1_f:
for j in range(k, s2_len):
if s2_flags[j]:
k = j + 1
break
if s1[i] != s2[j]:
trans_count += 1
trans_count //= 2
# adjust for similarities in nonmatched characters
common_chars = float(common_chars)
weight = (
(
common_chars / s1_len
+ common_chars / s2_len
+ (common_chars - trans_count) / common_chars
)
) / 3
# winkler modification: continue to boost if strings are similar
if winklerize and weight > 0.7:
# adjust for up to first 4 chars in common
j = min(min_len, 4)
i = 0
while i < j and s1[i] == s2[i] and s1[i]:
i += 1
if i:
weight += i * 0.1 * (1.0 - weight)
# optionally adjust for long strings
# after agreeing beginning chars, at least two or more must agree and
# agreed characters must be > half of remaining characters
if (
long_tolerance
and min_len > 4
and common_chars > i + 1
and 2 * common_chars >= min_len + i
):
weight += (1.0 - weight) * (
float(common_chars - i - 1) / float(s1_len + s2_len - i * 2 + 2)
)
return weight
def jaro_similarity(s1, s2):
return _jaro_winkler(s1, s2, False, False) # noqa
def jaro_winkler_similarity(s1, s2, long_tolerance=False):
return _jaro_winkler(s1, s2, long_tolerance, True) # noqa
def damerau_levenshtein_distance(s1, s2):
_check_type(s1)
_check_type(s2)
len1 = len(s1)
len2 = len(s2)
infinite = len1 + len2
# character array
da = defaultdict(int)
# distance matrix
score = [[0] * (len2 + 2) for x in range(len1 + 2)]
score[0][0] = infinite
for i in range(0, len1 + 1):
score[i + 1][0] = infinite
score[i + 1][1] = i
for i in range(0, len2 + 1):
score[0][i + 1] = infinite
score[1][i + 1] = i
for i in range(1, len1 + 1):
db = 0
for j in range(1, len2 + 1):
i1 = da[s2[j - 1]]
j1 = db
cost = 1
if s1[i - 1] == s2[j - 1]:
cost = 0
db = j
score[i + 1][j + 1] = min(
score[i][j] + cost,
score[i + 1][j] + 1,
score[i][j + 1] + 1,
score[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1),
)
da[s1[i - 1]] = i
return score[len1 + 1][len2 + 1]
def soundex(s):
_check_type(s)
if not s:
return ""
s = _normalize(s)
s = s.upper()
replacements = (
("BFPV", "1"),
("CGJKQSXZ", "2"),
("DT", "3"),
("L", "4"),
("MN", "5"),
("R", "6"),
)
result = [s[0]]
count = 1
    # find would-be replacement for first character
for lset, sub in replacements:
if s[0] in lset:
last = sub
break
else:
last = None
for letter in s[1:]:
for lset, sub in replacements:
if letter in lset:
if sub != last:
result.append(sub)
count += 1
last = sub
break
else:
if letter != "H" and letter != "W":
# leave last alone if middle letter is H or W
last = None
if count == 4:
break
result += "0" * (4 - count)
return "".join(result)
def hamming_distance(s1, s2):
_check_type(s1)
_check_type(s2)
# ensure length of s1 >= s2
if len(s2) > len(s1):
s1, s2 = s2, s1
# distance is difference in length + differing chars
distance = len(s1) - len(s2)
for i, c in enumerate(s2):
if c != s1[i]:
distance += 1
return distance
def nysiis(s):
_check_type(s)
if not s:
return ""
s = s.upper()
key = []
# step 1 - prefixes
if s.startswith("MAC"):
s = "MCC" + s[3:]
elif s.startswith("KN"):
s = s[1:]
elif s.startswith("K"):
s = "C" + s[1:]
elif s.startswith(("PH", "PF")):
s = "FF" + s[2:]
elif s.startswith("SCH"):
s = "SSS" + s[3:]
# step 2 - suffixes
if s.endswith(("IE", "EE")):
s = s[:-2] + "Y"
elif s.endswith(("DT", "RT", "RD", "NT", "ND")):
s = s[:-2] + "D"
# step 3 - first character of key comes from name
key.append(s[0])
# step 4 - translate remaining chars
i = 1
len_s = len(s)
while i < len_s:
ch = s[i]
if ch == "E" and i + 1 < len_s and s[i + 1] == "V":
ch = "AF"
i += 1
elif ch in "AEIOU":
ch = "A"
elif ch == "Q":
ch = "G"
elif ch == "Z":
ch = "S"
elif ch == "M":
ch = "N"
elif ch == "K":
if i + 1 < len(s) and s[i + 1] == "N":
ch = "N"
else:
ch = "C"
elif ch == "S" and s[i + 1 : i + 3] == "CH":
ch = "SS"
i += 2
elif ch == "P" and i + 1 < len(s) and s[i + 1] == "H":
ch = "F"
i += 1
elif ch == "H" and (
s[i - 1] not in "AEIOU"
or (i + 1 < len(s) and s[i + 1] not in "AEIOU")
or (i + 1 == len(s))
):
if s[i - 1] in "AEIOU":
ch = "A"
else:
ch = s[i - 1]
elif ch == "W" and s[i - 1] in "AEIOU":
ch = s[i - 1]
if ch[-1] != key[-1][-1]:
key.append(ch)
i += 1
key = "".join(key)
# step 5 - remove trailing S
if key.endswith("S") and key != "S":
key = key[:-1]
# step 6 - replace AY w/ Y
if key.endswith("AY"):
key = key[:-2] + "Y"
# step 7 - remove trailing A
if key.endswith("A") and key != "A":
key = key[:-1]
# step 8 was already done
return key
def match_rating_codex(s):
_check_type(s)
s = s.upper()
codex = []
prev = None
for i, c in enumerate(s):
# not a space OR
# starting character & vowel
# or consonant not preceded by same consonant
if c != " " and (i == 0 and c in "AEIOU") or (c not in "AEIOU" and c != prev):
codex.append(c)
prev = c
# just use first/last 3
if len(codex) > 6:
return "".join(codex[:3] + codex[-3:])
else:
return "".join(codex)
def match_rating_comparison(s1, s2):
codex1 = match_rating_codex(s1)
codex2 = match_rating_codex(s2)
len1 = len(codex1)
len2 = len(codex2)
res1 = []
res2 = []
# length differs by 3 or more, no result
if abs(len1 - len2) >= 3:
return None
# get minimum rating based on sums of codexes
lensum = len1 + len2
if lensum <= 4:
min_rating = 5
elif lensum <= 7:
min_rating = 4
elif lensum <= 11:
min_rating = 3
else:
min_rating = 2
# strip off common prefixes
for c1, c2 in zip_longest(codex1, codex2):
if c1 != c2:
if c1:
res1.append(c1)
if c2:
res2.append(c2)
unmatched_count1 = unmatched_count2 = 0
for c1, c2 in zip_longest(reversed(res1), reversed(res2)):
if c1 != c2:
if c1:
unmatched_count1 += 1
if c2:
unmatched_count2 += 1
return (6 - max(unmatched_count1, unmatched_count2)) >= min_rating
def metaphone(s):
_check_type(s)
result = []
s = _normalize(s.lower())
# skip first character if s starts with these
if s.startswith(("kn", "gn", "pn", "wr", "ae")):
s = s[1:]
i = 0
while i < len(s):
c = s[i]
next = s[i + 1] if i < len(s) - 1 else "*****"
nextnext = s[i + 2] if i < len(s) - 2 else "*****"
# skip doubles except for cc
if c == next and c != "c":
i += 1
continue
if c in "aeiou":
if i == 0 or s[i - 1] == " ":
result.append(c)
elif c == "b":
if (not (i != 0 and s[i - 1] == "m")) or next:
result.append("b")
elif c == "c":
if next == "i" and nextnext == "a" or next == "h":
result.append("x")
i += 1
elif next in "iey":
result.append("s")
i += 1
else:
result.append("k")
elif c == "d":
if next == "g" and nextnext in "iey":
result.append("j")
i += 2
else:
result.append("t")
elif c in "fjlmnr":
result.append(c)
elif c == "g":
if next in "iey":
result.append("j")
elif next == "h" and nextnext and nextnext not in "aeiou":
i += 1
elif next == "n" and not nextnext:
i += 1
else:
result.append("k")
elif c == "h":
if i == 0 or next in "aeiou" or s[i - 1] not in "aeiou":
result.append("h")
elif c == "k":
if i == 0 or s[i - 1] != "c":
result.append("k")
elif c == "p":
if next == "h":
result.append("f")
i += 1
else:
result.append("p")
elif c == "q":
result.append("k")
elif c == "s":
if next == "h":
result.append("x")
i += 1
elif next == "i" and nextnext in "oa":
result.append("x")
i += 2
else:
result.append("s")
elif c == "t":
if next == "i" and nextnext in "oa":
result.append("x")
elif next == "h":
result.append("0")
i += 1
elif next != "c" or nextnext != "h":
result.append("t")
elif c == "v":
result.append("f")
elif c == "w":
if i == 0 and next == "h":
i += 1
result.append("w")
elif next in "aeiou":
result.append("w")
elif c == "x":
if i == 0:
if next == "h" or (next == "i" and nextnext in "oa"):
result.append("x")
else:
result.append("s")
else:
result.append("k")
result.append("s")
elif c == "y":
if next in "aeiou":
result.append("y")
elif c == "z":
result.append("s")
elif c == " ":
if len(result) > 0 and result[-1] != " ":
result.append(" ")
i += 1
return "".join(result).upper()
def porter_stem(s):
_check_type(s)
return Stemmer(s).stem()
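

# A minimal usage sketch of the helpers above (illustrative; expected values in comments):
if __name__ == "__main__":
    print(levenshtein_distance("kitten", "sitting"))  # 3
    print(hamming_distance("karolin", "kathrin"))     # 3
    print(soundex("Robert"))                          # R163
    print(porter_stem("running"))                     # run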
| python |
#!/usr/bin/python
def findstem(arr):
# Determine size of the array
n = len(arr)
# Take first word from array
# as reference
s = arr[0]
l = len(s)
res = ""
for i in range(l):
for j in range(i + 1, l + 1):
# generating all possible substrings
# of our reference string arr[0] i.e s
stem = s[i:j]
            for k in range(1, n):
                # Check if the generated stem is
                # common to all words
                if stem not in arr[k]:
                    break
            else:
                # for-else: runs only when the loop did not break,
                # i.e. the stem is present in all words.
                # If current substring is present in
                # all strings and its length is greater
                # than current result
                if len(res) < len(stem):
                    res = stem
return res
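

# A small usage sketch (hypothetical word list) illustrating findstem:
if __name__ == "__main__":
    words = ["grace", "graceful", "disgraceful", "gracefully"]
    print(findstem(words))  # grace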
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################################################################
# The MIT License
# Copyright (c) 2014 Hannes Schulz, University of Bonn <[email protected]>
# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <[email protected]>
# Copyright (c) 2008-2009 Sebastian Nowozin <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######################################################################################
#
# See https://github.com/deeplearningais/curfil/wiki/Training-and-Prediction-with-the-NYU-Depth-v2-Dataset
"""Helper script to convert the NYU Depth v2 dataset Matlab file into a set of PNG and JPEG images.
Receives 3 files from the command line:
    <h5_file> - Contains the original images, depth maps, and scene types
    <train_test_split> - contains two numpy arrays with the indices of the
    images based on the split into train and test sets.
    <out_folder> - Name of the folder to save the original and depth images.
Every image in the DB will have its twin B&W image that indicates the depth
in the image. The images will be read, converted by the convert_image function
and finally saved to a path based on the train/test split and scene type.
"""
from __future__ import print_function
import h5py
import numpy as np
import os
import scipy.io
import sys
import cv2
from tqdm import tqdm
def convert_image(index, depth_map, img, output_folder):
"""Processes data images and depth maps
:param index: int, image index
:param depth_map: numpy array, image depth - 2D array.
:param img: numpy array, the original RGB image - 3D array.
:param output_folder: path to save the image in.
    Receives an image with its corresponding depth map.
    Scales the depth map and adds a black 7 px boundary to the original image.
    Saves both image and depth map to the appropriate processed data folder.
"""
# Normalize the depth image
# normalized_depth = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX)
img_depth = depth_map * 25.0
cv2.imwrite("%s/%05d_depth.png" % (output_folder, index), img_depth)
# Adding black frame to original image
img = img[:, :, ::-1] # Flipping the image from RGB to BGR for opencv
image_black_boundary = np.zeros(img.shape, dtype=np.uint8)
image_black_boundary[7:image_black_boundary.shape[0] - 6, 7:image_black_boundary.shape[1] - 6, :] = \
img[7:img.shape[0] - 6, 7:img.shape[1] - 6, :]
cv2.imwrite("%s/%05d.jpg" % (output_folder, index), image_black_boundary)
if __name__ == "__main__":
    # Check that all required command-line arguments were supplied
if len(sys.argv) != 4:
print("usage: %s <h5_file> <train_test_split> <out_folder>" % sys.argv[0], file=sys.stderr)
sys.exit(0)
# load arguments to variables
h5_file = h5py.File(sys.argv[1], "r")
train_test = scipy.io.loadmat(sys.argv[2]) # h5py is not able to open that file. but scipy is
out_folder = sys.argv[3]
# Extract images *indexes* for train and test data sets
test_images = set([int(x) for x in train_test["testNdxs"]])
train_images = set([int(x) for x in train_test["trainNdxs"]])
print("%d training images" % len(train_images))
print("%d test images" % len(test_images))
# Depth maps (one 2D grayscale array per image)
depth = h5_file['depths']
print("Reading", sys.argv[1])
images = h5_file['images'] # (num_channels, height, width)
# Extract all sceneTypes per image - "office", "classroom", etc.
scenes = [u''.join(chr(c[0]) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
for i, image in tqdm(enumerate(images), desc="Processing images", total=len(images)):
idx = int(i) + 1
if idx in train_images:
train_test = "train"
else:
assert idx in test_images, "index %d neither found in training set nor in test set" % idx
train_test = "test"
# Create path to save image in
folder = "%s/%s/%s" % (out_folder, train_test, scenes[i])
if not os.path.exists(folder):
os.makedirs(folder)
convert_image(i, depth[i, :, :].T, image.T, folder)
print("Finished")
| python |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
#
# Last modified: Tue, 23 Jan 2018 23:39:11 +0900
#
# import libsbml (required); fail with a clear message if it is not installed
try:
from libsbml import ASTNode
from libsbml import AST_PLUS
from libsbml import AST_MINUS
from libsbml import AST_TIMES
from libsbml import formulaToString
from libsbml import readSBMLFromFile
except ImportError as e:
raise ImportError("python-libsbml is required by this module: %s" % e)
class Converter():
def __init__(self, filepath="", sbmldocument=None):
self.filepath = filepath
self.clear_objects()
# try SBMLDocument at first, and then SBML file
if sbmldocument is not None:
self.sbmldocument = sbmldocument
elif filepath is not "":
self.sbmldocument = readSBMLFromFile(filepath)
self.update_sbmldocument(self.sbmldocument)
def clear_objects(self):
self.pars = {}
self.icdict = {}
self.varspecs = {}
self.functions = {}
self.funcargs = {}
def update_sbmlfile(self, filepath=""):
if filepath is not "":
self.filepath = filepath
self.sbmldocument = readSBMLFromFile(filepath)
self.update_sbmldocument(self.sbmldocument)
def update_sbmldocument(self, sbmldocument):
if sbmldocument is not None:
self.sbmlmodel = sbmldocument.getModel()
self.filepath = ""
self.clear_objects()
self.generate_pars(self.sbmlmodel)
self.generate_icdict(self.sbmlmodel)
self.generate_varspecs(self.sbmlmodel)
self.generate_functions(self.sbmlmodel)
def generate_pars(self, model):
# global parameters
for p in model.getListOfParameters():
self.pars[p.getId()] = p.getValue()
# local parameters
for r in model.getListOfReactions():
kinetic_law = r.getKineticLaw()
for p in kinetic_law.getListOfParameters():
# we assume there is no conflict on parameter id
assert p.getId() not in self.pars, "Please rename your parameter id so that there is no conflict between local and global parameters."
self.pars[p.getId()] = p.getValue()
# compartments
for p in model.getListOfCompartments():
self.pars[p.getId()] = p.getSize()
def generate_icdict(self, model):
for s in model.getListOfSpecies():
if s.isSetInitialConcentration():
self.icdict[s.getId()] = s.getInitialConcentration()
elif s.isSetInitialAmount():
self.icdict[s.getId()] = s.getInitialAmount()
def is_species_reactant_of(self, species, reaction):
for sr in reaction.getListOfReactants():
if sr.getSpecies() == species.getId():
return True
return False
def is_species_product_of(self, species, reaction):
for sr in reaction.getListOfProducts():
if sr.getSpecies() == species.getId():
return True
return False
def add_ast_as_reactant(self, ast, r):
if ast is None: # if there is no parent, return -1 * v1.
root = ASTNode(AST_TIMES)
l = ASTNode()
l.setValue(-1.0)
root.addChild(l)
root.addChild(r.getKineticLaw().getMath().deepCopy())
else:
root = ASTNode(AST_MINUS)
root.addChild(ast)
root.addChild(r.getKineticLaw().getMath().deepCopy())
return root
def add_ast_as_product(self, ast, r):
if ast is None: # if there is no parent, return v1.
root = r.getKineticLaw().getMath().deepCopy()
else:
root = ASTNode(AST_PLUS)
root.addChild(ast)
root.addChild(r.getKineticLaw().getMath().deepCopy())
return root
def generate_varspecs(self, model):
# Generate Rate equation for all variable Species (ex. dx/dt = v1 - v2 + v3).
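# For instance, a species consumed by a reaction with kinetic law v1 and
# produced by reactions with kinetic laws v2 and v3 ends up with a formula
# string of roughly "-1 * v1 + v2 + v3", built left-to-right from the kinetic laws.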
for s in model.getListOfSpecies():
#if s.isSetBoundaryCondition() or s.isSetConstant:
# continue
root = None
for r in model.getListOfReactions():
if self.is_species_reactant_of(s, r):
root = self.add_ast_as_reactant(root, r)
if self.is_species_product_of(s, r):
root = self.add_ast_as_product(root, r)
if root is not None:
self.varspecs[s.getId()] = formulaToString(root)
def generate_functions(self, model):
# global parameters
for f in model.getListOfFunctionDefinitions():
ast = f.getMath()
idx = ast.getNumChildren() - 1
ast_func = ast.getChild(idx) # most right child is the function
self.functions[f.getId()] = formulaToString(ast_func)
arglist = []
for i in range(ast.getNumChildren() - 1):
child = ast.getChild(i)
arglist.append(child.getName())
self.funcargs[f.getId()] = arglist
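# Minimal usage sketch (not part of the original module); "model.xml" is a
# placeholder for any SBML file on disk.
if __name__ == "__main__":
    converter = Converter(filepath="model.xml")
    print("parameters:", converter.pars)
    print("initial conditions:", converter.icdict)
    print("rate equations:", converter.varspecs)
    print("functions:", converter.functions, converter.funcargs)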
| python |
import threading
import Pyro4
class NameServerInThread(threading.Thread):
def __init__(self):
super(NameServerInThread, self).__init__()
self.name_server_daemon = None
@staticmethod
def is_name_server_started():
try:
Pyro4.locateNS()
return True
except Pyro4.errors.PyroError:
return False
def name_server_msg_loop(self):
ns_uri, daemon, broadcast_server = Pyro4.naming.startNS()
self.name_server_daemon = daemon
print(ns_uri, daemon, broadcast_server)
try:
daemon.requestLoop()
except:
import traceback
traceback.print_exc()
finally:
daemon.close()
if broadcast_server is not None:
broadcast_server.close()
def run(self):
if self.is_name_server_started():
raise "Name server running"
self.name_server_msg_loop()
print("NS shut down.")
def shutdown(self):
self.name_server_daemon.shutdown()
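# Minimal usage sketch (not part of the original module), assuming no other
# Pyro4 name server is already running on this machine: start the server in a
# background thread, let the daemon enter its request loop, then shut it down.
if __name__ == "__main__":
    import time
    ns_thread = NameServerInThread()
    ns_thread.start()
    time.sleep(2)  # give the daemon time to start its request loop
    ns_thread.shutdown()
    ns_thread.join()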
| python |
'''
Wrapper for bert embeddings
'''
import numpy as np
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel
class BertEmbeddings:
def __init__(self, model_name='bert-base-uncased', cache_dir=None, max_seq_length=64, max_batch_size=64, stats_count=False):
'''
:param model_name: name of the pretrained BERT model to load
:param cache_dir: optional directory for caching pretrained weights
:param max_seq_length: maximum number of word-piece tokens kept per text
:param max_batch_size: largest batch forwarded through the model in one call
:param stats_count: if True, track how many [UNK] tokens are produced
'''
self.max_seq_length = max_seq_length
self.max_batch_size = max_batch_size
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('BertEmbeddings DEVICE: ', self.device)
self.tokenizer = BertTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
self.model = BertModel.from_pretrained(model_name, cache_dir=cache_dir)
self.model.to(self.device)
self.model.eval()
#debug stats
self.stats_count = stats_count
if self.stats_count:
self.unks = 0
self.total_toks = 0
def get_unk_ratio(self):
return float(self.unks)/self.total_toks
def is_context_sensitive(self):
return False
def is_seq_embedder(self):
'''
This embedder embed the entire text sequence into a single vector (not vector per word)
'''
return True
def size(self):
return -1
def units(self):
return -1
def __contains__(self, w):
return True
def tokenize_text(self, text):
# Tokenized input
tokenized_text = self.tokenizer.tokenize(' '.join(text))
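# Note: max_seq_length - 2 presumably reserves room for the [CLS] and [SEP]
# special tokens, although this method does not explicitly add them.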
if len(tokenized_text) > self.max_seq_length-2:
tokenized_text = tokenized_text[:self.max_seq_length-2]
if self.stats_count:
self.unks += tokenized_text.count('[UNK]')
self.total_toks += len(tokenized_text)
# Convert token to vocabulary indices
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
mask_ids = [1]*len(indexed_tokens)
indexed_tokens.extend([0]*((self.max_seq_length-2)-len(indexed_tokens)))
mask_ids.extend([0]*((self.max_seq_length-2)-len(mask_ids)))
segments_ids = [0] * len(indexed_tokens)
return indexed_tokens, segments_ids, mask_ids
def represent_text_batch(self, text_batch):
represented_num = 0
encoded_instances = []
while represented_num < len(text_batch):
n = min(self.max_batch_size, len(text_batch)-represented_num)
encoded_n = self.represent_small_text_batch(text_batch[represented_num:represented_num+n])
encoded_instances.append(encoded_n)
represented_num += n
if len(encoded_instances) > 1:
# print('Large batch size:', len(text_batch))
return np.concatenate(encoded_instances, axis=0)
else:
return encoded_instances[0]
def represent_small_text_batch(self, text_batch):
indexed_tokens_batch, segments_ids_batch, mask_ids_batch = zip(*[self.tokenize_text(text) for text in text_batch])
tokens_tensor = torch.tensor(indexed_tokens_batch, device=self.device)
segments_tensor = torch.tensor(segments_ids_batch, device=self.device)
masks_tensor = torch.tensor(mask_ids_batch, device=self.device)
encoded_words, encoded_text = self.model(tokens_tensor, segments_tensor, attention_mask=masks_tensor, output_all_encoded_layers=False)
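# encoded_text is the pooled (sentence-level) output of BertModel; the
# per-token states in encoded_words are not used by this embedder.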
return encoded_text.detach().cpu().numpy()
# def represent_text(self, text):
# with torch.cuda.device(self.gpu):
# # Tokenized input
# tokenized_text = self.tokenizer.tokenize(' '.join(text))
# # Convert token to vocabulary indices
# indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
# segments_ids = [0] * len(indexed_tokens)
# # Convert inputs to PyTorch tensors
# tokens_tensor = torch.tensor([indexed_tokens])
# segments_tensors = torch.tensor([segments_ids])
# # Predict hidden states features for each layer
# encoded_words, encoded_text = self.model(tokens_tensor, segments_tensors, output_all_encoded_layers=False)
# return encoded_text.detach().numpy()
if __name__ == '__main__':
bert = BertEmbeddings()
# represent_text_batch expects a batch of texts, each text given as a list of tokens
embeddings = bert.represent_text_batch([['This', 'is', 'a', 'test', 'yes']])
print(embeddings.shape)
| python |
"""Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Tests for source_match.py
"""
import unittest
import create_node
import source_match
DEFAULT_TEXT = 'default'
class TextPlaceholderTest(unittest.TestCase):
def testMatchSimpleText(self):
placeholder = source_match.TextPlaceholder('.*', DEFAULT_TEXT)
matched_text = placeholder.Match(None, 'to match')
self.assertEqual(matched_text, 'to match')
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, 'to match')
def testPartialMatchEnd(self):
placeholder = source_match.TextPlaceholder(r'def \(', DEFAULT_TEXT)
matched_text = placeholder.Match(None, 'def (foo')
self.assertEqual(matched_text, 'def (')
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, 'def (')
def testMatchWithoutMatchingReturnsDefault(self):
placeholder = source_match.TextPlaceholder('.*', DEFAULT_TEXT)
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, DEFAULT_TEXT)
def testCantMatchThrowsError(self):
placeholder = source_match.TextPlaceholder('doesnt match', DEFAULT_TEXT)
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
placeholder.Match(None, 'to match')
def testMatchWhitespace(self):
whitespace_text = ' \t \n '
placeholder = source_match.TextPlaceholder(r'\s*')
matched_text = placeholder.Match(None, whitespace_text)
self.assertEqual(matched_text, whitespace_text)
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, whitespace_text)
def testWhitespaceMatchesLineContinuations(self):
whitespace_text = ' \t \n \\\n \\\n '
placeholder = source_match.TextPlaceholder(r'\s*')
matched_text = placeholder.Match(None, whitespace_text)
self.assertEqual(matched_text, whitespace_text)
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, whitespace_text)
def testWhitespaceMatchesComments(self):
whitespace_text = ' \t # abc\n '
placeholder = source_match.TextPlaceholder(r'\s*')
matched_text = placeholder.Match(None, whitespace_text)
self.assertEqual(matched_text, whitespace_text)
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, whitespace_text)
def testMultipleStatementsSeparatedBySemicolon(self):
whitespace_text = 'pdb;pdb'
placeholder = source_match.TextPlaceholder(r'pdb\npdb')
matched_text = placeholder.Match(None, whitespace_text)
self.assertEqual(matched_text, whitespace_text)
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, whitespace_text)
def testCommentAfterExpectedLinebreak(self):
whitespace_text = 'pdb # A comment\n'
placeholder = source_match.TextPlaceholder(r'pdb\n')
matched_text = placeholder.Match(None, whitespace_text)
self.assertEqual(matched_text, whitespace_text)
test_output = placeholder.GetSource(None)
self.assertEqual(test_output, whitespace_text)
class FieldPlaceholderTest(unittest.TestCase):
def testMatchSimpleField(self):
node = create_node.Name('foobar')
placeholder = source_match.FieldPlaceholder('id')
matched_text = placeholder.Match(node, 'foobar')
self.assertEqual(matched_text, 'foobar')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar')
def testPartialMatch(self):
node = create_node.Name('bar')
placeholder = source_match.FieldPlaceholder(
'id', before_placeholder=source_match.TextPlaceholder('foo'))
matched_text = placeholder.Match(node, 'foobarbaz')
self.assertEqual(matched_text, 'foobar')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar')
def testBeforePlaceholder(self):
node = create_node.Name('bar')
placeholder = source_match.FieldPlaceholder(
'id',
before_placeholder=source_match.TextPlaceholder('before '))
matched_text = placeholder.Match(node, 'before bar')
self.assertEqual(matched_text, 'before bar')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'before bar')
def testCantMatchThrowsError(self):
node = create_node.Name('doesnt_match')
placeholder = source_match.FieldPlaceholder('id')
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
placeholder.Match(node, 'to match')
def testRaisesErrorIfFieldIsList(self):
node = create_node.FunctionDef('function_name')
placeholder = source_match.FieldPlaceholder('body')
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
placeholder.Match(node, 'invalid_match')
def testChangingValueChangesOutput(self):
node = create_node.Name('bar')
placeholder = source_match.FieldPlaceholder(
'id', before_placeholder=source_match.TextPlaceholder('foo'))
matched_text = placeholder.Match(node, 'foobarbaz')
self.assertEqual(matched_text, 'foobar')
node.id = 'hello'
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foohello')
def testWithoutMatch(self):
node = create_node.Name('bar')
placeholder = source_match.FieldPlaceholder('id')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'bar')
class ListFieldPlaceholderTest(unittest.TestCase):
def testMatchSimpleField(self):
body_node = create_node.Expr(create_node.Name('foobar'))
node = create_node.FunctionDef('function_name', body=[body_node])
placeholder = source_match.ListFieldPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\n')
self.assertEqual(matched_text, 'foobar\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\n')
def testMultipleListItems(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
placeholder = source_match.ListFieldPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\nbaz\n')
self.assertEqual(matched_text, 'foobar\nbaz\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\nbaz\n')
def testMultipleListItemsBeginningAndEnd(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
placeholder = source_match.ListFieldPlaceholder(
'body',
before_placeholder=source_match.TextPlaceholder('z'),
after_placeholder=source_match.TextPlaceholder('zz'))
matched_text = placeholder.Match(node, 'zfoobar\nzzzbaz\nzz')
self.assertEqual(matched_text, 'zfoobar\nzzzbaz\nzz')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'zfoobar\nzzzbaz\nzz')
def testMatchRaisesErrorIfFieldIsNotList(self):
node = create_node.Name('bar')
placeholder = source_match.ListFieldPlaceholder(
'id', before_placeholder=source_match.TextPlaceholder('\n', '\n'),
exclude_first_before=True)
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
placeholder.Match(node, 'foobar\nbaz')
def testMatchRaisesErrorIfFieldDoesntMatch(self):
body_node = create_node.Expr(create_node.Name('foobar'))
node = create_node.FunctionDef('function_name', body=[body_node])
placeholder = source_match.ListFieldPlaceholder(
'body', before_placeholder=source_match.TextPlaceholder('\n', '\n'),
exclude_first_before=True)
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
placeholder.Match(node, 'no match here')
def testMatchRaisesErrorIfSeparatorDoesntMatch(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
placeholder = source_match.ListFieldPlaceholder(
'body', before_placeholder=source_match.TextPlaceholder('\n', '\n'),
exclude_first_before=True)
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
placeholder.Match(node, 'foobarbaz')
# TODO: Re-enable this after adding indent information to matchers
@unittest.expectedFailure
def testListDefaults(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
module_node = create_node.Module(node)
placeholder = source_match.ListFieldPlaceholder(
'body', before_placeholder=source_match.TextPlaceholder('', ', '),
exclude_first_before=True)
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, ' foobar\n, baz\n')
class BodyPlaceholderTest(unittest.TestCase):
def testMatchSimpleField(self):
body_node = create_node.Expr(create_node.Name('foobar'))
node = create_node.Module(body_node)
placeholder = source_match.BodyPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\n')
self.assertEqual(matched_text, 'foobar\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\n')
def testMatchFieldAddsEmptySyntaxFreeLine(self):
body_node_foobar = create_node.Expr(create_node.Name('foobar'))
body_node_a = create_node.Expr(create_node.Name('a'))
node = create_node.Module(body_node_foobar, body_node_a)
placeholder = source_match.BodyPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\n\na\n')
self.assertEqual(matched_text, 'foobar\n\na\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\n\na\n')
def testMatchFieldAddsEmptySyntaxFreeLineWithComment(self):
body_node_foobar = create_node.Expr(create_node.Name('foobar'))
body_node_a = create_node.Expr(create_node.Name('a'))
node = create_node.Module(body_node_foobar, body_node_a)
placeholder = source_match.BodyPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\n#blah\na\n')
self.assertEqual(matched_text, 'foobar\n#blah\na\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\n#blah\na\n')
def testDoesntMatchAfterEndOfBody(self):
body_node_foobar = create_node.Expr(create_node.Name('foobar'))
body_node_a = create_node.Expr(create_node.Name('a'))
node = create_node.FunctionDef('a', body=[body_node_foobar, body_node_a])
matcher = source_match.GetMatcher(node)
text_to_match = """def a():
foobar
#blah
a
# end comment
c
"""
matched_text = matcher.Match(text_to_match)
expected_match = """def a():
foobar
#blah
a
"""
self.assertEqual(matched_text, expected_match)
class TestDefaultSourceMatcher(unittest.TestCase):
def testInvalidExpectedPartsType(self):
node = create_node.Name('bar')
with self.assertRaises(ValueError):
source_match.DefaultSourceMatcher(node, ['blah'])
def testBasicTextMatch(self):
matcher = source_match.DefaultSourceMatcher(
None, [source_match.TextPlaceholder('blah', DEFAULT_TEXT)])
matcher.Match('blah')
self.assertEqual(matcher.GetSource(), 'blah')
def testRaisesErrorIfNoTextMatch(self):
matcher = source_match.DefaultSourceMatcher(
None, [source_match.TextPlaceholder('blah', DEFAULT_TEXT)])
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
matcher.Match('bla')
def testBasicFieldMatch(self):
node = create_node.Name('bar')
matcher = source_match.DefaultSourceMatcher(
node, [source_match.FieldPlaceholder('id')])
matcher.Match('bar')
self.assertEqual(matcher.GetSource(), 'bar')
def testRaisesErrorIfNoFieldMatch(self):
node = create_node.Name('bar')
matcher = source_match.DefaultSourceMatcher(
node, [source_match.FieldPlaceholder('id')])
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
matcher.Match('ba')
def testBasicFieldMatchWhenChangedFieldValue(self):
node = create_node.Name('bar')
matcher = source_match.DefaultSourceMatcher(
node, [source_match.FieldPlaceholder('id')])
matcher.Match('bar')
node.id = 'foo'
self.assertEqual(matcher.GetSource(), 'foo')
def testBasicListMatch(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
matcher = source_match.DefaultSourceMatcher(
node, [source_match.ListFieldPlaceholder('body')])
matcher.Match('foobar\nbaz\n')
self.assertEqual(matcher.GetSource(), 'foobar\nbaz\n')
def testRaisesErrorWhenNoMatchInBasicList(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
matcher = source_match.DefaultSourceMatcher(
node, [source_match.ListFieldPlaceholder('body')])
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
matcher.Match('foobar\nba\n')
def testBasicListMatchWhenChangedFieldValue(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
matcher = source_match.DefaultSourceMatcher(
node,
[source_match.ListFieldPlaceholder('body')])
matcher.Match('foobar\nbaz\n')
node.body[0].value.id = 'hello'
self.assertEqual(matcher.GetSource(), 'hello\nbaz\n')
def testAdvancedMatch(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
matcher = source_match.DefaultSourceMatcher(
node,
[source_match.TextPlaceholder('def ', 'def '),
source_match.FieldPlaceholder('name'),
source_match.TextPlaceholder(r'\(\)', r'()'),
source_match.ListFieldPlaceholder('body')])
matcher.Match('def function_name()foobar\nbaz\n')
node.body[0].value.id = 'hello'
self.assertEqual(matcher.GetSource(), 'def function_name()hello\nbaz\n')
# TODO: Re-enable this after adding indent information to matchers
@unittest.expectedFailure
def testGetSourceWithoutMatchUsesDefaults(self):
body_nodes = [create_node.Expr(create_node.Name('foobar')),
create_node.Expr(create_node.Name('baz'))]
node = create_node.FunctionDef('function_name', body=body_nodes)
module_node = create_node.Module(node)
matcher = source_match.DefaultSourceMatcher(
node,
[source_match.TextPlaceholder('def ', 'default '),
source_match.FieldPlaceholder('name'),
source_match.TextPlaceholder(r'\(\)', r'()'),
source_match.SeparatedListFieldPlaceholder(
'body', source_match.TextPlaceholder('\n', ', '))])
node.body[0].value.id = 'hello'
self.assertEqual(matcher.GetSource(),
'default function_name() hello\n, baz\n')
class TestGetMatcher(unittest.TestCase):
def testDefaultMatcher(self):
node = create_node.VarReference('foo', 'bar')
matcher = source_match.GetMatcher(node)
matcher.Match('foo.bar')
self.assertEqual(matcher.GetSource(), 'foo.bar')
def testDefaultMatcherWithModification(self):
node = create_node.VarReference('foo', 'bar')
matcher = source_match.GetMatcher(node)
matcher.Match('foo.bar')
node.attr = 'hello'
self.assertEqual(matcher.GetSource(), 'foo.hello')
class ParenWrappedTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Name('a')
string = '(a)'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testNewLineMatch(self):
node = create_node.Name('a')
string = '(\na\n)'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testWithComplexLine(self):
node = create_node.Compare('a', '<', 'c')
string = '(a < \n c\n)'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testWithTuple(self):
node = create_node.Call('c', args=[create_node.Name('d'),
create_node.Tuple('a', 'b')])
string = 'c(d, (a, b))'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class ArgumentsMatcherTest(unittest.TestCase):
def testEmpty(self):
node = create_node.arguments()
string = ''
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testSingleArg(self):
node = create_node.arguments(args=('a'))
string = 'a'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMultipleArgs(self):
node = create_node.arguments(args=('a', 'b'))
string = 'a, b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testDefault(self):
node = create_node.arguments(keys=('a'), values=('b'))
string = 'a=b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testDefaults(self):
node = create_node.arguments(keys=('a', 'c'), values=('b', 'd'))
string = 'a=b, c=d'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testArgsAndDefaults(self):
node = create_node.arguments(
args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'))
string = 'e, f, a=b, c=d'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testArgsDefaultsVarargs(self):
node = create_node.arguments(
args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
vararg_name='args')
string = 'e, f, a=b, c=d, *args'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testArgsDefaultsVarargsKwargs(self):
node = create_node.arguments(
args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
vararg_name='args', kwarg_name='kwargs')
string = 'e, f, a=b, c=d, *args, **kwargs'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class AssertMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Assert(create_node.Name('a'))
string = 'assert a\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithMessage(self):
node = create_node.Assert(create_node.Name('a'),
create_node.Str('message'))
string = 'assert a, "message"\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class AttributeMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.VarReference('a', 'b')
string = 'a.b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testTripleReferenceMatch(self):
node = create_node.VarReference('a', 'b', 'c')
string = 'a.b.c'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class AugAssignMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.AugAssign('a', create_node.Add(), create_node.Num(1))
string = 'a += 1\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class BinOpMatcherTest(unittest.TestCase):
def testAddBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.Add(),
create_node.Name('b'))
string = 'a + b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testSubBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.Sub(),
create_node.Name('b'))
string = 'a - b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMultBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.Mult(),
create_node.Name('b'))
string = 'a * b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testDivBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.Div(),
create_node.Name('b'))
string = 'a / b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testFloorDivBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.FloorDiv(),
create_node.Name('b'))
string = 'a // b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testModBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.Mod(),
create_node.Name('b'))
string = 'a % b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testPowBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.Pow(),
create_node.Name('b'))
string = 'a ** b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testLShiftBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.LShift(),
create_node.Name('b'))
string = 'a << b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testRShiftBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.RShift(),
create_node.Name('b'))
string = 'a >> b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBitOrBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.BitOr(),
create_node.Name('b'))
string = 'a | b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBitXorBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.BitXor(),
create_node.Name('b'))
string = 'a ^ b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBitAndBinOp(self):
node = create_node.BinOp(
create_node.Name('a'),
create_node.BitAnd(),
create_node.Name('b'))
string = 'a & b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class BoolOpMatcherTest(unittest.TestCase):
def testAndBoolOp(self):
node = create_node.BoolOp(
create_node.Name('a'),
create_node.And(),
create_node.Name('b'))
string = 'a and b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testOrBoolOp(self):
node = create_node.BoolOp(
create_node.Name('a'),
create_node.Or(),
create_node.Name('b'))
string = 'a or b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testAndOrBoolOp(self):
node = create_node.BoolOp(
create_node.Name('a'),
create_node.And(),
create_node.Name('b'),
create_node.Or(),
create_node.Name('c'))
string = 'a and b or c'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testOrAndBoolOp(self):
node = create_node.BoolOp(
create_node.Name('a'),
create_node.Or(),
create_node.Name('b'),
create_node.And(),
create_node.Name('c'))
string = 'a or b and c'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class CallMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Call('a')
string = 'a()'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchStarargs(self):
node = create_node.Call('a', starargs='args')
string = 'a(*args)'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithStarargsBeforeKeyword(self):
node = create_node.Call('a', keys=('b',), values=('c',), starargs='args')
string = 'a(*args, b=c)'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class ClassDefMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.ClassDef('TestClass')
string = 'class TestClass():\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchBases(self):
node = create_node.ClassDef(
'TestClass', bases=('Base1', 'Base2'))
string = 'class TestClass(Base1, Base2):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchBody(self):
node = create_node.ClassDef(
'TestClass', body=[create_node.Expr(create_node.Name('a'))])
string = 'class TestClass():\n a\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchDecoratorList(self):
node = create_node.ClassDef(
'TestClass',
decorator_list=[create_node.Name('dec'),
create_node.Call('dec2')])
string = '@dec\n@dec2()\nclass TestClass():\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testComplete(self):
node = create_node.ClassDef(
'TestClass',
bases=('Base1', 'Base2'),
body=[create_node.Expr(create_node.Name('a'))],
decorator_list=[create_node.Name('dec'),
create_node.Call('dec2')])
string = '@dec\n@dec2()\nclass TestClass(Base1, Base2):\n a\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testCanChangeValues(self):
node = create_node.ClassDef(
'TestClass',
bases=('Base1', 'Base2'),
body=[create_node.Expr(create_node.Name('a'))],
decorator_list=[create_node.Name('dec'),
create_node.Call('dec2')])
string = '@dec\n@dec2()\nclass TestClass(Base1, Base2):\n a\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
node.bases = [create_node.Name('Base3')]
node.decorator_list = [create_node.Name('dec3')]
node.body[0].value.id = 'x'
node.name = 'TestClass2'
changed_string = '@dec3\nclass TestClass2(Base3):\n x\n'
self.assertEqual(changed_string, matcher.GetSource())
class CompareMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.Lt(),
create_node.Name('b'))
string = 'a < b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMultiMatch(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.Lt(),
create_node.Name('b'),
create_node.Lt(),
create_node.Name('c'))
string = 'a < b < c'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testEq(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.Eq(),
create_node.Name('b'))
string = 'a == b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testNotEq(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.NotEq(),
create_node.Name('b'))
string = 'a != b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testLt(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.Lt(),
create_node.Name('b'))
string = 'a < b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testLtE(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.LtE(),
create_node.Name('b'))
string = 'a <= b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testGt(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.Gt(),
create_node.Name('b'))
string = 'a > b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testGtE(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.GtE(),
create_node.Name('b'))
string = 'a >= b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testIs(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.Is(),
create_node.Name('b'))
string = 'a is b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testIsNot(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.IsNot(),
create_node.Name('b'))
string = 'a is not b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testIn(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.In(),
create_node.Name('b'))
string = 'a in b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testNotIn(self):
node = create_node.Compare(
create_node.Name('a'),
create_node.NotIn(),
create_node.Name('b'))
string = 'a not in b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class ComprehensionMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.comprehension('a', 'b')
string = 'for a in b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithIf(self):
node = create_node.comprehension(
'a', 'b',
create_node.Compare('c', '<', 'd'))
string = 'for a in b if c < d'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class DictMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Dict([create_node.Name('a')],
[create_node.Name('b')])
string = '{a: b}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testEmptyMatch(self):
node = create_node.Dict()
string = '{}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testTwoItemMatch(self):
node = create_node.Dict(
[create_node.Name('a'), create_node.Str('c')],
[create_node.Name('b'), create_node.Str('d')])
string = '{a: b, "c": "d"}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testChangeKey(self):
first_key = create_node.Name('a')
node = create_node.Dict(
[first_key, create_node.Str('c')],
[create_node.Name('b'), create_node.Str('d')])
string = '{a: b, "c": "d"}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
first_key.id = 'k'
self.assertEqual('{k: b, "c": "d"}', matcher.GetSource())
def testChangeVal(self):
first_val = create_node.Name('b')
node = create_node.Dict(
[create_node.Name('a'), create_node.Str('c')],
[first_val, create_node.Str('d')])
string = '{a: b, "c": "d"}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
first_val.id = 'k'
self.assertEqual('{a: k, "c": "d"}', matcher.GetSource())
class DictComprehensionMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.DictComp('e', 'f', 'a', 'b')
string = '{e: f for a in b}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithIf(self):
node = create_node.DictComp(
'e', 'f', 'a', 'b',
create_node.Compare('c', '<', 'd'))
string = '{e: f for a in b if c < d}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class ExceptHandlerMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.ExceptHandler()
string = 'except:\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithType(self):
node = create_node.ExceptHandler('TestException')
string = 'except TestException:\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithName(self):
node = create_node.ExceptHandler('TestException', name='as_part')
string = 'except TestException as as_part:\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithBody(self):
node = create_node.ExceptHandler(
body=[create_node.Expr(create_node.Name('a'))])
string = 'except:\n a\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class FunctionDefMatcherTest(unittest.TestCase):
def testEmpty(self):
node = create_node.FunctionDef('test_fun')
string = 'def test_fun():\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testSingleArg(self):
node = create_node.FunctionDef('test_fun', args=('a'))
string = 'def test_fun(a):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMultipleArgs(self):
node = create_node.FunctionDef('test_fun', args=('a', 'b'))
string = 'def test_fun(a, b):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testDefault(self):
node = create_node.FunctionDef('test_fun', keys=('a'), values=('b'))
string = 'def test_fun(a=b):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testDefaults(self):
node = create_node.FunctionDef(
'test_fun', keys=('a', 'c'), values=('b', 'd'))
string = 'def test_fun(a=b, c=d):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testArgsAndDefaults(self):
node = create_node.FunctionDef(
'test_fun', args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'))
string = 'def test_fun(e, f, a=b, c=d):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testArgsDefaultsVarargs(self):
node = create_node.FunctionDef(
'test_fun', args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
vararg_name='args')
string = 'def test_fun(e, f, a=b, c=d, *args):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testArgsDefaultsVarargsKwargs(self):
node = create_node.FunctionDef(
'test_fun', args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
vararg_name='args', kwarg_name='kwargs')
string = 'def test_fun(e, f, a=b, c=d, *args, **kwargs):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testDecoratorList(self):
node = create_node.FunctionDef(
'test_fun',
decorator_list=[create_node.Name('dec'),
create_node.Call('call_dec')])
string = '@dec\n@call_dec()\ndef test_fun():\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testCommentInDecoratorList(self):
node = create_node.FunctionDef(
'test_fun',
decorator_list=[create_node.Name('dec'),
create_node.Call('call_dec')])
string = '@dec\n#hello world\n@call_dec()\ndef test_fun():\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBody(self):
node = create_node.FunctionDef(
'test_fun',
body=(create_node.Expr(create_node.Name('a')),))
string = 'def test_fun():\n a\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class IfMatcherTest(unittest.TestCase):
def testBasicIf(self):
node = create_node.If(
create_node.Name('True'))
string = """if True:\n pass\n"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicIfElse(self):
node = create_node.If(
create_node.Name('True'), orelse=[create_node.Pass()])
string = """if True:\n pass\nelse:\n pass\n"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicIfElif(self):
node = create_node.If(
create_node.Name('True'),
orelse=[create_node.If(create_node.Name('False'))])
string = """if True:
pass
elif False:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testIfElifWithSpace(self):
node = create_node.If(
create_node.Name('True'),
orelse=[create_node.If(create_node.Name('False'))])
string = """if True:
pass
elif False:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testIfInElse(self):
node = create_node.If(
create_node.Name('True'),
orelse=[create_node.If(create_node.Name('False'))])
string = """if True:
pass
else:
if False:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testIfAndOthersInElse(self):
node = create_node.If(
create_node.Name('True'),
orelse=[create_node.If(create_node.Name('False')),
create_node.Expr(create_node.Name('True'))])
string = """if True:
pass
else:
if False:
pass
True
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class IfExpMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.IfExp(
create_node.Name('True'), create_node.Name('a'), create_node.Name('b'))
string = 'a if True else b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testChangeParts(self):
node = create_node.IfExp(
create_node.Name('True'), create_node.Name('a'), create_node.Name('b'))
string = 'a if True else b'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
node.test = create_node.Name('False')
node.body = create_node.Name('c')
node.orelse = create_node.Name('d')
self.assertEqual('c if False else d', matcher.GetSource())
class LambdaMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Lambda(create_node.Name('a'))
string = 'lambda: a'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithArgs(self):
node = create_node.Lambda(
create_node.Name('a'),
args=create_node.arguments(args=('b')))
string = 'lambda b: a'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithArgsOnNewLine(self):
node = create_node.Lambda(
create_node.Name('a'),
args=create_node.arguments(args=('b')))
string = '(lambda\nb: a)'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class ListComprehensionMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.ListComp('c', 'a', 'b')
string = '[c for a in b]'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithIf(self):
node = create_node.ListComp(
'c', 'a', 'b',
create_node.Compare('c', '<', 'd'))
string = '[c for a in b if c < d]'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class ModuleMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Module(create_node.Expr(create_node.Name('a')))
string = 'a\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithEmptyLines(self):
node = create_node.Module(
create_node.Expr(create_node.Name('a')),
create_node.Expr(create_node.Name('b')))
string = 'a\n\nb\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithCommentLines(self):
node = create_node.Module(
create_node.Expr(create_node.Name('a')),
create_node.Expr(create_node.Name('b')))
string = 'a\n#blah\nb\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class NameMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Name('foobar')
string = 'foobar'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('foobar', matcher.GetSource())
def testIdChange(self):
node = create_node.Name('foobar')
string = 'foobar'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
node.id = 'hello'
self.assertEqual('hello', matcher.GetSource())
class NumMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Num('1')
string = '1'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('1', matcher.GetSource())
def testBasicMatchWithSuffix(self):
node = create_node.Num('1')
string = '1L'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('1L', matcher.GetSource())
class SetMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Set('c', 'a', 'b')
string = '{c, a, b}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class SetComprehensionMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.SetComp('c', 'a', 'b')
string = '{c for a in b}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithIf(self):
node = create_node.SetComp(
'c', 'a', 'b',
create_node.Compare('c', '<', 'd'))
string = '{c for a in b if c < d}'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class StrMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.Str('foobar')
string = '"foobar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('"foobar"', matcher.GetSource())
def testPrefixMatch(self):
node = create_node.Str('foobar')
string = 'r"foobar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('r"foobar"', matcher.GetSource())
def testQuoteWrapped(self):
node = create_node.Str('foobar')
string = '("foobar")'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('("foobar")', matcher.GetSource())
def testContinuationMatch(self):
node = create_node.Str('foobar')
string = '"foo"\n"bar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('"foo"\n"bar"', matcher.GetSource())
def testContinuationMatchWithPrefix(self):
node = create_node.Str('foobar')
string = '"foo"\nr"bar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('"foo"\nr"bar"', matcher.GetSource())
def testBasicTripleQuoteMatch(self):
node = create_node.Str('foobar')
string = '"""foobar"""'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('"""foobar"""', matcher.GetSource())
def testMultilineTripleQuoteMatch(self):
node = create_node.Str('foobar\n\nbaz')
string = '"""foobar\n\nbaz"""'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('"""foobar\n\nbaz"""', matcher.GetSource())
def testQuoteTypeMismatch(self):
node = create_node.Str('foobar')
string = '"foobar\''
matcher = source_match.GetMatcher(node)
with self.assertRaises(ValueError):
matcher.Match(string)
def testSChange(self):
node = create_node.Str('foobar')
string = '"foobar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
node.s = 'hello'
self.assertEqual('"hello"', matcher.GetSource())
def testSChangeInContinuation(self):
node = create_node.Str('foobar')
string = '"foo"\n"bar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
node.s = 'foobaz'
self.assertEqual('"foobaz"', matcher.GetSource())
def testQuoteTypeChange(self):
node = create_node.Str('foobar')
string = '"foobar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
matcher.quote_type = "'"
self.assertEqual("'foobar'", matcher.GetSource())
def testQuoteTypeChangeToTripleQuote(self):
node = create_node.Str('foobar')
string = '"foobar"'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
matcher.quote_type = "'''"
self.assertEqual("'''foobar'''", matcher.GetSource())
class SubscriptMatcherTest(unittest.TestCase):
"""Tests for the SyntaxFreeLine matcher."""
def testBasicMatch(self):
node = create_node.Subscript('a', 1)
string = 'a[1]'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('a[1]', matcher.GetSource())
def testAllPartsMatch(self):
node = create_node.Subscript('a', 1, 2, 3)
string = 'a[1:2:3]'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('a[1:2:3]', matcher.GetSource())
def testSeparatedWithStrings(self):
node = create_node.Subscript('a', 1, 2, 3)
string = 'a [ 1 : 2 : 3 ]'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('a [ 1 : 2 : 3 ]', matcher.GetSource())
class SyntaxFreeLineMatcherTest(unittest.TestCase):
"""Tests for the SyntaxFreeLine matcher."""
def testBasicMatch(self):
node = create_node.SyntaxFreeLine()
string = '\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('\n', matcher.GetSource())
def testVeryShortMatch(self):
node = create_node.SyntaxFreeLine(
comment='', col_offset=0, comment_indent=0)
string = '#\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('#\n', matcher.GetSource())
def testCommentMatch(self):
node = create_node.SyntaxFreeLine(
comment='comment', col_offset=0, comment_indent=0)
string = '#comment\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('#comment\n', matcher.GetSource())
def testIndentedCommentMatch(self):
node = create_node.SyntaxFreeLine(
comment='comment', col_offset=0, comment_indent=2)
string = '# comment\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual('# comment\n', matcher.GetSource())
def testOffsetCommentMatch(self):
node = create_node.SyntaxFreeLine(
comment='comment', col_offset=2, comment_indent=0)
string = ' #comment\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(' #comment\n', matcher.GetSource())
def testChangeComment(self):
node = create_node.SyntaxFreeLine(
comment='comment', col_offset=1, comment_indent=0)
string = ' #comment\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
node.col_offset = 0
node.comment_indent = 1
node.comment = 'hello'
self.assertEqual('# hello\n', matcher.GetSource())
def testNotCommentFails(self):
node = create_node.SyntaxFreeLine(
comment='comment', col_offset=1, comment_indent=0)
string = ' comment\n'
matcher = source_match.GetMatcher(node)
with self.assertRaises(source_match.BadlySpecifiedTemplateError):
matcher.Match(string)
class TryExceptMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.TryExcept(
[create_node.Expr(create_node.Name('a'))],
[create_node.ExceptHandler()])
string = """try:
a
except:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchMultipleExceptHandlers(self):
node = create_node.TryExcept(
[create_node.Expr(create_node.Name('a'))],
[create_node.ExceptHandler('TestA'),
create_node.ExceptHandler('TestB')])
string = """try:
a
except TestA:
pass
except TestB:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchExceptAndOrElse(self):
node = create_node.TryExcept(
[create_node.Expr(create_node.Name('a'))],
[create_node.ExceptHandler()],
orelse=[create_node.Pass()])
string = """try:
a
except:
pass
else:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testMatchWithEmptyLine(self):
node = create_node.TryExcept(
[create_node.Expr(create_node.Name('a'))],
[create_node.ExceptHandler()])
string = """try:
a
except:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class TryFinallyMatcherTest(unittest.TestCase):
def testBasicMatch(self):
node = create_node.TryFinally(
[create_node.Expr(create_node.Name('a'))],
[create_node.Expr(create_node.Name('c'))])
string = """try:
a
finally:
c
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithExcept(self):
node = create_node.TryFinally(
[create_node.TryExcept(
[create_node.Expr(create_node.Name('a'))],
[create_node.ExceptHandler()])],
[create_node.Expr(create_node.Name('c'))])
string = """try:
a
except:
pass
finally:
c
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicMatchWithBlankLines(self):
node = create_node.TryFinally(
[create_node.Expr(create_node.Name('a'))],
[create_node.Expr(create_node.Name('c'))])
string = """try:
a
finally:
c
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class UnaryOpMatcherTest(unittest.TestCase):
def testUAddUnaryOp(self):
node = create_node.UnaryOp(
create_node.UAdd(),
create_node.Name('a'))
string = '+a'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testUSubUnaryOp(self):
node = create_node.UnaryOp(
create_node.USub(),
create_node.Name('a'))
string = '-a'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testNotUnaryOp(self):
node = create_node.UnaryOp(
create_node.Not(),
create_node.Name('a'))
string = 'not a'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testInvertUnaryOp(self):
node = create_node.UnaryOp(
create_node.Invert(),
create_node.Name('a'))
string = '~a'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
class WithMatcherTest(unittest.TestCase):
def testBasicWith(self):
node = create_node.With(
create_node.Name('a'))
string = 'with a:\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testBasicWithAs(self):
node = create_node.With(
create_node.Name('a'), as_part=create_node.Name('b'))
string = 'with a as b:\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testWithAsTuple(self):
node = create_node.With(
create_node.Name('a'),
as_part=create_node.Tuple(create_node.Name('b'),
create_node.Name('c')))
string = 'with a as (b, c):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
def testChangeWithAsTuple(self):
node = create_node.With(
create_node.Name('a'),
as_part=create_node.Tuple(create_node.Name('b'),
create_node.Name('c')))
string = 'with a as (b, c):\n pass\n'
matcher = source_match.GetMatcher(node)
matcher.Match(string)
node.context_expr = create_node.Name('d')
node.optional_vars.elts[0] = create_node.Name('e')
node.optional_vars.elts[1] = create_node.Name('f')
self.assertEqual('with d as (e, f):\n pass\n', matcher.GetSource())
def testCompoundWith(self):
node = create_node.With(
create_node.Name('a'),
as_part=create_node.Name('c'),
body=[
create_node.With(
create_node.Name('b'),
as_part=create_node.Name('d')
)]
)
string = """with a as c, b as d:
pass
"""
matcher = source_match.GetMatcher(node)
matcher.Match(string)
self.assertEqual(string, matcher.GetSource())
  # TODO: Re-enable this after adding indent information to matchers
@unittest.expectedFailure
def testCompoundWithReplacements(self):
node = create_node.With(
create_node.Name('a'),
as_part=create_node.Name('c'),
body=[
create_node.With(
create_node.Name('b'),
as_part=create_node.Name('d')
)]
)
module_node = create_node.Module(node)
string = 'with a as c, b as d:\n pass\n'
node.matcher = source_match.GetMatcher(node)
node.matcher.Match(string)
node.body[0] = create_node.With(
create_node.Name('e'),
as_part=create_node.Name('f')
)
self.assertEqual('with a as c, e as f:\n pass\n',
node.matcher.GetSource())
if __name__ == '__main__':
unittest.main()
| python |
from django.contrib import admin
from .models import ChatUser
admin.site.register(ChatUser)
| python |
from config import *
from dbMgr import *
@app.before_request
def clear_trailing():
rp = request.path
if rp != '/' and rp.endswith('/'):
return redirect(rp[:-1])
@app.route('/test')
def default():
return render_template('login.html')
@app.before_request
def before():
logging.info("IP address: {}".format(request.remote_addr))
#logging.info("Received request with header: {}".format(request.headers))
pass
@app.route('/validate', methods=['GET', 'POST'])
def authorizeFacebookUser():
if request.method == 'POST':
# Get command pass from login page and verify
#print(request.form)
logging.info('Input received: {}'.format(request.form))
if request.form['pw'] == curationpass:
return render_template('login_fb.html', getcommanpass=False)
else:
return render_template('login_fb.html', getcommanpass=True, rsp="Invalid passcode! Please try again.")
return redirect(url_for('index'))
@app.route('/curation')
def show_curation():
if current_user.is_authenticated:
return render_template('curation.html')
else:
return redirect(url_for('index'))
@app.route('/datatable')
def datatable():
if current_user.is_authenticated:
return render_template('datatable.html', server=server[:-1], keys=sorted(museums.keys()), data=returnCurationResults())
else:
return redirect(url_for('index'))
@app.route('/spec')
def show_specs():
return render_template('spec.html', server=server[7:-1])
@app.route('/profile')
def show_user_profile():
if current_user.is_authenticated:
# Get Keys
keys = [t for t in sorted(museums.keys()) if t != "ulan" ]
# Get User stats
# getStats about all the questions answered by this user
u = dbC[dname]["curator"].find_one({'uid':current_user.email}, projection={'_id':False})
answers = dbC[dname]["answer"].find({'author':current_user.email})
# Initialize per museum stats
stats = {}
for tag in list(museums.keys()):
stats[tag] = {"matched":0,"unmatched":0,"no-conclusion":0}
for a in answers:
# find question and check its current status
q = dbC[dname]["question"].find_one({'_id':ObjectId(a['qid'])})
for tag in q['tags']:
tag = dbC[dname]["tag"].find_one({'_id':ObjectId(tag)})['tagname']
if q['status'] == statuscodes["Agreement"]:
stats[tag]["matched"] += 1
elif q['status'] == statuscodes["Disagreement"]:
stats[tag]["unmatched"] += 1
elif q['status'] == statuscodes["Non-conclusive"]:
stats[tag]["no-conclusion"] += 1
elif q['status'] == statuscodes["InProgress"]:
if a["value"] == 3:
stats[tag]["no-conclusion"] += 1
return render_template('profile.html', keys=keys, museums=museums, userStats=stats, server=server[:-1])
return redirect('/login')
@app.route('/results')
def show_results_page():
if current_user.is_authenticated:
keys = [t for t in sorted(museums.keys())]
return render_template('results.html', keys=keys, server=server[:-1])
return redirect('/login')
@app.route('/stats', methods=['GET'])
def get_museum_stats():
tag = request.args['tag'].lower()
#print("Received stats request for tag : "+tag)
logging.info("Received stats request for tag : {}".format(tag))
if current_user.is_authenticated:
return jsonify(museums[tag])
return redirect('/login')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.route('/done')
def done():
if current_user.is_authenticated:
return render_template('done.html')
return redirect('/login')
@app.route('/about')
def about():
return render_template('about.html')
| python |
import doctest
import pytest
if __name__ == "__main__":
doctest.testmod()
pytest.main()
| python |
"Iterative Solvers for Sparse Linear Systems"
#from info import __doc__
from .iterative import *
from .minres import minres
from .lgmres import lgmres
from .lsqr import lsqr
from .lsmr import lsmr
from ._gcrotmk import gcrotmk
from .tfqmr import tfqmr
__all__ = [
'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
'lgmres', 'lsmr', 'lsqr',
'minres', 'qmr', 'tfqmr'
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| python |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the integration test for the gym skill."""
import os
import pytest
import shutil
import signal
import subprocess
import sys
import tempfile
import time
from pathlib import Path
import yaml
from aea.configurations.base import SkillConfig
from ...common.click_testing import CliRunner
from aea.cli import cli
from tests.conftest import CLI_LOG_OPTION
class TestGymSkill:
"""Test that gym skill works."""
@classmethod
def setup_class(cls):
"""Set up the test class."""
cls.runner = CliRunner()
cls.agent_name = "my_gym_agent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
def test_gym(self, pytestconfig):
"""Run the gym skill sequence."""
if pytestconfig.getoption("ci"):
pytest.skip("Skipping the test since it doesn't work in CI.")
# add packages folder
packages_src = os.path.join(self.cwd, 'packages')
packages_dst = os.path.join(os.getcwd(), 'packages')
shutil.copytree(packages_src, packages_dst)
# create agent
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "create", self.agent_name], standalone_mode=False)
assert result.exit_code == 0
agent_dir_path = os.path.join(self.t, self.agent_name)
os.chdir(agent_dir_path)
# add packages and install dependencies
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "add", "skill", "gym"], standalone_mode=False)
assert result.exit_code == 0
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "add", "connection", "gym"], standalone_mode=False)
assert result.exit_code == 0
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "install"], standalone_mode=False)
assert result.exit_code == 0
# add gyms folder from examples
gyms_src = os.path.join(self.cwd, 'examples', 'gym_ex', 'gyms')
gyms_dst = os.path.join(self.t, self.agent_name, 'gyms')
shutil.copytree(gyms_src, gyms_dst)
# change config file of gym connection
file_src = os.path.join(self.cwd, 'tests', 'test_packages', 'test_skills', 'data', 'connection.yaml')
file_dst = os.path.join(self.t, self.agent_name, 'connections', 'gym', 'connection.yaml')
shutil.copyfile(file_src, file_dst)
# change number of training steps
skill_config_path = Path(self.t, self.agent_name, "skills", "gym", "skill.yaml")
skill_config = SkillConfig.from_json(yaml.safe_load(open(skill_config_path)))
skill_config.tasks.read("GymTask").args["nb_steps"] = 100
yaml.safe_dump(skill_config.json, open(skill_config_path, "w"))
process = subprocess.Popen([
sys.executable,
'-m',
'aea.cli',
"run",
"--connections",
"gym"
],
stdout=subprocess.PIPE,
env=os.environ.copy())
# check the gym run ends
time.sleep(10.0)
process.send_signal(signal.SIGINT)
process.wait(timeout=5)
assert process.returncode == 0
poll = process.poll()
if poll is None:
process.terminate()
process.wait(2)
os.chdir(self.t)
self.result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "delete", self.agent_name], standalone_mode=False)
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
| python |
"""Tests for reloading generated pyi."""
from pytype import utils
from pytype.pytd import pytd
from pytype.tests import test_inference
class ReingestTest(test_inference.InferenceTest):
"""Tests for reloading the pyi we generate."""
def testContainer(self):
ty = self.Infer("""
class Container:
def Add(self):
pass
class A(Container):
pass
""")
with utils.Tempdir() as d:
d.create_file("foo.pyi", pytd.Print(ty))
self.assertNoErrors("""
# u.py
from foo import A
A().Add()
""", pythonpath=[d.path])
def testUnion(self):
ty = self.Infer("""
class Union(object):
pass
x = {"Union": Union}
""")
with utils.Tempdir() as d:
d.create_file("foo.pyi", pytd.Print(ty))
self.assertNoErrors("""
from foo import Union
""", pythonpath=[d.path])
if __name__ == "__main__":
test_inference.main()
| python |
import time
import numpy as np
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
# import dataprep for tomography module
# import tomography module
# using the data prep module of analysis V2
# from pycqed.analysis_v2 import tomography_dataprep as dataprep
from pycqed.analysis import measurement_analysis as ma
try:
import qutip as qt
except ImportError as e:
pass
# logging.warning('Could not import qutip, tomo code will not work')
def reshape_block(shots_data, segments_per_block=16, block_size=4092, mode='truncate'):
"""
inputs: shots_data 1D array of dimension N
organizes data in blocks of dimension block_size.
num of blocks is N/block_size
"""
N = len(shots_data)
# Data dimension needs to be an integer multiple of block_size
assert(N%block_size==0)
num_blocks = N//block_size
full_segments = block_size//segments_per_block
orfan_segments = block_size % segments_per_block
missing_segments = segments_per_block - orfan_segments
# print(N,num_blocks,full_segments,orfan_segments,missing_segments)
reshaped_data = shots_data.reshape((num_blocks,block_size))
if mode.lower()=='truncate':
truncate_idx = full_segments*segments_per_block
return reshaped_data[:,:truncate_idx]
elif mode.lower()=='padd':
padd_dim = (full_segments+1)*segments_per_block
return_block = np.nan*np.ones((num_blocks,padd_dim))
return_block[:,:block_size] = reshaped_data
return return_block
else:
raise ValueError('Mode not understood. Needs to be truncate or padd')
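# Illustrative sketch (added for clarity, not part of the original analysis code):
# with block_size=8 and segments_per_block=3, a 16-point trace splits into two
# blocks of 8 columns; 'truncate' keeps the first 2*3=6 columns of each block,
# while 'padd' widens each block to 9 columns and fills the tail with NaN.
def _demo_reshape_block():  # hypothetical helper, safe to remove
    data = np.arange(16.0)
    truncated = reshape_block(data, segments_per_block=3, block_size=8, mode='truncate')
    padded = reshape_block(data, segments_per_block=3, block_size=8, mode='padd')
    return truncated.shape, padded.shape  # (2, 6), (2, 9)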
def all_repetitions(shots_data,segments_per_block=16):
flat_dim = shots_data.shape[0]*shots_data.shape[1]
# Data dimension needs to divide the segments_per_block
assert(flat_dim%segments_per_block==0)
num_blocks = flat_dim // segments_per_block
block_data = shots_data.reshape((num_blocks,segments_per_block))
return block_data
def get_segments_average(shots_data, segments_per_block=16, block_size=4092, mode='truncate', average=True):
    """Reshape raw shot data into repetitions of the segment pattern and, if
    average is True, return the per-segment mean over all repetitions."""
    reshaped_data = reshape_block(shots_data=shots_data,
segments_per_block=segments_per_block,
block_size=block_size,
mode=mode)
all_reps = all_repetitions(shots_data=reshaped_data,
segments_per_block=segments_per_block)
if average:
return np.mean(all_reps,axis=0)
else:
return all_reps
class ExpectationValueCalculation:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
avg_h1 = self.ma_obj.measured_values[0]
avg_h2 = self.ma_obj.measured_values[1]
avg_h12 = self.ma_obj.measured_values[2]
# Binning all the points required for the tomo
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
# 108 x 1
# get the calibration points by averaging over the five measurements
# taken knowing the initial state we put in
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
# print(len(self.measurements_cal))
# print(self.measurements_cal)
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[0:4])
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
# print(self.measurements_cal[0:4])
# print(betas[0:4])
betas[4:8] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[4:8])
# print(betas[4:8])
betas[8:] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[8:12])
# print(betas[8:])
return betas
def expectation_value_calculation_IdenZ(self):
betas = self._calibrate_betas()
#inverting the unprimed beta matrix
#up is unprimed
self.betas = betas
# print(self.betas[0:4], self.betas[4:8], self.betas[8:])
beta_0_up =self.betas[0]
beta_1_up =self.betas[1]
beta_2_up =self.betas[2]
beta_3_up =self.betas[3]
beta_matrix_up = np.array([[beta_0_up,beta_1_up,beta_2_up,beta_3_up],
[beta_0_up,-1*beta_1_up,beta_2_up,-1*beta_3_up],
[beta_0_up,beta_1_up,-1*beta_2_up,-1*beta_3_up],
[beta_0_up,-1*beta_1_up,-1*beta_2_up,beta_3_up]])
#assuming 0:4 are
# expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[1:4])
expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[0:4])
#inverting the primed beta matrix
#p is primed
beta_0_p =self.betas[4]
beta_1_p =self.betas[5]
beta_2_p =self.betas[6]
beta_3_p =self.betas[7]
beta_matrix_p = np.array([[beta_0_p,beta_1_p,beta_2_p,beta_3_p],
[beta_0_p,-1*beta_1_p,beta_2_p,-1*beta_3_p],
[beta_0_p,beta_1_p,-1*beta_2_p,-1*beta_3_p],
[beta_0_p,-1*beta_1_p,-1*beta_2_p,beta_3_p]])
# beta_matrix_p = np.array([[-1*beta_1_p,beta_2_p,-1*beta_3_p],
# [beta_1_p,-1*beta_2_p,-1*beta_3_p],
# [-1*beta_1_p,-1*beta_2_p,beta_3_p]])
#assuming 0:4 are
expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[8:12])
# expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
#inverting the unprimed beta matrix
#up is unprimed
beta_0_pp =self.betas[8]
beta_1_pp =self.betas[9]
beta_2_pp =self.betas[10]
beta_3_pp =self.betas[11]
beta_matrix_pp = np.array([[beta_0_pp,beta_1_pp,beta_2_pp,beta_3_pp],
[beta_0_pp,-1*beta_1_pp,beta_2_pp,-1*beta_3_pp],
[beta_0_pp,beta_1_pp,-1*beta_2_pp,-1*beta_3_pp],
[beta_0_pp,-1*beta_1_pp,-1*beta_2_pp,beta_3_pp]])
# beta_matrix_pp = np.array([[-1*beta_1_pp,beta_2_pp,-1*beta_3_pp],
# [beta_1_pp,-1*beta_2_pp,-1*beta_3_pp],
# [-1*beta_1_pp,-1*beta_2_pp,beta_3_pp]])
#assuming 0:4 are
expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_pp), self.measurements_tomo[16:20])
# expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
#take the mean of calculated expectation values of II, IZ, ZI, ZZ
#for three different beta vectors
expect_value_IdenZ = np.mean( np.array([expect_value_IdenZ_up,
expect_value_IdenZ_p,
expect_value_IdenZ_pp]),
axis=0 )
print(expect_value_IdenZ_up)
print(expect_value_IdenZ_p)
print(expect_value_IdenZ_pp)
return expect_value_IdenZ
def expectation_value_calculation_XX(self):
expect_value_XX_up = ((self.measurements_tomo[4] + self.measurements_tomo[5]) -2*self.betas[0])/2*self.betas[3]
expect_value_XX_p = ((self.measurements_tomo[12] + self.measurements_tomo[13])-2*self.betas[4])/2*self.betas[7]
expect_value_XX_pp = ((self.measurements_tomo[20] + self.measurements_tomo[21]) - 2*self.betas[8])/2*self.betas[11]
expectation_value_XX = (expect_value_XX_up + expect_value_XX_p + expect_value_XX_pp)/3
# print(expect_value_XX_up, expect_value_XX_p, expect_value_XX_pp)
return expectation_value_XX
def expectation_value_calculation_YY(self):
expect_value_YY_up = ((self.measurements_tomo[6] + self.measurements_tomo[7]) -2*self.betas[0])/2*self.betas[3]
expect_value_YY_p = ((self.measurements_tomo[14] + self.measurements_tomo[15])-2*self.betas[4])/2*self.betas[7]
expect_value_YY_pp = ((self.measurements_tomo[22] + self.measurements_tomo[23]) - 2*self.betas[8])/2*self.betas[11]
# print(expect_value_YY_up, expect_value_YY_p, expect_value_YY_pp)
expectation_value_YY = (expect_value_YY_up + expect_value_YY_p + expect_value_YY_pp)/3
return expectation_value_YY
def execute_expectation_value_calculation(self):
expect_values = np.zeros(6)
expect_values[0:4] = self.expectation_value_calculation_IdenZ()
# print(self.expectation_value_calculation_IdenZ())
expect_values[4] = self.expectation_value_calculation_XX()
# print(self.expectation_value_calculation_XX())
expect_values[5] = self.expectation_value_calculation_YY()
# print(self.expectation_value_calculation_YY())
return expect_values, self.betas
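# Illustrative sketch (added, not part of the original analysis): the calibration
# coefficient matrix inverted inside _calibrate_betas() above has entries
# (-1)**popcount(i & j); for two qubits it is the 4x4 matrix in the trailing comment.
def _demo_cal_matrix(n_states=4):  # hypothetical helper, safe to remove
    cal = np.zeros((n_states, n_states))
    for i in range(n_states):
        for j in range(n_states):
            cal[i, j] = (-1) ** bin(i & j).count("1")
    return cal  # [[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]]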
class ExpectationValueCalculation2:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
avg_h1 = self.ma_obj.measured_values[0]
avg_h2 = self.ma_obj.measured_values[1]
avg_h12 = self.ma_obj.measured_values[2]
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
# 108 x 1
# get the calibration points by averaging over the five measurements
# taken knowing the initial state we put in
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = betas[0:4]
betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = betas[4:8]
betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = betas[8:]
return betas
def assemble_M_matrix_single_block(self, beta_array):
M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([beta_array[0],
-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[0],
beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([beta_array[0],
-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([beta_array[0],
0, 0, 0, -beta_array[1],
-beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
0, 0, -beta_array[1],
-beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
return self.M_matrix
def invert_M_matrix(self):
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
ZI = (ev[1] - ev[2])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[4] + ev[5])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
return expect_values_VQE
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
return expect_values_VQE
class ExpectationValueCalculation3_shots:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
segments_per_block=16,
block_size=4094,
average=False)
shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
segments_per_block=16,
block_size=4094,
average=False)
shots_I_q0q1 = np.multiply(shots_I_q0/(np.max(shots_I_q0)-np.min(shots_I_q0)),shots_I_q1/(np.max(shots_I_q1)-np.min(shots_I_q1)))
avg_h1 = np.mean(shots_I_q0,axis=0)
avg_h2 = np.mean(shots_I_q1,axis=0)
avg_h12 = np.mean(shots_I_q0q1,axis=0)
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
mean_h1 = (h1_00+h1_10+h1_01+h1_11)/4
mean_h2 = (h2_00+h2_01+h2_10+h2_11)/4
mean_h12 = (h12_00+h12_11+h12_01+h12_10)/4
#subtract beta 0 from all measurements
#rescale them
avg_h1 -= mean_h1
avg_h2 -= mean_h2
avg_h12 -= mean_h12
scale_h1 = (h1_00+h1_10-h1_01-h1_11)/4
scale_h2 = (h2_00+h2_01-h2_10-h2_11)/4
scale_h12 = (h12_00+h12_11-h12_01-h12_10)/4
avg_h1 = (avg_h1)/scale_h1
avg_h2 = (avg_h2)/scale_h2
avg_h12 = (avg_h12)/scale_h12
#The averages have been redefined so redefine the cal terms
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
# 108 x 1
# get the calibration points by averaging over the five measurements
# taken knowing the initial state we put in
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = betas[0:4]
betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = betas[4:8]
betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = betas[8:]
return betas
def assemble_M_matrix_single_block(self, beta_array):
# II IZ ZI ZZ IX XI XX IY YI YY
M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([beta_array[0],
-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[0],
beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([beta_array[0],
-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([beta_array[0], # 36
0, 0, 0, -1*beta_array[1],
-1*beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0, # 29
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
0, 0, -1*beta_array[1],
-1*beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
return self.M_matrix
def invert_M_matrix(self):
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
ZI = (ev[2] - ev[1])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[5] + ev[4])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
print(self.expect_values)
expect_values_VQE = np.array([1,
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
self.expect_values = expect_values_VQE
return expect_values_VQE
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
beta_0_vec = np.repeat([self.betas_up[0],
self.betas_p[0],
self.betas_pp[0]], 8)
rescaled_measurements_tomo = self.measurements_tomo - beta_0_vec
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
self.expect_values = expect_values_VQE
print(self.expect_values)
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
self.expect_values = expect_values_VQE
return expect_values_VQE
class ExpectationValueCalculation2_shots:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
segments_per_block=16,
block_size=4094,
average=False)
shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
segments_per_block=16,
block_size=4094,
average=False)
shots_I_q0q1 = np.multiply(shots_I_q0/(np.max(shots_I_q0)-np.min(shots_I_q0)),shots_I_q1/(np.max(shots_I_q1)-np.min(shots_I_q1)))
avg_h1 = np.mean(shots_I_q0,axis=0)
avg_h2 = np.mean(shots_I_q1,axis=0)
avg_h12 = np.mean(shots_I_q0q1,axis=0)
h1_00 = np.mean(avg_h1[8:10])
h1_01 = np.mean(avg_h1[10:12])
h1_10 = np.mean(avg_h1[12:14])
h1_11 = np.mean(avg_h1[14:])
h2_00 = np.mean(avg_h2[8:10])
h2_01 = np.mean(avg_h2[10:12])
h2_10 = np.mean(avg_h2[12:14])
h2_11 = np.mean(avg_h2[14:])
h12_00 = np.mean(avg_h12[8:10])
h12_01 = np.mean(avg_h12[10:12])
h12_10 = np.mean(avg_h12[12:14])
h12_11 = np.mean(avg_h12[14:])
self.measurements_tomo = (
np.array([avg_h1[0:8], avg_h2[0:8],
avg_h12[0:8]])).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
# 108 x 1
# get the calibration points by averaging over the five measurements
# taken knowing the initial state we put in
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = betas[0:4]
betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = betas[4:8]
betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = betas[8:]
return betas
def assemble_M_matrix_single_block(self, beta_array):
M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([beta_array[0],
-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[0],
beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([beta_array[0],
-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([beta_array[0],
0, 0, 0, -beta_array[1],
-beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
0, 0, -beta_array[1],
-beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
return self.M_matrix
def invert_M_matrix(self):
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
ZI = (ev[1] - ev[2])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[4] + ev[5])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
return expect_values_VQE
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
return expect_values_VQE
| python |
from sys import modules
from unittest.mock import MagicMock
mock_sys_info = modules["pitop.common.sys_info"] = MagicMock()
mock_sys_info.is_pi = MagicMock(return_value=False)
mock_curr_session_info = modules["pitop.common.current_session_info"] = MagicMock()
mock_curr_session_info.get_first_display = MagicMock(return_value=None)
modules_to_patch = [
"PIL",
"pyinotify",
"pitop.camera",
"numpy",
"pitop.common",
]
for module in modules_to_patch:
modules[module] = MagicMock()
from os import environ, path
from unittest import TestCase, skip
from PIL import Image
# Avoid getting the mocked modules in other tests
for patched_module in modules_to_patch:
del modules[patched_module]
root = path.dirname(path.dirname(path.abspath(__file__)))
@skip
class OLEDTestCase(TestCase):
@classmethod
def setUpClass(cls):
environ["SDL_VIDEODRIVER"] = "dummy"
@classmethod
def tearDownClass(cls):
del environ["SDL_VIDEODRIVER"]
def setUp(self):
from pitop.miniscreen import Miniscreen
self.miniscreen = Miniscreen()
def tearDown(self):
pass
def get_bitmap_pix(self, file_path):
bmp = Image.open(file_path).convert("1")
bmp = bmp.point(lambda x: 0 if x == 0 else 1, "1")
return self.miniscreen.core.canvas._pil_image_to_pix_arr(bmp)
def compare_arrays(self, func_name, canvas_pix, bmp_pix):
print("CANVAS:")
print(canvas_pix)
print("BITMAP:")
print(bmp_pix)
self.assertEqual(canvas_pix.all(), bmp_pix.all())
def test_image(self):
logo_path = root + "/assets/images/pi-top.png"
img = Image.open(logo_path)
canvas_pix = self.miniscreen.core.canvas.image(
self.miniscreen.core.canvas.top_left(), img
)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/pi-top.bmp")
self.compare_arrays("image", canvas_pix, bmp_pix)
def test_rectangle(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.rectangle(self.miniscreen.bounding_box)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/rectangle.bmp")
self.compare_arrays("rectangle", canvas_pix, bmp_pix)
def test_arc(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.arc(
self.miniscreen.bounding_box, 0, 180
)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/arc.bmp")
self.compare_arrays("arc", canvas_pix, bmp_pix)
def test_chord(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.chord(
self.miniscreen.bounding_box, 0, 180
)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/chord.bmp")
self.compare_arrays("chord", canvas_pix, bmp_pix)
def test_ellipse(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.ellipse(self.miniscreen.bounding_box)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/ellipse.bmp")
self.compare_arrays("ellipse", canvas_pix, bmp_pix)
def test_line(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.line(self.miniscreen.bounding_box)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/line.bmp")
self.compare_arrays("line", canvas_pix, bmp_pix)
def test_pieslice(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.pieslice(
self.miniscreen.bounding_box, 0, 180
)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/pieslice.bmp")
self.compare_arrays("pieslice", canvas_pix, bmp_pix)
def test_point(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.point(self.miniscreen.bounding_box)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/point.bmp")
self.compare_arrays("point", canvas_pix, bmp_pix)
def test_polygon(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.polygon(self.miniscreen.bounding_box)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/polygon.bmp")
self.compare_arrays("polygon", canvas_pix, bmp_pix)
def test_text(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.text(
self.miniscreen.core.canvas.top_left(), "test"
)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/text.bmp")
self.compare_arrays("text", canvas_pix, bmp_pix)
def test_multiline_text(self):
self.miniscreen.reset()
canvas_pix = self.miniscreen.core.canvas.multiline_text(
self.miniscreen.core.canvas.top_left(), "Hello World!"
)
bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/multiline_text.bmp")
self.compare_arrays("multiline_text", canvas_pix, bmp_pix)
def test_max_fps(self):
max_fps = 50
self.miniscreen.reset()
self.miniscreen.fps_regulator.set_max_fps(max_fps)
max_sleep_time = self.miniscreen.fps_regulator.max_sleep_time
self.assertEqual(max_sleep_time, 1 / max_fps)
| python |
import os
import boto3
AWS_ENDPOINT_URL = os.getenv("AWS_ENDPOINT_URL", None)
def handler(event, context):
client = boto3.client("s3", endpoint_url=AWS_ENDPOINT_URL)
client.create_bucket(Bucket="foo")
client.create_bucket(Bucket="bar")
buckets = client.list_buckets()["Buckets"]
    bucket_names = []
    for bucket in buckets:
        bucket_names.append(bucket["Name"])
    return str(bucket_names)
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-17 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20170509_1559'),
('pfb_analysis', '0025_auto_20170511_1244'),
]
operations = [
migrations.AlterField(
model_name='neighborhood',
name='label',
field=models.CharField(help_text='Human-readable label for neighborhood, should not include State', max_length=256),
),
migrations.AlterUniqueTogether(
name='neighborhood',
unique_together=set([('name', 'state_abbrev', 'organization')]),
),
]
| python |
from rest_framework.exceptions import APIException
from rest_framework import status
class InvalidParameterException(APIException):
"""Exception for invalid request parameters."""
status_code = status.HTTP_400_BAD_REQUEST
default_detail = 'Request contained an invalid parameter'
default_code = 'invalid_request'
class UnprocessableEntityException(APIException):
"""https://tools.ietf.org/html/rfc4918"""
status_code = 422
default_detail = 'Request parameter is valid but unable to process due to constraints'
default_code = 'invalid_request'
class ElasticsearchConnectionException(APIException):
"""Exception for invalid request parameters."""
status_code = 500
default_detail = 'Unable to reach the Elasticsearch Cluster'
default_code = 'service_unavailable'
| python |
import arff
import argparse
import json
import logging
import openmlcontrib
import openmldefaults
import os
import sklearnbot
def parse_args():
metadata_file = '/home/janvanrijn/experiments/sklearn-bot/results/results__500__svc__predictive_accuracy.arff'
parser = argparse.ArgumentParser(description='Creates an ARFF file')
parser.add_argument('--output_directory', type=str, help='directory to store output',
default=os.path.expanduser('~') + '/experiments/openml-defaults/generated_data/')
parser.add_argument('--study_id', type=str, default='OpenML100', help='the tag to obtain the tasks from')
parser.add_argument('--metadata_file', type=str, default=metadata_file)
parser.add_argument('--classifier_name', type=str, default='svc', help='scikit-learn flow name')
parser.add_argument('--scoring', type=str, default='predictive_accuracy')
parser.add_argument('--resized_grid_size', type=int, default=8)
parser.add_argument('--random_seed', type=int, default=42)
return parser.parse_args()
def run(args):
root = logging.getLogger()
root.setLevel(logging.INFO)
config_space = sklearnbot.config_spaces.get_config_space(args.classifier_name, args.random_seed)
meta_data = openmldefaults.utils.get_dataset_metadata(args.metadata_file)
if args.scoring not in meta_data['measure']:
raise ValueError('Could not find measure: %s' % args.scoring)
metadata_frame = openmldefaults.utils.metadata_file_to_frame(args.metadata_file, config_space, args.scoring)
df_surrogate = openmldefaults.utils.generate_grid_dataset(metadata_frame,
config_space,
args.resized_grid_size,
args.scoring,
args.random_seed)
# if df_surrogate.shape[1] < num_params + len(study.tasks) / 2:
# raise ValueError('surrogate frame has too few columns. Min: %d Got %d' % (num_params + len(study.tasks) / 2,
# df_surrogate.shape[1]))
os.makedirs(args.output_directory, exist_ok=True)
df_surrogate.reset_index(inplace=True)
arff_object = openmlcontrib.meta.dataframe_to_arff(df_surrogate,
'surrogate_%s' % args.classifier_name,
json.dumps(meta_data))
filename = os.path.join(args.output_directory, 'surrogate__%s__%s__c%d.arff' % (args.classifier_name,
args.scoring,
args.resized_grid_size))
with open(filename, 'w') as fp:
arff.dump(arff_object, fp)
logging.info('Saved to: %s' % filename)
if __name__ == '__main__':
run(parse_args())
| python |
"""
This module executes the string matching between an input sequence T and a
pattern P using a Finite State Machine.
The complexity for building the transition function is O(m^3 x |A|) where A is the
alphabet. Since the string matching function scans the input sequence only once,
the total complexity is O(n + m^3 x |A|).
@author Filippo Squillace
@version 1.0.0
@date 07/06/2012
"""
def string_matching_FSM(T, trans, m):
"""
T: is the input sequence;
trans: is the transition function that define the pattern P we need to look
for;
    m: length of the pattern
"""
s = 0
for i,c in enumerate(T):
s = trans[s][c]
if s == m:
return i-m+1
return -1
import string as st
def transition_function(P):
"""
    The main principle in building the transition function is that every time
    we scan a new character from the input sequence, the suffix read so far
    should match a prefix of the pattern. If no suffix length makes this
    possible, the next state is the initial one; otherwise the length of the
    longest matching suffix is exactly the next state.
"""
alphabet = st.ascii_letters+st.punctuation+st.digits+st.whitespace
m = len(P)
trans = [{c:0 for c in alphabet} for i in range(m)]
for s in range(m):
for c in alphabet:
k = min(m, s+1)
while (P[:s]+c)[-k:] != P[:k]:
k-=1
trans[s][c]=k
return trans
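# Illustrative sketch (added, not part of the original module): build the automaton
# for the pattern 'abab' and scan a short text; the matcher returns the 0-based
# index of the first occurrence, or -1 if the pattern is absent.
def _demo_string_matching():  # hypothetical helper, safe to remove
    pattern = 'abab'
    trans = transition_function(pattern)
    return string_matching_FSM('xxababyy', trans, len(pattern))  # -> 2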
if __name__=='__main__':
import unittest
class StringMatchTestCase(unittest.TestCase):
def setUp(self):
# Table of (sequence,pattern,expected_result)
self.pos_cases = [\
('abcbbaanmdiababcdrttf','ababcd',11),
('abcbbaanmdiabafweefabab','abab',19),
('abcbbaanmdiasfo pfj=pewpfiojafaXre8abbafw_ eefabab','aXre8ab',30)
]
self.neg_cases = [\
('abcbbaanmdiabcdrttf','ababcd',-1),
('abcbbaanmdiabafweefaba','abab',-1),
('abcbb_?aaFSRnmfew345sdhfhhuw.fad iabafweefaba','abab',-1)
]
def test_positive(self):
for (T,P,er) in self.pos_cases:
trans = transition_function(P)
res = string_matching_FSM(T, trans, len(P))
self.assertEqual(res, er)
def test_negative(self):
for (T,P,er) in self.neg_cases:
trans = transition_function(P)
res = string_matching_FSM(T, trans, len(P))
self.assertEqual(res, er)
unittest.main()
| python |
import tkinter
window = tkinter.Tk()
window.title("Test")
# pack() returns None, so keep references to the frames before packing them;
# otherwise the buttons below would be created without their intended parents.
top_frame = tkinter.Frame(window)
top_frame.pack()
bottom_frame = tkinter.Frame(window)
bottom_frame.pack(side="bottom")
# label = tkinter.Label(window, text="Hello, world!").pack()
btn1 = tkinter.Button(top_frame, text="B1", fg="red").pack()
btn2 = tkinter.Button(top_frame, text="B2", fg="green").pack()
btn3 = tkinter.Button(bottom_frame, text="B3", fg="purple").pack(side="left")
btn4 = tkinter.Button(bottom_frame, text="B4", fg="orange").pack(side="left")
window.mainloop()
| python |
from setuptools import setup, Extension
with open('README.md', 'r') as f:
long_description = f.read()
meow_ext = Extension(
'meowhash.cpython',
# define_macros=[('MEOW_HASH_256', '0'), ('MEOW_HASH_512', '0')],
sources=['meowhash/cpython.c'],
extra_compile_args=['-mavx512f', '-mavx512vl', '-maes',
'-mavx512f', '-mavx512pf', '-mavx512er', '-mavx512cd',
'-mavx512vl', '-mavx512bw', '-mavx512dq', '-mavx512ifma',
'-mavx512vbmi'
],
include_dirs=['lib'])
setup(
name='meowhash',
version='0.1',
description='This is a demo package',
author='James Liu',
author_email='[email protected]',
license='MIT',
url='https://github.com/james7132/py-meowhash',
long_description=long_description,
packages=['meowhash'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
ext_modules=[meow_ext])
| python |
# Copyright The IETF Trust 2007-2019, All Rights Reserved
#
from django.contrib.sitemaps import GenericSitemap
from ietf.ipr.models import IprDisclosureBase
# changefreq is "never except when it gets updated or withdrawn"
# so skip giving one
queryset = IprDisclosureBase.objects.filter(state__in=('posted','removed'))
archive = {'queryset':queryset, 'date_field': 'time', 'allow_empty':True }
IPRMap = GenericSitemap(archive) # type: ignore
| python |
from grpclib.exceptions import GRPCError
from insanic.exceptions import APIException
from interstellar.exceptions import InvalidArgumentError
from grpc_test_monkey_v1.monkey_grpc import ApeServiceBase, MonkeyServiceBase
from grpc_test_monkey_v1.monkey_pb2 import ApeResponse, MonkeyResponse
class PlanetOfTheApes(ApeServiceBase):
async def GetChimpanzee(self,
stream: 'grpclib.server.Stream[grpc_test_monkey.monkey_pb2.ApeRequest, grpc_test_monkey.monkey_pb2.ApeResponse]'):
request = await stream.recv_message()
if request.include == "sound":
response = ApeResponse(id=int(request.id), extra="woo woo ahh ahh")
else:
response = ApeResponse(id=int(request.id), extra="i don't know")
await stream.send_message(response)
async def GetGorilla(self,
stream: 'grpclib.server.Stream[grpc_test_monkey.monkey_pb2.ApeRequest, grpc_test_monkey.monkey_pb2.ApeResponse]'):
request = await stream.recv_message()
if request.include == "sound":
response = ApeResponse(id=int(request.id), extra="raaahhh")
else:
response = ApeResponse(id=int(request.id), extra="i don't know")
await stream.send_message(response)
class PlanetOfTheMonkeys(MonkeyServiceBase):
async def GetMonkey(self,
stream: 'grpclib.server.Stream[grpc_test_monkey.monkey_pb2.MonkeyRequest, grpc_test_monkey.monkey_pb2.MonkeyResponse]'):
request = await stream.recv_message()
if request.id == "uncaught_exception":
raise Exception("Something Broke")
elif request.id == "api_exception":
raise APIException("help")
elif request.id == "grpc_error":
raise InvalidArgumentError(message="bad bad")
response = MonkeyResponse()
await stream.send_message(response)
| python |
#!/usr/bin/env python
# Copyright (c) 2005-2011 Grameen Foundation USA
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# See also http://www.apache.org/licenses/LICENSE-2.0.html for an
# explanation of the license and how it is applied.
import sys, re
LICENSE_TEXT="""/*
* Copyright (c) 2005-2011 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
"""
class Relicense:
"""Changes the license text that appears at the start of Mifos java files. Will add a license to files that do not contain one.
To relicense all java files in the source tree, do something like this:
find . -not -ipath "*/target/*" -not -ipath "*.svn*" -iname "*.java"| xargs -ifoo ./resources/relicense-java-file.py foo
"""
def __init__(self):
pass
def main(self):
if len(sys.argv) < 2:
sys.exit(0)
filename = sys.argv[1]
self.relicense(filename)
def relicense(self, filename):
contents = self.readEntireFile(filename)
newContents = self.replaceLicense(contents, LICENSE_TEXT)
if (contents != newContents):
self.writeEntireFile(filename, newContents)
print "Relicensed file: %s" % filename
def replaceLicense(self, contents, license):
noLicenseRe = re.match("^\w", contents, re.MULTILINE | re.DOTALL)
if (noLicenseRe):
return license + contents
licenseRe = re.compile("^(/\*.*?\*/\s*)", re.MULTILINE | re.DOTALL)
return licenseRe.sub(license, contents, 1)
def readEntireFile(self, filename):
file = open(filename, "r")
contents = file.read()
        file.close()
return contents
def writeEntireFile(self, filename, contents):
file = open(filename, "w")
        file.write(contents)
        file.close()
if __name__ == "__main__":
Relicense().main()
| python |
def vatCal(totalPrice):
result = totalPrice + (totalPrice*7/100)
return result
TotalPrice = int(input("Put your price : "))
print("Your total price is",vatCal(TotalPrice)) | python |
import angr
from angr.sim_type import SimTypeInt
######################################
# getchar
######################################
class getchar(angr.SimProcedure):
def run(self):
self.return_type = SimTypeInt(32, True)
data = self.inline_call(
# TODO: use a less private getc
angr.SIM_PROCEDURES['glibc']['_IO_getc'], 0).ret_expr # stdin
return data
| python |
#!/usr/bin/env python
import sys
sys.path.insert(0, '..')
import glob
import numpy as np
from dotmap import DotMap
from simpleplotlib import plot
from parse_logs import parse_hdfs_logs, parse_hdfs_throughput
bytes_units = 2.0**-30
types = ['HDFS+static', 'HDFS+resize', 'HDFS+reTCP', 'reHDFS+static',
'reHDFS+resize', 'reHDFS+reTCP']
fn_keys = {
'normal-16-QUEUE-False': 'static',
'normal-16-QUEUE-True-20000-reno': 'resize',
'normal-16-QUEUE-True-20000-retcp': 'reTCP',
'normal-16-ADU-False': 'adu',
'normal-16-ADU-True-20000-reno': 'adu+resize',
'normal-16-ADU-True-20000-retcp': 'adu+reTCP',
}
files = [
'/tmp/*QUEUE-False*-HDFS-dfsioe',
'/tmp/*QUEUE-True-20000-reno*-HDFS-dfsioe',
'/tmp/*QUEUE-True-20000-retcp*-HDFS-dfsioe',
'/tmp/*QUEUE-False*-reHDFS-dfsioe',
'/tmp/*QUEUE-True-20000-reno*-reHDFS-dfsioe',
'/tmp/*QUEUE-True-20000-retcp*-reHDFS-dfsioe',
]
files_short = [files[0], files[3]]
def get_default_plot_options(x, y):
options = DotMap()
options.plot_type = 'BAR'
options.legend.options.labels = ['HDFS', 'HDFS + Resize',
'HDFS + reTCP', 'reHDFS',
'reHDFS + Resize',
'reHDFS + reTCP']
options.series.color_groups = [0, 0, 0, 1, 1, 1]
options.legend.order = [0, 2, 4, 1, 3, 5]
options.legend.options.fontsize = 19
options.legend.options.ncol = 3
options.x.ticks.major.show = False
return options
def graph_wct(data):
x = data
y = [[float(j) / (len(x[i])-1) * 100 for j in xrange(len(x[i]))]
for i in xrange(len(x))]
options = get_default_plot_options(x, y)
options.plot_type = 'LINE'
options.legend.options.labels = ['HDFS', 'reHDFS']
options.series_options = [DotMap(linewidth=5) for i in range(len(x))]
options.output_fn = 'graphs/hdfs_writes_cdf.pdf'
options.x.label.xlabel = 'HDFS write completion time (ms)'
options.y.label.ylabel = 'CDF (%)'
del options.series.color_groups
del options.legend.options.ncol
del options.x.ticks.major.show
plot(x, y, options)
def graph_tail(data):
x = np.array([[0] for i in xrange(len(data))])
y = [np.percentile(d, 99) for d in data]
options = get_default_plot_options(x, y)
options.y.limits = [0, 1500]
options.output_fn = 'graphs/hdfs_99th.pdf'
options.y.label.ylabel = '99th percent. writes (ms)'
options.y.ticks.major.show = False
del options.legend.options.ncol
del options.legend.order
plot(x, y, options)
def graph_throughput(data):
x = np.array([[0] for i in xrange(len(data))])
y = data
options = get_default_plot_options(x, y)
options.horizontal_lines.lines = [80*8 + 10*8]
options.legend.options.fontsize = 18
options.y.label_offset = [-0.01, -.13]
options.y.limits = [0, 1100]
options.output_fn = 'graphs/hdfs_throughput.pdf'
options.y.label.ylabel = 'Agg. tput. (Gbps)'
options.y.ticks.major.show = False
plot(x, y, options)
def bytes_graph():
data = {}
for fn in glob.glob(sys.argv[1] + '/*.counters.txt'):
key = 'reHDFS+' if 'reHDFS' in fn else 'HDFS+'
key += [k for n, k in fn_keys.items() if n in fn][0]
c, p, _ = eval(open(fn).read())
c = sum([int(b.split('\n')[-1]) * bytes_units for b in c])
p = sum([int(b.split('\n')[-1]) * bytes_units for b in p])
data[key] = p, c
y = [data[t] for t in types]
x = np.array([[0, 1] for i in xrange(len(y))])
options = get_default_plot_options(x, y)
options.bar_labels.show = False
options.legend.options.fontsize = 18
options.y.label_offset = [-.07, -.18]
options.y.limits = [0, 40]
options.x.ticks.major.labels = DotMap(
text=['Packet', 'Circuit'])
options.y.ticks.major.labels = DotMap(
locations=[0, 5, 10, 15, 20, 25])
options.output_fn = 'graphs/hdfs_utilization.pdf'
options.x.label.xlabel = 'Which switch'
options.y.label.ylabel = 'Bytes sent (GB)'
plot(x, y, options)
if __name__ == '__main__':
graph_wct([parse_hdfs_logs(sys.argv[1] + n) for n in files_short])
graph_tail([parse_hdfs_logs(sys.argv[1] + n) for n in files])
graph_throughput([parse_hdfs_throughput(sys.argv[1] + n) for n in files])
bytes_graph()
| python |
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
import argparse
import json
import numpy as np
from isaac import Application, Cask, Codelet, Composite
import apps.samples.pick_and_place.pick_and_place as pick_and_place
from apps.samples.pick_and_place.task_planner import *
def create_composite_atlas_ur10(cask_root, joints):
'''Creates composite atlas cask with waypoints for ur10. Tested with ovkit sim.'''
if len(joints) != 6:
raise ValueError("UR10 should have 6 joints, got {}".format(len(joints)))
cask = Cask(cask_root, writable=True)
# joint waypoints
quantities = [[x, "position", 1] for x in joints]
HOME_POSE_WAYPOINT = np.array(
[1.3504, -1.4784, 1.6887, -1.7811, -1.5708, 1.3488], dtype=np.dtype("float64"))
VIEW_POSE_WAYPOINT = np.array(
[2.1358, -1.4784, 1.6887, -1.7811, -1.5708, 0.5635], dtype=np.dtype("float64"))
APPROACH_POSE_WAYPOINT = np.array(
[-0.2966, -1.062, 1.251, -1.38, -1.716, 0.217], dtype=np.dtype("float64"))
cask.write_message(
pick_and_place.create_composite_waypoint("home_pose", quantities, HOME_POSE_WAYPOINT))
cask.write_message(
pick_and_place.create_composite_waypoint("view_pose", quantities, VIEW_POSE_WAYPOINT))
cask.write_message(
pick_and_place.create_composite_waypoint("approach_pose", quantities,
APPROACH_POSE_WAYPOINT))
# gripper waypoints
quantities = [[x, "none", 1] for x in ["pump", "valve", "gripper"]]
SUCTION_ON_WAYPOINT = np.array([1.0, 0.0, 1.0], dtype=np.dtype("float64"))
SUCTION_OFF_WAYPOINT = np.array([0.0, 1.0, 0.0], dtype=np.dtype("float64"))
VALVE_OFF_WAYPOINT = np.array([0.0, 0.0, 0.0], dtype=np.dtype("float64"))
cask.write_message(
pick_and_place.create_composite_waypoint("suction_on", quantities, SUCTION_ON_WAYPOINT))
cask.write_message(
pick_and_place.create_composite_waypoint("suction_off", quantities, SUCTION_OFF_WAYPOINT))
cask.write_message(
pick_and_place.create_composite_waypoint("valve_off", quantities, VALVE_OFF_WAYPOINT))
class MissionFeeder(Codelet):
'''Reads a list of tasks from config and adds it to task_planner.'''
def start(self):
tasks = self.config.tasks
if tasks is None:
self.report_failure("No valid mission")
return
if not hasattr(self, 'task_planner'):
raise AttributeError("task_planner not set before codelet start.")
if not isinstance(self.task_planner, TaskPlannerInterface):
raise TypeError("task_planner is not of type TaskPlannerInterface")
self.task_planner.clear_all_tasks()
for m in tasks:
            self.task_planner.pick_and_place_object(m['pick'], m['place'])
self.log_info("Received {0} tasks".format(len(tasks)))
self.report_success()
class TasksRemainingChecker(Codelet):
'''Reports success if task_manager has remaining tasks on start, otherwise false.'''
def start(self):
if not hasattr(self, 'task_planner'):
raise AttributeError("task_planner not set before codelet start.")
if not isinstance(self.task_planner, TaskPlannerInterface):
raise TypeError("task_planner is not of type TaskPlannerInterface")
        if self.task_planner.all_tasks_done():
self.report_failure("All tasks are done.")
else:
self.report_success("Tasks remain.")
class TaskRemover(Codelet):
'''Marks the current task in the task planner as done and reports success on start.'''
def start(self):
if not hasattr(self, 'task_planner'):
raise AttributeError("task_planner not set before codelet start.")
if not isinstance(self.task_planner, TaskPlannerInterface):
raise TypeError("task_planner is not of type TaskPlannerInterface")
        self.task_planner.mark_current_task_as_done()
self.report_success("Current task is done.")
class AllTasksDoneChecker(Codelet):
'''Reports success if task_planner has no more tasks on start, otherwise reports failure.'''
def start(self):
if not hasattr(self, 'task_planner'):
raise AttributeError("task_planner not set before codelet start.")
if not isinstance(self.task_planner, TaskPlannerInterface):
raise TypeError("task_planner is not of type TaskPlannerInterface")
        if self.task_planner.all_tasks_done():
self.report_success("All tasks are done.")
else:
self.report_failure("Tasks remain.")
# Main part that sets up the app's logic and starts it afterwards.
if __name__ == '__main__':
    # Parse command line arguments. These configure the output atlas cask, the kinematic file,
    # the joint speed and acceleration limits, the simulator host/ports, the robot index and
    # name, and the mission server connection used to receive pick-and-place tasks.
parser = argparse.ArgumentParser()
parser.add_argument(
"--cask", help="Path to output atlas", default="/tmp/pick_and_place_waypoints")
parser.add_argument(
"--kinematic_file",
help="Path to kinematic json file",
default="apps/assets/kinematic_trees/ur10.kinematic.json")
parser.add_argument("--speed", help="Maximum joint speed", type=float, default=1.0)
parser.add_argument(
"--acceleration", help="Maximum joint acceleration", type=float, default=1.0)
parser.add_argument(
"--sim_host", type=str, help="Host ip for simulator (TcpSubscriber)", default="localhost")
parser.add_argument(
"--sim_output_port",
type=int,
help="Port to receive message from simulator (TcpSubscriber)",
default=46000)
parser.add_argument(
"--sim_input_port",
type=int,
help="Port to publish message to simulator (TcpPublisher). Default to output_port+1")
parser.add_argument(
"--robot_index", type=int, help="Channel suffix for goal for the current robot.", default=0)
parser.add_argument("--sight_port", type=int, help="Port for websight", default=3000)
parser.add_argument(
"--robot_name",
type=str,
help="Accept missions from the remote mission server for the robot with the given name",
default="station")
parser.add_argument(
"--mission_host",
type=str,
help="The ip address or hostname of the host to connect to and receive missions from",
default="localhost")
parser.add_argument(
"--mission_port",
type=int,
help="Port to receive goal from task manager (TcpSubscriber).",
default=9998)
args = parser.parse_args()
# Read the arm joints from file.
arm_joint_names = []
with open(args.kinematic_file) as kinematic_file_handle:
file_contents = json.load(kinematic_file_handle)
if file_contents is None:
raise ValueError("Unable to load kinematic json file {0}".format(args.kinematic_file))
for link in file_contents['links']:
if 'motor' in link and link['motor']['type'] != 'constant':
arm_joint_names.append(link['name'])
# create composite atlas
create_composite_atlas_ur10(args.cask, arm_joint_names)
app = Application(app_filename='packages/multi_robot_fof/station.app.json')
app.load_module("sight")
app.nodes["atlas"]["CompositeAtlas"].config.cask = args.cask
app.load('packages/multi_robot_fof/ur10.config.json')
# Configure the kinematic tree for the controller and for inverse kinematics.
kinematic_tree = app.nodes['controller.kinematic_tree']['KinematicTree']
kinematic_tree.config.kinematic_file = args.kinematic_file
root_frame = '/environments/stations/station_{0}/assembly_robot/ur10'.format(args.robot_index)
for node in ['pick_task.cartesian_planner', 'place_task.cartesian_planner']:
inverse_kinematics_planner = app.nodes[node]['EndEffectorGlobalPlanner']
inverse_kinematics_planner.config.kinematic_tree = 'controller.kinematic_tree'
inverse_kinematics_planner.config.root_frame = root_frame
app.nodes['controller.kinematic_tree']['KinematicTreeToPoseTree'].config.root_frame = root_frame
app.nodes['pick_task.detections_to_pose_tree'][
'DetectionsToPoseTree'].config.detection_frame = 'world'
# Configure velocity and acceleration limits for the planner.
planner = app.nodes['controller.local_plan']['MultiJointLqrPlanner']
planner.config.speed_min = [-args.speed] * len(arm_joint_names)
planner.config.speed_max = [args.speed] * len(arm_joint_names)
planner.config.acceleration_min = [-args.acceleration] * len(arm_joint_names)
planner.config.acceleration_max = [args.acceleration] * len(arm_joint_names)
task_planner = SimpleTaskPlanner()
# Prepare relinking the target poses
app.nodes['pick_task.relink_target_pose'].add(pick_and_place.RelinkTargetPoseCodelet)
destination = app.nodes['place_task.relink_destination_pose'].add(
pick_and_place.RelinkDestinationPoseCodelet)
destination.config.root_frame = root_frame
# Task flow control
app.nodes['mission_feeder'].add(MissionFeeder)
app.nodes['mission_done_checker'].add(AllTasksDoneChecker)
app.nodes['task_remain_checker'].add(TasksRemainingChecker)
app.nodes['task_remover'].add(TaskRemover)
# Set task manager for all PyCodelets
for _, frontend in app._pycodelet_frontends.items():
frontend.task_planner = task_planner
# Load the mission subgraph and set the config based on the input parameters
app.load("packages/behavior_tree/apps/missions.graph.json", "mission")
mission_client = app.nodes["mission.tcp_client"]["JsonTcpClient"]
mission_client.config["host"] = args.mission_host
mission_client.config["port"] = args.mission_port
app.nodes["mission.mission_control"]["NodeGroup"].config["node_names"] = ["main_sequence"]
mission_robot_name = "{0}_{1}".format(args.robot_name, args.robot_index)
app.nodes["mission.robot_name"]["JsonMockup"].config.json_mock = {"text": mission_robot_name}
sim_output = app.nodes['simulation.interface']['output']
sim_output.config.host = args.sim_host
sim_output.config.port = args.sim_output_port
sim_input = app.nodes['simulation.interface']['input']
if args.sim_input_port is not None:
sim_input.config.port = args.sim_input_port
else:
sim_input.config.port = args.sim_output_port + 1
app.nodes["websight"]["WebsightServer"].config.port = args.sight_port
# Start the application.
app.run()
| python |
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import datetime
from keras import models
from keras.layers import Dense
if __name__ == "__main__":
startTime = datetime.datetime.now()
x = np.load('data/train_w2v_data_array_500d.npy')
y = np.load('data/train_w2v_target_array_500d.npy')
y = y.astype('int')
y = y.flatten()
z = np.load('data/test_w2v_data_array_500d.npy')
t = np.load('data/test_w2v_target_array_500d.npy')
t = t.astype('int')
t = t.flatten()
learningRate = [0.1]
for lr in learningRate:
clf = MLPClassifier(solver='sgd', hidden_layer_sizes=(30,20), batch_size='auto',
learning_rate='adaptive', learning_rate_init=lr, early_stopping=True)
clf.fit(x, y)
p = clf.predict(z)
y_scores = clf.predict_proba(z)
# predicted = predict_nn(x, y, z, clf)
print("For learning rate: ", lr)
print("Word2Vec Neural Network with 500 features")
# Compute accuracy
accuracy = accuracy_score(t, p, normalize=False)
print("Accuracy: ", (accuracy / len(t)) * 100)
# Confusion matrix
        cm = confusion_matrix(t, p)
        print("Confusion Matrix:\n", cm)
# Replace 4s with 1s
t[np.where(t == 4)] = 1
p[np.where(p == 4)] = 1
# Plot the Precision-Recall curve
precision, recall, _ = precision_recall_curve(t, y_scores[:, 1])
plt.figure()
plt.step(recall, precision, color='b', alpha=0.2, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
average_precision = average_precision_score(t, p)
plt.title('Neural Network Precision-Recall curve: AP={0:0.2f}'.format(average_precision))
filename = "data/w2v_NN_500d_" + str(lr) + "_precisionRecall.png"
plt.savefig(filename) | python |
from setuptools import setup
version = '1.0.2'
setup(
name='django-mobi2',
version=version,
keywords='Django UserAgent',
description='Django middleware and view decorator to detect phones and small-screen devices',
long_description=open('README').read(),
url='https://github.com/django-xxx/django-mobi2.git',
author='Hackathon',
author_email='[email protected]',
packages=['mobi2'],
py_modules=[],
package_data={
'mobi2': ['*.txt']
},
install_requires=['django-six'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Office/Business :: Financial :: Spreadsheet',
],
)
| python |
import pickle
from tqdm import tqdm
import numpy as np
def save_stereotypes(animate_file, text_file, out_file):
"""
Save list of words that are stereotyped towards men or women
:param animate_file: list of noun pairs
:param text_file: file to test words counts on
:param out_file: output file
"""
with open(animate_file, "r") as f:
lines = f.readlines()
lines = [line.strip().split("\t") for line in lines]
words = list(zip([line[1] for line in lines], [line[2] for line in lines]))
with open(text_file) as f:
text = f.read()
text = text.split()
fem_main = []
masc_main = []
for i in tqdm(range(len(words)), total=len(words)):
fem, masc = words[i]
fem_count = text.count(fem) + text.count(fem.capitalize())
masc_count = text.count(masc) + text.count(masc.capitalize())
if .25 * fem_count >= masc_count and fem_count != 0:
fem_main.append((i, fem, masc))
elif .25 * masc_count >= fem_count and masc_count != 0:
masc_main.append((i, fem, masc))
print(len(fem_main), len(masc_main))
with open(out_file, "wb") as f:
pickle.dump(fem_main, f, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(masc_main, f, protocol=pickle.HIGHEST_PROTOCOL)
def get_probs(prob_file):
"""
:param prob_file: File containing query probabilities
:return: list of negative log likelihoods
"""
with open(prob_file, "r") as f:
lines = f.readlines()
probs = [float(line.strip()) for line in lines]
return probs
def calc_romance_bias(probs):
"""
:param probs: list of negative log likelihoods for a romance language corpus
:return: gender bias in corpus
"""
bias = 0
for idx in range(0, len(probs), 32):
bias -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
bias += probs[idx + 18] + probs[idx + 22] + probs[idx + 26] + probs[idx + 30]
return bias / 8
def calc_romance_grammar(probs):
"""
:param probs: list of negative log likelihoods for a romance language corpus
:return: grammaticality of corpus
"""
grammar = 0
for idx in range(0, len(probs), 32):
grammar -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
grammar -= probs[idx + 18] + probs[idx + 22] + probs[idx + 26] + probs[idx + 30]
grammar += probs[idx] + probs[idx + 4] + probs[idx + 8] + probs[idx + 12]
grammar += probs[idx + 19] + probs[idx + 23] + probs[idx + 27] + probs[idx + 31]
return grammar / 4
def calc_hebrew_bias(probs):
"""
:param probs: list of negative log likelihoods for a Hebrew corpus
:return: gender bias in corpus
"""
bias = 0
for idx in range(0, len(probs), 16):
bias -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
bias += probs[idx + 2] + probs[idx + 6] + probs[idx + 10] + probs[idx + 14]
return bias / 4
def calc_hebrew_grammar(probs):
"""
:param probs: list of negative log likelihoods for a Hebrew corpus
:return: grammaticality of corpus
"""
grammar = 0
for idx in range(0, len(probs), 16):
grammar -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
grammar -= probs[idx + 2] + probs[idx + 6] + probs[idx + 10] + probs[idx + 14]
grammar += probs[idx] + probs[idx + 4] + probs[idx + 8] + probs[idx + 12]
grammar += probs[idx + 19] + probs[idx + 23] + probs[idx + 27] + probs[idx + 31]
return grammar / 2
def calc_russian_bias(probs):
"""
    :param probs: list of negative log likelihoods for a Russian corpus
:return: gender bias in corpus
"""
bias = 0
for idx in range(0, len(probs), 24):
bias -= probs[idx + 1] + probs[idx + 3] + probs[idx + 5] + probs[idx + 7]
bias += probs[idx + 8] + probs[idx + 10] + probs[idx + 12] + probs[idx + 14]
bias -= probs[idx + 17] + probs[idx + 19] + probs[idx + 21] + probs[idx + 23]
bias += probs[idx + 16] + probs[idx + 18] + probs[idx + 20] + probs[idx + 22]
return bias / 4
def calc_russian_grammar(probs):
"""
:param probs: list of negative log likelihoods for a Russian corpus
:return: grammaticality of corpus
"""
grammar = 0
for idx in range(0, len(probs), 16):
grammar -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
grammar -= probs[idx + 2] + probs[idx + 6] + probs[idx + 10] + probs[idx + 14]
grammar += probs[idx] + probs[idx + 4] + probs[idx + 8] + probs[idx + 12]
grammar += probs[idx + 19] + probs[idx + 23] + probs[idx + 27] + probs[idx + 31]
return grammar / 2
def calc_other_bias(probs):
"""
:param probs: list of negative log likelihoods for a corpus
:return: gender bias in corpus
"""
bias = 0
for idx in range(0, len(probs), 16):
bias -= probs[idx + 1] + probs[idx + 3] + probs[idx + 5] + probs[idx + 7]
bias += probs[idx + 8] + probs[idx + 10] + probs[idx + 12] + probs[idx + 14]
return bias / 4
def calc_other_grammar(probs):
"""
:param probs: list of negative log likelihoods for a corpus
:return: grammaticality of corpus
"""
grammar = 0
for idx in range(0, len(probs), 24):
grammar -= probs[idx + 1] + probs[idx + 3] + probs[idx + 5] + probs[idx + 7]
grammar -= probs[idx + 8] + probs[idx + 10] + probs[idx + 12] + probs[idx + 14]
grammar += probs[idx] + probs[idx + 2] + probs[idx + 4] + probs[idx + 6]
grammar += probs[idx + 9] + probs[idx + 11] + probs[idx + 13] + probs[idx + 15]
return grammar / 2
def get_bias_and_grammar():
"""
    Print bias and grammaticality for Spanish, French, Hebrew, and Italian corpora
"""
bias = []
grammar = []
for lang, lang_type in [("spanish", 1), ("new_queries_old_model_french", 1),
("new_queries_old_model_hebrew", 0), ("new_queries_old_model_italian", 1)]:
prob_file_o = "../results/" + lang + "_original-initial.outlogliks"
prob_file_s = "../results/" + lang + "_swap-initial.outlogliks"
prob_file_d = "../results/" + lang + "_debias-initial.outlogliks"
probs_o = get_probs(prob_file_o)
probs_s = get_probs(prob_file_s)
probs_d = get_probs(prob_file_d)
if lang_type == 0:
bias_o = calc_hebrew_bias(probs_o)
bias_d = calc_hebrew_bias(probs_s)
bias_s = calc_hebrew_bias(probs_d)
grammar_o = calc_hebrew_grammar(probs_o)
grammar_d = calc_hebrew_grammar(probs_s)
grammar_s = calc_hebrew_grammar(probs_d)
elif lang_type == 1:
bias_o = calc_romance_bias(probs_o)
bias_d = calc_romance_bias(probs_s)
bias_s = calc_romance_bias(probs_d)
grammar_o = calc_romance_grammar(probs_o)
grammar_d = calc_romance_grammar(probs_s)
grammar_s = calc_romance_grammar(probs_d)
elif lang_type == 2:
bias_o = calc_russian_bias(probs_o)
bias_d = calc_russian_bias(probs_s)
bias_s = calc_russian_bias(probs_d)
grammar_o = calc_russian_grammar(probs_o)
grammar_d = calc_russian_grammar(probs_s)
grammar_s = calc_russian_grammar(probs_d)
else:
bias_o = calc_other_bias(probs_o)
bias_d = calc_other_bias(probs_s)
bias_s = calc_other_bias(probs_d)
grammar_o = calc_other_grammar(probs_o)
            grammar_d = calc_other_grammar(probs_s)
grammar_s = calc_other_grammar(probs_d)
bias.append([bias_o, bias_s, bias_d])
grammar.append([grammar_o, grammar_s, grammar_d])
print("Bias")
for i in range(3):
print("\\addplot coordinates {(Esp,", bias[0][i],
") (Fra,", bias[1][i], ") (Heb,", bias[2][i], ") (Ita,", bias[3][i], ")};")
x = 0
for i in range(4):
x += bias[i][0] / bias[i][2]
print(bias[i][0] / bias[i][2])
print(x/4)
print("Grammar")
for i in range(3):
print("\\addplot coordinates {(Esp,", grammar[0][i],
") (Fra,", grammar[1][i], ") (Heb,", grammar[2][i], ") (Ita,", grammar[3][i], ")};")
x = 0
for i in range(4):
x += grammar[i][1] / grammar[i][2]
print(grammar[i][1] / grammar[i][2])
print(x/4)
| python |
nome = str(input('Enter the name: ')).strip()
caps = nome.upper()
truefalse = 'SILVA' in caps
print('Is SILVA in the name?\n', truefalse)
| python |
# %%
import numpy as np
from scipy import spatial
x, y = np.mgrid[0:4, 0:4]
points = np.c_[x.ravel(), y.ravel()]
tree = spatial.cKDTree(points)
tree.query_ball_point([2, 0], 1)
tree.query_ball_point(points, 1)
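# A small extra sketch (assumption: reusing the same grid of points): cKDTree.query
# returns, for each query point, the distances to and indices of its k nearest neighbours.
dist, idx = tree.query(points, k=2)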
# %%
other = spatial.cKDTree(points)
tree.query_ball_tree(other, 1) | python |
# This is a log file. It is saved as .py so that the following notebooks can easily import it and use its information.
# started at: 2022.03.03-15:28:15
| python |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import pyauto_functional # Must come before pyauto (and thus, policy_base).
import policy_base
sys.path.append('/usr/local') # Required to import autotest libs.
from autotest.cros import constants
from autotest.cros import cryptohome
class ChromeosEphemeral(policy_base.PolicyTestBase):
"""Tests a policy that makes users ephemeral.
When this policy is enabled, no persistent information in the form of
cryptohome shadow directories or local state prefs should be created for
users. Additionally, any persistent information previously accumulated should
be cleared when a user first logs in after enabling the policy."""
_usernames = ('[email protected]', '[email protected]')
def _SetEphemeralUsersEnabled(self, enabled):
"""Sets the ephemeral users device policy.
The show_user_names policy is set to False to ensure that even if the local
state is not being automatically cleared, the login screen never shows user
pods. This is required by the Login browser automation call.
"""
self.SetDevicePolicy({'ephemeral_users_enabled': enabled,
'show_user_names': False})
def _DoesVaultDirectoryExist(self, user_index):
user_hash = cryptohome.get_user_hash(self._usernames[user_index])
return os.path.exists(os.path.join(constants.SHADOW_ROOT, user_hash))
def _AssertLocalStatePrefsSet(self, user_indexes):
expected = sorted([self._usernames[index] for index in user_indexes])
# The OAuthTokenStatus pref is populated asynchronously. Checking whether it
# is set would lead to an ugly race.
for pref in ['LoggedInUsers', 'UserImages', 'UserDisplayEmail', ]:
actual = sorted(self.GetLocalStatePrefsInfo().Prefs(pref))
self.assertEqual(actual, expected,
msg='Expected to find prefs in local state for users.')
def _AssertLocalStatePrefsEmpty(self):
for pref in ['LoggedInUsers',
'UserImages',
'UserDisplayEmail',
'OAuthTokenStatus']:
self.assertFalse(self.GetLocalStatePrefsInfo().Prefs(pref),
msg='Expected to not find prefs in local state for any user.')
def _AssertVaultDirectoryExists(self, user_index):
self.assertTrue(self._DoesVaultDirectoryExist(user_index=user_index),
msg='Expected vault shadow directory to exist.')
def _AssertVaultDirectoryDoesNotExist(self, user_index):
self.assertFalse(self._DoesVaultDirectoryExist(user_index=user_index),
msg='Expected vault shadow directory to not exist.')
def _AssertVaultMounted(self, user_index, ephemeral):
if ephemeral:
device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_EPHEMERAL
fs_regex = constants.CRYPTOHOME_FS_REGEX_TMPFS
else:
device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_SHADOW
fs_regex = constants.CRYPTOHOME_FS_REGEX_ANY
self.assertTrue(
cryptohome.is_vault_mounted(device_regex=device_regex,
fs_regex=fs_regex,
user=self._usernames[user_index],
allow_fail=True),
        msg='Expected vault backed by %s to be mounted.' %
            ('tmpfs' if ephemeral else 'shadow directory'))
def _AssertNoVaultMounted(self):
self.assertFalse(cryptohome.is_vault_mounted(allow_fail=True),
msg='Did not expect any vault to be mounted.')
def Login(self, user_index):
"""Convenience method to login to the usr at the given index."""
self.assertFalse(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged out.')
policy_base.PolicyTestBase.Login(self,
self._usernames[user_index],
'dummy_password')
self.assertTrue(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged in.')
def testEnablingBeforeSession(self):
"""Checks that a new session can be made ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(True)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertLocalStatePrefsEmpty()
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testEnablingDuringSession(self):
"""Checks that an existing non-ephemeral session is not made ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(False)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertLocalStatePrefsSet(user_indexes=[0])
self._AssertVaultMounted(user_index=0, ephemeral=False)
self._SetEphemeralUsersEnabled(True)
self._AssertLocalStatePrefsSet(user_indexes=[0])
self._AssertVaultMounted(user_index=0, ephemeral=False)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testDisablingDuringSession(self):
"""Checks that an existing ephemeral session is not made non-ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(True)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self._SetEphemeralUsersEnabled(False)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testEnablingEphemeralUsersCleansUp(self):
"""Checks that persistent information is cleared."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(False)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self.Logout()
self._AssertLocalStatePrefsSet(user_indexes=[0])
self.Login(user_index=1)
self.Logout()
self._AssertLocalStatePrefsSet(user_indexes=[0, 1])
self._AssertVaultDirectoryExists(user_index=0)
self._AssertVaultDirectoryExists(user_index=1)
self._SetEphemeralUsersEnabled(True)
self.Login(user_index=0)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
self._AssertVaultDirectoryDoesNotExist(user_index=1)
if __name__ == '__main__':
pyauto_functional.Main()
| python |
"""Unit test for the data_tuils module."""
import pytest
import candle
@pytest.mark.skip(reason="used by load_Xy_data_noheader")
def test_to_categorical():
pass
@pytest.mark.skip(reason="used by load_Xy_data2")
def test_convert_to_class():
pass
@pytest.mark.skip(reason="used by impute_and_scale_array")
def test_scale_array():
pass
# should we keep this?
@pytest.mark.skip(reason="impute_and_scale_array is not used")
def test_impute_and_scale_array():
pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_drop_impute_and_scale_dataframe():
pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_discretize_dataframe():
pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_discretize_array():
pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_lookup():
pass
# should we keep this?
@pytest.mark.skip(
reason="referenced in p1b1 but succeeded by load_csv_data. no longer used"
)
def test_load_X_data():
pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_load_X_data2():
pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_load_Xy_one_hot_data():
pass
# used by p1b2
def test_load_Xy_one_hot_data2():
import numpy as np
DEFAULT_DATATYPE = (
np.float32
) # will be replaced by default_utils.DEFAULT_DATATYPE once available
params = {
"data_url": "http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B2/",
"train_data": "P1B2.dummy.train.csv",
"test_data": "P1B2.dummy.test.csv",
"feature_subsample": 0,
"shuffle": True,
"scaling": "minmax",
"val_split": 0.1,
"data_type": DEFAULT_DATATYPE,
}
file_train = candle.fetch_file(
params["data_url"] + params["train_data"], subdir="Pilot1"
)
file_test = candle.fetch_file(
params["data_url"] + params["test_data"], subdir="Pilot1"
)
seed = 2017
(x_train, y_train), (x_val, y_val), (x_test, y_test) = candle.load_Xy_one_hot_data2(
file_train,
file_test,
class_col=["cancer_type"],
drop_cols=["case_id", "cancer_type"],
n_cols=params["feature_subsample"],
shuffle=params["shuffle"],
scaling=params["scaling"],
validation_split=params["val_split"],
dtype=params["data_type"],
seed=seed,
)
assert x_train.shape == (9, 28204)
assert len(y_train) == 9
assert len(x_val) == 0
assert len(y_val) == 0
assert len(x_test) == 1
assert len(y_test) == 1
# should we keep this?
@pytest.mark.skip(reason="referenced in p1b2 but not used")
def test_load_Xy_data2():
pass
# used by tc1
def test_load_Xy_data_noheader():
import numpy as np
DEFAULT_DATATYPE = (
np.float32
) # will be replaced by default_utils.DEFAULT_DATATYPE once available
params = {
"data_url": "http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/type-class/",
"train_data": "type_18_300_train.dummy.csv",
"test_data": "type_18_300_test.dummy.csv",
"data_type": DEFAULT_DATATYPE,
"classes": 36,
}
train_path = candle.fetch_file(params["data_url"] + params["train_data"], "Pilot1")
test_path = candle.fetch_file(params["data_url"] + params["test_data"], "Pilot1")
usecols = None
x_train, y_train, x_test, y_test = candle.load_Xy_data_noheader(
train_path,
test_path,
params["classes"],
usecols,
scaling="maxabs",
dtype=params["data_type"],
)
assert x_train.shape == (10, 60483)
assert len(y_train) == 10
assert x_test.shape == (2, 60483)
assert len(y_test) == 2
# used by p1b1
def test_load_csv_data():
import numpy as np
DEFAULT_DATATYPE = (
np.float32
) # will be replaced by default_utils.DEFAULT_DATATYPE once available
params = {
"data_url": "http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B1/",
"train_data": "P1B1.dummy.train.csv",
"test_data": "P1B1.dummy.test.csv",
"feature_subsample": 0,
"shuffle": False,
"scaling": "minmax",
"data_type": DEFAULT_DATATYPE,
"val_split": 0.1,
}
train_path = candle.fetch_file(params["data_url"] + params["train_data"], "Pilot1")
test_path = candle.fetch_file(params["data_url"] + params["test_data"], "Pilot1")
x_cols = None
drop_cols = ["case_id"]
onehot_cols = ["cancer_type"]
y_cols = ["cancer_type"]
seed = 2017
(
x_train,
y_train,
x_val,
y_val,
x_test,
y_test,
x_labels,
y_labels,
) = candle.load_csv_data(
train_path,
test_path,
x_cols=x_cols,
y_cols=y_cols,
drop_cols=drop_cols,
onehot_cols=onehot_cols,
n_cols=params["feature_subsample"],
shuffle=params["shuffle"],
scaling=params["scaling"],
dtype=params["data_type"],
validation_split=params["val_split"],
return_dataframe=False,
return_header=True,
nrows=params["train_samples"]
if "train_samples" in params and params["train_samples"] > 0
else None,
seed=seed,
)
assert len(x_train) == 9
assert len(x_train[0]) == 60483
assert len(y_train) == 9
assert len(x_val) == 1
assert len(y_val) == 1
assert len(x_test) == 1
assert len(y_test) == 1
assert len(x_labels) == 60483
assert len(y_labels) == 1
| python |
import numpy as np
import scipy.optimize as so
import cv2
from . import cfilter, cresampler, clz4, report
from .struct import *
_LZ4_COMPRESSION_LEVEL = 9
def applyBestIntraCompression(img, dropThreshold, minRetSize, fastDecodeMode = 2):
h, w, nChannel = img.shape
def _addEx(filterModeList, baseMethod, baseFilter, baseDefilter, mode):
assert not baseMethod & 0xf0
EX2, EX4, EX6, EX8 = 0x10, 0x20, 0x30, 0x40
if(nChannel == 1):
if(mode < 2):
if(w % 4 == 0):
filterModeList.append((baseMethod | EX4, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 4, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 4)))
if(w % 6 == 0):
filterModeList.append((baseMethod | EX6, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 6, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 6)))
if(w % 8 == 0):
filterModeList.append((baseMethod | EX8, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 8, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 8)))
elif(nChannel == 2):
if(mode < 2):
if(w % 2 == 0):
filterModeList.append((baseMethod | EX2, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 2, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 2)))
if(w % 4 == 0):
filterModeList.append((baseMethod | EX4, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 4, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 4)))
elif(nChannel == 4 or nChannel == 3):
if(w % 2 == 0):
filterModeList.append((baseMethod | EX2, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 2, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 2)))
filterModeList = [
# intraMethod, hint, filterFunc, defilterFunc
(FILTER_NONE, "lossless", lambda x, d:x.copy(), lambda x:x.copy()),
(FILTER_SUBTOP, "filtered", cfilter.filterSubTop, cfilter.defilterSubTop),
(FILTER_SUBLEFT, "filtered", cfilter.filterSubLeft, cfilter.defilterSubLeft),
]
if(fastDecodeMode < 1):
filterModeList.append((FILTER_SUBAVG, "filtered", cfilter.filterSubAvg, cfilter.defilterSubAvg))
_addEx(filterModeList, FILTER_SUBLEFT, cfilter.filterSubLeft, cfilter.defilterSubLeft, 0)
_addEx(filterModeList, FILTER_SUBAVG, cfilter.filterSubAvg, cfilter.defilterSubAvg, fastDecodeMode)
resultList = []
for intraMethod, hint, filterFunc, defilterFunc in filterModeList:
filtered = filterFunc(img, dropThreshold)
data = filtered.tobytes()
task = clz4.LZ4CompressionTask(data, clz4.COMPRESS_MODE_HC, _LZ4_COMPRESSION_LEVEL)
resultList.append((filtered, data, task, intraMethod, hint, filterFunc, defilterFunc))
del filtered, data, task
filtered, data, task, intraMethod, hint, filterFunc, defilterFunc = sorted(tuple(x for x in resultList), key = lambda x:len(x[2].get()))[0]
bestSize = len(task.get())
if(minRetSize == -1 or bestSize < minRetSize):
return {
"filtered": filtered,
"decompressed": defilterFunc(filtered),
"intraMethod": intraMethod,
"hint": hint,
"compressedSize": bestSize,
}
else:
return None
def applyDeltaCompression(channel, refChannel, dropThreshold, minRetSize):
if(dropThreshold > 0):
deltaedChannel = channel.astype(int) - refChannel.astype(int)
needDrop = np.logical_and(~np.logical_and(channel < dropThreshold, refChannel > dropThreshold), np.abs(deltaedChannel) <= dropThreshold)
deltaedChannel[needDrop] = 0
del needDrop
deltaedChannel = deltaedChannel.astype(channel.dtype)
else:
deltaedChannel = channel - refChannel
intraResult = applyBestIntraCompression(deltaedChannel, 0, minRetSize)
if(intraResult is not None):
intraResult["decompressed"] += refChannel
return intraResult
else:
return None
def applyBestFilter(currImgList, prevFullImgList, prevImgList, dropThreshold):
assert len(currImgList) == 2
assert prevFullImgList is None or len(prevFullImgList) == 2
assert prevImgList is None or len(prevImgList) == 2
assert dropThreshold >= 0
bestResult = []
bestSize = -1
bestMethod = REFERENCE_NONE
# full
for img in currImgList:
bestResult.append(applyBestIntraCompression(img, dropThreshold, -1))
bestSize += bestResult[-1]["compressedSize"]
report.do("Full: intra %s, size %d" % (str([intraFilterMethodStr[x["intraMethod"]] for x in bestResult]), bestSize))
# prevFull
if(prevFullImgList is not None):
resultList = []
size = 0
for i, img in enumerate(currImgList):
resultList.append(applyDeltaCompression(img, prevFullImgList[i], dropThreshold, -1))
size += resultList[-1]["compressedSize"]
if(size < bestSize):
bestResult = resultList
bestSize = size
bestMethod = REFERENCE_PREVFULL
report.do("PrevFull: intra %s, size %d" % (str([intraFilterMethodStr[x["intraMethod"]] for x in resultList]), size))
del resultList, size
# prev
if(prevImgList is not None and prevImgList is not prevFullImgList):
resultList = []
size = 0
for i, img in enumerate(currImgList):
resultList.append(applyDeltaCompression(img, prevImgList[i], dropThreshold, -1))
size += resultList[-1]["compressedSize"]
if(size < bestSize):
bestResult = resultList
bestSize = size
bestMethod = REFERENCE_PREV
report.do("Prev: intra %s, size %d" % (str([intraFilterMethodStr[x["intraMethod"]] for x in resultList]), size))
del resultList, size
report.do("Best delta method is %s" % (referenceMethodStr[bestMethod]))
return {
"bestResult": bestResult,
"bestSize": bestSize,
"deltaMethod": bestMethod,
}
| python |
import math
import numpy as np
from datetime import datetime
startTime = datetime.now()
natural = range(1, 500000)
# Get list of prime numbers
def prime_list(max_prime):
primes = range(2, max_prime)
length = len(primes)
for idx in range(len(primes)):
p = primes[idx]
if p == 0:
continue
# No multiples of any prime is a prime
for i in range(2, (length + 1) / p + 1):
primes[p*i - 2] = 0
primes = [y for y in primes if y != 0]
return primes
# Construct list of triangles (= cummulative sum)
triangles = np.zeros(len(natural)).astype(np.int)
triangles[0] = 1
for i in range(1,len(natural)):
triangles[i] = natural[i] + triangles[i - 1]
# Find list of prime numbers
primes = prime_list(int(np.sqrt(triangles[-1]))) # Only need this many primes
done = False
for triangle_idx in range(len(triangles)):
if done:
break
tri = float(triangles[triangle_idx])
# Remove primes which does not constitute the considered number
lego = [prime for prime in primes if tri % prime == 0]
new_divisors = list(lego)
stored = []
new_found = True
while new_found:
# Fill with all combinations of primes and their products
tmp = np.zeros(len(lego)*len(new_divisors)).astype(np.int)
for i in range(len(lego)):
for j in range(len(new_divisors)):
# Make all combinations
tmp[i*len(new_divisors) + j] = lego[i]*new_divisors[j]
tmp2 = [new for new in tmp if tri % new == 0]
if set(new_divisors) == set(tmp2) or len(tmp2) == 0:
new_found = False
else:
stored += new_divisors
new_divisors = list(set(tmp2))
ans = len(stored) + 1 # Itself
if ans >= 500: # Don't try more triangle values
done = True
print 'triangle value', int(tri), 'with index', triangle_idx, 'gives', ans, 'possible divisors'
print 'primes:', lego
#print 'Possible divisors:', sorted(stored)
print datetime.now() - startTime
"""
prime_dict = {}
for p in range(len(lego)):
prime_dict[lego[p]] = 0
# Lego are the unique primes which builds the number.
# Find out how many primes the number is made from
nr_of_factors = 0
tmp_tri = tri
for i in range(len(lego)):
while tmp_tri % lego[i] == 0:
tmp_tri /= lego[i]
prime_dict[lego[i]] += 1
nr_of_factors += 1
print 'tri:', tri
print 'prime_dict', prime_dict
"""
"""
# When chosing 2 primes to make a factor for the number, and
# the number is made from, let's say 3 of the same prime, then
# limit those primes to 2 so the 'a chose b' doesn't produce
# identical copies. Chosing 2 out of [5, 5, 5] should only give
# [5, 5], i.e. there is only one way to do it.
chose_from = np.sum([min(prime_dict[lego[x]], i) for x in range(len(prime_dict))])
print 'chose', i, 'from', chose_from,':',math.factorial( chose_from ) / (math.factorial( chose_from - i ) * math.factorial( i ))
ans += math.factorial( chose_from ) / (math.factorial( chose_from - i ) * math.factorial( i ))
"""
"""
# With tri as 360, prime_dict is {2: 3, 3:2, 5:1}
# When grabbing 2 legos, we can take 0,1 or 2 of 2,
# 0,1 or 2 from 3, and 0 or 1 from 5.
# When grabbing 3 legos, we can take 0,1,2 or 3 of 2,
# 0,1 or 2 from 3, and 0 or 1 from 5.
# Search for these combinations where the sum of the
# number of lego pieces are 3.
# When grabbing 4 legos, we have the same options, but
# now we search for combinations where the sum is 4
# This generalizes to that we can take values from
# a range from 0 to min(#of pieces, #of legos in bin)
# in every bin.
# (Start searching from the bin with fewest legos to
# terminate search early.)
ans = 1 # Instead of reaching nr_of_factors which will give 1
for i in range(1, nr_of_factors): # Pick 1,2,3...
select = []
for piece_idx in range(len(lego)):
piece = lego[piece_idx]
# From 2*2*2, we can take 0,1,2 or 3 2's
select.append(range(prime_dict[piece] + 1) )
print select
print len(select)
print select[0][:]
for piece_idx in range(len(lego)):
hej = select[piece_idx][i] + select[piece_idx]
tjubadoo
"""
| python |
# http://codeforces.com/contest/268/problem/C
n, m = map(int, input().split())
d = min(n, m)
print(d + 1)
for i in range(d + 1): print("{} {}".format(d-i, i)) | python |
import numpy as np
from swarm import metrics
import pytest
# Example y with 11 points from -1.5 to 1.5.
y = np.array(
[
-0.997495,
-0.9320391,
-0.78332686,
-0.5646425,
-0.29552022,
0.0,
0.29552022,
0.5646425,
0.78332686,
0.9320391,
0.997495,
]
)
losses = np.array([[0.82777214, 0.82301313], [0.35649812, 0.35499558], [0.82012618, 0.81833321]])
# Example predictions for first two epochs of a swarm of three bees.
ypreds = np.array(
[
[
[
-0.75819135,
-0.6721624,
-0.5914593,
-0.5263963,
-0.4742774,
-0.42794737,
-0.4386463,
-0.45942548,
-0.5183165,
-0.6156955,
-0.7488868,
],
[
-0.75616974,
-0.6701199,
-0.5893732,
-0.5242175,
-0.4719131,
-0.42543185,
-0.43560237,
-0.45590907,
-0.51438874,
-0.61130494,
-0.74402857,
],
],
[
[
-0.18297303,
-0.21213517,
-0.18341143,
-0.15066521,
-0.11950047,
-0.09036797,
-0.0256229,
0.0269562,
0.06986493,
0.1414077,
0.19563401,
],
[
-0.18315202,
-0.21226275,
-0.18336335,
-0.15038337,
-0.11897573,
-0.08946133,
-0.0242492,
0.02882081,
0.07219976,
0.14433557,
0.19909364,
],
],
[
[
0.36912787,
0.34506714,
0.32219756,
0.3202601,
0.30032292,
0.259299,
0.21430482,
0.14271711,
0.05134173,
-0.063667,
-0.17867568,
],
[
0.36715215,
0.34335977,
0.32078195,
0.3192455,
0.2996201,
0.2587561,
0.21395013,
0.14270164,
0.05165949,
-0.06302758,
-0.1777146,
],
],
]
)
# An example of scores obtained for a swarm that bounce around on the way down.
epoch_scores = [
0.51727545,
0.4584964,
0.3589881,
0.2524824,
0.20734829,
0.2482427,
0.30246153,
0.3388226,
0.34041768,
0.3064342,
0.26800793,
0.2686419,
0.24010916,
0.18522426,
0.22644123,
0.26727045,
0.28942722,
0.28332102,
0.25410518,
0.22259913,
0.25512502,
0.28029743,
0.29604492,
0.30136263,
0.29408443,
0.27543014,
0.24885914,
0.21919054,
0.22593765,
0.2305434,
0.22474495,
0.21082267,
0.19170743,
0.17090012,
0.1521816,
0.13839552,
0.1299243,
0.12569669,
0.12456866,
0.12922356,
0.14023647,
0.15060309,
0.15662336,
0.15730526,
0.15512368,
0.15510257,
0.16903949,
0.1815229,
0.20310307,
0.21428823,
0.21110815,
0.19391632,
0.16897929,
0.15510854,
0.1513776,
0.15778454,
0.15062831,
0.1423014,
0.1533089,
0.16309854,
]
def test_summarise_across_bees_ypreds():
"""This shows how to get a summary feature for each point x in a swarm. Eg, the average of the swarms ypreds"""
for summ_metric in [np.min, np.max, np.mean, np.median, np.std, np.ptp]:
out = summ_metric(ypreds, axis=0)
assert type(out) == np.ndarray
assert out.shape == (2, 11)
def test_summarise_across_bees_losses():
"""This shows how to get the average loss across a swarm"""
for summ_metric in [np.min, np.max, np.mean, np.median, np.std, np.ptp]:
out = summ_metric(losses, axis=0)
assert type(out) == np.ndarray
assert out.shape == (2,)
def test_rmse_2d():
b0_preds = ypreds[0]
out = metrics.mse_loss(b0_preds, y)
assert len(out.shape) == len(b0_preds.shape) - 1
assert (
np.max(np.abs(out - losses[0])) < 0.000001
    ) # I don't know why this isn't exactly 0, have tried pytest.approx
b2_preds = ypreds[2]
out = metrics.mse_loss(b2_preds, y)
assert len(out.shape) == len(b2_preds.shape) - 1
    assert np.max(np.abs(out - losses[2])) < 0.000001 # I don't know why this isn't exactly 0
def test_rmse_3d():
out = metrics.mse_loss(ypreds, y)
assert len(out.shape) == len(ypreds.shape) - 1
assert np.max(np.abs(out - losses)) < 0.000001 # I don't know why this isn't exactly 0
def test_loss_mean_point_pred():
"""
This is an example of interest, since it is plausible (and of interest) if the averaged prediction of many bees
in a swarm, at a given point x, might tend to be better than any given one.
"""
mean_point_preds = np.mean(ypreds, axis=0)
loss_mean_preds = metrics.mse_loss(mean_point_preds, y)
assert loss_mean_preds.shape == (2,)
def test_if_nom_first_below():
epoch = metrics.iteration_threshold(epoch_scores, 0.25, "first", "below")
assert epoch_scores[epoch] <= 0.25
assert np.all(np.array(epoch_scores[:epoch]) > 0.25)
assert metrics.iteration_threshold(epoch_scores, 0.001, "first", "below") is None
def test_if_nom_always_below():
epoch = metrics.iteration_threshold(epoch_scores, 0.25, "always", "below")
assert np.max(epoch_scores[epoch:]) <= 0.25
assert epoch_scores[epoch - 1] > 0.25
assert metrics.iteration_threshold(epoch_scores, 0.001, "always", "below") is None
def test_if_nom_first_above():
reverse_scores = 1 - np.array(epoch_scores)
epoch = metrics.iteration_threshold(reverse_scores, 0.75, "first", "above")
assert reverse_scores[epoch] >= 0.75
assert np.all(reverse_scores[:epoch] < 0.75)
assert metrics.iteration_threshold(reverse_scores, 0.999, "first", "above") is None
def test_if_nom_always_above():
reverse_scores = 1 - np.array(epoch_scores)
epoch = metrics.iteration_threshold(reverse_scores, 0.75, "always", "above")
assert np.min(reverse_scores[epoch:]) >= 0.75
assert reverse_scores[epoch - 1] < 0.75
assert metrics.iteration_threshold(reverse_scores, 0.999, "always", "above") is None
def test_if_ratio_first_below():
epoch = metrics.iteration_threshold_ratio(epoch_scores, 0.5, "first", "below")
epoch_ratios = np.array(epoch_scores) / epoch_scores[0]
assert epoch_ratios[epoch] <= 0.5
assert np.all(epoch_ratios[:epoch] > 0.5)
assert metrics.iteration_threshold_ratio(epoch_scores, 0.001, "first", "below") is None
def test_if_ratio_always_below():
epoch = metrics.iteration_threshold_ratio(epoch_scores, 0.5, "always", "below")
epoch_ratios = np.array(epoch_scores) / epoch_scores[0]
assert np.max(epoch_ratios[epoch:]) <= 0.5
assert epoch_ratios[epoch - 1] > 0.5
assert metrics.iteration_threshold_ratio(epoch_scores, 0.001, "always", "below") is None
def test_if_ratio_first_above():
reverse_scores = 1 / np.array(epoch_scores)
epoch = metrics.iteration_threshold_ratio(reverse_scores, 1.5, "first", "above", 3)
reverse_ratios = reverse_scores / reverse_scores[3]
assert reverse_ratios[epoch] >= 1.5
assert np.all(reverse_ratios[:epoch] < 1.5)
assert metrics.iteration_threshold_ratio(reverse_scores, 200, "first", "above") is None
def test_if_ratio_always_above():
reverse_scores = 1 / np.array(epoch_scores)
epoch = metrics.iteration_threshold_ratio(reverse_scores, 1.1, "always", "above", 3)
reverse_ratios = reverse_scores / reverse_scores[3]
assert np.min(reverse_ratios[epoch:]) >= 1.1
assert reverse_ratios[epoch - 1] < 1.1
assert metrics.iteration_threshold_ratio(reverse_scores, 200, "always", "above") is None
def test_if_ratio_error():
"""Should fail due to the score crossing zero"""
with pytest.raises(ValueError):
metrics.iteration_threshold_ratio(np.array([-0.1, 0, 0.1, 1]), 0.1)
| python |
import numpy as np
from numpy.random import uniform
from veneer.pest_runtime import *
import pyapprox as pya
from scipy.stats import uniform
from functools import partial
from pyapprox.adaptive_sparse_grid import max_level_admissibility_function
from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator
from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth
from pyapprox.variable_transformations import AffineRandomVariableTransformation
from pyapprox.variables import IndependentMultivariateRandomVariable
num_vars = 2
alph = 5
bet = 5.
err_tol = 1e-7
a = np.random.uniform(0, 100, (num_vars, 1))
variable = IndependentMultivariateRandomVariable(
[uniform(0, 1)], [np.arange(num_vars)])
var_trans = AffineRandomVariableTransformation(
IndependentMultivariateRandomVariable(
[uniform(0, 1)], [np.arange(num_vars)]))
def function(x):
vals = [np.cos(np.pi*a[ii]*x[ii, :]) for ii in range(x.shape[0])]
vals = np.array(vals).sum(axis=0)[:, np.newaxis]
return vals
# def run_source(x):
# """
# A test function for adaptive PCE.
# """
# y = np.array(x[0:10].sum() + x[10]**2 + x[11] * 4 + 0.1)
# # breakpoint()
# print(y.shape)
# return y.reshape(y.shape[0], 1)
# num_vars = variable.num_vars()
# Create PyApprox model
pce = pya.AdaptiveInducedPCE(num_vars, cond_tol=1e2)
# Define criteria
max_level = 4
# err_tol = 0.0
max_num_samples = 1000
max_level_1d = [max_level]*(pce.num_vars)
admissibility_function = partial(
max_level_admissibility_function, max_level, max_level_1d,
max_num_samples, err_tol)
refinement_indicator = variance_pce_refinement_indicator
pce.set_function(function, var_trans)
pce.set_refinement_functions(
refinement_indicator,
admissibility_function,
clenshaw_curtis_rule_growth
)
# Generate emulator
pce.build()
# fit the PCE
validation_samples = pya.generate_independent_random_samples(variable, 1000)
validation_vals = function(validation_samples)
hat_vals = pce(validation_samples)
np.std(validation_vals - hat_vals)
| python |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This maintains access tokens for API calls."""
import os
from abc import ABC, abstractmethod
import google.auth.transport.requests
from google.oauth2 import service_account
from flask import current_app
class TokenService(ABC): # pylint: disable=too-few-public-methods
"""Token Service abstract class with single get_token method."""
@classmethod
@abstractmethod
def get_token(cls):
"""Generate an OAuth access token with storage access."""
class GoogleStorageTokenService(TokenService): # pylint: disable=too-few-public-methods
"""Google Cloud Storage implementation.
Maintain access token for Google Cloud Storage API calls.
"""
# Google APIs and cloud storage
GCP_PROJECT_ID = os.getenv('GCP_CS_PROJECT_ID')
GCP_SA_CLIENT_EMAIL = os.getenv('GCP_CS_SA_CLIENT_EMAIL')
GCP_SA_CLIENT_ID = os.getenv('GCP_CS_SA_CLIENT_ID')
GCP_SA_PRIVATE_KEY = os.getenv('GCP_CS_SA_PRIVATE_KEY')
GCP_SA_PRIVATE_KEY_ID = os.getenv('GCP_CS_SA_PRIVATE_KEY_ID')
GCP_SA_CERT_URL = os.getenv('GCP_CS_SA_CERT_URL')
# https://developers.google.com/identity/protocols/oauth2/scopes
GCP_SA_SCOPES = [os.getenv('GCP_CS_SA_SCOPES', 'https://www.googleapis.com/auth/cloud-platform')]
service_account_info = {
'type': 'service_account',
'project_id': GCP_PROJECT_ID,
'private_key_id': GCP_SA_PRIVATE_KEY_ID,
'private_key': str(GCP_SA_PRIVATE_KEY).replace('\\n', '\n'),
'client_email': GCP_SA_CLIENT_EMAIL,
'client_id': GCP_SA_CLIENT_ID,
'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
'token_uri': 'https://oauth2.googleapis.com/token',
'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs',
'client_x509_cert_url': GCP_SA_CERT_URL
}
credentials = None
@classmethod
def get_token(cls):
"""Generate an OAuth access token with cloud storage access."""
if cls.credentials is None:
cls.credentials = service_account.Credentials.from_service_account_info(cls.service_account_info,
scopes=cls.GCP_SA_SCOPES)
request = google.auth.transport.requests.Request()
cls.credentials.refresh(request)
current_app.logger.info('Call successful: obtained token.')
return cls.credentials.token
@classmethod
def get_credentials(cls):
"""Generate GCP auth credentials to pass to a GCP client."""
if cls.credentials is None:
cls.credentials = service_account.Credentials.from_service_account_info(cls.service_account_info,
scopes=cls.GCP_SA_SCOPES)
current_app.logger.info('Call successful: obtained credentials.')
return cls.credentials
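# --- Illustrative usage sketch, not part of the original module. ---
# Assumes the GCP_CS_* environment variables are populated and that the code
# runs inside a Flask application context (the service logs via current_app).
# The bucket name in the URL is a placeholder.
if __name__ == '__main__':
    import requests
    token = GoogleStorageTokenService.get_token()
    resp = requests.get(
        'https://storage.googleapis.com/storage/v1/b/<bucket-name>/o',
        headers={'Authorization': 'Bearer {}'.format(token)})
    print(resp.status_code)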
| python |
#!/usr/bin/env python3
#fileencoding: utf-8
#-----------------------------------------------#
# python standard library
#-----------------------------------------------#
import calendar
import csv
from enum import Enum
from datetime import datetime as dt
#-----------------------------------------------#
# pip
#-----------------------------------------------#
from oauth2client import tools
#-----------------------------------------------#
# my lib
#-----------------------------------------------#
import gspread
from zaimapi import ZaimAPI, ZaimLocalDB
class Payer(Enum):
UNKNOWN = 0
alpha = 1
beta = 2
class PaymentFmt:
    Header = [
        "日付",
        "カテゴリ",
        "ジャンル",
        "商品名",
        "メモ",
        "場所",
        "支出額",
        "alpha支払額",
        "beta支払額",
        "alpha負担額",
        "beta負担額",
        "alpha個人用",
        "beta個人用",
    ]
def __init__(self):
pass
class Payment:
def __init__(self, date, category, genre, name, comment, place, price):
self.date = date
self.category = category
self.genre = genre
self.name = name
self.comment = comment
self.place = place
self.price = price
self.alpha_paid = 0
self.beta_paid = 0
self.alpha_owe = 0
self.beta_owe = 0
self.alpha_self_paid = 0
self.beta_self_paid = 0
self.id_paid = 0
self._set_paid()
self._set_owe()
def __repr__(self):
return " ".join([str(i) for i in self.to_list()])
def _pay_for_myself(self):
return "個人_" in self.category
def is_for_oneself(self):
return self._pay_for_myself()
def _who_paid(self):
if "_alpha" in self.category:
return Payer.alpha
elif "_beta" in self.category:
return Payer.beta
else:
return Payer.UNKNOWN
def _paid_by_id(self):
if "id" == self.comment.strip().split("\n")[0]:
return True
else:
return False
def get_normalized_category(self):
return self.category.replace("_alpha", "").replace("_beta", "").replace("個人_", "")
def _set_paid(self):
if self._who_paid() == Payer.alpha:
if self._pay_for_myself():
self.alpha_self_paid += self.price
else:
self.alpha_paid += self.price
elif self._who_paid() == Payer.beta:
if self._pay_for_myself():
self.beta_self_paid += self.price
else:
self.beta_paid += self.price
else:
self.beta_paid = self.price // 2
self.alpha_paid = self.price - self.beta_paid
def _set_owe(self):
if self._pay_for_myself():
return
if "dp" == self.comment.strip().split("\n")[0]:
return
category = self.get_normalized_category()
genre = self.genre
self.beta_owe = self.price // 2
self.alpha_owe = self.price - self.beta_owe
def get_date(self):
return self.date
def get_date_str(self):
return "{}-{:02d}".format(self.date.year, self.date.month)
def get_category(self):
return self.category
def get_genre(self):
return self.genre
def get_name(self):
return self.name
def get_place(self):
return self.place
def get_price(self):
return self.price
def get_alpha_paid(self):
return self.alpha_paid
def get_beta_paid(self):
return self.beta_paid
def get_alpha_owe(self):
return self.alpha_owe
def get_beta_owe(self):
return self.beta_owe
def get_alpha_self_paid(self):
return self.alpha_self_paid
def get_beta_self_paid(self):
return self.beta_self_paid
def to_list(self):
ret = []
ret.append("{}-{}-{}".format(self.date.year, self.date.month, self.date.day))
ret.append(self.category)
ret.append(self.genre)
ret.append(self.name)
ret.append(self.comment)
ret.append(self.place)
ret.append(self.price)
ret.append(self.alpha_paid)
ret.append(self.beta_paid)
ret.append(self.alpha_owe)
ret.append(self.beta_owe)
ret.append(self.alpha_self_paid)
ret.append(self.beta_self_paid)
return ret
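# Worked illustration of the splitting rules implemented above (values are
# hypothetical, not from the original script):
#   Payment(dt(2020, 1, 5), "食費", "食料品", "groceries", "", "store", 1000)
#     -> category carries no "_alpha"/"_beta" suffix, so the payment is split
#        evenly: beta_paid = 500, alpha_paid = 500, and likewise the owe fields.
#   A category ending in "_alpha" means alpha paid the whole amount, but both
#     still owe half each unless the comment's first line is "dp".
#   A category starting with "個人_" is personal spending: it is counted in
#     alpha_self_paid / beta_self_paid and excluded from the shared totals.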
class PaymentSummary:
def __init__(self):
self.payments = []
self.category_total = {}
self.alpha_category_total = {}
self.beta_category_total = {}
self.alpha_paid = 0
self.beta_paid = 0
self.alpha_owe = 0
self.beta_owe = 0
self.alpha_self_paid = 0
self.beta_self_paid = 0
def append(self, pay):
self.payments.append(pay)
ncat = pay.get_normalized_category()
if not pay.is_for_oneself():
self.category_total[ncat] = self.category_total.get(ncat, 0) + pay.get_price()
self.alpha_paid += pay.get_alpha_paid()
self.beta_paid += pay.get_beta_paid()
self.alpha_owe += pay.get_alpha_owe()
self.beta_owe += pay.get_beta_owe()
else:
self.alpha_category_total[ncat] = self.alpha_category_total.get(ncat, 0) + pay.get_alpha_self_paid()
self.beta_category_total[ncat] = self.beta_category_total.get(ncat, 0) + pay.get_beta_self_paid()
self.alpha_self_paid += pay.get_alpha_self_paid()
self.beta_self_paid += pay.get_beta_self_paid()
def get_category_total(self):
return self.category_total
def get_alpha_category_total(self):
return self.alpha_category_total
def get_beta_category_total(self):
return self.beta_category_total
def get_alpha_paid_total(self):
return self.alpha_paid
def get_beta_paid_total(self):
return self.beta_paid
def get_alpha_owe_total(self):
return self.alpha_owe
def get_beta_owe_total(self):
return self.beta_owe
def get_alpha_self_paid_total(self):
return self.alpha_self_paid
def get_beta_self_paid_total(self):
return self.beta_self_paid
def read_csv(filename):
payments = []
with open(filename, "r") as f:
reader = csv.reader(f)
        next(reader)  # skip the header row
for r in reader:
date = dt.strptime(r[0], "%Y-%m-%d")
category = r[2]
genre = r[3]
name = r[6]
place = r[8]
comment = r[9]
price = int(r[11])
payments.append(Payment(date, category, genre, name, comment, place, price))
return payments
def get_data_by_api(apikey_filename, start_date, end_date):
z = ZaimAPI(apikey_filename)
print("(1/1) Get data by Zaim REST API")
entries = z.get_entries(start_date, end_date)
return entries
def update_local_db(entries, this_month):
zldb = ZaimLocalDB("./zaim.db")
print("(1/2) delete entries in {}".format(this_month))
zldb.delete_entries_by_date(this_month)
print("(2/2) update entries in {}".format(this_month))
zldb.update_entries(entries)
def gen_payments(entries):
payments = []
for r in entries[::-1]:
date = dt.strptime(r["date"], "%Y-%m-%d")
category = r["category"]
genre = r["genre"]
name = r["name"]
place = r["place"]
price = int(r["amount"])
comment = r["comment"]
payments.append(Payment(date, category, genre, name, comment, place, price))
return payments
def gen_reqvalues(pay_lists):
summary = PaymentSummary()
for p in pay_lists:
summary.append(p)
alpha_paid = summary.get_alpha_paid_total()
beta_paid = summary.get_beta_paid_total()
alpha_owe = summary.get_alpha_owe_total()
beta_owe = summary.get_beta_owe_total()
alpha_self_paid = summary.get_alpha_self_paid_total()
beta_self_paid = summary.get_beta_self_paid_total()
values = []
values.append(["■支払額"])
values.append(["alpha支払い額", alpha_paid, "=sum(h:h)"])
values.append(["beta支払い額", beta_paid, "=sum(i:i)"])
values.append(["合計", alpha_paid + beta_paid, "=sum(c2:c3)"])
values.append([""])
values.append(["■負担額"])
values.append(["alpha負担額", alpha_owe, "=sum(j:j)"])
values.append(["beta負担額", beta_owe, "=sum(k:k)"])
print("total_paid:", alpha_paid+beta_paid)
print("alpha_paid:", alpha_paid)
print("beta_paid:", beta_paid)
print("alpha_owe:", alpha_owe)
print("beta_owe:", beta_owe)
diff = alpha_paid - alpha_owe
if diff >= 0:
print("beta -> alpha:", diff)
values.append(["清算(betaからalpha)", diff, "=c2-c7"])
else:
print("alpha -> beta:", diff)
values.append(["清算(alphaからbeta)", diff, "=c7-c2"])
values.append([""])
values.append(["■カテゴリ別合計"])
for k, v in summary.get_category_total().items():
values.append([k, v])
values.append([""])
values.append(["■ 個人会計"])
values.append(["alpha個人合計", alpha_self_paid])
for k, v in summary.get_alpha_category_total().items():
values.append([k, v])
values.append([""])
values.append(["beta個人会計", beta_self_paid])
for k, v in summary.get_beta_category_total().items():
values.append([k, v])
values.append([""])
values.append(["■全エントリ"])
values.append(PaymentFmt.Header)
for p in pay_lists:
values.append(p.to_list())
return values
#-----------------------------------------------#
def main():
n = dt.now()
start_default = "{}-{:02d}-01".format(n.year, n.month)
end_default = "{}-{:02d}-{:02d}".format(n.year, n.month, calendar.monthrange(n.year, n.month)[1])
try:
import argparse
parent_parser = argparse.ArgumentParser(parents=[tools.argparser])
parent_parser.add_argument("--credential", type=str, default="sheets.googleapis.my-kakeibo.json")
parent_parser.add_argument("--start", type=str, default=start_default)
parent_parser.add_argument("--end", type=str, default=end_default)
parent_parser.add_argument("--zaimapikey", type=str, default="zaim_secret.json")
parent_parser.add_argument("--csv", type=str, default="")
parent_parser.add_argument("--spreadsheet", action="store_true")
flags = parent_parser.parse_args()
except ImportError:
flags = None
print("span: ", flags.start, flags.end)
    if flags.spreadsheet:
        num_of_steps = 4
    else:
        num_of_steps = 3
if flags.csv != "":
print("************* Start parsing CSV file *************")
pay_lists = read_csv(flags.csv)
print("************* End parsing CSV file *************")
else:
print("[1/{}] Get data from Zaim".format(num_of_steps))
entries = get_data_by_api(flags.zaimapikey, flags.start, flags.end)
print("[2/{}] Update local DB".format(num_of_steps))
this_month = flags.start[:7]
update_local_db(entries, this_month)
print("[3/{}] Calc payments".format(num_of_steps))
pay_lists = gen_payments(entries)
values = gen_reqvalues(pay_lists)
values.append([""])
print("")
if flags.spreadsheet:
print("[4/{}] Send data to Google Spreadsheet".format(num_of_steps))
print("sheet_name:", pay_lists[0].get_date_str())
#print(values)
g = gspread.Gspread(flags)
print("(1/2) create a sheet whose name is {}".format(pay_lists[0].get_date_str()))
result = g.create_new_sheet(pay_lists[0].get_date_str())
print(result) # fixme: check result
sheet_name = pay_lists[0].get_date_str()
start_column = "A"
end_column = chr(ord("A") + len(PaymentFmt.Header))
range_name = "{}!{}:{}".format(sheet_name, start_column, end_column)
print("range_name:", range_name)
value_input_option = "USER_ENTERED"
print("(2/2) append data to the sheet")
result = g.append_data(range_name, value_input_option, values)
print(result) # fixme: check result
if __name__ == "__main__":
main()
| python |
#!/usr/bin/env python
# coding: utf-8
from collections import namedtuple
class IDBase(str):
_attrs = (
# ('server_id', 0, 12, ServerID),
# ('_non_attr', 12, 13, validator),
# ('mountpoint_index', 13, 16, MountPointIndex),
# ('port', 13, 16, _port),
)
_str_len = 0
_tostr_fmt = '' # '{attr_1}-{attr_2:0>3}'
def __new__(clz, *args, **kwargs):
if len(args) + len(kwargs) == 1:
# New from a single serialized string
            s = (list(args) + list(kwargs.values()))[0]
s = str(s)
return clz._new_by_str(s)
else:
# multi args: new by making an instance
return clz._new_by_attrs(*args, **kwargs)
@classmethod
def _new_by_attrs(clz, *args, **kwargs):
# Create a namedtuple to simplify arguments receiving
tuple_type = namedtuple('_' + clz.__name__,
' '.join([x[0]
for x in clz._attrs
if clz._is_key_attr(x)
]))
t = tuple_type(*args, **kwargs)
        # Note: values are converted to str before formatting, so a float
        # format spec in _tostr_fmt (e.g. '{x:.2f}') would raise ValueError.
s = clz._tostr_fmt.format(**{k: str(v)
for k, v in t._asdict().items()})
return clz._new_by_str(s)
@classmethod
def _new_by_str(clz, s):
if len(s) != clz._str_len:
raise ValueError('Expected {clz} length'
' to be {l} but {sl}: {s}'.format(
clz=clz.__name__,
l=clz._str_len,
sl=len(s),
s=s))
x = super(IDBase, clz).__new__(clz, s)
id_attrs = []
for attr_definition in clz._attrs:
k, start_idx, end_idx, attr_type, opt = clz._normalize(attr_definition)
if opt['self']:
val = x
else:
val = attr_type(s[start_idx:end_idx])
if opt['embed']:
for a in val._id_base_attrs:
if not a.startswith('_'):
super(IDBase, x).__setattr__(a, getattr(val, a))
id_attrs.append(a)
if k.startswith('_'):
continue
super(IDBase, x).__setattr__(k, val)
id_attrs.append(k)
super(IDBase, x).__setattr__('_id_base_attrs', tuple(id_attrs))
return x
@classmethod
def _is_key_attr(clz, attr_definition):
name, s, e, attr_type, opt = clz._normalize(attr_definition)
if name.startswith('_'):
return False
return opt['key_attr']
@classmethod
def _normalize(clz, attr_definition):
name, s, e, attr_type, opt = (attr_definition + (None,))[:5]
if opt is None:
opt = {}
elif opt is False:
opt = {'key_attr': False}
elif opt == 'self':
opt = {'key_attr': False, 'self': True}
elif opt == 'embed':
opt = {'embed': True}
else:
pass
tmpl = {'key_attr': True,
'self': False,
'embed': False,
}
tmpl.update(opt)
opt = tmpl
if opt['self']:
opt['key_attr'] = False
return name, s, e, attr_type, opt
def __setattr__(self, n, v):
        raise TypeError('{clz} does not allow attribute modification'.format(
            clz=self.__class__.__name__))
def as_tuple(self):
lst = []
for attr_definition in self._attrs:
k = attr_definition[0]
if IDBase._is_key_attr(attr_definition):
lst.append(getattr(self, k))
return tuple(lst)
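# --- Illustrative sketch, not part of the original module. ---
# A hypothetical concrete ID type showing how _attrs/_str_len/_tostr_fmt drive
# both parsing (one serialized string) and building (separate attributes).
class _ExampleCacheID(IDBase):
    _attrs = (
        ('server_id', 0, 12, str),
        ('port', 12, 16, int),
    )
    _str_len = 16
    _tostr_fmt = '{server_id}{port:0>4}'
if __name__ == '__main__':
    a = _ExampleCacheID('aabbccddeeff0080')   # parse a serialized id
    b = _ExampleCacheID('aabbccddeeff', 80)   # build from attributes
    assert a == b and a.server_id == 'aabbccddeeff' and a.port == 80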
| python |
"""
Test execution of at and cron style scheduler policies when group has updates
"""
from test_repo.autoscale.fixtures import AutoscaleFixture
from time import sleep
class UpdateSchedulerScalingPolicy(AutoscaleFixture):
"""
Verify update scheduler policy
"""
@classmethod
def setUpClass(cls):
"""
Define updates to launch config
"""
super(UpdateSchedulerScalingPolicy, cls).setUpClass()
cls.upd_server_name = "upd_lc_config"
cls.upd_image_ref = cls.lc_image_ref_alt
cls.upd_flavor_ref = "3"
def test_system_min_max_entities_at_style(self):
"""
Create a scaling group with minentities between 0 and maxentities and
maxentities=change, with 2 at style scheduler policies with change= +2 and -2,
cooldown=0 and verify that the scale up scheduler policy scales upto the
max entities specified on the group
and scale down scheduler policy scales down upto the minentities.
"""
minentities = 1
maxentities = 2
group = self._create_group(
cooldown=0, minentities=minentities, maxentities=maxentities)
self.create_default_at_style_policy_wait_for_execution(
group_id=group.id, change=maxentities + 1)
self.verify_group_state(group.id, group.groupConfiguration.maxEntities)
self.create_default_at_style_policy_wait_for_execution(
group_id=group.id, change=maxentities,
scale_down=True)
self.verify_group_state(group.id, group.groupConfiguration.minEntities)
self.empty_scaling_group(group)
def test_system_min_max_entities_cron_style(self):
"""
Create a scaling group with minentities between 0 and maxentities and maxentities=change,
with 2 cron style scheduler policies with change= +2 and -2, cooldown=0 and verify that
the scale up scheduler policy scales upto the maxentities specified on the group
and scale down scheduler policy scales down upto the minentities.
Note: The group and policy cooldown are 0 and the scale up and scale down policies
will keep trying to scale up beyond maxentities and scale down below minentities
but will not be executed as min/maxenetities are met, until group is deleted.
"""
minentities = 1
maxentities = 2
group = self._create_group(
cooldown=0, minentities=minentities, maxentities=maxentities)
self.autoscale_behaviors.create_schedule_policy_given(
group_id=group.id,
sp_cooldown=0,
sp_change=maxentities + 1,
schedule_cron='* * * * *')
sleep(60 + self.scheduler_interval)
self.verify_group_state(group.id, group.groupConfiguration.maxEntities)
self.autoscale_behaviors.create_schedule_policy_given(
group_id=group.id,
sp_cooldown=0,
sp_change=-maxentities,
schedule_cron='* * * * *')
sleep(60 + self.scheduler_interval)
self.verify_group_state(group.id, group.groupConfiguration.minEntities)
self.empty_scaling_group(group)
def test_system_group_cooldown_atstyle(self):
"""
Create a scaling group with cooldown>0, create a scheduler at style policy
and wait for its execution, creating another at style policy scheduled
to execute before the cooldown period expires does not trigger.
Creating a 3rd at style policy after the cooldown, executes successfully.
"""
group = self._create_group(cooldown=60)
self.create_default_at_style_policy_wait_for_execution(group.id)
self.verify_group_state(group.id, self.sp_change)
self.create_default_at_style_policy_wait_for_execution(group.id)
self.verify_group_state(group.id, self.sp_change)
sleep(60 - self.scheduler_interval)
self.create_default_at_style_policy_wait_for_execution(group.id)
self.verify_group_state(group.id, self.sp_change * 2)
self.empty_scaling_group(group)
def test_system_upd_launch_config_at_style_scheduler(self):
"""
Create a scaling group with minentities>0, update launch config, schedule at style
policy to scale up and verify the new servers of the latest launch config,
then schedule an at style policy to scale down and verify the servers remaining
are of the latest launch config.
"""
group = self._create_group(minentities=self.sp_change)
active_list_b4_upd = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=group.id,
expected_servers=group.groupConfiguration.minEntities)
self._update_launch_config(group)
self.create_default_at_style_policy_wait_for_execution(group.id)
active_servers = self.sp_change + group.groupConfiguration.minEntities
active_list_after_scale_up = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=group.id,
expected_servers=active_servers)
upd_lc_server = set(
active_list_after_scale_up) - set(active_list_b4_upd)
self._verify_server_list_for_launch_config(upd_lc_server)
self.create_default_at_style_policy_wait_for_execution(
group.id, scale_down=True)
active_list_on_scale_down = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=group.id,
expected_servers=group.groupConfiguration.minEntities)
self._verify_server_list_for_launch_config(active_list_on_scale_down)
self.empty_scaling_group(group)
def test_system_upd_launch_config_cron_style_scheduler(self):
"""
Create a scaling group with minentities>0, update launch config, schedule cron style
policy to scale up and verify the new servers of the latest launch config,
then schedule another cron style policy to scale down and verify the servers remaining
are of the latest launch config.
"""
group = self._create_group(minentities=self.sp_change)
active_list_b4_upd = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=group.id,
expected_servers=group.groupConfiguration.minEntities)
self._update_launch_config(group)
self.autoscale_behaviors.create_schedule_policy_given(
group_id=group.id,
sp_cooldown=3600,
sp_change=self.sp_change,
schedule_cron='* * * * *')
sleep(60 + self.scheduler_interval)
active_servers = self.sp_change + group.groupConfiguration.minEntities
active_list_after_scale_up = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=group.id,
expected_servers=active_servers)
upd_lc_server = set(
active_list_after_scale_up) - set(active_list_b4_upd)
self._verify_server_list_for_launch_config(upd_lc_server)
self.autoscale_behaviors.create_schedule_policy_given(
group_id=group.id,
sp_cooldown=3600,
sp_change=-self.sp_change,
schedule_cron='* * * * *')
sleep(60 + self.scheduler_interval)
active_list_on_scale_down = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=group.id,
expected_servers=group.groupConfiguration.minEntities)
self._verify_server_list_for_launch_config(active_list_on_scale_down)
self.empty_scaling_group(group)
def _create_group(self, cooldown=None, minentities=None, maxentities=None):
create_group_response = self.autoscale_behaviors.create_scaling_group_given(
gc_cooldown=cooldown,
gc_min_entities=minentities,
gc_max_entities=maxentities,
lc_name='upd_grp_scheduled')
group = create_group_response.entity
self.resources.add(group.id,
self.autoscale_client.delete_scaling_group)
return group
def _update_launch_config(self, group):
"""
Update the scaling group's launch configuration and
assert the update was successful.
"""
update_launch_config_response = self.autoscale_client.update_launch_config(
group_id=group.id,
name=self.upd_server_name,
image_ref=self.upd_image_ref,
flavor_ref=self.upd_flavor_ref)
self.assertEquals(update_launch_config_response.status_code, 204,
msg='Updating launch config failed with {0} for group {1}'
.format(update_launch_config_response, group.id))
def _verify_server_list_for_launch_config(self, server_list):
for each in list(server_list):
get_server_resp = self.server_client.get_server(each)
server = get_server_resp.entity
self.assertTrue(self.upd_server_name in server.name)
self.assertEquals(server.image.id, self.lc_image_ref_alt)
self.assertEquals(server.flavor.id, self.upd_flavor_ref)
| python |
# coding: utf-8
"""
CardPay REST API
    Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) resource endpoint structure (see the link for more about REST). Request and response payloads are formatted as JSON. Merchants use the API to create payments, refunds, payouts or recurrings, check or update transaction status, and get information about created transactions. API authentication is based on the [OAuth 2.0](https://oauth.net/2/) standard. For recent changes see the changelog section. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cardpay.api_client import ApiClient
class PayoutsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_payout(self, payout_request, **kwargs): # noqa: E501
"""Create payout # noqa: E501
:param PayoutRequest payout_request: payoutRequest (required)
:return: PayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
(data) = self.create_payout_with_http_info(
payout_request, **kwargs
) # noqa: E501
return data
def create_payout_with_http_info(self, payout_request, **kwargs): # noqa: E501
"""Create payout # noqa: E501
:param PayoutRequest payout_request: payoutRequest (required)
:return: PayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["payout_request"] # noqa: E501
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_payout" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'payout_request' is set
if "payout_request" not in params or params["payout_request"] is None:
raise ValueError(
"Missing the required parameter `payout_request` when calling `create_payout`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "payout_request" in params:
body_params = params["payout_request"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
return self.api_client.call_api(
"/api/payouts",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayoutResponse", # noqa: E501
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_payout(self, payout_id, **kwargs): # noqa: E501
"""Read payout information # noqa: E501
:param str payout_id: Payout ID (required)
:return: PayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
(data) = self.get_payout_with_http_info(payout_id, **kwargs) # noqa: E501
return data
def get_payout_with_http_info(self, payout_id, **kwargs): # noqa: E501
"""Read payout information # noqa: E501
:param str payout_id: Payout ID (required)
:return: PayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["payout_id"] # noqa: E501
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payout" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'payout_id' is set
if "payout_id" not in params or params["payout_id"] is None:
raise ValueError(
"Missing the required parameter `payout_id` when calling `get_payout`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "payout_id" in params:
path_params["payoutId"] = params["payout_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
return self.api_client.call_api(
"/api/payouts/{payoutId}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayoutResponse", # noqa: E501
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_payouts(self, request_id, **kwargs): # noqa: E501
"""Get payouts information # noqa: E501
:param str request_id: Request ID (required)
:param str currency: [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) currency code of transactions currency
:param datetime end_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period ends (not inclusive), UTC time, must be less than 7 days after 'start_time', default is current time (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:param int max_count: Limit number of returned transactions (must be less than 10000, default is 1000, minimal value is 1)
:param str merchant_order_id: Merchant order number from the merchant system
:param str payment_method: Used payment method type name from payment methods list
:param str sort_order: Sort based on order of results. `asc` for ascending order or `desc` for descending order (default value)
:param datetime start_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period starts (inclusive), UTC time, default is 24 hours before 'end_time' (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:return: PayoutsList
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
(data) = self.get_payouts_with_http_info(request_id, **kwargs) # noqa: E501
return data
def get_payouts_with_http_info(self, request_id, **kwargs): # noqa: E501
"""Get payouts information # noqa: E501
:param str request_id: Request ID (required)
:param str currency: [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) currency code of transactions currency
:param datetime end_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period ends (not inclusive), UTC time, must be less than 7 days after 'start_time', default is current time (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:param int max_count: Limit number of returned transactions (must be less than 10000, default is 1000, minimal value is 1)
:param str merchant_order_id: Merchant order number from the merchant system
:param str payment_method: Used payment method type name from payment methods list
:param str sort_order: Sort based on order of results. `asc` for ascending order or `desc` for descending order (default value)
:param datetime start_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period starts (inclusive), UTC time, default is 24 hours before 'end_time' (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:return: PayoutsList
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
"request_id",
"currency",
"end_time",
"max_count",
"merchant_order_id",
"payment_method",
"sort_order",
"start_time",
] # noqa: E501
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payouts" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'request_id' is set
if "request_id" not in params or params["request_id"] is None:
raise ValueError(
"Missing the required parameter `request_id` when calling `get_payouts`"
) # noqa: E501
if "request_id" in params and len(params["request_id"]) > 50:
raise ValueError(
"Invalid value for parameter `request_id` when calling `get_payouts`, length must be less than or equal to `50`"
) # noqa: E501
if "request_id" in params and len(params["request_id"]) < 1:
raise ValueError(
"Invalid value for parameter `request_id` when calling `get_payouts`, length must be greater than or equal to `1`"
) # noqa: E501
if "max_count" in params and params["max_count"] > 10000: # noqa: E501
raise ValueError(
"Invalid value for parameter `max_count` when calling `get_payouts`, must be a value less than or equal to `10000`"
) # noqa: E501
if "max_count" in params and params["max_count"] < 1: # noqa: E501
raise ValueError(
"Invalid value for parameter `max_count` when calling `get_payouts`, must be a value greater than or equal to `1`"
) # noqa: E501
if "merchant_order_id" in params and len(params["merchant_order_id"]) > 50:
raise ValueError(
"Invalid value for parameter `merchant_order_id` when calling `get_payouts`, length must be less than or equal to `50`"
) # noqa: E501
if "merchant_order_id" in params and len(params["merchant_order_id"]) < 0:
raise ValueError(
"Invalid value for parameter `merchant_order_id` when calling `get_payouts`, length must be greater than or equal to `0`"
) # noqa: E501
if "payment_method" in params and len(params["payment_method"]) > 50:
raise ValueError(
"Invalid value for parameter `payment_method` when calling `get_payouts`, length must be less than or equal to `50`"
) # noqa: E501
if "payment_method" in params and len(params["payment_method"]) < 0:
raise ValueError(
"Invalid value for parameter `payment_method` when calling `get_payouts`, length must be greater than or equal to `0`"
) # noqa: E501
if "sort_order" in params and not re.search(
r"asc|desc", params["sort_order"]
): # noqa: E501
raise ValueError(
"Invalid value for parameter `sort_order` when calling `get_payouts`, must conform to the pattern `/asc|desc/`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if "currency" in params:
query_params.append(("currency", params["currency"])) # noqa: E501
if "end_time" in params:
query_params.append(("end_time", params["end_time"])) # noqa: E501
if "max_count" in params:
query_params.append(("max_count", params["max_count"])) # noqa: E501
if "merchant_order_id" in params:
query_params.append(
("merchant_order_id", params["merchant_order_id"])
) # noqa: E501
if "payment_method" in params:
query_params.append(
("payment_method", params["payment_method"])
) # noqa: E501
if "request_id" in params:
query_params.append(("request_id", params["request_id"])) # noqa: E501
if "sort_order" in params:
query_params.append(("sort_order", params["sort_order"])) # noqa: E501
if "start_time" in params:
query_params.append(("start_time", params["start_time"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
return self.api_client.call_api(
"/api/payouts",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayoutsList", # noqa: E501
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_payout(self, payout_id, payout_update_request, **kwargs): # noqa: E501
"""Update payout # noqa: E501
:param str payout_id: Payout ID (required)
:param PayoutUpdateRequest payout_update_request: payoutUpdateRequest (required)
:return: PayoutUpdateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
(data) = self.update_payout_with_http_info(
payout_id, payout_update_request, **kwargs
) # noqa: E501
return data
def update_payout_with_http_info(
self, payout_id, payout_update_request, **kwargs
): # noqa: E501
"""Update payout # noqa: E501
:param str payout_id: Payout ID (required)
:param PayoutUpdateRequest payout_update_request: payoutUpdateRequest (required)
:return: PayoutUpdateResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["payout_id", "payout_update_request"] # noqa: E501
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_payout" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'payout_id' is set
if "payout_id" not in params or params["payout_id"] is None:
raise ValueError(
"Missing the required parameter `payout_id` when calling `update_payout`"
) # noqa: E501
# verify the required parameter 'payout_update_request' is set
if (
"payout_update_request" not in params
or params["payout_update_request"] is None
):
raise ValueError(
"Missing the required parameter `payout_update_request` when calling `update_payout`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "payout_id" in params:
path_params["payoutId"] = params["payout_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "payout_update_request" in params:
body_params = params["payout_update_request"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
return self.api_client.call_api(
"/api/payouts/{payoutId}",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayoutUpdateResponse", # noqa: E501
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
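# --- Illustrative usage sketch, not part of the generated client. ---
# Assumes the default ApiClient is configured with valid CardPay OAuth
# credentials; the payout id below is a placeholder.
if __name__ == "__main__":
    payouts_api = PayoutsApi()  # uses a default ApiClient()
    payout = payouts_api.get_payout("<payout-id>")
    print(payout)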
| python |