# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import random
def busqueda_local(solucion_inicial, evaluacion, obtener_vecinos,
T_max, T_min, reduccion):
"""
Simulated Annealing.
"""
from random import random
solucion_mejor = solucion_actual = solucion_inicial
evaluacion_mejor = evaluacion_actual = evaluacion(solucion_actual)
soluciones_evaluadas = 1
T = T_max
while T >= T_min:
vecinos = obtener_vecinos(solucion_actual)
for vecino in vecinos:
evaluacion_vecino = evaluacion(vecino)
soluciones_evaluadas += 1
if (evaluacion_vecino > evaluacion_actual or
random() < np.exp((evaluacion_vecino - evaluacion_actual) / T)):
solucion_actual = vecino
evaluacion_actual = evaluacion_vecino
if evaluacion_mejor < evaluacion_actual:
solucion_mejor = solucion_actual
evaluacion_mejor = evaluacion_actual
T = reduccion * T
return solucion_mejor, soluciones_evaluadas
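# Hypothetical usage sketch (not part of the original file): maximise a simple
# quadratic with +/-0.1 neighbours; the names `evaluar` and `vecinos` are assumptions.
if __name__ == "__main__":
    def evaluar(x):
        return -(x - 3.0) ** 2

    def vecinos(x):
        return [x - 0.1, x + 0.1]

    mejor, evaluadas = busqueda_local(0.0, evaluar, vecinos, T_max=10.0, T_min=0.01, reduccion=0.9)
    print(mejor, evaluadas)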
|
python
|
#!/usr/bin/python3
#-*-coding:utf-8-*-
from threading import Thread
import pymysql
from Sqlcore import Sqlcore
'''
Entry point
'''
'''vmode: display/print mode - full/standard/simple'''
# photo gallery for the production environment
conn = {"hostname":"xxxxxx.mysql.rds.aliyuncs.com","username":"forpython","password":"Ffu398fh_GFUPY","database":"dbuser","hostport":3708}
cfgs = {"table":"bota_photo", "column":"corver,gallery", "offset":0, "vmode":"full", "limit":5, "sleep": 1}
# photo gallery on the local machine
conn = {"hostname":"localhost","username":"website","password":"website123","database":"dbuser","hostport":3306}
cfgs = {"table":"bota_photo", "column":"corver,gallery", "offset":0, "vmode":"full", "limit":5, "sleep": 0}
# configuration for Qunar hotels
#conn = {"hostname":"192.168.1.11","username":"website","password":"website123","database":"dbuser","hostport":3306}
#cfgs = {"table":"bota_hotel", "column":"cover,photos", "offset":0, "vmode":"simple", "limit":50, "sleep": 5}
'''Start the core work'''
def runcore(uniq, conn, cfgs):
sc = Sqlcore(uniq, conn, cfgs)
sc.run()
'''Total number of rows in the result set'''
def totalcount(conn, table):
    db = pymysql.connect(host=conn['hostname'], user=conn['username'], password=conn['password'], database=conn['database'], port=conn['hostport'])
cursor = db.cursor()
cursor.execute("SELECT count(*) FROM `{table}`".format(table=table))
#cursor.fetchone()
data = cursor.fetchone()
#print(data[0])
return int(data[0])#type(data[0])
if __name__ == "__main__":
print("welcome")
total = totalcount(conn, cfgs["table"])
print("发现数据库表"+cfgs["table"]+"存在总数:"+str(total)+"条记录")
while True:
        tnum = input("Number of threads to start, at least 1, at most 10, or exit to quit: ")
if tnum == "exit":
exit()
if tnum.isdigit():
tnum = int(tnum)
if tnum <1:
                raise ValueError("Input must be a positive integer")
elif tnum > 50:
print("线程数不能够超过50啊~")
elif total < tnum:
print("结果集还没有线程多~")
else:
break
else:
print("输入的必须是1以上的数字啊~")
if tnum == 1:
        # single-threaded
runcore("ONETHREAD", conn, cfgs)
else:
        # multi-threaded
        one = total // tnum  # starting offset for each thread
tlist = []
for i in range(tnum):
cfg = cfgs.copy()
cfg["offset"] = one * i
#print(cfgs)
t = Thread(target=runcore, args=("#t"+str(i), conn, cfg))
tlist.append(t)
t.start()
#print("\n" + t.getName())#获取线程名
for i in tlist:
            i.join()  # block the main thread until this worker thread finishes
print("allDownloaded")
|
python
|
#!/usr/bin/env python
import sys
import os
import shutil
def cleanup_dump(dumpstr):
cardfrags = dumpstr.split('\n\n')
if len(cardfrags) < 4:
return ''
else:
return '\n\n'.join(cardfrags[2:-1]) + '\n\n'
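# For example (illustration only, not from the original): cleanup_dump('a\n\nb\n\nc\n\nd\n\ne')
# drops the first two fragments and the trailing one, returning 'c\n\nd\n\n'.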
def identify_checkpoints(basedir, ident):
cp_infos = []
for path in os.listdir(basedir):
fullpath = os.path.join(basedir, path)
if not os.path.isfile(fullpath):
continue
if not (path[:13] == 'lm_lstm_epoch' and path[-4:] == '.txt'):
continue
if not ident in path:
continue
# attempt super hacky parsing
inner = path[13:-4]
halves = inner.split('_')
if not len(halves) == 2:
continue
parts = halves[1].split('.')
if not len(parts) == 6:
continue
# lm_lstm_epoch[25.00_0.3859.t7.output.1.0].txt
if not parts[3] == ident:
continue
epoch = halves[0]
vloss = '.'.join([parts[0], parts[1]])
temp = '.'.join([parts[4], parts[5]])
cpname = 'lm_lstm_epoch' + epoch + '_' + vloss + '.t7'
cp_infos += [(fullpath, os.path.join(basedir, cpname),
(epoch, vloss, temp))]
return cp_infos
def process_dir(basedir, targetdir, ident, copy_cp = False, verbose = False):
(basepath, basedirname) = os.path.split(basedir)
if basedirname == '':
(basepath, basedirname) = os.path.split(basepath)
cp_infos = identify_checkpoints(basedir, ident)
for (dpath, cpath, (epoch, vloss, temp)) in cp_infos:
if verbose:
print(('found dumpfile ' + dpath))
dname = basedirname + '_epoch' + epoch + '_' + \
vloss + '.' + ident + '.' + temp + '.txt'
cname = basedirname + '_epoch' + epoch + '_' + vloss + '.t7'
tdpath = os.path.join(targetdir, dname)
tcpath = os.path.join(targetdir, cname)
if verbose:
print((' cpx ' + dpath + ' ' + tdpath))
with open(dpath, 'rt') as infile:
with open(tdpath, 'wt') as outfile:
outfile.write(cleanup_dump(infile.read()))
if copy_cp:
if os.path.isfile(cpath):
if verbose:
print((' cp ' + cpath + ' ' + tcpath))
shutil.copy(cpath, tcpath)
if copy_cp and len(cp_infos) > 0:
cmdpath = os.path.join(basedir, 'command.txt')
tcmdpath = os.path.join(targetdir, basedirname + '.command')
if os.path.isfile(cmdpath):
if verbose:
print((' cp ' + cmdpath + ' ' + tcmdpath))
shutil.copy(cmdpath, tcmdpath)
for path in os.listdir(basedir):
fullpath = os.path.join(basedir, path)
if os.path.isdir(fullpath):
process_dir(fullpath, targetdir, ident, copy_cp=copy_cp, verbose=verbose)
def main(basedir, targetdir, ident = 'output', copy_cp = False, verbose = False):
process_dir(basedir, targetdir, ident, copy_cp=copy_cp, verbose=verbose)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('basedir', #nargs='?', default=None,
help='base rnn directory, must contain sample.lua')
parser.add_argument('targetdir', #nargs='?', default=None,
help='checkpoint directory, all subdirectories will be processed')
parser.add_argument('-c', '--copy_cp', action='store_true',
help='copy checkpoints used to generate the output files')
parser.add_argument('-i', '--ident', action='store', default='output',
help='identifier to look for to determine checkpoints')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose output')
args = parser.parse_args()
main(args.basedir, args.targetdir, ident=args.ident, copy_cp=args.copy_cp, verbose=args.verbose)
exit(0)
|
python
|
a, b = map(int, input().split())
if a <= 9 and b <= 9:
ans = a * b
else:
ans = -1
print(ans)
|
python
|
# Test the frozen module defined in frozen.c.
from test.test_support import TestFailed
import sys, os
try:
import __hello__
except ImportError, x:
raise TestFailed, "import __hello__ failed:" + str(x)
try:
import __phello__
except ImportError, x:
raise TestFailed, "import __phello__ failed:" + str(x)
try:
import __phello__.spam
except ImportError, x:
raise TestFailed, "import __phello__.spam failed:" + str(x)
if sys.platform != "mac": # On the Mac this import does succeed.
try:
import __phello__.foo
except ImportError:
pass
else:
raise TestFailed, "import __phello__.foo should have failed"
|
python
|
# Generated by Django 3.2.6 on 2021-08-21 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('education', '0021_alter_materialblocks_icon'),
]
operations = [
migrations.AlterField(
model_name='materialblocks',
name='icon',
field=models.CharField(choices=[('bolt', 'Молния'), ('apple-alt', 'Яблоко'), ('balance-scale-left', 'Весы'), ('brain', 'Мозг'), ('check', 'Галочка'), ('cloud', 'Облако'), ('compass', 'Компас'), ('dev', 'dev'), ('git', 'git'), ('lemon', 'Лимон')], default='fa-bolt', max_length=32),
),
]
|
python
|
from __future__ import annotations
from typing import TYPE_CHECKING
from openhab_creator import _
from openhab_creator.models.common import MapTransformation
from openhab_creator.models.configuration.equipment.types.sensor import (
Sensor, SensorType)
from openhab_creator.models.items import (DateTime, Group, GroupType, Number,
PointType, ProfileType, PropertyType,
String)
from openhab_creator.output.items import ItemsCreatorPipeline
from openhab_creator.output.items.baseitemscreator import BaseItemsCreator
if TYPE_CHECKING:
from openhab_creator.models.configuration import Configuration
from openhab_creator.models.configuration.location import Location
@ItemsCreatorPipeline(9)
class SensorItemsCreator(BaseItemsCreator):
def __init__(self, outputdir: str):
super().__init__(outputdir)
self.sensors = {}
def build(self, configuration: Configuration) -> None:
self._build_groups()
for sensor in configuration.equipment.equipment('sensor'):
location = sensor.location
area = location.area
if area not in self.sensors:
self.sensors[area] = {}
self.build_sensor(sensor)
if sensor.has_subequipment:
for subsensor in sensor.subequipment:
if sensor.category != 'sensor':
self.build_sensor(subsensor)
self.build_sensortype_area(subsensor)
else:
self.build_sensortype_area(sensor)
self.write_file('sensors')
def _build_groups(self) -> None:
Group('Trend')\
.append_to(self)
Group('Average7d')\
.append_to(self)
Group('PressureSealevel')\
.append_to(self)
for sensortype in SensorType:
Group(f'{sensortype}All')\
.typed(GroupType.NUMBER_AVG)\
.label(sensortype.labels.page)\
.format(sensortype.labels.format_str)\
.icon(f'{sensortype}')\
.append_to(self)
if sensortype.labels.has_gui_factor:
Group(f'gui{sensortype}All')\
.typed(GroupType.NUMBER_AVG)\
.label(sensortype.labels.item)\
.transform_js(f'gui{sensortype}')\
.icon(f'{sensortype}')\
.append_to(self)
def build_sensor(self, sensor: Sensor) -> None:
sensor_equipment = Group(sensor.item_ids.sensor)\
.semantic('Sensor')
if sensor.sensor_is_subequipment:
sensor_equipment\
.label(_('Sensor'))\
.equipment(sensor)
else:
sensor_equipment\
.label(sensor.name_with_type)\
.location(sensor.location)
sensor_equipment.append_to(self)
def build_sensortype_area(self, sensor: Sensor) -> None:
area = sensor.location.area
for sensortype in SensorType:
if sensortype.point in sensor.categories:
if sensortype not in self.sensors[area]:
self.sensors[area][sensortype] = {}
Group(f'{sensortype}{area}')\
.typed(GroupType.NUMBER_AVG)\
.label(sensortype.labels.item)\
.format(sensortype.labels.format_str)\
.icon(f'{sensortype}{area.lower()}')\
.groups(f'{sensortype}All')\
.append_to(self)
if sensortype.labels.has_gui_factor:
Group(f'gui{sensortype}{area}')\
.typed(GroupType.NUMBER_AVG)\
.label(sensortype.labels.item)\
.transform_js(f'gui{sensortype}')\
.icon(f'{sensortype}{area.lower()}')\
.groups(f'gui{sensortype}All')\
.append_to(self)
self.build_sensortype_location(sensortype, sensor)
def build_sensortype_location(self, sensortype: SensorType, sensor: Sensor) -> None:
location = sensor.location
area = location.area
if location not in self.sensors[area][sensortype]:
self.sensors[area][sensortype][location] = True
Group(f'{sensortype}{location}')\
.typed(GroupType.NUMBER_AVG)\
.label(sensortype.labels.item)\
.format(sensortype.labels.format_str)\
.icon(f'{sensortype}{area.lower()}')\
.groups(f'{sensortype}{area}')\
.location(location)\
.semantic(PointType.MEASUREMENT, sensortype.typed.property)\
.append_to(self)
if sensortype.labels.has_gui_factor:
Group(f'gui{sensortype}{location}')\
.typed(GroupType.NUMBER_AVG)\
.label(sensortype.labels.item)\
.transform_js(f'gui{sensortype}')\
.icon(f'{sensortype}{area.lower()}')\
.groups(f'gui{sensortype}{area}')\
.location(location)\
.semantic(PointType.MEASUREMENT, sensortype.typed.property)\
.append_to(self)
sensor_item = Number(f'{sensortype}{sensor.item_ids.merged_sensor}')\
.typed(sensortype.typed.number)\
.label(sensortype.labels.item)\
.format(sensortype.labels.format_str)\
.icon(f'{sensortype}{area.lower()}')\
.groups(sensor.item_ids.merged_sensor)\
.semantic(PointType.MEASUREMENT, sensortype.typed.property)\
.channel(sensor.points.channel(sensortype.point))\
.aisensor()
if sensortype == SensorType.MOISTURE:
sensor_item\
.scripting({
'reminder_item': sensor.item_ids.moisturelastreminder,
'watered_item': sensor.item_ids.moisturelastwatered
})\
.sensor(sensortype.point, sensor.influxdb_tags)\
.groups(f'{sensortype}{location}')
self.moisture_items(sensor)
elif sensortype == SensorType.PRESSURE and sensor.has_altitude:
sensor_item\
.scripting({
'pressure_sealevel_item': sensor.item_ids.pressure_sealevel,
'altitude': sensor.altitude
})\
.groups('PressureSealevel')
Number(f'pressureSeaLevel{sensor.item_ids.merged_sensor}')\
.typed(sensortype.typed.number)\
.label(sensortype.labels.item)\
.format(sensortype.labels.format_str)\
.icon(f'{sensortype}{area.lower()}')\
.groups(sensor.item_ids.merged_sensor, f'{sensortype}{location}')\
.semantic(PointType.MEASUREMENT, sensortype.typed.property)\
.sensor(sensortype.point, sensor.influxdb_tags)\
.append_to(self)
else:
sensor_item\
.sensor(sensortype.point, sensor.influxdb_tags)\
.groups(f'{sensortype}{location}')
if sensor.location.area == 'Outdoor' or sensortype == SensorType.PRESSURE:
String(f'trend{sensortype}{sensor.item_ids.merged_sensor}')\
.label(_('Trend {label}').format(label=sensortype.labels.item))\
.map(MapTransformation.TREND)\
.icon(f'trend{sensortype}')\
.groups(sensor.item_ids.merged_sensor)\
.semantic(PointType.STATUS)\
.aisensor()\
.append_to(self)
sensor_item\
.groups('Trend')\
.scripting({
'trend_item': f'trend{sensortype}{sensor.item_ids.merged_sensor}'
})
if sensortype == SensorType.TEMPERATURE:
Number(f'average7d{sensortype}{sensor.item_ids.merged_sensor}')\
.label(_('7 days average {label}').format(label=sensortype.labels.item))\
.icon('average7d')\
.groups(sensor.item_ids.merged_sensor)\
.semantic(PointType.STATUS)\
.aisensor()\
.append_to(self)
sensor_item\
.groups('Average7d')\
.scripting({
'average_item': f'average7d{sensortype}{sensor.item_ids.merged_sensor}'
})
sensor_item.append_to(self)
if sensortype.labels.has_gui_factor:
String(f'gui{sensortype}{sensor.item_ids.sensor}')\
.label(sensortype.labels.item)\
.transform_js(f'gui{sensortype}')\
.icon(f'{sensortype}{area.lower()}')\
.groups(sensor.item_ids.merged_sensor, f'gui{sensortype}{location}')\
.semantic(PointType.MEASUREMENT, sensortype.typed.property)\
.channel(sensor.points.channel(sensortype.point),
ProfileType.JS, f'togui{sensortype.labels.gui_factor}.js')\
.append_to(self)
def moisture_items(self, sensor: Sensor) -> None:
DateTime(sensor.item_ids.moisturelastreminder)\
.label(_('Last watering reminder'))\
.datetime()\
.config()\
.groups(sensor.item_ids.merged_sensor)\
.semantic(PointType.STATUS, PropertyType.TIMESTAMP)\
.scripting({
'message': _('The plant {plant} needs to be watered!')
.format(plant=sensor.blankname)
})\
.append_to(self)
DateTime(sensor.item_ids.moisturelastwatered)\
.label(_('Last watered'))\
.dateonly_weekday()\
.icon('wateringcan')\
.config()\
.groups(sensor.item_ids.merged_sensor)\
.semantic(PointType.STATUS, PropertyType.TIMESTAMP)\
.scripting({
'message': _('The plant {plant} says thank you for watering!')
.format(plant=sensor.blankname)
})\
.append_to(self)
|
python
|
# -*- coding: utf-8 -*-
"""Library to easily manage Host Health"""
from __future__ import division
import logging
import psutil
import threading
import time
COLOR_GREEN = '#4caf50'
COLOR_ORANGE = '#ff5722'
COLOR_RED = '#f44336'
CPU_THRESHOLD_WARNING = 70
CPU_THRESHOLD_DANGER = 85
duplex_map = {
psutil.NIC_DUPLEX_FULL: "full",
psutil.NIC_DUPLEX_HALF: "half",
psutil.NIC_DUPLEX_UNKNOWN: "?",
}
def get_host_health(cpu_percent, cpu_percent_details):
status = {}
cpu = {}
cpu['percent'] = cpu_percent
cpu['percpu'] = cpu_percent_details
cpu['color'] = COLOR_GREEN
if cpu_percent > CPU_THRESHOLD_WARNING:
cpu['color'] = COLOR_ORANGE
if cpu_percent > CPU_THRESHOLD_DANGER:
cpu['color'] = COLOR_RED
cpu['label'] = get_host_cpu_label(cpu_percent)
status['cpu'] = cpu
ram = {}
# Original tuple:
# r_total, r_avail, r_percent, r_used, r_free, r_active, r_inactive, r_buffers, r_cached, r_shared, r_slab
ram_raw_values = psutil.virtual_memory()
r_total = ram_raw_values[0]
r_avail = ram_raw_values[1]
r_percent = ram_raw_values[2]
r_used = ram_raw_values[3]
r_free = ram_raw_values[4]
ram['total'] = r_total / 1024 / 1024 / 1024
ram['available'] = r_avail / 1024 / 1024 / 1024
ram['free'] = r_free / 1024 / 1024 / 1024
ram['used'] = r_used / 1024 / 1024 / 1024
ram['percent'] = r_percent
ram['color'] = COLOR_GREEN
if r_percent > 70:
ram['color'] = COLOR_ORANGE
if r_percent > 85:
ram['color'] = COLOR_RED
status['ram'] = ram
disk = {}
d_total, d_used, d_free, d_percent = psutil.disk_usage('/')
disk['total'] = d_total / 1024 / 1024 / 1024
disk['used'] = d_used / 1024 / 1024 / 1024
disk['free'] = d_free / 1024 / 1024 / 1024
disk['percent'] = d_percent
disk['color'] = COLOR_GREEN
if d_percent > 70:
disk['color'] = COLOR_ORANGE
if d_percent > 85:
disk['color'] = COLOR_RED
status['disk'] = disk
status['boot_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(psutil.boot_time()))
try:
status['network_stats'] = psutil.net_if_stats()
for (key, item) in status['network_stats'].items():
status['network_stats'][key] = status['network_stats'][key]._replace(duplex=duplex_map[item.duplex])
status['network_io'] = psutil.net_io_counters(pernic=True)
except OSError as err:
logging.error("Error while reading NIC status: {}}".format(err))
return status
def get_host_cpu_label(cpu_percent):
status = 'success'
if cpu_percent > CPU_THRESHOLD_WARNING:
status = 'warning'
if cpu_percent > CPU_THRESHOLD_DANGER:
status = 'danger'
return status
class HostHealth:
percent = 'N/A'
percent_details = 'N/A'
thread = None
pool_time = 5
interval = 2
def __init__(self, pool_time, interval):
self.pool_time = pool_time
self.interval = interval
def start(self):
self.thread = threading.Timer(self.pool_time, self.update_stats)
self.thread.start()
def update_stats(self):
self.percent = psutil.cpu_percent(interval=self.interval)
self.percent_details = psutil.cpu_percent(interval=self.interval, percpu=True)
self.start()
def get_stats(self):
return self.percent, self.percent_details
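# Hypothetical usage sketch (not part of the original file): poll CPU usage every
# 5 seconds, averaging each sample over a 2 second window. Note that get_stats()
# returns the placeholder 'N/A' values until the first poll has completed.
#   health = HostHealth(pool_time=5, interval=2)
#   health.start()
#   cpu_percent, cpu_percent_details = health.get_stats()
#   status = get_host_health(cpu_percent, cpu_percent_details)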
|
python
|
__author__ = 'Rob Edwards'
import sys
from matrices import blosum62
def score(a, b):
"""score dna as match/mismatch"""
if a == b:
return 1
return -1
def dna_score_alignment(seq1, seq2, gap_open=11, gap_extend=1):
"""
Generate a score for an alignment between two sequences. This does not do the alignment!
:param seq1: The first sequence
:param seq2: The second sequence
:param gap_open: The gap opening penalty
:param gap_extend: The gap extension penalty
:return: An int for the best score for the alignment
"""
score_matrix = [[[0 for j in range(len(seq2)+1)] for i in range(len(seq1)+1)] for k in range(3)]
# initially populate the gap open/extension
for i in range(1, len(seq1)+1):
score_matrix[0][i][0] = -gap_open - (i-1)*gap_extend
score_matrix[1][i][0] = -gap_open - (i-1)*gap_extend
score_matrix[2][i][0] = -10*gap_open
for j in range(1, len(seq2)+1):
score_matrix[2][0][j] = -gap_open - (j-1)*gap_extend
score_matrix[1][0][j] = -gap_open - (j-1)*gap_extend
score_matrix[0][0][j] = -10*gap_open
for i in range(1, len(seq1)+1):
for j in range(1, len(seq2)+1):
lower_scores = [score_matrix[0][i-1][j] - gap_extend, score_matrix[1][i-1][j] - gap_open]
score_matrix[0][i][j] = max(lower_scores)
upper_scores = [score_matrix[2][i][j-1] - gap_extend, score_matrix[1][i][j-1] - gap_open]
score_matrix[2][i][j] = max(upper_scores)
mid_scores = [score_matrix[0][i][j], score_matrix[1][i-1][j-1] + score(seq1[i-1], seq2[j-1]), score_matrix[2][i][j]]
score_matrix[1][i][j] = max(mid_scores)
max_scores = [score_matrix[0][i][j], score_matrix[1][i][j], score_matrix[2][i][j]]
return max(max_scores)
def dna_gapped_alignment(seq1, seq2, gap_open=11, gap_extend=1):
"""
Perform a gapped alignment. This approach uses two, 3 dimensional matrices.
:param seq1: The first sequence
:param seq2: The second sequence
:param gap_open: The gap opening penalty (default = 11)
    :param gap_extend: The gap extension penalty (default = 1)
:return: The score, and the two sequences with gaps in them
"""
score_matrix = [[[0 for j in range(len(seq2)+1)] for i in range(len(seq1)+1)] for k in range(3)]
backtrack_matrix = [[[0 for j in range(len(seq2)+1)] for i in range(len(seq1)+1)] for k in range(3)]
# initially populate the gap open/extension
for i in range(1, len(seq1)+1):
score_matrix[0][i][0] = -gap_open - (i-1)*gap_extend
score_matrix[1][i][0] = -gap_open - (i-1)*gap_extend
score_matrix[2][i][0] = -10*gap_open
for j in range(1, len(seq2)+1):
score_matrix[2][0][j] = -gap_open - (j-1)*gap_extend
score_matrix[1][0][j] = -gap_open - (j-1)*gap_extend
score_matrix[0][0][j] = -10*gap_open
for i in range(1, len(seq1)+1):
for j in range(1, len(seq2)+1):
lower_scores = [score_matrix[0][i-1][j] - gap_extend, score_matrix[1][i-1][j] - gap_open]
score_matrix[0][i][j] = max(lower_scores)
backtrack_matrix[0][i][j] = lower_scores.index(score_matrix[0][i][j])
upper_scores = [score_matrix[2][i][j-1] - gap_extend, score_matrix[1][i][j-1] - gap_open]
score_matrix[2][i][j] = max(upper_scores)
backtrack_matrix[2][i][j] = upper_scores.index(score_matrix[2][i][j])
mid_scores = [score_matrix[0][i][j], score_matrix[1][i-1][j-1] + score(seq1[i-1], seq2[j-1]), score_matrix[2][i][j]]
score_matrix[1][i][j] = max(mid_scores)
backtrack_matrix[1][i][j] = mid_scores.index(score_matrix[1][i][j])
i=len(seq1)
j=len(seq2)
output_seq1 = seq1
output_seq2 = seq2
max_scores = [score_matrix[0][i][j], score_matrix[1][i][j], score_matrix[2][i][j]]
max_score = max(max_scores)
backtrack_level = max_scores.index(max_score)
# we need this, time and again
insert_indel = lambda word, i: word[:i] + '-' + word[i:]
while i*j != 0:
if backtrack_level == 0:
if backtrack_matrix[0][i][j] == 1:
backtrack_level = 1
i -= 1
output_seq2 = insert_indel(output_seq2, j)
elif backtrack_level == 1:
if backtrack_matrix[1][i][j] == 0:
backtrack_level = 0
elif backtrack_matrix[1][i][j] == 2:
backtrack_level = 2
else:
i -= 1
j -= 1
else:
if backtrack_matrix[2][i][j] == 1:
backtrack_level = 1
j -= 1
output_seq1 = insert_indel(output_seq1, i)
    for k in range(i):
        output_seq2 = insert_indel(output_seq2, 0)
    for k in range(j):
        output_seq1 = insert_indel(output_seq1, 0)
return (max_score, output_seq1, output_seq2)
if __name__ == "__main__":
#s1 = 'ATGLVRRLGSFLVEDFSRYKLLL'
#s2 = 'ATGLGLMRRSGSPLVESRYKLL'
s1 = 'MQMCDRKHECYFEGFICDWHTLLEPHIVAQSEPYPCHKKMTQMPPPCSWFGNDIAEEKPSSIMATPAMPNVEEGM'
s2 = 'MWMKDRKKNANECDWHPLLEYHIVAQSEPYKCCKKAMLGVKGAGTQMPPPCSWFGNDIAEEKPSSIMATPAMPNWEEGM'
    (score, s1, s2) = dna_gapped_alignment(s1, s2)
print(str(score) + "\n" + s1 + "\n" + s2)
|
python
|
from libs.matrix_client.matrix_client.client import MatrixClient
from libs.matrix_client.matrix_client.api import MatrixRequestError
from libs.matrix_client.matrix_client.user import User
from requests.exceptions import MissingSchema
from helpers import setup_logger
logger = setup_logger(__name__, 'info')
class Client():
def __init__(self, username, password=None, token=None, server="matrix.org"):
self.username = username
self.server = server
self.server_url = "https://{}".format(self.server)
self.token = None
self.logged_in = True
# Create the matrix client
        if token is None and password is not None:
self.matrix_client = MatrixClient(self.server_url)
# Try logging in the user
try:
self.token = self.matrix_client.login(username=username, password=password)
self.user = User(self.matrix_client, self.matrix_client.user_id)
except MatrixRequestError as e:
self.logged_in = False
if e.code == 403:
logger.exception("Wrong username or password")
else:
logger.exception("Check server details")
except MissingSchema as e:
logger.exception("Bad URL format")
else:
self.matrix_client = MatrixClient(self.server_url, token=token, user_id=username)
self.user = User(self.matrix_client, self.matrix_client.user_id)
# Return the user's display name
def get_user_display_name(self):
return self.user.get_display_name()
# Get the rooms of the user
def get_rooms(self):
return self.matrix_client.rooms
def get_user(self):
return self.user
def get_token(self):
return self.token
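# Hypothetical usage sketch (username and password are placeholders, not from the original):
#   client = Client("@someuser:matrix.org", password="hunter2")
#   if client.logged_in:
#       print(client.get_user_display_name())
#       print(list(client.get_rooms()))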
|
python
|
import unittest
import uuid
from unittest.mock import patch
from microsetta_private_api.model.interested_user import InterestedUser
from microsetta_private_api.repo.interested_user_repo import InterestedUserRepo
from microsetta_private_api.repo.transaction import Transaction
from psycopg2.errors import ForeignKeyViolation
ADDRESS_1 = "9500 Gilman Dr"
ADDRESS_2 = ""
CITY = "La Jolla"
STATE = "CA"
POSTAL = "92093"
COUNTRY = "United States"
LATITUDE = "32.88003507430753"
LONGITUDE = "-117.23394724325632"
class InterestedUserRepoTests(unittest.TestCase):
def setUp(self):
self.test_campaign_title_1 = 'Test Campaign'
with Transaction() as t:
cur = t.cursor()
# create a test campaign
cur.execute(
"INSERT INTO campaign.campaigns (title) "
"VALUES (%s) "
"RETURNING campaign_id",
(self.test_campaign_title_1, )
)
self.test_campaign_id = cur.fetchone()[0]
# create necessary campaign/project relationship
cur.execute(
"INSERT INTO campaign.campaigns_projects "
"(campaign_id, project_id) "
"VALUES (%s, 1)",
(self.test_campaign_id, )
)
t.commit()
# need to create an extra, fake campaign ID
self.fake_campaign_id = None
while self.fake_campaign_id is None:
tmp_fake_campaign_id = uuid.uuid4()
if tmp_fake_campaign_id != self.test_campaign_id:
self.fake_campaign_id = str(tmp_fake_campaign_id)
def tearDown(self):
with Transaction() as t:
cur = t.cursor()
cur.execute(
"DELETE FROM campaign.campaigns_projects "
"WHERE campaign_id = %s",
(self.test_campaign_id,)
)
cur.execute(
"DELETE FROM campaign.campaigns "
"WHERE campaign_id = %s",
(self.test_campaign_id,)
)
t.commit()
def test_create_interested_user_valid(self):
dummy_user = {
"campaign_id": self.test_campaign_id,
"first_name": "Test",
"last_name": "McTesterson",
"email": "[email protected]"
}
interested_user = InterestedUser.from_dict(dummy_user)
with Transaction() as t:
interested_user_repo = InterestedUserRepo(t)
obs = interested_user_repo.insert_interested_user(interested_user)
self.assertTrue(obs is not None)
def test_create_interested_user_invalid(self):
# test with a required field missing
dummy_user = {
"campaign_id": self.test_campaign_id,
"first_name": "Test",
"last_name": "McTesterson"
}
with self.assertRaises(KeyError):
interested_user = InterestedUser.from_dict(dummy_user)
# test with invalid campaign ID
dummy_user = {
"campaign_id": self.fake_campaign_id,
"first_name": "Test",
"last_name": "McTesterson",
"email": "[email protected]"
}
interested_user = InterestedUser.from_dict(dummy_user)
with Transaction() as t:
interested_user_repo = InterestedUserRepo(t)
with self.assertRaises(ForeignKeyViolation):
interested_user_repo.insert_interested_user(interested_user)
def test_verify_address_already_verified(self):
dummy_user = {
"campaign_id": self.test_campaign_id,
"first_name": "Test",
"last_name": "McTesterson",
"email": "[email protected]",
"address_checked": True
}
interested_user = InterestedUser.from_dict(dummy_user)
with Transaction() as t:
interested_user_repo = InterestedUserRepo(t)
user_id = \
interested_user_repo.insert_interested_user(interested_user)
obs = interested_user_repo.verify_address(user_id)
self.assertTrue(obs is None)
@patch("microsetta_private_api.repo.interested_user_repo.verify_address")
def test_verify_address_not_verified_is_valid(self, test_verify_address):
test_verify_address.return_value = {
"address_1": ADDRESS_1,
"address_2": ADDRESS_2,
"city": CITY,
"state": STATE,
"postal": POSTAL,
"country": COUNTRY,
"latitude": LATITUDE,
"longitude": LONGITUDE,
"valid": True
}
dummy_user = {
"campaign_id": self.test_campaign_id,
"first_name": "Test",
"last_name": "McTesterson",
"email": "[email protected]",
"address_1": ADDRESS_1,
"city": CITY,
"state": STATE,
"postal_code": POSTAL,
"country": COUNTRY
}
interested_user = InterestedUser.from_dict(dummy_user)
with Transaction() as t:
interested_user_repo = InterestedUserRepo(t)
user_id = \
interested_user_repo.insert_interested_user(interested_user)
obs = interested_user_repo.verify_address(user_id)
self.assertTrue(obs is True)
@patch("microsetta_private_api.repo.interested_user_repo.verify_address")
def test_verify_address_not_verified_is_invalid(self, test_verify_address):
test_verify_address.return_value = {
"address_1": ADDRESS_1,
"address_2": ADDRESS_2,
"city": CITY,
"state": STATE,
"postal": POSTAL,
"country": COUNTRY,
"latitude": LATITUDE,
"longitude": LONGITUDE,
"valid": False
}
dummy_user = {
"campaign_id": self.test_campaign_id,
"first_name": "Test",
"last_name": "McTesterson",
"email": "[email protected]",
"address_1": ADDRESS_1,
"city": CITY,
"state": STATE,
"postal_code": POSTAL,
"country": COUNTRY
}
interested_user = InterestedUser.from_dict(dummy_user)
with Transaction() as t:
interested_user_repo = InterestedUserRepo(t)
user_id = \
interested_user_repo.insert_interested_user(interested_user)
obs = interested_user_repo.verify_address(user_id)
self.assertTrue(obs is False)
if __name__ == '__main__':
unittest.main()
|
python
|
import binascii
import socket
import struct
from twampy.utils import generate_zero_bytes, now
from twampy.constants import TIMEOFFSET, TWAMP_PORT_DEFAULT, TOS_DEFAULT, TIMEOUT_DEFAULT
import logging
logger = logging.getLogger("twampy")
class ControlClient:
    def __init__(self, server, port=TWAMP_PORT_DEFAULT, timeout=TIMEOUT_DEFAULT, tos=TOS_DEFAULT, source_address=None):
        # Keep the connection parameters so connect() can re-establish the control session
        self.server = server
        self.port = port
        self.tos = tos
        self.socket = socket.create_connection((server, port), timeout, source_address)

    def connect(self):
        # Re-create the control socket, choosing the address family from the server string
        if ':' in self.server:
            self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        else:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Simplification: sets TOS at the IPv4 level; IPv6 traffic class would need IPV6_TCLASS
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self.tos)
        self.socket.connect((self.server, self.port))
def send(self, data):
logger.debug("CTRL.TX %s", binascii.hexlify(data))
try:
self.socket.send(data)
except Exception as e:
logger.critical('*** Sending data failed: %s', str(e))
def receive(self):
data = self.socket.recv(9216)
logger.debug("CTRL.RX %s (%d bytes)", binascii.hexlify(data), len(data))
return data
def close(self):
self.socket.close()
def connectionSetup(self):
logger.info("CTRL.RX <<Server Greeting>>")
data = self.receive()
self.smode = struct.unpack('!I', data[12:16])[0]
logger.info("TWAMP modes supported: %d", self.smode)
if self.smode & 1 == 0:
logger.critical('*** TWAMPY only supports unauthenticated mode(1)')
logger.info("CTRL.TX <<Setup Response>>")
self.send(struct.pack('!I', 1) + generate_zero_bytes(160))
logger.info("CTRL.RX <<Server Start>>")
data = self.receive()
rval = ord(data[15])
if rval != 0:
# TWAMP setup request not accepted by server
logger.critical("*** ERROR CODE %d in <<Server Start>>", rval)
self.nbrSessions = 0
def reqSession(self, sender="", s_port=20001, receiver="", r_port=20002, startTime=0, timeOut=3, dscp=0, padding=0):
typeP = dscp << 24
if startTime != 0:
startTime += now() + TIMEOFFSET
if sender == "":
request = struct.pack('!4B L L H H 13L 4ILQ4L', 5, 4, 0, 0, 0, 0, s_port, r_port, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
elif sender == "::":
request = struct.pack('!4B L L H H 13L 4ILQ4L', 5, 6, 0, 0, 0, 0, s_port, r_port, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
elif ':' in sender:
s = socket.inet_pton(socket.AF_INET6, sender)
r = socket.inet_pton(socket.AF_INET6, receiver)
request = struct.pack('!4B L L H H 16s 16s 4L L 4ILQ4L', 5, 6, 0, 0, 0, 0, s_port, r_port, s, r, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
else:
s = socket.inet_pton(socket.AF_INET, sender)
r = socket.inet_pton(socket.AF_INET, receiver)
request = struct.pack('!4B L L H H 16s 16s 4L L 4ILQ4L', 5, 4, 0, 0, 0, 0, s_port, r_port, s, r, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
logger.info("CTRL.TX <<Request Session>>")
self.send(request)
logger.info("CTRL.RX <<Session Accept>>")
data = self.receive()
rval = ord(data[0])
if rval != 0:
logger.critical("ERROR CODE %d in <<Session Accept>>", rval)
return False
return True
def startSessions(self):
request = struct.pack('!B', 2) + generate_zero_bytes(31)
logger.info("CTRL.TX <<Start Sessions>>")
self.send(request)
logger.info("CTRL.RX <<Start Accept>>")
self.receive()
def stopSessions(self):
request = struct.pack('!BBHLQQQ', 3, 0, 0, self.nbrSessions, 0, 0, 0)
logger.info("CTRL.TX <<Stop Sessions>>")
self.send(request)
self.nbrSessions = 0
|
python
|
from helpers import test_tools
from graph import GraphRoutingProblem
from dungeon import DungeonProblem
|
python
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import APITestCase
class DeleteUserIdentityTest(APITestCase):
def test_simple(self):
user = self.create_user(email="[email protected]")
org = self.create_organization(owner=user)
auth_provider = AuthProvider.objects.create(organization=org, provider="dummy")
auth_identity = AuthIdentity.objects.create(
auth_provider=auth_provider, ident=user.email, user=user
)
self.login_as(user=user)
url = reverse(
"sentry-api-0-user-identity-details",
kwargs={"user_id": user.id, "identity_id": auth_identity.id},
)
resp = self.client.delete(url, format="json")
assert resp.status_code == 204, resp.content
assert not AuthIdentity.objects.filter(id=auth_identity.id).exists()
|
python
|
#
# build the vocab/dictionary from outside to all related lexicons
from __future__ import print_function
import os
import sys
import argparse
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from neuronlp2 import utils
from neuronlp2.io import get_logger, conllx_stacked_data
#
# Only use for building multi-lingual vocabs, this is only a simple workaround.
# However, we might also want multi-lingual embeddings before training for convenience.
# Usage:
# python2 examples/vocab/build_vocab.py --word_embedding <embed-type> --word_paths [various languages' embeddings: e1 e2 ...]
# --train <english-train-file> --extra [various languages' test-files: ... ] --model_path <path>
#
def parse_cmd(args):
args_parser = argparse.ArgumentParser(description='Building the alphabets/vocabularies.')
#
args_parser.add_argument('--word_embedding', type=str, choices=['word2vec', 'glove', 'senna', 'sskip', 'polyglot'],
help='Embedding for words', required=True)
args_parser.add_argument('--word_paths', type=str, nargs='+', help='path for word embedding dict', required=True)
args_parser.add_argument('--train', type=str, help="The main file to build vocab.", required=True)
args_parser.add_argument('--extra', type=str, nargs='+', help="Extra files to build vocab, usually dev/tests.",
required=True)
args_parser.add_argument('--model_path', help='path for saving model file.', required=True)
res = args_parser.parse_args(args)
return res
def _get_keys(wd):
try:
return wd.keys()
    except AttributeError:
        # Word2VecKeyedVectors exposes its vocabulary via .vocab instead of .keys()
        return wd.vocab.keys()
# todo(warn): if not care about the specific language of the embeddings
def combine_embeds(word_dicts):
num_dicts = len(word_dicts)
count_ins, count_repeats = [0 for _ in range(num_dicts)], [0 for _ in range(num_dicts)]
res = dict()
for idx, one in enumerate(word_dicts):
for k in _get_keys(one):
if k in res:
count_repeats[idx] += 1
else:
count_ins[idx] += 1
res[k] = 0
return res, count_ins, count_repeats
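# For example (illustration only, not from the original):
#   combine_embeds([{'cat': 0, 'dog': 0}, {'cat': 0, 'fish': 0}])
# returns ({'cat': 0, 'dog': 0, 'fish': 0}, [2, 1], [0, 1]) -- two new keys from the
# first dict, one new key and one repeat discarded from the second.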
def main(a=None):
if a is None:
a = sys.argv[1:]
args = parse_cmd(a)
# if output directory doesn't exist, create it
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
logger = get_logger("VocabBuilder", args.model_path + '/vocab.log.txt')
logger.info('\ncommand-line params : {0}\n'.format(sys.argv[1:]))
logger.info('{0}\n'.format(args))
# load embeds
logger.info("Load embeddings")
word_dicts = []
word_dim = None
for one in args.word_paths:
one_word_dict, one_word_dim = utils.load_embedding_dict(args.word_embedding, one)
assert word_dim is None or word_dim == one_word_dim, "Embedding size not matched!"
word_dicts.append(one_word_dict)
word_dim = one_word_dim
# combine embeds
combined_word_dict, count_ins, count_repeats = combine_embeds(word_dicts)
logger.info("Final embeddings size: %d." % len(combined_word_dict))
for one_fname, one_count_ins, one_count_repeats in zip(args.word_paths, count_ins, count_repeats):
logger.info(
"For embed-file %s, count-in: %d, repeat-discard: %d." % (one_fname, one_count_ins, one_count_repeats))
# create vocabs
logger.info("Creating Alphabets")
alphabet_path = os.path.join(args.model_path, 'alphabets/')
assert not os.path.exists(alphabet_path), "Alphabet path exists, please build with a new path."
word_alphabet, char_alphabet, pos_alphabet, type_alphabet, max_sent_length = conllx_stacked_data.create_alphabets(
alphabet_path, args.train, data_paths=args.extra, max_vocabulary_size=100000, embedd_dict=combined_word_dict)
# printing info
num_words = word_alphabet.size()
num_chars = char_alphabet.size()
num_pos = pos_alphabet.size()
num_types = type_alphabet.size()
logger.info("Word Alphabet Size: %d" % num_words)
logger.info("Character Alphabet Size: %d" % num_chars)
logger.info("POS Alphabet Size: %d" % num_pos)
logger.info("Type Alphabet Size: %d" % num_types)
if __name__ == '__main__':
main()
|
python
|
# Copyright 2021 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Entrypoint of the metadata_service package.
"""
from typing import Optional
import typer
from ghga_service_chassis_lib.api import run_server
from metadata_service.config import get_config
from metadata_service.api import app # noqa: F401 pylint: disable=unused-import
def run(
config: Optional[str] = typer.Option(None, help="Path to config yaml.")
) -> None:
"""
Start the backend server.
Args:
config: The path to the application configuration
"""
run_server(app="metadata_service.__main__:app", config=get_config())
def run_cli() -> None:
"""
Command line interface for running the server.
"""
typer.run(run)
if __name__ == "__main__":
run_cli()
|
python
|
def ones(l):
#return l + [x + '_1' for x in l]
#return sorted(l + [x + '_1' for x in l])
ret = []
for x in l:
ret.append(x)
ret.append(x + '_1')
return ret
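# For example, ones(['FDCE', 'FDPE']) returns ['FDCE', 'FDCE_1', 'FDPE', 'FDPE_1'].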
# The complete primitive sets
ffprims_fall = ones(
[
'FD',
'FDC',
'FDCE',
'FDE',
'FDP',
'FDPE',
'FDR',
'FDRE',
'FDS',
'FDSE',
])
ffprims_lall = ones([
'LDC',
'LDCE',
'LDE',
'LDPE',
'LDP',
])
# Base primitives
ffprims_f = [
'FDRE',
'FDSE',
'FDCE',
'FDPE',
]
ffprims_l = [
'LDCE',
'LDPE',
]
ffprims = ffprims_f + ffprims_l
def isff(prim):
return prim.startswith("FD")
def isl(prim):
return prim.startswith("LD")
ff_bels_5 = [
'A5FF',
'B5FF',
'C5FF',
'D5FF',
]
ff_bels_ffl = [
'AFF',
'BFF',
'CFF',
'DFF',
]
ff_bels = ff_bels_ffl + ff_bels_5
#ff_bels = ff_bels_ffl
|
python
|
import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = 'django-pimp-my-filter',
version = '0.1.1',
packages = find_packages(),
include_package_data = True,
license = 'BSD License',
description = 'An application, that helps you build your own filters for any model and use it.',
long_description = README,
url = 'https://github.com/fynjah/django-pimp-my-filter',
author = 'Anton Ievtushenko',
author_email = '[email protected]',
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
python
|
#!/usr/bin/env python
# Python imports
import matplotlib.pyplot as plt
import numpy as np
import time
import argparse
# Other imports
import srl_example_setup
from simple_rl.tasks import GridWorldMDP
from simple_rl.planning.ValueIterationClass import ValueIteration
# INSTRUCTIONS FOR USE:
# 1. When you run the program [either with default or supplied arguments], a pygame window should pop up.
# This is the first iteration of running VI on the given MDP.
# 2. Press any button to close this pygame window and wait, another window will pop-up displaying the
# policy from the next time step
# 3. Repeat 1 and 2 until the program terminates
# An input function, creates the MDP object based on user input
def generate_MDP(width, height, init_loc, goal_locs, lava_locs, gamma, walls, slip_prob):
""" Creates an MDP object based on user input """
actual_args = {
"width": width,
"height": height,
"init_loc": init_loc,
"goal_locs": goal_locs,
"lava_locs": lava_locs,
"gamma": gamma,
"walls": walls,
"slip_prob": slip_prob,
"lava_cost": 1.0,
"step_cost": 0.1
}
return GridWorldMDP(**actual_args)
def main():
# This accepts arguments from the command line with flags.
# Example usage: python value_iteration_demo.py -w 4 -H 3 -s 0.05 -g 0.95 -il [(0,0)] -gl [(4,3)] -ll [(4,2)] -W [(2,2)]
parser = argparse.ArgumentParser(description='Run a demo that shows a visualization of value iteration on a GridWorld MDP')
# Add the relevant arguments to the argparser
parser.add_argument('-w', '--width', type=int, nargs="?", const=4, default=4,
help='an integer representing the number of cells for the GridWorld width')
parser.add_argument('-H', '--height', type=int, nargs="?", const=3, default=3,
help='an integer representing the number of cells for the GridWorld height')
parser.add_argument('-s', '--slip', type=float, nargs="?", const=0.05, default=0.05,
help='a float representing the probability that the agent will "slip" and not take the intended action')
parser.add_argument('-g', '--gamma', type=float, nargs="?", const=0.95, default=0.95,
help='a float representing the decay probability for Value Iteration')
parser.add_argument('-il', '--i_loc', type=tuple, nargs="?", const=(0,0), default=(0,0),
help='two integers representing the starting cell location of the agent [with zero-indexing]')
parser.add_argument('-gl', '--g_loc', type=list, nargs="?", const=[(3,3)], default=[(3,3)],
help='a sequence of integer-valued coordinates where the agent will receive a large reward and enter a terminal state')
parser.add_argument('-ll', '--l_loc', type=list, nargs="?", const=[(3,2)], default=[(3,2)],
help='a sequence of integer-valued coordinates where the agent will receive a large negative reward and enter a terminal state')
parser.add_argument('-W', '--Walls', type=list, nargs="?", const=[(2,2)], default=[(2,2)],
help='a sequence of integer-valued coordinates representing cells that the agent cannot transition into')
args = parser.parse_args()
mdp = generate_MDP(
args.width,
args.height,
args.i_loc,
args.g_loc,
args.l_loc,
args.gamma,
args.Walls,
args.slip)
# Run value iteration on the mdp and save the history of value backups until convergence
vi = ValueIteration(mdp, max_iterations=50)
_, _, histories = vi.run_vi_histories()
# For every value backup, visualize the policy
for value_dict in histories:
mdp.visualize_policy(lambda in_state: value_dict[in_state]) # Note: This lambda is necessary because the policy must be a function
time.sleep(0.5)
if __name__ == "__main__":
main()
|
python
|
import socket
import time
HOST='data.pr4e.org'
PORT=80
mysock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
mysock.connect((HOST,PORT))
mysock.sendall(b'GET http://data.pr4e.org/cover3.jpg HTTP/1.0\r\n\r\n')
count=0
picture=b""
while True:
data=mysock.recv(5120)
if(len(data)<1):
break
time.sleep(0.25)
count=count+len(data)
print(len(data),count)
picture=picture+data
mysock.close()
#Look for the end of the headers
pos=picture.find(b"\r\n\r\n")
print('Header length',pos)
print(picture[:pos].decode())
#skip past the header and save the picture data
picture=picture[pos+4:]
fhand=open("stuff.jpg","wb")
fhand.write(picture)
fhand.close()
|
python
|
#!/usr/bin/env python
"""
Tropical Cyclone Risk Model (TCRM) - Version 1.0 (beta release)
Copyright (C) 2011 Commonwealth of Australia (Geoscience Australia)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Title: mslp_seasonal_clim.py
Author: Nicholas Summons, [email protected]
Last modified: 4 June 2010
Description: Utility for creating Mean Sea Level Pressure (MSLP) seasonal climatology maps.
Uses NCEP-DOE Reanalysis 2 data averaged over date range: 1980-2007.
This script can either be run stand alone to create a NetCDF output file or
the class MSLPGrid can be invoked to return the MSLP seasonal average grid.
Acknowledgements:
NCEP-DOE Reanalysis 2 data provided by the NOAA/OAR/ESRL PSD, Boulder, Colorado, USA,
from their Web site at http://www.esrl.noaa.gov/psd/
Input data: mslp_seasonal_clim.nc (contains monthly means averaged over 28 year period)
"""
import os
import numpy as np
import Utilities.nctools as nctools
from Utilities import pathLocator
class MSLPGrid:
def __init__(self, selected_months, filename=''):
if not os.path.isfile(filename):
tcrm_dir = pathLocator.getRootDirectory()
filename = os.path.join(tcrm_dir, 'MSLP', 'mslp_monthly_clim.nc')
if not os.path.isfile(filename):
error_msg = "MSLP data file not found"
raise IOError, error_msg
selected_months = set(selected_months)
ncobj = nctools.ncLoadFile(filename)
mslp_all = nctools.ncGetData(ncobj, 'mslp')
self.lon = nctools.ncGetDims(ncobj, 'lon')
self.lat = nctools.ncGetDims(ncobj, 'lat')
dim0,dim1,dim2 = np.shape(mslp_all)
# Average over selected months
mslp_sum = np.zeros([dim1, dim2], dtype='float32')
for month in selected_months:
mslp_sum = mslp_sum + mslp_all[month-1,:,:]
self.mslp_av = np.flipud(mslp_sum / len(selected_months))
def sampleGrid(self, lon, lat):
"""sampleGrid(self, lon, lat):
Grab nearest value to given location.
No interpolation performed!
"""
indi = self.lon.searchsorted(lon)-1
indj = self.lat.searchsorted(lat)-1
return self.mslp_av[indj, indi]
def returnGrid(self):
return self.lon, self.lat, self.mslp_av
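# Hypothetical usage sketch (month numbers chosen for illustration, not from the original):
#   grid = MSLPGrid([11, 12, 1, 2, 3, 4])     # southern-hemisphere cyclone season
#   lon, lat, mslp = grid.returnGrid()
#   value = grid.sampleGrid(150.0, -20.0)     # nearest-gridpoint MSLP at 150E, 20S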
#def main(configFile):
# selected_months_str = str(cnfGetIniValue(configFile, 'DataProcess', 'selected_months', arange(13)))
# selected_months = set(selected_months_str.strip('[]{}() ').replace(',', ' ').split(' '))
# selected_months.discard('')
# if selected_months.issubset([str(k) for k in range(1,13)]):
# selected_months = [int(k) for k in selected_months]
# months_str = ', '.join([calendar.month_abbr[i] for i in sort(list(selected_months))])
# print "Creating Mean Sea Level Pressure (MSLP) seasonal climatology:"
# print "Months specified for seasonal average: " + months_str
# print "Using NCEP Reanalysis-2 data from 1980-2007"
#
# msp = MSLPGrid(selected_months)
# lon, lat, mslp_av = msp.returnGrid()
#
# #Create output file
# output_filename = "mslp_clim_" + ''.join([calendar.month_abbr[i][0] for i in sort(list(selected_months))]) + '.nc'
# data_title = 'MSLP (NCEP Reanalysis-2) seasonal climatology. Averaging period: ' \
# + months_str + ' ' + '1980-2007.'
# dimensions = {0:{'name':'lat','values':lat,'dtype':'f','atts':{'long_name':'Latitude',
# 'units':'degrees_north'} },
# 1:{'name':'lon','values':lon,'dtype':'f','atts':{'long_name':'Longitude',
# 'units':'degrees_east'} } }
#
# variables = {0:{'name':'mslp','dims':('lat','lon'),
# 'values':array(mslp_av),'dtype':'f',
# 'atts':{'long_name':'Mean sea level pressure',
# 'units':'hPa'} } }
# nctools.ncSaveGrid( output_filename, dimensions, variables,
# nodata=-9999,datatitle=data_title )
#
# print "Created output file: " + output_filename
#
#
#if __name__ == "__main__":
# try:
# configFile = sys.argv[1]
# except IndexError:
# # Try loading config file with same name as python script
# configFile = __file__.rstrip('.py') + '.ini'
# # If no filename is specified and default filename doesn't exist => raise error
# if not os.path.exists(configFile):
# error_msg = "No configuration file specified"
# raise IOError, error_msg
# # If config file doesn't exist => raise error
# if not os.path.exists(configFile):
# error_msg = "Configuration file '" + configFile +"' not found"
# raise IOError, error_msg
#
# main(configFile)
|
python
|
import pandas as pd
import numpy as np
from os import listdir
import config
def read(bound_file_path, compounds_file_path, adducts_file_path):
'''
Read in excel files as pd.DataFrame objects
'''
bound_df = pd.read_excel(bound_file_path)
compounds_df = pd.read_excel(compounds_file_path)
adducts_df = pd.read_excel(adducts_file_path)
adducts_df = adducts_df[adducts_df['Formula'] != 'H']
adducts_df.columns = ['Compound/Fragment', 'Formula', 'Min', 'Max', 'Charge of compound/fragment']
adducts_df['Compound/Fragment Type'] = 'Adducts'
all_compounds = pd.concat([compounds_df, adducts_df], ignore_index=True, sort=False)
return bound_df, all_compounds
def normalise(spectrum):
X = spectrum["I"].to_numpy()
spectrum["normalised_intensity"] = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
return spectrum
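# Hypothetical usage sketch (file names are placeholders, not from the original):
#   bound_df, all_compounds = read('bound.xlsx', 'compounds.xlsx', 'adducts.xlsx')
#   bound_df = normalise(bound_df)   # assumes bound_df carries an intensity column named 'I'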
|
python
|
#! /usr/bin/env python
import onnx.backend
import argparse
import caffe2.python.workspace as c2_workspace
import glob
import json
import numpy as np
import onnx
import caffe2.python.onnx.frontend
import caffe2.python.onnx.backend
import os
import shutil
import tarfile
import tempfile
import boto3
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.proto import caffe2_pb2
from onnx import numpy_helper
"""A script converting Caffe2 models to ONNX, and updating ONNX model zoos.
Arguments:
-v, verbose
--local-dir, where we store the ONNX and Caffe2 models
--no-cache, ignore existing models in local-dir
--clean-test-data, delete all the existing test data when updating ONNX model zoo
--add-test-data, add add-test-data sets of test data for each ONNX model
--only-local, run locally (for testing purpose)
Examples:
# store the data in /home/username/zoo-dir, delete existing test data, ignore local cache,
# and generate 3 sets of new test data
python update-caffe2-models.py --local-dir /home/username/zoo-dir --clean-test-data --no-cache --add-test-data 3
"""
# TODO: Add GPU support
def upload_onnx_model(model_name, zoo_dir, backup=False, only_local=False):
if only_local:
print('No uploading in local only mode.')
return
model_dir = os.path.join(zoo_dir, model_name)
suffix = '-backup' if backup else ''
if backup:
print('Backing up the previous version of ONNX model {}...'.format(model_name))
rel_file_name = '{}{}.tar.gz'.format(model_name, suffix)
abs_file_name = os.path.join(zoo_dir, rel_file_name)
print('Compressing {} model to {}'.format(model_name, abs_file_name))
with tarfile.open(abs_file_name, 'w:gz') as f:
f.add(model_dir, arcname=model_name)
file_size = os.stat(abs_file_name).st_size
print('Uploading {} ({} MB) to s3 cloud...'.format(abs_file_name, float(file_size) / 1024 / 1024))
client = boto3.client('s3', 'us-east-1')
transfer = boto3.s3.transfer.S3Transfer(client)
transfer.upload_file(abs_file_name, 'download.onnx', 'models/latest/{}'.format(rel_file_name),
extra_args={'ACL': 'public-read'})
print('Successfully uploaded {} to s3!'.format(rel_file_name))
def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
model_dir = os.path.join(zoo_dir, model_name)
if os.path.exists(model_dir):
if use_cache:
upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)
return
else:
shutil.rmtree(model_dir)
url = 'https://s3.amazonaws.com/download.onnx/models/latest/{}.tar.gz'.format(model_name)
download_file = tempfile.NamedTemporaryFile(delete=False)
try:
download_file.close()
print('Downloading ONNX model {} from {} and save in {} ...\n'.format(
model_name, url, download_file.name))
urlretrieve(url, download_file.name)
with tarfile.open(download_file.name) as t:
print('Extracting ONNX model {} to {} ...\n'.format(model_name, zoo_dir))
t.extractall(zoo_dir)
except Exception as e:
print('Failed to download/backup data for ONNX model {}: {}'.format(model_name, e))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
finally:
os.remove(download_file.name)
if not only_local:
upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)
def download_caffe2_model(model_name, zoo_dir, use_cache=True):
model_dir = os.path.join(zoo_dir, model_name)
if os.path.exists(model_dir):
if use_cache:
return
else:
shutil.rmtree(model_dir)
os.makedirs(model_dir)
for f in ['predict_net.pb', 'init_net.pb', 'value_info.json']:
url = getURLFromName(model_name, f)
dest = os.path.join(model_dir, f)
try:
try:
downloadFromURLToFile(url, dest,
show_progress=False)
except TypeError:
# show_progress not supported prior to
# Caffe2 78c014e752a374d905ecfb465d44fa16e02a28f1
# (Sep 17, 2017)
downloadFromURLToFile(url, dest)
except Exception as e:
print("Abort: {reason}".format(reason=e))
print("Cleaning up...")
deleteDirectory(model_dir)
raise
def caffe2_to_onnx(caffe2_model_name, caffe2_model_dir):
caffe2_init_proto = caffe2_pb2.NetDef()
caffe2_predict_proto = caffe2_pb2.NetDef()
with open(os.path.join(caffe2_model_dir, 'init_net.pb'), 'rb') as f:
caffe2_init_proto.ParseFromString(f.read())
caffe2_init_proto.name = '{}_init'.format(caffe2_model_name)
with open(os.path.join(caffe2_model_dir, 'predict_net.pb'), 'rb') as f:
caffe2_predict_proto.ParseFromString(f.read())
caffe2_predict_proto.name = caffe2_model_name
with open(os.path.join(caffe2_model_dir, 'value_info.json'), 'rb') as f:
value_info = json.loads(f.read())
print('Converting Caffe2 model {} in {} to ONNX format'.format(caffe2_model_name, caffe2_model_dir))
onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model(
init_net=caffe2_init_proto,
predict_net=caffe2_predict_proto,
value_info=value_info
)
return onnx_model, caffe2_init_proto, caffe2_predict_proto
def tensortype_to_ndarray(tensor_type):
shape = []
for dim in tensor_type.shape.dim:
shape.append(dim.dim_value)
if tensor_type.elem_type == onnx.TensorProto.FLOAT:
type = np.float32
    elif tensor_type.elem_type == onnx.TensorProto.INT32:
        type = np.int32
    else:
        raise ValueError('Unsupported tensor element type: {}'.format(tensor_type.elem_type))
array = np.random.rand(*shape).astype(type)
return array
def generate_test_input_data(onnx_model, scale):
real_inputs_names = list(set([input.name for input in onnx_model.graph.input]) - set([init.name for init in onnx_model.graph.initializer]))
real_inputs = []
for name in real_inputs_names:
for input in onnx_model.graph.input:
if name == input.name:
real_inputs.append(input)
test_inputs = []
for input in real_inputs:
ndarray = tensortype_to_ndarray(input.type.tensor_type)
test_inputs.append((input.name, ndarray * scale))
return test_inputs
def generate_test_output_data(caffe2_init_net, caffe2_predict_net, inputs):
p = c2_workspace.Predictor(caffe2_init_net, caffe2_predict_net)
inputs_map = {input[0]:input[1] for input in inputs}
output = p.run(inputs_map)
c2_workspace.ResetWorkspace()
return output
def onnx_verify(onnx_model, inputs, ref_outputs):
prepared = caffe2.python.onnx.backend.prepare(onnx_model)
onnx_inputs = []
for input in inputs:
if isinstance(input, tuple):
onnx_inputs.append(input[1])
else:
onnx_inputs.append(input)
onnx_outputs = prepared.run(inputs=onnx_inputs)
np.testing.assert_almost_equal(onnx_outputs, ref_outputs, decimal=3)
model_mapping = {
'bvlc_alexnet': 'bvlc_alexnet',
'bvlc_googlenet': 'bvlc_googlenet',
'bvlc_reference_caffenet': 'bvlc_reference_caffenet',
'bvlc_reference_rcnn_ilsvrc13': 'bvlc_reference_rcnn_ilsvrc13',
'densenet121': 'densenet121',
#'finetune_flickr_style': 'finetune_flickr_style',
'inception_v1': 'inception_v1',
'inception_v2': 'inception_v2',
'resnet50': 'resnet50',
'shufflenet': 'shufflenet',
'squeezenet': 'squeezenet_old',
#'vgg16': 'vgg16',
'vgg19': 'vgg19',
'zfnet512': 'zfnet512',
}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Update the ONNX models.')
parser.add_argument('-v', action="store_true", default=False, help="verbose")
parser.add_argument("--local-dir", type=str, default=os.path.expanduser('~'),
help="local dir to store Caffe2 and ONNX models")
parser.add_argument("--no-cache", action="store_true", default=False,
help="whether use local ONNX models")
parser.add_argument('--clean-test-data', action="store_true", default=False,
help="remove the old test data")
parser.add_argument('--add-test-data', type=int, default=0,
help="add new test data")
parser.add_argument('--only-local', action="store_true", default=False,
help="no upload including backup")
args = parser.parse_args()
delete_test_data = args.clean_test_data
add_test_data = args.add_test_data
use_cache = not args.no_cache
only_local = args.only_local
root_dir = args.local_dir
caffe2_zoo_dir = os.path.join(root_dir, ".caffe2", "models")
onnx_zoo_dir = os.path.join(root_dir, ".onnx", "models")
for onnx_model_name in model_mapping:
c2_model_name = model_mapping[onnx_model_name]
print('####### Processing ONNX model {} ({} in Caffe2) #######'.format(onnx_model_name, c2_model_name))
download_caffe2_model(c2_model_name, caffe2_zoo_dir, use_cache=use_cache)
download_onnx_model(onnx_model_name, onnx_zoo_dir, use_cache=use_cache, only_local=only_local)
onnx_model_dir = os.path.join(onnx_zoo_dir, onnx_model_name)
if delete_test_data:
print('Deleting all the existing test data...')
# NB: For now, we don't delete the npz files.
#for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')):
# os.remove(f)
for f in glob.glob(os.path.join(onnx_model_dir, 'test_data_set*')):
shutil.rmtree(f)
onnx_model, c2_init_net, c2_predict_net = caffe2_to_onnx(c2_model_name, os.path.join(caffe2_zoo_dir, c2_model_name))
        print('Deleting old ONNX {} model...'.format(onnx_model_name))
        for f in glob.glob(os.path.join(onnx_model_dir, 'model*')):
os.remove(f)
print('Serializing generated ONNX {} model ...'.format(onnx_model_name))
with open(os.path.join(onnx_model_dir, 'model.onnx'), 'wb') as file:
file.write(onnx_model.SerializeToString())
print('Verifying model {} with ONNX model checker...'.format(onnx_model_name))
onnx.checker.check_model(onnx_model)
total_existing_data_set = 0
print('Verifying model {} with existing test data...'.format(onnx_model_name))
for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')):
test_data = np.load(f, encoding='bytes')
inputs = list(test_data['inputs'])
ref_outputs = list(test_data['outputs'])
onnx_verify(onnx_model, inputs, ref_outputs)
total_existing_data_set += 1
for f in glob.glob(os.path.join(onnx_model_dir, 'test_data_set*')):
inputs = []
inputs_num = len(glob.glob(os.path.join(f, 'input_*.pb')))
for i in range(inputs_num):
tensor = onnx.TensorProto()
with open(os.path.join(f, 'input_{}.pb'.format(i)), 'rb') as pf:
tensor.ParseFromString(pf.read())
inputs.append(numpy_helper.to_array(tensor))
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(f, 'output_*.pb')))
for i in range(ref_outputs_num):
tensor = onnx.TensorProto()
with open(os.path.join(f, 'output_{}.pb'.format(i)), 'rb') as pf:
tensor.ParseFromString(pf.read())
ref_outputs.append(numpy_helper.to_array(tensor))
onnx_verify(onnx_model, inputs, ref_outputs)
total_existing_data_set += 1
starting_index = 0
while os.path.exists(os.path.join(onnx_model_dir, 'test_data_set_{}'.format(starting_index))):
starting_index += 1
if total_existing_data_set == 0 and add_test_data == 0:
add_test_data = 3
total_existing_data_set = 3
print('Generating {} sets of new test data...'.format(add_test_data))
for i in range(starting_index, add_test_data + starting_index):
data_dir = os.path.join(onnx_model_dir, 'test_data_set_{}'.format(i))
os.makedirs(data_dir)
inputs = generate_test_input_data(onnx_model, 255)
ref_outputs = generate_test_output_data(c2_init_net, c2_predict_net, inputs)
onnx_verify(onnx_model, inputs, ref_outputs)
for index, input in enumerate(inputs):
tensor = numpy_helper.from_array(input[1])
with open(os.path.join(data_dir, 'input_{}.pb'.format(index)), 'wb') as file:
file.write(tensor.SerializeToString())
for index, output in enumerate(ref_outputs):
tensor = numpy_helper.from_array(output)
with open(os.path.join(data_dir, 'output_{}.pb'.format(index)), 'wb') as file:
file.write(tensor.SerializeToString())
del onnx_model
del c2_init_net
del c2_predict_net
upload_onnx_model(onnx_model_name, onnx_zoo_dir, backup=False, only_local=only_local)
print('\n\n')
|
python
|
# Copyright (c) 2018 Graphcore Ltd. All rights reserved.
# This script is run by the release agent to create a release of PopTorch
def install_release(release_utils, release_id, snapshot_id, version_str):
# Tag must contain the string 'poptorch' to keep it unique.
tag = "{}-poptorch".format(version_str)
release_utils.log.info('Tagging poptorch release ' + tag)
# Create the release on the document server.
release_utils.create_document_release(snapshot_id)
# Tag the view repository with the release.
release_utils.tag_view_repo(
'ssh://[email protected]/diffusion/' \
+ 'POPONNXVIEW/poponnxview.git',
snapshot_id,
release_id,
tag)
# Increment the point version number.
release_utils.increment_version_point(
'ssh://[email protected]/diffusion/' \
+ 'POPTORCH/poptorch.git')
|
python
|
import json
import pytest
from ermaket.api.database import DBConn
from ermaket.api.scripts import ScriptManager
from ermaket.api.system.hierarchy import Activation, Trigger, Triggers
CHAIN_ID = 1
TEAPOT_ID = 2
ADD_ID = 3
ADD2_ID = 4
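# Script ids used by the fixtures below: CHAIN_ID alternates between two steps on
# successive calls, TEAPOT_ID aborts the request with HTTP 418, and ADD_ID /
# ADD2_ID attach extra fields to the response's businessLogic payload.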
def login(client, user):
return client.post(
'/auth/login', data={
"login": user.login,
"password": user.password
}
)
@pytest.mark.usefixtures("client", "test_db")
def test_login(test_db, client):
assert not client.get('/auth/current').json['ok']
assert not client.post('/auth/logout').json['ok']
response = login(client, test_db.admin_user)
assert response.json["ok"]
response = client.get('/auth/current')
assert response.json['user']['login'] == test_db.admin_user.login
hierarchy = response.json['hierarchy']
assert hierarchy
assert len(next(iter(hierarchy['hierarchy']))['children']) > 0
rights = response.json['rights']
assert rights
profile_forms = response.json['profile_forms']
assert len(profile_forms) > 0
response = client.post('/auth/logout')
assert response.json['ok']
assert not client.get('/auth/current').json['ok']
assert not client.post('/auth/logout').json['ok']
@pytest.mark.usefixtures("client", "test_db")
def test_password(test_db, client):
assert login(client, test_db.admin_user).json["ok"]
assert not client.put(
'/auth/password',
data={
"old_pass": "Surely wrong password, noone would ever set this",
"new_pass": "1234567890"
}
).json['ok']
client.post('/auth/logout')
assert login(client, test_db.admin_user).json["ok"]
assert client.put(
'/auth/password',
data={
"old_pass": test_db.admin_user.password,
"new_pass": "1234567890"
}
).json["ok"]
client.post('/auth/logout')
assert not login(client, test_db.admin_user).json["ok"]
assert client.post(
'/auth/login',
data={
"login": test_db.admin_user.login,
"password": "1234567890"
}
).json["ok"]
assert client.put(
'/auth/password',
data={
"old_pass": "1234567890",
"new_pass": test_db.admin_user.password
}
).json["ok"]
client.post('/auth/logout')
def print_roles(test_db):
from ermaket.api.database import DBConn
normal, admin = test_db.normal_user.user, test_db.admin_user.user
with DBConn.get_session() as db:
db.add(normal)
db.add(admin)
print(f'Normal roles: {normal.role_names}')
print(f'Admin roles: {admin.role_names}')
@pytest.mark.usefixtures("client", "test_db")
def test_get(client, test_db):
schema = test_db.schema
assert client.get('/tables/foo/bar').status_code == 404
name = test_db.model.__table__.name
table_url = f'/tables/table/{schema}/{name}'
entry_url = f'/tables/entry/{schema}/{name}'
assert client.get(table_url).status_code == 401
assert client.get(entry_url).status_code == 401
login(client, test_db.admin_user)
response = client.get(table_url)
assert response.status_code == 200
assert len(response.json) > 0
response = client.get(entry_url)
assert response.status_code == 200
assert response.json
client.post('/auth/logout')
login(client, test_db.normal_user)
# assert client.get(table_url).status_code == 403
# assert client.get(entry_url).status_code == 403
client.post('/auth/logout')
@pytest.mark.usefixtures("client", "test_db")
def test_transaction(client, test_db):
model = test_db.model
entry = test_db.entry
with DBConn.get_session() as db:
item = db.query(model).first()
data = model.__marshmallow__().dump(item)
key = data[entry.pk.rowName]
transaction = {entry.id: {'delete': {key: True}}}
login(client, test_db.admin_user)
response = client.post(
'/transaction/execute',
data=json.dumps({'transaction': transaction}),
content_type='application/json'
)
assert response.status_code == 200
@pytest.mark.usefixtures("client", "test_db")
def test_sql(client, test_db):
schema, table = test_db.schema, test_db.entry.tableName
query = f'SELECT * FROM {schema}.{table}'
login(client, test_db.admin_user)
response = client.post(
'/sql/execute',
data=json.dumps({'query': query}),
content_type='application/json'
)
assert response.status_code == 200
assert len(response.json["result"][0]) == len(response.json["keys"])
@pytest.mark.usefixtures("client", "test_db")
def test_call_script(client, test_db):
login(client, test_db.admin_user)
response = client.post(
f"/scripts/execute/{CHAIN_ID}",
data=json.dumps({'activation': 'call'}),
content_type='application/json'
)
assert response.status_code == 200
assert response.json['businessLogic']['done'] == 'step_1'
response = client.post(
f"/scripts/execute/{CHAIN_ID}",
data=json.dumps({'activation': 'call'}),
content_type='application/json'
)
assert response.status_code == 200
assert response.json['businessLogic']['done'] == 'step_2'
response = client.post(
f"/scripts/execute/{CHAIN_ID}",
data=json.dumps({'activation': 'call'}),
content_type='application/json'
)
assert response.status_code == 200
assert response.json['businessLogic']['done'] == 'step_1'
@pytest.mark.usefixtures("client", "test_db")
def test_abort_request(client, test_db):
test_db.entry.triggerList = Triggers([Trigger(Activation.READ, TEAPOT_ID)])
login(client, test_db.admin_user)
schema, name = test_db.entry.schema, test_db.entry.tableName
table_url = f'/tables/table/{schema}/{name}'
entry_url = f'/tables/entry/{schema}/{name}'
assert client.get(table_url).status_code == 418
assert client.get(entry_url).status_code == 418
mgr = ScriptManager()
mgr.global_triggers.append(Trigger(Activation.TRANSACTION, TEAPOT_ID))
model = test_db.model
entry = test_db.entry
with DBConn.get_session() as db:
item = db.query(model).first()
data = model.__marshmallow__().dump(item)
key = data[entry.pk.rowName]
transaction = {entry.id: {'delete': {key: True}}}
response = client.post(
'/transaction/execute',
data=json.dumps({'transaction': transaction}),
content_type='application/json'
)
assert response.status_code == 418
client.post('/auth/logout')
mgr.global_triggers.append(Trigger(Activation.LOGIN, TEAPOT_ID))
response = client.post(
'/auth/login',
data={
"login": test_db.admin_user.login,
"password": test_db.admin_user.password
}
)
assert response.status_code == 418
mgr.global_triggers = Triggers([])
@pytest.mark.usefixtures("client", "test_db")
def test_add_info(client, test_db):
mgr = ScriptManager()
mgr.global_triggers.append(Trigger(Activation.LOGIN, ADD_ID))
response = client.post(
'/auth/login',
data={
"login": test_db.admin_user.login,
"password": test_db.admin_user.password
}
)
assert response.status_code == 200
assert response.json['businessLogic']['data'] == "EXAMPLE_DATA"
client.post('/auth/logout')
mgr.global_triggers.append(Trigger(Activation.LOGIN, ADD2_ID))
response = client.post(
'/auth/login',
data={
"login": test_db.admin_user.login,
"password": test_db.admin_user.password
}
)
assert response.status_code == 200
assert response.json['businessLogic']['data'] == "EXAMPLE_DATA"
assert response.json['businessLogic']['data2'] == "EXAMPLE_DATA2"
mgr.global_triggers = Triggers([])
@pytest.mark.usefixtures("client", "test_db")
def test_register(client, test_db):
login(client, test_db.admin_user)
token_simple = client.post(
'/auth/register_token', data={
'name': 'test',
}
).json['token']
token_admin = client.post(
'/auth/register_token',
data=json.dumps({
'name': 'test',
'roles': ['admin']
}),
content_type='application/json'
).json['token']
assert token_admin is not None
assert token_simple is not None
client.post('/auth/logout')
assert client.post(
'/auth/register',
data={
'token': token_simple,
'login': 'manager_1',
'password': '12345'
}
).json['ok']
assert client.get('/auth/current').json['ok']
client.post('/auth/logout')
assert client.post(
'/auth/login', data={
'login': 'manager_1',
'password': '12345'
}
).json['ok']
client.post('/auth/logout')
assert client.post(
'/auth/register',
data={
'token': token_admin,
'login': 'manager_2',
'password': '12345'
}
).json['ok']
assert 'admin' in client.get('/auth/current').json['user']['roles']
client.post('/auth/logout')
@pytest.mark.usefixtures("client", "test_db")
def test_reset_password(client, test_db):
login(client, test_db.admin_user)
token_simple = client.post(
'/auth/register_token', data={
'name': 'test',
}
).json['token']
assert client.post(
'/auth/register',
data={
'token': token_simple,
'login': 'manager_10',
'password': '12345'
}
).json['ok']
assert client.get('/auth/current').json['ok']
client.post('/auth/logout')
login(client, test_db.admin_user)
token_reset = client.post(
'/auth/reset_password_token',
data={
'name': 'test',
'login': 'manager_10'
}
).json['token']
assert token_reset is not None
client.post('/auth/logout')
assert client.put(
'/auth/reset_password',
data={
'token': token_reset,
'login': 'manager_10',
'password': '54321'
}
).json['ok']
assert client.post(
'/auth/login', data={
'login': 'manager_10',
'password': '54321'
}
)
client.post('/auth/logout')
|
python
|
from django.conf.urls import url
from django.views.generic.base import RedirectView
from tests.views import foo, foo_api
urlpatterns = [
url(r"^api/foo/$", foo_api),
url(r"^foo/$", foo),
url(r"^bar/$", RedirectView.as_view(url="/foo/", permanent=False)),
]
|
python
|
from math import sqrt
from django.test import TestCase
from ..models import InstructionSet
class InstructionSetModelTest(TestCase):
def setUp(self):
self.test_set = InstructionSet.objects.create(up=1, down=3, left=5, right=7)
def test_model_fields(self):
self.assertEqual(self.test_set.up, 1)
self.assertEqual(self.test_set.down, 3)
self.assertEqual(self.test_set.left, 5)
self.assertEqual(self.test_set.right, 7)
def test_str_method(self):
expected_object_name = f'Instruction Set #{self.test_set.id}'
self.assertEqual(str(self.test_set), expected_object_name)
def test_get_distance(self):
x_dir = self.test_set.up - self.test_set.down
y_dir = self.test_set.left - self.test_set.right
dist = round(sqrt(x_dir**2 + y_dir**2), 1)
self.assertEqual(self.test_set.get_euclidean_dist(), dist)
def test_get_absolute_url(self):
self.assertEqual(self.test_set.get_absolute_url(), '/instructions/1/')
def test_get_num_of_instructions(self):
self.assertEqual(self.test_set.get_num_of_instructions(), 4)
def tearDown(self):
self.test_set.delete()
|
python
|
from .site import SiteCommand
from .platform import PlatformCommand
from .system import SystemCommand
AVAILABLE_COMMANDS = [SiteCommand(), PlatformCommand(), SystemCommand()]
|
python
|
# Generated by Django 2.0.8 on 2018-08-25 09:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('information_pages', '0004_auto_20180825_1114'),
]
operations = [
migrations.AlterModelOptions(
name='informationdocument',
options={'base_manager_name': 'objects', 'permissions': (('view_informationdocument', 'User/Group is allowed to view that document'),), 'verbose_name': 'Information document', 'verbose_name_plural': 'Information documents'},
),
]
|
python
|
"""
This script is used to construct, test and analyze the various variants
for digit addition and subtraction.
"""
import re
import sys
import subprocess
import colorama
# snippet construction
def define_all():
global ADD_09_09, ADD_09_90, ADD_90_09, ADD_90_90
global SUB_09_09, SUB_90_09, SUB_09_90, SUB_90_90
ADD_09_09 = r'''
s/(..)/\1;0123456789;0123456789/
s/(.)(.);\d*(\1\d*);(\d*(\2\d*))/\3\5\4/
s/.{10}(.)\d{0,9}(\d{0,1})\d*/1\1\2/
s/1\d(\d)/0\1/
'''
ADD_09_90 = r'''
s/(..)/\1;0123456789;9876543210/
s/(.)(.);(\d*)\1\d*;(\d*(\2\d*))/\3\5\4/
s/.{10}(.)\d{0,9}(\d{0,1})\d*/0\1\2/
s/0\d(\d)/1\1/
'''
ADD_90_09 = reverse1(ADD_09_09)
ADD_90_90 = reverse1(ADD_09_90)
SUB_09_09 = reverse2(permute(ADD_09_90))
SUB_90_09 = reverse2(permute(ADD_90_90))
SUB_09_90 = reverse2(permute(ADD_09_09))
SUB_90_90 = reverse2(permute(ADD_90_09))
def permute(snippet):
"""Permute snippet arguments:
a + b --> b + a
a - b --> b - a
Exchange \1 and \2 in s/(.)(.);\d*(\1\d*);(\d*(\2\d*))/\3\5\4/
"""
lines = snippet.splitlines()
lines[2] = re.sub(r'(\\[12])(.*)(\\[12])', r'\3\2\1', lines[2])
return '\n'.join(lines)
def reverse1(snippet):
"""Reverse first sequence.
This does not change the calculated value.
"""
text = snippet
if '0123456789;' in text:
text = text.replace('0123456789;', '9876543210;')
text = text.replace(r';\d*(\1\d*);', r';(\d*\1)\d*;') # [a9] to [9a] len = 10 - a
text = text.replace(r';(\d*)\1\d*;', r';\d*\1(\d*);') # [0a[ to ]a0] len = a
else:
text = text.replace('9876543210;', '0123456789;')
text = text.replace(r';(\d*\1)\d*;', r';\d*(\1\d*);') # [9a] to [a9] len = 10 - a
text = text.replace(r';\d*\1(\d*);', r';(\d*)\1\d*;') # ]a0] to [0a[ len = a
return text
def reverse2(snippet):
"""Reverse second sequence.
Change a + b to b - a and a - b or b - a to a + b.
"""
text = snippet
if '0123456789/' in text:
text = text.replace('0123456789/', '9876543210/')
else:
text = text.replace('9876543210/', '0123456789/')
return text
# testing
def testdeck_add():
inplist = list()
outlist = list()
for a in range(10):
for b in range(10):
inplist.append('%d%d' % (a, b))
outlist.append('%02d' % (a + b))
return inplist, outlist
def testdeck_sub():
inplist = list()
outlist = list()
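    # Expected output encodes a borrow flag followed by the result digit:
    # '0d' with d = a - b when no borrow is needed, '1d' with d = 10 + a - b otherwise.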
for a in range(10):
for b in range(10):
inplist.append('%d%d' % (a, b))
if b <= a:
x, y = 0, a - b
else:
x, y = 1, 10 + a - b
outlist.append('%d%d' % (x, y))
return inplist, outlist
def runtest(descr, snippet, inplist, outlist):
""" Run a test.
snippet is the snippet to test
inplist is the input of the snippet
outlist is the expected result
"""
snippet = snippet.replace(r'\d', '[0-9]')
with open('tmp.sed', 'w') as f:
print(snippet, file=f)
with open('tmp.input', 'w') as f:
for line in inplist:
print(line, file=f)
com = 'sed -r -f %s %s' % ('tmp.sed', 'tmp.input')
res = subprocess.check_output(com).decode('ascii').splitlines()
if res == outlist:
print('%-15s %s' % (descr, 'OK'))
else:
print('%-15s %s' % (descr, 'fail'))
for inp, out, resline in zip(inplist, outlist, res):
if out != resline:
print('%-8s %-8s %-8s' % (inp, out, resline))
def test_all():
runtest('ADD_09_09', ADD_09_09, *testdeck_add())
runtest('ADD_90_09', ADD_90_09, *testdeck_add())
runtest('ADD_09_90', ADD_09_90, *testdeck_add())
runtest('ADD_90_90', ADD_90_90, *testdeck_add())
runtest('SUB_09_09', SUB_09_09, *testdeck_sub())
runtest('SUB_90_09', SUB_90_09, *testdeck_sub())
runtest('SUB_09_90', SUB_09_90, *testdeck_sub())
runtest('SUB_90_90', SUB_90_90, *testdeck_sub())
# colors
def colorize(snippet):
snippet = snippet.splitlines()
line1 = snippet[1] # s/(..)/\1;0123456789;0123456789/
line2 = snippet[2] # s/(.)(.);\d*(\2\d*);(\d*(\1\d*))/\3\5\4/
line3 = snippet[3] # s/.{10}(.)\d{0,9}(\d{0,1})\d*/1\2\1/
line1pat, line1sub = line1.split(r'/')[1:3]
line2pat, line2sub = line2.split(r'/')[1:3]
line3pat, line3sub = line3.split(r'/')[1:3]
for i in range(10):
for j in range(10):
s = '%d%d' % (i, j)
s = re.sub(line1pat, line1sub, s)
m = re.match(line2pat, s)
colors = 'A' * (m.end(3) - m.start(3)) + 'B' * (m.end(5) - m.start(5)) + 'C' * (m.end(4) - m.start(4))
s = re.sub(line2pat, line2sub, s)
m = re.match(line3pat, s)
index1 = m.start(1)
index2 = m.start(2)
print('%d%d' % (i, j), colored_string(s, colors, index1, index2))
def colored_string(s, color, index1, index2):
colors = {
'A': colorama.Fore.BLUE,
'B': colorama.Fore.GREEN,
'C': colorama.Fore.MAGENTA
}
colored = ''
for index, (dig, col) in enumerate(zip(s, color)):
colored_digit = colors[col] + dig + colorama.Fore.RESET
if index in (index1, index2):
colored_digit = colorama.Back.YELLOW + colored_digit + colorama.Back.RESET
colored += colored_digit
return colored
# main
def main():
colorama.init()
define_all()
nargs = len(sys.argv) - 1
if nargs == 2 and sys.argv[1] == 'trace':
snippet = globals()[sys.argv[2]]
print(snippet)
elif nargs == 1 and sys.argv[1] == 'test':
test_all()
elif nargs == 2 and sys.argv[1] == 'colorize':
snippet = globals()[sys.argv[2]]
colorize(snippet)
else:
s = 'ADD_09_09|ADD_09_90|ADD_90_09|ADD_90_90|SUB_09_09|SUB_90_09|SUB_09_90|SUB_90_90'
print('$ test-add-sub.py trace', s)
print('$ test-add-sub.py test')
print('$ test-add-sub.py colorize', s)
main()
|
python
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
AutoGIS lesson 2 geopandas
This is a temporary script file.
"""
import geopandas as gpd
# set filepath
fp = r'F:\GS\harrisab2\S18\GeoViz\autoGIS_2\Data\DAMSELFISH_distributions.shp'
# read file using gpd.read_file()
data = gpd.read_file(fp)
# write first 50 rows to a new shapefile
out = r'F:\GS\harrisab2\S18\GeoViz\autoGIS_2\Data\DAMSELFISH_distributions_SELECTION.shp'
# select first 50 rows
selection = data[0:50]
# write above rows into a new Shapefile (default output file format for geopandas)
selection.to_file(out)
# Select only specific columns with []
data['geometry'].head()
# make a selection of first five rows
selection = data[0:5]
# Iterate over the selected rows using .iterrows() MUST USE ITERROWS TO ITERATE OVER DATAFRAME
for index, row in selection.iterrows():
# calculate area for row geometry only
poly_area = row['geometry'].area
    # {0} and {1:.2f} are .format placeholders
# curly brackets take .format inputs and print them
print('Polygon area at index {0} is: {1:.2f}.'.format(index, poly_area))
# create a new column of individual polygon areas
selection['area'] = selection.area
# find maximum, mean or minimum area
#max_area = selection['area'].max()
#mean_area = selection['area'].mean()
#min_area = selection['area'].min()
# make a geodataframe from scratch
# import necessary modules
from shapely.geometry import Point, Polygon
from fiona.crs import from_epsg
# create empty geodataframe and geometry column
newdata = gpd.GeoDataFrame()
newdata['geometry'] = None
# add coordinates and create polygon from coordinate-tuple list
coordinates = [(24.950899, 60.169158), (24.953492, 60.169158), (24.953510, 60.170104), (24.950958, 60.169990)]
poly = Polygon(coordinates)
# insert polygom data into geodataFrame using .loc
newdata.loc[0, 'geometry'] = poly
# add description
newdata.loc[0, 'location'] = 'Senaatintori'
# specify projection for newdata
newdata.crs = from_epsg(4326)
# export the data
outfp = r'F:\GS\harrisab2\S18\GeoViz\autoGIS_2'
# write data into new shapefie
newdata.to_file(outfp)
# HOW TO SAVE MULTIPLE SHAPEFILES
# Use .groupby() to group column BINOMIAL
grouped = data.groupby('BINOMIAL')
# output folder for multiple shapefiles
groupfp = r'F:\GS\harrisab2\S18\GeoViz\autoGIS_2/fishFolder'
# import os to parse
import os
# iterate over the dataframe (key = fish name) rows = all rows with that fish name
for key, rows in grouped:
# create output with {0} (start at first index) and replace blank space with underscore
output_name = "{0}.shp".format(key.replace(' ', '_'))
# create a new folder in the groupfp file path
output_path = os.path.join(groupfp, output_name)
rows.to_file(output_path)
|
python
|
# -*- coding: utf-8 -*-
from __future__ import division
from pyevolve import G1DList, GSimpleGA, Selectors, Statistics
from pyevolve import Initializators, Mutators, Consts, DBAdapters
import os
from math import log, log1p, exp
from pyevolve import G1DList
from pyevolve import GSimpleGA
from pyevolve import Selectors
from pyevolve import Statistics
from pyevolve import DBAdapters
import pandas as pd
import numpy as np
#define reLu function
def reLu(number):
return (0 if number < 0 else number)
#os.chdir("/home/jana/Documents/")
#evaluation functions for genetic algorithm
cowsGen = 5000
#the file with information about relatedness between herds
HerdsA = pd.read_csv('/home/jana/Documents/PhD/CompBio/RefADF_mean.csv')
#the file with information about relatedness of herds with test population
NapA = pd.read_csv('/home/jana/Documents/PhD/CompBio/NapADF_mean.csv')
##the file with the relatedness of test population with bulls for reference (training population)
PbA = pd.read_csv('/home/jana/Documents/PhD/CompBio/PbADF_mean.csv')
#the file containing the number of animals in each herd
HerdsAnim = pd.read_table("/home/jana/Documents/PhD/CompBio/HerdNo.txt", sep=" ")
#fitness function
def eval_func(chromosome):
#how many animals from the herds are chosen
NoAnimals = sum([no for (chrom, no) in zip (chromosome, HerdsAnim.NoAnim) if chrom == 1])
#which are the chosen herds (herds 1 - 100)
chosenHerds = [herd for (chrom, herd) in zip (chromosome, HerdsAnim.Herd) if chrom == 1]
#calculate the relatedness between the animals within the chosen cow herds for reference population (training population) [list]
withinA = []
for index, vals in HerdsA.iterrows():
if (int(vals.Herd1) in chosenHerds) and (int(vals.Herd2) in chosenHerds):
withinA.append(vals.A)
#the relatedness of the animals in chosen cow herds with reference bulls [list]
withPb = (PbA.A[PbA.Herd.isin(chosenHerds)])
#the relatedness of the chosen cows with animals in the testing population
withNap = (NapA.A[NapA.Herd.isin(chosenHerds)])
#mean relatedness within the reference (training) population
within = np.mean(list(withPb) + list(withinA))
#mean relatedness between the reference and testing population
between = np.mean (withNap)
#compute the score considering the relatedness and also the number of animals in the reference
score = (reLu(between - within) * 10000) **2
penalty = [-score if (NoAnimals > 1.5*cowsGen or NoAnimals < 0.85*cowsGen) else 0]
return score+penalty[0]
# Genome instance
genome = G1DList.G1DList(100) #chromosome is a list with 100 elements (one for each herd)
genome.setParams(rangemin=0, rangemax=1) #allowed values are 0 and 1
# The evaluator function (objective function)
genome.evaluator.set(eval_func) #evaluate chromosomes with the chosen fitness function
#genome.evaluator.add(eval_func2)
genome.mutator.set(Mutators.G1DListMutatorIntegerBinary) #mutate 0 and 1
# Genetic Algorithm Instance
ga = GSimpleGA.GSimpleGA(genome)
ga.setGenerations(900) #set the number of generations
ga.selector.set(Selectors.GTournamentSelector) #set the rule for parent selection
ga.setMutationRate(0.01) #set mutation rate
ga.setCrossoverRate(0.001) #set cross-over rate
ga.setPopulationSize(50) #set population size
#ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
# Create DB Adapter and set as adapter
sqlite_adapter = DBAdapters.DBSQLite(identify="TestHerdsGen") #this is to write the evolution to a file
ga.setDBAdapter(sqlite_adapter)
# Using CSV Adapter
#csvfile_adapter = DBAdapters.DBFileCSV()
#ga.setDBAdapter(csvfile_adapter)
# Using the URL Post Adapter
# urlpost_adapter = DBAdapters.DBURLPost(url="http://whatismyip.oceanus.ro/server_variables.php", post=False)
# ga.setDBAdapter(urlpost_adapter)
# Do the evolution, with stats dump
# frequency of 10 generations
ga.evolve(freq_stats=10)
# Best individual
print ga.bestIndividual()
|
python
|
import datetime
import random
import matplotlib.pyplot as plt
from graphpkg.live.graph import LiveTrend,LiveScatter
# plt.style.use('')
count1 = 0
cluster = 0.30
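# Each data callback below returns a new x value plus a list of y values; the
# live graphs poll their callback on the configured interval and append the points.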
def func1():
global count1
count1 += 1
return datetime.datetime.now(), [random.randrange(1, 10) + count1,random.randrange(1,10)+ count1]
def func2():
global cluster
return random.randrange(1, 100), [random.randrange(1, 10000) * cluster, random.randrange(1, 100), random.randrange(1, 100)]
def func3(*args):
return random.randrange(1, args[0]), [random.randrange(1, args[0]), random.randrange(1, 100)]
if __name__ == "__main__":
fig = plt.figure()
trend1 = LiveTrend(
fig=fig,
fig_spec=(3,3,(1,2)),
func_for_data=func1,
interval=500,
title="trend plot"
)
trend1.start()
trend2 = LiveTrend(
fig=fig,
fig_spec=(3, 3, (4, 5)),
func_for_data=func1,
interval=500,
title="other trend plot"
)
trend2.start()
trend3 = LiveTrend(
fig=fig,
fig_spec=(3, 3, (7, 8)),
func_for_data=func1,
interval=500,
title="other other trend plot"
)
trend3.start()
scatter1 = LiveScatter(
fig = fig,
fig_spec=(3,3,3),
func_for_data=func2,
interval=500,
title="some scatter plot",
window=1000
)
scatter1.start()
scatter2 = LiveScatter(
fig=fig,
fig_spec=(3, 3, 6),
func_for_data=func3,
func_args=(1000,),
interval=1000,
title="other scatter plot",
window=500
)
scatter2.start()
scatter3 = LiveScatter(
fig=fig,
fig_spec=(3, 3, 9),
func_for_data=func3,
func_args=(1000,),
interval=1000,
title="other other scatter plot",
window=500
)
scatter3.start()
fig.canvas.set_window_title("dashboard")
plt.show()
|
python
|
'''
Created on 2016/9/20
:author: hubo
'''
from __future__ import print_function
from vlcp.server import main
from vlcp.event import Client
from vlcp.server.module import Module
from vlcp.config import defaultconfig
from vlcp.protocol.zookeeper import ZooKeeper, ZooKeeperConnectionStateEvent,\
ZooKeeperWatcherEvent
import vlcp.utils.zookeeper as zk
from vlcp.event.runnable import RoutineContainer
from namedstruct import dump as _dump
#from pprint import pprint
import json
from vlcp.event.event import M_
def dump(value):
return _dump(value, tostr=True)
def pprint(v):
print(json.dumps(v, indent=2))
@defaultconfig
class TestModule(Module):
_default_url = 'tcp://localhost/'
_default_sessiontimeout = 30
def __init__(self, server):
Module.__init__(self, server)
self.protocol = ZooKeeper()
self.client = Client(self.url, self.protocol, self.scheduler)
self.connections.append(self.client)
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self.main
self.routines.append(self.apiroutine)
async def watcher(self):
watcher = ZooKeeperWatcherEvent.createMatcher(connection = self.client)
while True:
ev = await watcher
print('WatcherEvent: %r' % (dump(ev.message),))
async def main(self):
self.apiroutine.subroutine(self.watcher(), False, daemon = True)
up = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.UP, self.client)
notconn = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.NOTCONNECTED, self.client)
_, m = await M_(up, notconn)
if m is notconn:
print('Not connected')
return
else:
print('Connection is up: %r' % (self.client,))
# Handshake
await self.protocol.handshake(self.client, zk.ConnectRequest(
timeOut = int(self.sessiontimeout * 1000),
passwd = b'\x00' * 16, # Why is it necessary...
), self.apiroutine, [])
result = await self.protocol.requests(self.client, [zk.create(b'/vlcptest', b'test'),
zk.getdata(b'/vlcptest', True)], self.apiroutine)
pprint(dump(result[0]))
await self.apiroutine.wait_with_timeout(0.2)
result = await self.protocol.requests(self.client, [zk.delete(b'/vlcptest'),
zk.getdata(b'/vlcptest', watch = True)], self.apiroutine)
pprint(dump(result[0]))
result = await self.protocol.requests(self.client, [zk.multi(
zk.multi_create(b'/vlcptest2', b'test'),
zk.multi_create(b'/vlcptest2/subtest', 'test2')
),
zk.getchildren2(b'/vlcptest2', True)], self.apiroutine)
pprint(dump(result[0]))
result = await self.protocol.requests(self.client, [zk.multi(
zk.multi_delete(b'/vlcptest2/subtest'),
zk.multi_delete(b'/vlcptest2')),
zk.getchildren2(b'/vlcptest2', True)], self.apiroutine)
pprint(dump(result[0]))
if __name__ == '__main__':
main()
|
python
|
from mbdata import models
from sqlalchemy import inspect
from sqlalchemy.orm.session import object_session
ENTITY_TYPES = {
'artist': models.Artist,
'label': models.Label,
'place': models.Place,
'release_group': models.ReleaseGroup,
'release': models.Release,
'url': models.URL,
'work': models.Work,
}
def get_entity_type_model(type):
return ENTITY_TYPES[type]
def get_link_model(entity0, entity1):
names = sorted([entity0.__name__, entity1.__name__])
assert all(hasattr(models, name) for name in names)
return getattr(models, 'Link{0}{1}'.format(*names))
def get_link_target(link, source):
model = inspect(link).mapper.class_
source_model = inspect(source).mapper.class_
if source_model != model.entity0.property.mapper.class_:
return link.entity0
if source_model != model.entity1.property.mapper.class_:
return link.entity1
if source.id != link.entity0_id:
return link.entity0
if source.id != link.entity1_id:
return link.entity1
def query_links(obj, target_model):
session = object_session(obj)
model = get_link_model(inspect(obj).mapper.class_, target_model)
query = session.query(model)
if isinstance(obj, model.entity0.property.mapper.class_):
query = query.filter_by(entity0=obj)
if isinstance(obj, model.entity1.property.mapper.class_):
query = query.filter_by(entity1=obj)
return query
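
# Usage sketch (hedged, not part of the original module): the link model name is
# built from the two entity class names in sorted order, so argument order does
# not matter, e.g.
#   get_link_model(models.Release, models.Artist)  # -> models.LinkArtistRelease
#   get_link_model(models.Artist, models.Release)  # -> models.LinkArtistRelease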
|
python
|
import enum
class Encrypting:
    def __init__(self):
        self.encrypting_type = ""  # cipher selected in the GUI
class Cipher(enum.Enum):
NO_CIPHER = 1
CAESAR = 2
FERNET = 3
POLYBIUS = 4
RAGBABY = 5
|
python
|
from flask import Flask
from flask import render_template
from flask import request, jsonify
import argparse
import itertools
import dateutil.parser
import os
from flask_sqlalchemy import SQLAlchemy
from libs import gcal_client
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy(app)
import models
class gevent:
def __init__(self, event):
self.name = event['summary']
self.start = dateutil.parser.parse(event['start']['dateTime'])
self.date = self.start.strftime("%B %d, %A %H:%M%p")
self.players = []
self.count = 0
@app.route('/')
def index():
events = gcal_client.get_cal_details()
gevents=[]
for event in events:
ge = gevent(event)
db_event = models.Event(ge.name)
db.session.merge(db_event)
db.session.commit()
playing = db.session.query(models.Player.name)\
.join(models.Event_Player, models.Player.id == models.Event_Player.player_id)\
.filter(models.Event_Player.event_name == ge.name, models.Event_Player.is_playing)
#flatten the list of tuple names
ge.players = list(itertools.chain(*playing.all()))
ge.count = len(ge.players)
gevents.append(ge)
players = models.Player.query.all()
return render_template("index.html", events=gevents,players=players)
@app.route('/_update_poll', methods=['POST'])
def update_poll():
new_ep = models.Event_Player(request.form['player_id'], request.form['event_name'], True)
ep = models.Event_Player.query.filter_by(player_id = new_ep.player_id, event_name = new_ep.event_name).first()
if ep:
new_ep.is_playing = not ep.is_playing
db.session.merge(new_ep)
db.session.commit()
return jsonify({'msg': 'updated'})
db.session.add(new_ep)
db.session.commit()
return jsonify({'msg':'added'})
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
|
python
|
#!/usr/bin/env python
#
import webapp2
import re
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
from google.appengine.ext.webapp import blobstore_handlers
import logging
import parsers
import headers
# This is the triple store api.
# We have a number of triple sets. Each is from a user / tag combination
# models
NodeIDMap = {}
class Unit ():
@staticmethod
def GetUnit (id, createp=False):
        if (id in NodeIDMap):
            return NodeIDMap[id]
if (createp != None):
return Unit(id)
def __init__ (self, id):
self.id = id
NodeIDMap[id] = self
self.arcsIn = []
self.arcsOut = []
self.examples = []
class Triple () :
def __init__ (self, source, arc, target, text):
self.source = source
source.arcsOut.append(self)
self.arc = arc
if (target != None):
self.target = target
self.text = None
target.arcsIn.append(self)
elif (text != None):
self.text = text
self.target = None
@staticmethod
def AddTriple(source, arc, target):
if (source == None or arc == None or target == None):
return
else:
return Triple(source, arc, target, None)
@staticmethod
def AddTripleText(source, arc, text):
if (source == None or arc == None or text == None):
return
else:
return Triple(source, arc, None, text)
class Example ():
@staticmethod
def AddExample(terms, original_html, microdata, rdfa, jsonld):
return Example(terms, original_html, microdata, rdfa, jsonld)
def __init__ (self, terms, original_html, microdata, rdfa, jsonld):
self.terms = terms
self.original_html = original_html
self.microdata = microdata
self.rdfa = rdfa
self.jsonld = jsonld
for term in terms:
term.examples.append(self)
def GetExamples(node):
return node.examples
def GetTargets(arc, source):
targets = {}
for triple in source.arcsOut:
if (triple.arc == arc):
if (triple.target != None):
targets[triple.target] = 1
if (triple.text != None):
targets[triple.text] = 1
return targets.keys()
def GetSources(arc, target):
sources = {}
for triple in target.arcsIn:
if (triple.arc == arc):
sources[triple.source] = 1
return sources.keys()
def GetArcsIn(target):
arcs = {}
for triple in target.arcsIn:
arcs[triple.arc] = 1
return arcs.keys()
def GetArcsOut(source):
arcs = {}
for triple in source.arcsOut:
arcs[triple.arc] = 1
return arcs.keys()
def GetComment(node) :
for triple in node.arcsOut:
if (triple.arc.id == 'rdfs:comment'):
return triple.text
return "No comment"
PageCache = {}
class ShowUnit (webapp2.RequestHandler) :
def GetCachedText(self, node):
global PageCache
if (node.id in PageCache):
return PageCache[node.id]
else:
return None
def AddCachedText(self, node, textStrings):
global PageCache
outputText = "".join(textStrings)
PageCache[node.id] = outputText
return outputText
def write(self, str):
self.outputStrings.append(str)
def GetParentStack(self, node):
if (node not in self.parentStack):
self.parentStack.append(node)
sc = Unit.GetUnit("rdfs:subClassOf")
for p in GetTargets(sc, node):
self.GetParentStack(p)
def ml(self, node):
return "<a href=%s>%s</a>" % (node.id, node.id)
def UnitHeaders(self, node):
self.write("<h1 class=page-title>")
ind = len(self.parentStack)
while (ind > 0) :
ind = ind -1
nn = self.parentStack[ind]
self.write("%s > " % (self.ml(nn)))
self.write("</h1>")
comment = GetComment(node)
self.write("<div>%s</div>" % (comment))
self.write("<table cellspacing=3 class=definition-table> <thead><tr><th>Property</th><th>Expected Type</th><th>Description</th> </tr></thead>")
def ClassProperties (self, cl):
headerPrinted = False
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
for prop in GetSources(di, cl):
ranges = GetTargets(ri, prop)
comment = GetComment(prop)
if (not headerPrinted):
self.write("<thead class=supertype><tr><th class=supertype-name colspan=3>Properties from %s</th></tr></thead><tbody class=supertype" % (self.ml(cl)))
headerPrinted = True
# logging.info("Property found %s" % (prop.id))
self.write("<tr><th class=prop-nam' scope=row> <code>%s</code></th> " % (self.ml(prop)))
self.write("<td class=prop-ect>")
first_range = True
for r in ranges:
if (not first_range):
self.write("<br>")
first_range = False
self.write(self.ml(r))
self.write(" ")
self.write("</td>")
self.write("<td class=prop-desc>%s</td> " % (comment))
self.write("</tr>")
def rep(self, markup):
m1 = re.sub("<", "<", markup)
m2 = re.sub(">", ">", m1)
return m2
def get(self, node):
if (node == "favicon.ico"):
return
node = Unit.GetUnit(node)
self.outputStrings = []
headers.OutputSchemaorgHeaders(self)
cached = self.GetCachedText(node)
if (cached != None):
self.response.write(cached)
return
self.parentStack = []
self.GetParentStack(node)
self.UnitHeaders(node)
for p in self.parentStack:
# logging.info("Doing " + p)
self.ClassProperties(p)
self.write("</table>")
children = GetSources(Unit.GetUnit("rdfs:subClassOf"), node)
if (len(children) > 0):
self.write("<br>More specific Types");
for c in children:
self.write("<li> %s" % (self.ml(c)))
examples = GetExamples(node)
if (len(examples) > 0):
self.write("<br><br><b>Examples</b><br><br>")
for ex in examples:
pl = "<pre class=\"prettyprint lang-html linenums\">"
self.write("<b>Without Markup</b><br><br>%s %s</pre><br><br>" % (pl, self.rep(ex.original_html)))
self.write("<b>Microdata</b><br>%s %s</pre><br><br>" % (pl, self.rep(ex.microdata)))
self.write("<b>RDFA</b><br>%s %s</pre><br><br>" % (pl, self.rep(ex.rdfa)))
self.write("<b>JSON-LD</b><br>%s %s</pre><br><br>" % (pl, self.rep(ex.jsonld)))
self.response.write(self.AddCachedText(node, self.outputStrings))
def read_file (filename):
import os.path
folder = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(folder, filename)
strs = []
for line in open(file_path, 'r').readlines():
strs.append(line)
return "".join(strs)
schemasInitialized = False
def read_schemas():
import os.path
global schemasInitialized
if (not schemasInitialized):
schema_content = read_file('data/schema.rdfa')
example_content = read_file('data/examples.txt')
ft = 'rdfa'
parser = parsers.MakeParserOfType(ft, None)
items = parser.parse(schema_content)
parser = parsers.ParseExampleFile(None)
parser.parse(example_content)
schemasInitialized = True
read_schemas()
app = ndb.toplevel(webapp2.WSGIApplication([("/(.*)", ShowUnit)]))
|
python
|
'''
Created By ILMARE
@Date 2019-3-1
'''
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import requests
from PIL import Image
import os
import re
totalCount = 0
pre_path = r"/home/ilmare/Desktop/FaceReplace/data/image/"
class PhotoScrawler:
def __init__(self, savePath, destUrl, maxPage):
self._savePath = savePath
self._destUrl = destUrl
self._maxPage = maxPage
def get_title_list(self, destUrl, pageNum=0):
Url = "{0}&ie=utf-8&pn={1}".format(destUrl, pageNum * 50)
print("Parsing page: ", Url)
try:
resp = requests.get(Url)
bsObj = BeautifulSoup(resp.text, "html.parser")
elts = bsObj.find_all("li", {"class": ["j_thread_list", "clearfix"]})
print(len(elts))
return_mat = []
for elt in elts:
repNum = int(elt.find("span", {"class": "threadlist_rep_num center_text"}).text)
a = elt.find("a", {"class": "j_th_tit"})
link = a.attrs.get("href")
title = a.attrs.get("title")
return_mat.append((title, "{0}{1}".format("http://tieba.baidu.com", link), repNum))
return return_mat
except Exception as e:
print(e)
return None
def parse_page(self, fronted_Url, pageNum=1):
Url = "{0}?pn={1}".format(fronted_Url, pageNum)
global totalCount
        totalPage = pageNum  # default so the recursion in the finally block stops if the request fails
        try:
resp = requests.get(Url)
bsObj = BeautifulSoup(resp.text, "html.parser")
ul = bsObj.find("ul", {"class": "l_posts_num"})
totalPage = int(ul.find("li", {"class": "l_reply_num"}).find_all("span", {"class": "red"})[1].text)
print("----", "Parsing page: ", Url, ", pageNum: ", pageNum, ", totalPage: ", totalPage)
elts = bsObj.find_all("div", {"class": ["l_post", "j_l_post", "l_post_bright", "noborder"]})
for elt, idx in zip(elts, range(len(elts))):
div = elt.find("div", {"class": "d_post_content j_d_post_content clearfix"})
imgs = div.find_all("img")
if imgs is not None:
for img in imgs:
src = img.attrs.get("src")
res = re.match(r"^http.*/(image_emoticon)[0-9]+.(png|jpg|jpeg|gif)$", src)
if res is None:
ret = re.search(r"(?<=\.)(png|jpg|jpeg|gif)$", src)
format = None
if ret is not None:
format = ret.group()
if format is None:
urlretrieve(src, "{0}{1}".format(pre_path, totalCount))
img = Image.open("{0}{1}".format(pre_path, totalCount))
format = img.format
img.save("{0}{1}.{2}".format(pre_path, totalCount, format.lower()))
os.remove("{0}{1}".format(pre_path, totalCount))
print("-------- ", idx, ": ", src)
else:
urlretrieve(src, "{0}{1}.{2}".format(pre_path, totalCount, format))
print("-------- ", idx, ": ", src, "format: ", format)
totalCount += 1
except Exception as e:
print(e)
finally:
if pageNum < totalPage:
self.parse_page(fronted_Url, pageNum + 1)
else:
return
def get_photo_from_tieba(self):
for i in range(self._maxPage):
return_mat = self.get_title_list(self._destUrl, i)
if return_mat is None:
continue
for (title, link, repNum), page in zip(return_mat, range(len(return_mat))):
if repNum <= 3000:
print("===>", title, ", current page: ", i + 1, ", current item: ", page)
self.parse_page(link)
if __name__ == "__main__":
obj = PhotoScrawler(pre_path,
"http://tieba.baidu.com/f?kw=%E6%9D%A8%E5%B9%82", 1)
obj.get_photo_from_tieba()
|
python
|
from diary.models import Note
from django.forms import Textarea, ModelForm
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.html import format_html
from django.views.generic import ListView, CreateView
class NoteListView(ListView):
model = Note
@classmethod
def hx_create_view(cls):
return format_html('<div hx-get="{}" hx-trigger="load" hx-swap="outerHTML"></div>', reverse('note-create'))
@classmethod
def hx_first_read_view(cls):
first = Note.objects.all().order_by('-datetime', '-id').first()
if not first:
return ''
return note_and_next_html(first)
class NoteCreateForm(ModelForm):
class Meta:
fields = ['datetime', 'title', 'text']
model = Note
widgets = {
'text': Textarea(attrs={'rows': 3}),
}
class NoteCreateView(CreateView):
model = Note
form_class = NoteCreateForm
def form_valid(self, form):
# No Post/Redirect/Get needed for htmx
instance = form.save()
return HttpResponse(
format_html('{} {}', NoteListView.hx_create_view(), note_html(instance)))
def get_next_or_none(note):
    # SQLite does not store microseconds. Two entries added within the same second
    # can't be distinguished with ".first()", so an ugly loop is needed.
found = False
for next in Note.objects.filter(datetime__lte=note.datetime).order_by('-datetime', '-id'):
if found:
return next
if next==note:
found = True
def note_html(note):
return format_html('''
<h1>{date} {title}</h1>
<p>{text}</p>
''', date=note.datetime.strftime('%d.%b'), title=note.title, text=note.text)
def note_and_next(request, pk):
return HttpResponse(note_and_next_html(get_object_or_404(Note, pk=pk)))
def note_and_next_html(note):
next = get_next_or_none(note)
if next:
hx_next = format_html('<div hx-get="{}" hx-trigger="revealed" hx-swap="outerHTML">...</div>',
reverse('note_and_next', kwargs=dict(pk=next.id)))
else:
hx_next = 'The End'
return format_html('{note_html} {hx_next}',
note_html=note_html(note),
hx_next=hx_next)
|
python
|
# -*- coding: utf-8 -*-
import os
from lib.ipc import ActorProcess
from lib.systray import SysTrayIcon
from lib.utils import init_logging
class UI(ActorProcess):
def __init__(self, coordinator):
super(UI, self).__init__()
self.coordinator = coordinator
def systray_quit(self, systray_ref):
pass
def sytray_launch_browser(self, systray_ref):
self.coordinator.IPC_launch_browser()
def systray_open_webadmin(self, systray_ref):
self.coordinator.IPC_open_admin_url()
def run(self):
init_logging()
self.start_actor()
rootdir = self.coordinator.get('rootdir')
confdata = self.coordinator.get('confdata')
icon = os.path.join(rootdir, confdata['icon_path'])
SysTrayIcon(
icon,
            u'萤火虫翻墙代理',  # "Firefly circumvention proxy"
            (
                (u'翻墙浏览', None, self.sytray_launch_browser),  # "Browse via the proxy"
                (u'配置代理', None, self.systray_open_webadmin),  # "Configure the proxy"
                (u'退出', None, 'QUIT')  # "Quit"
),
on_quit=self.systray_quit,
default_menu_index=1,
)
self.quit_actor()
|
python
|
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2srv.api import get_service_path
from ros2srv.api import service_type_completer
from ros2srv.verb import VerbExtension
class ShowVerb(VerbExtension):
"""Output the service definition."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'service_type',
help="Type of the ROS service (e.g. 'std_srvs/Trigger')")
arg.completer = service_type_completer
def main(self, *, args):
# TODO(dirk-thomas) this logic should come from a rosidl related
# package
try:
package_name, service_name = args.service_type.split('/', 2)
if not package_name or not service_name:
raise ValueError()
except ValueError:
raise RuntimeError('The passed service type is invalid')
try:
path = get_service_path(package_name, service_name)
except LookupError as e:
return str(e)
with open(path, 'r') as h:
print(h.read(), end='')
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from fairscale.nn.pipe.skip import pop, skippable, stash
from fairscale.nn.pipe.skip.tracker import SkipTracker, use_skip_tracker
@pytest.fixture(autouse=True)
def skip_tracker():
skip_tracker = SkipTracker()
with use_skip_tracker(skip_tracker):
yield skip_tracker
def test_stash(skip_tracker):
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
l1 = Stash()
assert len(skip_tracker.tensors) == 0
with use_skip_tracker(skip_tracker):
l1(torch.tensor(42))
assert len(skip_tracker.tensors) == 1
def test_pop():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
l1 = Stash()
l2 = Pop()
output = l2(l1(torch.tensor(42)))
assert output.item() == 42
def test_declare_but_not_use():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
return input * 3
l1 = Stash()
l2 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(torch.tensor(42))
def test_stash_not_declared():
@skippable()
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
l1 = Stash()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_pop_not_declared():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
@skippable()
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
l1 = Stash()
l2 = Pop()
latent = l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(latent)
def test_pop_not_stashed():
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
yield pop("foo")
l1 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_stash_none():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", None)
return input * 2
l1 = Stash()
l1(torch.tensor(42))
|
python
|
import numpy as np
import string
wrd_fname = 'p042_words.txt'
def gen_triang_list(elems = 1000):
out_list = []
for x in xrange(1, elems):
out_list.append((x*(x+1))/2)
return out_list
def word_to_sum(word):
total_sum = 0
word = string.lower(string.strip(word))
base_sub = ord('a')-1
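    # e.g. "sky" -> 19 + 11 + 25 = 55, the 10th triangular number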
for ch in word:
total_sum += (ord(ch) - base_sub)
return total_sum
def find_words():
triang_list = gen_triang_list()
wsrc = open(wrd_fname)
total_count = 0
big_line = wsrc.readlines()[0]
for line in big_line.split(","):
line = line[1:-1]
ws = word_to_sum(line)
if ws in triang_list:
total_count += 1
print("Found %d words" % total_count)
if __name__ == '__main__':
find_words()
|
python
|
from flask import Flask, jsonify
from flask_marshmallow import Marshmallow
from flask_bcrypt import Bcrypt
from flask_cors import CORS
from flask_restful import Api
from flask_jwt_extended import JWTManager
ma = Marshmallow()
cors = CORS()
bcrypt = Bcrypt()
jwt = JWTManager()
def create_app(config_object=None):
app = Flask(__name__, instance_relative_config=True)
if config_object:
app.config.from_object(config_object)
from .models import db
db.init_app(app)
ma.init_app(app)
bcrypt.init_app(app)
jwt.init_app(app)
cors.init_app(app, resources={r"/api/*": {"origins": "*"}})
api = Api(app, prefix='/api')
register_api_resources(api)
from .views import api as api_bp
app.register_blueprint(api_bp, url_prefix='/api')
register_error_handlers(app)
return app
def register_api_resources(api):
from .resources.places import Place, PlaceList
api.add_resource(Place, '/places/<string:id>', endpoint='api.place')
api.add_resource(PlaceList, '/places')
def register_error_handlers(app):
from .errors import APIException
import json.decoder as jd
@app.errorhandler(APIException)
def handle_api_exception(error: APIException):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.errorhandler(404)
def handle_not_found(error):
return handle_api_exception(APIException(
'The requested URL was not found on the server. '
'If you entered the URL manually please check your spelling and try again.', status_code=404))
@app.errorhandler(jd.JSONDecodeError)
def handle_json_parse_error(error):
return handle_api_exception(APIException(
'The browser (or proxy) sent a request that this server could not understand.'))
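
# Minimal usage sketch (hedged; assumes a config object or import path such as
# 'config.DevelopmentConfig' providing the SQLAlchemy settings):
#   app = create_app('config.DevelopmentConfig')
#   app.run(debug=True)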
|
python
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# bot_bcad_3.6.py
import utilities, os, fnmatch, re
async def membersDump(ctx):
serverName = (f"*{ctx.message.guild.name.replace(' ', '-')}*")
membersList = []
for file in os.listdir('./logs'):
if fnmatch.fnmatch(file, 'members-server*'):
if fnmatch.fnmatch(file, serverName):
filePath = './logs/' + file
with open(filePath) as fp:
lines = fp.readlines()
for line in lines:
line = line.split("Number: " + line[re.search(r"\d", line).start()])[1].strip(" ").replace("_", "\_")
if line not in membersList:
membersList.append(line)
membersList = '\n'.join(membersList)
return membersList
# with open('./logs/members.log', 'r', newline='\n', encoding='utf-8') as members_list_raw:
# members_list = members_list_raw.read()
# return members_list
async def membersLog(ctx):
log_path = ("./logs/members" + "-server-" + ctx.message.guild.name.replace(' ', '-') + "-" + (utilities.epoch_to_custom_date(utilities.FMT_TIME_FILE)) + ".log")
for i, member in enumerate(ctx.message.guild.members):
list_mem_num = (f'{i}')
list_mem_id = (f'{member.id}')
list_mem = (f'{member}')
list_mem_name = (f'{member.name}')
list_all = (f'Number: {list_mem_num} ID: {list_mem_id} Name: {list_mem} ({list_mem_name})\n')
with open(log_path, 'a') as file:
file.write(list_all)
return log_path
|
python
|
# Greedy
class Solution:
def maxProfit(self, prices):
profit = 0
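        # Greedy argument: summing every positive day-to-day price difference is
        # equivalent to buying before each rise and selling at its local peak,
        # which is optimal when unlimited transactions are allowed.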
for i in range(len(prices) - 1):
profit += max(0, prices[i + 1] - prices[i])
return profit
if __name__ == "__main__":
i = [7, 1, 5, 3, 6, 4]
s = Solution()
print(s.maxProfit(i))
|
python
|
# source code: binary_decoder.py
# author: Lukas Eder
# date: 13.02.2018
#
# Descr.:
# Converts user input from ASCII into binary byte strings, or vice versa.
# Function: binary to ASCII
def bin_read(binCode):
for line in file:
line_split = line.strip('\n').split(",")
if binCode == line_split[0]:
file.seek(0)
return line_split[1]
# Function: ASCII to binary
def ascii_read(word):
for line in file:
line_split = line.strip('\n').split(",")
if word == line_split[1]:
file.seek(0)
return line_split[0]
rep = "Ja"
while rep == "Ja":
file = open("PATH_PLACEHOLDER/binaer.txt", "r")
# User input
print("Bitte geben Sie einen mehrteiligen Binaer- oder ASCII-String ein.")
eingabe = str(input("Eingabe: "))
print("wollen Sie Binaer in Ascii (1) oder Ascii in Binaer (2) wandeln.")
wahl = str(input("Eingabe: "))
# processing
if wahl == "1":
eingabe_split = eingabe.split()
for element in eingabe_split:
temp1 = bin_read(element)
print(temp1)
elif wahl == "2":
for element in eingabe:
temp2 = ascii_read(element)
print(temp2)
else:
print("Fehlerhafte Eingabe")
file.close()
print("Moechten Sie noch eine Eingabe taetigen?")
rep = str(input("(Ja/Nein) Eingabe: "))
print("Bye")
|
python
|
#!/usr/bin/env python3
"""main.py: The main file used to execute MindFull."""
__author__ = "Rhys Read"
__copyright__ = "Copyright 2019, Rhys Read"
import logging
from display import Display
logging.basicConfig(level=logging.DEBUG)
class Main(object):
def __init__(self):
self.__display = Display()
def start(self):
self.__display.start()
if __name__ == '__main__':
main = Main()
main.start()
|
python
|
from Utils.alerter import send
from Utils.urlShortener import short
import discord
from discord.ext import commands
import config
class AntiCaps(commands.Cog):
def __init__(self, client):
self.client = client
self.title = "Anti-Caps System"
self.type = config.CAPS_TYPE
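        # type 1: count uppercase letters per message; type 2: count
        # fully-uppercase words. Thresholds come from config.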
if self.type == 1:
self.maximum = config.MAXIMUM_CAPS_LETTERS
elif self.type == 2:
self.maximum = config.MAXIMUM_CAPS_WORDS
async def send_alert(self, message: discord.Message) -> None:
send(
self.title,
f"Author: {message.author}" +
f"\nChannel: {message.channel.name}" +
f"\nLink to message: {short(message.jump_url)}"
)
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
if message.guild.id != config.GUILD_TO_LISTEN:
return
if message.channel.id in config.ALLOWED_CHANNELS_IDS:
return
count = 0
if self.type == 1:
for word in message.content.split(" "):
for letter in word:
if letter.isupper():
count += 1
if count == self.maximum:
await self.send_alert(message)
count = 0
elif self.type == 2:
for word in message.content.split(" "):
if word.isupper():
count += 1
if count == self.maximum:
await self.send_alert(message)
def setup(client):
client.add_cog(AntiCaps(client))
|
python
|
import numpy as np
def get_quantization_matrix(quality: int = 100) -> np.ndarray:
"""
    Get the quantization matrix for the given quality.
Args:
quality: quality of quantization
Returns:
        quantization matrix
"""
# quantization matrix
q = [[16, 11, 10, 16, 24, 40, 51, 61],
[12, 12, 14, 19, 26, 58, 60, 55],
[14, 13, 16, 24, 40, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62],
[18, 22, 37, 56, 68, 109, 103, 77],
[24, 35, 55, 64, 81, 104, 113, 92],
[49, 64, 78, 87, 103, 121, 120, 101],
[72, 92, 95, 98, 112, 100, 103, 99]]
q = np.array(q)
if quality == 100:
return q
if quality < 50:
scale = 5000 / quality
else:
scale = 200 - 2 * quality
q = np.floor((q * scale + 50) / 100)
return q
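# Illustrative usage sketch (not part of the original module): lower quality
# values produce larger entries, i.e. coarser quantization steps.
if __name__ == "__main__":
    for quality in (90, 50, 10):
        print(f"quality={quality}")
        print(get_quantization_matrix(quality))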
|
python
|
# -*- coding: utf-8 -*-
#
# Hash/CMAC.py - Implements the CMAC algorithm
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""CMAC (Cipher-based Message Authentication Code) algorithm
CMAC is a MAC defined in `NIST SP 800-38B`_ and in RFC4493_ (for AES only)
and constructed using a block cipher. It was originally known as `OMAC1`_.
The algorithm is sometimes named *X-CMAC* where *X* is the name
of the cipher (e.g. AES-CMAC).
This is an example showing how to *create* an AES-CMAC:
>>> from Crypto.Hash import CMAC
>>> from Crypto.Cipher import AES
>>>
>>> secret = b'Sixteen byte key'
>>> cobj = CMAC.new(secret, ciphermod=AES)
>>> cobj.update(b'Hello')
>>> print cobj.hexdigest()
And this is an example showing how to *check* an AES-CMAC:
>>> from Crypto.Hash import CMAC
>>> from Crypto.Cipher import AES
>>>
>>> # We have received a message 'msg' together
>>> # with its MAC 'mac'
>>>
>>> secret = b'Sixteen byte key'
>>> cobj = CMAC.new(secret, ciphermod=AES)
>>> cobj.update(msg)
>>> try:
>>> cobj.verify(mac)
>>> print "The message '%s' is authentic" % msg
>>> except ValueError:
>>> print "The message or the key is wrong"
.. _`NIST SP 800-38B`: http://csrc.nist.gov/publications/nistpubs/800-38B/SP_800-38B.pdf
.. _RFC4493: http://www.ietf.org/rfc/rfc4493.txt
.. _OMAC1: http://www.nuee.nagoya-u.ac.jp/labs/tiwata/omac/omac.html
"""
__all__ = ['new', 'digest_size', 'CMAC' ]
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
from binascii import unhexlify
from Crypto.Util.strxor import strxor
from Crypto.Util.number import long_to_bytes, bytes_to_long
#: The size of the authentication tag produced by the MAC.
digest_size = None
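# _shift_bytes performs the one-bit left shift (optionally XOR-ing the
# constant Rb into the least significant byte) used for CMAC subkey
# derivation in NIST SP 800-38B.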
def _shift_bytes(bs, xor_lsb=0):
num = (bytes_to_long(bs)<<1) ^ xor_lsb
return long_to_bytes(num, len(bs))[-len(bs):]
class _SmoothMAC(object):
"""Turn a MAC that only operates on aligned blocks of data
into a MAC with granularity of 1 byte."""
def __init__(self, block_size, msg=b(""), min_digest=0):
self._bs = block_size
#: Data waiting to be MAC-ed
self._buffer = []
self._buffer_len = 0
#: Data received via update()
self._total_len = 0
#: Minimum amount of bytes required by the final digest step
self._min_digest = min_digest
#: Block MAC object
self._mac = None
#: Cached digest
self._tag = None
if msg:
self.update(msg)
def can_reduce(self):
return (self._mac is not None)
def get_len(self):
return self._total_len
def zero_pad(self):
if self._buffer_len & (self._bs-1):
npad = self._bs - self._buffer_len & (self._bs-1)
self._buffer.append(bchr(0)*npad)
self._buffer_len += npad
def update(self, data):
# Optimization (try not to copy data if possible)
if self._buffer_len==0 and self.can_reduce() and\
self._min_digest==0 and len(data)%self._bs==0:
self._update(data)
self._total_len += len(data)
return
self._buffer.append(data)
self._buffer_len += len(data)
self._total_len += len(data)
# Feed data into MAC
blocks, rem = divmod(self._buffer_len, self._bs)
if rem<self._min_digest:
blocks -= 1
if blocks>0 and self.can_reduce():
aligned_data = blocks*self._bs
buf = b("").join(self._buffer)
self._update(buf[:aligned_data])
self._buffer = [ buf[aligned_data:] ]
self._buffer_len -= aligned_data
def _deep_copy(self, target):
        # Copy everything except self._mac, since we do not know how to
        # duplicate the underlying cipher object (CMAC.copy() recreates it).
target._buffer = self._buffer[:]
for m in [ '_bs', '_buffer_len', '_total_len', '_min_digest', '_tag' ]:
setattr(target, m, getattr(self, m))
def _update(self, data_block):
"""Delegate to the implementation the update
of the MAC state given some new *block aligned* data."""
raise NotImplementedError("_update() must be still implemented")
def _digest(self, left_data):
"""Delegate to the implementation the computation
of the final MAC given the current MAC state
and the last piece of data (not block aligned)."""
raise NotImplementedError("_digest() must be still implemented")
def digest(self):
if self._tag:
return self._tag
if self._buffer_len>0:
self.update(b(""))
left_data = b("").join(self._buffer)
self._tag = self._digest(left_data)
return self._tag
class CMAC(_SmoothMAC):
"""Class that implements CMAC"""
#: The size of the authentication tag produced by the MAC.
digest_size = None
def __init__(self, key, msg = None, ciphermod = None):
"""Create a new CMAC object.
:Parameters:
key : byte string
secret key for the CMAC object.
The key must be valid for the underlying cipher algorithm.
For instance, it must be 16 bytes long for AES-128.
msg : byte string
The very first chunk of the message to authenticate.
It is equivalent to an early call to `update`. Optional.
ciphermod : module
A cipher module from `Crypto.Cipher`.
The cipher's block size must be 64 or 128 bits.
It is recommended to use `Crypto.Cipher.AES`.
"""
if ciphermod is None:
raise TypeError("ciphermod must be specified (try AES)")
_SmoothMAC.__init__(self, ciphermod.block_size, msg, 1)
self._key = key
self._factory = ciphermod
# Section 5.3 of NIST SP 800 38B
if ciphermod.block_size==8:
const_Rb = 0x1B
elif ciphermod.block_size==16:
const_Rb = 0x87
else:
raise TypeError("CMAC requires a cipher with a block size of 8 or 16 bytes, not %d" %
(ciphermod.block_size,))
self.digest_size = ciphermod.block_size
# Compute sub-keys
cipher = ciphermod.new(key, ciphermod.MODE_ECB)
l = cipher.encrypt(bchr(0)*ciphermod.block_size)
if bord(l[0]) & 0x80:
self._k1 = _shift_bytes(l, const_Rb)
else:
self._k1 = _shift_bytes(l)
if bord(self._k1[0]) & 0x80:
self._k2 = _shift_bytes(self._k1, const_Rb)
else:
self._k2 = _shift_bytes(self._k1)
# Initialize CBC cipher with zero IV
self._IV = bchr(0)*ciphermod.block_size
self._mac = ciphermod.new(key, ciphermod.MODE_CBC, self._IV)
def update(self, msg):
"""Continue authentication of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
msg : byte string
The next chunk of the message being authenticated
"""
_SmoothMAC.update(self, msg)
def _update(self, data_block):
self._IV = self._mac.encrypt(data_block)[-self._mac.block_size:]
def copy(self):
"""Return a copy ("clone") of the MAC object.
The copy will have the same internal state as the original MAC
object.
This can be used to efficiently compute the MAC of strings that
share a common initial substring.
:Returns: A `CMAC` object
"""
obj = CMAC(self._key, ciphermod=self._factory)
_SmoothMAC._deep_copy(self, obj)
obj._mac = self._factory.new(self._key, self._factory.MODE_CBC, self._IV)
for m in [ '_tag', '_k1', '_k2', '_IV']:
setattr(obj, m, getattr(self, m))
return obj
def digest(self):
"""Return the **binary** (non-printable) MAC of the message that has
been authenticated so far.
This method does not change the state of the MAC object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
return _SmoothMAC.digest(self)
def _digest(self, last_data):
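        # NIST SP 800-38B: a complete final block is XOR-ed with subkey K1;
        # an incomplete final block is padded with a 0x80 byte followed by
        # zeros and XOR-ed with subkey K2 before the last CBC encryption.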
if len(last_data)==self._bs:
last_block = strxor(last_data, self._k1)
else:
last_block = strxor(last_data+bchr(128)+
bchr(0)*(self._bs-1-len(last_data)), self._k2)
tag = self._mac.encrypt(last_block)
return tag
def hexdigest(self):
"""Return the **printable** MAC of the message that has been
authenticated so far.
This method does not change the state of the MAC object.
:Return: A string of 2* `digest_size` bytes. It contains only
hexadecimal ASCII digits.
"""
return "".join(["%02x" % bord(x)
for x in tuple(self.digest())])
def verify(self, mac_tag):
"""Verify that a given **binary** MAC (computed by another party) is valid.
:Parameters:
mac_tag : byte string
The expected MAC of the message.
:Raises ValueError:
if the MAC does not match. It means that the message
has been tampered with or that the MAC key is incorrect.
"""
mac = self.digest()
res = 0
# Constant-time comparison
for x,y in zip(mac, mac_tag):
res |= bord(x) ^ bord(y)
if res or len(mac_tag)!=self.digest_size:
raise ValueError("MAC check failed")
def hexverify(self, hex_mac_tag):
"""Verify that a given **printable** MAC (computed by another party) is valid.
:Parameters:
hex_mac_tag : string
The expected MAC of the message, as a hexadecimal string.
:Raises ValueError:
if the MAC does not match. It means that the message
has been tampered with or that the MAC key is incorrect.
"""
self.verify(unhexlify(tobytes(hex_mac_tag)))
def new(key, msg = None, ciphermod = None):
"""Create a new CMAC object.
:Parameters:
key : byte string
secret key for the CMAC object.
The key must be valid for the underlying cipher algorithm.
For instance, it must be 16 bytes long for AES-128.
msg : byte string
The very first chunk of the message to authenticate.
It is equivalent to an early call to `CMAC.update`. Optional.
ciphermod : module
A cipher module from `Crypto.Cipher`.
The cipher's block size must be 64 or 128 bits.
Default is `Crypto.Cipher.AES`.
:Returns: A `CMAC` object
"""
return CMAC(key, msg, ciphermod)
|
python
|
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
from eventlet.green import subprocess as e_subprocess
from neutronclient.neutron import client as neutron_cli
import requests
from requests import adapters
from sahara import context
from sahara import exceptions as ex
from sahara.openstack.common import log as logging
from sahara.utils.openstack import base
LOG = logging.getLogger(__name__)
def client():
ctx = context.ctx()
args = {
'username': ctx.username,
'tenant_name': ctx.tenant_name,
'tenant_id': ctx.tenant_id,
'token': ctx.token,
'endpoint_url': base.url_for(ctx.service_catalog, 'network')
}
return neutron_cli.Client('2.0', **args)
class NeutronClientRemoteWrapper():
neutron = None
adapters = {}
routers = {}
def __init__(self, network, uri, token, tenant_name):
self.neutron = neutron_cli.Client('2.0',
endpoint_url=uri,
token=token,
tenant_name=tenant_name)
self.network = network
def get_router(self):
matching_router = NeutronClientRemoteWrapper.routers.get(self.network,
None)
if matching_router:
LOG.debug('Returning cached qrouter')
return matching_router['id']
routers = self.neutron.list_routers()['routers']
for router in routers:
device_id = router['id']
ports = self.neutron.list_ports(device_id=device_id)['ports']
port = next((port for port in ports
if port['network_id'] == self.network), None)
if port:
matching_router = router
NeutronClientRemoteWrapper.routers[
self.network] = matching_router
break
if not matching_router:
raise ex.SystemError('Neutron router corresponding to network {0} '
'is not found'.format(self.network))
return matching_router['id']
def get_http_session(self, host, port=None, *args, **kwargs):
session = requests.Session()
adapters = self._get_adapters(host, port=port, *args, **kwargs)
for adapter in adapters:
session.mount('http://{0}:{1}'.format(host, adapter.port), adapter)
return session
def _get_adapters(self, host, port=None, *args, **kwargs):
LOG.debug('Retrieving neutron adapters for {0}:{1}'.format(host, port))
adapters = []
if not port:
# returning all registered adapters for given host
adapters = [adapter for adapter in self.adapters
if adapter.host == host]
else:
# need to retrieve or create specific adapter
            adapter = self.adapters.get((host, port), None)
if not adapter:
LOG.debug('Creating neutron adapter for {0}:{1}'
.format(host, port))
qrouter = self.get_router()
adapter = (
NeutronHttpAdapter(qrouter, host, port))
self.adapters[(host, port)] = adapter
adapters = [adapter]
return adapters
class NeutronHttpAdapter(adapters.HTTPAdapter):
port = None
host = None
def __init__(self, qrouter, host, port, *args, **kwargs):
super(NeutronHttpAdapter, self).__init__(*args, **kwargs)
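        # Tunnel the TCP connection through the Neutron router's network
        # namespace by piping it through netcat run under 'ip netns exec'.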
command = 'ip netns exec qrouter-{0} nc {1} {2}'.format(qrouter,
host, port)
LOG.debug('Neutron adapter created with cmd {0}'.format(command))
self.cmd = shlex.split(command)
self.port = port
self.host = host
def get_connection(self, url, proxies=None):
pool_conn = (
super(NeutronHttpAdapter, self).get_connection(url, proxies))
if hasattr(pool_conn, '_get_conn'):
http_conn = pool_conn._get_conn()
if http_conn.sock is None:
if hasattr(http_conn, 'connect'):
sock = self._connect()
                    LOG.debug('HTTP connection {0} getting new '
'netcat socket {1}'.format(http_conn, sock))
http_conn.sock = sock
else:
if hasattr(http_conn.sock, 'is_netcat_socket'):
LOG.debug('pooled http connection has existing '
'netcat socket. resetting pipe...')
http_conn.sock.reset()
pool_conn._put_conn(http_conn)
return pool_conn
def close(self):
LOG.debug('Closing neutron adapter for {0}:{1}'
.format(self.host, self.port))
super(NeutronHttpAdapter, self).close()
def _connect(self):
LOG.debug('returning netcat socket with command {0}'
.format(self.cmd))
return NetcatSocket(self.cmd)
class NetcatSocket:
def _create_process(self):
self.process = e_subprocess.Popen(self.cmd,
stdin=e_subprocess.PIPE,
stdout=e_subprocess.PIPE,
stderr=e_subprocess.PIPE)
def __init__(self, cmd):
self.cmd = cmd
self._create_process()
def send(self, content):
try:
self.process.stdin.write(content)
except IOError as e:
raise ex.SystemError(e)
return len(content)
def sendall(self, content):
return self.send(content)
def makefile(self, mode, *arg):
if mode.startswith('r'):
return self.process.stdout
if mode.startswith('w'):
return self.process.stdin
raise ex.IncorrectStateError("Unknown file mode %s" % mode)
def recv(self, size):
try:
return os.read(self.process.stdout.fileno(), size)
except IOError as e:
raise ex.SystemError(e)
def _terminate(self):
self.process.terminate()
def close(self):
LOG.debug('Socket close called')
self._terminate()
def settimeout(self, timeout):
pass
def fileno(self):
return self.process.stdin.fileno()
def is_netcat_socket(self):
return True
def reset(self):
self._terminate()
self._create_process()
|
python
|
from abc import ABCMeta, abstractmethod
class iDocGenerator(metaclass=ABCMeta):
"""
Generate documents
"""
def __init__(self):
pass
    @abstractmethod
def createDocumentTuple(self):
"""
Yield list of documents
"""
pass
    @abstractmethod
def getDocumentTuple(self):
"""
Return a list of Documents
"""
pass
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for the pyexodus module.
:copyright:
Lion Krischer ([email protected]), 2016
:license:
MIT License
"""
import inspect
import os
import sys
from setuptools import setup, find_packages
# Import the version string.
path = os.path.join(os.path.abspath(os.path.dirname(inspect.getfile(
inspect.currentframe()))), "pyexodus")
sys.path.insert(0, path)
from version import get_git_version # NOQA
def get_package_data():
"""
Returns a list of all files needed for the installation relative to the
'pyexodus' subfolder.
"""
filenames = []
    # The pyexodus root dir.
root_dir = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "pyexodus")
# Recursively include all files in these folders:
folders = [os.path.join(root_dir, "tests", "data")]
for folder in folders:
for directory, _, files in os.walk(folder):
for filename in files:
# Exclude hidden files.
if filename.startswith("."):
continue
filenames.append(os.path.relpath(
os.path.join(directory, filename),
root_dir))
return filenames
setup_config = dict(
name="pyexodus",
version=get_git_version(),
description="Module for creating Exodus files",
author="Lion Krischer",
author_email="[email protected]",
url="https://github.com/SalvusHub/pyexodus",
packages=find_packages(),
license="MIT",
platforms="OS Independent",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics'],
install_requires=["numpy", "h5netcdf"],
package_data={
"pyexodus": get_package_data()},
)
if __name__ == "__main__":
setup(**setup_config)
|
python
|
import numpy as np
import os
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import pandas as pd
if __name__ == '__main__':
data_path = './augmentation_vtk_data/'
output_path = './'
num_augmentations = 20
train_size = 0.8
with_flip = True
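    # Flipped (mirrored) augmentations are assumed to be stored under
    # sample id + 1000 (see subject2_name below).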
num_samples = 36 # define number of samples
sample_list = list(range(1, num_samples+1))
sample_name = 'A{0}_Sample_0{1}_d.vtp'
# get valid sample list
valid_sample_list = []
for i_sample in sample_list:
for i_aug in range(num_augmentations):
if os.path.exists(os.path.join(data_path, sample_name.format(i_aug, i_sample))):
valid_sample_list.append(i_sample)
# remove duplicated
sample_list = list(dict.fromkeys(valid_sample_list))
sample_list = np.asarray(sample_list)
#print(sample_list)
i_cv = 0
kf = KFold(n_splits=6, shuffle=False)
for train_idx, test_idx in kf.split(sample_list):
i_cv += 1
print('Round:', i_cv)
train_list, test_list = sample_list[train_idx], sample_list[test_idx]
        train_list, val_list = train_test_split(train_list, train_size=train_size, shuffle=True)
print('Training list:\n', train_list, '\nValidation list:\n', val_list, '\nTest list:\n', test_list)
#training
train_name_list = []
for i_sample in train_list:
for i_aug in range(num_augmentations):
#print('Computing Sample: {0}; Aug: {1}...'.format(i_sample, i_aug))
subject_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample)
train_name_list.append(os.path.join(data_path, subject_name))
if with_flip:
subject2_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample+1000)
train_name_list.append(os.path.join(data_path, subject2_name))
with open(os.path.join(output_path, 'train_list_{0}.csv'.format(i_cv)), 'w') as file:
for f in train_name_list:
file.write(f+'\n')
#validation
val_name_list = []
for i_sample in val_list:
for i_aug in range(num_augmentations):
#print('Computing Sample: {0}; Aug: {1}...'.format(i_sample, i_aug))
subject_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample)
val_name_list.append(os.path.join(data_path, subject_name))
if with_flip:
subject2_name = 'A{}_Sample_0{}_d.vtp'.format(i_aug, i_sample+1000)
val_name_list.append(os.path.join(data_path, subject2_name))
with open(os.path.join(output_path, 'val_list_{0}.csv'.format(i_cv)), 'w') as file:
for f in val_name_list:
file.write(f+'\n')
#test
test_df = pd.DataFrame(data=test_list, columns=['Test ID'])
test_df.to_csv('test_list_{}.csv'.format(i_cv), index=False)
print('--------------------------------------------')
print('with flipped samples:', with_flip)
print('# of train:', len(train_name_list))
print('# of validation:', len(val_name_list))
print('--------------------------------------------')
|
python
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from watcher.common import context as watcher_context
from watcher.common import scheduling
from watcher import notifications
from watcher import objects
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class APISchedulingService(scheduling.BackgroundSchedulerService):
def __init__(self, gconfig=None, **options):
self.services_status = {}
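        # Last known status per service id, used to detect transitions so
        # notifications and audit migration happen only when a status changes.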
        gconfig = gconfig or {}
super(APISchedulingService, self).__init__(gconfig, **options)
def get_services_status(self, context):
services = objects.service.Service.list(context)
active_s = objects.service.ServiceStatus.ACTIVE
failed_s = objects.service.ServiceStatus.FAILED
for service in services:
result = self.get_service_status(context, service.id)
if service.id not in self.services_status:
self.services_status[service.id] = result
continue
if self.services_status[service.id] != result:
self.services_status[service.id] = result
notifications.service.send_service_update(context, service,
state=result)
if (result == failed_s) and (
service.name == 'watcher-decision-engine'):
audit_filters = {
'audit_type': objects.audit.AuditType.CONTINUOUS.value,
'state': objects.audit.State.ONGOING,
'hostname': service.host
}
ongoing_audits = objects.Audit.list(
context,
filters=audit_filters,
eager=True)
alive_services = [
s.host for s in services
if (self.services_status[s.id] == active_s and
s.name == 'watcher-decision-engine')]
round_robin = itertools.cycle(alive_services)
for audit in ongoing_audits:
audit.hostname = round_robin.__next__()
audit.save()
LOG.info('Audit %(audit)s has been migrated to '
'%(host)s since %(failed_host)s is in'
' %(state)s',
{'audit': audit.uuid,
'host': audit.hostname,
'failed_host': service.host,
'state': failed_s})
def get_service_status(self, context, service_id):
service = objects.Service.get(context, service_id)
last_heartbeat = (service.last_seen_up or service.updated_at or
service.created_at)
if isinstance(last_heartbeat, six.string_types):
# NOTE(russellb) If this service came in over rpc via
# conductor, then the timestamp will be a string and needs to be
# converted back to a datetime.
last_heartbeat = timeutils.parse_strtime(last_heartbeat)
else:
# Objects have proper UTC timezones, but the timeutils comparison
# below does not (and will fail)
last_heartbeat = last_heartbeat.replace(tzinfo=None)
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
is_up = abs(elapsed) <= CONF.service_down_time
if not is_up:
LOG.warning('Seems service %(name)s on host %(host)s is down. '
'Last heartbeat was %(lhb)s. Elapsed time is %(el)s',
{'name': service.name,
'host': service.host,
'lhb': str(last_heartbeat), 'el': str(elapsed)})
return objects.service.ServiceStatus.FAILED
return objects.service.ServiceStatus.ACTIVE
def start(self):
"""Start service."""
context = watcher_context.make_context(is_admin=True)
self.add_job(self.get_services_status, name='service_status',
trigger='interval', jobstore='default', args=[context],
next_run_time=datetime.datetime.now(),
seconds=CONF.periodic_interval)
super(APISchedulingService, self).start()
def stop(self):
"""Stop service."""
self.shutdown()
def wait(self):
"""Wait for service to complete."""
def reset(self):
"""Reset service.
Called in case service running in daemon mode receives SIGHUP.
"""
|
python
|
"""Utility code for facilitating collection of code coverage when running tests."""
from __future__ import annotations
import atexit
import os
import tempfile
import typing as t
from .config import (
IntegrationConfig,
SanityConfig,
TestConfig,
)
from .io import (
write_text_file,
make_dirs,
)
from .util import (
COVERAGE_CONFIG_NAME,
remove_tree,
sanitize_host_name,
)
from .data import (
data_context,
)
from .util_common import (
intercept_python,
ResultType,
)
from .host_configs import (
DockerConfig,
HostConfig,
OriginConfig,
PosixRemoteConfig,
PosixSshConfig,
PythonConfig,
)
def cover_python(
args, # type: TestConfig
python, # type: PythonConfig
cmd, # type: t.List[str]
target_name, # type: str
env, # type: t.Dict[str, str]
capture=False, # type: bool
data=None, # type: t.Optional[str]
cwd=None, # type: t.Optional[str]
): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]]
"""Run a command while collecting Python code coverage."""
if args.coverage:
env.update(get_coverage_environment(args, target_name, python.version))
return intercept_python(args, python, cmd, env, capture, data, cwd)
def get_coverage_platform(config): # type: (HostConfig) -> str
"""Return the platform label for the given host config."""
if isinstance(config, PosixRemoteConfig):
platform = f'remote-{sanitize_host_name(config.name)}'
elif isinstance(config, DockerConfig):
platform = f'docker-{sanitize_host_name(config.name)}'
elif isinstance(config, PosixSshConfig):
platform = f'ssh-{sanitize_host_name(config.host)}'
elif isinstance(config, OriginConfig):
platform = 'origin' # previous versions of ansible-test used "local-{python_version}"
else:
raise NotImplementedError(f'Coverage platform label not defined for type: {type(config)}')
return platform
def get_coverage_environment(
args, # type: TestConfig
target_name, # type: str
version, # type: str
): # type: (...) -> t.Dict[str, str]
"""Return environment variables needed to collect code coverage."""
# unit tests, sanity tests and other special cases (localhost only)
# config is in a temporary directory
# results are in the source tree
config_file = get_coverage_config(args)
coverage_name = '='.join((args.command, target_name, get_coverage_platform(args.controller), f'python-{version}', 'coverage'))
coverage_dir = os.path.join(data_context().content.root, data_context().content.results_path, ResultType.COVERAGE.name)
coverage_file = os.path.join(coverage_dir, coverage_name)
make_dirs(coverage_dir)
if args.coverage_check:
# cause the 'coverage' module to be found, but not imported or enabled
coverage_file = ''
# Enable code coverage collection on local Python programs (this does not include Ansible modules).
# Used by the injectors to support code coverage.
# Used by the pytest unit test plugin to support code coverage.
# The COVERAGE_FILE variable is also used directly by the 'coverage' module.
env = dict(
COVERAGE_CONF=config_file,
COVERAGE_FILE=coverage_file,
)
return env
def get_coverage_config(args): # type: (TestConfig) -> str
"""Return the path to the coverage config, creating the config if it does not already exist."""
try:
return get_coverage_config.path
except AttributeError:
pass
coverage_config = generate_coverage_config(args)
if args.explain:
temp_dir = '/tmp/coverage-temp-dir'
else:
temp_dir = tempfile.mkdtemp()
atexit.register(lambda: remove_tree(temp_dir))
path = get_coverage_config.path = os.path.join(temp_dir, COVERAGE_CONFIG_NAME)
if not args.explain:
write_text_file(path, coverage_config)
return path
def generate_coverage_config(args): # type: (TestConfig) -> str
"""Generate code coverage configuration for tests."""
if data_context().content.collection:
coverage_config = generate_collection_coverage_config(args)
else:
coverage_config = generate_ansible_coverage_config()
return coverage_config
def generate_ansible_coverage_config(): # type: () -> str
"""Generate code coverage configuration for Ansible tests."""
coverage_config = '''
[run]
branch = True
concurrency = multiprocessing
parallel = True
omit =
*/python*/dist-packages/*
*/python*/site-packages/*
*/python*/distutils/*
*/pyshared/*
*/pytest
*/AnsiballZ_*.py
*/test/results/*
'''
return coverage_config
def generate_collection_coverage_config(args): # type: (TestConfig) -> str
"""Generate code coverage configuration for Ansible Collection tests."""
coverage_config = '''
[run]
branch = True
concurrency = multiprocessing
parallel = True
disable_warnings =
no-data-collected
'''
if isinstance(args, IntegrationConfig):
coverage_config += '''
include =
%s/*
*/%s/*
''' % (data_context().content.root, data_context().content.collection.directory)
elif isinstance(args, SanityConfig):
# temporary work-around for import sanity test
coverage_config += '''
include =
%s/*
omit =
%s/*
''' % (data_context().content.root, os.path.join(data_context().content.root, data_context().content.results_path))
else:
coverage_config += '''
include =
%s/*
''' % data_context().content.root
return coverage_config
|
python
|
import argparse
p = argparse.ArgumentParser()
p.add_argument("--foo", action="store_true")
args = p.parse_args()
print(args.foo)
|
python
|
#!/usr/bin/env python3
""" Tests for processing microsegment data """
# Import code to be tested
import mseg as rm
# Import needed packages
import unittest
import re
import numpy as np
import os
import itertools
# Skip this test if running on Travis-CI and print the given skip statement
@unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
'External File Dependency Unavailable on Travis-CI')
class ResidentialDataIntegrityTest(unittest.TestCase):
""" Tests the imported residential equipment energy use data from
EIA to confirm that the data are in the expected order and that the
consumption and equipment stock data have the required names """
def setUp(self):
# Open the EIA data file for use by all tests
f = open(rm.EIAData().res_energy, 'r')
# Read in header line
self.header = f.readline()
f.close() # Close data file
# The function that parses and assigns the data from the EIA data
# to the JSON file expects housing stock data with specific
# header; test for the presence of that header
def test_for_presence_of_housing_stock_column(self):
chk_eqstock = re.search('HOUSEHOLDS', self.header, re.IGNORECASE)
        self.assertTrue(chk_eqstock, msg='In a case-insensitive ' +
                        'search, the HOUSEHOLDS column header was not ' +
                        'found in the EIA data file.')
# The function that parses and assigns the data from the EIA data
# to the JSON file expects consumption data with specific header;
# test for the presence of that header
def test_for_presence_of_consumption_column(self):
chk_consumption = re.search('CONSUMPTION', self.header, re.IGNORECASE)
        self.assertTrue(chk_consumption, msg='In a case-insensitive ' +
                        'search, the CONSUMPTION column header was not ' +
                        'found in the EIA data file.')
# The function that parses and assigns the data from the EIA data
# to the JSON file expects equipment stock data with specific
# header; test for the presence of that header
def test_for_presence_of_equipment_stock_column(self):
chk_eqstock = re.search('EQSTOCK', self.header, re.IGNORECASE)
        self.assertTrue(chk_eqstock, msg='In a case-insensitive ' +
                        'search, the EQSTOCK column header was not ' +
                        'found in the EIA data file.')
# The function that parses and assigns the data from the EIA data
# to the JSON file expects bulb type data with specific
# header; test for the presence of that header
def test_for_presence_of_bulb_type_column(self):
chk_eqstock = re.search('BULBTYPE', self.header, re.IGNORECASE)
        self.assertTrue(chk_eqstock, msg='In a case-insensitive ' +
                        'search, the BULBTYPE column header was not ' +
                        'found in the EIA data file.')
# Test for the order of the headers in the EIA data file
def test_order_of_columns_in_header_line(self):
# Define a regex for the expected order of the columns of data
# (formatting of regex takes advantage of string concatenation
# inside parentheses)
expectregex = (r'\w*[EU]\w*[,\s]+'
r'\w*[CD]\w*[,\s]+'
r'\w*[BG]\w*[,\s]+'
r'\w*[FL]\w*[,\s]+'
r'\w*[EQ]\w*[,\s]+'
r'\w*[YR]\w*[,\s]+'
r'\w*[ST]\w*[,\s]+'
r'\w*[CNS]\w*[,\s]+'
r'\w*[HS]\w*[,\s]+'
r'\w*[BL]\w*')
# Check for a match between the defined regex and the header line
match = re.search(expectregex, self.header, re.IGNORECASE)
# If there is no match, print the header line
if not match:
print("Header Line: " + self.header)
# Run assertTrue to check for match and complete unit test
        self.assertTrue(match, msg="Column headers in the EIA data file " +
                        "are different than expected")
class JSONTranslatorTest(unittest.TestCase):
""" Test conversion of lists of strings from JSON file into
restructured lists corresponding to the codes used by EIA in the
residential microsegment text file """
# Define example filters for each of the data cases present in
# the JSON (and handled by the json_translator function)
ok_filters = [['pacific', 'multi family home', 'natural gas',
'heating', 'demand', 'ground'],
['new england', 'mobile home', 'electricity',
'cooling', 'demand', 'people gain'],
['mid atlantic', 'single family home', 'electricity',
'cooling', 'supply', 'room AC'],
['west south central', 'mobile home', 'electricity',
'TVs', 'set top box'],
['east north central', 'mobile home', 'electricity',
'lighting', 'general service (LED)'],
['west north central', 'mobile home', 'other fuel',
'heating', 'supply', 'resistance'],
['south atlantic', 'multi family home', 'distillate',
'secondary heating', 'demand', 'windows solar'],
['new england', 'single family home', 'other fuel',
'secondary heating', 'supply', 'secondary heating (coal)'],
['new england', 'single family home', 'natural gas',
'water heating'],
['new england', 'single family home',
'total square footage'],
['new england', 'single family home', 'other fuel',
'secondary heating', 'secondary heating (kerosene)',
'demand', 'windows conduction'],
['new england', 'single family home', 'new homes'],
['new england', 'single family home', 'total homes'],
['west south central', 'mobile home', 'electricity',
'TVs', 'TV']]
# Define nonsense filter examples (combinations of building types,
# end uses, etc. that are not possible and thus wouldn't appear in
# the microsegments JSON)
nonsense_filters = [['west north central', 'mobile home', 'natural gas',
'lighting', 'room AC'],
['new england', 'single family home',
'electricity (on site)', 'cooling', 'supply',
'room AC'],
['new england', 'single family home',
'electricity', 'refrigeration',
'linear fluorescent (T-8)'],
['new england', 'single family home', 'natural gas',
'water heating', 'general service (incandescent)']
]
# Define example filters that do not have information in the
# correct order to be prepared using json_translator and should
# raise an error or exception
fail_filters = [['west north central', 'cooking', 'natural gas',
'drying'],
['pacific', 'multi family home', 'electricity',
'computers', 'video game consoles'],
['the moon', 'mobile home', 'distillate',
'heating', 'supply', 'boiler (distillate)'],
['mountain', 'multi family home', 'natural gas',
'resistance'],
['mid atlantic', 'mobile home', 'distillate',
'TVs', 'monitors'],
['mid atlantic', 'mobile home', 'electricity',
'TVs', 'antennas'],
['west north central', 'mobile home',
'electricity', 'cooling', 'supply',
'windows solar'],
['west north central', 'mobile home',
'heating', 'electricity', 'demand', 'room AC'],
['mountain', 'mobile home', 'sq ft'],
['west north central', 'mobile home',
'total square footage',
'water heating', 'room AC'],
['new england', 'single family home', 'other fuel',
'secondary heating', 'supply',
'windows conduction'],
['new england', 'single family home', 'other fuel',
'secondary heating', 'demand',
'secondary heating (coal)'],
['west north central', 'mobile home', 'new homes',
'water heating', 'room AC'],
['west north central', 'mobile home', 'total homes',
'water heating', 'room AC']
]
# Define what json_translator should produce for the given filters;
# this part is critically important, as these tuples and/or lists
# will be used by later functions to extract data from the imported
# data files
ok_out = [[['HT', 9, 2, 'GS'], 'GRND'],
[['CL', 1, 3, 'EL'], 'PEOPLE'],
[['CL', 2, 1, 'EL', 'ROOM_AIR'], ''],
[['STB', 7, 3, 'EL'], ''],
[['LT', 3, 3, 'EL', ('GSL', 'LED')], ''],
[['HT', 4, 3, ('LG', 'KS', 'CL', 'GE', 'WD'),
'GE2'], ''],
[['SH', 5, 2, 'DS'], 'WIND_SOL'],
[['SH', 1, 1, ('LG', 'KS', 'CL', 'GE', 'WD'),
'CL'], ''],
[['HW', 1, 1, 'GS'], ''],
[['SQ', 1, 1], ''],
[['SH', 1, 1,
('LG', 'KS', 'CL', 'GE', 'WD')], 'WIND_COND'],
[['HS', 1, 1], ''],
[['HT', 1, 1, 'EL', 'ELEC_RAD'], ''],
[['TVS', 7, 3, 'EL'], '']]
nonsense_out = [[['LT', 4, 3, 'GS', 'ROOM_AIR'], ''],
[['CL', 1, 1, 'SL', 'ROOM_AIR'], ''],
[['RF', 1, 1, 'EL', ('LFL', 'T-8')], ''],
[['HW', 1, 1, 'GS', ('GSL', 'Inc')], '']]
# Test filters that have expected technology definitions and should match
def test_ok_filters(self):
for idx, afilter in enumerate(self.ok_filters):
self.assertEqual(rm.json_translator(rm.res_dictlist, afilter),
self.ok_out[idx])
# Test filters that have nonsensical technology definitions but
# should nonetheless match
def test_nonsense_filters(self):
for idx, afilter in enumerate(self.nonsense_filters):
self.assertEqual(rm.json_translator(rm.res_dictlist, afilter),
self.nonsense_out[idx])
# Test that filters that don't conform to the structure of the
# dicts or the expected order of data raise an error or exception
def test_fail_filters(self):
for afilter in self.fail_filters:
with self.assertRaises(KeyError):
rm.json_translator(rm.res_dictlist, afilter)
class NumpyArrayReductionTest(unittest.TestCase):
""" Test the operation of the txt_parser function to verify row
selection or deletion operations produce the expected output """
# Define sample structured array with the same form as the
# EIA data and that includes some of the rows to be removed
EIA_nrg_stock = np.array([
('HT', 1, 1, 'EL', 'ELEC_RAD', 2010, 126007.0, 1452680, 3, ''),
('HT', 1, 1, 'EL', 'ELEC_RAD', 2011, 125784.0, 1577350, 4, ''),
('HT', 1, 1, 'EL', 'ELEC_RAD', 2012, 125386.0, 1324963, 5, ''),
('HT', 1, 1, 'EL', 'ELEC_HP', 2010, 126007.0, 1452680, -1, ''),
('HT', 1, 1, 'EL', 'ELEC_HP', 2011, 125784.0, 1577350, -1, ''),
('HT', 1, 1, 'EL', 'ELEC_HP', 2012, 125386.0, 1324963, -1, ''),
('HT', 1, 1, 'GS', 'NGHP', 2010, 126007.0, 1452680, 11, ''),
('HT', 1, 1, 'GS', 'NGHP', 2011, 125784.0, 1577350, 12, ''),
('HT', 1, 1, 'GS', 'NGHP', 2012, 125386.0, 1324963, 13, ''),
('HT', 2, 3, 'KS', 'KERO_FA', 2010, 155340.0, 5955503, -1, ''),
('HT', 2, 3, 'KS', 'KERO_FA', 2011, 151349.0, 5550354, -1, ''),
('HT', 2, 3, 'KS', 'KERO_FA', 2012, 147470.0, 4490571, -1, ''),
('HT', 9, 1, 'EL', 'ELEC_RAD', 2010, 126007.0, 1452680, 3, ''),
('HT', 9, 1, 'EL', 'ELEC_RAD', 2011, 125784.0, 1577350, 4, ''),
('HT', 9, 1, 'EL', 'ELEC_RAD', 2012, 125386.0, 1324963, 5, ''),
('HT', 9, 1, 'GS', 'NGHP', 2010, 126007.0, 1452680, 11, ''),
('HT', 9, 1, 'GS', 'NGHP', 2011, 125784.0, 1577350, 12, ''),
('HT', 9, 1, 'GS', 'NGHP', 2012, 125386.0, 1324963, 13, ''),
('HT', 9, 3, 'KS', 'KERO_FA', 2010, 155340.0, 5955503, -1, ''),
('HT', 9, 3, 'KS', 'KERO_FA', 2011, 151349.0, 5550354, -1, ''),
('HT', 9, 3, 'KS', 'KERO_FA', 2012, 147470.0, 4490571, -1, ''),
('HT', 9, 2, 'GS', 'NG_RAD', 2010, 1, 3, -1, ''),
('HT', 9, 2, 'GS', 'NG_RAD', 2011, 2, 2, -1, ''),
('HT', 9, 2, 'GS', 'NG_RAD', 2012, 3, 1, -1, ''),
('HT', 9, 2, 'GS', 'NG_FA', 2010, 11, 13, -1, ''),
('HT', 9, 2, 'GS', 'NG_FA', 2011, 12, 12, -1, ''),
('HT', 9, 2, 'GS', 'NG_FA', 2012, 13, 11, -1, ''),
('CL', 1, 1, 'EL', 'GEO_HP', 2010, 126007.0, 1452680, -1, ''),
('CL', 1, 1, 'EL', 'GEO_HP', 2011, 125784.0, 1577350, -1, ''),
('CL', 1, 1, 'EL', 'GEO_HP', 2012, 125386.0, 1324963, -1, ''),
('CL', 5, 3, 'EL', 'GEO_HP', 2010, 126007.0, 1452680, -1, ''),
('CL', 5, 3, 'EL', 'GEO_HP', 2011, 125784.0, 1577350, -1, ''),
('CL', 5, 3, 'EL', 'GEO_HP', 2012, 125386.0, 1324963, -1, ''),
('CL', 2, 1, 'EL', 'ELEC_HP', 2010, 126007.0, 1452680, -1, ''),
('CL', 2, 1, 'EL', 'ELEC_HP', 2011, 125784.0, 1577350, -1, ''),
('CL', 2, 1, 'EL', 'ELEC_HP', 2012, 125386.0, 1324963, -1, ''),
('DW', 2, 1, 'EL', 'DS_WASH', 2010, 6423576.0, 9417809, -1, ''),
('DW', 2, 1, 'EL', 'DS_WASH', 2011, 6466014.0, 9387396, -1, ''),
('DW', 2, 1, 'EL', 'DS_WASH', 2012, 6513706.0, 9386813, -1, ''),
('DW', 2, 2, 'EL', 'DS_WASH', 2010, 6423576.0, 9417809, -1, ''),
('DW', 2, 2, 'EL', 'DS_WASH', 2011, 6466014.0, 9387396, -1, ''),
('DW', 2, 2, 'EL', 'DS_WASH', 2012, 6513706.0, 9386813, -1, ''),
('HW', 7, 3, 'GS', 'NG_WH', 2010, 104401.0, 1897629, -1, ''),
('HW', 7, 3, 'GS', 'NG_WH', 2011, 101793.0, 1875027, -1, ''),
('HW', 7, 3, 'GS', 'NG_WH', 2012, 99374.0, 1848448, -1, ''),
('SF', 8, 1, 'EL', 'ELEC_RAD', 2011, 78.0, 0, -1, ''),
('SF', 8, 1, 'EL', 'ELEC_HP', 2011, 6.0, 0, -1, ''),
('SF', 8, 1, 'GS', 'NG_FA', 2011, 0.0, 0, -1, ''),
('SF', 9, 1, 'EL', 'ELEC_RAD', 2011, 78.0, 0, -1, ''),
('SF', 9, 1, 'EL', 'ELEC_HP', 2011, 6.0, 0, -1, ''),
('SF', 9, 1, 'GS', 'NG_FA', 2011, 0.0, 0, -1, ''),
('ST', 3, 1, 'EL', 'ELEC_RAD', 2011, 0.0, 0, -1, ''),
('ST', 3, 1, 'EL', 'ELEC_HP', 2011, 3569.0, 0, -1, ''),
('ST', 4, 2, 'GS', 'NG_FA', 2011, 3463.0, 0, -1, ''),
('ST', 4, 2, 'GS', 'NG_FA', 2012, 0.0, 0, -1, ''),
('ST', 4, 2, 'GS', 'NG_FA', 2013, 3569.0, 0, -1, ''),
('ST', 3, 2, 'GS', 'NG_FA', 2009, 3463.0, 0, -1, ''),
('SQ', 2, 2, 0, 0, 2010, 2262.0, 3, 8245, ''),
('SQ', 2, 2, 0, 0, 2011, 2262.0, 2, 8246, ''),
('SQ', 2, 2, 0, 0, 2012, 2262.0, 233, 8247, ''),
('SQ', 1, 1, 0, 0, 2025, 232.0, 332, 8245, ''),
('SQ', 1, 1, 0, 0, 2026, 222.0, 232, 825, ''),
('SQ', 1, 1, 0, 0, 2027, 62.0, 332, 845, ''),
('HS', 7, 3, 0, 0, 2012, 3434, 0, -1, ''),
('HS', 7, 3, 0, 0, 2013, 3353, 0, -1, ''),
('HS', 7, 3, 0, 0, 2014, 3242, 0, -1, ''),
('HS', 7, 3, 0, 0, 2015, 23233, 0, -1, ''),
('HS', 7, 3, 0, 0, 2016, 3666, 0, -1, ''),
('HS', 7, 3, 0, 0, 2017, 34434, 0, -1, ''),
('HS', 7, 3, 0, 0, 2018, 3868, 0, -1, ''),
('HS', 3, 1, 0, 0, 2010, 266, 0, -1, ''),
('HS', 3, 1, 0, 0, 2011, 665, 0, -1, ''),
('HS', 3, 1, 0, 0, 2012, 66, 0, -1, ''),
('HS', 3, 1, 0, 0, 2013, 26, 0, -1, ''),
('HS', 3, 1, 0, 0, 2014, 2665, 0, -1, '')],
dtype=[('ENDUSE', '<U50'), ('CDIV', '<i4'), ('BLDG', '<i4'),
('FUEL', '<U50'), ('EQPCLASS', '<U50'), ('YEAR', '<i4'),
('EQSTOCK', '<f8'), ('CONSUMPTION', '<i4'),
('HOUSEHOLDS', '<i4'), ('BULB TYPE', '<U50')])
# Define filter to select a subset of the sample EIA supply data
EIA_nrg_stock_filter = [
[['DW', 2, 1, 'EL', 'DS_WASH'], ''],
[['HT', 1, 1, 'EL', 'ELEC_RAD'], ''],
[['HT', 2, 3, 'KS', 'KERO_FA'], ''],
[['CL', 1, 1, 'EL', 'GEO_HP'], ''],
[['HT', 9, 2, 'GS', 'NG_RAD'], '']]
# Set up selected data from EIA sample array as the basis for comparison
EIA_nrg_stock_out = [
({"2010": 9417809, "2011": 9387396, "2012": 9386813},
{"2010": 6423576, "2011": 6466014, "2012": 6513706}),
({"2010": 1452680, "2011": 1577350, "2012": 1324963},
{"2010": 126007.0, "2011": 125784.0, "2012": 125386.0}),
({"2010": 5955503, "2011": 5550354, "2012": 4490571},
{"2010": 155340.0, "2011": 151349.0, "2012": 147470.0}),
({"2010": 1452680, "2011": 1577350, "2012": 1324963},
{"2010": 126007, "2011": 125784, "2012": 125386}),
({"2010": 3, "2011": 2, "2012": 1},
{"2010": 1, "2011": 2, "2012": 3})]
# Define filter to select square footage subset of sample EIA supply data
EIA_sqft_homes_filter = [[['SQ', 2, 2], ''],
[['HT', 1, 1, 'EL', 'ELEC_RAD'], ''],
[['HS', 3, 1], ''],
]
# Set up selected data from EIA sample array as the basis for comparison
EIA_sqft_homes_out = [
{"2010": 8245, "2011": 8246, "2012": 8247},
{"2010": 3, "2011": 4, "2012": 5},
{"2010": 266, "2011": 665, "2012": 66, "2013": 26, "2014": 2665}]
# Define sample structured array comparable in form to the thermal
# loads data (note that the numeric data here do not represent
# realistic values for these data)
tloads_example = np.array([
('HT', 1, 1, 394.8, 0.28, 0.08, 0.08, 0.25, 0.38, -0.02, 0.22, -0.12),
('CL', 1, 1, 394.8, -0.01, 0.51, 0.10, 0.15, 0.14, 0.03, -0.12, 0.19),
('HT', 2, 1, 813.3, 0.29, -0.07, 0.10, 0.24, 0.38, 0.01, 0.20, -0.13),
('CL', 2, 1, 813.3, -0.01, 0.44, 0.12, 0.14, 0.14, 0.03, -0.09, 0.19),
('HT', 3, 2, 409.5, 0.27, -0.06, 0.23, 0.21, 0.48, 0.05, 0.13, -0.23),
('CL', 3, 2, 409.5, -0.02, 0.34, 0.13, 0.06, 0.09, 0.13, -0.16, 0.41),
('HT', 4, 2, 104.8, 0.29, 0.07, 0.23, 0.23, 0.44, -0.05, 0.17, -0.25),
('CL', 4, 2, 104.8, 0.00, 0.31, 0.09, 0.09, 0.13, 0.11, -0.11, 0.37),
('HT', 5, 3, 140.9, 0.44, -0.13, 0.11, 0.25, 0.33, -0.02, 0.16, 0.16),
('CL', 5, 3, 140.9, 0.00, 0.40, 0.12, 0.11, 0.14, 0.04, -0.03, 0.20),
('HT', 6, 3, 684.1, 0.47, 0.14, 0.18, 0.26, 0.39, -0.03, 0.07, -0.21),
('CL', 6, 3, 684.1, -0.01, 0.37, 0.14, 0.09, 0.14, 0.04, 0.02, 0.23)],
dtype=[('ENDUSE', '<U50'), ('CDIV', '<i4'), ('BLDG', '<i4'),
('NBLDGS', '<f8'), ('WIND_COND', '<f8'), ('WIND_SOL', '<f8'),
('ROOF', '<f8'), ('WALL', '<f8'), ('INFIL', '<f8'),
('PEOPLE', '<f8'), ('GRND', '<f8'), ('EQUIP', '<f8')])
# Specify filter to select thermal load data
tl_flt = [['HT', 3, 2, 'GS'], 'GRND']
# Set up selected data from thermal loads sample array
tloads_sample = 0.13
# Test restructuring of EIA data into stock and consumption lists
# using the EIA_Supply option to confirm that both the reported
# data and the reduced array with the remaining data are correct
def test_recording_of_EIA_data_tech(self):
for n in range(0, len(self.EIA_nrg_stock_filter)):
(a, b) = rm.nrg_stock_select(self.EIA_nrg_stock,
self.EIA_nrg_stock_filter[n])
# Compare equipment stock
self.assertEqual(a, self.EIA_nrg_stock_out[n][0])
# Compare consumption
self.assertEqual(b, self.EIA_nrg_stock_out[n][1])
# Test restructuring of EIA data into a square footage list, confirming
# that both the reported data and the reduced array with the remaining
# data are correct
# TEMP - this should also test home count numbers (and comments
# and variables names should reflect that)
def test_recording_of_EIA_data_sqft_homes(self):
for n in range(0, len(self.EIA_sqft_homes_filter)):
a = rm.sqft_homes_select(self.EIA_nrg_stock,
self.EIA_sqft_homes_filter[n])
# Compare square footage
self.assertEqual(a, self.EIA_sqft_homes_out[n])
# Test extraction of the correct value from the thermal load
# components data
def test_recording_of_thermal_loads_data(self):
self.assertEqual(rm.thermal_load_select(self.tloads_example,
self.tl_flt),
self.tloads_sample)
class DataToListFormatTest(unittest.TestCase):
""" Test operation of list_generator function (create dummy inputs
and test against established outputs) """
# Define sample AEO time horizon for this test
aeo_years = 2
# Define a sample set of stock/energy data
nrg_stock = [('HT', 1, 1, 'EL', 'ELEC_RAD', 2010, 0, 1, 3, ''),
('HT', 1, 1, 'EL', 'ELEC_RAD', 2011, 0, 1, 4, ''),
('HT', 2, 1, 'GS', 'NG_FA', 2010, 2, 3, -1, ''),
('HT', 2, 1, 'GS', 'NG_FA', 2011, 2, 3, -1, ''),
('HT', 2, 1, 'GS', 'NG_RAD', 2010, 4, 5, -1, ''),
('HT', 2, 1, 'GS', 'NG_RAD', 2011, 4, 5, -1, ''),
('CL', 2, 3, 'GS', 'NG_HP', 2010, 6, 7, -1, ''),
('CL', 2, 3, 'GS', 'NG_HP', 2011, 6, 7, -1, ''),
('CL', 1, 3, 'GS', 'NG_HP', 2010, 8, 9, -1, ''),
('CL', 1, 3, 'GS', 'NG_HP', 2011, 8, 9, -1, ''),
('SH', 1, 1, 'EL', 'EL', 2010, 10, 11, -1, ''),
('SH', 1, 1, 'EL', 'EL', 2011, 10, 11, -1, ''),
('SH', 1, 1, 'GS', 'GS', 2010, 12, 13, -1, ''),
('SH', 1, 1, 'GS', 'GS', 2011, 12, 13, -1, ''),
# ('OA ', 1, 1, 'EL', 'EL', 2010, 14, 15, -1),
# ('OA ', 1, 1, 'EL', 'EL', 2011, 14, 15, -1),
('SH', 2, 1, 'GS', 'GS', 2010, 16, 17, -1, ''),
('SH', 2, 1, 'GS', 'GS', 2011, 16, 17, -1, ''),
('SH', 3, 1, 'EL', 'EL', 2010, 18, 19, -1, ''),
('SH', 3, 1, 'EL', 'EL', 2011, 18, 19, -1, ''),
('SH', 3, 1, 'WD', 'WD', 2010, 20, 21, -1, ''),
('SH', 3, 1, 'WD', 'WD', 2011, 20, 21, -1, ''),
('STB', 1, 1, 'EL', 'TV&R', 2010, 22, 23, -1, ''),
('STB', 1, 1, 'EL', 'TV&R', 2011, 22, 23, -1, ''),
('STB', 1, 2, 'EL', 'TV&R', 2010, 24, 25, -1, ''),
('STB', 1, 2, 'EL', 'TV&R', 2011, 24, 25, -1, ''),
('BAT', 2, 2, 'EL', 'MEL', 2010, 36, 37, -1, ''),
('BAT', 2, 2, 'EL', 'MEL', 2011, 36, 37, -1, ''),
('SQ', 1, 1, 0, 0, 2010, 99, 100, 101, ''),
('SQ', 1, 1, 0, 0, 2011, 99, 100, 101, ''),
('LT', 1, 1, 'EL', 'GSL', 2010, 102, 0, -1, 'LED'),
('LT', 1, 1, 'EL', 'GSL', 2011, 103, 0, -1, 'LED'),
('LT', 1, 2, 'EL', 'GSL', 2010, 103, 0, -1, 'LED'),
('LT', 1, 1, 'EL', 'GSL', 2010, 179, 104, -1, 'Inc'),
('LT', 1, 1, 'EL', 'GSL', 2011, 176, 104, -1, 'Inc'),
('LT', 1, 1, 'EL', 'EXT', 2010, 103, 104, -1, 'LED'),
('HS', 1, 1, 0, 0, 2010, 299, 0, 0, ''),
('HS', 1, 1, 0, 0, 2011, 299, 0, 0, ''),
('TVS', 1, 1, 'EL', 'TV&R', 2010, 35, 757, -1, ''),
('TVS', 1, 1, 'EL', 'TV&R', 2011, 355., 787, -1, '')]
# Convert stock/energy data into numpy array with column names
nrg_stock_array = np.array(nrg_stock, dtype=[
('ENDUSE', '<U50'), ('CDIV', 'i4'), ('BLDG', 'i4'),
('FUEL', '<U50'), ('EQPCLASS', '<U50'), ('YEAR', 'i4'),
('EQSTOCK', 'i4'), ('CONSUMPTION', 'i4'), ('HOUSEHOLDS', 'i4'),
('BULBTYPE', '<U50')])
# Define a sample set of thermal load components data
loads_data = [('CL', 2, 3, 100, -0.25, 0.25, 0, 0, 0.25, 0, 0.5, 0),
('CL', 1, 2, 200, -0.1, 0.1, 0, 0, 0.4, 0, 0.6, 0),
('HT', 2, 3, 300, -0.5, 0.5, 0, 0, 0.5, 0, 0.5, 0),
('HT', 2, 1, 400, -0.75, 0.5, 0, 0, 0.25, 0, 1, 0),
('HT', 1, 1, 300, -0.2, 0.1, 0, 0.4, 0.1, 0.3, 0.3, 0),
('CL', 1, 1, 400, -0.3, 0.5, 0.1, 0.1, 0.2, 0, 0.4, 0)]
# Convert thermal loads data into numpy array with column names
loads_array = np.array(loads_data, dtype=[('ENDUSE', '<U50'),
('CDIV', 'i4'),
('BLDG', 'i4'),
('NBLDGS', 'f8'),
('WIND_COND', 'f8'),
('WIND_SOL', 'f8'),
('ROOF', 'f8'),
('WALL', 'f8'),
('INFIL', 'f8'),
('PEOPLE', 'f8'),
('GRND', 'f8'),
('EQUIP', 'f8')])
# Define a set of filters that should yield matched microsegment
# stock/energy data
ok_filters = [['new england', 'single family home',
'electricity', 'heating', 'supply',
'resistance heat'],
['new england', 'single family home',
'electricity', 'secondary heating',
'supply', 'non-specific'],
['new england', 'single family home',
'natural gas', 'secondary heating', 'supply',
'non-specific'],
['east north central', 'single family home',
'electricity', 'secondary heating', 'supply',
'non-specific'],
['new england', 'single family home',
'electricity', 'TVs', 'set top box'],
['new england', 'multi family home',
'electricity', 'TVs', 'set top box'],
['mid atlantic', 'multi family home',
'electricity', 'other (grid electric)',
'other MELs'],
['new england', 'single family home',
'electricity', 'heating',
'demand', 'ground'],
['mid atlantic', 'single family home',
'natural gas', 'heating', 'demand',
'windows conduction'],
['mid atlantic', 'mobile home',
'natural gas', 'cooling', 'demand',
'windows solar'],
['new england', 'single family home',
'total square footage'],
['east north central', 'single family home',
'other fuel', 'secondary heating', 'supply',
'secondary heating (wood)'],
['new england', 'single family home',
'electricity', 'lighting',
'general service (LED)'],
['new england', 'single family home', 'new homes'],
['new england', 'single family home', 'total homes'],
['new england', 'single family home',
'electricity', 'TVs', 'TV'],
['new england', 'single family home',
'electricity', 'lighting',
'general service (incandescent)']]
# Define a set of filters that should yield zeros for stock/energy
# data because they do not make sense
nonsense_filters = [['mid atlantic', 'mobile home', 'natural gas',
'heating', 'room AC'],
['pacific', 'single family home',
'electricity (on site)', 'water heating', 'solar WH'],
['new england', 'single family home',
'distillate', 'TVs',
'set top box']]
# Define a set of filters that should raise an error because certain
# filter elements do not have any match in the microsegment dict keys
fail_filters = [['the moon', 'single family home',
'electricity', 'heating', 'supply',
'resistance heat'],
['new england', 'single family cave',
'natural gas', 'secondary heating'],
['new england', 'mobile home',
'human locomotion', 'lighting', 'reflector'],
['south atlantic', 'single family home',
'distillate', 'secondary heating', 'supply',
'portable heater'],
['mid atlantic', 'mobile home',
'electricity', 'heating',
'supply', 'boiler (wood fired)'],
['east north central', 'multi family home',
'natural gas', 'cooling', 'demand', 'windows frames'],
['pacific', 'multi family home', 'electricity',
'other (grid electric)', 'beer cooler'],
['pacific', 'multi home', 'total square footage'],
['pacific', 'multi family home', 'square foot'],
['mid atlantic', 'mobile home', 'renewables',
'water heating', 'solar WH'],
['east north central', 'single family home',
'other fuel', 'secondary heating', 'demand',
'secondary heating (wood)'],
['pacific', 'multi family home', 'total square footage',
'natural gas', 'water heating'],
['pacific', 'multi family home', 'new homes',
'natural gas', 'water heating'],
['pacific', 'multi family home', 'total homes',
'natural gas', 'water heating']]
# Define array of lighting weighting factors expected to be output
# by the function under test
lt_factor_expected = np.array([
(1, 1, 'GSL', 'Inc', '2010', 0.91477392),
(1, 1, 'GSL', 'Inc', '2011', 0.90574519),
(1, 2, 'GSL', 'Inc', '2010', 0.82494111),
(1, 2, 'GSL', 'Inc', '2011', 0.81193743),
(3, 1, 'GSL', 'Inc', '2010', 0.87472457),
(3, 1, 'GSL', 'Inc', '2011', 0.86130072),
(3, 2, 'GSL', 'Inc', '2010', 0.87774796),
(3, 2, 'GSL', 'Inc', '2011', 0.87285453),
(1, 1, 'GSL', 'LED', '2010', 0.08522608),
(1, 1, 'GSL', 'LED', '2011', 0.09425481),
(1, 2, 'GSL', 'LED', '2010', 0.17505889),
(1, 2, 'GSL', 'LED', '2011', 0.18806257),
(3, 1, 'GSL', 'LED', '2010', 0.12527543),
(3, 1, 'GSL', 'LED', '2011', 0.13869928),
(3, 2, 'GSL', 'LED', '2010', 0.12225204),
(3, 2, 'GSL', 'LED', '2011', 0.12714547),
(1, 1, 'LFL', 'T12', '2010', 0.41604117),
(1, 1, 'LFL', 'T12', '2011', 0.41195155),
(1, 2, 'LFL', 'T12', '2010', 0.29310758),
(1, 2, 'LFL', 'T12', '2011', 0.28591929),
(3, 1, 'LFL', 'T12', '2010', 0.43981098),
(3, 1, 'LFL', 'T12', '2011', 0.43168629),
(3, 2, 'LFL', 'T12', '2010', 0.30942591),
(3, 2, 'LFL', 'T12', '2011', 0.30073385),
(1, 1, 'LFL', 'T-8', '2010', 0.33771134),
(1, 1, 'LFL', 'T-8', '2011', 0.33900962),
(1, 2, 'LFL', 'T-8', '2010', 0.40173711),
(1, 2, 'LFL', 'T-8', '2011', 0.40616518),
(3, 1, 'LFL', 'T-8', '2010', 0.30517175),
(3, 1, 'LFL', 'T-8', '2011', 0.30744705),
(3, 2, 'LFL', 'T-8', '2010', 0.26764825),
(3, 2, 'LFL', 'T-8', '2011', 0.276772),
(1, 1, 'LFL', 'T-5', '2010', 0.24624749),
(1, 1, 'LFL', 'T-5', '2011', 0.24903882),
(1, 2, 'LFL', 'T-5', '2010', 0.30515531),
(1, 2, 'LFL', 'T-5', '2011', 0.30791553),
(3, 1, 'LFL', 'T-5', '2010', 0.25501727),
(3, 1, 'LFL', 'T-5', '2011', 0.26086665),
(3, 2, 'LFL', 'T-5', '2010', 0.42292584),
(3, 2, 'LFL', 'T-5', '2011', 0.42249415)],
dtype=[('CDIV', 'i4'), ('BLDG', 'i4'), ('EQPCLASS', 'U4'),
('BULBTYPE', 'U4'), ('YEAR', 'i4'), ('FACTOR', 'f8')])
# Define the set of outputs that should be yielded by the "ok_filters"
# information above
ok_out = [[{'stock': {"2010": 0, "2011": 0},
'energy': {"2010": 1, "2011": 1}},
nrg_stock_array],
[{'stock': {"2010": 10, "2011": 10},
'energy': {"2010": 11, "2011": 11}},
np.hstack([nrg_stock_array[:10], nrg_stock_array[12:]])],
[{'stock': {"2010": 12, "2011": 12},
'energy': {"2010": 13, "2011": 13}},
np.hstack([nrg_stock_array[:12], nrg_stock_array[14:]])],
[{'stock': {"2010": 18, "2011": 18},
'energy': {"2010": 19, "2011": 19}},
np.hstack([nrg_stock_array[:16], nrg_stock_array[18:]])],
[{'stock': {"2010": 22, "2011": 22},
'energy': {"2010": 23, "2011": 23}},
np.hstack([nrg_stock_array[:20], nrg_stock_array[22:]])],
[{'stock': {"2010": 24, "2011": 24},
'energy': {"2010": 25, "2011": 25}},
np.hstack([nrg_stock_array[:22], nrg_stock_array[24:]])],
[{'stock': {"2010": 36, "2011": 36},
'energy': {"2010": 37, "2011": 37}},
np.hstack([nrg_stock_array[:24], nrg_stock_array[26:]])],
[{'stock': 'NA',
'energy': {"2010": 0.3, "2011": 0.3}},
nrg_stock_array],
[{'stock': 'NA',
'energy': {"2010": -6.0, "2011": -6.0}},
nrg_stock_array],
[{'stock': 'NA',
'energy': {"2010": 1.75, "2011": 1.75}},
nrg_stock_array],
[{"2010": 101, "2011": 101},
np.hstack([nrg_stock_array[0:26], nrg_stock_array[28:]])],
[{'stock': {"2010": 20, "2011": 20},
'energy': {"2010": 21, "2011": 21}},
np.hstack([nrg_stock_array[0:18], nrg_stock_array[20:]])],
[{'stock': {"2010": 102, "2011": 103},
'energy': {"2010": 8.86351232, "2011": 9.8025002399}},
nrg_stock_array],
[{"2010": 299, "2011": 299},
np.hstack([nrg_stock_array[0:-4], nrg_stock_array[-2:]])],
[{"2010": 3, "2011": 4}, nrg_stock_array],
[{'stock': {"2010": 35, "2011": 355},
'energy': {"2010": 757, "2011": 787}}, nrg_stock_array[:-2]],
[{'stock': {'2010': 179, '2011': 176},
'energy': {'2010': 95.13648768, '2011': 94.19749976}},
nrg_stock_array]]
# Define the set of outputs (empty dicts) that should be yielded
# by the "nonsense_filters" given above
nonsense_out = [{'stock': {}, 'energy': {}},
{'stock': {}, 'energy': {}},
{'stock': {}, 'energy': {}}]
def dict_check(self, dict1, dict2, msg=None):
"""Compare two dicts for equality, allowing for floating point error.
"""
# zip() and zip_longest() produce tuples for the items
# identified, where in the case of a dict, the first item
# in the tuple is the key and the second item is the value;
# in the case where the dicts are not of identical size,
# zip_longest() will use the fillvalue created below as a
# substitute in the dict that has missing content; this
# value is given as a tuple to be of comparable structure
# to the normal output from zip_longest()
fill_val = ('substituted entry', 5.2)
# In this structure, k and k2 are the keys that correspond to
# the dicts or unitary values that are found in i and i2,
# respectively, at the current level of the recursive
# exploration of dict1 and dict2, respectively
for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()),
sorted(dict2.items()),
fillvalue=fill_val):
# Confirm that at the current location in the dict structure,
# the keys are equal; this should fail if one of the dicts
# is empty, is missing section(s), or has different key names
self.assertEqual(k, k2)
# If the recursion has not yet reached the terminal/leaf node
if isinstance(i, dict):
# Test that the dicts from the current keys are equal
self.assertCountEqual(i, i2)
# Continue to recursively traverse the dict
self.dict_check(i, i2)
# At the terminal/leaf node
else:
# Compare the values, allowing for floating point inaccuracy
self.assertAlmostEqual(dict1[k], dict2[k2], places=2)
# Test filter that should match and generate stock/energy data
def test_ok_filters(self):
for idx, afilter in enumerate(self.ok_filters):
# Call the function under test and capture its outputs
a = rm.list_generator(self.nrg_stock_array,
self.loads_array,
afilter,
self.aeo_years,
self.lt_factor_expected)
# Check the contents of the output dict
self.dict_check(a, self.ok_out[idx][0])
# Test filters that should match but ultimately do not make sense
def test_nonsense_filters(self):
for idx, afilter in enumerate(self.nonsense_filters):
# Call the function under test and capture its outputs
a = rm.list_generator(self.nrg_stock_array,
self.loads_array,
afilter,
self.aeo_years,
self.lt_factor_expected)
# Check the contents of the output dict
self.assertEqual(a, self.nonsense_out[idx])
# Test filters that should raise an error
def test_fail_filters(self):
for idx, afilter in enumerate(self.fail_filters):
with self.assertRaises(KeyError):
# Expect the function to raise an error with each call
# using the filters supplied from fail_filters
rm.list_generator(self.nrg_stock_array,
self.loads_array,
afilter,
self.aeo_years,
self.lt_factor_expected)
class LightingEfficiencyTablePrepTest(unittest.TestCase):
""" Test the function that restructures the lighting performance
data drawn from the AEO cost, performance, and lifetime file for
residential lighting into a lighting efficiency (inverse of
lighting performance, units of W/lm instead of lm/W) lookup
table for each year and each combination of fixture and bulb type. """
# Array of lighting CPL data with a similar structure to the
# data obtained from the AEO but with fewer years, fewer
# lighting types, and excluding columns that are not used
# by this function (the indicated values are not representative
# of the performance or anticipated performance improvements
# in actual lighting technologies)
lighting_cpl_data = np.array(
[(2011, 2012, 12, 45, 'GSL', 'INC'),
(2013, 2020, 22, 24, 'GSL', 'INC'),
(2011, 2015, 60, 75, 'GSL', 'LED'),
(2016, 2017, 78, 89, 'GSL', 'LED'),
(2018, 2020, 105, 200, 'GSL', 'LED'),
(2011, 2012, 12, 100, 'REF', 'INC'),
(2013, 2020, 99, 99, 'REF', 'INC'),
(2011, 2012, 78, 100, 'REF', 'LED'),
(2013, 2020, 99, 200, 'REF', 'LED'),
(2011, 2014, 30, 60, 'LFL', 'T12'),
(2015, 2017, 44, 75, 'LFL', 'T12'),
(2018, 2020, 56, 100, 'LFL', 'T12'),
(2011, 2016, 41, 35, 'LFL', 'T-8'),
(2017, 2020, 49, 77, 'LFL', 'T-8'),
(2011, 2012, 39, 89, 'LFL', 'T-5'),
(2013, 2013, 57, 91, 'LFL', 'T-5'),
(2014, 2016, 62, 99, 'LFL', 'T-5'),
(2017, 2020, 66, 101, 'LFL', 'T-5')],
dtype=[('FirstYear', 'i4'), ('LastYear', 'i4'), ('lm_per_W', 'i4'),
('Watts', 'i4'), ('Application', 'U8'), ('BulbType', 'U8')])
# Number of years represented in the synthetic CPL data
total_n_years = 10
# Number of lighting types (combinations of fixture and bulb types)
# in the synthetic CPL data
n_lighting_types = 7
# Array of lighting data restructured into a numpy structured
# array for each fixture type and bulb type
lighting_eff_result = np.array(
[('GSL', 'INC', 0.833333333, 0.833333333, 0.731707317, 0.731707317,
0.731707317, 0.78, 0.78, 0.826771654, 0.826771654, 0.826771654),
('GSL', 'LED', 0.166666667, 0.166666667, 0.268292683, 0.268292683,
0.268292683, 0.22, 0.22, 0.173228346, 0.173228346, 0.173228346),
('LFL', 'T12', 0.399849962, 0.399849962, 0.442865264,
0.451349432, 0.359344077, 0.359344077, 0.389920424,
0.334298119, 0.334298119, 0.334298119),
('LFL', 'T-8', 0.292573143, 0.292573143, 0.324047754,
0.330255682, 0.385637546, 0.385637546, 0.350132626,
0.382054993, 0.382054993, 0.382054993),
('LFL', 'T-5', 0.307576894, 0.307576894, 0.233086981,
0.218394886, 0.255018377, 0.255018377, 0.25994695,
0.283646889, 0.283646889, 0.283646889),
('REF', 'INC', 0.8666667, 0.8666667, 0.8918919, 0.8918919, 0.8918919,
0.8918919, 0.8918919, 0.8918919, 0.8918919, 0.8918919),
('REF', 'LED', 0.1333333, 0.1333333, 0.1081081, 0.1081081, 0.1081081,
0.1081081, 0.1081081, 0.1081081, 0.1081081, 0.1081081)],
dtype=[('Application', 'U4'), ('BulbType', 'U4'), ('2011', 'f8'),
('2012', 'f8'), ('2013', 'f8'), ('2014', 'f8'),
('2015', 'f8'), ('2016', 'f8'), ('2017', 'f8'),
('2018', 'f8'), ('2019', 'f8'), ('2020', 'f8')])
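    # These reference shares appear to be consistent with converting each
    # bulb type's efficacy (lm/W) into an efficiency (W/lm) and normalizing
    # within a fixture type and year; e.g., for GSL in 2011 using the
    # synthetic CPL data above:
    #   INC: 1/12 W/lm, LED: 1/60 W/lm
    #   INC share = (1/12) / (1/12 + 1/60) = 0.8333, LED share = 0.1667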
# Test that the lighting performance data is correctly restructured
# into lighting efficiency data for each bulb and fixture type
def test_lighting_efficiency_table_prep(self):
result = rm.lighting_eff_prep(self.lighting_cpl_data,
self.total_n_years,
self.n_lighting_types)
# Extract the numeric entries for matching lighting and bulb
# types from the reference array and test array and compare
# the year values
for row in self.lighting_eff_result:
# Grab the matching row from the function result array
fn_result_row = result[np.all([
result['Application'] == row['Application'],
result['BulbType'] == row['BulbType']], axis=0)]
# Compare the numeric values for each year reported in the
# rows with the matching fixture and bulb types
self.assertTrue(all(
[np.allclose(fn_result_row[name], row[name])
for name in row.dtype.names
if name not in ('Application', 'BulbType')]))
class LightingStockWeightedFactorsTest(unittest.TestCase):
""" Test the function that takes the normalized bulb efficiency
weighting factors and the stock data for each fixture and bulb
type combination and combines them to generate efficiency and
stock weighted multipliers that can be used to split up the
energy use data reported in RESDBOUT only for one bulb type
for each fixture type. """
# Sample input lighting efficiency factor array
lighting_eff_result = np.array(
[('GSL', 'INC', 0.833333333, 0.833333333, 0.731707317),
('GSL', 'LED', 0.166666667, 0.166666667, 0.268292683),
('LFL', 'T12', 0.399849962, 0.399849962, 0.442865264),
('LFL', 'T-8', 0.292573143, 0.292573143, 0.324047754),
('LFL', 'T-5', 0.307576894, 0.307576894, 0.233086981)],
dtype=[('Application', 'U4'), ('BulbType', 'U4'),
('2011', 'f8'), ('2012', 'f8'), ('2013', 'f8')])
# Number of lighting types (combinations of fixture and bulb types)
# in the synthetic CPL data (equal to the number of rows in the
# lighting_eff_result test array)
n_lighting_types = 5
# Number of years represented in the synthetic CPL data and the
# synthetic stock and energy data
total_n_years = 3
# Sample energy and stock data array
nrg_stock_array = np.array([
('LT', 1, 1, 'EL', 'GSL',
2011, 164420.0, 1452680, 0, 'INC'),
('LT', 1, 1, 'EL', 'GSL',
2012, 159428.0, 1577350, 0, 'INC'),
('LT', 1, 1, 'EL', 'GSL',
2013, 153895.0, 1324963, 0, 'INC'),
('LT', 1, 2, 'EL', 'GSL',
2011, 92810.0, 1452680, 0, 'INC'),
('LT', 1, 2, 'EL', 'GSL',
2012, 87534.0, 1577350, 0, 'INC'),
('LT', 1, 2, 'EL', 'GSL',
2013, 83958.0, 1324963, 0, 'INC'),
('LT', 3, 1, 'EL', 'GSL',
2011, 103295.0, 1452680, 0, 'INC'),
('LT', 3, 1, 'EL', 'GSL',
2012, 95567.0, 1577350, 0, 'INC'),
('LT', 3, 1, 'EL', 'GSL',
2013, 89356.0, 1324963, 0, 'INC'),
('LT', 3, 2, 'EL', 'GSL',
2011, 177692.0, 1452680, 0, 'INC'),
('LT', 3, 2, 'EL', 'GSL',
2012, 175438.0, 1577350, 0, 'INC'),
('LT', 3, 2, 'EL', 'GSL',
2013, 172984.0, 1324963, 0, 'INC'),
('LT', 1, 1, 'EL', 'GSL',
2011, 76592.0, 1452680, 0, 'LED'),
('LT', 1, 1, 'EL', 'GSL',
2012, 82953.0, 1577350, 0, 'LED'),
('LT', 1, 1, 'EL', 'GSL',
2013, 90485.0, 1324963, 0, 'LED'),
('LT', 1, 2, 'EL', 'GSL',
2011, 98475.0, 1452680, 0, 'LED'),
('LT', 1, 2, 'EL', 'GSL',
2012, 101374.0, 1577350, 0, 'LED'),
('LT', 1, 2, 'EL', 'GSL',
2013, 104884.0, 1324963, 0, 'LED'),
('LT', 3, 1, 'EL', 'GSL',
2011, 73968.0, 1452680, 0, 'LED'),
('LT', 3, 1, 'EL', 'GSL',
2012, 76948.0, 1577350, 0, 'LED'),
('LT', 3, 1, 'EL', 'GSL',
2013, 81524.0, 1324963, 0, 'LED'),
('LT', 3, 2, 'EL', 'GSL',
2011, 123744.0, 1452680, 0, 'LED'),
('LT', 3, 2, 'EL', 'GSL',
2012, 127777.0, 1577350, 0, 'LED'),
('LT', 3, 2, 'EL', 'GSL',
2013, 134395.0, 1324963, 0, 'LED'),
('LT', 1, 1, 'EL', 'LFL',
2011, 172698.0, 1452680, 0, 'T12'),
('LT', 1, 1, 'EL', 'LFL',
2012, 171593.0, 1577350, 0, 'T12'),
('LT', 1, 1, 'EL', 'LFL',
2013, 169985.0, 1324963, 0, 'T12'),
('LT', 1, 2, 'EL', 'LFL',
2011, 92416.0, 1452680, 0, 'T12'),
('LT', 1, 2, 'EL', 'LFL',
2012, 90115.0, 1577350, 0, 'T12'),
('LT', 1, 2, 'EL', 'LFL',
2013, 87888.0, 1324963, 0, 'T12'),
('LT', 3, 1, 'EL', 'LFL',
2011, 185455.0, 1452680, 0, 'T12'),
('LT', 3, 1, 'EL', 'LFL',
2012, 181322.0, 1577350, 0, 'T12'),
('LT', 3, 1, 'EL', 'LFL',
2013, 176931.0, 1324963, 0, 'T12'),
('LT', 3, 2, 'EL', 'LFL',
2011, 76209.0, 1452680, 0, 'T12'),
('LT', 3, 2, 'EL', 'LFL',
2012, 75481.0, 1577350, 0, 'T12'),
('LT', 3, 2, 'EL', 'LFL',
2013, 73852.0, 1324963, 0, 'T12'),
('LT', 1, 1, 'EL', 'LFL',
2011, 191584.0, 1452680, 0, 'T-8'),
('LT', 1, 1, 'EL', 'LFL',
2012, 192987.0, 1577350, 0, 'T-8'),
('LT', 1, 1, 'EL', 'LFL',
2013, 194125.0, 1324963, 0, 'T-8'),
('LT', 1, 2, 'EL', 'LFL',
2011, 173111.0, 1452680, 0, 'T-8'),
('LT', 1, 2, 'EL', 'LFL',
2012, 174952.0, 1577350, 0, 'T-8'),
('LT', 1, 2, 'EL', 'LFL',
2013, 176339.0, 1324963, 0, 'T-8'),
('LT', 3, 1, 'EL', 'LFL',
2011, 175865.0, 1452680, 0, 'T-8'),
('LT', 3, 1, 'EL', 'LFL',
2012, 176488.0, 1577350, 0, 'T-8'),
('LT', 3, 1, 'EL', 'LFL',
2013, 177539.0, 1324963, 0, 'T-8'),
('LT', 3, 2, 'EL', 'LFL',
2011, 90090.0, 1452680, 0, 'T-8'),
('LT', 3, 2, 'EL', 'LFL',
2012, 94938.0, 1577350, 0, 'T-8'),
('LT', 3, 2, 'EL', 'LFL',
2013, 100068.0, 1324963, 0, 'T-8'),
('LT', 1, 1, 'EL', 'LFL',
2011, 132882.0, 1452680, 0, 'T-5'),
('LT', 1, 1, 'EL', 'LFL',
2012, 134854.0, 1577350, 0, 'T-5'),
('LT', 1, 1, 'EL', 'LFL',
2013, 135321.0, 1324963, 0, 'T-5'),
('LT', 1, 2, 'EL', 'LFL',
2011, 125079.0, 1452680, 0, 'T-5'),
('LT', 1, 2, 'EL', 'LFL',
2012, 126162.0, 1577350, 0, 'T-5'),
('LT', 1, 2, 'EL', 'LFL',
2013, 127754.0, 1324963, 0, 'T-5'),
('LT', 3, 1, 'EL', 'LFL',
2011, 139793.0, 1452680, 0, 'T-5'),
('LT', 3, 1, 'EL', 'LFL',
2012, 142444.0, 1577350, 0, 'T-5'),
('LT', 3, 1, 'EL', 'LFL',
2013, 144879.0, 1324963, 0, 'T-5'),
('LT', 3, 2, 'EL', 'LFL',
2011, 135412.0, 1452680, 0, 'T-5'),
('LT', 3, 2, 'EL', 'LFL',
2012, 137854.0, 1577350, 0, 'T-5'),
('LT', 3, 2, 'EL', 'LFL',
2013, 140276.0, 1324963, 0, 'T-5'),
('SQ', 2, 2, 0, 0, 2011, 2262.0, 2332, 8245, ''),
('SQ', 2, 2, 0, 0, 2012, 2262.0, 2332, 8246, ''),
('SQ', 2, 2, 0, 0, 2013, 2262.0, 2332, 8247, ''),
('HS', 7, 3, 0, 0, 2012, 3434, 0, -1, ''),
('HS', 3, 1, 0, 0, 2012, 3434, 0, -1, '')],
dtype=[('ENDUSE', '<U50'), ('CDIV', 'i4'),
('BLDG', 'i4'), ('FUEL', '<U50'),
('EQPCLASS', '<U50'), ('YEAR', 'i4'),
('EQSTOCK', 'i4'), ('CONSUMPTION', 'i4'),
('HOUSEHOLDS', 'i4'), ('BULBTYPE', '<U50')])
# Define array of lighting weighting factors expected to be output
# by the function under test
lt_factor_expected = np.array([
(1, 1, 'GSL', 'INC', '2011', 0.91477392),
(1, 1, 'GSL', 'INC', '2012', 0.90574519),
(1, 1, 'GSL', 'INC', '2013', 0.82264751),
(1, 2, 'GSL', 'INC', '2011', 0.82494111),
(1, 2, 'GSL', 'INC', '2012', 0.81193743),
(1, 2, 'GSL', 'INC', '2013', 0.68584471),
(3, 1, 'GSL', 'INC', '2011', 0.87472457),
(3, 1, 'GSL', 'INC', '2012', 0.86130072),
(3, 1, 'GSL', 'INC', '2013', 0.74932829),
(3, 2, 'GSL', 'INC', '2011', 0.87774796),
(3, 2, 'GSL', 'INC', '2012', 0.87285453),
(3, 2, 'GSL', 'INC', '2013', 0.7782881),
(1, 1, 'GSL', 'LED', '2011', 0.08522608),
(1, 1, 'GSL', 'LED', '2012', 0.09425481),
(1, 1, 'GSL', 'LED', '2013', 0.17735249),
(1, 2, 'GSL', 'LED', '2011', 0.17505889),
(1, 2, 'GSL', 'LED', '2012', 0.18806257),
(1, 2, 'GSL', 'LED', '2013', 0.31415529),
(3, 1, 'GSL', 'LED', '2011', 0.12527543),
(3, 1, 'GSL', 'LED', '2012', 0.13869928),
(3, 1, 'GSL', 'LED', '2013', 0.25067171),
(3, 2, 'GSL', 'LED', '2011', 0.12225204),
(3, 2, 'GSL', 'LED', '2012', 0.12714547),
(3, 2, 'GSL', 'LED', '2013', 0.2217119),
(1, 1, 'LFL', 'T12', '2011', 0.41604117),
(1, 1, 'LFL', 'T12', '2012', 0.41195155),
(1, 1, 'LFL', 'T12', '2013', 0.44353641),
(1, 2, 'LFL', 'T12', '2011', 0.29310758),
(1, 2, 'LFL', 'T12', '2012', 0.28591929),
(1, 2, 'LFL', 'T12', '2013', 0.30929546),
(3, 1, 'LFL', 'T12', '2011', 0.43981098),
(3, 1, 'LFL', 'T12', '2012', 0.43168629),
(3, 1, 'LFL', 'T12', '2013', 0.46185268),
(3, 2, 'LFL', 'T12', '2011', 0.30942591),
(3, 2, 'LFL', 'T12', '2012', 0.30073385),
(3, 2, 'LFL', 'T12', '2013', 0.33432025),
(1, 1, 'LFL', 'T-8', '2011', 0.33771134),
(1, 1, 'LFL', 'T-8', '2012', 0.33900962),
(1, 1, 'LFL', 'T-8', '2013', 0.37062741),
(1, 2, 'LFL', 'T-8', '2011', 0.40173711),
(1, 2, 'LFL', 'T-8', '2012', 0.40616518),
(1, 2, 'LFL', 'T-8', '2013', 0.45407724),
(3, 1, 'LFL', 'T-8', '2011', 0.30517175),
(3, 1, 'LFL', 'T-8', '2012', 0.30744705),
(3, 1, 'LFL', 'T-8', '2013', 0.33910227),
(3, 2, 'LFL', 'T-8', '2011', 0.26764825),
(3, 2, 'LFL', 'T-8', '2012', 0.276772),
(3, 2, 'LFL', 'T-8', '2013', 0.33146147),
(1, 1, 'LFL', 'T-5', '2011', 0.24624749),
(1, 1, 'LFL', 'T-5', '2012', 0.24903882),
(1, 1, 'LFL', 'T-5', '2013', 0.18583618),
(1, 2, 'LFL', 'T-5', '2011', 0.30515531),
(1, 2, 'LFL', 'T-5', '2012', 0.30791553),
(1, 2, 'LFL', 'T-5', '2013', 0.23662731),
(3, 1, 'LFL', 'T-5', '2011', 0.25501727),
(3, 1, 'LFL', 'T-5', '2012', 0.26086665),
(3, 1, 'LFL', 'T-5', '2013', 0.19904505),
(3, 2, 'LFL', 'T-5', '2011', 0.42292584),
(3, 2, 'LFL', 'T-5', '2012', 0.42249415),
(3, 2, 'LFL', 'T-5', '2013', 0.33421828)],
dtype=[('CDIV', 'i4'), ('BLDG', 'i4'), ('EQPCLASS', 'U4'),
('BULBTYPE', 'U4'), ('YEAR', 'i4'), ('FACTOR', 'f8')])
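    # These expected factors appear to weight each bulb type's efficiency
    # share by its equipment stock and renormalize within a census division,
    # building type, fixture type, and year; e.g., for CDIV 1, BLDG 1, GSL,
    # 2011, worked from the synthetic data above:
    #   INC: 0.8333 * 164420 = 137017, LED: 0.1667 * 76592 = 12765
    #   INC factor = 137017 / (137017 + 12765) = 0.9148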
# Test the function that combines the lighting efficiency factors
# and stock data to develop efficiency-and-stock lighting weighting
# factors
def test_lighting_weighting_factors_function(self):
result = rm.calc_lighting_factors(self.nrg_stock_array,
self.lighting_eff_result,
self.total_n_years,
self.n_lighting_types)
# Extract the numeric entries for matching lighting and bulb
# types from the reference array and test array and compare
# the year values
for expect_row in self.lt_factor_expected:
# Grab the matching row from the array output by the
# function under test
result_row = result[np.all([
result['CDIV'] == expect_row['CDIV'],
result['BLDG'] == expect_row['BLDG'],
result['EQPCLASS'] == expect_row['EQPCLASS'],
result['BULBTYPE'] == expect_row['BULBTYPE'],
result['YEAR'] == expect_row['YEAR']], axis=0)]
# Test that each lighting weighting factor in the array
# output by the function is the same as in the comparison
# array of expected values
np.testing.assert_allclose(
expect_row['FACTOR'], result_row['FACTOR'])
class LightingDictModificationFunctionTest(unittest.TestCase):
""" Test the function that converts the technology_supplydict
from being appropriate for the 2015 AEO, where incandescent bulbs
are coded with the string 'Inc', to the 2017 AEO, where those
bulbs are coded with 'INC'. """
# Define the expected contents of technology_supplydict after
# being modified by the function under test
comparison_tech_dict = {'solar WH': 'SOLAR_WH',
'electric WH': 'ELEC_WH',
'total homes (tech level)': 'ELEC_RAD',
'resistance heat': 'ELEC_RAD',
'ASHP': 'ELEC_HP',
'GSHP': 'GEO_HP',
'central AC': 'CENT_AIR',
'room AC': 'ROOM_AIR',
'linear fluorescent (T-12)': ('LFL', 'T12'),
'linear fluorescent (T-8)': ('LFL', 'T-8'),
'linear fluorescent (LED)': ('LFL', 'LED'),
'general service (incandescent)': ('GSL', 'INC'),
'general service (CFL)': ('GSL', 'CFL'),
'general service (LED)': ('GSL', 'LED'),
'reflector (incandescent)': ('REF', 'INC'),
'reflector (CFL)': ('REF', 'CFL'),
'reflector (halogen)': ('REF', 'HAL'),
'reflector (LED)': ('REF', 'LED'),
'external (incandescent)': ('EXT', 'INC'),
'external (CFL)': ('EXT', 'CFL'),
'external (high pressure sodium)': ('EXT', 'HPS'),
'external (LED)': ('EXT', 'LED'),
'furnace (NG)': 'NG_FA',
'boiler (NG)': 'NG_RAD',
'NGHP': 'NG_HP',
'furnace (distillate)': 'DIST_FA',
'boiler (distillate)': 'DIST_RAD',
'furnace (kerosene)': 'KERO_FA',
'furnace (LPG)': 'LPG_FA',
'stove (wood)': 'WOOD_HT',
'resistance': 'GE2',
'secondary heating (kerosene)': 'KS',
'secondary heating (LPG)': 'LG',
'secondary heating (wood)': 'WD',
'secondary heating (coal)': 'CL',
'non-specific': ''
}
# Test that before calling the lighting dict update function, the
# supply-side technology dict and the comparison dict defined in
# this test are not the same
def test_difference_without_modification(self):
self.assertFalse(
rm.technology_supplydict == self.comparison_tech_dict)
# Test that after calling the lighting dict update function, the
# supply-side technology mapping dict and the comparison dict
# defined in this test are identical
def test_modification_of_global_dict(self):
rm.update_lighting_dict()
self.assertDictEqual(rm.technology_supplydict,
self.comparison_tech_dict)
# Offer external code execution (include all lines below this point in all
# test files)
def main():
# Triggers default behavior of running all test fixtures in the file
unittest.main()
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
import openingbook
book = openingbook.build_table(10)
assert len(book) > 0, "Your opening book is empty"
assert all(isinstance(k, tuple) for k in book), \
"All the keys should be `hashable`"
assert all(isinstance(v, tuple) and len(v) == 2 for v in book.values()), \
"All the values should be tuples of (x, y) actions"
print("Looks like your book worked!")
print(book)
|
python
|
"""
Insertion resources for SGAS.
Used for inserting usage records into database.
Author: Henrik Thostrup Jensen <[email protected]>
Magnus Jonsson <[email protected]>
Copyright: NorduNET / Nordic Data Grid Facility (2009, 2010, 2011)
"""
from twisted.internet import defer
from twisted.python import log
from twisted.enterprise import adbapi
import time
import psycopg2
import psycopg2.extensions # not used, but enables tuple adaptation
from sgas.authz import rights, ctxinsertchecker
from sgas.generic.insertresource import GenericInsertResource
from sgas.database import error as dberror
from sgas.usagerecord import ursplitter, urparser, urconverter
from sgas.usagerecord import updater
ACTION_INSERT = 'insert' # backwards compat
ACTION_JOB_INSERT = 'jobinsert'
CTX_MACHINE_NAME = 'machine_name'
class JobInsertChecker(ctxinsertchecker.InsertChecker):
CONTEXT_KEY = CTX_MACHINE_NAME
class JobUsageRecordInsertResource(GenericInsertResource):
PLUGIN_ID = 'ur'
PLUGIN_NAME = 'Registration'
authz_right = ACTION_JOB_INSERT
insert_error_msg = 'Error during job usage insert: %s'
insert_authz_reject_msg = 'Rejecting job usage insert for %s. No insert rights.'
def __init__(self, cfg, db, authorizer):
GenericInsertResource.__init__(self,db,authorizer)
authorizer.addChecker(self.authz_right, JobInsertChecker(authorizer.insert_check_depth))
authorizer.rights.addActions(ACTION_INSERT)
authorizer.rights.addOptions(ACTION_INSERT,rights.OPTION_ALL)
authorizer.rights.addActions(ACTION_JOB_INSERT)
authorizer.rights.addOptions(ACTION_JOB_INSERT,[ rights.OPTION_ALL ])
authorizer.rights.addContexts(ACTION_JOB_INSERT,[ CTX_MACHINE_NAME ])
self.updater = updater.AggregationUpdater(db)
db.attachService(self.updater)
def insertRecords(self, data, subject, hostname):
return self._insertJobUsageRecords(data, self.db, self.authorizer, subject, hostname)
def _insertJobUsageRecords(self, usagerecord_data, db, authorizer, insert_identity=None, insert_hostname=None):
# parse ur data
insert_time = time.gmtime()
ur_docs = []
ur_errors = False
for ur_element in ursplitter.splitURDocument(usagerecord_data):
ur_doc = urparser.xmlToDict(ur_element,
insert_identity=insert_identity,
insert_hostname=insert_hostname,
insert_time=insert_time)
if not ur_doc.get(CTX_MACHINE_NAME):
log.msg("ERROR: UR %s from %s doesn't have %s defined!" % (ur_doc.get("record_id"), ur_doc.get("insert_identity"), CTX_MACHINE_NAME))
ur_errors = True
ur_docs.append(ur_doc)
if ur_errors:
raise Exception("There where faulty URs!")
# check authz
machine_names = set( [ doc.get(CTX_MACHINE_NAME) for doc in ur_docs ] )
ctx = [ (CTX_MACHINE_NAME, mn) for mn in machine_names ]
if authorizer.isAllowed(insert_identity, ACTION_JOB_INSERT, ctx):
return self.insertJobUsageRecords(db,ur_docs)
else:
MSG = 'Subject %s is not allowed to perform insertion for machines: %s' % (insert_identity, ','.join(machine_names))
return defer.fail(dberror.SecurityError(MSG))
def insertJobUsageRecords(self, db, usagerecord_docs, retry=False):
arg_list = urconverter.createInsertArguments(usagerecord_docs)
r = db.recordInserter('usage', 'urcreate', arg_list)
self.updater.updateNotification()
return r
|
python
|
# Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the common classes for building MBQC models and the accompanying simulation tools.
"""
from numpy import random, pi
from networkx import Graph, spring_layout, draw_networkx
import matplotlib.pyplot as plt
from paddle import t, to_tensor, matmul, conj, real, reshape, multiply
from paddle_quantum.mbqc.utils import plus_state, cz_gate, pauli_gate
from paddle_quantum.mbqc.utils import basis, kron, div_str_to_float
from paddle_quantum.mbqc.utils import permute_to_front, permute_systems, print_progress, plot_results
from paddle_quantum.mbqc.qobject import State, Pattern
from paddle_quantum.mbqc.transpiler import transpile
__all__ = [
"MBQC",
"simulate_by_mbqc"
]
class MBQC:
r"""定义基于测量的量子计算模型 ``MBQC`` 类。
用户可以通过实例化该类来定义自己的 MBQC 模型。
"""
def __init__(self):
r"""MBQC 类的构造函数,用于实例化一个 ``MBQC`` 对象。
"""
self.__graph = None # Graph in a MBQC model
self.__pattern = None # Measurement pattern in a MBQC model
self.__bg_state = State() # Background state of computation
self.__history = [self.__bg_state] # History of background states
self.__status = self.__history[-1] if self.__history != [] else None # latest history item
self.vertex = None # Vertex class to maintain all the vertices
self.__outcome = {} # Dictionary to store all measurement outcomes
self.max_active = 0 # Maximum number of active vertices so far
self.__draw = False # Switch to draw the dynamical running process
self.__track = False # Switch to track the running progress
self.__pause_time = None # Pause time for drawing
self.__pos = None # Position for drawing
class Vertex:
r"""定义维护点列表,用于实例化一个 ``Vertex`` 对象。
将 MBQC 算法中图的节点分为三类,并进行动态维护。
Note:
这是内部类,用户不需要直接调用到该类。
Attributes:
total (list): MBQC 算法中图上的全部节点,不随运算而改变
pending (list): 待激活的节点,随着运算的执行而逐渐减少
active (list): 激活的节点,与当前测量步骤直接相关的节点
measured (list): 已被测量过的节点,随着运算的执行而逐渐增加
"""
def __init__(self, total=None, pending=None, active=None, measured=None):
r"""``Vertex`` 类的构造函数,用于实例化一个 ``Vertex`` 对象。
Args:
total (list): MBQC 算法中图上的全部节点,不随运算而改变
pending (list): 待激活的节点,随着运算的执行而逐渐减少
active (list): 激活的节点,与当前测量步骤直接相关的节点
measured (list): 已被测量过的节点,随着运算的执行而逐渐增加
"""
self.total = [] if total is None else total
self.pending = [] if pending is None else pending
self.active = [] if active is None else active
self.measured = [] if measured is None else measured
def set_graph(self, graph):
r"""设置 MBQC 模型中的图。
该函数用于将用户自己构造的图传递给 ``MBQC`` 实例。
Args:
graph (list): MBQC 模型中的图,由列表 ``[V, E]`` 给出, 其中 ``V`` 为节点列表,``E`` 为边列表
"""
vertices, edges = graph
vertices_of_edges = set([vertex for edge in edges for vertex in list(edge)])
assert vertices_of_edges.issubset(vertices), "edge must be between the graph vertices."
self.__graph = Graph()
self.__graph.add_nodes_from(vertices)
self.__graph.add_edges_from(edges)
self.vertex = self.Vertex(total=vertices, pending=vertices, active=[], measured=[])
def get_graph(self):
r"""获取图的信息。
Returns:
nx.Graph: 图
"""
return self.__graph
def set_pattern(self, pattern):
r"""设置 MBQC 模型的测量模式。
该函数用于将用户由电路图翻译得到或自己构造的测量模式传递给 ``MBQC`` 实例。
Warning:
输入的 pattern 参数是 ``Pattern`` 类型,其中命令列表为标准 ``EMC`` 命令。
Args:
pattern (Pattern): MBQC 算法对应的测量模式
"""
assert isinstance(pattern, Pattern), "please input a pattern of type 'Pattern'."
self.__pattern = pattern
cmds = self.__pattern.commands[:]
# Check if the pattern is a standard EMC form
cmd_map = {"E": 1, "M": 2, "X": 3, "Z": 4, "S": 5}
cmd_num_wild = [cmd_map[cmd.name] for cmd in cmds]
cmd_num_standard = cmd_num_wild[:]
cmd_num_standard.sort(reverse=False)
assert cmd_num_wild == cmd_num_standard, "input pattern is not a standard EMC form."
# Set graph by entanglement commands
edges = [tuple(cmd.which_qubits) for cmd in cmds if cmd.name == "E"]
vertices = list(set([vertex for edge in edges for vertex in list(edge)]))
graph = [vertices, edges]
self.set_graph(graph)
def get_pattern(self):
r"""获取测量模式的信息。
Returns:
Pattern: 测量模式
"""
return self.__pattern
def set_input_state(self, state=None):
r"""设置需要替换的输入量子态。
Warning:
与电路模型不同,MBQC 模型通常默认初始态为加态。如果用户不调用此方法设置初始量子态,则默认为加态。
如果用户以测量模式运行 MBQC,则此处输入量子态的系统标签会被限制为从零开始的自然数,类型为整型。
Args:
state (State): 需要替换的量子态,默认为加态
"""
assert self.__graph is not None, "please set 'graph' or 'pattern' before calling 'set_input_state'."
assert isinstance(state, State) or state is None, "please input a state of type 'State'."
vertices = list(self.__graph.nodes)
if state is None:
vector = plus_state()
system = [vertices[0]] # Activate the first vertex, system should be a list
else:
vector = state.vector
# If a pattern is set, map the input state system to the pattern's input
if self.__pattern is not None:
assert all(isinstance(label, int) for label in state.system), "please input system labels of type 'int'"
assert all(label >= 0 for label in state.system), "please input system labels with non-negative values"
system = [label for label in self.__pattern.input_ if int(div_str_to_float(label[0])) in state.system]
else:
system = state.system
assert set(system).issubset(vertices), "input system labels must be a subset of graph vertices."
self.__bg_state = State(vector, system)
self.__history = [self.__bg_state]
self.__status = self.__history[-1]
self.vertex = self.Vertex(total=vertices,
pending=list(set(vertices).difference(system)),
active=system,
measured=[])
self.max_active = len(self.vertex.active)
def __set_position(self, pos):
r"""设置动态过程图绘制时节点的位置坐标。
Note:
这是内部方法,用户并不需要直接调用到该方法。
Args:
pos (dict or bool, optional): 节点坐标的字典数据或者内置的坐标选择,
内置的坐标选择有:``True`` 为测量模式自带的坐标,``False`` 为 ``spring_layout`` 坐标
"""
assert isinstance(pos, bool) or isinstance(pos, dict), "'pos' should be either bool or dict."
if isinstance(pos, dict):
self.__pos = pos
elif pos:
assert self.__pattern is not None, "'pos=True' must be chosen after a pattern is set."
self.__pos = {v: [div_str_to_float(v[1]), - div_str_to_float(v[0])] for v in list(self.__graph.nodes)}
else:
self.__pos = spring_layout(self.__graph) # Use 'spring_layout' otherwise
def __draw_process(self, which_process, which_qubit):
r"""根据当前节点状态绘图,用以实时展示 MBQC 模型的模拟计算过程。
Note:
这是内部方法,用户并不需要直接调用到该方法。
Args:
which_process (str): MBQC 执行的阶段,"measuring", "active" 或者 "measured"
which_qubit (any): 当前关注的节点,可以是 ``str``, ``tuple`` 等任意数据类型,但需要和图的标签类型匹配
"""
if self.__draw:
assert which_process in ["measuring", "active", "measured"]
assert which_qubit in self.vertex.total, "'which_qubit' must be in the graph."
vertex_sets = []
# Find where the 'which_qubit' is
if which_qubit in self.vertex.pending:
pending = self.vertex.pending[:]
pending.remove(which_qubit)
vertex_sets = [pending, self.vertex.active, [which_qubit], self.vertex.measured]
elif which_qubit in self.vertex.active:
active = self.vertex.active[:]
active.remove(which_qubit)
vertex_sets = [self.vertex.pending, active, [which_qubit], self.vertex.measured]
elif which_qubit in self.vertex.measured:
vertex_sets = [self.vertex.pending, self.vertex.active, [], self.vertex.measured]
            # Identify ancilla vertices
ancilla_qubits = []
if self.__pattern is not None:
for vertex in list(self.__graph.nodes):
row_coordinate = div_str_to_float(vertex[0])
col_coordinate = div_str_to_float(vertex[1])
# Ancilla vertices do not have integer coordinates
if abs(col_coordinate - int(col_coordinate)) >= 1e-15 \
or abs(row_coordinate - int(row_coordinate)) >= 1e-15:
ancilla_qubits.append(vertex)
plt.cla()
plt.title("MBQC Running Process", fontsize=15)
plt.xlabel("Measuring (RED) Active (GREEN) Pending (BLUE) Measured (GRAY)", fontsize=12)
plt.grid()
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(500, 100, 800, 600)
colors = ['tab:blue', 'tab:green', 'tab:red', 'tab:gray']
for j in range(4):
for vertex in vertex_sets[j]:
options = {
"nodelist": [vertex],
"node_color": colors[j],
"node_shape": '8' if vertex in ancilla_qubits else 'o',
"with_labels": False,
"width": 3,
}
draw_networkx(self.__graph, self.__pos, **options)
ax = plt.gca()
ax.margins(0.20)
plt.axis("on")
ax.set_axisbelow(True)
plt.pause(self.__pause_time)
def draw_process(self, draw=True, pos=False, pause_time=0.5):
r"""动态过程图绘制,用以实时展示 MBQC 模型的模拟计算过程。
Args:
draw (bool, optional): 是否绘制动态过程图的布尔开关
pos (bool or dict, optional): 节点坐标的字典数据或者内置的坐标选择,内置的坐标选择有:
``True`` 为测量模式自带的坐标,``False`` 为 `spring_layout` 坐标
pause_time (float, optional): 绘制动态过程图时每次更新的停顿时间
"""
assert self.__graph is not None, "please set 'graph' or 'pattern' before calling 'draw_process'."
assert isinstance(draw, bool), "'draw' must be bool."
assert isinstance(pos, bool) or isinstance(pos, dict), "'pos' should be either bool or dict."
assert pause_time > 0, "'pause_time' must be strictly larger than 0."
self.__draw = draw
self.__pause_time = pause_time
if self.__draw:
plt.figure()
plt.ion()
self.__set_position(pos)
def track_progress(self, track=True):
r""" 显示 MBQC 模型运行进度的开关。
Args:
track (bool, optional): ``True`` 打开进度条显示功能, ``False`` 关闭进度条显示功能
"""
assert isinstance(track, bool), "the parameter 'track' must be bool."
self.__track = track
def __apply_cz(self, which_qubits_list):
r"""对给定的两个比特作用控制 Z 门。
Note:
这是内部方法,用户并不需要直接调用到该方法。
Warning:
作用控制 Z 门的两个比特一定是被激活的。
Args:
which_qubits_list (list): 作用控制 Z 门的比特对标签列表,例如 ``[(1, 2), (3, 4),...]``
"""
for which_qubits in which_qubits_list:
assert set(which_qubits).issubset(self.vertex.active), \
"vertices in 'which_qubits_list' must be activated first."
assert which_qubits[0] != which_qubits[1], \
'the control and target qubits must not be the same.'
# Find the control and target qubits and permute them to the front
self.__bg_state = permute_to_front(self.__bg_state, which_qubits[0])
self.__bg_state = permute_to_front(self.__bg_state, which_qubits[1])
new_state = self.__bg_state
new_state_len = new_state.length
qua_length = int(new_state_len / 4)
cz = cz_gate()
# Reshape the state, apply CZ and reshape it back
new_state.vector = reshape(matmul(cz, reshape(new_state.vector, [4, qua_length])), [new_state_len, 1])
# Update the order of active vertices and the background state
self.vertex.active = new_state.system
self.__bg_state = State(new_state.vector, new_state.system)
def __apply_pauli_gate(self, gate, which_qubit):
r"""对给定的单比特作用 Pauli 门。
Note:
这是内部方法,用户并不需要直接调用到该方法。
Args:
gate (str): Pauli 门的索引字符,"I", "X", "Y", "Z" 分别表示对应的门,在副产品处理时用 "X" 和 "Z" 门
which_qubit (any): 作用 Pauli 门的系统标签,
可以是 ``str``, ``tuple`` 等任意数据类型,但需要和 MBQC 模型中节点的标签类型匹配
"""
new_state = permute_to_front(self.__bg_state, which_qubit)
new_state_len = new_state.length
half_length = int(new_state_len / 2)
gate_mat = pauli_gate(gate)
        # Reshape the state, apply the Pauli gate and reshape it back
new_state.vector = reshape(matmul(gate_mat, reshape(new_state.vector, [2, half_length])), [new_state_len, 1])
# Update the order of active vertices and the background state
self.vertex.active = new_state.system
self.__bg_state = State(new_state.vector, new_state.system)
def __create_graph_state(self, which_qubit):
r"""以待测量的比特为输入参数,生成测量当前节点所需要的最小的量子图态。
Note:
这是内部方法,用户并不需要直接调用到该方法。
Args:
which_qubit (any): 待测量比特的系统标签。
可以是 ``str``, ``tuple`` 等任意数据类型,但需要和 MBQC 模型中节点的标签类型匹配
"""
# Find the neighbors of 'which_qubit'
which_qubit_neighbors = set(self.__graph.neighbors(which_qubit))
# Exclude the qubits already measured
neighbors_not_measured = which_qubit_neighbors.difference(set(self.vertex.measured))
# Create a list of system labels that will be applied to cz gates
cz_list = [(which_qubit, qubit) for qubit in neighbors_not_measured]
# Get the qubits to be activated
append_qubits = {which_qubit}.union(neighbors_not_measured).difference(set(self.vertex.active))
# Update active and pending lists
self.vertex.active += list(append_qubits)
self.vertex.pending = list(set(self.vertex.pending).difference(self.vertex.active))
# Compute the new background state vector
new_bg_state_vector = kron([self.__bg_state.vector] + [plus_state() for _ in append_qubits])
# Update the background state and apply cz
self.__bg_state = State(new_bg_state_vector, self.vertex.active)
self.__apply_cz(cz_list)
self.__draw_process("active", which_qubit)
def __update(self):
r"""更新历史列表和量子态信息。
"""
self.__history.append(self.__bg_state)
self.__status = self.__history[-1]
def measure(self, which_qubit, basis_list):
r"""以待测量的比特和测量基为输入参数,对该比特进行测量。
Note:
这是用户在实例化 MBQC 类之后最常调用的方法之一,此处我们对单比特测量模拟进行了最大程度的优化,
随着用户对该函数的调用,MBQC 类将自动完成激活相关节点、生成所需的图态以及对特定比特进行测量的全过程,
并记录测量结果和对应测量后的量子态。用户每调用一次该函数,就完成一次对单比特的测量操作。
Warning:
当且仅当用户调用 ``measure`` 类方法时,MBQC 模型才真正进行运算。
Args:
which_qubit (any): 待测量量子比特的系统标签,
可以是 ``str``, ``tuple`` 等任意数据类型,但需要和 MBQC 模型的图上标签匹配
basis_list (list): 测量基向量构成的列表,列表元素为 ``Tensor`` 类型的列向量
代码示例:
.. code-block:: python
from paddle_quantum.mbqc.simulator import MBQC
from paddle_quantum.mbqc.qobject import State
from paddle_quantum.mbqc.utils import zero_state, basis
G = [['1', '2', '3'], [('1', '2'), ('2', '3')]]
mbqc = MBQC()
mbqc.set_graph(G)
state = State(zero_state(), ['1'])
mbqc.set_input_state(state)
mbqc.measure('1', basis('X'))
mbqc.measure('2', basis('X'))
print("Measurement outcomes: ", mbqc.get_classical_output())
::
Measurement outcomes: {'1': 0, '2': 1}
"""
self.__draw_process("measuring", which_qubit)
self.__create_graph_state(which_qubit)
assert which_qubit in self.vertex.active, 'the qubit to be measured must be activated first.'
new_bg_state = permute_to_front(self.__bg_state, which_qubit)
self.vertex.active = new_bg_state.system
half_length = int(new_bg_state.length / 2)
eps = 10 ** (-10)
prob = [0, 0]
state_unnorm = [0, 0]
# Calculate the probability and post-measurement states
for result in [0, 1]:
basis_dagger = t(conj(basis_list[result]))
# Reshape the state, multiply the basis and reshape it back
state_unnorm[result] = reshape(matmul(basis_dagger,
reshape(new_bg_state.vector, [2, half_length])), [half_length, 1])
probability = matmul(t(conj(state_unnorm[result])), state_unnorm[result])
is_complex128 = probability.dtype == to_tensor([], dtype='complex128').dtype
prob[result] = real(probability) if is_complex128 else probability
# Randomly choose a result and its corresponding post-measurement state
if prob[0].numpy().item() < eps:
result = 1
post_state_vector = state_unnorm[1]
elif prob[1].numpy().item() < eps:
result = 0
post_state_vector = state_unnorm[0]
else: # Take a random choice of outcome
result = random.choice(2, 1, p=[prob[0].numpy().item(), prob[1].numpy().item()]).item()
# Normalize the post-measurement state
post_state_vector = state_unnorm[result] / prob[result].sqrt()
# Write the measurement result into the dict
self.__outcome.update({which_qubit: int(result)})
# Update measured, active lists
self.vertex.measured.append(which_qubit)
self.max_active = max(len(self.vertex.active), self.max_active)
self.vertex.active.remove(which_qubit)
# Update the background state and history list
self.__bg_state = State(post_state_vector, self.vertex.active)
self.__update()
self.__draw_process("measured", which_qubit)
def sum_outcomes(self, which_qubits, start=0):
r"""根据输入的量子系统标签,在存储测量结果的字典中找到对应的测量结果,并进行求和。
Note:
在进行副产品纠正操作和定义适应性测量角度时,用户可以调用该方法对特定比特的测量结果求和。
Args:
which_qubits (list): 需要查找测量结果并求和的比特的系统标签列表
start (int): 对结果进行求和后需要额外相加的整数
Returns:
int: 指定比特的测量结果的和
代码示例:
.. code-block:: python
from paddle_quantum.mbqc.simulator import MBQC
from paddle_quantum.mbqc.qobject import State
from paddle_quantum.mbqc.utils import zero_state, basis
G = [['1', '2', '3'], [('1', '2'), ('2', '3')]]
mbqc = MBQC()
mbqc.set_graph(G)
input_state = State(zero_state(), ['1'])
mbqc.set_input_state(input_state)
mbqc.measure('1', basis('X'))
mbqc.measure('2', basis('X'))
mbqc.measure('3', basis('X'))
print("All measurement outcomes: ", mbqc.get_classical_output())
print("Sum of outcomes of qubits '1' and '2': ", mbqc.sum_outcomes(['1', '2']))
print("Sum of outcomes of qubits '1', '2' and '3' with an extra 1: ", mbqc.sum_outcomes(['1', '2', '3'], 1))
::
All measurement outcomes: {'1': 0, '2': 0, '3': 1}
Sum of outcomes of qubits '1' and '2': 0
Sum of outcomes of qubits '1', '2' and '3' with an extra 1: 2
"""
assert isinstance(start, int), "'start' must be of type int."
return sum([self.__outcome[label] for label in which_qubits], start)
def correct_byproduct(self, gate, which_qubit, power):
r"""对测量后的量子态进行副产品纠正。
Note:
这是用户在实例化 MBQC 类并完成测量后,经常需要调用的一个方法。
Args:
gate (str): ``'X'`` 或者 ``'Z'``,分别表示 Pauli X 或 Z 门修正
which_qubit (any): 待操作的量子比特的系统标签,可以是 ``str``, ``tuple`` 等任意数据类型,但需要和 MBQC 中图的标签类型匹配
power (int): 副产品纠正算符的指数
代码示例:
此处展示的是 MBQC 模型下实现隐形传态的一个例子。
.. code-block:: python
from paddle_quantum.mbqc.simulator import MBQC
from paddle_quantum.mbqc.qobject import State
from paddle_quantum.mbqc.utils import random_state_vector, basis, compare_by_vector
G = [['1', '2', '3'], [('1', '2'), ('2', '3')]]
state = State(random_state_vector(1), ['1'])
mbqc = MBQC()
mbqc.set_graph(G)
mbqc.set_input_state(state)
mbqc.measure('1', basis('X'))
mbqc.measure('2', basis('X'))
outcome = mbqc.get_classical_output()
mbqc.correct_byproduct('Z', '3', outcome['1'])
mbqc.correct_byproduct('X', '3', outcome['2'])
state_out = mbqc.get_quantum_output()
state_std = State(state.vector, ['3'])
compare_by_vector(state_out, state_std)
::
Norm difference of the given states is:
0.0
They are exactly the same states.
"""
assert gate in ['X', 'Z'], "'gate' must be 'X' or 'Z'."
assert isinstance(power, int), "'power' must be of type 'int'."
if power % 2 == 1:
self.__apply_pauli_gate(gate, which_qubit)
self.__update()
def __run_cmd(self, cmd):
r"""执行测量或副产品处理命令。
Args:
cmd (Pattern.CommandM / Pattern.CommandX / Pattern.CommandZ): 测量或副产品处理命令
"""
assert cmd.name in ["M", "X", "Z"], "the input 'cmd' must be CommandM, CommandX or CommandZ."
if cmd.name == "M": # Execute measurement commands
signal_s = self.sum_outcomes(cmd.domain_s)
signal_t = self.sum_outcomes(cmd.domain_t)
# The adaptive angle is (-1)^{signal_s} * angle + {signal_t} * pi
adaptive_angle = multiply(to_tensor([(-1) ** signal_s], dtype="float64"), cmd.angle) \
+ to_tensor([signal_t * pi], dtype="float64")
self.measure(cmd.which_qubit, basis(cmd.plane, adaptive_angle))
else: # Execute byproduct correction commands
power = self.sum_outcomes(cmd.domain)
self.correct_byproduct(cmd.name, cmd.which_qubit, power)
def __run_cmd_lst(self, cmd_lst, bar_start, bar_end):
r"""对列表执行测量或副产品处理命令。
Args:
cmd_lst (list): 命令列表,包含测量或副产品处理命令
bar_start (int): 进度条的开始点
bar_end (int): 进度条的结束点
"""
for i in range(len(cmd_lst)):
cmd = cmd_lst[i]
self.__run_cmd(cmd)
print_progress((bar_start + i + 1) / bar_end, "Pattern Running Progress", self.__track)
def __kron_unmeasured_qubits(self):
r"""该方法将没有被作用 CZ 纠缠的节点初始化为 |+> 态,并与当前的量子态做张量积。
Warning:
该方法仅在用户输入测量模式时调用,当用户输入图时,如果节点没有被激活,我们默认用户没有对该节点进行任何操作。
"""
# Turn off the plot switch
self.__draw = False
# As the create_graph_state function would change the measured qubits list, we need to record it
measured_qubits = self.vertex.measured[:]
for qubit in list(self.__graph.nodes):
if qubit not in self.vertex.measured:
self.__create_graph_state(qubit)
# Update vertices and backgrounds
self.vertex.measured.append(qubit)
self.max_active = max(len(self.vertex.active), self.max_active)
self.__bg_state = State(self.__bg_state.vector, self.vertex.active)
# Restore the measured qubits
self.vertex.measured = measured_qubits
def run_pattern(self):
r"""按照设置的测量模式对 MBQC 模型进行模拟。
Warning:
该方法必须在 ``set_pattern`` 调用后调用。
"""
assert self.__pattern is not None, "please use this method after calling 'set_pattern'!"
# Execute measurement commands and correction commands
cmd_m_lst = [cmd for cmd in self.__pattern.commands if cmd.name == "M"]
cmd_c_lst = [cmd for cmd in self.__pattern.commands if cmd.name in ["X", "Z"]]
bar_end = len(cmd_m_lst + cmd_c_lst)
self.__run_cmd_lst(cmd_m_lst, 0, bar_end)
# Activate unmeasured qubits before byproduct corrections
self.__kron_unmeasured_qubits()
self.__run_cmd_lst(cmd_c_lst, len(cmd_m_lst), bar_end)
# The output state's label is messy (e.g. [(2, 0), (0, 1), (1, 3)...]),
# so we permute the systems in order
q_output = self.__pattern.output_[1]
self.__bg_state = permute_systems(self.__status, q_output)
self.__update()
@staticmethod
def __map_qubit_to_row(out_lst):
r"""将输出比特的标签与行数对应起来,便于查找其对应关系。
Returns:
dict: 返回字典,代表行数与标签的对应关系
"""
return {int(div_str_to_float(qubit[0])): qubit for qubit in out_lst}
def get_classical_output(self):
r"""获取 MBQC 模型运行后的经典输出结果。
Returns:
str or dict: 如果用户输入是测量模式,则返回测量输出节点得到的比特串,与原电路的测量结果相一致,没有被测量的比特位填充 "?",如果用户输入是图,则返回所有节点的测量结果
"""
# If the input is pattern, return the equivalent result as the circuit model
if self.__pattern is not None:
width = len(self.__pattern.input_)
c_output = self.__pattern.output_[0]
q_output = self.__pattern.output_[1]
# Acquire the relationship between row number and corresponding output qubit label
output_lst = c_output + q_output
row_and_qubit = self.__map_qubit_to_row(output_lst)
# Obtain the string, with classical outputs denoted as their measurement outcomes
# and quantum outputs denoted as "?"
bit_str = [str(self.__outcome[row_and_qubit[i]])
if row_and_qubit[i] in c_output else '?'
for i in range(width)]
string = "".join(bit_str)
return string
# If the input is graph, return the outcome dictionary
else:
return self.__outcome
def get_history(self):
r"""获取 MBQC 计算模拟时的中间步骤信息。
Returns:
list: 生成图态、进行测量、纠正副产品后运算结果构成的列表
"""
return self.__history
def get_quantum_output(self):
r"""获取 MBQC 模型运行后的量子态输出结果。
Returns:
State: MBQC 模型运行后的量子态
"""
return self.__status
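# A minimal end-to-end sketch of the graph interface, adapted from the
# docstring examples above (measurement outcomes are random; the default
# input state is the plus state since set_input_state is not called):
#
#     from paddle_quantum.mbqc.utils import basis
#     mbqc = MBQC()
#     mbqc.set_graph([['1', '2', '3'], [('1', '2'), ('2', '3')]])
#     mbqc.measure('1', basis('X'))
#     mbqc.measure('2', basis('X'))
#     outcome = mbqc.get_classical_output()
#     mbqc.correct_byproduct('Z', '3', outcome['1'])
#     mbqc.correct_byproduct('X', '3', outcome['2'])
#     state_out = mbqc.get_quantum_output()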
def simulate_by_mbqc(circuit, input_state=None):
r"""使用等价的 MBQC 模型模拟量子电路。
该函数通过将量子电路转化为等价的 MBQC 模型并运行,从而获得等价于原始量子电路的输出结果。
Warning:
与 ``UAnsatz`` 不同,此处输入的 ``circuit`` 参数包含了测量操作。
另,MBQC 模型默认初始态为加态,因此,如果用户不输入参数 ``input_state`` 设置初始量子态,则默认为加态。
Args:
circuit (Circuit): 量子电路图
input_state (State, optional): 量子电路的初始量子态,默认为 :math:`|+\rangle` 态
Returns:
tuple: 包含如下两个元素:
- str: 经典输出
- State: 量子输出
"""
if input_state is not None:
assert isinstance(input_state, State), "the 'input_state' must be of type 'State'."
pattern = transpile(circuit)
mbqc = MBQC()
mbqc.set_pattern(pattern)
mbqc.set_input_state(input_state)
mbqc.run_pattern()
c_output = mbqc.get_classical_output()
q_output = mbqc.get_quantum_output()
# Return the classical and quantum outputs
return c_output, q_output
def __get_sample_dict(bit_num, mea_bits, samples):
r"""根据比特数和测量比特索引的列表,统计采样结果。
Args:
bit_num (int): 比特数
mea_bits (list): 测量的比特列表
samples (list): 采样结果
Returns:
dict: 统计得到的采样结果
"""
sample_dict = {}
for i in range(2 ** len(mea_bits)):
str_of_order = bin(i)[2:].zfill(len(mea_bits))
bit_str = []
idx = 0
for j in range(bit_num):
if j in mea_bits:
bit_str.append(str_of_order[idx])
idx += 1
else:
bit_str.append('?')
string = "".join(bit_str)
sample_dict[string] = 0
# Count sampling results
for string in list(set(samples)):
sample_dict[string] += samples.count(string)
return sample_dict
def sample_by_mbqc(circuit, input_state=None, plot=False, shots=1024, print_or_not=True):
r"""将 MBQC 模型重复运行多次,获得经典结果的统计分布。
Warning:
与 ``UAnsatz`` 不同,此处输入的 circuit 参数包含了测量操作。
另,MBQC 模型默认初始态为加态,因此,如果用户不输入参数 `input_state` 设置初始量子态,则默认为加态。
Args:
circuit (Circuit): 量子电路图
input_state (State, optional): 量子电路的初始量子态,默认为加态
plot (bool, optional): 绘制经典采样结果的柱状图开关,默认为关闭状态
shots (int, optional): 采样次数,默认为 1024 次
print_or_not (bool, optional): 是否打印采样结果和绘制采样进度,默认为开启状态
Returns:
dict: 经典结果构成的频率字典
list: 经典测量结果和所有采样结果(包括经典输出和量子输出)的列表
"""
# Initialize
if shots == 1:
print_or_not = False
if print_or_not:
print("Sampling " + str(shots) + " times." + "\nWill return the sampling results.\r\n")
width = circuit.get_width()
mea_bits = circuit.get_measured_qubits()
# Sampling for "shots" times
samples = []
all_outputs = []
for shot in range(shots):
if print_or_not:
print_progress((shot + 1) / shots, "Current Sampling Progress")
c_output, q_output = simulate_by_mbqc(circuit, input_state)
samples.append(c_output)
all_outputs.append([c_output, q_output])
sample_dict = __get_sample_dict(width, mea_bits, samples)
if print_or_not:
print("Sample count " + "(" + str(shots) + " shots)" + " : " + str(sample_dict))
if plot:
dict_lst = [sample_dict]
bar_labels = ["MBQC sample outcomes"]
title = 'Sampling results (MBQC)'
xlabel = "Measurement outcomes"
ylabel = "Distribution"
plot_results(dict_lst, bar_labels, title, xlabel, ylabel)
return sample_dict, all_outputs
|
python
|
# Generated by Django 3.2.8 on 2021-11-01 19:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('neighbor', '0002_rename_neighbourhood_neighborhood'),
]
operations = [
migrations.RemoveField(
model_name='business',
name='updated_at',
),
migrations.RemoveField(
model_name='neighborhood',
name='updated_at',
),
]
|
python
|
# -*- coding: utf-8 -*-
"""The Windows EventLog (EVT) file event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
class WinEVTFormatter(interface.ConditionalEventFormatter):
"""Formatter for a Windows EventLog (EVT) record event."""
DATA_TYPE = u'windows:evt:record'
# TODO: add string representation of facility.
FORMAT_STRING_PIECES = [
u'[{event_identifier} /',
u'0x{event_identifier:04x}]',
u'Severity: {severity}',
u'Record Number: {record_number}',
u'Event Type: {event_type}',
u'Event Category: {event_category}',
u'Source Name: {source_name}',
u'Computer Name: {computer_name}',
u'Message string: {message_string}',
u'Strings: {strings}']
FORMAT_STRING_SHORT_PIECES = [
u'[{event_identifier} /',
u'0x{event_identifier:04x}]',
u'Strings: {strings}']
SOURCE_LONG = u'WinEVT'
SOURCE_SHORT = u'EVT'
# Mapping of the numeric event types to a descriptive string.
_EVENT_TYPES = [
u'Error event',
u'Warning event',
u'Information event',
u'Success Audit event',
u'Failure Audit event']
_SEVERITY = [
u'Success',
u'Informational',
u'Warning',
u'Error']
def GetEventTypeString(self, event_type):
"""Retrieves a string representation of the event type.
Args:
event_type: The numeric event type.
Returns:
      A Unicode string containing a description of the event type.
"""
if event_type >= 0 and event_type < len(self._EVENT_TYPES):
return self._EVENT_TYPES[event_type]
return u'Unknown {0:d}'.format(event_type)
def GetSeverityString(self, severity):
"""Retrieves a string representation of the severity.
Args:
severity: The numeric severity.
Returns:
      A Unicode string containing a description of the severity.
"""
if severity >= 0 and severity < len(self._SEVERITY):
return self._SEVERITY[severity]
return u'Unknown {0:d}'.format(severity)
def GetMessages(self, formatter_mediator, event_object):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator: the formatter mediator object (instance of
FormatterMediator).
event_object: the event object (instance of EventObject).
Returns:
A tuple containing the formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event_object.data_type:
raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(
event_object.data_type))
event_values = event_object.GetValues()
event_type = event_values.get(u'event_type', None)
if event_type is not None:
event_values[u'event_type'] = self.GetEventTypeString(event_type)
# TODO: add string representation of facility.
severity = event_values.get(u'severity', None)
if severity is not None:
event_values[u'severity'] = self.GetSeverityString(severity)
source_name = event_values.get(u'source_name', None)
message_identifier = event_values.get(u'message_identifier', None)
strings = event_values.get(u'strings', [])
if source_name and message_identifier:
message_string = formatter_mediator.GetWindowsEventMessage(
source_name, message_identifier)
if message_string:
try:
event_values[u'message_string'] = message_string.format(*strings)
except IndexError:
# Unable to create the message string.
pass
message_strings = []
for string in strings:
message_strings.append(u'\'{0:s}\''.format(string))
message_string = u', '.join(message_strings)
event_values[u'strings'] = u'[{0:s}]'.format(message_string)
return self._ConditionalFormatMessages(event_values)
manager.FormattersManager.RegisterFormatter(WinEVTFormatter)
|
python
|
"""
Some basic admin tests.
Rather than testing the frontend UI -- that'd be a job for something like
Selenium -- this does a bunch of mocking and just tests the various admin
callbacks.
"""
import mock
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.http import HttpRequest
from django import forms
from fack.admin import QuestionAdmin
from fack.models import Question
class FAQAdminTests(TestCase):
def test_question_admin_save_model(self):
user = get_user_model()
user1 = mock.Mock(spec=user)
user2 = mock.Mock(spec=user)
req = mock.Mock(spec=HttpRequest)
obj = mock.Mock(spec=Question)
form = mock.Mock(spec=forms.Form)
qa = QuestionAdmin(Question, admin.site)
# Test saving a new model.
req.user = user1
qa.save_model(req, obj, form, change=False)
self.assertEqual(obj.save.call_count, 1)
self.assertEqual(obj.created_by, user1, "created_by wasn't set to request.user")
self.assertEqual(obj.updated_by, user1, "updated_by wasn't set to request.user")
# And saving an existing model.
obj.save.reset_mock()
req.user = user2
qa.save_model(req, obj, form, change=True)
self.assertEqual(obj.save.call_count, 1)
self.assertEqual(obj.created_by, user1, "created_by shouldn't have been changed")
self.assertEqual(obj.updated_by, user2, "updated_by wasn't set to request.user")
|
python
|
import os
import json
import functools
from tornado import web
from notebook.base.handlers import IPythonHandler
from nbgrader.api import Gradebook
from nbgrader.apps.api import NbGraderAPI
class BaseHandler(IPythonHandler):
@property
def base_url(self):
return super(BaseHandler, self).base_url.rstrip("/")
@property
def db_url(self):
return self.settings['nbgrader_db_url']
@property
def url_prefix(self):
return self.settings['nbgrader_url_prefix']
@property
def coursedir(self):
return self.settings['nbgrader_coursedir']
@property
def authenticator(self):
return self.settings['nbgrader_authenticator']
@property
def gradebook(self):
gb = self.settings['nbgrader_gradebook']
if gb is None:
self.log.debug("creating gradebook")
gb = Gradebook(self.db_url, self.coursedir.course_id)
self.settings['nbgrader_gradebook'] = gb
return gb
@property
def mathjax_url(self):
return self.settings['mathjax_url']
@property
def api(self):
level = self.log.level
api = NbGraderAPI(
self.coursedir, self.authenticator, parent=self.coursedir.parent)
api.log_level = level
return api
def render(self, name, **ns):
template = self.settings['nbassignment_jinja2_env'].get_template(name)
return template.render(**ns)
def write_error(self, status_code, **kwargs):
if status_code == 500:
html = self.render(
'base_500.tpl',
base_url=self.base_url,
error_code=500)
elif status_code == 502:
html = self.render(
'base_500.tpl',
base_url=self.base_url,
error_code=502)
elif status_code == 403:
html = self.render(
'base_403.tpl',
base_url=self.base_url,
error_code=403)
else:
return super(BaseHandler, self).write_error(status_code, **kwargs)
self.write(html)
self.finish()
class BaseApiHandler(BaseHandler):
def get_json_body(self):
"""Return the body of the request as JSON data."""
if not self.request.body:
return None
body = self.request.body.strip().decode('utf-8')
try:
model = json.loads(body)
except Exception:
self.log.debug("Bad JSON: %r", body)
self.log.error("Couldn't parse JSON", exc_info=True)
raise web.HTTPError(400, 'Invalid JSON in body of request')
return model
def check_xsrf(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
_ = self.xsrf_token
return f(self, *args, **kwargs)
return wrapper
def check_notebook_dir(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if self.settings['nbgrader_bad_setup']:
return self.write_error(500)
return f(self, *args, **kwargs)
return wrapper
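# A minimal sketch of how the decorators above are typically applied to a
# handler method (ExampleApiHandler is hypothetical, not part of this module):
#
#     class ExampleApiHandler(BaseApiHandler):
#         @web.authenticated
#         @check_xsrf
#         @check_notebook_dir
#         def get(self):
#             self.write(json.dumps({"status": "ok"}))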
|
python
|
"""Test creating new settings """
import pytest
from bot.core.database.settings_collection import SettingsCollection
from test_framework.asserts.database_asserts.check_settings_collection import check_create_new_settings
from test_framework.scripts.common.data_factory import get_test_data
@pytest.mark.asyncio
@pytest.mark.parametrize('test_data', get_test_data(__file__))
async def test_create_new_settings(settings_collection: SettingsCollection, test_data: dict):
"""
Test the database content after creating the new settings.
:param settings_collection: Database settings collection.
:type settings_collection: SettingsCollection
:param test_data: Settings collection test data.
:type test_data: dict
"""
await check_create_new_settings(settings_collection, test_data)
|
python
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=redefined-builtin, import-error
import struct
import uuid
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union  # pylint: disable=unused-import
from pyamqp2.message import Message, Header, Properties
_LOGGER = logging.getLogger(__name__)
_HEADER_PREFIX = memoryview(b'AMQP')
_COMPOSITES = {
35: 'received',
36: 'accepted',
37: 'rejected',
38: 'released',
39: 'modified',
}
c_unsigned_char = struct.Struct('>B')
c_signed_char = struct.Struct('>b')
c_unsigned_short = struct.Struct('>H')
c_signed_short = struct.Struct('>h')
c_unsigned_int = struct.Struct('>I')
c_signed_int = struct.Struct('>i')
c_unsigned_long = struct.Struct('>L')
c_unsigned_long_long = struct.Struct('>Q')
c_signed_long_long = struct.Struct('>q')
c_float = struct.Struct('>f')
c_double = struct.Struct('>d')
def _decode_null(buffer):
# type: (memoryview) -> Tuple[memoryview, None]
return buffer, None
def _decode_true(buffer):
# type: (memoryview) -> Tuple[memoryview, bool]
return buffer, True
def _decode_false(buffer):
# type: (memoryview) -> Tuple[memoryview, bool]
return buffer, False
def _decode_zero(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer, 0
def _decode_empty(buffer):
# type: (memoryview) -> Tuple[memoryview, List[None]]
return buffer, []
def _decode_boolean(buffer):
# type: (memoryview) -> Tuple[memoryview, bool]
return buffer[1:], buffer[:1] == b'\x01'
def _decode_ubyte(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[1:], buffer[0]
def _decode_ushort(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[2:], c_unsigned_short.unpack(buffer[:2])[0]
def _decode_uint_small(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[1:], buffer[0]
def _decode_uint_large(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[4:], c_unsigned_int.unpack(buffer[:4])[0]
def _decode_ulong_small(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[1:], buffer[0]
def _decode_ulong_large(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[8:], c_unsigned_long_long.unpack(buffer[:8])[0]
def _decode_byte(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[1:], c_signed_char.unpack(buffer[:1])[0]
def _decode_short(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[2:], c_signed_short.unpack(buffer[:2])[0]
def _decode_int_small(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[1:], c_signed_char.unpack(buffer[:1])[0]
def _decode_int_large(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[4:], c_signed_int.unpack(buffer[:4])[0]
def _decode_long_small(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[1:], c_signed_char.unpack(buffer[:1])[0]
def _decode_long_large(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[8:], c_signed_long_long.unpack(buffer[:8])[0]
def _decode_float(buffer):
# type: (memoryview) -> Tuple[memoryview, float]
return buffer[4:], c_float.unpack(buffer[:4])[0]
def _decode_double(buffer):
# type: (memoryview) -> Tuple[memoryview, float]
return buffer[8:], c_double.unpack(buffer[:8])[0]
def _decode_timestamp(buffer):
# type: (memoryview) -> Tuple[memoryview, int]
return buffer[8:], c_signed_long_long.unpack(buffer[:8])[0]
def _decode_uuid(buffer):
# type: (memoryview) -> Tuple[memoryview, uuid.UUID]
return buffer[16:], uuid.UUID(bytes=buffer[:16].tobytes())
def _decode_binary_small(buffer):
# type: (memoryview) -> Tuple[memoryview, bytes]
length_index = buffer[0] + 1
return buffer[length_index:], buffer[1:length_index].tobytes()
def _decode_binary_large(buffer):
# type: (memoryview) -> Tuple[memoryview, bytes]
length_index = c_unsigned_long.unpack(buffer[:4])[0] + 4
return buffer[length_index:], buffer[4:length_index].tobytes()
def _decode_list_small(buffer):
# type: (memoryview) -> Tuple[memoryview, List[Any]]
count = buffer[1]
buffer = buffer[2:]
values = [None] * count
for i in range(count):
buffer, values[i] = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
return buffer, values
def _decode_list_large(buffer):
# type: (memoryview) -> Tuple[memoryview, List[Any]]
count = c_unsigned_long.unpack(buffer[4:8])[0]
buffer = buffer[8:]
values = [None] * count
for i in range(count):
buffer, values[i] = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
return buffer, values
def _decode_map_small(buffer):
# type: (memoryview) -> Tuple[memoryview, Dict[Any, Any]]
count = int(buffer[1]/2)
buffer = buffer[2:]
values = {}
for _ in range(count):
buffer, key = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
buffer, value = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
values[key] = value
return buffer, values
def _decode_map_large(buffer):
# type: (memoryview) -> Tuple[memoryview, Dict[Any, Any]]
count = int(c_unsigned_long.unpack(buffer[4:8])[0]/2)
buffer = buffer[8:]
values = {}
for _ in range(count):
buffer, key = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
buffer, value = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
values[key] = value
return buffer, values
def _decode_array_small(buffer):
# type: (memoryview) -> Tuple[memoryview, List[Any]]
count = buffer[1] # Ignore first byte (size) and just rely on count
if count:
subconstructor = buffer[2]
buffer = buffer[3:]
values = [None] * count
for i in range(count):
buffer, values[i] = _DECODE_BY_CONSTRUCTOR[subconstructor](buffer)
return buffer, values
return buffer[2:], []
def _decode_array_large(buffer):
# type: (memoryview) -> Tuple[memoryview, List[Any]]
count = c_unsigned_long.unpack(buffer[4:8])[0]
if count:
subconstructor = buffer[8]
buffer = buffer[9:]
values = [None] * count
for i in range(count):
buffer, values[i] = _DECODE_BY_CONSTRUCTOR[subconstructor](buffer)
return buffer, values
return buffer[8:], []
def _decode_described(buffer):
# type: (memoryview) -> Tuple[memoryview, Any]
# TODO: to move the cursor of the buffer to the described value based on size of the
# descriptor without decoding descriptor value
composite_type = buffer[0]
buffer, descriptor = _DECODE_BY_CONSTRUCTOR[composite_type](buffer[1:])
buffer, value = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
try:
composite_type = _COMPOSITES[descriptor]
return buffer, {composite_type: value}
except KeyError:
return buffer, value
def decode_payload(buffer):
# type: (memoryview) -> Message
message = {}
while buffer:
# Ignore the first two bytes, they will always be the constructors for
# described type then ulong.
descriptor = buffer[2]
buffer, value = _DECODE_BY_CONSTRUCTOR[buffer[3]](buffer[4:])
if descriptor == 112:
message["header"] = Header(*value)
elif descriptor == 113:
message["delivery_annotations"] = value
elif descriptor == 114:
message["message_annotations"] = value
elif descriptor == 115:
message["properties"] = Properties(*value)
elif descriptor == 116:
message["application_properties"] = value
elif descriptor == 117:
try:
message["data"].append(value)
except KeyError:
message["data"] = [value]
elif descriptor == 118:
try:
message["sequence"].append(value)
except KeyError:
message["sequence"] = [value]
elif descriptor == 119:
message["value"] = value
elif descriptor == 120:
message["footer"] = value
# TODO: we can possibly swap out the Message construct with a TypedDict
# for both input and output so we get the best of both.
return Message(**message)
def decode_frame(data):
# type: (memoryview) -> Tuple[int, List[Any]]
# Ignore the first two bytes, they will always be the constructors for
# described type then ulong.
frame_type = data[2]
compound_list_type = data[3]
if compound_list_type == 0xd0:
# list32 0xd0: data[4:8] is size, data[8:12] is count
count = c_signed_int.unpack(data[8:12])[0]
buffer = data[12:]
else:
# list8 0xc0: data[4] is size, data[5] is count
count = data[5]
buffer = data[6:]
fields = [None] * count
for i in range(count):
buffer, fields[i] = _DECODE_BY_CONSTRUCTOR[buffer[0]](buffer[1:])
if frame_type == 20:
fields.append(buffer)
return frame_type, fields
def decode_empty_frame(header):
    # type: (memoryview) -> Tuple[int, bytes]
if header[0:4] == _HEADER_PREFIX:
return 0, header.tobytes()
if header[5] == 0:
return 1, b"EMPTY"
raise ValueError("Received unrecognized empty frame")
_DECODE_BY_CONSTRUCTOR = [None] * 256  # type: List[Optional[Callable[[memoryview], Tuple[memoryview, Any]]]]
_DECODE_BY_CONSTRUCTOR[0] = _decode_described
_DECODE_BY_CONSTRUCTOR[64] = _decode_null
_DECODE_BY_CONSTRUCTOR[65] = _decode_true
_DECODE_BY_CONSTRUCTOR[66] = _decode_false
_DECODE_BY_CONSTRUCTOR[67] = _decode_zero
_DECODE_BY_CONSTRUCTOR[68] = _decode_zero
_DECODE_BY_CONSTRUCTOR[69] = _decode_empty
_DECODE_BY_CONSTRUCTOR[80] = _decode_ubyte
_DECODE_BY_CONSTRUCTOR[81] = _decode_byte
_DECODE_BY_CONSTRUCTOR[82] = _decode_uint_small
_DECODE_BY_CONSTRUCTOR[83] = _decode_ulong_small
_DECODE_BY_CONSTRUCTOR[84] = _decode_int_small
_DECODE_BY_CONSTRUCTOR[85] = _decode_long_small
_DECODE_BY_CONSTRUCTOR[86] = _decode_boolean
_DECODE_BY_CONSTRUCTOR[96] = _decode_ushort
_DECODE_BY_CONSTRUCTOR[97] = _decode_short
_DECODE_BY_CONSTRUCTOR[112] = _decode_uint_large
_DECODE_BY_CONSTRUCTOR[113] = _decode_int_large
_DECODE_BY_CONSTRUCTOR[114] = _decode_float
_DECODE_BY_CONSTRUCTOR[128] = _decode_ulong_large
_DECODE_BY_CONSTRUCTOR[129] = _decode_long_large
_DECODE_BY_CONSTRUCTOR[130] = _decode_double
_DECODE_BY_CONSTRUCTOR[131] = _decode_timestamp
_DECODE_BY_CONSTRUCTOR[152] = _decode_uuid
_DECODE_BY_CONSTRUCTOR[160] = _decode_binary_small
_DECODE_BY_CONSTRUCTOR[161] = _decode_binary_small
_DECODE_BY_CONSTRUCTOR[163] = _decode_binary_small
_DECODE_BY_CONSTRUCTOR[176] = _decode_binary_large
_DECODE_BY_CONSTRUCTOR[177] = _decode_binary_large
_DECODE_BY_CONSTRUCTOR[179] = _decode_binary_large
_DECODE_BY_CONSTRUCTOR[192] = _decode_list_small
_DECODE_BY_CONSTRUCTOR[193] = _decode_map_small
_DECODE_BY_CONSTRUCTOR[208] = _decode_list_large
_DECODE_BY_CONSTRUCTOR[209] = _decode_map_large
_DECODE_BY_CONSTRUCTOR[224] = _decode_array_small
_DECODE_BY_CONSTRUCTOR[240] = _decode_array_large
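# Minimal usage sketch (illustrative, not from the original module): decoding a
# couple of primitive AMQP values directly through the constructor table. The
# byte strings below are hand-built examples, not captured wire data. Note that
# each decoder receives the buffer *after* the constructor byte, matching how the
# table is invoked elsewhere in this module.
if __name__ == "__main__":
    # 0x56 is the boolean constructor; the payload is a single byte.
    remaining, value = _DECODE_BY_CONSTRUCTOR[0x56](memoryview(b"\x01"))
    assert value is True and not bytes(remaining)
    # 0x52 is the small uint constructor; the payload is a single byte.
    remaining, value = _DECODE_BY_CONSTRUCTOR[0x52](memoryview(b"\x2a"))
    assert value == 42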
|
python
|
import unittest
import mock
from able.queue import ble_task
class TestBLETask(unittest.TestCase):
def setUp(self):
self.queue = mock.Mock()
self.task_called = None
@ble_task
def increment(self, a=1, b=0):
self.task_called = a + b
def test_method_not_executed(self):
self.increment()
self.assertEqual(self.task_called, None)
def test_task_enqued(self):
self.increment()
self.assertTrue(self.queue.enque.called)
def test_task_default_arguments(self):
self.increment()
task = self.queue.enque.call_args[0][0]
task()
self.assertEqual(self.task_called, 1)
def test_task_arguments_passed(self):
self.increment(200, 11)
task = self.queue.enque.call_args[0][0]
task()
self.assertEqual(self.task_called, 211)
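# Illustrative sketch only (the real decorator lives in able.queue): based on the
# behaviour the tests above assert, ble_task is assumed to defer execution by
# enqueueing a zero-argument closure on the owner's queue instead of calling the
# wrapped method immediately.
import functools

def ble_task_sketch(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Push a deferred call onto the queue; nothing runs until the task is invoked.
        self.queue.enque(functools.partial(method, self, *args, **kwargs))
    return wrapper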
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""@package Operators
This module offers various methods to process eye movement data
Created by Tomas Knapen on .
Copyright (c) 2010 TKLAB. All rights reserved.
More details.
"""
from __future__ import division
import os, pickle, inspect
import numpy as np
import scipy as sp
import seaborn as sn
import matplotlib.pylab as pl
import bottleneck as bn
import pandas as pd
import mne
import sklearn.decomposition.pca as pca
from sklearn import svm, grid_search
from scipy import ndimage, signal
from hedfpy import HDFEyeOperator
from hedfpy import EyeSignalOperator
from fir import FIRDeconvolution
from IPython import embed as shell
sn.set(style="ticks")
class Pupil_SSVEP_Session(object):
"""docstring for Pupil_SSVEP_Analyzer"""
def __init__(self, analyzer, file_alias, trial_duration = 90.0, eye = 'R', stim_frequency = 2.0):
super(Pupil_SSVEP_Session, self).__init__()
self.analyzer = analyzer
self.file_alias = file_alias
self.trial_duration = trial_duration
self.eye = eye
self.stim_frequency = stim_frequency
def assert_data_intern(self):
if not hasattr(self, 'trial_times'):
self.internalize_data()
def internalize_data(self):
""""""
# load times per session:
self.trial_times = self.analyzer.ho.read_session_data(self.file_alias, 'trials')
self.trial_phase_times = self.analyzer.ho.read_session_data(self.file_alias, 'trial_phases')
self.trial_parameters = self.analyzer.ho.read_session_data(self.file_alias, 'parameters')
try:
self.events = self.analyzer.ho.read_session_data(self.file_alias, 'events')
except KeyError:
self.events = []
# check at what timestamps the recording started:
self.session_start_EL_time = np.array( self.trial_phase_times[np.array(self.trial_phase_times['trial_phase_index'] == 1) * np.array(self.trial_phase_times['trial_phase_trial'] == 0)]['trial_phase_EL_timestamp'] )[0]
self.session_stop_EL_time = np.array(self.trial_times['trial_end_EL_timestamp'])[-1]
self.trial_indices = np.unique(self.trial_times['trial_start_index'])
self.sample_rate = self.analyzer.ho.sample_rate_during_period([self.session_start_EL_time, self.session_stop_EL_time], self.file_alias)
self.nr_samples_per_trial = int(self.trial_duration * self.sample_rate)
self.from_zero_timepoints = np.linspace(0, 90, self.nr_samples_per_trial)
# the signals to be analyzed
self.pupil_bp_pt = np.array([np.array(self.analyzer.ho.signal_during_period(
time_period = [self.trial_times[self.trial_times['trial_start_index']==i]['trial_start_EL_timestamp'], self.trial_times[self.trial_times['trial_end_index']==i]['trial_end_EL_timestamp']],
alias = self.file_alias, signal = 'pupil_bp_clean_zscore', requested_eye = 'R'))[-self.nr_samples_per_trial:] for i in range(len(self.trial_indices))]).squeeze() #
self.timestamps_pt = np.array([np.array(self.analyzer.ho.signal_during_period(
time_period = [self.trial_times[self.trial_times['trial_start_index']==i]['trial_start_EL_timestamp'], self.trial_times[self.trial_times['trial_end_index']==i]['trial_end_EL_timestamp']],
alias = self.file_alias, signal = 'time', requested_eye = 'R'))[-self.nr_samples_per_trial:] for i in range(len(self.trial_indices))]).squeeze()
# replay features only instantaneous transitions
# prepare for this
if 'RP' in self.file_alias:
self.colors = ['r','g']
self.scancode_list = [39,41]
else:
self.colors = ['r','b','g']
self.scancode_list = [89,90,91]
def raw_signal_plot(self):
self.assert_data_intern()
f = pl.figure(figsize = (24,24))
for x in range(len(self.trial_indices)):
s = f.add_subplot(len(self.trial_indices), 1, x+1)
pl.plot(self.timestamps_pt[x][::100], self.pupil_bp_pt[x][::100], 'k')
if len(self.events) != 0:
events_this_trial = self.events[(self.events['EL_timestamp'] > self.timestamps_pt[x][0]) & (self.events['EL_timestamp'] < self.timestamps_pt[x][-1])]
for sc, scancode in enumerate(self.scancode_list):
these_event_times = events_this_trial[events_this_trial['scancode'] == scancode]['EL_timestamp']
for tet in these_event_times:
pl.axvline(x = tet, c = self.colors[sc], lw = 5.0)
sn.despine(offset=10)
pl.tight_layout()
pl.savefig(os.path.join(self.analyzer.fig_dir, self.file_alias + '_raw.pdf'))
def behavioral_analysis(self):
"""some analysis of the behavioral data, such as mean percept duration,
dominance ratio etc"""
self.assert_data_intern()
# only do anything if this is not a no report trial
if 'RP' in self.file_alias:
all_percepts_and_durations = [[],[]]
else:
all_percepts_and_durations = [[],[],[]]
if not 'NR' in self.file_alias: # and not 'RP' in self.file_alias
for x in range(len(self.trial_indices)):
if len(self.events) != 0:
events_this_trial = self.events[(self.events['EL_timestamp'] > self.timestamps_pt[x][0]) & (self.events['EL_timestamp'] < self.timestamps_pt[x][-1])]
for sc, scancode in enumerate(self.scancode_list):
percept_start_indices = np.arange(len(events_this_trial))[np.array(events_this_trial['scancode'] == scancode)]
percept_end_indices = percept_start_indices + 1
# convert to times
start_times = np.array(events_this_trial['EL_timestamp'])[percept_start_indices] - self.timestamps_pt[x,0]
if len(start_times) > 0:
if percept_end_indices[-1] == len(events_this_trial):
end_times = np.array(events_this_trial['EL_timestamp'])[percept_end_indices[:-1]] - self.timestamps_pt[x,0]
end_times = np.r_[end_times, len(self.from_zero_timepoints)]
else:
end_times = np.array(events_this_trial['EL_timestamp'])[percept_end_indices] - self.timestamps_pt[x,0]
these_raw_event_times = np.array([start_times + self.timestamps_pt[x,0], end_times + self.timestamps_pt[x,0]]).T
these_event_times = np.array([start_times, end_times]).T + x * self.trial_duration * self.sample_rate
durations = np.diff(these_event_times, axis = -1)
all_percepts_and_durations[sc].append(np.hstack((these_raw_event_times, these_event_times, durations)))
self.all_percepts_and_durations = [np.vstack(apd) for apd in all_percepts_and_durations]
# last element is duration, sum inclusive and exclusive of transitions
total_percept_duration = np.concatenate([apd[:,-1] for apd in self.all_percepts_and_durations]).sum()
total_percept_duration_excl = np.concatenate([apd[:,-1] for apd in [self.all_percepts_and_durations[0], self.all_percepts_and_durations[-1]]]).sum()
self.ratio_transition = 1.0 - (total_percept_duration_excl / total_percept_duration)
self.ratio_percept_red = self.all_percepts_and_durations[0][:,-1].sum() / total_percept_duration_excl
self.red_durations = np.array([np.mean(self.all_percepts_and_durations[0][:,-1]), np.median(self.all_percepts_and_durations[0][:,-1])])
self.green_durations = np.array([np.mean(self.all_percepts_and_durations[-1][:,-1]), np.median(self.all_percepts_and_durations[-1][:,-1])])
self.transition_durations = np.array([np.mean(self.all_percepts_and_durations[1][:,-1]), np.median(self.all_percepts_and_durations[1][:,-1])])
self.ratio_percept_red_durations = self.red_durations / (self.red_durations + self.green_durations)
plot_mean_or_median = 0 # mean
f = pl.figure(figsize = (8,4))
s = f.add_subplot(111)
for i in range(len(self.colors)):
pl.hist(self.all_percepts_and_durations[i][:,-1], bins = 20, color = self.colors[i], histtype='step', lw = 3.0, alpha = 0.4, label = ['Red', 'Trans', 'Green'][i])
pl.hist(np.concatenate([self.all_percepts_and_durations[0][:,-1], self.all_percepts_and_durations[-1][:,-1]]), bins = 20, color = 'k', histtype='step', lw = 3.0, alpha = 0.4, label = 'Percepts')
pl.legend()
s.set_xlabel('time [ms]')
s.set_ylabel('count')
sn.despine(offset=10)
s.annotate("""ratio_transition: %1.2f, \nratio_percept_red: %1.2f, \nduration_red: %2.2f,\nduration_green: %2.2f, \nratio_percept_red_durations: %1.2f"""%(self.ratio_transition, self.ratio_percept_red, self.red_durations[plot_mean_or_median], self.green_durations[plot_mean_or_median], self.ratio_percept_red_durations[plot_mean_or_median]), (0.5,0.65), textcoords = 'figure fraction')
pl.tight_layout()
pl.savefig(os.path.join(self.analyzer.fig_dir, self.file_alias + '_dur_hist.pdf'))
def tf_analysis(self, plot_Z = True, frequencies = None, vis_frequency_limits = [1.8, 2.2], nr_cycles = 16, analysis_sample_rate = 100):
self.assert_data_intern()
        if frequencies is None:
frequencies = np.linspace(1.0, self.analyzer.low_pass_pupil_f, 40)
down_sample_factor = int(self.sample_rate/analysis_sample_rate)
resampled_signal = self.pupil_bp_pt[:,::down_sample_factor]
# complex tf results per trial
self.tf_trials = mne.time_frequency.cwt_morlet(resampled_signal, analysis_sample_rate, frequencies, use_fft=True, n_cycles=nr_cycles, zero_mean=True)
self.instant_power_trials = np.abs(self.tf_trials)
# z-score power
self.Z_tf_trials = np.zeros_like(self.instant_power_trials)
m = self.instant_power_trials.mean(axis = -1)
sd = self.instant_power_trials.std(axis = -1)
for z in range(len(self.Z_tf_trials)):
self.Z_tf_trials[z] = ((self.instant_power_trials[z].T - m[z]) / sd[z] ).T
# some obvious conditions
if plot_Z:
tf_to_plot = self.Z_tf_trials
else:
tf_to_plot = self.instant_power_trials
f = pl.figure(figsize = (24,24))
for x in range(len(self.trial_indices)):
s = f.add_subplot(len(self.trial_indices), 2, (x*2)+1)
pl.imshow(np.squeeze(tf_to_plot[x,(frequencies > vis_frequency_limits[0]) & (frequencies < vis_frequency_limits[1]),::100]), cmap = 'seismic', extent = [self.from_zero_timepoints[0], self.from_zero_timepoints[-1], vis_frequency_limits[-1], vis_frequency_limits[0]], aspect='auto')
sn.despine(offset=10)
s = f.add_subplot(len(self.trial_indices), 2, (x*2)+2)
# pl.imshow(np.squeeze(tf_to_plot[x,:,::100]), cmap = 'gray')
pl.plot(self.from_zero_timepoints[::down_sample_factor], np.squeeze(np.squeeze(tf_to_plot[x,(frequencies > vis_frequency_limits[0]) & (frequencies < vis_frequency_limits[1]),:])).mean(axis = 0), 'k')
if len(self.events) != 0:
events_this_trial = self.events[(self.events['EL_timestamp'] > self.timestamps_pt[x][0]) & (self.events['EL_timestamp'] < self.timestamps_pt[x][-1])]
for sc, scancode in enumerate(self.scancode_list):
these_event_times = events_this_trial[events_this_trial['scancode'] == scancode]['EL_timestamp']
for tet in these_event_times:
pl.axvline(x = (tet - self.timestamps_pt[x,0]) / self.sample_rate, c = self.colors[sc], lw = 5.0)
sn.despine(offset=10)
pl.tight_layout()
pl.savefig(os.path.join(self.analyzer.fig_dir, self.file_alias + '_%i_tfr.pdf'%nr_cycles))
with pd.get_store(self.analyzer.h5_file) as h5_file:
for name, data in zip(['tf_complex_real', 'tf_complex_imag', 'tf_power', 'tf_power_Z'],
np.array([np.real(self.tf_trials), np.imag(self.tf_trials), self.instant_power_trials, self.Z_tf_trials], dtype = np.float64)):
opd = pd.Panel(data,
items = pd.Series(self.trial_indices),
major_axis = pd.Series(frequencies),
minor_axis = self.from_zero_timepoints[::down_sample_factor])
h5_file.put("/%s/tf/cycles_%s_%s"%(self.file_alias, nr_cycles, name), opd)
def read_trans_counts(self):
tc_file = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), 'number_of_percepts.txt')
tcs = pd.read_csv(tc_file)
if self.file_alias in list(tcs):
self.trans_counts = np.array(tcs[self.file_alias])
def project_phases(self, nr_cycles = 20, freqs_of_interest = [1.8, 2.2]):
self.assert_data_intern()
self.read_trans_counts()
replay_phase = np.loadtxt(os.path.join(self.analyzer.sj_dir, 'phase_delay.txt'))[0]
with pd.get_store(self.analyzer.h5_file) as h5_file:
real_data = h5_file.get("/%s/tf/cycles_%s_%s"%(self.file_alias, nr_cycles, 'tf_complex_real'))
imag_data = h5_file.get("/%s/tf/cycles_%s_%s"%(self.file_alias, nr_cycles, 'tf_complex_imag'))
power_data = h5_file.get("/%s/tf/cycles_%s_%s"%(self.file_alias, nr_cycles, 'tf_power_Z'))
trial_numbers = np.array(real_data.keys())
frequencies = np.array(real_data.major_axis)
timepoints = np.array(real_data.minor_axis)
real_m = np.array(real_data)[:,(frequencies>freqs_of_interest[0]) & (frequencies<freqs_of_interest[1]),:].mean(axis = 1)
imag_m = np.array(imag_data)[:,(frequencies>freqs_of_interest[0]) & (frequencies<freqs_of_interest[1]),:].mean(axis = 1)
power_m = np.array(power_data)[:,(frequencies>freqs_of_interest[0]) & (frequencies<freqs_of_interest[1]),:].mean(axis = 1)
expected_phase_real = np.cos(replay_phase + timepoints * self.stim_frequency * 2.0 * np.pi)
expected_phase_imag = np.sin(replay_phase + timepoints * self.stim_frequency * 2.0 * np.pi)
complex_data = np.array([real_m, imag_m]).transpose((1,2,0))
template_data = np.array([expected_phase_real, expected_phase_imag]).transpose((1,0))
projected_data = np.zeros(complex_data.shape[:-1])
for x in range(len(complex_data)):
projected_data[x] = np.array([np.dot(c, t)/np.dot(t,t) for c, t in zip(complex_data[x], template_data)])
# plot these timecourses per trial
f = pl.figure(figsize = (8,24))
for x in range(len(complex_data)):
s = f.add_subplot(len(complex_data), 1, x+1)
pl.plot(timepoints, projected_data[x], 'k', lw = 4.0)
pl.plot(timepoints, power_m[x], 'k--', lw = 2.0)
s.axhline(np.median(projected_data[x]), color = 'b', lw = 3.0)
s.axhline(np.mean(projected_data[x]), color = 'r', lw = 3.0, ls = '--', alpha = 0.6)
if hasattr(self, 'trans_counts'):
s.annotate('%i'%self.trans_counts[x], (0.5,0.65), textcoords = 'axes fraction')
s.set_ylim([-2,4])
sn.despine(offset=10)
pl.tight_layout()
pl.savefig(os.path.join(self.analyzer.fig_dir, self.file_alias + '_projected.pdf'))
# save out as dataframe
pdf = pd.DataFrame(projected_data, index = trial_numbers, columns = timepoints)
with pd.get_store(self.analyzer.h5_file) as h5_file:
h5_file.put("/%s/tf/cycles_%s_%s"%(self.file_alias, nr_cycles, 'projected'), pdf)
def svm_classification(self, nr_cycles = 20, freqs_of_interest = [-np.inf, np.inf], n_components = 10):
self.assert_data_intern()
self.read_trans_counts()
with pd.get_store(self.analyzer.h5_file) as h5_file:
power_data = h5_file.get("/%s/tf/cycles_%s_%s"%(self.file_alias, nr_cycles, 'tf_power_Z'))
timepoints = np.array(power_data.minor_axis)
# preprocess the timecourses, by PCA
pd_r = np.array(power_data).transpose((1,0,2))
pd_r = pd_r.reshape((np.array(power_data).shape[1], -1))
p = pca.PCA(n_components = n_components)
p.fit(pd_r.T)
power_data_R = p.transform(pd_r.T)
# power_data_R = power_data_R.reshape((n_components, len(self.trial_indices),power_data.shape[-1]))
# time courses of behavior allow us to create labels for training and test sets
self.behavioral_analysis()
label_selection_array = np.zeros((2,pd_r.shape[-1]), dtype = bool)
times_in_data = np.concatenate([timepoints*self.sample_rate + x*timepoints.max()*self.sample_rate for x in range(len(power_data))])
if not hasattr(self, 'all_percepts_and_durations'): # the no report condition doesn't have percept definitions
data_points = power_data_R
label_selection_array = np.ones((2, power_data_R.shape[-1]))
svc = None
else: # with knowledge on what happened when, we can train a decoder.
labels = []
data_points = []
for percept, i in zip([0,-1], [0,1]):
event_times = self.all_percepts_and_durations[percept][:,[2,3]]
for ev in event_times:
label_selection_array[i] += (times_in_data > ev[0]) & (times_in_data < ev[1])
labels.append(np.ones(pd_r.shape[-1])[label_selection_array[i]] * i)
data_points.append(power_data_R[label_selection_array[i],:])
labels = np.concatenate(labels)
data_points = np.concatenate(data_points)
gammas = np.logspace(-6, -1, 5)
svc = svm.SVC(kernel='linear', probability = True)
# clf = grid_search.GridSearchCV(estimator=svc, param_grid=dict(gamma=gammas), n_jobs=-1)
svc.fit(data_points, labels)
try: os.mkdir(os.path.join(self.analyzer.base_directory, 'svm'))
except OSError: pass
with open(os.path.join(self.analyzer.base_directory, 'svm', self.file_alias+'_svm.pickle'), 'w') as f:
pickle.dump((svc, data_points, label_selection_array), f)
return svc, data_points, label_selection_array
class Pupil_SSVEP_Analyzer(object):
"""Pupil_SSVEP_Analyzer is a class that analyzes the results of a pupil size SSVEP experiment"""
def __init__(self, sj_initial, base_directory, file_aliases, low_pass_pupil_f = 6, high_pass_pupil_f = 0.01):
super(Pupil_SSVEP_Analyzer, self).__init__()
self.sj_initial = sj_initial
self.file_aliases = file_aliases
self.base_directory = base_directory
self.low_pass_pupil_f = low_pass_pupil_f
self.high_pass_pupil_f = high_pass_pupil_f
self.sj_dir = os.path.join(self.base_directory, self.sj_initial )
self.fig_dir = os.path.join(self.base_directory, self.sj_initial, 'figs' )
self.edf_files = [os.path.join(self.base_directory, 'raw', fa + '.edf') for fa in self.file_aliases]
self.h5_file = os.path.join(self.base_directory, self.sj_initial, self.sj_initial + '.h5')
try: os.mkdir(self.sj_dir)
except OSError: pass
try: os.mkdir(self.fig_dir)
except OSError: pass
os.chdir(self.base_directory)
# initialize the hdfeyeoperator
self.ho = HDFEyeOperator(self.h5_file)
# insert the edf file contents only when the h5 is not present.
if not os.path.isfile(self.h5_file):
self.preprocess()
def preprocess(self):
# implicit preprocessing
for i, ef in enumerate(self.edf_files):
self.ho.add_edf_file(ef)
self.ho.edf_message_data_to_hdf(alias = self.file_aliases[i])
self.ho.edf_gaze_data_to_hdf(alias = self.file_aliases[i], pupil_hp = self.high_pass_pupil_f, pupil_lp = self.low_pass_pupil_f)
def analyze(self, nr_cycles_tf = 12.0):
for alias in self.file_aliases:
pss = Pupil_SSVEP_Session(self, alias)
# pss.raw_signal_plot()
# pss.tf_analysis(nr_cycles = nr_cycles_tf)
# pss.behavioral_analysis()
pss.project_phases()
pss.svm_classification()
def analyze_tf(self, nr_cycles_tf = [40,20,12,8,4,2]):
for alias in self.file_aliases:
for nc_tf in nr_cycles_tf:
pss = Pupil_SSVEP_Session(self, alias)
pss.tf_analysis(nr_cycles = nc_tf)
def get_experimental_phase(self, freqs_of_interest = [1.8, 2.2]):
which_alias_reported_replay = [fa for fa in self.file_aliases if '_RP' in fa][0]
with pd.get_store(self.h5_file) as h5_file:
replay_report_real = h5_file.get("/%s/tf/cycles_%s_%s"%(which_alias_reported_replay, 20, 'tf_complex_real'))
replay_report_imag = h5_file.get("/%s/tf/cycles_%s_%s"%(which_alias_reported_replay, 20, 'tf_complex_imag'))
frequencies = np.array(replay_report_real.major_axis)
replay_report_real_m = np.array(replay_report_real, dtype = np.complex)[:,(frequencies>freqs_of_interest[0]) & (frequencies<freqs_of_interest[1]),:].mean(axis = 1)
replay_report_imag_m = np.array(replay_report_imag)[:,(frequencies>freqs_of_interest[0]) & (frequencies<freqs_of_interest[1]),:].mean(axis = 1)
replay_data = np.zeros(replay_report_real_m.shape, dtype = np.complex)
replay_data.real = replay_report_real_m
replay_data.imag = replay_report_imag_m
angle_mean = np.angle(replay_data.mean(axis = 0).reshape([-1,100]).mean(axis = 0))
real_mean = np.real(replay_data.mean(axis = 0).reshape([-1,100]).mean(axis = 0))
imag_mean = np.imag(replay_data.mean(axis = 0).reshape([-1,100]).mean(axis = 0))
distance_per_phase_real = np.array([(np.cos(np.linspace(0,4*np.pi,100, endpoint = False) + phase) - real_mean).max() for phase in np.linspace(0,2*np.pi,1000)])
distance_per_phase_imag = np.array([(np.sin(np.linspace(0,4*np.pi,100, endpoint = False) + phase) - imag_mean).max() for phase in np.linspace(0,2*np.pi,1000)])
phase_lag_real, phase_lag_imag = (np.linspace(0,2*np.pi,1000)[x] for x in (np.argmin(distance_per_phase_real), np.argmin(distance_per_phase_imag)))
f = pl.figure()
s = f.add_subplot(211)
pl.plot(angle_mean, label = 'phase')
pl.plot(real_mean, label = 'real')
pl.plot(imag_mean, label = 'imag')
pl.plot(np.arange(100),np.sin(np.linspace(0,4*np.pi,100, endpoint = False)), 'k--', label = 'sin')
pl.plot(np.arange(100),np.cos(np.linspace(0,4*np.pi,100, endpoint = False)), 'k:', label = 'cos')
pl.legend()
s = f.add_subplot(212)
pl.plot(np.linspace(0,2*np.pi,1000), distance_per_phase_real, 'k:', label = 'real')
pl.plot(np.linspace(0,2*np.pi,1000), distance_per_phase_imag, 'k--', label = 'imag')
s.axvline(phase_lag_real, color = 'r', lw = 2.0)
s.axvline(phase_lag_imag, color = 'g', lw = 2.0)
s.annotate('%0.3f'%phase_lag_imag, (0.5,0.3), textcoords = 'figure fraction')
pl.legend()
sn.despine(offset=10)
pl.tight_layout()
pl.savefig(os.path.join(self.fig_dir, 'phase_delay.pdf'))
np.savetxt(os.path.join(self.sj_dir, 'phase_delay.txt'), np.array([phase_lag_real, np.degrees(phase_lag_real)]), delimiter = '\t', fmt = '%3.4f')
def svm_decode(self, smoothing_widths = [0, 50, 100, 150, 200, 250, 300, 400, 500, 1000]):
results = {}
# train_alias = self.file_aliases[0]
train_alias = [fa for fa in self.file_aliases if fa.split('_')[-1] == 'RP'][-1]
# test_alias = [fa for fa in self.file_aliases if fa.split('_')[-1] == 'NR'][-1]
test_alias = self.file_aliases[0]
# shell()
print 'train on ' + train_alias
pss_train = Pupil_SSVEP_Session(self, train_alias)
results.update({train_alias: pss_train.svm_classification()})
print 'test on ' + test_alias
pss_test = Pupil_SSVEP_Session(self, test_alias)
pss_test.read_trans_counts()
results.update({test_alias: pss_test.svm_classification()})
replay_to_riv_prediction = results[train_alias][0].predict_proba(results[test_alias][1])
rtr_ptr = replay_to_riv_prediction[:,0].reshape((8,-1))
rtr_ptr_S = np.array([ndimage.gaussian_filter1d(rtr_ptr, sm, axis = -1, mode = 'constant', cval = 0.5) for sm in smoothing_widths])# 1 s smoothing width
total_ratio_threshold = np.percentile(replay_to_riv_prediction, 100*pss_train.ratio_percept_red, interpolation = 'linear')
total_duration_ratio_threshold = np.percentile(replay_to_riv_prediction, 100*pss_train.ratio_percept_red_durations[1], interpolation = 'linear')
# plot these timecourses per trial
f = pl.figure(figsize = (8,24))
for x in range(len(rtr_ptr)):
s = f.add_subplot(len(rtr_ptr), 1, x+1)
pl.plot(np.linspace(0,pss_test.trial_duration,rtr_ptr.shape[-1]), rtr_ptr[x], 'k', lw = 2.0)
for sm in range(rtr_ptr_S.shape[0]):
pl.plot(np.linspace(0,pss_test.trial_duration,rtr_ptr.shape[-1]), rtr_ptr_S[sm,x], 'b--', lw = 2.0, alpha = 0.25 + 0.75 * (sm / rtr_ptr_S.shape[0]), label = '%i'%smoothing_widths[sm])
s.axhline(total_ratio_threshold, color = 'g', ls = '--', lw = 2.0)
s.axhline(total_duration_ratio_threshold, color = 'r', lw = 2.0, ls = '--', alpha = 0.6)
if hasattr(pss_test, 'trans_counts'):
s.annotate('%i'%pss_test.trans_counts[x], (0.05,0.1), textcoords = 'axes fraction', fontsize = 18)
s.set_ylim([-0.2,1.1])
pl.legend(fontsize = 8, ncol = len(rtr_ptr_S), loc = (0.0,-0.15))
sn.despine(offset=10)
pl.tight_layout()
pl.savefig(os.path.join(self.fig_dir, self.sj_initial + '_svm_raw_%s_%s.pdf'%(train_alias, test_alias)))
f = pl.figure(figsize = (8,24))
for x in range(len(rtr_ptr)):
s = f.add_subplot(len(rtr_ptr), 1, x+1)
pl.imshow(rtr_ptr_S[:,x], cmap = 'seismic', extent = [0, pss_test.trial_duration, smoothing_widths[-1], smoothing_widths[0]], aspect='auto')
if hasattr(pss_test, 'trans_counts'):
s.annotate('%i'%pss_test.trans_counts[x], (0.05,0.1), textcoords = 'axes fraction', fontsize = 18)
s.set_yticks([0, len(smoothing_widths)-1])
            s.set_yticklabels([smoothing_widths[0], smoothing_widths[-1]])
sn.despine(offset=10)
pl.tight_layout()
pl.savefig(os.path.join(self.fig_dir, self.sj_initial + '_svm_imshow_%s_%s.pdf'%(train_alias, test_alias)))
|
python
|
class LOG:
""" This class specifies the different logging levels that we support.
Levels can be trivially added here and in src/core/utility.py#Msg along
with their pretty output information.
"""
INFO = 1 # green
SUCCESS = 2 # bold green
ERROR = 3 # red
DEBUG = 4 # blue
UPDATE = 5 # yellow
|
python
|
# -*- coding: utf-8 -*-
"""
Marker Property
===============
"""
# %% IMPORTS
# Package imports
from matplotlib import rcParams
# GuiPy imports
from guipy import widgets as GW
from guipy.plugins.figure.widgets.types.props import BasePlotProp
from guipy.widgets import set_box_value
# All declaration
__all__ = ['LineMarkerProp', 'ScatterMarkerProp']
# %% CLASS DEFINITIONS
# Define 'Marker' plot property
class MarkerProp(BasePlotProp):
# Class attributes
DISPLAY_NAME = "Marker"
WIDGET_NAMES = [*BasePlotProp.WIDGET_NAMES, 'marker_style_box',
'marker_size_box', 'marker_color_box']
# This function creates and returns a line style box
def marker_style_box(self):
"""
Creates a widget box for setting the style of the marker and returns
it.
"""
# Make combobox for markerstyles
marker_style_box = GW.MarkerStyleBox()
marker_style_box.setToolTip("Marker to be used for this plot")
# Set initial value to the default value in MPL
set_box_value(marker_style_box, self.default_marker)
# Return name and box
return('Style', marker_style_box)
# This function creates and returns a marker size box
def marker_size_box(self):
"""
Creates a widget box for setting the size of the marker and returns it.
"""
# Make a double spinbox for markersize
marker_size_box = GW.QDoubleSpinBox()
marker_size_box.setToolTip("Size of the plotted markers")
marker_size_box.setRange(0, 9999999)
marker_size_box.setSuffix(" pts")
# Set initial value to the default value in MPL
set_box_value(marker_size_box, rcParams['lines.markersize'])
# Return name and box
return('Size', marker_size_box)
# This function creates and returns a marker color box
def marker_color_box(self):
"""
Creates a widget box for setting the color of the marker and returns
it.
"""
# Make a color box
marker_color_box = GW.ColorBox()
marker_color_box.setToolTip("Color to be used for this marker")
# Connect 'applying' signal
self.options.applying.connect(marker_color_box.set_default_color)
# Return name and box
return('Color', marker_color_box)
# Define 'LineMarker' plot property
class LineMarkerProp(MarkerProp):
"""
Provides the definition of the :class:`~LineMarkerProp` plot property,
specifically used for line plots.
This property contains boxes for setting the marker style, marker size and
marker color.
"""
# Class attributes
NAME = "LineMarker"
# This property holds the default marker used for line plots
@property
def default_marker(self):
return(rcParams['lines.marker'])
# Define 'ScatterMarker' plot property
class ScatterMarkerProp(MarkerProp):
"""
Provides the definition of the :class:`~ScatterMarkerProp` plot property,
specifically used for scatter plots.
This property contains boxes for setting the marker style, marker size and
marker color.
"""
# Class attributes
NAME = "ScatterMarker"
# This property holds the default marker used for scatter plots
@property
def default_marker(self):
return(rcParams['scatter.marker'])
|
python
|
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import r2_score
import pandas as pd
# Importing the dataset
def dtree(data):
features = np.zeros(shape=(len(data), 4))
features[:, 0] = data["u"] - data["g"]
features[:, 1] = data["g"] - data["r"]
features[:, 2] = data["r"] - data["i"]
features[:, 3] = data["i"] - data["z"]
targets = data["redshift"]
# Splitting the dataset into the Training and Test set
features_train, features_test, targets_train, targets_test = train_test_split(
features, targets, test_size=0.2, random_state=0
)
# initialize model
regressor = DecisionTreeRegressor(max_depth=17, random_state=0)
regressor.fit(features_train, targets_train)
# get the predicted_redshifts
y_pred = regressor.predict(features_test)
accuracies = cross_val_score(
estimator=regressor, X=features_train, y=targets_train, cv=10
)
df = pd.DataFrame(y_pred)
df.to_csv('y_pred.csv',index=False)
df = pd.DataFrame(targets_test)
df.to_csv("targets_test.csv",index=False)
return [y_pred, targets_test, accuracies]
def median_diff(predicted, actual):
return np.median(np.abs(predicted[:] - actual[:]))
def plot_tree(data):
y_pred, targets_test, accuracies = dtree(data)
cmap = plt.get_cmap("hot")
xy = np.vstack([targets_test, y_pred])
z = gaussian_kde(xy)(xy)
plot = plt.scatter(targets_test, y_pred, c=z, cmap=cmap, s=0.4)
plt.colorbar(plot)
plt.xlim((0, 3))
plt.ylim((0, 3))
plt.xlabel("Measured Redshift")
plt.ylabel("Predicted Redshift")
plt.savefig("output/plot/Tree_Result", dpi=1200)
plt.show()
def R2(targets_test, y_pred):
R2 = r2_score(targets_test, y_pred)
return R2
def main_tree(data):
y_pred, targets_test, accuracies = dtree(data)
diff = median_diff(y_pred, targets_test)
znorm = []
znorm = (targets_test[:] - y_pred[:]) / (targets_test[:] + 1)
deltaz=(targets_test[:] - y_pred[:])
df = pd.DataFrame(deltaz)
df.to_csv('deltaz_dtree_total.csv',index=False)
df1 = pd.DataFrame(targets_test)
df1.to_csv('specz_dtree_total.csv',index=False)
print(np.mean(znorm))
print(f"Median difference of decision tree: {diff}")
print("Accuracy decision tree: {} %".format(accuracies.mean() * 100))
print("Standard Deviation decision tree: {} %".format(accuracies.std() * 100))
delta_tree = y_pred - targets_test
return delta_tree
def run(data):
y_pred, targets_test, accuracies = dtree(data)
diff = median_diff(y_pred, targets_test)
print(f"Median difference of decision tree: {diff}")
print("Accuracy decision tree: {} %".format(accuracies.mean() * 100))
print("Standard Deviation decision tree: {} %".format(accuracies.std() * 100))
cmap = plt.get_cmap("hot")
xy = np.vstack([targets_test, y_pred])
z = gaussian_kde(xy)(xy)
plot = plt.scatter(targets_test, y_pred, c=z, cmap=cmap, s=0.4)
plt.colorbar(plot)
plt.xlim((0, 7))
plt.ylim((0, 7))
plt.clim(0, 10)
plt.xlabel("Measured Redshift")
plt.ylabel("Predicted Redshift")
plt.savefig("Tree_Result_total", dpi=800)
plt.show()
return [y_pred, targets_test]
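# Hypothetical driver (not part of the original script): the functions above expect
# a table with SDSS-style columns u, g, r, i, z and redshift. The file name below
# is an assumption for illustration only.
if __name__ == "__main__":
    data = pd.read_csv("sdss_photometry.csv")  # assumed input file
    run(data)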
|
python
|
import flask
from pypi_vm.infrastructure.view_modifiers import response
from pypi_vm.viewmodels.packages.package_details_viewmodel import PackageDetailsViewModel
from pypi_vm.viewmodels.packages.popular_viewmodel import PopularPackageViewModel
blueprint = flask.Blueprint('packages', __name__, template_folder='templates')
@blueprint.route("/project/<package_name>")
@response(template_file='packages/details.html')
def details(package_name: str):
vm = PackageDetailsViewModel(package_name)
if not vm.package:
flask.abort(404)
return vm.to_dict()
@blueprint.route("/<int:num>")
@response(template_file='packages/popular.html')
def popular(num: int):
vm = PopularPackageViewModel(num)
    if not (1 <= vm.num <= 10):
flask.abort(404)
return vm.to_dict()
|
python
|
#!/usr/bin/python3
import requests as rq
from hbtn_api.urls import URL_AUTH
def auth(apikey, email, psswd, *args):
data = {
'api_key': apikey,
'email': email,
'password': psswd,
'scope': 'checker'
}
r = rq.get(URL_AUTH, data=data)
return r.json()
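# Hypothetical usage (illustrative only): the API key, email and password below are
# placeholders; URL_AUTH comes from hbtn_api.urls as imported above.
if __name__ == "__main__":
    token = auth("YOUR_API_KEY", "user@example.com", "secret")
    print(token)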
|
python
|
import configparser
import datetime
import wx
import os
# local imports
from . import core
class Config:
"""
    Used as a common config manager throughout the application,
it uses configparser functionality to load configuration files
and theme files.
"""
def __init__(self):
self.load_config()
def load_config(self):
"""Loading global config from properties.conf file."""
self._config = core.load_config()
self.load_theme(self._config['General']['Theme'])
def write_config(self):
"""Writes config to properties.conf file."""
core.write_config(self._config)
self.load_theme(self._config['General']['Theme'])
def load_theme(self, name):
"""Loading theme from ..themes/ directory."""
dict_app, dict_widget = core.load_theme(name)
self._widget_colors = dict_widget
self._app_colors = dict_app
self._fallback_color = '#ffffff'
def get_key(self, key):
"""Returns key from configs' general selection"""
return self._config['General'][key]
def get_color(self, name, selection='app'):
"""
Get the color from given selection, which
can be 'app' or 'widget'. If the color name
was not found, Config._fallback_color will be returned.
"""
try:
if selection.lower() == 'app':
color = self._app_colors[name]
elif selection.lower() == 'widget':
color = self._widget_colors[name]
except KeyError:
print(datetime.datetime.now().time(),
f' Using fallback color, key {name} was not found.')
color = self._fallback_color
return color
def get_font(self, name):
"""Get the given font, can be 'small', 'medium' or 'large'."""
font_name = name.title() + ' Font'
size = int(self._config[font_name]['Size'])
family = int(self._config[font_name]['Family'])
weight = int(self._config[font_name]['Weight'])
return wx.Font(size, family=family, style=wx.FONTSTYLE_NORMAL, weight=weight)
def set_font(self, font, size, family, style, weight):
self._config[font]['Size'] = str(size)
self._config[font]['Family'] = str(family)
self._config[font]['Style'] = str(style)
self._config[font]['Weight'] = str(weight)
def get_current_theme(self):
"""Returns currently set theme."""
return self._config['General']['Theme']
def get_theme_list(self):
"""Returns a list of all avaible themes."""
themes = [theme.replace('.scienv_theme', '') for theme in os.listdir(
self._config['General']['themes_path'])]
return themes
def set_theme(self, theme):
"""Sets the theme."""
self._config['General']['Theme'] = theme
self.load_theme(theme)
        core.write_config(self._config)
@property
def save_widgets_on_exit(self):
return self._config.getboolean('General', 'savewidgetsonexit')
@save_widgets_on_exit.setter
def save_widgets_on_exit(self, value):
self._config.set('General', 'savewidgetsonexit', value)
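# Hypothetical usage sketch (not part of the original module); assumes a valid
# properties.conf and theme file are present where core.load_config() expects
# them, and that 'background' is a key defined in the loaded theme.
if __name__ == '__main__':
    config = Config()
    background = config.get_color('background', selection='app')
    print(config.get_current_theme(), background)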
|
python
|
valores: list = list()
maior = menor = 0
for cv in range(0, 5):
    valores.append(int(input(f'Enter a value for position {cv}: ')))
maior = max(valores)
menor = min(valores)
# if cv == 0:
# maior = menor = valores[cv]
# else:
# if valores[cv] > maior:
# maior = valores[cv]
# if valores[cv] < menor:
# menor = valores[cv]
print('---' * 15)
print(f'The largest value is {maior} at position(s):', end=' ')
for i, nv in enumerate(valores):
if nv == maior:
print(i, end=', ')
print(f'\nThe smallest value is {menor} at position(s):', end=' ')
for i, nv in enumerate(valores):
if nv == menor:
print(i, end=', ')
print()
print(type(valores))
|
python
|
import random
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
NUMBERS = "0123456789"
class Robot(object):
ROBOT_NAME_CHARS_NUMBER = 2
ROBOT_NAME_NUM_NUMBERS = 3
def __init__(self):
self.reset()
@staticmethod
def _generate_name_section(collection, number_of_iteration):
"""_generate_name_section will return a random string of the size of number_of_iteration from the collection string"""
res = ""
for i in range(number_of_iteration):
number_index = random.randint(0, len(collection) - 1)
res += collection[number_index]
return res
def reset(self):
random.seed()
name = ""
name += Robot._generate_name_section(ALPHABET, self.ROBOT_NAME_CHARS_NUMBER)
name += Robot._generate_name_section(NUMBERS, self.ROBOT_NAME_NUM_NUMBERS)
self.name = name
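# Quick usage example (illustrative): each reset() produces a name made of two
# random uppercase letters followed by three digits, e.g. "AB123".
if __name__ == "__main__":
    robot = Robot()
    print(robot.name)
    robot.reset()  # assigns a fresh random name
    print(robot.name)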
|
python
|
import sublime, sublime_plugin, threading, json, os, subprocess, socket, shutil
from base64 import b64encode, b64decode
# python 2.6 differences
try:
from hashlib import md5, sha1
except: from md5 import md5; from sha import sha as sha1
from SimpleHTTPServer import SimpleHTTPRequestHandler
from struct import pack, unpack_from
import array, struct
import urllib2
s2a = lambda s: [ord(c) for c in s]
settings = sublime.load_settings('LiveReload.sublime-settings')
port = settings.get('port')
version = settings.get('version')
## Load the latest livereload.js from GitHub (for v2 of the protocol); fall back to the bundled local copy if the download fails
try:
req = urllib2.urlopen(urllib2.Request("http://raw.github.com/livereload/livereload-js/master/dist/livereload.js"))
source_livereload_js = req.read()
if not "http://livereload.com/protocols/official-6" in source_livereload_js:
raise Exception("Something wrong with download!")
except Exception, u:
print u
try:
path = os.path.join(sublime.packages_path(), "LiveReload")
local = open(os.path.join(path, "livereload.js"), "rU")
source_livereload_js = local.read()
except IOError, e:
print e
sublime.error_message("livereload.js is missing from LiveReload package install")
class LiveReload(threading.Thread):
def run(self):
global LivereloadFactory
threading.Thread.__init__(self)
LivereloadFactory = WebSocketServer(port,version)
LivereloadFactory.start()
class LiveReloadChange(sublime_plugin.EventListener):
def __init__ (self):
LiveReload().start()
def __del__(self):
global LivereloadFactory
LivereloadFactory.stop()
def on_post_save(self, view):
global LivereloadFactory
settings = sublime.load_settings('LiveReload.sublime-settings')
filename = view.file_name()
if view.file_name().find('.scss') > 0 or view.file_name().find('.sass') > 0:
compiler = CompassThread(filename,LivereloadFactory)
compiler.start()
else:
filename = os.path.normcase(filename)
filename = os.path.split(filename)[1]
filename = filename.replace('.scss','.css').replace('.styl','.css').replace('.less','.css')
filename = filename.replace('.coffee','.js')
data = json.dumps(["refresh", {
"path": filename,
"apply_js_live": settings.get('apply_js_live'),
"apply_css_live": settings.get('apply_css_live'),
"apply_images_live": settings.get('apply_images_live')
}])
sublime.set_timeout(lambda: LivereloadFactory.send_all(data), int(settings.get('delay_ms')))
sublime.set_timeout(lambda: sublime.status_message("Sent LiveReload command for file: "+filename), int(settings.get('delay_ms')))
class CompassThread(threading.Thread):
def __init__(self, filename,LivereloadFactory):
self.dirname = os.path.dirname(filename)
self.filename = filename.replace('.scss','.css').replace('.sass','.css')
self.LivereloadFactory = LivereloadFactory
self.stdout = None
self.stderr = None
threading.Thread.__init__(self)
def run(self):
global LivereloadFactory
print 'compass compile ' + self.dirname
# compass compile
p = subprocess.Popen(['compass compile ' + self.dirname.replace('\\','/')],shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
if p.stdout.read() :
self.LivereloadFactory.send_all(json.dumps(["refresh", {
"path": self.filename.replace('\\','/'),
"apply_js_live": True,
"apply_css_live": True,
"apply_images_live": True
}]))
class WebSocketServer:
"""
    Handle the server: bind and accept new connections, open and close
    client connections.
"""
def __init__(self, port, version):
self.clients = []
self.port = port
self.version = version
self.s = None
    def stop(self):
        # Iterate over a copy: client.close() removes entries from self.clients.
        for client in list(self.clients):
            client.close()
def start(self):
"""
Start the server.
"""
try:
self.s = socket.socket()
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(('', self.port))
self.s.listen(1)
except Exception, e:
self.stop()
try:
while 1:
conn, addr = self.s.accept()
newClient = WebSocketClient(conn, addr, self)
self.clients.append(newClient)
newClient.start()
except Exception, e:
self.stop()
def send_all(self, data):
"""
        Send a message to all the currently connected clients.
"""
[client.send(data) for client in self.clients]
def remove(self, client):
"""
Remove a client from the connected list.
"""
try:
l = threading.Lock()
l.acquire()
self.clients.remove(client)
l.release()
except Exception, e:
pass
class WebSocketClient(threading.Thread):
"""
A single connection (client) of the program
"""
# Handshaking, create the WebSocket connection
server_handshake_hybi = """HTTP/1.1 101 Switching Protocols\r
Upgrade: websocket\r
Connection: Upgrade\r
Sec-WebSocket-Accept: %s\r
"""
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
def __init__(self, sock, addr, server):
threading.Thread.__init__(self)
self.s = sock
self.addr = addr
self.server = server
def run(self):
wsh = WSRequestHandler(self.s, self.addr, False)
h = self.headers = wsh.headers
ver = h.get('Sec-WebSocket-Version')
if ver:
# HyBi/IETF version of the protocol
# HyBi-07 report version 7
# HyBi-08 - HyBi-12 report version 8
# HyBi-13 reports version 13
if ver in ['7', '8', '13']:
self.version = "hybi-%02d" % int(ver)
else:
raise Exception('Unsupported protocol version %s' % ver)
key = h['Sec-WebSocket-Key']
print key
# Generate the hash value for the accept header
accept = b64encode(sha1(key + self.GUID).digest())
response = self.server_handshake_hybi % accept
response += "\r\n"
print response
self.s.send(response.encode())
self.new_client()
# Receive and handle data
while 1:
try:
data = self.s.recv(1024)
except Exception, e:
break
if not data: break
dec = WebSocketClient.decode_hybi(data)
if dec["opcode"] == 8:
self.close()
else:
self.onreceive(dec)
# Close the client connection
self.close()
@staticmethod
def unmask(buf, f):
pstart = f['hlen'] + 4
pend = pstart + f['length']
# Slower fallback
data = array.array('B')
mask = s2a(f['mask'])
data.fromstring(buf[pstart:pend])
for i in range(len(data)):
data[i] ^= mask[i % 4]
return data.tostring()
@staticmethod
def encode_hybi(buf, opcode, base64=False):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame (base64 encode buf)
0x2 - binary frame (use raw buf)
0x8 - connection close
0x9 - ping
0xA - pong
"""
if base64:
buf = b64encode(buf)
b1 = 0x80 | (opcode & 0x0f) # FIN + opcode
payload_len = len(buf)
if payload_len <= 125:
header = pack('>BB', b1, payload_len)
elif payload_len > 125 and payload_len < 65536:
header = pack('>BBH', b1, 126, payload_len)
elif payload_len >= 65536:
header = pack('>BBQ', b1, 127, payload_len)
print("Encoded: %s" % repr(header + buf))
return header + buf, len(header), 0
@staticmethod
def decode_hybi(buf, base64=False):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : 0_or_1,
'opcode' : number,
'mask' : 32_bit_number,
'hlen' : header_bytes_number,
'length' : payload_bytes_number,
'payload' : decoded_buffer,
'left' : bytes_left_number,
'close_code' : number,
'close_reason' : string}
"""
f = {'fin' : 0,
'opcode' : 0,
'mask' : 0,
'hlen' : 2,
'length' : 0,
'payload' : None,
'left' : 0,
'close_code' : None,
'close_reason' : None}
blen = len(buf)
f['left'] = blen
if blen < f['hlen']:
return f # Incomplete frame header
b1, b2 = unpack_from(">BB", buf)
f['opcode'] = b1 & 0x0f
f['fin'] = (b1 & 0x80) >> 7
has_mask = (b2 & 0x80) >> 7
f['length'] = b2 & 0x7f
if f['length'] == 126:
f['hlen'] = 4
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxH', buf)
elif f['length'] == 127:
f['hlen'] = 10
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxQ', buf)
full_len = f['hlen'] + has_mask * 4 + f['length']
if blen < full_len: # Incomplete frame
return f # Incomplete frame header
# Number of bytes that are part of the next frame(s)
f['left'] = blen - full_len
# Process 1 frame
if has_mask:
# unmask payload
f['mask'] = buf[f['hlen']:f['hlen']+4]
f['payload'] = WebSocketClient.unmask(buf, f)
else:
print("Unmasked frame: %s" % repr(buf))
f['payload'] = buf[(f['hlen'] + has_mask * 4):full_len]
if base64 and f['opcode'] in [1, 2]:
try:
f['payload'] = b64decode(f['payload'])
except:
print("Exception while b64decoding buffer: %s" %
repr(buf))
raise
if f['opcode'] == 0x08:
if f['length'] >= 2:
f['close_code'] = unpack_from(">H", f['payload'])
if f['length'] > 3:
f['close_reason'] = f['payload'][2:]
return f
def close(self):
"""
Close this connection
"""
self.server.remove(self)
self.s.close()
def send(self, msg):
"""
Send a message to this client
"""
msg = WebSocketClient.encode_hybi(msg, 0x1, False)
self.s.send(msg[0])
def onreceive(self, data):
"""
Event called when a message is received from this client
"""
try:
print data
if "payload" in data:
print "payload true"
if "hello" in data.get("payload"):
sublime.set_timeout(lambda: sublime.status_message("New LiveReload v2 client connected"), 100)
self.send('{"command":"hello","protocols":["http://livereload.com/protocols/connection-check-1","http://livereload.com/protocols/official-6"]}')
else:
sublime.set_timeout(lambda: sublime.status_message("New LiveReload v1 client connected"), 100)
self.send("!!ver:" + str(self.server.version))
except Exception, e:
print e
def new_client(self):
"""
        Event called when the handshake is completed
"""
#self.send("!!ver:"+str(self.server.version))
def _clean(self, msg):
"""
Remove special chars used for the transmission
"""
msg = msg.replace(b'\x00', b'', 1)
msg = msg.replace(b'\xff', b'', 1)
return msg
# HTTP handler with WebSocket upgrade support
class WSRequestHandler(SimpleHTTPRequestHandler):
def __init__(self, req, addr, only_upgrade=True):
self.only_upgrade = only_upgrade # only allow upgrades
SimpleHTTPRequestHandler.__init__(self, req, addr, object())
def do_GET(self):
if (self.headers.get('upgrade') and
self.headers.get('upgrade').lower() == 'websocket'):
if (self.headers.get('sec-websocket-key1') or
self.headers.get('websocket-key1')):
# For Hixie-76 read out the key hash
self.headers.__setitem__('key3', self.rfile.read(8))
            # Just indicate that a WebSocket upgrade is needed
self.last_code = 101
self.last_message = "101 Switching Protocols"
elif self.only_upgrade:
# Normal web request responses are disabled
self.last_code = 405
self.last_message = "405 Method Not Allowed"
else:
#Self injecting plugin
if "livereload.js" in self.path:
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/javascript')
self.send_header("Content-Length", len(source_livereload_js))
self.end_headers()
self.wfile.write(bytes(source_livereload_js))
return
else:
#Disable other requests
self.send_response(405, 'Method Not Allowed')
self.send_header('Content-type', 'text/plain')
self.send_header("Content-Length", len("Method Not Allowed"))
self.end_headers()
self.wfile.write(bytes("Method Not Allowed"))
return
def send_response(self, code, message=None):
# Save the status code
self.last_code = code
SimpleHTTPRequestHandler.send_response(self, code, message)
def log_message(self, f, *args):
# Save instead of printing
self.last_message = f % args
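# Illustrative round trip (not part of the original plugin): an unmasked HyBi text
# frame produced by encode_hybi can be parsed back by decode_hybi. Guarded so it
# never runs when Sublime Text imports this module.
if __name__ == "__main__":
    frame, header_len, _ = WebSocketClient.encode_hybi("hello", 0x1)
    parsed = WebSocketClient.decode_hybi(frame)
    print parsed["opcode"], parsed["payload"]  # expected: 1 hello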
|
python
|
"""Providers services related to the cache."""
from typing import Optional
from limberframework.cache.cache import Cache
from limberframework.cache.lockers import Locker, make_locker
from limberframework.cache.stores import Store, make_store
from limberframework.foundation.application import Application
from limberframework.support.services import Service, ServiceProvider
class CacheServiceProvider(ServiceProvider):
"""Register cache services to the service container."""
def register(self, app: Application):
"""Register the cache store to the service container.
Args:
app: The service container.
"""
async def register_store(app: Application) -> Store:
"""Closure for establishing a cache store.
Args:
app: The Application.
Returns:
Store: The created Store.
"""
config_service = await app.make("config")
config = config_service.get_section("cache")
if config["driver"] == "file":
config["path"] = app.paths["cache"]
elif (
config["driver"] == "redis" or config["driver"] == "asyncredis"
) and "password" not in config:
config["password"] = None
return await make_store(config)
app.bind(Service("cache.store", register_store, singleton=True))
async def register_locker(app: Application) -> Optional[Locker]:
"""Closure for establishing a locker.
Args:
app: The Application.
Returns:
Locker: The created Locker.
"""
config_service = await app.make("config")
config = config_service.get_section("cache")
if config["locker"] == "asyncredis" and "password" not in config:
config["password"] = None
try:
return await make_locker(config)
except ValueError:
return None
app.bind(Service("cache.locker", register_locker, singleton=True))
async def register_cache(app: Application) -> Cache:
"""Closure for establishing a cache and linking to a store.
Args:
app: The Application.
Returns:
Cache: The created Cache.
"""
store = await app.make("cache.store")
locker = await app.make("cache.locker")
return Cache(store, locker)
app.bind(Service("cache", register_cache, defer=True))
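# --- Illustrative sketch (not part of the original module) ---
# How the services bound above might be resolved. Only register(), bind()
# and make() are taken from the code above; how the Application instance is
# constructed, and whether the provider takes constructor arguments, are
# assumptions here.
async def resolve_cache(app: Application) -> Cache:
    """Register the provider on an existing app and resolve the cache service."""
    CacheServiceProvider().register(app)  # binds cache.store, cache.locker, cache
    return await app.make("cache")  # deferred service, built on first use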
|
python
|
#!/usr/bin/env python2.7
# Distributed under the terms of MIT License (MIT)
# Based on fa.wikipedia's AbarAbzar tool
##https://fa.wikipedia.org/wiki/Mediawiki:Gadget-Extra-Editbuttons-persianwikitools.js
##https://fa.wikipedia.org/wiki/Mediawiki:Gadget-Extra-Editbuttons-persiantools.js
##https://fa.wikipedia.org/wiki/Mediawiki:Gadget-Extra-Editbuttons-dictionary.js
# This tool edits Persian texts and fixes some common dictation, typo and ZWNJ problems
# It is not suitable for MediaWiki syntax; for editing in a MediaWiki environment use fa_cosmetic_changes_core.py
import re
arabicDigits = ur'0123456789'
arabicIndicDigits = ur'٠١٢٣٤٥٦٧٨٩'
# Non-Persian characters such as ي, ك, ە and the like must be recognized as Persian characters by the other regexes before being converted to their Persian equivalents.
similarPersianCharacters = ur'\u0643\uFB91\uFB90\uFB8F\uFB8E\uFEDC\uFEDB\uFEDA\uFED9\u0649\uFEEF\u064A\u06C1\u06D5\u06BE\uFEF0-\uFEF4'
vowels = ur'\u064B-\u0650\u0652\u0670'
persianCharacters = ur'\u0621-\u0655\u067E\u0686\u0698\u06AF\u06A9\u0643\u06AA\uFED9\uFEDA\u06CC\uFEF1\uFEF2' + similarPersianCharacters
persianCharactersNoVowels = ur'\u0621-\u064A\u0653-\u0655\u067E\u0686\u0698\u06AF\u06A9\u0643\u06AA\uFED9\uFEDA\u06CC\uFEF1\uFEF2' + similarPersianCharacters
persianDigits = ur'۰۱۲۳۴۵۶۷۸۹'
hamza = ur'\u0654'
NASB = u'\u064b'
ZAMM = u'\u064c'
persianPastVerbs = ur'(' \
+ ur'ارزید|افتاد|افراشت|افروخت|افزود|افسرد|افشاند|افکند|انباشت|انجامید|انداخت|اندوخت|اندود|اندیشید|انگاشت|انگیخت|انگیزاند|اوباشت|ایستاد' \
+ ur'|آراست|آراماند|آرامید|آرمید|آزرد|آزمود|آسود|آشامید|آشفت|آشوبید|آغازید|آغشت|آفرید|آکند|آگند|آلود|آمد|آمرزید|آموخت|آموزاند' \
+ ur'|آمیخت|آهیخت|آورد|آویخت|باخت|باراند|بارید|بافت|بالید|باوراند|بایست|بخشود|بخشید|برازید|برد|برید|بست|بسود|بسیجید|بلعید' \
+ ur'|بود|بوسید|بویید|بیخت|پاشاند|پاشید|پالود|پایید|پخت|پذیراند|پذیرفت|پراکند|پراند|پرداخت|پرستید|پرسید|پرهیزید|پروراند|پرورد|پرید' \
+ ur'|پژمرد|پژوهید|پسندید|پلاسید|پلکید|پناهید|پنداشت|پوسید|پوشاند|پوشید|پویید|پیچاند|پیچانید|پیچید|پیراست|پیمود|پیوست|تاباند|تابید|تاخت' \
+ ur'|تاراند|تازاند|تازید|تافت|تپاند|تپید|تراشاند|تراشید|تراوید|ترساند|ترسید|ترشید|ترکاند|ترکید|تکاند|تکانید|تنید|توانست|جَست|جُست' \
+ ur'|جست|جنباند|جنبید|جنگید|جهاند|جهید|جوشاند|جوشید|جوید|چاپید|چایید|چپاند|چپید|چراند|چربید|چرخاند|چرخید|چرید|چسباند|چسبید' \
+ ur'|چشاند|چشید|چکاند|چکید|چلاند|چلانید|چمید|چید|خاراند|خارید|خاست|خایید|خراشاند|خراشید|خرامید|خروشید|خرید|خزید|خست|خشکاند' \
+ ur'|خشکید|خفت|خلید|خمید|خنداند|خندانید|خندید|خواباند|خوابانید|خوابید|خواست|خواند|خوراند|خورد|خوفید|خیساند|خیسید|داد|داشت|دانست' \
+ ur'|درخشانید|درخشید|دروید|درید|دزدید|دمید|دواند|دوخت|دوشید|دوید|دید|دیدم|راند|ربود|رخشید|رساند|رسانید|رست|رَست|رُست' \
+ ur'|رسید|رشت|رفت|رُفت|رقصاند|رقصید|رمید|رنجاند|رنجید|رندید|رهاند|رهانید|رهید|روبید|روفت|رویاند|رویید|ریخت|رید|ریسید' \
+ ur'|زاد|زارید|زایید|زد|زدود|زیست|سابید|ساخت|سپارد|سپرد|سپوخت|ستاند|ستد|سترد|ستود|ستیزید|سرایید|سرشت|سرود|سرید' \
+ ur'|سزید|سفت|سگالید|سنجید|سوخت|سود|سوزاند|شاشید|شایست|شتافت|شد|شست|شکافت|شکست|شکفت|شکیفت|شگفت|شمارد|شمرد|شناخت' \
+ ur'|شناساند|شنید|شوراند|شورید|طپید|طلبید|طوفید|غارتید|غرید|غلتاند|غلتانید|غلتید|غلطاند|غلطانید|غلطید|غنود|فرستاد|فرسود|فرمود|فروخت' \
+ ur'|فریفت|فشاند|فشرد|فهماند|فهمید|قاپید|قبولاند|کاست|کاشت|کاوید|کرد|کشاند|کشانید|کشت|کشید|کفت|کفید|کند|کوبید|کوچید' \
+ ur'|کوشید|کوفت|گَزید|گُزید|گایید|گداخت|گذارد|گذاشت|گذراند|گذشت|گرازید|گرایید|گرداند|گردانید|گردید|گرفت|گروید|گریاند|گریخت|گریست' \
+ ur'|گزارد|گزید|گسارد|گستراند|گسترد|گسست|گسیخت|گشت|گشود|گفت|گمارد|گماشت|گنجاند|گنجانید|گنجید|گندید|گوارید|گوزید|لرزاند|لرزید' \
+ ur'|لغزاند|لغزید|لمباند|لمدنی|لمید|لندید|لنگید|لهید|لولید|لیسید|ماسید|مالاند|مالید|ماند|مانست|مرد|مکشید|مکید|مولید|مویید' \
+ ur'|نازید|نالید|نامید|نشاند|نشست|نکوهید|نگاشت|نگریست|نمایاند|نمود|نهاد|نهفت|نواخت|نوردید|نوشاند|نوشت|نوشید|نیوشید|هراسید|هشت' \
+ ur'|ورزید|وزاند|وزید|یارست|یازید|یافت' + ur')'
persianPresentVerbs = ur'(' \
+ ur'ارز|افت|افراز|افروز|افزا|افزای|افسر|افشان|افکن|انبار|انباز|انجام|انداز|اندای|اندوز|اندیش|انگار|انگیز|انگیزان' \
+ ur'|اوبار|ایست|آرا|آرام|آرامان|آرای|آزار|آزما|آزمای|آسا|آسای|آشام|آشوب|آغار|آغاز|آفرین|آکن|آگن|آلا|آلای' \
+ ur'|آمرز|آموز|آموزان|آمیز|آهنج|آور|آویز|آی|بار|باران|باز|باش|باف|بال|باوران|بای|باید|بخش|بخشا|بخشای' \
+ ur'|بر|بَر|بُر|براز|بساو|بسیج|بلع|بند|بو|بوس|بوی|بیز|بین|پا|پاش|پاشان|پالا|پالای|پذیر|پذیران' \
+ ur'|پر|پراکن|پران|پرداز|پرس|پرست|پرهیز|پرور|پروران|پز|پژمر|پژوه|پسند|پلاس|پلک|پناه|پندار|پوس|پوش|پوشان' \
+ ur'|پوی|پیچ|پیچان|پیرا|پیرای|پیما|پیمای|پیوند|تاب|تابان|تاران|تاز|تازان|تپ|تپان|تراش|تراشان|تراو|ترس|ترسان' \
+ ur'|ترش|ترک|ترکان|تکان|تن|توان|توپ|جنب|جنبان|جنگ|جه|جهان|جو|جوش|جوشان|جوی|چاپ|چای|چپ|چپان' \
+ ur'|چر|چران|چرب|چرخ|چرخان|چسب|چسبان|چش|چشان|چک|چکان|چل|چلان|چم|چین|خار|خاران|خای|خر|خراش' \
+ ur'|خراشان|خرام|خروش|خز|خست|خشک|خشکان|خل|خم|خند|خندان|خواب|خوابان|خوان|خواه|خور|خوران|خوف|خیز|خیس' \
+ ur'|خیسان|دار|درخش|درخشان|درو|دزد|دم|ده|دو|دوان|دوز|دوش|ران|ربا|ربای|رخش|رس|رسان' \
+ ur'|رشت|رقص|رقصان|رم|رنج|رنجان|رند|ره|رهان|رو|روب|روی|رویان|ریز|ریس|رین|زا|زار|زای|زدا' \
+ ur'|زدای|زن|زی|ساب|ساز|سای|سپار|سپر|سپوز|ستا|ستان|ستر|ستیز|سر|سرا|سرای|سرشت|سز|سگال|سنب' \
+ ur'|سنج|سوز|سوزان|شاش|شای|شتاب|شکاف|شکف|شکن|شکوف|شکیب|شمار|شمر|شناس|شناسان|شنو|شو|شور|شوران|شوی' \
+ ur'|طپ|طلب|طوف|غارت|غر|غلت|غلتان|غلط|غلطان|غنو|فرسا|فرسای|فرست|فرما|فرمای|فروش|فریب|فشار|فشان|فشر' \
+ ur'|فهم|فهمان|قاپ|قبولان|کار|کاه|کاو|کش|کَش|کُش|کِش|کشان|کف|کن|کوب|کوچ|کوش|گا|گای|گداز' \
+ ur'|گذار|گذر|گذران|گرا|گراز|گرای|گرد|گردان|گرو|گری|گریان|گریز|گز|گزار|گزین|گسار|گستر|گستران|گسل|گشا' \
+ ur'|گشای|گمار|گنج|گنجان|گند|گو|گوار|گوز|گوی|گیر|لرز|لرزان|لغز|لغزان|لم|لمبان|لند|لنگ|له|لول' \
+ ur'|لیس|ماس|مال|مان|مک|مول|موی|میر|ناز|نال|نام|نشان|نشین|نکوه|نگار|نگر|نما|نمای|نمایان|نه' \
+ ur'|نهنب|نواز|نورد|نوش|نوشان|نویس|نیوش|هراس|هست|هل|ورز|وز|وزان|یاب|یار|یاز' \
+ ur')'
persianComplexPastVerbs = { # bug: clashes with the phrase «در گذشته» ("in the past")
ur'باز': u'آفرید|آمد|آموخت|آورد|ایستاد|تابید|جست|خواند|داشت|رساند|ستاند|شمرد|ماند|نمایاند|نهاد|نگریست|پرسید|گذارد' \
+ ur'|گرداند|گردید|گرفت|گشت|گشود|گفت|یافت',
ur'در': u'بر ?داشت|بر ?گرفت|آمد|آمیخت|آورد|آویخت|افتاد|افکند|انداخت|رفت|ماند|نوردید|کشید|گرفت',
ur'بر': u'آشفت|آمد|آورد|افتاد|افراشت|افروخت|افشاند|افکند|انداخت|انگیخت|تاباند|تابید|تافت|تنید|جهید|خاست|خواست|خورد' \
+ ur'|داشت|دمید|شمرد|نهاد|چید|کرد|کشید|گرداند|گردانید|گردید|گزید|گشت|گشود|گمارد|گماشت',
ur'فرو': u'آمد|خورد|داد|رفت|نشاند|کرد|گذارد|گذاشت',
ur'وا': u'داشت|رهاند|ماند|نهاد|کرد',
ur'ور': u'آمد|افتاد|رفت',
ur'یاد': u'گرفت',
ur'پدید': u'آورد',
ur'پراکنده': u'ساخت',
ur'زمین': u'خورد',
ur'گول': u'زد',
ur'لخت': u'کرد',
}
persianComplexPresentVerbs = { # problem with: «در روم باستان», «در ده»
# problem with: «بر گردن»
ur'باز': u'آفرین|آموز|آور|ایست|تاب|جو|خوان|دار|رس|ستان|شمار|مان|نمایان|نه|نگر|پرس|گذار|گردان|گرد|گشا|گو|گیر|یاب',
ur'در': u'بر ?دار|بر ?گیر|آمیز|آور|آویز|افت|افکن|انداز|مان|نورد|کش|گذر|گیر',
ur'بر': u'آشوب|آور|افت|افراز|افروز|افشان|افکن|انداز|انگیز|تابان|تاب|تن|جه|خواه|خور|خیز|دار|دم|شمار|نه|چین|کش|کن' \
+ ur'|گردان|گزین|گشا|گمار',
ur'فرو': u'خور|ده|رو|نشین|کن|گذار',
ur'وا': u'دار|رهان|مان|نه|کن',
ur'ور': u'افت|رو',
ur'یاد': u'گیر',
ur'پدید': u'آور',
ur'پراکنده': u'ساز',
ur'زمین': u'خور',
ur'گول': u'زن',
ur'لخت': u'کن',
}
adjective = \
ur'اخمو|ارزان|ارغه|الکن|الکی|انبوه|آبدار|(نا)?آرام|آرغنده|(نا)?آشکار|(نا)?آماده|آهسته|(بی\u200c|با)انضباط|باریک|بد|بدحساب|بددل|بدریخت' \
+ ur'|بر|براق|برخوردار|برومند|بزدل|بلند|بیآلایش|بی دست و پا|بیچاره|بیدار|بیمار|پخ|پخش|پخمه|پرت|پرنور|پست|پشمالو|پلید|پوچ|(سر|نا)?پوشیده|پوک' \
+ ur'|پیر|پیروز|تار|تپل|ترد|ترسو|تفت|تلخ|تنبل|تندرو|تنک|تنگ|تنها|تهی|تیره|جلو|چابک|چاپلوس|چالاک|چپ|چرند|چسبان|چفته|چیره|خام|خانم|خراب' \
+ ur'|خرم|خسته|خشک|(نا)?خفته|خفن|خل|خنگ|(نا)?خوانا|خوب|خوشکل|خوشگوار|خیراندیش|دراز|درخور|درستکار|دلباخته|دلیر|دوست|دون|رحیم|رسمی|روانی|روشن' \
+ ur'|ریغو|زبر|زبردست|زبل|زشت|زیبا|زیرک|ژرف|ژنده|ساده|(نا)?سالم|ساکت|سبک|سخاوتمند|سر|سرکش|سفت|سوسول|شایسته|شکیبا|شل|شور|طولانی|عالم|فراوان|فرز' \
+ ur'|فنی|قرتی|قشنگ|قلنبه|قهرمان|کارکن|کال|کبود|کج|کچل|کر|کلان|کلفت|کم|کند|کنس|کوتاه|کوتوله|کوچک|کوچولو|کودن|گدا|گران|گرسنه|گشاد' \
+ ur'|گنگ|گود|گیج|لاغر|لبریز|لخت|لغزنده|له|مات|مچاله|مچل|(نا)?مرد|مردمی|مردنی|مست|مشکوک|مفید|ناپدید|ناپسند|ناتوان|ناجنس|ناجور|ناچیز|ناخوش' \
+ ur'|نادان|(نا)?درست|نازک|ناسپاس|نافرمان|ناگوار|نامرد|نرم|نیازمند|نیرومند|هشیار|هیز|واژگون|ول|ولرم|ولنگار|یکپارچه|یکدست|یکرنگ|(نا)?پیدا' \
+ ur'|گناهکار|ریز|دانا|کثیف|آقا|(با|بی\u200c)سواد|عاشق|(با|بی\u200c)محبت|صاف|زمخت|فریبنده|پیچیده|سخت|دشوار|تمیز|(نا)?پاکیزه|بزرگ|پهن|پخته|بیمورد' \
+ ur'|بینیاز|(بی\u200c|با|)تجربه'
personNames = \
ur'الله|محمد|علی|حسن|حسین|جواد|باقر|مهدی|تقی|نقی|نازی|نجم|' \
+ ur'اکرم|کاظم|عباس|منصور|خسرو|محمود|شمس|ملک|شوکت|' \
+ ur'نصر|همت|جهان|جلال|موسی|ابراهیم|جعفر|احمد|قاسم|کمال|هاشم|' \
+ ur'شفیع|صمد|شیخ|اسماعیل|ربیع|سلیمان|رستم|شاهرخ|فرخ|شریف|نعمت|' \
+ ur'امیر|خلیل|جلیل|مجید|اسد|شوکت|رضا|عجل|ید|عبد|سهیل|معصوم|عظیم' \
+ ur'اکبر|اصغر|بهمن|قلی'
wordsWithA = \
ur'ورامدن|هرزاب|هراینه|هجوامیز|نوشاذر|نواوری|نواموز|نهراب|میراخور|میراب|میاید|میاورند|مرات' \
+ ur'|ماخذ|مابی|لسانجلس|گلاذین|گزنداور|گرداوری|گرداوردن|گرداورد|گردامدن|کنداور|کفرامیز|فرودامدن|عطراگین|طنزامیز' \
+ ur'|شیرابه|شهرا[یئ]ین|شهراشوب|سوداور|سراوردن|سرامدن|سرامد|سراشپز|سحرامیز|زیرابی|زوراور|زهرالود|زهراگین|زردالو|دوداهنگ|دواتشه' \
+ ur'|دژاهنگ|دژالود|درداور|دردالود|درایند|دراید|دراویختن|دراوری|دراورنده|دراورند|دراوردن|درامده|درامدن|درامد|خیزاب|خشمالود' \
+ ur'|چندشاور|جگراور|تیراهن|تهورامیز|تنفراور|تنداب|پسندامدن|پرنداور|پردرامد|پراشوب|پراب|بیاب|بوا|بنداوردن' \
+ ur'|بنداور|سرامدن|برایند|براورده|براوردن|براورد|برامده|برامدن|برامدگی|برامد|براشفته|براشفتن|براشفتگی|براسودن|بداهنگ' \
+ ur'|بداموزی|بدامدن|بدامد|ائورت|ائسه|ا[یئ]ینه|ا[یئ]یننامه|ا[یئ]ین|ایهیاب|ایه|اینهدار|اینده|ایندگان|ایفون' \
+ ur'|ایروپلن|ایدین|ایتم|ایتالله|ایت|ایات|اویشن|اویسا|اویژه|اویزون|اویزه|اویزند|اویزگر|اویزش' \
+ ur'|اویزدار|اویزان|اویز|اویخته|اویختنی|اویختن|اویختگی|اویخت|اویتخه|اووکادو|اونگون|اونگان|اونگ|اوند|اوریل' \
+ ur'|اوریدن|اورنده|اورند|اورنجن|اوردیدن|اورده|اوردنی|اوردن|اوردگه|اوردگاه|اوردجو|اورد' \
+ ur'|اوایش|اوانویس|اوانگارد|اوانتاژ|اواکس|اواشناس|اوازهخوان|اوازهای|اوازه|اوازخوان|اواز|اواره|اوارگی|اوارگان' \
+ ur'|اوار|اهو|اهنین|اهنگساز|اهنگرخانه|اهنگر|اهنگ|اهنفروش|اهنربا' \
+ ur'|اهنپاره|اهنبر|اهنالات|اهن|اهکسازی|اهکپزی|اهکپز|اهک|اهسته|اهستگی|اهای|اهان' \
+ ur'|انیون|انوقت|انود|انها|انگه|انگاه|انکه|انکس|انکارا|انقدر|انفولانزا|انفلوانزا' \
+ ur'|انفارکتوس|انطور|انطرف|انسو|انژیوگرافی|انژین|انزیم|انروی|انروز|انرو|اندوسکوپی|انچه|انجا|انتیل|انتیک' \
+ ur'|انتی|انتن|انتریک|انتراکت|انتراسیت|انتالیا|اناهیتا|اناناس|انان|انالیز|انالوگ|انارشیسم|اناتومی|اناتولی' \
+ ur'|انابولیسم|امینه|امیغه|امیغ|امیزه|امیزگار|امیزش|امیز|امیخته|امیختن|امیختگی|امیب|امونیوم|امونیت|امونیاک|امون' \
+ ur'|اموکسی|اموزه|اموزنده|اموزگان|اموزگار|اموزشیار|اموزشگاه|اموزشکده|اموزش|اموزانه|اموزاندن|اموز|اموده|امودن|امودریا' \
+ ur'|اموخته|اموختن|اموختگی|اموختار|امله|امریکا|امرزیده|امرزیدن|امرزیدگی|امرزنده|امرزگار|امرزش|امرز|امرانه' \
+ ur'|امدید|امدوشد|امدورفت|امده|امدن|امدگی|امدکردن|امد|امخته|امپلی|امپرسنج|امپر|امبولی|امبولانس|امایه|امایش|امال' \
+ ur'|اماسیده|اماسیدن|اماسانیدن|اماس|امازون|امارگیر|امارگر|امارشناسی|امارشناس|امادهکردن|اماده|امادگی|امادگاه' \
+ ur'|اماجگاه|اماج|اماتور|الیداد|الیاژهای|الیاژ|الونک|الومینیوم|الومینیم|الوزرد|الوده|الودن|الودگی|الودگر|الود|الوچه' \
+ ur'|الوبخارا|الما|الفرد|الفا|الرژی|التو|الترناتیو|الت|الپ|البومین|البوم|البانی|البالوئی|البالو|الا[یئ]یدن' \
+ ur'|الایشی|الایش|الای|الاسکا|الاخون|الاچیق|الات|الاباما|اگنه|اگنده|اگندن|اگاهینامه|اگاهیدادن|اگاهی|اگاهگان' \
+ ur'|اگاهانیدن|اگاهانه|اگاهاندن|اگاه|اکوستیک|اکوردئون|اکواریوم|اکنه|اکنش|اکنده|اکندن|اکله|اکسفورد|اکروبات|اکتئون' \
+ ur'|اکتینیوم|اکانتاسه|اکادمیک|اکادمی|اقبانو|اقائی|اقایان|اقامنشانه|اقامنش|اقاسی|اقازاده|اقاجان|اقا' \
+ ur'|افریننده|افرینش|افرین|افریکانس|افریقا|افریده|افریدن|افریدگار|افتومات|افتابه|افتابمهتاب|افتابگیر' \
+ ur'|افتابگردان|افتابزده|افتابزدگی|افتابرو|افتابخورده|افتابپرست|افتاب|افاقی|افاق|افات|اغول|اغوشیدن|اغوش|اغل' \
+ ur'|اغشته|اغشتن|اغشتگی|اغش|اغالش|اغاسی|اغازین|اغازیدن|اغازیان|اغازی|اغازه|اغازگر|اغاز|اغاجی|اغا|اشیل|اشیانی|اشیانه' \
+ ur'|اشیانبندی|اشیان|اشور|اشوبیدن|اشوبناک|اشوبگرانه|اشوبگر|اشوبکن|اشوبطلب|اشوبانگیز|اشوب|اشنایان|اشناوری' \
+ ur'|اشناگر|اشناسازی|اشنا|اشکوخیدن|اشکاره|اشکارگر|اشکارساز|اشکارا|اشکار|اشفته|اشفتن|اشفتگی|اشغالدان|اشتی' \
+ ur'|اشفتگی|اشپزخانه|اشپز|اشامیدنی|اشامیدن|اشامه|اشامنده|اشام|اشاب|اسیمه|اسیمگی|اسیبزدن' \
+ ur'|اسیبدیده|اسیب|اسیاکردن|اسیاسنگ|اسیازنه|اسیاچرخ|اسیابان|اسیاب|اسیا|اسودهخاطر|اسودهحال|اسوده|اسودن' \
+ ur'|اسودگی|اسمون|اسمانه|اسمانسنجی|اسمانخراش|اسمانخانه|اسمان|اسکاریس|اسفالت|استینه|استرکاری' \
+ ur'|استردوز|استانه|اسپیرین|اسپرین|اسائی|اسا[یئ]یدن|اساینده|اسایشگاه|اسایش|اسانگیری|اسانسورچی' \
+ ur'|اسانسور|اسان|اژیرهوائی|اژیر|اژیانه|اژنگ|اژند|اژفنداک|اژدار|اژانس|ازیدن|ازیتا|ازوقه|ازمونگر|ازمونگاه|ازمون|ازموده' \
+ ur'|ازمودن|ازمودگی|ازمندی|ازمند|ازمائی|ازمایه|ازماینده|ازمایشی|ازمایشو|ازمایشگاه|ازمایشات|ازمایش|ازمابنده|ازما|ازگار' \
+ ur'|ازرده|ازردن|ازردگی|ازرد|ازاریدن|ازارنده|ازاررسان|ازاردهنده|ازار|ازادیخواه|ازادوار' \
+ ur'|ازاده|ازادمنش|ازادمرد|ازادگی|ازادگان|ازادکامی|ازادانه|ازاد|اریانا|اریان|اریا|ار[و]غ' \
+ ur'|ارواره|ارنولد|ارنگ|ارنج|ارنائوت|ارمینا|ارمین|ارمیس|ارمیده|ارمیدن|ارمیدگی|ارمیچر|ارمه|ارمانشهر|ارماگدون|ارگون' \
+ ur'|ارکاد|ارشیو|ارشیتکت|ارشه|ارشام|ارش|ارستن|ارسان|ارژانتین|ارزومندانه|ارزومند|ارزوخواه|ارزو|ارتین|ارتیشو|ارتیست' \
+ ur'|ارتور|ارتمیس|ارتروز|ارا[یئ]یدن|ارایهگر|ارایشی|ارایشگر|ارایشگاه|ارایش|ارامیدن|ارامگاه|ارامگان|ارامکردن' \
+ ur'|ارامش|ارامانیدن|ارام|اراسته|اراستن|اراستگی|ارارات|اراء|اذین|اذرین|اذرنوش|اذرنگ|اذرگون|اذرشهر|اذرسنجی' \
+ ur'|اذرروز|اذرخش|اذربرزین|اذربایجان|اذر|ادینه|ادیس|ادونیس|ادنیس|ادمیگرا|ادمیزاد' \
+ ur'|ادمیرال|ادمیت|ادمگرا|ادمکش|ادمک|ادمفروش|ادمربا|ادمخوار|ادرنالین|ادرس|ادامس|اداب|اخوندک|اخوند' \
+ ur'|اخور|اخرین|اخرسالار|اخرزمان|اخرتشناسی|اخرت|اخرالدواء|اخرالامر|اخر|اختن|احاد|اچمز|اچارکشی|اچاردار|اچار|اجیل|اجودان' \
+ ur'|اجرنما|اجرکاری|اجرچین|اجرپز|اجان|اثار|اتیه|اتیکا|اتیسا|اتلیه|اتشین|اتشناک|اتشگیره|اتشگیر' \
+ ur'|اتشگون|اتشگرفتن|اتشگاه|اتشکده|اتشکار|اتشفشان|اتشزنه|اتشزدن|اتشزا|اتشدان|اتشخوار|اتشخانه|اتشپاره|اتشبان|اتشبازی|اتشبار' \
+ ur'|اتش|اتریوم|اتروپین|اتابای|اپولو|اپوستروف|اپاندیسیت|اپاندیس|اپارتمان|اپارتاید|اپارات|ابیار|ابونه|ابونمان' \
+ ur'|ابها|ابنوس|ابنمک|ابنما|اب[ن]شدنی|ابنبات|ابمیوهگیر|ابمیوه|ابلیمو|ابلهکوب|ابلهرو|ابگینه|ابگیر|ابگونه|ابگونساز' \
+ ur'|ابگوشت|ابگرمکن|ابگردان|ابگذر|ابگاه|ابکش|ابکانه|ابکامه|ابکار|ابفشان|ابغوره|ابشی|ابشور|ابشنگولی|ابشش|ابشدنی' \
+ ur'|ابشخور|ابشتگاه|ابشار|ابسوار|ابسه|ابسکون|ابستن|ابسالان|ابسال|ابزیگاه|ابزیدان|ابزی|ابریزگاه|ابریزگان|ابریزش' \
+ ur'|ابریز|ابرومند|ابروریزی|ابرنگ|ابرفت|ابراهه|ابراهک|ابراه|ابدیده|ابدزدک|ابدانک|ابدان|ابداری|ابدارخانه|ابدارچی' \
+ ur'|ابدارباشی|ابدار|ابخیز|ابخوری|ابجی|ابجوفروشی|ابجوساز|ابجوساختن|ابجو|ابتین|ابتنی|ابپنیر|اگهی' \
+ ur'|ابپاش|اببها|اببند|ابباز|ابانبار|ابان|اباژور|اباده|ابادکردن|ابادسازی|ابادان|اباد|اباء' # first charcter should be آ
# removed ان for [[ان بی سی]]
# match ZWNJ also as a space or optional
wordsWithA = wordsWithA.replace(ur"\u200c", u'[\u200c ]?')
PresentVerbsWithA = \
ur'ارا|ارام|ارامان|ارای|ازار|ازما|ازمای|اسا|اسای|اشام|اشوب|اغار|اغاز|افرین|اکن|اگن|الای' \
+ ur'|امرز|اموز|اموزان|امیز|اهنج|اور|اویز'
PastVerbsWithA = \
ur'اراماند|ارامید|ارمید|ازرد|ازمود|اشامید|اشفت|اشوبید|اغازید|اغشت|افرید|اکند|اگند|الود' \
+ ur'|امد|امرزید|اموخت|اموزاند|امیخت|اهیخت|اورد|اویخت'
needsNasb = \
ur'اتفاقا|الزاما|لزوما|یقینا|قطعا|حتما|قاعدتا|طبیعتا|طبعا|قهرا|جدّا|حقیقتا|واقعا|مطمئنا|واضحا|مسلما|تماما|کاملا' \
+ ur'|عینا|اکیدا|مطلقا|دقیقا|مستقیما|اصولا|اصلا|اصالتا|نسبا|نسبتا|تقریبا|حدودا|معمولا|قانونا|شرعا|اخلاقا|خلقا|احتمالا' \
+ ur'|استثنائا|اساسا|کلّ?ا|جزئا|مجموعا|جمعا|اجماعا|شدیدا|نهایتا|اقلا|اکثرا|غالبا|عمدتا|ندرتا|بعضا|گاها|صریحا|صراحتا|عموما' \
+ ur'|اختصاصا|خصوصا|مجملا|اجمالا|اختصارا|مختصرا|مشروحا|ظاهرا|باطنا|عمیقا|ذاتا|فطرتا|جسما|ابتدائا|مقدمتا|بدوا|بعدا|قبلا' \
+ ur'|جدیدا|سابقا|اخیرا|ابدا|عمرا|تلویحا|علنا|حضورا|غیابا|نیابتا|لطفا|اجبارا|اختیارا|عالما|عمدا|عامدا|تعمدا|متعمدا|عادتا' \
+ ur'|مستقلا|احتیاطا|احیانا|غفلتا|سهوا|اشتباها|عاجلا|عجالتا|مرتجلا|ارتجالا|سریعا|فورا|دا[یئ]ما|ضرورتا|نقدا|منحصرا|صرفا|دفعتا' \
+ ur'|کرارا|مکررا|مجددا|مرتبا|مستمرا|متواترا|تدریجا|تصادفا|عملا|فعلا|موقتا|ضمنا|نتیجتا|نوعا|اصطلاحا|جسارتا|بالا ?غیرتا|م[وؤ]کدا' \
+ ur'|ذیلا|شخصا|مشترکا|مفصلا|رسما|ترجیحا|قلبا|ر[اأ]سا|تو[اأ]ما|متناوبا|متوالیا|متقابلا|متعاقبا|متّ?فقا|مثلا|فرضا|ایضا|مضافا' \
+ ur'|مصرّ?ا|ارفاقا|انصافا|جهارا|طولا|متدرجا|غانما|احتراما|ناچارا|سفارشا|تلفنا|زبانا|کتبا|شفاها|چهارما|ثانیا|ثالثا' \
+ ur'|رابعا|خامسا|سادسا|سابعا|ثامنا|تاسعا|عاشرا|مخصوصا' # اولا و سوما میتواند یک نام خاص باشد.
HamzehZam = \
ur'امیرالمومنین|مومن|رویا|فواد|موذن|مودب|موخر|مواخذه|مولف|موثر|مونث|موکد|موسس(?! خورناتسی)|سوال|موسسه' # for[[ران مودی]]removedمودی
HamzehZam = HamzehZam.replace(ur'\u0648', ur'وء?')
HamzehNasb = \
ur'تاکید|تالیف|تاسیس|تاسیسات|تامل|تفال|تاهل|تامین|تا[یئ]ید|تادیب|تاثیر|تاثر|تاثیرات|تاثیرگذار|تاجیل' \
+ ur'|تاخر|تاخیر|توام|ماوا|مستاجر|مبدا|منشا|متاسفانه|متاسف|متاثر|مساله|متاهل|خلا|ملا عام|رافت|ماخذ|مایوس|ماخوذ' \
+ ur'|مامور|مامورین|ماموران|ماموریت|مامون|مانوس' # removed راس، تالم
HamzehAtEnd = \
ur'اجزا|احشا|ارتجا|ارتقا|ازا|استثنا|استغنا|استقرا|استمنا|استهزا|اشبا|اشقیا|اشیا|اطبا|اطفا|اعتلا' \
+ ur'|اغوا|افترا|اقتضا|امنا|انبیا|انقضا|اولیا|ماورا' # re املا-انشا-اعضا-امضا-انزوا-ابتلا-استعفا-اعلا-اعتنا بدون همزه متداولترند، ابدا میتواند با همزه یا نتوین باشد در نتیجه برداشته شد
HamzehAtInside = {ur'سو': u'استفاده|تعبیر|تفاهم|برداشت',
ur'ما': u'الشعیر', ur'ماورا': u'الطبیعه|النهر'}
AlefMaghsooreh = \
ur'یحیا|حتا|خنثا|مبرا|مرتضا|مصطفا|موسا|مجتبا|عیسا|عظما|علارغم' # removed اولا- الا
colorsNames = \
ur'زرد|قرمز|آبی|سفید|سیاه|بنفش|سرخ|گلگون|ازرق|ابیض|نارنجی|توسی|کبود|ارغوانی|سورمهای|سپید|مشکی|کرم|قهوهای|سبز|طلا[یئ]ی'
persianNumbers = \
ur'یک|دو|سه|چهار|پنج|شش|هفت|هشت|نه|ده|یازده|دوازده|سیزده|چهارده|' \
+ ur'پانزده|شانزده|هفده|هجده|نوزده|بیست|سی|چهل|پنجاه|شصت|هفتاد|هشتاد|نود|صد|هزار'
addToAbad = ur'گلون|افضل|رقی|خیر|دل|حاجی|سید|مبارک|گنج|نهنگ|چنگ|' \
+ ur'سرخ|جنگل|خرم|خونی|دولت|به|نیاز|حفظ|عیش|نجم|بلاش|شیار|' \
+ ur'فتح|فضل|خدر|ساق|کج|زین|اسلام|بالش|پارس|اسکل|یاخچی|مهندس|قوژد'
firstNameComplex = {
ur'حمید|احمد|محمود': u'رضا',
ur'خدا': ur'بنده|داد',
ur'امیر': ur'علی|حسین|محمد|رضا|مهدی|عباس',
ur'محمد': ur'حسین|رضا|مهدی|جواد|باقر|کاظم|حسن|علی|امیر|طاها|هادی|وحید|حمید',
ur'علی': ur'رضا|محمد|اصغر|اکبر|قلی',
}
complexes = { # proper nouns
# compound nouns
# verbs
# compound adjectives
# 'با': u'پرستیژ|ابهت|احساس|اخلاق|ادب|ارزش|استعداد|استقامت|اصالت|اقتدار|اهمیت|تدبیر|تربیت|تسلط|تعصب|تقوی',
# 'باقی|ته': u'مانده', bug > باقی مانده بود- ته مانده بود.
# طبق بندهای شماره ..
# 'طبق|زمان': u'بند',
# 'مادر|خواهر|برادر|فرزند|پدر': u'خوانده', # bug > وی پدر خوانده شد.(پدر صدا زده شد)
ur'ویکی': u'پدیا|مدیا|انبار|واژه|نبشته|خبر|کتاب|داده|دیتا|سفر|تراول|دانشگاه',
ur'ایده': u'آل',
ur'سخت|نرم|پای|جنگ|نوشت|بد|ماشین': u'افزار',
ur'جنگ': u'افروز',
ur'پیاده': u'روی|رو|نظام',
ur'انسان|روان|گیاه|زیست|جانور|نماد|زمین|هوا|ریخت|خدا|جامعه|رفتار|فرهنگ|معرفت|زبان|کتاب|ستاره|اختر|شرق|اسلام|ریشه|آسیب|باستان|حق': u'شناس',
ur'بهره|نتیجه|فاصله|اندازه|مچ|رونق|دست|پا|پاچه|آبمیوه|آتش|آمار|اوج|کشتی|رأی|رای|یار|تصمیم': u'گیر',
ur'بهره': u'مند|کشی|دهی',
ur'اوج': u'دهی',
ur'آزاد|بد|نیک|مثبت|مصلحت': u'اندیش',
ur'هم': u'اندیشی|ارزی|راهی|سانی|رزم|خانه|نشین|سان|بند|مرز|سایه|مسلک|زمان|معنی|گام',
ur'گرم|نرم|سرد|جمع|خنک|خشک|مرطوب|ضرب|تقسیم|کم|سرگرم|خوشحال|ناراحت|سخت|روان|باز|زیبا|زشت|مصرف|تولید': u'کننده|کنندگی|کنندگان',
ur'خود|درون|پیه': u'سوز|خواه',
ur'دل': u'افروز|آزار|آرا|آزرده|بریده|افسرده|ربا|سوز|خواه|گشا',
ur'تفریق|افزایش|کاهش|ویرایش|کوزه|سفال|غارت|چپاول|صنعت|امداد|توطئه|حساب|افسون|ریخته': u'گر',
ur'آهن': u'ربا',
ur'طیف|امکان|اقتصاد|نور|زمان|عمق|گرما|فشار|قطر': u'سنج',
ur'فیزیک|شیمی|ریاضی|تاریخ|قلم|کتاب': u'دان',
ur'نام|اسم|سیاهه|خود|فیلمنامه|فیلمنامه|کتاب|روان|نسخه': u'نویس',
ur'بار|سرمایه|تخم|کتاب|خواب': u'گذار',
ur'شهر': u'بانو|زاد|ناز|نوش',
ur'اسد|اسماء?|اسم|امان|امر|امیر|امین|انصار|انعام|اهل|اولیاء?|اکرم|باب|بدیع|برات|بقیة|بهاء?|جار|جند|حبیب|حجت|حزب|حفظ|حمد|خلق|خلیل|خیر|ذبیح|ذکر|رام|رحمت|رحم|رسول|روح|سیف|شمس|شکر|صدق|صدیق|عبد|عزت|عزیز|عین|فتح|فرج|فضل|قدرت|لطف|لعنت|نصرت|نصر|نظیر|نعمت|نور|هیبت|ولی|کلام|کلیم|ید|یوم': u'الله|اللهی',
ur'مستند|هوا|روان|جریان|کار|مجسمه|ایمن|پیاده|مقاوم|امن|ساده|بهینه|مرتب|شبیه|ویکی|پل|جاده|راه': u'ساز',
ur'احترام|اختلاف|اضطراب|اعجاب|افتخار|بحث|بر|تحسین|ترحم|تعجب|تعصب|تنفر|ت[اأ]ثر|ت[اأ]سف|ت[اأ]مل|جالب|جدل|جنجال|حزن|حیرت|خفت|خوف|خیال|چالش|دل|رعب|رقت|روح|شهوت|شور|شوق|شگفت|طرب|عبرت|غرور|غم|فرح|ملال|مهر|نشاط|نفرت|هراس|هوس|وحشت|ی[اأ]س': u'برانگیز|انگیز',
ur'چهره|دور|تاریخ|خبر|روزنامه|روز|لرزه': u'نگار',
ur'خود|روان|پاک|چرک|دست|پشت|زیر|پا|داستان': u'نویس',
ur'زود|آرام|آب|کله|آش|بخار': u'پز',
ur'مه|پیمان|یخ|سنگ|بت|صف': u'شکن',
ur'خون': u'آشام|خوار|بار|گرم|سرد|بها',
ur'شیطان|خدا|بت|خورشید|مهر|آتش|یزدان|ایزد|گاو|خود|آفتاب|یکتا|پول|حق|مال|میهن|نژاد': u'پرست',
ur'پا[یئ]ین|بالا|عقب|جلو|کنار|ساده|بزرگ|کوچک|عمیق|رقیق|ضخیم|فهیم|گسترده': u'تر',
ur'برگشت|انحنا|برش|انعطاف|مهمان|امکان|تفکیک|تغییر|آسیب|تأثیر|دل|سازش|مهاجر': u'پذیر|ناپذیر',
ur'دانش': u'آموخته|پژوه|آموختگی',
ur'بی': u'آلایش|ابهت|احترام|احساس|اختیار|اخلاق|ادب|اراده|ارزش|استعداد|استقامت|اصالت|اعتماد|اعتبار|اقتدار|امان|امنیت|انتها|اهمیت|بها|بو|تدبیر|تربیت|تسلط|تعصب|تقوی|توجه|ثبات|جنبه|حس|دریغ|دست و پا|دین|رنگ|روح|رویه|سابقه|سیم|شرف|شعور|لیاقت|مایه|مبالات|مزد|مزه|مصرف|معرفت|معنی|مقدار|مورد|نتیجه|نزاکت|نهایت|نیاز|وجدان|پایه|پرستیژ|پناه|پول|چاره|چیز|کار|دلیل|خبر',
ur'می': u'دانم',
ur'عرضه': u'کننده|کنندگان',
ur'ابرو': u'کمان|قیطان',
ur'ابله': u'گونه',
ur'ابن': u'الیوم|الوقت|السبیل|عباس',
ur'اغراق|خشونت': u'آمیز',
ur'اجاق': u'کور|زاده|سوز',
ur'اجل': u'برگشته',
ur'اسفل': u'السافلین',
ur'اطلاع': u'رسان|رسانی|دهی',
ur'انگشت': u'نما|نشان|پیچ',
ur'سپاس|نام': u'گزار',
ur'گوشت|گیاه|علف|شیر': u'خوار',
ur'آدم': u'برفی|فروش|ربا|خوار',
ur'آب': u'لمبو|تنی',
ur'آتشین': u'پنجه',
ur'ریش|سنگ|قلم': u'تراش',
ur'آزرده': u'جان',
ur'آسوده': u'خاطر|وجدان',
ur'آسیمه': u'سر',
ur'آش': u'دهن|خور|پز',
ur'آشفته': u'سامان|دماغ|روز',
ur'آکنده': u'گوش|پهلو',
ur'آلاخون': u'والاخون',
ur'آمد': u'نیامد|شد',
ur'باب': u'الحوائج',
ur'باد': u'نشسته|گرفته',
ur'بار': u'خاطر',
ur'بالا|پایین|پائین': u'تنه',
ur'برنامه': u'نویس',
ur'برنامه|طرح|بتون': u'ریز',
ur'بزرگ': u'سال|مرد',
ur'بزن': u'دررو|بهادر',
ur'بد|خوش': u'سیرت|اخلاق|تراش|ترکیب|ریخت|ادا|استیل|اندام|بو|بینانه|بینی|پخت|برخورد|یمن|خوراک|خیم|رکاب|حال|مزه|حساب|پوش|اقبال|قلق|منظر|نام|نما',
ur'بد': u'انجام|پیله|خوی|عنق|کاره|گمان|گوهر|لگام|مسب|مست|مهر',
ur'بن': u'بست',
ur'به': u'غایت',
ur'حمله|بهره|پیشه|شعله|طاعت|طالع': u'ور',
ur'بین': u'النهرین|الملل|الممالک',
ur'پاچه': u'ورمالیده',
ur'تکه|پاره|آتش|آهن|جگر|چهار': u'پاره',
ur'ترویج|امداد|جهاد': u'گران|گر',
ur'جهان|خدا|سود|شفا|نیرو|گرما|سرما': u'بخش',
ur'پاک': u'نفس|سرشت|دامن|سیرت|منش|دیده',
ur'پالان': u'سا[یئ]یده|دوز',
ur'پراگنده|تاریک|شکسته|آشفته|آزرده|آسوده|بد|خوش|خونین|سیاه|نازک': u'دل',
ur'پری': u'نژاد|چهر',
ur'نیک|پست': u'فطرت',
ur'پی': u'گم|گرد|فراخ|سپید|نوشت',
ur'پیچ': u'واپیچ|پیچ',
ur'سفید|سیاه|قهوهای|قرمز|زرد|سبز|بنفش|گلگون|سرخ|پیروزه|مشک|نیل|مشکین': u'فام',
ur'پیش': u'مرگ|کسوت',
ur'تازه': u'وارد|خط|نفس|کار',
ur'تام': u'الاختیار',
ur'خوش|زشت|ترش': u'رو',
ur'ترگل': u'ور گل',
ur'تکه': u'تکه',
ur'تن': u'فروش|آسان|آرا|تن|پرور',
ur'تند': u'خو|خوی',
ur'تنگ': u'چشم',
ur'تی': u'تیش',
ur'پا|تن|زیر|سبز|سرخ|قرمز': u'پوش',
ur'تیره': u'روز',
ur'جامع': u'الشرایط|الاطراف',
ur'جان': u'سخت|جانی',
ur'یدک|فرو|نسل|آدم|ویروس|نقشه|سر|آب|آچار': u'کش',
ur'کشتی|گرده|دشت|نگه|دید|زمین|جنگل|دروازه': u'بان',
ur'چابک': u'سوار|دست',
ur'نقاره|چاپ': u'چی',
ur'چرب': u'زبان|ترازو',
ur'چشمه': u'چشمه',
ur'چل': u'کلید|تاج|تکه',
ur'ناقاره|چوبک|دف|دمبک|ساز|نی|سنتور|تار|گیتار|ارگ': u'زن',
ur'چیره': u'دست',
ur'پول|فنگ|قالی|ظرف|خشک|لباس': u'شو[ئی]ی',
ur'چیز': u'فهم',
ur'حرف': u'شنو',
ur'حق': u'السکوت|التدریس|الزحمه',
ur'حکیم': u'باشی',
ur'حرام|حلال': u'زاده',
ur'حیرت': u'زده',
ur'حیرت|نام|مقام|یاد|خواب|درد|شگفت|جمع': u'آور',
ur'خاله': u'زنک',
ur'خام': u'طمع|طبع',
ur'خشک': u'سر',
ur'خنده': u'رو|خریش',
ur'خواجه': u'سرا|تاش',
ur'سگ|مرغ|خوک': u'دانی',
ur'خونین': u'جگر|چشم|شهر',
ur'دایم': u'الخمر',
ur'دائم': u'الصوم|الخمر',
ur'درشت': u'خو',
ur'دست': u'نویس|خوش|پاچه|چین|آورد',
ur'دم': u'کلفت',
ur'دندان': u'گرد',
ur'دودوزه': u'باز',
ur'ذوات': u'الارحام|الاذناب',
ur'ذوی': u'القربی|الاوتار|العقول',
ur'ذی': u'نفع|صلاحیت|فقار|ربط|قیمت|شعور|علاقه|حیات|فن|روح|عقل|حق',
ur'چشم|بار|بر|پس|تیر|رو|زیر|غلط': u'انداز',
ur'رای': u'دهنده|دهندگان',
ur'راست': u'راستکی',
ur'رحمت': u'العالمین',
ur'رسم': u'الخط',
ur'رقیق': u'القلب|الفکر',
ur'رنگ': u'وارنگ',
ur'اندود': u'کاری',
ur'سنگ|ریز|دانه|تک|یک|بزرگ|رنگ': u'دانه',
ur'رو[یئ]ین|پاد|نرم|سخت': u'تن',
ur'ریش': u'ریش',
ur'رئیس': u'الوزراء|الرؤسا',
ur'تصویب|کار|اجازه|تکذیب|شب|پایان|اساس|آ[یئ]ین': u'نامه',
ur'زنگی': u'مزاج',
ur'زوار': u'دررفته',
ur'زیست': u'محیط|بوم',
ur'سابق|اخیر|فوق|لازم': u'الذکر',
ur'سابقه': u'سالار',
ur'سبک': u'مغز|سنگ|عنان|روح|لقا|سایه|سنگین|دست',
ur'سربه': u'مهر',
ur'سریع': u'السیر|الانتقال',
ur'سست': u'زخم|رگ|ریش|عنصر',
ur'سنگ': u'فرش',
ur'دو|سه|چهار': u'پایه',
ur'سیاه': u'مست|سوخته|چرده',
ur'سینه': u'چاک',
ur'شب': u'رنگ|پره|اداری',
ur'شبانه': u'روزی',
ur'شکسته': u'ناخن|مزاج',
ur'شلم': u'شوربا',
ur'شوخ': u'طبع|رو|دیده|چشم',
ur'شوم|نیک|بلند|بد': u'اختر|اقبال',
ur'شوی': u'دیده',
ur'شیرین': u'عقل|دهن',
ur'صد': u'شاخ',
ur'قتل|بار': u'عام',
ur'صف': u'آوار',
ur'ضرب': u'المثل|العجل',
ur'طبقه|زمان|درجه|رده|رتبه|دسته|جمله|تقسیم|بسته|آرماتور|اسکلت|امتیاز|بخش|جدول|جمع|جناح|رنگ|ساز|سایز|سرهم|سطح|شرط|شکم|فاز|فصل|قاب|پارتیشن|چشم|کادر|کمر|گاو|نیم': u'بند',
ur'طوطی': u'وار',
ur'طویل': u'المدت',
ur'طی': u'الارض',
ur'هنر|عاشق': u'پیشه',
ur'عالی': u'نسب',
ur'عام': u'المنفعه',
ur'عدیم': u'النظیر',
ur'عقب': u'گرد|نشینی',
ur'علی': u'البدل',
ur'عیال': u'وار',
ur'غلط': u'غلوط',
ur'فارغ': u'الاکناف|التحصیل',
ur'فراخ': u'رو|شکم|بال|کام|دیده|سخن|آهنگ|دست|آستین|ابرو|روزی',
ur'فرخ': u'لقا|دیم|فال|پی',
ur'فرمان': u'روا|بر',
ur'فرنگی': u'مآب',
ur'غیر': u'قابل|متعهد|اخلاقی|شرعی|انسانی|اصولی|مجاز|حضوری|دولتی|نظامی|انتفاعی|منتظره|قانونی|معمولی|ممکن|رسمی|فعال|نفتی|منقول|ارادی|جایز|طبیعی|عادی|عمد|لازم|مسئول|عادلانه|خودی|عاقلانه|کافی',
ur'وفا|فره|نیاز|جفا|خرد|غیرت|باور|ارزش|نعل|درد|علاقه': u'مند',
ur'فرو': u'نهادن|داشت|گذاشت|مایه|بست|پاشی|پاشیده',
ur'فوق': u'الذکر',
ur'خارق|فوق': u'العاده',
ur'کیلو|سانتی|میلی|دسی|نانو|ولت': u'متر|آمپر|گرم',
ur'قاچ': u'قاچ',
ur'قافله': u'سالار',
ur'قایم': u'الزاویه',
ur'قدسی': u'مآب',
ur'قره': u'قاطی',
ur'قریب': u'الوقوع',
ur'کاه|قطره|دله|آفتابه': u'دزد',
ur'قوی': u'پنجه',
ur'قیمه': u'قیمه',
ur'کاسه': u'یکی|سیاه|لیس',
ur'کج': u'نهاد|خلق|کلاه',
ur'کلاه': u'گوشه|گذار',
ur'کله': u'معلق|خشک|گنده|خر|شق|پوک',
ur'زبانه|زمین|ماشین|فرمان|کمان|کنگره|گوشه|دامنه|خانه|پول|مقام|آ[یئ]ینه': u'دار',
ur'کهن': u'سال|دیار',
ur'کینه': u'توز|ورز',
ur'گران': u'مغز|سایه|قدر|رکاب|سرشت|پایه|قیمت|روح|سنگ|جان|سر|فروش',
ur'گربه': u'گون|کوره',
ur'گشاده': u'رو|دست',
ur'گل': u'چهره|ریزان|ریز|گون|باران|آرا|اندام|برگ',
ur'گلوله': u'باران',
ur'ناهم|هم|گندم': u'گون',
ur'لازم': u'الوصول|الاجراء',
ur'مشکوک|معلوم|مجهول|فارغ': u'الحال',
ur'لت': u'لت|انبان|انبار',
ur'لسان': u'الغیب',
ur'مالک': u'الرقاب',
ur'ماه': u'طلعت',
ur'مشغول': u'الذمه',
ur'معظم': u'له|القدر',
ur'ملی|همجنس|زمینه': u'گرا',
ur'میرزا': u'قلمدان|قشمشم|بنویس',
ur'ناخن': u'خشک',
ur'نازک': u'نی|نارنجی|خیال',
ur'جهان|نافه': u'گشا',
ur'ندید': u'بدید',
ur'نظریه|رویا|رؤیا': u'پرداز',
ur'نقشه|وزنه|بهره|کلاه': u'بردار',
ur'نق': u'نقو',
ur'نگون': u'طشت|بخت',
ur'نیک': u'روز|انجام|پی|اختر|بخت',
ur'نیم': u'ته|بسمل',
ur'هرکن': u'پرکن',
ur'همایون': u'فال|آثار|بخت',
ur'همه': u'کاره|جانبه',
ur'هیچ': u'کاره|گاه|یک|کس|کدام',
ur'ول': u'خرج|معطل',
ur'یکه': u'شناس|بزن|سوار|تاز',
ur'ابجد': u'خوان',
ur'ابر': u'آلود|قدرت|ابزار',
ur'ابو': u'العجب|الکلام|الهول',
ur'اولو': u'الالباب|الامر|العزم',
ur'حسب|صاحب|واجب': u'الامر',
ur'گل|آذر': u'گون',
ur'آزاد': u'مرد|وار',
ur'باز': u'خرید|خواست|دید|بین',
ur'بر': u'هم|آشفتگی|آشفته|پایی',
ur'بلند': u'آوازه|پایه',
ur'آتش': u'بس|نشان|سوزی|افروز|افکن|افزار',
ur'پا': u'برجا|برهنه|بست|پتی|کار',
ur'پایه|بنیان': u'گذار|گذاری',
ur'پر': u'ابهام|ابهت|اتلاف|ادا|ادویه|ازدحام|استرس|استقامت|اشک|برخورد|ترانه|تردد|ترشح|تشبیه|تصادف|تعصب|تقلب|تلاش|تملق|شور',
ur'کم': u'محل|بضاعت|کم|یاب',
ur'پر|کم': u'نظیر|کار|تعداد|اشتباه|اشکال|اهمیت|تحرک|تحول|ترافیک|تراکم|تقاضا|تکرار|تنش|تنوع|رو|آب',
ur'تنگا': u'تنگ',
ur'تیز': u'پا|دست|دندان|هوش|بین',
ur'چادر|تخت|زاغه|شهر|ته|آب|کاخ|پایتخت|یکجا|ییلاق': u'نشین',
ur'چهار': u'شانه',
ur'ویروس': u'شناس|یاب',
ur'یاد': u'داشت|دهی',
ur'یار': u'کشی',
ur'ی[اأ]س': u'آلود',
ur'حاضر': u'جواب|یراق',
ur'خرد': u'سال',
ur'دو': u'برجی|تخمه|سره|قلو|بهشک',
ur'ذو': u'الجلال|العرش|القدر|القوافی|اللسانین|المجد|المکارم|المن|المناقب|المنن|النور|الوجهین|جسدین',
ur'رنگا': u'رنگ',
ur'رو': u'سفید|سیاه|باز',
ur'قهوه|نگار|آبدار|گل|کتاب': u'خانه',
ur'روز': u'افزون|انه',
ur'زود': u'باور',
ur'شاد': u'روان|کام|مان|مانه',
ur'فرا': u'خور|روی',
ur'کد': u'خدا|بانو',
ur'گردا': u'گرد',
ur'لا': u'ابالی|جون|کردار|مذهب|مروت|یتغیر|یتناهی|یزال|یعقل',
ur'نا': u'جوانمرد|خودآگاه|نجیب|امید|آزموده|آشنا|آگاه|برابر|تمام',
ur'ایمن|پیاده|مقاوم|امن|ساده|بهینه|مرتب|آماده|رها|آگاه|زیبا|یکسان|روان|ذخیره|استاندار|متمایز|جدا|شخصی|انبوه|خصوصی': u'سازی',
}
# ----------------------------------------Wrong dictations-----------------------
forReplace = { # 'میسیسیپی': u'میسیسیپی',
# 'هاوی': u'حاوی', ممکن است اسم خاص باشد
# 'سوماً': u'سوم',
# 'سوما': u'سوم',
# 'دوماً': u'دوم',
# 'دوما': u'دوم',
# 'جانا': u'جانی', باگ در [[جانا رودین]]
# 'طوسی': u'توسی', خواجه نصیرالدین طوسی را به تبدیل می کرد
# based on http://www.persianacademy.ir/fa/pishvand.aspx
# substitute for tanvin
ur'به شخصه': u'بشخصه',
ur'بهشخصه': u'بشخصه',
ur'به عینه': u'بعینه',
ur'بهعینه': u'بعینه',
ur'احمدی نژاد': u'احمدینژاد',
ur'جابه جا': u'جابهجا',
ur'جا به جا': u'جابهجا',
ur'جا بهجا': u'جابهجا',
ur'بی بی سی': u'بیبیسی',
ur'اف بی آی': u'افبیآی',
ur'می سی سی پی': u'میسیسیپی',
ur'ویژهگی': u'ویژگی',
ur'دایرهالمعارف': u'دایرةالمعارف',
ur'دایره المعارف': u'دایرةالمعارف',
ur'تأئید': u'تأیید',
ur'تائید': u'تأیید',
ur'بقیهالله': u'بقیةالله',
ur'بقیه الله': u'بقیةالله',
ur'بقیة الله': u'بقیةالله',
ur'دگمه': u'دکمه',
ur'وحله': u'وهله',
ur'نقطهنظر': u'دیدگاه',
ur'ناچاراً': u'بهناچار',
ur'ناچارا': u'بهناچار',
ur'منیت': u'منی',
ur'منیٔت': u'منی',
ur'فرآیند': u'فرایند',
ur'فرآیندها': u'فرایندها',
ur'کارآیی': u'کارایی',
ur'ملاحضه': u'ملاحظه',
ur'ملیون': u'میلیون',
ur'ملیارد': u'میلیارد',
ur'مطمعن': u'مطمئن',
ur'مرهمت': u'مرحمت',
ur'مرحم': u'مرهم',
ur'محصوب': u'محسوب',
ur'مذبور': u'مزبور',
ur'متعصفانه|متاصفانه': u'متأسفانه',
ur'متغییر': u'متغیر',
ur'لشگر': u'لشکر',
ur'لحجه': u'لهجه',
ur'گاهاً': u'گاهی',
ur'گاها': u'گاهی',
ur'کهکیلویه': u'کهگیلویه',
ur'قائله': u'غائله',
ur'فارقالتحصیل': u'فارغالتحصیل',
ur'علاالدین': u'علاءالدین',
ur'علمشنگه': u'المشنگه',
ur'غلطاندن': u'غلتاندن',
ur'ظبط': u'ضبط',
ur'طنبور': u'تنبور',
ur'طپش': u'تپش',
ur'ضمینه': u'زمینه',
ur'زخامت|ذخامت': u'ضخامت',
ur'زخیم|ذخیم': u'ضخیم',
ur'صحفه': u'صفحه',
ur'سفارشاً': u'سفارشی',
ur'سفارشا': u'سفارشی',
ur'سرلشگر': u'سرلشکر',
ur'سپاسگذار': u'سپاسگزار',
ur'خبرگذار': u'خبرگزار',
ur'ساتع': u'ساطع',
ur'زندهگی': u'زندگی',
ur'زباناً': u'زبانی',
ur'زبانا': u'زبانی',
ur'رهبریت': u'رهبری',
ur'در باره': u'درباره',
ur'دوئیت': u'دوگانگی',
ur'داوطلبین': u'داوطلبان',
ur'خوشنود': u'خشنود',
ur'خوبیت': u'خوبی',
ur'خوانواده': u'خانواده',
ur'خواستگاه': u'خاستگاه',
ur'خرشید': u'خورشید',
ur'خردن': u'خوردن',
ur'خانند': u'خوانند',
ur'خابیدن': u'خوابیدن',
ur'حظور': u'حضور',
ur'حظرت': u'حضرت',
ur'حدلامکان': u'حتیالامکان',
ur'حاظر': u'حاضر',
ur'چهارماً': u'چهارم',
ur'چهارما': u'چهارم',
ur'چارشنبه': u'چهارشنبه',
ur'جاناً': u'جانی',
ur'توجیح': u'توجیه',
ur'توضیع': u'توزیع',
ur'تلوزیون': u'تلویزیون',
ur'تضاهر': u'تظاهر',
ur'ترجیه': u'ترجیح',
ur'پنچ': u'پنج',
ur'پزشگی': u'پزشکی',
ur'پرفسور': u'پروفسور',
ur'پاتوغ': u'پاتوق',
ur'بیمهابا': u'بیمحابا',
ur'بنیانگزار': u'بنیانگذار',
ur'بلقور': u'بلغور',
ur'بلاخره': u'بالاخره',
ur'برخواستن': u'برخاستن',
ur'برعلیه': u'علیه',
ur'برخواست': u'برخاست',
ur'بدیت': u'بدی',
ur'باطلاق': u'باتلاق',
ur'بازرسین': u'بازرسان',
ur'بارگزار': u'بارگذار',
ur'باجناق': u'باجناغ',
ur'باباقوری': u'باباغوری',
ur'آروق': u'آروغ',
ur'انظباط': u'انضباط',
ur'التفاط': u'التفات',
ur'افضلتر': u'بهتر',
ur'افسنطین': u'افسنتین',
ur'اعلمتر': u'داناتر',
ur'اطو': u'اتو',
ur'اطراق': u'اتراق',
ur'اطاق': u'اتاق',
ur'اصطرلاب': u'اسطرلاب',
ur'ارتقاع': u'ارتقا',
ur'اختاپوث': u'اختاپوس',
ur'ابولفضل': u'ابوالفضل',
ur'امپراطور': u'امپراتور',
ur'آزوقه': u'آذوقه',
ur'ذکام': u'زکام',
ur'بگیر و ببند': u'بگیر ببند',
ur'ساز و کار': u'سازوکار',
ur'جر و بحث': u'جربحث',
ur'خوار و بار': u'خواربار',
ur'احجام': u'حجمها',
ur'اقشار': u'قشرها',
ur'لازم به ذکر است': u'لازم است ذکر شود',
ur'بدلیل': u'به دلیل',
ur'آنرا': u'آن را',
ur'اینرا': u'این را',
ur'هیات': u'هیئت',
ur'هیأت': u'هیئت',
ur'رییسه': u'رئیسه',
ur'رییس': u'رئیس',
ur'مساله': u'مسئله',
ur'مسأله': u'مسئله',
ur'همین جا': u'همینجا',
ur'همینجا': u'همینجا',
ur'همینطور': u'همینطور',
ur'همین طور': u'همینطور',
ur'همان جا': u'همانجا',
ur'همانجا': u'همانجا',
ur'همان طور': u'همانطور',
ur'همانطور': u'همانطور',
ur'هیچکدام': u'هیچکدام',
ur'هیچ کدام': u'هیچکدام',
ur'هیچکس': u'هیچکس',
ur'هیچ کس': u'هیچکس',
ur'هیچیک': u'هیچیک',
ur'هیچ یک': u'هیچیک',
ur'همدیگر': u'همدیگر',
ur'آن چه': u'آنچه',
ur'آنچه': u'آنچه',
ur'چنان چه': u'چنانچه',
ur'چنانچه': u'چنانچه',
ur'چنان که': u'چنانکه',
ur'چنانکه': u'چنانکه',
ur'ئیدروژن': u'هیدروژن',
ur'بعضن': u'بعضاً',
ur'غالبن': u'غالباً',
ur'کاملن': u'کاملاً',
ur'احتمالن': u'احتمالاً',
ur'اصلن': u'اصلاً',
ur'اشتباهن': u'اشتباهاً',
ur'منشاء': u'منشأ',
ur'مبداء': u'مبدأ',
}
persianGlyphs = { # these two are for visually available ZWNJ #visualZwnj
ur'\u200cه': u'ﻫ',
ur'ی\u200c': u'ﻰﻲ',
ur'أ': u'ﺄﺃﺃ',
ur'آ': u'ﺁﺁﺂ',
ur'إ': u'ﺇﺈﺇ',
ur'\u0627': u'ﺍﺎ',
ur'ب': u'ﺏﺐﺑﺒ',
ur'پ': u'ﭖﭗﭘﭙ',
ur'ت': u'ﺕﺖﺗﺘ',
ur'ث': u'ﺙﺚﺛﺜ',
ur'ج': u'ﺝﺞﺟﺠ',
ur'چ': u'ﭺﭻﭼﭽ',
ur'ح': u'ﺡﺢﺣﺤ',
ur'خ': u'ﺥﺦﺧﺨ',
ur'د': u'ﺩﺪ',
ur'ذ': u'ﺫﺬ',
ur'ر': u'ﺭﺮ',
ur'ز': u'ﺯﺰ',
ur'ژ': u'ﮊﮋ',
ur'س': u'ﺱﺲﺳﺴ',
ur'ش': u'ﺵﺶﺷﺸ',
ur'ص': u'ﺹﺺﺻﺼ',
ur'ض': u'ﺽﺾﺿﻀ',
ur'ط': u'ﻁﻂﻃﻄ',
ur'ظ': u'ﻅﻆﻇﻈ',
ur'ع': u'ﻉﻊﻋﻌ',
ur'غ': u'ﻍﻎﻏﻐ',
ur'ف': u'ﻑﻒﻓﻔ',
ur'ق': u'ﻕﻖﻗﻘ',
ur'ک': u'ﮎﮏﮐﮑﻙﻚﻛﻜ',
ur'گ': u'ﮒﮓﮔﮕ',
ur'ل': u'ﻝﻞﻟﻠ',
ur'م': u'ﻡﻢﻣﻤ',
ur'ن': u'ﻥﻦﻧﻨ',
ur'ه': u'ﻩﻪﻫﻬ',
ur'هٔ': u'ﮤﮥ',
ur'\u0648': u'ﻭﻮ',
ur'ؤ': u'ﺅﺅﺆ',
ur'ی': u'ﯼﯽﯾﯿﻯﻰﻱﻲﻳﻴ',
ur'ئ': u'ﺉﺊﺋﺌ',
ur'لا': u'ﻻﻼ',
ur'لإ': u'ﻹﻺ',
ur'لأ': u'ﻸﻷ',
ur'لآ': u'ﻵﻶ',
}
def my_replace(match, word1, word2):
match = match.group()
return match.replace(word1, word2)
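# Illustrative usage: my_replace is meant to be used as a re.sub callback,
# replacing word1 with word2 only inside the matched span, e.g.
#   re.sub(pattern, lambda m: my_replace(m, u'\u0627', u'آ'), text)
# where `pattern` is whatever regex selects the span to be fixed.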
def quotation(text):
# This function converts quotes to Persian guillemets only when the paragraph contains an even number of Latin quotation marks.
lines = text.split(ur'\n')
result = []
for i in range(0,len(lines)):
line = lines[i]
# رفع مشکل استفاده از ـً به جای گیومه لاتین در متن فارسی
line = re.sub(ur'ا\"([ ]*[' + persianCharacters + ur'])', u'اً\\1',line)
# ”“ تبدیل
line = re.sub(ur'(^|[' + persianCharacters+ ur'\:>،»؛\s\n\}\]\.]+)“((?:\[\[|).*?['+ persianCharacters+ ur'\n]+?(?:\]\]|\.|\<|\:|))”(['+ persianCharacters+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# وارونه ”“ تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)"((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|))"(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# وارونه ”“ تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)”((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|))“(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# ‘’ تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)‘((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|))’(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# وارونه ‘’ تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)’((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|))‘(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# ‚’ تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)‚((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|\{|\[|))’(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# „” تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)„((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|))”(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# << >> تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)\<\<((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|))\>\>(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
# (()) تبدیل
line = re.sub(ur'(^|[' + persianCharacters
+ ur'\:>،»؛\s\n\}\]\.]+)\(\(((?:\[\[|).*?['
+ persianCharacters
+ ur'\n]+?(?:\]\]|\.|\<|\:|))\)\)(['
+ persianCharacters
+ ur'،«؛\s\n\.\[\{]|$)', u'\\1«\\2»\\3',line)
result.append(line)
return ur'\n'.join(result)
def dictationReplace(x,y,extensions,text):
Regex=ur'(^|[^' + persianCharacters + ur'])(\s|\u200c|_|)(' + x + ur')(\s|_)('+ y + ur')(\s|\u200c|_|)(' + extensions + ur')(\n|[^' + persianCharacters + ur'])'
text = re.sub(Regex,u'\\1\\2\\3\u200c\\5\\6\\7\\8',text)
return text
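# Illustrative example (mirrors test case 20 below): with x=ur'ویکی',
# y=ur'پدیا' and an extensions pattern ending in '|', the space inside
# «ویکی پدیا» is replaced by a ZWNJ, turning u'متن ویکی پدیا متن' into
# u'متن ویکی\u200cپدیا متن'.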
def complex_replace(txt, pat, w1, w2):
def function(s):
return s.group(1)+s.group(2).replace(w1, w2,1)+s.group(3)+s.group(4)
txt=re.sub(re.compile(pat),function,txt)
return txt
def dictation(text):
for i in complexes:
if complexes.has_key(i):
text = dictationReplace(i, complexes[i],
ur'ی|یی|ها|های|هایی|هایم|هایت|هایش|هایمان|هایتان|هایشان|', text)
# for last name
text = dictationReplace(personNames,
ur'ی|یی|زاده|نیا|گان|فر|نژاد|یان|ی\u200cها|یها'
, ur'ی|', text)
# for 'آباد's
text = dictationReplace(personNames + ur'|' + addToAbad, ur'آباد',
ur'زاده|نیا|پور|گان|فر|نژاد|ی|یان|ها|های|یی|هایی|ی\u200cها|یها|'
, text)
# for first names
for i in firstNameComplex:
if firstNameComplex.has_key(i):
text = re.sub(ur'(^|[^' + persianCharacters
+ ur']|\s|_)(' + i + ur')(\s|_)('
+ firstNameComplex[i]
+ ur')(\s|_)($|[^' + persianCharacters
+ ur']|[' + persianCharacters + ur'])',
u'\\1\\2\u200c\\4\\5\\6',text)
# for colors
text = dictationReplace(colorsNames, ur'فام|گون', ur'زاده|نیا|پور|گان|فر|نژاد|ی|یی|ها|های|هایی|ی\u200cها|یها|هایم|هایت|هایش|هایمان|هایتان|هایشان|', text)
# for numbers
text = dictationReplace(persianNumbers, ur'گانه|ماهه', ur'زاده|نیا|پور|گان|فر|نژاد|ی|یی|ها|های|هایی|هایم|هایت|هایش|هایمان|هایتان|هایشان|', text)
# wrong dictation
for i in forReplace:
if forReplace.has_key(i):
text = re.sub(ur'(^|[^' + persianCharacters
+ ur'])(\s|\u200c|_|)(' + i
+ ur')(\s|\u200c|_|)($|[^'
+ persianCharacters + ur'])', u'\\1\\2'
+ forReplace[i] + u'\\4\\5',text)
# words containing آ
text = complex_replace(text, ur"(^|\s|_|«|»|\[|\(|\<|\>|\')("+ wordsWithA+ ur")(ی|ئی|یی|ٔ|)( |«|»|\.|،|_|\]|\s|\:|\)|\<|\>|؟|\'|\!|$)", u'\u0627', ur'آ')
# present-tense stems containing آ
text = complex_replace(text,
ur"(^|\u200c|\s|_|«|»|\[|\(|\<|\>|\')("
+ PresentVerbsWithA
+ ur")(م|ی|د|یم|ید|ند)( |«|»|\.|،|_|\s|\]|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\u0627', ur'آ')
# past-tense stems containing آ
text = complex_replace(text,
ur"(^|\u200c|\s|_|«|»|\[|\(|\<|\>|\')("
+ PastVerbsWithA
+ ur")(م|ی|د|یم|ید|ند)( |«|»|\.|،|_|\s|\]|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\u0627', ur'آ')
# hamza with damma (ؤ)
text = complex_replace(text, ur"(^|\s|_|«|»|\[|\(|\<|\>|\')("
+ HamzehZam
+ ur")(ها|ها|ین|ان|ی|ئی|یی|ٔ|)( |«|»|\.|،|_|\s|\]|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\u0648', ur'ؤ')
# hamza with nasb (أ)
text = complex_replace(text, ur"(^|\s|_|«|»|\[|\(|\<|\>|\')("
+ HamzehNasb
+ ur")(ی|ئی|یی|ٔ|)( |«|»|\.|،|_|\s|\]|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\u0627', ur'أ')
# hamza in the middle of a word
for i in HamzehAtInside:
if HamzehAtInside.has_key(i):
text = re.sub(ur"(^|\s|_|«|»|\[|\(|\<|\>|\')(" + i
+ ur')(| )(' + HamzehAtInside[i]
+ ur")(?= |«|»|\.|،|_|\s|\]|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\\1\\2ء\\4',text) # در مورد افزودن یا حذف همزهٔ پایانی اجماعی وجود ندارد.
# alef maqsurah
text = re.sub(ur"(^|\s|_|«|»|\[|\(|\<|\>|\')(" + HamzehAtEnd + ur")(?= |«|»|\.|،|_|\s|\]|\:|\)|\<|\>|؟|\!|\'|$)",u'\\1\\2ء',text)
text = complex_replace(text,
ur"(^|\s|_|«|»|\[|\(|\<|\>|\')("
+ AlefMaghsooreh
+ ur")(| )( |«|»|\.|،|_|\s|\]|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\u0627', ur'ی')
# adjective + تر (comparative)
text = re.sub(ur"(^|\s|_|«|»|\]|\[|\(|\<|\>|\')("
+ adjective
+ ur")( |_)تر(?= |«|»|\.|\[|\]|،|_|\s|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\\1\\2\u200cتر',text)
# color names (as adjectives) + تر
text = re.sub(ur"(^|\s|_|«|»|\]|\[|\(|\<|\>|\')("
+ colorsNames
+ ur")( |_)تر(?= |«|»|\.|\[|\]|،|_|\s|\:|\)|\<|\>|؟|\!|\'|$)"
, u'\\1\\2\u200cتر',text)
text = re.sub(ur"به دست\u200cآورد", u'به دست آورد',text) # Solving a bug!
def function(s):
return s.group(1)+s.group(2)[:-1]+u'اً'+s.group(3)
regex=ur"(^|[؛\s\n\.،«»\'\"\<\>؟])(" + needsNasb + ur')[' + NASB + ZAMM + ur']?([؛؟\s\n\.،«»\'\"\<\>]|$)'
text=re.sub(regex,function,text, re.UNICODE)
return text
def normalizeZwnj(text):
text = re.sub(ur"\u200c{2,}", ur"\u200c",text)
# Clean ZWNJs after characters that don't connect to the next letter
text = \
re.sub(ur"([۰-۹0-9إأةؤورزژاآدذ،؛,\:«»\\/@#$٪×\*\(\)ـ\-=\|ء])\u200c"
, u'\\1',text)
# Clean ZWNJs before and after English characters
text = re.sub(ur"\u200c([\w])", u'\\1',text)
text = re.sub(ur"([\w])\u200c", u'\\1',text)
# Clean ZWNJs before and after Persian characters
text = re.sub(ur'\u200c([' + vowels + arabicIndicDigits
+ persianDigits + hamza + '])', u'\\1',text)
text = re.sub(ur'([' + arabicIndicDigits + '])\u200c', u'\\1',text)
text = re.sub(ur"([\w])\u200c", u'\\1',text)
# Clean ZWNJs after and before punctuation
text = re.sub(ur"\u200c([ء\n\s\[\]\.،«»\:\(\)\؛\؟\?\;\$\!\@\-\=\+\\|])", u'\\1',text)
text = re.sub(ur"([\n\s\[\.،«»\:\(\)\؛\؟\?\;\$\!\@\-\=\+\\|])\u200c", u'\\1',text)
# Clean ZWNJs before brackets which have a space after/before them
text = re.sub(ur"\u200c(\][\s\n])", u'\\1',text)
text = re.sub(ur"([\n\s]\[)\u200c", u'\\1',text)
return text
def toStandardPersianCharacters(text):
for i in persianGlyphs:
text = re.sub(ur'[' + persianGlyphs[i] + ur']', i,text)
text = normalizeZwnj(text)
text = text.replace(ur"ك", ur'ک') # Arabic
text = text.replace(ur"ڪ", ur'ک') # Urdu
text = text.replace(ur"ﻙ", ur'ک') # Pushtu
text = text.replace(ur"ﻚ", ur'ک') # Uyghur
text = text.replace(ur"ي", ur'ی') # Arabic
text = text.replace(ur"ى", ur'ی') # Urdu
text = text.replace(ur"ے", ur'ی') # Urdu
text = text.replace(ur"ۍ", ur'ی') # Pushtu
text = text.replace(ur"ې", ur'ی') # Uyghur
text = text.replace(ur"ہ", ur'ه') # Convert ہ to ه ہہہہ to ههه
text = re.sub(ur"ە", u'ه\u200c',text) # Kurdish
text = text.replace(ur"ھ", ur'ه') # Kurdish
return text
def applyOrthography(text):
text = text.replace(ur"\r", ur'')
# cleanup rules from autoFormatter.js
text = re.sub(ur"[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x9F\uFEFF]+", u'',text)
text = re.sub(ur"[ \xA0\xAD\u1680\u180E\u2000-\u200D\u2028\u2029\u202F\u205F\u2060\u3000]+\n", u'\n',text)
# convert tabs and non-breaking spaces at the start of a line to nothing, since MediaWiki ignores them
#text = re.sub(ur"\n[\t\u00A0]+", u'\n',text)
# convert the various space characters to a plain space
text = re.sub(ur"[\u0020\u0085\u00A0\u180E\u2000-\u200A\u202F\u205F\u3000]", u' ',text)
text = re.sub(ur"[\u0085]", u'',text)
# http://kb.mozillazine.org/Network.IDN.blacklist_chars
text = re.sub(ur"[\u01C3\uFE15]", u'!',text)
text = re.sub(ur"[\u02D0\u0589\u05C3\uA789]", u': ',text)
text = re.sub(ur"[\u0338\u2044\u2215\u2571\u29F8\u3033\uFF0F]", u'/',text)
text = re.sub(ur"[\u05F4]", u'"',text)
text = re.sub(ur"[\u06D4\u0701\uFF0E\uFF61]", u'.',text)
text = re.sub(ur"\u3014", u'(',text)
text = re.sub(ur"\u3015", u')',text)
# replace non-standard ۀ that is directly followed by a character with ه + hamza + space
text = re.sub(ur"ۀ(?![\s\n])", u'هٔ ',text)
# Replace ه followed by (space|ZWNJ|lrm) followed by ی with هٔ
text = re.sub(ur"ه[\u200c\u200e\s]+ی([\s\n])", u'هٔ\\1',text)
# Replace ه followed by (space|ZWNJ|lrm|nothing) followed by ء or ٔ with هٔ
text = re.sub(ur"ه[\u200c\u200e\s]*[ءٔ]([\s\n])", u'هٔ\\1',text)
# Replace هٓ or single-character ۀ with the standard هٔ
text = re.sub(ur"(ۀ|هٓ)", u'هٔ',text)
# Replace ه followed by ئ or ی, and then by ی, with ه\u200cای, example: خانهئی becomes خانه\u200cای
text = re.sub(ur"ه\u200c[ئی]ی", u'ه\u200cای',text)
# def for removing incorrect ZWNJs
text = re.sub(ur"([\u200c\u200e])([\s\n])", u'\\2',text)
text = re.sub(ur"([\s\n])([\u200c\u200e])", u'\\1',text)
# a space before a short vowel is wrong; to keep words from running together, the space and the vowel are swapped
text = re.sub(ur'([' + persianCharacters + vowels + hamza+ ur'])(\s)([' + vowels + hamza + ur'])',u'\\1\\3\\2',text)
# short vowels do not occur back to back; a letter must separate them, so repeated vowels are collapsed
text = re.sub(ur'([' + vowels + hamza + ur']){2,}', u'\\1',text)
text = re.sub(ur"ئء", ur'یء',text) # two hamzes after each other
text = re.sub(ur"أء", ur'اء',text) # two hamzes after each other
text = re.sub(ur"ؤء", ur'ؤ',text) # two hamzes after each other
# .replace(ur"وء", ur'ؤ')#bug on سوء
text = re.sub(ur"سؤ ?استفاده", ur'سوءاستفاده',text) # bug on سوءاستفاده و سوء
# add hamza
text = re.sub(ur"درباره (ام|ات|اش|مان|تان|شان|ای)( |$)",u'درباره\\1\\2',text) # i برای جلوگیری از باگ احتمالی برای افزودن همزه به درباره
text = re.sub(ur"درباره ", ur'دربارهٔ ',text)
text = re.sub(ur'صفحه( |)([' + persianDigits + ']+)(\n|\.|\,|\||\<)', u'صفحهٔ \\2\\3',text) # [[Special:PermaLink/15326391#افزودن همزه]]
return text
def complexVerbsApplyZwnj(text):
for x in persianComplexPastVerbs:
y = persianComplexPastVerbs[x]
text = re.sub(ur'(^|[^' + persianCharacters + ur'])(' + x
+ ur') ?(می|نمی|)( |\u200c|)(ن|)(' + y
+ ur')(م|ی|یم|ید|ند|ه|ن|)($|[^'
+ persianCharacters + ur'])',
u'\\1\\2\u200c\\3\u200c\\5\\6\\7\\8',text)
for x in persianComplexPresentVerbs:
y = persianComplexPresentVerbs[x]
text = re.sub(ur'(^|[^' + persianCharacters + ur'])(' + x
+ ur') ?(می|نمی|)( |\u200c|)(ن|)(' + y
+ ur')(م|ی|د|یم|ید|ند|ن)($|[^'
+ persianCharacters + ur'])',
u'\\1\\2\u200c\\3\u200c\\5\\6\\7\\8',text)
return text
def applyZwnj(text):
text = re.sub(ur'(^|[^' + persianCharacters + ur'])(می|نمی) ?'
+ persianPastVerbs + ur'(م|ی|یم|ید|ند|ه|)($|[^'
+ persianCharacters + u'])', u'\\1\\2\u200c\\3\\4\\5',text
)
text = re.sub(ur'(^|[^' + persianCharacters + ur'])(می|نمی) ?'
+ persianPresentVerbs
+ ur'(م|ی|د|یم|ید|ند)($|[^' + persianCharacters
+ ur'])', u'\\1\\2\u200c\\3\\4\\5',text)
# the present stem «دان» is handled separately because the suffix «ی» clashed with the word «میدانی»
text = re.sub(ur'(^|[^' + persianCharacters
+ ur'])(می|نمی) ?(دان)(م|د|یم|ید|ند)($|[^'
+ persianCharacters + ur'])', u'\\1\\2\u200c\\3\\4\\5',text
)
# special-case for the stem «توان»
text = re.sub(ur"(\s)(می|نمی) ?توان", u'\\1\\2\u200cتوان',text)
# attach every «ها» plural suffix with a ZWNJ
text = re.sub(ur" ها([\]\.،\:»\)\s]|\'{2,3}|\={2,})",u'\u200cها\\1',text)
text = re.sub(ur" ها(ی|یی|یم|یت|یش|مان|تان|شان)([\]\.،\:»\)\s])", u'\u200cها\\1\\2',text)
text = re.sub(ur"هها", u'هها',text)
# attach every «ترین» superlative suffix with a ZWNJ
text = re.sub(ur" ترین([\]\.،\:»\)\s]|\'{2,3}|\={2,})",u'\u200cترین\\1',text)
# remove the extra asterisk before headings
text = re.sub(ur"\n\*\s*(\=+.+?\=+\n)", u'\n\\1',text)
# remove single-quote (wiki bold/italic) markup from headings
text = re.sub(ur"(\n=+)(.*?)(?:'+)(.*?)(?:'+)(.*?)(=+\n)", u'\\1\\2\\3\\4\\5',text)
# drop a ZWNJ at the very start or end of the text
text = re.sub(ur"(^\u200c|\u200c$)", u'',text)
# personal verb endings
# note: «است» is deliberately excluded from the endings, since it is also used as an independent verb and in those cases must be written separately
# example: «این یک خانه است», where است must be written separately from خانه
text = re.sub(ur'ه +(ام|ای|ایم|اید|اند)($|[^' + persianCharacters + ur'\u200c])', u'ه\u200c\\1\\2',text)
# other minor cases unrelated to ZWNJ; should be moved elsewhere
text = re.sub(ur"ا\sً", u'اً',text)
# fix wrongly attached «که ای» (split it back apart)
text = re.sub(ur" که\u200cای ", u' که ای ',text)
# fix the «میستری» (Mystery) false positive
text = re.sub(ur"می\u200cستری", u'میستری',text)
text = re.sub(ur'می\u200cگوی($|[^' + persianCharacters
+ ur'\u200c])', u'میگوی\\1',text) # for میگوی دریایی
text = re.sub(ur'می\u200cدوی($|[^' + persianCharacters
+ ur'\u200c])', u'میدوی\\1',text) # for [[میدوی (ابهامزدایی)]]
return text
def punctuation(text):
# non-Persian punctuation
text = re.sub(ur"ː", u': ',text) # Replace incorrect : character
# use the Persian question mark ؟
text = re.sub(ur'([' + persianCharacters + ur'])[ ]*[?]',u'\\1؟',text)
# use the Persian semicolon ؛
text = re.sub(ur'([' + persianCharacters + ur'])[ ]*[;]',u'\\1؛ ',text)
# use the Persian comma ،
text = re.sub(ur'([' + persianCharacters + ur'])(\]\]|»|)[ ]*[,]', u'\\1\\2، ',text)
# remove double spaces after punctuation
text = re.sub(ur"(،|؛|؟|\.) ", u'\\1 ',text)
text = re.sub(ur"\r", u'',text)
# add or remove spaces
# remove repeated spaces between words, except between a parameter name and the equals sign
text = re.sub(ur"(. ) +(?=[^= ])", u'\\1',text)
# add a space after punctuation, except after ! (because of <!-- and !! at the top of tables)
text = \
re.sub(ur"([،\.\؛\؟»])([^\s\.\(\)«»\"\[\]<>\d\w\{\}\|۰۱۲۳۴۵۶۷۸۹\'])"
, u'\\1 \\2',text)
# add a space after punctuation marks
text = re.sub(ur'([' + persianCharacters
+ ur']+|\]|\)|»)([؟،؛\!\.])(['
+ persianCharacters + persianDigits
+ ur']+|\[|\(|«)', u'\\1\\2 \\3',text)
# remove the space after an opening guillemet, parenthesis or bracket
text = re.sub(ur"([\(«\[]) ", u'\\1',text)
# remove the space before a closing guillemet, parenthesis or bracket
text = re.sub(ur" ([\)»\]])", u'\\1',text)
# add a space before an opening guillemet
text = re.sub(ur"([^ \(\[\|\r\n>'])(«)", u'\\1 \\2',text)
text = re.sub(ur" +\( +", u' (',text)
text = re.sub(ur'([' + persianCharacters
+ ur']|\]|») *\( *(?=[' + persianCharacters
+ ur'])(?!ها\)|ان\))', u'\\1 (',text)
text = re.sub(ur'([' + persianCharacters + ur']) *\) *(?=['
+ persianCharacters + ur']|\[|«)', u'\\1) ',text)
# Removes extra line between two items list
text = re.sub(ur"(\n\*.*?)\n+(?=\n\*)", u'\\1',text)
# Removes extra line between two items list
text = re.sub(ur"(\n#.*?)\n+(?=\n#)", u'\\1',text)
# Convert , to ، if there are Persian characters on both sides of it
text = re.sub(ur'([' + persianCharacters + ur']), ?(?=['
+ persianCharacters + ur'])', u'\\1\\2، ',text)
# no other punctuation follows a Persian semicolon
text = re.sub(ur"(؛)(([\s]+)?[\.،؛:!؟\-…])", u'\\1',text)
# a Persian semicolon does not end a paragraph (replace with a full stop)
text = re.sub(ur"(؛)(\s|)\n", u'.\n',text)
# punctuation does not come right after an opening mark
text = re.sub(ur"([\(«])[\s]([؛\.،])", ur'\\1',text)
# Persian comma
# these punctuation marks do not come after a Persian comma
text = re.sub(ur"(،)([\s]+)?([،؛!؟\-][\.،؛!؟\-]*|\.(?!\.))",
u'\\1',text)
# full stop
# three or more dots should become an ellipsis
text = re.sub(ur'([' + persianCharacters + ur'])( *)(\.{3,})'
, u'\\1\\2…',text)
text = re.sub(ur" \.\.\. ", ur' … ',text)
# these marks do not come after a full stop
text = re.sub(ur'([' + persianCharacters
+ ur'])\.( *[،؛:!؟\?]+)', u'\\1.',text)
# punctuation does not come right after an opening parenthesis or guillemet
text = re.sub(ur'(\(|«)[\.،؛](\s|)([' + persianCharacters
+ ur'])', u'\\1\\3',text)
# punctuation inside parentheses
text = re.sub(ur'([' + persianCharacters
+ ur'])(\s|)[\.،؛](\s|)(\))', u'\\1\\2\\3\\4',text)
# when the following clause depends on the previous one, a Persian semicolon is preferable to a full stop
text = re.sub(ur'([' + persianCharacters
+ ur'])(\s|)(\.)(\s|)(ولی|که\s|و\s|بنابراین|لذا)'
, u'\\1؛ \\5',text)
# / Question & exclamation mark
# repeated exclamation marks must not be fixed, because of !! in MediaWiki table headers
# collapse repeated Persian question marks
text = re.sub(ur"(؟(\s|)){2,}", u'؟',text)
# incorrect ordering of punctuation marks
text = text.replace(ur'؟ !', ur'؟!').replace(ur'! ؟', u'!؟')
# Remove space preceding punctuation, except for ellipses
text = re.sub(ur"([^ \.]) +([؟،\:؛!\.])(\s[^ \.]|<|$)",
u'\\1\\2\\3',text)
return text
def run (text):
text = normalizeZwnj (text)
text = quotation (text)
text = dictation (text)
text = toStandardPersianCharacters (text)
text = applyOrthography (text)
text = complexVerbsApplyZwnj (text)
text = applyZwnj (text)
text = punctuation (text)
text = normalizeZwnj (text)
return text
def test(num,txt,txt2):
result=run (txt)
if result!=txt2:
print u'Case >' + txt
print u'Expected >' + txt2
print u'Result >'+ result
print u'---------'
else:
print u'Test '+str(num)+u' > '+txt2+u' is OK!'
test (1,u'من نرم افزار را می گزاریم . متن',u'من نرمافزار را میگزاریم. متن')
test (2,u'متن '+ u'می انباشتم'+u' متن',u'متن '+ u'میانباشتم'+u' متن')
test (3,u'متن '+ u'می چسبم'+u' متن',u'متن '+ u'میچسبم'+u' متن')
test (4,u'متن '+ u'باز آفریدم'+u' متن',u'متن '+ u'بازآفریدم'+u' متن')
test (5,u'متن '+ u'باز آفرینم'+u' متن',u'متن '+ u'بازآفرینم'+u' متن')
test (6,u'متن '+ u'محمد زاده'+u' متن',u'متن '+ u'محمدزاده'+u' متن')
test (7,u'متن '+ u'هجوامیز'+u' متن',u'متن '+ u'هجوآمیز'+u' متن')
test (8,u'متن '+ u'ارام'+u' متن',u'متن '+ u'آرام'+u' متن')
test (9,u'متن '+ u'می ارامم'+u' متن',u'متن '+ u'میآرامم'+u' متن')
test (10,u'متن '+ u'می اشامید'+u' متن',u'متن '+ u'میآشامید'+u' متن')
test (11,u'متن '+ u'اتفاقا'+u' متن',u'متن '+ u'اتفاقاً'+u' متن')
test (12,u'متن '+ u'مودب'+u' متن',u'متن '+ u'مؤدب'+u' متن')
test (13,u'متن '+ u'تالیف'+u' متن',u'متن '+ u'تألیف'+u' متن')
test (14,u'متن '+ u'استثنا'+u' متن',u'متن '+ u'استثناء'+u' متن')
test (15,u'متن '+ u'سواستفاده'+u' متن',u'متن '+ u'سوءاستفاده'+u' متن')
test (16,u'متن '+ u'حتا'+u' متن',u'متن '+ u'حتی'+u' متن')
test (17,u'متن '+ u'زرد فام'+u' متن',u'متن '+ u'زردفام'+u' متن')
test (18,u'متن '+ u'دو گانه'+u' متن',u'متن '+ u'دوگانه'+u' متن')
test (19,u'متن '+ u'عیش آباد'+u' متن',u'متن '+ u'عیشآباد'+u' متن')
test (20,u'متن '+ u'ویکی پدیا'+u' متن',u'متن '+ u'ویکیپدیا'+u' متن')
test (21,u'متن '+ u'ﭖﭗﭘﭙ'+u' متن',u'متن '+ u'پپپپ'+u' متن')
test (22,u'متن '+ u'علی "گفت" من'+u' متن',u'متن '+ u'علی «گفت» من'+u' متن')
test (23,u'متن '+ u'علی <<گفت>> من'+u' متن',u'متن '+ u'علی «گفت» من'+u' متن')
test (24,u'متن '+ u'حمید رضا'+u' متن',u'متن '+ u'حمیدرضا'+u' متن')
test (25,u'متن '+ u'بزرگ تر'+u' متن',u'متن '+ u'بزرگتر'+u' متن')
test (26,u'متن '+ u'کوچک ترین'+u' متن',u'متن '+ u'کوچکترین'+u' متن')
test (27,u'متن '+ u'کتاب ها'+u' متن',u'متن '+ u'کتابها'+u' متن')
test (28,u'متن '+ u'نیمفاصله اضافی'+u' متن',u'متن '+ u'نیمفاصله اضافی'+u' متن')
test (29,u'متن '+ u'بعد از رفاصله'+u' متن',u'متن '+ u'بعد از رفاصله'+u' متن')
test (30,u'متن '+ u'AB فاصله مجازی'+u' متن',u'متن '+ u'AB فاصله مجازی'+u' متن')
test (31,u'متن '+ u'ك عربی'+u' متن',u'متن '+ u'ک عربی'+u' متن')
test (32,u'متن '+ u'ي عربی'+u' متن',u'متن '+ u'ی عربی'+u' متن')
test (33,u'متن '+ u'ۀ غیر استاندارد'+u' متن',u'متن '+ u'هٔ غیر استاندارد'+u' متن')
test (34,u'متن '+ u'متن . نقطه'+u' متن',u'متن '+ u'متن. نقطه'+u' متن')
|
python
|
# -*- coding: utf-8 -*-
import logging
import sys
LOG_FORMAT = "[%(process)d] %(asctime)s.%(msecs)03d [%(levelname).1s] %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
def setup_logging() -> None:
formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=LOG_DATE_FORMAT)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(handler)
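# Example usage (illustrative): call setup_logging() once at program start,
# then log through the standard logging API anywhere in the application.
if __name__ == "__main__":
    setup_logging()
    logging.getLogger(__name__).info("logging configured")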
|
python
|
#!/usr/bin/env python
# ClusterShell.Node* test suite
"""Unit test for NodeSet with Group support"""
import copy
import shutil
import sys
import unittest
sys.path.insert(0, '../lib')
from TLib import *
# Wildcard import for testing purpose
from ClusterShell.NodeSet import *
from ClusterShell.NodeUtils import *
def makeTestG1():
"""Create a temporary group file 1"""
f1 = make_temp_file("""
#
oss: montana5,montana4
mds: montana6
io: montana[4-6]
#42: montana3
compute: montana[32-163]
chassis1: montana[32-33]
chassis2: montana[34-35]
chassis3: montana[36-37]
chassis4: montana[38-39]
chassis5: montana[40-41]
chassis6: montana[42-43]
chassis7: montana[44-45]
chassis8: montana[46-47]
chassis9: montana[48-49]
chassis10: montana[50-51]
chassis11: montana[52-53]
chassis12: montana[54-55]
Uppercase: montana[1-2]
gpuchassis: @chassis[4-5]
gpu: montana[38-41]
all: montana[1-6,32-163]
""")
# /!\ Need to return file object and not f1.name, otherwise the temporary
# file might be immediately unlinked.
return f1
def makeTestG2():
"""Create a temporary group file 2"""
f2 = make_temp_file("""
#
#
para: montana[32-37,42-55]
gpu: montana[38-41]
escape%test: montana[87-90]
esc%test2: @escape%test
""")
return f2
def makeTestG3():
"""Create a temporary group file 3"""
f3 = make_temp_file("""
#
#
all: montana[32-55]
para: montana[32-37,42-55]
gpu: montana[38-41]
login: montana[32-33]
overclock: montana[41-42]
chassis1: montana[32-33]
chassis2: montana[34-35]
chassis3: montana[36-37]
single: idaho
""")
return f3
def makeTestR3():
"""Create a temporary reverse group file 3"""
r3 = make_temp_file("""
#
#
montana32: all,para,login,chassis1
montana33: all,para,login,chassis1
montana34: all,para,chassis2
montana35: all,para,chassis2
montana36: all,para,chassis3
montana37: all,para,chassis3
montana38: all,gpu
montana39: all,gpu
montana40: all,gpu
montana41: all,gpu,overclock
montana42: all,para,overclock
montana43: all,para
montana44: all,para
montana45: all,para
montana46: all,para
montana47: all,para
montana48: all,para
montana49: all,para
montana50: all,para
montana51: all,para
montana52: all,para
montana53: all,para
montana54: all,para
montana55: all,para
idaho: single
""")
return r3
def makeTestG4():
"""Create a temporary group file 4 (nD)"""
f4 = make_temp_file("""
#
rack-x1y1: idaho1z1,idaho2z1
rack-x1y2: idaho2z1,idaho3z1
rack-x2y1: idaho4z1,idaho5z1
rack-x2y2: idaho6z1,idaho7z1
rack-x1: @rack-x1y[1-2]
rack-x2: @rack-x2y[1-2]
rack-y1: @rack-x[1-2]y1
rack-y2: @rack-x[1-2]y2
rack-all: @rack-x[1-2]y[1-2]
""")
return f4
class NodeSetGroupTest(unittest.TestCase):
def setUp(self):
"""setUp test reproducibility: change standard group resolver
to ensure that no local group source is used during tests"""
set_std_group_resolver(GroupResolver()) # dummy resolver
def tearDown(self):
"""tearDown: restore standard group resolver"""
set_std_group_resolver(None) # restore std resolver
def testGroupResolverSimple(self):
"""test NodeSet with simple custom GroupResolver"""
test_groups1 = makeTestG1()
source = UpcallGroupSource(
"simple",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name,
"sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name,
"sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name,
None)
# create custom resolver with default source
res = GroupResolver(source)
self.assertFalse(res.has_node_groups())
self.assertFalse(res.has_node_groups("dummy_namespace"))
nodeset = NodeSet("@gpu", resolver=res)
self.assertEqual(nodeset, NodeSet("montana[38-41]"))
self.assertEqual(str(nodeset), "montana[38-41]")
nodeset = NodeSet("@chassis3", resolver=res)
self.assertEqual(str(nodeset), "montana[36-37]")
nodeset = NodeSet("@chassis[3-4]", resolver=res)
self.assertEqual(str(nodeset), "montana[36-39]")
nodeset = NodeSet("@chassis[1,3,5]", resolver=res)
self.assertEqual(str(nodeset), "montana[32-33,36-37,40-41]")
nodeset = NodeSet("@chassis[2-12/2]", resolver=res)
self.assertEqual(str(nodeset), "montana[34-35,38-39,42-43,46-47,50-51,54-55]")
nodeset = NodeSet("@chassis[1,3-4,5-11/3]", resolver=res)
self.assertEqual(str(nodeset), "montana[32-33,36-41,46-47,52-53]")
# test recursive group gpuchassis
nodeset1 = NodeSet("@chassis[4-5]", resolver=res)
nodeset2 = NodeSet("@gpu", resolver=res)
nodeset3 = NodeSet("@gpuchassis", resolver=res)
self.assertEqual(nodeset1, nodeset2)
self.assertEqual(nodeset2, nodeset3)
# test also with some inline operations
nodeset = NodeSet("montana3,@gpuchassis!montana39,montana77^montana38",
resolver=res)
self.assertEqual(str(nodeset), "montana[3,40-41,77]")
def testAllNoResolver(self):
"""test NodeSet.fromall() with no resolver"""
self.assertRaises(NodeSetExternalError, NodeSet.fromall,
resolver=RESOLVER_NOGROUP)
def testGroupsNoResolver(self):
"""test NodeSet.groups() with no resolver"""
nodeset = NodeSet("foo", resolver=RESOLVER_NOGROUP)
self.assertRaises(NodeSetExternalError, nodeset.groups)
def testGroupResolverAddSourceError(self):
"""test GroupResolver.add_source() error"""
test_groups1 = makeTestG1()
source = UpcallGroupSource("simple",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name,
"sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups1.name,
"sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups1.name,
None)
res = GroupResolver(source)
# adding the same source again should raise ValueError
self.assertRaises(ValueError, res.add_source, source)
def testGroupResolverMinimal(self):
"""test NodeSet with minimal GroupResolver"""
test_groups1 = makeTestG1()
source = UpcallGroupSource("minimal",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups1.name,
None, None, None)
# create custom resolver with default source
res = GroupResolver(source)
nodeset = NodeSet("@gpu", resolver=res)
self.assertEqual(nodeset, NodeSet("montana[38-41]"))
self.assertEqual(str(nodeset), "montana[38-41]")
self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res)
def testConfigEmpty(self):
"""test groups with an empty configuration file"""
f = make_temp_file("")
res = GroupResolverConfig(f.name)
# NodeSet should work
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
# without group support
self.assertRaises(GroupResolverSourceError, nodeset.regroup)
self.assertRaises(GroupResolverSourceError, NodeSet, "@bar", resolver=res)
def testConfigResolverEmpty(self):
"""test groups resolver with an empty file list"""
# empty file list OR as if no config file is parsable
res = GroupResolverConfig([])
# NodeSet should work
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
# without group support
self.assertRaises(GroupResolverSourceError, nodeset.regroup)
self.assertRaises(GroupResolverSourceError, NodeSet, "@bar", resolver=res)
def testConfigBasicLocal(self):
"""test groups with a basic local config file"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
self.assertEqual(nodeset.groups().keys(), ["@foo"])
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]")
# No 'all' defined: all_nodes() should raise an error
self.assertRaises(GroupSourceNoUpcall, res.all_nodes)
# No 'reverse' defined: node_groups() should raise an error
self.assertRaises(GroupSourceNoUpcall, res.node_groups, "example1")
# regroup with rest
nodeset = NodeSet("example[1-101]", resolver=res)
self.assertEqual(nodeset.regroup(), "@foo,example101")
# regroup incomplete
nodeset = NodeSet("example[50-200]", resolver=res)
self.assertEqual(nodeset.regroup(), "example[50-200]")
# regroup no matching
nodeset = NodeSet("example[102-200]", resolver=res)
self.assertEqual(nodeset.regroup(), "example[102-200]")
def testConfigWrongSyntax(self):
"""test wrong groups config syntax"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
something: echo example[1-100]
""")
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
def testConfigBasicLocalVerbose(self):
"""test groups with a basic local config file (verbose)"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]")
def testConfigBasicLocalAlternative(self):
"""test groups with a basic local config file (= alternative)"""
f = make_temp_file("""
# A comment
[Main]
default=local
[local]
map=echo example[1-100]
#all=
list=echo foo
#reverse=
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]")
# @truc?
def testConfigBasicEmptyDefault(self):
"""test groups with a empty default namespace"""
f = make_temp_file("""
# A comment
[Main]
default:
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]")
def testConfigBasicNoMain(self):
"""test groups with a local config without main section"""
f = make_temp_file("""
# A comment
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]")
def testConfigBasicWrongDefault(self):
"""test groups with a wrong default namespace"""
f = make_temp_file("""
# A comment
[Main]
default: pointless
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""")
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
def testConfigQueryFailed(self):
"""test groups with config and failed query"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: false
all: false
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertRaises(NodeSetExternalError, nodeset.regroup)
# all_nodes()
self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res)
def testConfigQueryFailedReverse(self):
"""test groups with config and failed query (reverse)"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example1
list: echo foo
reverse: false
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("@foo", resolver=res)
self.assertEqual(str(nodeset), "example1")
self.assertRaises(NodeSetExternalError, nodeset.regroup)
def testConfigRegroupWrongNamespace(self):
"""test groups by calling regroup(wrong_namespace)"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertRaises(GroupResolverSourceError, nodeset.regroup, "unknown")
def testConfigNoListNoReverse(self):
"""test groups with no list and not reverse upcall"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
#list:
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
# not able to regroup, should still return valid nodeset
self.assertEqual(nodeset.regroup(), "example[1-100]")
def testConfigNoListButReverseQuery(self):
"""test groups with no list but reverse upcall"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
#list: echo foo
reverse: echo foo
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
def testConfigNoMap(self):
"""test groups with no map upcall"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
#map: echo example[1-100]
all:
list: echo foo
#reverse: echo foo
""")
# map is a mandatory upcall, an exception should be raised early
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
def testConfigWithEmptyList(self):
"""test groups with list upcall returning nothing"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
list: :
reverse: echo foo
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
def testConfigListAllWithAll(self):
"""test all groups listing with all upcall"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
all: echo foo bar
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-50]", resolver=res)
self.assertEqual(str(nodeset), "example[1-50]")
self.assertEqual(str(NodeSet.fromall(resolver=res)), "bar,foo")
# test "@*" magic group listing
nodeset = NodeSet("@*", resolver=res)
self.assertEqual(str(nodeset), "bar,foo")
nodeset = NodeSet("rab,@*,oof", resolver=res)
self.assertEqual(str(nodeset), "bar,foo,oof,rab")
# with group source
nodeset = NodeSet("@local:*", resolver=res)
self.assertEqual(str(nodeset), "bar,foo")
nodeset = NodeSet("rab,@local:*,oof", resolver=res)
self.assertEqual(str(nodeset), "bar,foo,oof,rab")
def testConfigListAllWithoutAll(self):
"""test all groups listing without all upcall"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
list: echo foo bar
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-50]", resolver=res)
self.assertEqual(str(nodeset), "example[1-50]")
self.assertEqual(str(NodeSet.fromall(resolver=res)), "example[1-100]")
# test "@*" magic group listing
nodeset = NodeSet("@*", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
nodeset = NodeSet("@*,example[101-104]", resolver=res)
self.assertEqual(str(nodeset), "example[1-104]")
nodeset = NodeSet("example[105-149],@*,example[101-104]", resolver=res)
self.assertEqual(str(nodeset), "example[1-149]")
# with group source
nodeset = NodeSet("@local:*", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
nodeset = NodeSet("example0,@local:*,example[101-110]", resolver=res)
self.assertEqual(str(nodeset), "example[0-110]")
def testConfigListAllNDWithoutAll(self):
"""test all groups listing without all upcall (nD)"""
# Even in nD, ensure that $GROUP is a simple group that has been previously expanded
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: if [[ $GROUP == "x1y[3-4]" ]]; then exit 1; elif [[ $GROUP == "x1y1" ]]; then echo rack[1-5]z[1-42]; else echo rack[6-10]z[1-42]; fi
#all:
list: echo x1y1 x1y2 x1y[3-4]
#reverse:
""")
res = GroupResolverConfig(f.name, illegal_chars=ILLEGAL_GROUP_CHARS)
nodeset = NodeSet("rack3z40", resolver=res)
self.assertEqual(str(NodeSet.fromall(resolver=res)), "rack[1-10]z[1-42]")
self.assertEqual(res.grouplist(), ['x1y1', 'x1y2', 'x1y[3-4]']) # raw
self.assertEqual(grouplist(resolver=res), ['x1y1', 'x1y2', 'x1y3', 'x1y4']) # cleaned
# test "@*" magic group listing
nodeset = NodeSet("@*", resolver=res)
self.assertEqual(str(nodeset), "rack[1-10]z[1-42]")
# with group source
nodeset = NodeSet("@local:*", resolver=res)
self.assertEqual(str(nodeset), "rack[1-10]z[1-42]")
nodeset = NodeSet("rack11z1,@local:*,rack11z[2-42]", resolver=res)
self.assertEqual(str(nodeset), "rack[1-11]z[1-42]")
def testConfigIllegalCharsND(self):
"""test group list containing illegal characters"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo rack[6-10]z[1-42]
#all:
list: echo x1y1 x1y2 @illegal x1y[3-4]
#reverse:
""")
res = GroupResolverConfig(f.name, illegal_chars=ILLEGAL_GROUP_CHARS)
nodeset = NodeSet("rack3z40", resolver=res)
self.assertRaises(GroupResolverIllegalCharError, res.grouplist)
def testConfigResolverSources(self):
"""test sources() with groups config of 2 sources"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
[other]
map: echo example[1-10]
""")
res = GroupResolverConfig(f.name)
self.assertEqual(len(res.sources()), 2)
self.assert_('local' in res.sources())
self.assert_('other' in res.sources())
def testConfigCrossRefs(self):
"""test groups config with cross references"""
f = make_temp_file("""
# A comment
[Main]
default: other
[local]
map: echo example[1-100]
[other]
map: echo "foo: @local:foo" | sed -n 's/^$GROUP:\(.*\)/\\1/p'
[third]
map: echo -e "bar: @ref-rel\\nref-rel: @other:foo\\nref-all: @*" | sed -n 's/^$GROUP:\(.*\)/\\1/p'
list: echo bar
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("@other:foo", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
# @third:bar -> @ref-rel (third) -> @other:foo -> @local:foo -> nodes
nodeset = NodeSet("@third:bar", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
nodeset = NodeSet("@third:ref-all", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
def testConfigGroupsDirDummy(self):
"""test groups with groupsdir defined (dummy)"""
f = make_temp_file("""
[Main]
default: local
groupsdir: /path/to/nowhere
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@foo")
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-100]")
def testConfigGroupsDirExists(self):
"""test groups with groupsdir defined (real, other)"""
dname = make_temp_dir()
f = make_temp_file("""
[Main]
default: new_local
groupsdir: %s
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""" % dname)
f2 = make_temp_file("""
[new_local]
map: echo example[1-100]
#all:
list: echo bar
#reverse:
""", suffix=".conf", dir=dname)
try:
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@bar")
self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]")
finally:
f2.close()
f.close()
shutil.rmtree(dname, ignore_errors=True)
def testConfigGroupsMultipleDirs(self):
"""test groups with multiple confdir defined"""
dname1 = make_temp_dir()
dname2 = make_temp_dir()
# Notes:
# - use dname1 two times to check dup checking code
# - use quotes on one of the directory path
f = make_temp_file("""
[Main]
default: local2
confdir: "%s" %s %s
[local]
map: echo example[1-100]
list: echo foo
""" % (dname1, dname2, dname1))
fs1 = make_temp_file("""
[local1]
map: echo loc1node[1-100]
list: echo bar
""", suffix=".conf", dir=dname1)
fs2 = make_temp_file("""
[local2]
map: echo loc2node[02-50]
list: echo toto
""", suffix=".conf", dir=dname2)
try:
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
# local
self.assertEqual(nodeset.regroup("local"), "@local:foo")
self.assertEqual(str(NodeSet("@local:foo", resolver=res)), "example[1-100]")
# local1
nodeset = NodeSet("loc1node[1-100]", resolver=res)
self.assertEqual(nodeset.regroup("local1"), "@local1:bar")
self.assertEqual(str(NodeSet("@local1:bar", resolver=res)), "loc1node[1-100]")
# local2
nodeset = NodeSet("loc2node[02-50]", resolver=res)
self.assertEqual(nodeset.regroup(), "@toto") # default group source
self.assertEqual(str(NodeSet("@toto", resolver=res)), "loc2node[02-50]")
finally:
fs2.close()
fs1.close()
f.close()
shutil.rmtree(dname2, ignore_errors=True)
shutil.rmtree(dname1, ignore_errors=True)
def testConfigGroupsDirDupConfig(self):
"""test groups with duplicate in groupsdir"""
dname = make_temp_dir()
f = make_temp_file("""
[Main]
default: iamdup
groupsdir: %s
[local]
map: echo example[1-100]
#all:
list: echo foo
#reverse:
""" % dname)
f2 = make_temp_file("""
[iamdup]
map: echo example[1-100]
#all:
list: echo bar
#reverse:
""", suffix=".conf", dir=dname)
f3 = make_temp_file("""
[iamdup]
map: echo example[10-200]
#all:
list: echo patato
#reverse:
""", suffix=".conf", dir=dname)
try:
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
finally:
f3.close()
f2.close()
f.close()
shutil.rmtree(dname, ignore_errors=True)
def testConfigGroupsDirExistsNoOther(self):
"""test groups with groupsdir defined (real, no other)"""
dname1 = make_temp_dir()
dname2 = make_temp_dir()
f = make_temp_file("""
[Main]
default: new_local
groupsdir: %s %s
""" % (dname1, dname2))
f2 = make_temp_file("""
[new_local]
map: echo example[1-100]
#all:
list: echo bar
#reverse:
""", suffix=".conf", dir=dname2)
try:
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@bar")
self.assertEqual(str(NodeSet("@bar", resolver=res)), "example[1-100]")
finally:
f2.close()
f.close()
shutil.rmtree(dname1, ignore_errors=True)
shutil.rmtree(dname2, ignore_errors=True)
def testConfigGroupsDirNotADirectory(self):
"""test groups with groupsdir defined (not a directory)"""
dname = make_temp_dir()
fdummy = make_temp_file("wrong")
f = make_temp_file("""
[Main]
default: new_local
groupsdir: %s
""" % fdummy.name)
try:
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
finally:
fdummy.close()
f.close()
shutil.rmtree(dname, ignore_errors=True)
def testConfigIllegalChars(self):
"""test groups with illegal characters"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo example[1-100]
#all:
list: echo 'foo *'
reverse: echo f^oo
""")
res = GroupResolverConfig(f.name, illegal_chars=set("@,&!&^*"))
nodeset = NodeSet("example[1-100]", resolver=res)
self.assertRaises(GroupResolverIllegalCharError, nodeset.groups)
self.assertRaises(GroupResolverIllegalCharError, nodeset.regroup)
def testConfigMaxRecursionError(self):
"""test groups maximum recursion depth exceeded error"""
f = make_temp_file("""
# A comment
[Main]
default: local
[local]
map: echo @deep
list: echo deep
""")
res = GroupResolverConfig(f.name)
self.assertRaises(NodeSetParseError, NodeSet, "@deep", resolver=res)
def testGroupResolverND(self):
"""test NodeSet with simple custom GroupResolver (nD)"""
test_groups4 = makeTestG4()
source = UpcallGroupSource("simple",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups4.name,
"sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups4.name,
"sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups4.name,
None)
# create custom resolver with default source
res = GroupResolver(source)
self.assertFalse(res.has_node_groups())
self.assertFalse(res.has_node_groups("dummy_namespace"))
nodeset = NodeSet("@rack-x1y2", resolver=res)
self.assertEqual(nodeset, NodeSet("idaho[2-3]z1"))
self.assertEqual(str(nodeset), "idaho[2-3]z1")
nodeset = NodeSet("@rack-y1", resolver=res)
self.assertEqual(str(nodeset), "idaho[1-2,4-5]z1")
nodeset = NodeSet("@rack-all", resolver=res)
self.assertEqual(str(nodeset), "idaho[1-7]z1")
# test NESTED nD groups()
self.assertEqual(sorted(nodeset.groups().keys()),
['@rack-all', '@rack-x1', '@rack-x1y1', '@rack-x1y2',
'@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1',
'@rack-y2'])
self.assertEqual(sorted(nodeset.groups(groupsource="simple").keys()),
['@simple:rack-all', '@simple:rack-x1',
'@simple:rack-x1y1', '@simple:rack-x1y2',
'@simple:rack-x2', '@simple:rack-x2y1',
'@simple:rack-x2y2', '@simple:rack-y1',
'@simple:rack-y2'])
self.assertEqual(sorted(nodeset.groups(groupsource="simple",
noprefix=True).keys()),
['@rack-all', '@rack-x1', '@rack-x1y1', '@rack-x1y2',
'@rack-x2', '@rack-x2y1', '@rack-x2y2', '@rack-y1',
'@rack-y2'])
testns = NodeSet()
for gnodes, inodes in nodeset.groups().itervalues():
testns.update(inodes)
self.assertEqual(testns, nodeset)
# more tests with nested groups
nodeset = NodeSet("idaho5z1", resolver=res)
self.assertEqual(sorted(nodeset.groups().keys()),
['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-y1'])
nodeset = NodeSet("idaho5z1,idaho4z1", resolver=res)
self.assertEqual(sorted(nodeset.groups().keys()),
['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-y1'])
nodeset = NodeSet("idaho5z1,idaho7z1", resolver=res)
self.assertEqual(sorted(nodeset.groups().keys()),
['@rack-all', '@rack-x2', '@rack-x2y1', '@rack-x2y2',
'@rack-y1', '@rack-y2'])
def testConfigCFGDIR(self):
"""test groups with $CFGDIR use in upcalls"""
f = make_temp_file("""
[Main]
default: local
[local]
map: echo example[1-100]
list: basename $CFGDIR
""")
res = GroupResolverConfig(f.name)
nodeset = NodeSet("example[1-100]", resolver=res)
# just a trick to check $CFGDIR resolution...
tmpgroup = os.path.basename(os.path.dirname(f.name))
self.assertEqual(nodeset.groups().keys(), ['@%s' % tmpgroup])
self.assertEqual(str(nodeset), "example[1-100]")
self.assertEqual(nodeset.regroup(), "@%s" % tmpgroup)
self.assertEqual(str(NodeSet("@%s" % tmpgroup, resolver=res)),
"example[1-100]")
def test_fromall_grouplist(self):
"""test NodeSet.fromall() without all upcall"""
# Group Source that has no all upcall and that can handle special char
test_groups2 = makeTestG2()
source = UpcallGroupSource("simple",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups2.name,
None,
"sed -n 's/^\([0-9A-Za-z_-\%%]*\):.*/\\1/p' %s"
% test_groups2.name,
None)
res = GroupResolver(source)
# fromall will trigger ParserEngine.grouplist() that we want to test here
nsall = NodeSet.fromall(resolver=res)
# if working, group resolution worked with % char
self.assertEqual(str(NodeSet.fromall(resolver=res)), "montana[32-55,87-90]")
self.assertEqual(len(nsall), 28)
# btw explicitly check escaped char
nsesc = NodeSet('@escape%test', resolver=res)
self.assertEqual(str(nsesc), 'montana[87-90]')
self.assertEqual(len(nsesc), 4)
nsesc2 = NodeSet('@esc%test2', resolver=res)
self.assertEqual(nsesc, nsesc2)
ns = NodeSet('montana[87-90]', resolver=res)
# could also result in escape%test?
self.assertEqual(ns.regroup(), '@esc%test2')
class NodeSetGroup2GSTest(unittest.TestCase):
def setUp(self):
"""configure simple RESOLVER_STD_GROUP"""
# create temporary groups file and keep a reference to avoid file closing
self.test_groups1 = makeTestG1()
self.test_groups2 = makeTestG2()
# create 2 GroupSource objects
default = UpcallGroupSource("default",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups1.name,
"sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups1.name,
"sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups1.name,
None)
source2 = UpcallGroupSource("source2",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % self.test_groups2.name,
"sed -n 's/^all:\(.*\)/\\1/p' %s" % self.test_groups2.name,
"sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % self.test_groups2.name,
None)
resolver = GroupResolver(default)
resolver.add_source(source2)
set_std_group_resolver(resolver)
def tearDown(self):
"""restore default RESOLVER_STD_GROUP"""
set_std_group_resolver(None)
del self.test_groups1
del self.test_groups2
def testGroupSyntaxes(self):
"""test NodeSet group operation syntaxes"""
nodeset = NodeSet("@gpu")
self.assertEqual(str(nodeset), "montana[38-41]")
nodeset = NodeSet("@chassis[1-3,5]&@chassis[2-3]")
self.assertEqual(str(nodeset), "montana[34-37]")
nodeset1 = NodeSet("@io!@mds")
nodeset2 = NodeSet("@oss")
self.assertEqual(str(nodeset1), str(nodeset2))
self.assertEqual(str(nodeset1), "montana[4-5]")
def testGroupListDefault(self):
"""test NodeSet group listing GroupResolver.grouplist()"""
groups = std_group_resolver().grouplist()
self.assertEqual(len(groups), 20)
helper_groups = grouplist()
self.assertEqual(len(helper_groups), 20)
total = 0
nodes = NodeSet()
for group in groups:
ns = NodeSet("@%s" % group)
total += len(ns)
nodes.update(ns)
self.assertEqual(total, 310)
all_nodes = NodeSet.fromall()
self.assertEqual(len(all_nodes), len(nodes))
self.assertEqual(all_nodes, nodes)
def testGroupListSource2(self):
"""test NodeSet group listing GroupResolver.grouplist(source)"""
groups = std_group_resolver().grouplist("source2")
self.assertEqual(len(groups), 2)
total = 0
for group in groups:
total += len(NodeSet("@source2:%s" % group))
self.assertEqual(total, 24)
def testGroupNoPrefix(self):
"""test NodeSet group noprefix option"""
nodeset = NodeSet("montana[32-37,42-55]")
self.assertEqual(nodeset.regroup("source2"), "@source2:para")
self.assertEqual(nodeset.regroup("source2", noprefix=True), "@para")
def testGroupGroups(self):
"""test NodeSet.groups()"""
nodeset = NodeSet("montana[32-37,42-55]")
self.assertEqual(sorted(nodeset.groups().keys()), ['@all', '@chassis1', '@chassis10', '@chassis11', '@chassis12', '@chassis2', '@chassis3', '@chassis6', '@chassis7', '@chassis8', '@chassis9', '@compute'])
testns = NodeSet()
for gnodes, inodes in nodeset.groups().itervalues():
testns.update(inodes)
self.assertEqual(testns, nodeset)
class NodeSetRegroupTest(unittest.TestCase):
def setUp(self):
"""setUp test reproducibility: change standard group resolver
to ensure that no local group source is used during tests"""
set_std_group_resolver(GroupResolver()) # dummy resolver
def tearDown(self):
"""tearDown: restore standard group resolver"""
set_std_group_resolver(None) # restore std resolver
def testGroupResolverReverse(self):
"""test NodeSet GroupResolver with reverse upcall"""
test_groups3 = makeTestG3()
test_reverse3 = makeTestR3()
source = UpcallGroupSource("test",
"sed -n 's/^$GROUP:\(.*\)/\\1/p' %s" % test_groups3.name,
"sed -n 's/^all:\(.*\)/\\1/p' %s" % test_groups3.name,
"sed -n 's/^\([0-9A-Za-z_-]*\):.*/\\1/p' %s" % test_groups3.name,
"awk -F: '/^$NODE:/ { gsub(\",\",\"\\n\",$2); print $2 }' %s" % test_reverse3.name)
# create custom resolver with default source
res = GroupResolver(source)
nodeset = NodeSet("@all", resolver=res)
self.assertEqual(nodeset, NodeSet("montana[32-55]"))
self.assertEqual(str(nodeset), "montana[32-55]")
self.assertEqual(nodeset.regroup(), "@all")
self.assertEqual(nodeset.regroup(), "@all")
nodeset = NodeSet("@overclock", resolver=res)
self.assertEqual(nodeset, NodeSet("montana[41-42]"))
self.assertEqual(str(nodeset), "montana[41-42]")
self.assertEqual(nodeset.regroup(), "@overclock")
self.assertEqual(nodeset.regroup(), "@overclock")
nodeset = NodeSet("@gpu,@overclock", resolver=res)
self.assertEqual(str(nodeset), "montana[38-42]")
self.assertEqual(nodeset, NodeSet("montana[38-42]"))
# un-overlap :)
self.assertEqual(nodeset.regroup(), "@gpu,montana42")
self.assertEqual(nodeset.regroup(), "@gpu,montana42")
self.assertEqual(nodeset.regroup(overlap=True), "@gpu,@overclock")
nodeset = NodeSet("montana41", resolver=res)
self.assertEqual(nodeset.regroup(), "montana41")
self.assertEqual(nodeset.regroup(), "montana41")
# test regroup code when using unindexed node
nodeset = NodeSet("idaho", resolver=res)
self.assertEqual(nodeset.regroup(), "@single")
self.assertEqual(nodeset.regroup(), "@single")
nodeset = NodeSet("@single", resolver=res)
self.assertEqual(str(nodeset), "idaho")
# unresolved unindexed:
nodeset = NodeSet("utah", resolver=res)
self.assertEqual(nodeset.regroup(), "utah")
self.assertEqual(nodeset.regroup(), "utah")
nodeset = NodeSet("@all!montana38", resolver=res)
self.assertEqual(nodeset, NodeSet("montana[32-37,39-55]"))
self.assertEqual(str(nodeset), "montana[32-37,39-55]")
self.assertEqual(nodeset.regroup(), "@para,montana[39-41]")
self.assertEqual(nodeset.regroup(), "@para,montana[39-41]")
self.assertEqual(nodeset.regroup(overlap=True),
"@chassis[1-3],@login,@overclock,@para,montana[39-40]")
self.assertEqual(nodeset.regroup(overlap=True),
"@chassis[1-3],@login,@overclock,@para,montana[39-40]")
nodeset = NodeSet("montana[32-37]", resolver=res)
self.assertEqual(nodeset.regroup(), "@chassis[1-3]")
self.assertEqual(nodeset.regroup(), "@chassis[1-3]")
class StaticGroupSource(UpcallGroupSource):
"""
A memory only group source based on a provided dict.
"""
def __init__(self, name, data):
all_upcall = None
if 'all' in data:
all_upcall = 'fake_all'
list_upcall = None
if 'list' in data:
list_upcall = 'fake_list'
UpcallGroupSource.__init__(self, name, "fake_map", all_upcall, list_upcall)
self._data = data
def _upcall_read(self, cmdtpl, args=dict()):
if cmdtpl == 'map':
return self._data[cmdtpl].get(args['GROUP'])
elif cmdtpl == 'reverse':
return self._data[cmdtpl].get(args['NODE'])
else:
return self._data[cmdtpl]
class GroupSourceCacheTest(unittest.TestCase):
def test_clear_cache(self):
"""test GroupSource.clear_cache()"""
source = StaticGroupSource('cache', {'map': {'a': 'foo1', 'b': 'foo2'} })
# create custom resolver with default source
res = GroupResolver(source)
# Populate map cache
self.assertEqual("foo1", str(NodeSet("@a", resolver=res)))
self.assertEqual("foo2", str(NodeSet("@b", resolver=res)))
self.assertEqual(len(source._cache['map']), 2)
# Clear cache
source.clear_cache()
self.assertEqual(len(source._cache['map']), 0)
def test_expired_cache(self):
"""test UpcallGroupSource cache entries expired according to config"""
# create custom resolver with default source
source = StaticGroupSource('cache', {'map': {'a': 'foo1', 'b': 'foo2'} })
source.cache_time = 0.2
res = GroupResolver(source)
# Populate map cache
self.assertEqual("foo1", str(NodeSet("@a", resolver=res)))
self.assertEqual("foo2", str(NodeSet("@b", resolver=res)))
self.assertEqual(len(source._cache['map']), 2)
# Be sure 0.2 cache time is expired (especially for old Python version)
time.sleep(0.25)
source._data['map']['a'] = 'something_else'
self.assertEqual('something_else', str(NodeSet("@a", resolver=res)))
def test_config_cache_time(self):
"""test group config cache_time options"""
f = make_temp_file("""
[local]
cache_time: 0.2
map: echo foo1
""")
res = GroupResolverConfig(f.name)
self.assertEqual(res._sources['local'].cache_time, 0.2)
self.assertEqual("foo1", str(NodeSet("@local:foo", resolver=res)))
class GroupSourceTest(unittest.TestCase):
"""Test class for 1.7 dict-based GroupSource"""
def test_base_class0(self):
"""test base GroupSource class (empty)"""
gs = GroupSource("emptysrc")
self.assertEqual(gs.resolv_map('gr1'), '')
self.assertEqual(gs.resolv_map('gr2'), '')
self.assertEqual(gs.resolv_list(), [])
self.assertRaises(GroupSourceNoUpcall, gs.resolv_all)
self.assertRaises(GroupSourceNoUpcall, gs.resolv_reverse, 'n4')
def test_base_class1(self):
"""test base GroupSource class (map and list)"""
gs = GroupSource("testsrc", { 'gr1': ['n1', 'n4', 'n3', 'n2'],
'gr2': ['n9', 'n4'] })
self.assertEqual(gs.resolv_map('gr1'), ['n1', 'n4', 'n3', 'n2'])
self.assertEqual(gs.resolv_map('gr2'), ['n9', 'n4'])
self.assertEqual(sorted(gs.resolv_list()), ['gr1', 'gr2'])
self.assertRaises(GroupSourceNoUpcall, gs.resolv_all)
self.assertRaises(GroupSourceNoUpcall, gs.resolv_reverse, 'n4')
def test_base_class2(self):
"""test base GroupSource class (all)"""
gs = GroupSource("testsrc", { 'gr1': ['n1', 'n4', 'n3', 'n2'],
'gr2': ['n9', 'n4'] },
'n[1-9]')
self.assertEqual(gs.resolv_all(), 'n[1-9]')
class YAMLGroupLoaderTest(unittest.TestCase):
def test_missing_pyyaml(self):
"""test YAMLGroupLoader with missing PyYAML"""
sys_path_saved = sys.path
try:
sys.path = [] # make import yaml failed
if 'yaml' in sys.modules:
# forget about previous yaml import
del sys.modules['yaml']
f = make_temp_file("""
vendors:
apricot: node""")
self.assertRaises(GroupResolverConfigError, YAMLGroupLoader,
f.name)
finally:
sys.path = sys_path_saved
def test_one_source(self):
"""test YAMLGroupLoader one source"""
f = make_temp_file("""
vendors:
apricot: node""")
loader = YAMLGroupLoader(f.name)
sources = list(loader)
self.assertEqual(len(sources), 1)
self.assertEqual(loader.groups("vendors"),
{ 'apricot': 'node' })
def test_multi_sources(self):
"""test YAMLGroupLoader multi sources"""
f = make_temp_file("""
vendors:
apricot: node
customers:
cherry: client-4-2""")
loader = YAMLGroupLoader(f.name)
sources = list(loader)
self.assertEqual(len(sources), 2)
self.assertEqual(loader.groups("vendors"),
{ 'apricot': 'node' })
self.assertEqual(loader.groups("customers"),
{ 'cherry': 'client-4-2' })
def test_reload(self):
"""test YAMLGroupLoader cache_time"""
f = make_temp_file("""
vendors:
apricot: "node[1-10]"
avocado: 'node[11-20]'
banana: node[21-30]
customers:
cherry: client-4-2""")
loader = YAMLGroupLoader(f.name, cache_time=1)
self.assertEqual(loader.groups("vendors"),
{ 'apricot': 'node[1-10]',
'avocado': 'node[11-20]',
'banana': 'node[21-30]' })
# modify YAML file and check that it is reloaded after cache_time
f.write("\n nut: node42\n")
# oh and BTW for ultimate code coverage, test if we add a new source
# on-the-fly, this is not supported but should be ignored
f.write("thieves:\n pomegranate: node100\n")
f.flush()
time.sleep(0.1)
# too soon
self.assertEqual(loader.groups("customers"),
{ 'cherry': 'client-4-2' })
time.sleep(1.0)
self.assertEqual(loader.groups("vendors"),
{ 'apricot': 'node[1-10]',
'avocado': 'node[11-20]',
'banana': 'node[21-30]' })
self.assertEqual(loader.groups("customers"),
{ 'cherry': 'client-4-2',
'nut': 'node42' })
def test_iter(self):
"""test YAMLGroupLoader iterator"""
f = make_temp_file("""
src1:
src1grp1: node11
src1grp2: node12
src2:
src2grp1: node21
src2grp2: node22
src3:
src3grp1: node31
src3grp2: node32""")
loader = YAMLGroupLoader(f.name, cache_time = 0.1)
# iterate sources with cache expired
for source in loader:
time.sleep(0.5) # force reload
self.assertEqual(len(source.groups), 2)
class GroupResolverYAMLTest(unittest.TestCase):
def setUp(self):
"""setUp test reproducibility: change standard group resolver
to ensure that no local group source is used during tests"""
set_std_group_resolver(GroupResolver()) # dummy resolver
def tearDown(self):
"""tearDown: restore standard group resolver"""
set_std_group_resolver(None) # restore std resolver
def test_yaml_basic(self):
"""test groups with a basic YAML config file"""
dname = make_temp_dir()
f = make_temp_file("""
# A comment
[Main]
default: yaml
autodir: %s
""" % dname)
yamlfile = make_temp_file("""
yaml:
foo: example[1-4,91-100],example90
bar: example[5-89]
""", suffix=".yaml", dir=dname)
res = GroupResolverConfig(f.name)
# Group resolution
nodeset = NodeSet("@foo", resolver=res)
self.assertEqual(str(nodeset), "example[1-4,90-100]")
nodeset = NodeSet("@bar", resolver=res)
self.assertEqual(str(nodeset), "example[5-89]")
nodeset = NodeSet("@foo,@bar", resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
nodeset = NodeSet("@unknown", resolver=res)
self.assertEqual(len(nodeset), 0)
# Regroup
nodeset = NodeSet("example[1-4,90-100]", resolver=res)
self.assertEqual(str(nodeset), "example[1-4,90-100]")
self.assertEqual(nodeset.regroup(), "@foo")
self.assertEqual(nodeset.groups().keys(), ["@foo"])
self.assertEqual(str(NodeSet("@foo", resolver=res)), "example[1-4,90-100]")
# No 'all' defined: all_nodes() should raise an error
self.assertRaises(GroupSourceError, res.all_nodes)
# but then NodeSet falls back to the union of all groups
nodeset = NodeSet.fromall(resolver=res)
self.assertEqual(str(nodeset), "example[1-100]")
# regroup doesn't use @all in that case
self.assertEqual(nodeset.regroup(), "@bar,@foo")
# No 'reverse' defined: node_groups() should raise an error
self.assertRaises(GroupSourceError, res.node_groups, "example1")
# regroup with rest
nodeset = NodeSet("example[1-101]", resolver=res)
self.assertEqual(nodeset.regroup(), "@bar,@foo,example101")
# regroup incomplete
nodeset = NodeSet("example[50-200]", resolver=res)
self.assertEqual(nodeset.regroup(), "example[50-200]")
# regroup no matching
nodeset = NodeSet("example[102-200]", resolver=res)
self.assertEqual(nodeset.regroup(), "example[102-200]")
def test_yaml_fromall(self):
"""test groups special all group"""
dname = make_temp_dir()
f = make_temp_file("""
[Main]
default: yaml
autodir: %s
""" % dname)
yamlfile = make_temp_file("""
yaml:
foo: example[1-4,91-100],example90
bar: example[5-89]
all: example[90-100]
""", suffix=".yaml", dir=dname)
res = GroupResolverConfig(f.name)
nodeset = NodeSet.fromall(resolver=res)
self.assertEqual(str(nodeset), "example[90-100]")
# regroup uses @all if it is defined
self.assertEqual(nodeset.regroup(), "@all")
def test_yaml_invalid_groups_not_dict(self):
"""test groups with an invalid YAML config file (1)"""
dname = make_temp_dir()
f = make_temp_file("""
[Main]
default: yaml
autodir: %s
""" % dname)
yamlfile = make_temp_file("""
yaml: bar
""", suffix=".yaml", dir=dname)
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
def test_yaml_invalid_root_dict(self):
"""test groups with an invalid YAML config file (2)"""
dname = make_temp_dir()
f = make_temp_file("""
[Main]
default: yaml
autodir: %s
""" % dname)
yamlfile = make_temp_file("""
- Casablanca
- North by Northwest
- The Man Who Wasn't There
""", suffix=".yaml", dir=dname)
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
def test_yaml_invalid_not_yaml(self):
"""test groups with an invalid YAML config file (3)"""
dname = make_temp_dir()
f = make_temp_file("""
[Main]
default: yaml
autodir: %s
""" % dname)
yamlfile = make_temp_file("""
[Dummy]
one: un
two: deux
three: trois
""", suffix=".yaml", dir=dname)
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
def test_wrong_autodir(self):
"""test wrong autodir (doesn't exist)"""
f = make_temp_file("""
[Main]
autodir: /i/do/not/=exist=
default: local
""")
# absent autodir itself doesn't raise any exception, but default
# pointing to nothing does...
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
def test_wrong_autodir_is_file(self):
"""test wrong autodir (is a file)"""
fe = make_temp_file("")
f = make_temp_file("""
[Main]
autodir: %s
default: local
[local]
map: node
""" % fe.name)
self.assertRaises(GroupResolverConfigError, GroupResolverConfig, f.name)
|
python
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import io
import math
import numpy as np
import random
import torch
from PIL import Image
def temporal_sampling(hdf5_video, hdf5_video_key, start_idx, end_idx, num_samples, video_length):
"""
Given the start and end frame index, sample num_samples frames between
the start and end with equal interval.
    Args:
        hdf5_video: opened HDF5 container holding the encoded video frames.
        hdf5_video_key (str): dataset key of the video inside the container.
        start_idx (int): the index of the start frame.
        end_idx (int): the index of the end frame.
        num_samples (int): number of frames to sample.
        video_length (int): total number of frames in the video.
    Returns:
        frames (tensor): a tensor of temporally sampled video frames, dimension is
            `num clip frames` x `height` x `width` x `channel`.
"""
index = torch.linspace(start_idx, end_idx, num_samples)
index = torch.clamp(index, 0, video_length - 1).long().tolist()
try:
data = hdf5_video[hdf5_video_key][index]
except:
data = [hdf5_video[hdf5_video_key][i] for i in index]
try:
frames = []
for raw_frame in data:
frames.append(np.asarray(Image.open(io.BytesIO(raw_frame)).convert('RGB')))
except:
print(f'{hdf5_video_key}, {start_idx}, {end_idx}')
frames = torch.as_tensor(np.stack(frames))
return frames
def get_start_end_idx(video_size, clip_size, clip_idx, num_clips):
"""
Sample a clip of size clip_size from a video of size video_size and
return the indices of the first and last frame of the clip. If clip_idx is
-1, the clip is randomly sampled, otherwise uniformly split the video to
num_clips clips, and select the start and end index of clip_idx-th video
clip.
Args:
video_size (int): number of overall frames.
clip_size (int): size of the clip to sample from the frames.
clip_idx (int): if clip_idx is -1, perform random jitter sampling. If
clip_idx is larger than -1, uniformly split the video to num_clips
clips, and select the start and end index of the clip_idx-th video
clip.
num_clips (int): overall number of clips to uniformly sample from the
given video for testing.
Returns:
start_idx (int): the start frame index.
end_idx (int): the end frame index.
"""
delta = max(video_size - clip_size, 0)
if clip_idx == -1:
# Random temporal sampling.
start_idx = random.uniform(0, delta)
else:
# Uniformly sample the clip with the given index.
start_idx = delta * clip_idx / num_clips
end_idx = start_idx + clip_size - 1
return start_idx, end_idx
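# Worked example (illustrative comment only, values are hypothetical):
#   get_start_end_idx(video_size=300, clip_size=64, clip_idx=1, num_clips=3)
#   -> delta = 300 - 64 = 236, start_idx = 236 * 1 / 3 ≈ 78.67, end_idx ≈ 141.67
#   i.e. the middle clip of three evenly spaced 64-frame clips is selected.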
def decode(
hdf5_video,
hdf5_video_key,
sampling_rate,
num_frames,
clip_idx=-1,
num_clips=10,
video_meta=None,
target_fps=30,
max_spatial_scale=0,
):
"""
Decode the video and perform temporal sampling.
Args:
hdf5_video (container): hdf5 video.
sampling_rate (int): frame sampling rate (interval between two sampled
frames).
num_frames (int): number of frames to sample.
clip_idx (int): if clip_idx is -1, perform random temporal
sampling. If clip_idx is larger than -1, uniformly split the
video to num_clips clips, and select the
clip_idx-th video clip.
num_clips (int): overall number of clips to uniformly
sample from the given video.
        video_meta (dict): a dict containing VideoMetaData. Details can be found
            at `pytorch/vision/torchvision/io/_video_opt.py`.
target_fps (int): the input video may have different fps, convert it to
the target video fps before frame sampling.
max_spatial_scale (int): keep the aspect ratio and resize the frame so
that shorter edge size is max_spatial_scale. Only used in
`torchvision` backend.
Returns:
frames (tensor): decoded frames from the video.
"""
assert clip_idx >= -1, "Not valid clip_idx {}".format(clip_idx)
# Perform selective decoding.
if 'fps' in video_meta:
sampling_fps = num_frames * sampling_rate * video_meta['fps'] / target_fps
else:
print('Warning: no FPS info found!')
sampling_fps = num_frames * sampling_rate
start_idx, end_idx = get_start_end_idx(
video_meta['num_frames'],
sampling_fps,
clip_idx,
num_clips,
)
# Perform temporal sampling from the decoded video.
frames = temporal_sampling(hdf5_video, hdf5_video_key, start_idx, end_idx, num_frames, video_meta['num_frames'])
return frames
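# --- Usage sketch (added example; file name, dataset key and metadata are hypothetical) ---
# Assumes an HDF5 file whose dataset "video_001" stores JPEG-encoded frames and
# whose fps/frame count are known from an external index.
if __name__ == "__main__":
    import h5py  # assumed to be installed alongside torch/PIL
    with h5py.File("videos.h5", "r") as hdf5_video:
        video_meta = {"fps": 30, "num_frames": 300}
        clip = decode(
            hdf5_video,
            "video_001",
            sampling_rate=2,
            num_frames=8,
            clip_idx=0,
            num_clips=3,
            video_meta=video_meta,
            target_fps=30,
        )
        print(clip.shape)  # expected (8, H, W, 3): 8 sampled RGB frames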
|
python
|
from setuptools import setup, Extension
import setuptools
stream_json_parser_utils = Extension('stream_json_parser_utils', extra_compile_args = [ '--std=c++11' ], sources = [ 'utils/src/utils.cpp' ])
if __name__ == '__main__':
setup(
name='streamjsonparser',
version='1.1',
description='Stream Json Parser',
ext_modules=[stream_json_parser_utils],
author='Andrey Lysenko',
author_email='[email protected]',
packages=setuptools.find_packages(),
include_package_data=True,
zip_safe=False
)
|
python
|
from collections import OrderedDict
from datetime import datetime, timezone
from pytest import raises
from qsck import serialize
def test_serialize_is_a_function():
assert hasattr(serialize, '__call__')
def test_it_formats_identifier_and_timestamp_as_unix_epoch():
identifier = 'LOG'
some_key_value_pairs = [("It's Caturday?", 'YES')]
dt_timestamp = datetime(2019, 3, 23, 1, 2, 3, tzinfo=timezone.utc)
str_timestamp = "1553302923"
int_timestamp = int(str_timestamp)
for timestamp in (dt_timestamp, str_timestamp, int_timestamp):
qs_row = serialize(identifier, timestamp, some_key_value_pairs)
assert qs_row.startswith("LOG,1553302923,It's Caturday?=")
def test_it_rejects_future_and_far_past_and_mistyped_timestamps():
identifier = 'GOL'
far_past_timestamp = datetime(1999, 12, 31, 0, 0, 0, tzinfo=timezone.utc)
with raises(AssertionError):
serialize(identifier, far_past_timestamp, [])
future_timestamp = int(datetime.utcnow().timestamp() + 30)
with raises(AssertionError):
serialize(identifier, future_timestamp, [])
text_timestamp = 'First day of April, 2015'
with raises(ValueError):
serialize(identifier, text_timestamp, [])
mistyped_timestamp = [2019, 1, 3, 16, 1, 30]
with raises(TypeError):
serialize(identifier, mistyped_timestamp, [])
def test_it_rejects_malformatted_and_mistyped_key_value_pairs():
identifier = 'LOG'
timestamp = datetime.utcnow()
malformatted_key_value_pairs = [('hey', 'you'), ("i'm", "one", "too many")]
with raises(ValueError):
serialize(identifier, timestamp, malformatted_key_value_pairs)
mistyped_key_value_pair = [("here's a boolean", False)]
with raises(TypeError):
serialize(identifier, timestamp, mistyped_key_value_pair)
def test_it_formats_null_values_as_funky_strings():
identifier = 'LOG'
timestamp = datetime.utcnow()
key_value_pairs_with_none_values = [
('good_ideas', None), ('bad ideas', 'plenty'), ('newType', None)]
qs_row = serialize(identifier, timestamp, key_value_pairs_with_none_values)
assert ',good_ideas=(null),bad ideas=plenty,newType=(null)' in qs_row
def test_it_does_the_nested_key_value_formatting_on_root_level_list_values():
identifier = 'LOG'
timestamp = datetime.utcnow()
key_value_pairs_with_list_values = [
('howdy', None),
('empty', []),
('my_nest', [('sub_key1', 'foo'), ('sk2', 'bar')]),
('nest2', [('a', '1')]),
('otherStuff', 'ok')
]
qs_row = serialize(identifier, timestamp, key_value_pairs_with_list_values)
assert ',empty={},my_nest={sub_key1=foo, sk2=bar},nest2={a=1},oth' in qs_row
def test_it_does_the_nested_key_value_formatting_on_root_level_dict_values():
identifier = 'LOG'
timestamp = datetime.utcnow()
key_value_pairs_with_dict_values = [
('howdy', None),
('empty', {}),
('nest3', OrderedDict([('k31', 2.0), ('k32', 0)])),
('nest4', {'y': 3, 'x': 1}),
('moarStuff', '!')
]
qs_row = serialize(identifier, timestamp, key_value_pairs_with_dict_values)
assert ',empty={},nest3={"k31":2.0,"k32":0},nest4={"y":3,"x":1},m' in qs_row
def test_each_output_records_ends_with_newline():
identifier = 'FOO'
timestamp = datetime.utcnow()
some_key_value_pairs = [('theOnly', 'One')]
qs_row = serialize(identifier, timestamp, some_key_value_pairs)
assert qs_row.endswith(',theOnly=One\n')
|
python
|
"""
This builds static figures with the dataset from
the Google Drive folder "study_mearec_SqMEA1015um".
"""
import sys
sys.path.append('../../examples/modules/comparison/')
from generate_erroneous_sorting import generate_erroneous_sorting
import spikeinterface.extractors as se
import spikeinterface.comparison as sc
import spikeinterface.widgets as sw
import numpy as np
import matplotlib.pyplot as plt
def make_comparison_figures():
gt_sorting, tested_sorting = generate_erroneous_sorting()
comp = sc.compare_sorter_to_ground_truth(gt_sorting, tested_sorting, gt_name=None, tested_name=None,
delta_time=0.4, sampling_frequency=None, min_accuracy=0.5, exhaustive_gt=True, match_mode='hungarian',
n_jobs=-1, bad_redundant_threshold=0.2, compute_labels=False, verbose=False)
print(comp.hungarian_match_12)
fig, ax = plt.subplots()
im = ax.matshow(comp.match_event_count, cmap='Greens')
ax.set_xticks(np.arange(0, comp.match_event_count.shape[1]))
ax.set_yticks(np.arange(0, comp.match_event_count.shape[0]))
ax.xaxis.tick_bottom()
ax.set_yticklabels(comp.match_event_count.index, fontsize=12)
ax.set_xticklabels(comp.match_event_count.columns, fontsize=12)
fig.colorbar(im)
fig.savefig('spikecomparison_match_count.png')
fig, ax = plt.subplots()
sw.plot_agreement_matrix(comp, ax=ax, ordered=False)
im = ax.get_images()[0]
fig.colorbar(im)
fig.savefig('spikecomparison_agreement_unordered.png')
fig, ax = plt.subplots()
sw.plot_agreement_matrix(comp, ax=ax)
im = ax.get_images()[0]
fig.colorbar(im)
fig.savefig('spikecomparison_agreement.png')
fig, ax = plt.subplots()
sw.plot_confusion_matrix(comp, ax=ax)
im = ax.get_images()[0]
fig.colorbar(im)
fig.savefig('spikecomparison_confusion.png')
plt.show()
if __name__ == '__main__':
make_comparison_figures()
|
python
|
from optparse import OptionParser
from random import randint
import random
import sys
import io
import queue
#import statprof
from contextlib import contextmanager
#@contextmanager
#def stat_profiler():
#statprof.start()
#yield statprof
#statprof.stop()
#statprof.display()
class Edge:
def __init__(self, i, u, v, cap, flux=0):
self.i = i
self.u = u
self.v = v
self.cap = cap
self.flux = flux
self.res_cap = cap
self._inv = None
# @property
# def res_cap(self):
# return self.cap - self.flux
class Network:
def __init__(self, in_stream):
self.read(in_stream)
def read(self, in_stream):
n_source, n_junction, n_edges = [int(v) for v in in_stream.readline().split()]
self.n_vtx = n_source + n_junction
sources = range(0, n_source)
cap_sources = [ int(in_stream.readline()) for _ in range(n_source)]
self.total_source = sum(cap_sources)
edges = [ in_stream.readline().split() for s in range(n_edges)]
self.source_vtx = self.n_vtx
self.sink_vtx = self.n_vtx + 1
self.n_vtx += 2
self.vtk = [[] for _ in range(self.n_vtx)]
self.max_cap = 0
for e_line in edges:
v_in, v_out, cap = [int(v) for v in e_line]
self.add_edge(v_in, v_out, cap, cap)
self.max_cap += cap
for v_out, cap in zip(sources, cap_sources):
self.add_edge(self.source_vtx, v_out, cap, 0)
#self.set_reverse_edges()
def reinit(self):
# remove sink edges
for e in reversed(self.vtk[self.sink_vtx]):
self.rm_edge(e)
# zero fluxes
for u_edges in self.vtk:
for e in u_edges:
e.flux = 0
e.res_cap = e.cap
def add_sinks(self, sinks):
for v_in in sinks:
self.add_edge(v_in, self.sink_vtx, self.total_source, 0)
# def set_reverse_edges(self):
# for v_list in self.vtk:
# for uv in v_list:
# # find reverse edge
# for vu in self.vtk[uv.v]:
# if vu.v == uv.u:
# uv.inv = vu.i
# uv.inv = vu.i
# break
# else:
# vu = self.add_edge(uv.v, uv.u, 0)
# uv.inv = vu.i
# vu.inv = uv.i
def add_edge(self, v_in, v_out, cap_uv, cap_vu):
uv = Edge(len(self.vtk[v_in]), v_in, v_out, cap_uv)
vu = Edge(len(self.vtk[v_out]), v_out, v_in, cap_vu)
self.vtk[v_in].append(uv)
self.vtk[v_out].append(vu)
uv.inv = vu.i
vu.inv = uv.i
return uv
def rm_edge(self, uv):
i, j = uv.i, uv.inv
self.vtk[uv.u].pop(i)
self.vtk[uv.v].pop(j)
@property
def n_sources(self):
return len(self.vtk[self.source_vtx])
@property
def max_flux(self):
src_sum = sum([e.flux for e in self.vtk[self.source_vtx]] )
sink_sum = sum([-e.flux for e in self.vtk[self.sink_vtx]])
#assert src_sum == sink_sum
return src_sum
def send_flux(self, uv, flux):
vu = self.vtk[uv.v][uv.inv]
# print(uv.u, uv.v, uv.flux, uv.cap)
uv.flux += flux
uv.res_cap -= flux
vu.flux -= flux
vu.res_cap += flux
def find_and_improve_path(self, net, source_vtx, sink_vtx):
#BFS
distance = self.n_vtx * [ None ]
previous = distance.copy()
q = queue.Queue()
u = source_vtx
q.put(u)
distance[u] = 0
while not q.empty():
u = q.get()
d = distance[u]
if u == sink_vtx:
break
for uv in net[u]:
v = uv.v
if uv.res_cap > 0 and distance[v] is None:
q.put(v)
distance[v] = d + 1
previous[v] = uv
else:
# No improvement path
return False
# Improve
min_cap = self.max_cap
e_list = []
while True:
uv = previous[u]
if uv is None:
break
u = uv.u
min_cap = min(min_cap, uv.res_cap)
e_list.append(uv)
#print("Improve path: + ", min_cap)
for uv in e_list:
self.send_flux(uv, min_cap)
#print("Total flux: ", self.max_flux)
return True
def max_flux_edmons_karp(self):
while self.find_and_improve_path(self.vtk, self.source_vtx, self.sink_vtx):
pass
return self.max_flux
    def max_flux_dinic(self):
        # Dinic's algorithm is not implemented; the Edmonds-Karp method above is used.
        pass
    def make_result_sources(self):
        # total_source is the sum of all source capacities; the per-source fluxes
        # are only meaningful when the maximum flow saturates every source.
        if self.max_flux == self.total_source:
            return [e.flux for e in self.vtk[self.source_vtx]]
        else:
            return [0 for _ in range(self.n_sources)]
def solve(in_stream, out_stream):
net = Network(in_stream)
n_cases = int(in_stream.readline())
for _ in range(n_cases):
#print("====")
net.reinit()
sinks = [int(v) for v in in_stream.readline().split()]
net.add_sinks(sinks)
max_flux = net.max_flux_edmons_karp()
out_stream.write("{}\n".format(max_flux))
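def _demo_solve():
    """Added sketch (not part of the original): solve a tiny hand-built instance.
    Two sources (capacities 5 and 3) feed junction vertex 2 through edges of
    capacity 4; the single query asks for the maximum flow into sink vertex 2."""
    tiny = io.StringIO(
        "2 1 2\n"   # n_source n_junction n_edges
        "5\n"       # capacity of source vertex 0
        "3\n"       # capacity of source vertex 1
        "0 2 4\n"   # edge between vertices 0 and 2, capacity 4
        "1 2 4\n"   # edge between vertices 1 and 2, capacity 4
        "1\n"       # one query case
        "2\n")      # sink vertices of the query
    out = io.StringIO()
    solve(tiny, out)
    print(out.getvalue())  # expected "7": 4 units via source 0 plus 3 via source 1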
def make_data(in_stream, problem_size):
'''
1. Generate square grid graph with diagonal edges.
    2. Assign random integer capacities in the range 0-50.
'''
import numpy as np
# random texture in range 0, 1024
n_vtxs = problem_size * problem_size
n_sources = problem_size
n_junctions = problem_size * (problem_size - 1)
edges = []
dxy = [ (dx, dy) for dx in [-1, 0, 1] for dy in [-1, 0, 1] if (dx, dy) != (0, 0) ]
for ix in range(problem_size):
for iy in range(problem_size):
for dx, dy in dxy:
jx, jy = ix + dx, iy + dy
if 0 <= jx < problem_size and 0 <= jy < problem_size:
cap = randint(0, 50)
u = iy * problem_size + ix
v = jy * problem_size + jx
if u < v:
edges.append((u, v, cap))
sources = [ randint(1, 100) for _ in range(n_sources)]
sinks = list(range(n_vtxs))
problem_setup = io.StringIO()
problem_setup.write("{} {} {}\n".format(n_sources, n_junctions, len(edges)))
for s in sources:
problem_setup.write("{}\n".format(s))
for e in edges:
problem_setup.write("{} {} {}\n".format(*e))
n_cases = 2 + int(np.log(problem_size))
n_case_sinks = 3
problem_setup.write("{}\n".format(n_cases))
for _ in range(n_cases):
selection = np.random.choice(sinks, size=n_case_sinks, replace=False)
problem_setup.write("{} {} {}\n".format(*selection))
sys.stdout.write(problem_setup.getvalue())
out_stream = io.StringIO()
problem_setup.seek(0)
solve(problem_setup, out_stream)
#print("====")
#sys.stderr.write(out_stream.getvalue())
#print("====")
#res_stream = StringIO.StringIO()
#segment.image_to_stream(res_stream, head=False)
#assert (out_stream.getvalue() == res_stream.getvalue())
in_stream.write(problem_setup.getvalue())
'''
Main script body.
'''
parser = OptionParser()
parser.add_option("-p", "--problem-size", dest="size", help="Problem size.", default=None)
parser.add_option("-v", "--validate", action="store_true", dest="validate", help="program size", default=None)
parser.add_option("-r", dest="rand", default=False, help="Use non-deterministic algo")
options, args = parser.parse_args()
if options.rand:
random.seed(options.rand)
else:
random.seed(options.size)
if options.size is not None:
make_data(sys.stdout, int(options.size))
else:
solve(sys.stdin, sys.stdout)
|
python
|
from moneywave.account import Account
from moneywave.resources import Resource
from moneywave.transaction import Transaction
from moneywave.utils import Util, Settings
from moneywave.wallet import Wallet
class MoneyWave:
def __init__(self, api_key, secret_key, mode="test"):
self.settings = Settings(api_key, secret_key, mode)
self.__util = Util(self.settings)
self.Account = Account(self.__util)
self.Resource = Resource(self.__util)
self.Wallet = Wallet(self.__util)
self.Transaction = Transaction(self.__util)
def __get_auth_key(self):
pass
@property
def token(self):
return self.__util.token
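# Minimal usage sketch; the key strings below are placeholders, not real credentials.
if __name__ == "__main__":
    client = MoneyWave("MY_API_KEY", "MY_SECRET_KEY", mode="test")
    print(client.token)  # token obtained through the shared Util instance
    # sub-APIs are exposed as attributes: client.Account, client.Resource,
    # client.Wallet and client.Transaction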
|
python
|
class PropsdDictionary(dict):
"""A dictionary backed by propsd
This dictionary enables the use of a standard python dictionary
that is backed by the propsd service. The dictionary must be
refreshed by calling the refresh method.
"""
def __init__(self, propsd_client):
self.__propsd_client = propsd_client
def refresh(self):
"""Refreshes the dictionary with properties from propsd
"""
self.update(self.__propsd_client.properties())
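# Usage sketch: `propsd_client` stands for whatever client object the caller passes in;
# the only requirement visible here is a `.properties()` method returning a dict.
#
#   props = PropsdDictionary(propsd_client)
#   props.refresh()                # pull the latest properties from propsd
#   value = props.get("some.key")  # behaves like a plain dict afterwards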
|
python
|
import argparse
import seaborn as sns # noqa
from matplotlib import pyplot as plt
from pathlib import Path
from dask import dataframe as dd
import const
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', required=True)
args = parser.parse_args()
data_dir = Path(args.input)
path = str(data_dir/'*.csv')
df = dd.read_csv(path).compute()
df['model'] = df['model'].map(const.models)
df = df.sort_values(['dataset', 'model']).reset_index(drop=True)
df = df.groupby(['model', 'dataset']).agg({
'CV AUC': ['mean', 'std'],
'duration[s]': ['mean', 'std'],
})
df.columns = ['%s_%s' % (a, b) for a, b in df.columns]
df = df.reset_index()
df['model'] = df['model'].apply(lambda d: d[1])
print(df)
# plot
plt.figure(figsize=(8, 6))
for i, (_, model) in enumerate(const.models.values()):
for j, dset in enumerate(['airline', 'amazon', 'bank']):
idx = (df['model'] == model) &\
(df['dataset'] == dset)
x = df.loc[idx, 'duration[s]_mean']
y = df.loc[idx, 'CV AUC_mean']
xerr = df.loc[idx, 'duration[s]_std']
yerr = df.loc[idx, 'CV AUC_std']
fmt = '%sC%d' % (['o', 's', 'D', '^'][j], i)
label = 'model=%s, dataset=%s' % (model, dset) # noqa
plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt=fmt, label=label)
plt.title('Model Comparison')
plt.xlabel('Training Time[s]')
plt.ylabel('CV AUC')
plt.legend(loc='lower right')
plt.savefig(data_dir/'model_and_task.png')
if __name__ == '__main__':
main()
|
python
|
default_chunk_size = 32 * 1024 * 1024
gs_max_parts_per_compose = 32
upload_chunk_identifier = "gs-chunked-io-part"
reader_retries = 5
writer_retries = 5
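# Note (derived from the defaults above): one compose of 32 parts at 32 MiB each
# covers 32 * 32 MiB = 1 GiB of data before another compose level is required.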
|
python
|
import cmath
def usual(tab):
N = len(tab)
tab2 = [0] * N
for n in range(0, N):
for k in range(0, N):
tab2[n] = tab2[n] + tab[k] * cmath.exp(-2 * 1j * cmath.pi * n * (k / N))
return tab2
def inverse(tab):
N = len(tab)
tab2 = [0] * N
for n in range(0, N):
for k in range(0, N):
tab2[n] = tab2[n] + (tab[k] * cmath.exp((2 * 1j * cmath.pi * n * (k / N)))) / N
return tab2
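# Quick self-check sketch: the inverse transform should recover the original
# samples up to floating-point error.
if __name__ == "__main__":
    samples = [1.0, 2.0, 3.0, 4.0]
    spectrum = usual(samples)
    recovered = inverse(spectrum)
    print([round(abs(v), 6) for v in recovered])  # expected: [1.0, 2.0, 3.0, 4.0]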
|
python
|
from sklearn import preprocessing
from desafio_iafront.jobs.common import transform
from desafio_iafront.data.saving import save_partitioned
class Preprocessing:
def __init__(self, result, saida):
self.result = result
self.saida = saida
    def normalizer(self):
        # Scale the values
        result_scaled = transform(self.result, preprocessing.Normalizer())
        # Save the result
        save_partitioned(result_scaled, self.saida, ['data', 'hora'])
        self.result = result_scaled
    def standard_scale(self):
        # Scale the values
        result_scaled = transform(self.result, preprocessing.StandardScaler())
        # Save the result
        save_partitioned(result_scaled, self.saida, ['data', 'hora'])
        self.result = result_scaled
    def min_max_scale(self):
        # Scale the values
        result_scaled = transform(self.result, preprocessing.MinMaxScaler())
        # Save the result
        save_partitioned(result_scaled, self.saida, ['data', 'hora'])
        self.result = result_scaled
    def max_abs_scale(self):
        # Scale the values
        result_scaled = transform(self.result, preprocessing.MaxAbsScaler())
        # Save the result
        save_partitioned(result_scaled, self.saida, ['data', 'hora'])
        self.result = result_scaled
        return result_scaled
    def robust_scale(self):
        # Scale the values
        result_scaled = transform(self.result, preprocessing.RobustScaler())
        # Save the result
        save_partitioned(result_scaled, self.saida, ['data', 'hora'])
        self.result = result_scaled
    def power_transformer(self):
        # Scale the values
        result_scaled = transform(self.result, preprocessing.PowerTransformer())
        # Save the result
        save_partitioned(result_scaled, self.saida, ['data', 'hora'])
        self.result = result_scaled
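# Usage sketch: `result` is assumed to be a dataframe produced by the project's
# reading step and `saida` an output directory (names follow the constructor above).
#
#   prep = Preprocessing(result, saida)
#   prep.standard_scale()  # scales, saves partitioned by ['data', 'hora'], updates prep.result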
|
python
|
from typing import TYPE_CHECKING
from contextlib import closing
from nonebot.rule import Rule
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import MessageEvent
from .. import crud
from ..db import get_db
if TYPE_CHECKING:
from nonebot.typing import Bot, Event
def validate_user() -> Rule:
"""
validate user and assign to state
"""
async def _validate_user(bot: "Bot", event: "Event", state: T_State) -> bool:
if not isinstance(event, MessageEvent):
return False
with closing(get_db().cursor()) as cursor:
user = crud.user.get_by_qq(cursor, int(event.get_user_id()))
state['current_user'] = user
return True if not user else user.is_active
return Rule(_validate_user)
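# Usage sketch (hedged): with nonebot2 this rule would typically be attached to a
# matcher, e.g. `on_message(rule=validate_user())`; the handler can then read
# state["current_user"] as populated by _validate_user above.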
|
python
|
from django.utils.encoding import force_text
import pytest
from apps.core.tests.base_test_utils import (
generate_uid_and_token,
mock_email_backend_send_messages,
)
from apps.users.constants.messages import EXPIRED_LINK_MESSAGE
from .constants import NEW_TEST_PASSWORD, PASS_RESET_CONFIRM_URL, PASS_RESET_URL
pytestmark = pytest.mark.django_db
def test_password_reset_with_invalid_email(client, mocker):
mocked_email_func = mock_email_backend_send_messages(mocker)
post_data = {"email": "[email protected]"}
response = client.post(PASS_RESET_URL, post_data)
assert mocked_email_func.call_count == 0
assert response.status_code == 200
def test_password_reset_with_valid_email(user, client, mocker):
mocked_email_func = mock_email_backend_send_messages(mocker)
post_data = {"email": user.email}
response = client.post(PASS_RESET_URL, post_data)
assert response.status_code == 200
assert mocked_email_func.call_count == 1
def test_password_set_with_valid_password(user, client):
old_password_change_date = user.last_password_change_date
url_kwargs = generate_uid_and_token(user)
post_data = {
"new_password": NEW_TEST_PASSWORD,
"uid": force_text(url_kwargs["uuid"]),
"token": url_kwargs["token"],
}
response = client.post(PASS_RESET_CONFIRM_URL, post_data, format="json")
user.refresh_from_db()
assert response.status_code == 200
assert user.check_password(NEW_TEST_PASSWORD)
assert user.last_password_change_date != old_password_change_date
def test_password_set_with_invalid_uid_and_token(user, client):
post_data = {
"new_password": NEW_TEST_PASSWORD,
"uid": "invalid",
"token": "invalid",
}
response = client.post(PASS_RESET_CONFIRM_URL, post_data, format="json")
user.refresh_from_db()
assert response.status_code == 400
assert response.data["messages"][0] == f"non_field_errors: {EXPIRED_LINK_MESSAGE}"
assert not user.check_password(NEW_TEST_PASSWORD)
|
python
|
from pftools.module_conv_utils import PFConversion
class pncConversion(PFConversion):
"""Class for probe data conversion
inherits from PFConversion
Parameters
----------
pfFile : string
probe file (surface or volume)
verbose : bool
        Activate or deactivate informative prints (default: True)
Methods
-------
compute_probe_weight
Collect volume/surface scaling
read_measurement
Convert probe measurement to pandas data frame
export_temporal_data
Write column text file
"""
def __init__(self,pfFile,verbose=True):
super().__init__(pfFile,verbose)
ext = pfFile.split('.')[-1]
if ext == 'psnc':
self.format = 'surface-probe'
elif ext == 'pfnc':
self.format = 'volume-probe'
else:
raise RuntimeError('This is not a probe file')
self.iscale = None
self.weight = None
self.data = None
if self.verbose:
print('PF file format is: {0:s}'.format(self.format))
def compute_probe_weight(self):
"""Collect volume/surface scaling and store it in the class instance
The results is stored in the class instance
and there is no input except from the class instance.
"""
import netCDF4 as netcdf
from numpy import pi
if self.params is None:
self.read_conversion_parameters()
f = netcdf.Dataset(self.pfFile, 'r')
if self.format == 'volume-probe':
self.weight = f.variables['fluid_volumes'][()] * self.params['coeff_dx']**3
elif self.format == 'surface-probe':
            self.weight = f.variables['surfel_area'][()] * self.params['coeff_dx']**2
# Average point
intv = self.weight.sum()
self.iscale = 1.0/float(intv)
f.close()
if self.verbose:
print('Probe size')
if self.format == 'volume-probe':
print(' -> Volume: {0:e} m3'.format(intv))
rad = (3*intv/(4*pi))**(1./3.)
print(' -> Radius: {0:e} m'.format(rad))
if self.format == 'surface-probe':
print(' -> Area: {0:e} m2'.format(intv))
rad = (intv/pi)**0.5
print(' -> Radius: {0:e} m'.format(rad))
def read_measurement(self):
"""Function that read and convert data as probe data in SI units
The results is stored in the class instance
and there is no input except from the class instance.
"""
import netCDF4 as netcdf
from pandas import DataFrame
if self.iscale is None:
self.compute_probe_weight()
if self.vars is None:
self.define_measurement_variables()
if self.time is None:
self.extract_time_info()
data = dict()
data['time'] = self.time['time_center']
f = netcdf.Dataset(self.pfFile, 'r')
meas = f.variables['measurements'][()] * self.weight
mean_meas = meas.sum(axis=-1) * self.iscale
f.close()
for var in self.vars.keys():
idx = self.vars[var]
if var == 'static_pressure':
if idx>=0 :
data[var] = ( ( mean_meas[:,idx] + self.params['offset_pressure'] )
* self.params['coeff_press'] )
else:
idx = self.vars['density']
data[var] = ( mean_meas[:,idx] * self.params['weight_rho_to_pressure']
+ self.params['offset_pressure'] ) * self.params['coeff_press']
if var == 'density':
if idx>=0:
data[var] = mean_meas[:,idx] * self.params['coeff_density']
else:
idx = self.vars['static_pressure']
data[var] = ( mean_meas[:,idx] * self.params['weight_pressure_to_rho']
* self.params['coeff_press'] )
if var in ['x_velocity','y_velocity','z_velocity']:
data[var] = mean_meas[:,idx] * self.params['coeff_vel']
self.data = DataFrame(data=data)
def export_temporal_data(self,casename,dirout,delimiter=' ',index=False,
extension='txt'):
"""Function to export probe temporal data to a text file.
All quantities will be written in SI units
Parameters
----------
casename : string
            Name associated with the present probe conversion.
The casename is used to build the output file name
temporal_<casename>.<extension>
dirout : string
Absolute path/relative path where the converted file will be written
delimiter : char
Field delimiter (default is space).
If comma ',' is specified the file extension will be 'csv'
index : bool
Append index of rows as the first column in the text file
extension : string
Extension of the text file (by default txt)
"""
import os.path
if delimiter == ',':
ext = 'csv'
else:
ext = extension
if self.data is None:
self.read_measurement()
outFile = os.path.join(dirout,'temporal_{0:s}.{1:s}'.format(casename,ext))
print("Exporting in ascii column format:\n -> {0:s}".format(outFile))
self.data.to_csv(outFile,sep=delimiter,index=index)
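# Usage sketch; the file name and casename below are placeholders, not real data.
#
#   conv = pncConversion('measurement.pfnc', verbose=True)        # volume probe
#   conv.read_measurement()                                       # triggers weight/time setup as needed
#   conv.export_temporal_data('probe01', './out', delimiter=',')  # writes ./out/temporal_probe01.csv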
|
python
|
# Generated by Django 3.0.8 on 2020-08-20 19:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0019_auto_20200820_1508'),
]
operations = [
migrations.AlterField(
model_name='course',
name='hours',
field=models.IntegerField(default=40.0),
),
]
|
python
|
""" This module contains the code that allows the LWR to stage file's during
preprocessing (currently this means downloading or copying files) and then unstage
or send results back to client during postprocessing.
:mod:`lwr.managers.staging.preprocess` Module
----------------------------------------------
.. automodule:: lwr.managers.staging.preprocess
.. autofunction:: preprocess
:mod:`lwr.managers.staging.postprocess` Module
-----------------------------------------------
.. automodule:: lwr.managers.staging.postprocess
.. autofunction:: postprocess
"""
from .postprocess import postprocess
from .preprocess import preprocess
__all__ = ["preprocess", "postprocess"]
|
python
|