from __future__ import annotations
from datetime import datetime
from jsonclasses import jsonclass, types
from jsonclasses_pymongo import pymongo
@pymongo
@jsonclass(class_graph='linked')
class LinkedBomb:
id: str = types.readonly.str.primary.mongoid.required
name: str
soldiers: list[LinkedSoldier] = types.listof('LinkedSoldier') \
.linkedthru('bombs').cascade
created_at: datetime = types.readonly.datetime.tscreated.required
updated_at: datetime = types.readonly.datetime.tsupdated.required
@pymongo
@jsonclass(class_graph='linked')
class LinkedSoldier:
id: str = types.readonly.str.primary.mongoid.required
name: str
bombs: list[LinkedBomb] = types.listof('LinkedBomb') \
.linkedthru('soldiers').cascade
created_at: datetime = types.readonly.datetime.tscreated.required
updated_at: datetime = types.readonly.datetime.tsupdated.required
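# Hedged usage sketch (not part of the original models): the constructor-keyword
# style and the .save() call reflect the usual jsonclasses-pymongo ORM surface,
# but treat the exact calls and field values here as assumptions.
def _demo_link_bomb_and_soldier():
    soldier = LinkedSoldier(name='soldier-1')
    bomb = LinkedBomb(name='bomb-1', soldiers=[soldier])
    bomb.save()  # assumed ORM method added by the @pymongo decorator
    return bomb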
| python |
"""
rvmath.base
~~~~~~~~~~~
:copyright: 2021 by rvmath Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import annotations
import collections
import itertools as it
import numbers
import operator
import secrets
import typing as ty
from dataclasses import dataclass, field
import numpy as np
import scipy.interpolate
from scipy import stats
RVID_NBYTES = 16
_OP_STR = {
operator.add: "+",
operator.sub: "-",
operator.mul: "*",
operator.truediv: "/",
operator.pow: "**",
operator.pos: "+",
operator.neg: "-",
}
def builder(distro_cls):
"""Creates a hungry wrapper function.
Parameters
----------
distro_cls : rv_continuous
A SciPy distribution
"""
# Check if this is a continuous distribution?
def _inner(*args, **kwargs):
rvid = kwargs.pop("rvid", None)
size = kwargs.pop("size", None)
if any(isinstance(a, RandomVariable) for a in it.chain(args, kwargs.values())):
if rvid is None:
return DependentRandomVariable(
distro_cls, size=size, args=args, kwds=kwargs
)
else:
return DependentRandomVariable(
distro_cls, size=size, rvid=rvid, args=args, kwds=kwargs
)
distro = distro_cls(*args, **kwargs)
if rvid is None:
return RandomVariable(distro, size=size)
else:
return RandomVariable(distro, size=size, rvid=rvid)
return _inner
def wrap(distro_cls, *args, **kwargs):
    """Wrap a SciPy stats distribution in an rvmath class."""
return builder(distro_cls)(*args, **kwargs)
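# Hedged usage sketch (assumes scipy.stats.norm): `wrap` turns a SciPy
# distribution into an rvmath RandomVariable; passing another RandomVariable as
# a parameter yields a DependentRandomVariable instead.
def _demo_wrap():
    rv = wrap(stats.norm, loc=0.0, scale=1.0)   # plain RandomVariable
    dep = wrap(stats.norm, loc=rv, scale=1.0)   # DependentRandomVariable (its mean is random)
    return rv, dep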
def ecdf(x):
    """Empirical cumulative distribution function.
Parameters
----------
x : array-like
data
Returns
-------
np.ndarray, np.ndarray
value, ecdf
"""
xs = np.sort(x)
ys = np.arange(1, len(xs) + 1) / float(len(xs))
return xs, ys
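# Hedged usage sketch: the empirical CDF of a tiny sample.
def _demo_ecdf():
    xs, ys = ecdf(np.array([3.0, 1.0, 2.0]))
    # xs -> array([1., 2., 3.]), ys -> array([1/3, 2/3, 1.])
    return xs, ys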
def eval_value(value, realization):
"""Helper to dispatch the evaluation of (maybe) RVMixin values
See RVMixin.eval for help on `realization`.
"""
if isinstance(value, RVMixin):
return value.eval(realization)
return value
def any_none(els):
"""Return True if any of the elements is None."""
return any(el is None for el in els)
def combine_size(distro_size, size):
"""Combine distribution and user size according to certain rules.
Parameters
----------
distro_size : None, int or tuple of int or None
Size assigned to the distribution.
size : int or tuple of int
Size provided to the `rvs`.
Returns
-------
int or tuple of int
"""
if size is None:
raise ValueError("'size' cannot be None.")
elif isinstance(size, tuple):
if any_none(size):
raise ValueError("'size' cannot contain None.")
if distro_size is None:
return size
elif isinstance(distro_size, tuple) and isinstance(size, tuple):
if any_none(distro_size):
raise ValueError(
"A distribution 'distro_size' cannot contain None "
"when the 'rvs' distro_size is a tuple."
)
return distro_size
elif isinstance(distro_size, tuple) and isinstance(size, int):
return tuple(el or size for el in distro_size)
return distro_size
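# Hedged examples of the size-combination rules implemented above.
def _demo_combine_size():
    assert combine_size(None, 5) == 5             # no distribution size: use the rvs size
    assert combine_size((2, None), 5) == (2, 5)   # None entries are filled with the rvs size
    assert combine_size((2, 3), (4,)) == (2, 3)   # a fully specified distribution size wins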
class RVMixin:
"""Mixin for classes that are or can contain random variables."""
    def random_vars(self) -> ty.Generator[ty.Tuple[str, ty.Tuple[stats.rv_continuous, ty.Any]], None, None]:
"""Yields all random variables and their distributions within this expression.
Yields
------
        str, (stats.rv_continuous, int or tuple of int or None)
            variable name, (distribution, size)
"""
for rvid, obj in self.random_objs():
yield rvid, (obj.distro, obj.size)
    def random_objs(self) -> ty.Generator[ty.Tuple[str, RandomVariable], None, None]:
        """Yield all random rvmath objects within this expression.
Yields
------
str, RandomVariable
"""
# This weird construction is a way to create
# an empty generator.
return
yield # pragma: no cover
def eval(self, realization):
"""Evaluate this expression given a realization of its random variables.
Parameters
----------
realization : Dict[str, np.ndarray or Number]
Dictionary mapping random variable id to a realization.
Returns
-------
np.ndarray or Number
"""
raise NotImplementedError
def draw(
self, size=1, random_state=None
    ) -> ty.Dict[str, ty.Union[np.ndarray, numbers.Number]]:
"""Draw values for the random variables within this expression."""
robjs = dict(self.random_objs())
# We first evaluate the non-dependent distributions.
realization = {
rvid: obj.distro.rvs(combine_size(obj.size, size), random_state)
for rvid, obj in self.random_objs()
if not isinstance(obj, DependentRandomVariable)
}
# Then we build a dependency graph.
deps = {
rvid: set(_rvid for _rvid, _ in obj.children_random_objs())
for rvid, obj in robjs.items()
if isinstance(obj, DependentRandomVariable)
}
for layer in solve_dependencies(deps):
for rvid in layer:
cur = robjs[rvid]
sz = combine_size(cur.size, size)
if isinstance(cur, DependentRandomVariable):
realization[rvid] = cur.freeze(realization).rvs(sz, random_state)
else:
realization[rvid] = cur.distro.rvs(sz, random_state)
return realization
def rvs(self, size=1, random_state=None):
"""
Parameters
----------
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None, int, RandomState, Generator, optional
If seed is None the RandomState singleton is used. If seed is an int,
a new RandomState instance is used, seeded with seed. If seed is already
a RandomState or Generator instance, then that object is used. Default is None.
Returns
-------
ndarray or number
Random variates of given size.
"""
return self.eval(self.draw(size, random_state))
    def to_distro(self, name, n=1_000_000, discrete=False, **kwargs):
        """Convert the current expression into a frozen SciPy distribution
        (scipy.stats.rv_continuous, or rv_discrete if ``discrete`` is True).
        This is done by drawing random samples, estimating the empirical CDF and
        building an interpolator from it.
Parameters
----------
name : str
name of the distribution
n : int, optional
            number of random samples to draw, from which the cdf
            is estimated (default: 1_000_000)
discrete : bool, optional
if True, a discrete distribution (i.e. a subclass from rv_discrete)
will be generated (default: False).
kwargs:
extra keyword arguments, passed directly to the
distribution constructors
"""
values = self.rvs(n)
if discrete:
xk, pk = zip(*collections.Counter(values).items())
xk = np.asarray(xk)
pk = np.asarray(pk).astype(np.float64)
pk /= np.sum(pk)
distro_gen = stats.rv_discrete(name=name, values=(xk, pk), **kwargs)
return distro_gen()
else:
itp = scipy.interpolate.interp1d(
*ecdf(values),
copy=True,
bounds_error=False,
fill_value=(0, 1),
assume_sorted=True,
)
class distro_gen(stats.rv_continuous):
def _cdf(self, x):
return itp(x)
return distro_gen(name=name, **kwargs)()
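# Hedged usage sketch (assumes scipy.stats.norm; n is kept small for speed):
# to_distro turns any expression into a frozen SciPy-style distribution.
def _demo_to_distro():
    rv = wrap(stats.norm, loc=0.0, scale=1.0)
    frozen = (rv + 1.0).to_distro("shifted_norm", n=10_000)
    return frozen.cdf(1.0)   # roughly 0.5, since the expression is centred at 1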
class OperatorMixin:
    """Mixin used to deal with math expressions and function calls."""
def __add__(self, other):
return BinaryOp(operator.add, self, other)
def __radd__(self, other):
return BinaryOp(operator.add, other, self)
def __sub__(self, other):
return BinaryOp(operator.sub, self, other)
def __rsub__(self, other):
return BinaryOp(operator.sub, other, self)
def __mul__(self, other):
return BinaryOp(operator.mul, self, other)
def __rmul__(self, other):
return BinaryOp(operator.mul, other, self)
def __truediv__(self, other):
return BinaryOp(operator.truediv, self, other)
def __rtruediv__(self, other):
return BinaryOp(operator.truediv, other, self)
def __pow__(self, power, modulo=None):
return BinaryOp(operator.pow, self, power)
def __rpow__(self, power, modulo=None):
return BinaryOp(operator.pow, power, self)
def __pos__(self):
return UnaryOp(operator.pos, self)
def __neg__(self):
return UnaryOp(operator.neg, self)
def __array_function__(self, func, types, args, kwargs):
return Function(func, args, kwargs)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if method == "__call__":
return Function(ufunc, inputs, kwargs)
else:
return NotImplemented
Operand = ty.Union[numbers.Number, RVMixin, OperatorMixin]
@dataclass(frozen=True)
class ArgLessFunction(OperatorMixin, RVMixin):
    """An argument-less function."""
func: ty.Callable
def eval(self, realization):
return self.func()
@dataclass(frozen=True)
class WithArg(RVMixin):
    """Add handling of arguments and keyword arguments to
    other dataclasses.
"""
args: ty.Tuple[ty.Any] = field(default_factory=tuple)
kwds: ty.Dict[str, ty.Any] = field(default_factory=dict)
def random_objs(self):
yield from super().random_objs()
yield from self.children_random_objs()
def children_random_objs(self):
for arg in self.args:
if isinstance(arg, RVMixin):
yield from arg.random_objs()
for k, v in self.kwds.items():
if isinstance(v, RVMixin):
yield from v.random_objs()
def get_args_kwds(self, realization):
args = tuple(eval_value(arg, realization) for arg in self.args)
kwds = {k: eval_value(v, realization) for k, v in self.kwds.items()}
return args, kwds
@dataclass(frozen=True)
class Function(WithArg, ArgLessFunction):
    """A function that handles arguments and keyword arguments."""
def eval(self, realization):
args, kwds = self.get_args_kwds(realization)
return self.func(*args, **kwds)
@dataclass(frozen=True)
class RandomVariable(OperatorMixin, RVMixin):
"""A random variable."""
distro: stats.rv_continuous
size: ty.Optional[numbers.Integral] = None
rvid: str = field(default_factory=lambda: secrets.token_hex(nbytes=RVID_NBYTES))
def random_objs(self):
yield self.rvid, self
def eval(self, realization):
if self.rvid in realization:
return realization[self.rvid]
return self.distro()
def __str__(self):
obj = self.distro
        s = tuple(str(a) for a in obj.args) + tuple(
            f"{k}={v}" for k, v in obj.kwds.items()
        )
return f"{obj.dist.name}({', '.join(s)})#{self.rvid}"
@dataclass(frozen=True)
class DependentRandomVariable(WithArg, RandomVariable):
"""A random variable that depends on other random variables
    (e.g. its mean value is drawn from another random variable).
"""
def eval(self, realization):
return realization[self.rvid]
def freeze(self, realization):
args, kwds = self.get_args_kwds(realization)
return self.distro(*args, **kwds)
def __str__(self):
obj = self.distro
        s = tuple(str(a) for a in self.args) + tuple(
            f"{k}={v}" for k, v in self.kwds.items()
        )
return f"{obj.name}({', '.join(s)})#{self.rvid}"
@dataclass(frozen=True)
class UnaryOp(OperatorMixin, RVMixin):
    """A unary operator."""
op: ty.Callable
value: Operand
def random_objs(self):
if isinstance(self.value, RVMixin):
yield from self.value.random_objs()
def eval(self, realization):
return self.op(eval_value(self.value, realization))
def __str__(self):
return _OP_STR[self.op] + str(self.value)
@dataclass(frozen=True)
class BinaryOp(OperatorMixin, RVMixin):
    """A binary operator."""
op: ty.Callable
value1: Operand
value2: Operand
def random_objs(self):
if isinstance(self.value1, RVMixin):
yield from self.value1.random_objs()
if isinstance(self.value2, RVMixin):
yield from self.value2.random_objs()
def eval(self, realization):
return self.op(
eval_value(self.value1, realization),
eval_value(self.value2, realization),
)
def __str__(self):
return str(self.value1) + " " + _OP_STR[self.op] + " " + str(self.value2)
One = UnaryOp(operator.pos, 1)
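# Hedged usage sketch (assumes scipy.stats.norm and scipy.stats.uniform):
# arithmetic on random variables builds an expression tree of BinaryOp/UnaryOp
# nodes that is only sampled when .rvs() is called.
def _demo_expression():
    x = wrap(stats.norm, loc=0.0, scale=1.0)
    y = wrap(stats.uniform, 0.0, 2.0)
    expr = 2.0 * x + y ** 2
    return expr.rvs(size=5)   # ndarray with 5 realizations of the full expression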
def solve_dependencies(dependencies):
"""Solve a dependency graph.
Parameters
----------
dependencies :
dependency dictionary. For each key, the value is an iterable indicating its
dependencies.
Returns
-------
    iterator of sets
        each set contains keys of independent tasks that depend only on tasks
        yielded in previous sets.
"""
while dependencies:
# values not in keys (items without dep)
t = {i for v in dependencies.values() for i in v} - dependencies.keys()
# and keys without value (items without dep)
t.update(k for k, v in dependencies.items() if not v)
# can be done right away
if not t:
raise ValueError(
"Cyclic dependencies exist among these items: {}".format(
", ".join(repr(x) for x in dependencies.items())
)
)
# and cleaned up
dependencies = {k: v - t for k, v in dependencies.items() if v}
yield t
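# Hedged usage sketch: each yielded set can be evaluated once the previous sets are done.
def _demo_solve_dependencies():
    deps = {"c": {"a", "b"}, "b": {"a"}, "a": set()}
    return list(solve_dependencies(deps))   # [{'a'}, {'b'}, {'c'}]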
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 13:26:57 2018
@author: Fall
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-5,5,1000)
y = np.sin(x)
plt.plot(x, y, label="objective")
plt.plot(x, 0*x+0.5, color="r", linestyle="--", label="constraint")
plt.fill_between(x, -1, 1, where=y>=0.5, label="feasible region", alpha=0.3)
plt.plot([-7*np.pi/6, np.pi/6, 5*np.pi/6], 0.5+np.zeros(3), '*', color="orange", markersize=12, label="solutions")
plt.legend()
plt.title(r"$\min_x \; \sin(x) \; s.t. \; \sin(x)\geq 0.5$")
plt.show()
x = np.linspace(-10,10,1000)
plt.plot(x,x**2)
plt.title("A convex function")
plt.show()
plt.figure()
plt.plot(x, -x*np.sin(x))
plt.title("A non-convex function")
plt.show()
plt.figure()
plt.plot(x, np.sqrt(np.abs(x)))
plt.title("A non-convex function with only one minimum.")
plt.show()
| python |
import os
import time
import argparse
import logging
from dirtositemap import DirToSitemap
from config import *
from sitemaptree import SitemapTree
def cmp_file(f1, f2):
st1 = os.stat(f1)
st2 = os.stat(f2)
# compare file size
if st1.st_size != st2.st_size:
return False
bufsize = 8 * 1024
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
                logging.info("{} and {} haven't changed".format(f1, f2))
return True
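# Hedged usage sketch (the two paths are hypothetical): cmp_file returns True
# only when both files have the same size and identical bytes.
def _demo_cmp_file():
    return cmp_file("/tmp/site_old/index.html", "/tmp/site_new/index.html")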
def parse_dir(dir, cur_path=""):
"""
get html file and path
:param dir: dir path, absolute path
:return: dict{rpath:filename}
"""
result = {}
apath = os.path.join(dir, cur_path)
files = os.listdir(apath)
for file_name in files:
temp_path = os.path.join(apath, file_name)
rpath = os.path.join(cur_path, file_name)
if os.path.isfile(temp_path):
if file_name[-5:] == '.html':
result[rpath] = file_name
else:
result.update(parse_dir(dir, rpath))
return result
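# Hedged usage sketch (the directory is hypothetical): for a tree containing
# a.html and blog/b.html the result is {'a.html': 'a.html', 'blog/b.html': 'b.html'}.
def _demo_parse_dir():
    return parse_dir("/tmp/site_new")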
def compare(old_dir, new_dir, old_sitemap, html):
"""
:param old_dir: absolute path
:param new_dir: absolute path
:param old_sitemap: html_old's sitemap
:return:
"""
# sitemaptree for dir html
sitemap = DirToSitemap(dir=new_dir, html=html, root_url=ROOTURL, home_page=HOMEPAGE,
change_freq=CHANGEFREQ_PATTERNS[3], nsmap=XMLNS, priorities=PRIORITIES, time_zone=TIMEZONE,
time_pattern=LASTMODFORMAT)
pt = sitemap.parse_dir("")
# if old_sitemap is None, or old_dir is None
if old_sitemap == None or old_dir == None:
return pt
if os.path.exists(old_sitemap) == False:
logging.error("there is no old sitemap in {}".format(old_sitemap))
return pt
if os.path.exists(old_dir) == False:
logging.error("there is no old dir in {}".format(old_dir))
return pt
# sitemaptree for dir html_old
pt_old = SitemapTree(file=old_sitemap)
path_file_dic = parse_dir(old_dir)
for rpath, file in path_file_dic.items():
old_apath, new_apath = os.path.join(old_dir, rpath), os.path.join(new_dir, rpath)
if os.path.exists(new_apath) and os.path.exists(old_apath):
if cmp_file(old_apath, new_apath) == True: # update lastmod
url_html = sitemap.path_to_url(rpath, True)
url_nhtml = sitemap.path_to_url(rpath, False)
if sitemap.html == True:
new_node = pt.get_node(url_html)
else:
new_node = pt.get_node(url_nhtml)
if new_node == None:
logging.error(
"the node in new sitemap should not be none, path is {},url is {}".format(rpath, url_html))
old_node = pt_old.get_node(url_html)
if old_node == None: # maybe some url in old sitemap are not ended with ".html"
old_node = pt_old.get_node(url_nhtml)
if old_node == None: # did not find the node in old sitemap
logging.error("no site map for file in {}".format(old_apath))
continue
logging.info("change file {} lastmod".format(rpath))
old_lastmod = old_node.find('lastmod', namespaces=old_node.nsmap).text
sitemap.change_lastmod(new_node, old_lastmod)
return pt
# if __name__ == "__main__":
logging.basicConfig(level=logging.ERROR,
format=LOGGINTFORMAT,
)
# generate sitemap by comparing html dir and old html dir
parser = argparse.ArgumentParser()
parser.add_argument('--ndir', help="new dir absolute path")
parser.add_argument('--odir', help="old dir absolute path")
parser.add_argument('--ositemap', help="old sitemap absolute path")
parser.add_argument('--sitemap', help="new sitemap absolute path", default="")
parser.add_argument('--html', action='store_false', help="contains .html suffix, default true")
args = parser.parse_args()
pt = compare(args.odir,
args.ndir,
args.ositemap,
args.html)
pt.sort()
pt.save(os.path.abspath(args.sitemap))
| python |
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Karma')
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Karma', True)
Karma = conf.registerPlugin('Karma')
conf.registerChannelValue(Karma, 'simpleOutput',
registry.Boolean(False, _("""Determines whether the bot will output shorter
versions of the karma output when requesting a single thing's karma.""")))
conf.registerChannelValue(Karma, 'response',
registry.Boolean(False, _("""Determines whether the bot will reply with a
success message when something's karma is increased or decreased.""")))
conf.registerChannelValue(Karma, 'rankingDisplay',
registry.Integer(3, _("""Determines how many highest/lowest karma things
are shown when karma is called with no arguments.""")))
conf.registerChannelValue(Karma, 'mostDisplay',
registry.Integer(25, _("""Determines how many karma things are shown when
the most command is called.""")))
conf.registerChannelValue(Karma, 'allowSelfRating',
registry.Boolean(False, _("""Determines whether users can adjust the karma
of their nick.""")))
conf.registerChannelValue(Karma, 'allowUnaddressedKarma',
registry.Boolean(False, _("""Determines whether the bot will
increase/decrease karma without being addressed.""")))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| python |
#!/usr/bin/python2
import rospy
import cv_bridge
from cv_bridge import CvBridge
import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage
from crazyflie.msg import CFData
# from crazyflie.msg import CFImage
from crazyflie.msg import CFCommand
from crazyflie.msg import CFMotion
import time
import matplotlib.pyplot as plt
import os
class Camera:
# DO_NOTHING_CMD = CFMotion()
def __init__(self, ID):
self.id = ID
self.bridge = CvBridge()
self.mat = None
#need to facilitate a set of publishers per cf node
self.image_pub = rospy.Publisher('cf/%d/image'%self.id, CompressedImage, queue_size=10)
## CALLBACKS ##
## THREADS ##
def run(self):
try:
cap = cv2.VideoCapture(0) # TODO: multiple vid captures in parallel
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 192)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 144)
# cap.set(cv2.CAP_PROP_BRIGHTNESS, 0.8)
# cap.set(cv2.CAP_PROP_CONTRAST, 0.2)
# cap.set(cv2.CAP_PROP_EXPOSURE, 0.08)
# cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
while not rospy.is_shutdown():
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#ret, gray = cap.read()
self.image_pub.publish(self.bridge.cv2_to_compressed_imgmsg(gray))
cv2.imshow('frame', gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
except Exception as e:
print "CAMERA %d STREAM FAILED -- CHECK INPUTS" % self.id
print "Error: " + str(e)
print " -- Camera %d Finished -- " % self.id
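# Hedged usage sketch (node name and camera id are hypothetical): a Camera is
# typically constructed after initializing a ROS node, then run() blocks until shutdown.
def _demo_camera_node():
    rospy.init_node('camera_streamer', anonymous=True)
    cam = Camera(0)
    cam.run()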
| python |
#!/usr/bin/python
"""
(C) Copyright 2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from __future__ import print_function
import general_utils
from command_utils import ExecutableCommand, EnvironmentVariables
from command_utils import CommandFailure, FormattedParameter
from ClusterShell.NodeSet import NodeSet
from server_utils import AVOCADO_FILE
class DfuseCommand(ExecutableCommand):
    """Defines an object representing a dfuse command."""
def __init__(self, namespace, command):
"""Create a dfuse Command object."""
super(DfuseCommand, self).__init__(namespace, command)
# dfuse options
self.puuid = FormattedParameter("--pool {}")
self.cuuid = FormattedParameter("--container {}")
self.mount_dir = FormattedParameter("--mountpoint {}")
self.svcl = FormattedParameter("--svc {}", 0)
self.sys_name = FormattedParameter("--sys-name {}")
self.singlethreaded = FormattedParameter("--singlethreaded", False)
self.foreground = FormattedParameter("--foreground", False)
def set_dfuse_params(self, pool, display=True):
"""Set the dfuse parameters for the DAOS group, pool, and container uuid
Args:
pool (TestPool): DAOS test pool object
display (bool, optional): print updated params. Defaults to True.
"""
self.set_dfuse_pool_params(pool, display)
def set_dfuse_pool_params(self, pool, display=True):
"""Set Dfuse params based on Daos Pool.
Args:
pool (TestPool): DAOS test pool object
display (bool, optional): print updated params. Defaults to True.
"""
self.puuid.update(pool.uuid, "puuid" if display else None)
self.set_dfuse_svcl_param(pool, display)
def set_dfuse_svcl_param(self, pool, display=True):
"""Set the dfuse svcl param from the ranks of a DAOS pool object.
Args:
pool (TestPool): DAOS test pool object
display (bool, optional): print updated params. Defaults to True.
"""
svcl = ":".join(
[str(item) for item in [
int(pool.pool.svc.rl_ranks[index])
for index in range(pool.pool.svc.rl_nr)]])
self.svcl.update(svcl, "svcl" if display else None)
def set_dfuse_cont_param(self, cont, display=True):
"""Set dfuse cont param from Container object
Args:
cont (TestContainer): Daos test container object
display (bool, optional): print updated params. Defaults to True.
"""
self.cuuid.update(cont, "cuuid" if display else None)
class Dfuse(DfuseCommand):
"""Class defining an object of type DfuseCommand"""
def __init__(self, hosts, tmp, dfuse_env=False):
"""Create a dfuse object"""
super(Dfuse, self).__init__("/run/dfuse/*", "dfuse")
# set params
self.hosts = hosts
self.tmp = tmp
self.dfuse_env = dfuse_env
def __del__(self):
"""Destroy Dfuse object and stop dfuse """
# stop dfuse
self.stop()
def create_mount_point(self):
"""Create dfuse directory
Raises:
CommandFailure: In case of error creating directory
"""
# raise exception if mount point not specified
if self.mount_dir.value is None:
raise CommandFailure("Mount point not specified, "
"check test yaml file")
dir_exists, _ = general_utils.check_file_exists(
self.hosts, self.mount_dir.value, directory=True)
if not dir_exists:
cmd = "mkdir -p {}".format(self.mount_dir.value)
ret_code = general_utils.pcmd(self.hosts, cmd, timeout=30)
if 0 not in ret_code:
error_hosts = NodeSet(
",".join(
[str(node_set) for code, node_set in ret_code.items()
if code != 0]))
raise CommandFailure(
"Error creating the {} dfuse mount point on the following "
"hosts: {}".format(self.mount_dir.value, error_hosts))
def remove_mount_point(self):
"""Remove dfuse directory
Raises:
CommandFailure: In case of error deleting directory
"""
# raise exception if mount point not specified
if self.mount_dir.value is None:
raise CommandFailure("Mount point not specified, "
"check test yaml file")
dir_exists, _ = general_utils.check_file_exists(
self.hosts, self.mount_dir.value, directory=True)
if dir_exists:
cmd = "rm -rf {}".format(self.mount_dir.value)
ret_code = general_utils.pcmd(self.hosts, cmd, timeout=30)
if 0 not in ret_code:
error_hosts = NodeSet(
",".join(
[str(node_set) for code, node_set in ret_code.items()
if code != 0]))
raise CommandFailure(
"Error removing the {} dfuse mount point on the following "
"hosts: {}".format(self.mount_dir.value, error_hosts))
def run(self):
""" Run the dfuse command.
Raises:
CommandFailure: In case dfuse run command fails
"""
# create dfuse dir if does not exist
self.create_mount_point()
# obtain env export string
env = self.get_default_env()
# run dfuse command
ret_code = general_utils.pcmd(self.hosts, env + self.__str__(),
timeout=30)
# check for any failures
if 0 not in ret_code:
error_hosts = NodeSet(
",".join(
[str(node_set) for code, node_set in ret_code.items()
if code != 0]))
raise CommandFailure(
"Error starting dfuse on the following hosts: {}".format(
error_hosts))
def stop(self):
"""Stop dfuse
Raises:
CommandFailure: In case dfuse stop fails
"""
        cmd = 'if [ -x "$(command -v fusermount)" ]; '
cmd += "then fusermount -u {0}; else fusermount3 -u {0}; fi".\
format(self.mount_dir.value)
ret_code = general_utils.pcmd(self.hosts, cmd, timeout=30)
self.remove_mount_point()
if 0 not in ret_code:
error_hosts = NodeSet(
",".join(
[str(node_set) for code, node_set in ret_code.items()
if code != 0]))
raise CommandFailure(
"Error stopping dfuse on the following hosts: {}".format(
error_hosts))
    def get_default_env(self):
        """Get the default environment settings for running Dfuse.
Returns:
(str): a single string of all env vars to be
exported
"""
# obtain any env variables to be exported
env = EnvironmentVariables()
env["CRT_ATTACH_INFO_PATH"] = self.tmp
env["DAOS_SINGLETON_CLI"] = 1
if self.dfuse_env:
try:
with open('{}/{}'.format(self.tmp, AVOCADO_FILE),
'r') as read_file:
for line in read_file:
if ("provider" in line) or ("fabric_iface" in line):
items = line.split()
key, values = items[0][:-1], items[1]
env[key] = values
env['OFI_INTERFACE'] = env.pop('fabric_iface')
env['OFI_PORT'] = env.pop('fabric_iface_port')
env['CRT_PHY_ADDR_STR'] = env.pop('provider')
except Exception as err:
raise CommandFailure("Failed to read yaml file:{}".format(err))
return env.get_export_str()
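# Hedged usage sketch (hosts, tmp dir and mount point are hypothetical; `pool`
# is a TestPool from the test harness): the usual lifecycle is create ->
# set params -> run -> stop.
def _demo_dfuse_lifecycle(pool):
    dfuse = Dfuse(hosts=["node-1", "node-2"], tmp="/tmp/daos_test")
    dfuse.set_dfuse_params(pool)
    dfuse.mount_dir.update("/tmp/dfuse_mnt", "mount_dir")
    dfuse.run()
    dfuse.stop()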
| python |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''Generate DV code for an IP block'''
import logging as log
import os
import sys
from collections import defaultdict
from typing import Dict, List, Union, Optional
import yaml
from mako import exceptions # type: ignore
from mako.lookup import TemplateLookup # type: ignore
from pkg_resources import resource_filename
from .ip_block import IpBlock
from .multi_register import MultiRegister
from .register import Register
from .window import Window
class DvBaseNames:
# Class global attributes
valid_types = ["pkg", "block", "reg", "field", "mem", "all"]
def __init__(self) -> None:
self.with_prefix("dv_base")
def with_prefix(self, prefix: str) -> None:
self.pkg = prefix + "_reg_pkg"
self.block = prefix + "_reg_block"
self.reg = prefix + "_reg"
self.field = prefix + "_reg_field"
self.mem = prefix + "_mem"
def set_entity(self, base_type: str, entity: str) -> None:
assert base_type in self.valid_types, f"Invalid argument type: {base_type}"
if base_type == "all":
self.with_prefix(entity)
else:
setattr(self, base_type, entity)
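# Hedged usage sketch: override a single base name, or all of them at once from
# a common prefix (entity names taken from the command-line example further below).
def _demo_dv_base_names() -> DvBaseNames:
    names = DvBaseNames()
    names.set_entity("block", "ast_base_reg_block")   # only the reg-block base class
    names.set_entity("all", "otp_ctrl_base")          # rewrite every name from this prefix
    return names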
def bcname(esc_if_name: str) -> str:
'''Get the name of the dv_base_reg_block subclass for this device interface'''
return esc_if_name + "_reg_block"
def rcname(esc_if_name: str, r: Union[Register, MultiRegister]) -> str:
'''Get the name of the dv_base_reg subclass for this register'''
return '{}_reg_{}'.format(esc_if_name, r.name.lower())
def alias_rcname(esc_if_name: str,
r: Union[Register, MultiRegister]) -> Optional[str]:
'''Get the name of the dv_base_reg subclass for this alias register'''
if r.alias_target is not None:
return '{}_reg_{}'.format(esc_if_name, r.alias_target.lower())
else:
return None
def mcname(esc_if_name: str, m: Window) -> str:
'''Get the name of the dv_base_mem subclass for this memory'''
return '{}_mem_{}'.format(esc_if_name, m.name.lower())
def miname(m: Window) -> str:
'''Get the lower-case name of a memory block'''
return m.name.lower()
def gen_core_file(outdir: str,
lblock: str,
dv_base_names: List[str],
paths: List[str]) -> None:
depends = ["lowrisc:dv:dv_base_reg"]
blocks_base_names = get_dv_base_names_objects(dv_base_names)
if blocks_base_names is not None:
        # Assume the core file naming convention is the package name without `_pkg`
# suffix.
for block in blocks_base_names:
pkg_name = blocks_base_names[block].pkg
depends.append("lowrisc:dv:{}".format(pkg_name[:-4]))
# Generate a fusesoc core file that points at the files we've just
# generated.
core_data = {
'name': "lowrisc:dv:{}_ral_pkg".format(lblock),
'filesets': {
'files_dv': {
'depend': depends,
'files': paths,
'file_type': 'systemVerilogSource'
},
},
'targets': {
'default': {
'filesets': [
'files_dv',
],
},
},
}
core_file_path = os.path.join(outdir, lblock + '_ral_pkg.core')
with open(core_file_path, 'w') as core_file:
core_file.write('CAPI=2:\n')
yaml.dump(core_data, core_file, encoding='utf-8')
def get_dv_base_names_objects(dv_base_names: List[str]) -> Dict[str, DvBaseNames]:
    '''Return a dictionary mapping each block name to a `DvBaseNames` object.
    `dv_base_names` is a list of base class entity names provided on the command-line, in the
following format:
ast:block:ast_base_reg_block ast:pkg:ast_base_reg_pkg otp_ctrl:all:otp_ctrl_base
    This function wraps the base class overrides provided for each block in a
    `DvBaseNames` object and returns a dictionary mapping each block name to that object.
'''
if dv_base_names is None:
return None
dv_base_names_dict = defaultdict(DvBaseNames) # type: Dict[str, DvBaseNames]
for item in dv_base_names:
try:
block, base_type, entity = item.split(":")
except ValueError:
log.error(f"Bad input arg: {item}")
sys.exit(1)
dv_base_names_dict[block].set_entity(base_type, entity)
return dv_base_names_dict
def get_block_base_name(dv_base_names_map: Dict[str, DvBaseNames], block: str) -> DvBaseNames:
    '''Given a dictionary of `DvBaseNames`, return the `DvBaseNames` object for a specific block.
If the given dictionary is empty, or cannot find the block name in the list of dictionary keys,
this function will return the default `DvBaseNames` object.
'''
if dv_base_names_map is None:
return DvBaseNames()
try:
return dv_base_names_map[block]
except KeyError:
return DvBaseNames()
def gen_dv(block: IpBlock, dv_base_names: List[str], outdir: str) -> int:
'''Generate DV files for an IpBlock'''
lookup = TemplateLookup(directories=[resource_filename('reggen', '.')])
uvm_reg_tpl = lookup.get_template('uvm_reg.sv.tpl')
# Generate the RAL package(s). For a device interface with no name we
# generate the package "<block>_ral_pkg" (writing to <block>_ral_pkg.sv).
# In any other case, we also need the interface name, giving
# <block>_<ifname>_ral_pkg.
generated = []
lblock = block.name.lower()
dv_base_names_map = get_dv_base_names_objects(dv_base_names)
block_dv_base_names = get_block_base_name(dv_base_names_map, lblock)
device_hier_paths = block.bus_interfaces.device_hier_paths
for if_name, rb in block.reg_blocks.items():
hier_path = device_hier_paths[if_name]
if_suffix = '' if if_name is None else '_' + if_name.lower()
mod_base = lblock + if_suffix
reg_block_path = hier_path + if_suffix
file_name = mod_base + '_ral_pkg.sv'
generated.append(file_name)
reg_top_path = os.path.join(outdir, file_name)
with open(reg_top_path, 'w', encoding='UTF-8') as fout:
try:
fout.write(uvm_reg_tpl.render(rb=rb,
block=block,
esc_if_name=mod_base,
reg_block_path=reg_block_path,
dv_base_names=block_dv_base_names))
except: # noqa F722 for template Exception handling
log.error(exceptions.text_error_template().render())
return 1
gen_core_file(outdir, lblock, dv_base_names, generated)
return 0
| python |
import cocotb
from lib.util import assertions
from lib.cycle import wait, clock
@cocotb.test()
def memory_address_register(dut):
def assert_o_address(value, error_msg):
"""Check the output address"""
assertions.assertEqual(dut.o_address.value.binstr, value, error_msg)
# Test initialization
yield from wait()
assert_o_address('0000', 'Address should initialize to 0000')
dut.i_reset = 0
# Set the input address to store
dut.i_address = 0b0100
yield from wait()
assert_o_address('0000', 'Address should not change until enabled')
# Enable the input
dut.i_enable_in = 1
yield from wait()
assert_o_address('0100', 'Address should change to 0100')
# Reset the address
dut.i_reset = 1
yield from wait()
assert_o_address('0000', 'Address should reset to 0000')
dut.i_reset = 0
yield from wait()
| python |
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
from modules.networks import LinearGaussianTree, TriResNet, ASVIupdate
from modules.models import ColliderModel, MeanField, GlobalFlow, MultivariateNormal
from modules.distributions import NormalDistribution
from modules.eval_utils import evaluate_multi_likelihood
# Parameters
depth = 2 #3
#join_link = lambda x, y: x - y
join_link = lambda x, y, k=2.: torch.tanh(k*x) - torch.tanh(k*y)
dist = NormalDistribution()
num_iterations = 7000 #10000
batch_size = 80
sigma = 0.05
in_sigma= 0.1 #0.2
num_samples = 20000
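# Hedged illustration of the tanh collider link defined above: it saturates, so
# for large |x| the observation depends on the parents mostly through their signs.
def _demo_join_link():
    a, b = torch.tensor(2.0), torch.tensor(-2.0)
    return join_link(a, b)   # close to tanh(4) - tanh(-4), i.e. roughly 2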
# Prior model
prior_model = ColliderModel(depth=depth, sigma=sigma, in_sigma=in_sigma, join_link=join_link,
transition_distribution=NormalDistribution())
# Data
true_smpl,_,_,_ = prior_model.sample(1)
pr_smpl,_,_,_ = prior_model.sample(num_samples)
value = join_link(true_smpl[-1][0,-2], true_smpl[-1][0,-1]).detach().numpy() + np.random.normal(0,sigma)
print(value)
y = torch.tensor(np.array([value])).type(torch.float32)
num_repetitions = 15
print("Depth: {}".format(depth))
lk_list = []
lk_asvi_list = []
lk_mf_list = []
lk_gf_list = []
lk_mn_list = []
for _ in range(num_repetitions):
### Cascading flows ###
d_eps = 10
tree = LinearGaussianTree(node_size=d_eps,depth=depth,in_scale=0.3,scale=0.5, in_w = 4.) #3
transformations = [TriResNet(d_x=1, d_epsilon=d_eps, epsilon_nu=0., in_pre_lambda=3., scale_w=0.8,) for _ in range(2**depth-1)] #0.8
post_model = ColliderModel(depth=depth, sigma=sigma, in_sigma=in_sigma, join_link=join_link,
transition_distribution=dist,
transformations=transformations, eps_generator=tree)
loss_list1 = []
parames_list = [tr.parameters() for tr in transformations] #+ [tree.parameters()]
params = []
for p in parames_list:
params += p
optimizer = optim.Adam(params, lr=0.001)
print("Train Cascading Flow model")
for itr in tqdm(range(num_iterations)):
# Gradient reset
optimizer.zero_grad()
# Variational loss
samples, samples_pre, log_jacobian, epsilon_loss = post_model.sample(batch_size)
log_q = post_model.evaluate_avg_joint_log_prob(samples, None, samples_pre, log_jacobian=log_jacobian
,epsilon_loss=epsilon_loss)
log_p = prior_model.evaluate_avg_joint_log_prob(samples, y)
loss = (log_q - log_p)
# Update
loss.backward()
optimizer.step()
loss_list1.append(float(loss.detach().numpy()))
#if itr % 100 == 0:
# print(tree.weights)
### ASVI ###
mu_transformations = [ASVIupdate(l_init=3.) for _ in range(2**depth-1)]
post_model_asvi = ColliderModel(depth=depth, sigma=sigma, in_sigma=in_sigma, join_link=join_link,
transition_distribution=dist,
mu_transformations=mu_transformations)
loss_list2 = []
parames_list = [tr.parameters() for tr in mu_transformations]
params = []
for p in parames_list:
params += p
optimizer = optim.Adam(params, lr=0.001)
    print("Train ASVI model")
for itr in tqdm(range(num_iterations)):
# Gradient reset
optimizer.zero_grad()
# Variational loss
samples, samples_pre, log_jacobian, epsilon_loss = post_model_asvi.sample(batch_size)
log_q = post_model_asvi.evaluate_avg_joint_log_prob(samples, None, samples_pre, log_jacobian=log_jacobian,
epsilon_loss=epsilon_loss)
log_p = prior_model.evaluate_avg_joint_log_prob(samples, y)
loss = (log_q - log_p)
# Update
loss.backward()
optimizer.step()
loss_list2.append(float(loss.detach().numpy()))
#
### Mean field ###
post_model_mf = MeanField(T=2**depth-2, d_x=1)
loss_list3 = []
parames_list = [post_model_mf.parameters()]
params = []
for p in parames_list:
params += p
optimizer = optim.Adam(params, lr=0.001)
print("Train Mean Field model")
for itr in tqdm(range(num_iterations)):
# Gradient reset
optimizer.zero_grad()
# Variational loss
x, _, _, _, _ = post_model_mf.sample_timeseries(batch_size)
samples = post_model_mf.reshape_collider_samples(x, depth)
log_q = post_model_mf.evaluate_avg_joint_log_prob(x, None, 0.)
log_p = prior_model.evaluate_avg_joint_log_prob(samples, y)
loss = (log_q - log_p)
# Update
loss.backward()
optimizer.step()
loss_list3.append(float(loss.detach().numpy()))
#
### Global flow ###
post_model_gf = GlobalFlow(T=2**depth-2, d_x=1, d_eps=5)
loss_list4 = []
parames_list = [post_model_gf.parameters()]
params = []
for p in parames_list:
params += p
optimizer = optim.Adam(params, lr=0.001)
print("Train Global flow")
for itr in tqdm(range(num_iterations)):
# Gradient reset
optimizer.zero_grad()
# Variational loss
x, _, samples_pre, log_jacobian, epsilon_loss = post_model_gf.sample_timeseries(batch_size)
samples = post_model_gf.reshape_collider_samples(x, depth)
log_q = post_model_gf.evaluate_avg_joint_log_prob(x, None, 0., samples_pre, log_jacobian=log_jacobian)
# , epsilon_loss=epsilon_loss)
log_p = prior_model.evaluate_avg_joint_log_prob(samples, y)
loss = (log_q - log_p)
# Update
loss.backward()
optimizer.step()
loss_list4.append(float(loss.detach().numpy()))
### Multivariate Normal ###
post_model_mn = MultivariateNormal(T=2**depth-2, d_x=1)
loss_list5 = []
parames_list = [post_model_mn.parameters()]
params = []
for p in parames_list:
params += p
optimizer = optim.Adam(params, lr=0.001)
    print("Train Multivariate Normal model")
for itr in tqdm(range(num_iterations)):
# Gradient reset
optimizer.zero_grad()
# Variational loss
x, _, _, _, _ = post_model_mn.sample_timeseries(batch_size)
samples = post_model_mn.reshape_collider_samples(x, depth)
log_q = post_model_mn.evaluate_avg_joint_log_prob(x, None, 0.)
log_p = prior_model.evaluate_avg_joint_log_prob(samples, y)
loss = (log_q - log_p)
# Update
loss.backward()
optimizer.step()
loss_list5.append(float(loss.detach().numpy()))
# Performance metrics
#evaluate_likelihood(X, x_true)
#uni_lk, multi_lk, pred = evaluate_model(variational_model, X_true, M=5000,
# emission_model=emission_model,
# emission_distribution=emission_dist,
# scale=lk_sigma, out_data=out_data, T_data=T_data)
#plt.plot(loss_list1)
#plt.plot(loss_list2)
#plt.plot(loss_list3)
#plt.plot(loss_list4)
#plt.show()
corr_list = []
N_itr = 10
# CF
smpl,_,_,_ = post_model.sample(num_samples)
smpl = torch.cat(smpl,1).detach().numpy()
# ASVI
smpl_asvi, _, _, _ = post_model_asvi.sample(num_samples)
smpl_asvi = torch.cat(smpl_asvi, 1).detach().numpy()
# MF
smpl_mf,_,_,_,_ = post_model_mf.sample_timeseries(num_samples)
smpl_mf = smpl_mf.squeeze().detach().numpy()
#GF
smpl_gf,_,_,_,_ = post_model_gf.sample_timeseries(num_samples)
smpl_gf = smpl_gf.squeeze().detach().numpy()
#MN
smpl_mn,_,_,_,_ = post_model_mn.sample_timeseries(num_samples)
smpl_mn = smpl_mn.squeeze().detach().numpy()
re_true_smpl = torch.cat(true_smpl,1).detach().numpy()
lk = evaluate_multi_likelihood(smpl, re_true_smpl)
lk_asvi = evaluate_multi_likelihood(smpl_asvi, re_true_smpl)
lk_mf = evaluate_multi_likelihood(smpl_mf, re_true_smpl)
lk_gf = evaluate_multi_likelihood(smpl_gf, re_true_smpl)
lk_mn = evaluate_multi_likelihood(smpl_mn, re_true_smpl)
print("CF likelihood: {}".format(lk))
print("ASVI likelihood: {}".format(lk_asvi))
print("MF likelihood: {}".format(lk_mf))
print("GF likelihood: {}".format(lk_gf))
print("MN likelihood: {}".format(lk_mn))
lk_list.append(lk)
lk_asvi_list.append(lk_asvi)
lk_mf_list.append(lk_mf)
lk_gf_list.append(lk_gf)
lk_mn_list.append(lk_mn)
# corr1 = [np.corrcoef(smpl[:,-1], smpl[:,k])[0,1] for k in range(smpl.shape[1])]
# #corr2 = [np.corrcoef(smpl_cfn[:,-1], smpl_cfn[:,k])[0,1] for k in range(smpl.shape[1])]
# p_smpl = torch.cat(pr_smpl,1)
# pr_corr = [np.corrcoef(p_smpl[:,-1], p_smpl[:,k])[0,1] for k in range(smpl.shape[1])]
# plt.plot(corr1, c="r")
# #plt.plot(corr2, c="m")
# plt.plot(pr_corr, c="k", ls="--")
# plt.axhline(y=0., color='k', linestyle='--', lw=2)
# plt.show()
#
# ## True posterior ##
# density = lambda x,y,s=in_sigma: np.exp(-(x**2+y**2)/(2*s**2))/np.sqrt(2*np.pi*s**2)
# mu_link = lambda x,y: join_link(x,y)
# s_link = lambda x,y: sigma
# lk = lambda x,y,z: np.exp(-(z - mu_link(x,y))**2/(2*s_link(x,y)**2))/np.sqrt(2*np.pi*s_link(x,y)**2)
# post = lambda x,y,z: density(x,y)*lk(x,y,z)
#
# d = 4.
# M = 300
# x_range = np.linspace(-d,d,M)
# y_range = np.linspace(-d,d,M)
#
# mesh1, mesh2 = np.meshgrid(x_range, y_range)
#
# data = value
# posterior = density(mesh1, mesh2)*lk(mesh1,mesh2,data)
# posterior = posterior/np.sum(posterior)
#
# plt.imshow(posterior, extent=[-d,d,-d,d], origin="lower", cmap="Greys")
# plt.scatter((smpl[:,-2]), (smpl[:,-1]), c="r", alpha=0.002)
# plt.scatter((true_smpl[-1][:,-2]), (true_smpl[-1][:,-1]), c="k")
# plt.xlim(-d,d)
# plt.ylim(-d,d)
# plt.show()
#
# plt.imshow(posterior, extent=[-d,d,-d,d], origin="lower", cmap="Greys")
# plt.scatter((smpl_mf[:,-2]), (smpl_mf[:,-1]), c="b", alpha=0.002)
# plt.scatter((true_smpl[-1][:,-2]), (true_smpl[-1][:,-1]), c="k")
# plt.xlim(-d,d)
# plt.ylim(-d,d)
# plt.show()
#
# plt.imshow(posterior, extent=[-d,d,-d,d], origin="lower", cmap="Greys")
# plt.scatter((smpl_mn[:,-2]), (smpl_mn[:,-1]), c="g", alpha=0.002)
# plt.scatter((true_smpl[-1][:,-2]), (true_smpl[-1][:,-1]), c="k")
# plt.xlim(-d,d)
# plt.ylim(-d,d)
# plt.show()
#
# plt.imshow(posterior, extent=[-d,d,-d,d], origin="lower", cmap="Greys")
# plt.scatter((smpl_gf[:,-2]), (smpl_gf[:,-1]), c="c", alpha=0.002)
# plt.scatter((true_smpl[-1][:,-2]), (true_smpl[-1][:,-1]), c="k")
# plt.xlim(-d,d)
# plt.ylim(-d,d)
# plt.show()
#
# # plt.scatter((pr_smpl[-1][:,-1]), (pr_smpl[-1][:,-2]), c="b", alpha=0.01)
# # plt.scatter((smpl_cfn[:,-1]), (smpl_cfn[:,-2]), c="m", alpha=0.01)
# # plt.scatter((true_smpl[-1][:,-1]), (true_smpl[-1][:,-2]), c="k")
# # plt.show()
# #
# # plt.scatter((pr_smpl[-1][:,-1]), (pr_smpl[-1][:,-2]), c="b", alpha=0.01)
# # plt.scatter((smpl_mf[:,-1]), (smpl_mf[:,-2]), c="g", alpha=0.01)
# # plt.scatter((true_smpl[-1][:,-1]), (true_smpl[-1][:,-2]), c="k")
# # plt.show()
# #
# #plt.scatter((pr_smpl[-1][:,-1]), (pr_smpl[-1][:,-2]), c="b", alpha=0.01)
# #plt.scatter((smpl_gf[:,-1]), (smpl[:,-2]), c="c", alpha=0.01)
# #plt.scatter((true_smpl[-1][:,-1]), (true_smpl[-1][:,-2]), c="k")
# #plt.show()
# #
# # #plt.hist(join_link(pr_smpl[-1][:,-1],pr_smpl[-1][:,-2]),30, c="b")
# plt.hist(join_link(smpl[:,-2],smpl[:,-1]),30, alpha=0.5, color="r")
# # plt.hist(join_link(smpl_cfn[:,-1],smpl_cfn[:,-2]),30, alpha=0.5, color="m")
# # plt.hist(join_link(smpl_mf[:,-1],smpl_mf[:,-2]),30, alpha=0.5, color="g")
# #plt.hist(join_link(smpl_gf[:,-1],smpl_gf[:,-2]),30, alpha=0.5, color="c")
# plt.axvline(x=value, color='k', linestyle='--', lw=2)
# plt.show()
print("Mean CF likelihood: {} +/- {}".format(np.mean(lk_list), np.std(lk_list)/np.sqrt(num_repetitions)))
print("Mean ASVI likelihood: {} +/- {}".format(np.mean(lk_asvi_list), np.std(lk_asvi_list)/np.sqrt(num_repetitions)))
print("Mean MF likelihood: {} +/- {}".format(np.mean(lk_mf_list), np.std(lk_mf_list)/np.sqrt(num_repetitions)))
print("Mean GF likelihood: {} +/- {}".format(np.mean(lk_gf_list), np.std(lk_gf_list)/np.sqrt(num_repetitions)))
print("Mean MN likelihood: {} +/- {}".format(np.mean(lk_mn_list), np.std(lk_mn_list)/np.sqrt(num_repetitions)))
| python |
from django.contrib import admin
from .models import ScrumyUser, ScrumyGoals, GoalStatus
# Register your models here.
myModels = [ScrumyUser, ScrumyGoals, GoalStatus]
admin.site.register(myModels)
| python |
import re
from itertools import izip_longest
def percent(num, den):
return '%2.0f%%' % ((float(num)/den) * 100)
def parse(fname, level=2):
f = file(fname)
c = f.read()
f.close()
num_lines = len(c.split('\n'))
headings = []
print 'num lines', num_lines
regexp = '#{1,%s}\s' % level
for i, line in enumerate(c.split('\n')):
if re.match(regexp, line):
amount_through = percent(i, num_lines)
headings.append( (amount_through, line) )
return headings
def markdown(player, gm):
collection = izip_longest(gm, player)
for p, g, c in collection:
pp = '%s %s' % (p or ('',''))
gg = '%s %s' % (g or ('',''))
cc = '%s %s' % (c or ('',''))
print '| `%s` | `%s` | `%s` |' % (pp, gg, cc)
player = parse('mod_guide_player.md')
gm = parse('mod_guide_gm.md')
markdown(player, gm)
| python |
import glob
import os
from time import sleep, ctime
PATH = r"C:\Users\timmo\Downloads\*"
list_of_files = glob.glob(PATH)
latest_file = max(list_of_files, key=os.path.getctime)
latest_mod = os.path.getctime(latest_file)
latest_mod = ctime(latest_mod)
#latest_mod = datetime.fromtimestamp(latest_mod).strftime('%Y-%m-%d %H:%M:%S')
print(latest_file)
print(latest_mod) | python |
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import discord
from .errors import MissingRequiredArgument
__all__ = (
'CustomDefault',
'Author',
'CurrentChannel',
'CurrentGuild',
'Call',
)
class CustomDefaultMeta(type):
def __new__(cls, *args, **kwargs):
name, bases, attrs = args
attrs['display'] = kwargs.pop('display', name)
return super().__new__(cls, name, bases, attrs, **kwargs)
def __repr__(cls):
return str(cls)
def __str__(cls):
return cls.display
class CustomDefault(metaclass=CustomDefaultMeta):
"""The base class of custom defaults that require the :class:`.Context`.
Classes that derive from this should override the :attr:`~.CustomDefault.converters` attribute to specify
converters to use and the :meth:`~.CustomDefault.default` method to do its conversion logic.
This method must be a coroutine.
"""
converters = (str,)
async def default(self, ctx, param):
"""|coro|
The method to override to do conversion logic.
If an error is found while converting, it is recommended to
raise a :exc:`.CommandError` derived exception as it will
properly propagate to the error handlers.
Parameters
-----------
ctx: :class:`.Context`
The invocation context that the argument is being used in.
"""
raise NotImplementedError('Derived classes need to implement this.')
class Author(CustomDefault):
"""Default parameter which returns the author for this context."""
converters = (discord.Member, discord.User)
async def default(self, ctx, param):
return ctx.author
class CurrentChannel(CustomDefault):
"""Default parameter which returns the channel for this context."""
converters = (discord.TextChannel,)
async def default(self, ctx, param):
return ctx.channel
class CurrentGuild(CustomDefault):
"""Default parameter which returns the guild for this context."""
async def default(self, ctx, param):
if ctx.guild:
return ctx.guild
raise MissingRequiredArgument(param)
class Call(CustomDefault):
"""Easy wrapper for lambdas/inline defaults."""
def __init__(self, callback):
self._callback = callback
async def default(self, ctx, param):
return self._callback(ctx, param)
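# Hedged usage sketch (not part of the shipped defaults): a custom default that
# returns the first mention of the invoking message, mirroring the pattern of the
# classes above.
class FirstMention(CustomDefault):
    """Example default which returns the first member mentioned in the message."""
    converters = (discord.Member,)
    async def default(self, ctx, param):
        if ctx.message.mentions:
            return ctx.message.mentions[0]
        raise MissingRequiredArgument(param)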
| python |
import FWCore.ParameterSet.Config as cms
from ..modules.hltEgammaCandidatesL1Seeded_cfi import *
from ..modules.hltEgammaHGCALIDVarsL1Seeded_cfi import *
from ..modules.hltEgammaHoverEL1Seeded_cfi import *
HLTPhoton187L1SeededTask = cms.Task(
hltEgammaCandidatesL1Seeded,
hltEgammaHGCALIDVarsL1Seeded,
hltEgammaHoverEL1Seeded
)
| python |
#!/usr/bin/env python3
# pylint: disable=C0103
"""Gets coordination environment and corresponding CSM."""
from pymatgen import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.chemenv.coordination_environments\
.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments\
.chemenv_strategies import MultiWeightsChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments\
.structure_environments import LightStructureEnvironments
def get_cesym(lgf, structure, site):
"""See module docstring."""
# doc: http://pymatgen.org/_modules/pymatgen/analysis/chemenv/
# coordination_environments/coordination_geometry_finder.html
lgf.setup_structure(structure)
# doc: http://pymatgen.org/_modules/pymatgen/analysis/chemenv/
# coordination_environments/
# chemenv_strategies.html#MultiWeightsChemenvStrategy.
# stats_article_weights_parameters
strategy = MultiWeightsChemenvStrategy.stats_article_weights_parameters()
# returns all information about the structure; se is a structure object
se = lgf.compute_structure_environments(maximum_distance_factor=1.2,
only_cations=False,
only_indices=[site])
lse = LightStructureEnvironments.\
from_structure_environments(strategy=strategy,
structure_environments=se)
coor = lse.coordination_environments
# ce = chemical environment
# csm = continuous symmetry measure
# from Waroquiers et al (verbatim)
# DOI: 10.1021/acs.chemmater.7b02766
# "The environment of the atom is then the model polyhedron for which
# the similarity is the highest, that is, for which the CSM is the lowest."
# in this case, it looks like O:6 (octahedral?)
return [coor[site][0]['ce_symbol'], coor[site][0]['csm']]
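# Hedged usage sketch (the CIF file name is hypothetical): set up the geometry
# finder once, then query the coordination environment of site 0.
def _demo_get_cesym():
    structure = Structure.from_file("LiFePO4.cif")
    lgf = LocalGeometryFinder()
    ce_symbol, csm = get_cesym(lgf, structure, site=0)
    return ce_symbol, csm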
| python |
from .conv import *
from .cell import *
from .mix_ops import *
from .prune import *
from .ops import *
| python |
import binascii
import binance.crypto
import binance.message
from .signature import *
from .transaction import *
class TransactionEncoder(object):
def __init__(self, wallet, memo="", source=0, data=None):
self.wallet = wallet
self.memo = memo
self.source = source
self.data = data
def sign(self, message):
"""
Sign message.
Args:
message (Message): The message to sign.
Returns:
bytes: The message signature.
"""
# get sign data with message
sign_data = binance.crypto.get_sign_data(wallet=self.wallet,
msgs=[message],
memo=self.memo,
source=self.source,
data=self.data)
# sign encoded JSON to bytes
return binance.crypto.generate_signature_for_message(
self.wallet.private_key,
binance.crypto.get_json_bytes_for_sign_data(sign_data)
)
def create_transaction(self, message, signature):
transaction = Transaction(memo=self.memo,
source=self.source,
data=b'' if self.data is None else self.data)
transaction.add_message(message)
transaction.add_signature(Signature(public_key=self.wallet.public_key,
signature=signature,
account_number=self.wallet.account_number,
sequence=self.wallet.sequence))
return transaction
def create_new_order_message(self,
symbol,
order_type,
order_side,
price,
quantity,
time_in_force):
"""
Create New Order Message from parameters.
Args:
symbol (str): Symbol for trading pair in full name of the tokens.
order_type (OrderType): The order type.
order_side (OrderSide): The order side.
price (int): Price of the order, which is the real price
multiplied by 1e8 (10^8) and rounded to integer.
            quantity (int): Quantity of the order, which is the real quantity
                multiplied by 1e8 (10^8) and rounded to integer.
time_in_force (TimeInForce): The time in force.
Returns:
NewOrderMessage: The created message object.
"""
# get compressed address
address = binascii.hexlify(
binance.crypto.get_address_in_bytes(self.wallet.address)
).decode()
# create order ID from compressed address and sequence ID
order_id = address.upper() + '-' + str(self.wallet.sequence + 1)
return binance.message.NewOrderMessage(
id=order_id,
sender=self.wallet.address,
symbol=symbol,
order_type=order_type,
order_side=order_side,
price=price,
quantity=quantity,
time_in_force=time_in_force
)
def create_cancel_order_message(self,
ref_id,
symbol):
"""
Create Cancel Order Message from parameters.
Args:
symbol (str): Symbol for trading pair in full name of the tokens.
ref_id (str): The order ID of the one to cancel.
Returns:
CancelOrderMessage: The created message object.
"""
return binance.message.CancelOrderMessage(
sender=self.wallet.address,
ref_id=ref_id,
symbol=symbol
)
def create_token_freeze_message(self,
symbol,
amount):
"""
Create Token Freeze from parameters.
Args:
symbol (str): Symbol for trading pair in full name of the tokens.
amount (str): The amount of tokens to freeze.
Returns:
TokenFreezeMessage: The created message object.
"""
return binance.message.TokenFreezeMessage(
sender=self.wallet.address,
amount=amount,
symbol=symbol
)
def create_token_unfreeze_message(self,
symbol,
amount):
"""
Create Token Unfreeze Message from parameters.
Args:
symbol (str): Symbol for trading pair in full name of the tokens.
            amount (str): The amount of tokens to unfreeze.
Returns:
TokenUnfreezeMessage: The created message object.
"""
return binance.message.TokenUnfreezeMessage(
sender=self.wallet.address,
amount=amount,
symbol=symbol
)
def create_vote_message(self,
proposal_id,
option_set):
"""
Create Vote Message from parameters.
Args:
proposal_id (int): The ID of the proposal.
option_set (VoteOption): The vote option.
Returns:
VoteMessage: The created message object.
"""
return binance.message.VoteMessage(
voter=self.wallet.address,
proposal_id=proposal_id,
option_set=option_set
)
def create_transfer_message(self,
coin,
amount,
recipient_address,
sender_address=None):
"""
Create Transfer Message from parameters.
Args:
coin (str): The coin symbol (e.g. BTC, ETH, BNB, etc.).
amount (int): The amount of tokens to transfer.
recipient_address (str): The recipient's address.
sender_address (str): The sender's address (defaults to wallet's
address).
Returns:
TransferMessage: The created message object.
"""
# default to wallet's address when unspecified
if sender_address is None:
sender_address = self.wallet.address
return binance.message.TransferMessage(
coin=coin,
amount=amount,
sender_address=sender_address,
recipient_address=recipient_address
)
| python |
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from os.path import dirname, join
class CryptographyRecipe(CompiledComponentsPythonRecipe):
name = 'cryptography'
version = '1.4'
url = 'https://github.com/pyca/cryptography/archive/{version}.tar.gz'
depends = [('python2', 'python3crystax'), 'openssl', 'idna', 'pyasn1', 'six', 'setuptools', 'enum34', 'ipaddress', 'cffi']
call_hostpython_via_targetpython = False
def get_recipe_env(self, arch):
env = super(CryptographyRecipe, self).get_recipe_env(arch)
r = self.get_recipe('openssl', self.ctx)
openssl_dir = r.get_build_dir(arch.arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['CFLAGS'] += ' -I' + env['PYTHON_ROOT'] + '/include/python2.7' + \
' -I' + join(openssl_dir, 'include')
# Set linker to use the correct gcc
env['LDSHARED'] = env['CC'] + ' -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions'
env['LDFLAGS'] += ' -L' + env['PYTHON_ROOT'] + '/lib' + \
' -L' + openssl_dir + \
' -lpython2.7' + \
' -lssl' + r.version + \
' -lcrypto' + r.version
return env
recipe = CryptographyRecipe()
| python |
"""empty message
Revision ID: 096057bb3435
Revises: 2daaf569f64d
Create Date: 2021-09-19 01:29:38.703707
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '096057bb3435'
down_revision = '2daaf569f64d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('Auth0user',
sa.Column('id', sa.String(), nullable=False),
sa.Column('username', sa.String(length=200), nullable=False),
sa.Column('email', sa.String(length=200), nullable=False),
sa.Column('picture', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('picture'),
sa.UniqueConstraint('username')
)
op.add_column('Mygame', sa.Column('owner', sa.String(), nullable=False))
op.create_foreign_key(None, 'Mygame', 'Auth0user', ['owner'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
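    # The foreign key in upgrade() is created with name=None, so this auto-generated
    # drop_constraint(None, ...) will likely need an explicit constraint name on most
    # backends, depending on the database's naming convention.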
op.drop_constraint(None, 'Mygame', type_='foreignkey')
op.drop_column('Mygame', 'owner')
op.drop_table('Auth0user')
# ### end Alembic commands ###
| python |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 27 12:05:05 2014
@author: dreymond
"""
import json
import pickle
import os
import codecs
#import bs4
from Patent2Net.P2N_Lib import LoadBiblioFile, Decoupe, UnNest3, UrlInventorBuild, UrlApplicantBuild, UrlIPCRBuild
from Patent2Net.P2N_Config import LoadConfig
import datetime
aujourd = datetime.date.today()
configFile = LoadConfig()
requete = configFile.requete
ndf = configFile.ndf
Gather = configFile.GatherContent
GatherBiblio = configFile.GatherBiblio
GatherPatent = configFile.GatherPatent
IsEnableScript = configFile.FormateExportBiblio
GatherFamilly = configFile.GatherFamilly
ListBiblioPath = configFile.ResultBiblioPath
ResultPathContent = configFile.ResultPath
temporPath = configFile.temporPath
if IsEnableScript:
    # the list of keys used to filter the bibliographic data for the datatable
clesRef = ['label', 'title', 'year','priority-active-indicator',
'IPCR11', 'kind', 'applicant', 'country', 'inventor', 'representative', 'IPCR4',
'IPCR7', "Inventor-Country", "Applicant-Country", "equivalents", "CPC", 'references', 'Citations', 'CitedBy']
prefixes = [""]
if GatherFamilly:
prefixes.append("Families")
for prefix in prefixes:
ndf = prefix + configFile.ndf
if 'Description'+ndf in os.listdir(ListBiblioPath): # NEW 12/12/15 new gatherer append data to pickle file in order to consume less memory
LstBrevet = LoadBiblioFile(ListBiblioPath, ndf)
with open(ListBiblioPath +'//Description'+ndf, 'rb') as ficRes:
DataBrevet = pickle.load(ficRes)
else: #Retrocompatibility
with open(ListBiblioPath+'//'+ndf, 'rb') as data:
LstBrevet = pickle.load(data)
        ## the gatherer stores a dict with 'brevets', 'requete' and 'number' keys; unwrap it below
data = LstBrevet
LstBrevet = data['brevets']
if 'requete' in data:
requete = data["requete"]
if 'number' in data:
print("Found ", data["number"], " patents! Formating to HMTL tables")
LstExp = []
compt = 0
Dones = []
        Double = dict()  # dictionary to manage multiple bib entries (same authors and date)
with codecs.open(ResultPathContent + '//' +ndf+'.bib', 'w', 'utf-8') as resFic:
cleBib = ['year', 'kind', 'title', 'inventor', 'IPCR11', 'label', 'country']
for bre in LstBrevet:
if len(cleBib) == len([cle for cle in cleBib if cle in list(bre.keys())]):
                    Gogo = True  # checking consistency
#==============================================================================
# for cle in cleBib:
# some cleaning in old version of gathered. Should be ok in V2
# Gogo = Gogo * (bre[cle] is not None)
# Gogo = Gogo * (u'None' not in bre[cle])
# Gogo = Gogo * ( bre[cle] != u'')
#==============================================================================
if Gogo:
if "A" in ' '.join(bre['kind']) or "B" in ' '.join(bre['kind']) or "C" in ' '.join(bre['kind']): #filter patent list again their status... only published
if bre['dateDate'] is not None or bre['dateDate'] != 'None' or bre['dateDate'] != '' or 'None' not in bre['dateDate'] or None in bre['dateDate']:
if len(bre['year'])>0 and not isinstance(bre['date'], list):
teatime=bre['date'].split('-')
bre['dateDate'] = datetime.date(int(teatime[0]), int(teatime[1]), int(teatime[2]))
elif len(bre['year'])>0:
teatime=bre['date'][0].split('-')
bre['dateDate'] = datetime.date(int(teatime[0]), int(teatime[1]), int(teatime[2]))
                                # hm, the last test proves that there is a bug in the collector for the dateDate field
if isinstance(bre['dateDate'], list):
Date = bre['dateDate'][0] #first publication (hope so)
else:
Date = bre['dateDate']
# else:
# if isinstance(bre['year'], list):
# temp= bre['year'][0] #first publication
# temp = temp.split('-')
# Date = datetime.date(int(temp[0]), int(temp[1]), int(temp[2]))
# else:
# temp = bre['year']
# temp = temp.split('-')
# Date = datetime.date(int(temp[0]), int(temp[1]), int(temp[2]))
if isinstance(bre['inventor'], list) and len(bre['inventor'])>0:
try:
entryName=bre['inventor'][0].split(' ')[0]+'etAl'+str(Date.year)
except:
                                        entryName = str(bre['inventor'][0]) + 'etAl' + str(Date.year)  # fallback: split() returns a list, which cannot be concatenated with str
tempolist = [nom.replace(' ', ', ', 1).title() for nom in bre['inventor']]
# Issue #7 - by cvanderlei in 4-jan-2016
try:
Authors = str(' and '.join(tempolist))
except UnicodeDecodeError:
Authors = ''
elif len(bre['inventor'])>0:
entryName=bre['inventor'].split(' ')[0]+' etAl '+str(Date.year)
Authors = bre['inventor'].replace(' ', ', ', 1).title()
else:
entryName= "unknown-" +str(Date.day) + "-" +str(Date.month) +"-" +str(Date.year)
Authors = ''
entryName = entryName.replace("'", "")
if entryName in Dones:
if entryName in Double:
Double[entryName] += 1
else:
Double[entryName] = 1
entryName+=str(Double[entryName])
if isinstance(bre['country'], list):
if len(bre['country']) ==1:
bre['country'] = bre['country'][0]
Dones.append(entryName)
# Issue #6 - by cvanderlei in 6-jan-2017
try:
resFic.write('@Patent{'+entryName+',\n')
except UnicodeDecodeError:
resFic.write('@Patent{""\n')
resFic.write('\t author={' + Authors + '},\n')
try:
resFic.write("\t title = {"+str(bre['title']).capitalize() +"},\n")
except: #damm unicode
resFic.write("\t title = {""},\n")
resFic.write("\t year = {" +str(Date.year)+ "},\n")
resFic.write("\t month = {" +str(Date.month)+ "},\n")
resFic.write("\t day = {" +str(Date.day)+ "},\n")
resFic.write("\t number = {" +str(bre['label'])+ "},\n")
resFic.write("\t location = {" +str(bre['country'])+ "},\n")
if isinstance(bre['IPCR11'], list):
resFic.write("\t IPC_class = {" + str(', '.join(bre['IPCR11'])) + "},\n")
else:
resFic.write("\t IPC_class = {" + str(bre['IPCR11']) + "},\n")
resFic.write("\t url = {" +"http://worldwide.espacenet.com/searchResults?compact=false&ST=singleline&query="+str(bre['label'])+"&locale=en_EP&DB=EPODOC" + "},\n")
resFic.write("\t urlyear = {" +str(aujourd.year)+ "},\n")
resFic.write("\t urlmonth = {" +str(aujourd.month)+ "},\n")
resFic.write("\t urlday = {" +str(aujourd.day)+ "},\n")
resFic.write("}\n \n")
compt +=1
        print(compt, 'bibliographic entries added to', ndf + '.bib')
        print("Other bibliographic entries are not consistent or lack an A, B or C kind code")
| python |
from mamba import description, before, context, it, after
from expects import equal, expect, be_none
from os import (
environ,
getpid,
)
import pika
from infcommon import logger
from infcommon.serializer import factory as serializer_factory
from infrabbitmq.rabbitmq import (
RabbitMQClient,
DIRECT_EXCHANGE_TYPE,
)
from infrabbitmq.pika_client_wrapper import PikaClientWrapper
MY_DIRECT_EXCHANGE_NAME = 'my_direct_exchange_name'
A_QUEUE_NAME = 'a_queue_name_{}'.format(getpid())
DEFAULT_ROUTING_KEY = ''
A_MESSAGE = 'a_message'
ANOTHER_MESSAGE = 'another_message'
SOME_ANOTHER_MESSAGE = 'some_another_message'
with description('RabbitMQClient Integration tests - Consuming and publishing Direct Exchange (direct message)') as self:
with before.each:
self.broker_uri = environ['BROKER_URI']
self.serializer = serializer_factory.json_serializer()
self.pika_wrapper_client = PikaClientWrapper(pika_library=pika)
self.logger = logger
self.sut = RabbitMQClient(self.broker_uri,
self.serializer,
self.pika_wrapper_client,
self.logger)
self.sut.exchange_declare(exchange=MY_DIRECT_EXCHANGE_NAME, exchange_type=DIRECT_EXCHANGE_TYPE)
self.sut.queue_declare(queue_name=A_QUEUE_NAME, auto_delete=False)
self.sut.queue_bind(queue_name=A_QUEUE_NAME, exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY)
with after.each:
self.sut.queue_unbind(queue_name=A_QUEUE_NAME, exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY)
self.sut.queue_delete(queue_name=A_QUEUE_NAME)
self.sut.exchange_delete(exchange=MY_DIRECT_EXCHANGE_NAME)
with context('when publishing and consuming a direct message'):
with it('consumes the message'):
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=A_MESSAGE)
msg = self.sut.consume(queue_name=A_QUEUE_NAME)
expect(msg.body).to(equal(A_MESSAGE))
with it('consumes only one message'):
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=A_MESSAGE)
msg = self.sut.consume(queue_name=A_QUEUE_NAME)
msg = self.sut.consume(queue_name=A_QUEUE_NAME)
expect(msg).to(be_none)
with context('when publishing and consuming more than one direct message'):
with it('consumes all pending messages (manually)'):
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=A_MESSAGE)
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=ANOTHER_MESSAGE)
first_consumed_message = self.sut.consume(queue_name=A_QUEUE_NAME)
second_consumed_message = self.sut.consume(queue_name=A_QUEUE_NAME)
third_consumed_message = self.sut.consume(queue_name=A_QUEUE_NAME)
expect(first_consumed_message.body).to(equal(A_MESSAGE))
expect(second_consumed_message.body).to(equal(ANOTHER_MESSAGE))
expect(third_consumed_message).to(be_none)
with it('consumes all pending messages (consuming next)'):
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=A_MESSAGE)
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=ANOTHER_MESSAGE)
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=SOME_ANOTHER_MESSAGE)
expected_results = [A_MESSAGE, ANOTHER_MESSAGE, SOME_ANOTHER_MESSAGE]
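            # consume_next appears to yield messages indefinitely, so the loop below
            # breaks manually once every expected message has been checked.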
for counter, msg in enumerate(self.sut.consume_next(queue_name=A_QUEUE_NAME)):
expect(msg.body).to(equal(expected_results[counter]))
if counter == (len(expected_results) - 1):
break
with it('consumes all pending messages iterating over them (consuming pending)'):
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=A_MESSAGE)
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=ANOTHER_MESSAGE)
self.sut.publish(exchange=MY_DIRECT_EXCHANGE_NAME, routing_key=DEFAULT_ROUTING_KEY, message=SOME_ANOTHER_MESSAGE)
expected_results = [A_MESSAGE, ANOTHER_MESSAGE, SOME_ANOTHER_MESSAGE]
for index, msg in enumerate(self.sut.consume_pending(queue_name=A_QUEUE_NAME)):
expect(msg.body).to(equal(expected_results[index]))
| python |
import logging
import os
import turnip_exchange_tool.gateways.turnip_exchange as source
from turnip_exchange_tool.gateways.db import Sqlite3Db
from turnip_exchange_tool.models.island import Island
_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(format=_format, level=logging.DEBUG)
log = logging.getLogger(__name__)
here = os.path.abspath(os.path.dirname(__file__))
def main():
# response = source.request_data(file_path=os.path.join(here, "./history/response.raw"))
response = source.request_data()
island_list = response["islands"]
with Sqlite3Db() as database:
database.create_table()
islands = [Island(island_data) for island_data in island_list]
database.insert_island_history(islands)
log.debug(f"{len(islands)} islands processed")
if __name__ == "__main__":
main()
""" TODO
Check success in request
Check success in json response
Incorporate $$time
Create object that stores
payload should not be static
"""
# Response JSON top-level keys: success, message, islands, $$time
| python |
from flask import render_template, request, redirect, url_for, session, escape, send_from_directory, current_app
from flask_login import current_user
from mathsonmars.extensions import cache
from mathsonmars.marslogger import logger
from mathsonmars.main import main_view
from mathsonmars.models import db, Role, Student
from mathsonmars.constants.modelconstants import RoleTypes
@main_view.route('/')
@cache.cached(timeout=1000)
def index():
if 'user_name' in session:
logger.debug( 'Logged in as {0}'.format(escape(session['user_name'])))
return render_template('index.html')
@main_view.route('/features')
def features():
return render_template('index.html', _anchor='features')
@main_view.route('/about')
def about():
return render_template('index.html', _anchor='about')
@main_view.route('/privacy')
def privacy():
return render_template('privacy.html')
@main_view.route('/faq')
def faq():
return render_template('faq.html')
'''
@main_view.route('/robots.txt')
@main_view.route('/sitemap.xml')
def static_from_root():
app = current_app._get_current_object()
return send_from_directory(app.static_folder, request.path[1:])
'''
| python |
import json
from dataclasses import asdict
from typing import Dict, List, Tuple, Type
from fractal.core.repositories import Entity
from fractal.core.repositories.inmemory_repository_mixin import InMemoryRepositoryMixin
from fractal.core.utils.json_encoder import EnhancedEncoder
class ExternalDataInMemoryRepositoryMixin(InMemoryRepositoryMixin[Entity]):
def __init__(self, klass: Type[Entity]):
super(ExternalDataInMemoryRepositoryMixin, self).__init__()
self.klass = klass
def load_data_dict(self, data: Dict):
key = self.klass.__name__.lower()
self.entities = {e["id"]: self.klass(**e) for e in data.get(key, [])}
def dump_data_dict(self) -> Tuple[str, List[Dict]]:
return self.klass.__name__.lower(), [asdict(e) for e in self.entities.values()]
def load_data_json(self, data: Dict):
key = self.klass.__name__.lower()
self.entities = {
e["id"]: self.klass(**e) for e in json.loads(data.get(key, []))
}
def dump_data_json(self) -> Tuple[str, str]:
_, data = self.dump_data_dict()
return self.klass.__name__.lower(), json.dumps(data, cls=EnhancedEncoder)
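# Minimal usage sketch (illustrative; "Account" is a hypothetical dataclass entity with
# an "id" field, matching the asdict()/self.klass(**e) round-trip used above):
#
#   @dataclass
#   class Account(Entity):
#       id: str = ""
#       name: str = ""
#
#   class AccountRepository(ExternalDataInMemoryRepositoryMixin[Account]):
#       def __init__(self):
#           super().__init__(Account)
#
#   repo = AccountRepository()
#   repo.load_data_dict({"account": [{"id": "1", "name": "alice"}]})
#   key, dumped = repo.dump_data_dict()   # ("account", [{"id": "1", "name": "alice"}])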
| python |
from request_manager import app, db
from flask import render_template, redirect, url_for
from request.form import RequestForm
from product.models import Product
from client.models import Client
from request.models import RequestModel
@app.route('/')
@app.route('/index')
def index():
return redirect(url_for('request_form_view'))
@app.route('/request_list_view')
def request_list_view():
all_client_requests = RequestModel.query.all()
return render_template('request/list_view.html', all_client_requests=all_client_requests)
@app.route('/request_form_view', methods=('GET', 'POST'))
def request_form_view():
form = RequestForm()
form.product_id.choices = [(p.id, p.name) for p in Product.query.order_by('name')]
form.client_id.choices = [(c.id, c.name) for c in Client.query.order_by('name')]
    # set the default value for client_id to 1 ('Client A'); without this, flask_wtf sets the default value to "None"
form.client_id.data = 1
client_requests = [r.client_request_priority for r in RequestModel.query.filter(RequestModel.client_id == form.client_id.data)]
form.client_request_priority.choices = [(x, x) for x in range(1, len(client_requests)+2)]
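    # priorities run from 1 to len(existing requests) + 1, so a new request can be
    # slotted in front of any existing request or appended after the last one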
if form.validate_on_submit():
# check if other client priorities need to be updated
if len(client_requests) >= form.client_request_priority.data:
db.session.query(RequestModel).filter(RequestModel.client_request_priority >= form.client_request_priority.data).\
update({"client_request_priority": RequestModel.client_request_priority + 1}, synchronize_session='evaluate')
request = RequestModel(
form.title.data,
form.description.data,
form.target_date.data,
form.product_id.data,
form.client_id.data,
form.client_request_priority.data
)
db.session.add(request)
        # flush() gets mysql to generate an autoincremented request ID
db.session.flush()
if request.id:
db.session.commit()
else:
db.session.rollback()
error = "Error creating request"
return redirect('/request_list_view')
return render_template('request/form_view.html', form=form)
@app.route('/request_success')
def request_success():
return "Request sucess!"
| python |
#!/usr/bin/env python
"""
Unit test for the grasping_handler_server.py.
NOTE: This should be run via 'rosrun grasping test_grasping_handler_server.py' and NOT with 'python test_grasping_handler_server.py'.
WARNING: These test requires a connection to Robot DE NIRO
Author: John Lingi
Date: 05/18
"""
import rospy
import unittest
import sys
import os
from geometry_msgs.msg import Point
root_path = "/home/petar/fezzik-project"
sys.path.append(os.path.join(root_path, 'src/grasping/src'))
from grasping_handler_server import GraspingHandlerServer
# GraspingHandler (the service type passed to the server below) is not imported in the
# original file; the import path below is an assumption about the package layout.
from grasping.srv import GraspingHandler
server = GraspingHandlerServer("grasping_handler_service", GraspingHandler)
class GraspingHandlerServerTests(unittest.TestCase):
def test_transform_request_coordinates(self):
"""
Test transform request returns right values
:return:
"""
test_point = Point(0, 0, 0)
result = server.transform_request_coordinates(test_point)
self.assertEqual(result, Point(0.30381, -0.092, 0.82))
def test_get_intermediate_point(self):
"""
Test get_intermediate_point function
:return:
"""
end_point = Point(0, 0, 0)
offset = [-0.15, 0, 0]
int_point = server.get_intermediate_point(end_point, offset)
self.assertEqual(int_point, Point(-0.15, 0, 0))
offset = [-0.15, 0.2, 0.1]
int_point = server.get_intermediate_point(end_point, offset)
self.assertEqual(int_point, Point(-0.15, 0.2, 0.1))
end_point = Point(-0.111, 0.2, 0.3)
offset = [-0.1, 0, -0.3]
int_point = server.get_intermediate_point(end_point, offset)
        self.assertAlmostEqual(int_point.x, -0.211, delta=1e-6)
        self.assertAlmostEqual(int_point.y, 0.2, delta=1e-6)
        self.assertAlmostEqual(int_point.z, 0, delta=1e-6)
if __name__ == "__main__":
rospy.init_node("grasping_handler_tests")
suite = unittest.TestLoader().loadTestsFromTestCase(GraspingHandlerServerTests)
unittest.TextTestRunner(verbosity=2).run(suite) | python |
# coding: utf-8
from proxy_spider.items import Proxy
from proxy_spider.spiders import _BaseSpider
from service.proxy.functions import exceed_check_period, valid_format
class CheckerSpider(_BaseSpider):
"""
Check proxy's availability and anonymity.
"""
name = 'checker'
# allowed_domains = ['*']
custom_settings = {
'DOWNLOAD_DELAY': 3,
'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
}
def start_requests(self):
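        # Iterate over every stored proxy: delete malformed records and only rebuild
        # a check request for proxies whose last check exceeds the configured period.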
keys = self.srv.get_all_keys()
for key in keys:
data = self.srv.hgetall_dict(key)
last_check = data.get('last_check', 0)
if not valid_format(data):
self.srv.delete(key, 'Error format %s' % data)
continue
if exceed_check_period(last_check):
item = Proxy(**data)
yield self.build_check_request(item)
| python |
import argparse
from datetime import datetime
import os
import torch
import torch.nn as nn
import torch.utils.data
from model import Model
from dataset import Dataset
from tqdm import tqdm
from sklearn.metrics import confusion_matrix
parser = argparse.ArgumentParser(description='Train a CNN to classify image patches into different genetic ITH groups')
parser.add_argument('--model_dir', default='saved_models/', help='Directory to save models', dest='model_dir')
parser.add_argument('--init_model_file', default='',help='Initial model file (optional)', dest='init_model_file')
parser.add_argument('--image_dir_high', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_25_512', help='Image directory', dest='image_dir_high')
parser.add_argument('--image_dir_medium', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_50_512', help='Image directory', dest='image_dir_medium')
parser.add_argument('--image_dir_low', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_100_512', help='Image directory', dest='image_dir_low')
parser.add_argument('--image_dir_low2', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_200_512', help='Image directory', dest='image_dir_low2')
parser.add_argument('--slide_list_filename_train', default='../dataset/slide_ids_list_gland_classification_46_slides_train_saved.txt', help='slide list train', dest='slide_list_filename_train')
parser.add_argument('--slide_list_filename_valid', default='../dataset/slide_ids_list_gland_classification_46_slides_valid_saved.txt', help='slide list valid', dest='slide_list_filename_valid')
parser.add_argument('--slide_list_filename_test', default='../dataset/slide_ids_list_gland_classification_46_slides_test_saved.txt', help='slide list test', dest='slide_list_filename_test')
parser.add_argument('--patch_size', default='512', type=int, help='Patch size', dest='patch_size')
parser.add_argument('--num_classes', default='2', type=int, help='Number of classes', dest='num_classes')
parser.add_argument('--pretrained', default=False, help='Pretrain model on ImageNet', dest='pretrained')
parser.add_argument('--batch_size', default='16', type=int, help='Batch size', dest='batch_size')
parser.add_argument('--learning_rate', default='5e-4', type=float, help='Learning rate', dest='learning_rate')
parser.add_argument('--weight_decay', default='5e-5', type=float, help='Weight decay', dest='weight_decay')
parser.add_argument('--num_epochs', default=100, type=int, help='Number of epochs', dest='num_epochs')
parser.add_argument('--save_interval', default=10, type=int, help='Model save interval (default: 10)', dest='save_interval')
parser.add_argument('--metrics_file', default='saved_metrics', help='Text file to write step, loss, accuracy metrics', dest='metrics_file')
FLAGS = parser.parse_args()
if not os.path.exists(FLAGS.model_dir):
os.makedirs(FLAGS.model_dir)
if not os.path.exists(FLAGS.metrics_file):
os.makedirs(FLAGS.metrics_file)
current_time = datetime.now().strftime("__%Y_%m_%d__%H_%M_%S")
FLAGS.metrics_loss_file = FLAGS.metrics_file + '/step_loss_metrics' + current_time + '.txt'
FLAGS.metrics_acc_file = FLAGS.metrics_file + '/step_acc_metrics' + current_time + '.txt'
FLAGS.metrics_cm_file = FLAGS.metrics_file + '/step_confusion_matrices' + current_time + '.txt'
FLAGS.test_loss_file = FLAGS.metrics_file + '/test_loss_metrics' + current_time + '.txt'
FLAGS.test_acc_file = FLAGS.metrics_file + '/test_acc_metrics' + current_time + '.txt'
FLAGS.test_cm_file = FLAGS.metrics_file + '/test_confusion_matrices' + current_time + '.txt'
print('current_time: {}'.format(current_time))
print('model_dir: {}'.format(FLAGS.model_dir))
print('init_model_file: {}'.format(FLAGS.init_model_file))
print('image_dir_high: {}'.format(FLAGS.image_dir_high))
print('image_dir_medium: {}'.format(FLAGS.image_dir_medium))
print('image_dir_low: {}'.format(FLAGS.image_dir_low))
print('image_dir_low2: {}'.format(FLAGS.image_dir_low2))
print('slide_list_filename_train: {}'.format(FLAGS.slide_list_filename_train))
print('slide_list_filename_valid: {}'.format(FLAGS.slide_list_filename_valid))
print('slide_list_filename_test: {}'.format(FLAGS.slide_list_filename_test))
print('patch_size: {}'.format(FLAGS.patch_size))
print('num_classes: {}'.format(FLAGS.num_classes))
print('pretrained: {}'.format(FLAGS.pretrained))
print('batch_size: {}'.format(FLAGS.batch_size))
print('learning_rate: {}'.format(FLAGS.learning_rate))
print('weight_decay: {}'.format(FLAGS.weight_decay))
print('num_epochs: {}'.format(FLAGS.num_epochs))
print('save_interval: {}'.format(FLAGS.save_interval))
print('metrics_file: {}'.format(FLAGS.metrics_file))
print('# metrics_loss_file: {}'.format(FLAGS.metrics_loss_file))
print('# metrics_acc_file: {}'.format(FLAGS.metrics_acc_file))
print('# metrics_cm_file: {}'.format(FLAGS.metrics_cm_file))
print('# test_loss_file: {}'.format(FLAGS.test_loss_file))
print('# test_acc_file: {}'.format(FLAGS.test_acc_file))
print('# test_cm_file: {}'.format(FLAGS.test_cm_file))
train_dataset = Dataset(img_dir_high=FLAGS.image_dir_high, img_dir_medium=FLAGS.image_dir_medium, img_dir_low=FLAGS.image_dir_low, img_dir_low2=FLAGS.image_dir_low2, slide_list_filename=FLAGS.slide_list_filename_train, transforms=True)
num_imgs_train = train_dataset.num_imgs
print("Training Data - num_imgs: {}".format(train_dataset.num_imgs))
valid_dataset = Dataset(img_dir_high=FLAGS.image_dir_high, img_dir_medium=FLAGS.image_dir_medium, img_dir_low=FLAGS.image_dir_low, img_dir_low2=FLAGS.image_dir_low2, slide_list_filename=FLAGS.slide_list_filename_valid, transforms=False)
num_imgs_valid = valid_dataset.num_imgs
print("Validation Data - num_imgs: {}".format(valid_dataset.num_imgs))
test_dataset = Dataset(img_dir_high=FLAGS.image_dir_high, img_dir_medium=FLAGS.image_dir_medium, img_dir_low=FLAGS.image_dir_low, img_dir_low2=FLAGS.image_dir_low2, slide_list_filename=FLAGS.slide_list_filename_test, transforms=False)
num_imgs_test = test_dataset.num_imgs
print("Test Data - num_imgs: {}".format(test_dataset.num_imgs))
# define training and validation data loaders
data_loader_train = torch.utils.data.DataLoader(train_dataset, batch_size=FLAGS.batch_size, shuffle=True, num_workers=1)
data_loader_valid = torch.utils.data.DataLoader(valid_dataset, batch_size=FLAGS.batch_size, shuffle=False, num_workers=1)
data_loader_test = torch.utils.data.DataLoader(test_dataset, batch_size=FLAGS.batch_size, shuffle=False, num_workers=1)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# get the model using our helper function
model = Model(FLAGS.pretrained, FLAGS.num_classes, num_intermediate_features=64)
# move model to the right device
model.to(device)
# define criterion
criterion = nn.CrossEntropyLoss()
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params, lr=FLAGS.learning_rate, weight_decay=FLAGS.weight_decay)
if FLAGS.init_model_file:
if os.path.isfile(FLAGS.init_model_file):
state_dict = torch.load(FLAGS.init_model_file, map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
print("Model weights loaded successfully from file: ", FLAGS.init_model_file)
with open(FLAGS.metrics_loss_file, 'w') as f:
f.write('# current_time: {}\n'.format(current_time))
f.write('# model_dir: {}\n'.format(FLAGS.model_dir))
f.write('# init_model_file: {}\n'.format(FLAGS.init_model_file))
f.write('# image_dir_high: {}\n'.format(FLAGS.image_dir_high))
f.write('# image_dir_medium: {}\n'.format(FLAGS.image_dir_medium))
f.write('# image_dir_low: {}\n'.format(FLAGS.image_dir_low))
f.write('# image_dir_low2: {}\n'.format(FLAGS.image_dir_low2))
f.write('# slide_list_filename_train: {}\n'.format(FLAGS.slide_list_filename_train))
f.write('# slide_list_filename_valid: {}\n'.format(FLAGS.slide_list_filename_valid))
f.write('# slide_list_filename_test: {}\n'.format(FLAGS.slide_list_filename_test))
f.write('# patch_size: {}\n'.format(FLAGS.patch_size))
f.write('# num_classes: {}\n'.format(FLAGS.num_classes))
f.write('# pretrained: {}\n'.format(FLAGS.pretrained))
f.write('# batch_size: {}\n'.format(FLAGS.batch_size))
f.write('# learning_rate: {}\n'.format(FLAGS.learning_rate))
f.write('# weight_decay: {}\n'.format(FLAGS.weight_decay))
f.write('# num_epochs: {}\n'.format(FLAGS.num_epochs))
f.write('# save_interval: {}\n'.format(FLAGS.save_interval))
f.write('# metrics_file: {}\n'.format(FLAGS.metrics_file))
f.write('# metrics_loss_file: {}\n'.format(FLAGS.metrics_loss_file))
f.write('# metrics_acc_file: {}\n'.format(FLAGS.metrics_acc_file))
f.write('# metrics_cm_file: {}\n'.format(FLAGS.metrics_cm_file))
f.write('# test_loss_file: {}\n'.format(FLAGS.test_loss_file))
f.write('# test_acc_file: {}\n'.format(FLAGS.test_acc_file))
f.write('# test_cm_file: {}\n'.format(FLAGS.test_cm_file))
f.write('# epoch\tlearning_rate\ttraining_loss_high\ttraining_loss_medium\ttraining_loss_low\ttraining_loss_low2\ttraining_loss_result\ttraining_loss_total \
\tvalidation_loss_high\tvalidation_loss_medium\tvalidation_loss_low\tvalidation_loss_low2\tvalidation_loss_result\tvalidation_loss_total\n')
with open(FLAGS.metrics_acc_file, 'w') as f:
f.write('# epoch\tlearning_rate\ttraining_acc_high\ttraining_acc_medium\ttraining_acc_low\ttraining_acc_low2\ttraining_acc_result\ttraining_acc_total \
\tvalidation_acc_high\tvalidation_acc_medium\tvalidation_acc_low\tvalidation_acc_low2\tvalidation_acc_result\tvalidation_acc_total\n')
with open(FLAGS.metrics_cm_file, 'w') as f:
f.write('# epoch\tlearning_rate \
\ttraining_label_benign_predicted_benign_high\ttraining_label_benign_predicted_malignant_high\ttraining_label_malignant_predicted_benign_high\ttraining_label_malignant_predicted_malignant_high \
\ttraining_label_benign_predicted_benign_medium\ttraining_label_benign_predicted_malignant_medium\ttraining_label_malignant_predicted_benign_medium\ttraining_label_malignant_predicted_malignant_medium \
\ttraining_label_benign_predicted_benign_low\ttraining_label_benign_predicted_malignant_low\ttraining_label_malignant_predicted_benign_low\ttraining_label_malignant_predicted_malignant_low \
\ttraining_label_benign_predicted_benign_low2\ttraining_label_benign_predicted_malignant_low2\ttraining_label_malignant_predicted_benign_low2\ttraining_label_malignant_predicted_malignant_low2 \
\ttraining_label_benign_predicted_benign_result\ttraining_label_benign_predicted_malignant_result\ttraining_label_malignant_predicted_benign_result\ttraining_label_malignant_predicted_malignant_result \
\ttraining_label_benign_predicted_benign_total\ttraining_label_benign_predicted_malignant_total\ttraining_label_malignant_predicted_benign_total\ttraining_label_malignant_predicted_malignant_total \
\tvalidation_label_benign_predicted_benign_high\tvalidation_label_benign_predicted_malignant_high\tvalidation_label_malignant_predicted_benign_high\tvalidation_label_malignant_predicted_malignant_high \
\tvalidation_label_benign_predicted_benign_medium\tvalidation_label_benign_predicted_malignant_medium\tvalidation_label_malignant_predicted_benign_medium\tvalidation_label_malignant_predicted_malignant_medium \
\tvalidation_label_benign_predicted_benign_low\tvalidation_label_benign_predicted_malignant_low\tvalidation_label_malignant_predicted_benign_low\tvalidation_label_malignant_predicted_malignant_low \
\tvalidation_label_benign_predicted_benign_low2\tvalidation_label_benign_predicted_malignant_low2\tvalidation_label_malignant_predicted_benign_low2\tvalidation_label_malignant_predicted_malignant_low2 \
\tvalidation_label_benign_predicted_benign_result\tvalidation_label_benign_predicted_malignant_result\tvalidation_label_malignant_predicted_benign_result\tvalidation_label_malignant_predicted_malignant_result \
\tvalidation_label_benign_predicted_benign_total\tvalidation_label_benign_predicted_malignant_total\tvalidation_label_malignant_predicted_benign_total\tvalidation_label_malignant_predicted_malignant_total\n')
total_steps = len(data_loader_train)
best_acc = 0.0
min_val_loss = 100.0
for epoch in range(FLAGS.num_epochs):
print('#################### EPOCH - {} ####################'.format(epoch + 1))
print('******************** training ********************')
pbar = tqdm(total=len(data_loader_train))
model.train()
num_predictions = 0
running_loss_high = 0.0
running_loss_medium = 0.0
running_loss_low = 0.0
running_loss_low2 = 0.0
running_loss_result = 0.0
running_loss_total = 0.0
running_correct_high = 0
running_correct_medium = 0
running_correct_low = 0
running_correct_low2 = 0
running_correct_result = 0
running_correct_total = 0
label_list = []
predicted_list_high = []
predicted_list_medium = []
predicted_list_low = []
predicted_list_low2 = []
predicted_list_result = []
predicted_list_total = []
for i, (img_paths, img_high, img_medium, img_low, img_low2, label) in enumerate(data_loader_train):
# print('high: {}'.format(img_high.shape))
# print('medium: {}'.format(img_medium.shape))
# print('low: {}'.format(img_low.shape))
# print('low2: {}'.format(img_low2.shape))
# print('label: {}'.format(label.shape))
img_high, img_medium, img_low, img_low2, label = img_high.to(device), img_medium.to(device), img_low.to(device), img_low2.to(device), label.to(device)
output_high, output_medium, output_low, output_low2, output_result = model(img_high, img_medium, img_low, img_low2)
output_total = output_high + output_medium + output_low + output_low2 + output_result
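        # logit-level ensemble for prediction: the four per-resolution outputs plus the result output are summed before argmax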
optimizer.zero_grad()
loss_high = criterion(output_high, label)
loss_medium = criterion(output_medium, label)
loss_low = criterion(output_low, label)
loss_low2 = criterion(output_low2, label)
loss_result = criterion(output_result, label)
loss_total = loss_high + loss_medium + loss_low + loss_low2 + loss_result
loss_total.backward()
optimizer.step()
_, predicted_high = torch.max(output_high, 1)
_, predicted_medium = torch.max(output_medium, 1)
_, predicted_low = torch.max(output_low, 1)
_, predicted_low2 = torch.max(output_low2, 1)
_, predicted_result = torch.max(output_result, 1)
_, predicted_total = torch.max(output_total, 1)
correct_high = (predicted_high == label).sum().item()
correct_medium = (predicted_medium == label).sum().item()
correct_low = (predicted_low == label).sum().item()
correct_low2 = (predicted_low2 == label).sum().item()
correct_result = (predicted_result == label).sum().item()
correct_total = (predicted_total == label).sum().item()
num_predictions += label.size(0)
running_loss_high += loss_high.item() * label.size(0)
running_loss_medium += loss_medium.item() * label.size(0)
running_loss_low += loss_low.item() * label.size(0)
running_loss_low2 += loss_low2.item() * label.size(0)
running_loss_result += loss_result.item() * label.size(0)
running_loss_total += loss_total.item() * label.size(0)
running_correct_high += correct_high
running_correct_medium += correct_medium
running_correct_low += correct_low
running_correct_low2 += correct_low2
running_correct_result += correct_result
running_correct_total += correct_total
label_list += list(label.cpu().numpy())
predicted_list_high += list(predicted_high.cpu().numpy())
predicted_list_medium += list(predicted_medium.cpu().numpy())
predicted_list_low += list(predicted_low.cpu().numpy())
predicted_list_low2 += list(predicted_low2.cpu().numpy())
predicted_list_result += list(predicted_result.cpu().numpy())
predicted_list_total += list(predicted_total.cpu().numpy())
pbar.update(1)
pbar.close()
train_loss_high = running_loss_high / num_predictions
train_loss_medium = running_loss_medium / num_predictions
train_loss_low = running_loss_low / num_predictions
train_loss_low2 = running_loss_low2 / num_predictions
train_loss_result = running_loss_result / num_predictions
train_loss_total = running_loss_total / num_predictions
train_acc_high = running_correct_high / num_predictions
train_acc_medium = running_correct_medium / num_predictions
train_acc_low = running_correct_low / num_predictions
train_acc_low2 = running_correct_low2 / num_predictions
train_acc_result = running_correct_result / num_predictions
train_acc_total = running_correct_total / num_predictions
print('Training loss high: {:.4f}\tTraining loss medium: {:.4f}\tTraining loss low: {:.4f}\tTraining loss low2: {:.4f}\tTraining loss result: {:.4f}\tTraining loss total: {:.4f}'.format(train_loss_high, train_loss_medium, train_loss_low, train_loss_low2, train_loss_result, train_loss_total))
print('Training accuracy high: {:.4f}\tTraining accuracy medium: {:.4f}\tTraining accuracy low: {:.4f}\tTraining accuracy low2: {:.4f}\tTraining accuracy result: {:.4f}\tTraining accuracy total: {:.4f}'.format(train_acc_high, train_acc_medium, train_acc_low, train_acc_low2, train_acc_result, train_acc_total))
# confusion matrix
cm_train_high = confusion_matrix(label_list, predicted_list_high, labels=[0, 1])
cm_train_medium = confusion_matrix(label_list, predicted_list_medium, labels=[0, 1])
cm_train_low = confusion_matrix(label_list, predicted_list_low, labels=[0, 1])
cm_train_low2 = confusion_matrix(label_list, predicted_list_low2, labels=[0, 1])
cm_train_result = confusion_matrix(label_list, predicted_list_result, labels=[0, 1])
cm_train_total = confusion_matrix(label_list, predicted_list_total, labels=[0, 1])
print('******************** validation ********************')
pbar2 = tqdm(total=len(data_loader_valid))
# validation
model.eval()
num_predictions = 0
running_loss_high = 0.0
running_loss_medium = 0.0
running_loss_low = 0.0
running_loss_low2 = 0.0
running_loss_result = 0.0
running_loss_total = 0.0
running_correct_high = 0
running_correct_medium = 0
running_correct_low = 0
running_correct_low2 = 0
running_correct_result = 0
running_correct_total = 0
label_list = []
predicted_list_high = []
predicted_list_medium = []
predicted_list_low = []
predicted_list_low2 = []
predicted_list_result = []
predicted_list_total = []
with torch.no_grad():
for i, (img_paths, img_high, img_medium, img_low, img_low2, label) in enumerate(data_loader_valid):
# print('high: {}'.format(img_high.shape))
# print('medium: {}'.format(img_medium.shape))
# print('low: {}'.format(img_low.shape))
# print('low2: {}'.format(img_low2.shape))
# print('label: {}'.format(label.shape))
img_high, img_medium, img_low, img_low2, label = img_high.to(device), img_medium.to(device), img_low.to(device), img_low2.to(device), label.to(device)
output_high, output_medium, output_low, output_low2, output_result = model(img_high, img_medium, img_low, img_low2)
output_total = output_high + output_medium + output_low + output_low2 + output_result
loss_high = criterion(output_high, label)
loss_medium = criterion(output_medium, label)
loss_low = criterion(output_low, label)
loss_low2 = criterion(output_low2, label)
loss_result = criterion(output_result, label)
loss_total = loss_high + loss_medium + loss_low + loss_low2 + loss_result
# print('loss_total: {}'.format(loss_total))
_, predicted_high = torch.max(output_high, 1)
_, predicted_medium = torch.max(output_medium, 1)
_, predicted_low = torch.max(output_low, 1)
_, predicted_low2 = torch.max(output_low2, 1)
_, predicted_result = torch.max(output_result, 1)
_, predicted_total = torch.max(output_total, 1)
correct_high = (predicted_high == label).sum().item()
correct_medium = (predicted_medium == label).sum().item()
correct_low = (predicted_low == label).sum().item()
correct_low2 = (predicted_low2 == label).sum().item()
correct_result = (predicted_result == label).sum().item()
correct_total = (predicted_total == label).sum().item()
num_predictions += label.size(0)
running_loss_high += loss_high.item() * label.size(0)
running_loss_medium += loss_medium.item() * label.size(0)
running_loss_low += loss_low.item() * label.size(0)
running_loss_low2 += loss_low2.item() * label.size(0)
running_loss_result += loss_result.item() * label.size(0)
running_loss_total += loss_total.item() * label.size(0)
running_correct_high += correct_high
running_correct_medium += correct_medium
running_correct_low += correct_low
running_correct_low2 += correct_low2
running_correct_result += correct_result
running_correct_total += correct_total
label_list += list(label.cpu().numpy())
predicted_list_high += list(predicted_high.cpu().numpy())
predicted_list_medium += list(predicted_medium.cpu().numpy())
predicted_list_low += list(predicted_low.cpu().numpy())
predicted_list_low2 += list(predicted_low2.cpu().numpy())
predicted_list_result += list(predicted_result.cpu().numpy())
predicted_list_total += list(predicted_total.cpu().numpy())
pbar2.update(1)
pbar2.close()
valid_loss_high = running_loss_high / num_predictions
valid_loss_medium = running_loss_medium / num_predictions
valid_loss_low = running_loss_low / num_predictions
valid_loss_low2 = running_loss_low2 / num_predictions
valid_loss_result = running_loss_result / num_predictions
valid_loss_total = running_loss_total / num_predictions
valid_acc_high = running_correct_high / num_predictions
valid_acc_medium = running_correct_medium / num_predictions
valid_acc_low = running_correct_low / num_predictions
valid_acc_low2 = running_correct_low2 / num_predictions
valid_acc_result = running_correct_result / num_predictions
valid_acc_total = running_correct_total / num_predictions
# confusion matrix
cm_valid_high = confusion_matrix(label_list, predicted_list_high, labels=[0, 1])
cm_valid_medium = confusion_matrix(label_list, predicted_list_medium, labels=[0, 1])
cm_valid_low = confusion_matrix(label_list, predicted_list_low, labels=[0, 1])
cm_valid_low2 = confusion_matrix(label_list, predicted_list_low2, labels=[0, 1])
cm_valid_result = confusion_matrix(label_list, predicted_list_result, labels=[0, 1])
cm_valid_total = confusion_matrix(label_list, predicted_list_total, labels=[0, 1])
# print('Epoch : {:d}'.format(epoch + 1))
print('Validation loss high: {:.4f}\tValidation loss medium: {:.4f}\tValidation loss low: {:.4f}\tValidation loss low2: {:.4f}\tValidation loss result: {:.4f}\tValidation loss total: {:.4f}' \
.format(valid_loss_high, valid_loss_medium, valid_loss_low, valid_loss_low2, valid_loss_result, valid_loss_total))
print('Validation accuracy high: {:.4f}\tValidation accuracy medium: {:.4f}\tValidation accuracy low: {:.4f}\tValidation accuracy low2: {:.4f}\tValidation accuracy result: {:.4f}\tValidation accuracy total: {:.4f}' \
.format(valid_acc_high, valid_acc_medium, valid_acc_low, valid_acc_low2, valid_acc_result, valid_acc_total))
# print('\n')
with open(FLAGS.metrics_loss_file, 'a') as f:
f.write('{:d}\t{:.8f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\n' \
.format(epoch + 1, optimizer.param_groups[0]['lr'],
train_loss_high, train_loss_medium, train_loss_low, train_loss_low2, train_loss_result, train_loss_total,
valid_loss_high, valid_loss_medium, valid_loss_low, valid_loss_low2, valid_loss_result, valid_loss_total))
with open(FLAGS.metrics_acc_file, 'a') as f:
f.write('{:d}\t{:.8f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\n' \
.format(epoch + 1, optimizer.param_groups[0]['lr'],
train_acc_high, train_acc_medium, train_acc_low, train_acc_low2, train_acc_result, train_acc_total,
valid_acc_high, valid_acc_medium, valid_acc_low, valid_acc_low2, valid_acc_result, valid_acc_total))
with open(FLAGS.metrics_cm_file, 'a') as f:
f.write('{:d}\t{:.8f}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n' \
.format(epoch + 1, optimizer.param_groups[0]['lr'],
cm_train_high[0, 0], cm_train_high[0, 1], cm_train_high[1, 0], cm_train_high[1, 1],
cm_train_medium[0, 0], cm_train_medium[0, 1], cm_train_medium[1, 0], cm_train_medium[1, 1],
cm_train_low[0, 0], cm_train_low[0, 1], cm_train_low[1, 0], cm_train_low[1, 1],
cm_train_low2[0, 0], cm_train_low2[0, 1], cm_train_low2[1, 0], cm_train_low2[1, 1],
cm_train_result[0, 0], cm_train_result[0, 1], cm_train_result[1, 0], cm_train_result[1, 1],
cm_train_total[0, 0], cm_train_total[0, 1], cm_train_total[1, 0], cm_train_total[1, 1],
cm_valid_high[0, 0], cm_valid_high[0, 1], cm_valid_high[1, 0], cm_valid_high[1, 1],
cm_valid_medium[0, 0], cm_valid_medium[0, 1], cm_valid_medium[1, 0], cm_valid_medium[1, 1],
cm_valid_low[0, 0], cm_valid_low[0, 1], cm_valid_low[1, 0], cm_valid_low[1, 1],
cm_valid_low2[0, 0], cm_valid_low2[0, 1], cm_valid_low2[1, 0], cm_valid_low2[1, 1],
cm_valid_result[0, 0], cm_valid_result[0, 1], cm_valid_result[1, 0], cm_valid_result[1, 1],
cm_valid_total[0, 0], cm_valid_total[0, 1], cm_valid_total[1, 0], cm_valid_total[1, 1]))
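    # checkpoint whenever the result-head validation loss improves, and also at the fixed save interval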
if (valid_loss_result < min_val_loss) or ((epoch + 1) % FLAGS.save_interval == 0):
model_weights_filename = FLAGS.model_dir + 'model_weights' + current_time + '__' + str(epoch + 1) + '.pth'
state_dict = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()}
torch.save(state_dict, model_weights_filename)
print('Model weights saved in file: {}'.format(model_weights_filename))
if valid_loss_result < min_val_loss:
min_val_loss = valid_loss_result
##################################################################################################################################
print('******************** testing ********************')
pbar = tqdm(total=len(data_loader_test))
model.eval()
num_predictions = 0
running_loss_high = 0.0
running_loss_medium = 0.0
running_loss_low = 0.0
running_loss_low2 = 0.0
running_loss_result = 0.0
running_loss_total = 0.0
running_correct_high = 0
running_correct_medium = 0
running_correct_low = 0
running_correct_low2 = 0
running_correct_result = 0
running_correct_total = 0
label_list = []
predicted_list_high = []
predicted_list_medium = []
predicted_list_low = []
predicted_list_low2 = []
predicted_list_result = []
predicted_list_total = []
with torch.no_grad():
for i, (img_paths, img_high, img_medium, img_low, img_low2, label) in enumerate(data_loader_test):
img_high, img_medium, img_low, img_low2, label = img_high.to(device), img_medium.to(device), img_low.to(device), img_low2.to(device), label.to(device)
output_high, output_medium, output_low, output_low2, output_result = model(img_high, img_medium, img_low, img_low2)
output_total = output_high + output_medium + output_low + output_low2 + output_result
loss_high = criterion(output_high, label)
loss_medium = criterion(output_medium, label)
loss_low = criterion(output_low, label)
loss_low2 = criterion(output_low2, label)
loss_result = criterion(output_result, label)
loss_total = loss_high + loss_medium + loss_low + loss_low2 + loss_result
_, predicted_high = torch.max(output_high, 1)
_, predicted_medium = torch.max(output_medium, 1)
_, predicted_low = torch.max(output_low, 1)
_, predicted_low2 = torch.max(output_low2, 1)
_, predicted_result = torch.max(output_result, 1)
_, predicted_total = torch.max(output_total, 1)
correct_high = (predicted_high == label).sum().item()
correct_medium = (predicted_medium == label).sum().item()
correct_low = (predicted_low == label).sum().item()
correct_low2 = (predicted_low2 == label).sum().item()
correct_result = (predicted_result == label).sum().item()
correct_total = (predicted_total == label).sum().item()
running_loss_high += loss_high.item() * label.size(0)
running_loss_medium += loss_medium.item() * label.size(0)
running_loss_low += loss_low.item() * label.size(0)
running_loss_low2 += loss_low2.item() * label.size(0)
running_loss_result += loss_result.item() * label.size(0)
running_loss_total += loss_total.item() * label.size(0)
num_predictions += label.size(0)
running_correct_high += correct_high
running_correct_medium += correct_medium
running_correct_low += correct_low
running_correct_low2 += correct_low2
running_correct_result += correct_result
running_correct_total += correct_total
label_list += list(label.cpu().numpy())
predicted_list_high += list(predicted_high.cpu().numpy())
predicted_list_medium += list(predicted_medium.cpu().numpy())
predicted_list_low += list(predicted_low.cpu().numpy())
predicted_list_low2 += list(predicted_low2.cpu().numpy())
predicted_list_result += list(predicted_result.cpu().numpy())
predicted_list_total += list(predicted_total.cpu().numpy())
pbar.update(1)
test_loss_high = running_loss_high / num_predictions
test_loss_medium = running_loss_medium / num_predictions
test_loss_low = running_loss_low / num_predictions
test_loss_low2 = running_loss_low2 / num_predictions
test_loss_result = running_loss_result / num_predictions
test_loss_total = running_loss_total / num_predictions
test_acc_high = running_correct_high / num_predictions
test_acc_medium = running_correct_medium / num_predictions
test_acc_low = running_correct_low / num_predictions
test_acc_low2 = running_correct_low2 / num_predictions
test_acc_result = running_correct_result / num_predictions
test_acc_total = running_correct_total / num_predictions
# confusion matrix
cm_test_high = confusion_matrix(label_list, predicted_list_high, labels=[0, 1])
cm_test_medium = confusion_matrix(label_list, predicted_list_medium, labels=[0, 1])
cm_test_low = confusion_matrix(label_list, predicted_list_low, labels=[0, 1])
cm_test_low2 = confusion_matrix(label_list, predicted_list_low2, labels=[0, 1])
cm_test_result = confusion_matrix(label_list, predicted_list_result, labels=[0, 1])
cm_test_total = confusion_matrix(label_list, predicted_list_total, labels=[0, 1])
pbar.close()
with open(FLAGS.test_loss_file, 'w') as f:
f.write('# test_loss_high\ttest_loss_medium\ttest_loss_low\ttest_loss_low2\ttest_loss_result\ttest_loss_total\n')
f.write('{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\n'.format(test_loss_high, test_loss_medium, test_loss_low, test_loss_low2, test_loss_result, test_loss_total))
with open(FLAGS.test_acc_file, 'w') as f:
f.write('# test_acc_high\ttest_acc_medium\ttest_acc_low\ttest_acc_low2\ttest_acc_result\ttest_acc_total\n')
f.write('{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\n'.format(test_acc_high, test_acc_medium, test_acc_low, test_acc_low2, test_acc_result, test_acc_total))
with open(FLAGS.test_cm_file, 'w') as f:
f.write('# test_label_benign_predicted_benign_high\ttest_label_benign_predicted_malignant_high\ttest_label_malignant_predicted_benign_high\ttest_label_malignant_predicted_malignant_high \
\ttest_label_benign_predicted_benign_medium\ttest_label_benign_predicted_malignant_medium\ttest_label_malignant_predicted_benign_medium\ttest_label_malignant_predicted_malignant_medium \
\ttest_label_benign_predicted_benign_low\ttest_label_benign_predicted_malignant_low\ttest_label_malignant_predicted_benign_low\ttest_label_malignant_predicted_malignant_low \
\ttest_label_benign_predicted_benign_low2\ttest_label_benign_predicted_malignant_low2\ttest_label_malignant_predicted_benign_low2\ttest_label_malignant_predicted_malignant_low2 \
\ttest_label_benign_predicted_benign_result\ttest_label_benign_predicted_malignant_result\ttest_label_malignant_predicted_benign_result\ttest_label_malignant_predicted_malignant_result\t \
\ttest_label_benign_predicted_benign_total\ttest_label_benign_predicted_malignant_total\ttest_label_malignant_predicted_benign_total\ttest_label_malignant_predicted_malignant_total\n')
f.write('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n' \
.format(cm_test_high[0, 0], cm_test_high[0, 1], cm_test_high[1, 0], cm_test_high[1, 1],
cm_test_medium[0, 0], cm_test_medium[0, 1], cm_test_medium[1, 0], cm_test_medium[1, 1],
cm_test_low[0, 0], cm_test_low[0, 1], cm_test_low[1, 0], cm_test_low[1, 1],
cm_test_low2[0, 0], cm_test_low2[0, 1], cm_test_low2[1, 0], cm_test_low2[1, 1],
cm_test_result[0, 0], cm_test_result[0, 1], cm_test_result[1, 0], cm_test_result[1, 1],
cm_test_total[0, 0], cm_test_total[0, 1], cm_test_total[1, 0], cm_test_total[1, 1]))
| python |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.signals import post_save
from products.models import Product
from cart.models import ShippingDetails
# Create your models here.
User = get_user_model()
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
favourite_products = models.ManyToManyField(Product, blank=True)
anonymous_user = models.BooleanField(default=False)
use_saved_details = models.BooleanField(default=False)
shipping_details_id = models.CharField(max_length=40)
# Should have just used shipping details model
email = models.EmailField(max_length=70,blank=True)
name = models.CharField(max_length=40)
last_name = models.CharField(max_length=40)
city = models.CharField(max_length=40)
country = models.CharField(max_length=40)
address = models.CharField(max_length=40)
zip_code = models.CharField(max_length=40)
localidade = models.CharField(max_length=40)
cell_number = models.CharField(max_length=40)
def __str__(self):
return self.user.username
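# Signal receiver: ensure every newly created auth user gets a matching UserProfile.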
def post_save_profile_create(sender, instance, created, *args, **kwargs):
if created:
UserProfile.objects.get_or_create(user=instance)
post_save.connect(post_save_profile_create, sender=settings.AUTH_USER_MODEL)
| python |
# coding: utf-8
#
# This code is part of dqmc.
#
# Copyright (c) 2022, Dylan Jones
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
import logging
# ======================================================================================
# Register custom logging levels
# ======================================================================================
# noinspection PyUnresolvedReferences
def addLoggingLevel(level_name, level_num, method_name=None):
"""Comprehensively adds a new logging level to the `logging` module and the
currently configured logging class.
`level_name` becomes an attribute of the `logging` module with the value
`level_num`. `method_name` becomes a convenience method for both `logging`
itself and the class returned by `logging.getLoggerClass()` (usually just
`logging.Logger`). If `method_name` is not specified, `level_name.lower()` is
used.
    To avoid accidental clobbering of existing attributes, this method will
    raise an `AttributeError` if the level name is already an attribute of the
    `logging` module or if the method name is already present.
Example
-------
>>> addLoggingLevel('TRACE', logging.DEBUG - 5)
>>> logging.getLogger(__name__).setLevel("TRACE")
>>> logging.getLogger(__name__).trace('that worked')
>>> logging.trace('so did this')
>>> logging.TRACE
5
"""
if not method_name:
method_name = level_name.lower()
if hasattr(logging, level_name):
raise AttributeError('{} already defined in logging module'.format(level_name))
if hasattr(logging, method_name):
raise AttributeError('{} already defined in logging module'.format(method_name))
if hasattr(logging.getLoggerClass(), method_name):
raise AttributeError('{} already defined in logger class'.format(method_name))
# This method was inspired by the answers to Stack Overflow post
# http://stackoverflow.com/q/2183233/2988730, especially
# http://stackoverflow.com/a/13638084/2988730
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(level_num):
self._log(level_num, message, args, **kwargs)
def logToRoot(message, *args, **kwargs):
logging.log(level_num, message, *args, **kwargs)
logging.addLevelName(level_num, level_name)
setattr(logging, level_name, level_num)
setattr(logging.getLoggerClass(), method_name, logForLevel)
setattr(logging, method_name, logToRoot)
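# Register a custom SUMMARY level between INFO (20) and WARNING (30).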
SUMMARY = "SUMMARY"
addLoggingLevel(SUMMARY, 25)
# ======================================================================================
# Initialize logger
# ======================================================================================
logger = logging.getLogger("dqmc")
# Logging format
# frmt = "[%(asctime)s] (%(process)d) - %(name)s:%(levelname)-8s - %(message)s"
frmt = "[%(asctime)s] (%(process)d) - %(levelname)-7s - %(message)s"
formatter = logging.Formatter(frmt, datefmt="%H:%M:%S")
# Set up console logger
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
# Set up file logger
fh = logging.FileHandler("dqmc.log", mode="w")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Set logging level
logger.setLevel(logging.WARNING)
logging.root.setLevel(logging.NOTSET)
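# Illustrative usage of the custom SUMMARY level (kept as comments so that
# importing this module does not emit log records; messages are examples):
#
#     logger.summary("Equilibration finished")          # method added by addLoggingLevel
#     logger.log(logging.SUMMARY, "Sweep %d done", 10)  # logging.SUMMARY == 25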
| python |
import time
from multiprocessing.dummy import freeze_support
from pprint import pprint
from flowless import TaskState, RouterState, ChoiceState, FlowRoot, save_graph, QueueState
from flowless.deploy import deploy_pipline
from flowless.states.router import ParallelRouter
from flowless.demo_states import ModelClass, Test1Class, Test2Class
def f5(x):
return x * 7
m1 = TaskState('m1', class_name='ModelClass', class_params={'z': 100})
m2 = TaskState('m2', class_name='ModelClass', class_params={'z': 200})
m3 = TaskState('m3', class_name='ModelClass', class_params={'z': 300})
p = FlowRoot('root', start_at='ingest', trace=2).add_states(
TaskState('ingest', class_name=Test1Class),
ChoiceState('if', default='data-prep')
.add_choice('event.body==10', 'stream')
.add_choice('event.body==7', 'update-db'),
TaskState('data-prep', class_name='Test1Class', resource='f2'),
RouterState('router', routes=[m1, m2, m3], class_name=ParallelRouter, class_params={'executor': ''}),
QueueState('stream', outlets=['update-db'], resource=''),
TaskState('update-db', handler='json.dumps'),
)
p.default_resource = 'f1'
p.streams_path = 'x'
p.add_resource('st', 'stream', '')
p.add_resource('f1', 'function', '')
p.add_resource('f2', 'function', 'hub://model_server', endpoint='http://localhost:5000')
print(p.to_yaml())
p.export('p.json')
p.prepare('f1')
deploy_pipline(p)
exit(0)
print(p.init('f1', namespace=globals()))
save_graph(p, "js/data.json")
print(p.run(10, from_state='if'))
# for process executor
# if __name__ == '__main__':
# __spec__ = None
# freeze_support()
# print(p.run(10))
| python |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
send.py will send a single message to the queue.
"""
# Pika is a pure-Python implementation of the AMQP 0-9-1 protocol
import pika
# guest user can only connect via localhost
#credentials = pika.PlainCredentials('guest', 'guest')
credentials = pika.PlainCredentials('pi', 'macintosh')
connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.31.156',
port=5672,
virtual_host='/',
credentials=credentials))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World!')
print("[x] Sent 'Hello World!'")
connection.close()
"""
Please keep in mind that this and other tutorials are, well, tutorials. They demonstrate one new concept at a time and may
intentionally oversimplify some things and leave out others. For example, topics such as connection management, error handling,
connection recovery, concurrency and metric collection are largely omitted for the sake of brevity. Such simplified code
should not be considered production ready.
""" | python |
from re import compile as re_compile, error as re_error, escape
from sys import stdout
from ..constant.colors import *
__all__ = [
'black', 'dark_blue', 'dark_green', 'dark_aqua', 'dark_red', 'dark_purple',
'gold', 'gray', 'dark_gray', 'blue', 'green', 'aqua', 'red', 'light_purple',
'yellow', 'white',
'input_regex',
]
def black(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{BLACK}{string}{end}\x1b[0m')
def dark_blue(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{DARK_BLUE}{string}{end}\x1b[0m')
def dark_green(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{DARK_GREEN}{string}{end}\x1b[0m')
def dark_aqua(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{DARK_AQUA}{string}{end}\x1b[0m')
def dark_red(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{DARK_RED}{string}{end}\x1b[0m')
def dark_purple(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
    stdout.write(f'{DARK_PURPLE}{string}{end}\x1b[0m')
def gold(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{GOLD}{string}{end}\x1b[0m')
def gray(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{GRAY}{string}{end}\x1b[0m')
def dark_gray(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{DARK_GRAY}{string}{end}\x1b[0m')
def blue(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{BLUE}{string}{end}\x1b[0m')
def green(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{GREEN}{string}{end}\x1b[0m')
def aqua(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{AQUA}{string}{end}\x1b[0m')
def red(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{RED}{string}{end}\x1b[0m')
def light_purple(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{LIGHT_PURPLE}{string}{end}\x1b[0m')
def yellow(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{YELLOW}{string}{end}\x1b[0m')
def white(*args, sep=' ', end='\n'):
string = sep.join(f'{arg}' for arg in args)
stdout.write(f'{WHITE}{string}{end}\x1b[0m')
def input_regex(prompt: str, /, pattern: str) -> str:
try:
re_pattern = re_compile(pattern)
except re_error as err:
raise ValueError(f'invalid pattern: {err}')
while True:
green(prompt)
string = input(']> ')
if re_pattern.fullmatch(string):
return string
red(f'Invalid input for regex pattern {escape(pattern)}')
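# Illustrative usage of input_regex (the prompt and pattern are arbitrary examples):
#
#     username = input_regex('Choose a username:', r'[A-Za-z0-9_]{3,16}')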
| python |
# import packages
import bs4
import requests
from bs4 import BeautifulSoup
# get soup object
def get_soup(text):
return BeautifulSoup(text, "lxml", from_encoding='utf-8')
# extract company
def extract_company(div):
try:
        return (div.find('div', attrs={'class': 'job-result-card__contents'}).find('h4').text)
except:
return ''
# extract job salary
def extract_salary(div):
return 'Ksh Confidential'
# extract job location
def extract_location(div):
try:
        return (div.find('div', attrs={'class': 'job-result-card__contents'})
                .find('div', attrs={'class': 'job-result-card__meta'}).find('span').text)
except:
return ''
# extract job title
def extract_job_title(div):
try:
        return (div.find('div', attrs={'class': 'job-result-card__contents'}).find('h3').text)
except:
return ''
# extract jd summary
def extract_summary(link):
try:
text = extract_fulltext(link)
sentences = text.splitlines()
return ' '.join(sentences[0:2])
except Exception as e:
write_logs(str(e))
return ''
return ''
# extract link of job description
def extract_link(div):
myurl = 'https://linkedin.com/jobs/view/'
try:
job_id = div.attrs['data-id']
return (myurl+job_id)
except:
return ''
# extract date of job when it was posted
def extract_date(div):
try:
        return (div.find('div', attrs={'class': 'job-result-card__contents'})
                .find('div', attrs={'class': 'job-result-card__meta'}).find('time').attrs['datetime'])
except:
return ''
# extract full job description from link
def extract_fulltext(url):
try:
page = requests.get(url)
soup = BeautifulSoup(page.text, "lxml", from_encoding='utf-8')
div = soup.find('section', attrs={
'class': 'show-more-less-html'}).find('div', attrs={
'class': 'show-more-less-html__markup'})
return '\n'.join(div.stripped_strings)
except Exception as e:
write_logs(str(e))
return ''
return ''
# write logs to file
def write_logs(text):
# print(text + '\n')
f = open('log.txt', 'a')
f.write(text + '\n')
f.close()
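# Minimal driver sketch (illustrative only): the search URL and the container
# tag/class below are assumptions inferred from the selectors used above, not
# a verified LinkedIn endpoint or markup.
#
#     page = requests.get('https://www.linkedin.com/jobs/search?keywords=python')
#     soup = get_soup(page.text)
#     for div in soup.find_all('li', attrs={'class': 'job-result-card'}):
#         print(extract_job_title(div), extract_company(div), extract_link(div))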
| python |
#!/usr/bin/env python
"""packt.py: Grab the daily free book claim from Packt Press.
This will run under Python 2.7 and 3.4 with minimum dependencies.
The goal was the most simplistic code that will function. The
script can be run from cron.
Replace the two lines containing the username/email and password with your
credentials.
Depends on:
requests
beautifulsoup
The code is heavily influenced by:
https://github.com/movb/packt-grabber
https://github.com/igbt6/Packt-Publishing-Free-Learning
https://github.com/niqdev/packtpub-crawler
"""
__author__ = "Michael McGarrah"
__email__ = "[email protected]"
__version__ = "0.1.0"
import sys
import requests
from bs4 import BeautifulSoup
email = '[email protected]'
password = 'CHANGE_ME_TOO'
base_url = 'https://www.packtpub.com'
free_url = 'https://www.packtpub.com/packt/offers/free-learning'
headers = {'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
if __name__ == "__main__":
s = requests.Session()
r = s.get(free_url, headers=headers, timeout=10)
soup = BeautifulSoup(r.text)
form = soup.find('form', {'id': 'packt-user-login-form'})
if form is None:
        print('Cannot find login form')
sys.exit()
form_build_id = form.find('input', attrs={'name': 'form_build_id'})['value']
if form_build_id is None:
        print('Cannot find build_id')
sys.exit()
form_id = form.find('input', attrs={'name': 'form_id'})['value']
if form_id is None:
        print('Cannot find form_id')
sys.exit()
post_payload = {
'email': email,
'password': password,
'op': 'Login',
'form_build_id': form_build_id,
'form_id': form_id
}
r = s.post(free_url, headers=headers, data=post_payload)
soup = BeautifulSoup(r.text)
login_error = soup.find('div', {'class': 'messages error'})
if login_error is not None:
        print('Login failed')
sys.exit()
    print('Logged into Packt')
deal_of_day = soup.find('div', {'id': 'deal-of-the-day'})
if deal_of_day is None:
        print('No deal of day found')
sys.exit()
claim_url = soup.find('a', class_='twelve-days-claim')['href']
if claim_url is None:
        print('Cannot find claim url')
sys.exit()
r = s.get(base_url + claim_url, headers=headers)
if r.status_code != 200:
        print('Claim failed for book. Likely bad credentials')
sys.exit()
soup = BeautifulSoup(r.text)
account_list = soup.find('div', {'id': 'product-account-list'})
if account_list is None:
        print('Cannot access claim page. Probably bad credentials')
sys.exit()
    print('Claim processed')
| python |
# coding=utf-8
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
# Modules
{
"module_name": "Case Management",
"color": "grey",
"icon": "octicon octicon-organization",
"type": "module",
"label": _("Case Management")
},
{
"module_name": "CPBNs",
"color": "grey",
"icon": "octicon octicon-broadcast",
"type": "module",
"label": _("CPBNs")
},
]
| python |
#!/usr/bin/env python
import argparse
import gzip
from contextlib import ExitStack
import pysam
from statistics import mean, median
argparser = argparse.ArgumentParser(description = 'Aggregate depth information (output as JSON) from individual depth files (generated using SAMtools mpileup).')
argparser.add_argument('-i', '--in', metavar = 'file', dest = 'in_files_list', required = True, help = 'Input file which lists all depth files (one depth file per sample) generated using SAMtools mpileup. One file per line.')
argparser.add_argument('-o', '--out', metavar = 'file', dest = 'out_file_name', required = True, help = 'Output file of depth information compressed with bgzip. In addition to this file, the tabix index will be produced.')
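# Each input depth file is expected to be gzip-compressed text with one
# whitespace-separated record per line: <chromosome> <position> <depth>,
# matching the parsing performed below. All files must cover a single chromosome.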
if __name__ == '__main__':
args = argparser.parse_args()
file_names = []
with open(args.in_files_list, 'r') as ifile:
for line in ifile:
line = line.strip()
if line:
file_names.append(line)
chromosomes = set()
positions = dict()
n_indv = len(file_names)
breaks = [1, 5, 10, 15, 20, 25, 30, 50, 100]
with ExitStack() as stack, pysam.BGZFile(args.out_file_name, 'w') as ofile:
ifiles = [ stack.enter_context(gzip.open(file_name, 'rt')) for file_name in file_names ]
while True:
for i, ifile in enumerate(ifiles):
line = ifile.readline()
if line:
chromosome, position, dp = line.rstrip().split()
chromosomes.add(chromosome)
if len(chromosomes) > 1:
raise Exception(f'Multiple chromosomes detected in input files, but only one is allowed.')
positions.setdefault(int(position), []).append(int(dp))
if not positions:
break
min_position = sorted(positions)[0]
depths = positions.pop(min_position)
counts = [0] * len(breaks)
for dp in depths:
for i in range(0, len(breaks)):
if dp >= breaks[i]:
counts[i] += 1
ofile.write('{}\t{:d}\t{:d}\t{{"chrom":"{}","start":{:d},"end":{:d},"mean":{:g},"median":{:g}'.format(chromosome.replace('chr', '', 1), min_position, min_position, chromosome.replace('chr', '', 1), min_position, min_position, mean(depths), median(depths)).encode())
for br, cnt in zip(breaks, counts):
ofile.write(',"{:d}":{:g}'.format(br, cnt / n_indv).encode())
ofile.write('}\n'.encode())
pysam.tabix_index(args.out_file_name, seq_col = 0, start_col = 1, end_col = 1, force = True)
| python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: result_to_latex.py
import os, argparse, json, math
import logging
TEMPLATE = r"""\begin{table*}[tb]
\centering
\caption{Chaos Engineering Experiment Results on HedWig}\label{tab:resultsOfHedwig}
\setcounter{rowcount}{-1}
\begin{tabular}{@{\makebox[3em][r]{\stepcounter{rowcount}\therowcount\hspace*{\tabcolsep}}}lrp{3.2cm}rrp{6.2cm}}
\toprule
Target& Error Code& Original Failure Rate\newline(min, mean, max)& Fail. Rate& Injection Count& Result \scriptsize (SU: success, SF: sending failure, FF: fetching failure, VF: validation failure, SC: server crash, PI: post inspection)\\
\midrule
""" + "%s" + r"""
\bottomrule
\end{tabular}
\end{table*}
"""
TEMPLATE_SINGLE_COLUMN = r"""\begin{table}[tb]
\centering
\scriptsize
\caption{Chaos Engineering Experiment Results on HedWig}\label{tab:resultsOfHedwig}
\begin{tabularx}{\columnwidth}{lrRXXXXXXX}
\toprule
Target \& Error& F. Rate& Inj.& \multicolumn{6}{l}{Behavioral Assessment Criteria}& \\
& & & SU& SF& FF& VF& SC& CO& \\
\midrule
""" + "%s" + r"""
\bottomrule
\end{tabularx}
\end{table}
"""
def handle_args():
parser = argparse.ArgumentParser(
description="Summarize experiment results into a latex table.")
parser.add_argument("-f", "--file", help="the path to the result file (.json)")
parser.add_argument("-s", "--single-column", action="store_true", dest="single_column",
help="print the table in a single-column format")
return parser.parse_args()
def round_number(x, sig = 3):
return round(x, sig - int(math.floor(math.log10(abs(x)))) - 1)
def human_format(num):
num = float('{:.3g}'.format(num))
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude])
def categorize_result(result):
return_str = r"\colorbox{green}{\makebox[0.3em]{√}}"
if result["post_inspection"] == "failed" or result["server_crashed"] > 0:
return_str = r"\colorbox{red}{!}"
elif result["sending_failures"] > 0 or result["fetching_failures"] > 0 or result["validation_failures"] > 0:
return_str = r"\colorbox{orange}{-}"
return return_str.decode("utf-8")
def main(args):
with open(args.file, 'rt') as file:
data = json.load(file)
body = ""
for experiment in data["experiments"]:
if "injection_count" in experiment["result"]:
injection_count = experiment["result"]["injection_count"]
if injection_count == 0: continue # omit the cases in which Phoebe did not inject any errors
else:
# the experiment was done only once and the server crashed
injection_count = 1
result = "%.0f\\%%& %.0f\\%%& %.0f\\%%& %.0f\\%%& %.0f\\%%& %s" % (
float(experiment["result"]["succeeded"]) / experiment["result"]["rounds"] * 100,
float(experiment["result"]["sending_failures"]) / experiment["result"]["rounds"] * 100,
float(experiment["result"]["fetching_failures"]) / experiment["result"]["rounds"] * 100,
float(experiment["result"]["validation_failures"]) / experiment["result"]["rounds"] * 100,
float(experiment["result"]["server_crashed"]) / experiment["result"]["rounds"] * 100,
# the post inspection failure means state corruption is true (T)
"T" if experiment["result"]["post_inspection"] == "failed" else "F"
)
if args.single_column:
body += "%s:%s.& %s& %s& %s& %s\\\\\n"%(
experiment["syscall_name"],
experiment["error_code"][1:4], # remove the "-" before the error code
round_number(experiment["failure_rate"]),
human_format(injection_count),
result,
categorize_result(experiment["result"])
)
else:
body += "%s& %s& %s& %s& %d& %s\\\\\n"%(
experiment["syscall_name"],
experiment["error_code"][1:], # remove the "-" before the error code
"%s, %s, %s"%(round_number(experiment["original_min_rate"]), round_number(experiment["original_mean_rate"]), round_number(experiment["original_max_rate"])),
round_number(experiment["failure_rate"]),
injection_count,
result
)
body = body[:-1] # remove the very last line break
latex = TEMPLATE_SINGLE_COLUMN%body if args.single_column else TEMPLATE%body
latex = latex.replace("_", "\\_")
print(latex)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
args = handle_args()
main(args) | python |
from socket import *
from select import select
class ChatServer:
def __init__(self, host, port):
#startvars
self.PORT = port
self.HOST = host
self.RECV_BUFFER = 4096
self.CONNECTION_LIST = []
#connection
self.server_socket = socket(AF_INET, SOCK_STREAM)
self.server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.server_socket.bind((self.HOST, self.PORT))
self.server_socket.listen(10)
#append ser.socket
self.CONNECTION_LIST.append(self.server_socket)
print('[+]ChatServer startet on port:%s' %str(self.PORT))#debug
#main
self.looping()
self.server_socket.close()
    def broadcast_data(self, sock, msg):
        # Relay the message to every connected client except the server socket and the sender
        for socket in list(self.CONNECTION_LIST):
            if socket != self.server_socket and socket != sock:
                try:
                    socket.send(msg.encode())
                except:
                    socket.close()
                    self.CONNECTION_LIST.remove(socket)
    def looping(self):
        while True:
            # Block until at least one socket is readable
            read_sockets, write_sockets, error_sockets = select(self.CONNECTION_LIST, [], [])
            for sock in read_sockets:
                if sock == self.server_socket:
                    # New client connection
                    sockfd, addr = self.server_socket.accept()
                    self.CONNECTION_LIST.append(sockfd)
                    print("Client (%s, %s) connected" % addr)
                    self.broadcast_data(sockfd, "[%s:%s] entered room\n" % addr)
                else:
                    # Data (or disconnect) from an existing client
                    addr = sock.getpeername()
                    try:
                        data = sock.recv(self.RECV_BUFFER)
                        if data:
                            self.broadcast_data(sock, "\r" + '<' + str(addr) + '> ' + data.decode())
                    except:
                        self.broadcast_data(sock, "[i]Client[%s, %s] is offline" % addr)
                        print("[i]Client[%s, %s] is offline" % addr)
                        sock.close()
                        self.CONNECTION_LIST.remove(sock)
                        continue
server = ChatServer('127.0.0.1', 5000)
| python |
from collections import defaultdict
from itertools import chain
from typing import Collection, Dict, Set, AnyStr, Iterable, TextIO
import pandas as pd
from pandas import Series, DataFrame
import jinja2 as j2
from burdock.core.variable import DaikonVariable, consts_from_df, vars_from_df
from burdock.expander import Expander
from burdock.matcher import Matcher
def _daikon_format_filter(var: DaikonVariable, value=None):
if value is None:
assert var.constant_value is not None
value = var.constant_value
if var.is_integer or var.is_float:
return "{}".format(value)
elif var.is_boolean:
return "{}".format(1 if value else 0)
elif var.is_string:
return "\"{}\"".format(value)
class Burdock:
name: str
variables: Dict[str, DaikonVariable]
traces: DataFrame
latent_variables: Dict[str, DaikonVariable]
latent_traces: DataFrame
_matchers: Dict[str, Collection[Matcher]]
_expanders: Dict[str, Collection[Expander]]
_matched_tags: Dict[str, Set[str]] = defaultdict(set)
_template_env = j2.Environment(loader=j2.PackageLoader('burdock.core', 'templates'))
_template_env.filters['daikon'] = _daikon_format_filter
_decls_template = _template_env.get_template('decls.jinja2')
_dtrace_template = _template_env.get_template('dtrace.jinja2')
def __init__(self, name: AnyStr, df: DataFrame, matchers=None, expanders=None):
self.name = str(name)
self.variables = vars_from_df(df)
self.traces = df
self.latent_variables = dict()
self.latent_traces = DataFrame()
self._matchers: Dict[str, Collection[Matcher]] = defaultdict(set)
if matchers is None:
matchers = []
for matcher in matchers:
self.add_matcher(matcher)
self._expanders: Dict[str, Collection[Expander]] = defaultdict(set)
if expanders is None:
expanders = []
for expander in expanders:
self.add_expander(expander)
def get_variable(self, column_label: str):
if column_label in self.variables:
return self.variables[column_label]
if column_label in self.latent_variables:
return self.latent_variables[column_label]
@property
def matchers(self) -> Iterable[Matcher]:
return chain(*self._matchers.values())
def get_matchers(self, tag: AnyStr) -> Iterable[Matcher]:
return self._matchers.get(str(tag), [])
def add_matcher(self, matcher: Matcher):
self._matchers[matcher.tag] |= {matcher}
def match(self):
for column_id in self.traces.columns:
column: Series = self.traces[column_id]
tags: Set[str] = set()
for matcher in self.matchers:
if matcher.match(column):
tags.add(matcher.tag)
print("Tagged column {} with '{}'.".format(column_id, matcher.tag))
self._matched_tags[column_id] = tags
@property
def expanders(self) -> Iterable[Expander]:
return chain(*self._expanders.values())
def get_expanders(self, tag: AnyStr) -> Iterable[Expander]:
return self._expanders.get(str(tag), [])
def add_expander(self, expander: Expander):
self._expanders[expander.tag] |= {expander}
def expand(self):
for column_id in self.traces.columns:
column: Series = self.traces[column_id]
for tag in self._matched_tags[column_id]:
for expander in self.get_expanders(tag):
const_df = expander.expand_constants(column)
self.latent_variables.update(consts_from_df(const_df))
vars_df = expander.expand_variables(column)
self.latent_variables.update(vars_from_df(vars_df))
self.latent_traces = pd.concat([self.latent_traces, vars_df])
def write_decls(self, out: TextIO):
template_data = {
'name': self.name,
'variables': [
var
for var
in chain(self.variables.values(),
self.latent_variables.values())
]
}
decls_text = self._decls_template.render(template_data)
out.write(decls_text)
def write_dtrace(self, out: TextIO):
template_data = {
'name': self.name,
'traces': [
[
{
'label': label,
'var': self.get_variable(label),
'value': row[label]
}
for label
in chain(self.traces.columns,
self.latent_traces.columns)
]
for (i, row)
in chain(self.traces.iterrows(),
self.latent_traces.iterrows())
]
}
dtrace_text = self._dtrace_template.render(template_data)
out.write(dtrace_text)
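# Illustrative usage sketch (the CSV path and output file names are hypothetical;
# matchers/expanders default to none, as in __init__ above):
#
#     df = pd.read_csv("traces.csv")
#     b = Burdock("example", df)
#     b.match()
#     b.expand()
#     with open("example.decls", "w") as decls, open("example.dtrace", "w") as dtrace:
#         b.write_decls(decls)
#         b.write_dtrace(dtrace)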
| python |
r"""
Gcd domains
"""
#*****************************************************************************
# Copyright (C) 2008 Teresa Gomez-Diaz (CNRS) <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category import Category
from sage.categories.category_singleton import Category_singleton
from sage.misc.cachefunc import cached_method
from sage.categories.integral_domains import IntegralDomains
class GcdDomains(Category_singleton):
"""
The category of gcd domains
domains where gcd can be computed but where there is no guarantee of
factorisation into irreducibles
EXAMPLES::
sage: GcdDomains()
Category of gcd domains
sage: GcdDomains().super_categories()
[Category of integral domains]
TESTS::
sage: TestSuite(GcdDomains()).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: GcdDomains().super_categories()
[Category of integral domains]
"""
return [IntegralDomains()]
class ParentMethods:
pass
class ElementMethods:
# gcd(x,y)
# lcm(x,y)
pass
| python |
import copy
import glob
import os
import numpy as np
import torch.utils.data as data
import torchvision as tv
from PIL import Image
from torch import distributed
from .utils import Subset, group_images
# Converting the id to the train_id. Many objects have a train id at
# 255 (unknown / ignored).
# See there for more information:
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
id_to_trainid = {
0: 255,
1: 255,
2: 255,
3: 255,
4: 255,
5: 255,
6: 255,
7: 0, # road
8: 1, # sidewalk
9: 255,
10: 255,
11: 2, # building
12: 3, # wall
13: 4, # fence
14: 255,
15: 255,
16: 255,
17: 5, # pole
18: 255,
19: 6, # traffic light
20: 7, # traffic sign
21: 8, # vegetation
22: 9, # terrain
23: 10, # sky
24: 11, # person
25: 12, # rider
26: 13, # car
27: 14, # truck
28: 15, # bus
29: 255,
30: 255,
31: 16, # train
32: 17, # motorcycle
33: 18, # bicycle
-1: 255
}
city_to_id = {
"aachen": 0, "bremen": 1, "darmstadt": 2, "erfurt": 3, "hanover": 4,
"krefeld": 5, "strasbourg": 6, "tubingen": 7, "weimar": 8, "bochum": 9,
"cologne": 10, "dusseldorf": 11, "hamburg": 12, "jena": 13,
"monchengladbach": 14, "stuttgart": 15, "ulm": 16, "zurich": 17,
"frankfurt": 18, "lindau": 19, "munster": 20
}
def filter_images(dataset, labels):
# Filter images without any label in LABELS (using labels not reordered)
idxs = []
print(f"Filtering images...")
for i in range(len(dataset)):
domain_id = dataset.__getitem__(i, get_domain=True) # taking domain id
if domain_id in labels:
idxs.append(i)
if i % 1000 == 0:
print(f"\t{i}/{len(dataset)} ...")
return idxs
class CityscapesSegmentationDomain(data.Dataset):
def __init__(self, root, train=True, transform=None, domain_transform=None):
root = os.path.expanduser(root)
annotation_folder = os.path.join(root, 'gtFine')
image_folder = os.path.join(root, 'leftImg8bit')
self.images = [ # Add train cities
(
path,
os.path.join(
annotation_folder,
"train",
path.split("/")[-2],
path.split("/")[-1][:-15] + "gtFine_labelIds.png"
),
city_to_id[path.split("/")[-2]]
) for path in sorted(glob.glob(os.path.join(image_folder, "train/*/*.png")))
]
self.images += [ # Add validation cities
(
path,
os.path.join(
annotation_folder,
"val",
path.split("/")[-2],
path.split("/")[-1][:-15] + "gtFine_labelIds.png"
),
city_to_id[path.split("/")[-2]]
) for path in sorted(glob.glob(os.path.join(image_folder, "val/*/*.png")))
]
self.transform = transform
self.domain_transform = domain_transform
def __getitem__(self, index, get_domain=False):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
if get_domain:
domain = self.images[index][2]
if self.domain_transform is not None:
domain = self.domain_transform(domain)
return domain
try:
img = Image.open(self.images[index][0]).convert('RGB')
target = Image.open(self.images[index][1])
except Exception as e:
raise Exception(f"Index: {index}, len: {len(self)}, message: {str(e)}")
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self.images)
class CityscapesSegmentationIncrementalDomain(data.Dataset):
"""Labels correspond to domains not classes in this case."""
def __init__(
self,
root,
train=True,
transform=None,
labels=None,
idxs_path=None,
masking=True,
overlap=True,
**kwargs
):
full_data = CityscapesSegmentationDomain(root, train)
# take index of images with at least one class in labels and all classes in labels+labels_old+[255]
if idxs_path is not None and os.path.exists(idxs_path):
idxs = np.load(idxs_path).tolist()
else:
idxs = filter_images(full_data, labels)
if idxs_path is not None and distributed.get_rank() == 0:
np.save(idxs_path, np.array(idxs, dtype=int))
rnd = np.random.RandomState(1)
rnd.shuffle(idxs)
train_len = int(0.8 * len(idxs))
if train:
idxs = idxs[:train_len]
print(f"{len(idxs)} images for train")
else:
idxs = idxs[train_len:]
print(f"{len(idxs)} images for val")
target_transform = tv.transforms.Lambda(
lambda t: t.
apply_(lambda x: id_to_trainid.get(x, 255))
)
# make the subset of the dataset
self.dataset = Subset(full_data, idxs, transform, target_transform)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
return self.dataset[index]
def __len__(self):
return len(self.dataset)
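# Illustrative usage sketch (the dataset root, domain label list and index cache
# path are hypothetical; transform is a paired image/target transform):
#
#     dataset = CityscapesSegmentationIncrementalDomain(
#         root="/data/cityscapes",
#         train=True,
#         transform=None,
#         labels=[0, 1, 2],            # domain (city) ids kept for this step
#         idxs_path="step0_idxs.npy",
#     )
#     img, target = dataset[0]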
| python |
import datetime
import json
import yaml
import random
import string
def get_random_name(length=20):
store = string.ascii_letters + string.digits
return random.choice(string.ascii_letters) + ''.join([random.choice(store) for i in range(length - 1)])
def read_credentials(filename):
with open(filename) as fp:
return tuple(fp.read().splitlines())
def dicts_have_key_with_value(dicts, key, value):
return any(value in entry[key] for entry in dicts)
def dict_to_yaml(dictionary, filename):
with open(filename, 'w') as fobject:
yaml.dump(
dictionary,
fobject,
default_flow_style=False
)
def prettify_json(string):
def _datetime_handler(x):
if isinstance(x, datetime.datetime):
return x.isoformat()
raise TypeError("Unknown type")
return json.dumps(
string,
indent=2,
sort_keys=True,
default=_datetime_handler
)
def sort_key(dictionary, sortkey):
return sorted(dictionary, key=lambda k: k[sortkey])
def read_mapping_template(filepath):
with open(filepath, 'r') as f:
return f.read()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Python for AHDA.
Part 1, Example 4.
"""
# Simple list
words = ['Mary', 'had', 'a', 'little', 'lamb']
# words = ('Mary', 'had', 'a', 'little', 'lamb')
print(words)
print(words[1])
words[3] = 'big'
print(words)
| python |
from ..utils import SyncClient, __version__
from .bucket import SyncStorageBucketAPI
from .file_api import SyncBucketProxy
__all__ = [
"SyncStorageClient",
]
class SyncStorageClient(SyncStorageBucketAPI):
"""Manage storage buckets and files."""
def __init__(self, url: str, headers: dict[str, str]) -> None:
super().__init__(
url,
{"User-Agent": f"supabase-py/storage3 v{__version__}", **headers},
SyncClient(),
)
def from_(self, id: str) -> SyncBucketProxy:
"""Run a storage file operation.
Parameters
----------
id
The unique identifier of the bucket
"""
return SyncBucketProxy(id, self.url, self.headers, self._client)
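# Illustrative usage sketch (the storage URL and API key are placeholders):
#
#     client = SyncStorageClient(
#         "https://xyzcompany.supabase.co/storage/v1",
#         {"apiKey": "<service-key>", "Authorization": "Bearer <service-key>"},
#     )
#     avatars = client.from_("avatars")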
| python |
import tensorflow as tf
import numpy as np
import time
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
from tensorflow.python.eager import tape
class FakeData(object):
def __init__(self, length):
super(FakeData, self).__init__()
self.length = length
self.X_train = np.random.random((224, 224, 3)).astype('float32')
self.Y_train = np.array([np.random.randint(1000)]).astype('int32')
def __iter__(self):
for _ in range(self.length):
yield self.X_train, self.Y_train
def __len__(self):
return self.length
def output_shapes(self):
return (self.X_train.shape, self.Y_train.shape)
def output_types(self):
return (tf.float32, tf.int32)
def get_data(df, batch_size):
tdf = tf.data.Dataset.from_generator(
generator=df.__iter__,
output_types=df.output_types(),
output_shapes=df.output_shapes())
tdf = tdf.batch(batch_size)
tdf = tdf.prefetch(tf.data.experimental.AUTOTUNE)
return tdf
def train_keras_model_by_fit(defun=False):
# warm up by first batch_size = 1
for batch_size in [1, 1, 4, 16, 32, 64, 128]:
df = FakeData(batch_size * 100)
model = tf.keras.applications.resnet.ResNet50(
input_shape=df.output_shapes()[0], include_top=True, weights=None)
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=0.001),
loss='binary_crossentropy',
metrics=['accuracy'])
if defun:
model.call = tf.function(model.call)
start = time.time()
model.fit(get_data(df, batch_size), epochs=1)
# model.call(get_data(df, batch_size))
end = time.time()
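        # 100 steps per epoch here, so (end - start) * 1000 / 100 == (end - start) * 10 gives ms per step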
print("batch_size: {}, cost: {} ms.".format(batch_size, (end - start) *
10))
def compute_gradients(model, images, labels, num_replicas=1):
with tf.GradientTape() as grad_tape:
logits = model(images, training=True)
loss = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
if num_replicas != 1:
loss /= num_replicas
with tape.stop_recording():
grads = grad_tape.gradient(loss, model.variables)
return grads
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
def random_batch(batch_size, data_format='channels_first'):
shape = (3, 224, 224) if data_format == 'channels_first' else (224, 224, 3)
shape = (batch_size, ) + shape
num_classes = 1000
images = tf.random.uniform(shape)
labels = tf.random.uniform(
[batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
one_hot = tf.one_hot(labels, num_classes)
return images, one_hot
def train_eager_with_tf_function(defun=True):
from resnet50 import ResNet50
model = ResNet50(data_format='channels_first', classes=1000)
if defun:
model.call = tf.function(model.call)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.01)
for batch_size in [1, 1, 4, 16, 32, 64, 128]:
images, labels = random_batch(batch_size)
for i in range(105):
if i == 5:
start = time.time()
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
end = time.time()
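        # 100 timed iterations (warmup ends at i == 5), so (end - start) * 10 gives ms per iteration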
print("batch_size: {}, cost: {} ms.".format(batch_size, (end - start) *
10))
if __name__ == '__main__':
defun = True
# train_keras_model_by_fit(defun)
train_eager_with_tf_function(defun)
| python |
import json
import logging
from django.utils.translation import ugettext_lazy as _
from requests import RequestException
from connected_accounts.conf import settings
from connected_accounts.provider_pool import providers
from .base import OAuth2Provider, ProviderAccount
logger = logging.getLogger('connected_accounts')
class DisqusAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('profileUrl')
def get_avatar_url(self):
username = self.account.extra_data.get('username')
return 'https://disqus.com/api/users/avatars/%s.jpg' % username # noqa
def to_str(self):
default = super(DisqusAccount, self).to_str()
return self.account.extra_data.get('name', default)
def extract_common_fields(self):
data = self.account.extra_data
return dict(
name=data.get('name', ''),
email=data.get('email', ''),
username=data.get('username', '')
)
class DisqusProvider(OAuth2Provider):
id = 'disqus'
name = _('Disqus')
account_class = DisqusAccount
expires_in_key = 'expires_in'
scope_separator = ','
authorization_url = 'https://disqus.com/api/oauth/2.0/authorize/'
access_token_url = 'https://disqus.com/api/oauth/2.0/access_token/'
profile_url = 'https://disqus.com/api/3.0/users/details.json'
consumer_key = settings.CONNECTED_ACCOUNTS_DISQUS_CONSUMER_KEY
consumer_secret = settings.CONNECTED_ACCOUNTS_DISQUS_CONSUMER_SECRET
scope = settings.CONNECTED_ACCOUNTS_DISQUS_SCOPE
def get_profile_data(self, raw_token):
"""Fetch user profile information."""
token_data = json.loads(raw_token)
params = {
'access_token': token_data['access_token'],
'api_key': self.consumer_key,
'api_secret': token_data['access_token']
}
try:
response = self.request('get', self.profile_url, params=params)
response.raise_for_status()
except RequestException as e:
logger.error('Unable to fetch user profile: {0}'.format(e))
return None
else:
return response.json() or response.text
def extract_uid(self, data):
"""Return unique identifier from the profile info."""
return str(data['response']['id'])
def extract_extra_data(self, data):
return data.get('response', {})
providers.register(DisqusProvider)
| python |
import argparse
import difflib
import re
import sys
from ssort._exceptions import UnknownEncodingError
from ssort._files import find_python_files
from ssort._ssort import ssort
from ssort._utils import (
detect_encoding,
detect_newline,
escape_path,
normalize_newlines,
)
def main():
parser = argparse.ArgumentParser(
description="Sort python statements into dependency order",
)
parser.add_argument(
"--diff",
dest="show_diff",
action="store_true",
help="Prints a diff of all changes ssort would make to a file.",
)
parser.add_argument(
"--check",
dest="check",
action="store_true",
help="Check the file for unsorted statements. Returns 0 if nothing "
"needs to be changed. Otherwise returns 1.",
)
parser.add_argument(
"files", nargs="*", help="One or more python files to sort"
)
args = parser.parse_args()
unsorted = 0
unsortable = 0
unchanged = 0
for path in find_python_files(args.files):
errors = False
try:
original_bytes = path.read_bytes()
except FileNotFoundError:
sys.stderr.write(f"ERROR: {escape_path(path)} does not exist\n")
unsortable += 1
continue
except IsADirectoryError:
sys.stderr.write(f"ERROR: {escape_path(path)} is a directory\n")
unsortable += 1
continue
except PermissionError:
sys.stderr.write(f"ERROR: {escape_path(path)} is not readable\n")
unsortable += 1
continue
# The logic for converting from bytes to text is duplicated in `ssort`
# and here because we need access to the text to be able to compute a
# diff at the end.
try:
encoding = detect_encoding(original_bytes)
except UnknownEncodingError as exc:
sys.stderr.write(
f"ERROR: unknown encoding, {exc.encoding!r}, in {escape_path(path)}\n"
)
unsortable += 1
continue
try:
original = original_bytes.decode(encoding)
except UnicodeDecodeError as exc:
sys.stderr.write(
f"ERROR: encoding error in {escape_path(path)}: {exc}\n"
)
unsortable += 1
continue
newline = detect_newline(original)
original = normalize_newlines(original)
def _on_parse_error(message, *, lineno, col_offset, **kwargs):
nonlocal errors
errors = True
sys.stderr.write(
f"ERROR: syntax error in {escape_path(path)}: "
+ f"line {lineno}, column {col_offset}\n"
)
def _on_unresolved(message, *, name, lineno, col_offset, **kwargs):
nonlocal errors
errors = True
sys.stderr.write(
f"ERROR: unresolved dependency {name!r} "
+ f"in {escape_path(path)}: "
+ f"line {lineno}, column {col_offset}\n"
)
def _on_wildcard_import(**kwargs):
sys.stderr.write(
"WARNING: can't determine dependencies on * import\n"
)
try:
updated = ssort(
original,
filename=escape_path(path),
on_parse_error=_on_parse_error,
on_unresolved=_on_unresolved,
on_wildcard_import=_on_wildcard_import,
)
if errors:
unsortable += 1
continue
except Exception as e:
raise Exception(f"ERROR while sorting {path}\n") from e
if original != updated:
unsorted += 1
if args.check:
sys.stderr.write(
f"ERROR: {escape_path(path)} is incorrectly sorted\n"
)
else:
sys.stderr.write(f"Sorting {escape_path(path)}\n")
# The logic for converting from bytes to text is duplicated in
# `ssort` and here because we need access to the text to be able
# to compute a diff at the end.
# We rename a little prematurely to avoid shadowing `updated`,
# which we use later for printing the diff.
updated_bytes = updated
if newline != "\n":
updated_bytes = re.sub("\n", newline, updated_bytes)
updated_bytes = updated_bytes.encode(encoding)
path.write_bytes(updated_bytes)
else:
unchanged += 1
if args.show_diff:
sys.stderr.writelines(
difflib.unified_diff(
original.splitlines(keepends=True),
updated.splitlines(keepends=True),
fromfile=f"{path}:before",
tofile=f"{path}:after",
)
)
if args.check:
def _fmt_count(count):
return f"{count} file" if count == 1 else f"{count} files"
summary = []
if unsorted:
summary.append(f"{_fmt_count(unsorted)} would be resorted")
if unchanged:
summary.append(f"{_fmt_count(unchanged)} would be left unchanged")
if unsortable:
summary.append(f"{_fmt_count(unsortable)} would not be sortable")
if not unsorted and not unchanged and not unsortable:
summary.append("No files are present to be sorted. Nothing to do.")
sys.stderr.write(", ".join(summary) + "\n")
if unsorted or unsortable:
sys.exit(1)
else:
def _fmt_count_were(count):
if count == 1:
return f"{count} file was"
else:
return f"{count} files were"
summary = []
if unsorted:
summary.append(f"{_fmt_count_were(unsorted)} resorted")
if unchanged:
summary.append(f"{_fmt_count_were(unchanged)} left unchanged")
if unsortable:
summary.append(f"{_fmt_count_were(unsortable)} not sortable")
if not unsorted and not unchanged and not unsortable:
summary.append("No files are present to be sorted. Nothing to do.")
sys.stderr.write(", ".join(summary) + "\n")
if unsortable:
sys.exit(1)
| python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: thunderstorm.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='thunderstorm.proto',
package='thunderstorm',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x12thunderstorm.proto\x12\x0cthunderstorm\"O\n\x0e\x43ylinderIdList\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12,\n\ncylinderId\x18\x02 \x03(\x0b\x32\x18.thunderstorm.CylinderId\"\x85\x01\n\nCylinderId\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x11\n\tparent_id\x18\x02 \x01(\x05\x12\x10\n\x08particle\x18\x03 \x01(\x05\x12\x0e\n\x06\x65nergy\x18\x04 \x01(\x01\x12\r\n\x05theta\x18\x05 \x01(\x01\x12\x0e\n\x06radius\x18\x06 \x01(\x01\x12\t\n\x01z\x18\x07 \x01(\x01\x12\x0c\n\x04time\x18\x08 \x01(\x01\"Y\n\x14ParticleDetectorList\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12\x30\n\x04\x64\x61ta\x18\x02 \x03(\x0b\x32\".thunderstorm.ParticleDetectorData\"e\n\x14ParticleDetectorData\x12\x10\n\x08particle\x18\x01 \x01(\x05\x12\x0e\n\x06\x65nergy\x18\x02 \x01(\x01\x12\r\n\x05theta\x18\x03 \x01(\x01\x12\x0e\n\x06radius\x18\x04 \x01(\x01\x12\x0c\n\x04time\x18\x05 \x01(\x01\"L\n\x0b\x43umulator1D\x12\x0e\n\x06number\x18\x01 \x01(\x05\x12\x0c\n\x04left\x18\x02 \x01(\x01\x12\r\n\x05right\x18\x03 \x01(\x01\x12\x10\n\x04\x64\x61ta\x18\x04 \x03(\x01\x42\x02\x10\x01\":\n\x0bUniformBins\x12\x0e\n\x06number\x18\x01 \x01(\x05\x12\x0c\n\x04left\x18\x02 \x01(\x01\x12\r\n\x05right\x18\x03 \x01(\x01\"k\n\x0b\x43umulator2D\x12$\n\x01x\x18\x01 \x01(\x0b\x32\x19.thunderstorm.UniformBins\x12$\n\x01y\x18\x02 \x01(\x0b\x32\x19.thunderstorm.UniformBins\x12\x10\n\x04\x64\x61ta\x18\x03 \x03(\x01\x42\x02\x10\x01\x62\x06proto3'
)
_CYLINDERIDLIST = _descriptor.Descriptor(
name='CylinderIdList',
full_name='thunderstorm.CylinderIdList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='eventId', full_name='thunderstorm.CylinderIdList.eventId', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cylinderId', full_name='thunderstorm.CylinderIdList.cylinderId', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=36,
serialized_end=115,
)
_CYLINDERID = _descriptor.Descriptor(
name='CylinderId',
full_name='thunderstorm.CylinderId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='thunderstorm.CylinderId.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_id', full_name='thunderstorm.CylinderId.parent_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='particle', full_name='thunderstorm.CylinderId.particle', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='energy', full_name='thunderstorm.CylinderId.energy', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='theta', full_name='thunderstorm.CylinderId.theta', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='radius', full_name='thunderstorm.CylinderId.radius', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='z', full_name='thunderstorm.CylinderId.z', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time', full_name='thunderstorm.CylinderId.time', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=251,
)
_PARTICLEDETECTORLIST = _descriptor.Descriptor(
name='ParticleDetectorList',
full_name='thunderstorm.ParticleDetectorList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='eventId', full_name='thunderstorm.ParticleDetectorList.eventId', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='thunderstorm.ParticleDetectorList.data', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=342,
)
_PARTICLEDETECTORDATA = _descriptor.Descriptor(
name='ParticleDetectorData',
full_name='thunderstorm.ParticleDetectorData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='particle', full_name='thunderstorm.ParticleDetectorData.particle', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='energy', full_name='thunderstorm.ParticleDetectorData.energy', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='theta', full_name='thunderstorm.ParticleDetectorData.theta', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='radius', full_name='thunderstorm.ParticleDetectorData.radius', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time', full_name='thunderstorm.ParticleDetectorData.time', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=344,
serialized_end=445,
)
_CUMULATOR1D = _descriptor.Descriptor(
name='Cumulator1D',
full_name='thunderstorm.Cumulator1D',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number', full_name='thunderstorm.Cumulator1D.number', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left', full_name='thunderstorm.Cumulator1D.left', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right', full_name='thunderstorm.Cumulator1D.right', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='thunderstorm.Cumulator1D.data', index=3,
number=4, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\020\001', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=447,
serialized_end=523,
)
_UNIFORMBINS = _descriptor.Descriptor(
name='UniformBins',
full_name='thunderstorm.UniformBins',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number', full_name='thunderstorm.UniformBins.number', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left', full_name='thunderstorm.UniformBins.left', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right', full_name='thunderstorm.UniformBins.right', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=525,
serialized_end=583,
)
_CUMULATOR2D = _descriptor.Descriptor(
name='Cumulator2D',
full_name='thunderstorm.Cumulator2D',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='thunderstorm.Cumulator2D.x', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='y', full_name='thunderstorm.Cumulator2D.y', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='thunderstorm.Cumulator2D.data', index=2,
number=3, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\020\001', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=585,
serialized_end=692,
)
_CYLINDERIDLIST.fields_by_name['cylinderId'].message_type = _CYLINDERID
_PARTICLEDETECTORLIST.fields_by_name['data'].message_type = _PARTICLEDETECTORDATA
_CUMULATOR2D.fields_by_name['x'].message_type = _UNIFORMBINS
_CUMULATOR2D.fields_by_name['y'].message_type = _UNIFORMBINS
DESCRIPTOR.message_types_by_name['CylinderIdList'] = _CYLINDERIDLIST
DESCRIPTOR.message_types_by_name['CylinderId'] = _CYLINDERID
DESCRIPTOR.message_types_by_name['ParticleDetectorList'] = _PARTICLEDETECTORLIST
DESCRIPTOR.message_types_by_name['ParticleDetectorData'] = _PARTICLEDETECTORDATA
DESCRIPTOR.message_types_by_name['Cumulator1D'] = _CUMULATOR1D
DESCRIPTOR.message_types_by_name['UniformBins'] = _UNIFORMBINS
DESCRIPTOR.message_types_by_name['Cumulator2D'] = _CUMULATOR2D
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CylinderIdList = _reflection.GeneratedProtocolMessageType('CylinderIdList', (_message.Message,), {
'DESCRIPTOR' : _CYLINDERIDLIST,
'__module__' : 'thunderstorm_pb2'
# @@protoc_insertion_point(class_scope:thunderstorm.CylinderIdList)
})
_sym_db.RegisterMessage(CylinderIdList)
CylinderId = _reflection.GeneratedProtocolMessageType('CylinderId', (_message.Message,), {
'DESCRIPTOR' : _CYLINDERID,
'__module__' : 'thunderstorm_pb2'
# @@protoc_insertion_point(class_scope:thunderstorm.CylinderId)
})
_sym_db.RegisterMessage(CylinderId)
ParticleDetectorList = _reflection.GeneratedProtocolMessageType('ParticleDetectorList', (_message.Message,), {
'DESCRIPTOR' : _PARTICLEDETECTORLIST,
'__module__' : 'thunderstorm_pb2'
# @@protoc_insertion_point(class_scope:thunderstorm.ParticleDetectorList)
})
_sym_db.RegisterMessage(ParticleDetectorList)
ParticleDetectorData = _reflection.GeneratedProtocolMessageType('ParticleDetectorData', (_message.Message,), {
'DESCRIPTOR' : _PARTICLEDETECTORDATA,
'__module__' : 'thunderstorm_pb2'
# @@protoc_insertion_point(class_scope:thunderstorm.ParticleDetectorData)
})
_sym_db.RegisterMessage(ParticleDetectorData)
Cumulator1D = _reflection.GeneratedProtocolMessageType('Cumulator1D', (_message.Message,), {
'DESCRIPTOR' : _CUMULATOR1D,
'__module__' : 'thunderstorm_pb2'
# @@protoc_insertion_point(class_scope:thunderstorm.Cumulator1D)
})
_sym_db.RegisterMessage(Cumulator1D)
UniformBins = _reflection.GeneratedProtocolMessageType('UniformBins', (_message.Message,), {
'DESCRIPTOR' : _UNIFORMBINS,
'__module__' : 'thunderstorm_pb2'
# @@protoc_insertion_point(class_scope:thunderstorm.UniformBins)
})
_sym_db.RegisterMessage(UniformBins)
Cumulator2D = _reflection.GeneratedProtocolMessageType('Cumulator2D', (_message.Message,), {
'DESCRIPTOR' : _CUMULATOR2D,
'__module__' : 'thunderstorm_pb2'
# @@protoc_insertion_point(class_scope:thunderstorm.Cumulator2D)
})
_sym_db.RegisterMessage(Cumulator2D)
_CUMULATOR1D.fields_by_name['data']._options = None
_CUMULATOR2D.fields_by_name['data']._options = None
# @@protoc_insertion_point(module_scope)
| python |
import json
import os
def dump_json(o: object, filename: str) -> None:
with open(filename, 'w', encoding='utf8') as f:
json.dump(o, f, ensure_ascii=False)
def load_json(filename: str):
with open(filename, 'r', encoding='utf8') as f:
return json.load(f)
def setup_django_pycharm():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Closure_Project.Closure_Project.settings")
import django
django.setup()
| python |
from django.test import TestCase
from . import *
class AbstractFormModelTestCase(TestCase):
def setUp(self):
pass
def create_form(self):
return AbstractFormModel.objects.create()
def test_form_creation(self):
print("Testing if running")
f = self.create_form()
l = AbstractFormModel()
self.assertEqual(f.get_required_sign_level(), 0) | python |
"""
REST API Documentation for TheOrgBook
TheOrgBook is a repository for Verifiable Claims made about Organizations related to a known foundational Verifiable Claim. See https://github.com/bcgov/VON
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from django.http.response import JsonResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import permissions
from rest_framework import mixins
from rest_framework import generics
from rest_framework_bulk import BulkCreateModelMixin
from . import serializers
from .models.DoingBusinessAs import DoingBusinessAs
from .models.InactiveClaimReason import InactiveClaimReason
from .models.IssuerService import IssuerService
from .models.Jurisdiction import Jurisdiction
from .models.Location import Location
from .models.LocationType import LocationType
from .models.VerifiableClaim import VerifiableClaim
from .models.VerifiableClaimType import VerifiableClaimType
from .models.VerifiableOrg import VerifiableOrg
from .models.VerifiableOrgType import VerifiableOrgType
from django.db.models import Count
from pathlib import Path
import os
import os.path
from django.conf import settings
# Custom views. This file is hand edited.
class verifiableOrgsIdVerifiableclaimsGet(APIView):
def get(self, request, id):
"""
Returns the Claims for a verifiable Organization
"""
org = VerifiableOrg.objects.get(id=id)
claims = VerifiableClaim.objects.filter(verifiableOrgId=org)
serializer = serializers.VerifiableClaimSerializer(claims, many=True)
return Response(serializer.data)
class verifiableOrgsIdDoingBusinessAsGet(APIView):
def get(self, request, id):
"""
Returns the Doing Business As information for a verifiable Organization
"""
org = VerifiableOrg.objects.get(id=id)
dbas = DoingBusinessAs.objects.filter(verifiableOrgId=org)
serializer = serializers.DoingBusinessAsSerializer(dbas, many=True)
return Response(serializer.data)
class verifiableOrgsIdLocationsGet(APIView):
def get(self, request, id):
"""
Returns the locations for a verifiable Organization
"""
org = VerifiableOrg.objects.get(id=id)
locations = Location.objects.filter(verifiableOrgId=org)
serializer = serializers.LocationSerializer(locations, many=True)
return Response(serializer.data)
class quickLoad(APIView):
def get(self, request):
"""
Used to initialize a client application.
Returns record counts, and data types required by the web application to perform filtering and/or populate list(s).
"""
response = {
'counts': recordCounts.get_recordCounts(),
'records': {}
}
inactive = InactiveClaimReason.objects.all()
response['records']['inactiveclaimreasons'] = serializers.InactiveClaimReasonSerializer(inactive, many=True).data
issuers = IssuerService.objects.all()
response['records']['issuerservices'] = serializers.IssuerServiceSerializer(issuers, many=True).data
jurisd = Jurisdiction.objects.all()
response['records']['jurisdictions'] = serializers.JurisdictionSerializer(jurisd, many=True).data
locTypes = LocationType.objects.all()
response['records']['locationtypes'] = serializers.LocationTypeSerializer(locTypes, many=True).data
claimTypes = VerifiableClaimType.objects.all()
response['records']['verifiableclaimtypes'] = serializers.VerifiableClaimTypeSerializer(claimTypes, many=True).data
orgTypes = VerifiableOrgType.objects.all()
response['records']['verifiableorgtypes'] = serializers.VerifiableOrgTypeSerializer(orgTypes, many=True).data
return JsonResponse(response)
class recordCounts(APIView):
@staticmethod
def get_recordCounts():
return {
'doingbusinessas': DoingBusinessAs.objects.count(),
'inactiveclaimreasons': InactiveClaimReason.objects.count(),
'issuerservices': IssuerService.objects.count(),
'jurisdictions': Jurisdiction.objects.count(),
'locations': Location.objects.count(),
'locationtypes': LocationType.objects.count(),
'verifiableclaims': VerifiableClaim.objects.count(),
'verifiableclaimtypes': VerifiableClaimType.objects.count(),
'verifiableorgs': VerifiableOrg.objects.count(),
'verifiableorgtypes': VerifiableOrgType.objects.count(),
}
def get(self, request):
"""
Returns record count information.
"""
response = {
'counts': self.get_recordCounts()
}
return JsonResponse(response)
class custom_settings(APIView):
"""
Returns contents of an active custom DJANGO settings file as raw JSON
"""
    def get(self, request):
        data = {}
        if not hasattr(settings, 'CUSTOMIZATIONS'):
            return JsonResponse(data)
        data = settings.CUSTOMIZATIONS
        # CUSTOMIZATIONS is stored with single-quoted keys; normalize to valid JSON first.
        return JsonResponse(json.loads(str(data).replace("'", '"')))
| python |
import pygments
import pygments.lexers
from pygments.token import Token
import PIL, PIL.Image, PIL.ImageFont, PIL.ImageDraw
from PIL.ImageColor import getrgb
import sys, os
import subprocess, re
font = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'font.pil')
class StyleDict(dict):
''' Store color information based on pygments token types. '''
def __init__(self):
self["fg"] = '#000000'
self["bg"] = '#ffffff'
def __missing__(self, token):
        ''' Search the token hierarchy for missing tokens. Default to
            foreground color. '''
for t in reversed(token.split()):
if t in self:
self[token] = self[t]
return self[token]
self[token] = self["fg"]
return self[token]
def __setitem__(self, key, value):
''' Auto-convert CSS/HTML color hashes (e.g. #112233) '''
if isinstance(value, str):
value = getrgb(value)
dict.__setitem__(self, key, value)
def _c_blend(c1,c2,f=0.5):
''' Blend two colors together. '''
return (int(c1[0]*f + c2[0]*(1-f)),
int(c1[1]*f + c2[1]*(1-f)),
int(c1[2]*f + c2[2]*(1-f)))
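# For example, an equal blend of black and white gives mid-grey:
# _c_blend((0, 0, 0), (255, 255, 255), 0.5) == (127, 127, 127)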
class Renderer:
def __init__(self, git_path, filename, out="out.flv",
pygments_style="default", fps=60, size=(1280, 720), quality=90, fast=False):
self.git_path = git_path
self.filename = filename
self.width, self.height = size
self.border = 15
self.lexer = pygments.lexers.get_lexer_for_filename(self.filename)
self.video_out = out
self.style = StyleDict()
self.fps = fps
self.quality = quality
self.font = PIL.ImageFont.load(font)
self.fast = fast
self.do_highlight = False
if pygments_style:
self.do_highlight = True
self.load_pygments_style(pygments_style)
def load_pygments_style(self, name):
from pygments.styles import get_style_by_name
style = get_style_by_name(name)
self.style["bg"] = style.background_color
self.style["fg"] = [255-c for c in self.style["bg"]]
for token, value in list(style.styles.items()):
rules = value.split()
for rule in rules:
if rule.startswith('#'):
self.style[token] = rule
if rule.startswith('bg:#'):
self.style[token] = rule[3:]
break #
if 'bold' not in rules or 'unbold' in rules:
self.style[token] = _c_blend(self.style[token], self.style["bg"], 0.8)
def sh(self, *cmd):
return subprocess.check_output(cmd, cwd=self.git_path).decode('utf8').splitlines()
def run(self):
self.video_prog = subprocess.Popen(
['ffmpeg', '-loglevel', 'panic', '-y',
'-f', 'image2pipe', '-vcodec', 'mjpeg', '-r', str(self.fps), '-i', '-',
'-vcodec', 'libx264', '-r', str(self.fps), self.video_out],
stdin=subprocess.PIPE,
stdout = open("/dev/null", 'wb'))
self.video_out = self.video_prog.stdin
self.image = PIL.Image.new("RGB", (self.width, self.height), self.style["bg"])
self.draw = PIL.ImageDraw.Draw(self.image)
try:
self.last_sha = self.last_msg = None
log = self.sh('git','log','--reverse','--pretty=oneline','--', self.filename)
for i, line in enumerate(log):
self.next_sha, self.next_msg = line.split(None, 1)
if not self.last_sha:
self.last_sha = self.next_sha
self.last_msg = self.next_msg
continue
print('(%d/%d) %s %s' % (i, len(log), self.next_sha[:8], self.next_msg))
self.render_diff()
self.last_sha = self.next_sha
self.last_msg = self.next_msg
finally:
self.video_out.close()
self.video_prog.wait()
def render_diff(self):
src = self.sh('git','show', '%s:%s' % (self.last_sha, self.filename))
if self.fast:
self.render(src)
else:
for op, ln, line in self.sha_diff():
sys.stdout.write(op)
sys.stdout.flush()
if op == '+':
src.insert(ln, line)
elif op == '-':
del src[ln]
self.render(src)
sys.stdout.write('\n')
def sha_diff(self):
lines = self.sh('git','diff','--minimal', self.last_sha, self.next_sha, '--', self.filename)
while lines[0][0] != '@':
del lines[0]
ln_old, ln_new = 0, 0
for line in lines:
if line[0] == '@':
ln_old, ln_new = list(map(int, re.match('@@ -(\\d+),\\d+ \\+(\\d+),\\d+ @@.*', line).groups()))
elif line[0] == '+':
yield '+', ln_new-1, line[1:]
ln_new += 1
elif line[0] == '-':
yield '-', ln_new-1, line[1:]
ln_old += 1
else:
ln_old += 1
ln_new += 1
def render(self, src):
self.draw.rectangle((0,0,self.width, self.height), self.style['bg'])
row = self.border
col = -1
offset = self.border
maxcol = 0
if self.do_highlight:
tokens = pygments.lex('\n'.join(src), self.lexer)
else:
tokens = [(Token.Text, '\n'.join(src))]
for token, text in tokens:
color = self.style[token]
points = []
for c in text:
col += 1
if c == '\n':
row += 1
maxcol = max(maxcol, col)
col = -1
if row >= self.height - (self.border*2):
row = self.border
offset += maxcol + self.border
continue
if c == ' ':
continue
if c == '\t':
col += 3
continue
points.extend((col + offset, row))
self.draw.point(points, color)
text = '%s %s' % (self.next_sha[:8], self.next_msg)
self.draw.text((0, 0), text, font=self.font, fill=(0,0,0,255))
self.image.save(self.video_out, 'JPEG', quality=self.quality)
video_size = {
"8K": (8192, 4608),
"WHUXGA": (7680, 4800),
"4320p": (7680, 4320),
"HUXGA": (6400, 4800),
"WHSXGA": (6400, 4096),
"HSXGA": (5120, 4096),
"WHXGA": (5120, 3200),
"HXGA": (4096, 3072),
"4K": (4096, 2304),
"2160p": (3840, 2160),
"QUXGA": (3200, 2400),
"WQSXGA": (3200, 2048),
"QSXGA": (2560, 2048),
"2K": (2048, 1152),
"QWXGA": (2048, 1152),
"WUXGA": (1920, 1200),
"HD": (1920, 1080),
"1080p": (1920, 1080),
"UXGA": (1600, 1200),
"900p": (1600, 900),
"SXGA": (1280, 1024),
"720p": (1280, 720),
"WSVGA": (1024, 600),
"PAL": (720, 576),
"SVGA": (800, 600),
"EGA": (640, 350),
"VGA": (640, 480),
"CGA": (320, 200)
}
def main():
import argparse
parser = argparse.ArgumentParser(description='Visualize source code history')
    parser.add_argument('-o', '--out', metavar='OUT', default="gitvid.flv", help="Filename of the target video file. (default: gitvid.flv)")
parser.add_argument('--fps', default="60", type=int, help="Frames per second (default: 60)")
    parser.add_argument('--size', default="720p", help="Video resolution. Either [WIDTH]x[HEIGHT] or the name of a common resolution (e.g. 720p, 1080p, 4K, ...) (default: 720p)")
parser.add_argument('--style', default=None, help="Pygments syntax highlighting style (default: No syntax highlighting)")
parser.add_argument('--fast', action='store_true', help="Do not visualize individual line additions and deletions, but only full commits.")
parser.add_argument('--dry-run', action='store_true', help="Run without actually generating a video.")
parser.add_argument('SOURCE', help="Source folder (git repository)")
parser.add_argument('PATH', help="Filenames to include in the visualization")
args = parser.parse_args()
if args.size in video_size:
size = video_size[args.size]
else:
size = map(int, args.size.split('x', 1))
r = Renderer(args.SOURCE, args.PATH, out=args.out, size=size, pygments_style=args.style, fps=args.fps, fast=args.fast)
r.run()
if __name__ == "__main__":
main()
sys.exit(0)
| python |
class NextcloudRequestException(Exception):
    def __init__(self, request=None, message=None):
        self.request = request
        if message is None and request is not None:
            message = f"Error {request.status_code}: {request.get_error_message()}"
        super().__init__(message)
class NextcloudDoesNotExist(NextcloudRequestException):
pass
class NextcloudAlreadyExist(NextcloudRequestException):
pass
class NextcloudMultipleObjectsReturned(Exception):
pass
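# Minimal usage sketch (illustrative only; `response` stands in for any object
# exposing `status_code` and `get_error_message()`):
#
#     try:
#         raise NextcloudDoesNotExist(request=response)
#     except NextcloudRequestException as exc:
#         print(f"Nextcloud call failed: {exc}")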
| python |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\vet\vet_clinic_zone_director.py
# Compiled at: 2017-11-06 20:04:35
# Size of source mod 2**32: 27524 bytes
from collections import OrderedDict
import itertools
from protocolbuffers import Venue_pb2
from business.business_enums import BusinessType
from business.business_zone_director_mixin import BusinessZoneDirectorMixin
from clock import interval_in_sim_minutes
from sims.outfits.outfit_enums import OutfitCategory
from sims.sim_info_base_wrapper import SimInfoBaseWrapper
from sims.sim_info_types import Gender
from sims4.resources import Types
from sims4.tuning.tunable import TunableReference, HasTunableSingletonFactory, AutoFactoryInit, TunableMapping, TunableRange, Tunable
from sims4.tuning.tunable_base import GroupNames
from situations.service_npcs.modify_lot_items_tuning import TunableObjectMatchesDefinitionOrTagTest
from situations.situation_curve import SituationCurve
from venues.scheduling_zone_director import SchedulingZoneDirector
from venues.visitor_situation_on_arrival_zone_director_mixin import VisitorSituationOnArrivalZoneDirectorMixin
from vet.vet_clinic_manager import VetClinicManager
from vet.vet_clinic_tuning import VetClinicTuning, VetEmployeeOutfitType
from vet.vet_clinic_utils import get_vet_clinic_zone_director
import build_buy, services, sims4.log, sims
logger = sims4.log.Logger('Vet Clinic', default_owner='jdimailig')
SUPPORTED_BUSINESS_TYPES = (
BusinessType.VET,)
TRACKED_VET_ASSIGNMENTS_VETS = 'vet_assignments_vets'
TRACKED_VET_ASSIGNMENTS_CUSTOMERS = 'vet_assignments_customers_{}'
TRACKED_WAITING_SITUATION_IDS = 'waiting_situation_ids'
TRACKED_WAITING_SITUATION_CUSTOMERS = 'waiting_situation_customer_ids_{}'
CTA_DISABLED = 'cta_disabled'
class _ObjectBasedWaitingCustomerCap(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'object_count_waiting_customer_cap': TunableMapping(description='\n For each amount defined, set the cap to waiting customers.\n \n For this test we are using number of Vet Clinic exam tables.\n \n If the actual count exceeds the all the keys,\n then it will use the cap for the key with the highest value.\n ',
set_default_as_first_entry=True,
key_type=Tunable(description='\n Number of exam tables.\n ',
tunable_type=int,
default=0),
value_type=TunableRange(description='\n Value to cap waiting customers at.\n ',
tunable_type=int,
default=2,
minimum=0))}
def get_cap_amount(self):
zone_director = get_vet_clinic_zone_director()
if zone_director is None:
return 0
exam_table_thresholds = sorted((self.object_count_waiting_customer_cap.keys()), reverse=True)
num_exam_tables = zone_director.num_exam_tables
for threshold in exam_table_thresholds:
if num_exam_tables >= threshold:
return self.object_count_waiting_customer_cap[threshold]
return 0
class VetClinicZoneDirector(BusinessZoneDirectorMixin, VisitorSituationOnArrivalZoneDirectorMixin, SchedulingZoneDirector):
INSTANCE_TUNABLES = {'customer_situation_type_curve':SituationCurve.TunableFactory(description="\n When customer situations are being generated, they'll be pulled\n based on the tuning in this.\n \n The desired count in this tuning is not used.\n \n Otherwise it situation count is pulled from business multipliers.\n ",
tuning_group=GroupNames.BUSINESS,
get_create_params={'user_facing': False}),
'employee_situation':TunableReference(description='\n Employee situation to put employees in. \n ',
manager=services.get_instance_manager(Types.SITUATION),
tuning_group=GroupNames.BUSINESS),
'exam_table_test':TunableObjectMatchesDefinitionOrTagTest(description='\n Tests used to count number of exam tables that are in this zone. \n The number of these found will limit the number of customers \n situations that are generated.\n ',
tuning_group=GroupNames.BUSINESS),
'podium_call_to_action':TunableReference(description='\n Call to action to use to highlight the vet podium when visiting the vet.\n ',
manager=services.get_instance_manager(sims4.resources.Types.CALL_TO_ACTION)),
'waiting_customer_cap':_ObjectBasedWaitingCustomerCap.TunableFactory()}
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self._num_exam_tables = 0
self._default_uniform = {}
self._custom_uniform = {}
self._vet_to_customer_assignments = {}
self._waiting_situations = OrderedDict()
self._reservations = {}
self._has_cta_been_seen = False
self._cta_disabled = False
def _save_custom_zone_director(self, zone_director_proto, writer):
writer.write_uint64s(TRACKED_VET_ASSIGNMENTS_VETS, list(self._vet_to_customer_assignments.keys()))
for vet_id, customer_assignments in self._vet_to_customer_assignments.items():
writer.write_uint64s(TRACKED_VET_ASSIGNMENTS_CUSTOMERS.format(vet_id), list(customer_assignments))
writer.write_uint64s(TRACKED_WAITING_SITUATION_IDS, list(self._waiting_situations.keys()))
for situation_id, waiting_situations in self._waiting_situations.items():
writer.write_uint64s(TRACKED_WAITING_SITUATION_CUSTOMERS.format(situation_id), list(waiting_situations))
writer.write_bool(CTA_DISABLED, self._cta_disabled)
super()._save_custom_zone_director(zone_director_proto, writer)
def _load_custom_zone_director(self, zone_director_proto, reader):
if reader is not None:
vets_with_assigned_customers = reader.read_uint64s(TRACKED_VET_ASSIGNMENTS_VETS, [])
for vet_id in vets_with_assigned_customers:
assigned_customers = reader.read_uint64s(TRACKED_VET_ASSIGNMENTS_CUSTOMERS.format(vet_id), [])
if assigned_customers:
self._vet_to_customer_assignments[vet_id] = list(assigned_customers)
waiting_situation_ids = reader.read_uint64s(TRACKED_WAITING_SITUATION_IDS, [])
for situation_id in waiting_situation_ids:
situation_customers = reader.read_uint64s(TRACKED_WAITING_SITUATION_CUSTOMERS.format(situation_id), [])
if situation_customers:
self._waiting_situations[situation_id] = list(situation_customers)
self._cta_disabled = reader.read_bool(CTA_DISABLED, False)
super()._load_custom_zone_director(zone_director_proto, reader)
def on_startup(self):
super().on_startup()
self._load_default_uniforms()
self.refresh_configuration()
def clear_state(self):
self._vet_to_customer_assignments.clear()
self._waiting_situations.clear()
self._reservations.clear()
def on_loading_screen_animation_finished(self):
if any((sim_info.is_pet for sim_info in self._traveled_sim_infos)):
self._trigger_podium_call_to_action()
super().on_loading_screen_animation_finished()
def handle_sim_summon_request(self, sim_info, purpose):
super().handle_sim_summon_request(sim_info, purpose)
if sim_info.is_pet:
self._trigger_podium_call_to_action()
def _trigger_podium_call_to_action(self):
if services.current_zone().active_household_changed_between_save_and_load() or services.current_zone().time_has_passed_in_world_since_zone_save():
self._cta_disabled = False
if self._cta_disabled:
return
if self._has_cta_been_seen or self._business_manager.is_active_household_and_zone():
return
services.call_to_action_service().begin(self.podium_call_to_action, self)
self._has_cta_been_seen = True
def on_cta_ended(self, value):
self._cta_disabled = True
def on_shutdown(self):
if self._business_manager is not None:
self._business_manager.prepare_for_off_lot_simulation()
super().on_shutdown()
def on_exit_buildbuy(self):
super().on_exit_buildbuy()
self.refresh_configuration()
def create_situations_during_zone_spin_up(self):
if self.business_manager is not None:
if self.business_manager.is_open:
if services.current_zone().time_has_passed_in_world_since_zone_save() or services.current_zone().active_household_changed_between_save_and_load():
self.clear_state()
self._business_manager.start_already_opened_business()
self._on_customer_situation_request()
super().create_situations_during_zone_spin_up()
    def _process_traveled_sim(self, sim_info):
        # NOTE: the decompiler failed on this method and only emitted a bytecode dump;
        # the body below is reconstructed from that dump.
        current_zone = services.current_zone()
        if current_zone.is_first_visit_to_zone or current_zone.time_has_passed_in_world_since_zone_save() or current_zone.active_household_changed_between_save_and_load() or (sim_info.startup_sim_location is not None and not services.active_lot().is_position_on_lot(sim_info.startup_sim_location.transform.translation)):
            super()._process_traveled_sim(sim_info)
        else:
            self._request_spawning_of_sim_at_location(sim_info, sims.sim_spawner_service.SimSpawnReason.TRAVELING)
def _process_zone_saved_sim(self, sim_info):
if services.current_zone().time_has_passed_in_world_since_zone_save() or services.current_zone().active_household_changed_between_save_and_load():
business_manager = services.business_service().get_business_manager_for_zone()
if business_manager is not None and business_manager.is_employee(sim_info):
self._on_reinitiate_zone_saved_sim(sim_info)
else:
self._on_clear_zone_saved_sim(sim_info)
else:
super()._process_zone_saved_sim(sim_info)
def _should_create_npc_business_manager(self):
return True
def _get_new_npc_business_manager(self):
npc_business_manager = VetClinicManager()
npc_business_manager.set_zone_id(services.current_zone_id())
npc_business_manager.set_owner_household_id(None)
return npc_business_manager
def _get_employee_situation_for_employee_type(self, employee_type):
return self.employee_situation
def _get_npc_employee_situation_for_employee_type(self, employee_type):
return self.employee_situation
def _get_desired_employee_count(self, employee_type):
return self._num_exam_tables
def _on_customer_situation_request(self):
self.remove_stale_customer_situations()
desired_situation_count = self._get_num_desired_customer_situations()
current_customer_count = len(self._customer_situation_ids)
if current_customer_count >= desired_situation_count:
waiting_customers = sum((1 for _ in self.customer_situations_gen(lambda s: not s.customer_has_been_seen)))
waiting_customer_cap = self.waiting_customer_cap.get_cap_amount()
if waiting_customer_cap <= waiting_customers:
return
new_customer_situation, params = self.customer_situation_type_curve.get_situation_and_params()
if new_customer_situation is None:
return
situation_id = self.start_customer_situation(new_customer_situation, create_params=params)
if situation_id is None:
logger.info('Trying to create a new customer situation for vet clinic but failed.')
return
def apply_zone_outfit(self, sim_info, situation):
outfit_data, outfit_key = self.get_zone_outfit(sim_info)
if outfit_data is not None:
sim_info.generate_merged_outfit(outfit_data, (OutfitCategory.CAREER, 0), sim_info.get_current_outfit(), outfit_key)
sim_info.set_current_outfit((OutfitCategory.CAREER, 0))
sim_info.resend_current_outfit()
def get_zone_outfit(self, sim_info):
gender = sim_info.clothing_preference_gender
outfit_index, outfit_data = self._custom_uniform.get(gender, (0, None))
if outfit_data is None:
outfit_data = self._default_uniform.get(gender, None)
return (outfit_data, (OutfitCategory.CAREER, outfit_index))
def _load_default_uniforms(self):
self._default_uniform[Gender.MALE] = self._load_uniform_from_resource(VetClinicTuning.UNIFORM_EMPLOYEE_MALE)
self._default_uniform[Gender.FEMALE] = self._load_uniform_from_resource(VetClinicTuning.UNIFORM_EMPLOYEE_FEMALE)
def _load_uniform_from_resource(self, uniform_resource):
sim_info_wrapper = SimInfoBaseWrapper()
sim_info_wrapper.load_from_resource(uniform_resource)
sim_info_wrapper.set_current_outfit((OutfitCategory.CAREER, 0))
return sim_info_wrapper
def refresh_configuration(self):
self._update_from_venue_config()
self._update_exam_table_count()
def _update_from_venue_config(self):
config_data = build_buy.get_current_venue_config(services.current_zone_id())
if config_data is None:
return
vet_clinic_config = Venue_pb2.VetClinicConfiguration()
vet_clinic_config.ParseFromString(config_data)
self._custom_uniform.clear()
for i, outfit_data in enumerate(vet_clinic_config.outfits):
if i not in VetEmployeeOutfitType:
break
gender = Gender.MALE if i == VetEmployeeOutfitType.MALE_EMPLOYEE else Gender.FEMALE
sim_info_wrapper = None
mannequin_data = outfit_data.mannequin
if mannequin_data.HasField('mannequin_id'):
sim_info_wrapper = SimInfoBaseWrapper()
sim_info_wrapper.load_sim_info(outfit_data.mannequin)
sim_info_wrapper.set_current_outfit((OutfitCategory.CAREER, 0))
self._custom_uniform[gender] = (outfit_data.outfit_index, sim_info_wrapper)
def _update_exam_table_count(self):
self._num_exam_tables = sum((1 for obj in services.object_manager().get_valid_objects_gen() if self.exam_table_test(objects=(obj,))))
if self._business_manager is not None:
self._business_manager.set_exam_table_count(self._num_exam_tables)
@property
def num_exam_tables(self):
return self._num_exam_tables
def _get_num_desired_customer_situations(self):
business_manager = self._business_manager
if business_manager is None or business_manager.is_owned_by_npc:
return self._num_exam_tables
situation_count = business_manager.get_ideal_customer_count()
tracker = services.business_service().get_business_tracker_for_household(business_manager.owner_household_id, business_manager.business_type)
situation_count += tracker.addtitional_customer_count
return situation_count
def on_customers_waiting(self, situation_id, customer_ids, player_situation=False):
self._waiting_situations[situation_id] = customer_ids
if player_situation:
self._waiting_situations.move_to_end(situation_id, last=False)
def on_vet_assigned(self, situation_id, vet_id, customer_ids):
if situation_id in self._reservations:
del self._reservations[situation_id]
if situation_id in self._waiting_situations:
del self._waiting_situations[situation_id]
self._vet_to_customer_assignments[vet_id] = customer_ids
def on_customer_situation_being_destroyed(self, situation_id):
if situation_id in self._waiting_situations:
del self._waiting_situations[situation_id]
if situation_id in self._reservations:
del self._reservations[situation_id]
def remove_from_vet(self, vet_id):
if vet_id in self._vet_to_customer_assignments.keys():
del self._vet_to_customer_assignments[vet_id]
def is_assigned_to_vet(self, customer_id, vet_id=None):
if vet_id is not None:
customers = self._vet_to_customer_assignments.get(vet_id, tuple())
return customer_id in customers
        for cust_id in itertools.chain.from_iterable(self._vet_to_customer_assignments.values()):
if cust_id == customer_id:
return True
return False
def is_waiting_for_services(self, customer_sim_id):
for situation_id in self._waiting_situations:
if customer_sim_id in self._waiting_situations[situation_id]:
return True
return False
def is_vet_attending_any_customers(self, vet_id):
if vet_id in self._vet_to_customer_assignments.keys():
return len(self._vet_to_customer_assignments[vet_id]) > 0
return False
def customer_situations_gen(self, criteria_test=None):
situation_manager = services.get_zone_situation_manager()
for situation_id in self._customer_situation_ids:
situation = situation_manager.get(situation_id)
if situation is None:
continue
else:
                if criteria_test is None:
                    yield situation
                elif criteria_test(situation):
                    yield situation
def waiting_sims_gen(self, potential_reserver_id):
now = services.time_service().sim_now
for situation_id in self._waiting_situations:
if situation_id in self._reservations:
reservation = self._reservations[situation_id]
if now < reservation['expiration']:
if reservation['reserver_id'] != potential_reserver_id:
continue
for sim_id in self._waiting_situations[situation_id]:
yield services.object_manager().get(sim_id)
def reserve_waiting_sim(self, reserved_sim_id, reserver_id):
for situation_id in self._waiting_situations:
if reserved_sim_id in self._waiting_situations[situation_id]:
self._reservations[situation_id] = {'expiration':services.time_service().sim_now + interval_in_sim_minutes(30),
'reserver_id':reserver_id}
def bill_owner_for_treatment(self, sim):
if self._business_manager is not None:
for customer_situation in self.customer_situations_gen():
if not customer_situation.is_sim_in_situation(sim):
continue
                self._business_manager.bill_owner_for_treatment(*customer_situation.get_payment_data())
customer_situation.apply_value_of_service()
break
@property
def supported_business_types(self):
return SUPPORTED_BUSINESS_TYPES | python |
#coding:utf-8
###################################################
# File Name: export.py
# Author: Meng Zhao
# mail: @
# Created Time: Monday, November 11, 2019, 16:03:43
#=============================================================
import os
import sys
import json
import shutil
import tensorflow as tf
import modeling
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.model_fn import EstimatorSpec
from run_sequencelabeling import create_model
from preprocess import bert_data_utils
from setting import *
def model_fn_builder(bert_config, num_labels, init_checkpoint,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = tf.ones(tf.shape(input_ids), dtype=tf.int32)
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(input_ids)[0], dtype=tf.float32)
input_ids = tf.placeholder_with_default(input_ids, shape=[None, input_ids.shape[1]], name='input_ids')
input_mask = tf.placeholder_with_default(input_mask, shape=[None, input_mask.shape[1]], name='input_mask')
segment_ids = tf.placeholder_with_default(segment_ids, shape=[None, segment_ids.shape[1]], name='segment_ids')
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.PREDICT:
pred_label_ids = tf.argmax(logits, axis=-1, output_type=tf.int32)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities,
"pred_label_ids": pred_label_ids})
return output_spec
return model_fn
def serving_input_receiver_fn():
"""Serving input_fn that builds features from placeholders
Returns
-------
tf.estimator.export.ServingInputReceiver
"""
input_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input_ids')
input_mask = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input_mask')
segment_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='segment_ids')
receiver_tensors = {'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids}
features = {'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids}
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
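# Loading sketch (illustrative only; assumes a TF 1.x runtime and an exported model
# directory produced by estimator.export_saved_model below):
#
#     from tensorflow.contrib import predictor
#     predict_fn = predictor.from_saved_model('<exported_model_dir>')
#     out = predict_fn({'input_ids': [[101, 102]],
#                       'input_mask': [[1, 1]],
#                       'segment_ids': [[0, 0]]})
#     print(out['pred_label_ids'])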
if __name__ == '__main__':
bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG_FILE)
label2idx, idx2label = bert_data_utils.read_ner_label_map_file(LABEL_MAP_FILE)
num_labels = len(label2idx)
cp_file = tf.train.latest_checkpoint(CHECKPOINT_DIR)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=num_labels,
init_checkpoint=cp_file,
use_one_hot_embeddings=False)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.0
config.log_device_placement = False
batch_size = 16
export_dir = CHECKPOINT_DIR
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=MODEL_DIR, config=RunConfig(session_config=config),
params={'batch_size': batch_size})
estimator.export_saved_model(export_dir, serving_input_receiver_fn, checkpoint_path=cp_file)
| python |
import os
from rsqueakvm.error import PrimitiveFailedError
from rsqueakvm.plugins.plugin import Plugin
from rsqueakvm.primitives import index1_0
from rsqueakvm.util.system import IS_WINDOWS
class UnixOSProcessPlugin(Plugin):
def is_enabled(self):
return Plugin.is_enabled(self) and not IS_WINDOWS
plugin = UnixOSProcessPlugin()
@plugin.expose_primitive(unwrap_spec=[object, index1_0])
def primitiveEnvironmentAt(interp, s_frame, w_rcvr, index):
env_strings = ['%s=%s' % (k, v) for k, v in os.environ.items()]
if index < len(env_strings):
return interp.space.wrap_string(env_strings[index])
raise PrimitiveFailedError
class Win32OSProcessPlugin(Plugin):
def is_enabled(self):
return Plugin.is_enabled(self) and IS_WINDOWS
plugin = Win32OSProcessPlugin()
@plugin.expose_primitive(unwrap_spec=[object])
def primitiveGetEnvironmentStrings(interp, s_frame, w_rcvr):
return interp.space.wrap_string(
'\n'.join(['%s=%s' % (k, v) for k, v in os.environ.items()]))
| python |
from sklearn.ensemble import GradientBoostingRegressor
from deathbase.supervised.regression.base import BaseRegressor
class GradientBoosting(BaseRegressor):
def __init__(self, *args, **kwargs):
regressor = GradientBoostingRegressor(verbose=1)
super().__init__(regressor, *args, **kwargs) | python |
# Copyright 2019 Microsoft Corporation
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
@resources.register('postgresql-server')
class PostgresqlServer(ArmResourceManager):
"""PostgreSQL Server Resource
:example:
Finds all PostgreSQL Servers that have had zero active connections in the past week
.. code-block:: yaml
policies:
- name: find-all-unused-postgresql-servers
resource: azure.postgresql-server
filters:
- type: metric
metric: active_connections
op: eq
threshold: 0
timeframe: 168
:example:
Finds all PostgreSQL Servers that cost more than 1000 in the last month
.. code-block:: yaml
policies:
- name: find-all-costly-postgresql-servers
resource: azure.postgresql-server
filters:
- type: cost
key: TheLastMonth
op: gt
value: 1000
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Databases']
service = 'azure.mgmt.rdbms.postgresql'
client = 'PostgreSQLManagementClient'
enum_spec = ('servers', 'list', None)
resource_type = 'Microsoft.DBforPostgreSQL/servers'
| python |
from math import *
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from gcparser import get_parsed_struct
def Hill_Function_R(Kd,N,C):
# Hill function for modeling repressors
hill=1/(1+(N/Kd)**C)
# print hill
return hill
def Hill_Function_A(Kd,N,C):
# Hill function for modeling activators
hill=((N**C))/((Kd**C)+(N**C))
return hill
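# Quick sanity check (plain arithmetic, not part of the original model): at N == Kd both
# forms give the half-maximal response for any cooperativity C, e.g.
# Hill_Function_R(Kd=1.0, N=1.0, C=2) == Hill_Function_A(Kd=1.0, N=1.0, C=2) == 0.5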
class CircuitModel(object):
def __init__(self,def_dict,con_dict):
#internalizes inputs
self.def_dict=def_dict
self.con_dict=con_dict
#sets hashtable keys for inline ode assembly
self.Plist=[key for key in def_dict.keys()]
self.number_of_protein_states=len(self.Plist)
def run(self):
#sets ODE variables
self.init_con=[self.def_dict[i]['x0'] for i in self.Plist]
self.tspan=10000
#degradation rate
self.d=log(2)/60.0
#runs ODE
self.Xnew=odeint(simulation_ODE, self.init_con,
[x for x in range(self.tspan)], (self,))
self.dt=[x for x in range(self.tspan)]
# extracts reporter behavior
self.reporters=[self.Plist[i] for i in range(self.number_of_protein_states) if self.Plist[i] in ['RFP','GFP','BFP']]
self.reporter_values=[self.Xnew[:,i] for i in range(self.number_of_protein_states) if self.Plist[i] in ['RFP','GFP','BFP']]
t, reporter_values = self._clean_output(self.dt,self.reporter_values)
return t,self.reporters,reporter_values
    def _clean_output(self,t,reporter_values):
        time = np.array(t)/60.0 # convert to minutes
        dt = len(time)//1000
        time = time[0::dt*2]
        reporter_values[0] = reporter_values[0][0::dt]
        return t,reporter_values
def graph(self):
plt.figure()
plt.plot(self.dt,self.reporter_values[0],'g-')
plt.show()
def simulation_ODE(y, t, glob):
#initializes ODEs
dX_dt = np.zeros(glob.number_of_protein_states);
# sets max transcripton rates
for p in range(glob.number_of_protein_states):
dX_dt[p]+=glob.def_dict[glob.Plist[p]]['alpha']
for p in range(glob.number_of_protein_states):
#applies hills
b=glob.con_dict[glob.Plist[p]]
for j in b.keys():
if j == "activates":
a=b['activates']
for key in a.keys():
dX_dt[glob.Plist.index(key)]*=Hill_Function_A(a[key]['kd'],y[p],a[key]['n'])
elif j == "represses":
r=b['represses']
for key in r.keys():
dX_dt[glob.Plist.index(key)]*=Hill_Function_R(r[key]['kd'],y[p],r[key]['n'])
# flips invertase
elif j == "inverts":
i=b['inverts']
for key in i.keys():
if i[key]['p']>0:
if i[key]['t']>y[p]:
dX_dt[glob.Plist.index(key)]*=0.00001
else:
if i[key]['t']<y[p]:
dX_dt[glob.Plist.index(key)]*=0.00001
#adds degradation
for p in range(glob.number_of_protein_states):
dX_dt[p]-=glob.d*y[p]
return dX_dt
if __name__=="__main__":
species = '''
R1 60 10
R2 60 50
R3 60 10
GFP 120 0
'''
program = '''
R1 represses R2 (0.7,2)
R2 represses R3 (0.7,2)
R3 represses R1 (0.7,2)
R1 represses GFP (0.7,2)
'''
a, b = get_parsed_struct(species, program)
g=CircuitModel(a,b)
g.run()
g.graph() | python |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Tridots Tech Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
# from _future_ import unicode_literals
import frappe
import frappe.utils
import json
from frappe import _
def get_context(context):
location = frappe.request.cookies.get('city_location')
path = frappe.local.request.path
path = path.replace('csd-', '')
path = path.replace('-price', '')
context.path = path
path = path.strip('/')
word = path.split('/')
category_route = word[0]
brand_route = word[1]
item_route = word[2]
variant_route = word[3]
addrightadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={"view": 'Variant Detail Page', 'position': 'Right Panel'})
context.addrightadd = addrightadd
context.addtopadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Top Panel'})
context.addbottomadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Bottom Panel'})
context.addmidads = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Middle Panel'})
item_name = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['name'])
context.item_brand = frappe.db.get_value("ItemBrand",
filters={'route': brand_route}, fieldname=['brand_name'])
context.item_title = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['item_name'])
context.category_title = frappe.db.get_value("Category",
filters={'route': category_route}, fieldname=['category_name'])
context.item_brand_route = brand_route
context.item_category_route = category_route
context.item_route = item_route
context.variant_route = variant_route
context.variant_title = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['variant_name'])
context.meta_title = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_title'])
context.meta_description = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_description'])
context.meta_keywords = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_keywords'])
context.item_featured_image = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['featured_image'])
item_variant_doc_name = frappe.db.get_value("Item Variant",
filters={'route': variant_route}, fieldname=['name'])
    context.item_variant_doc_name = item_variant_doc_name
item_variants = frappe.db.get_all("Item Variant",
fields=['route','variant_name', 'name'],
filters={'item': item_name},
limit_page_length= 100)
for x in item_variants:
if frappe.request.cookies.get('city_location'):
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': x.name, 'city': frappe.request.cookies.get('city_location'), 'item': item_name})
if len(price) > 0:
x.csd_price = price[0].csd_price
x.market_price = price[0].market_price
else:
x.csd_price = "Na"
x.market_price = "Na"
else:
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': x.name, 'city': 'Delhi', 'item': item_name})
if len(price) > 0:
x.csd_price = price[0].csd_price
x.market_price = price[0].market_price
else:
x.csd_price = "Na"
x.market_price = "Na"
context.item_variants = item_variants
variant_specifications = frappe.db.get_list('Item Specification',
fields=['specification', 'value'],
filters={'parent': item_variant_doc_name})
for x in variant_specifications:
x.specification_group = frappe.db.get_value("Specification",
filters={'name': x.specification}, fieldname=['specification_category'])
context.variant_specifications = variant_specifications
if frappe.request.cookies.get('city_location'):
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': item_variant_doc_name, 'city': frappe.request.cookies.get('city_location'), 'item': item_name})
if len(price) > 0:
context.csd_price = price[0].csd_price
context.market_price = price[0].market_price
else:
context.csd_price = "Na"
context.market_price = "Na"
else:
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'], filters = {'variant': item_variant_doc_name, 'city': 'Delhi', 'item': item_name})
if len(price) > 0:
context.csd_price = price[0].csd_price
context.market_price = price[0].market_price
context.difference=price[0].difference
else:
context.csd_price = "Na"
context.market_price = "Na" | python |
"""
Exceptions for conditions app
"""
class TreatmentTooRecentError(Exception):
pass
class TreatmentAltConflict(Exception):
pass
| python |
from __future__ import annotations
import logging
import os
import pickle
from collections import Counter
from functools import partial
from itertools import groupby
from operator import itemgetter
from typing import Any, Dict, Iterator, List, Optional, Tuple
import click
import h5py
import numba
import numpy as np
from more_itertools import ilen, peekable
from scipy.spatial.distance import pdist, squareform
from sklearn.utils.random import sample_without_replacement
from skelshop.corpus import CorpusReader
from skelshop.face.consts import DEFAULT_DETECTION_THRESHOLD, DEFAULT_METRIC
from skelshop.face.io import SparseFaceReader
from skelshop.utils.click import PathPath, save_options
from skelshop.utils.numpy import min_pool_dists
from skelshop.utils.ray import maybe_ray
logger = logging.getLogger(__name__)
# XXX: These should be changed for a non-dlib face embedding
DEFAULT_MAX_EPS = 1
DEFAULT_EPS = DEFAULT_DETECTION_THRESHOLD
DEFAULT_MIN_SAMPLES = 3
DEFAULT_EPS_LIST = list(np.linspace(0.5, 0.7, 7))
DEFAULT_MIN_SAMPLES_LIST = list(range(3, 21, 3))
SAMPLE_KNN = 128
SAMPLE_BATCH_SIZE = 1024
# Possible TODO: have references participate in clustering
# refin: Path,
# @click.argument("refin", type=PathPath(exists=True))
# known_labels: List[str] = []
# all_embeddings: List[np.ndarray] = []
# for label, embeddings in multi_ref_embeddings(refin):
# known_labels.extend([label] * len(embeddings))
# all_embeddings.extend(embeddings)
def read_seg_pers(corpus: CorpusReader, num_embeddings) -> np.ndarray:
seg_pers = np.empty((num_embeddings, 3), dtype=np.int32)
idx = 0
for video_idx, video_info in enumerate(corpus):
with open(video_info["bestcands"], "r") as bestcands:
next(bestcands)
for line in bestcands:
(
seg,
pers_id,
seg_frame_num,
abs_frame_num,
extractor,
) = line.strip().split(",")
seg_pers[idx] = (video_idx, int(seg), int(pers_id))
idx += 1
return seg_pers
def corpus_reader_indices(corpus, msg="Loading"):
for video_info in corpus:
logger.debug("%s embeddings from %s", msg, video_info["faces"])
with h5py.File(video_info["faces"], "r") as face_h5f:
face_reader = SparseFaceReader(face_h5f)
for idx in range(len(face_reader)):
yield face_reader, idx
# Try extra hard to remove references to HDF5 file
# del face_reader
def corpus_embedding_fmt(corpus):
corpus_indices = corpus_reader_indices(corpus)
face_reader = next(corpus_indices)[0]
embedding = face_reader.embedding_at(0)
del corpus_indices
return embedding.shape, embedding.dtype
def collect_embeddings(corpus: CorpusReader, sample_size=None):
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
shape, dtype = corpus_embedding_fmt(corpus)
logger.debug("Counting total number of embeddings")
total_num_embeddings = ilen(corpus_reader_indices(corpus, msg="Counting"))
logger.debug("Got %d", total_num_embeddings)
if sample_size is None:
logger.debug("Loading all of them...")
all_embeddings_np = np.zeros((total_num_embeddings, *shape), dtype=dtype)
for abs_idx, (face_reader, face_idx) in enumerate(
corpus_reader_indices(corpus)
):
all_embeddings_np[abs_idx] = face_reader.embedding_at(face_idx)
logger.debug("Done")
else:
logger.debug("Sampling and loading %d of them...", sample_size)
sampled_indices = sample_without_replacement(
total_num_embeddings, sample_size, method="reservoir_sampling"
)
sampled_indices.sort()
sampled_indices_peek = peekable(sampled_indices)
all_embeddings_np = np.zeros((sample_size, *shape), dtype=dtype)
idx = 0
for abs_idx, (face_reader, face_idx) in enumerate(
corpus_reader_indices(corpus)
):
if abs_idx != sampled_indices_peek.peek(None):
continue
all_embeddings_np[idx] = face_reader.embedding_at(face_idx)
next(sampled_indices_peek)
idx += 1
logger.debug("Done")
if logger.isEnabledFor(logging.INFO):
num_embeddings = len(all_embeddings_np)
logger.info("Number of loaded face embeddings: %d", num_embeddings)
logger.info(
"Size: %d bytes", (all_embeddings_np.size * all_embeddings_np.itemsize)
)
logger.info(
"Full squared distance matrix would take: %d bytes",
num_embeddings ** 2 * all_embeddings_np.itemsize,
)
if sample_size is None:
return all_embeddings_np
else:
return total_num_embeddings, sampled_indices, all_embeddings_np
def num_to_clus(num: int):
if num == -1:
return "noclus"
return f"c{num}"
def get_seg_clusts_vote(seg_pers: np.ndarray, label_it: Iterator[int]):
for grp, seg_pers_label in groupby(zip(*seg_pers.T, label_it), itemgetter(0, 1, 2)):
label_cnts = Counter((label for _, _, _, label in seg_pers_label))
clus: str
if len(label_cnts) == 1:
clus = num_to_clus(next(iter(label_cnts)))
else:
top, second = label_cnts.most_common(2)
if top[1] == second[1]:
clus = "noclus"
else:
clus = num_to_clus(top[0])
yield grp, clus
def get_seg_clusts(seg_pers: List[Tuple[str, str, str]], label_it: Iterator[int]):
for (grp, _it), label in zip(groupby(seg_pers), label_it):
yield grp, num_to_clus(label)
def write_seg_clusts(
corpus: CorpusReader, label_it: Iterator[Tuple[Tuple[str, str, str], str]]
):
peek = peekable(label_it)
for video_idx, video_info in enumerate(corpus):
with open(video_info["segsout"], "w") as outf:
outf.write("seg,skel_id,label\n")
while peek.peek(((None,),))[0][0] == video_idx:
(_video_idx, seg, skel_id), clus = next(peek)
outf.write(f"{seg},{skel_id},{clus}\n")
def medoid_vec(vecs, metric):
    # Return the medoid: the vector with the smallest total distance to all others.
    dists = squareform(pdist(vecs, metric=metric))
    return vecs[np.argmin(dists.sum(axis=0))]
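# Example (illustrative only): for vecs = [[0.0], [1.0], [10.0]] with a Euclidean metric the
# summed distances are 11, 10 and 19 respectively, so the middle vector [1.0] is the medoid.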
def medoid_vecs(vecs, metric, n=1):
dists = squareform(pdist(vecs, metric=metric))
return np.argsort(dists.sum(axis=0))[:n]
def get_medioid_prototypes(all_embeddings_np, clus_labels, metric, n):
idx = 0
while 1:
clus_idxs = np.nonzero(clus_labels == idx)[0]
if not len(clus_idxs):
break
clus_embeddings = all_embeddings_np[clus_idxs]
medoid_clus_idxs = medoid_vecs(clus_embeddings, metric, n)
yield idx, (clus_idxs[idx] for idx in medoid_clus_idxs)
idx += 1
def get_rnn_prototypes(rev_knns, clus_labels, n):
idx = 0
while 1:
clus_idxs = np.nonzero(clus_labels == idx)[0]
if not len(clus_idxs):
break
subgraph = rev_knns[clus_idxs][:, clus_idxs]
max_rnn_idxs = np.flip(np.argsort(subgraph.getnnz(1)))[:n]
yield idx, (clus_idxs[idx] for idx in max_rnn_idxs)
idx += 1
def write_prototypes(protof, corpus, prototypes):
protof.write("clus_idx,video_idx,frame_num,pers_id\n")
face_sorted = sorted(
(
(face_idx, clus_idx)
for clus_idx, face_idxs in prototypes
for face_idx in face_idxs
)
)
face_sorted_it = iter(face_sorted)
face_idx = clus_idx = None
def next_proto():
nonlocal face_idx, clus_idx
face_idx, clus_idx = next(face_sorted_it, (None, None))
next_proto()
cur_face_idx = 0
clus = []
for video_idx, video_info in enumerate(corpus):
with h5py.File(video_info["faces"], "r") as face_h5f:
for (frame_num, pers), _ in SparseFaceReader(face_h5f):
if cur_face_idx == face_idx:
clus.append((clus_idx, video_idx, frame_num, pers))
next_proto()
cur_face_idx += 1
clus.sort()
for clus_idx, video_idx, frame_num, pers_id in clus:
protof.write(f"{clus_idx},{video_idx},{frame_num},{pers_id}\n")
@numba.guvectorize(["int32[:], int32[:], int32[:]"], "(n),(n)->()", nopython=True)
def vote(elems, cnts, res):
max_elem = -1
max_cnt = 0
num_maxes = 0
for idx, (elem, cnt) in enumerate(zip(elems, cnts)):
if elem == -1:
continue
        if cnt > max_cnt:
            max_elem = elem
            max_cnt = cnt
            num_maxes = 1
elif cnt == max_cnt:
num_maxes += 1
if num_maxes == 1:
res[0] = max_elem
else:
res[0] = -1
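# Worked example (illustrative only): for a row elems = [2, 5], cnts = [3, 1] the maximum
# count is unique, so the result is 2; for a tie such as cnts = [3, 3] the result is -1.
# Entries with elem == -1 are skipped.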
def mk_count_vote(min_samples):
@numba.guvectorize(
["int32[:], int32[:]", "int64[:], int64[:]"], "(n)->()", nopython=True
)
def count_vote(nbr_labels, res):
max_elem = -1
max_count = 0
num_maxes = 0
cur_elem = -1
cur_count = 0
def flush():
nonlocal max_count, num_maxes, max_elem
if cur_count > max_count:
max_count = cur_count
num_maxes = 1
max_elem = cur_elem
elif cur_count == max_count:
num_maxes += 1
for nbr_label in nbr_labels:
if nbr_label == -1:
break
elif nbr_label != cur_elem:
flush()
cur_elem = nbr_label
cur_count = 1
else:
cur_count += 1
flush()
# bool(...) due to https://github.com/numba/numba/issues/6585
if bool(num_maxes == 1) and ((max_count - 1) >= min_samples):
res[0] = max_elem
else:
res[0] = -1
return count_vote
def expand_clus_labels(
transformer_cls,
corpus,
num_embeddings_total,
    *,
    sampled_embeddings,
sampled_labels,
sample_idxs,
eps,
min_samples,
metric,
):
all_clus_labels = np.full(num_embeddings_total, -1)
sampled_labels_it = iter(sampled_labels)
index = transformer_cls(SAMPLE_KNN, metric=metric)
index.fit(sampled_embeddings)
del sampled_embeddings
sample_indices_peek = peekable(sample_idxs)
batch: List[np.ndarray] = []
batch_idxs: List[int] = []
count_vote = mk_count_vote(min_samples)
def flush_batch():
batch_np = np.vstack(batch)
dists, nbrs = index.transform(batch_np)
# Convert sims -> dists
dists = 1 - dists
# Mask out those over dist
nbrs[dists > eps] = -1
del dists
# Get the labels of the neighbours where not masked out
nbr_labels = np.where(nbrs != -1, sampled_labels[nbrs], -1)
del nbrs
nbr_labels.sort(axis=1)
nbr_labels = np.flip(nbr_labels, axis=1)
nearest_labels = count_vote(nbr_labels, axis=1)
all_clus_labels[batch_idxs] = nearest_labels
batch.clear()
batch_idxs.clear()
for abs_idx, (face_reader, face_idx) in enumerate(corpus_reader_indices(corpus)):
if abs_idx == sample_indices_peek.peek(None):
all_clus_labels[abs_idx] = next(sampled_labels_it)
next(sample_indices_peek)
else:
batch.append(face_reader.embedding_at(face_idx))
batch_idxs.append(abs_idx)
if len(batch_idxs) >= SAMPLE_BATCH_SIZE:
flush_batch()
flush_batch()
return all_clus_labels
def regroup_by_pers(all_embeddings_np, seg_pers):
indices = np.lexsort(seg_pers.T[::-1])
seg_pers[:] = seg_pers[indices]
all_embeddings_np[:] = all_embeddings_np[indices]
def process_common_clus_options(args, kwargs, inner):
corpus_desc = kwargs.pop("corpus_desc")
corpus_base = kwargs.pop("corpus_base")
proto_out = kwargs.pop("proto_out")
model_out = kwargs.pop("model_out")
num_protos = kwargs.pop("num_protos")
pool = kwargs["pool"]
ann_lib = kwargs["ann_lib"]
knn = kwargs.get("knn")
if model_out is not None and ann_lib != "pynndescent" and knn is not None:
raise click.UsageError("Model saving is only supported for pynndescent")
with CorpusReader(corpus_desc, corpus_base) as corpus:
kwargs["corpus"] = corpus
sample_idxs = None
sample_size = kwargs.pop("sample_size")
if sample_size is not None:
num_embeddings, sample_idxs, all_embeddings_np = collect_embeddings(
corpus, sample_size
)
else:
all_embeddings_np = collect_embeddings(corpus)
num_embeddings = len(all_embeddings_np)
seg_pers = read_seg_pers(corpus, num_embeddings)
regroup_by_pers(all_embeddings_np, seg_pers)
kwargs["seg_pers"] = seg_pers
if knn is not None and knn > len(all_embeddings_np) - 1:
knn = len(all_embeddings_np) - 1
logging.info(
"Only got %s embeddings so reducing k to %s",
len(all_embeddings_np),
knn,
)
kwargs["knn"] = knn
if pool == "med":
if sample_size is not None:
raise click.UsageError("Cannot use sampling when --pool=med")
all_embeddings_np = med_pool_vecs(
all_embeddings_np, seg_pers, DEFAULT_METRIC
)
kwargs["all_embeddings_np"] = all_embeddings_np
estimator, clus_labels, eps, min_samples = inner(*args, **kwargs)
if proto_out:
with open(proto_out, "w") as protof:
if knn is not None and ann_lib == "pynndescent":
rev_knns = estimator.named_steps["rnndbscan"].rev_knns_
prototypes = get_rnn_prototypes(rev_knns, clus_labels, num_protos)
else:
prototypes = get_medioid_prototypes(
all_embeddings_np, clus_labels, DEFAULT_METRIC, num_protos
)
write_prototypes(
protof, corpus, prototypes,
)
if model_out:
with open(model_out, "wb") as modelf:
pickle.dump(estimator, modelf)
if sample_idxs is not None:
transformer_cls = knn_lib_transformer(ann_lib)
clus_labels = expand_clus_labels(
transformer_cls,
corpus,
num_embeddings,
sampled_embeddings=all_embeddings_np,
sampled_labels=clus_labels,
sample_idxs=sample_idxs,
eps=eps,
min_samples=min_samples,
metric=DEFAULT_METRIC,
)
if pool == "vote":
grouped_label_it = get_seg_clusts_vote(seg_pers, iter(clus_labels))
else:
grouped_label_it = get_seg_clusts(seg_pers, iter(clus_labels))
write_seg_clusts(corpus, grouped_label_it)
common_clus_options = save_options(
[
click.argument("corpus_desc", type=PathPath(exists=True)),
click.option("--corpus-base", type=PathPath(exists=True)),
click.option("--proto-out", type=PathPath()),
click.option("--model-out", type=PathPath()),
click.option("--num-protos", type=int, default=1),
click.option(
"--algorithm", type=click.Choice(["dbscan", "optics-dbscan", "rnn-dbscan"])
),
click.option(
"--ann-lib",
type=click.Choice(["pynndescent", "faiss-exact"]),
default="pynndescent",
),
click.option(
"--pool", type=click.Choice(["med", "min", "vote"]), default="vote"
),
click.option("--knn", type=int, default=None),
click.option("--sample-size", type=int, default=None),
click.option("--n-jobs", type=int, default=-1),
],
process_common_clus_options,
)
@click.group()
def clus():
"""
    Clusters embeddings from multiple videos described in a corpus description file.
"""
pass
def knn_lib_transformer(knn_lib):
if knn_lib == "faiss-exact":
from sklearn_ann.kneighbors.faiss import FAISSTransformer
return FAISSTransformer
else:
from sklearn_ann.kneighbors.pynndescent import PyNNDescentTransformer
return PyNNDescentTransformer
def get_clus_alg(
algorithm: str, knn_lib: str, knn: Optional[int], pool: str, metric: str, **kwargs
):
from sklearn.cluster import DBSCAN, OPTICS
from sklearn_ann.cluster.rnn_dbscan import simple_rnn_dbscan_pipeline
from skelshop.cluster.dbscan import knn_dbscan_pipeline
if knn is None:
metric = "precomputed" if pool == "min" else metric
if algorithm == "optics-dbscan":
return OPTICS(
metric=metric,
max_eps=DEFAULT_MAX_EPS,
cluster_method="dbscan",
**kwargs,
)
elif algorithm == "dbscan":
return DBSCAN(metric=metric, **kwargs)
else:
raise click.UsageError("Must specify knn when algorithm == 'rnn-dbscan'")
else:
if algorithm == "optics-dbscan":
raise NotImplementedError("KNN is not implemented for OPTICS")
if pool == "min":
raise NotImplementedError("Min pooling not implemented for KNN DBSCANs")
transformer = knn_lib_transformer(knn_lib)
if algorithm == "dbscan":
return knn_dbscan_pipeline(transformer, knn, metric=metric)
else:
return simple_rnn_dbscan_pipeline(
transformer, knn, metric=metric, keep_knns=True
)
def proc_data(vecs, seg_pers: List[Tuple[str, str, str]], pool: str, metric: str):
if pool == "min":
dists = squareform(pdist(vecs, metric=metric))
sizes = [ilen(it) for _, it in groupby(seg_pers)]
return min_pool_dists(dists, sizes, sizes)
else:
return vecs
@clus.command()
@common_clus_options
@click.option("--eps", type=float, default=DEFAULT_EPS)
@click.option("--min-samples", type=int, default=DEFAULT_MIN_SAMPLES)
def fixed(
all_embeddings_np: np.ndarray,
corpus: CorpusReader,
seg_pers: List[Tuple[str, str, str]],
algorithm: str,
ann_lib: str,
pool: str,
knn: Optional[int],
eps: float,
    min_samples: int,
n_jobs: int,
):
"""
    Performs DBSCAN clustering with fixed parameters.
"""
clus_alg = get_clus_alg(
algorithm,
ann_lib,
knn,
pool,
DEFAULT_METRIC,
eps=eps,
min_samples=min_samples,
n_jobs=n_jobs,
)
    with maybe_ray():
        labels = clus_alg.fit_predict(
            proc_data(all_embeddings_np, seg_pers, pool, DEFAULT_METRIC)
        )
    return (
        clus_alg,
        labels,
        eps,
        min_samples,
    )
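# A hypothetical invocation of the `fixed` command (the executable name `skelshop` and the
# corpus file name are assumptions, not defined in this file):
#   skelshop clus fixed corpus.yaml --corpus-base /data/videos --pool vote --eps 0.3 --min-samples 3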
def med_pool_vecs(embeddings, seg_pers: List[Tuple[str, str, str]], metric: str):
output_size = ilen(groupby(seg_pers))
output_arr = np.empty((output_size, embeddings.shape[1]), dtype=embeddings.dtype)
output_idx = 0
input_idx = 0
for grp, it in groupby(seg_pers):
grp_size = ilen(it)
new_input_idx = input_idx + grp_size
output_arr[output_idx] = medoid_vec(embeddings[input_idx:new_input_idx], metric)
input_idx = new_input_idx
output_idx += 1
return output_arr
@clus.command()
@common_clus_options
@click.option("--eps")
@click.option("--min-samples")
@click.option(
"--score",
type=click.Choice(["both", "silhouette", "tracks-acc"]),
default="silhouette",
)
def search(
all_embeddings_np: np.ndarray,
corpus: CorpusReader,
seg_pers: List[Tuple[str, str, str]],
algorithm: str,
ann_lib: str,
pool: str,
knn: Optional[int],
eps: Optional[str],
min_samples: Optional[str],
n_jobs: int,
score: str,
):
"""
Performs grid search to find best clustering parameters.
"""
from skelshop.cluster.param_search import GridSearchClus
from skelshop.cluster.score import silhouette_scorer, tracks_acc
if pool == "med":
all_embeddings_np = med_pool_vecs(all_embeddings_np, seg_pers, DEFAULT_METRIC)
if eps is not None:
eps_list = [float(x) for x in eps.split(",")]
else:
eps_list = DEFAULT_EPS_LIST
if min_samples is not None:
min_samples_list = [int(x) for x in min_samples.split(",")]
else:
min_samples_list = DEFAULT_MIN_SAMPLES_LIST
scorer: Any
refit: Any = True
metric_silhouette_scorer = partial(silhouette_scorer, DEFAULT_METRIC)
if score == "silhouette":
scorer = metric_silhouette_scorer
else:
if pool != "vote":
            raise click.UsageError(
                "--score=tracks-acc/both can only be used with --pool=vote"
            )
if score == "both":
scorer = {"tracks_acc": tracks_acc, "silhouette": metric_silhouette_scorer}
refit = "silhouette"
else:
scorer = tracks_acc
clus_kwargs: Dict[str, Any] = {"n_jobs": n_jobs}
if algorithm == "optics-dbscan" and "JOBLIB_CACHE_DIR" in os.environ:
logger.debug("Using JOBLIB_CACHE_DIR=%s", os.environ["JOBLIB_CACHE_DIR"])
clus_kwargs["memory"] = os.environ["JOBLIB_CACHE_DIR"]
clus_alg = get_clus_alg(
algorithm, ann_lib, knn, pool, DEFAULT_METRIC, **clus_kwargs
)
param_grid: Dict[str, List[Any]] = {
"min_samples": min_samples_list,
"eps": eps_list,
}
grid_search = GridSearchClus(
estimator=clus_alg,
param_grid=param_grid,
scoring=scorer,
refit=refit,
n_jobs=n_jobs,
)
X = proc_data(all_embeddings_np, seg_pers, pool, DEFAULT_METRIC)
with maybe_ray():
grid_search.fit(
X, y=None if score == "silhouette" else seg_pers,
)
if logger.isEnabledFor(logging.INFO):
if score == "both":
score_heading = "Silhouette, Track rand index/accuracy"
elif score == "silhouette":
score_heading = "Silhouette"
else:
score_heading = "Track rand index/accuracy"
logger.info(
"{}, Min samples".format(score_heading)
+ (", Eps" if algorithm != "optics-dbscan" else "")
)
keys = ["param_min_samples"]
if algorithm != "optics-dbscan":
keys = [*keys, "param_eps"]
if score == "both":
keys = ["mean_test_silhouette", "mean_test_tracks_acc", *keys]
else:
keys = ["mean_test_score", *keys]
for lst in zip(*(grid_search.cv_results_[k] for k in keys)):
logger.info(" ".join((str(x) for x in lst)))
logger.info("Best estimator: %s", grid_search.best_estimator_)
logger.info("Best params: %s", grid_search.best_params_)
logger.info("Best score: %s", grid_search.best_score_)
predicted_labels = grid_search.best_estimator_.labels_
return (
grid_search.best_estimator_,
predicted_labels,
grid_search.best_params_["eps"],
grid_search.best_params_["min_samples"],
)
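# A hypothetical grid-search invocation (executable and file names are assumptions);
# --eps and --min-samples take comma-separated candidate lists, as parsed above:
#   skelshop clus search corpus.yaml --algorithm dbscan --eps 0.25,0.3,0.35 --min-samples 2,3,5 --score both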
| python |
import sys
sys.path.append('../')
from Normalizer.Normalizer import Normalizer
import unittest
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
class TestNormalizer(unittest.TestCase):
normalizer = Normalizer()
test_data = [ 61.19499969, 57.31000137, 56.09249878, 61.72000122,
61.38000107, 64.61000061, 61.93500137, 63.70249939,
63.57249832, 60.22750092, 61.23249817, 60.35250092,
65.61750031, 64.85749817, 66.51750183, 66.99749756,
68.3125 , 71.76249695, 71.10749817, 71.67250061,
70.69999695, 69.23249817, 67.09249878, 69.02500153,
68.75749969, 70.74250031, 70.79250336, 69.64499664,
71.93250275, 73.44999695, 72.26750183, 73.29000092,
74.38999939, 75.15750122, 75.93499756, 77.53250122,
78.75250244, 77.85250092, 76.91249847, 77.38500214,
76.92749786, 78.73999786, 78.28500366, 79.80750275,
79.21250153, 79.72250366, 79.18250275, 79.52749634,
79.5625 , 79.48500061, 80.46250153, 80.83499908,
81.27999878, 80.58000183, 82.875 , 83.36499786,
85.99749756, 88.20999908, 83.97499847, 84.69999695,
85.74749756, 88.01999664, 87.89749908, 87.93250275,
87.43000031, 89.71749878, 91.63249969, 90.01499939,
91.20999908, 88.40750122, 90.44499969, 91.19999695,
91.02749634, 91.02749634, 93.46250153, 93.17250061,
95.34249878, 95.75250244, 95.91999817, 95.47750092,
97.05750275, 97.72499847, 96.52249908, 96.32749939,
98.35749817, 97. , 97.27249908, 92.84500122,
92.61499786, 94.80999756, 93.25250244, 95.04000092,
96.19000244, 106.26000214, 108.9375 , 109.66500092,
110.0625 , 113.90249634, 111.11250305, 112.72750092]
# test_data = np.array([round(t, 2) for t in test_data])
test_data = np.array(test_data)
test_data = np.reshape(test_data, (-1,1))
def test_transform_featurescaler(self):
result = self.normalizer.FeatureScaler.transform(self.test_data)
scaler = MinMaxScaler()
scaler.fit(self.test_data)
correct = scaler.transform(self.test_data)
self.assertTrue(np.allclose(result, correct))
def test_reverse_transform_featurescaler(self):
result = self.normalizer.FeatureScaler.transform(self.test_data)
result = self.normalizer.FeatureScaler.reverse_transform(result)
self.assertTrue(np.allclose(result, self.test_data))
    def test_raise_transform(self):
        test_data = self.test_data.tolist()
        # each call must be checked in its own context manager; otherwise only the first
        # statement inside the block is ever executed
        with self.assertRaises(ValueError):
            self.normalizer.FeatureScaler.transform(test_data)
        with self.assertRaises(ValueError):
            self.normalizer.MeanScaler.transform(test_data)
        with self.assertRaises(ValueError):
            self.normalizer.ZScoreScaler.transform(test_data)
        with self.assertRaises(ValueError):
            self.normalizer.UnitLengthScaler.transform(test_data)
| python |
from typing import List
from local_packages.binary_tree import TreeNode
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
ans = []
def helper(node: TreeNode, level: int) -> None:
if node is None:
return
if level >= len(ans):
ans.append([])
helper(node.left, level + 1)
helper(node.right, level + 1)
ans[level].append(node.val)
helper(root, 0)
return ans[::-1]
# TESTS
tests = [
("#", []),
("1,#,#", [[1]]),
("1,2,#,#,3,#,#", [[2, 3], [1]]),
("1,#,2,#,3,#,4,#,5,#,#", [[5], [4], [3], [2], [1]]),
("3,9,#,#,20,15,#,#,7,#,#", [[15, 7], [9, 20], [3]]),
]
for t in tests:
sol = Solution()
actual = sol.levelOrderBottom(TreeNode.deserialize(t[0]))
print("Bottom-up level order traversal of", t[0], "->", actual)
assert actual == t[1]
| python |
import os
from openpyxl import Workbook
from openpyxl.styles import PatternFill
from PIL import Image
from tqdm import tqdm
def rgb_to_hex(rgb):
    return '%02x%02x%02x' % rgb
for file in os.listdir():
    if file.endswith(".jpg") or file.endswith(".jpeg") or file.endswith(".png"):
        filename = file.split(".")[0]
        # use a fresh workbook per image so pixels from different images do not overwrite each other
        wb = Workbook()
        sheet = wb.active
        im = Image.open(file).convert('RGB')
        px = im.load()
        width, height = im.size
        for x in tqdm(range(width)):
            for y in range(height):
                sheet.cell(row=y+1, column=x+1).fill = PatternFill(start_color=rgb_to_hex(px[x, y]), fill_type="solid")
        im.close()
        wb.save(f"{filename}.xlsx")  # save inside the loop so every image gets its own spreadsheet
import os
import numpy as np
import importlib
SilhouetteDetector = importlib.import_module('SilhouetteDetector')
np.random.seed(0)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Generate artificial videos with one subject')
parser.add_argument('--dataset', type=str, required=True,
default="casiab", choices=['casiab', 'tumgaid', 'other'],
help="Dataset name. Used tho select metadata and default folder. "
"Try 'casiab', 'tumgaid' or 'other'.")
parser.add_argument('--inputtype', type=str, required=True,
choices=['video', 'image'],
help="Input type."
"Try 'video' or 'image'.")
parser.add_argument('--datasetdir', type=str, required=False,
help='Full path to dataset directory')
parser.add_argument('--outputdir', type=str, required=False,
help='Full path to output directory')
parser.add_argument('--deeplabpath', type=str, required=False,
help='Full path to deeplab directory', default="/tensorflow/models/research/")
script_path = os.path.dirname(os.path.abspath(__file__))
args = parser.parse_args()
dataset = args.dataset
inputtype = args.inputtype
datasetdir = args.datasetdir
outputdir = args.outputdir
deeplabpath = args.deeplabpath
if dataset == 'casiab':
datasetdir = script_path + "/casiab/" if datasetdir is None else datasetdir
outputdir = script_path + "/casiab_silhouette/" if outputdir is None else outputdir
elif dataset == 'tumgaid':
datasetdir = script_path + "/tumgaid/" if datasetdir is None else datasetdir
outputdir = script_path + "/tumgaid_silhouettes/" if outputdir is None else outputdir
else:
if not all(v is not None for v in [datasetdir, outputdir]):
            raise argparse.ArgumentTypeError('If you select "other" as the dataset, you need to provide all the input arguments.')
if inputtype == 'video':
SilhouetteDetector.silhouettes_from_videos(datasetdir, outputdir, deeplabpath)
else:
SilhouetteDetector.silhouettes_from_images(datasetdir, outputdir, deeplabpath)
| python |
# -*- coding: utf-8 -*-
from numpy import *
from datetime import datetime, timedelta
from dateutil.relativedelta import *
import os
import re
import codecs
import pandas as pd
import scipy.io.netcdf as spnc
from ecmwfapi import ECMWFDataServer
import time_tools as tt
import geo_tools as gt
import download_file as df
def argo_gdac(start_date,end_date,lat_range,lon_range,save_to_root,
overwrite_global_index=True,overwrite_profs=False,bypass_download=False,
only_download_wmoids=[]):
""" Downloads Argo float profiles from US-GODAE GDAC.
Args:
start_date, end_date: datetime tuples, e.g. (Y,M,D) or (Y,M,D,H) or... etc.
lat_range, lon_range: list-pairs (i.e. [min,max]) of lats from -90 to 90 or lons from -180 to 180 or 0 to 360
note: to search over all longitudes, use [-180,180], [0,360], [0,0], or [lon,same_lon]... all work!
note: when lat/lon unavailable for a profile (e.g. no position fix or under ice), last valid lat/lon for
the float in question will be referenced
save_to_root: path of main Argo data directory of interest
only_download_wmoids: [] to download all
e.g. [5904468, 5904471, ...] to only download new profiles for specified WMOids
"""
save_to_meta = save_to_root + 'Meta/'
save_to_profiles = save_to_root + 'Profiles/'
url_root = 'http://www.usgodae.org/ftp/outgoing/argo/'
global_index_filename = 'ar_index_global_prof.txt'
local_index_filename = 'ar_index_local_prof.txt' # index of locally downloaded profiles
url_profiles_root = url_root + 'dac/'
# download most recent global profile list and parse columns
df.single_file(url_root,global_index_filename,save_to_meta,ftp_root=False,overwrite=overwrite_global_index,verbose=True)
data_frame = pd.read_csv(save_to_meta + global_index_filename,header=8,low_memory=False)
global_profile_list = data_frame.values
# identify profiles meeting argument criteria
num_profs = len(global_profile_list)
prof_matches = zeros(num_profs, dtype=bool)
float_number_regexp = re.compile('[a-z]*/[0-9]*/profiles/[A-Z]*([0-9]*)_[0-9]*[A-Z]*.nc')
last_valid_position_float = int(float_number_regexp.findall(global_profile_list[0,0])[0])
last_valid_position = [global_profile_list[0,2],global_profile_list[0,3]]
for n in range(num_profs):
current_float = int(float_number_regexp.findall(global_profile_list[n,0])[0])
# accommodate profiles with missing lat/lon data (set as 99999.000)
if global_profile_list[n,2] == 99999.000 or global_profile_list[n,3] == 99999.000 \
or global_profile_list[n,2] == -999.000 or global_profile_list[n,3] == -999.000:
if current_float == last_valid_position_float:
assumed_prof_position = last_valid_position
else:
continue # in effect, leave prof_matches[n] = False
### original solution was the following: raise AssertionError('Profile has invalid lat/lon and is unusable because no prior valid lat/lon for this float, {0}.'.format(current_float))
else:
assumed_prof_position = [global_profile_list[n,2],global_profile_list[n,3]]
last_valid_position = assumed_prof_position
last_valid_position_float = current_float
# skip profiles with missing timestamps
if isnan(global_profile_list[n,1]):
continue # in effect, leave prof_matches[n] = False
# finally, if profile has valid position and timestamp, then check against args
if tt.is_time_in_range(start_date,end_date,tt.convert_14_to_tuple(global_profile_list[n,1])):
if gt.geo_in_range(assumed_prof_position[0],assumed_prof_position[1],lat_range,lon_range):
prof_matches[n] = True
print('>>> Number of Argo profiles on GDAC meeting criteria = ',sum(prof_matches))
# using profile matches, create index of local float profile metadata (same format as global index)
# add columns for float number, profile number, profile status (R, D), profile suffix (D = descending profile)
matching_profs = where(prof_matches)[0]
local_profile_list = global_profile_list[matching_profs,:]
num_profs = len(local_profile_list)
# download necessary profiles to local
if not bypass_download:
        if len(only_download_wmoids) != 0:
only_download_wmoids = [str(selected_wmoid) for selected_wmoid in only_download_wmoids]
trim_local_profile_list_indices = []
starting_dir = os.getcwd()
os.chdir(save_to_profiles)
existing_prof_files = os.listdir()
prof_file_regexp = re.compile('[a-z]*/[0-9]*/profiles/([A-Z]*[0-9]*_[0-9]*[A-Z]*.nc)')
prof_path_regexp = re.compile('([a-z]*/[0-9]*/profiles/)[A-Z]*[0-9]*_[0-9]*[A-Z]*.nc')
for i, global_prof_index in enumerate(matching_profs):
prof_file = prof_file_regexp.findall(global_profile_list[global_prof_index,0])[0]
prof_path = prof_path_regexp.findall(global_profile_list[global_prof_index,0])[0]
            if len(only_download_wmoids) != 0:
if all([selected_wmoid not in prof_file for selected_wmoid in only_download_wmoids]):
if prof_file in existing_prof_files: trim_local_profile_list_indices.append(i)
continue
print('dlp.argo_gdac() is downloading ' + prof_file)
trim_local_profile_list_indices.append(i)
df.single_file(url_profiles_root + prof_path,prof_file,save_to_profiles,ftp_root=False,overwrite=overwrite_profs,verbose=False)
df.how_far(i,matching_profs,0.01)
        if len(only_download_wmoids) != 0:
matching_profs = matching_profs[trim_local_profile_list_indices]
local_profile_list = local_profile_list[trim_local_profile_list_indices,:]
num_profs = len(local_profile_list)
os.chdir(starting_dir)
# re-process local profile index
float_wmoid_regexp = re.compile('[a-z]*/([0-9]*)/profiles/[A-Z]*[0-9]*_[0-9]*[A-Z]*.nc')
float_profile_filename_regexp = re.compile('[a-z]*/[0-9]*/profiles/([A-Z]*[0-9]*_[0-9]*[A-Z]*.nc)')
float_profile_mode_regexp = re.compile('[a-z]*/[0-9]*/profiles/([A-Z]*)[0-9]*_[0-9]*[A-Z]*.nc')
float_profile_num_regexp = re.compile('[a-z]*/[0-9]*/profiles/[A-Z]*[0-9]*_([0-9]*)[A-Z]*.nc')
float_wmoids = [int(float_wmoid_regexp.findall(local_profile_list[n,0])[0]) for n in range(num_profs)]
float_profile_filenames = [float_profile_filename_regexp.findall(local_profile_list[n,0])[0] for n in range(num_profs)]
float_profile_modes = [float_profile_mode_regexp.findall(local_profile_list[n,0])[0] for n in range(num_profs)]
float_profile_nums = [int(float_profile_num_regexp.findall(local_profile_list[n, 0])[0]) for n in range(num_profs)]
float_position_flags = [0 for n in range(num_profs)]
local_profile_list = hstack((vstack(float_wmoids),vstack(float_profile_filenames),vstack(float_profile_modes),
vstack(float_position_flags),local_profile_list))
# sort profile index by WMOid + profile number (e.g. 7900093 is completely out of order)
sort_param = array(float_wmoids) + array(float_profile_nums) / 10000
local_profile_list = local_profile_list[argsort(sort_param)]
    # flag and interpolate between missing positions
# note: ignores lat/lon of additional profiles when NUM_PROF > 1
# note: will likely fail if first or last profiles in the index have bad positions
currently_interpolating = 0
previous_prof_wmoid = local_profile_list[0,0]
bad_starting_position = 0
starting_position = [0, 0] # [lat,lon]
ending_position = [0, 0]
interp_profile_indices = []
datetime_stamps = []
for p in range(num_profs):
if p > 1: previous_prof_wmoid = local_profile_list[p-1, 0]
current_prof_wmoid = local_profile_list[p,0]
profile_file = spnc.netcdf_file(save_to_profiles + local_profile_list[p,1], 'r', mmap=False)
profile_mode = str(profile_file.variables['DATA_MODE'][0])[2]
local_profile_list[p,2] = profile_mode # R, D, or A (adjusted real-time)
profile_lat_given = profile_file.variables['LATITUDE'][0]
local_profile_list[p,6] = profile_lat_given
profile_lon_given = profile_file.variables['LONGITUDE'][0]
local_profile_list[p,7] = profile_lon_given
profile_position_qc = int(str(profile_file.variables['POSITION_QC'][0])[2])
profile_time = tt.convert_tuple_to_datetime(tt.convert_14_to_tuple(local_profile_list[p,5]))
profile_number = profile_file.variables['CYCLE_NUMBER'][0]
profile_file.close()
if current_prof_wmoid != previous_prof_wmoid and currently_interpolating == 1:
interp_profile_indices.append(p)
for n, pint in enumerate(interp_profile_indices[1:-1]):
local_profile_list[pint, 3] = 9 # 'ETHAN_POSITION_QC' of 9 = bad, failed interpolation attempt
# (reached last of float's profiles without finding a good position)
local_profile_list[pint, 6] = NaN
local_profile_list[pint, 7] = NaN
currently_interpolating = 0 # reinitialize tracker and counter variables
bad_starting_position = 0
starting_position = [0, 0]
ending_position = [0, 0]
interp_profile_indices = []
datetime_stamps = []
if gt.geo_in_range(profile_lat_given,profile_lon_given,[-90,90],[-180,180]) \
and (profile_position_qc == 1 or profile_position_qc == 2):
if currently_interpolating == 0:
local_profile_list[p,3] = 1 # 'ETHAN_POSITION_QC' of 1 = likely good
elif currently_interpolating == 1: # here ends the interpolated track
local_profile_list[p, 3] = 1 # 'ETHAN_POSITION_QC' of 1 = likely good
currently_interpolating = 0
if bad_starting_position == 0:
ending_position = [profile_lat_given,profile_lon_given]
interp_profile_indices.append(p)
datetime_stamps.append(profile_time)
if len(interp_profile_indices) > 2:
interp_positions = gt.great_circle_interp(starting_position,ending_position,datetime_stamps)
for n, pint in enumerate(interp_profile_indices[1:-1]):
local_profile_list[pint, 3] = 2 # 'ETHAN_POSITION_QC' of 2 = interpolated; assumed under ice
local_profile_list[pint, 6] = interp_positions[n][0]
local_profile_list[pint, 7] = interp_positions[n][1]
else: # weird case of float's first profile with position flag '8', second profile with '1', and
# same positions listed for both (e.g. 5901722)
local_profile_list[p-1, 3] = 9 # 'ETHAN_POSITION_QC' of 9 = bad
local_profile_list[p-1, 6] = NaN
local_profile_list[p-1, 7] = NaN
starting_position = [0, 0] # reinitialize tracker and counter variables
ending_position = [0, 0]
interp_profile_indices = []
datetime_stamps = []
elif bad_starting_position == 1:
bad_starting_position = 0
elif profile_number == 1 and current_prof_wmoid != previous_prof_wmoid and profile_position_qc == 8 \
and gt.geo_in_range(profile_lat_given,profile_lon_given,[-90,-50],[-180,180]):
# special case where float's first profile is under ice, and thus was marked '8' (interp'd)
# with lat/lon likely from deployment location
# note: criterion of profile number = 1 used to avoid floats that drifted into download lat/lon box while
# under ice (i.e. first profile downloaded was marked '8' with GDAC-interp'd lat/lon)
currently_interpolating = 1
starting_position = [local_profile_list[p, 6], local_profile_list[p, 7]]
bad_starting_position = 0
interp_profile_indices = [p]
datetime_stamps = [profile_time]
local_profile_list[p, 3] = 2 # 'ETHAN_POSITION_QC' of 2 = under-ice first profile, lat/lon from deployment
elif current_prof_wmoid == previous_prof_wmoid \
and (profile_position_qc == 9 or (profile_position_qc == 8
and gt.geo_in_range(profile_lat_given,profile_lon_given,[-90,-50],[-180,180]))):
if currently_interpolating == 0:
currently_interpolating = 1
if local_profile_list[p-1, 3] == 1: # good starting position
starting_position = [local_profile_list[p-1,6],local_profile_list[p-1,7]]
bad_starting_position = 0
interp_profile_indices = [p-1, p]
datetime_stamps = [tt.convert_tuple_to_datetime(tt.convert_14_to_tuple(local_profile_list[p-1,5]))]
datetime_stamps.append(profile_time)
local_profile_list[p, 3] = 0 # 'ETHAN_POSITION_QC' of 0 = pending interpolation attempt
else: # bad starting position
bad_starting_position = 1
local_profile_list[p, 3] = 9 # 'ETHAN_POSITION_QC' of 9 = bad, failed interpolation attempt
local_profile_list[p, 6] = NaN
local_profile_list[p, 7] = NaN
elif currently_interpolating == 1:
if bad_starting_position == 0:
interp_profile_indices.append(p)
datetime_stamps.append(profile_time)
local_profile_list[p, 3] = 0 # 'ETHAN_POSITION_QC' of 0 = pending interpolation attempt
elif bad_starting_position == 1:
local_profile_list[p, 3] = 9 # 'ETHAN_POSITION_QC' of 9 = bad, failed interpolation attempt
local_profile_list[p, 6] = NaN
local_profile_list[p, 7] = NaN
else:
if currently_interpolating == 0:
local_profile_list[p, 3] = 9 # 'ETHAN_POSITION_QC' of 9 = bad, for many possible reasons
local_profile_list[p, 6] = NaN
local_profile_list[p, 7] = NaN
elif currently_interpolating == 1:
local_profile_list[p, 3] = 9 # 'ETHAN_POSITION_QC' of 9 = bad, for many possible reasons
local_profile_list[p, 6] = NaN
local_profile_list[p, 7] = NaN
interp_profile_indices.append(p)
for n, pint in enumerate(interp_profile_indices[1:-1]):
local_profile_list[pint, 3] = 9 # 'ETHAN_POSITION_QC' of 9 = bad, failed interpolation attempt
# (ended on a bad lat/lon)
local_profile_list[pint, 6] = NaN
local_profile_list[pint, 7] = NaN
currently_interpolating = 0 # reinitialize tracker and counter variables
bad_starting_position = 0
starting_position = [0, 0]
ending_position = [0, 0]
interp_profile_indices = []
datetime_stamps = []
df.how_far(p,range(num_profs),0.01)
# save updated local profile index
savetxt(save_to_meta + local_index_filename, local_profile_list, fmt='%i,%s,%s,%i,%s,%i,%f,%f,%s,%s,%s,%i')
def argo_soccom(save_to_root,overwrite_profs=True):
""" Downloads and processes SOCCOM float profiles in text format from FloatViz FTP server.
Args:
save_to_root: path of main Argo data directory of interest
"""
save_to_floats = save_to_root + 'SOCCOM_HiResQC_ftp_' + datetime.today().strftime('%Y-%m-%d') + '/'
os.mkdir(save_to_floats)
ftp_root = 'ftp.mbari.org'
url_root = 'pub/SOCCOM/FloatVizData/HRQC/'
df.all_files(ftp_root,url_root,save_to_floats,overwrite=overwrite_profs)
# do a find-and-replace on data files to remove whitespace between some column names
for data_filename in os.listdir(save_to_floats):
orig_file_as_list = codecs.open(save_to_floats + data_filename,'rb',encoding='latin-1').readlines()
new_file_as_list = []
for line in orig_file_as_list:
first_edit = line.replace('Lon [°E]', 'Lon[°E]')
second_edit = first_edit.replace('Lat [°N]', 'Lat[°N]')
new_file_as_list.append(second_edit)
out_file = codecs.open(save_to_floats + data_filename,'wb',encoding='latin-1')
out_file.writelines(new_file_as_list)
out_file.close()
def amsr(which_amsr, start_date, end_date, save_to, get_pdfs=True, overwrite=False, convert=False, conversion_script_dir=None):
""" Downloads AMSR-E or AMSR2 sea ice concentration product.
Converts data from HDF4 to HDF5 format by calling df.convert_to_hdf5() if 'convert'
is True, then deletes original HDF4 file.
AMSR-2:
AMSR2 6.25 km daily sea ice concentration product is ARTIST Sea Ice (ASI)
algorithm from 89 GHz channel, a preliminary data product that uses the
AMSR-E calibrations. Consider switching to JAXA GCOM-W1 AMSR2 sea ice
product when "research" calibrated version becomes available, or NSIDC
DAAC validated versions (supposedly in late 2016).
Example file path: http://www.iup.uni-bremen.de:8084/amsr2data/asi_daygrid_swath/s6250/2015/aug/Antarctic/asi-AMSR2-s6250-20150801-v5.hdf
Note that 3.125 km gridded ARTIST AMSR2 is available from the following
link, but the lower 6.25 km resolution is used here for consistency with
AMSR-E products: ftp://ftp-projects.zmaw.de/seaice/AMSR2/
AMSR-E:
AMSR-E 6.25 km daily sea ice concentration product is ARTIST Sea Ice (ASI)
algorithm from 89 GHz channel.
Example file path: http://iup.physik.uni-bremen.de:8084/amsredata/asi_daygrid_swath/l1a/s6250/2011/oct/Antarctic/asi-s6250-20111004-v5.hdf
Another option for AMSR-E is the 12.5 km v3 NSIDC product available here:
http://nsidc.org/data/AE_SI12
It seems that the 6.25 km ASI product is also available at the following link,
but no 3.125 km product is available: ftp://ftp-projects.zmaw.de/seaice/AMSR-E_ASI_IceConc/
SSMIS product from University of Bremen on 6.25 km grid to bridge gap between AMSR-E and AMSR2:
SSMIS interim: http://iup.physik.uni-bremen.de:8084/ssmisdata/asi_daygrid_swath/s6250/
Required data acknowledgement: Spreen et al. (2008), doi:10.1029/2005JC003384
Optional data acknowledgement (for AMSR2): Beitsch et al. (2014), doi:10.3390/rs6053841
Args:
which_amsr: if 1, download AMSR-E; if 2, download AMSR2
start_date and end_date: (Y,M,D), with start/end inclusive
save_to: directory path
get_pdfs: download image files
Returns:
None
Raises:
No handled exceptions
"""
if which_amsr == 2:
url_part1 = 'http://www.iup.uni-bremen.de:8084/amsr2data/asi_daygrid_swath/s6250/'
url_part2 = '/Antarctic/'
filename_part1 = 'asi-AMSR2-s6250-'
filename_part2 = '-v5.hdf'
elif which_amsr == 1:
url_part1 = 'http://iup.physik.uni-bremen.de:8084/amsredata/asi_daygrid_swath/l1a/s6250/'
url_part2 = '/Antarctic/'
filename_part1 = 'asi-s6250-'
filename_part2 = '-v5.hdf'
filename_part2_pdf1 = '-v5_nic.pdf'
filename_part2_pdf2 = '-v5_visual.pdf'
months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
starting_dir = os.getcwd()
os.chdir(save_to)
existing_files = os.listdir()
os.chdir(starting_dir)
all_dates = tt.dates_in_range(start_date, end_date)
for index, d in enumerate(all_dates):
url_dir = url_part1 + str(d[0]) + '/' + months[d[1]-1] + url_part2
filename = filename_part1 + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2
new_filename = filename.split('.')[0] + '.h5'
if (new_filename not in existing_files) or (new_filename in existing_files and overwrite is True):
df.single_file(url_dir, filename, save_to, overwrite)
if convert:
df.convert_to_hdf5(conversion_script_dir, filename, save_to, save_to, overwrite=overwrite, delete_original=True)
if get_pdfs:
pdf1name = filename_part1 + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2_pdf1
pdf2name = filename_part1 + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2_pdf2
df.single_file(url_dir, pdf1name, save_to, overwrite)
df.single_file(url_dir, pdf2name, save_to, overwrite)
df.how_far(index,all_dates,0.01)
def dmsp_nrt(start_date, end_date, save_to, overwrite=False):
""" Downloads NSIDC 25 km preliminary Near Real-Time (NRT) sea ice concentration product.
NSIDC's v1 daily SSMIS product on 25 km grid in netCDF-4 (HDF5) format. Product derived from 3 channels. Data files
contain the following:
- NRT CDR (Climate Data Record) product based on DMSP SSMIS currently from 2016-01-01 to present, using purely
automated application and merging of the NASA Team (NT) and Bootstrap (BT) algorithms.
(The NRT product does not contain Goddard Merged fields.)
Information: https://nsidc.org/data/g10016
Example file path: ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G10016/south/daily/2016/seaice_conc_daily_icdr_sh_f17_20160101_v01r00.nc
Expert guidance on the related CDR record:
https://climatedataguide.ucar.edu/climate-data/sea-ice-concentration-noaansidc-climate-data-record
Required data acknowledgement given in full under 'Citing This Data' here: http://dx.doi.org/10.7265/N5FF3QJ6.
"""
ftp_root = 'sidads.colorado.edu'
url_root = 'pub/DATASETS/NOAA/G10016/south/daily/'
filename_part1 = 'seaice_conc_daily_icdr_sh_'
filename_part2 = '_v01r00.nc'
sat_abbrevs = ['f17','f18']
sat_start_dates = [(2016,1,1),(2016,4,1)]
sat_end_dates = [(2016,3,30),tt.now()]
all_dates = tt.dates_in_range(start_date, end_date)
for index, d in enumerate(all_dates):
if not tt.is_time_in_range(sat_start_dates[0],sat_end_dates[-1],d):
raise ValueError('Given date range exceeds hard-coded satellite date ranges.')
for sat in range(0,len(sat_abbrevs)):
if tt.is_time_in_range(sat_start_dates[sat], sat_end_dates[sat], d):
sat_abbrev = sat_abbrevs[sat]
filename = filename_part1 + sat_abbrev + '_' + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2
starting_dir = os.getcwd()
try:
            if starting_dir != save_to:
os.chdir(save_to)
if filename not in os.listdir() or (filename in os.listdir() and overwrite is True):
df.single_file(url_root + '{0[0]}/'.format(d), filename, save_to, ftp_root=ftp_root, overwrite=False, auth=None)
finally:
os.chdir(starting_dir)
df.how_far(index, all_dates, 0.1)
def dmsp_v3(start_date, end_date, save_to, overwrite=False):
""" Downloads NSIDC 25 km sea ice concentration product.
NSIDC's v3 r1 daily SMMR + SSM/I + SSMIS product on 25 km grid in netCDF-4 (HDF5) format. Product derived from
3 channels. Data files contain the following:
- CDR (Climate Data Record) product based on DMSP SSM/I and SSMIS from 1987-07-09 onwards, using purely automated
application and merging of the NASA Team (NT) and Bootstrap (BT) algorithms.
- GSFC (NASA Goddard Space Flight Center) merged product based on the above, plus Nimbus-7 SMMR from 1978-11-01
onwards (every other day). Some manual quality control, interpolation, and editing has been conducted (but without
provenance), meaning that GSFC is a higher-quality but less uniform record than CDR. In any case, CDR excludes
the SMMR period (as of now) due to "data quality issues of the input brightness temperatures" but also
because "full provenance and documentation of the SMMR brightness temperatures and processing methodology
... cannot be assured."
Information: https://nsidc.org/data/g02202
Example file path: ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02202_V3/south/daily/1978/seaice_conc_daily_sh_n07_19781101_v03r01.nc
Expert guidance on these records:
https://climatedataguide.ucar.edu/climate-data/sea-ice-concentration-noaansidc-climate-data-record
Required data acknowledgement given in full under 'Citing This Data' here: http://dx.doi.org/10.7265/N59P2ZTG.
"""
ftp_root = 'sidads.colorado.edu'
url_root = 'pub/DATASETS/NOAA/G02202_V3/south/daily/'
filename_part1 = 'seaice_conc_daily_sh_'
filename_part2 = '_v03r01.nc'
sat_abbrevs = ['n07','f08','f11','f13','f17']
sat_start_dates = [(1978,11,1),(1987,7,9),(1991,12,3),(1995,10,1),(2008,1,1)]
sat_end_dates = [(1987,7,8),(1991,12,2),(1995,9,30),(2007,12,31),(2017,12,31)]
all_dates = tt.dates_in_range(start_date, end_date)
starting_dir = os.getcwd()
    if starting_dir != save_to:
os.chdir(save_to)
dir_contents = os.listdir()
for index, d in enumerate(all_dates):
print(d) ### FOR TESTING
if not tt.is_time_in_range(sat_start_dates[0],sat_end_dates[-1],d):
raise ValueError('Given date range exceeds hard-coded satellite date ranges.')
for sat in range(0,len(sat_abbrevs)):
if tt.is_time_in_range(sat_start_dates[sat], sat_end_dates[sat], d):
sat_abbrev = sat_abbrevs[sat]
filename = filename_part1 + sat_abbrev + '_' + '{0[0]}{0[1]:02d}{0[2]:02d}'.format(d) + filename_part2
if filename not in dir_contents or (filename in dir_contents and overwrite is True):
# if tt.is_time_in_range((1986,9,25),(1987,1,1),d): # misplaced files -- but fixed now
# df.single_file(url_root + '1987/',filename,save_to,ftp_root=ftp_root,
# overwrite=False,auth=None)
df.single_file(url_root + '{0[0]}/'.format(d), filename, save_to, ftp_root=ftp_root,
overwrite=False, auth=None)
df.how_far(index, all_dates, 0.1)
os.chdir(starting_dir)
def nimbus5(start_date, end_date, save_to, convert=False, conversion_script_dir=None):
""" Downloads Nimbus-5 sea ice concentration product.
Unzips files first. Converts data from HDF4 to HDF5 format by calling df.convert_to_hdf5()
if 'convert' is True, then deletes original HDF4 file.
NSIDC's v1 Nimbus-5 daily ESMR product on 25 km grid in compressed HDF4 format. Product based on
a single channel (19 GHz), which is less accurate than SMMR and SSM/I products from after 1976.
Information: http://nsidc.org/data/NSIDC-0009
IMPORTANT NOTE: Downloading batch data via HTTPS requires login to EarthData. To do this, one must create an
account: https://urs.earthdata.nasa.gov/users/new
... and then create a .netrc file via the command line using the following process:
cd $HOME
rm -f .netrc
touch .netrc
echo 'machine urs.earthdata.nasa.gov login [USERNAME] password [PASSWORD]' >> .netrc
note: replace with your username and password
chmod 0600 .netrc
Example file path: https://daacdata.apps.nsidc.org/pub/DATASETS/nsidc0009_esmr_seaice/south/daily00/ESMR-1972346.tse.00.gz
Required data acknowledgement given in full here: http://dx.doi.org/10.5067/W2PKTWMTY0TP.
"""
url_dir = 'https://daacdata.apps.nsidc.org/pub/DATASETS/nsidc0009_esmr_seaice/south/daily00/'
filename_part1 = 'ESMR-'
filename_part2 = '.tse.00.gz'
filename_part2_uncompressed = '.tse.00.hdf'
filename_part2_uncompressed_converted = '.tse.00.h5'
all_dates = tt.dates_in_range(start_date, end_date)
for index, d in enumerate(all_dates):
date_365 = tt.convert_date_to_365(d)
filename = filename_part1 + '{0[0]}{1:03d}'.format(d,date_365) + filename_part2
intermediate_filename = filename_part1 + '{0[0]}{1:03d}'.format(d, date_365) + filename_part2_uncompressed
new_filename = filename_part1 + '{0[0]}{1:03d}'.format(d,date_365) + filename_part2_uncompressed_converted
starting_dir = os.getcwd()
try:
            if starting_dir != save_to:  # was compared against the builtin 'dir'; the intended check is against save_to
os.chdir(save_to)
if new_filename not in os.listdir():
df.single_file(url_dir, filename, save_to, overwrite=False, auth=None)
df.un_gzip(save_to, filename, append_extension='.hdf', remove_compressed_file=True)
df.convert_to_hdf5(conversion_script_dir, intermediate_filename, save_to, save_to, overwrite=False,delete_original=True)
finally:
os.chdir(starting_dir)
df.how_far(index, all_dates, 0.1)
def ecmwf(date_range='1979-01-01/to/2017-08-31',area='-40/-90/-90/90',type='an',step='0',time='00/06/12/18',
params=['msl','t2m','skt'],output_filename=None):
""" Submits MARS request to retrieve ERA-Interim reanalysis fields as netCDF file.
Arguments:
date_range: for daily fields, format as, e.g., '1979-01-01/to/2017-08-31'
for monthly means of daily means, use [datetime(start_yr,start_mo,1),datetime(end_yr,end_mo,1)]
area: subsetting area, format '-40/-90/-90/90' (N/W/S/E)
type: 'an' for analysis or 'fc' for forecast
step: '0' for analysis only, '6/12' or '3/6/9/12' for 6-hourly or 3-hourly forecasts from 0000 and 1200 UTC
or None for monthly means (regardless, it will be ignored)
time: analysis times, e.g. '00/06/12/18' for all analyses, or '00/12' if retrieving forecasts only
or None for monthly means (regardless, it will be ignored)
params: parameter abbreviations, to be translated into GRIB and Table 2 codes - see below for those available
note: to find new codes, use parameter database: http://apps.ecmwf.int/codes/grib/param-db/
or use web interface and check "View the MARS request"
output_filename: desired path + filename including '.nc' extension, to save locally
or None to save to temporary storage; download from: http://apps.ecmwf.int/webmars/joblist/
note: if not downloading locally, cancel call using Ctrl-C after "Request is queued" appears
(otherwise file will be deleted almost instantly from ECMWF servers)
None: cancelling call (Ctrl-C) after "Request is queued" appears is fine. It will prevent local download, though.
Note: private login key required. See documentation for instructions on creating local login key.
Note: file size limit is probably 20 GB. Check here: https://software.ecmwf.int/wiki/display/WEBAPI/News+feed
Limited web API access:
http://apps.ecmwf.int/datasets/data/interim-full-daily/levtype=sfc/
http://apps.ecmwf.int/datasets/data/interim-full-moda/levtype=sfc/
Documentation:
https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets
https://software.ecmwf.int/wiki/display/WEBAPI/Python+ERA-interim+examples
https://software.ecmwf.int/wiki/display/UDOC/MARS+user+documentation
https://software.ecmwf.int/wiki/display/UDOC/MARS+keywords
http://apps.ecmwf.int/codes/grib/param-db
Reference: Dee et al. 2011
"""
param_codes = ''
for param_idx, param in enumerate(params):
# analysis parameters
if param == 't2m': param_codes += '167.128' # 2 metre temperature (K)
elif param == 'sst': param_codes += '34.128' # Sea surface temperature (K)
elif param == 'skt': param_codes += '235.128' # Skin temperature (K)
elif param == 'd2m': param_codes += '168.128' # 2 metre dewpoint temperature (K)
elif param == 'msl': param_codes += '151.128' # Mean sea level pressure (Pa)
elif param == 'sp': param_codes += '134.128' # Surface pressure (Pa)
elif param == 'u10': param_codes += '165.128' # 10 metre U wind component (m/s)
elif param == 'v10': param_codes += '166.128' # 10 metre V wind component (m/s)
elif param == 'si10': param_codes += '207.128' # 10 metre wind speed (m/s) [NOTE: in monthly means only]
# forecast parameters (* indicates accumulated field; note downward fluxes are positive)
elif param == 'sf': param_codes += '144.128' # Snowfall (m of water equivalent) *
elif param == 'sshf': param_codes += '146.128' # Surface sensible heat flux (J/m^2) *
elif param == 'slhf': param_codes += '147.128' # Surface latent heat flux (J/m^2) *
elif param == 'ssr': param_codes += '176.128' # Surface net solar radiation [shortwave] (J/m^2) *
elif param == 'str': param_codes += '177.128' # Surface net thermal radiation [longwave] (J/m^2) *
elif param == 'strd': param_codes += '175.128' # Surface thermal radiation [longwave] downwards (J/m^2) *
elif param == 'e': param_codes += '182.128' # Evaporation (m of water equivalent) *
elif param == 'tp': param_codes += '228.128' # Total precipitation (m) *
elif param == 'iews': param_codes += '229.128' # Instantaneous eastward turbulent surface stress (N/m^2)
elif param == 'inss': param_codes += '230.128' # Instantaneous northward turbulent surface stress (N/m^2)
if param_idx < len(params)-1: param_codes += '/'
retrieve_dict = {
"class":"ei",
"dataset":"interim",
"expver":"1",
"format":"netcdf",
"grid":"0.75/0.75",
"levtype":"sfc",
"param":param_codes,
"type":type,
'area':area,
"target":output_filename,
"use":'frequent',
}
# monthly means of daily means
if len(date_range) == 2:
retrieve_dict['stream'] = 'moda'
final_date_range = ''
working_month = date_range[0]
while working_month < date_range[1]:
final_date_range += working_month.strftime('%Y%m%d')
final_date_range += '/'
working_month += relativedelta(months=+1)
final_date_range += date_range[1].strftime('%Y%m%d')
retrieve_dict['date'] = final_date_range
# daily fields
else:
retrieve_dict['stream'] = 'oper'
retrieve_dict['date'] = date_range
retrieve_dict['step'] = step
retrieve_dict['time'] = time
server = ECMWFDataServer()
server.retrieve(retrieve_dict)
def isd_station(station_number, start_year, end_year, save_to, overwrite=True):
""" Download sub-daily meteorological station data from NOAA NCEI Integrated Surface Database (ISD) ISD-Lite
space-delimited annual data files.
Args:
station_number: six-digit integer station number, likely five-digit WMOID with trailing zero appended
start_year: first year of met data
end_year: last year of met data
save_to: directory path
overwrite: overwrite existing files?
Data provenance and information:
ISD homepage: https://www.ncdc.noaa.gov/isd
root data directory: ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-lite
info on file format: ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-lite/isd-lite-format.pdf
brief technical document: ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-lite/isd-lite-technical-document.pdf
station numbers can be found using: https://www.ncdc.noaa.gov/homr/#ncdcstnid=30103999&tab=MSHR
Antarctic station locations can be found at: http://nsidc.org/data/docs/daac/nsidc0190_surface_obs/spatial.html
Citation (assumed, not given):
Smith et al. (2011), BAMS, "The Integrated Surface Database: Recent developments and partnerships."
doi:10.1175/2011BAMS3015.1
Specific Antarctic station notes:
WMOid 89512 (station number 895120) - Novolazarevskaja Station (70.7678°S, 11.8317°E) - 1973-2019
http://www.aari.aq/stations/lazarev/lazarev_en.html
https://www.ncdc.noaa.gov/homr/#ncdcstnid=30103999&tab=MSHR
WMOid 89001 (station number 890010) - SANAE SAF-Base (70.3°S, 2.35°W) - 1973-1994
WMOid 89004 (station number 890040) - SANAE AWS (71.7°S, 2.8°W) - 1997-2019
WMOid 89002 (station number 890020) - Neumayer Station (70.667°S, 8.25°W) - 1981-2019
WMOid 89504 (station number 895040) - Troll in Antarktis (72.017°S, 2.383°W) - 1994-2019
WMOid 89514 (station number 895140) - Maitri (70.767°S, 11.75°E) - 1990-2019
WMOid 89524 (station number 895240) - Asuka Japan-Base (71.533°S, 24.133°E) - 1987-1997
WMOid 89003 (station number 890030) - Halvfarryggen (71.15°S, 6.683°W) - 2009-2017?
"""
for year in range(start_year,end_year+1):
df.single_file('pub/data/noaa/isd-lite/{0}/'.format(year),'{0}-99999-{1}.gz'.format(station_number,year),
save_to,ftp_root='ftp.ncdc.noaa.gov',overwrite=overwrite,verbose=True)
df.un_gzip(save_to,'{0}-99999-{1}.gz'.format(station_number,year),
remove_compressed_file=True,overwrite=overwrite)
| python |
from robot.api.parsing import (
Token,
ModelTransformer,
SectionHeader,
EmptyLine
)
from robot.parsing.model.statements import Statement
import click
class MergeAndOrderSections(ModelTransformer):
"""
Merge duplicated sections and order them.
Default order is: Comments > Settings > Variables > Test Cases > Keywords.
    You can change the sorting order by configuring the ``order`` parameter with a comma-separated list of section
    names (without spaces)::
        robotidy --transform MergeAndOrderSections:order=settings,keywords,variables,testcases,comments
    Because merging and changing the order of sections can shuffle your empty lines, it is strongly advised to always
    run the ``NormalizeNewLines`` transformer after this one.
    If both ``*** Test Cases ***`` and ``*** Tasks ***`` are defined in one file they will be merged into one (the
    header name will be taken from the first encountered section).
    Any data before the first section is treated as a comment in Robot Framework. This transformer adds a
    ``*** Comments ***`` section for such lines::
i am comment
# robocop: disable
*** Settings ***
To::
*** Comments ***
i am comment
# robocop: disable
*** Settings ***
You can disable this behaviour by setting ``create_comment_section`` to False.
"""
def __init__(self, order: str = '', create_comment_section: bool = True):
self.sections_order = self.parse_order(order)
self.create_comment_section = create_comment_section
@staticmethod
def parse_order(order):
default_order = (
Token.COMMENT_HEADER,
Token.SETTING_HEADER,
Token.VARIABLE_HEADER,
Token.TESTCASE_HEADER,
Token.KEYWORD_HEADER
)
if not order:
return default_order
splitted = order.lower().split(',')
map = {
'comments': Token.COMMENT_HEADER,
'comment': Token.COMMENT_HEADER,
'settings': Token.SETTING_HEADER,
'setting': Token.SETTING_HEADER,
'variables': Token.VARIABLE_HEADER,
'variable': Token.VARIABLE_HEADER,
'testcases': Token.TESTCASE_HEADER,
'testcase': Token.TESTCASE_HEADER,
'keywords': Token.KEYWORD_HEADER,
'keyword': Token.KEYWORD_HEADER
}
parsed_order = []
for split in splitted:
parsed_order.append(map.get(split, None))
        if any(header not in parsed_order for header in default_order) or len(parsed_order) != len(default_order):
            raise click.BadOptionUsage(
                option_name='transform',
                message=f"Invalid configurable value: '{order}' for order for MergeAndOrderSections transformer."
                        f" Custom order should be provided in comma separated list with all section names:\n"
                        f"order=comments,settings,variables,testcases,keywords"
            )
return parsed_order
def visit_File(self, node): # noqa
if len(node.sections) < 2:
return node
sections = {}
last = len(node.sections) - 1
for index, section in enumerate(node.sections):
if index == last:
section = self.from_last_section(section)
section_type = self.get_section_type(section)
if section_type not in sections:
sections[section_type] = section
else:
if len(section.header.data_tokens) > 1:
print(f'{node.source}: Merged duplicated section has section header comments. '
'Only header comments from first section header of the same type are preserved.')
sections[section_type].body += section.body
node.sections = [sections[order] for order in self.sections_order if order in sections]
return node
@staticmethod
def from_last_section(node):
""" Last node use different logic for new line marker. It is not possible to preserve all empty lines but
we need at least ensure that following code::
*** Test Case ***
*** Variables ***
Will not become::
*** Variables ****** Test Case ***
"""
if node.body:
last_statement = node.body[-1]
new_line = [Token(Token.EOL, '\n')]
if hasattr(last_statement, 'body'):
if not last_statement.body:
node.body[-1].body.append(EmptyLine.from_params(eol='\n'))
else:
last_statement = last_statement.body[-1]
if hasattr(last_statement, 'end'):
if last_statement.end:
node.body[-1].body[-1].end = Statement.from_tokens(
list(last_statement.end.tokens[:-1]) + new_line
)
else:
node.body[-1].body[-1] = Statement.from_tokens(list(last_statement.tokens[:-1]) + new_line)
else:
node.body[-1] = Statement.from_tokens(list(last_statement.tokens[:-1]) + new_line)
else:
last_token = node.header.tokens[-1]
if last_token.type == Token.EOL:
node.header = Statement.from_tokens(list(node.header.tokens[:-1]) + [Token(Token.EOL, '\n')])
return node
def get_section_type(self, section):
header_tokens = (Token.COMMENT_HEADER, Token.TESTCASE_HEADER, Token.SETTING_HEADER, Token.KEYWORD_HEADER,
Token.VARIABLE_HEADER)
if section.header:
name_token = section.header.get_token(*header_tokens)
section_type = name_token.type
else:
section_type = Token.COMMENT_HEADER
if self.create_comment_section:
section.header = SectionHeader.from_params(section_type, '*** Comments ***')
return section_type
| python |
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules=cythonize('harmony.pyx'))
| python |
from logbook import Logger, StreamHandler, TimedRotatingFileHandler
from logbook.more import ColorizedStderrHandler
import logbook
import socket
import uuid
import sys
import fire
import os
def logger(name='LOGBOOK', log_path='', file_log=False):
logbook.set_datetime_format('local')
ColorizedStderrHandler(bubble=True).push_application()
log_dir = os.path.join('log') if not log_path else log_path
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if file_log:
TimedRotatingFileHandler(os.path.join(log_dir, '%s.log' % name.lower()), date_format='%Y-%m-%d', bubble=True).push_application()
return Logger(name)
def bytes2human(n):
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y', 'B', 'N', 'D')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i+1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
            return '%.2f %sB' % (value, s)
    return '%s B' % n  # values below 1 KB fall through the loop; report them as plain bytes
def filesize(path):
assert os.path.isdir(path)
total_size = 0
for root, dirs, files in os.walk(path):
for f in files:
fpath = os.path.join(root, f)
if os.path.islink(fpath):
continue
total_size += os.path.getsize(fpath)
return bytes2human(total_size)
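# Quick sanity checks for the helpers above (1 KB = 1024 B in this scheme):
#   bytes2human(10000)   -> '9.77 KB'
#   bytes2human(1 << 30) -> '1.00 GB'
#   filesize('.')        -> total size of the current directory, formatted the same way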
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        return  # raising StopIteration inside a generator is an error under PEP 479 (Python 3.7+)
def match(self, *arg):
"""Indicate whether or not to enter a case suite"""
if self.fall or not arg:
return True
        elif self.value in arg:  # 'arg' is the *args tuple of case values
self.fall = True
return True
else:
return False
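# Intended usage of the switch helper, following the classic "readable switch" recipe it is
# based on (the command values and handlers here are made up):
#   for case in switch(command):
#       if case('start'):
#           do_start()   # hypothetical handler
#           break
#       if case('stop'):
#           do_stop()    # hypothetical handler
#           break
#       if case():       # default: matches because no argument is given
#           raise ValueError(command)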
class HandleJson(object):
@classmethod
def __paths(cls, data, path=''):
if isinstance(data, dict):
for k, v in data.items():
tmp = path + "['%s']" % k
yield (tmp, v)
yield from cls.__paths(v, tmp)
        if isinstance(data, list):  # mirror the dict branch for list items
for k, v in enumerate(data):
tmp = path + '[%d]' % k
yield (tmp, v)
yield from cls.__paths(v, tmp)
@classmethod
def find_key_path(cls, data, key):
result = []
        for path, value in cls.__paths(data):
if path.endswith("['%s']" % key):
result.append(path)
return result
@classmethod
def find_value_path(cls, data, key):
result = []
for path, value in cls.__paths(data):
if isinstance(value, (str, int, bool, float)):
if value == key:
result.append(path)
return result
@classmethod
def get_key_node(cls, data, key):
for path, value in cls.__paths(data):
if path.endswith("['%s']" % key):
return value
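# Example of the path helpers above (hypothetical data):
#   HandleJson.find_key_path({'a': {'b': 1}}, 'b')   -> ["['a']['b']"]
#   HandleJson.find_value_path({'a': {'b': 1}}, 1)   -> ["['a']['b']"]
# i.e. each result is the bracketed access path from the root to the match.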
def get_ip_hostname(ip='8.8.8.8', port=80):
h = socket.gethostname()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((ip, port))
ip = s.getsockname()[0]
finally:
s.close()
return h, ip
def gen_uuid(func=1, name='python', namespace='url'):
namespaces = {
'dns': uuid.NAMESPACE_DNS,
'oid': uuid.NAMESPACE_OID,
'url': uuid.NAMESPACE_URL,
'x500': uuid.NAMESPACE_X500
}
name_space = namespaces.get(namespace, None)
assert name_space is not None, 'namespace support values: dns, oid, url & x500.'
assert func in [1, 3, 4, 5] , 'func support values: 1, 3, 4, 5.'
id1 = uuid.uuid1().hex
id3 = uuid.uuid3(name_space, name).hex
id4 = uuid.uuid4().hex
id5 = uuid.uuid5(name_space, name).hex
return eval('id%d' % func)
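# Example (deterministic variants): gen_uuid(func=5, name='example.com', namespace='dns')
# always returns the same hex string for the same name/namespace, since UUIDv3/v5 are
# name-based; func=1 and func=4 give a new value on every call.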
if __name__ == '__main__':
fire.Fire()
| python |
import test_reader as FR
if __name__ == "__main__":
extra = FR.Pair()
extra.first = '1'
extra.second = '2'
buf = extra.to_fbs()
extra1 = FR.Pair(buf)
acc = FR.Account()
acc.langs.append(FR.test_fbs.Language.Language.CHT)
acc.langs.append(FR.test_fbs.Language.Language.CHS)
acc.extras.append(extra1)
acc.test_int = 12
acc.test_str = 'erer'
buf = acc.to_fbs()
acc1 = FR.Account(buf)
pass
| python |
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatting strings for Artifact Registry commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
BUILD_GIT_SHA_FORMAT = ("BUILD_DETAILS.buildDetails.provenance."
"sourceProvenance.context.cloudRepo.revisionId"
".notnull().list().slice(:8).join(''):optional:label"
"=GIT_SHA")
BUILD_FORMAT = ("BUILD_DETAILS.buildDetails.provenance.id.notnull().list()"
":optional:label=BUILD")
VULNERABILITY_FORMAT = "vuln_counts.list():optional:label=VULNERABILITIES"
IMAGE_BASIS_FORMAT = ("IMAGE_BASIS.derivedImage.sort(distance).map()"
".extract(baseResourceUrl).slice(:1).map().list().list()"
".split('//').slice(1:).list().split('@').slice(:1)"
".list():optional:label=FROM")
DISCOVERY_FORMAT = ("DISCOVERY[0].discovered.analysisStatus:optional:label"
"=VULNERABILITY_SCAN_STATUS")
CONTAINER_ANALYSIS_METADATA_FORMAT = """
{},
{},
{},
{},
{}
""".format(BUILD_GIT_SHA_FORMAT, VULNERABILITY_FORMAT, IMAGE_BASIS_FORMAT,
BUILD_FORMAT, DISCOVERY_FORMAT)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from argparse import ArgumentParser
from base64 import b64decode
from os import path, remove
from os.path import isdir
from xml.etree import ElementTree
REF = u"""\
┌──────────────────────────────────────────────────────────────────────────────────────────┐
│ EUMAK European Keyboard Layout │
└──────────────────────────────────────────────────────────────────────────────────────────┘
┌─────┐┌───────────────────────────────────┐┌──────────────────────────────────────────────┐
│ 2 4 ││ 2 = Shift │ 4 = Shift+AltGr ││ [Mod]+[~],[X] -> àèìǹòùẁỳǜ ὰὲὴὶὸὺὼ │
│ 1 3 ││ 1 = Normal │ 3 = AltGr ││ [Mod]+[1],[X] -> áćéǵíḱĺḿńóṕŕśúẃýźḯǘ άέήίόύώ │
└─────┘└───────────────────────────────────┘│ [Mod]+[2],[X] -> ǎčďěǧȟǐǰǩľňǒřšťǔžǚ │
┌──────────────────────────────────────────┐│ [Mod]+[3],[X] -> âĉêĝĥîĵôŝûŵŷẑ │
│ [Mod]+[X] -> áćéǵíḱĺḿńóṕŕśúẃýźőű άέήίόύώ ││ [Mod]+[4],[X] -> āēḡīōūȳǟȫǖ │
│ [Mod]+[6] -> Toggle Latin/Greek ││ [Mod]+[5],[X] -> ȧḃċḋėḟġḣıȷŀṁṅȯṗṙṡṫẇẋẏż │
└──────────────────────────────────────────┘└──────────────────────────────────────────────┘
"""
LATIN = u"""\
┌─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬────────────┐
│ @ ° │ ! ¡ │ " ½ │ £ # │ $ € │ % § │ & ¶ │ | † │ ( « │ ) » │ = ≠ │ / \ │ * · │ Backspace │
│ ~ ` │ 1 ´ │ 2 ˇ │ 3 ^ │ 4 ¯ │ 5 ˙ │ 6 µ │ 7 { │ 8 [ │ 9 ] │ 0 } │ - ÷ │ + × │ │
├─────┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬──────────┤
│ │ Q Ă │ W Ł │ E Ę │ R Ŧ │ T Ț │ Y Ů │ U Ų │ I Į │ O Ø │ P Õ │ Ü Å │ Ï Ÿ │ Enter │
│ Tab │ q ă │ w ł │ e ę │ r ŧ │ t ț │ y ů │ u ų │ i į │ o ø │ p õ │ ü å │ ï ÿ │ │
├───────┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┐ │
│ │ A Ą │ S Ș │ D Đ │ F Þ │ G Ģ │ H Ħ │ J Ñ │ K Ķ │ L Ļ │ Ö Œ │ Ä Æ │ Ë Ẅ │ │
│ Caps │ a ą │ s ș │ d đ │ f þ │ g ģ │ h ħ │ j ñ │ k ķ │ l ļ │ ö œ │ ä æ │ ë ẅ │ │
├───────┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴─────┴────────┤
│ │ │ Z ẞ │ X Ŭ │ C Ç │ V Ð │ B Ã │ N Ņ │ M Ŋ │ ; ≤ │ : ≥ │ ? ¿ │ │
│ Shift │ Mod │ z ß │ x ŭ │ c ç │ v ð │ b ã │ n ņ │ m ŋ │ , < │ . > │ ' _ │ Shift │
├───────┼─────┴─┬───┴───┬─┴─────┴─────┴─────┴─────┴─────┴──┬──┴────┬┴─────┴┬───────┬───────┤
│ │ │ │ │ │ │ │ │
│ Ctrl │ Meta │ Alt │ Space │ AltGr │ Meta │ Menu │ Ctrl │
└───────┴───────┴───────┴──────────────────────────────────┴───────┴───────┴───────┴───────┘
"""
GREEK = u"""\
┌─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┬────────────┐
│ @ ° │ ! ¡ │ " ½ │ £ # │ $ € │ % § │ & ¶ │ | † │ ( « │ ) » │ = ≠ │ / \ │ * · │ Backspace │
│ ~ ` │ 1 ´ │ 2 ˇ │ 3 ^ │ 4 ¯ │ 5 ˙ │ 6 µ │ 7 { │ 8 [ │ 9 ] │ 0 } │ - ÷ │ + × │ │
├─────┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬──────────┤
│ │ : │ │ Ε │ Ρ │ Τ │ Υ │ Θ │ Ι │ Ο │ Π │ Ϋ │ Ϊ │ Enter │
│ Tab │ ; │ ς │ ε │ ρ │ τ │ υ │ θ │ ι │ ο │ π │ ϋ │ ϊ │ │
├───────┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┬───┴─┐ │
│ │ Α │ Σ │ Δ │ Φ │ Γ │ Η │ Ξ │ Κ │ Λ │ │ │ │ │
│ Caps │ α │ σ │ δ │ φ │ γ │ η │ ξ │ κ │ λ │ │ │ │ │
├───────┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴───┬─┴─────┴────────┤
│ │ │ Ζ │ Χ │ Ψ │ Ω │ Β │ Ν │ Μ │ ; « │ : » │ ? ¿ │ │
│ Shift │ Mod │ ζ │ χ │ ψ │ ω │ β │ ν │ μ │ , < │ . > │ ' _ │ Shift │
├───────┼─────┴─┬───┴───┬─┴─────┴─────┴─────┴─────┴─────┴──┬──┴────┬┴─────┴┬───────┬───────┤
│ │ │ │ │ │ │ │ │
│ Ctrl │ Meta │ Alt │ Space │ AltGr │ Meta │ Menu │ Ctrl │
└───────┴───────┴───────┴──────────────────────────────────┴───────┴───────┴───────┴───────┘
"""
LANGUAGES = [
"eng",
"deu",
"fra",
"ita",
"spa",
"pol",
"ron",
"nld",
"swe",
]
class XKB(object):
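    """Installer for the Eumak layout in a system XKB tree.

    install() writes the decoded symbols file into the XKB symbols
    directory and registers the layout in the evdev rules file;
    uninstall() reverses both steps.
    """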
xkb = "/usr/share/X11/xkb"
rules = path.join(xkb, "rules", "evdev.xml")
symbols = path.join(xkb, "symbols", "eumak")
def __init__(self):
if not isdir(self.xkb):
raise OSError("XKB installation not found at %s" % self.xkb)
self._tree = ElementTree.parse(self.rules)
self._root = self._tree.getroot()
self._layout_list = self._root.find("./layoutList")
def install(self):
self._install_symbols()
self._uninstall_layout()
self._install_layout()
self._tree.write(self.rules)
def uninstall(self):
self._uninstall_symbols()
self._uninstall_layout()
self._tree.write(self.rules)
def _install_symbols(self):
        # DATA is base64; b64decode returns bytes, so write in binary mode.
        with open(self.symbols, "wb") as f:
            f.write(b64decode(DATA))
def _uninstall_symbols(self):
if path.isfile(self.symbols):
remove(self.symbols)
def _install_layout(self):
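        # Append a <layout> element (name, description, language list and an
        # empty <variantList>) to the <layoutList> of evdev.xml.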
layout = ElementTree.SubElement(self._layout_list, "layout")
config_item = ElementTree.SubElement(layout, "configItem")
ElementTree.SubElement(config_item, "name").text = "eumak"
ElementTree.SubElement(config_item, "shortDescription").text = "eumak"
ElementTree.SubElement(config_item, "description").text = "European (Eumak)"
language_list = ElementTree.SubElement(config_item, "languageList")
for lang in LANGUAGES:
ElementTree.SubElement(language_list, "iso639Id").text = lang
ElementTree.SubElement(layout, "variantList")
def _uninstall_layout(self):
to_delete = []
for layout in self._layout_list.iterfind("layout"):
name = layout.find("configItem/name")
if name.text == "eumak":
to_delete.append(layout)
for layout in to_delete:
self._layout_list.remove(layout)
def main():
parser = ArgumentParser(description="Eumak keyboard layout installer")
parser.add_argument("-i", "--install", action="store_true")
parser.add_argument("-u", "--uninstall", action="store_true")
args = parser.parse_args()
xkb = XKB()
if args.install:
xkb.install()
elif args.uninstall:
xkb.uninstall()
else:
print(REF, end="")
print(LATIN, end="")
print(GREEK, end="")
DATA = """\
Ly8gRXVtYWsgRXVyb3BlYW4gS2V5Ym9hcmQgTGF5b3V0CgovLyBDb3B5cmlnaHQgMjAxOSwgTmln
ZWwgU21hbGwKLy8KLy8gTGljZW5zZWQgdW5kZXIgdGhlIEFwYWNoZSBMaWNlbnNlLCBWZXJzaW9u
IDIuMCAodGhlICJMaWNlbnNlIik7Ci8vIHlvdSBtYXkgbm90IHVzZSB0aGlzIGZpbGUgZXhjZXB0
IGluIGNvbXBsaWFuY2Ugd2l0aCB0aGUgTGljZW5zZS4KLy8gWW91IG1heSBvYnRhaW4gYSBjb3B5
IG9mIHRoZSBMaWNlbnNlIGF0Ci8vCi8vICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5z
ZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9y
IGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUg
TGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdB
UlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1w
bGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJu
aW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCgov
LyDilIzilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilJAKLy8g4pSC
IEVVTUFLIEV1cm9wZWFuIEtleWJvYXJkIExheW91dCAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg4pSCCi8vIOKUlOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUmAovLyDilIzilIDilIDilIDilIDilIDilJDi
lIzilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilJDilIzi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilJAKLy8g4pSCIDIgNCDilILilIIgIDIgPSBTaGlmdCAg
ICDilIIgIDQgPSBTaGlmdCtBbHRHciAg4pSC4pSCIFtNb2RdK1t+XSxbWF0gLT4gw6DDqMOsx7nD
ssO54bqB4buzx5wg4b2w4b2y4b204b224b244b264b28ICAgICAgICAgICDilIIKLy8g4pSCIDEg
MyDilILilIIgIDEgPSBOb3JtYWwgICDilIIgIDMgPSBBbHRHciAgICAgICAg4pSC4pSCIFtNb2Rd
K1sxXSxbWF0gLT4gw6HEh8Opx7XDreG4scS64bi/xYTDs+G5lcWVxZvDuuG6g8O9xbrhuK/HmCDO
rM6tzq7Or8+Mz43PjiDilIIKLy8g4pSU4pSA4pSA4pSA4pSA4pSA4pSY4pSU4pSA4pSA4pSA4pSA
4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA
4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSY4pSCIFtNb2RdK1syXSxbWF0g
LT4gx47EjcSPxJvHp8ifx5DHsMepxL7FiMeSxZnFocWlx5TFvseaICAgICAgICAgIOKUggovLyDi
lIzilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilJDilIIgW01vZF0rWzNdLFtYXSAtPiDDosSJw6rEncSlw67EtcO0xZ3D
u8W1xbfhupEgICAgICAgICAgICAgICDilIIKLy8g4pSCIFtNb2RdK1tYXSAtPiDDocSHw6nHtcOt
4bixxLrhuL/FhMOz4bmVxZXFm8O64bqDw73FusWRxbEgzqzOrc6uzq/PjM+Nz44g4pSC4pSCIFtN
b2RdK1s0XSxbWF0gLT4gxIHEk+G4ocSrxY3Fq8izx5/Iq8eWICAgICAgICAgICAgICAgICAg4pSC
Ci8vIOKUgiBbTW9kXStbNl0gLT4gVG9nZ2xlIExhdGluL0dyZWVrICAgICAgICAgIOKUguKUgiBb
TW9kXStbNV0sW1hdIC0+IMin4biDxIvhuIvEl+G4n8Sh4bijxLHIt8WA4bmB4bmFyK/huZfhuZnh
uaHhuavhuofhuovhuo/FvCAgICAgIOKUggovLyDilJTilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilJjilJTilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDi
lIDilIDilIDilIDilIDilIDilJgKLy8g4pSM4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA
4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA
4pSs4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSs
4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSs4pSA
4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA
4pSA4pSA4pSA4pSA4pSQCi8vIOKUgiBAIMKwIOKUgiAhIMKhIOKUgiAiIMK9IOKUgiDCoyAjIOKU
giAkIOKCrCDilIIgJSDCpyDilIIgJiDCtiDilIIgfCDigKAg4pSCICggwqsg4pSCICkgwrsg4pSC
ID0g4omgIOKUgiAvIFwg4pSCICogwrcg4pSCIEJhY2tzcGFjZSAg4pSCCi8vIOKUgiB+IGAg4pSC
IDEgwrQg4pSCIDIgy4cg4pSCIDMgXiDilIIgNCDCryDilIIgNSDLmSDilIIgNiDCtSDilIIgNyB7
IOKUgiA4IFsg4pSCIDkgXSDilIIgMCB9IOKUgiAtIMO3IOKUgiArIMOXIOKUgiAgICAgICAgICAg
IOKUggovLyDilJzilIDilIDilIDilIDilIDilLTilIDilKzilIDilIDilIDilLTilIDilKzilIDi
lIDilIDilLTilIDilKzilIDilIDilIDilLTilIDilKzilIDilIDilIDilLTilIDilKzilIDilIDi
lIDilLTilIDilKzilIDilIDilIDilLTilIDilKzilIDilIDilIDilLTilIDilKzilIDilIDilIDi
lLTilIDilKzilIDilIDilIDilLTilIDilKzilIDilIDilIDilLTilIDilKzilIDilIDilIDilLTi
lIDilKzilIDilIDilIDilLTilIDilKzilIDilIDilIDilIDilIDilIDilIDilIDilIDilIDilKQK
Ly8g4pSCICAgICAgIOKUgiBRIMSCIOKUgiBXIMWBIOKUgiBFIMSYIOKUgiBSIMWmIOKUgiBUIMia
IOKUgiBZIMWuIOKUgiBVIMWyIOKUgiBJIMSuIOKUgiBPIMOYIOKUgiBQIMOVIOKUgiDDnCDDhSDi
lIIgw48gxbgg4pSCIEVudGVyICAgIOKUggovLyDilIIgVGFiICAg4pSCIHEgxIMg4pSCIHcgxYIg
4pSCIGUgxJkg4pSCIHIgxacg4pSCIHQgyJsg4pSCIHkgxa8g4pSCIHUgxbMg4pSCIGkgxK8g4pSC
IG8gw7gg4pSCIHAgw7Ug4pSCIMO8IMOlIOKUgiDDryDDvyDilIIgICAgICAgICAg4pSCCi8vIOKU
nOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKU
gOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKU
tOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKU
gOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKU
rOKUgOKUgOKUgOKUtOKUgOKUkCAgICAgICAg4pSCCi8vIOKUgiAgICAgICAgIOKUgiBBIMSEIOKU
giBTIMiYIOKUgiBEIMSQIOKUgiBGIMOeIOKUgiBHIMSiIOKUgiBIIMSmIOKUgiBKIMORIOKUgiBL
IMS2IOKUgiBMIMS7IOKUgiDDliDFkiDilIIgw4Qgw4Yg4pSCIMOLIOG6hCDilIIgICAgICAgIOKU
ggovLyDilIIgQ2FwcyAgICDilIIgYSDEhSDilIIgcyDImSDilIIgZCDEkSDilIIgZiDDviDilIIg
ZyDEoyDilIIgaCDEpyDilIIgaiDDsSDilIIgayDEtyDilIIgbCDEvCDilIIgw7YgxZMg4pSCIMOk
IMOmIOKUgiDDqyDhuoUg4pSCICAgICAgICDilIIKLy8g4pSc4pSA4pSA4pSA4pSA4pSA4pSA4pSA
4pSs4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSs
4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSs4pSA
4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS0
4pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSA4pSA4pS04pSA
4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSkCi8vIOKUgiAgICAgICDilIIgICAgIOKUgiBaIOG6niDi
lIIgWCDFrCDilIIgQyDDhyDilIIgViDDkCDilIIgQiDDgyDilIIgTiDFhSDilIIgTSDFiiDilIIg
OyDiiaQg4pSCIDog4omlIOKUgiA/IMK/IOKUgiAgICAgICAgICAgICAgICDilIIKLy8g4pSCIFNo
aWZ0IOKUgiBNb2Qg4pSCIHogw58g4pSCIHggxa0g4pSCIGMgw6cg4pSCIHYgw7Ag4pSCIGIgw6Mg
4pSCIG4gxYYg4pSCIG0gxYsg4pSCICwgPCDilIIgLiA+IOKUgiAnIF8g4pSCIFNoaWZ0ICAgICAg
ICAgIOKUggovLyDilJzilIDilIDilIDilIDilIDilIDilIDilLzilIDilIDilIDilIDilIDilLTi
lIDilKzilIDilIDilIDilLTilIDilIDilIDilKzilIDilLTilIDilIDilIDilIDilIDilLTilIDi
lIDilIDilIDilIDilLTilIDilIDilIDilIDilIDilLTilIDilIDilIDilIDilIDilLTilIDilIDi
lIDilIDilIDilLTilIDilIDilKzilIDilIDilLTilIDilIDilIDilIDilKzilLTilIDilIDilIDi
lIDilIDilLTilKzilIDilIDilIDilIDilIDilIDilIDilKzilIDilIDilIDilIDilIDilIDilIDi
lKQKLy8g4pSCICAgICAgIOKUgiAgICAgICDilIIgICAgICAg4pSCICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgIOKUgiAgICAgICDilIIgICAgICAg4pSCICAgICAgIOKUgiAgICAgICDi
lIIKLy8g4pSCIEN0cmwgIOKUgiBNZXRhICDilIIgQWx0ICAg4pSCICAgICAgICAgICAgIFNwYWNl
ICAgICAgICAgICAgICAgIOKUgiBBbHRHciDilIIgTWV0YSAg4pSCIE1lbnUgIOKUgiBDdHJsICDi
lIIKLy8g4pSU4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSA4pSA4pSA4pS0
4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA
4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA
4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSA4pSA
4pSA4pS04pSA4pSA4pSA4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSYCi8v
IOKUjOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKU
rOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUrOKU
gOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKU
gOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUrOKUgOKUgOKU
gOKUgOKUgOKUrOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUkAovLyDilIIg
QCDCsCDilIIgISDCoSDilIIgIiDCvSDilIIgwqMgIyDilIIgJCDigqwg4pSCICUgwqcg4pSCICYg
wrYg4pSCIHwg4oCgIOKUgiAoIMKrIOKUgiApIMK7IOKUgiA9IOKJoCDilIIgLyBcIOKUgiAqIMK3
IOKUgiBCYWNrc3BhY2UgIOKUggovLyDilIIgfiBgIOKUgiAxIMK0IOKUgiAyIMuHIOKUgiAzIF4g
4pSCIDQgwq8g4pSCIDUgy5kg4pSCIDYgwrUg4pSCIDcgeyDilIIgOCBbIOKUgiA5IF0g4pSCIDAg
fSDilIIgLSDDtyDilIIgKyDDlyDilIIgICAgICAgICAgICDilIIKLy8g4pSc4pSA4pSA4pSA4pSA
4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA
4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS0
4pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA
4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSs
4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSkCi8vIOKUgiAgICAgICDilIIgOiAgIOKU
giAgICAg4pSCIM6VICAg4pSCIM6hICAg4pSCIM6kICAg4pSCIM6lICAg4pSCIM6YICAg4pSCIM6Z
ICAg4pSCIM6fICAg4pSCIM6gICAg4pSCIM6rICAg4pSCIM6qICAg4pSCIEVudGVyICAgIOKUggov
LyDilIIgVGFiICAg4pSCIDsgICDilIIgz4IgICDilIIgzrUgICDilIIgz4EgICDilIIgz4QgICDi
lIIgz4UgICDilIIgzrggICDilIIgzrkgICDilIIgzr8gICDilIIgz4AgICDilIIgz4sgICDilIIg
z4ogICDilIIgICAgICAgICAg4pSCCi8vIOKUnOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUtOKUgOKU
rOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKU
gOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKU
gOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKU
gOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUrOKUgOKUgOKUgOKUtOKUgOKUkCAgICAgICAg4pSC
Ci8vIOKUgiAgICAgICAgIOKUgiDOkSAgIOKUgiDOoyAgIOKUgiDOlCAgIOKUgiDOpiAgIOKUgiDO
kyAgIOKUgiDOlyAgIOKUgiDOniAgIOKUgiDOmiAgIOKUgiDOmyAgIOKUgiAgICAg4pSCICAgICDi
lIIgICAgIOKUgiAgICAgICAg4pSCCi8vIOKUgiBDYXBzICAgIOKUgiDOsSAgIOKUgiDPgyAgIOKU
giDOtCAgIOKUgiDPhiAgIOKUgiDOsyAgIOKUgiDOtyAgIOKUgiDOviAgIOKUgiDOuiAgIOKUgiDO
uyAgIOKUgiAgICAg4pSCICAgICDilIIgICAgIOKUgiAgICAgICAg4pSCCi8vIOKUnOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKU
tOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKUtOKU
gOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKU
gOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKUgOKUrOKUgOKUtOKUgOKUgOKU
gOKUgOKUgOKUtOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUpAovLyDilIIgICAgICAg4pSCICAg
ICDilIIgzpYgICDilIIgzqcgICDilIIgzqggICDilIIgzqkgICDilIIgzpIgICDilIIgzp0gICDi
lIIgzpwgICDilIIgOyDCqyDilIIgOiDCuyDilIIgPyDCvyDilIIgICAgICAgICAgICAgICAg4pSC
Ci8vIOKUgiBTaGlmdCDilIIgTW9kIOKUgiDOtiAgIOKUgiDPhyAgIOKUgiDPiCAgIOKUgiDPiSAg
IOKUgiDOsiAgIOKUgiDOvSAgIOKUgiDOvCAgIOKUgiAsIDwg4pSCIC4gPiDilIIgJyBfIOKUgiBT
aGlmdCAgICAgICAgICDilIIKLy8g4pSc4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pS84pSA4pSA4pSA
4pSA4pSA4pS04pSA4pSs4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSs4pSA4pS04pSA4pSA4pSA4pSA
4pSA4pS04pSA4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSA
4pS04pSA4pSA4pSA4pSA4pSA4pS04pSA4pSA4pSs4pSA4pSA4pS04pSA4pSA4pSA4pSA4pSs4pS0
4pSA4pSA4pSA4pSA4pSA4pS04pSs4pSA4pSA4pSA4pSA4pSA4pSA4pSA4pSs4pSA4pSA4pSA4pSA
4pSA4pSA4pSA4pSkCi8vIOKUgiAgICAgICDilIIgICAgICAg4pSCICAgICAgIOKUgiAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICDilIIgICAgICAg4pSCICAgICAgIOKUgiAgICAgICDi
lIIgICAgICAg4pSCCi8vIOKUgiBDdHJsICDilIIgTWV0YSAg4pSCIEFsdCAgIOKUgiAgICAgICAg
ICAgICBTcGFjZSAgICAgICAgICAgICAgICDilIIgQWx0R3Ig4pSCIE1ldGEgIOKUgiBNZW51ICDi
lIIgQ3RybCAg4pSCCi8vIOKUlOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUtOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUtOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUtOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUtOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUtOKUgOKUgOKU
gOKUgOKUgOKUgOKUgOKUtOKUgOKUgOKUgOKUgOKUgOKUgOKUgOKUtOKUgOKUgOKUgOKUgOKUgOKU
gOKUgOKUmAoKCmRlZmF1bHQgcGFydGlhbAp4a2Jfc3ltYm9scyAiYmFzaWMiIHsKCiAgICBpbmNs
dWRlICJldW1hayhhbHRncl9zd2l0Y2gpIgogICAgaW5jbHVkZSAiZXVtYWsobW9kX3N3aXRjaCki
CgogICAgbmFtZVtHcm91cDFdID0gIkxhdGluIjsKICAgIG5hbWVbR3JvdXAyXSA9ICJHcmVlayI7
CgogICAgLy8gU3ltYm9scwogICAgLy8KICAgIGtleS50eXBlW0dyb3VwMV0gPSAiRUlHSFRfTEVW
RUwiOwogICAga2V5LnR5cGVbR3JvdXAyXSA9ICJFSUdIVF9MRVZFTCI7CiAgICAvLwogICAgLy8g
Uk9XIEUKICAgIC8vID09PT09CiAgICAvLwogICAgLy8gICAgICAgICAgIFs9PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT1dCiAgICAvLyAgICAgICAgICAgWyBOb3JtICAgICAgICAgICAgLCBTaGlm
dCAgICAgICAgICAsIEFsdCAgICAgICAgICAgICAsIFNoaWZ0K0FsdCAgICAgICwgTW9kICAgICAg
ICAgICAgICwgU2hpZnQrTW9kICAgICAgICwgTW9kK0FsdCAgICAgICAgICwgU2hpZnQrTW9kK0Fs
dCAgIF0KICAgIC8vICAgICAgICAgICBbPT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XQogICAg
a2V5IDxUTERFPiB7IFsgYXNjaWl0aWxkZSAgICAgICwgYXQgICAgICAgICAgICAgLCBncmF2ZSAg
ICAgICAgICAgLCBkZWdyZWUgICAgICAgICAsIGRlYWRfZ3JhdmUgICAgICAsIGRlYWRfZ3JhdmUg
ICAgICAsIGRlYWRfZ3JhdmUgICAgICAsIGRlYWRfZ3JhdmUgICAgICBdICwKICAgICAgICAgICAg
ICAgICBbIGFzY2lpdGlsZGUgICAgICAsIGF0ICAgICAgICAgICAgICwgZ3JhdmUgICAgICAgICAg
ICwgZGVncmVlICAgICAgICAgLCBkZWFkX2dyYXZlICAgICAgLCBkZWFkX2dyYXZlICAgICAgLCBk
ZWFkX2dyYXZlICAgICAgLCBkZWFkX2dyYXZlICAgICAgXSB9OwogICAgLy8gICAgICAgICAgIFst
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFFMDE+IHsgWyAxICAgICAgICAg
ICAgICAgLCBleGNsYW0gICAgICAgICAsIGFjdXRlICAgICAgICAgICAsIGV4Y2xhbWRvd24gICAg
ICwgZGVhZF9hY3V0ZSAgICAgICwgZGVhZF9hY3V0ZSAgICAgICwgZGVhZF9hY3V0ZSAgICAgICwg
ZGVhZF9hY3V0ZSAgICAgIF0gLAogICAgICAgICAgICAgICAgIFsgMSAgICAgICAgICAgICAgICwg
ZXhjbGFtICAgICAgICAgLCBleGNsYW1kb3duICAgICAgLCBkZWFkX2FjdXRlICAgICAsIGRlYWRf
YWN1dGUgICAgICAsIGRlYWRfYWN1dGUgICAgICAsIGFjdXRlICAgICAgICAgICAsIGRlYWRfYWN1
dGUgICAgICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LV0KICAgIGtleSA8QUUwMj4geyBbIDIgICAgICAgICAgICAgICAsIHF1b3RlZGJsICAgICAgICwg
Y2Fyb24gICAgICAgICAgICwgb25laGFsZiAgICAgICAgLCBkZWFkX2Nhcm9uICAgICAgLCBkZWFk
X2Nhcm9uICAgICAgLCBkZWFkX2Nhcm9uICAgICAgLCBkZWFkX2Nhcm9uICAgICAgXSAsCiAgICAg
ICAgICAgICAgICAgWyAyICAgICAgICAgICAgICAgLCBxdW90ZWRibCAgICAgICAsIGNhcm9uICAg
ICAgICAgICAsIG9uZWhhbGYgICAgICAgICwgZGVhZF9jYXJvbiAgICAgICwgZGVhZF9jYXJvbiAg
ICAgICwgZGVhZF9jYXJvbiAgICAgICwgZGVhZF9jYXJvbiAgICAgIF0gfTsKICAgIC8vICAgICAg
ICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBRTAzPiB7IFsgMyAg
ICAgICAgICAgICAgICwgbnVtYmVyc2lnbiAgICAgLCBhc2NpaWNpcmN1bSAgICAgLCBzdGVybGlu
ZyAgICAgICAsIGRlYWRfY2lyY3VtZmxleCAsIGRlYWRfY2lyY3VtZmxleCAsIGRlYWRfY2lyY3Vt
ZmxleCAsIGRlYWRfY2lyY3VtZmxleCBdICwKICAgICAgICAgICAgICAgICBbIDMgICAgICAgICAg
ICAgICAsIG51bWJlcnNpZ24gICAgICwgYXNjaWljaXJjdW0gICAgICwgc3RlcmxpbmcgICAgICAg
LCBkZWFkX2NpcmN1bWZsZXggLCBkZWFkX2NpcmN1bWZsZXggLCBkZWFkX2NpcmN1bWZsZXggLCBk
ZWFkX2NpcmN1bWZsZXggXSB9OwogICAgLy8gICAgICAgICAgIFstLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS1dCiAgICBrZXkgPEFFMDQ+IHsgWyA0ICAgICAgICAgICAgICAgLCBkb2xsYXIgICAg
ICAgICAsIG1hY3JvbiAgICAgICAgICAsIEV1cm9TaWduICAgICAgICwgZGVhZF9tYWNyb24gICAg
ICwgZGVhZF9tYWNyb24gICAgICwgZGVhZF9tYWNyb24gICAgICwgZGVhZF9tYWNyb24gICAgIF0g
LAogICAgICAgICAgICAgICAgIFsgNCAgICAgICAgICAgICAgICwgZG9sbGFyICAgICAgICAgLCBt
YWNyb24gICAgICAgICAgLCBFdXJvU2lnbiAgICAgICAsIGRlYWRfbWFjcm9uICAgICAsIGRlYWRf
bWFjcm9uICAgICAsIGRlYWRfbWFjcm9uICAgICAsIGRlYWRfbWFjcm9uICAgICBdIH07CiAgICAv
LyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAgIGtleSA8QUUwNT4g
eyBbIDUgICAgICAgICAgICAgICAsIHBlcmNlbnQgICAgICAgICwgYWJvdmVkb3QgICAgICAgICwg
c2VjdGlvbiAgICAgICAgLCBkZWFkX2Fib3ZlZG90ICAgLCBkZWFkX2Fib3ZlZG90ICAgLCBkZWFk
X2Fib3ZlZG90ICAgLCBkZWFkX2Fib3ZlZG90ICAgXSAsCiAgICAgICAgICAgICAgICAgWyA1ICAg
ICAgICAgICAgICAgLCBwZXJjZW50ICAgICAgICAsIGFib3ZlZG90ICAgICAgICAsIHNlY3Rpb24g
ICAgICAgICwgZGVhZF9hYm92ZWRvdCAgICwgZGVhZF9hYm92ZWRvdCAgICwgZGVhZF9hYm92ZWRv
dCAgICwgZGVhZF9hYm92ZWRvdCAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBRTA2PiB7IFsgNiAgICAgICAgICAgICAgICwgYW1w
ZXJzYW5kICAgICAgLCBtdSAgICAgICAgICAgICAgLCBwYXJhZ3JhcGggICAgICAsIElTT19MYXN0
X0dyb3VwICAsIElTT19MYXN0X0dyb3VwICAsIElTT19MYXN0X0dyb3VwICAsIElTT19MYXN0X0dy
b3VwICBdICwKICAgICAgICAgICAgICAgICBbIDYgICAgICAgICAgICAgICAsIGFtcGVyc2FuZCAg
ICAgICwgbXUgICAgICAgICAgICAgICwgcGFyYWdyYXBoICAgICAgLCBJU09fRmlyc3RfR3JvdXAg
LCBJU09fRmlyc3RfR3JvdXAgLCBJU09fRmlyc3RfR3JvdXAgLCBJU09fRmlyc3RfR3JvdXAgXSB9
OwogICAgLy8gICAgICAgICAgIFstLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkg
PEFFMDc+IHsgWyA3ICAgICAgICAgICAgICAgLCBiYXIgICAgICAgICAgICAsIGJyYWNlbGVmdCAg
ICAgICAsIGRhZ2dlciAgICAgICAgICwgYnJhY2VsZWZ0ICAgICAgICwgZGFnZ2VyICAgICAgICAg
ICwgYnJhY2VsZWZ0ICAgICAgICwgZGFnZ2VyICAgICAgICAgIF0gLAogICAgICAgICAgICAgICAg
IFsgNyAgICAgICAgICAgICAgICwgYmFyICAgICAgICAgICAgLCBicmFjZWxlZnQgICAgICAgLCBk
YWdnZXIgICAgICAgICAsIGJyYWNlbGVmdCAgICAgICAsIGRhZ2dlciAgICAgICAgICAsIGJyYWNl
bGVmdCAgICAgICAsIGRhZ2dlciAgICAgICAgICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAgIGtleSA8QUUwOD4geyBbIDggICAgICAgICAgICAg
ICAsIHBhcmVubGVmdCAgICAgICwgYnJhY2tldGxlZnQgICAgICwgZ3VpbGxlbW90bGVmdCAgLCBi
cmFja2V0bGVmdCAgICAgLCBndWlsbGVtb3RsZWZ0ICAgLCBicmFja2V0bGVmdCAgICAgLCBndWls
bGVtb3RsZWZ0ICAgXSAsCiAgICAgICAgICAgICAgICAgWyA4ICAgICAgICAgICAgICAgLCBwYXJl
bmxlZnQgICAgICAsIGJyYWNrZXRsZWZ0ICAgICAsIGd1aWxsZW1vdGxlZnQgICwgYnJhY2tldGxl
ZnQgICAgICwgZ3VpbGxlbW90bGVmdCAgICwgYnJhY2tldGxlZnQgICAgICwgZ3VpbGxlbW90bGVm
dCAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tXQog
ICAga2V5IDxBRTA5PiB7IFsgOSAgICAgICAgICAgICAgICwgcGFyZW5yaWdodCAgICAgLCBicmFj
a2V0cmlnaHQgICAgLCBndWlsbGVtb3RyaWdodCAsIGJyYWNrZXRyaWdodCAgICAsIGd1aWxsZW1v
dHJpZ2h0ICAsIGJyYWNrZXRyaWdodCAgICAsIGd1aWxsZW1vdHJpZ2h0ICBdICwKICAgICAgICAg
ICAgICAgICBbIDkgICAgICAgICAgICAgICAsIHBhcmVucmlnaHQgICAgICwgYnJhY2tldHJpZ2h0
ICAgICwgZ3VpbGxlbW90cmlnaHQgLCBicmFja2V0cmlnaHQgICAgLCBndWlsbGVtb3RyaWdodCAg
LCBicmFja2V0cmlnaHQgICAgLCBndWlsbGVtb3RyaWdodCAgXSB9OwogICAgLy8gICAgICAgICAg
IFstLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFFMTA+IHsgWyAwICAgICAg
ICAgICAgICAgLCBlcXVhbCAgICAgICAgICAsIGJyYWNlcmlnaHQgICAgICAsIG5vdGVxdWFsICAg
ICAgICwgYnJhY2VyaWdodCAgICAgICwgbm90ZXF1YWwgICAgICAgICwgYnJhY2VyaWdodCAgICAg
ICwgbm90ZXF1YWwgICAgICAgIF0gLAogICAgICAgICAgICAgICAgIFsgMCAgICAgICAgICAgICAg
ICwgZXF1YWwgICAgICAgICAgLCBicmFjZXJpZ2h0ICAgICAgLCBub3RlcXVhbCAgICAgICAsIGJy
YWNlcmlnaHQgICAgICAsIG5vdGVxdWFsICAgICAgICAsIGJyYWNlcmlnaHQgICAgICAsIG5vdGVx
dWFsICAgICAgICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLV0KICAgIGtleSA8QUUxMT4geyBbIG1pbnVzICAgICAgICAgICAsIHNsYXNoICAgICAgICAg
ICwgZGl2aXNpb24gICAgICAgICwgYmFja3NsYXNoICAgICAgLCBkaXZpc2lvbiAgICAgICAgLCBi
YWNrc2xhc2ggICAgICAgLCBkaXZpc2lvbiAgICAgICAgLCBiYWNrc2xhc2ggICAgICAgXSAsCiAg
ICAgICAgICAgICAgICAgWyBtaW51cyAgICAgICAgICAgLCBzbGFzaCAgICAgICAgICAsIGRpdmlz
aW9uICAgICAgICAsIGJhY2tzbGFzaCAgICAgICwgZGl2aXNpb24gICAgICAgICwgYmFja3NsYXNo
ICAgICAgICwgZGl2aXNpb24gICAgICAgICwgYmFja3NsYXNoICAgICAgIF0gfTsKICAgIC8vICAg
ICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBRTEyPiB7IFsg
cGx1cyAgICAgICAgICAgICwgYXN0ZXJpc2sgICAgICAgLCBtdWx0aXBseSAgICAgICAgLCBwZXJp
b2RjZW50ZXJlZCAsIG11bHRpcGx5ICAgICAgICAsIHBlcmlvZGNlbnRlcmVkICAsIG11bHRpcGx5
ICAgICAgICAsIHBlcmlvZGNlbnRlcmVkICBdICwKICAgICAgICAgICAgICAgICBbIHBsdXMgICAg
ICAgICAgICAsIGFzdGVyaXNrICAgICAgICwgbXVsdGlwbHkgICAgICAgICwgcGVyaW9kY2VudGVy
ZWQgLCBtdWx0aXBseSAgICAgICAgLCBwZXJpb2RjZW50ZXJlZCAgLCBtdWx0aXBseSAgICAgICAg
LCBwZXJpb2RjZW50ZXJlZCAgXSB9OwogICAgLy8gICAgICAgICAgIFs9PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT1dCiAgICAvLwogICAgLy8gUk9XIEIKICAgIC8vID09PT09CiAgICAvLwogICAg
Ly8gICAgICAgICAgIFs9PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XQogICAgLy8gICAg
ICAgICAgIFsgTm9ybSAgICAgICAgICAgICwgU2hpZnQgICAgICAgICAgLCBBbHQgICAgICAgICAg
ICAgLCBTaGlmdCtBbHQgICAgICAgICwgTW9kICAgICAgICAgICAgICwgU2hpZnQrTW9kICAgICAg
ICAsIE1vZCtBbHQgICAgICAgICAsIFNoaWZ0K01vZCtBbHQgICAgXQogICAgLy8gICAgICAgICAg
IFs9PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XQogICAga2V5IDxBQjA4PiB7IFsgY29t
bWEgICAgICAgICAgICwgc2VtaWNvbG9uICAgICAgLCBsZXNzICAgICAgICAgICAgLCBsZXNzdGhh
bmVxdWFsICAgICwgbGVzcyAgICAgICAgICAgICwgbGVzc3RoYW5lcXVhbCAgICAsIGxlc3MgICAg
ICAgICAgICAsIGxlc3N0aGFuZXF1YWwgICAgXSAsCiAgICAgICAgICAgICAgICAgWyBjb21tYSAg
ICAgICAgICAgLCBzZW1pY29sb24gICAgICAsIGxlc3MgICAgICAgICAgICAsIGxlc3N0aGFuZXF1
YWwgICAgLCBsZXNzICAgICAgICAgICAgLCBsZXNzdGhhbmVxdWFsICAgICwgbGVzcyAgICAgICAg
ICAgICwgbGVzc3RoYW5lcXVhbCAgICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFCMDk+IHsgWyBwZXJpb2QgICAgICAgICAg
LCBjb2xvbiAgICAgICAgICAsIGdyZWF0ZXIgICAgICAgICAsIGdyZWF0ZXJ0aGFuZXF1YWwgLCBn
cmVhdGVyICAgICAgICAgLCBncmVhdGVydGhhbmVxdWFsICwgZ3JlYXRlciAgICAgICAgICwgZ3Jl
YXRlcnRoYW5lcXVhbCBdICwKICAgICAgICAgICAgICAgICBbIHBlcmlvZCAgICAgICAgICAsIGNv
bG9uICAgICAgICAgICwgZ3JlYXRlciAgICAgICAgICwgZ3JlYXRlcnRoYW5lcXVhbCAsIGdyZWF0
ZXIgICAgICAgICAsIGdyZWF0ZXJ0aGFuZXF1YWwgLCBncmVhdGVyICAgICAgICAgLCBncmVhdGVy
dGhhbmVxdWFsIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLV0KICAgIGtleSA8QUIxMD4geyBbIGFwb3N0cm9waGUgICAgICAsIHF1ZXN0aW9uICAg
ICAgICwgdW5kZXJzY29yZSAgICAgICwgcXVlc3Rpb25kb3duICAgICAsIHVuZGVyc2NvcmUgICAg
ICAsIHF1ZXN0aW9uZG93biAgICAgLCB1bmRlcnNjb3JlICAgICAgLCBxdWVzdGlvbmRvd24gICAg
IF0gLAogICAgICAgICAgICAgICAgIFsgYXBvc3Ryb3BoZSAgICAgICwgcXVlc3Rpb24gICAgICAg
LCB1bmRlcnNjb3JlICAgICAgLCBxdWVzdGlvbmRvd24gICAgICwgdW5kZXJzY29yZSAgICAgICwg
cXVlc3Rpb25kb3duICAgICAsIHVuZGVyc2NvcmUgICAgICAsIHF1ZXN0aW9uZG93biAgICAgXSB9
OwogICAgLy8gICAgICAgICAgIFs9PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XQoKICAg
IC8vIExldHRlcnMKICAgIC8vCiAgICBrZXkudHlwZVtHcm91cDFdID0gIkVJR0hUX0xFVkVMX0FM
UEhBQkVUSUMiOwogICAga2V5LnR5cGVbR3JvdXAyXSA9ICJFSUdIVF9MRVZFTF9BTFBIQUJFVElD
IjsKICAgIC8vCiAgICAvLyBST1cgRAogICAgLy8gPT09PT0KICAgIC8vIFUwMTY2ID0gVHN0cm9r
ZQogICAgLy8gVTAxNjcgPSB0c3Ryb2tlCiAgICAvLyBVMDFGQSA9IEFyaW5nYWN1dGUKICAgIC8v
IFUwMUZCID0gYXJpbmdhY3V0ZQogICAgLy8gVTAxRkUgPSBPc2xhc2hhY3V0ZQogICAgLy8gVTAx
RkYgPSBvc2xhc2hhY3V0ZQogICAgLy8gVTAyMUEgPSBUY29tbWEKICAgIC8vIFUwMjFCID0gdGNv
bW1hCiAgICAvLyBVMUU0QyA9IE90aWxkZWFjdXRlCiAgICAvLyBVMUU0RCA9IG90aWxkZWFjdXRl
CiAgICAvLyBVMUU1NCA9IFBhY3V0ZQogICAgLy8gVTFFNTUgPSBwYWN1dGUKICAgIC8vCiAgICAv
LyAgICAgICAgICAgWz09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PV0K
ICAgIC8vICAgICAgICAgICBbIE5vcm0gICAgICAgICAgICAgICAgICAsIFNoaWZ0ICAgICAgICAg
ICAgICAgICAsIEFsdCAgICAgICAgLCBTaGlmdCtBbHQgICwgTW9kICAgICAgICAgICAgICAgICAg
ICAgICAgICwgU2hpZnQrTW9kICAgICAgICAgICAsIE1vZCtBbHQgICAgICAsIFNoaWZ0K01vZCtB
bHQgXQogICAgLy8gICAgICAgICAgIFs9PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT1dCiAgICBrZXkgPEFEMDE+IHsgWyBxICAgICAgICAgICAgICAgICAgICAgLCBRICAg
ICAgICAgICAgICAgICAgICAgLCBhYnJldmUgICAgICwgQWJyZXZlICAgICAsIE5vU3ltYm9sICAg
ICAgICAgICAgICAgICAgICAsIE5vU3ltYm9sICAgICAgICAgICAgLCBhYnJldmVhY3V0ZSAgLCBB
YnJldmVhY3V0ZSAgIF0gLAogICAgICAgICAgICAgICAgIFsgc2VtaWNvbG9uICAgICAgICAgICAg
ICwgY29sb24gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICBdICwgdHlwZVtHcm91cDJdID0gIlRXT19MRVZFTCIgfTsKICAg
IC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
XQogICAga2V5IDxBRDAyPiB7IFsgdyAgICAgICAgICAgICAgICAgICAgICwgVyAgICAgICAgICAg
ICAgICAgICAgICwgbHN0cm9rZSAgICAsIExzdHJva2UgICAgLCB3YWN1dGUgICAgICAgICAgICAg
ICAgICAgICAgLCBXYWN1dGUgICAgICAgICAgICAgICwgTm9TeW1ib2wgICAgICwgTm9TeW1ib2wg
ICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX2ZpbmFsc21hbGxzaWdtYSAsIEdyZWVr
X2FjY2VudGRpZXJlc2lzICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgXSAsIHR5cGVbR3JvdXAyXSA9ICJUV09fTEVWRUwiIH07CiAgICAvLyAgICAg
ICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAgIGtl
eSA8QUQwMz4geyBbIGUgICAgICAgICAgICAgICAgICAgICAsIEUgICAgICAgICAgICAgICAgICAg
ICAsIGVvZ29uZWsgICAgLCBFb2dvbmVrICAgICwgZWFjdXRlICAgICAgICAgICAgICAgICAgICAg
ICwgRWFjdXRlICAgICAgICAgICAgICAsIE5vU3ltYm9sICAgICAsIE5vU3ltYm9sICAgICAgXSAs
CiAgICAgICAgICAgICAgICAgWyBHcmVla19lcHNpbG9uICAgICAgICAgLCBHcmVla19FUFNJTE9O
ICAgICAgICAgLCBOb1N5bWJvbCAgICwgTm9TeW1ib2wgICAsIEdyZWVrX2Vwc2lsb25hY2NlbnQg
ICAgICAgICAsIEdyZWVrX0VQU0lMT05hY2NlbnQgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tXQogICAga2V5IDxBRDA0PiB7IFsgciAgICAgICAgICAgICAgICAgICAgICwg
UiAgICAgICAgICAgICAgICAgICAgICwgVTAxNjcgICAgICAsIFUwMTY2ICAgICAgLCByYWN1dGUg
ICAgICAgICAgICAgICAgICAgICAgLCBSYWN1dGUgICAgICAgICAgICAgICwgTm9TeW1ib2wgICAg
ICwgTm9TeW1ib2wgICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX3JobyAgICAgICAg
ICAgICAsIEdyZWVrX1JITyAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgXSB9OwogICAgLy8gICAgICAgICAgIFstLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFEMDU+IHsgWyB0ICAg
ICAgICAgICAgICAgICAgICAgLCBUICAgICAgICAgICAgICAgICAgICAgLCBVMDIxQiAgICAgICwg
VTAyMUEgICAgICAsIE5vU3ltYm9sICAgICAgICAgICAgICAgICAgICAsIE5vU3ltYm9sICAgICAg
ICAgICAgLCBOb1N5bWJvbCAgICAgLCBOb1N5bWJvbCAgICAgIF0gLAogICAgICAgICAgICAgICAg
IFsgR3JlZWtfdGF1ICAgICAgICAgICAgICwgR3JlZWtfVEFVICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBdIH07CiAgICAvLyAg
ICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAg
IGtleSA8QUQwNj4geyBbIHkgICAgICAgICAgICAgICAgICAgICAsIFkgICAgICAgICAgICAgICAg
ICAgICAsIHVyaW5nICAgICAgLCBVcmluZyAgICAgICwgeWFjdXRlICAgICAgICAgICAgICAgICAg
ICAgICwgWWFjdXRlICAgICAgICAgICAgICAsIE5vU3ltYm9sICAgICAsIE5vU3ltYm9sICAgICAg
XSAsCiAgICAgICAgICAgICAgICAgWyBHcmVla191cHNpbG9uICAgICAgICAgLCBHcmVla19VUFNJ
TE9OICAgICAgICAgLCBOb1N5bWJvbCAgICwgTm9TeW1ib2wgICAsIEdyZWVrX3Vwc2lsb25hY2Nl
bnQgICAgICAgICAsIEdyZWVrX1VQU0lMT05hY2NlbnQgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBRDA3PiB7IFsgdSAgICAgICAgICAgICAgICAgICAg
ICwgVSAgICAgICAgICAgICAgICAgICAgICwgdW9nb25layAgICAsIFVvZ29uZWsgICAgLCB1YWN1
dGUgICAgICAgICAgICAgICAgICAgICAgLCBVYWN1dGUgICAgICAgICAgICAgICwgTm9TeW1ib2wg
ICAgICwgTm9TeW1ib2wgICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX3RoZXRhICAg
ICAgICAgICAsIEdyZWVrX1RIRVRBICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgXSB9OwogICAgLy8gICAgICAgICAgIFstLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFEMDg+IHsgWyBp
ICAgICAgICAgICAgICAgICAgICAgLCBJICAgICAgICAgICAgICAgICAgICAgLCBpb2dvbmVrICAg
ICwgSW9nb25layAgICAsIGlhY3V0ZSAgICAgICAgICAgICAgICAgICAgICAsIElhY3V0ZSAgICAg
ICAgICAgICAgLCBOb1N5bWJvbCAgICAgLCBOb1N5bWJvbCAgICAgIF0gLAogICAgICAgICAgICAg
ICAgIFsgR3JlZWtfaW90YSAgICAgICAgICAgICwgR3JlZWtfSU9UQSAgICAgICAgICAgICwgTm9T
eW1ib2wgICAsIE5vU3ltYm9sICAgLCBHcmVla19pb3RhYWNjZW50ICAgICAgICAgICAgLCBHcmVl
a19JT1RBYWNjZW50ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBdIH07CiAgICAv
LyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0K
ICAgIGtleSA8QUQwOT4geyBbIG8gICAgICAgICAgICAgICAgICAgICAsIE8gICAgICAgICAgICAg
ICAgICAgICAsIG9zbGFzaCAgICAgLCBPc2xhc2ggICAgICwgb2FjdXRlICAgICAgICAgICAgICAg
ICAgICAgICwgT2FjdXRlICAgICAgICAgICAgICAsIFUwMUZGICAgICAgICAsIFUwMUZFICAgICAg
ICAgXSAsCiAgICAgICAgICAgICAgICAgWyBHcmVla19vbWljcm9uICAgICAgICAgLCBHcmVla19P
TUlDUk9OICAgICAgICAgLCBOb1N5bWJvbCAgICwgTm9TeW1ib2wgICAsIEdyZWVrX29taWNyb25h
Y2NlbnQgICAgICAgICAsIEdyZWVrX09NSUNST05hY2NlbnQgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBRDEwPiB7IFsgcCAgICAgICAgICAgICAgICAg
ICAgICwgUCAgICAgICAgICAgICAgICAgICAgICwgb3RpbGRlICAgICAsIE90aWxkZSAgICAgLCBV
MUU1NSAgICAgICAgICAgICAgICAgICAgICAgLCBVMUU1NCAgICAgICAgICAgICAgICwgVTFFNEQg
ICAgICAgICwgVTFFNEMgICAgICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX3BpICAg
ICAgICAgICAgICAsIEdyZWVrX1BJICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgXSB9OwogICAgLy8gICAgICAgICAgIFstLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFEMTE+IHsg
WyB1ZGlhZXJlc2lzICAgICAgICAgICAgLCBVZGlhZXJlc2lzICAgICAgICAgICAgLCBhcmluZyAg
ICAgICwgQXJpbmcgICAgICAsIHVkb3VibGVhY3V0ZSAgICAgICAgICAgICAgICAsIFVkb3VibGVh
Y3V0ZSAgICAgICAgLCBVMDFGQiAgICAgICAgLCBVMDFGQSAgICAgICAgIF0gLAogICAgICAgICAg
ICAgICAgIFsgR3JlZWtfdXBzaWxvbmRpZXJlc2lzICwgR3JlZWtfVVBTSUxPTmRpZXJlc2lzICwg
Tm9TeW1ib2wgICAsIE5vU3ltYm9sICAgLCBHcmVla191cHNpbG9uYWNjZW50ZGllcmVzaXMgLCBO
b1N5bWJvbCAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBdIH07CiAg
ICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LV0KICAgIGtleSA8QUQxMj4geyBbIGlkaWFlcmVzaXMgICAgICAgICAgICAsIElkaWFlcmVzaXMg
ICAgICAgICAgICAsIHlkaWFlcmVzaXMgLCBZZGlhZXJlc2lzICwgTm9TeW1ib2wgICAgICAgICAg
ICAgICAgICAgICwgTm9TeW1ib2wgICAgICAgICAgICAsIE5vU3ltYm9sICAgICAsIE5vU3ltYm9s
ICAgICAgXSAsCiAgICAgICAgICAgICAgICAgWyBHcmVla19pb3RhZGllcmVzaXMgICAgLCBHcmVl
a19JT1RBZGllcmVzaXMgICAgLCBOb1N5bWJvbCAgICwgTm9TeW1ib2wgICAsIEdyZWVrX2lvdGFh
Y2NlbnRkaWVyZXNpcyAgICAsIE5vU3ltYm9sICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbPT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09XQogICAgLy8KICAgIC8vIFJPVyBDCiAgICAvLyA9PT09PQog
ICAgLy8gVTAxRjQgPSBHYWN1dGUKICAgIC8vIFUwMUY1ID0gZ2FjdXRlCiAgICAvLyBVMDFGQyA9
IEFFYWN1dGUKICAgIC8vIFUwMUZEID0gYWVhY3V0ZQogICAgLy8gVTAyMTggPSBTY29tbWEKICAg
IC8vIFUwMjE5ID0gc2NvbW1hCiAgICAvLyBVMUUzMCA9IEthY3V0ZQogICAgLy8gVTFFMzEgPSBr
YWN1dGUKICAgIC8vCiAgICAvLyAgICAgICAgICAgWz09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XQogICAg
Ly8gICAgICAgICAgIFsgTm9ybSAgICAgICAgICwgU2hpZnQgICAgICAgICwgQWx0ICAgICAgICAg
ICAgICAgICAgICwgU2hpZnQrQWx0ICAgLCBNb2QgICAgICAgICAgICAgICAsIFNoaWZ0K01vZCAg
ICAgICAgICwgTW9kK0FsdCAgLCBTaGlmdCtNb2QrQWx0IF0KICAgIC8vICAgICAgICAgICBbPT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT1dCiAgICBrZXkgPEFDMDE+IHsgWyBhICAgICAgICAgICAgLCBBICAg
ICAgICAgICAgLCBhb2dvbmVrICAgICAgICAgICAgICAgLCBBb2dvbmVrICAgICAsIGFhY3V0ZSAg
ICAgICAgICAgICwgQWFjdXRlICAgICAgICAgICAgLCBOb1N5bWJvbCAsIE5vU3ltYm9sICAgICAg
XSAsCiAgICAgICAgICAgICAgICAgWyBHcmVla19hbHBoYSAgLCBHcmVla19BTFBIQSAgLCBOb1N5
bWJvbCAgICAgICAgICAgICAgLCBOb1N5bWJvbCAgICAsIEdyZWVrX2FscGhhYWNjZW50ICwgR3Jl
ZWtfQUxQSEFhY2NlbnQgICAgICAgICAgICAgICAgICAgICAgICAgICAgXSB9OwogICAgLy8gICAg
ICAgICAgIFstLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAgIGtleSA8QUMwMj4geyBbIHMgICAgICAg
ICAgICAsIFMgICAgICAgICAgICAsIFUwMjE5ICAgICAgICAgICAgICAgICAsIFUwMjE4ICAgICAg
ICwgc2FjdXRlICAgICAgICAgICAgLCBTYWN1dGUgICAgICAgICAgICAsIE5vU3ltYm9sICwgTm9T
eW1ib2wgICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX3NpZ21hICAsIEdyZWVrX1NJ
R01BICAsIEdyZWVrX2ZpbmFsc21hbGxzaWdtYSAsIEdyZWVrX1NJR01BICwgTm9TeW1ib2wgICAg
ICAgICAgLCBOb1N5bWJvbCAgICAgICAgICAsIE5vU3ltYm9sICwgTm9TeW1ib2wgICAgICBdIH07
CiAgICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBQzAzPiB7
IFsgZCAgICAgICAgICAgICwgRCAgICAgICAgICAgICwgZHN0cm9rZSAgICAgICAgICAgICAgICwg
RHN0cm9rZSAgICAgLCBOb1N5bWJvbCAgICAgICAgICAsIE5vU3ltYm9sICAgICAgICAgICwgTm9T
eW1ib2wgLCBOb1N5bWJvbCAgICAgIF0gLAogICAgICAgICAgICAgICAgIFsgR3JlZWtfZGVsdGEg
ICwgR3JlZWtfREVMVEEgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBr
ZXkgPEFDMDQ+IHsgWyBmICAgICAgICAgICAgLCBGICAgICAgICAgICAgLCB0aG9ybiAgICAgICAg
ICAgICAgICAgLCBUSE9STiAgICAgICAsIE5vU3ltYm9sICAgICAgICAgICwgTm9TeW1ib2wgICAg
ICAgICAgLCBOb1N5bWJvbCAsIE5vU3ltYm9sICAgICAgXSAsCiAgICAgICAgICAgICAgICAgWyBH
cmVla19waGkgICAgLCBHcmVla19QSEkgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgXSB9OwogICAgLy8gICAgICAgICAgIFstLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLV0KICAgIGtleSA8QUMwNT4geyBbIGcgICAgICAgICAgICAsIEcgICAgICAgICAgICAsIGdj
ZWRpbGxhICAgICAgICAgICAgICAsIEdjZWRpbGxhICAgICwgVTAxRjUgICAgICAgICAgICAgLCBV
MDFGNCAgICAgICAgICAgICAsIE5vU3ltYm9sICwgTm9TeW1ib2wgICAgICBdICwKICAgICAgICAg
ICAgICAgICBbIEdyZWVrX2dhbW1hICAsIEdyZWVrX0dBTU1BICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBQzA2PiB7IFsgaCAgICAgICAgICAgICwgSCAgICAg
ICAgICAgICwgaHN0cm9rZSAgICAgICAgICAgICAgICwgSHN0cm9rZSAgICAgLCBOb1N5bWJvbCAg
ICAgICAgICAsIE5vU3ltYm9sICAgICAgICAgICwgTm9TeW1ib2wgLCBOb1N5bWJvbCAgICAgIF0g
LAogICAgICAgICAgICAgICAgIFsgR3JlZWtfZXRhICAgICwgR3JlZWtfRVRBICAgICwgTm9TeW1i
b2wgICAgICAgICAgICAgICwgTm9TeW1ib2wgICAgLCBHcmVla19ldGFhY2NlbnQgICAsIEdyZWVr
X0VUQWFjY2VudCAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIF0gfTsKICAgIC8vICAgICAg
ICAgICBbLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFDMDc+IHsgWyBqICAgICAgICAg
ICAgLCBKICAgICAgICAgICAgLCBudGlsZGUgICAgICAgICAgICAgICAgLCBOdGlsZGUgICAgICAs
IE5vU3ltYm9sICAgICAgICAgICwgTm9TeW1ib2wgICAgICAgICAgLCBOb1N5bWJvbCAsIE5vU3lt
Ym9sICAgICAgXSAsCiAgICAgICAgICAgICAgICAgWyBHcmVla194aSAgICAgLCBHcmVla19YSSAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgXSB9Owog
ICAgLy8gICAgICAgICAgIFstLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAgIGtleSA8QUMwOD4geyBb
IGsgICAgICAgICAgICAsIEsgICAgICAgICAgICAsIGtjZWRpbGxhICAgICAgICAgICAgICAsIEtj
ZWRpbGxhICAgICwgVTFFMzEgICAgICAgICAgICAgLCBVMUUzMCAgICAgICAgICAgICAsIE5vU3lt
Ym9sICwgTm9TeW1ib2wgICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX2thcHBhICAs
IEdyZWVrX0tBUFBBICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tXQogICAga2V5
IDxBQzA5PiB7IFsgbCAgICAgICAgICAgICwgTCAgICAgICAgICAgICwgbGNlZGlsbGEgICAgICAg
ICAgICAgICwgTGNlZGlsbGEgICAgLCBsYWN1dGUgICAgICAgICAgICAsIExhY3V0ZSAgICAgICAg
ICAgICwgTm9TeW1ib2wgLCBOb1N5bWJvbCAgICAgIF0gLAogICAgICAgICAgICAgICAgIFsgR3Jl
ZWtfbGFtYmRhICwgR3JlZWtfTEFNQkRBICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS1dCiAgICBrZXkgPEFDMTA+IHsgWyBvZGlhZXJlc2lzICAgLCBPZGlhZXJlc2lzICAgLCBvZSAg
ICAgICAgICAgICAgICAgICAgLCBPRSAgICAgICAgICAsIG9kb3VibGVhY3V0ZSAgICAgICwgT2Rv
dWJsZWFjdXRlICAgICAgLCBOb1N5bWJvbCAsIE5vU3ltYm9sICAgICAgXSAsCiAgICAgICAgICAg
ICAgICAgWyBOb1N5bWJvbCAgICAgLCBOb1N5bWJvbCAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgXSB9OwogICAgLy8gICAgICAgICAgIFstLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLV0KICAgIGtleSA8QUMxMT4geyBbIGFkaWFlcmVzaXMgICAsIEFkaWFlcmVz
aXMgICAsIGFlICAgICAgICAgICAgICAgICAgICAsIEFFICAgICAgICAgICwgTm9TeW1ib2wgICAg
ICAgICAgLCBOb1N5bWJvbCAgICAgICAgICAsIFUwMUZEICAgICwgVTAxRkMgICAgICAgICBdICwK
ICAgICAgICAgICAgICAgICBbIE5vU3ltYm9sICAgICAsIE5vU3ltYm9sICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBdIH07CiAgICAvLyAgICAgICAg
ICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tXQogICAga2V5IDxBQzEyPiB7IFsgZWRpYWVyZXNpcyAg
ICwgRWRpYWVyZXNpcyAgICwgd2RpYWVyZXNpcyAgICAgICAgICAgICwgV2RpYWVyZXNpcyAgLCBO
b1N5bWJvbCAgICAgICAgICAsIE5vU3ltYm9sICAgICAgICAgICwgTm9TeW1ib2wgLCBOb1N5bWJv
bCAgICAgIF0gLAogICAgICAgICAgICAgICAgIFsgTm9TeW1ib2wgICAgICwgTm9TeW1ib2wgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIF0gfTsKICAg
IC8vICAgICAgICAgICBbPT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT1dCiAgICAvLwogICAgLy8gUk9XIEIK
ICAgIC8vID09PT09CiAgICAvLyBVMUUwOCA9IENjZWRpbGxhYWN1dGUKICAgIC8vIFUxRTA5ID0g
Y2NlZGlsbGFhY3V0ZQogICAgLy8gVTFFM0UgPSBNYWN1dGUKICAgIC8vIFUxRTNGID0gbWFjdXRl
CiAgICAvLyBVMUU5RSA9IFNzaGFycAogICAgLy8KICAgIC8vICAgICAgICAgICBbPT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XQogICAg
Ly8gICAgICAgICAgIFsgTm9ybSAgICAgICAgLCBTaGlmdCAgICAgICAsIEFsdCAgICAgICwgU2hp
ZnQrQWx0ICwgTW9kICAgICAgICAgICAgICAgLCBTaGlmdCtNb2QgICAgICAgICAsIE1vZCtBbHQg
ICwgU2hpZnQrTW9kK0FsdCBdCiAgICAvLyAgICAgICAgICAgWz09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PV0KICAgIGtleSA8QUIwMT4g
eyBbIHogICAgICAgICAgICwgWiAgICAgICAgICAgLCBzc2hhcnAgICAsIFUxRTlFICAgICAsIHph
Y3V0ZSAgICAgICAgICAgICwgWmFjdXRlICAgICAgICAgICAgLCBOb1N5bWJvbCAsIE5vU3ltYm9s
ICAgICAgXSAsCiAgICAgICAgICAgICAgICAgWyBHcmVla196ZXRhICAsIEdyZWVrX1pFVEEgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
XQogICAga2V5IDxBQjAyPiB7IFsgeCAgICAgICAgICAgLCBYICAgICAgICAgICAsIHVicmV2ZSAg
ICwgVWJyZXZlICAgICwgTm9TeW1ib2wgICAgICAgICAgLCBOb1N5bWJvbCAgICAgICAgICAsIE5v
U3ltYm9sICwgTm9TeW1ib2wgICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX2NoaSAg
ICwgR3JlZWtfQ0hJICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgXSB9OwogICAg
Ly8gICAgICAgICAgIFstLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS1dCiAgICBrZXkgPEFCMDM+IHsgWyBjICAgICAgICAgICAsIEMgICAg
ICAgICAgICwgY2NlZGlsbGEgLCBDY2VkaWxsYSAgLCBjYWN1dGUgICAgICAgICAgICAsIENhY3V0
ZSAgICAgICAgICAgICwgVTFFMDkgICAgLCBVMUUwOCAgICAgICAgIF0gLAogICAgICAgICAgICAg
ICAgIFsgR3JlZWtfcHNpICAgLCBHcmVla19QU0kgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAgIGtleSA8QUIwND4geyBbIHYg
ICAgICAgICAgICwgViAgICAgICAgICAgLCBldGggICAgICAsIEVUSCAgICAgICAsIE5vU3ltYm9s
ICAgICAgICAgICwgTm9TeW1ib2wgICAgICAgICAgLCBOb1N5bWJvbCAsIE5vU3ltYm9sICAgICAg
XSAsCiAgICAgICAgICAgICAgICAgWyBHcmVla19vbWVnYSAsIEdyZWVrX09NRUdBICwgTm9TeW1i
b2wgLCBOb1N5bWJvbCAgLCBHcmVla19vbWVnYWFjY2VudCAsIEdyZWVrX09NRUdBYWNjZW50ICAg
ICAgICAgICAgICAgICAgICAgICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tXQogICAg
a2V5IDxBQjA1PiB7IFsgYiAgICAgICAgICAgLCBCICAgICAgICAgICAsIGF0aWxkZSAgICwgQXRp
bGRlICAgICwgTm9TeW1ib2wgICAgICAgICAgLCBOb1N5bWJvbCAgICAgICAgICAsIE5vU3ltYm9s
ICwgTm9TeW1ib2wgICAgICBdICwKICAgICAgICAgICAgICAgICBbIEdyZWVrX2JldGEgICwgR3Jl
ZWtfQkVUQSAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgXSB9OwogICAgLy8gICAg
ICAgICAgIFstLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS1dCiAgICBrZXkgPEFCMDY+IHsgWyBuICAgICAgICAgICAsIE4gICAgICAgICAg
ICwgbmNlZGlsbGEgLCBOY2VkaWxsYSAgLCBuYWN1dGUgICAgICAgICAgICAsIE5hY3V0ZSAgICAg
ICAgICAgICwgTm9TeW1ib2wgLCBOb1N5bWJvbCAgICAgIF0gLAogICAgICAgICAgICAgICAgIFsg
R3JlZWtfbnUgICAgLCBHcmVla19OVSAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICBdIH07CiAgICAvLyAgICAgICAgICAgWy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLV0KICAgIGtleSA8QUIwNz4geyBbIG0gICAgICAg
ICAgICwgTSAgICAgICAgICAgLCBlbmcgICAgICAsIEVORyAgICAgICAsIFUxRTNGICAgICAgICAg
ICAgICwgVTFFM0UgICAgICAgICAgICAgLCBOb1N5bWJvbCAsIE5vU3ltYm9sICAgICAgXSAsCiAg
ICAgICAgICAgICAgICAgWyBHcmVla19tdSAgICAsIEdyZWVrX01VICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgIF0gfTsKICAgIC8vICAgICAgICAgICBbPT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09
PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09PT09XQoKfTsKCi8vIFRo
ZSBSaWdodCBBbHQga2V5ICh3aGlsZSBwcmVzc2VkKSBjaG9vc2VzIGxldmVsIDMuCnBhcnRpYWwg
bW9kaWZpZXJfa2V5cwp4a2Jfc3ltYm9scyAiYWx0Z3Jfc3dpdGNoIiB7CiAgICBpbmNsdWRlICJs
ZXZlbDMobW9kaWZpZXJfbWFwcGluZykiCiAgICBrZXkgPFJBTFQ+IHsKICAgICAgICB0eXBlW0dy
b3VwMV0gPSAiT05FX0xFVkVMIiwKICAgICAgICBzeW1ib2xzW0dyb3VwMV0gPSBbIElTT19MZXZl
bDNfU2hpZnQgXQogICAgfTsKfTsKCi8vIFRoZSBbTW9kXSBrZXkgKHdoaWxlIHByZXNzZWQpIGNo
b29zZXMgbGV2ZWwgNS4KcGFydGlhbCBtb2RpZmllcl9rZXlzCnhrYl9zeW1ib2xzICJtb2Rfc3dp
dGNoIiB7CiAgICBpbmNsdWRlICJsZXZlbDUobW9kaWZpZXJfbWFwcGluZykiCiAgICBrZXkgPExT
R1Q+IHsKICAgICAgdHlwZVtHcm91cDFdID0gIk9ORV9MRVZFTCIsCiAgICAgIHN5bWJvbHNbR3Jv
dXAxXSA9IFsgSVNPX0xldmVsNV9TaGlmdCBdCiAgICB9Owp9OwoK
"""
if __name__ == "__main__":
main()
| python |
import logging
import random
import string
import sys
import textwrap
import time
import typing
from contextlib import contextmanager
from datetime import datetime, timedelta
from functools import lru_cache, wraps
import flask
from util.config_utils import iris_prefix
def cls_by_name(fully_qualified_classname):
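    """Import a module by dotted path and resolve the trailing attribute.

    For example "gce.Gce" imports module gce and returns its Gce class.
    Logs and re-raises if a component cannot be resolved.
    """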
parts = fully_qualified_classname.split(".")
fully_qualified_module_name = ".".join(parts[:-1])
module = __import__(fully_qualified_module_name)
for subcomponent in parts[1:]:
try:
module = getattr(module, subcomponent)
except AttributeError:
logging.exception(
f"Cannot load {fully_qualified_classname}. "
"Plugin classes must have the same name as their module "
"(file under the plugins directory), except that the "
"module name should be in lowercase and the class name in Titlecase, "
"as for example bigquery.Bigquery or gce.Gce.",
exc_info=True,
)
raise
return module
def shorten(o, length=400) -> str:
return textwrap.shorten(str(o), length)
def methods(o, pfx="") -> typing.List[typing.Callable]:
names = (
name
for name in dir(o.__class__)
if callable(getattr(o.__class__, name)) and name.startswith(pfx)
)
return [getattr(o, name) for name in names]
def random_str(length: int):
return "".join(
random.choices(
string.ascii_lowercase + string.digits + string.digits, # more digits
k=length,
)
)
def init_logging():
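    """Configure stdout logging for the service.

    A logging.Filter attaches a trace marker to each record, derived from
    the X-Cloud-Trace-Context header (falling back to a random id), and
    left empty outside a request context.
    """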
class ContextFilter(logging.Filter):
def filter(self, record):
try:
if hasattr(flask.request, "trace_msg"):
trace_msg = flask.request.trace_msg
else:
trace_id = flask.request.headers.get(
"X-Cloud-Trace-Context", random_str(30)
)
trace_id_trunc = truncate_middle(trace_id, 20)
trace_msg = " [Trace: " + trace_id_trunc + "]"
flask.request.trace_msg = trace_msg
except RuntimeError as e:
if "outside of request context" in str(e):
                    # Occurs during app startup
trace_msg = ""
else:
raise e
record.trace_msg = trace_msg
return True
f = ContextFilter()
h1 = logging.StreamHandler(sys.stdout)
h1.addFilter(filter=f)
logging.basicConfig(
handlers=[h1],
format=f"%(levelname)s [{iris_prefix()}]%(trace_msg)s %(message)s",
level=logging.INFO,
)
logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.ERROR)
logging.info("logging: Initialized logger")
def __log_end_timer(tag, start):
logging.info(f"Time {tag}: {int((time.time() - start) * 1000)} ms")
def log_time(func):
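    """Decorator that logs the wall-clock duration of each call in ms,
    tagged with the function name and, when present, the name (or type
    name) of its first argument."""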
@wraps(func)
def _time_it(*args, **kwargs):
start = time.time()
try:
return func(*args, **kwargs)
finally:
if args:
if hasattr(args[0], "__name__"):
name_base = args[0]
else:
name_base = type(args[0])
arg_s = name_base.__name__
else:
arg_s = ""
__log_end_timer(f"{func.__name__}({arg_s})", start)
return _time_it
@contextmanager
def timing(tag: str) -> None:
start = time.time()
yield
elapsed_ms = int((time.time() - start) * 1000)
logging.getLogger("Time").info("%s: %d ms", tag, elapsed_ms)
def timed_lru_cache(seconds: int, maxsize: int = 128):
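    """lru_cache variant whose whole cache expires after `seconds`.

    Expiry is coarse: the first call after the lifetime has elapsed clears
    the entire cache and starts a new lifetime window.
    """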
def wrapper_cache(func):
func = lru_cache(maxsize=maxsize)(func)
func.lifetime = timedelta(seconds=seconds)
func.expiration = datetime.utcnow() + func.lifetime
@wraps(func)
def wrapped_func(*args, **kwargs):
if datetime.utcnow() >= func.expiration:
func.cache_clear()
func.expiration = datetime.utcnow() + func.lifetime
return func(*args, **kwargs)
return wrapped_func
return wrapper_cache
def truncate_middle(s, resulting_len):
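    """Shorten `s` to at most `resulting_len` chars by replacing its middle
    with "..."; strings that cannot or need not be shortened are returned
    unchanged."""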
ellipsis_s = "..."
if resulting_len < len(ellipsis_s) + 2:
# "a...z" is shortest. The "+ 2" is for the starting and ending letters
return s
if len(s) <= len(ellipsis_s) + 2: # Truncate "ab" to "ab"
return s
if len(s) <= resulting_len: # No need to shorten
return s
len_remaining_strings = resulting_len - len(ellipsis_s)
half = len_remaining_strings // 2
len_sfx_string = half
len_pfx_string = half if len_remaining_strings % 2 == 0 else half + 1
pfx = s[:len_pfx_string]
sfx = s[-len_sfx_string:]
ret = pfx + ellipsis_s + sfx
return ret
| python |
import sys
from itertools import islice
try:
    from itertools import izip  # Python 2
except ImportError:  # Python 3: the built-in zip is already lazy
    izip = zip
def parse(lines):
return [int(line.split(" ")[-1]) for line in lines]
def generator(startValue, factor, multiple):
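    # Multiplicative congruential generator: repeatedly multiply the previous
    # value by `factor` modulo 2147483647, yielding only multiples of `multiple`.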
prevValue = startValue
while True:
prevValue = ( factor * prevValue ) % 2147483647
if prevValue % multiple == 0:
yield prevValue
def lowerBits(value):
return value & 0xffff
def sameLowerBits(valueA, valueB):
return lowerBits(valueA) == lowerBits(valueB)
def doit(lines):
generatorStarts = parse(lines)
generatorA = generator(generatorStarts[0], 16807, 4)
generatorB = generator(generatorStarts[1], 48271, 8)
return sum(1 for a, b in islice(izip(generatorA, generatorB), 5000000) if sameLowerBits(a, b))
if __name__ == "__main__":
print(doit(sys.stdin.readlines()))
| python |
import sys
import random
import helptext
from time import sleep
from threading import Timer
from mbientlab.metawear import MetaWear, libmetawear, parse_value
from mbientlab.metawear.cbindings import *
from mbientlab.warble import *
from resizable import *
if sys.version_info[0] < 3:
import Tkinter as Tk
import ttk
else:
import tkinter as Tk
    from tkinter import ttk
    import tkinter.messagebox as tkMessageBox
class Resizable():
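    """Base wrapper for canvas items that rescale with the canvas.

    Remembers the canvas dimensions at creation time so redraw() can map
    the original coordinates onto the current canvas size proportionally.
    """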
def __init__(self, canvas):
self.canvas = canvas
self.canvas_width_orig = canvas.width
self.canvas_height_orig = canvas.height
def redraw(self, x0, y0, x1, y1, **kwargs):
self.ratio_width = self.canvas.width / float(self.canvas_width_orig)
self.ratio_height = self.canvas.height / float(self.canvas_height_orig)
a = x0 * self.ratio_width
b = y0 * self.ratio_height
c = x1 * self.ratio_width
d = y1 * self.ratio_height
self.canvas.coords(self.object, a, b, c, d, **kwargs)
def itemconfig(self, **kwargs):
self.canvas.itemconfig(self.object, **kwargs)
class ResizablePlotPoint(Resizable):
def __init__(self, canvas, x0, y0, mag, **kwargs):
Resizable.__init__(self, canvas)
self.x0 = x0
self.y0 = y0
self.mag = mag
self.size = 3
self.object = canvas.create_oval(
x0 - self.size,
y0 - self.size,
x0 + self.size,
y0 + self.size,
**kwargs)
def redraw(self, **kwargs):
self.ratio_width = self.canvas.width / float(self.canvas_width_orig)
self.ratio_height = self.canvas.height / float(self.canvas_height_orig)
a = self.x0 * self.ratio_width
b = self.y0 * self.ratio_height
self.canvas.coords(
self.object,
a - self.size,
b - self.size,
a + self.size,
b + self.size,
**kwargs)
class ResizableRectangle(Resizable):
def __init__(self, canvas, x0, y0, x1, y1, **kwargs):
Resizable.__init__(self, canvas)
self.object = canvas.create_rectangle(x0, y0, x1, y1, **kwargs)
class ResizableLine(Resizable):
def __init__(self, canvas, x0, y0, x1, y1, **kwargs):
Resizable.__init__(self, canvas)
self.object = canvas.create_line(x0, y0, x1, y1, **kwargs)
class ResizableOval(Resizable):
def __init__(self, canvas, x0, y0, x1, y1, **kwargs):
Resizable.__init__(self, canvas)
self.object = canvas.create_oval(x0, y0, x1, y1, **kwargs)
class ResizableText(Resizable):
def __init__(self, canvas, x0, y0, **kwargs):
Resizable.__init__(self, canvas)
self.object = canvas.create_text(x0, y0, **kwargs)
def redraw(self, x0, y0, **kwargs):
self.ratio_width = self.canvas.width / float(self.canvas_width_orig)
self.ratio_height = self.canvas.height / float(self.canvas_height_orig)
a = x0 * self.ratio_width
b = y0 * self.ratio_height
self.canvas.coords(self.object, a, b, **kwargs)
class ResizableCanvas(Tk.Canvas):
def __init__(self, parent, **kwargs):
Tk.Canvas.__init__(self, parent, **kwargs)
self.bind("<Configure>", self.on_resize)
self.height = self.winfo_reqheight()
self.width = self.winfo_reqwidth()
def on_resize(self, event):
# determine the ratio of old width/height to new width/height
wscale = float(event.width) / self.width
hscale = float(event.height) / self.height
self.width = event.width
self.height = event.height
# resize the canvas
self.config(width=self.width, height=self.height)
# rescale all the objects tagged with the "all" tag
self.scale("all", 0, 0, wscale, hscale) | python |
real_value = float(input("enter real value (theoretical): "))  # true value
test_value = float(input("enter test value: "))  # measured value
err = abs(real_value - test_value) / real_value
print(f"err = {err}")
| python |
# Copyright 2010 Gregory L. Rosenblatt
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def tyErr(msg): raise RuntimeError(msg)
class Cons:
def __init__(self, name, vars): self.name = name; self.vars = vars
class TyExpr:
def freeVars(self): return set()
def subst(self, subs): return self, ()
def occurs(self, name): return False
def strengthen(self, cenv, mentions, parity, final): return self
def __repr__(self): return '%s(%s)'%(self.__class__.__name__, str(self))
def __str__(self): return ''
class TyExtreme(TyExpr):
def __init__(self, name, relat): self.name = name; self.relat = relat
def __str__(self): return self.name
def constrain(self, subs, cenv, rhs, relat):
if relat != self.relat and self is not rhs:
tyErr('invalid type constraint: %s %s %s'%
(self.name, ('<:', '<:>',':>')[relat+1], rhs))
def merge(self, subs, cenv, rhs, parity, grow):
if self.relat*parity > 0: return self
return rhs
def contains(self, cenv, ty, parity):
return self.relat*parity > 0 or self is ty
tyTop = TyExtreme('Top', 1); tyBot = TyExtreme('Bot', -1)
def mapFrees(args): return set().union(*(arg.freeVars() for arg in args))
def mapSubs(subs, args0, ret, mk):
args = [subst(subs, arg) for arg in args0]
if all(a1 == a2 for a1, a2 in zip(args, args0)): return ret, ()
return mk(args), ()
def mapOccurs(name, args): return any(arg.occurs(name) for arg in args)
class TyCons(TyExpr):
def __init__(self, cons, args): self.cons = cons; self.args = args
def __str__(self):
if self.args:
if (not self.cons.name.isalnum()) and len(self.args) == 2:
return '(%s %s %s)'%(self.args[0],self.cons.name,self.args[1])
return '(%s)'%(self.cons.name+' '+' '.join(map(str, self.args)))
return self.cons.name
def freeVars(self): return mapFrees(self.args)
def subst(self, subs):
return mapSubs(subs, self.args, self,
lambda args1: TyCons(self.cons, args1))
def occurs(self, name): return mapOccurs(name, self.args)
def strengthen(self, cenv, mentions, parity, final):
args = [arg.strengthen(cenv, mentions, parity*var, final)
for arg, var in zip(self.args, self.cons.vars)]
return TyCons(self.cons, args)
def constrain(self, subs, cenv, rhs, relat):
if not isinstance(rhs, TyCons) or self.cons is not rhs.cons:
tyErr('invalid constraint') # todo
for lhs, rhs, variance in zip(self.args, rhs.args, self.cons.vars):
constrain(subs, cenv, lhs, rhs, relat*variance)
def merge(self, subs, cenv, ty, parity, grow):
if isinstance(ty, TyCons) and ty.cons is self.cons:
args = [merge(subs, cenv, lhs, rhs, parity*var, grow)
for lhs, rhs, var in
zip(self.args, ty.args, self.cons.vars)]
return TyCons(self.cons, args)
elif parity == 0: tyErr("cannot equate '%s' and '%s'"%(self, ty))
elif parity > 0:
if isinstance(ty, TyCons): return TyVariant([self, ty])
return tyTop
else: return tyBot
def contains(self, cenv, ty, parity):
if isinstance(ty, TyCons) and ty.cons is self.cons:
return all(contains(cenv, lhs, rhs, parity*var)
for lhs, rhs, var in
zip(self.args, ty.args, self.cons.vars))
else: return ty is tyBot
class TyVariant(TyExpr):
def __init__(self, conss): self.conss = conss; assert len(conss) > 1
def __str__(self):
return '{%s}'%' '.join(str(cons) for cons in self.conss)
def freeVars(self): return mapFrees(self.conss)
def subst(self, subs): return mapSubs(subs, self.conss, self, TyVariant)
def occurs(self, name): return mapOccurs(name, self.conss)
def strengthen(self, cenv, mentions, parity, final):
return TyVariant([cns.strengthen(cenv, mentions, parity, final)
for cns in self.conss])
def constrain(self, subs, cenv, rhs, relat):
if isinstance(rhs, TyCons):
if relat > 0:
for cons in self.conss:
if cons.cons is rhs.cons:
return constrain(subs, cenv, cons, rhs, relat)
tyErr('variant... constructor') # todo
elif isinstance(rhs, TyVariant):
if relat == 0:
lhs = sorted((id(cons.cons), cons) for cons in self.conss)
rhs = sorted((id(cons.cons), cons) for cons in rhs.conss)
if len(lhs) != len(rhs): tyErr('unmatched variant sizes')
                for (_, lc), (_, rc) in zip(lhs, rhs):
                    lc.constrain(subs, cenv, rc, relat)
else:
if relat < 0: lhs = rhs; rhs = self
else: lhs = self
for cons in rhs.conss: lhs.constrain(subs, cenv, cons, relat)
else: tyErr('invalid variant constraint') # todo
def merge(self, subs, cenv, ty, parity, grow):
if isinstance(ty, TyCons):
for idx, cons in enumerate(self.conss):
if cons.cons is ty.cons:
merged = cons.merge(subs, cenv, ty, parity, grow)
if parity < 0 or not isinstance(merged, TyCons):
return merged
else:
if merged is cons: return self
return TyVariant(self.conss[:idx]+[merged]+
self.conss[idx+1:])
if parity > 0: return TyVariant(self.conss+[ty])
elif isinstance(ty, TyVariant):
match = dict((cons.cons, cons) for cons in ty.conss); acc = []
for cons in self.conss:
other = match.get(cons.cons)
                if other is None:
                    if parity > 0: acc.append(cons)
else:
acc.append(cons.merge(subs, cenv, other, parity, grow))
del match[cons.cons]
if parity > 0: acc.extend(list(match.values()))
if len(acc) > 1: return TyVariant(acc)
elif len(acc) == 1: return acc[0]
else: return tyBot
if parity > 0: return tyTop
else: return tyBot
def contains(self, cenv, ty, parity):
if isinstance(ty, TyVariant):
return all(contains(cenv, self, cons, parity) for cons in ty.conss)
elif isinstance(ty, TyCons):
for cons in self.conss:
if cons.cons is ty.cons:
return all(contains(cenv, lhs, rhs, parity*var)
for lhs, rhs, var in
zip(cons.args, ty.args, cons.cons.vars))
else: return ty is tyBot
class TyUQfied(TyExpr):
def __init__(self, bqs, body): self.bqs = bqs; self.body = body
def __str__(self):
return '(all [%s] => %s)'%(', '.join('%s<:%s'%(qn, bnd)
for qn, bnd in self.bqs), self.body)
    def _boundVars(self): return tuple(zip(*self.bqs))[0]
def freeVars(self): return self.body.freeVars() - set(self._boundVars())
def subst(self, subs):
qns = self._boundVars()
body = subst([sub for sub in subs if sub[0] not in qns], self.body)
if body is self.body: return self, ()
return TyUQfied(self.bqs, body), ()
def occurs(self, name):
return (name not in self._boundVars()) and self.body.occurs(name)
def _instantiate(self, cenv, relat):
subs = []
for qn, bnd in self.bqs:
newName, _ = fresh(cenv, qn)
if relat >= 0: bnd = TyQVar(newName.name, bnd)
newName.constrain([], cenv, bnd, -1)
subs.append((qn, newName))
print('subs:', subs)
return subst(subs, self.body)
def constrain(self, subs, cenv, rhs, relat):
constrain(subs, cenv, self._instantiate(cenv, relat), rhs, relat)
def merge(self, subs, cenv, ty, parity, grow):
return merge(subs, cenv, self._instantiate(cenv, parity), ty, parity,
grow)
def contains(self, cenv, ty, parity):
return contains(cenv, self._instantiate(cenv, parity), ty, parity)
class TyQVar(TyExpr):
def __init__(self, name, bnd): self.name = name; self.bnd = bnd
def __str__(self): return '(%s<:%s)'%(self.name, self.bnd)
def constrain(self, subs, cenv, rhs, relat):
if rhs is self: return
        if relat < 0: return constrain(subs, cenv, self.bnd, rhs, relat)
tyErr('invalid quantified var constraint: %s <: %s'%(rhs, self))
def merge(self, subs, cenv, ty, parity, grow):
if ty is self: return self
if parity > 0: return merge(subs, cenv, self.bnd, ty, parity, grow)
elif parity < 0: return tyBot
tyErr('cannot equate %s and %s'%(self, ty))
def contains(self, cenv, ty, parity):
if ty is self: return True
if parity < 0: return contains(cenv, self.bnd, ty, parity)
return False
class TyVar(TyExpr):
def __init__(self, name): self.name = name
def __str__(self): return self.name
def identical(self, cenv, ty):
return isinstance(ty, TyVar) and (ty.name == self.name or
cenv[ty.name] is cenv[self.name])
def freeVars(self): return {self.name}
def subst(self, subs):
for idx, (nm, ty) in enumerate(subs):
if self.name == nm: return ty, subs[idx:]
return self, ()
def occurs(self, name): return self.name == name
def strengthen(self, cenv, mentions, parity, final):
if final and mentions[self.name] > 1: return self
cx = cenv[self.name]
if cx.invar: return cx.invar.strengthen(cenv, mentions, parity, final)
if parity == 1:
if final or cx.contravar.bnd is not tyBot:
return cx.contravar.bnd.strengthen(cenv, mentions, parity,
final)
elif (final or isinstance(cx.covar.bnd, TyCons) or
cx.covar.bnd.freeVars()):
return cx.covar.bnd.strengthen(cenv, mentions, parity, final)
count = mentions.setdefault(cx.name, 0); mentions[cx.name] += 1
return TyVar(cx.name)#.strengthen(cenv, mentions, parity, final)
def constrain(self, subs, cenv, rhs, relat):
print('uh oh:', self, '?', rhs)
if self.identical(cenv, rhs): return
if relat == 0: cenv[self.name].equate(subs, cenv, rhs, True)
else:
lc = cenv[self.name]
if isinstance(rhs, TyVar):
rc = cenv[rhs.name]
if relat > 0: high, low = lc, rc
else: high, low = rc, lc
high.link(low)
else: lc.merge(subs, cenv, rhs, relat, True)
def merge(self, subs, cenv, ty, parity, grow):
if self.identical(cenv, ty): return self
varc = cenv[self.name]
if parity == 0: varc.equate(subs, cenv, ty, grow); return ty
else:
if grow: bnd = varc.parity(parity).bnd
else: bnd = varc.upperBound().bnd
maybe = merge(subs, cenv, bnd, ty, parity, False)
if not grow or (isinstance(maybe, TyExtreme) and
maybe.relat*parity > 0): return maybe
var, csrnt = fresh(cenv)
csrnt.merge(subs, cenv, ty, parity, grow)
csrnt.mergeC(varc, parity)
return var
def contains(self, cenv, ty, parity): # todo: chokes on recursive types
return contains(cenv, cenv[self.name].upperBound().bnd, ty, parity)
def makeVar(cenv, name, parity):
csrnt = Constraint(name, parity); cenv[name] = csrnt
return TyVar(name), csrnt
uid = 0
def fresh(cenv, nm=''):
global uid
name = '$UID_%s_%s'%(uid, nm); uid += 1; return makeVar(cenv, name, 1)
def subst(subs, ty):
print('subst:', ty)
while subs: ty, subs = ty.subst(subs); print('subst:', ty)
return ty
def ordered(lhs, rhs, ordering):
for tyty in ordering:
if isinstance(lhs, tyty): return True
if isinstance(rhs, tyty): return False
return True
cxOrder = TyUQfied, TyVar, TyExtreme, TyVariant
def constrain(subs, cenv, lhs, rhs, relat):
lhs = subst(subs, lhs); rhs = subst(subs, rhs)
if not ordered(lhs, rhs, cxOrder): relat*=-1; lhs,rhs = rhs,lhs
lhs.constrain(subs, cenv, rhs, relat)
def merge(subs, cenv, lhs, rhs, parity, grow):
if not ordered(lhs, rhs, (TyExtreme, TyUQfied, TyVar, TyVariant)):
lhs,rhs = rhs,lhs
return lhs.merge(subs, cenv, rhs, parity, grow)
def contains(cenv, lhs, rhs, parity):
if not ordered(lhs, rhs, cxOrder): parity*=-1; lhs,rhs = rhs,lhs
return lhs.contains(cenv, rhs, parity)
def identical(cenv, lhs, rhs):
return contains(cenv, lhs, rhs, -1) and contains(cenv, lhs, rhs, 1)
class Bound:
def __init__(self, initBnd): # todo: fill backDeps during DFS
self.bnd = initBnd; self.deps = set(); self.backDeps = set()
def __str__(self): return '%s, %s'%(self.bnd, list(self.deps))
# def __str__(self): return '%s'%self.bnd
    def mergeBound(self, subs, cenv, bnd, parity, grow):
        self.deps |= bnd.deps
        self.bnd = merge(subs, cenv, self.bnd, bnd.bnd, parity, grow)
def discardDeps(self, deps): self.deps -= deps
class Constraint:
def __init__(self, name, parity):
self.name = name; self.invar = None
self.covar = Bound(tyTop); self.contravar = Bound(tyBot)
self.bndParity = {1: self.contravar, -1: self.covar}
self.finalParity = parity
def __repr__(self):
return 'CX(%s, %s <: %s)'%(self.name, self.contravar, self.covar)
def equate(self, subs, cenv, ty, grow):
self.invar = ty; subs.append((self.name, ty))
if isinstance(ty, TyVar):
csrnt = cenv[ty.name]; cenv[self.name] = csrnt
csrnt.covar.mergeBound(subs, cenv, self.covar, -1, grow)
csrnt.contravar.mergeBound(subs, cenv, self.contravar, 1, grow)
else: self.meet(subs, cenv, ty, grow)#; self.join(subs, cenv, ty, grow)
def link(self, low):
self.contravar.deps.add(low.name); low.covar.deps.add(self.name)
def mergeC(self, csrnt, relat):
if relat > 0: lhs,rhs = self, csrnt
elif relat < 0: lhs,rhs = csrnt, self
lhs.link(rhs)
def merge(self, subs, cenv, ty, relat, grow):
if relat > 0: self.join(subs, cenv, ty, grow)
elif relat < 0: self.meet(subs, cenv, ty, grow)
else: self.equate(subs, cenv, ty, grow)
def join(self, subs, cenv, ty, grow):
self.contravar.bnd = merge(subs, cenv, self.contravar.bnd, ty, 1,grow)
def meet(self, subs, cenv, ty, grow):
self.covar.bnd = merge(subs, cenv, self.covar.bnd, ty, -1, grow)
def parity(self, parity): return self.bndParity[parity]
def upperBound(self): return self.parity(-1)
def check(self, cenv):
if not contains(cenv, self.covar.bnd, self.contravar.bnd, 1):
tyErr("failed constraint '%s': %s <: %s"%
(self.name, self.contravar.bnd, self.covar.bnd))
if self.invar and not contains(cenv, self.covar.bnd, self.invar, 1):
tyErr("failed constraint invariant '%s': %s <: %s"%
(self.name, self.invar, self.covar.bnd))
# todo: this all ends up incorrect thanks to constraint bounds with type vars
def dfs(cenv, cx, parity, finished, seen):
if cx in seen: return
seen.add(cx)
for dep in cx.parity(parity).deps|cx.parity(parity).bnd.freeVars():
dfs(cenv, cenv[dep], parity, finished, seen)
finished.append(cx)
def depthReach(cenv, cs, parity, components, seen):
while cs:
cx = cs.pop()
if cx in seen: continue
print('cx:', cx.name)
component = []; components.append(component)
dfs(cenv, cx, parity, component, seen)
def depSort(cenv):
seen = set(); cs = set(cenv.values()); orders = []
depthReach(cenv, cs, -1, orders, seen)
print('orders:\n', '\n'.join(map(str, orders)))
seen = set(); components = []
for order in reversed(orders):
depthReach(cenv, order, 1, components, seen)
print('components:\n', '\n'.join(map(str, components)))
return components
def mergeDeps(subs, cenv, cx, parity, ignore=set()):
bnd = cx.parity(parity).bnd
cx.parity(parity).discardDeps(ignore)
for name in cx.parity(parity).deps:
dep = cenv[name]
bnd = merge(subs, cenv, bnd, dep.parity(parity).bnd, parity, False)
cx.parity(parity).bnd = bnd
def mergeComp(subs, cenv, comp, parity):
tgt = comp[0]; comp = set(comp); comp.remove(tgt)
for cx in comp: mergeDeps(subs, cenv, cx, parity, comp)
tgt.parity(parity).deps |= set(cy.name for cy in comp)
mergeDeps(subs, cenv, tgt, parity)
def mergeComponents(subs, cenv, components, parity):
for comp in components:
if len(comp) == 1: mergeDeps(subs, cenv, comp[0], parity)
else: mergeComp(subs, cenv, comp, parity)
def satisfy(subs, cenv):
components = depSort(cenv)
mergeComponents(subs, cenv, reversed(components), -1)
mergeComponents(subs, cenv, components, 1)
for comp in components:
tgt = comp[0]
if len(comp) > 1:
for cx in comp[1:]: cenv[cx.name] = tgt
tgt.check(cenv)
deps = tgt.contravar.deps
if len(deps) == 1: # coalesce matching single-dep contravar constraints
dep = cenv[list(deps)[0]]
if identical(cenv, dep.covar.bnd, tgt.covar.bnd):
cenv[tgt.name] = dep
# todo: rethink parities approach
def quantify(cenv, ty):
mentions = {}
ty = ty.strengthen(cenv, mentions, 1, False)
print('strengthen:', mentions, ty)
ty = ty.strengthen(cenv, mentions, 1, True)
print('final:', mentions, ty)
bqs = [(name, cenv[name].upperBound().bnd)
for name, count in mentions.items() if count > 1]
if bqs: ty = TyUQfied(bqs, ty)
return ty
if __name__ == '__main__':
cenv = {}; subs = []
def mkv(name, parity=1): return makeVar(cenv, name, parity)[0]
def stat():
print('status:')
for k, v in cenv.items(): print(k, '::', v)
def go(): satisfy(subs, cenv)
def test(): stat(); go(); stat()
def mkarr(*tys):
tys = list(tys); res = tys.pop()
while tys: res = TyCons(arrow, (tys.pop(), res))
return res
def qfy(ty): return quantify(cenv, ty)
arrow = Cons('->', (-1, 1)); intc = Cons('Int', ());
pair = Cons('Pair', (1, 1))
intTy = TyCons(intc, ())
addTy = TyCons(arrow, (intTy, TyCons(arrow, (intTy, intTy))))
pairTy = TyCons(pair, (intTy, tyTop))
nilTy = TyCons(Cons('Nil', ()), ())
listTy = TyVariant([pairTy, nilTy])
pconsdef = mkarr(tyTop, tyTop, pairTy)
def mkPairTy(a, b): return TyCons(pair, (a, b))
def mkListTy(x): return TyVariant([nilTy, mkPairTy(x, tyTop)])
polypconsdef = TyUQfied([('A', tyTop), ('B', tyTop)],
mkarr(TyVar('A'), TyVar('B'),
mkPairTy(TyVar('A'), TyVar('B'))))
selectTy = mkarr(pairTy, intTy)
fTy = TyUQfied([('X', tyTop), ('Y', tyTop)],
mkarr(TyVar('X'), mkarr(TyVar('X'), TyVar('Y')), TyVar('Y')))
# gv = mkv('g'); xv = mkv('x'); gvr = mkv('$g')
# gdef = mkarr(xv, gvr)
# constrain(subs, cenv, gv, gdef, 0)
# # gbodyr = mkv('gbodyr'); gapp1r = mkv('gapp1r')
# gapp2r = mkv('gapp2r')
# # gbody = mkarr(gapp1r, gapp2r, gbodyr)
# # gapp1 = mkarr(xv, selectTy, gapp1r)
# gapp2 = mkarr(xv, selectTy, gapp2r)
# # constrain(subs, cenv, fTy, gapp1, -1)
# constrain(subs, cenv, fTy, gapp2, -1)
# constrain(subs, cenv, gvr, gapp2r, 1)
# constrain(subs, cenv, pconsdef, gbody, -1)
# constrain(subs, cenv, gvr, gbodyr, 1)
qdef = TyUQfied([('Q', listTy)], mkarr(TyVar('Q'), listTy, TyVar('Q')))
rdef = TyUQfied([('R', tyTop)],
mkarr(mkPairTy(TyVar('R'), tyTop),
mkListTy(TyVar('R')), intTy))
sdef = mkarr(nilTy, pairTy, listTy)
fv = mkv('f'); xv = mkv('x', -1); hv = mkv('h', -1)
fvr = mkv('$f'); fbodyr = mkv('fbodyr')
fdef = mkarr(xv, hv, fvr)
constrain(subs, cenv, fv, fdef, 0)
fapp1r = mkv('fapp1r'); fapp2r = mkv('fapp2r')
fbody = mkarr(fapp1r, fapp2r, fbodyr)
fapp1 = mkarr(xv, hv, fapp1r)
fapp2 = mkarr(xv, hv, fapp2r)
constrain(subs, cenv, qdef, fapp1, -1)
constrain(subs, cenv, rdef, fapp2, -1)
constrain(subs, cenv, polypconsdef, fbody, -1)
# fbody = mkarr(xv, fbodyr)
# constrain(subs, cenv, hv, fbody, -1)
constrain(subs, cenv, fvr, fbodyr, 1)
# gv = mkv(cenv, 'g'); yv = mkv(cenv, 'y'); jv = mkv(cenv, 'j')
# gvr = mkv(cenv, '$g'); gbodyr = mkv(cenv, 'gbodyr')
# gdef = mkarr(yv, gvr)
# constrain(subs, cenv, gv, gdef, 0)
# gbody = mkarr(yv, gbodyr)
# # constrain(subs, cenv, pconsdef, fbody, -1)
# constrain(subs, cenv, gdef, fbody, -1)
# constrain(subs, cenv, fvr, fbodyr, 1)
# constrain(subs, cenv, fdef, gbody, -1)
# constrain(subs, cenv, gvr, gbodyr, 1)
# fbody = TyCons(arrow, (xv, TyCons(arrow, (yv, fvr))))
# fdef = TyCons(arrow, (xv, TyCons(arrow, (yv, fvr))))
# constrain(subs, cenv, fv, fdef, 0)
# constrain(subs, cenv, addTy, fbody, -1)
# # constrain(subs, cenv, fv, fbody, -1)
| python |
# 11.4. Dictionary methods
"""
Dictionaries have a number of useful built-in methods. The following table
provides a summary and more details can be found in the Python Documentation.
Method   Parameters   Description
------   ----------   -----------
keys     none         Returns a view of the keys in the dictionary
values   none         Returns a view of the values in the dictionary
items    none         Returns a view of the key-value pairs in the dictionary
get      key          Returns the value associated with key; None otherwise
get      key, alt     Returns the value associated with key; alt otherwise
As we saw earlier with strings and lists, dictionary methods use dot notation,
which specifies the name of the method to the right of the dot and the name of
the object on which to apply the method immediately to the left of the dot.
The empty parentheses in the case of keys indicate that this method takes no
parameters. If x is a variable whose value is a dictionary, x.keys is the
method object, and x.keys() invokes the method, returning a view of the keys.
The keys method returns the keys, not necessarily in the same order they were
added to the dictionary or any other particular order.
"""
inventory = {'apples': 430, 'bananas': 312, 'oranges': 525, 'pears': 217}
for akey in inventory.keys(): # the order in which we get the keys is
# not defined
print("Got key", akey, "which maps to value", inventory[akey])
ks = list(inventory.keys())
print(ks)
"""
It’s so common to iterate over the keys in a dictionary that you can omit the
keys method call in the for loop — iterating over a dictionary implicitly
iterates over its keys.
"""
inventory = {'apples': 430, 'bananas': 312, 'oranges': 525, 'pears': 217}
for k in inventory:
print("Got key", k)
"""
The values and items methods are similar to keys. They return the objects
which can be iterated over. Note that the item objects are tuples containing
the key and the associated value.
"""
inventory = {'apples': 430, 'bananas': 312, 'oranges': 525, 'pears': 217}
print(list(inventory.values()))
print(list(inventory.items()))
for k in inventory:
print("Got", k, "that maps to", inventory[k])
"""
Note
Technically, .keys(), .values(), and .items() don’t return actual lists.
Like the range function described previously, in python 3 they return objects
that produce the items one at a time, rather than producing and storing all of
them in advance as a list. Unless the dictionary has a whole lot of keys, this
won’t make a difference for performance. In any case, as with the range
function, it is safe for you to think of them as returning lists, for most
purposes. For the python interpreter built into this textbook, they actually
do produce lists. In a native python interpreter, if you print out
type(inventory.keys()), you will find that it is something other than an actual
list. If you want to get the first key, inventory.keys()[0] works in the online
textbook, but in a real python interpreter, you need to make the collection of
keys into a real list before using [0] to index into it:
list(inventory.keys())[0].
"""
# The in and not in operators can test if a key is in the dictionary:
inventory = {'apples': 430, 'bananas': 312, 'oranges': 525, 'pears': 217}
print('apples' in inventory)
print('cherries' in inventory)
if 'bananas' in inventory:
print(inventory['bananas'])
else:
print("We have no bananas")
"""
This operator can be very useful since looking up a non-existent key in a
dictionary causes a runtime error.
The get method allows us to access the value associated with a key, similar to
the [ ] operator. The important difference is that get will not cause a runtime
error if the key is not present. It will instead return None. There exists a
variation of get that allows a second parameter that serves as an alternative
return value in the case where the key is not present. This can be seen in the
final example below. In this case, since “cherries” is not a key, return 0
(instead of None).
"""
inventory = {'apples': 430, 'bananas': 312, 'oranges': 525, 'pears': 217}
print(inventory.get("apples"))
print(inventory.get("cherries"))
print(inventory.get("cherries", 0))
# Check your understanding
#
# dictionaries-3-1: What is printed by the following statements?
mydict = {"cat": 12, "dog": 6, "elephant": 23, "bear": 20}
answer = mydict.get("cat") // mydict.get("dog")
print(answer)
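# Prints 2 (12 // 6).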
# dictionaries-3-2: What is printed by the following statements?
mydict = {"cat": 12, "dog": 6, "elephant": 23, "bear": 20}
print("dog" in mydict)
# [x] True
# [] False
# dictionaries-3-3: What is printed by the following statements?
mydict = {"cat": 12, "dog": 6, "elephant": 23, "bear": 20}
print(23 in mydict)
# [] True
# [x] False
# dictionaries-3-4: What is printed by the following statements?
total = 0
mydict = {"cat": 12, "dog": 6, "elephant": 23, "bear": 20}
for akey in mydict:
if len(akey) > 3:
total = total + mydict[akey]
print(total)
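# Prints 43: only "elephant" (23) and "bear" (20) have keys longer than 3 characters.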
"""
5. Every four years, the summer Olympics are held in a different country.
Add a key-value pair to the dictionary places that reflects that the 2016
Olympics were held in Brazil. Do not rewrite the entire dictionary to do this!
"""
places = {"Australia": 2000, "Greece": 2004, "China": 2008, "England": 2012,
"Brazil": 2016}
print(places)
"""
6. We have a dictionary of the specific events that Italy has won medals in
and the number of medals they have won for each event. Assign to the variable
events a list of the keys from the dictionary medal_events. Do not hard
code this.
"""
medal_events = {'Shooting': 7, 'Fencing': 4, 'Judo': 2, 'Swimming': 3,
'Diving': 2}
events = list(medal_events.keys())
| python |
from abc import ABC, abstractmethod
import pandas as pd
class Interpolator(ABC):
@abstractmethod
def get_approximate_value(self, x: float, table: pd.DataFrame) -> float:
raise NotImplementedError
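# Minimal concrete sketch (illustrative only; assumes the table has "x" and "y" columns):
# class NearestInterpolator(Interpolator):
#     def get_approximate_value(self, x: float, table: pd.DataFrame) -> float:
#         # return the y value of the row whose x is closest to the query point
#         return float(table.loc[(table["x"] - x).abs().idxmin(), "y"])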
| python |
"""
Logging module.
"""
import logging
class Logger:
"""
Logger helper.
"""
loggers = {}
level = logging.WARNING
def __init__(self, logger):
self.__level = Logger.level
self.__logger = logging.getLogger(logger)
# set formatter
#formatter = logging.Formatter('[%(name)s] - %(levelname)s - %(message)s')
formatter = logging.Formatter('[%(levelname)s] %(message)s')
self.__channel = logging.StreamHandler()
self.__channel.setLevel(self.__level)
self.__channel.setFormatter(formatter)
self.__logger.addHandler(self.__channel)
def __getattr__(self, attr):
if hasattr(self.__logger, attr):
return getattr(self.__logger, attr)
else:
raise AttributeError()
def setLevel_(self, level):
print("set level to %d" % level)
self.__level = level
        self.__logger.setLevel(level)
        self.__channel.setLevel(level)  # keep the handler in sync with the logger
@staticmethod
def setLevel(level):
"""
Set loggers level.
@param level int Logging level
"""
Logger.level = level
for i in Logger.loggers.keys():
Logger.loggers[i].setLevel_(Logger.level)
@staticmethod
def single(loggerName):
"""
Get logger singleton based on module name.
@param string loggerName Module name
@return object Logger instance.
"""
if loggerName not in Logger.loggers:
Logger.loggers[loggerName] = Logger(loggerName)
return Logger.loggers[loggerName]
def warning(module, message):
Logger.single(module).warning(message)
def error(module, message):
Logger.single(module).error(message)
def info(module, message):
Logger.single(module).info(message)
def debug(module, message):
Logger.single(module).debug(message)
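# Illustrative usage (not part of the original module):
# Logger.setLevel(logging.DEBUG)              # switch every logger to DEBUG
# Logger.single(__name__).info("started")     # per-module singleton instance
# info(__name__, "started")                   # module-level shortcut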
| python |
"""Test prsw.api.looking_glass."""
import pytest
from datetime import datetime
from typing import Iterable
from unittest.mock import patch
from .. import UnitTest
from prsw.api import API_URL, Output
from prsw.stat.looking_glass import LookingGlass
class TestLookingGlass(UnitTest):
RESPONSE = {
"messages": [],
"see_also": [],
"version": "2.1",
"data_call_status": "supported",
"cached": False,
"data": {
"rrcs": [
{
"rrc": "RRC00",
"location": "Amsterdam, Netherlands",
"peers": [
{
"asn_origin": "1205",
"as_path": "34854 6939 1853 1853 1205",
"community": "34854:1009",
"last_updated": "2021-04-15T08:21:07",
"prefix": "140.78.0.0/16",
"peer": "2.56.11.1",
"origin": "IGP",
"next_hop": "2.56.11.1",
"latest_time": "2021-04-15T12:51:19",
},
],
},
],
"query_time": "2021-04-15T12:51:22",
"latest_time": "2021-04-15T12:51:04",
"parameters": {"resource": "140.78.0.0/16"},
},
"query_id": "20210415125122-96ed15ff-31d8-41b9-b1d0-d0c3f293f0c1",
"process_time": 79,
"server_id": "app114",
"build_version": "live.2021.4.14.157",
"status": "ok",
"status_code": 200,
"time": "2021-04-15T12:45:22.211516",
}
def setup(self):
url = f"{API_URL}{LookingGlass.PATH}data.json?resource=140.78.0.0/16"
self.api_response = Output(url, **TestLookingGlass.RESPONSE)
self.params = {
"preferred_version": LookingGlass.VERSION,
"resource": "140.78.0.0/16",
}
return super().setup()
@pytest.fixture(scope="session")
def mock_get(self):
self.setup()
with patch.object(self.ripestat, "_get") as mocked_get:
mocked_get.return_value = self.api_response
yield self
mocked_get.assert_called_with(LookingGlass.PATH, self.params)
def test__init__valid_resource(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
assert isinstance(response, LookingGlass)
def test__init__invalid_resource(self):
with pytest.raises(ValueError):
LookingGlass(self.ripestat, resource="invalid-prefix")
def test__getitem__(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
assert isinstance(response["RRC00"], tuple) # namedtuple: RRC by RRC key
def test__iter__(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
assert isinstance(response, Iterable)
def test__len__(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
assert len(response) == len(TestLookingGlass.RESPONSE["data"]["rrcs"])
def test_objectify_rrcs(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
for collector in response:
assert isinstance(collector, tuple) # namedtuple: RRC
assert "rrc" in collector.__dir__()
assert "location" in collector.__dir__()
assert "peers" in collector.__dir__()
for peer in collector.peers:
assert isinstance(peer, tuple) # namedtuple: Peer
assert "asn_origin" in peer.__dir__()
assert "as_path" in peer.__dir__()
assert "community" in peer.__dir__()
assert "last_updated" in peer.__dir__()
assert "prefix" in peer.__dir__()
assert "peer" in peer.__dir__()
assert "origin" in peer.__dir__()
assert "next_hop" in peer.__dir__()
assert "latest_time" in peer.__dir__()
def test_latest_time(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
latest_time = TestLookingGlass.RESPONSE["data"]["latest_time"]
assert response.latest_time == datetime.fromisoformat(latest_time)
def test_query_time(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
time = TestLookingGlass.RESPONSE["data"]["query_time"]
assert response.query_time == datetime.fromisoformat(time)
def test_peers(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
assert isinstance(response.peers, list)
for peer in response.peers:
assert isinstance(peer, tuple) # namedtuple: Peer
def test_rrcs(self, mock_get):
response = LookingGlass(mock_get.ripestat, resource=self.params["resource"])
assert isinstance(response.rrcs, dict)
for name, route_server in response.rrcs.items():
assert isinstance(name, str) # RRC name: 'RRC00'
assert isinstance(route_server, tuple) # namedtuple: RRC
| python |
from django.urls import path, include
# from django.conf.urls import include, url
# from .views import TestViewSet
from .views import *
from rest_framework import routers
router = routers.DefaultRouter()
router.register('task_list', TestViewSet, basename="task_list")
router.register('Machine', MachineViewSet, basename="Machine")
router.register('Class', ClassViewSet, basename="Class")
router.register('Mechanical_hours', Mechanical_hoursViewSet, basename="Mechanical_hours")
urlpatterns = [
path('', include(router.urls)),
# url(r'^task_list/$', TestViewSet),
]
| python |
from utils.KTS.cpd_auto import *
| python |
class City:
'''
This class will hold a city in terms of its
x and y coordinates
@author Sebastian Castro
'''
def __init__(self, x, y):
# Holds the x and y components
self.x = x
self.y = y
self.point = (x, y)
def __str__(self):
return f'City: {self.point}'
def __repr__(self):
return f'City: {self.point}' | python |
import os
# Directory Config
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
DB_DIR = os.path.join(ROOT_DIR, 'db')
# Regexes
COURSE_NAME_PATTERN = r'[FD]0*(\d+\w*)\.?'
DAYS_PATTERN = f"^{'(M|T|W|Th|F|S|U)?'*7}$"
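# DAYS_PATTERN matches up to seven day tokens in a row, e.g. "MW", "TTh", "MTWThF"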
# Scraped table headers (for scrape_term.py)
HEADERS = (
'course',
'CRN',
'desc',
'status',
'days',
'time',
'start',
'end',
'room',
'campus',
'units',
'instructor',
'seats',
'wait_seats',
'wait_cap'
)
# MyPortal endpoint
SSB_URL = 'https://ssb-prod.ec.fhda.edu'
# Current banner term codes
CURRENT_TERM_CODES = {'fh': '202231', 'da': '202232'}
# Available Campuses - Foothill, De Anza, and test
CAMPUS_LIST = {
'fh': CURRENT_TERM_CODES['fh'],
'da': CURRENT_TERM_CODES['da'],
'test': 'test'
}
'''
Course Type Flags - Foothill College
Online - online, fully asynchronous classes (no live meetings)
Virtual - online, fully synchronous classes (only live meetings)
Hybrid - online, hybrid (mixed) between `online` and `virtual` [COVID-19]
Standard - physical classes (or all of the above are N/A, e.g. "Independent Study")
Last Verified / Updated for: Fall 2020
'''
FH_TYPE_ALIAS = {'standard': None, 'online': 'W', 'virtual': 'V', 'hybrid': 'Z'}
'''
Course Type Flags - De Anza College
Online - online, fully asynchronous classes (no live meetings)
Hybrid - hybrid classes that are both online and physical
Standard - physical classes (or all of the above are N/A, e.g. "Independent Study")
Last Verified / Updated for: Fall 2020
'''
DA_TYPE_ALIAS = {'standard': None, 'online': 'Z', 'hybrid': 'Y'}
# Mapping of campuses to class type variants
# NOTE: test database currently has Foothill College data
COURSE_TYPES_TO_FLAGS = {
'fh': FH_TYPE_ALIAS,
'da': DA_TYPE_ALIAS,
'test': FH_TYPE_ALIAS
}
| python |
#!/usr/bin/env python3
# coding:utf-8
import unittest
import zencad
#from PyQt5.QtWidgets import *
#from PyQt5.QtCore import *
#from PyQt5.QtGui import *
#from zencad.gui.settingswdg import SettingsWidget
#qapp = QApplication([])
class WidgetsTest(unittest.TestCase):
def test_segment_probe(self):
pass
# settings = SettingsWidget()
| python |
import numpy as np
import tensorflow as tf
# N, size of matrix. R, rank of data
N = 100
R = 5
# generate data
W_true = np.random.randn(N,R)
C_true = np.random.randn(R,N)
Y_true = np.dot(W_true, C_true)
Y_tf = tf.constant(Y_true.astype(np.float32))
W = tf.Variable(np.random.randn(N,R).astype(np.float32))
C = tf.Variable(np.random.randn(R,N).astype(np.float32))
Y_est = tf.matmul(W,C)
loss = tf.reduce_sum((Y_tf-Y_est)**2)
# regularization
alpha = tf.constant(1e-4)
regW = alpha*tf.reduce_sum(W**2)
regC = alpha*tf.reduce_sum(C**2)
# full objective
objective = loss + regW + regC
# optimization setup
train_step = tf.train.AdamOptimizer(0.001).minimize(objective)
# fit the model
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
for n in range(10000):
sess.run(train_step)
if (n+1) % 1000 == 0:
print('iter %i, %f' % (n+1, sess.run(objective)))
| python |
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
input = []
with open(os.path.join(script_dir, "input.txt"), "r") as file:
questionaire = {}
for line in file:
if (line.strip('\n') != ""):
if "People" in questionaire:
questionaire["People"] += 1
else:
questionaire["People"] = 1
for val in list(line.strip('\n')):
if val in questionaire:
questionaire[val] += 1
else:
questionaire[val] = 1
else:
input.append(questionaire)
questionaire = {}
input.append(questionaire)
print ('Read ' + str(len(input) - 1) + ' questionaires')
totalQuestions = 0
for questionaire in input:
totalQuestions += len(questionaire) -1
print('Solution 1: ' + str(totalQuestions) + ' questions')
totalQuestions = 0
for questionaire in input:
for response in questionaire:
if response != "People" and questionaire["People"] == questionaire[response]:
totalQuestions += 1
print('Solution 2: ' + str(totalQuestions) + ' questions') | python |
from distutils.core import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
setup(
name='pymp4parse',
version='0.3.0',
packages=[''],
url='https://github.com/use-sparingly/pymp4parse',
license='The MIT License',
author='Alastair Mccormack',
author_email='alastair at alu.media',
description='MP4 / ISO base media file format (ISO/IEC 14496-12 - MPEG-4 Part 12) file parser',
requires=['bitstring', 'six'],
install_requires=['bitstring', 'six'],
long_description=long_description,
data_files=[('', ['README.md'])]
)
| python |
import scrapy
class ScrapeTableSpider(scrapy.Spider):
name = 'jcs'
def start_requests(self):
urls = [
'https://en.wikipedia.org/wiki/List_of_schools_in_Singapore',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
for row in response.xpath('//center/*[@class="wikitable"]//tbody/tr'):
yield {
'area' : row.xpath('td[2]//text()').extract_first(),
'school_name': row.xpath('td[4]//text()').extract_first(),
'address' : row.xpath('td[8]//text()').extract_first(),
'schooltype' : row.xpath('td[9]//text()').extract_first()
}
#testing in scrapy shell
#for row in response.xpath('//center/*[@class="wikitable"]//tbody/tr'):print('area',row.xpath('td[2]//text()').extract_first(),'school_name',row.xpath('td[4]//text()').extract_first(),'address',row.xpath('td[8]//text()').extract_first(),'schooltype',row.xpath('td[9]//text()').extract_first()) | python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
# Copyright (c) 2017 Thomas P. Robitaille.
#
# Asclepias Broker is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Asclepias Broker."""
from __future__ import absolute_import, print_function
from .version import __version__
__all__ = ('__version__', )
| python |
#
# Copyright (C) 2020 CESNET.
#
# oarepo-fsm is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""OArepo FSM library for record state transitions."""
from flask import url_for
from invenio_records_rest.links import default_links_factory
from oarepo_fsm.mixins import FSMMixin
from oarepo_fsm.views import build_url_transition_for_pid, \
record_class_from_pid_type
def record_fsm_links_factory(pid, record=None, original_links_factory=None, **kwargs):
"""Factory for record FSM links generation.
:param pid: A Persistent Identifier instance.
:param record: An instance of a Record.
:param original_links_factory: a link factory to be used to generate default links
:returns: Dictionary containing a list of useful links + FSM link for the record.
"""
links_factory = original_links_factory or default_links_factory
links = links_factory(pid, record, **kwargs)
if record and isinstance(record, FSMMixin):
transitions = {}
for act in record.available_user_transitions().keys():
transitions[act] = build_url_transition_for_pid(pid, act)
links['transitions'] = transitions
return links
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bldcontrol', '0006_brlayer_local_source_dir'),
]
operations = [
migrations.AlterField(
model_name='brlayer',
name='commit',
field=models.CharField(max_length=254, null=True),
),
migrations.AlterField(
model_name='brlayer',
name='dirpath',
field=models.CharField(max_length=254, null=True),
),
migrations.AlterField(
model_name='brlayer',
name='giturl',
field=models.CharField(max_length=254, null=True),
),
]
| python |
"""
You are given a m x n 2D grid initialized with these three possible values.
-1 - A wall or an obstacle.
0 - A gate.
INF - Infinity means an empty room. We use the value 231 - 1 = 2147483647 to represent INF as you may assume that the distance to a gate is less than 2147483647.
Fill each empty room with the distance to its nearest gate. If it is impossible to reach a gate, it should be filled with INF.
Example:
Given the 2D grid:
INF -1 0 INF
INF INF INF -1
INF -1 INF -1
0 -1 INF INF
After running your function, the 2D grid should be:
3 -1 0 1
2 2 1 -1
1 -1 2 -1
0 -1 3 4
"""
# bfs using deque
# pretty much new method for me
# Runtime: 300 ms, faster than 75.37% of Python3 online submissions for Walls and Gates.
# Memory Usage: 16.7 MB, less than 60.00% of Python3 online submissions for Walls and Gates.
import collections
from typing import List
class Solution:
def wallsAndGates(self, rooms: List[List[int]]) -> None:
"""
Do not return anything, modify rooms in-place instead.
"""
if not rooms:
return []
n_row = len(rooms)
n_col = len(rooms[0])
bfs = collections.deque()
for i in range(n_row):
for j in range(n_col):
if rooms[i][j] == 0:
bfs.append((i, j))
directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]
while bfs:
x, y = bfs.popleft()
dist = rooms[x][y] + 1
for direction in directions:
new_x, new_y = x + direction[0], y + direction[1]
if new_x >=0 and new_x < n_row and new_y >= 0 and new_y < n_col and rooms[new_x][new_y] == 2147483647:
rooms[new_x][new_y] = dist
bfs.append((new_x, new_y))
| python |
import difflib
import bs4 as bs
try:
from PIL import Image
except ImportError:
import Image
import pytesseract
def parse_hocr(search_terms=None, hocr_file=None, regex=None):
"""Parse the hocr file and find a reasonable bounding box for each of the strings
in search_terms. Return a dictionary with values as the bounding box to be used for
extracting the appropriate text.
inputs:
search_terms = Tuple, A tuple of search terms to look for in the HOCR file.
outputs:
box_dict = Dictionary, A dictionary whose keys are the elements of search_terms and values
are the bounding boxes where those terms are located in the document.
"""
# Make sure the search terms provided are a tuple.
if not isinstance(search_terms,tuple):
raise ValueError('The search_terms parameter must be a tuple')
# Make sure we got a HOCR file handle when called.
if not hocr_file:
raise ValueError('The parser must be provided with an HOCR file handle.')
# Open the hocr file, read it into BeautifulSoup and extract all the ocr words.
hocr = open(hocr_file,'r').read()
soup = bs.BeautifulSoup(hocr,'html.parser')
words = soup.find_all('span',class_='ocrx_word')
result = dict()
# Loop through all the words and look for our search terms.
for word in words:
w = word.get_text().lower()
for s in search_terms:
# If the word is in our search terms, find the bounding box
if len(w) > 1 and difflib.SequenceMatcher(None, s, w).ratio() > .5:
bbox = word['title'].split(';')
bbox = bbox[0].split(' ')
bbox = tuple([int(x) for x in bbox[1:]])
                # Update the result dictionary, keeping only the first match if the search term appears more than once.
if s not in result.keys():
result.update({s:bbox})
else:
pass
return result
if __name__ == "__main__":
from pathlib import Path
import cv2
image = Path('data/CNI_robin_clean.jpg')
hocr = pytesseract.image_to_pdf_or_hocr(str(image), lang='fra',extension='hocr')
hocr_file = image.with_suffix('.xml')
with open(hocr_file, 'wb') as f:
f.write(hocr)
parse_hocr(search_terms=('Prénom',), hocr_file=hocr_file)
img = cv2.imread(str(image))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#img = cv2.GaussianBlur(img, (5,5), 0)
img = cv2.medianBlur(img, 3)
# img = cv2.bilateralFilter(img, 9, 75, 75)
# cv2.imshow("cropped", img)
# cv2.waitKey(0)
#_, img = cv2.threshold(img, 110, 255, cv2.THRESH_BINARY)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
#img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cv2.imshow("cropped", img)
cv2.waitKey(0)
crop = img[200:260,541:700]
cv2.imshow("cropped", crop)
cv2.waitKey(0)
print(pytesseract.image_to_string(crop))
print('hello') | python |
#############################################################################
##
## Copyright (C) 2019 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
from PySide2.QtCore import Slot, Qt, QRect, QSize
from PySide2.QtGui import QColor, QPainter, QTextFormat
from PySide2.QtWidgets import QPlainTextEdit, QWidget, QTextEdit
class LineNumberArea(QWidget):
def __init__(self, editor):
QWidget.__init__(self, editor)
self.codeEditor = editor
def sizeHint(self):
return QSize(self.codeEditor.line_number_area_width(), 0)
def paintEvent(self, event):
self.codeEditor.lineNumberAreaPaintEvent(event)
class CodeEditor(QPlainTextEdit):
def __init__(self):
QPlainTextEdit.__init__(self)
self.line_number_area = LineNumberArea(self)
self.blockCountChanged[int].connect(self.update_line_number_area_width)
self.updateRequest[QRect, int].connect(self.update_line_number_area)
self.cursorPositionChanged.connect(self.highlight_current_line)
self.update_line_number_area_width(0)
self.highlight_current_line()
def line_number_area_width(self):
digits = 1
max_num = max(1, self.blockCount())
while max_num >= 10:
max_num *= 0.1
digits += 1
space = 3 + self.fontMetrics().width('9') * digits
return space
def resizeEvent(self, e):
super().resizeEvent(e)
cr = self.contentsRect()
width = self.line_number_area_width()
rect = QRect(cr.left(), cr.top(), width, cr.height())
self.line_number_area.setGeometry(rect)
def lineNumberAreaPaintEvent(self, event):
painter = QPainter(self.line_number_area)
painter.fillRect(event.rect(), Qt.lightGray)
block = self.firstVisibleBlock()
block_number = block.blockNumber()
offset = self.contentOffset()
top = self.blockBoundingGeometry(block).translated(offset).top()
bottom = top + self.blockBoundingRect(block).height()
while block.isValid() and top <= event.rect().bottom():
if block.isVisible() and bottom >= event.rect().top():
number = str(block_number + 1)
painter.setPen(Qt.black)
width = self.line_number_area.width()
height = self.fontMetrics().height()
painter.drawText(0, top, width, height, Qt.AlignRight, number)
block = block.next()
top = bottom
bottom = top + self.blockBoundingRect(block).height()
block_number += 1
@Slot()
def update_line_number_area_width(self, newBlockCount):
self.setViewportMargins(self.line_number_area_width(), 0, 0, 0)
@Slot()
def update_line_number_area(self, rect, dy):
if dy:
self.line_number_area.scroll(0, dy)
else:
width = self.line_number_area.width()
self.line_number_area.update(0, rect.y(), width, rect.height())
if rect.contains(self.viewport().rect()):
self.update_line_number_area_width(0)
@Slot()
def highlight_current_line(self):
extra_selections = []
if not self.isReadOnly():
selection = QTextEdit.ExtraSelection()
line_color = QColor(Qt.yellow).lighter(160)
selection.format.setBackground(line_color)
selection.format.setProperty(QTextFormat.FullWidthSelection, True)
selection.cursor = self.textCursor()
selection.cursor.clearSelection()
extra_selections.append(selection)
self.setExtraSelections(extra_selections)
| python |
from pbge.plots import Plot
import game
import gears
import pbge
import random
from game import teams
# ***************************
# *** MECHA_ENCOUNTER ***
# ***************************
#
# Elements:
# LOCALE: The scene where the encounter will take place
# FACTION: The faction you'll be fighting; may be None
# ROOM: The room where the encounter will take place; if None, an open room will be added.
#
class RandoMechaEncounter( Plot ):
# Fight some random mecha. What do they want? To pad the adventure.
LABEL = "MECHA_ENCOUNTER"
active = True
scope = "LOCALE"
def custom_init( self, nart ):
myscene = self.elements["LOCALE"]
if not self.elements.get("ROOM"):
self.register_element("ROOM",pbge.randmaps.rooms.OpenRoom(5,5),dident="LOCALE")
team2 = self.register_element("_eteam",teams.Team(enemies=(myscene.player_team,)),dident="ROOM")
team2.contents += gears.selector.RandomMechaUnit(self.rank,100,self.elements.get("FACTION",None),myscene.environment).mecha_list
return True
def t_ENDCOMBAT(self,camp):
# If the player team gets wiped out, end the mission.
myteam = self.elements["_eteam"]
if len(myteam.get_active_members(camp)) < 1:
self.end_plot(camp)
camp.dole_xp(100)
class SmallMechaEncounter( Plot ):
# Fight some random mecha. What do they want? To pad the adventure.
LABEL = "MECHA_ENCOUNTER"
active = True
scope = "LOCALE"
def custom_init( self, nart ):
myscene = self.elements["LOCALE"]
if not self.elements.get("ROOM"):
self.register_element("ROOM",pbge.randmaps.rooms.OpenRoom(5,5),dident="LOCALE")
team2 = self.register_element("_eteam",teams.Team(enemies=(myscene.player_team,)),dident="ROOM")
team2.contents += gears.selector.RandomMechaUnit(self.rank,50,self.elements.get("FACTION",None),myscene.environment).mecha_list
return True
def t_ENDCOMBAT(self,camp):
# If the player team gets wiped out, end the mission.
myteam = self.elements["_eteam"]
if len(myteam.get_active_members(camp)) < 1:
self.end_plot(camp)
camp.dole_xp(50)
| python |