from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, viewsets
from profiles_api import serializer, models
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializer.HelloSerializer
def get(self, request, format=None):
"""return get """
an_apiview = [
'Uses HTTP method get post del put and push ',
"lalalla",
'blalala',
]
return Response({'message': "Hello, I'm an API", 'an_apiview': an_apiview})
def post(self, request):
"""post method """
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'hello {name}'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handles the object"""
return Response({'message': 'PUT'})
def patch(self, request, pk=None):
"""partial update of fields """
return Response({'message': 'Patch'})
def delete(self, request, pk=None):
"""partial update of fields """
return Response({'message': 'delete '})
class HelloViewSet(viewsets.ViewSet):
"""testing view set"""
serializer_class = serializer.HelloSerializer
def list(self, request):
"""Return a hello message and a list of ViewSet features."""
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update, destroy)',
"lalalla",
'blalala',
'sam',
]
return Response({'message': 'Hello', 'a_viewset': a_viewset})
def create(self, request):
"""create hello msg """
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""handle get user id"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Update obj"""
return Response({'http_method': 'put'})
def partial_update(self, request, pk=None):
"""update partialy """
return Response({'http_method': 'patch'})
def destroy(self, request, pk=None):
"""Handle removing an object."""
return Response({'http_method': 'delete'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating model view set"""
serializer_class = serializer.UserProfileSerializer
queryset = models.UserProfile.objects.all()
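# Note: the `serializer` module imported above is a separate file that is not shown
# here. A minimal sketch of what the views assume it contains (the `name` field is
# used by the hello endpoints above; the UserProfile field list is illustrative only):
#
#     # profiles_api/serializer.py (assumed)
#     from rest_framework import serializers
#     from profiles_api import models
#
#     class HelloSerializer(serializers.Serializer):
#         """Serializes a name field for testing the APIView/ViewSet."""
#         name = serializers.CharField(max_length=10)
#
#     class UserProfileSerializer(serializers.ModelSerializer):
#         """Serializes the UserProfile model."""
#         class Meta:
#             model = models.UserProfile
#             fields = ('id', 'email', 'name')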
| python |
"""
[caption]
def=Cutting URL parameters
ja=URLパラメータの切り取り
"""
import sys
import io
import tkinter
import tkinter.ttk
import tkinter.simpledialog
import ctypes
import ctypes.wintypes
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse
sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
class CuttingDialog(tkinter.simpledialog.Dialog):
def __init__(self, parent, title, url) -> None:
self.url = url
super().__init__(parent, title=title)
def body(self, master) -> None:
parts = urlparse(self.url)
queries = parse_qsl(parts.query)
self.boollist = []
self.attributes("-toolwindow", 1)
self.attributes("-topmost", 1)
self.focus_force()
lf = tkinter.LabelFrame(master, text="URL")
tkinter.Label(lf, text=f"{parts.netloc}{parts.path}").pack(pady=8, padx=4)
lf.pack(side=tkinter.TOP)
for query in queries:
bv = tkinter.BooleanVar()
tkinter.Checkbutton(master, variable=bv, text=f"{query[0]}={query[1]}").pack(side = tkinter.TOP, anchor=tkinter.W)
self.boollist.append(bv)
return super().body(master)
def grab_set(self) -> None:
p = ctypes.wintypes.POINT()
ctypes.windll.user32.GetCursorPos(ctypes.byref(p))
self.geometry(f"+{p.x - self.winfo_width() // 2}+{p.y - self.winfo_height() // 2}")
return super().grab_set()
def ok(self, event=None): super().ok(event); self.result = True
def cancel(self, event=None): super().cancel(event); self.result = False
text = sys.stdin.read()
if text != "":
result = True
bools = []
p = urlparse(text)
if "params" in globals():
bools = globals()["params"]["bools"]
elif p.scheme:
owner = tkinter.Tk()
owner.withdraw()
dlg = CuttingDialog(owner, 'Cutting URL Params', text)
bools = dlg.boollist
result = dlg.result
if result:
url = urlparse(text)
qsls = parse_qsl(url.query)
qsla = []
for b, q in zip(bools, qsls):
if b.get() if type(b) is tkinter.BooleanVar else b:
qsla.append((q[0], q[1]))
print(urlunparse(url._replace(query=urlencode(qsla))))
else:
print(text)
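# Worked example of the round trip performed above (standard library only): parse_qsl()
# splits the query string into (key, value) pairs, the unchecked pairs are dropped, and
# urlencode()/urlunparse() rebuild the URL. For instance, keeping only the `q` parameter:
#
#     >>> u = urlparse("https://example.com/search?q=python&utm_source=mail")
#     >>> urlunparse(u._replace(query=urlencode([("q", "python")])))
#     'https://example.com/search?q=python'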
| python |
# coding: utf-8
from __future__ import unicode_literals
from django.test import TestCase
class ComputeIntersectionsTestCase(TestCase):
def test_command(self):
pass # @todo
| python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Train a Fast-RCNN model on the PASCAL VOC dataset.
This Fast R-CNN model is based on VGG16 that was pre-trained on the ImageNet (I1K) dataset.
By default, the script will download the pre-trained VGG16 from neon model zoo
and seed the convolution and pooling layers. And Fast R-CNN starts training from
that. If the script is given --model_file, it will continue training the
Fast R-CNN from the given model file.
Reference:
"Fast R-CNN"
http://arxiv.org/pdf/1504.08083v2.pdf
https://github.com/rbgirshick/fast-rcnn
Usage:
python examples/fast-rcnn/train.py -e 20 --save_path frcn_vgg.pkl
Notes:
1. For the VGG16-based Fast R-CNN model, training/testing is supported with a small
batch size such as 2 or 3 images per batch. The model training will converge
around 20 epochs. With 3 images per batch, and 64 ROIs per image, the training
consumes about 11G memory.
2. The original Caffe model goes through 40000 iterations (minibatches) of training, with
2 images per minibatch.
3. The dataset will cache the preprocessed file and re-use that if the same
configuration of the dataset is used again. The cached file by default is in
~/nervana/data/VOCDevkit/VOC<year>/train_< >.pkl or
~/nervana/data/VOCDevkit/VOC<year>/inference_< >.pkl
"""
from neon import logger as neon_logger
from neon.backends import gen_backend
from neon.data import PASCALVOCTrain
from neon.transforms import CrossEntropyMulti, SmoothL1Loss, ObjectDetection
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.callbacks.callbacks import Callbacks
from neon.layers import Multicost, GeneralizedCostMask
from neon.util.persist import save_obj
from util import load_vgg_weights, create_frcn_model, scale_bbreg_weights
# main script
# parse the command line arguments
parser = NeonArgparser(__doc__, default_overrides=dict(batch_size=4))
parser.add_argument('--subset_pct', type=float, default=100,
help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)
# Override save path if None
if args.save_path is None:
args.save_path = 'frcn_vgg.pkl'
if args.callback_args['save_path'] is None:
args.callback_args['save_path'] = args.save_path
if args.callback_args['serialize'] is None:
args.callback_args['serialize'] = min(args.epochs, 10)
# hyperparameters
args.batch_size = 4
num_epochs = args.epochs
n_mb = None
img_per_batch = args.batch_size
rois_per_img = 64
frcn_fine_tune = False
learning_rate_scale = 1.0 / 10
if frcn_fine_tune is True:
learning_rate_scale = 1.0 / 16
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
if args.backend == 'gpu':
be.enable_winograd = 4
if be.gpu_memory_size < 11 * 1024 * 1024 * 1024:
exit("ERROR: This model requires at least 11GB GPU memory to be run.")
# setup training dataset
train_set = PASCALVOCTrain('trainval', '2007', path=args.data_dir, n_mb=n_mb,
img_per_batch=img_per_batch, rois_per_img=rois_per_img,
rois_random_sample=True,
add_flipped=False, subset_pct=args.subset_pct)
test_set = PASCALVOCTrain('test', '2007', path=args.data_dir, n_mb=n_mb,
img_per_batch=img_per_batch, rois_per_img=rois_per_img,
rois_random_sample=True,
add_flipped=False)
# setup model
model = create_frcn_model(frcn_fine_tune)
# setup optimizer
opt_w = GradientDescentMomentum(
0.001 * learning_rate_scale, 0.9, wdecay=0.0005)
opt_b = GradientDescentMomentum(0.002 * learning_rate_scale, 0.9)
optimizer = MultiOptimizer({'default': opt_w, 'Bias': opt_b})
# if training a new model, seed the image model conv layers with pre-trained weights
# otherwise, just load the model file
if args.model_file is None:
load_vgg_weights(model, args.data_dir)
cost = Multicost(costs=[GeneralizedCostMask(costfunc=CrossEntropyMulti()),
GeneralizedCostMask(costfunc=SmoothL1Loss())],
weights=[1, 1])
callbacks = Callbacks(model, eval_set=test_set, **args.callback_args)
model.fit(train_set, optimizer=optimizer,
num_epochs=num_epochs, cost=cost, callbacks=callbacks)
# The Fast R-CNN model requires scaling the bbox regression branch's linear layer weights
# before saving the model
model = scale_bbreg_weights(
model, train_set.bbtarget_means, train_set.bbtarget_stds)
save_obj(model.serialize(keep_states=True), args.save_path)
neon_logger.display('running eval...')
metric_train = model.eval(train_set, metric=ObjectDetection())
neon_logger.display(
'Train: label accuracy - {}%, object detection logloss - {}'.format(metric_train[0] * 100,
metric_train[1]))
metric_test = model.eval(test_set, metric=ObjectDetection())
neon_logger.display(
'Test: label accuracy - {}%, object detection logloss - {}'.format(metric_test[0] * 100,
metric_test[1]))
| python |
"""Define resize, blur, and related constants."""
from . import io
from collections import namedtuple
from numba import guvectorize
import math
import numpy as np
RowOps = namedtuple('RowOps', 'tindices sindices fweights'.split())
GAUSSIAN_SCALE = 1.0 / np.sqrt(0.5 * np.pi)
def hermite(x):
x = np.clip(x, 0, 1)
return 2 * x * x * x - 3 * x * x + 1
def triangle(x):
x = np.clip(x, 0, 1)
return 1.0 - x
def gaussian(x):
x = np.clip(x, 0, 2)
return np.exp(-2 * x * x) * GAUSSIAN_SCALE
def nearest(x):
return np.less_equal(x, 0.5) * 1.0
def sinc(x):
if x <= 0.00001: return 1.0
return np.sin(np.pi * x) / (np.pi * x)
def lanczos(x):
x = np.clip(x, 0, 1)
return sinc(x) * sinc(x)
def mitchell(x):
B = 1.0 / 3.0
C = 1.0 / 3.0
P0 = (6 - 2*B) / 6.0
P1 = 0
P2 = (-18 +12*B + 6*C) / 6.0
P3 = (12 - 9*B - 6*C) / 6.0
Q0 = (8*B +24*C) / 6.0
Q1 = (-12*B -48*C) / 6.0
Q2 = (6*B +30*C) / 6.0
Q3 = (-1*B - 6*C) / 6.0
if x >= 2.0: return 0.0
if x >= 1.0: return Q0 + Q1*x + Q2*x*x + Q3*x*x*x
return P0 + P1*x + P2*x*x + P3*x*x*x
class Filter:
def __init__(self, fn, radius):
self.radius = radius
self.function = fn
HERMITE = Filter(hermite, 1)
TRIANGLE = Filter(triangle, 1)
GAUSSIAN = Filter(gaussian, 2)
NEAREST = Filter(nearest, 0)
LANCZOS = Filter(lanczos, 1)
MITCHELL = Filter(mitchell, 2)
def resize(source, width=None, height=None, filter=None, radius=1,
wrapx=False, wrapy=False):
"""Create a new numpy image with the desired size.
Either width or height can be None, in which case its value
is inferred from the aspect ratio of the source image.
Filter can be HERMITE, TRIANGLE, GAUSSIAN, NEAREST, LANCZOS, or
MITCHELL.
"""
assert len(source.shape) == 3, 'Shape is not rows x cols x channels'
assert width is not None or height is not None, 'Missing target size'
aspect = source.shape[1] / source.shape[0]
if width is None: width = height * aspect
if height is None: height = width / aspect
magnifying = width > source.shape[1]
if filter is None: filter = MITCHELL if magnifying else LANCZOS
return resample(source, width, height, filter, radius, wrapx, wrapy)
def resample(source, width, height, filter, radius, wrapx, wrapy):
nchans = source.shape[2]
def fn(t): return filter.function(t / radius)
scaled_filter = Filter(fn, radius * filter.radius)
srows, scols = source.shape[0], source.shape[1]
trows, tcols = int(height), int(width)
vresult = np.zeros([srows, tcols, nchans])
rowops = create_ops(tcols, scols, scaled_filter, wrapx)
convolve(vresult, source, rowops)
vresult = transpose(vresult)
hresult = np.zeros([tcols, trows, nchans])
rowops = create_ops(trows, srows, scaled_filter, wrapy)
convolve(hresult, vresult, rowops)
return transpose(hresult)
def blur(image, filter=GAUSSIAN, radius=4, wrapx=False, wrapy=False):
"""Resample an image and produce a new image with the same size.
For a list of available filters, see <a href="#resize">resize</a>.
"""
width, height = image.shape[1], image.shape[0]
return resize(image, width, height, filter, radius, wrapx, wrapy)
def transpose(source: np.ndarray):
return np.swapaxes(source, 0, 1)
def create_ops(ntarget, nsource, filter: Filter, wrap) -> RowOps:
# Generate a sequence of operations to perform a 1D convolution
# where each operation is represented by 3-tuple of: target index,
# source index, weight.
tindices, sindices, fweights = [], [], []
dtarget = 1.0 / ntarget
dsource = 1.0 / nsource
minifying = ntarget < nsource
fextent = dtarget if minifying else dsource
fdomain = float(ntarget if minifying else nsource)
x = dtarget / 2
for tindex in range(ntarget):
minx = x - filter.radius * fextent
maxx = x + filter.radius * fextent
minsi = int(minx * float(nsource))
maxsi = int(math.ceil(maxx * float(nsource)))
localops = []
weightsum = 0.0
for sindex in range(minsi, maxsi+1):
wrapped = sindex
if sindex < 0 or sindex >= nsource:
if wrap:
wrapped = sindex % nsource
else:
continue
sx = (0.5 + sindex) * dsource
t = fdomain * abs(sx - x)
weight = filter.function(t)
if weight != 0:
localops.append((tindex, wrapped, weight))
weightsum += weight
if weightsum > 0.0:
for op in localops:
tindices.append(op[0])
sindices.append(op[1])
fweights.append(op[2] / weightsum)
x += dtarget
return RowOps(tindices, sindices, fweights)
SIG0 = "void(f8[:,:,:], f8[:,:,:], i4[:], i4[:], f8[:])"
SIG1 = "(r0,c0,d),(r0,c1,d),(i),(i),(i)"
@guvectorize([SIG0], SIG1, target='parallel')
def jit_convolve(target, source, tinds, sinds, weights):
nrows, nchan, nops = target.shape[0], target.shape[2], len(tinds)
for c in range(nchan):
for row in range(nrows):
for op in range(nops):
tind, sind, weight = tinds[op], sinds[op], weights[op]
target[row][tind][c] += source[row][sind][c] * weight
def convolve(target, source, rowops: RowOps):
# Perform highly generalized 1D convolution. This is almost
# equivalent to:
#
# for row in range(len(target)):
# target[row][tindices] += source[row][sindices] * fweights
#
# ...but with the crucial feature of allowing the same index to
# appear multiple times in tindices.
#
# Note that standard numpy convolution assumes a stationary kernel,
# whereas this function could possibly be used to apply a varying
# kernel.
tindices, sindices, fweights = rowops
assert len(tindices) == len(sindices) == len(fweights)
assert len(target) == len(source)
jit_convolve(target, source,
np.int32(tindices), np.int32(sindices),
np.double(fweights))
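# Minimal usage sketch. The relative `from . import io` at the top means this module is
# meant to be run as part of its package (e.g. `python -m <package>.resize`); the random
# image below is purely illustrative.
if __name__ == '__main__':
    src = np.random.rand(64, 96, 3)   # rows x cols x channels
    half = resize(src, width=48)      # height is inferred from the aspect ratio
    soft = blur(src, radius=2)        # same size as the input, Gaussian filter
    print(half.shape, soft.shape)     # expected: (32, 48, 3) (64, 96, 3)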
| python |
# Generic imports
import os
import random
import shutil
from datetime import datetime
# Imports with probable installation required
try:
import progress.bar
except ImportError:
print('*** Missing required packages, I will install them for you ***')
os.system('pip3 install progress')
import progress.bar
# Custom imports
from python_tools.shapes.shapes_utils import *
from python_tools.meshes.meshes_utils import *
### ************************************************
### Generate full dataset
# Parameters
n_sampling_pts = 5
mesh_domain = False
plot_pts = True
n_shapes = 200
time = datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
dataset_dir = 'dataset_'+time+'/'
mesh_dir = dataset_dir+'meshes/'
img_dir = dataset_dir+'images/'
filename = 'shape'
magnify = 1.0
xmin =-2.0
xmax = 2.0
ymin =-2.0
ymax = 2.0
n_tri_max = 5000
# Create directories if necessary
if not os.path.exists(mesh_dir):
os.makedirs(mesh_dir)
if not os.path.exists(img_dir):
os.makedirs(img_dir)
# Generate dataset
bar = progress.bar.Bar('Generating shapes', max=n_shapes)
for i in range(0,n_shapes):
generated = False
while (not generated):
n_pts = random.randint(3, 7)
radius = np.random.uniform(0.0, 1.0, size=n_pts)
edgy = np.random.uniform(0.0, 1.0, size=n_pts)
shape = Shape(filename+'_'+str(i),
None,n_pts,n_sampling_pts,radius,edgy)
shape.generate(magnify=2.0,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax)
meshed, n_tri = shape.mesh()
if (meshed and (n_tri < n_tri_max)):
shape.generate_image(plot_pts=plot_pts,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax)
img = filename+'_'+str(i)+'.png'
mesh = filename+'_'+str(i)+'.mesh'
shutil.move(img, img_dir)
shutil.move(mesh, mesh_dir)
generated = True
bar.next()
# End bar
bar.finish()
| python |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 2 10:54:54 2021
@author: po-po
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
#filename = r'C:\Users\po-po\Desktop\DOC\Fibras\Programas\data\dr2todr4e01121121.csv'
filename = r'C:\Users\po-po\Desktop\DOC\Fibras\Programas\data\drgodet\r5pa1dr2e0f10.csv'
clean_arr = pd.read_csv(filename)
file = str(os.path.splitext(os.path.basename(filename))[0])
#plot formatting
params = {'figure.figsize': (6, 4),
'font.size': 18,
'font.sans-serif': 'Arial',
'lines.linewidth': 2.0,
'axes.linewidth': 1.5,
'axes.formatter.use_mathtext': True,
'axes.formatter.min_exponent': False,
'axes.formatter.useoffset': False,
'axes.grid': False,
'axes.grid.axis': 'both',
'xtick.minor.visible': True,
'ytick.minor.visible': True,
'xtick.direction': 'in',
'xtick.top': True,
'ytick.direction': 'in',
'ytick.right': True,
'xtick.major.size': 10,
'xtick.minor.size': 5,
'xtick.major.width': 1,
'ytick.major.size': 10,
'ytick.minor.size': 5,
'ytick.major.width': 1,
'legend.frameon': True,
}
plt.rcParams.update(params)
fig = plt.figure()
#perform rolling average and rolling std on the raw diameter data
interval = 100
clean_arr['Average'] = clean_arr['Diameter'].rolling(window = interval, center = True, min_periods = 1).mean()
clean_arr['Std'] = clean_arr['Diameter'].rolling(window = interval, center = True, min_periods = 1).std()
clean_arr['Clean'] = clean_arr.Diameter[(clean_arr['Diameter'] >= clean_arr['Average']-clean_arr['Std']) & (clean_arr['Diameter'] <= clean_arr['Average']+clean_arr['Std'])]
clean_arr['Dirty'] = clean_arr.Diameter[(clean_arr['Diameter'] <= clean_arr['Average']-clean_arr['Std']) | (clean_arr['Diameter'] >= clean_arr['Average']+clean_arr['Std'])]
clean_arr['CAverage'] = clean_arr['Clean'].rolling(window = interval, center = True, min_periods = 1).mean()
clean_arr['Marked'] = clean_arr.Time[clean_arr['Event Flag'] == 1]
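#rows within +/- one rolling std of the rolling mean are kept in 'Clean'; the rest go to
#'Dirty'. 'CAverage' is a second rolling mean computed on the cleaned data only, and
#'Marked' holds the timestamps of rows whose 'Event Flag' equals 1, used in the plots below.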
#plot diameter array
stflag = 1
if stflag == 1:
plt.plot(clean_arr['Time'],clean_arr['Clean'],'kx')
plt.plot(clean_arr['Time'],clean_arr['CAverage'],'b-')
plt.plot(clean_arr['Marked'], clean_arr['Event Flag'], 'go')
else:
plt.plot(clean_arr['Time'],clean_arr['Clean'],'kx')
plt.plot(clean_arr['Time'],clean_arr['CAverage'],'b-')
plt.plot(clean_arr['Marked'], clean_arr['Event Flag'], 'go')
plt.plot(clean_arr['Time'],clean_arr['Average']-clean_arr['Std'],'r--')
plt.plot(clean_arr['Time'],clean_arr['Average']+clean_arr['Std'],'r--')
plt.plot(clean_arr['Time'],clean_arr['Dirty'],'rx')
plt.xlabel('Time (s)')
plt.ylabel('Fiber Diameter (um)')
plt.title('%s'%file)
plt.show()
| python |
# Copyright (c) 2022 Aiven, Helsinki, Finland. https://aiven.io/
import sys
from unittest import mock
import pytest
from pghoard import postgres_command
def test_restore_command_error():
with mock.patch("pghoard.postgres_command.http_request", return_value=500):
with pytest.raises(postgres_command.PGCError, match="Restore failed with HTTP status 500"):
postgres_command.restore_command("foo", "123", "/tmp/xxx")
def test_postgres_command_archive_error():
args = ["postgres_command", "--site", "foo", "--xlog", "bar", "--mode", "archive"]
with mock.patch.object(sys, "argv", args):
with mock.patch("pghoard.postgres_command.archive_command", side_effect=SystemExit):
assert postgres_command.main() == postgres_command.EXIT_UNEXPECTED
def test_postgres_command_restore_error():
args = ["postgres_command", "--site", "foo", "--xlog", "bar", "--mode", "restore"]
with mock.patch.object(sys, "argv", args):
with mock.patch("pghoard.postgres_command.restore_command", side_effect=SystemExit):
assert postgres_command.main() == postgres_command.EXIT_ABORT
def test_postgres_command_archive_pgcerror():
args = ["postgres_command", "--site", "foo", "--xlog", "bar", "--mode", "archive"]
with mock.patch.object(sys, "argv", args):
with mock.patch(
"pghoard.postgres_command.archive_command", side_effect=postgres_command.PGCError(message="howdy", exit_code=42)
):
assert postgres_command.main() == 42
| python |
# This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Loader implementation for datasets that are given in Json format."""
from typing import Any, Dict, IO, List
import json
from refdata.base import FormatDescriptor
from refdata.dataset.consumer import DataConsumer
from refdata.dataset.loader import DatasetLoader
class JsonLoader(DatasetLoader):
"""Dataset loader for Json files. The dataset is assumed to be a list of
dictionaries, where each dictionary represents one row in the dataset.
This list of dictionaries may be contained in another dictionary. In this
case the target path in the format settings references the list element.
For each column, the column identifier from the dataset schema is
expected to be the query path to extract the respective cell value from
a dictionary representing a dataset row. This default behavior can be
overridden by including an object {'id': 'column-id', 'path': 'query path'}
for that column in the 'sources' element of the format settings.
The Json loader considers the following settings:
- target (string): Path to the list element containing the data row
dictionaries (default='').
- sources (list): List of {'id', 'path'}-pairs defining the query path used to
extract cell values for individual columns.
"""
def __init__(self, parameters: FormatDescriptor):
"""Initialize the format settings.
Parameters
----------
parameters: refdata.base.FormatDescriptor
Dataset format specification.
"""
# Set the target query to extract the dataset rows from the document.
self.target = JQuery(parameters.get('target', ''))
# Create mapping of column identifier to their source path for the
# columns that have a source path that is different from their
# identifier. For columns for which no entry exists in the 'sources' list,
# the source path is expected to be the column identifier.
self.source_map = {s['id']: s['path'] for s in parameters.get('sources', dict())}
def read(self, file: IO, columns: List[str], consumer: DataConsumer) -> DataConsumer:
"""Read dataset rows from a given file handle.
Assumes that the file contains a Json object. This method first extracts
the list of dataset row objects from the Json object in the file. It
then creates a dataset row from each object based on the source path for
each column in the given column list.
If no source path was specified for a column, the column identifier itself is
used as the query path. Rows are read from the list of objects identified by the
target path that was defined in the dataset format.
Parameters
----------
file: file object
Open file object.
columns: list of string
Column identifier defining the content and the schema of the
returned data.
consumer: refdata.dataset.consumer.DataConsumer
Consumer for data rows that are being read.
Returns
-------
refdata.dataset.consumer.DataConsumer
"""
# Create the list of source queries for each column in the resulting
# dataset rows. Use the column to source mapping that was created from
# the format parameters when the object was instantiated. By default,
# the column identifier is used as the query path.
sources = list()
for col in columns:
sources.append(JQuery(self.source_map.get(col, col)))
for doc in self.target.find(json.load(file)):
consumer.consume([q.find(doc) for q in sources])
return consumer
# -- Helper Functions ---------------------------------------------------------
class JQuery:
"""Helper class to evaluate path expressions on nested dictionaries."""
def __init__(self, path: str):
"""Initialize the query path. The path is a string with individual
path components separated by '/'.
Parameters
----------
path: string
Query path expression.
"""
# Remove trailing '/' from the path.
while path.endswith('/'):
path = path[:-1]
# Ensure that the query path is an empty list if the path is empty.
self.path = path.split('/') if path else []
def find(self, doc: Dict[str, Any]) -> Any:
"""Get the element at the query path in the given nested dictionary.
Returns None if the query path does not identify an element in the
given dictionary.
Parameters
----------
doc: dict
Nested dictionary object.
Returns
-------
any
"""
# Keep track of the depth of the (successfully) evaluated part of the
# query path.
depth = 0
while depth < len(self.path) and isinstance(doc, dict):
doc = doc.get(self.path[depth]) # type: ignore
depth += 1
# The result depends on whether we reached the end of the path (depth
# equals length of the query path) or encountered an element in the
# query path that was not matched (depth is less than the length of
# the query path). In the latter case the result is always None.
return doc if depth == len(self.path) else None
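# Small self-check sketch for JQuery; it only exercises code defined in this module
# (running it directly still requires the refdata imports at the top to be available).
if __name__ == '__main__':
    doc = {'address': {'city': 'Brooklyn', 'zip': '11201'}}
    assert JQuery('address/city').find(doc) == 'Brooklyn'
    assert JQuery('address/country').find(doc) is None
    assert JQuery('').find(doc) == doc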
| python |
from django.test import TestCase
from whats_fresh.whats_fresh_api.models import Video
from django.contrib.gis.db import models
class VideoTestCase(TestCase):
def setUp(self):
self.expected_fields = {
'video': models.URLField,
'caption': models.TextField,
'name': models.TextField,
'created': models.DateTimeField,
'modified': models.DateTimeField,
'story': models.related.RelatedObject,
'id': models.AutoField
}
self.optional_fields = {
'caption'
}
def test_fields_exist(self):
model = models.get_model('whats_fresh_api', 'Video')
for field, field_type in self.expected_fields.items():
self.assertEqual(
field_type, type(model._meta.get_field_by_name(field)[0]))
def test_no_additional_fields(self):
fields = Video._meta.get_all_field_names()
self.assertTrue(sorted(fields) == sorted(self.expected_fields.keys()))
def test_created_modified_fields(self):
self.assertTrue(Video._meta.get_field('modified').auto_now)
self.assertTrue(Video._meta.get_field('created').auto_now_add)
def test_optional_fields(self):
for field in self.optional_fields:
self.assertEqual(
Video._meta.get_field_by_name(field)[0].blank, True)
| python |
import os
import json
import torch
import numpy as np
from PIL import Image
import copy
import os
import logging
from detectron2.data import detection_utils as utils
from ..registry import DATASOURCES
from .load_coco import load_coco_json
@DATASOURCES.register_module
class COCO_BOXES(object):
def __init__(self, root, json_file, max_box_num, image_format='RGB', *args, **kwargs):
if json_file.endswith('instances_train2017.json'):
logging.critical('Using ground-truth for pre-training, please use selective search result!')
self.data_dicts = load_coco_json(json_file, root)
self.image_format = image_format
self.max_box_num = max_box_num
def get_length(self):
return len(self.data_dicts)
def __len__(self):
return self.get_length()
def get_sample(self, idx):
data_dict = self.data_dicts[idx]
dataset_dict = copy.deepcopy(data_dict) # it will be modified by code below
annos = [obj for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0]
instances = utils.annotations_to_instances(annos, (dataset_dict['height'], dataset_dict['width']),)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict | python |
from __future__ import absolute_import, division, print_function
import os
from pdfx import cli
# import pytest
curdir = os.path.dirname(os.path.realpath(__file__))
def test_cli():
parser = cli.create_parser()
parsed = parser.parse_args(['-j', 'pdfs/valid.pdf'])
assert parsed.json
assert parsed.pdf == "pdfs/valid.pdf"
| python |
#!/usr/bin/env python
#
# This script is experimental.
#
# Liang Wang @ Dept. Computer Science, University of Helsinki
# 2011.09.21
#
import os, sys
import socket
import pickle
import random
import Queue
import time
import threading
import resource
from khash import *
from bencode import bencode, bdecode
from common import *
MYPORT = 6882 # The port used for communication
ACTIVE_THRESHOLD = 2000 # The minimum number of nodes in nodePool
REFRESH_LIMIT = 60 # The time interval to refresh a node
class Maintainer(object):
def __init__(self, id = None):
self.id = id if id else newID() # Maintainer's ID
self.noisy = True # Output extra info or not
self.krpc = KRPC() # Simple KRPC translator
self.nodePool = {} # Dict of the nodes collected
self.nodePool_lock = threading.Lock()
self.nodeQueue = Queue.Queue(0) # Queue of the nodes to scan
self.startTime = time.time() # Time start the crawler
self.respondent = 0 # Number of respondent
self.isock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.isock.bind( ("",MYPORT) )
self.isock_lock = threading.Lock()
pass
def addNode(self, node, ip):
self.nodePool_lock.acquire()
try:
now = time.time()
# Generate Ip pool
IPs = [ x["host"] for x in self.nodePool.values() ]
if node["id"] not in self.nodePool:
if ip not in IPs:
node["timestamp"] = now
node["lastupdate"] = now - REFRESH_LIMIT
self.nodePool[node['id']] = node
else:
node = self.nodePool[node['id']]
# only update the lastupdate if the message is from node itself
if ip==node["host"]:
node["lastupdate"] = now
self.nodePool[node['id']] = node
except Exception, err:
print "Exception:Maintainer.addNode()", err
self.nodePool_lock.release()
pass
def bootstrap(self):
"""Whenever the number of nodes in nodePool drops below the threshold,
use this function to get more nodes."""
self.nodePool_lock.acquire()
try:
if len(self.nodePool) == 0:
self.findNode("router.bittorrent.com", 6881, self.id)
else:
for n in self.nodePool.values():
self.findNode(n["host"], n["port"], newID(), n["id"])
except Exception, err:
print "Exception:Maintainer.bootstrap()", err
self.nodePool_lock.release()
pass
def findNode(self, host, port, target, rID = None):
msg = self.krpc.encodeReq("find_node", {"id":self.id, "target":target})
self.sendMsg(msg, (host,port))
pass
def ping(self, host, port):
msg = self.krpc.encodeReq("ping", {"id":self.id})
self.sendMsg(msg, (host,port))
pass
def pingNodes(self, nodes):
for node in nodes:
try:
self.ping(node['host'], node['port'])
except Exception, err:
print "Exception:Maintainer.pingNodes():", err
pass
def processNodes(self, nodes):
timestamp = time.time()
for node in nodes:
id = node["id"]
if id not in self.nodePool:
if id != self.id:
self.nodeQueue.put(node)
self.addNode(node, node["host"])
pass
def scan_nodePool(self):
"""Kick out the dead nodes"""
print "scan the nodePool"
now = time.time()
self.nodePool_lock.acquire()
for n in self.nodePool.values():
try:
t = now - n["lastupdate"]
if t >= REFRESH_LIMIT and t < 2*REFRESH_LIMIT:
self.ping(n["host"], n["port"])
elif t >= 2*REFRESH_LIMIT:
self.nodePool.pop(n["id"])
print "kick out %s:%i" % (n["host"], n["port"])
except Exception, err:
print "Exception:Maintainer.scan_nodePool():", err, n
self.nodePool_lock.release()
pass
def sendMsg(self, msg, addr):
self.isock_lock.acquire()
try:
self.isock.sendto(msg, addr)
except:
pass
self.isock_lock.release()
pass
def serialize(self):
tmp = []
obj = []
self.nodePool_lock.acquire()
try:
# Choose those stable nodes to cache
tmp = self.nodePool.values()
tmp.sort(key=lambda x: x["timestamp"])
tmp = tmp[:500]
tmp = random.sample(tmp, min(100,len(tmp)))
# Cache the nodes
obj = []
for v in tmp:
try:
n = {}
n["id"] = v["id"]
n["host"] = v["host"]
n["port"] = v["port"]
n["timestamp"] = v["timestamp"]
n["lastupdate"] = v["lastupdate"]
obj.append(n)
except Exception, err:
print "Exception:Maintainer.serialize():loop:", err
except Exception, err:
print "Exception:Maintainer.serialize():", err
self.nodePool_lock.release()
print "longest", time.time()-tmp[0]["timestamp"]
f = open("nodescache", "w")
pickle.Pickler(f).dump(obj)
f.close()
pass
def start_listener(self):
while True:
try:
msg, addr = self.isock.recvfrom(PACKET_LEN)
msgTID, msgType, msgContent = self.krpc.decodeRsp(msg)
if msgType==RSP and "nodes" in msgContent:
if len(self.nodePool) < 2*ACTIVE_THRESHOLD:
self.processNodes(unpackNodes(msgContent["nodes"]))
if msgType==RSP and "id" in msgContent:
id = msgContent["id"]
if id != self.id:
if id in self.nodePool or len(self.nodePool) < 2*ACTIVE_THRESHOLD:
self.addNode( {"id":id, "host":addr[0], "port":addr[1]}, addr[0] )
self.respondent += 1
except Exception, err:
print "Exception:Maintainer.listener():", err
pass
def start_sender(self):
while True:
try:
# Check are there any nodes in the queue waiting for processing
node = self.nodeQueue.get(True)
if node and len(self.nodePool)<1.5*ACTIVE_THRESHOLD:
self.findNode(node["host"], node["port"], newID(), node["id"])
except Exception, err:
print "Exception:Maintainer.start_sender()", err
pass
def start_service(self):
t1 = threading.Thread(target=self.start_listener, args=())
t1.daemon = True
t1.start()
t2 = threading.Thread(target=self.start_sender, args=())
t2.daemon = True
t2.start()
while True:
try:
now = time.time()
# Should we request more nodes?
if int(now)%10==0 and len(self.nodePool)<ACTIVE_THRESHOLD:
self.bootstrap()
# Scan nodePool, kick out the dead node
if int(now)%15==0:
self.scan_nodePool()
# Cache the nodes to file
if int(now)%300==0:
self.serialize()
self.info()
time.sleep(1)
except KeyboardInterrupt:
break
except Exception, err:
print "Exception:Maintainer.start_service()", err
pass
def info(self):
print "[NodeSet]:%i\t\t[Queue]:%i\t\t[Response]:%i" % \
(len(self.nodePool), self.nodeQueue.qsize(), self.respondent)
pass
def convergeSpeed(self,node):
if (distance(self.id, node["id"])>>148)==0:
self.tn += 1
if (time.time()-self.tntold) >= 5:
self.tnspeed = int((self.tn-self.tnold)/(time.time()-self.tntold))
self.tnold = self.tn
self.tntold = time.time()
pass
if __name__=="__main__":
now = time.time()
maintainer = Maintainer()
maintainer.start_service()
print "%.2f minutes" % ((time.time() - now)/60.0)
pass
| python |
import itertools
import json
import logging
import os
import subprocess
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
import click
import pip_api
import requests
from cachecontrol import CacheControl
# from pipdownload.settings import SETTINGS_FILE
from pipdownload import logger, settings
from pipdownload.utils import (
TempDirectory,
download as normal_download,
get_file_links,
mkurl_pypi_url,
quiet_download,
resolve_package_file,
)
from tzlocal import get_localzone
sess = requests.Session()
session = CacheControl(sess)
@click.command()
@click.argument("packages", nargs=-1)
@click.option(
"-i",
"--index-url",
"index_url",
default="https://pypi.org/simple",
type=click.STRING,
help="Pypi index.",
)
@click.option(
"-r",
"--requirement",
"requirement_file",
type=click.Path(exists=True, file_okay=True, resolve_path=True),
help="Requirements File.",
)
@click.option(
"-d",
"--dest",
"dest_dir",
type=click.Path(exists=False, file_okay=False, writable=True, resolve_path=True),
help="Destination directory.",
)
@click.option(
"-s",
"--suffix",
"whl_suffixes",
type=click.STRING,
multiple=True,
hidden=True,
help="Suffix of whl packages except `none-any` `tar.gz` `zip`.\n"
'Deprecated, use "-p/--platform-tag" instead!',
)
@click.option(
"-p",
"--platform-tag",
"platform_tags",
type=click.STRING,
multiple=True,
help="Suffix of whl packages except 'none-any', like 'win_amd64', 'manylinux1_x86_64', 'linux_i386' "
"and so on. It can be specified multiple times. This is an option to replace option 'suffix'. "
"You can even specify 'manylinux' to download packages contain 'manylinux1_x86_64', "
"'manylinux2010_x84_64', 'manylinux2014_x86_64'.",
)
@click.option(
"-py",
"--python-version",
"python_versions",
type=click.STRING,
multiple=True,
help="Version of python to be downloaded. More specifically, this is the abi tag of the Python package. "
"It can be specified multiple times. Like: 'cp38', 'cp37', 'cp36', 'cp35', 'cp27' and so on.",
)
@click.option(
"-q",
"--quiet",
is_flag=True,
help="When specified, logs and progress bar will not be shown.",
)
@click.option(
"--no-source",
"no_source",
is_flag=True,
help="When specified, the source package of the project that provides wheel package will not be "
"downloaded.",
)
@click.option(
"--show-config",
"show_config",
is_flag=True,
help="When specified, the config file will be created if not exists and the path will be shown later.",
)
@click.option(
"--show-urls",
"show_urls",
is_flag=True,
help=("When specified, all of downloaded urls will be printed as an report list, with library name before them. " +
"For use in other tools for checking the libraries."),
)
def pipdownload(
packages,
index_url,
requirement_file,
dest_dir,
whl_suffixes,
platform_tags,
python_versions,
quiet,
no_source,
show_config,
show_urls
):
"""
pip-download is a tool which can be used to download python projects and their dependencies listed on
pypi's `download files` page. It can be used to download Python packages across system platforms and
Python versions.
"""
if show_config:
if not Path(settings.SETTINGS_FILE).exists():
Path(settings.SETTINGS_FILE).parent.mkdir(parents=True, exist_ok=True)
# Path(SETTINGS_FILE).touch()
with open(settings.SETTINGS_FILE, "w", encoding="utf8") as f:
json.dump({}, f)
click.echo(f"The config file is {settings.SETTINGS_FILE}.")
sys.exit(0)
if Path(settings.SETTINGS_FILE).exists():
with open(settings.SETTINGS_FILE, "r") as f:
try:
settings_dict = json.loads(f.read(), object_pairs_hook=OrderedDict)
except json.decoder.JSONDecodeError:
logger.error(
f"The config file {settings.SETTINGS_FILE} is not correct, it should be a json object."
)
sys.exit(-2)
if not python_versions:
python_versions = settings_dict.get("python-versions", None)
if python_versions:
click.echo(f"Using `python-versions` in config file.")
if not (platform_tags or whl_suffixes):
platform_tags = settings_dict.get("platform-tags", None)
if platform_tags:
click.echo(f"Using `platform-tags` in config file.")
tz = get_localzone()
if tz.zone in ["Asia/Shanghai", "Asia/Chongqing"]:
index_url = "https://mirrors.aliyun.com/pypi/simple/"
if whl_suffixes:
warnings.warn(
"Option '-s/--suffix' has been deprecated. Please use '-p/--platform-tag' instead."
)
platform_tags = whl_suffixes
if quiet:
logger.setLevel(logging.ERROR)
download = quiet_download
else:
download = normal_download
url_list = []
if not dest_dir:
dest_dir = os.getcwd()
else:
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
# dest_dir = os.path.abspath(dest_dir)
if requirement_file:
packages_extra_dict = pip_api.parse_requirements(requirement_file)
packages_extra = {str(value) for value in packages_extra_dict.values()}
else:
packages_extra = set()
for package in itertools.chain(packages_extra, packages):
with TempDirectory(delete=True) as directory:
logger.info(
"We are using pip download command to download package %s" % package
)
logger.info("-" * 50)
try:
command = [
sys.executable,
"-m",
"pip",
"download",
"-i",
index_url,
"--dest",
directory.path,
package,
]
if quiet:
command.extend(["--progress-bar", "off", "-qqq"])
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
logger.error(
"Sorry, we can not use pip download to download the package %s,"
" and Exception is below" % package
)
logger.error(e)
raise
file_names = os.listdir(directory.path)
for file_name in file_names:
python_package = resolve_package_file(file_name)
url_list.append(python_package)
if python_package.name is None:
logger.warning(
"Can not resolve a package's name and version from a downloaded package. You shuold "
"create an issue maybe."
)
continue
url = mkurl_pypi_url(index_url, python_package.name)
try:
r = session.get(url)
for file in get_file_links(r.text, url, python_package):
url_list.append(file)
if "none-any" in file:
if "py2.py3" in file_name or not python_versions:
download(file, dest_dir)
elif [1 for x in python_versions if "-"+x+"-" in file]:
download(file, dest_dir)
continue
if ".tar.gz" in file or ".zip" in file:
if not no_source:
download(file, dest_dir)
continue
eligible = True
if platform_tags:
for tag in platform_tags:
if tag in file:
eligible = True
break
else:
eligible = False
if not eligible:
continue
if python_versions:
for version in python_versions:
if version in file:
eligible = True
break
else:
eligible = False
if eligible:
download(file, dest_dir)
except ConnectionError as e:
logger.error(
"Can not get information about package %s, and the Exception is below.",
python_package.name,
)
logger.error(e)
raise
logger.info("All packages have been downloaded successfully!")
if show_urls:
logger.setLevel(logging.INFO)
logger.error("List of files downloaded :")
for entry in url_list:
logger.info(entry)
return url_list
if __name__ == "__main__":
pipdownload()
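# Example usage (a sketch; the console-script name `pip-download` follows the help text
# above, and the config keys match what this command reads from SETTINGS_FILE):
#
#   pip-download -p manylinux -py cp38 -d ./packages requests
#   pip-download -r requirements.txt --no-source --show-urls
#
# Example SETTINGS_FILE content (a JSON object with optional defaults):
#
#   {"python-versions": ["cp38"], "platform-tags": ["manylinux", "win_amd64"]}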
| python |
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2017 Oliver Ainsworth
# Modifications (remove py2) by (C) Stefan Tapper 2021
import enum
import itertools
import rf2settings.valve
from rf2settings.valve import messages, util
REGION_US_EAST_COAST = 0x00
REGION_US_WEST_COAST = 0x01
REGION_SOUTH_AMERICA = 0x02
REGION_EUROPE = 0x03
REGION_ASIA = 0x04
REGION_AUSTRALIA = 0x05
REGION_MIDDLE_EAST = 0x06
REGION_AFRICA = 0x07
REGION_REST = 0xFF
MASTER_SERVER_ADDR = ("hl2master.steampowered.com", 27011)
class Duplicates(enum.Enum):
"""Behaviour for duplicate addresses.
These values are intended to be used with :meth:`MasterServerQuerier.find`
to control how duplicate addresses returned by the master server are
treated.
:cvar KEEP: All addresses are returned, even duplicates.
:cvar SKIP: Skip duplicate addresses.
:cvar STOP: Stop returning addresses when a duplicate is encountered.
"""
KEEP = "keep"
SKIP = "skip"
STOP = "stop"
class MasterServerQuerier(rf2settings.valve.BaseQuerier):
"""Implements the Source master server query protocol
https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol
.. note::
Instantiating this class creates a socket. Be sure to close the
querier once finished with it. See :class:`rf2settings.valve.BaseQuerier`.
"""
def __init__(self, address=MASTER_SERVER_ADDR, timeout=10.0):
super(MasterServerQuerier, self).__init__(address, timeout)
def __iter__(self):
"""An unfitlered iterator of all Source servers
This will issue a request for an unfiltered set of server addresses
for each region. Addresses are received in batches but returning
a completely unfiltered set will still take a long time and be
prone to timeouts.
.. note::
If a request times out then the iterator will terminate early.
Previous versions would propagate a :exc:`NoResponseError`.
See :meth:`.find` for making filtered requests.
"""
return self.find(region="all")
def _query(self, region, filter_string):
"""Issue a request to the master server
Returns a generator which yields ``(host, port)`` addresses as
returned by the master server.
Addresses are returned in batches therefore multiple requests may be
dispatched. Because of this any of these requests may result in a
:exc:`NoResponseError` being raised. In such circumstances the iterator
will exit early. Otherwise the iteration continues until the final
address is reached which is indicated by the master server returning
a 0.0.0.0:0 address.
.. note::
The terminating 0.0.0.0:0 is not yielded by the iterator.
``region`` should be a valid numeric region identifier and
``filter_string`` should be a formatted filter string as described
on the Valve developer wiki:
https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol#Filter
"""
last_addr = "0.0.0.0:0"
first_request = True
while first_request or last_addr != "0.0.0.0:0":
first_request = False
self.request(messages.MasterServerRequest(
region=region, address=last_addr, filter=filter_string))
try:
raw_response = self.get_response()
except rf2settings.valve.NoResponseError:
return
else:
response = messages.MasterServerResponse.decode(raw_response)
for address in response["addresses"]:
last_addr = "{}:{}".format(
address["host"], address["port"])
if not address.is_null:
yield address["host"], address["port"]
def _deduplicate(self, method, query):
"""Deduplicate addresses in a :meth:`._query`.
The given ``method`` should be a :class:`Duplicates` object. The
``query`` is an iterator as returned by :meth:`._query`.
"""
seen = set()
if method is Duplicates.KEEP:
for address in query:
yield address
else:
for address in query:
if address in seen:
if method is Duplicates.SKIP:
continue
elif method is Duplicates.STOP:
break
yield address
seen.add(address)
def _map_region(self, region):
"""Convert string to numeric region identifier
If given a non-string then a check is performed to ensure it is a
valid region identifier. If it's not, ValueError is raised.
Returns a list of numeric region identifiers.
"""
if isinstance(region, str):
try:
regions = {
"na-east": [REGION_US_EAST_COAST],
"na-west": [REGION_US_WEST_COAST],
"na": [REGION_US_EAST_COAST, REGION_US_WEST_COAST],
"sa": [REGION_SOUTH_AMERICA],
"eu": [REGION_EUROPE],
"as": [REGION_ASIA, REGION_MIDDLE_EAST],
"oc": [REGION_AUSTRALIA],
"af": [REGION_AFRICA],
"rest": [REGION_REST],
"all": [REGION_US_EAST_COAST,
REGION_US_WEST_COAST,
REGION_SOUTH_AMERICA,
REGION_EUROPE,
REGION_ASIA,
REGION_AUSTRALIA,
REGION_MIDDLE_EAST,
REGION_AFRICA,
REGION_REST],
}[region.lower()]
except KeyError:
raise ValueError(
"Invalid region identifer {!r}".format(region))
else:
# Just assume it's an integer identifier, we'll validate below
regions = [region]
for reg in regions:
if reg not in {REGION_US_EAST_COAST,
REGION_US_WEST_COAST,
REGION_SOUTH_AMERICA,
REGION_EUROPE,
REGION_ASIA,
REGION_AUSTRALIA,
REGION_MIDDLE_EAST,
REGION_AFRICA,
REGION_REST}:
raise ValueError("Invalid region identifier {!r}".format(reg))
return regions
def find(self, region="all", duplicates=Duplicates.SKIP, **filters):
"""Find servers for a particular region and set of filtering rules
This returns an iterator which yields ``(host, port)`` server
addresses from the master server.
``region`` specifies which regions to restrict the search to. It can
either be a ``REGION_`` constant or a string identifying the region.
Alternately a list of the strings or ``REGION_`` constants can be
used for specifying multiple regions.
The following region identification strings are supported:
+---------+-----------------------------------------+
| String | Region(s) |
+=========+=========================================+
| na-east | East North America |
+---------+-----------------------------------------+
| na-west | West North America |
+---------+-----------------------------------------+
| na | East North America, West North America |
+---------+-----------------------------------------+
| sa | South America |
+---------+-----------------------------------------+
| eu | Europe |
+---------+-----------------------------------------+
| as | Asia, the Middle East |
+---------+-----------------------------------------+
| oc | Oceania/Australia |
+---------+-----------------------------------------+
| af | Africa |
+---------+-----------------------------------------+
| rest | Unclassified servers |
+---------+-----------------------------------------+
| all | All of the above |
+---------+-----------------------------------------+
.. note::
"``rest``" corresponds to all servers that don't fit with any
other region. What causes a server to be placed in this region
by the master server isn't entirely clear.
The region strings are not case sensitive. Specifying an invalid
region identifier will raise a ValueError.
As well as region-based filtering, alternative filters are supported
which are documented on the Valve developer wiki.
https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol#Filter
This method accepts keyword arguments which are used for building the
filter string that is sent along with the request to the master server.
Below is a list of all the valid keyword arguments:
+------------+-------------------------------------------------------+
| Filter | Description |
+============+=======================================================+
| type | Server type, e.g. "dedicated". This can be a |
| | ``ServerType`` instance or any value that can be |
| | converted to a ``ServerType``. |
+------------+-------------------------------------------------------+
| secure | Servers using Valve anti-cheat (VAC). This should be |
| | a boolean. |
+------------+-------------------------------------------------------+
| gamedir | A string specifying the mod being ran by the server. |
| | For example: ``tf``, ``cstrike``, ``csgo``, etc.. |
+------------+-------------------------------------------------------+
| map | Which map the server is running. |
+------------+-------------------------------------------------------+
| linux | Servers running on Linux. Boolean. |
+------------+-------------------------------------------------------+
| empty | Servers which are not empty. Boolean. |
+------------+-------------------------------------------------------+
| full | Servers which are full. Boolean. |
+------------+-------------------------------------------------------+
| proxy | SourceTV relays only. Boolean. |
+------------+-------------------------------------------------------+
| napp | Servers not running the game specified by the given |
| | application ID. E.g. ``440`` would exclude all TF2 |
| | servers. |
+------------+-------------------------------------------------------+
| noplayers | Servers that are empty. Boolean |
+------------+-------------------------------------------------------+
| white | Whitelisted servers only. Boolean. |
+------------+-------------------------------------------------------+
| gametype | Server which match *all* the tags given. This should |
| | be set to a list of strings. |
+------------+-------------------------------------------------------+
| gamedata | Servers which match *all* the given hidden tags. |
| | Only applicable for L4D2 servers. |
+------------+-------------------------------------------------------+
| gamedataor | Servers which match *any* of the given hidden tags. |
| | Only applicable to L4D2 servers. |
+------------+-------------------------------------------------------+
.. note::
Your mileage may vary with some of these filters. There's no
real guarantee that the servers returned by the master server will
actually satisfy the filter. Because of this it's advisable to
explicitly check for compliance by querying each server
individually. See :mod:`rf2settings.valve.a2s`.
The master server may return duplicate addresses. By default, these
duplicates are excluded from the iterator returned by this method.
See :class:`Duplicates` for controlling this behaviour.
"""
if isinstance(region, (int, str)):
regions = self._map_region(region)
else:
regions = []
for reg in region:
regions.extend(self._map_region(reg))
filter_ = {}
for key, value in filters.items():
if key in {"secure", "linux", "empty",
"full", "proxy", "noplayers", "white"}:
value = int(bool(value))
elif key in {"gametype", "gamedata", "gamedataor"}:
value = [str(elt)
for elt in value if str(elt)]
if not value:
continue
value = ",".join(value)
elif key == "napp":
value = int(value)
elif key == "type":
if not isinstance(value, util.ServerType):
value = util.ServerType(value).char
else:
value = value.char
filter_[key] = str(value)
# Order doesn't actually matter, but it makes testing easier
filter_ = sorted(filter_.items(), key=lambda pair: pair[0])
filter_string = "\\".join([part for pair in filter_ for part in pair])
if filter_string:
filter_string = "\\" + filter_string
queries = []
for region in regions:
queries.append(self._query(region, filter_string))
query = self._deduplicate(
Duplicates(duplicates), itertools.chain.from_iterable(queries))
for address in query:
yield address
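if __name__ == "__main__":
    # Minimal usage sketch: print a handful of non-empty EU Counter-Strike servers.
    # find() and the filter keywords are documented above; close() is assumed to come
    # from rf2settings.valve.BaseQuerier, as the class docstring's note suggests.
    querier = MasterServerQuerier()
    try:
        for i, (host, port) in enumerate(querier.find(region="eu",
                                                      gamedir="cstrike",
                                                      empty=True)):
            print("{}:{}".format(host, port))
            if i >= 4:
                break
    finally:
        querier.close()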
| python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import platform
import re
from setuptools import setup, Extension
python_version = platform.python_version()
system_name = platform.system()
print("build for python{} on {}".format(python_version, system_name))
# Arguments
actrie_dir = ""
alib_dir = ""
def get_root_dir():
return os.path.dirname(os.path.realpath(__file__))
if not actrie_dir:
actrie_dir = get_root_dir()
if not alib_dir:
alib_dir = os.path.join(actrie_dir, 'deps', 'alib')
def build_library():
os.system(os.path.join(actrie_dir, "utils", "build.sh"))
# build_library()
wrap_sources = [
os.path.join(actrie_dir, 'actrie', 'src', 'wrap.c')
]
compile_args = []
if system_name == "Windows":
compile_args.append("/utf-8")
else:
compile_args.append("-fno-strict-aliasing")
library_dirs = [
# os.path.join(alib_dir, 'lib'),
os.path.join(actrie_dir, 'lib')
]
libraries = ['actrie', 'alib']
include_dirs = [
os.path.join(alib_dir, 'include'),
os.path.join(actrie_dir, 'include')
]
actrie = Extension('actrie._actrie',
sources=wrap_sources,
extra_compile_args=compile_args,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries)
kwds = {}
# Read version from actrie/__init__.py
pat = re.compile(r'__version__\s*=\s*(\S+)', re.M)
data = open(os.path.join(actrie_dir, 'actrie', '__init__.py')).read()
kwds['version'] = eval(pat.search(data).group(1))
setup(name="actrie",
description="Aho-Corasick automation for large-scale multi-pattern matching.",
author="James Yin",
author_email="[email protected]",
url="https://github.com/ifplusor/actrie",
license="BSD",
packages=['actrie', 'actrie.example'],
ext_modules=[actrie],
classifiers=[
"Programming Language :: C",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Utilities"
],
keywords=["matcher", "trie", "aho-corasick automation", "ac-automation",
"string matching", "string search", "string matcher"],
zip_safe=False,
**kwds)
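# Build/install sketch (assumes the native actrie/alib libraries have already been built
# into the `lib` directory referenced above, e.g. via utils/build.sh):
#
#   python setup.py build_ext
#   pip install .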
| python |
# @author kingofthenorth
# @filename problemsearch.py
# @description Assignment 2
# @class CS 550
# @instructor Roch
# @notes N/A
from collections import deque
from basicsearch_lib02.queues import PriorityQueue
from basicsearch_lib02.searchrep import (Node, Problem)
from explored import Explored
def graph_search(problem: Problem, verbose=False, debug=False):
"""graph_search(problem, verbose, debug) - Given a problem representation
(instance of basicsearch_lib02.representation.Problem or derived class),
attempt to solve the problem.
If debug is True, debugging information will be displayed.
if verbose is True, the following information will be displayed:
Number of moves to solution
List of moves and resulting puzzle states
Example:
Solution in 25 moves
Initial state
0 1 2
0 4 8 7
1 5 . 2
2 3 6 1
Move 1 - [0, -1]
0 1 2
0 4 8 7
1 . 5 2
2 3 6 1
Move 2 - [1, 0]
0 1 2
0 4 8 7
1 3 5 2
2 . 6 1
... more moves ...
0 1 2
0 1 3 5
1 4 2 .
2 6 7 8
Move 22 - [-1, 0]
0 1 2
0 1 3 .
1 4 2 5
2 6 7 8
Move 23 - [0, -1]
0 1 2
0 1 . 3
1 4 2 5
2 6 7 8
Move 24 - [1, 0]
0 1 2
0 1 2 3
1 4 . 5
2 6 7 8
If no solution were found (not possible with the puzzles we
are using), we would display:
No solution found
Returns a tuple (path, nodes_explored) where:
path - list of actions to solve the problem or None if no solution was found
nodes_explored - Number of nodes explored (dequeued from frontier)
"""
# Establish frontier set and nodes
frontier = PriorityQueue()
frontier.append(Node(problem, problem.initial))
node = frontier.pop()
popping = True
if node.expand(node.problem)[0].g < 0:
# Depth First Search
frontier = deque()
frontier.append(Node(problem, problem.initial))
elif node.expand(node.problem)[0].h < 2:
# Breadth First Search
popping = False
frontier = deque()
frontier.append(Node(problem, problem.initial))
else:
# Manhattan Search
frontier.append(node)
# Working with the hash
frontier_hash = Explored()
frontier_hash.add(problem.initial.state_tuple())
finished = False
nodes_explored = 0
explored = Explored()
while not finished:
if popping:
node = frontier.pop() # Manhattan and DFS
else:
node = frontier.popleft() # BFS
if debug:
print("Node popped:", str(node))
explored.add(node.state.state_tuple())
nodes_explored += 1
if node.state.solved():
if debug:
print("Solution found!")
solution_path = node.path()
finished = True
if verbose:
print_solution(solution_path)
return solution_path, nodes_explored
else:
for child in node.expand(node.problem):
if not explored.exists(child.state.state_tuple()) and not frontier_hash.exists(
child.state.state_tuple()):
frontier.append(child)
                    frontier_hash.add(child.state.state_tuple())
elif debug:
print("Skipping...", child)
pass
finished = len(frontier) == 0
if debug:
print("")
if verbose:
print("No solution found")
return None, nodes_explored
def print_solution(path: tuple):
print("Amount of moves taken: %d" % (len(path) - 1))
print("Initial State...")
print(path[0])
for i in range(1, len(path)):
print("Move %d - %s" % (i, path[i].action))
print(path[i].state)
print("")
| python |
# @name: Katana-DorkScanner
# @repo: https://github.com/adnane-X-tebbaa/Katana
# @author: Adnane-X-tebbaa (AXT)
# Scada-file V2.2
# I used dorks for the most used PLCs
"""
MIT License
Copyright (c) 2020 adnane tebbaa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import os
import time
from googlesearch import search
from termcolor import colored, cprint
import random
def clear():
    if os.name == 'nt':
        _ = os.system('cls')
    else:
        _ = os.system('clear')
from http import cookiejar
class BlockAll(cookiejar.CookiePolicy):
return_ok = set_ok = domain_return_ok = path_return_ok = lambda self, *args, **kwargs: False
netscape = True
rfc2965 = hide_cookie2 = False
NPP = """
) ) )
( ( (
) ) )
(~~~~~~~~~)
| POWER | Katana-ds V1.5.3
| | Find online PLCs
| _._ by AXT (adnane-X-tebbaa)
| / `\
| | N |
| | |~~~~~~~~~~~~~~|
/ | ||~~~~~~~~| |
__/_____|___||__|||___|____|__________________________________________
Note: That will take some time
"""
print (NPP)
TLD = ["com","com.tw","co.in"]
beta = random.choice(TLD)
betax = random.choice(TLD)
print (" ")
print(colored('[+] Searching... ', 'green'))
B = """ intitle:"Rockwell Automation" "Device Name" "Uptime" """
query = B
# ****
def spinning_cursor():
while True:
for cursor in '|/-\\':
yield cursor
spinner = spinning_cursor()
for _ in range(100):
sys.stdout.write(next(spinner))
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\b')
#*****
for gamma in search(query, tld=beta,stop=50, num=10,pause=2):
print(colored ('[+] Found > ' ,'yellow') + (gamma) )
print(colored('[+] 20% done ', 'green'))
B = """ inurl:dtm.html intitle:1747-L551 """
query = B
# ****
def spinning_cursor():
while True:
for cursor in '|/-\\':
yield cursor
spinner = spinning_cursor()
for _ in range(100):
sys.stdout.write(next(spinner))
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\b')
#*****
for gamma in search(query, tld=betax, num=10,stop=50,pause=2):
print(colored ('[+] Found > ' ,'yellow') + (gamma) )
print(colored('[+] 40% done ', 'green' )) # more scada dorks will be added here
from Modes import Scada2
| python |
import sqlite3
from functools import partial
import multiprocessing as mp
def create(args):
p,name,sql = args
db = sqlite3.connect(name)
db.execute(sql)
class mydb:
def __init__(self, w):
self.pool = mp.Pool(w)
def create(self, tab, name_tmpl, parts=[0]):
sql = 'create table if not exists {}'.format(tab)
args = [(p,name_tmpl.format(p),sql) for p in parts]
self.pool.map(create,args)
def insert_iter(self): pass # TODO API
if __name__ == "__main__":
db = mydb(4)
db.create('main(a,b,c)','data/v4_{}.sqlite',[1,2,3,4,5])
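    # Each partition p now has its own file data/v4_<p>.sqlite containing the same
    # 'main(a,b,c)' table; table creation is fanned out across the 4 pool workers
    # (this assumes the data/ directory already exists).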
| python |
#!/usr/bin/env python
"""
Hacky script for comparing output set to gold set.
Usage:
just run python compare_solution.py -h
"""
import argparse
import fileinput
import sys
import re
def compare_sets(s1, s2):
"""Compare the sets."""
if len(s1) != 0:
# return s1 == s2
return s1 - s2
return False
def read_from_stdin():
"""Collect piped elements in set."""
s1 = set()
for line in fileinput.input():
s1.add(line.strip())
return s1
def order_output(s):
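    # Sort lines by the time step embedded just before a closing ").";
    # e.g. a line containing "...,42)." (illustrative) is keyed by 42.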
    time_step_matches = (re.search(r"(\d{1,4})\)\.", i) for i in s)
time_steps = ((item, int(m.group(1)))
for m, item in zip(time_step_matches, s) if m)
return [i[0] for i in sorted(time_steps, key=lambda x: x[1])]
def file2set(file_obj):
"""Turn lines in a file into a set."""
return set(line.strip() for line in file_obj)
def read_from_file(file_name):
"""Read set from a file"""
with open(file_name, "r") as f:
return file2set(f)
if __name__ == "__main__":
prs = argparse.ArgumentParser()
prs.add_argument('expected', help="Name of gold standard file")
prs.add_argument(
'ours',
nargs="?",
help="Name of our output. "
"If not given, stdin is used.")
args = prs.parse_args()
expected_set = read_from_file(args.expected)
if args.ours:
our_set = read_from_file(args.ours)
else:
our_set = file2set(sys.stdin)
# print("\ncorrect solution: {}\n".format(compare_sets(test, gold)))
# print("\ndifferences in set 1 and set 2:\n\n {}\n".format(compare_sets(test, gold)))
test_ordered = order_output(our_set - expected_set)
gold_ordered = order_output(expected_set - our_set)
with open("our-output.lp", "w") as f:
f.write("\n".join(test_ordered))
with open("expected-output.lp", "w") as f:
f.write("\n".join(gold_ordered))
| python |
import logging
from airflow.decorators import dag, task
from datetime import datetime, timedelta
from airflow.utils.dates import days_ago
from airflow.providers.amazon.aws.operators.dms_create_task import DmsCreateTaskOperator
from airflow.providers.amazon.aws.operators.dms_start_task import DmsStartTaskOperator
from airflow.providers.amazon.aws.operators.dms_stop_task import DmsStopTaskOperator
from airflow.providers.amazon.aws.operators.dms_delete_task import DmsDeleteTaskOperator
from airflow.providers.amazon.aws.sensors.dms_task import DmsTaskCompletedSensor
default_args = {
'owner': 'crn-data',
"retries": 2,
"retry_delay": timedelta(seconds=30),
}
env = 'dev'
REPLICATION_TASK_ID = 'rds-to-crm-redshift-test'
SOURCE_ENDPOINT_ARN = 'arn:aws:dms:us-east-1:341484775232:endpoint:STD2AIN4MHPTLCYRLKNGYPHDUSQM7SQLGDKZDHY'
TARGET_ENDPOINT_ARN = 'arn:aws:dms:us-east-1:341484775232:endpoint:4L3AIBD3U4PW37TNROXLBCLDRTDPVI5MO2RG2CA'
REPLICATION_INSTANCE_ARN = 'arn:aws:dms:us-east-1:341484775232:rep:JZ6JLH3PSJN4HZK7AXWYZ22YKLGEKWEO7QUE52Q'
TABLE_MAPPINGS = {
"rules": [
{
"rule-type": "transformation",
"rule-id": "1",
"rule-name": "1",
"rule-target": "table",
"object-locator": {
"schema-name": "treat",
"table-name": "points_type"
},
"rule-action": "replace-prefix",
"value": "crn_points_",
"old-value": "points_"
},
{
"rule-type": "selection",
"rule-id": "8",
"rule-name": "8",
"object-locator": {
"schema-name": "treat",
"table-name": "points_type"
},
"rule-action": "include",
"filters": []
}
]
}
# TABLE_MAPPINGS = {
# "rules": [
# {
# "rule-type": "transformation",
# "rule-id": "1",
# "rule-name": "1",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "treat_offer"
# },
# "rule-action": "replace-prefix",
# "value": "crn_treat_",
# "old-value": "treat_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "2",
# "rule-name": "2",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_used"
# },
# "rule-action": "replace-prefix",
# "value": "crn_points_",
# "old-value": "points_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "3",
# "rule-name": "3",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_type"
# },
# "rule-action": "replace-prefix",
# "value": "crn_points_",
# "old-value": "points_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "4",
# "rule-name": "4",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "cust_loyalty_tier"
# },
# "rule-action": "replace-prefix",
# "value": "crn_cust_",
# "old-value": "cust_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "5",
# "rule-name": "5",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "menu_item_xref"
# },
# "rule-action": "replace-prefix",
# "value": "crn_menu_",
# "old-value": "menu_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "6",
# "rule-name": "6",
# "rule-target": "schema",
# "object-locator": {
# "schema-name": "treat"
# },
# "rule-action": "replace-prefix",
# "value": "crm",
# "old-value": "treat"
# },
# {
# "rule-type": "transformation",
# "rule-id": "7",
# "rule-name": "7",
# "rule-target": "schema",
# "object-locator": {
# "schema-name": "crn"
# },
# "rule-action": "replace-prefix",
# "value": "crm",
# "old-value": "crn"
# },
# {
# "rule-type": "selection",
# "rule-id": "8",
# "rule-name": "8",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "treat_offer"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "9",
# "rule-name": "9",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_used"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "10",
# "rule-name": "10",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_type"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "11",
# "rule-name": "11",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "cust_loyalty_tier"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "12",
# "rule-name": "12",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "customer_activity"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "13",
# "rule-name": "13",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "menu_item_xref"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "transformation",
# "rule-id": "14",
# "rule-name": "14",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "customer_activity"
# },
# "rule-action": "replace-prefix",
# "value": "crn_customer_",
# "old-value": "customer_"
# }
# ]
# }
redshift_conn_id = f'crm_redshift_{env}'
aws_connection = 'aws-default'
@dag(dag_id='rds_to_redshift',
default_args=default_args,
schedule_interval='0 10 * * *',
start_date=days_ago(1),
catchup=True,
tags=[f'crn-data-{env}'])
def rds_to_redshift():
"""
Copies RTS RDS data to CRN Redshift
"""
# [START howto_dms_operators]
create_task = DmsCreateTaskOperator(
task_id='create_task',
replication_task_id=REPLICATION_TASK_ID,
source_endpoint_arn=SOURCE_ENDPOINT_ARN,
target_endpoint_arn=TARGET_ENDPOINT_ARN,
replication_instance_arn=REPLICATION_INSTANCE_ARN,
table_mappings=TABLE_MAPPINGS,
)
start_task = DmsStartTaskOperator(
task_id='start_task',
replication_task_arn=create_task.output,
)
wait_for_completion = DmsTaskCompletedSensor(
task_id='wait_for_completion',
replication_task_arn=create_task.output,
)
stop_task = DmsStopTaskOperator(
        task_id='stop_task',
replication_task_arn=create_task.output,
)
start_task >> wait_for_completion >> stop_task
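    # Hypothetical cleanup step (sketch only): DmsDeleteTaskOperator is imported above
    # but unused; if the replication task should also be removed after it is stopped,
    # something along these lines could be appended to the chain:
    # delete_task = DmsDeleteTaskOperator(
    #     task_id='delete_task',
    #     replication_task_arn=create_task.output,
    # )
    # stop_task >> delete_task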
rds_to_redshift = rds_to_redshift() | python |
import numpy as np
s = '''73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450'''
s = s.replace('\n', '').replace(' ', '')
s = [int(ch) for ch in s]
N = len(s)
K = 13
answer = 0
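# Slide a window of K consecutive digits across the string and keep the largest
# product; any window containing a 0 contributes 0 and can never win.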
for i in range(K, N + 1):
p = np.prod(s[i-K:i])
if p > answer:
answer = p
# 23514624000
print(answer) | python |
# step: build the vectorizer for year_month + general, f > 2, ngram = 3
#
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
import numpy as np
import pickle
from sklearn.metrics import classification_report, f1_score
from scipy.sparse import lil_matrix
from imblearn.over_sampling import RandomOverSampler
from multiprocessing import Pool
def data_batch_loader(
data_name, test_time_label, file_type='year',
batch_size=100, mode='train'):
data_path = './data/'+data_name+'/'+data_name+'_'+file_type+'_sample.tsv'
time_labels = sorted(
[
file_name.split('.')[0].split('_')[1].strip()
for file_name in os.listdir('./vects1/' + data_name + '/')
if file_type in file_name]
)
valid_time_label = time_labels[-3]
if not test_time_label:
test_time_label = time_labels[-2] # the latest year
batch_data = {
'data': [], 'label': [], 'time_label': []
}
all_data = []
all_label = []
all_time_label = []
with open(data_path) as datafile:
datafile.readline()
for line in datafile:
infos = line.strip().split('\t')
if mode == 'train' and infos[1] == test_time_label:
continue
if mode == 'test':
if infos[1] != test_time_label:
continue
if mode == 'valid':
if infos[1] != valid_time_label:
continue
all_data.append(infos[0])
all_label.append(infos[2])
all_time_label.append(infos[1])
if mode == 'train': # over sampling
print('\t\tOver Sampling.......')
sampler = RandomOverSampler(random_state=0)
indices = [[item] for item in list(range(len(all_data)))]
indices, all_label = sampler.fit_sample(indices, all_label)
all_data = [all_data[item[0]] for item in indices]
all_time_label = [all_time_label[item[0]] for item in indices]
    for item in zip(all_data, all_label, all_time_label):
        batch_data['data'].append(item[0])
        batch_data['label'].append(item[1])
        batch_data['time_label'].append(item[2])
if len(batch_data['data']) >= batch_size:
yield batch_data
batch_data = {
'data': [], 'label': [], 'time_label': [],
}
if len(batch_data['data']) > 0:
yield batch_data
def create_domain_vects(data_name, mode='year'):
data_path = './data/' + data_name + '/' + data_name + '_' + mode + '_sample.tsv'
domain_docs = {'general': []}
time_idx = 1
# load the data for domain data
print('\t\tLoading domain data')
with open(data_path) as datafile:
datafile.readline()
for line in datafile:
infos = line.strip().split('\t')
domain_docs['general'].append(infos[0])
if infos[time_idx] not in domain_docs:
domain_docs[infos[time_idx]] = list()
domain_docs[infos[time_idx]].append(infos[0])
print('\t\tFitting domain data')
for domain_name in domain_docs:
print('\t\t\tWorking on: ' + domain_name)
da_vect = TfidfVectorizer(min_df=2, ngram_range=(1, 3), stop_words='english')
da_vect.fit(domain_docs[domain_name])
pickle.dump(
da_vect,
open('./vects1/' + data_name + '/' + mode + '_' + str(domain_name) + '.pkl', 'wb')
)
return list(domain_docs.keys())
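# Each fitted vectorizer is pickled to ./vects1/<data_name>/<year|month>_<domain>.pkl;
# the special 'general' domain is fit on every document regardless of time period.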
def create_domain_clfs(data_name, test_time_label, file_type='year'):
domains = {file_type: []}
sum_fea_size = 0
fea_size = {file_type: dict()}
# get feature size of each vectorizer:
print('\t\tGet domain information.....')
for file_name in os.listdir('./vects1/' + data_name + '/'):
if file_type not in file_name:
continue
with open('./vects1/' + data_name + '/' + file_name, 'rb') as vect_pkl_f:
vect_pkl = pickle.load(vect_pkl_f)
cur_domain = file_name.split('.')[0].split('_')[1].strip()
sum_fea_size += len(vect_pkl.vocabulary_)
domains[file_type].append(cur_domain)
fea_size[file_type][cur_domain] = len(vect_pkl.vocabulary_)
print('Total feature size: ' + str(sum_fea_size))
# load the time label: year by loop the file names in the vectorizer folder
    # sort the domain list (reverse for 'year' so that 'general' comes first)
    domains[file_type] = sorted(domains[file_type], reverse=(file_type == 'year'))
clf = SGDClassifier(
loss='log', penalty='elasticnet', max_iter=2000,
l1_ratio=0.1, n_jobs=-1, tol=0.0001)
# load the data
batch_size = 1000
train_iter = data_batch_loader(
data_name, test_time_label=test_time_label, file_type=file_type)
# load the general vect
general_vect = pickle.load(open('./vects1/' + data_name + '/' + file_type + '_general.pkl', 'rb'))
print('\t\tBacth fit............')
batch_count = 0
for train_batch in train_iter:
if len(np.unique(train_batch['label'])) == 1:
continue
print('Working on batch #' + str(batch_count))
batch_count += 1
# transform the data
train_data = lil_matrix((len(train_batch['data']), sum_fea_size))
train_data[:, :fea_size[file_type]['general']] = general_vect.transform(train_batch['data'])
        start_idx = fea_size[file_type]['general']
for domain_name in domains[file_type]:
if domain_name == 'general':
continue
with open('./vects1/' + data_name + '/' + file_type + '_' + str(domain_name) + '.pkl', 'rb') as vect_pkl_f:
vect_pkl = pickle.load(vect_pkl_f)
transformed_data = vect_pkl.transform(train_batch['data'])
for label_idx in range(len(train_batch['time_label'])):
if train_batch['time_label'][label_idx] == domain_name:
train_data[label_idx, start_idx:start_idx + fea_size[file_type][domain_name]] = transformed_data[
label_idx, :]
start_idx += fea_size[file_type][domain_name] # update the start index
# partial training
train_data = train_data.tocsr()
clf.partial_fit(train_data, train_batch['label'], classes=['0', '1'])
# save the clf
print('\t\tSaving classifier............')
with open('./clfs1/' + data_name + '_' + file_type + '.pkl', 'wb') as clf_file:
pickle.dump(
clf,
clf_file
)
return clf
def run_exp(data_name, file_type, create_vects=False, create_clfs=False):
print('Working on: ' + data_name + '..............................')
if not os.path.exists('./vects1/' + data_name):
os.mkdir('./vects1/' + data_name)
if create_vects:
print('\tCreating vects.........')
domain_list = create_domain_vects(data_name, mode=file_type)
print(domain_list)
print('Creating logistic regression classifier------------')
if create_clfs:
        clf = create_domain_clfs(data_name, test_time_label=None, file_type=file_type)
    else:
        clf = pickle.load(open('./clfs1/' + data_name + '_' + file_type + '.pkl', 'rb'))
    # only load the general vectorizer for the current file_type
    gen_vect = pickle.load(open('./vects1/' + data_name + '/' + file_type + '_general.pkl', 'rb'))
fea_size = clf.coef_.shape[1] # feature size
print('Validation.....') # validation choose the 2nd latest year as the validation
lambdas = [1, 10, 100, 200, 300]
best_valid_f1 = 0
best_lambda = 1
for flip_l in lambdas:
        valid_iter = data_batch_loader(data_name, test_time_label=None, file_type=file_type, mode='valid')
y_valids = []
valid_preds = []
for valid_batch in valid_iter:
for label in valid_batch['label']:
y_valids.append(label)
valid_data = lil_matrix((len(valid_batch['data']), fea_size))
valid_data[:, :len(gen_vect.vocabulary_)] = gen_vect.transform(valid_batch['data'])
if flip_l != 1:
valid_data = valid_data * flip_l
predictions = clf.predict(valid_data)
for label in predictions:
valid_preds.append(label)
tmp_f1 = f1_score(y_true=y_valids, y_pred=valid_preds, average='weighted')
if tmp_f1 > best_valid_f1:
best_valid_f1 = tmp_f1
best_lambda = flip_l
print(data_name + ' lambda: ' + str(best_lambda))
print(data_name + ' valid f1: ' + str(best_valid_f1))
print('Testing .....')
    test_iter = data_batch_loader(data_name, test_time_label=None, file_type=file_type, mode='test')
y_preds = []
y_truth = []
print('Test by each batch')
for test_batch in test_iter:
for label in test_batch['label']:
y_truth.append(label)
# transform the test data:
test_data = lil_matrix((len(test_batch['data']), fea_size))
test_data[:, :len(gen_vect.vocabulary_)] = gen_vect.transform(test_batch['data'])
# flip lambda
test_data = test_data * best_lambda
# prediction
predictions = clf.predict(test_data)
for label in predictions:
y_preds.append(label)
my_f1 = str(f1_score(y_true=y_truth, y_pred=y_preds, average='weighted'))
my_report = classification_report(y_true=y_truth, y_pred=y_preds)
print(data_name + '----- F1-score: ' + my_f1)
with open('results.txt', 'a') as result_file:
result_file.write('Working on ' + data_name + '--------------------\n')
result_file.write(
'Best valid result: ' + str(best_valid_f1) +
', lambda flip: ' + str(best_lambda) + '\n')
result_file.write('F1: ' + my_f1 + '\n')
result_file.write(my_report)
result_file.write('\n----------------------------------------\n')
if __name__ == '__main__':
data_list = [
'amazon',
'economy',
'vaccine',
'yelp_hotel',
'yelp_rest',
'parties',
]
# multiprocess:
# p = Pool(5)
# p.map(run_exp, 'year')
# p.map(run_exp, 'month')
for file_type in ['year', 'month']:
for data in data_list:
run_exp(data, file_type=file_type, create_vects=False, create_clfs=False)
| python |
# --------------------------------------------------------------------------- #
import os
import filecmp
from arroyo import utils
import pytest
# --------------------------------------------------------------------------- #
# Asymmetric Key Tests
from arroyo.crypto import KeyAlgorithmType, EncodingType
from arroyo.crypto import asymmetric
# --------------------------------------------------------------------------- #
PASSWORD = b'password'
HERE = os.path.dirname(__file__)
# --------------------------------------------------------------------------- #
def get_public_key_filename(key_type, key_encoding):
if not isinstance(key_type, str):
key_type = key_type.value
key_type = key_type.lower()
if not isinstance(key_encoding, str):
key_encoding = key_encoding.value
key_encoding = key_encoding.lower()
key_name = "{}_public_{}.key".format(key_type, key_encoding)
return os.path.join(HERE, "keys", key_name)
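# e.g. get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM) resolves to
# os.path.join(HERE, "keys", "rsa_public_pem.key"), assuming the enum values are the
# strings "RSA" and "PEM" (illustrative).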
def get_private_key_filename(key_type, key_encoding, encrypted=False):
if not isinstance(key_type, str):
key_type = key_type.value
key_type = key_type.lower()
if not isinstance(key_encoding, str):
key_encoding = key_encoding.value
key_encoding = key_encoding.lower()
if encrypted:
key_name = "{}_private_{}_encrypted.key".format(key_type, key_encoding)
else:
key_name = "{}_private_{}.key".format(key_type, key_encoding)
return os.path.join(HERE, "keys", key_name)
class FakeTestKey(asymmetric.AsymmetricKey):
def __eq__(self, other):
pass
def to_bytes(self, *, encoding: EncodingType, fmt: str):
pass
def to_jwk(self):
return b'\x00\x01'
# --------------------------------------------------------------------------- #
@pytest.fixture(scope="session", params=EncodingType)
def public_key_encoding(request):
return request.param
@pytest.fixture(scope="session",
params=[e for e in EncodingType if e != EncodingType.OpenSSH])
def private_key_encoding(request):
return request.param
# --------------------------------------------------------------------------- #
def test_load_public_key_files(key_algorithm, public_key_encoding):
key_file = get_public_key_filename(key_algorithm, public_key_encoding)
key = asymmetric.PublicKey.from_file(key_file)
assert isinstance(key, asymmetric.PublicKey)
assert key.algorithm == key_algorithm
assert key.encoding == public_key_encoding
def test_load_private_key_files(key_algorithm, private_key_encoding):
key_file = get_private_key_filename(key_algorithm, private_key_encoding)
key = asymmetric.PrivateKey.from_file(key_file)
assert isinstance(key, asymmetric.PrivateKey)
assert key.algorithm == key_algorithm
assert key.encoding == private_key_encoding
def test_load_encrypted_private_key_files(key_algorithm, private_key_encoding):
key_file = get_private_key_filename(key_algorithm, private_key_encoding,
encrypted=True)
key = asymmetric.PrivateKey.from_file(key_file, password=PASSWORD)
assert isinstance(key, asymmetric.PrivateKey)
assert key.algorithm == key_algorithm
assert key.encoding == private_key_encoding
def test_load_encrypted_private_key_files_str_pass(key_algorithm,
private_key_encoding):
key_file = get_private_key_filename(key_algorithm, private_key_encoding,
encrypted=True)
key = asymmetric.PrivateKey.from_file(key_file, password=PASSWORD.decode())
assert isinstance(key, asymmetric.PrivateKey)
assert key.algorithm == key_algorithm
assert key.encoding == private_key_encoding
def test_load_encrypted_private_key_files_inv_pass_type(key_algorithm,
private_key_encoding):
key_file = get_private_key_filename(key_algorithm, private_key_encoding,
encrypted=True)
with pytest.raises(TypeError):
asymmetric.PrivateKey.from_file(key_file, password=12345)
def test_unsupported_key_algorithm():
class FakeSubclass(asymmetric.AsymmetricKey):
def to_bytes(self, *, encoding: EncodingType, fmt: str) -> bytes:
pass
def __eq__(self, other):
return True
with pytest.raises(TypeError):
FakeSubclass(key=None)
def test_private_key_bytes():
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
assert isinstance(bytes(key), bytes)
assert bytes(key) == key.to_bytes()
def test_public_key_bytes():
key_file = get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PublicKey.from_file(key_file)
assert isinstance(bytes(key), bytes)
assert bytes(key) == key.to_bytes()
def test_private_key_size():
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
assert isinstance(len(key), int)
assert len(key) == key.size
def test_public_key_size():
key_file = get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PublicKey.from_file(key_file)
assert isinstance(len(key), int)
assert len(key) == key.size
def test_private_key_equality():
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key1 = asymmetric.PrivateKey.from_file(key_file)
key2 = asymmetric.PrivateKey.from_file(key_file)
assert key1 is not key2
assert key1 == key2
assert not key1 != key2
assert key1 != 12345
def test_public_key_equality():
key_file = get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
pub_key = asymmetric.PublicKey.from_file(key_file)
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
priv_key = asymmetric.PrivateKey.from_file(key_file)
assert priv_key.public_key is not pub_key
assert priv_key.public_key == pub_key
assert not priv_key.public_key != pub_key
assert pub_key != 12345
# Test the __contains__ Operator
assert pub_key in priv_key
def test_size_in_repr(key_algorithm):
key_file = get_private_key_filename(key_algorithm, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
assert str(key.size) in repr(key)
def test_algorithm_in_repr(key_algorithm):
key_file = get_private_key_filename(key_algorithm, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
assert str(key_algorithm.value) in repr(key)
def test_set_invalid_encoding():
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
with pytest.raises(ValueError):
key.encoding = b'NotValid'
def test_private_key_to_file(key_algorithm, private_key_encoding, empty_file):
key_file = get_private_key_filename(key_algorithm, private_key_encoding)
key = asymmetric.PrivateKey.from_file(key_file)
key.to_file(empty_file)
assert filecmp.cmp(key_file, empty_file)
def test_private_key_to_file_encrypted(key_algorithm, private_key_encoding,
empty_file):
key_file = get_private_key_filename(key_algorithm, private_key_encoding)
key1 = asymmetric.PrivateKey.from_file(key_file)
key1.to_file(empty_file, password=PASSWORD)
key2 = asymmetric.PrivateKey.from_file(empty_file, password=PASSWORD)
assert key1 == key2
@pytest.mark.xfail
def test_public_key_to_file(key_algorithm, public_key_encoding, empty_file):
# XXX: Currently this fails because we are not using sane defaults
# when writing out Public Keys, specifically ECDSA keys.
key_file = get_public_key_filename(key_algorithm, public_key_encoding)
key = asymmetric.PublicKey.from_file(key_file)
key.to_file(empty_file)
assert filecmp.cmp(key_file, empty_file)
def test_rsa_private_key_to_jwk():
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
jwk = key.to_jwk()
assert jwk['kty'] == 'RSA'
assert 'n' in jwk
assert 'e' in jwk
assert 'd' in jwk
assert 'p' in jwk
assert 'q' in jwk
assert 'dp' in jwk
assert 'dq' in jwk
assert 'qi' in jwk
def test_dsa_private_key_to_jwk():
"""Test to ensure that attempting to convert a DSA key to a JWK results
in an exception thrown, since DSA keys cannot be represented as JWKs."""
key_file = get_private_key_filename(KeyAlgorithmType.DSA, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
with pytest.raises(TypeError):
key.to_jwk()
def test_ecdsa_private_key_to_jwk():
key_file = get_private_key_filename(KeyAlgorithmType.ECDSA,
EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
with pytest.raises(NotImplementedError):
key.to_jwk()
def test_rsa_private_key_jwk_thumbprint():
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
jwk_thumbprint = key.jwk_thumbprint
assert isinstance(jwk_thumbprint, str)
# Ensure the result can be decoded as JOSE base64 and appears to be a
# SHA256 result
decoded = utils.jose_b64decode(jwk_thumbprint)
assert len(decoded) * 8 == 256
def test_invalid_key_type():
with pytest.raises(TypeError):
FakeTestKey(key=25)
def test_invalid_to_jwk():
key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
key = asymmetric.PrivateKey.from_file(key_file)
new_key = FakeTestKey(key=key._key)
with pytest.raises(TypeError):
new_key.jwk_thumbprint
def test_direct_public_key_creation_as_str(key_algorithm):
key_file = get_public_key_filename(key_algorithm, EncodingType.PEM)
with open(key_file, 'r') as f:
key_data = f.read()
asymmetric.PublicKey(data=key_data)
def test_direct_public_key_invalid_data():
with pytest.raises(TypeError):
asymmetric.PublicKey(data=54321)
def test_direct_private_key_creation_as_str(key_algorithm):
key_file = get_private_key_filename(key_algorithm, EncodingType.PEM)
with open(key_file, 'r') as f:
key_data = f.read()
asymmetric.PrivateKey(data=key_data)
def test_direct_private_key_invalid_data():
with pytest.raises(TypeError):
asymmetric.PrivateKey(data=54321)
def test_invalid_public_key_file(nonempty_file):
with pytest.raises(ValueError):
asymmetric.PublicKey.from_file(nonempty_file)
def test_invalid_private_key_file(nonempty_file):
with pytest.raises(ValueError):
asymmetric.PrivateKey.from_file(nonempty_file)
# --------------------------------------------------------------------------- #
# Key Generation Tests
def test_strong_key_generation(recwarn, key_algorithm):
key = asymmetric.PrivateKey.generate(key_algorithm)
# Ensure that the default parameters generate a "strong" key
# (thus no warnings were raised)
assert len(recwarn) == 0
assert key.algorithm is key_algorithm
def test_weak_rsa_key_generation(recwarn):
key = asymmetric.PrivateKey.generate(KeyAlgorithmType.RSA, size=1024)
# Ensure that a warning was raised since the key size will generate a
# "weak" key
assert len(recwarn) > 0
assert key.algorithm is KeyAlgorithmType.RSA
def test_weak_dsa_key_generation(recwarn):
key = asymmetric.PrivateKey.generate(KeyAlgorithmType.DSA, size=1024)
# Ensure that a warning was raised since the key size will generate a
# "weak" key
assert len(recwarn) > 0
assert key.algorithm is KeyAlgorithmType.DSA
def test_invalid_ecdsa_curve_size():
with pytest.warns(UserWarning) as record:
asymmetric.PrivateKey.generate(KeyAlgorithmType.ECDSA, size=1)
# Ensure that a warning was raised about the key size being too small
# and that it was rounded up.
assert len(record) == 1
assert "Rounding up" in str(record[0].message)
def test_too_large_ecdsa_curve_size():
with pytest.warns(UserWarning) as record:
asymmetric.PrivateKey.generate(KeyAlgorithmType.ECDSA, size=9999999999)
# Ensure that a warning was raised about the key size being too small
# and that it was rounded up.
assert len(record) == 1
assert "Rounding down" in str(record[0].message)
| python |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def do_test_dropout_numpy_p0(test_case, shape, device, dtype):
np_x = np.random.randn(*shape).astype(dtype)
np_one_mask = np.ones_like(np_x)
x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
out = flow._C.dropout(x_tensor, p=0.0)
test_case.assertTrue(np.allclose(out.numpy(), np_x, atol=1e-5, rtol=1e-5))
out_sum = out.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
def do_test_dropout_numpy_p1(test_case, shape, device, dtype):
np_x = np.random.randn(*shape).astype(dtype)
np_zero_mask = np.zeros_like(np_x)
x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
out = flow._C.dropout(x_tensor, p=1.0)
test_case.assertTrue(np.allclose(out.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5))
out_sum = out.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
)
def do_test_dropout_numpy_fp16_p0(test_case, shape):
np_x = np.random.randn(*shape).astype(np.float32)
np_x_fp16 = np_x.astype(np.float16)
x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
np_one_mask = np.ones_like(np_x)
out = flow._C.dropout(x_tensor_fp16, p=0.0)
out_fp32 = flow.cast(out, flow.float32)
test_case.assertTrue(np.allclose(out_fp32.numpy(), np_x_fp16, atol=1e-5, rtol=1e-5))
out_sum = out_fp32.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
def do_test_dropout_numpy_fp16_p1(test_case, shape):
np_x = np.random.randn(*shape).astype(np.float32)
x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
np_zero_mask = np.zeros_like(np_x)
out = flow._C.dropout(x_tensor_fp16, p=1.0)
out_fp32 = flow.cast(out, flow.float32)
test_case.assertTrue(
np.allclose(out_fp32.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
)
out_sum = out_fp32.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
)
def do_test_dropout_addend_numpy_p0(test_case, shape, device, dtype):
np_x = np.random.randn(*shape).astype(dtype)
np_addend = np.random.randn(*shape).astype(dtype)
np_one_mask = np.ones_like(np_x)
x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
addend_tensor = flow.tensor(np_addend, requires_grad=True, device=device)
DropoutModule = flow.nn.Dropout(p=0.0)
out = DropoutModule(x_tensor, addend_tensor)
test_case.assertTrue(
np.allclose(out.numpy(), np_x + np_addend, atol=1e-5, rtol=1e-5)
)
out_sum = out.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
test_case.assertTrue(
np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
def do_test_dropout_addend_numpy_p1(test_case, shape, device, dtype):
np_x = np.random.randn(*shape).astype(dtype)
np_addend = np.random.randn(*shape).astype(dtype)
np_one_mask = np.ones_like(np_x)
np_zero_mask = np.zeros_like(np_x)
x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
addend_tensor = flow.tensor(np_addend, requires_grad=True, device=device)
DropoutModule = flow.nn.Dropout(p=1.0)
out = DropoutModule(x_tensor, addend_tensor)
test_case.assertTrue(np.allclose(out.numpy(), np_addend, atol=1e-5, rtol=1e-5))
out_sum = out.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
)
test_case.assertTrue(
np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
def do_test_dropout_addend_numpy_fp16_p0(test_case, shape):
np_x = np.random.randn(*shape).astype(np.float32)
np_x_fp16 = np_x.astype(np.float16)
np_addend = np.random.randn(*shape).astype(np.float32)
np_addend_fp16 = np_addend.astype(np.float16)
x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
addend_tensor = flow.tensor(np_addend, requires_grad=True, device="cuda")
addend_tensor_fp16 = flow.cast(addend_tensor, flow.float16)
np_one_mask = np.ones_like(np_x)
DropoutModule = flow.nn.Dropout(p=0.0)
out = DropoutModule(x_tensor_fp16, addend_tensor_fp16)
out_fp32 = flow.cast(out, flow.float32)
test_case.assertTrue(
np.allclose(out_fp32.numpy(), np_x_fp16 + np_addend_fp16, atol=1e-5, rtol=1e-5)
)
out_sum = out_fp32.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
test_case.assertTrue(
np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
def do_test_dropout_addend_numpy_fp16_p1(test_case, shape):
np_x = np.random.randn(*shape).astype(np.float32)
np_addend = np.random.randn(*shape).astype(np.float32)
np_addend_fp16 = np_addend.astype(np.float16)
x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
addend_tensor = flow.tensor(np_addend, requires_grad=True, device="cuda")
addend_tensor_fp16 = flow.cast(addend_tensor, flow.float16)
np_zero_mask = np.zeros_like(np_x)
np_one_mask = np.ones_like(np_x)
DropoutModule = flow.nn.Dropout(p=1.0)
out = DropoutModule(x_tensor_fp16, addend_tensor_fp16)
out_fp32 = flow.cast(out, flow.float32)
test_case.assertTrue(
np.allclose(out_fp32.numpy(), np_addend_fp16, atol=1e-5, rtol=1e-5)
)
out_sum = out_fp32.sum()
out_sum.backward()
test_case.assertTrue(
np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
)
test_case.assertTrue(
np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
)
def fixed_cpu_seed_dropout_test(test_case):
gen1 = flow.Generator()
gen1.manual_seed(5)
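    # Inverted dropout scales surviving activations by 1/(1-p); with p=0.25 that is
    # 4/3 = 1.333333..., which is where the 1.333333 entries in the expected array
    # below come from (and 2.0 for the p=0.5 case that follows).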
dropped_array1 = np.array(
[
[0.000000, 0.000000, 1.333333],
[1.333333, 0.000000, 1.333333],
[1.333333, 1.333333, 1.333333],
]
).astype(np.float32)
dropout1 = flow.nn.Dropout(p=0.25, generator=gen1)
x = flow.ones((3, 3), dtype=flow.float32)
out1 = dropout1(x)
test_case.assertTrue(
np.allclose(out1.numpy(), dropped_array1, atol=1e-4, rtol=1e-4)
)
gen2 = flow.Generator()
gen2.manual_seed(7)
dropout2 = flow.nn.Dropout(p=0.5, generator=gen2)
dropped_array2 = np.array(
[[0.0, 0.0, 2.0], [0.0, 0.0, 2.0], [2.0, 0.0, 2.0]]
).astype(np.float32)
out2 = dropout2(x)
test_case.assertTrue(
np.allclose(out2.numpy(), dropped_array2, atol=1e-4, rtol=1e-4)
)
def fixed_gpu_seed_dropout_test(test_case):
gen1 = flow.Generator()
gen1.manual_seed(5)
dropped_array1 = np.array(
[[1.2500, 0.0000, 1.2500], [1.2500, 1.2500, 1.2500], [1.2500, 1.2500, 1.2500]]
).astype(np.float32)
dropout1 = flow.nn.Dropout(p=0.2, generator=gen1).to("cuda")
x = flow.ones((3, 3), dtype=flow.float32).to("cuda")
out1 = dropout1(x)
test_case.assertTrue(
np.allclose(out1.numpy(), dropped_array1, atol=1e-4, rtol=1e-4)
)
gen2 = flow.Generator()
gen2.manual_seed(7)
dropout2 = flow.nn.Dropout(p=0.7, generator=gen2).to("cuda")
dropped_array2 = np.array(
[
[3.333333, 3.333333, 0.000000],
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 0.000000],
]
).astype(np.float32)
out2 = dropout2(x)
test_case.assertTrue(
np.allclose(out2.numpy(), dropped_array2, atol=1e-4, rtol=1e-4)
)
@flow.unittest.skip_unless_1n1d()
class TestModule(flow.unittest.TestCase):
def test_dropout_numpy_case(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [do_test_dropout_numpy_p0, do_test_dropout_numpy_p1]
arg_dict["shape"] = [[4, 127, 256], [2, 1024, 1024]]
arg_dict["device"] = ["cuda"]
if os.getenv("ONEFLOW_TEST_CPU_ONLY"):
arg_dict["device"] = ["cpu"]
arg_dict["dtype"] = [np.float32, np.float64]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_dropout_fp16_numpy_case(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
do_test_dropout_numpy_fp16_p0,
do_test_dropout_numpy_fp16_p1,
]
arg_dict["shape"] = [[4, 127, 256], [5, 63, 49], [7, 32, 64], [16, 512, 512]]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
def test_dropout_addend_numpy_case(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
do_test_dropout_addend_numpy_p0,
do_test_dropout_addend_numpy_p1,
]
arg_dict["shape"] = [[4, 47, 156], [5, 33, 65], [3, 132, 94], [9, 256, 63]]
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["dtype"] = [np.float32, np.float64]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_dropout_addend_fp16_numpy_case(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
do_test_dropout_addend_numpy_fp16_p0,
do_test_dropout_addend_numpy_fp16_p1,
]
arg_dict["shape"] = [[2, 44, 66], [1, 2, 7], [5, 32, 74], [8, 125, 63]]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
def test_cpu_fixed_dropout(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
fixed_cpu_seed_dropout_test,
]
for arg in GenArgList(arg_dict):
arg[0](test_case)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_gpu_fixed_dropout(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
fixed_gpu_seed_dropout_test,
]
for arg in GenArgList(arg_dict):
arg[0](test_case)
@autotest()
def autotest_dropout_p0(test_case):
device = random_device()
x = random_tensor(ndim=random(), dim0=random(1, 8)).to(device)
m = torch.nn.Dropout(p=0, inplace=random_bool())
return m(x)
@autotest()
def autotest_dropout_p1(test_case):
device = random_device()
x = random_tensor(ndim=random(), dim0=random(1, 8)).to(device)
m = torch.nn.Dropout(p=1.0, inplace=random_bool())
return m(x)
@autotest()
def autotest_dropout_eval(test_case):
device = random_device()
x = random_tensor(ndim=random(), dim0=random(1, 8)).to(device)
m = torch.nn.Dropout(p=1.0, inplace=random_bool())
m.eval()
return m(x)
@autotest()
def autotest_0dim_dropout_eval(test_case):
device = random_device()
x = random_tensor(ndim=0).to(device)
m = torch.nn.Dropout(p=1.0, inplace=random_bool())
m.eval()
return m(x)
if __name__ == "__main__":
unittest.main()
| python |
def solution(arrows):
    answer = 0
    coorL = [[0, 0]]
    # direction index -> (dx, dy), clockwise starting from "up"
    deltas = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]
    for each in arrows:
        dx, dy = deltas[each]
        a = [coorL[-1][0] + dx, coorL[-1][1] + dy]
        if a in coorL:
            answer += 1
        coorL.append(a)
    return answer
print(solution([6, 6, 6, 4, 4, 4, 2, 2, 2, 0, 0, 0, 1, 6, 5, 5, 3, 6, 0, 2, 4]))
#ans = 5 | python |
from django.urls import reverse
from rest_framework import status
from cornershop.apps.weather.tests import WeatherAPTestCase
class WeatherPostTestCase(WeatherAPTestCase):
def test_with_existing_record(self):
url = reverse('weather-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results'), self.response['results'])
| python |
"""Utilities for algebraic number theory. """
from sympy.core.sympify import sympify
from sympy.ntheory.factor_ import factorint
from sympy.polys.domains.rationalfield import QQ
from sympy.polys.domains.integerring import ZZ
from sympy.polys.matrices.exceptions import DMRankError
from sympy.polys.numberfields.minpoly import minpoly
from sympy.printing.lambdarepr import IntervalPrinter
from sympy.utilities.decorator import public
from sympy.utilities.lambdify import lambdify
from mpmath import mp
def is_rat(c):
r"""
Test whether an argument is of an acceptable type to be used as a rational
number.
Explanation
===========
Returns ``True`` on any argument of type ``int``, :ref:`ZZ`, or :ref:`QQ`.
See Also
========
is_int
"""
# ``c in QQ`` is too accepting (e.g. ``3.14 in QQ`` is ``True``),
# ``QQ.of_type(c)`` is too demanding (e.g. ``QQ.of_type(3)`` is ``False``).
#
# Meanwhile, if gmpy2 is installed then ``ZZ.of_type()`` accepts only
# ``mpz``, not ``int``, so we need another clause to ensure ``int`` is
# accepted.
return isinstance(c, int) or ZZ.of_type(c) or QQ.of_type(c)
def is_int(c):
r"""
Test whether an argument is of an acceptable type to be used as an integer.
Explanation
===========
Returns ``True`` on any argument of type ``int`` or :ref:`ZZ`.
See Also
========
is_rat
"""
# If gmpy2 is installed then ``ZZ.of_type()`` accepts only
# ``mpz``, not ``int``, so we need another clause to ensure ``int`` is
# accepted.
return isinstance(c, int) or ZZ.of_type(c)
def get_num_denom(c):
r"""
Given any argument on which :py:func:`~.is_rat` is ``True``, return the
numerator and denominator of this number.
See Also
========
is_rat
"""
r = QQ(c)
return r.numerator, r.denominator
@public
def extract_fundamental_discriminant(a):
r"""
Extract a fundamental discriminant from an integer *a*.
Explanation
===========
Given any rational integer *a* that is 0 or 1 mod 4, write $a = d f^2$,
where $d$ is either 1 or a fundamental discriminant, and return a pair
of dictionaries ``(D, F)`` giving the prime factorizations of $d$ and $f$
respectively, in the same format returned by :py:func:`~.factorint`.
A fundamental discriminant $d$ is different from unity, and is either
1 mod 4 and squarefree, or is 0 mod 4 and such that $d/4$ is squarefree
and 2 or 3 mod 4. This is the same as being the discriminant of some
quadratic field.
Examples
========
>>> from sympy.polys.numberfields.utilities import extract_fundamental_discriminant
>>> print(extract_fundamental_discriminant(-432))
({3: 1, -1: 1}, {2: 2, 3: 1})
For comparison:
>>> from sympy import factorint
>>> print(factorint(-432))
{2: 4, 3: 3, -1: 1}
Parameters
==========
a: int, must be 0 or 1 mod 4
Returns
=======
Pair ``(D, F)`` of dictionaries.
Raises
======
ValueError
If *a* is not 0 or 1 mod 4.
References
==========
.. [1] Cohen, H. *A Course in Computational Algebraic Number Theory.*
(See Prop. 5.1.3)
"""
if a % 4 not in [0, 1]:
raise ValueError('To extract fundamental discriminant, number must be 0 or 1 mod 4.')
if a == 0:
return {}, {0: 1}
if a == 1:
return {}, {}
a_factors = factorint(a)
D = {}
F = {}
# First pass: just make d squarefree, and a/d a perfect square.
# We'll count primes (and units! i.e. -1) that are 3 mod 4 and present in d.
num_3_mod_4 = 0
for p, e in a_factors.items():
if e % 2 == 1:
D[p] = 1
if p % 4 == 3:
num_3_mod_4 += 1
if e >= 3:
F[p] = (e - 1) // 2
else:
F[p] = e // 2
# Second pass: if d is cong. to 2 or 3 mod 4, then we must steal away
# another factor of 4 from f**2 and give it to d.
even = 2 in D
if even or num_3_mod_4 % 2 == 1:
e2 = F[2]
assert e2 > 0
if e2 == 1:
del F[2]
else:
F[2] = e2 - 1
D[2] = 3 if even else 2
return D, F
@public
class AlgIntPowers:
r"""
Compute the powers of an algebraic integer.
Explanation
===========
Given an algebraic integer $\theta$ by its monic irreducible polynomial
``T`` over :ref:`ZZ`, this class computes representations of arbitrarily
high powers of $\theta$, as :ref:`ZZ`-linear combinations over
$\{1, \theta, \ldots, \theta^{n-1}\}$, where $n = \deg(T)$.
The representations are computed using the linear recurrence relations for
powers of $\theta$, derived from the polynomial ``T``. See [1], Sec. 4.2.2.
Optionally, the representations may be reduced with respect to a modulus.
Examples
========
>>> from sympy import Poly, cyclotomic_poly
>>> from sympy.polys.numberfields.utilities import AlgIntPowers
>>> T = Poly(cyclotomic_poly(5))
>>> zeta_pow = AlgIntPowers(T)
>>> print(zeta_pow[0])
[1, 0, 0, 0]
>>> print(zeta_pow[1])
[0, 1, 0, 0]
>>> print(zeta_pow[4]) # doctest: +SKIP
[-1, -1, -1, -1]
>>> print(zeta_pow[24]) # doctest: +SKIP
[-1, -1, -1, -1]
References
==========
.. [1] Cohen, H. *A Course in Computational Algebraic Number Theory.*
"""
def __init__(self, T, modulus=None):
"""
Parameters
==========
T : :py:class:`~.Poly`
The monic irreducible polynomial over :ref:`ZZ` defining the
algebraic integer.
modulus : int, None, optional
If not ``None``, all representations will be reduced w.r.t. this.
"""
self.T = T
self.modulus = modulus
self.n = T.degree()
self.powers_n_and_up = [[-c % self for c in reversed(T.rep.rep)][:-1]]
self.max_so_far = self.n
def red(self, exp):
return exp if self.modulus is None else exp % self.modulus
def __rmod__(self, other):
return self.red(other)
def compute_up_through(self, e):
m = self.max_so_far
if e <= m: return
n = self.n
r = self.powers_n_and_up
c = r[0]
for k in range(m+1, e+1):
b = r[k-1-n][n-1]
r.append(
[c[0]*b % self] + [
(r[k-1-n][i-1] + c[i]*b) % self for i in range(1, n)
]
)
self.max_so_far = e
def get(self, e):
n = self.n
if e < 0:
raise ValueError('Exponent must be non-negative.')
elif e < n:
return [1 if i == e else 0 for i in range(n)]
else:
self.compute_up_through(e)
return self.powers_n_and_up[e - n]
def __getitem__(self, item):
return self.get(item)
@public
def coeff_search(m, R):
r"""
Generate coefficients for searching through polynomials.
Explanation
===========
Lead coeff is always non-negative. Explore all combinations with coeffs
bounded in absolute value before increasing the bound. Skip the all-zero
list, and skip any repeats. See examples.
Examples
========
>>> from sympy.polys.numberfields.utilities import coeff_search
>>> cs = coeff_search(2, 1)
>>> C = [next(cs) for i in range(13)]
>>> print(C)
    [[1, 1], [1, 0], [1, -1], [0, 1], [2, 2], [2, 1], [2, 0], [2, -1], [2, -2], [1, 2], [1, -2], [0, 2], [3, 3]]
Parameters
==========
m : int
Length of coeff list.
R : int
Initial max abs val for coeffs (will increase as search proceeds).
Returns
=======
generator
Infinite generator of lists of coefficients.
"""
R0 = R
c = [R] * m
while True:
if R == R0 or R in c or -R in c:
yield c[:]
j = m - 1
while c[j] == -R:
j -= 1
c[j] -= 1
for i in range(j + 1, m):
c[i] = R
for j in range(m):
if c[j] != 0:
break
else:
R += 1
c = [R] * m
def supplement_a_subspace(M):
r"""
Extend a basis for a subspace to a basis for the whole space.
Explanation
===========
Given an $n \times r$ matrix *M* of rank $r$ (so $r \leq n$), this function
computes an invertible $n \times n$ matrix $B$ such that the first $r$
columns of $B$ equal *M*.
This operation can be interpreted as a way of extending a basis for a
subspace, to give a basis for the whole space.
To be precise, suppose you have an $n$-dimensional vector space $V$, with
basis $\{v_1, v_2, \ldots, v_n\}$, and an $r$-dimensional subspace $W$ of
$V$, spanned by a basis $\{w_1, w_2, \ldots, w_r\}$, where the $w_j$ are
given as linear combinations of the $v_i$. If the columns of *M* represent
the $w_j$ as such linear combinations, then the columns of the matrix $B$
computed by this function give a new basis $\{u_1, u_2, \ldots, u_n\}$ for
$V$, again relative to the $\{v_i\}$ basis, and such that $u_j = w_j$
for $1 \leq j \leq r$.
Examples
========
Note: The function works in terms of columns, so in these examples we
print matrix transposes in order to make the columns easier to inspect.
>>> from sympy.polys.matrices import DM
>>> from sympy import QQ, FF
>>> from sympy.polys.numberfields.utilities import supplement_a_subspace
>>> M = DM([[1, 7, 0], [2, 3, 4]], QQ).transpose()
>>> print(supplement_a_subspace(M).to_Matrix().transpose())
Matrix([[1, 7, 0], [2, 3, 4], [1, 0, 0]])
>>> M2 = M.convert_to(FF(7))
>>> print(M2.to_Matrix().transpose())
Matrix([[1, 0, 0], [2, 3, -3]])
>>> print(supplement_a_subspace(M2).to_Matrix().transpose())
Matrix([[1, 0, 0], [2, 3, -3], [0, 1, 0]])
Parameters
==========
M : :py:class:`~.DomainMatrix`
The columns give the basis for the subspace.
Returns
=======
:py:class:`~.DomainMatrix`
This matrix is invertible and its first $r$ columns equal *M*.
Raises
======
DMRankError
If *M* was not of maximal rank.
References
==========
.. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*
(See Sec. 2.3.2.)
"""
n, r = M.shape
# Let In be the n x n identity matrix.
# Form the augmented matrix [M | In] and compute RREF.
Maug = M.hstack(M.eye(n, M.domain))
R, pivots = Maug.rref()
if pivots[:r] != tuple(range(r)):
raise DMRankError('M was not of maximal rank')
# Let J be the n x r matrix equal to the first r columns of In.
# Since M is of rank r, RREF reduces [M | In] to [J | A], where A is the product of
# elementary matrices Ei corresp. to the row ops performed by RREF. Since the Ei are
# invertible, so is A. Let B = A^(-1).
A = R[:, r:]
B = A.inv()
# Then B is the desired matrix. It is invertible, since B^(-1) == A.
# And A * [M | In] == [J | A]
# => A * M == J
# => M == B * J == the first r columns of B.
return B
@public
def isolate(alg, eps=None, fast=False):
"""
Find a rational isolating interval for a real algebraic number.
Examples
========
>>> from sympy import isolate, sqrt, Rational
>>> print(isolate(sqrt(2))) # doctest: +SKIP
(1, 2)
>>> print(isolate(sqrt(2), eps=Rational(1, 100)))
(24/17, 17/12)
Parameters
==========
alg : str, int, :py:class:`~.Expr`
The algebraic number to be isolated. Must be a real number, to use this
particular function. However, see also :py:meth:`.Poly.intervals`,
which isolates complex roots when you pass ``all=True``.
eps : positive element of :ref:`QQ`, None, optional (default=None)
Precision to be passed to :py:meth:`.Poly.refine_root`
fast : boolean, optional (default=False)
Say whether fast refinement procedure should be used.
(Will be passed to :py:meth:`.Poly.refine_root`.)
Returns
=======
Pair of rational numbers defining an isolating interval for the given
algebraic number.
See Also
========
.Poly.intervals
"""
alg = sympify(alg)
if alg.is_Rational:
return (alg, alg)
elif not alg.is_real:
raise NotImplementedError(
"complex algebraic numbers are not supported")
func = lambdify((), alg, modules="mpmath", printer=IntervalPrinter())
poly = minpoly(alg, polys=True)
intervals = poly.intervals(sqf=True)
dps, done = mp.dps, False
try:
while not done:
alg = func()
for a, b in intervals:
if a <= alg.a and alg.b <= b:
done = True
break
else:
mp.dps *= 2
finally:
mp.dps = dps
if eps is not None:
a, b = poly.refine_root(a, b, eps=eps, fast=fast)
return (a, b)
| python |
import enum
import os
from argparse import ArgumentParser
import tensorflow as tf
import create_mask_image
tf.logging.set_verbosity(tf.logging.INFO)
logger = tf.logging
home = os.path.expanduser("~")
class TrainingPaths(enum.Enum):
MASK = 0,
ORIGINAL_IMAGE = 1,
MASKED_IMAGE = 2
PATHS = {
TrainingPaths.MASK: os.path.join(home, "inpainting/masks/"),
TrainingPaths.ORIGINAL_IMAGE: os.path.join(home, "inpainting/original-images/"),
TrainingPaths.MASKED_IMAGE: os.path.join(home, "inpainting/masked-images/")
}
def maybe_create_paths(paths):
for path in paths:
tf.gfile.MakeDirs(path)
logger.info("Created {} path".format(path))
def build_parser():
parser = ArgumentParser()
parser.add_argument('--num_mask', type=int,
dest='num_mask', help='how many mask to generate',
metavar='Number of mask', required=True)
parser.add_argument('--min_units', type=int,
dest='min_units', help='min units to generate',
metavar='Min units to generate', required=True)
parser.add_argument('--max_units', type=int,
dest='max_units', help='max units to generate',
metavar='Max units to generate', required=True)
parser.add_argument('--masks_path', type=str,
dest='masks_path', help='path to save masks',
metavar='Path to save masks',
default=PATHS[TrainingPaths.MASK])
parser.add_argument('--original_images_path', type=str,
dest='original_images_path', help='path to raw image',
metavar='Path to raw image',
default=PATHS[TrainingPaths.ORIGINAL_IMAGE])
parser.add_argument('--masked_images_path', type=str,
dest='masked_images_path', help='image to train',
metavar='Train',
default=PATHS[TrainingPaths.MASKED_IMAGE])
return parser
def main():
parser = build_parser()
arguments = parser.parse_args()
paths = [arguments.masks_path, arguments.original_images_path, arguments.masked_images_path]
maybe_create_paths(paths)
create_mask_image.save_mask(arguments.num_mask, arguments.min_units, arguments.max_units,
arguments.masks_path, arguments.original_images_path, arguments.masked_images_path)
if __name__ == '__main__':
main()
| python |
from __future__ import absolute_import, unicode_literals
from .extras.clients import WebApplicationPushClient
from .extras.grant_types import AuthorizationCodePushGrant
from .extras.endpoints import Server
from .extras.errors import MalformedResponsePushCodeError | python |
from functools import wraps
import logging
import math
import time
from typing import Callable
logger = logging.getLogger()
def format_seconds(seconds: int):
seconds = int(seconds or 0)
hours = math.floor(seconds / 3600)
seconds -= hours * 3600
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
formatted = "{}:{}".format(minutes, str(seconds).zfill(2))
if hours:
formatted = "{}:{}".format(hours, minutes)
return formatted
def format_bytes(bytes_count: int):
B = float(bytes_count)
KB = float(1024)
MB = float(KB ** 2)
GB = float(KB ** 3)
TB = float(KB ** 4)
if B < KB:
return "{0} {1}".format(B, "Bytes" if 0 == B > 1 else "Byte")
elif KB <= B < MB:
return "{0:.2f} KB".format(B / KB)
elif MB <= B < GB:
return "{0:.2f} MB".format(B / MB)
elif GB <= B < TB:
return "{0:.2f} GB".format(B / GB)
elif TB <= B:
return "{0:.2f} TB".format(B / TB)
def async_log_time(coroutine: Callable):
@wraps(coroutine)
async def wrapper(*args, **kwargs):
t0 = time.time()
await coroutine(*args, **kwargs)
t1 = time.time()
logger.info('{} took {:.3f}s'.format(coroutine.__name__, t1 - t0))
return wrapper
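# Minimal usage sketch (not part of the original module): run this file
# directly to exercise the two formatting helpers above. The sample values
# are arbitrary.
if __name__ == "__main__":
    print(format_seconds(125))    # -> 2:05
    print(format_seconds(3905))   # -> 1:05:05
    print(format_bytes(3))        # -> 3.0 Bytes
    print(format_bytes(1536))     # -> 1.50 KB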
| python |
#!/usr/bin/env python
import RPi.GPIO as GPIO
import subprocess
import time
SENSOR_PIN = 14
TIME_ON = 20
def main():
GPIO.setmode(GPIO.BCM)
GPIO.setup(SENSOR_PIN, GPIO.IN)
subprocess.run(['xset', 'dpms', 'force', 'off'])
def callback(_):
subprocess.run(['xset', 'dpms', 'force', 'on'])
time.sleep(TIME_ON)
subprocess.run(['xset', 'dpms', 'force', 'off'])
try:
GPIO.add_event_detect(SENSOR_PIN, GPIO.RISING, callback=callback)
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
GPIO.cleanup()
if __name__ == '__main__':
main()
| python |
"""Testing v0x04 FlowRemoved message."""
from pyof.v0x04.asynchronous.flow_removed import FlowRemoved
from pyof.v0x04.common.flow_match import Match
from tests.test_struct import TestStruct
class TestFlowRemovedMsg(TestStruct):
"""FlowRemoved message tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_flow_removed')
super().set_raw_dump_object(FlowRemoved, xid=1, cookie=1, priority=1,
reason=1, table_id=1, duration_sec=1,
duration_nsec=2, idle_timeout=3,
hard_timeout=4, packet_count=1,
byte_count=1, match=Match())
super().set_minimum_size(56)
| python |
import enum
from ..time import Resolution, UTC
class Curve:
"""
The curve identifies any type of time series data and OHLC data.
The ``curve.name`` is used in the API when loading data for a curve.
"""
def __init__(self, name, curve_type=None, instance_issued_timezone=None,
area=None, area_sink=None, place=None,
resolution=None, frequency=None, timezone=None,
categories=None, unit=None, denominator=None, data_type=None,
source=None, commodity=None):
#: The curve name is the identifier.
self.name = name
#: Curve type (the type of data this curve refers to).
self.curve_type = curve_type
if self.curve_type.has_instances:
#: For instance-based curves: The time-zone of the issue date
#: in the instance, see :py:attr:`Instance.issued`.
self.instance_issued_timezone = instance_issued_timezone or UTC
else:
self.instance_issued_timezone = None
# The areas and place (if any)
#: The area
self.area = area
#: The importing area for exchange curves
self.area_sink = area_sink
if area_sink:
#: The exporting area for exchange curves
self.area_source = area
self.place = place
# Resolution
if resolution:
#: The frequency of data in this curve
self.frequency = resolution.frequency
#: The time-zone of date-times in this curve
self.timezone = resolution.timezone
else:
self.frequency = frequency
self.timezone = timezone
# Other metadata
#: List of categories for this curve.
self.categories = categories
#: The unit (MW, EUR, etc.). See also :py:attr:`Curve.denominator`.
self.unit = unit
#: The denominator (for EUR/MWh: unit=EUR and denominator=MWh). See
#: also :py:attr:`Curve.unit`.
self.denominator = denominator
#: The data type, :py:class:`DataType`.
self.data_type = data_type
#: The source of the data.
self.source = source
#: The curve commodity (Power, Gas, etc.)
self.commodity = commodity
@property
def resolution(self):
"""
The resolution (combination of frequency and timezone) for this curve.
"""
return Resolution(self.frequency, self.timezone)
def __str__(self):
return self.name
def __repr__(self):
return f"<Curve: \"{self.name}\", curve_type={self.curve_type}>"
_datatype_lookup = {}
class DataType(enum.Enum):
"""
Data types describe the type of data (i.e. actuals, forecast). This is
the attribute that is always set as the last word in the curve name.
"""
#: Third-party actuals collected by Energy Quantified, but not modified.
ACTUAL = ("ACTUAL", "Actual")
#: Scenario data generated by Energy Quantified, which is based on climate
#: data sets (synthetic weather years).
CLIMATE = ("CLIMATE", "Climate")
#: Scenario data generated by Energy Quantified. If you are looking for
#: weather-based scenarios, look at ``DataType.CLIMATE``.
SCENARIO = ("SCENARIO", "Scenario")
#: A combination of third-party actuals and numbers generated by Energy
#: Quantified, where we have filled missing with our best calculations.
SYNTHETIC = ("SYNTHETIC", "Synthetic")
#: The forecast models run backwards.
BACKCAST = ("BACKCAST", "Backcast")
#: The seasonal normals using 40 weather years.
NORMAL = ("NORMAL", "Normal")
#: Some model value (such as a factor).
VALUE = ("VALUE", "Value")
#: Forecasts generated by Energy Quantified unless another source is
#: explicitly stated in the curve name.
FORECAST = ("FORECAST", "Forecast")
#: Currency rates.
FOREX = ("FOREX", "Forex")
#: Closing data from the market.
OHLC = ("OHLC", "OHLC")
#: Capacity data generated from REMIT outage messages.
REMIT = ("REMIT", "REMIT")
#: Total installed capacity.
CAPACITY = ("CAPACITY", "Capacity")
def __init__(self, tag=None, label=None):
self.tag = tag
self.label = label
_datatype_lookup[tag.lower()] = self
def __str__(self):
return self.__repr__()
def __repr__(self):
return self.name
@staticmethod
def is_valid_tag(tag):
"""
Check whether a data type tag exists or not.
:param tag: A data type tag
:type tag: str
:return: True if it exists, otherwise False
:rtype: bool
"""
return tag.lower() in _datatype_lookup
@staticmethod
def by_tag(tag):
"""
Look up data type by tag.
:param tag: A data type tag
:type tag: str
:return: The data type for the given tag
:rtype: DataType
"""
return _datatype_lookup[tag.lower()]
_curvetype_lookup = {}
class CurveType(enum.Enum):
"""
Curve type is not a part of the curve name.
Curve type describes the storage format of the underlying data and which
operations must be used to fetch data for these curves.
* Load time series and scenario-based time series using the
``EnergyQuantified.timeseries.*`` operations.
* To load instances (i.e. forecasts), use the
``EnergyQuantified.timeseries.*`` operations.
* Periods and period-instances can be loaded by using each of
their respective operations located under
``EnergyQuantified.periods.*`` and
``EnergyQuantified.instance_periods.*``.
* OHLC means "open, high, low and close" data. To load data from
these curves, use the OHLC operations.
"""
#: Plain, fixed-interval time series data
TIMESERIES = ("TIMESERIES", False)
#: Plain, fixed-interval scenarios of time series data
SCENARIO_TIMESERIES = ("SCENARIO_TIMESERIES", False)
#: Instances (forecasts)
INSTANCE = ("INSTANCE", True)
#: Period-based data
PERIOD = ("PERIOD", False)
#: Instances of period-based data
INSTANCE_PERIOD = ("INSTANCE_PERIOD", True)
#: Closing prices for market data
OHLC = ("OHLC", False)
def __init__(self, tag=None, has_instances=False):
self.tag = tag
self.has_instances = has_instances
_curvetype_lookup[tag.lower()] = self
def __str__(self):
return self.name
def __repr__(self):
return self.name
@staticmethod
def is_valid_tag(tag):
"""
Check whether a curve type tag exists or not.
:param tag: A curve type tag
:type tag: str
:return: True if it exists, otherwise False
:rtype: bool
"""
return tag.lower() in _curvetype_lookup
@staticmethod
def by_tag(tag):
"""
Look up curve type by tag.
:param tag: A curve type tag
:type tag: str
:return: The curve type for the given tag
:rtype: CurveType
"""
return _curvetype_lookup[tag.lower()]
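# Minimal usage sketch for the classes above (illustrative; the curve name and
# attributes below are invented, not a real catalogue entry):
#
#   >>> curve = Curve(
#   ...     "DE Wind Power Production MWh/h 15min Forecast",
#   ...     curve_type=CurveType.INSTANCE,
#   ...     data_type=DataType.FORECAST,
#   ... )
#   >>> curve.instance_issued_timezone is UTC   # set because INSTANCE has instances
#   True
#   >>> DataType.is_valid_tag("forecast")
#   True
#   >>> CurveType.by_tag("instance").has_instances
#   True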
| python |
#! python3
import sys, PyQt5
from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QApplication
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.label = QLabel(self)
qle = QLineEdit(self)
qle.move(60, 100)
self.label.move(60, 40)
qle.textChanged[str].connect(self.onChanged)
self.setGeometry(300, 300, 350, 250)
self.setWindowTitle('QLineEdit')
self.show()
def onChanged(self, text):
self.label.setText(text)
self.label.adjustSize()
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | python |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
# FIpurE: This is odd...
import sys
import os
from grpc._cython.cygrpc import StatusCode
from pur.core.purnode import purNode
from pur.generated.purbase_pb2 import GetNodeInfoReq, GetNodeInfoResp
from pur.generated.purbase_pb2_grpc import BaseServicer
class BaseService(BaseServicer):
def __init__(self, purnode: purNode):
self.purnode = purnode
def GetNodeInfo(self, request: GetNodeInfoReq, context) -> GetNodeInfoResp:
try:
resp = GetNodeInfoResp()
resp.version = self.purnode.version
pkgdir = os.path.dirname(sys.modules['pur'].__file__)
grpcprotopath = os.path.join(pkgdir, "protos", "pur.proto")
with open(grpcprotopath, 'r') as infile:
resp.grpcProto = infile.read()
return resp
except Exception as e:
context.set_code(StatusCode.unknown)
context.set_details(e)
return GetNodeInfoResp()
| python |
# -*- coding: utf-8 -*-
import sys
import time
# time.clock was removed in Python 3.8; prefer perf_counter when it exists and
# fall back to the original platform-dependent choice on older interpreters.
if hasattr(time, 'perf_counter'):
    timer = time.perf_counter
else:
    timer = time.clock if sys.platform[:3] == 'win' else time.time
def total(reps, func, *args, **kwargs):
"""Total time to run func() reps times.
Returns (total time, last result)
"""
repslist = list(range(reps))
start = timer()
for i in repslist:
ret = func(*args, **kwargs)
elapsed = timer() - start
return (elapsed, ret)
def bestof(reps, func, *args, **kwargs):
"""Quickest func() among reps runs.
Returns (best time, last result)
"""
best = 2 ** 32
for i in range(reps):
start = timer()
ret = func(*args, **kwargs)
elapsed = timer() - start
if elapsed < best: best = elapsed
return (best, ret)
def bestoftotal(reps1, reps2, func, *args, **kwargs):
"""Best of totals:
(best of reps1 runs of (total of reps2 runs of func))
"""
return bestof(reps1, total, reps2, func, *args, **kwargs)
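# Small self-test (illustrative only; any callable could be timed here).
if __name__ == '__main__':
    elapsed, result = total(1000, pow, 2, 1000)
    print('total      : %.5fs (result has %d digits)' % (elapsed, len(str(result))))
    best, _ = bestof(50, str.upper, 'spam')
    print('bestof     : %.6fs' % best)
    best, _ = bestoftotal(5, 1000, pow, 2, 1000)
    print('bestoftotal: %.5fs' % best)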
| python |
"""Platform to present any Tuya DP as a binary sensor."""
import logging
from functools import partial
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
DOMAIN,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS
from .common import LocalTuyaEntity, async_setup_entry
_LOGGER = logging.getLogger(__name__)
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
def flow_schema(dps):
"""Return schema used in config flow."""
return {
vol.Required(CONF_STATE_ON, default="True"): str,
vol.Required(CONF_STATE_OFF, default="False"): str,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
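# Example (illustrative): with state_on="1" and state_off="0" configured above,
# a DP reporting the string "1" marks the sensor on, "0" marks it off, and any
# other value triggers the "did not match state patterns" warning below.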
class LocaltuyaBinarySensor(LocalTuyaEntity, BinarySensorEntity):
"""Representation of a Tuya binary sensor."""
def __init__(
self,
device,
config_entry,
sensorid,
**kwargs,
):
"""Initialize the Tuya binary sensor."""
super().__init__(device, config_entry, sensorid, _LOGGER, **kwargs)
self._is_on = False
@property
def is_on(self):
"""Return sensor state."""
return self._is_on
@property
def device_class(self):
"""Return the class of this device."""
return self._config.get(CONF_DEVICE_CLASS)
def status_updated(self):
"""Device status was updated."""
state = str(self.dps(self._dp_id)).lower()
if state == self._config[CONF_STATE_ON].lower():
self._is_on = True
elif state == self._config[CONF_STATE_OFF].lower():
self._is_on = False
else:
self.warning(
"State for entity %s did not match state patterns", self.entity_id
)
async_setup_entry = partial(
async_setup_entry, DOMAIN, LocaltuyaBinarySensor, flow_schema
)
| python |
#!/usr/bin/env python3
from utils import mathfont
import fontforge
# Create a WOFF font with glyphs for all the operator strings.
font = mathfont.create("stretchy", "Copyright (c) 2021 Igalia S.L.")
# Set parameters for stretchy tests.
font.math.MinConnectorOverlap = mathfont.em // 2
# Make sure that underover parameters don't add extra spacing.
font.math.LowerLimitBaselineDropMin = 0
font.math.LowerLimitGapMin = 0
font.math.StretchStackBottomShiftDown = 0
font.math.StretchStackGapAboveMin = 0
font.math.UnderbarVerticalGap = 0
font.math.UnderbarExtraDescender = 0
font.math.UpperLimitBaselineRiseMin = 0
font.math.UpperLimitGapMin = 0
font.math.StretchStackTopShiftUp = 0
font.math.StretchStackGapBelowMin = 0
font.math.OverbarVerticalGap = 0
font.math.AccentBaseHeight = 0
font.math.OverbarExtraAscender = 0
# These two characters will be stretchable in both directions.
horizontalArrow = 0x295A # LEFTWARDS HARPOON WITH BARB UP FROM BAR
verticalArrow = 0x295C # UPWARDS HARPOON WITH BARB RIGHT FROM BAR
mathfont.createSizeVariants(font)
# Add stretchy vertical and horizontal constructions for the horizontal arrow.
mathfont.createSquareGlyph(font, horizontalArrow)
mathfont.createStretchy(font, horizontalArrow, True)
mathfont.createStretchy(font, horizontalArrow, False)
# Add stretchy vertical and horizontal constructions for the vertical arrow.
mathfont.createSquareGlyph(font, verticalArrow)
mathfont.createStretchy(font, verticalArrow, True)
mathfont.createStretchy(font, verticalArrow, False)
mathfont.save(font)
| python |
# Copyright (C) 2018 The python-bitcoin-utils developers
#
# This file is part of python-bitcoin-utils
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoin-utils, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
from bitcoinutils.setup import setup
from bitcoinutils.transactions import Transaction, TxInput, TxOutput, Sequence
from bitcoinutils.keys import P2pkhAddress, P2shAddress, PrivateKey, P2wshAddress, P2wpkhAddress
from bitcoinutils.script import Script
from bitcoinutils.constants import TYPE_RELATIVE_TIMELOCK
def main():
# always remember to setup the network
setup('testnet')
priv1 = PrivateKey("cN1XE3ESGgdvr4fWsB7L3BcqXncUauF8Fo8zzv4Sm6WrkiGrsxrG")
priv2 = PrivateKey("cR8AkcbL2pgBswrHp28AftEznHPPLA86HiTog8MpNCibxwrsUcZ4")
p2sh_redeem_script = Script(
['OP_1', priv1.get_public_key().to_hex(), priv2.get_public_key().to_hex(),'OP_2', 'OP_CHECKMULTISIG'])
fromAddress = P2wshAddress.from_script(p2sh_redeem_script)
toAddress = P2wpkhAddress.from_address("tb1qtstf97nhk2gycz7vl37esddjpxwt3ut30qp5pn")
# set values
txid = '2042195c40a92353f2ffe30cd0df8d177698560e81807e8bf9174a9c0e98e6c2'
vout = 0
amount = 0.01
# create transaction input from tx id of UTXO
txin = TxInput(txid, vout)
txOut1 = TxOutput(0.0001, toAddress.to_script_pub_key())
txOut2 = TxOutput(0.0098, fromAddress.to_script_pub_key())
tx = Transaction([txin], [txOut1, txOut2], has_segwit=True)
sig1 = priv1.sign_segwit_input(tx, 0, p2sh_redeem_script, amount)
tx.witnesses.append(Script(['OP_0', sig1, p2sh_redeem_script.to_hex()]))
# print raw signed transaction ready to be broadcasted
print("\nRaw signed transaction:\n" + tx.serialize())
print("\nTxId:", tx.get_txid())
if __name__ == "__main__":
main()
| python |
legal_labels = ["west-germany", "usa", "france", "canada", "uk", "japan"]
label_name = "places"
MAX_NUM_WORDS = 10000
MAX_SEQ_LENGTH = 100
EMBEDDING_DIM = 50
| python |
from django.conf.urls import url
from . import views
from django.contrib.auth.views import LoginView, LogoutView, PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView
app_name = 'account'
urlpatterns = [
url(r'^$', LoginView.as_view(template_name='account/welcome.html'), name='welcome_page'),
url(r'^logout/$', LogoutView.as_view(template_name='account/auth/logout.html'), name='logout'),
url(r'^student/$', views.student_home, name='student_home'),
url(r'^teacher/$', views.teacher_home, name='teacher_home'),
url(r'^student/join_class/$', views.student_join_request, name='student_join_request'),
url(r'^teacher/notifications/$', views.teacher_notifications, name='teacher_notifications'),
url(r'^teacher/notifications/(?P<request_id>[0-9]+)/(?P<handle>[0-9]+)/$', views.teacher_handle_request, name='teacher_handle_request'),
url(r'^teacher/add_mainclass/$', views.add_mainclass, name='add_mainclass'),
url(r'^teacher/edit_mainclass/$', views.edit_mainclass, name='edit_mainclass'),
url(r'^teacher/main_class/$', views.mainclass_home, name='mainclass_home'),
url(r'^teacher/main_class/students/$', views.mainclass_students, name='mainclass_students'),
url(r'^teacher/main_class/kick/(?P<student_id>[0-9]+)/$', views.kick_student, name='kick_student'),
url(r'^teacher/main_class/add_subclass/$', views.add_subclass_request, name='add_subclass_request'),
url(r'^teacher/main_class/(?P<subclass_id>[0-9]+)/edit/$', views.edit_subclass, name='edit_subclass'),
url(r'^student/(?P<subclass_id>[0-9]+)/$', views.student_subclass_home, name='student_subclass_home'),
url(r'^student/lessons/(?P<subclass_id>[0-9]+)/$', views.student_lessons, name='student_lessons'),
url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/$', views.teacher_lessons, name='teacher_lessons'),
url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/add_lesson/$', views.add_lesson, name='add_lesson'),
url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/confirm_delete/$', views.remove_confirm_lesson, name='remove_confirm_lesson'),
url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/delete/$', views.remove_lesson, name='remove_lesson'),
url(r'^student/grades/(?P<subclass_id>[0-9]+)/$', views.student_grades, name='student_grades'),
url(r'^student/calendar/(?P<subclass_id>[0-9]+)/(?P<week>[0-9]+)/$', views.student_calendar, name='student_calendar'),
url(r'^student/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/$', views.student_lesson, name='student_lesson'),
url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/$', views.teacher_lesson, name='teacher_lesson'),
url(r'^student_register/$', views.student_register, name='student_register'),
url(r'^student/edit/$', views.edit_student_profile, name='edit_student_profile'),
url(r'^teacher_register/$', views.teacher_register, name='teacher_register'),
url(r'^teacher/edit/$', views.edit_teacher_profile, name='edit_teacher_profile'),
url(r'^student/archives/(?P<subclass_id>[0-9]+)/(?P<my_filter>[0-9]+)/$', views.student_archives, name='student_archives'),
] | python |
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
source = 'NGC3351'
line = np.array(('CO10','CO21','13CO21','13CO32','C18O21','C18O32'))
num = line.shape[0]
for i in range(num):
fits_map = fits.open('data_image/'+source+'_'+line[i]+'_mom0_broad_nyq.fits')[0].data
fits_err = fits.open('data_image/errors/'+source+'_'+line[i]+'_emom0_broad_nyq.fits')[0].data
fits_map[fits_map < 0] = 0
    # Build the cutoff mask first so the map and error arrays are blanked at
    # exactly the same pixels (masking the map before masking the errors would
    # change the comparison used for the second assignment).
    if i > 3:  # 1 sigma cutoff for C18O lines
        mask = fits_map < fits_err
    else:  # 3 sigma cutoff
        mask = fits_map < 3 * fits_err
    fits_map[mask] = 0
    fits_err[mask] = 0
np.save('data_image/'+source+'_'+line[i]+'_mom0.npy',fits_map)
np.save('data_image/errors/'+source+'_'+line[i]+'_emom0_broad_nyq.npy',fits_err)
'''
plt.imshow(fits_map, origin='lower', cmap='hot')
plt.colorbar()
plt.show()
''' | python |
import pytest
import grblas as gb
import dask_grblas as dgb
from grblas import dtypes
from pytest import raises
from .utils import compare
def test_new():
s = gb.Scalar.new(int)
ds = dgb.Scalar.new(int)
compare(lambda x: x, s, ds)
s = gb.Scalar.new(float)
ds = dgb.Scalar.new(float)
compare(lambda x: x, s, ds)
o = object()
compare(lambda x, y: type(x).new(y), (s, o), (ds, o), errors=True)
def test_dup():
s = gb.Scalar.from_value(5)
ds = dgb.Scalar.from_value(5)
ds2 = dgb.Scalar.from_value(s)
compare(lambda x: x, s, ds)
compare(lambda x: x, s, ds2)
compare(lambda x: x.dup(), s, ds)
compare(lambda x: x.dup(), s, ds2)
compare(lambda x: x.dup(dtype=dtypes.FP64), s, ds)
compare(lambda x: x.dup(dtype=dtypes.FP64), s, ds2)
o = object()
compare(lambda x, y: x.dup(y), (s, o), (ds, o), errors=True)
# testing compare
with raises(AssertionError):
compare(lambda x: x, s, dgb.Scalar.from_value(6))
with raises(AssertionError):
compare(lambda x: x, s, dgb.Scalar.from_value(5, dtype=dtypes.FP64))
@pytest.mark.slow
def test_isequal_isclose():
values = [
(gb.Scalar.from_value(5), gb.Scalar.from_value(5)),
(gb.Scalar.from_value(5), gb.Scalar.from_value(6)),
(gb.Scalar.from_value(5), gb.Scalar.from_value(5.0)),
(gb.Scalar.from_value(None, dtype=int), gb.Scalar.from_value(5)),
(gb.Scalar.from_value(None, dtype=int), gb.Scalar.from_value(None, dtype=int)),
(gb.Scalar.from_value(None, dtype=int), gb.Scalar.from_value(None, dtype=float)),
]
o = object()
for s, t in values:
for method_name in ['isequal', 'isclose']:
ds = dgb.Scalar.from_value(s)
dt = dgb.Scalar.from_value(t)
compare(
lambda x, y: getattr(x, method_name)(y),
(s, t),
(ds, dt),
)
compare(
lambda x, y: getattr(x, method_name)(y, check_dtype=True),
(s, t),
(ds, dt),
)
compare(lambda x, y: x == y, (s, t), (ds, dt), compute=False)
compare(lambda x: getattr(x, method_name)(o), s, ds, errors=True)
s = gb.Scalar.from_value(5.0)
t = gb.Scalar.from_value(5.000000001)
ds = dgb.Scalar.from_value(s)
dt = dgb.Scalar.from_value(t)
assert s.isclose(t)
compare(lambda x, y: x.isclose(y), (s, t), (ds, dt))
assert not s.isclose(None)
compare(lambda x, y: x.isclose(y), (s, None), (ds, None))
assert not s.isequal(None)
compare(lambda x, y: x.isequal(y), (s, None), (ds, None))
assert not s.isclose(t, rel_tol=1e-10)
compare(lambda x, y: x.isclose(y, rel_tol=1e-10), (s, t), (ds, dt))
assert s.isclose(t, rel_tol=1e-10, abs_tol=1e-8)
compare(lambda x, y: x.isclose(y, rel_tol=1e-10, abs_tol=1e-8), (s, t), (ds, dt))
compare(lambda x, y: x.isequal(y, check_dtype=True), (s, 5), (ds, 5))
compare(lambda x, y: x.isclose(y, check_dtype=True), (s, 5), (ds, 5))
def test_nvals():
s = gb.Scalar.from_value(1)
ds = dgb.Scalar.from_value(s)
compare(lambda x: x.nvals, s, ds)
s = gb.Scalar.from_value(None, dtype=int)
ds = dgb.Scalar.from_value(s)
compare(lambda x: x.nvals, s, ds)
# Test creation with PythonScalar
compare(lambda x: type(x).from_value(x.nvals), s, ds)
def test_value():
s = gb.Scalar.from_value(3)
ds = dgb.Scalar.from_value(s)
compare(lambda x: x.value, s, ds)
def f(x, y):
x.value = y
return x
compare(f, (s, 4), (ds, 4))
s2 = gb.Scalar.from_value(5)
ds2 = dgb.Scalar.from_value(s)
# compare(f, (s, s2), (ds, ds2)) # not yet supported in grblas
compare(f, (s, s2.value), (ds, ds2.value))
compare(f, (s, s.nvals), (ds, ds.nvals))
compare(f, (s, None), (ds, None))
o = object()
compare(f, (s, o), (ds, o), errors=True)
def test_bool():
values = [
gb.Scalar.from_value(0),
gb.Scalar.from_value(10.1),
gb.Scalar.from_value(True),
gb.Scalar.from_value(False),
gb.Scalar.from_value(None, dtype=int),
]
for s in values:
ds = dgb.Scalar.from_value(s)
compare(lambda x: bool(x), s, ds, compute=False)
def test_clear():
s = gb.Scalar.from_value(4)
ds = dgb.Scalar.from_value(s)
def f(x):
x.clear()
return x
compare(f, s, ds)
def test_is_empty():
s = gb.Scalar.from_value(4)
ds = dgb.Scalar.from_value(s)
compare(lambda x: x.is_empty, s, ds)
s.clear()
ds.clear()
compare(lambda x: x.is_empty, s, ds)
s = gb.Scalar.from_value(None, dtype=float)
ds = dgb.Scalar.from_value(s)
compare(lambda x: x.is_empty, s, ds)
def test_update():
def f1(x, y):
x.update(y)
return x
def f2(x, y):
x << y
return x
for f in [f1, f2]:
s = gb.Scalar.from_value(6)
ds = dgb.Scalar.from_value(s)
s2 = gb.Scalar.from_value(7)
ds2 = dgb.Scalar.from_value(s2)
compare(f, (s, s2), (ds, ds2))
compare(f, (s, 1), (ds, 1))
compare(f, (s, None), (ds, None))
v = gb.Vector.from_values([0, 2], [0, 2])
dv = dgb.Vector.from_vector(v)
compare(f, (s, v[0]), (ds, dv[0]))
@pytest.mark.xfail
def test_attrs():
s = gb.Scalar.from_value(3)
ds = dgb.Scalar.from_value(s)
assert set(dir(s)) - set(dir(ds)) == {
'_is_empty', '_assign_element', '_extract_element', '_is_scalar', '_prep_for_assign',
'_prep_for_extract', 'gb_obj', 'show',
}
assert set(dir(ds)) - set(dir(s)) == {
'_delayed', '_meta', '_optional_dup',
'compute', 'from_delayed', 'persist', 'visualize',
}
| python |
# -*- coding: utf-8 -*-
"""This file contains the wifi.log (Mac OS X) parser."""
import logging
import re
import pyparsing
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
__author__ = 'Joaquin Moreno Garijo ([email protected])'
class MacWifiLogEvent(time_events.TimestampEvent):
"""Convenience class for a Mac Wifi log line event."""
DATA_TYPE = u'mac:wifilog:line'
def __init__(self, timestamp, agent, function, text, action):
"""Initializes the event object.
Args:
timestamp: the timestamp, contains the number of microseconds from
January 1, 1970 00:00:00 UTC.
      agent: The process that generated the log entry, as it appears between
             angle brackets in wifi.log (for example airportd).
      function: The function within the agent that logged the message.
text: The log message
action: A string containing known WiFI actions, eg: connected to
an AP, configured, etc. If the action is not known,
the value is the message of the log (text variable).
"""
super(MacWifiLogEvent, self).__init__(
timestamp, eventdata.EventTimestamp.ADDED_TIME)
self.agent = agent
self.function = function
self.text = text
self.action = action
class MacWifiLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parse text based on wifi.log file."""
NAME = u'macwifi'
DESCRIPTION = u'Parser for Mac OS X wifi.log files.'
_ENCODING = u'utf-8'
# Regular expressions for known actions.
RE_CONNECTED = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')
RE_WIFI_PARAMETERS = re.compile(
r'\[ssid=(.*?), bssid=(.*?), security=(.*?), rssi=')
# Define how a log line should look like.
WIFI_LINE = (
text_parser.PyparsingConstants.MONTH.setResultsName(u'day_of_week') +
text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
text_parser.PyparsingConstants.TIME_MSEC.setResultsName(u'time') +
pyparsing.Literal(u'<') +
pyparsing.CharsNotIn(u'>').setResultsName(u'agent') +
pyparsing.Literal(u'>') +
pyparsing.CharsNotIn(u':').setResultsName(u'function') +
pyparsing.Literal(u':') +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName(u'text'))
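  # Example of a line matched by WIFI_LINE (illustrative, not from a real log):
  #   Thu Nov  8 20:36:37.222 <airportd[88]> airportdProcessDLILEvent: en0 attached (up)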
WIFI_HEADER = (
text_parser.PyparsingConstants.MONTH.setResultsName(u'day_of_week') +
text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
text_parser.PyparsingConstants.TIME_MSEC.setResultsName(u'time') +
pyparsing.Literal(u'***Starting Up***'))
# Define the available log line structures.
LINE_STRUCTURES = [
(u'logline', WIFI_LINE),
(u'header', WIFI_HEADER)]
def __init__(self):
"""Initializes a parser object."""
super(MacWifiLogParser, self).__init__()
self._year_use = 0
self._last_month = None
def _GetAction(self, agent, function, text):
"""Parse the well know actions for easy reading.
Args:
agent: The device that generate the entry.
function: The function or action called by the agent.
text: Mac Wifi log text.
Returns:
      known_action: A formatted string representing the known (or common) action.
"""
if not agent.startswith(u'airportd'):
return text
# TODO: replace "x in y" checks by startswith if possible.
if u'airportdProcessDLILEvent' in function:
interface = text.split()[0]
return u'Interface {0:s} turn up.'.format(interface)
if u'doAutoJoin' in function:
match = re.match(self.RE_CONNECTED, text)
if match:
ssid = match.group(1)[1:-1]
else:
ssid = u'Unknown'
return u'Wifi connected to SSID {0:s}'.format(ssid)
if u'processSystemPSKAssoc' in function:
wifi_parameters = self.RE_WIFI_PARAMETERS.search(text)
if wifi_parameters:
ssid = wifi_parameters.group(1)
bssid = wifi_parameters.group(2)
security = wifi_parameters.group(3)
if not ssid:
ssid = u'Unknown'
if not bssid:
bssid = u'Unknown'
if not security:
security = u'Unknown'
return (
u'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
u'Security: {2:s}.').format(bssid, ssid, security)
return text
def _ConvertToTimestamp(self, day, month, year, time):
"""Converts date and time values into a timestamp.
This is a timestamp_string as returned by using
text_parser.PyparsingConstants structures:
    08, Nov, [[20, 36, 37], 222]
Args:
day: an integer representing the day.
month: an integer representing the month.
year: an integer representing the year.
time: a list containing integers with the number of
hours, minutes and seconds.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC.
Raises:
TimestampError: if the timestamp cannot be created from the date and
time values.
"""
time_values, milliseconds = time
hours, minutes, seconds = time_values
microseconds = milliseconds * 1000
return timelib.Timestamp.FromTimeParts(
year, month, day, hours, minutes, seconds, microseconds=microseconds)
def _ParseLogLine(self, parser_mediator, structure):
"""Parse a single log line and produce an event object.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
if not self._year_use:
self._year_use = parser_mediator.GetEstimatedYear()
# Gap detected between years.
month = timelib.MONTH_DICT.get(structure.month.lower())
if not self._last_month:
self._last_month = month
if month < self._last_month:
self._year_use += 1
try:
timestamp = self._ConvertToTimestamp(
structure.day, month, self._year_use, structure.time)
except errors.TimestampError as exception:
parser_mediator.ProduceParseError(
u'unable to determine timestamp with error: {0:s}'.format(
exception))
return
self._last_month = month
text = structure.text
# Due to the use of CharsNotIn pyparsing structure contains whitespaces
# that need to be removed.
function = structure.function.strip()
action = self._GetAction(structure.agent, function, text)
event_object = MacWifiLogEvent(
timestamp, structure.agent, function, text, action)
parser_mediator.ProduceEvent(event_object)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
if key == u'logline':
self._ParseLogLine(parser_mediator, structure)
elif key != u'header':
logging.warning(
u'Unable to parse record, unknown structure: {0:s}'.format(key))
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac Wifi log file.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
line: A single line from the text file.
Returns:
True if this is the correct parser, False otherwise.
"""
try:
_ = self.WIFI_HEADER.parseString(line)
except pyparsing.ParseException:
logging.debug(u'Not a Mac Wifi log file')
return False
return True
manager.ParsersManager.RegisterParser(MacWifiLogParser)
| python |
# chebyfit/__init__.py
from .chebyfit import __doc__, __all__, __version__
from .chebyfit import *
| python |
from typing import Generator, Mapping, Union
from flask_babel import lazy_gettext
from app.questionnaire.location import Location
from .context import Context
from .section_summary_context import SectionSummaryContext
class SubmitQuestionnaireContext(Context):
def __call__(
self, answers_are_editable: bool = True
) -> dict[str, Union[str, dict]]:
summary_options = self._schema.get_summary_options()
collapsible = summary_options.get("collapsible", False)
submission_schema: Mapping = self._schema.get_submission() or {}
title = submission_schema.get("title") or lazy_gettext(
"Check your answers and submit"
)
submit_button = submission_schema.get("button") or lazy_gettext(
"Submit answers"
)
guidance = submission_schema.get("guidance") or lazy_gettext(
"Please submit this survey to complete it"
)
warning = submission_schema.get("warning") or None
context = {
"title": title,
"guidance": guidance,
"warning": warning,
"submit_button": submit_button,
}
if summary_options:
context["summary"] = self._get_summary_context(
collapsible, answers_are_editable
)
return context
def _get_summary_context(
self, collapsible: bool, answers_are_editable: bool
) -> dict[str, Union[list, bool, str]]:
groups = list(self._build_all_groups())
return {
"groups": groups,
"answers_are_editable": answers_are_editable,
"collapsible": collapsible,
"summary_type": "Summary",
}
def _build_all_groups(self) -> Generator[dict, None, None]:
""" NB: Does not support repeating sections """
for section_id in self._router.enabled_section_ids:
location = Location(section_id=section_id)
section_summary_context = SectionSummaryContext(
language=self._language,
schema=self._schema,
answer_store=self._answer_store,
list_store=self._list_store,
progress_store=self._progress_store,
metadata=self._metadata,
current_location=location,
return_to="final-summary",
routing_path=self._router.routing_path(section_id),
)
section: Mapping = self._schema.get_section(section_id) or {}
if section.get("summary", {}).get("items"):
break
for group in section_summary_context()["summary"]["groups"]:
yield group
| python |
def get_answer():
"""something"""
return True
| python |
# Copyright (c) 2015, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
from __future__ import print_function, division, absolute_import
import pytest
import numpy as np
from numpy import *
import quaternion
import spherical_functions as sf
import scri
from conftest import linear_waveform, constant_waveform, random_waveform, delta_waveform
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_identity_rotation(w):
# Rotation by 1 should be identity operation
W_in = w()
W_out = w()
assert W_in.ensure_validity(alter=False)
assert W_out.ensure_validity(alter=False)
W_out.rotate_decomposition_basis(quaternion.one)
assert W_out.ensure_validity(alter=False)
assert np.array_equal(W_out.t, W_in.t)
assert np.array_equal(W_out.frame, W_in.frame)
assert np.array_equal(W_out.data, W_in.data)
assert np.array_equal(W_out.LM, W_in.LM)
assert W_out.ell_min == W_in.ell_min
assert W_out.ell_max == W_in.ell_max
for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
type(W_in).__name__ + '_' + str(W_in.num))
or (h_in.startswith('# ') and h_out.startswith('# ')))
assert W_out.frameType == W_in.frameType
assert W_out.dataType == W_in.dataType
assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
assert isinstance(W_out.num, int)
assert W_out.num != W_in.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_rotation_invariants(w):
# A random rotation should leave everything but data and frame the
# same (except num, of course)
W_in = w()
W_out = w()
np.random.seed(hash('test_rotation_invariants') % 4294967294) # Use mod to get in an acceptable range
W_out.rotate_decomposition_basis(np.quaternion(*np.random.uniform(-1, 1, 4)).normalized())
assert W_in.ensure_validity(alter=False)
assert W_out.ensure_validity(alter=False)
assert np.array_equal(W_out.t, W_in.t)
assert not np.array_equal(W_out.frame, W_in.frame) # This SHOULD change
assert not np.array_equal(W_out.data, W_in.data) # This SHOULD change
assert W_out.ell_min == W_in.ell_min
assert W_out.ell_max == W_in.ell_max
assert np.array_equal(W_out.LM, W_in.LM)
for h_in, h_out in zip(W_in.history[:-3], W_out.history[:-5]):
assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
type(W_in).__name__ + '_' + str(W_in.num))
or (h_in.startswith('# ') and h_out.startswith('# ')))
assert W_out.frameType == W_in.frameType
assert W_out.dataType == W_in.dataType
assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
assert W_out.num != W_in.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_constant_versus_series(w):
# A random rotation should leave everything but data and frame the
# same (except num, of course)
W_const = w()
W_series = w()
np.random.seed(hash('test_constant_versus_series') % 4294967294) # Use mod to get in an acceptable range
W_const.rotate_decomposition_basis(np.quaternion(*np.random.uniform(-1, 1, 4)).normalized())
W_series.rotate_decomposition_basis(
np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized()] * W_series.n_times))
assert W_const.ensure_validity(alter=False)
assert W_series.ensure_validity(alter=False)
assert np.array_equal(W_series.t, W_const.t)
assert not np.array_equal(W_series.frame, W_const.frame) # This SHOULD change
assert not np.array_equal(W_series.data, W_const.data) # This SHOULD change
assert W_series.ell_min == W_const.ell_min
assert W_series.ell_max == W_const.ell_max
assert np.array_equal(W_series.LM, W_const.LM)
for h_const, h_series in zip(W_const.history[:-5], W_series.history[:-11]):
assert (h_const == h_series.replace(type(W_series).__name__ + '_' + str(W_series.num),
type(W_const).__name__ + '_' + str(W_const.num))
or (h_const.startswith('# ') and h_series.startswith('# ')))
assert W_series.frameType == W_const.frameType
assert W_series.dataType == W_const.dataType
assert W_series.r_is_scaled_out == W_const.r_is_scaled_out
assert W_series.m_is_scaled_out == W_const.m_is_scaled_out
assert W_series.num != W_const.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_rotation_inversion(w):
# Rotation followed by the inverse rotation should leave
# everything the same (except that the frame data will be either a
# 1 or a series of 1s)
np.random.seed(hash('test_rotation_inversion') % 4294967294) # Use mod to get in an acceptable range
W_in = w()
assert W_in.ensure_validity(alter=False)
# We loop over (1) a single constant rotation, and (2) an array of random rotations
for R_basis in [np.quaternion(*np.random.uniform(-1, 1, 4)).normalized(),
np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized()] * W_in.n_times)]:
W_out = w()
W_out.rotate_decomposition_basis(R_basis)
W_out.rotate_decomposition_basis(~R_basis)
assert W_out.ensure_validity(alter=False)
assert np.array_equal(W_out.t, W_in.t)
assert np.max(np.abs(W_out.frame - W_in.frame)) < 1e-15
        assert np.allclose(W_out.data, W_in.data, atol=W_in.ell_max ** 4 * 4e-14, rtol=W_in.ell_max ** 4 * 4e-14)
assert W_out.ell_min == W_in.ell_min
assert W_out.ell_max == W_in.ell_max
assert np.array_equal(W_out.LM, W_in.LM)
for h_in, h_out in zip(W_in.history[:-3], W_out.history[:-5]):
assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
type(W_in).__name__ + '_' + str(W_in.num))
or (h_in.startswith('# datetime') and h_out.startswith('# datetime')))
assert W_out.frameType == W_in.frameType
assert W_out.dataType == W_in.dataType
assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
assert W_out.num != W_in.num
def test_rotations_of_0_0_mode(Rs):
# The (ell,m)=(0,0) mode should be rotationally invariant
n_copies = 10
W_in = delta_waveform(0, 0, begin=-10., end=100., n_times=n_copies * len(Rs), ell_min=0, ell_max=8)
assert W_in.ensure_validity(alter=False)
W_out = scri.WaveformModes(W_in)
R_basis = np.array([R for R in Rs for i in range(n_copies)])
W_out.rotate_decomposition_basis(R_basis)
assert W_out.ensure_validity(alter=False)
assert np.array_equal(W_out.t, W_in.t)
assert np.max(np.abs(W_out.frame - R_basis)) == 0.0
assert np.array_equal(W_out.data, W_in.data)
assert W_out.ell_min == W_in.ell_min
assert W_out.ell_max == W_in.ell_max
assert np.array_equal(W_out.LM, W_in.LM)
for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
type(W_in).__name__ + '_' + str(W_in.num))
or (h_in.startswith('# ') and h_out.startswith('# ')))
assert W_out.frameType == W_in.frameType
assert W_out.dataType == W_in.dataType
assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
assert W_out.num != W_in.num
def test_rotations_of_each_mode_individually(Rs):
ell_min = 0
ell_max = 8 # sf.ell_max is just too much; this test is too slow, and ell=8 should be fine
R_basis = Rs
Ds = np.empty((len(Rs), sf.LMpM_total_size(ell_min, ell_max)), dtype=complex)
for i, R in enumerate(Rs):
Ds[i, :] = sf.Wigner_D_matrices(R, ell_min, ell_max)
for ell in range(ell_max + 1):
first_zeros = np.zeros((len(Rs), sf.LM_total_size(ell_min, ell - 1)), dtype=complex)
later_zeros = np.zeros((len(Rs), sf.LM_total_size(ell + 1, ell_max)), dtype=complex)
for Mp in range(-ell, ell):
W_in = delta_waveform(ell, Mp, begin=-10., end=100., n_times=len(Rs), ell_min=ell_min, ell_max=ell_max)
            # Now, the modes are f^{\ell,m} = \delta^{\ell,m}_{L,Mp}
assert W_in.ensure_validity(alter=False)
W_out = scri.WaveformModes(W_in)
W_out.rotate_decomposition_basis(Rs)
assert W_out.ensure_validity(alter=False)
assert np.array_equal(W_out.t, W_in.t)
assert np.max(np.abs(W_out.frame - R_basis)) == 0.0
i_D0 = sf.LMpM_index(ell, Mp, -ell, ell_min)
assert np.array_equal(W_out.data[:, :sf.LM_total_size(ell_min, ell - 1)], first_zeros)
if ell < ell_max:
assert np.array_equal(
W_out.data[:, sf.LM_total_size(ell_min, ell - 1):-sf.LM_total_size(ell + 1, ell_max)],
Ds[:, i_D0:i_D0 + (2 * ell + 1)])
assert np.array_equal(W_out.data[:, -sf.LM_total_size(ell + 1, ell_max):], later_zeros)
else:
assert np.array_equal(W_out.data[:, sf.LM_total_size(ell_min, ell - 1):],
Ds[:, i_D0:i_D0 + (2 * ell + 1)])
assert W_out.ell_min == W_in.ell_min
assert W_out.ell_max == W_in.ell_max
assert np.array_equal(W_out.LM, W_in.LM)
for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
assert h_in == h_out.replace(type(W_out).__name__ + str(W_out.num), type(W_in).__name__ + str(W_in.num))
assert W_out.frameType == W_in.frameType
assert W_out.dataType == W_in.dataType
assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
assert W_out.num != W_in.num
| python |
from apiaudio.api_request import APIRequest
class Connector(APIRequest):
OBJECT_NAME = "connector"
resource_path = "/connector/"
connection_path = "/connection/"
@classmethod
def retrieve(cls, name):
if not name:
raise Exception("Name must be set")
return cls._get_request(path_param=cls.resource_path + name)
@classmethod
def connection(cls, connection_id):
if not connection_id:
raise Exception("Connection id must be set")
return cls._get_request(path_param=cls.connection_path + connection_id)
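# Minimal usage sketch (illustrative; "my_connector" and the connection id are
# placeholders, not real apiaudio resources):
#
#   >>> Connector.retrieve("my_connector")    # GET /connector/my_connector
#   >>> Connector.connection("1234-abcd")     # GET /connection/1234-abcd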
| python |
import unittest
from rime.util import struct
class TestStruct(unittest.TestCase):
def test_dict_attr(self):
self.assertEqual(struct.Struct.items, dict.items)
def test_constructor(self):
s = struct.Struct(test_attr='test_obj')
self.assertEqual(s.test_attr, 'test_obj')
self.assertEqual(s['test_attr'], 'test_obj')
def test_add_attr(self):
s = struct.Struct()
s.test_attr = 'test_obj'
self.assertEqual(s.test_attr, 'test_obj')
def test_add_key(self):
s = struct.Struct()
s['test_attr'] = 'test_obj'
self.assertEqual(s.test_attr, 'test_obj')
self.assertEqual(s['test_attr'], 'test_obj')
def test_attribute_error(self):
s = struct.Struct()
with self.assertRaises(AttributeError):
s.test_attr
| python |
import pandas as pd
from calendar import isleap
def get_date_range_hours_from_year(year):
"""
creates date range in hours for the year excluding leap day
:param year: year of date range
:type year: int
:return: pd.date_range with 8760 values
:rtype: pandas.data_range
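    Example (illustrative; 2020 is a leap year, so Feb 29 is dropped):
        >>> date_range = get_date_range_hours_from_year(2020)
        >>> len(date_range)
        8760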
"""
date_range = pd.date_range(start=str(year), end=str(year + 1), freq='H', closed='left')
# Check if leap year and remove extra day
if isleap(year):
date_range = date_range[~((date_range.month == 2) & (date_range.day == 29))]
return date_range
| python |
from collections import defaultdict
import nltk
import random
import string
import torch
from nltk.corpus import stopwords
from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Initialize BERT vocabulary...')
bert_tokenizer = BertTokenizer(vocab_file='data/BERT_model_reddit/vocab.txt')
print('Initialize BERT model...')
bert_model = BertForMaskedLM.from_pretrained('data/BERT_model_reddit').to(device)
bert_model.eval()
''' Printing functions '''
class print_color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def color_print_top_words(top_words, gt_euphemism):
print('[Euphemism Candidates]: ')
gt_euphemism_upper = set([y for x in gt_euphemism for y in x.split()])
for i in top_words[:100]:
if i in gt_euphemism:
print(print_color.BOLD + print_color.PURPLE + i + print_color.END, end=', ')
elif i in gt_euphemism_upper:
print(print_color.UNDERLINE + print_color.PURPLE + i + print_color.END, end=', ')
else:
print(i, end=', ')
print()
''' Evaluation '''
def evaluate_detection(top_words, gt_euphemism):
color_print_top_words(top_words, gt_euphemism)
correct_list = [] # appear in the ground truth
correct_list_upper = [] # not appear in the ground truth but contain in a ground truth phase.
gt_euphemism_upper = set([y for x in gt_euphemism for y in x.split()])
for i, x in enumerate(top_words):
correct_list.append(1 if x in gt_euphemism else 0)
correct_list_upper.append(1 if x in gt_euphemism_upper else 0)
topk_precision_list = []
cummulative_sum = 0
topk_precision_list_upper = []
cummulative_sum_upper = 0
for i in range(0, len(correct_list)):
cummulative_sum += correct_list[i]
topk_precision_list.append(cummulative_sum/(i+1))
cummulative_sum_upper += correct_list_upper[i]
topk_precision_list_upper.append(cummulative_sum_upper/(i+1))
for topk in [10, 20, 30, 40, 50, 60, 80, 100]:
if topk < len(topk_precision_list):
print('Top-{:d} precision is ({:.2f}, {:.2f})'.format(topk, topk_precision_list[topk-1], topk_precision_list_upper[topk-1]))
return 0
''' Main Function '''
def MLM(sgs, input_keywords, thres=1, filter_uninformative=1):
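    # For every masked sentence, ask BERT for its top-50 fillers of the [MASK]
    # token. A sentence is kept as "informative" only if at least two of the
    # input keywords appear among its top `thres` predictions (when
    # filter_uninformative is on); the probabilities of the remaining candidate
    # words are then accumulated in MLM_score and returned in sorted order.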
def to_bert_input(tokens, bert_tokenizer):
token_idx = torch.tensor(bert_tokenizer.convert_tokens_to_ids(tokens))
sep_idx = tokens.index('[SEP]')
segment_idx = token_idx * 0
segment_idx[(sep_idx + 1):] = 1
mask = (token_idx != 0)
return token_idx.unsqueeze(0).to(device), segment_idx.unsqueeze(0).to(device), mask.unsqueeze(0).to(device)
def single_MLM(message):
MLM_k = 50
tokens = bert_tokenizer.tokenize(message)
if len(tokens) == 0:
return []
if tokens[0] != CLS:
tokens = [CLS] + tokens
if tokens[-1] != SEP:
tokens.append(SEP)
token_idx, segment_idx, mask = to_bert_input(tokens, bert_tokenizer)
with torch.no_grad():
logits = bert_model(token_idx, segment_idx, mask, masked_lm_labels=None)
logits = logits.squeeze(0)
probs = torch.softmax(logits, dim=-1)
for idx, token in enumerate(tokens):
if token == MASK:
topk_prob, topk_indices = torch.topk(probs[idx, :], MLM_k)
topk_tokens = bert_tokenizer.convert_ids_to_tokens(topk_indices.cpu().numpy())
out = [[topk_tokens[i], float(topk_prob[i])] for i in range(MLM_k)]
return out
PAD, MASK, CLS, SEP = '[PAD]', '[MASK]', '[CLS]', '[SEP]'
MLM_score = defaultdict(float)
temp = sgs if len(sgs) < 10 else tqdm(sgs)
skip_ms_num = 0
good_sgs = []
for sgs_i in temp:
top_words = single_MLM(sgs_i)
seen_input = 0
for input_i in input_keywords:
if input_i in [x[0] for x in top_words[:thres]]:
seen_input += 1
if filter_uninformative == 1 and seen_input < 2:
skip_ms_num += 1
continue
good_sgs.append(sgs_i)
for j in top_words:
if j[0] in string.punctuation:
continue
if j[0] in stopwords.words('english'):
continue
if j[0] in input_keywords:
continue
if j[0] in ['drug', 'drugs']: # exclude these two for the drug dataset.
continue
if j[0][:2] == '##': # the '##' by BERT indicates that is not a word.
continue
MLM_score[j[0]] += j[1]
# print(sgs_i)
# print([x[0] for x in top_words[:20]])
out = sorted(MLM_score, key=lambda x: MLM_score[x], reverse=True)
out_tuple = [[x, MLM_score[x]] for x in out]
if len(sgs) >= 10:
print('The percentage of uninformative masked sentences is {:d}/{:d} = {:.2f}%'.format(skip_ms_num, len(sgs), float(skip_ms_num)/len(sgs)*100))
return out, out_tuple, good_sgs
def euphemism_detection(input_keywords, all_text, ms_limit, filter_uninformative):
print('\n' + '*' * 40 + ' [Euphemism Detection] ' + '*' * 40)
print('[util.py] Input Keyword: ', end='')
print(input_keywords)
print('[util.py] Extracting masked sentences for input keywords...')
masked_sentence = []
for sentence in tqdm(all_text):
temp = nltk.word_tokenize(sentence)
for input_keyword_i in input_keywords:
if input_keyword_i not in temp:
continue
temp_index = temp.index(input_keyword_i)
masked_sentence += [' '.join(temp[: temp_index]) + ' [MASK] ' + ' '.join(temp[temp_index + 1:])]
random.shuffle(masked_sentence)
masked_sentence = masked_sentence[:ms_limit]
print('[util.py] Generating top candidates...')
top_words, _, _ = MLM(masked_sentence, input_keywords, thres=5, filter_uninformative=filter_uninformative)
return top_words
| python |
from .FeatureSet import FeatureSet
class Version(FeatureSet):
def __init__(self, api, internalIdentifier, identifier, versionString, apiString):
super(Version, self).__init__(api, internalIdentifier)
self.nativeIdentifier = identifier
self.apiString = apiString
self.majorVersion, self.minorVersion = [ int(val) for val in versionString.split(".")[0:2] ]
self.isCore = False
self.isExt = False
self.deprecatedFunctions = []
self.deprecatedConstants = []
self.deprecatedTypes = []
self.removedFunctions = []
self.removedConstants = []
self.removedTypes = []
def __lt__(self, other):
return self.majorVersion < other.majorVersion or (self.majorVersion == other.majorVersion and self.minorVersion < other.minorVersion)
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Device',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(unique=True, max_length=127)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('message_id', models.CharField(max_length=63, null=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('body', models.TextField()),
('type', models.TextField(null=True, blank=True)),
('devices', models.ManyToManyField(to='pesteh.Device')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| python |
#!/usr/bin/python
import dnaseq
import bm_preproc
import kmer_index
human_chromosome = dnaseq.read_genome("chr1.GRCh38.excerpt.fasta")
def approximate_matches(p, t, index):
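    # Pigeonhole approach (assumes len(p) == 24 and an 8-mer index over t):
    # split p into three non-overlapping 8-mers. If p occurs in t with at most
    # n = 2 mismatches, at least one 8-mer must match exactly, so query the
    # index for each piece and verify the rest of the alignment, counting
    # mismatches on either side of the exact hit.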
n = 2
matches = set()
total_hits = 0
for i in range(0, 24, 8):
pi = p[i:i+8]
hits = index.query(pi);
total_hits += len(hits)
for hit in hits:
if hit < i or hit - i + len(p) > len(t):
continue
missmatches = 0
for j in range(0, i):
if p[j] != t[hit - i + j]:
missmatches += 1
if missmatches > n:
break
for j in range(i + len(pi), len(p)):
if p[j] != t[hit - i + j]:
missmatches += 1
if missmatches > n:
break
if missmatches <= n:
matches.add(hit - i)
return sorted(list(matches)), total_hits
def approximate_matches_seq(p, t, index):
n = 2
matches = set()
total_hits = 0
for i in range(0, 3):
pi = p[i:]
hits = index.query(pi);
total_hits += len(hits)
for hit in hits:
if hit < i or hit - i + len(p) > len(t):
continue
missmatches = 0
for j in range(0, i):
if p[j] != t[hit - i + j]:
missmatches += 1
if missmatches > n:
break
for j in range(i + len(pi), len(p)):
if p[j] != t[hit - i + j]:
missmatches += 1
if missmatches > n:
break
if missmatches <= n:
matches.add(hit - i)
return sorted(list(matches)), total_hits
def question_01():
occurrences, comparisons, alignments = \
dnaseq.naive_instrumented(
human_chromosome,
"GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG")
print "question_01: %i" % alignments
def question_02():
occurrences, comparisons, alignments = \
dnaseq.naive_instrumented(
human_chromosome,
"GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG")
print "question_02: %i" % comparisons
def question_03():
p = "GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG"
p_bm = bm_preproc.BoyerMoore(p)
occurrences, comparisons, alignments = \
dnaseq.boyer_moore_instrumented(p, p_bm, human_chromosome)
print "question_03: %i" % alignments
def question_04():
p = "GGCGCGGTGGCTCACGCCTGTAAT"
index = kmer_index.Index(human_chromosome, 8)
matches, hits = approximate_matches(p, human_chromosome, index)
print "question_04: %i" % len(matches)
def question_05():
p = "GGCGCGGTGGCTCACGCCTGTAAT"
index = kmer_index.Index(human_chromosome, 8)
matches, hits = approximate_matches(p, human_chromosome, index)
print "question_05: %i" % hits
def question_06():
p = "GGCGCGGTGGCTCACGCCTGTAAT"
t = human_chromosome
index = kmer_index.SubseqIndex(t, 8, 3)
matches, hits = approximate_matches_seq(p, t, index)
print "question_06: %i" % hits
if __name__ == '__main__':
question_01()
question_02()
question_03()
question_04()
question_05()
question_06()
| python |
from .baselines import *
from .cocostuff import *
from .potsdam import *
from .duckietown import *
| python |
import os
import sys
import tempfile
from unittest import mock
from hashlib import sha1
from random import random
from io import StringIO
import argparse
from .base import BaseTest
from .. import cloudssh
class Test(BaseTest):
fake_reservations = [
{
'Groups': [],
'Instances': [
{
'InstanceId': 'i-b929323f777f4c016d',
'PrivateIpAddress': '10.0.0.60',
'PublicIpAddress': '123.456.7.89',
'State': {
'Code': 16,
'Name': 'running'
},
'Tags': [
{
'Key': 'Name',
'Value': 'test_instance'
}
]
},
{
'InstanceId': 'i-2959b4a6e3cdd13a2f',
'PrivateIpAddress': '10.0.0.61',
'PublicIpAddress': '123.456.7.90',
'State': {
'Code': 16,
'Name': 'running'
},
'Tags': [
{
'Key': 'Name',
'Value': 'test_instance_2'
}
]
},
{
'InstanceId': 'i-' + sha1(str(random()).encode('utf-8')).hexdigest()[:18],
'PrivateIpAddress': '10.0.0.62',
'PublicIpAddress': '123.456.7.91',
'State': {
'Code': 80,
'Name': 'stopped'
},
'Tags': [
{
'Key': 'Name',
'Value': 'test_instance_stopped'
}
]
},
{
'InstanceId': 'i-' + sha1(str(random()).encode('utf-8')).hexdigest()[:18],
'PrivateIpAddress': '10.0.0.63',
'PublicIpAddress': '123.456.7.94',
'State': {
'Code': 16,
'Name': 'running'
}
},
{
'InstanceId': 'i-' + sha1(str(random()).encode('utf-8')).hexdigest()[:18],
'PrivateIpAddress': '10.0.0.64',
'PublicIpAddress': '123.456.7.95',
'State': {
'Code': 16,
'Name': 'running'
},
'Tags': [
{
'Key': 'env',
'Value': 'prod'
}
]
}
]
}
]
test_config = """
[MAIN]
region = us-east-1
aws_profile_name = cloud_ssh_unittest
ssh_user = paul
"""
def setUp(self):
# Set unit tests config dir
self.tmp_config_dir = tempfile.TemporaryDirectory()
cloudssh.config_dir = self.tmp_config_dir.name + '/'
# Write default config
with open(cloudssh.config_dir + 'cloudssh.cfg', 'w') as f:
f.write(self.test_config)
# Parse config
cloudssh.parse_user_config()
# Set region
cloudssh.set_region()
def tearDown(self):
# Cleanup temp dir
self.tmp_config_dir.cleanup()
@mock.patch('argparse.ArgumentParser.parse_args',
return_value=argparse.Namespace(region=None, build_index=None, instance='my_server', search=None, info=None))
def test_parse_cli_args(self, mock_args):
args = cloudssh.parse_cli_args()
assert type(args) is dict
assert args['region'] is None # defaulted to None
assert args['build_index'] is False # defaulted to False
assert args['info'] is None # defaulted to None
def test_parse_user_config(self):
# Config file exists
assert isinstance(cloudssh.parse_user_config(), object)
# Config file does not exists
assert cloudssh.parse_user_config(filename='invalid.cfg') is None
def test_get_value_from_user_config(self):
# Get a valid config
assert cloudssh.get_value_from_user_config(
'aws_profile_name') == 'cloud_ssh_unittest'
# We should get None with an invalid config
assert cloudssh.get_value_from_user_config('invalid') is None
# We should get None if we don't have a loaded config
cloudssh.user_config = None
assert cloudssh.get_value_from_user_config('aws_profile_name') is None
def test_set_region(self):
# From config file
assert cloudssh.set_region() == 'us-east-1'
# Region sent from CLI
assert cloudssh.set_region(from_args='us-west-1') == 'us-west-1'
# Invalid region name
self.assertRaises(RuntimeError, cloudssh.set_region, 'us-invalid-1')
@mock.patch.object(cloudssh, 'get_value_from_user_config', return_value=None)
def test_set_region_2(self, mock_args):
# Test default without CLI input or config file
assert cloudssh.set_region() == 'us-east-1'
def test_get_aws_client(self):
client = cloudssh.get_aws_client()
# assert isinstance(client, botocore.client.EC2)
assert isinstance(client, object)
def test_is_instance_id(self):
assert cloudssh.is_instance_id('i-68602df5') is True
assert cloudssh.is_instance_id('i-015baacc848a0brfg') is True
assert cloudssh.is_instance_id('this_is_a_name') is False
def test_aws_lookup(self):
client = cloudssh.get_aws_client()
# Lookup an instance name
response = cloudssh.aws_lookup(
instance='cloudssh_test_instance', client=client)
assert isinstance(response, dict)
assert isinstance(response['Reservations'], list)
# lookup an instance ID
response = cloudssh.aws_lookup(
instance='i-06bb6dbab77bfcf3f', client=client)
assert isinstance(response, dict)
assert isinstance(response['Reservations'], list)
def test_get_instance_infos(self):
assert cloudssh.get_instance_infos(
reservations=self.fake_reservations) == {
'id': 'i-b929323f777f4c016d',
'launch_date': None,
'private_ip': '10.0.0.60',
'public_ip': '123.456.7.89',
'subnet': None,
'tags': [{'Key': 'Name', 'Value': 'test_instance'}],
'type': None,
'vpc': None
}
# No reservations
self.assertRaises(
SystemExit, cloudssh.get_instance_infos, reservations=[])
# Reservations but no public IP
altered = self.fake_reservations
altered[0]['Instances'][0].pop('PublicIpAddress')
self.assertRaises(SystemExit, cloudssh.get_instance_infos,
reservations=altered)
def test_get_ssh_command(self):
assert cloudssh.get_ssh_command(public_ip='123.456.7.89') == [
'ssh', '123.456.7.89']
assert cloudssh.get_ssh_command(
public_ip='123.456.7.89',
user='paul'
        ) == ['ssh', '[email protected]']
assert cloudssh.get_ssh_command(
public_ip='123.456.7.89',
proxyjump='1.2.3.4'
) == ['ssh', '-J 1.2.3.4', '123.456.7.89']
assert cloudssh.get_ssh_command(
public_ip='123.456.7.89',
flag='-v'
) == ['ssh', 'v', '123.456.7.89']
assert cloudssh.get_ssh_command(
public_ip='123.456.7.89',
user='paul',
proxyjump='1.2.3.4',
flag='-v'
        ) == ['ssh', '-J 1.2.3.4', 'v', '[email protected]']
def test_resolve_home(self):
assert cloudssh.resolve_home('/tmp/full/path') == '/tmp/full/path'
assert cloudssh.resolve_home(
'~/in_home').startswith(('/home/', '/Users'))
def test_is_dir(self):
assert cloudssh.is_dir('/tmp/nonexistent') is False
assert cloudssh.is_dir('/tmp/') is True
def test_mkdir(self):
test_dir = '/tmp/test_mkdir'
assert cloudssh.mkdir(test_dir) is True
os.rmdir(test_dir)
def test_get_instances_list(self):
assert cloudssh.get_instances_list(
reservations=self.fake_reservations) == [
{
'name': 'test_instance',
'detail': {
'id': 'i-b929323f777f4c016d',
'public_ip': None,
'private_ip': '10.0.0.60',
'type': None,
'vpc': None,
'subnet': None,
'launch_date': None,
'tags': [{'Key': 'Name', 'Value': 'test_instance'}]
}
}, {
'name': 'test_instance_2',
'detail': {
'id': 'i-2959b4a6e3cdd13a2f',
'public_ip': '123.456.7.90',
'private_ip': '10.0.0.61',
'type': None,
'vpc': None,
'subnet': None,
'launch_date': None,
'tags': [{'Key': 'Name', 'Value': 'test_instance_2'}]
}
}
]
# No reservations
self.assertRaises(
SystemExit, cloudssh.get_instances_list, reservations=[])
def test_read_index(self):
filename = 'test_read_file'
cloudssh.write_index(
filename=filename,
content={'a': True}
)
# Read file
assert cloudssh.read_index(filename=filename) == {'a': True}
# Read invalid file
assert cloudssh.read_index(filename='/tmp/nonexistent') == {}
def test_write_index(self):
filename = 'test_write_index'
assert cloudssh.write_index(
filename=filename,
content={}
) is True
@mock.patch.object(cloudssh, 'get_value_from_user_config', return_value='my_profile')
def test_append_to_index(self, mock_args):
cloudssh.region = 'us-east-1'
# With an existing index
assert cloudssh.append_to_index(
existing_index={
'my_profile': {
'us-west-1': ['name_123']
}
},
new=['name_1', 'name_2']
) == {
'my_profile': {
'us-west-1': ['name_123'],
'us-east-1': ['name_1', 'name_2'],
}
}
# Without an existing index
assert cloudssh.append_to_index(
existing_index={},
new=['name_1', 'name_2']
) == {
'my_profile': {
'us-east-1': ['name_1', 'name_2'],
}
}
def test_build_index(self):
filename = 'test_index'
assert cloudssh.build_index(filename=filename) is True
# Build index with config dir creation
with tempfile.TemporaryDirectory() as test_dir:
cloudssh.config_dir = test_dir + '/new_path/'
assert cloudssh.build_index(filename=filename) is True
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'publicIp': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'publicIp': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'publicIp': '123.456.789.2'}}])
@mock.patch('src.cloudssh.confirm', return_value=True)
def test_search_one_result(self, mock_args, mock_args_2):
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
# Render file content to stdout
cloudssh.search(query='other_thing')
output = out.getvalue().strip()
assert output == '' # Because it was intercepted and never printed
finally:
sys.stdout = saved_stdout
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'publicIp': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'publicIp': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'publicIp': '123.456.789.2'}}])
def test_search_multiple_results(self, mock_args):
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
# Catch `exit()` and render content to stdout
self.assertRaises(
SystemExit, cloudssh.search, query='thing')
output = out.getvalue().strip()
assert output == 'Results:\n* one_thing\n* one_other_thing\n* third_thing'
finally:
sys.stdout = saved_stdout
def test_search_no_result(self):
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
# Catch `exit()` and render content to stdout
self.assertRaises(
SystemExit, cloudssh.search, query='invalid_name')
output = out.getvalue().strip()
assert output == 'No result!'
finally:
sys.stdout = saved_stdout
def test_confirm(self):
with mock.patch('builtins.input', return_value='y'):
self.assertTrue(cloudssh.confirm())
self.assertTrue(cloudssh.confirm(resp=True))
def test_confirm_2(self):
with mock.patch('builtins.input', return_value='n'):
self.assertFalse(cloudssh.confirm())
self.assertFalse(cloudssh.confirm(resp=True))
def test_confirm_3(self):
# Test empty return
with mock.patch('builtins.input', return_value=''):
self.assertTrue(cloudssh.confirm(resp=True))
def test_get_instances_list_from_index(self):
filename = 'test_get_instances_list_from_index'
cloudssh.region = 'us-east-1'
# Write test index
cloudssh.write_index(
filename=filename,
content={
'cloud_ssh_unittest': {
'us-west-1': [{'name': 'name_123'}],
'us-east-1': [{'name': 'name_1'}, {'name': 'name_2'}],
}
}
)
assert cloudssh.get_instances_list_from_index(filename=filename) == [
{'name': 'name_1'}, {'name': 'name_2'}]
@mock.patch.object(cloudssh, 'get_value_from_user_config', return_value='nonexistent_profile')
def test_get_instances_list_from_index_2(self, mock_args):
filename = 'test_get_instances_list_from_index'
assert cloudssh.get_instances_list_from_index(filename=filename) == []
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}, {'name': 'with space'}])
@mock.patch('readline.get_line_buffer', return_value='one')
def test_autocomplete(self, mock_args, mock_args_2):
assert cloudssh.autocomplete('on', state=0) == 'one_thing'
assert cloudssh.autocomplete(
'on', state=1) == 'one_other_thing'
assert cloudssh.autocomplete('on', state=2) is None
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}, {'name': 'with space'}])
@mock.patch('readline.get_line_buffer', return_value='with ')
def test_autocomplete_2(self, mock_args, mock_args_2):
assert cloudssh.autocomplete('on', state=0) == 'space'
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}])
@mock.patch('readline.get_line_buffer', return_value='ONE')
def test_autocomplete_3(self, mock_args, mock_args_2):
assert cloudssh.autocomplete(
'on', state=0, is_case_sensitive=True) is None
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}])
@mock.patch('readline.get_line_buffer', return_value='ONE')
def test_autocomplete_4(self, mock_args, mock_args_2):
assert cloudssh.autocomplete('on', state=0) == 'one_thing'
assert cloudssh.autocomplete(
'on', state=1) == 'one_other_thing'
assert cloudssh.autocomplete('on', state=2) is None
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}])
@mock.patch('builtins.input', return_value='some_value')
def test_get_input_autocomplete(self, mock_args, mock_args_2):
assert cloudssh.get_input_autocomplete() == 'some_value'
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'public_ip': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'public_ip': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'public_ip': '123.456.789.2'}}])
def test_instance_lookup_index(self, mock_args):
assert cloudssh.instance_lookup(
'one_thing') == ('index', {'public_ip': '123.456.789.0'})
@mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'public_ip': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'public_ip': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'public_ip': '123.456.789.2'}}])
def test_instance_lookup_aws(self, mock_args):
assert cloudssh.instance_lookup(
'cloudssh_test_instance') == ('aws', {
'id': 'i-06bb6dbab77bfcf3f',
'public_ip': '52.6.180.201',
'private_ip': '172.31.91.210',
'type': 't2.micro',
'vpc': 'vpc-37911a4d',
'subnet': 'subnet-e4f389ca',
'launch_date': '2019-04-05 19:15:28+00:00',
'tags': [{'Key': 'Name', 'Value': 'cloudssh_test_instance'}]
})
| python |
#!/usr/bin/env python
from glob import glob
import re
from collections import Counter
import subprocess32 as sp
import string
from itertools import product
from sys import stderr
from time import time
def split_regions_file(boot_contigs_dict, fnames, size):
"""
    Takes a Counter dictionary of bootstrapped contigs and an iterator over
    file names to choose from, and writes out split regions files so that
    repetitions of a contig are NOT spread over different split regions files.
"""
c = 0 # initialise contig count
# get next file name from iterator
fn = fnames.next()
# open new file for writing and get filehandle
out = open("split_rf/" + fn[0] + fn[1], "w")
# iterate over Counter dict of bootstrapped contigs, key=contig name, value=count (rep)
for contig,rep in sorted(boot_contigs_dict.items(), key=lambda x: int(x[0].replace("Contig_", ""))):
c+=rep
if c > size: # write up to 'size' contigs to each split rf file
out.close() # close current rf file
fn = fnames.next() # get next file name from iterator
out = open("split_rf/" + fn[0] + fn[1], "w") # open new rf file for writing
c = rep
for _ in range(rep): # write contig name to rf file as often as it occurs in the bootstrap resample
out.write(contig + "\n")
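# Editorial worked example (not part of the original script): with
# boot_contigs_dict = Counter({'Contig_1': 2, 'Contig_2': 1}) and size=2, the first split
# file receives 'Contig_1' twice (c == 2, not greater than size); 'Contig_2' then pushes
# c to 3 > size, so it goes once into the next file -- repetitions of a contig therefore
# never straddle two split regions files.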
index = '' # index of bootstrap replicate
for rf in sorted(glob("including_non-overlapping/BOOT_RF/000*")):
start = time()
index = re.findall(r'\d+', rf)[-1]
# reset array for bootstrapped contigs
boot_contigs = []
with open(rf, "r") as boot_rf:
for contig in boot_rf:
boot_contigs.append(contig.rstrip())
# create dictionary of counts of contigs
boot_contigs_dict = Counter(boot_contigs)
# clear directory
sp.call("rm -f split_rf/*", shell=True)
# get filename iterator
fnames = product(string.lowercase, repeat=2)
# split bootstrapped regions file, 400 contigs per file
split_regions_file(boot_contigs_dict, fnames, 400)
# remove previous split SAF files for PAR
cmd = "rm -f including_non-overlapping/SAF/bootstrap/PAR/[a-z]*"
sp.call(cmd, shell=True)
# remove previous split SAF files for ERY
cmd = cmd.replace("PAR", "ERY")
sp.call(cmd, shell=True)
# run SAF calculation in parallel for PAR
cmd = 'ls split_rf/* | parallel -j 24 "angsd -bam PAR.slim.bamfile.list -ref Big_Data_ref.fa \
-anc Big_Data_ref.fa -out including_non-overlapping/SAF/bootstrap/PAR/{/}.unfolded -fold 0 \
-sites all.sites -rf {} -only_proper_pairs 0 -baq 1 -minMapQ 5 -minInd 9 -GL 1 -doSaf 1 -nThreads 1 2>/dev/null"'
sp.call(cmd, shell=True)
# run SAF calculation in parallel for ERY
cmd = cmd.replace("PAR", "ERY")
sp.call(cmd, shell=True)
# concatenate split SAF files for PAR
cmd = "realSFS cat -outnames including_non-overlapping/SAF/bootstrap/PAR/{}.unfolded including_non-overlapping/SAF/bootstrap/PAR/[a-z]*saf.idx 2>/dev/null".format(index)
sp.call(cmd, shell=True)
# concatenate split SAF files for ERY
cmd = cmd.replace("PAR", "ERY")
sp.call(cmd, shell=True)
end = time()
run_time = end - start
print >> stderr, "Finished SAF calculation for bootstrap {0}. It took {1} sec to complete.".format(index, int(run_time))
# remove split SAF files for PAR
cmd = "rm -f including_non-overlapping/SAF/bootstrap/PAR/[a-z]*"
sp.call(cmd, shell=True)
# remove split SAF files for ERY
cmd = cmd.replace("PAR", "ERY")
sp.call(cmd, shell=True)
| python |
import tempfile
from django.urls import reverse
from PIL import Image
from rest_framework import status
from rest_framework.test import APITestCase
from brouwers.users.tests.factories import UserFactory
from ..factories import AlbumFactory, PhotoFactory
class PhotoViewsetTests(APITestCase):
def setUp(self):
super().setUp()
self.user = UserFactory.create()
self.album = AlbumFactory.create(user=self.user)
self.list_url = reverse("api:photo-list")
def test_upload(self):
"""
Test that API uploads are possible.
"""
data = {"album": self.album.pk}
# anonymous
response = self.client.post(self.list_url, data, format="multipart")
self.assertEqual(
response.data, {"detail": "Authentication credentials were not provided."}
)
# authenticated
self.client.login(username=self.user.username, password="password")
# create an image
image = Image.new("RGB", (192, 108), "green")
tmp_file = tempfile.NamedTemporaryFile(suffix=".jpg")
image.save(tmp_file, format="JPEG")
with open(tmp_file.name, "rb") as image:
data.update(
{
"image": image,
"description": "dummy description",
}
)
response = self.client.post(self.list_url, data, format="multipart")
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(response.data["success"], True)
def test_failing_upload(self):
self.client.login(username=self.user.username, password="password")
response = self.client.post(
self.list_url, {"album": self.album.pk}, format="multipart"
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("image", response.data) # there must be an error
def test_list_photos(self):
photos = PhotoFactory.create_batch(10, album=self.album)
self.client.login(username=self.user.username, password="password")
response = self.client.get(self.list_url, {"album": self.album.pk})
self.assertEqual(response.data["count"], 10)
for photo, result in zip(photos, response.data["results"]):
self.assertEqual(photo.id, result["id"])
self.assertEqual(set(result["image"].keys()), set(["large", "thumb"]))
def test_detail_next_previous(self):
photos = PhotoFactory.create_batch(5, album=self.album)
next_url = reverse("api:photo-next", kwargs={"pk": photos[2].pk})
previous_url = reverse("api:photo-previous", kwargs={"pk": photos[2].pk})
response = self.client.get(next_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["id"], photos[3].id)
response = self.client.get(previous_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["id"], photos[1].id)
def test_unauthenticated_rotate(self):
photo = PhotoFactory.create(
album=self.album, image__width=100, image__height=50
)
detail_url = reverse("api:photo-rotate", kwargs={"pk": photo.pk})
response = self.client.patch(detail_url, data={"direction": "cw"})
self.assertEqual(response.status_code, 403)
def test_rotate(self):
photo = PhotoFactory.create(
album=self.album, image__width=100, image__height=50
)
self.client.login(username=self.user.username, password="password")
detail_url = reverse("api:photo-rotate", kwargs={"pk": photo.pk})
response = self.client.patch(detail_url, data={"direction": "cw"}) # clockwise
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["width"], 50)
self.assertEqual(response.data["height"], 100)
img = Image.open(photo.image.path)
self.assertEqual(img.size, (50, 100))
response = self.client.patch(
detail_url, data={"direction": "ccw"}
) # counter-clockwise
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["width"], 100)
self.assertEqual(response.data["height"], 50)
img = Image.open(photo.image.path)
self.assertEqual(img.size, (100, 50))
def test_invalid_rotate(self):
photo = PhotoFactory.create(
album=self.album, image__width=100, image__height=50
)
self.client.login(username=self.user.username, password="password")
detail_url = reverse("api:photo-rotate", kwargs={"pk": photo.pk})
response = self.client.patch(
detail_url, data={"direction": "fl;asjdf"}
) # clockwise
self.assertEqual(response.status_code, 400)
| python |
"""
MIT License
Copyright (c) 2020 Shahibur Rahaman
"""
import Operations
import time
def main():
print(
"""
Calculator version 2.9.10.20
Copyright (c) Shahibur Rahaman
Licensed under the MIT License.
|> Press (Ctrl + C) to exit the program.
|> Choose your operation:
1. Addition
2. Subtraction
3. Multiplication
4. Division
"""
)
choice = 0
while True:
try:
while True:
try:
choice = int(input("Enter your choice: [1, 2, 3, 4] "))
if choice > 4 or choice < 1:
print("\nPlease enter your choice according to the given operation options only!")
continue
except ValueError:
print("\nPlease enter a numerical value only!")
continue
else:
break
while True:
try:
x = float(input("\nEnter the first number: "))
y = float(input("Enter the second number: "))
except ValueError:
print("\nPlease enter numerical values only!\n")
else:
break
add = Operations.Operation(x, y).addition()
sub = Operations.Operation(x, y).subtraction()
mul = Operations.Operation(x, y).multiplication()
div = Operations.Operation(x, y).division()
c = choice
print("\n--------------------------")
if c == 1:
print(f"{x} + {y} = {add}")
elif c == 2:
print(f"{x} - {y} = {sub}")
elif c == 3:
print(f"{x} X {y} = {mul}")
elif c == 4:
print(f"{x} / {y} = {div}")
print("--------------------------\n")
except KeyboardInterrupt:
print("\nExiting...")
time.sleep(1)
break
if __name__ == "__main__":
main()
| python |
import sys
import json
from data_grab.run_scraper import Scraper
if len(sys.argv) < 2:
    print('Please give a topic name, e.g. "Clock"')
sys.exit()
topic = sys.argv[1]
data_obj = False
j_data = json.loads(open('data_grab/resources/topic_examvida.json').read())
for c in j_data:
if topic == c["topic_name"]:
topic_name = topic
data_obj = c
break
if not data_obj:
print("<<Error>> [ Topic Not Found ] - " + topic)
sys.exit()
print("Topic Found - Please Wait")
scraper = Scraper()
if len(sys.argv) > 2:
if(sys.argv[2]=="-y"):
scraper.run_spiders(data_obj , False)
else:
scraper.run_spiders(data_obj)
else:
scraper.run_spiders(data_obj) | python |
from views.main_view import prompt
prompt()
| python |
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of 'pmap' utility on Linux, 'vmmap' on OSX and 'procstat -v' on BSD.
Report memory map of a process.
$ python scripts/pmap.py 32402
pid=32402, name=hg
Address RSS Mode Mapping
0000000000400000 1200K r-xp /usr/bin/python2.7
0000000000838000 4K r--p /usr/bin/python2.7
0000000000839000 304K rw-p /usr/bin/python2.7
00000000008ae000 68K rw-p [anon]
000000000275e000 5396K rw-p [heap]
00002b29bb1e0000 124K r-xp /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb203000 8K rw-p [anon]
00002b29bb220000 528K rw-p [anon]
00002b29bb2d8000 768K rw-p [anon]
00002b29bb402000 4K r--p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb403000 8K rw-p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb405000 60K r-xp /lib/x86_64-linux-gnu/libpthread-2.17.so
00002b29bb41d000 0K ---p /lib/x86_64-linux-gnu/libpthread-2.17.so
00007fff94be6000 48K rw-p [stack]
00007fff94dd1000 4K r-xp [vdso]
ffffffffff600000 0K r-xp [vsyscall]
...
"""
import sys
import psutil
def main():
if len(sys.argv) != 2:
sys.exit('usage: pmap <pid>')
p = psutil.Process(int(sys.argv[1]))
print("pid=%s, name=%s" % (p.pid, p.name()))
templ = "%-16s %10s %-7s %s"
print(templ % ("Address", "RSS", "Mode", "Mapping"))
total_rss = 0
for m in p.memory_maps(grouped=False):
total_rss += m.rss
print(templ % (
m.addr.split('-')[0].zfill(16),
str(m.rss / 1024) + 'K',
m.perms,
m.path))
print("-" * 33)
print(templ % ("Total", str(total_rss / 1024) + 'K', '', ''))
if __name__ == '__main__':
main()
| python |
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict, List, Optional, Union
from pydantic import BaseModel, Field
from rubrix.client.models import Text2TextRecord as ClientText2TextRecord
from rubrix.client.sdk.commons.models import (
MACHINE_NAME,
BaseAnnotation,
BaseRecord,
PredictionStatus,
ScoreRange,
TaskStatus,
UpdateDatasetRequest,
)
class Text2TextPrediction(BaseModel):
text: str
score: float = Field(default=1.0, ge=0.0, le=1.0)
class Text2TextAnnotation(BaseAnnotation):
sentences: List[Text2TextPrediction]
class CreationText2TextRecord(BaseRecord[Text2TextAnnotation]):
text: str
@classmethod
def from_client(cls, record: ClientText2TextRecord):
prediction = None
if record.prediction is not None:
prediction = Text2TextAnnotation(
sentences=[
Text2TextPrediction(text=pred[0], score=pred[1])
if isinstance(pred, tuple)
else Text2TextPrediction(text=pred)
for pred in record.prediction
],
agent=record.prediction_agent or MACHINE_NAME,
)
annotation = None
if record.annotation is not None:
annotation = Text2TextAnnotation(
sentences=[Text2TextPrediction(text=record.annotation)],
agent=record.annotation_agent or MACHINE_NAME,
)
return cls(
text=record.text,
prediction=prediction,
annotation=annotation,
status=record.status,
metadata=record.metadata,
id=record.id,
event_timestamp=record.event_timestamp,
)
class Text2TextRecord(CreationText2TextRecord):
last_updated: datetime = None
_predicted: Optional[PredictionStatus] = Field(alias="predicted")
def to_client(self) -> ClientText2TextRecord:
return ClientText2TextRecord(
text=self.text,
prediction=[
(sentence.text, sentence.score)
for sentence in self.prediction.sentences
]
if self.prediction
else None,
prediction_agent=self.prediction.agent if self.prediction else None,
annotation=self.annotation.sentences[0].text if self.annotation else None,
annotation_agent=self.annotation.agent if self.annotation else None,
status=self.status,
metadata=self.metadata or {},
id=self.id,
event_timestamp=self.event_timestamp,
metrics=self.metrics or None,
search_keywords=self.search_keywords or None,
)
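# Editorial round-trip sketch (not part of the module): a client record whose prediction is
# [("some text", 0.9)] becomes a Text2TextAnnotation holding Text2TextPrediction(text="some text",
# score=0.9) via CreationText2TextRecord.from_client, and Text2TextRecord.to_client() above emits
# the (text, score) tuples again; annotations keep only the single annotated sentence's text.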
class Text2TextBulkData(UpdateDatasetRequest):
records: List[CreationText2TextRecord]
class Text2TextQuery(BaseModel):
ids: Optional[List[Union[str, int]]]
query_text: str = Field(default=None)
advanced_query_dsl: bool = False
annotated_by: List[str] = Field(default_factory=list)
predicted_by: List[str] = Field(default_factory=list)
score: Optional[ScoreRange] = Field(default=None)
status: List[TaskStatus] = Field(default_factory=list)
predicted: Optional[PredictionStatus] = Field(default=None, nullable=True)
metadata: Optional[Dict[str, Union[str, List[str]]]] = None
| python |
# Copyright (c) 2018, Manfred Moitzi
# License: MIT License
import pytest
import os
import ezdxf
BASEDIR = 'integration_tests' if os.path.exists('integration_tests') else '.'
DATADIR = 'data'
COLDFIRE = r"D:\Source\dxftest\CADKitSamples\kit-dev-coldfire-xilinx_5213.dxf"
@pytest.mark.skipif(not os.path.exists(COLDFIRE), reason='test data not present')
def test_kit_dev_coldfire():
doc = ezdxf.readfile(COLDFIRE)
auditor = doc.audit()
assert len(auditor) == 0
@pytest.fixture(params=['Leica_Disto_S910.dxf'])
def filename(request):
filename = os.path.join(BASEDIR, DATADIR, request.param)
if not os.path.exists(filename):
pytest.skip(f'File {filename} not found.')
return filename
def test_leica_disto_r12(filename):
doc = ezdxf.readfile(filename, legacy_mode=True)
auditor = doc.audit()
assert len(auditor) == 0
| python |
def run():
my_range = range(0, 7, 2)
print(my_range)
other_range = range(0, 8, 2)
print(other_range)
print(id(my_range))
print(id(other_range))
print(my_range == other_range) # Validate (value equality)
print(my_range is other_range) # Validate (object equality)
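    # Note: in Python 3 two ranges compare equal when they represent the same sequence of
    # values, so range(0, 7, 2) == range(0, 8, 2) is True (both yield 0, 2, 4, 6), while
    # `is` is False because they are distinct objects.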
    # Even numbers
for i in range(0, 101, 2):
print(i)
    # Odd numbers
for i in range(1, 99):
if i % 2 != 0:
            print(f'{i} is odd')
if __name__ == '__main__':
run()
| python |
from fastapi import APIRouter, Depends
from typing import List
from src.utils.crud_router import include_generic_collection_document_router
from src.dependencies import current_active_user
from src.services.courses import CourseService, CourseSectionService
dependencies: List[Depends] = [Depends(current_active_user)]
course_service: CourseService = CourseService()
course_router: APIRouter = APIRouter(dependencies=dependencies,
prefix="/api/courses", tags=["Course"])
include_generic_collection_document_router(course_router, course_service)
course_section_service: CourseSectionService = CourseSectionService()
course_section_router: APIRouter = APIRouter(dependencies=dependencies,
prefix="/api/course-section", tags=["CourseSection"])
include_generic_collection_document_router(course_section_router, course_section_service)
| python |
from typing import Dict, Text, Any, List
import tensorflow_transform as tft
def preprocessing_fn(inputs: Dict[Text, Any], custom_config) -> Dict[Text, Any]:
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
custom_config:
timesteps: The number of timesteps in the look back window
features: Which of the features from the TF.Example to use in the model.
Returns:
Map from string feature key to transformed feature operations.
"""
feature_columns = sorted(custom_config["feature_columns"])
features = {}
for feature in feature_columns:
if feature not in inputs.keys():
raise ValueError(
f"Input is missing required feature {feature}. Input has: {inputs.keys()}"
)
features[f"{feature}"] = tft.scale_to_z_score(inputs[feature])
return features
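# Editorial usage sketch (feature names below are hypothetical) showing the expected call
# shape inside a tf.Transform pipeline:
#   transformed = preprocessing_fn(
#       inputs={"open": open_tensor, "close": close_tensor},
#       custom_config={"feature_columns": ["close", "open"]})
# Each selected column is z-score scaled with tft.scale_to_z_score and returned under its
# original name; a column missing from `inputs` raises ValueError.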
| python |
import numpy as np
import tensorflow as tf
from datasets import audio
from infolog import log
from wavenet_vocoder import util
from wavenet_vocoder.util import *
from .gaussian import sample_from_gaussian
from .mixture import sample_from_discretized_mix_logistic
from .modules import (Conv1D1x1, ConvTranspose2D, ConvTranspose1D, DiscretizedMixtureLogisticLoss, Embedding, GaussianMaximumLikelihoodEstimation,
LeakyReluActivation, MaskedCrossEntropyLoss, ReluActivation, ResidualConv1DGLU, WeightNorm)
import pdb
def _expand_global_features(batch_size, time_length, global_features, data_format='BCT'):
"""Expand global conditioning features to all time steps
Args:
batch_size: int
time_length: int
global_features: Tensor of shape [batch_size, channels] or [batch_size, channels, 1]
data_format: string, 'BCT' to get output of shape [batch_size, channels, time_length]
or 'BTC' to get output of shape [batch_size, time_length, channels]
Returns:
None or Tensor of shape [batch_size, channels, time_length] or [batch_size, time_length, channels]
"""
accepted_formats = ['BCT', 'BTC']
if not (data_format in accepted_formats):
		raise ValueError('{} is an unknown data format, accepted formats are "BCT" and "BTC"'.format(data_format))
if global_features is None:
return None
#[batch_size, channels] ==> [batch_size, channels, 1]
# g = tf.cond(tf.equal(tf.rank(global_features), 2),
# lambda: tf.expand_dims(global_features, axis=-1),
# lambda: global_features)
g = tf.reshape(global_features, [tf.shape(global_features)[0], tf.shape(global_features)[1], 1])
g_shape = tf.shape(g)
#[batch_size, channels, 1] ==> [batch_size, channels, time_length]
# ones = tf.ones([g_shape[0], g_shape[1], time_length], tf.int32)
# g = g * ones
g = tf.tile(g, [1, 1, time_length])
if data_format == 'BCT':
return g
else:
#[batch_size, channels, time_length] ==> [batch_size, time_length, channels]
return tf.transpose(g, [0, 2, 1])
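# Editorial shape note: with batch_size=2, time_length=100 and a [2, 16] (or [2, 16, 1])
# speaker embedding, _expand_global_features tiles it to [2, 16, 100] for data_format='BCT'
# or to [2, 100, 16] for 'BTC'; global_features=None simply returns None.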
def receptive_field_size(total_layers, num_cycles, kernel_size, dilation=lambda x: 2**x):
"""Compute receptive field size.
Args:
		total_layers: int
num_cycles: int
kernel_size: int
dilation: callable, function used to compute dilation factor.
use "lambda x: 1" to disable dilated convolutions.
Returns:
int: receptive field size in sample.
"""
assert total_layers % num_cycles == 0
layers_per_cycle = total_layers // num_cycles
dilations = [dilation(i % layers_per_cycle) for i in range(total_layers)]
return (kernel_size - 1) * sum(dilations) + 1
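# Editorial worked example: for a hypothetical 24-layer, 4-stack model with kernel_size=3,
# each stack uses dilations 1+2+4+8+16+32 = 63, so the receptive field is
# (3 - 1) * 63 * 4 + 1 = 505 samples.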
def maybe_Normalize_weights(layer, weight_normalization=True, init=False, init_scale=1.):
"""Maybe Wraps layer with Weight Normalization wrapper.
	Args:
layer: tf layers instance, the layer candidate for normalization
weight_normalization: Boolean, determines whether to normalize the layer
init: Boolean, determines if the current run is the data dependent initialization run
init_scale: Float, Initialisation scale of the data dependent initialization. Usually 1.
"""
if weight_normalization:
return WeightNorm(layer, init, init_scale)
return layer
class WaveNet():
"""Tacotron-2 Wavenet Vocoder model.
"""
def __init__(self, hparams, init):
#Get hparams
self._hparams = hparams
if self.local_conditioning_enabled():
assert hparams.num_mels == hparams.cin_channels
#Initialize model architecture
assert hparams.layers % hparams.stacks == 0
layers_per_stack = hparams.layers // hparams.stacks
self.scalar_input = is_scalar_input(hparams.input_type)
#first (embedding) convolution
with tf.variable_scope('input_convolution'):
if self.scalar_input:
self.first_conv = Conv1D1x1(hparams.residual_channels,
weight_normalization=hparams.wavenet_weight_normalization,
weight_normalization_init=init,
weight_normalization_init_scale=hparams.wavenet_init_scale,
name='input_convolution')
else:
self.first_conv = Conv1D1x1(hparams.residual_channels,
weight_normalization=hparams.wavenet_weight_normalization,
weight_normalization_init=init,
weight_normalization_init_scale=hparams.wavenet_init_scale,
name='input_convolution')
#Residual Blocks
self.residual_layers = []
for layer in range(hparams.layers):
self.residual_layers.append(ResidualConv1DGLU(
hparams.residual_channels, hparams.gate_channels,
kernel_size=hparams.kernel_size,
skip_out_channels=hparams.skip_out_channels,
use_bias=hparams.use_bias,
dilation_rate=2**(layer % layers_per_stack),
dropout=hparams.wavenet_dropout,
cin_channels=hparams.cin_channels,
gin_channels=hparams.gin_channels,
weight_normalization=hparams.wavenet_weight_normalization,
init=init,
init_scale=hparams.wavenet_init_scale,
name='ResidualConv1DGLU_{}'.format(layer)))
#Final (skip) convolutions
with tf.variable_scope('skip_convolutions'):
self.last_conv_layers = [
ReluActivation(name='final_conv_relu1'),
Conv1D1x1(hparams.skip_out_channels,
weight_normalization=hparams.wavenet_weight_normalization,
weight_normalization_init=init,
weight_normalization_init_scale=hparams.wavenet_init_scale,
name='final_convolution_1'),
ReluActivation(name='final_conv_relu2'),
Conv1D1x1(hparams.out_channels,
weight_normalization=hparams.wavenet_weight_normalization,
weight_normalization_init=init,
weight_normalization_init_scale=hparams.wavenet_init_scale,
name='final_convolution_2'),]
#Global conditionning embedding
if hparams.gin_channels > 0 and hparams.use_speaker_embedding:
assert hparams.n_speakers is not None
self.embed_speakers = Embedding(
hparams.n_speakers, hparams.gin_channels, std=0.1, name='gc_embedding')
else:
self.embed_speakers = None
self.all_convs = [self.first_conv] + self.residual_layers + self.last_conv_layers
#Upsample conv net
if hparams.upsample_conditional_features:
self.upsample_conv = []
for i, s in enumerate(hparams.upsample_scales):
with tf.variable_scope('local_conditioning_upsampling_{}'.format(i+1)):
if hparams.upsample_type == '2D':
convt = ConvTranspose2D(1, (hparams.freq_axis_kernel_size, 2*s),
padding='same', strides=(1, s))
else:
assert hparams.upsample_type == '1D'
convt = ConvTranspose1D(hparams.cin_channels, (2*s, ),
padding='same', strides=(s, ))
self.upsample_conv.append(maybe_Normalize_weights(convt,
hparams.wavenet_weight_normalization, init, hparams.wavenet_init_scale))
if hparams.upsample_activation == 'LeakyRelu':
self.upsample_conv.append(LeakyReluActivation(alpha=hparams.leaky_alpha,
name='upsample_leaky_relu_{}'.format(i+1)))
elif hparams.upsample_activation == 'Relu':
self.upsample_conv.append(ReluActivation(name='upsample_relu_{}'.format(i+1)))
else:
assert hparams.upsample_activation == None
self.all_convs += self.upsample_conv
else:
self.upsample_conv = None
self.receptive_field = receptive_field_size(hparams.layers,
hparams.stacks, hparams.kernel_size)
def set_mode(self, is_training):
for conv in self.all_convs:
try:
conv.set_mode(is_training)
except AttributeError:
pass
def initialize(self, y, c, g, input_lengths, x=None, synthesis_length=None):
'''Initialize wavenet graph for train, eval and test cases.
'''
hparams = self._hparams
self.is_training = x is not None
self.is_evaluating = not self.is_training and y is not None
#Set all convolutions to corresponding mode
self.set_mode(self.is_training)
log('Initializing Wavenet model. Dimensions (? = dynamic shape): ')
log(' Train mode: {}'.format(self.is_training))
log(' Eval mode: {}'.format(self.is_evaluating))
log(' Synthesis mode: {}'.format(not (self.is_training or self.is_evaluating)))
with tf.variable_scope('inference') as scope:
#Training
if self.is_training:
batch_size = tf.shape(x)[0]
#[batch_size, time_length, 1]
self.mask = self.get_mask(input_lengths, maxlen=tf.shape(x)[-1]) #To be used in loss computation
#[batch_size, channels, time_length]
y_hat = self.step(x, c, g, softmax=False) #softmax is automatically computed inside softmax_cross_entropy if needed
if is_mulaw_quantize(hparams.input_type):
#[batch_size, time_length, channels]
self.y_hat_q = tf.transpose(y_hat, [0, 2, 1])
self.y_hat = y_hat
self.y = y
self.input_lengths = input_lengths
				#Add mean and scale stats if using Gaussian distribution output (there would be too many logistics if using MoL)
if self._hparams.out_channels == 2:
self.means = self.y_hat[:, 0, :]
self.log_scales = self.y_hat[:, 1, :]
else:
self.means = None
#Graph extension for log saving
#[batch_size, time_length]
shape_control = (batch_size, tf.shape(x)[-1], 1)
with tf.control_dependencies([tf.assert_equal(tf.shape(y), shape_control)]):
y_log = tf.squeeze(y, [-1])
if is_mulaw_quantize(hparams.input_type):
self.y = y_log
y_hat_log = tf.cond(tf.equal(tf.rank(y_hat), 4),
lambda: tf.squeeze(y_hat, [-1]),
lambda: y_hat)
y_hat_log = tf.reshape(y_hat_log, [batch_size, hparams.out_channels, -1])
if is_mulaw_quantize(hparams.input_type):
#[batch_size, time_length]
y_hat_log = tf.argmax(tf.nn.softmax(y_hat_log, axis=1), 1)
y_hat_log = util.inv_mulaw_quantize(y_hat_log, hparams.quantize_channels)
y_log = util.inv_mulaw_quantize(y_log, hparams.quantize_channels)
else:
#[batch_size, time_length]
if hparams.out_channels == 2:
y_hat_log = sample_from_gaussian(
y_hat_log, log_scale_min_gauss=hparams.log_scale_min_gauss)
else:
y_hat_log = sample_from_discretized_mix_logistic(
y_hat_log, log_scale_min=hparams.log_scale_min)
if is_mulaw(hparams.input_type):
y_hat_log = util.inv_mulaw(y_hat_log, hparams.quantize_channels)
y_log = util.inv_mulaw(y_log, hparams.quantize_channels)
self.y_hat_log = y_hat_log
self.y_log = y_log
log(' inputs: {}'.format(x.shape))
if self.local_conditioning_enabled():
log(' local_condition: {}'.format(c.shape))
if self.has_speaker_embedding():
log(' global_condition: {}'.format(g.shape))
log(' targets: {}'.format(y_log.shape))
log(' outputs: {}'.format(y_hat_log.shape))
#evaluating
elif self.is_evaluating:
#[time_length, ]
idx = 0
length = input_lengths[idx]
y_target = tf.reshape(y[idx], [-1])[:length]
if c is not None:
c = tf.expand_dims(c[idx, :, :length], axis=0)
with tf.control_dependencies([tf.assert_equal(tf.rank(c), 3)]):
c = tf.identity(c, name='eval_assert_c_rank_op')
if g is not None:
g = tf.expand_dims(g[idx], axis=0)
batch_size = tf.shape(c)[0]
#Start silence frame
if is_mulaw_quantize(hparams.input_type):
initial_value = mulaw_quantize(0, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
initial_value = mulaw(0.0, hparams.quantize_channels)
else:
initial_value = 0.0
#[channels, ]
if is_mulaw_quantize(hparams.input_type):
initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
else:
initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value
#Fast eval
y_hat = self.incremental(initial_input, c=c, g=g, time_length=length,
softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)
#Save targets and length for eval loss computation
if is_mulaw_quantize(hparams.input_type):
self.y_eval = tf.reshape(y[idx], [1, -1])[:, :length]
else:
self.y_eval = tf.expand_dims(y[idx], axis=0)[:, :length, :]
self.eval_length = length
if is_mulaw_quantize(hparams.input_type):
y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [-1])
y_hat = inv_mulaw_quantize(y_hat, hparams.quantize_channels)
y_target = inv_mulaw_quantize(y_target, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
y_hat = inv_mulaw(tf.reshape(y_hat, [-1]), hparams.quantize_channels)
y_target = inv_mulaw(y_target, hparams.quantize_channels)
else:
y_hat = tf.reshape(y_hat, [-1])
self.y_hat = y_hat
self.y_target = y_target
if self.local_conditioning_enabled():
log(' local_condition: {}'.format(c.shape))
if self.has_speaker_embedding():
log(' global_condition: {}'.format(g.shape))
log(' targets: {}'.format(y_target.shape))
log(' outputs: {}'.format(y_hat.shape))
#synthesizing
else:
batch_size = tf.shape(c)[0]
if c is None:
assert synthesis_length is not None
else:
#[batch_size, local_condition_time, local_condition_dimension(num_mels)]
message = ('Expected 3 dimension shape [batch_size(1), time_length, {}] for local condition features but found {}'.format(
hparams.cin_channels, c.shape))
with tf.control_dependencies([tf.assert_equal(tf.rank(c), 3, message=message)]):
c = tf.identity(c, name='synthesis_assert_c_rank_op')
Tc = tf.shape(c)[1]
upsample_factor = audio.get_hop_size(self._hparams)
#Overwrite length with respect to local condition features
synthesis_length = Tc * upsample_factor
#[batch_size, local_condition_dimension, local_condition_time]
#time_length will be corrected using the upsample network
c = tf.transpose(c, [0, 2, 1])
if g is not None:
assert g.shape == (batch_size, 1)
#Start silence frame
if is_mulaw_quantize(hparams.input_type):
initial_value = mulaw_quantize(0, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
initial_value = mulaw(0.0, hparams.quantize_channels)
else:
initial_value = 0.0
if is_mulaw_quantize(hparams.input_type):
assert initial_value >= 0 and initial_value < hparams.quantize_channels
initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
else:
initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value
y_hat = self.incremental(initial_input, c=c, g=g, time_length=synthesis_length,
softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)
if is_mulaw_quantize(hparams.input_type):
y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [batch_size, -1])
y_hat = util.inv_mulaw_quantize(y_hat, hparams.quantize_channels)
elif is_mulaw(hparams.input_type):
y_hat = util.inv_mulaw(tf.reshape(y_hat, [batch_size, -1]), hparams.quantize_channels)
else:
y_hat = tf.reshape(y_hat, [batch_size, -1])
self.y_hat = y_hat
if self.local_conditioning_enabled():
log(' local_condition: {}'.format(c.shape))
if self.has_speaker_embedding():
log(' global_condition: {}'.format(g.shape))
log(' outputs: {}'.format(y_hat.shape))
self.variables = tf.trainable_variables()
n_vars = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])
log(' Receptive Field: ({} samples / {:.1f} ms)'.format(self.receptive_field, self.receptive_field / hparams.sample_rate * 1000.))
#1_000_000 is causing syntax problems for some people?! Python please :)
log(' WaveNet Parameters: {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.variables]) / 1000000))
self.ema = tf.train.ExponentialMovingAverage(decay=hparams.wavenet_ema_decay)
def add_loss(self):
'''Adds loss computation to the graph. Supposes that initialize function has already been called.
'''
with tf.variable_scope('loss') as scope:
if self.is_training:
if is_mulaw_quantize(self._hparams.input_type):
self.loss = MaskedCrossEntropyLoss(self.y_hat_q[:, :-1, :], self.y[:, 1:], mask=self.mask)
else:
if self._hparams.out_channels == 2:
self.loss = GaussianMaximumLikelihoodEstimation(self.y_hat[:, :, :-1], self.y[:, 1:, :], hparams=self._hparams, mask=self.mask)
else:
self.loss = DiscretizedMixtureLogisticLoss(self.y_hat[:, :, :-1], self.y[:, 1:, :], hparams=self._hparams, mask=self.mask)
elif self.is_evaluating:
if is_mulaw_quantize(self._hparams.input_type):
self.eval_loss = MaskedCrossEntropyLoss(self.y_hat_eval, self.y_eval, lengths=[self.eval_length])
else:
if self._hparams.out_channels == 2:
self.eval_loss = GaussianMaximumLikelihoodEstimation(self.y_hat_eval, self.y_eval, hparams=self._hparams, lengths=[self.eval_length])
else:
self.eval_loss = DiscretizedMixtureLogisticLoss(self.y_hat_eval, self.y_eval, hparams=self._hparams, lengths=[self.eval_length])
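	# Editorial note on the slicing above: the final prediction timestep is dropped and the
	# targets are shifted one step ahead (y[:, 1:]), so the network is trained to predict the
	# next sample from past samples; get_mask() trims its first step ([:, 1:]) to match.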
def add_optimizer(self, global_step):
'''Adds optimizer to the graph. Supposes that initialize function has already been called.
'''
with tf.variable_scope('optimizer'):
hp = self._hparams
#Create lr schedule
if hp.wavenet_lr_schedule == 'noam':
learning_rate = self._noam_learning_rate_decay(hp.wavenet_learning_rate,
global_step,
warmup_steps=hp.wavenet_warmup)
else:
assert hp.wavenet_lr_schedule == 'exponential'
learning_rate = self._exponential_learning_rate_decay(hp.wavenet_learning_rate,
global_step,
hp.wavenet_decay_rate,
hp.wavenet_decay_steps)
#Adam optimization
self.learning_rate = learning_rate
optimizer = tf.train.AdamOptimizer(learning_rate, hp.wavenet_adam_beta1,
hp.wavenet_adam_beta2, hp.wavenet_adam_epsilon)
gradients, variables = zip(*optimizer.compute_gradients(self.loss))
self.gradients = gradients
#Gradients clipping
if hp.wavenet_clip_gradients:
clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.)
else:
clipped_gradients = gradients
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
adam_optimize = optimizer.apply_gradients(zip(clipped_gradients, variables),
global_step=global_step)
#Add exponential moving average
#https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
#Use adam optimization process as a dependency
with tf.control_dependencies([adam_optimize]):
#Create the shadow variables and add ops to maintain moving averages
#Also updates moving averages after each update step
#This is the optimize call instead of traditional adam_optimize one.
assert tuple(self.variables) == variables #Verify all trainable variables are being averaged
self.optimize = self.ema.apply(variables)
def _noam_learning_rate_decay(self, init_lr, global_step, warmup_steps=4000.0):
# Noam scheme from tensor2tensor:
step = tf.cast(global_step + 1, dtype=tf.float32)
return tf.maximum(init_lr * warmup_steps**0.5 * tf.minimum(step * warmup_steps**-1.5, step**-0.5), 1e-4)
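	# Editorial note: the schedule above is lr(step) = init_lr * warmup_steps**0.5 *
	# min(step * warmup_steps**-1.5, step**-0.5), clipped below at 1e-4; it grows linearly
	# during warmup, decays as step**-0.5 afterwards, and equals init_lr at step == warmup_steps.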
def _exponential_learning_rate_decay(self, init_lr, global_step,
decay_rate=0.5,
decay_steps=300000):
#Compute natural exponential decay
lr = tf.train.exponential_decay(init_lr,
global_step,
decay_steps,
decay_rate,
name='wavenet_lr_exponential_decay')
return lr
def get_mask(self, input_lengths, maxlen=None):
expand = not is_mulaw_quantize(self._hparams.input_type)
mask = sequence_mask(input_lengths, max_len=maxlen, expand=expand)
if is_mulaw_quantize(self._hparams.input_type):
return mask[:, 1:]
return mask[:, 1:, :]
#Sanity check functions
def has_speaker_embedding(self):
return self.embed_speakers is not None
def local_conditioning_enabled(self):
return self._hparams.cin_channels > 0
def step(self, x, c=None, g=None, softmax=False):
"""Forward step
Args:
x: Tensor of shape [batch_size, channels, time_length], One-hot encoded audio signal.
c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features.
g: Tensor of shape [batch_size, gin_channels, 1] or Ids of shape [batch_size, 1],
Global conditioning features.
Note: set hparams.use_speaker_embedding to False to disable embedding layer and
			use external one-hot encoded features.
softmax: Boolean, Whether to apply softmax.
Returns:
a Tensor of shape [batch_size, out_channels, time_length]
"""
#[batch_size, channels, time_length] -> [batch_size, time_length, channels]
batch_size = tf.shape(x)[0]
time_length = tf.shape(x)[-1]
if g is not None:
if self.embed_speakers is not None:
#[batch_size, 1] ==> [batch_size, 1, gin_channels]
g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
#[batch_size, gin_channels, 1]
with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
g = tf.transpose(g, [0, 2, 1])
#Expand global conditioning features to all time steps
g_bct = _expand_global_features(batch_size, time_length, g, data_format='BCT')
if c is not None and self.upsample_conv is not None:
if self._hparams.upsample_type == '2D':
#[batch_size, 1, cin_channels, time_length]
expand_dim = 1
else:
assert self._hparams.upsample_type == '1D'
#[batch_size, cin_channels, 1, time_length]
expand_dim = 2
c = tf.expand_dims(c, axis=expand_dim)
for transposed_conv in self.upsample_conv:
c = transposed_conv(c)
#[batch_size, cin_channels, time_length]
c = tf.squeeze(c, [expand_dim])
with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], tf.shape(x)[-1])]):
c = tf.identity(c, name='control_c_and_x_shape')
#Feed data to network
x = self.first_conv(x)
skips = None
for conv in self.residual_layers:
x, h = conv(inputs = x, c = c, g = g_bct)
if skips is None:
skips = h
else:
skips = skips + h
x = skips
for conv in self.last_conv_layers:
x = conv(inputs = x)
return tf.nn.softmax(x, axis=1) if softmax else x
def incremental(self, initial_input, c=None, g=None,
time_length=100, test_inputs=None,
softmax=True, quantize=True, log_scale_min=-7.0, log_scale_min_gauss=-7.0):
"""Inceremental forward step
Inputs of shape [batch_size, channels, time_length] are reshaped to [batch_size, time_length, channels]
Input of each time step is of shape [batch_size, 1, channels]
Args:
			initial_input: Tensor of shape [batch_size, channels, 1], initial recurrence input.
c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features
g: Tensor of shape [batch_size, gin_channels, time_length] or [batch_size, gin_channels, 1]
global conditioning features
			time_length: int, number of timesteps to generate
test_inputs: Tensor, teacher forcing inputs (debug)
softmax: Boolean, whether to apply softmax activation
quantize: Whether to quantize softmax output before feeding to
next time step input
log_scale_min: float, log scale minimum value.
Returns:
Tensor of shape [batch_size, channels, time_length] or [batch_size, channels, 1]
Generated one_hot encoded samples
"""
batch_size = tf.shape(initial_input)[0]
#Note: should reshape to [batch_size, time_length, channels]
#not [batch_size, channels, time_length]
if test_inputs is not None:
if self.scalar_input:
if tf.shape(test_inputs)[1] == 1:
test_inputs = tf.transpose(test_inputs, [0, 2, 1])
else:
if tf.shape(test_inputs)[1] == self._hparams.out_channels:
test_inputs = tf.transpose(test_inputs, [0, 2, 1])
batch_size = tf.shape(test_inputs)[0]
if time_length is None:
time_length = tf.shape(test_inputs)[1]
else:
time_length = tf.maximum(time_length, tf.shape(test_inputs)[1])
#Global conditioning
if g is not None:
if self.embed_speakers is not None:
g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
#[batch_size, channels, 1]
with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
g = tf.transpose(g, [0, 2, 1])
self.g_btc = _expand_global_features(batch_size, time_length, g, data_format='BTC')
#Local conditioning
if c is not None and self.upsample_conv is not None:
if self._hparams.upsample_type == '2D':
#[batch_size, 1, cin_channels, time_length]
expand_dim = 1
else:
assert self._hparams.upsample_type == '1D'
#[batch_size, cin_channels, 1, time_length]
expand_dim = 2
c = tf.expand_dims(c, axis=expand_dim)
for upsample_conv in self.upsample_conv:
c = upsample_conv(c)
#[batch_size, channels, time_length]
c = tf.squeeze(c, [expand_dim])
with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], time_length)]):
self.c = tf.transpose(c, [0, 2, 1])
#Initialize loop variables
if initial_input.shape[1] == self._hparams.out_channels:
initial_input = tf.transpose(initial_input, [0, 2, 1])
initial_time = tf.constant(0, dtype=tf.int32)
if test_inputs is not None:
initial_input = tf.expand_dims(test_inputs[:, 0, :], axis=1)
initial_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
initial_loss_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
		#Only use convolution queues for Residual Blocks main convolutions (only ones with kernel size 3 and dilations, all others are 1x1)
initial_queues = [tf.zeros((batch_size, res_conv.layer.kw + (res_conv.layer.kw - 1) * (res_conv.layer.dilation_rate[0] - 1), self._hparams.residual_channels),
name='convolution_queue_{}'.format(i+1)) for i, res_conv in enumerate(self.residual_layers)]
def condition(time, unused_outputs_ta, unused_current_input, unused_loss_outputs_ta, unused_queues):
return tf.less(time, time_length)
def body(time, outputs_ta, current_input, loss_outputs_ta, queues):
#conditioning features for single time step
ct = None if self.c is None else tf.expand_dims(self.c[:, time, :], axis=1)
gt = None if self.g_btc is None else tf.expand_dims(self.g_btc[:, time, :], axis=1)
x = self.first_conv.incremental_step(current_input)
skips = None
new_queues = []
for conv, queue in zip(self.residual_layers, queues):
x, h, new_queue = conv.incremental_step(x, ct, gt, queue=queue)
skips = h if skips is None else (skips + h)
new_queues.append(new_queue)
x = skips
for conv in self.last_conv_layers:
try:
x = conv.incremental_step(x)
except AttributeError: #When calling Relu activation
x = conv(x)
#Save x for eval loss computation
loss_outputs_ta = loss_outputs_ta.write(time, tf.squeeze(x, [1])) #squeeze time_length dimension (=1)
#Generate next input by sampling
if self.scalar_input:
if self._hparams.out_channels == 2:
x = sample_from_gaussian(
tf.reshape(x, [batch_size, -1, 1]),
log_scale_min_gauss=log_scale_min_gauss)
else:
x = sample_from_discretized_mix_logistic(
tf.reshape(x, [batch_size, -1, 1]), log_scale_min=log_scale_min)
next_input = tf.expand_dims(x, axis=-1) #Expand on the channels dimension
else:
x = tf.nn.softmax(tf.reshape(x, [batch_size, -1]), axis=1) if softmax \
else tf.reshape(x, [batch_size, -1])
if quantize:
#[batch_size, 1]
sample = tf.multinomial(x, 1) #Pick a sample using x as probability (one for each batche)
#[batch_size, 1, quantize_channels] (time dimension extended by default)
x = tf.one_hot(sample, depth=self._hparams.quantize_channels)
next_input = x
if len(x.shape) == 3:
x = tf.squeeze(x, [1])
outputs_ta = outputs_ta.write(time, x)
time = tf.Print(time + 1, [time+1, time_length])
#output = x (maybe next input)
if test_inputs is not None:
#override next_input with ground truth
next_input = tf.expand_dims(test_inputs[:, time, :], axis=1)
return (time, outputs_ta, next_input, loss_outputs_ta, new_queues)
res = tf.while_loop(
condition,
body,
loop_vars=[
initial_time, initial_outputs_ta, initial_input, initial_loss_outputs_ta, initial_queues
],
parallel_iterations=32,
swap_memory=self._hparams.wavenet_swap_with_cpu)
outputs_ta = res[1]
#[time_length, batch_size, channels]
outputs = outputs_ta.stack()
#Save eval prediction for eval loss computation
eval_outputs = res[3].stack()
if is_mulaw_quantize(self._hparams.input_type):
self.y_hat_eval = tf.transpose(eval_outputs, [1, 0, 2])
else:
self.y_hat_eval = tf.transpose(eval_outputs, [1, 2, 0])
#[batch_size, channels, time_length]
return tf.transpose(outputs, [1, 2, 0])
def clear_queue(self):
self.first_conv.clear_queue()
		for f in self.residual_layers:
f.clear_queue()
for f in self.last_conv_layers:
try:
f.clear_queue()
except AttributeError:
pass
| python |
"""Testing for vault_backend module."""
import hvac
import pytest
import requests
import config
import vault_backend
def test___get_vault_client(monkeypatch):
# valid test
client = vault_backend.__get_vault_client('salesforce')
assert isinstance(client, hvac.Client)
# test w/ no VAULT_CERT
def mock_vault_cert(*args):
return False
monkeypatch.setattr(config, 'get_vault_ca_cert', mock_vault_cert)
client = vault_backend.__get_vault_client('salesforce')
assert isinstance(client, hvac.Client)
def test___get_vault_client_no_mtls_client_cert(monkeypatch):
def mock_config(*args):
return False
monkeypatch.setattr(config, 'get_vault_mtls_client_cert', mock_config)
client = vault_backend.__get_vault_client('salesforce')
assert client is None
def test___get_vault_client_no_mtls_client_key(monkeypatch):
def mock_config(*args):
return False
monkeypatch.setattr(config, 'get_vault_mtls_client_key', mock_config)
client = vault_backend.__get_vault_client('salesforce')
assert client is None
def test___get_vault_client_no_vault_url(monkeypatch):
def mock_config(*args):
return False
monkeypatch.setattr(config, 'get_vault_url', mock_config)
client = vault_backend.__get_vault_client('salesforce')
assert client is None
def test___get_vault_client_no_vault_ns(monkeypatch):
def mock_config(*args):
return False
monkeypatch.setattr(config, 'get_vault_namespace', mock_config)
client = vault_backend.__get_vault_client('salesforce')
assert client is None
def test_get_dynamic_secret(monkeypatch, get_jwt):
# test w/o connection to vault
dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
'latest', get_jwt)
assert dek == b''
# test w/ failing client creation
def mock_client(tenant: str):
return None
monkeypatch.setattr(vault_backend, '__get_vault_client', mock_client)
    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           'latest', get_jwt)
    assert dek == b''
def test_get_dynamic_secret_3(monkeypatch, get_jwt):
# test client initialized
def mock_auth_client(*args):
return vault_backend.__get_vault_client('salesforce')
monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
mock_auth_client)
def mock_client_init(*args):
return True
monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
mock_client_init)
# fails, because cannot reach Vault
with pytest.raises(requests.exceptions.ConnectionError):
vault_backend.get_dynamic_secret('salesforce', 'salesforce',
'latest', get_jwt)
def test_get_dynamic_secret_4(monkeypatch, get_jwt):
# test client initialized
def mock_auth_client(*args):
return vault_backend.__get_vault_client('salesforce')
monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
mock_auth_client)
def mock_client_init(*args):
return True
monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
mock_client_init)
# mock client.secrets.transit.read_key()
def mock_readkey(*args, **kwargs):
raise hvac.exceptions.Forbidden
monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
mock_readkey)
dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
'latest', get_jwt)
assert dek == b''
def test_get_dynamic_secret_5(monkeypatch, get_jwt):
# test client initialized
def mock_auth_client(*args):
return vault_backend.__get_vault_client('salesforce')
monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
mock_auth_client)
def mock_client_init(*args):
return True
monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
mock_client_init)
# mock client.secrets.transit.read_key()
def mock_readkey(*args, **kwargs):
response = {}
return response
monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
mock_readkey)
dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
'latest', get_jwt)
assert dek == b''
def test_get_dynamic_secret_6(monkeypatch, get_jwt):
# test client initialized
def mock_auth_client(*args):
return vault_backend.__get_vault_client('salesforce')
monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
mock_auth_client)
def mock_client_init(*args):
return True
monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
mock_client_init)
# mock client.secrets.transit.read_key()
def mock_readkey(*args, **kwargs):
response = {'data': {'latest_version': 1}}
return response
monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
mock_readkey)
dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
'latest', get_jwt)
assert dek == b''
def test_get_dynamic_secret_7(monkeypatch, get_jwt):
# test client initialized
def mock_auth_client(*args):
return vault_backend.__get_vault_client('salesforce')
monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
mock_auth_client)
def mock_client_init(*args):
return True
monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
mock_client_init)
# mock client.secrets.transit.read_key()
def mock_readkey(*args, **kwargs):
response = {'data': {'latest_version': 1}}
return response
monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
mock_readkey)
# mock client.secrets.transit.export_key()
def mock_exportkey(*args, **kwargs):
return None
monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'export_key',
mock_exportkey)
dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
'latest', get_jwt)
assert dek == b''
def test_get_dynamic_secret_8(monkeypatch, get_jwt):
# test client initialized
def mock_auth_client(*args):
return vault_backend.__get_vault_client('salesforce')
monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
mock_auth_client)
def mock_client_init(*args):
return True
monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
mock_client_init)
# mock client.secrets.transit.read_key()
def mock_readkey(*args, **kwargs):
response = {'data': {'latest_version': 1}}
return response
monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
mock_readkey)
magic_dek = 'bWFnaWNfZGVr' # value: magic_dek
# mock client.secrets.transit.export_key()
def mock_exportkey(*args, **kwargs):
response = {'data': {'keys': {'1': magic_dek}}}
return response
monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'export_key',
mock_exportkey)
dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
1, get_jwt)
assert dek == b'magic_dek'
def test___get_vault_token(monkeypatch, get_jwt):
# test with valid token
client = vault_backend.__get_vault_client('salesforce')
fake_token = 's.FAKETOKEN'
def mock_devmode(*args):
# if get_config_by_keypath() is called with key DEV_MODE,
# interfere and return true, if called with other keys, ignore
if args[0] == 'DEV_MODE':
return True
if args[0] == [
'TENANT_CFG.salesforce.backend.VAULT.default_role',
'VAULT.default_role'
]:
# return default role
return 'distributey'
monkeypatch.setattr(config, 'get_config_by_keypath', mock_devmode)
def mock_vault_auth_jwt(*args, **kwargs):
# example token: s.f7Ea3C3ojOYE0GRLzmhSGNkE
response = {'auth': {'client_token': fake_token}}
return response
monkeypatch.setattr(
hvac.api.auth_methods.jwt.JWT, 'jwt_login', mock_vault_auth_jwt)
token = vault_backend.__get_vault_token(
client,
'salesforce',
get_jwt,
'jwt')
assert token == fake_token
def test___get_vault_token2(monkeypatch, get_jwt):
# test with invalid response
client = vault_backend.__get_vault_client('salesforce')
fake_token = 's.FAKETOKEN'
def mock_vault_auth_jwt(*args, **kwargs):
# example token: s.f7Ea3C3ojOYE0GRLzmhSGNkE
response = {'auth': {'wrong_key': fake_token}}
return response
monkeypatch.setattr(
hvac.api.auth_methods.jwt.JWT, 'jwt_login', mock_vault_auth_jwt)
token = vault_backend.__get_vault_token(
client,
'salesforce',
get_jwt,
'jwt')
assert token == ''
def test___authenticate_vault_client(monkeypatch, get_jwt):
# test with "valid" token
client = vault_backend.__get_vault_client('salesforce')
def mock_client_is_authenticated(*args, **kwargs):
return True
monkeypatch.setattr(
hvac.v1.Client, 'is_authenticated', mock_client_is_authenticated)
vault_backend.__VAULT_TOKEN_CACHE = {
'c2FsZXNmb3JjZS1qd3Rfa2lkX3NhbGVzZm9yY2Vfc2VydmljZVg=': 's.FAKETOKEN'
}
client = vault_backend.__authenticate_vault_client(
client, 'salesforce', get_jwt)
assert isinstance(client, hvac.v1.Client)
def test___authenticate_vault_client2(monkeypatch, get_jwt):
# test with invalid token
client = vault_backend.__get_vault_client('salesforce')
def mock_client_is_authenticated(*args, **kwargs):
return False
monkeypatch.setattr(
hvac.v1.Client, 'is_authenticated', mock_client_is_authenticated)
client = vault_backend.__authenticate_vault_client(
client, 'salesforce', get_jwt)
assert client is None
| python |
#====================================================================================
# TOPIC: PYTHON - Modules Usage
#====================================================================================
#
# FILE-NAME : 013_module_usage.py
# DEPENDANT-FILES : These are the files and libraries needed to run this program ;
# module.py and 013_module_usage.py
#
# AUTHOR : learnpython.com / Hemaxi
# (c) 2013
#
# DESC : Python Modules , used to organize code.
#
#====================================================================================
# Use this to import the module named "module"
import module
# Using the module's variables and functions
# print the "MODULE" variables, use"module." -->DOT
print (module.country_1, module.country_2, module.country_3);
# OUTPUT: USA China India
# print the "MODULE" LIST
print (module.list_world_nations);
# OUTPUT: ['USA', 'China', 'India']
# print the "MODULE" TUPLE
print (module.tuple_world_nations);
# OUTPUT: ('USA', 'China', 'India')
# print the "MODULE" DICTIONARY
print (module.dictionary_world_nations);
# OUTPUT: {'Country_1': 'India'}
# calling the function from the module
print (module.module_function_add(1, 3));
# OUTPUT: 4
#====================================================================================
# END OF CODE
#====================================================================================
| python |
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from colorful.fields import RGBColorField
from mayan.apps.acls.models import AccessControlList
from mayan.apps.databases.model_mixins import ExtraDataModelMixin
from mayan.apps.events.classes import EventManagerMethodAfter, EventManagerSave
from mayan.apps.events.decorators import method_event
from mayan.apps.documents.models import Document
from mayan.apps.documents.permissions import permission_document_view
from .events import (
event_tag_attached, event_tag_created, event_tag_edited, event_tag_removed
)
from .html_widgets import widget_single_tag
class Tag(ExtraDataModelMixin, models.Model):
"""
This model represents a binary property that can be applied to a document.
The tag can have a label and a color.
"""
label = models.CharField(
db_index=True, help_text=_(
            'A short text used as the tag name.'
), max_length=128, unique=True, verbose_name=_('Label')
)
color = RGBColorField(
        help_text=_('The RGB color values for the tag.'),
verbose_name=_('Color')
)
documents = models.ManyToManyField(
related_name='tags', to=Document, verbose_name=_('Documents')
)
class Meta:
ordering = ('label',)
verbose_name = _('Tag')
verbose_name_plural = _('Tags')
def __str__(self):
return self.label
@method_event(
action_object='self',
event=event_tag_attached,
event_manager_class=EventManagerMethodAfter,
)
def attach_to(self, document):
self._event_target = document
self.documents.add(document)
def get_absolute_url(self):
return reverse(
viewname='tags:tag_document_list', kwargs={'tag_id': self.pk}
)
def get_document_count(self, user):
"""
Return the numeric count of documents that have this tag attached.
The count is filtered by access.
"""
return self.get_documents(permission=permission_document_view, user=user).count()
def get_documents(self, user, permission=None):
"""
Return a filtered queryset documents that have this tag attached.
"""
queryset = self.documents.all()
if permission:
queryset = AccessControlList.objects.restrict_queryset(
permission=permission_document_view, queryset=queryset,
user=user
)
return queryset
def get_preview_widget(self):
return widget_single_tag(tag=self)
get_preview_widget.short_description = _('Preview')
@method_event(
action_object='self',
event=event_tag_removed,
event_manager_class=EventManagerMethodAfter,
)
def remove_from(self, document):
self._event_target = document
self.documents.remove(document)
@method_event(
event_manager_class=EventManagerSave,
created={
'event': event_tag_created,
'target': 'self',
},
edited={
'event': event_tag_edited,
'target': 'self',
}
)
def save(self, *args, **kwargs):
return super().save(*args, **kwargs)
class DocumentTag(Tag):
class Meta:
proxy = True
verbose_name = _('Document tag')
verbose_name_plural = _('Document tags')
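# A minimal, hedged usage sketch of the tag API above (assumes an existing
# `document` instance, a `user`, and the usual Mayan event machinery):
#
#   tag = Tag.objects.create(label='Urgent', color='#ff0000')
#   tag.attach_to(document)                 # fires event_tag_attached
#   accessible = tag.get_documents(user=user, permission=permission_document_view)
#   tag.remove_from(document)               # fires event_tag_removed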
| python |
"""Common run function which does the heavy lifting of formatting output"""
import csv
import enum
import itertools
import logging
import typing
from notions.flatten import flatten_item
from notions.models.database import Database
from notions.models.page import Page, PageTitleProperty
from . import yaml
from .config import OutputFormats
LOG = logging.getLogger(__name__)
def text_format_item(
item: typing.Union[Page, Database],
output: typing.TextIO,
text_formatter: typing.Callable[[typing.Any], str],
):
output.write(text_formatter(item))
output.write("\n")
async def text_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
text_formatter: typing.Callable[[typing.Any], str],
):
async for item in iterable:
text_format_item(item, output, text_formatter)
def notion_json_format_item(
item: typing.Union[Page, Database],
output: typing.TextIO,
):
output.write(item.json())
output.write("\n")
def json_format_item(
item: typing.Union[Page, Database],
output: typing.TextIO,
):
output.write(flatten_item(item).json())
output.write("\n")
async def json_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
formatter=lambda item: flatten_item(item).json(),
):
    items = []
    async for item in iterable:
        items.append(formatter(item))
    output.write("[\n")
    LOG.info(f"Writing {len(items)} items to {output.name}")
    # Guard against an empty result set to avoid an IndexError on items[-1].
    if items:
        for item in items[0:-1]:
            output.write(item)
            output.write(",\n")
        output.write(items[-1])
        output.write("\n")
    output.write("]")
async def notion_json_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
):
# re-use the json formatter
await json_format_iterable(iterable, output, formatter=lambda item: item.json())
async def jsonl_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
):
async for item in iterable:
output.write(flatten_item(item).json())
output.write("\n")
async def notion_jsonl_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
):
async for item in iterable:
output.write(item.json())
output.write("\n")
def notion_yaml_format_item(
item: typing.Union[Page, Database],
output: typing.TextIO,
):
yaml.dump(item.dict(), output)
def yaml_format_item(
item: typing.Union[Page, Database],
output: typing.TextIO,
):
yaml.dump(flatten_item(item).dict(), output)
async def notion_yaml_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
):
items = []
async for item in iterable:
items.append(item.dict())
yaml.dump(items, output)
async def yaml_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
):
items = []
async for item in iterable:
items.append(flatten_item(item).dict())
yaml.dump(items, output)
def default_text_formatter(item: typing.Union[Database, Page]) -> str:
    title = "-No title-"
    item_type = "unknown"
    title_property = []  # Avoids an unbound reference for pages without a 'Name' title property.
    if isinstance(item, Database):
        title_property = item.title
        item_type = "database"
    else:
        item_type = "page"
        if "Name" in item.properties and isinstance(
            item.properties["Name"], PageTitleProperty
        ):
            title_property = item.properties["Name"].title
    titles = [t.plain_text for t in title_property]
    if titles:
        title = titles[0]
    return f"{item_type} : {item.id} : {title} : {list(item.properties)}"
async def csv_format_iterable(
iterable: typing.AsyncIterable,
output: typing.TextIO,
format: str,
guess_headers: bool,
):
writer = csv.writer(output, dialect="excel-tab" if format == "tsv" else "excel")
core_headers = ["type", "id", "title", "created_time", "last_edited_time"]
first_row = True
async for item in iterable:
item = flatten_item(item)
if first_row:
if guess_headers:
# TODO: expand and flatten nested objects to property_nested_name
property_headers = list(item.properties)
headers = core_headers + property_headers
else:
headers = core_headers
writer.writerow(headers)
first_row = False
row = [item.type, item.id, item.title, item.created_time, item.last_edited_time]
if guess_headers:
row += [str(item.properties[header].value) for header in property_headers]
else:
row += [str(prop.value) for prop in item.properties.values()]
writer.writerow(row)
async def csv_format_item(
item: typing.Union[Page, Database],
output: typing.TextIO,
format: str,
guess_headers: bool,
):
async def items():
yield item
await csv_format_iterable(
items(), output, format=format, guess_headers=guess_headers
)
async def run(
iterable: typing.AsyncIterable,
output: typing.TextIO,
output_format: OutputFormats,
text_formatter: typing.Callable[[typing.Any], str] = default_text_formatter,
guess_headers: bool = False,
):
"""Helper for commands which handles formatting output"""
if output_format == OutputFormats.notion_json:
await notion_json_format_iterable(iterable, output)
elif output_format == OutputFormats.notion_jsonl:
await notion_jsonl_format_iterable(iterable, output)
elif output_format == OutputFormats.notion_yaml:
await notion_yaml_format_iterable(iterable, output)
elif output_format == OutputFormats.text:
await text_format_iterable(iterable, output, text_formatter)
elif output_format == OutputFormats.json:
await json_format_iterable(iterable, output)
elif output_format == OutputFormats.jsonl:
await jsonl_format_iterable(iterable, output)
elif output_format == OutputFormats.yaml:
await yaml_format_iterable(iterable, output)
elif output_format == OutputFormats.tsv:
await csv_format_iterable(iterable, output, "tsv", guess_headers=guess_headers)
elif output_format == OutputFormats.csv:
await csv_format_iterable(iterable, output, "csv", guess_headers=guess_headers)
else:
raise NotImplementedError(f"Unknown output format: {output_format=}")
async def run_single_item(
awaitable: typing.Awaitable[typing.Union[Page, Database]],
output: typing.TextIO,
output_format: OutputFormats,
text_formatter: typing.Callable[[typing.Any], str] = default_text_formatter,
guess_headers: bool = False,
):
item = await awaitable
if output_format == OutputFormats.notion_json:
notion_json_format_item(item, output)
elif output_format == OutputFormats.notion_jsonl:
notion_json_format_item(item, output)
elif output_format == OutputFormats.notion_yaml:
notion_yaml_format_item(item, output)
elif output_format == OutputFormats.text:
text_format_item(item, output, text_formatter)
elif output_format == OutputFormats.json:
json_format_item(item, output)
elif output_format == OutputFormats.jsonl:
json_format_item(item, output)
elif output_format == OutputFormats.yaml:
yaml_format_item(item, output)
elif output_format == OutputFormats.tsv:
await csv_format_item(item, output, "tsv", guess_headers=guess_headers)
elif output_format == OutputFormats.csv:
await csv_format_item(item, output, "csv", guess_headers=guess_headers)
else:
raise NotImplementedError(f"Unknown output format: {output_format=}")
| python |
import numpy as np
import pickle
from natasha import (
Doc,
Segmenter,
NewsEmbedding,
NewsMorphTagger,
MorphVocab
)
from navec import Navec
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler
from telegram import Bot as Bot_
from metrics import metric
PATH = 'navec_hudlit_v1_12B_500K_300d_100q.tar' # Name of file for Navec
NAME = 'embeddings'
NAME_POP = 'popularity'
TOKEN = ...
INPUT = 0
# Natasha setup.
segm = Segmenter()
_emb = NewsEmbedding()
morph_tagger = NewsMorphTagger(_emb)
morph_vocab = MorphVocab()
def query_to_noun(query: str) -> list[str]:
doc = Doc(query.lower())
doc.segment(segmenter=segm)
doc.tag_morph(morph_tagger)
res_arr = []
for token in doc.tokens:
if token.pos == 'NOUN':
token.lemmatize(morph_vocab)
res_arr.append(token.lemma)
return res_arr
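# Hedged example of query_to_noun: for a query like 'красные машины и дороги' it
# would typically return the lemmatized nouns ['машина', 'дорога']; adjectives
# and conjunctions are dropped because only tokens with POS == 'NOUN' are kept.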
# Navec setup.
navec = Navec.load(PATH)
# Loading pretrained embedding vocab.
with open(NAME + '.pkl', 'rb') as f:
embed_dict = pickle.load(f)
with open(NAME_POP + '.pkl', 'rb') as f:
pop_dict = pickle.load(f)
def get_tags(request: str) -> str:
nouns = query_to_noun(request)
if not len(nouns):
return f'В запросе \'{request}\' не найдено существительных.'
request_vec = np.zeros(300)
found = False
sum_weights = 0
for noun in nouns:
if noun in navec:
if noun in pop_dict:
request_vec += navec[noun] * pop_dict[noun]
sum_weights += pop_dict[noun]
else:
request_vec += navec[noun]
sum_weights += 1
found = True
if not found:
return f'В запросе \'{request}\' не найдено существительных с реализованными эмбеддингами.'
request_vec /= sum_weights
distances = {
key: (metric(request_vec, vec) / (np.log(pop_dict[key] + 1) + 1) if key in pop_dict else metric(request_vec, vec))
for key, vec in embed_dict.items()}
distances = {k: v for k, v in sorted(distances.items(), key=lambda item: item[1])}
req_keys = list(distances.keys())[1:11]
return f'Потенциальные теги для запроса \'{request}\': {req_keys}'
class Bot:
def __init__(self, token: str = TOKEN):
self.token = token
def start(self) -> None:
self.bot = Bot_(token=self.token)
self.updater = Updater(self.token, use_context=True)
self.dispatcher = self.updater.dispatcher
self.request()
def stop(self) -> None:
self.updater.stop()
def start_msg(self, update, _):
self.user_id = update.message.from_user.id
msg = 'Привет! Введи запрос, содержащий существительное, и я подскажу потенциальные теги ' \
'для твоего запроса.'
update.message.reply_text(msg)
return INPUT
def cancel_msg(self, update, _):
msg = 'Определение тегов остановлено.'
update.message.reply_text(msg)
return ConversationHandler.END
def tags_reply(self, update, _):
msg = get_tags(update.message.text)
update.message.reply_text(msg)
return INPUT
def request(self) -> None:
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_msg)],
states={
INPUT: [MessageHandler(Filters.text & ~Filters.command, self.tags_reply)],
},
            fallbacks=[CommandHandler('cancel', self.cancel_msg)],
)
self.dispatcher.add_handler(conv_handler)
self.updater.start_polling()
if __name__ == '__main__':
bot = Bot()
bot.start()
_ = input()
bot.stop()
| python |
from django.apps import AppConfig
class CityeventConfig(AppConfig):
name = 'cityEvent'
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import arrow
from app import celery, create_app
from app.models.email_event import EmailEvent
from app.email import send_email
@celery.task
def schedule_send_emails():
now = arrow.utcnow().replace(second=0, microsecond=0)
app = create_app(os.getenv('JUBLIA_CONFIG') or 'default')
with app.app_context():
        # find email_events that need to be sent.
target_emailEvents = find_target_emailEvents(timestamp=now)
for email_event in target_emailEvents:
# send email
send_email.delay(email_event.id)
def find_target_emailEvents(timestamp):
    '''
    Find email_events that need to be sent,
    i.e. timestamp == now and is_send == False.
    '''
target_emailEvents = EmailEvent.query.filter_by(timestamp=timestamp, is_send=False).all()
return target_emailEvents
| python |
# Command Line Interface
import argparse as ap
import datetime as dt
import inflationtools.main as main
from argparse import RawTextHelpFormatter # Allows to use newline in help text
import locale
import gettext # Unable to get pot for this file... find the reason.
pt = gettext.translation('CLI', localedir='locales', languages=['pt_BR'])
sys_locale = locale.getdefaultlocale()
if 'BR' in sys_locale[0]:
    pt.install()
    _ = pt.gettext
else:
    _ = gettext.gettext  # Fall back to untranslated strings outside pt_BR locales.
locale.setlocale(locale.LC_NUMERIC,
                 sys_locale[0][0:2])  # Sets numeric locale to the system default
locale.setlocale(locale.LC_MONETARY, 'pt') # Sets locales to Brazil, for money
# Prepares indexes list.
indexes = {}
indexes.update(main.bcb_urls)
indexes.update(main.quandl_urls)
indexes = list(indexes.keys())
indexes.sort()
indexes = '\n'.join(indexes)
# Date parser
def parse_dates(date_string):
assert type(date_string) is str, f'date_string is a {type(date_string)}'
date_string = '01-' + date_string
new_date = dt.datetime.strptime(date_string, '%d-%m-%Y') # Quandl uses '2009-09-30' date style
return new_date
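# Hedged example: parse_dates('09-2019') prepends the day, parses '01-09-2019'
# with '%d-%m-%Y' and returns datetime.datetime(2019, 9, 1, 0, 0).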
def CLI():
"""
Implements the argument parser to inflationtools.
:return:
"""
parser = ap.ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument('index', metavar=_('index'),
help=_('The inflation index that you want to look. Available: \n') + indexes)
parser.add_argument('start_date', metavar=_('start_date'),
help=_("Starting date, using '01-2001' format."))
parser.add_argument('end_date', metavar=_('end_date'),
help=_("Ending date, using '01-2001' format."))
parser.add_argument('-a', '--amount', metavar=_('amount'),
help=_('Amount you want to update.'))
arguments = parser.parse_args()
arguments.start_date, arguments.end_date = parse_dates(arguments.start_date), parse_dates(arguments.end_date)
inflation = main.get_cumulative_inflation(arguments.index, arguments.start_date, arguments.end_date)
if arguments.amount:
money = arguments.amount
if money[0:2] == 'R$':
money = money[2:]
money = locale.atof(money)
money *= inflation
print(locale.currency(money)) # Prints in BRL
else:
print(locale.str(inflation))
if __name__ == '__main__':
CLI() | python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-28 15:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0008_book_type'),
]
operations = [
migrations.AddField(
model_name='book',
name='folder',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| python |
#!/usr/bin/python3
def islower(c):
chrcode = ord(c)
if chrcode >= 97 and chrcode <= 122:
return True
else:
return False
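# Example usage: islower('a') -> True, islower('A') -> False, islower('z') -> True.
# Note: only the ASCII range 'a'-'z' (character codes 97-122) counts as lowercase here.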
| python |
import json
import os
import time
import pandas as pd
from bing import bing_web_search
def crawl_snippets(title, retry=3):
    # Re-issue the search on every attempt; retrying on a stale response cannot succeed.
    for _ in range(retry):
        _, raw_resp = bing_web_search(title)
        response = json.loads(raw_resp)
        try:
            pages = response['webPages']['value']
            return '\n'.join([title] + list(map(lambda page: page['snippet'], pages)))
        except KeyError:
            time.sleep(1.5)
            print('retry fail response: {}'.format(response))
            continue
    print('fail to crawl {}'.format(title))
def retrieve_data(titles, prefix):
for i, title in enumerate(titles, 1):
filename = '{}/{}.txt'.format(prefix, i)
if not os.path.exists(filename):
result = crawl_snippets(title)
if result is None:
print('fail to crawl index: {}, title: {}, skip it'.format(i, title))
continue
file = open(filename, 'w+', encoding='utf-8')
file.write(result)
file.close()
time.sleep(0.01)
for mode in ['train', 'test']:
path = '../../input/{}_v2.csv'.format(mode)
df = pd.read_csv(path)
retrieve_data(df['title'].values, '../../snippets/{}'.format(mode))
| python |
import base64
from unittest.mock import ANY
import pytest
from rhub.auth.keycloak import KeycloakClient
from rhub.api import DEFAULT_PAGE_LIMIT
API_BASE = '/v0'
def test_token_create(client, keycloak_mock):
keycloak_mock.login.return_value = {'access_token': 'foobar'}
rv = client.post(
f'{API_BASE}/auth/token/create',
headers={
'Authorization': 'Basic ' + base64.b64encode(b'user:pass').decode(),
}
)
keycloak_mock.login.assert_called_with('user', 'pass')
assert rv.status_code == 200
assert rv.json == {'access_token': 'foobar'}
def test_me(client, keycloak_mock):
keycloak_mock.user_get.return_value = {
'id': '00000000-0000-0000-0000-000000000000',
'username': 'user',
}
rv = client.get(
f'{API_BASE}/me',
headers={'Authorization': 'Bearer foobar'},
)
assert rv.status_code == 200
assert rv.json == {
'id': '00000000-0000-0000-0000-000000000000',
'username': 'user',
'_href': ANY,
}
def test_list_users(client, keycloak_mock):
keycloak_mock.user_list.return_value = [{
'id': '00000000-0000-0000-0000-000000000000',
'username': 'user',
}]
rv = client.get(
f'{API_BASE}/auth/user',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.user_list.assert_called_with({'first': 0, 'max': DEFAULT_PAGE_LIMIT})
assert rv.status_code == 200
assert rv.json == [{
'id': '00000000-0000-0000-0000-000000000000',
'username': 'user',
'_href': ANY,
}]
def test_create_user(client, keycloak_mock):
user_id = '00000000-0000-0000-0000-000000000000'
user_data = {'username': 'user', 'email': '[email protected]'}
keycloak_mock.user_create.return_value = user_id
keycloak_mock.user_get.return_value = user_data | {'id': user_id}
rv = client.post(
f'{API_BASE}/auth/user',
headers={'Authorization': 'Bearer foobar'},
json=user_data,
)
keycloak_mock.user_create.assert_called_with(user_data)
keycloak_mock.user_get.assert_called_with(user_id)
assert rv.status_code == 200
assert rv.json == user_data | {'id': user_id, '_href': ANY}
def test_get_user(client, keycloak_mock):
user_id = '00000000-0000-0000-0000-000000000000'
user_data = {'username': 'user', 'email': '[email protected]'}
keycloak_mock.user_get.return_value = user_data | {'id': user_id}
rv = client.get(
f'{API_BASE}/auth/user/{user_id}',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.user_get.assert_called_with(user_id)
assert rv.status_code == 200
assert rv.json == user_data | {'id': user_id, '_href': ANY}
def test_update_user(client, keycloak_mock):
user_id = '00000000-0000-0000-0000-000000000000'
user_data = {'username': 'user', 'email': '[email protected]'}
keycloak_mock.user_update.return_value = user_id
keycloak_mock.user_get.return_value = user_data | {'id': user_id}
rv = client.patch(
f'{API_BASE}/auth/user/{user_id}',
headers={'Authorization': 'Bearer foobar'},
json=user_data,
)
keycloak_mock.user_update.assert_called_with(user_id, user_data)
keycloak_mock.user_get.assert_called_with(user_id)
assert rv.status_code == 200
assert rv.json == user_data | {'id': user_id, '_href': ANY}
def test_delete_user(client, keycloak_mock):
user_id = '00000000-0000-0000-0000-000000000000'
keycloak_mock.user_delete.return_value = None
rv = client.delete(
f'{API_BASE}/auth/user/{user_id}',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.user_delete.assert_called_with(user_id)
assert rv.status_code == 200
assert rv.json == {}
def test_list_user_groups(client, keycloak_mock):
user_id = '00000000-0000-0000-0000-000000000000'
keycloak_mock.user_group_list.return_value = [{'id': user_id, 'name': 'admin'}]
rv = client.get(
f'{API_BASE}/auth/user/{user_id}/groups',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.user_group_list.assert_called_with(user_id)
assert rv.status_code == 200
assert rv.json == [{'id': user_id, 'name': 'admin', '_href': ANY}]
def test_add_user_group(client, keycloak_mock):
user_id = '00000000-0000-0000-0000-000000000000'
group_id = '00000000-0004-0003-0002-000000000001'
keycloak_mock.group_user_add.return_value = None
rv = client.post(
f'{API_BASE}/auth/user/{user_id}/groups',
headers={'Authorization': 'Bearer foobar'},
json={'id': group_id},
)
keycloak_mock.group_user_add.assert_called_with(user_id, group_id)
assert rv.status_code == 200
assert rv.json == {}
def test_delete_user_group(client, keycloak_mock):
user_id = '00000000-0000-0000-0000-000000000000'
group_id = '00000000-0004-0003-0002-000000000001'
keycloak_mock.group_user_remove.return_value = None
rv = client.delete(
f'{API_BASE}/auth/user/{user_id}/groups',
headers={'Authorization': 'Bearer foobar'},
json={'id': group_id},
)
keycloak_mock.group_user_remove.assert_called_with(user_id, group_id)
assert rv.status_code == 200
assert rv.json == {}
def test_list_groups(client, keycloak_mock):
keycloak_mock.group_list.return_value = [{
'id': '00000000-0000-0000-0000-000000000000',
'name': 'admin',
}]
rv = client.get(
f'{API_BASE}/auth/group',
headers={'Authorization': 'Bearer foobar'},
)
assert rv.status_code == 200
assert rv.json == [{
'id': '00000000-0000-0000-0000-000000000000',
'name': 'admin',
'_href': ANY,
}]
def test_create_group(client, keycloak_mock):
group_id = '00000000-0004-0003-0002-000000000001'
group_data = {'name': 'admin'}
keycloak_mock.group_create.return_value = group_id
keycloak_mock.group_get.return_value = group_data | {'id': group_id}
rv = client.post(
f'{API_BASE}/auth/group',
headers={'Authorization': 'Bearer foobar'},
json=group_data,
)
keycloak_mock.group_create.assert_called_with(group_data)
keycloak_mock.group_get.assert_called_with(group_id)
assert rv.status_code == 200
assert rv.json == group_data | {'id': group_id, '_href': ANY}
def test_get_group(client, keycloak_mock):
group_id = '00000000-0004-0003-0002-000000000001'
group_data = {'name': 'admin'}
keycloak_mock.group_get.return_value = group_data | {'id': group_id}
rv = client.get(
f'{API_BASE}/auth/group/{group_id}',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.group_get.assert_called_with(group_id)
assert rv.status_code == 200
assert rv.json == group_data | {'id': group_id, '_href': ANY}
def test_update_group(client, keycloak_mock):
group_id = '00000000-0004-0003-0002-000000000001'
group_data = {'name': 'new-admin'}
keycloak_mock.group_update.return_value = group_id
keycloak_mock.group_get.return_value = group_data | {'id': group_id}
rv = client.patch(
f'{API_BASE}/auth/group/{group_id}',
headers={'Authorization': 'Bearer foobar'},
json=group_data,
)
keycloak_mock.group_update.assert_called_with(group_id, group_data)
keycloak_mock.group_get.assert_called_with(group_id)
assert rv.status_code == 200
assert rv.json == group_data | {'id': group_id, '_href': ANY}
def test_delete_group(client, keycloak_mock):
group_id = '00000000-0004-0003-0002-000000000001'
keycloak_mock.group_delete.return_value = group_id
rv = client.delete(
f'{API_BASE}/auth/group/{group_id}',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.group_delete.assert_called_with(group_id)
assert rv.status_code == 200
assert rv.json == {}
def test_list_group_users(client, keycloak_mock):
group_id = '00000000-0004-0003-0002-000000000001'
user_data = {
'id': '00000000-0000-0000-0000-000000000000',
'username': 'user',
}
keycloak_mock.group_user_list.return_value = [user_data]
rv = client.get(
f'{API_BASE}/auth/group/{group_id}/users',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.group_user_list.assert_called_with(group_id)
assert rv.status_code == 200
assert rv.json == [user_data | {'_href': ANY}]
def test_list_roles(client, keycloak_mock):
keycloak_mock.role_list.return_value = [{
'id': '00000000-000d-000c-000b-00000000000a',
'name': 'admin',
}]
rv = client.get(
f'{API_BASE}/auth/role',
headers={'Authorization': 'Bearer foobar'},
)
assert rv.status_code == 200
assert rv.json == [{
'id': '00000000-000d-000c-000b-00000000000a',
'name': 'admin',
'_href': ANY,
}]
def test_create_role(client, keycloak_mock):
role_id = '00000000-000d-000c-000b-00000000000a'
role_data = {'name': 'admin'}
keycloak_mock.role_create.return_value = role_id
keycloak_mock.role_get.return_value = role_data | {'id': role_id}
rv = client.post(
f'{API_BASE}/auth/role',
headers={'Authorization': 'Bearer foobar'},
json=role_data,
)
keycloak_mock.role_create.assert_called_with(role_data)
keycloak_mock.role_get.assert_called_with(role_id)
assert rv.status_code == 200
assert rv.json == role_data | {'id': role_id, '_href': ANY}
def test_get_role(client, keycloak_mock):
role_id = '00000000-000d-000c-000b-00000000000a'
role_data = {'name': 'admin'}
keycloak_mock.role_get.return_value = role_data | {'id': role_id}
rv = client.get(
f'{API_BASE}/auth/role/{role_id}',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.role_get.assert_called_with(role_id)
assert rv.status_code == 200
assert rv.json == role_data | {'id': role_id, '_href': ANY}
def test_update_role(client, keycloak_mock):
role_id = '00000000-000d-000c-000b-00000000000a'
role_data = {'name': 'new-admin'}
keycloak_mock.role_update.return_value = role_id
keycloak_mock.role_get.return_value = role_data | {'id': role_id}
rv = client.patch(
f'{API_BASE}/auth/role/{role_id}',
headers={'Authorization': 'Bearer foobar'},
json=role_data,
)
keycloak_mock.role_update.assert_called_with(role_id, role_data)
keycloak_mock.role_get.assert_called_with(role_data['name'])
assert rv.status_code == 200
assert rv.json == role_data | {'id': role_id, '_href': ANY}
def test_delete_role(client, keycloak_mock):
role_id = '00000000-000d-000c-000b-00000000000a'
keycloak_mock.role_delete.return_value = role_id
rv = client.delete(
f'{API_BASE}/auth/role/{role_id}',
headers={'Authorization': 'Bearer foobar'},
)
keycloak_mock.role_delete.assert_called_with(role_id)
assert rv.status_code == 200
assert rv.json == {}
| python |
from corehq.apps.commtrack.const import COMMTRACK_USERNAME
from corehq.apps.users.util import DEMO_USER_ID, SYSTEM_USER_ID
from corehq.pillows.utils import (
COMMCARE_SUPPLY_USER_TYPE,
DEMO_USER_TYPE,
MOBILE_USER_TYPE,
SYSTEM_USER_TYPE,
WEB_USER_TYPE,
)
from corehq.warehouse.loaders import (
ApplicationDimLoader,
ApplicationStagingLoader,
DomainMembershipDimLoader,
GroupDimLoader,
GroupStagingLoader,
LocationDimLoader,
LocationStagingLoader,
UserDimLoader,
UserGroupDimLoader,
UserStagingLoader,
)
from corehq.warehouse.models import (
ApplicationDim,
Batch,
DomainMembershipDim,
GroupDim,
LocationDim,
LocationStagingTable,
UserDim,
UserGroupDim,
)
from corehq.warehouse.tests.utils import (
BaseWarehouseTestCase,
create_application_staging_record,
create_batch,
create_group_staging_record,
create_location_records_from_tree,
create_location_staging_record,
create_user_staging_record,
)
def teardown_module():
Batch.objects.all().delete()
class TestUserDim(BaseWarehouseTestCase):
domain = 'user-dim-test'
slug = 'user_dim'
@classmethod
def setUpClass(cls):
super(TestUserDim, cls).setUpClass()
cls.batch = create_batch(cls.slug)
cls.records = [
create_user_staging_record(
cls.domain,
user_id=SYSTEM_USER_ID,
username='system_bob',
batch_id=cls.batch.id
),
create_user_staging_record(
cls.domain,
user_id=DEMO_USER_ID,
username='demo_sally',
batch_id=cls.batch.id
),
create_user_staging_record(
cls.domain,
user_id=COMMTRACK_USERNAME,
username='commtrack_billy',
batch_id=cls.batch.id
),
create_user_staging_record(
None,
user_id='beeboobop',
username='web',
doc_type='WebUser',
batch_id=cls.batch.id
),
create_user_staging_record(
cls.domain,
user_id='greengoblin',
username='mobile',
batch_id=cls.batch.id
),
]
@classmethod
def tearDownClass(cls):
for record in cls.records:
record.delete()
UserDimLoader().clear_records()
UserStagingLoader().clear_records()
super(TestUserDim, cls).tearDownClass()
def test_user_types(self):
UserDimLoader().commit(self.batch)
self.assertEqual(UserDim.objects.count(), 5)
self.assertEqual(
UserDim.objects.filter(user_type=SYSTEM_USER_TYPE).first().user_id,
SYSTEM_USER_ID,
)
self.assertEqual(
UserDim.objects.filter(user_type=DEMO_USER_TYPE).first().user_id,
DEMO_USER_ID,
)
self.assertEqual(
UserDim.objects.filter(user_type=COMMCARE_SUPPLY_USER_TYPE).first().user_id,
COMMTRACK_USERNAME,
)
self.assertEqual(
UserDim.objects.filter(user_type=MOBILE_USER_TYPE).first().user_id,
'greengoblin',
)
self.assertEqual(
UserDim.objects.filter(user_type=WEB_USER_TYPE).first().user_id,
'beeboobop',
)
class TestDomainMembershipDim(BaseWarehouseTestCase):
slug = DomainMembershipDimLoader.slug
@classmethod
def setUpClass(cls):
super(TestDomainMembershipDim, cls).setUpClass()
cls.batch = create_batch(cls.slug)
cls.bootstrap_user_staging()
@classmethod
def bootstrap_user_staging(cls):
create_user_staging_record(
domain='test1',
user_id='u1',
username='mobile1',
doc_type='CommCareUser',
batch_id=cls.batch.id,
)
create_user_staging_record(
domain='test1',
user_id='u2',
username='mobile2',
doc_type='CommCareUser',
batch_id=cls.batch.id,
)
create_user_staging_record(
domain=None,
username='mobile1',
user_id='u3',
doc_type='WebUser',
batch_id=cls.batch.id,
domain_memberships=[
{'domain': 'test1', 'is_admin': True},
{'domain': 'test2', 'is_admin': False},
]
)
UserDimLoader().commit(cls.batch)
@classmethod
def tearDownClass(cls):
DomainMembershipDimLoader().clear_records()
UserDimLoader().clear_records()
UserStagingLoader().clear_records()
super(TestDomainMembershipDim, cls).tearDownClass()
def test_insert_and_update(self):
DomainMembershipDimLoader().commit(self.batch)
        # should create 4 domain membership rows
self.assertEqual(
DomainMembershipDim.objects.count(), 4
)
        # 'u3' user should have 2 membership rows, one for each domain
dim_id_of_user3 = UserDim.objects.filter(user_id='u3')[0].id
self.assertEqual(
DomainMembershipDim.objects.filter(user_dim_id=dim_id_of_user3).count(),
2
)
## test removing a domain membership
        # clear the staging table and add a record that drops one of u3's two memberships
UserStagingLoader().clear_records()
create_user_staging_record(
domain=None,
username='mobile1',
user_id='u3',
doc_type='WebUser',
batch_id=self.batch.id,
domain_memberships=[
{'domain': 'test1', 'is_admin': True},
]
)
DomainMembershipDimLoader().commit(self.batch)
        # should create 3 domain membership rows instead of 4
self.assertEqual(
DomainMembershipDim.objects.count(), 3
)
# u3 user should have only 1 domain-membership
dim_id_of_user3 = UserDim.objects.filter(user_id='u3')[0].id
self.assertEqual(
DomainMembershipDim.objects.filter(user_dim_id=dim_id_of_user3).count(),
1
)
class TestUserGroupDim(BaseWarehouseTestCase):
domain = 'user-group-dim-test'
slug = 'user_group_dim'
@classmethod
def setUpClass(cls):
super(TestUserGroupDim, cls).setUpClass()
cls.batch = create_batch(cls.slug)
cls.blue_dog = create_user_staging_record(cls.domain,
username='blue-dog',
batch_id=cls.batch.id)
cls.black_dog = create_user_staging_record(cls.domain,
username='black-dog',
batch_id=cls.batch.id)
cls.yellow_cat = create_user_staging_record(cls.domain,
username='yellow-cat',
batch_id=cls.batch.id)
@classmethod
def tearDownClass(cls):
GroupStagingLoader().clear_records()
UserStagingLoader().clear_records()
GroupDimLoader().clear_records()
UserDimLoader().clear_records()
UserGroupDimLoader().clear_records()
super(TestUserGroupDim, cls).tearDownClass()
def test_basic_user_group_insert(self):
UserDimLoader().commit(self.batch)
self.assertEqual(UserDim.objects.count(), 3)
# Setup group records to have multiple users
dogs = create_group_staging_record(
self.domain,
'dogs',
user_ids=[self.blue_dog.user_id, self.black_dog.user_id],
batch_id=self.batch.id
)
create_group_staging_record(
self.domain,
'cats',
user_ids=[self.yellow_cat.user_id],
batch_id=self.batch.id
)
GroupDimLoader().commit(self.batch)
self.assertEqual(GroupDim.objects.count(), 2)
UserGroupDimLoader().commit(self.batch)
self.assertEqual(UserGroupDim.objects.count(), 3)
dog_relations = UserGroupDim.objects.filter(group_dim=GroupDim.objects.get(group_id=dogs.group_id))
self.assertEqual(
dog_relations.count(),
2,
)
self.assertEqual(
set(dog_relations.values_list('user_dim_id', flat=True)),
set(UserDim.objects.filter(
user_id__in=[self.blue_dog.user_id, self.black_dog.user_id]
).values_list('id', flat=True)),
)
class TestLocationDim(BaseWarehouseTestCase):
domain = 'location-dim-test'
slug = 'location_dim'
@classmethod
def setUpClass(cls):
super(TestLocationDim, cls).setUpClass()
cls.batch = create_batch(cls.slug)
def tearDown(self):
LocationStagingLoader().clear_records()
LocationDimLoader().clear_records()
super(TestLocationDim, self).tearDown()
def test_location_dim(self):
tree = {
('Illinois', 'state'): {
('Naperville', 'city'): {
('Home', 'home'): {}
},
('Chicago', 'city'): {},
}
}
create_location_records_from_tree(self.domain, tree, self.batch.id)
self.assertEqual(LocationStagingTable.objects.count(), 4)
LocationDimLoader().commit(self.batch)
self.assertEqual(LocationDim.objects.count(), 4)
home_location = LocationDim.objects.filter(name='Home').first()
self.assertEqual(home_location.location_type_name, 'home')
self.assertEqual(home_location.location_type_code, 'home')
root_location = LocationDim.objects.filter(name='Illinois').first()
self.assertEqual(root_location.location_level_0, root_location.sql_location_id)
def test_location_dim_update(self):
tree = {
('Illinois', 'state'): {
('Naperville', 'city'): {
('Home', 'home'): {}
},
('Chicago', 'city'): {},
}
}
create_location_records_from_tree(self.domain, tree, self.batch.id)
LocationDimLoader().commit(self.batch)
self.assertEqual(LocationDim.objects.count(), 4)
# Let's add one more location under Naperville to ensure that the dim updates
# when it's not a root node
LocationStagingLoader().clear_records()
home_location = LocationDim.objects.filter(name='Home').first()
city_location = LocationDim.objects.filter(name='Naperville').first()
create_location_staging_record(
self.domain,
'Other home',
sql_location_id=10,
# Give it the same parent as the Home location
sql_parent_location_id=city_location.sql_location_id,
location_type_id=home_location.location_type_id,
batch_id=self.batch.id
)
LocationDimLoader().commit(self.batch)
self.assertEqual(LocationDim.objects.count(), 5)
class TestAppDim(BaseWarehouseTestCase):
domain = 'app-dim-test'
slug = 'app_dim'
@classmethod
def setUpClass(cls):
super(TestAppDim, cls).setUpClass()
cls.batch = create_batch(cls.slug)
@classmethod
def tearDownClass(cls):
ApplicationDimLoader().clear_records()
ApplicationStagingLoader().clear_records()
super(TestAppDim, cls).tearDownClass()
def test_app_dim(self):
create_application_staging_record(self.domain, 'test-app', batch_id=self.batch.id)
create_application_staging_record(self.domain, 'test-deleted', doc_type='Application-Deleted', batch_id=self.batch.id)
ApplicationDimLoader().commit(self.batch)
self.assertEqual(ApplicationDim.objects.count(), 2)
test_app = ApplicationDim.objects.get(name='test-app')
self.assertEqual(test_app.deleted, False)
deleted_app = ApplicationDim.objects.get(name='test-deleted')
self.assertEqual(deleted_app.deleted, True)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 15 11:52:31 2019
@author: tgadfort
"""
import sys
import re
from datetime import timedelta
from playTypes import playtype
# create logger
import logging
module_logger = logging.getLogger('log.{0}'.format(__name__))
############################################################################################################
## Drive Class
############################################################################################################
class driveclass:
def __init__(self, headline, detail, possession, postdrivehomescore, postdriveawayscore, plays=None, text={}):
self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
self.ind = 6*" "
self.headline = headline
self.detail = detail
self.possession = possession
self.postdrivehomescore = postdrivehomescore
self.postdriveawayscore = postdriveawayscore
self.plays = plays
try:
self.headlineText = text.get("Headline")[0]
except:
self.headlineText = str(None)
try:
self.detailText = text.get("Detail")[0]
except:
self.detailText = str(None)
def setPlays(self, plays):
self.plays = plays
def getHeadlineText(self):
return self.headlineText
def getDetailText(self):
return self.detailText
def getSummaryText(self):
plays = self.detail.plays
yards = self.detail.yards
headline = self.headline
retval = "{0: <5}{1: <5}{2: <25}{3: <25}{4: <25}".format(plays, yards, headline, self.headlineText, self.detailText)
return retval
############################################################################################################
## Drive Detail Class
############################################################################################################
class drivedetailclass:
def __init__(self, plays, yards, gametime):
self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
self.ind = 6*" "
self.plays = plays
self.yards = yards
self.gametime = gametime
############################################################################################################
## Drive Summary Class
############################################################################################################
class drivesummary:
def __init__(self, drive, fieldMap):
self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
self.ind = 4*" "
self.name = "drivesummary"
self.headline = None
self.score = None
self.details = None
self.fullDrive = None
driveNo = drive.get('Drive')
if driveNo is None:
raise ValueError("No Drive in drive dict")
headline = drive.get('Headline')
if headline is None:
raise ValueError("No Headline in drive dict")
self.headlineText = headline
detail = drive.get('Detail')
if detail is None:
raise ValueError("No Detail in drive dict")
self.detailText = detail
possession = drive.get('Posession')
if possession is None:
raise ValueError("No Posession in drive dict")
data = drive.get('Data')
if data is None:
raise ValueError("No Data in drive dict")
###
### For whatever reason home/away scores are reversed on the webpage...
###
homescore = drive.get('AwayScore')
if homescore is None:
raise ValueError("No AwayScore in drive dict")
awayscore = drive.get('HomeScore')
if awayscore is None:
raise ValueError("No HomeScore in drive dict")
self.possession = self.parsePossession(possession, fieldMap)
self.headline = self.parseHeadline(headline)
self.detail = self.parseDetail(detail)
self.homescore = self.parseScore(homescore)
self.awayscore = self.parseScore(awayscore)
self.driveplays = data
self.logger.debug("{0}Drive Summary: [{1} - {2}] {3}".format(self.ind, self.awayscore, self.homescore, headline))
self.fullDrive = driveclass(headline=self.headline, detail=self.detail, possession=self.possession,
postdrivehomescore=self.homescore, postdriveawayscore=self.awayscore,
text={"Headline": self.headlineText, "Detail": self.detailText})
def getHeadline(self):
return self.headlineText
def getDetail(self):
return self.detailText
def getPostDriveScore(self):
return [self.awayscore, self.homescore]
def getDrivePlays(self):
return self.driveplays
def getFullDrive(self):
return self.fullDrive
def parsePossession(self, possession, fieldMap, debug=False):
if not isinstance(possession, list):
self.logger.error("Possession is not a list: {0}".format(possession))
if len(possession) != 1:
self.logger.error("Not one element in possession list: {0}".format(possession))
teamID = possession[0]
teamAbbrev = None
try:
teamAbbrev = fieldMap[teamID]
except:
self.logger.error("Could not find {0} in field map: {1}".format(teamID, fieldMap))
self.logger.debug("{0}Parsed Possession: {1}".format(self.ind, teamAbbrev))
return teamAbbrev
def parseHeadline(self, headline, debug=False):
play = None
if isinstance(headline, list):
if len(headline) >= 1:
pt = playtype()
play = pt.getPlay(headline[0]).name
else:
self.logger.error("Not one headline entry: {0}".format(headline))
else:
self.logger.error("Headline is not a list: {0}".format(headline))
self.logger.debug("{0}Parsed Headline: {1}".format(self.ind, play))
return play
def parseScore(self, score, debug=False):
if not isinstance(score, list):
self.logger.error("Could not determine score type: {0}".format(score))
if len(score) != 1:
self.logger.error("Not one detail entry: {0}".format(score))
scoredata = score[0]
try:
scoredata = int(scoredata)
except:
self.logger.error("Could not find an integer score for {0}".format(scoredata))
self.logger.debug("{0}Parsed Score: {1}".format(self.ind, scoredata))
return scoredata
def parseDetail(self, detail, debug=False):
if debug:
fname = sys._getframe().f_code.co_name
print("FUNC {0}".format(fname))
if not isinstance(detail, list):
raise ValueError("Could not determine detail play type: {0}".format(detail))
if len(detail) != 1:
raise ValueError("Not one detail entry: {0}".format(detail))
detaildata = detail[0]
yards = "(yards|yard|Yds|yds|Yd|yd)"
plays = "(play|plays)"
num = "([+-?]\d+|\d+)"
totalplays = None
totalyards = None
totalclock = None
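        # Hedged example: a detail string like "10 plays, 75 Yds, 5:32" matches the
        # first pattern below, giving totalplays=10, totalyards=75 and
        # totalclock=timedelta(minutes=5, seconds=32).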
m = re.search(r"{0}\s{1},\s{2}\s{3},\s{4}:{5}".format(num, plays, num, yards, num, num), detaildata)
if m is not None:
groups = m.groups()
totalplays = int(groups[0])
totalyards = int(groups[2])
totalclock = timedelta(minutes=int(groups[4]), seconds=int(groups[5]))
if totalplays is None and totalyards is None and totalclock is None:
m = re.search(r"{0}\s{1},\s{2}\s{3}".format(num, plays, num, yards), detaildata)
if m is not None:
groups = m.groups()
totalplays = int(groups[0])
totalyards = int(groups[2])
totalclock = timedelta(minutes=0, seconds=0)
if totalplays is None and totalyards is None and totalclock is None:
raise ValueError("Could not parse drive detail: {0}".format(detaildata))
drivedetail = drivedetailclass(plays=totalplays, yards=totalyards, gametime=totalclock)
return drivedetail
| python |
"""
Abstractions for lazy compositions/manipulations of And Inverter
Graphs.
"""
from __future__ import annotations
from typing import (Union, FrozenSet, Callable, Tuple,
Mapping, Sequence, Optional)
import attr
import funcy as fn
from bidict import bidict
from pyrsistent import pmap
from pyrsistent.typing import PMap
import aiger as A
from aiger.aig import AIG, Node, Input, LatchIn
from aiger.aig import ConstFalse
@attr.s(frozen=True, auto_attribs=True)
class LazyAIG:
def __call__(self, inputs, latches=None, *, lift=None):
pass
@property
def latch2init(self):
pass
@property
def inputs(self):
pass
@property
def outputs(self):
pass
@property
def comments(self):
pass
def write(self, path):
self.aig.write(path)
relabel = AIG.relabel
simulator = AIG.simulator
simulate = AIG.simulate
@property
def latches(self) -> FrozenSet[str]:
return frozenset(self.latch2init.keys())
@property
def lazy_aig(self) -> LazyAIG:
return self
@property
def aig(self) -> AIG:
"""Return's flattened AIG represented by this LazyAIG."""
false = ConstFalse()
inputs = {i: Input(i) for i in self.inputs}
latches = {i: LatchIn(i) for i in self.latches}
def lift(obj):
if isinstance(obj, Node):
return obj
assert isinstance(obj, bool)
return ~false if obj else false
node_map, latch_map = self(inputs, latches=latches, lift=lift)
return AIG(
comments=self.comments,
inputs=self.inputs,
node_map=node_map,
latch_map=latch_map,
latch2init=self.latch2init,
)
def __rshift__(self, other: AIG_Like) -> LazyAIG:
"""Cascading composition. Feeds self into other."""
return Cascading(self, other)
def __lshift__(self, other: AIG_Like) -> LazyAIG:
"""Cascading composition. Feeds other into self."""
return lazy(other) >> self
def __or__(self, other: AIG_Like) -> LazyAIG:
"""Parallel composition between self and other."""
assert not self.latches & other.latches
assert not self.outputs & other.outputs
return Parallel(self, other)
def cutlatches(self, latches=None, renamer=None) -> Tuple[LazyAIG, Labels]:
"""Returns LazyAIG where the latches specified
in `latches` have been converted into inputs/outputs.
- If `latches` is `None`, then all latches are cut.
- `renamer`: is a function from strings to strings which
determines how to rename latches to avoid name collisions.
"""
lcirc = CutLatches(self, renamer=renamer, cut=latches)
l2init = dict(self.latch2init)
lmap = {k: (lcirc.renamer(k), l2init[k]) for k in lcirc.cut}
return lcirc, lmap
def loopback(self, *wirings) -> LazyAIG:
"""Returns result of feeding outputs specified in `*wirings` to
inputs specified in `wirings`.
Each positional argument (element of wirings) should have the following
schema:
{
'input': str,
'output': str,
'latch': str, # what to name the new latch.
'init': bool, # new latch's initial value.
'keep_output': bool, # whether output is consumed by feedback.
}
"""
return LoopBack(self, wirings=wirings)
def unroll(self, horizon, *, init=True, omit_latches=True,
only_last_outputs=False) -> LazyAIG:
"""
Returns circuit which computes the same function as
the sequential circuit after `horizon` many inputs.
Each input/output has `##time_{time}` appended to it to
distinguish different time steps.
"""
return A.Unrolled(
self, horizon, init, omit_latches, only_last_outputs
)
def __getitem__(self, others):
"""Relabel inputs, outputs, or latches.
`others` is a tuple, (kind, relabels), where
1. kind in {'i', 'o', 'l'}
2. relabels is a mapping from old names to new names.
Note: The syntax is meant to resemble variable substitution
notations, i.e., foo[x <- y] or foo[x / y].
"""
assert isinstance(others, tuple) and len(others) == 2
kind, relabels = others
assert kind in {'i', 'o', 'l'}
key = {
'i': 'input_relabels',
'l': 'latch_relabels',
'o': 'output_relabels',
}.get(kind)
return A.Relabeled(self, **{key: relabels})
def reinit(self, latch2init) -> LazyAIG:
"""Update late initial values based on mapping provided."""
assert set(latch2init.keys()) <= self.latches
return UpdatedLatchInits(circ=self, latch2init=latch2init)
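# A minimal, hedged usage sketch of the LazyAIG API documented above. The circuit
# argument and its input name 'x' are hypothetical; this helper is illustrative
# only and is not called anywhere in this module.
def _lazy_api_example(circ: LazyAIG) -> LazyAIG:
    # Substitution-style relabelling ('i' selects inputs): rename input 'x' to 'x2'.
    relabeled = circ['i', {'x': 'x2'}]
    # Unroll the sequential circuit for 2 time steps; input/output names get
    # '##time_{t}' suffixes, as described in `unroll`'s docstring.
    return relabeled.unroll(2)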
AIG_Like = Union[AIG, LazyAIG]
Labels = Mapping[str, str]
def walk_keys(func, mapping):
return fn.walk_keys(func, dict(mapping))
def omit(mapping, keys):
return fn.omit(dict(mapping), keys)
def project(mapping, keys):
return fn.project(dict(mapping), keys)
@attr.s(frozen=True, auto_attribs=True)
class Parallel(LazyAIG):
left: AIG_Like
right: AIG_Like
def __call__(self, inputs, latches=None, *, lift=None):
out_l, lmap_l = self.left(inputs, latches=latches, lift=lift)
out_r, lmap_r = self.right(inputs, latches=latches, lift=lift)
return fn.merge(out_l, out_r), fn.merge(lmap_l, lmap_r)
def _merge_maps(self, key):
map1, map2 = [pmap(getattr(c, key)) for c in (self.left, self.right)]
return map1 + map2
@property
def latch2init(self):
return self._merge_maps('latch2init')
@property
def inputs(self):
return self.left.inputs | self.right.inputs
@property
def outputs(self):
return self.left.outputs | self.right.outputs
@property
def comments(self):
return self.left.comments + self.right.comments
@attr.s(frozen=True, auto_attribs=True)
class Wire:
input: str
output: str
latch: str
keep_output: bool = True
init: bool = True
def convert_wirings(wirings):
for wire in wirings:
wire.setdefault('latch', wire['input'])
return tuple(Wire(**w) for w in wirings)
@attr.s(frozen=True, auto_attribs=True)
class LoopBack(LazyAIG):
circ: AIG_Like
wirings: Sequence[Wire] = attr.ib(converter=convert_wirings)
def __call__(self, inputs, latches=None, *, lift=None):
if latches is None:
latches = pmap()
latches = dict(self.latch2init + latches) # Override initial values.
for wire in self.wirings:
inputs[wire.input] = latches[wire.latch]
del latches[wire.latch]
omap, lmap = self.circ(inputs, latches=latches, lift=lift)
for wire in self.wirings:
out, latch = wire.output, wire.latch
lmap[latch] = omap[out]
if not wire.keep_output:
del omap[out]
return omap, lmap
@property
def latch2init(self):
latch2init = pmap(self.circ.latch2init).evolver()
for wire in self.wirings:
latch2init[wire.latch] = wire.init
return latch2init.persistent()
@property
def inputs(self):
return self.circ.inputs - {w.input for w in self.wirings}
@property
def outputs(self):
omitted = {w.output for w in self.wirings if not w.keep_output}
return self.circ.outputs - omitted
@property
def comments(self):
return self.circ.comments
def convert_renamer(renamer):
if renamer is None:
def renamer(*_):
return A.common._fresh()
return fn.memoize(renamer)
@attr.s(frozen=True, auto_attribs=True)
class CutLatches(LazyAIG):
circ: AIG_Like
renamer: Callable[[str], str] = attr.ib(converter=convert_renamer)
cut: Optional[FrozenSet[str]] = None
def __attrs_post_init__(self):
if self.cut is None:
object.__setattr__(self, "cut", self.circ.latches)
def __call__(self, inputs, latches=None, *, lift=None):
inputs = dict(inputs)
if latches is None:
latches = pmap()
latches = dict(self.latch2init + latches) # Override initial values.
for latch in self.cut:
new_name = self.renamer(latch)
latches[latch] = inputs[new_name]
del inputs[new_name]
omap, lmap = self.circ(inputs, latches=latches, lift=lift)
for latch in self.cut:
new_name = self.renamer(latch)
omap[new_name] = lmap[latch]
del lmap[latch]
return omap, lmap
@property
def latch2init(self):
return pmap(omit(self.circ.latch2init, self.cut))
@property
def inputs(self):
return self.circ.inputs | set(map(self.renamer, self.cut))
@property
def outputs(self):
return self.circ.outputs | set(map(self.renamer, self.cut))
@property
def comments(self):
return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Cascading(LazyAIG):
left: AIG_Like
right: AIG_Like
def __call__(self, inputs, latches=None, *, lift=None):
inputs_l = project(inputs, self.left.inputs)
omap_l, lmap_l = self.left(inputs_l, latches=latches, lift=lift)
inputs_r = project(inputs, self.right.inputs)
inputs_r.update(omap_l) # <--- Cascade setup happens here.
omap_l = omit(omap_l, self._interface)
omap_r, lmap_r = self.right(inputs_r, latches=latches, lift=lift)
return fn.merge(omap_l, omap_r), fn.merge(lmap_l, lmap_r)
def _merge_maps(self, key):
map1, map2 = [pmap(getattr(c, key)) for c in (self.left, self.right)]
return map1 + map2
@property
def latch2init(self):
return self._merge_maps('latch2init')
@property
def _interface(self):
return self.left.outputs & self.right.inputs
@property
def inputs(self):
return self.left.inputs | (self.right.inputs - self._interface)
@property
def outputs(self):
return self.right.outputs | (self.left.outputs - self._interface)
@property
def comments(self):
return self.left.comments + self.right.comments
def _relabel_map(relabels, mapping):
return pmap(walk_keys(lambda x: relabels.get(x, x), mapping))
@attr.s(frozen=True, auto_attribs=True)
class UpdatedLatchInits(LazyAIG):
circ: AIG_Like
_latch2init: PMap[str, bool] = pmap()
def __call__(self, inputs, latches=None, *, lift=None):
if latches is None:
latches = pmap()
latches = dict(self.latch2init + latches) # Override initial values.
return self.circ(inputs, latches=latches, lift=lift)
@property
def latch2init(self):
return self.circ.latch2init + self._latch2init
@property
def inputs(self):
return self.circ.inputs
@property
def outputs(self):
return self.circ.outputs
@property
def comments(self):
return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Relabeled(LazyAIG):
circ: AIG_Like
input_relabels: PMap[str, str] = pmap()
latch_relabels: PMap[str, str] = pmap()
output_relabels: PMap[str, str] = pmap()
def __call__(self, inputs, latches=None, *, lift=None):
if latches is None:
latches = pmap()
latches = dict(self.latch2init + latches) # Override initial values.
new2old_i = bidict(self.input_relabels).inv
new2old_l = bidict(self.latch_relabels).inv
inputs = _relabel_map(new2old_i, inputs)
latches = _relabel_map(new2old_l, latches)
omap, lmap = self.circ(inputs, latches=latches, lift=lift)
omap = _relabel_map(self.output_relabels, omap)
lmap = _relabel_map(self.latch_relabels, lmap)
return dict(omap), dict(lmap)
@property
def latch2init(self):
return _relabel_map(self.latch_relabels, self.circ.latch2init)
@property
def inputs(self):
old_inputs = self.circ.inputs
return frozenset(self.input_relabels.get(i, i) for i in old_inputs)
@property
def outputs(self):
old_outputs = self.circ.outputs
return frozenset(self.output_relabels.get(i, i) for i in old_outputs)
@property
def comments(self):
return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Unrolled(LazyAIG):
circ: AIG_Like
horizon: int
init: bool = True
omit_latches: bool = True
only_last_outputs: bool = False
def __call__(self, inputs, latches=None, *, lift=None):
circ, omit_latches, init = self.circ, self.omit_latches, self.init
horizon, only_last_outputs = self.horizon, self.only_last_outputs
if not omit_latches:
assert (circ.latches & circ.outputs) == set()
if not init:
assert (circ.latches & circ.inputs) == set()
latches = circ.latch2init if init else project(inputs, circ.inputs)
if init:
inputs = omit(inputs, circ.inputs)
outputs = {}
for time in range(horizon):
omap, latches = circ(
inputs={i: inputs[f'{i}##time_{time}'] for i in circ.inputs},
latches=latches,
lift=lift
)
if (not only_last_outputs) or (time + 1 == horizon):
template = '{}' + f'##time_{time + 1}'
outputs.update(walk_keys(template.format, omap))
if not self.omit_latches:
outputs.update(walk_keys(template.format, latches))
assert set(outputs.keys()) == self.outputs
return dict(outputs), {}
@property
def latch2init(self):
return pmap()
def __with_times(self, keys, times):
for time in times:
template = '{}' + f'##time_{time}'
yield from map(template.format, keys)
def _with_times(self, keys, times):
return frozenset(self.__with_times(keys, times))
@property
def inputs(self):
base = set() if self.init else self.circ.latches
base |= self.circ.inputs
return self._with_times(base, times=range(self.horizon))
@property
def outputs(self):
start = self.horizon if self.only_last_outputs else 1
base = set() if self.omit_latches else self.circ.latches
base |= self.circ.outputs
return self._with_times(base, times=range(start, self.horizon + 1))
@property
def comments(self):
return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Lifted(LazyAIG):
circ: AIG_Like
def __call__(self, inputs, latches=None, *, lift=None):
return self.circ(inputs=inputs, latches=latches, lift=lift)
@property
def latch2init(self):
return self.circ.latch2init
@property
def inputs(self):
return self.circ.inputs
@property
def outputs(self):
return self.circ.outputs
@property
def comments(self):
return self.circ.comments
def lazy(circ: Union[AIG, LazyAIG]) -> LazyAIG:
"""Lifts AIG to a LazyAIG."""
return Lifted(circ)
__all__ = ['lazy', 'LazyAIG', 'Parallel', 'LoopBack', 'CutLatches',
'Cascading', 'Relabeled', 'Unrolled', 'AIG_Like']
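# ----------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the library).
# Assumptions: the upstream py-aiger package is importable as `aiger` and
# provides `aiger.and_gate`; the wrappers above behave as documented in their
# docstrings. Intended purely to illustrate the lazy API.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    import aiger

    # Wrap an eager AND gate (inputs x, y; output z) in the lazy interface.
    circ = lazy(aiger.and_gate(['x', 'y'], output='z'))

    # Relabel input 'x' to 'a' using the substitution-style __getitem__.
    relabeled = circ['i', {'x': 'a'}]
    assert relabeled.inputs == {'a', 'y'}

    # Feed output 'z' back into input 'y' through a fresh latch.
    looped = circ.loopback({
        'input': 'y', 'output': 'z',
        'latch': 'z_loop', 'init': True, 'keep_output': True,
    })
    assert 'z_loop' in dict(looped.latch2init)

    # Unroll the feedback circuit for three time steps and inspect its interface.
    unrolled = looped.unroll(3)
    print(sorted(unrolled.inputs))
    print(sorted(unrolled.outputs))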
| python |
import numpy as np
import xobjects as xo
import xpart as xp
# Create a Particles on your selected context (default is CPU)
context = xo.ContextCupy()
part = xp.Particles(_context=context, x=[1,2,3])
##############
# PANDAS/HDF #
##############
# Save particles to hdf file via pandas
import pandas as pd
df = part.to_pandas()
df.to_hdf('part.hdf', key='df', mode='w')
# Read particles from hdf file via pandas
part_from_pdhdf = xp.Particles.from_pandas(pd.read_hdf('part.hdf'))
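# Quick round-trip check (editor's sketch; assumes the reloaded particles live
# on the default CPU context, so `part_from_pdhdf.x` is a plain numpy array).
print(part_from_pdhdf.x)   # expected to show the original coordinates [1. 2. 3.]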
| python |
# proxy module
from pyface.i_file_dialog import *
| python |
import unicodedata
from django.utils.timezone import make_aware
from eagle.models import EDINETCompany, EDINETDocument
class EDINETDocumentRegister():
@classmethod
def register_document(cls, document, xbrl_path, pdf_path):
def normalize(text):
if text is not None:
return unicodedata.normalize("NFKC", text)
else:
return text
# Confirm company registration
jcn = document.jcn
company = None
try:
company = EDINETCompany.objects.get(jcn=jcn)
except EDINETCompany.DoesNotExist:
company = None
if company is None:
name = normalize(document.filer_name)
company = EDINETCompany(
local_name=name,
global_name=name,
jcn=document.jcn,
edinet_code=document.edinet_code,
sec_code=document.sec_code,
fund_code=document.fund_code
)
company.save()
parent = None
if document.parent_document_id:
try:
parent = EDINETDocument.objects.get(
edinet_document_id=document.parent_document_id)
except EDINETDocument.DoesNotExist:
parent = None
_document = EDINETDocument()
try:
_document = EDINETDocument.objects.get(
edinet_document_id=document.document_id)
except EDINETDocument.DoesNotExist:
_document = EDINETDocument()
# Register Company's document
title = normalize(document.title)
reason = normalize(document.submit_reason)
_document.company = company
if document.period_start is None and parent is not None:
_document.period_start = parent.period_start
else:
_document.period_start = document.period_start
if document.period_end is None and parent is not None:
_document.period_end = parent.period_end
else:
_document.period_end = document.period_end
_document.submitted_date = make_aware(document.submitted_date)
_document.lang = "ja"
_document.path = xbrl_path
_document.xbrl_path = xbrl_path
_document.pdf_path = pdf_path
_document.edinet_document_id = document.document_id
_document.edinet_document_type = document.doc_type_code
_document.title = title
_document.ordinance_code = document.ordinance_code
_document.form_code = document.form_code
_document.issuer_edinet_code = document.issuer_edinet_code
_document.subject_edinet_code = document.subject_edinet_code
_document.subsidiary_edinet_code = document.subsidiary_edinet_code
_document.submit_reason = reason
_document.parent_document_id = parent
if document.operated_date is None:
_document.operated_date = document.operated_date
else:
_document.operated_date = make_aware(document.operated_date)
_document.withdraw_status = document.withdraw_status
_document.operation_status = document.operation_status
_document.disclosure_status = document.disclosure_status
_document.has_attachment = document.has_attachment
_document.has_xbrl = document.has_xbrl
_document.has_pdf = document.has_pdf
_document.has_english_doc = document.has_english_doc
_document.save()
return _document
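# ----------------------------------------------------------------------------
# Usage sketch (editor's addition; the names below are hypothetical).
# `document` is expected to be a metadata object from an EDINET API client
# exposing the attributes read above (document_id, jcn, filer_name,
# doc_type_code, ...). Kept fully commented because it depends on such a
# client and on a configured Django environment.
#
#   meta = edinet_api.documents.get("S100XXXX")          # hypothetical client
#   saved = EDINETDocumentRegister.register_document(
#       meta,
#       xbrl_path="/data/xbrl/S100XXXX.zip",
#       pdf_path="/data/pdf/S100XXXX.pdf")
#   print(saved.edinet_document_id, saved.company.local_name)
# ----------------------------------------------------------------------------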
| python |
import asyncio
import aiohttp
import json
async def pollForex(symbols, authkey):
i = 0
while True:
symbol = symbols[i % len(symbols)]
try:
async with aiohttp.ClientSession() as session:
async with session.get(
url="https://api-fxpractice.oanda.com/v1/prices",
headers={'Authorization': ('Bearer ' + authkey)},
params='instruments=' + symbol) as resp:
yield (await resp.json())
except Exception as error:
print("Fetch forex rates from Oanda: " + type(error).__name__ + " " + str(error.args))
i += 1
await asyncio.sleep(1)
async def forexPoller(symbols, authkey, orderbookAnalyser):
async for ticker in pollForex(symbols=symbols, authkey=authkey):
symbolBase = ticker['prices'][0]['instrument'].split("_")[0]
symbolQuote = ticker['prices'][0]['instrument'].split("_")[1]
ask = ticker['prices'][0]['ask']
bid = ticker['prices'][0]['bid']
print("Received " + symbolBase+"/"+ symbolQuote +
" prices from Oanda. Ask: " + str(ask) + ", Bid: " + str(bid))
#orderbookAnalyser.updateForexPrice(ticker['prices'][0])
with open('./cred/oanda.json') as file:
authkeys = json.load(file)
asyncio.ensure_future(
forexPoller(
symbols=['EUR_USD', 'GBP_USD'],
authkey=authkeys['practice'],
orderbookAnalyser=None))
loop = asyncio.get_event_loop()
loop.run_forever()
| python |
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
# from collections import OrderedDict
import numpy as np
from scipy.stats import multivariate_normal
# import scipy.misc
import tensorflow as tf
# import keras
import keras.backend as KB
import keras.layers as KL
import keras.engine as KE
sys.path.append('..')
import mrcnn.utils as utils
import tensorflow.contrib.util as tfc
import pprint
##----------------------------------------------------------------------------------------------------------------------
## build_predictions
##----------------------------------------------------------------------------------------------------------------------
def build_predictions(norm_input_rois, mrcnn_class, mrcnn_bbox, config):
'''
Split output_rois by class id, and add class_id and class_score
output:
-------
pred_tensor: [ Batchsz, Num_Classes, Num_Rois, 7: (y1, x1, y2, x2, class_id, class_score, normalized class score)]
y1,x1, y2,x2 are in image dimension format
'''
batch_size = config.BATCH_SIZE
num_classes = config.NUM_CLASSES
h, w = config.IMAGE_SHAPE[:2]
# num_rois = config.TRAIN_ROIS_PER_IMAGE
num_rois = KB.int_shape(norm_input_rois)[1]
scale = tf.constant([h,w,h,w], dtype = tf.float32)
# dup_scale = tf.reshape(tf.tile(scale, [num_rois]),[num_rois,-1])
dup_scale = scale * tf.ones([batch_size, num_rois, 1], dtype = 'float32')
det_per_class = config.DETECTION_PER_CLASS
print()
print(' > build_predictions()')
print(' num_rois : ', num_rois )
print(' norm_input_rois.shape : ', type(norm_input_rois), KB.int_shape(norm_input_rois))
print(' scale.shape : ', type(scale), KB.int_shape(scale), scale.get_shape())
print(' dup_scale.shape : ', type(dup_scale), KB.int_shape(dup_scale), dup_scale.get_shape())
print()
print(' mrcnn_class shape : ', KB.int_shape(mrcnn_class))
print(' mrcnn_bbox.shape : ', KB.int_shape(mrcnn_bbox), mrcnn_bbox.shape )
print(' config image shape : ', config.IMAGE_SHAPE, 'h:',h,'w:',w)
#---------------------------------------------------------------------------
# Build a meshgrid for image id and bbox to use in gathering of bbox delta information
#---------------------------------------------------------------------------
batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size, dtype=tf.int32),
tf.range(num_rois, dtype=tf.int32), indexing = 'ij' )
#------------------------------------------------------------------------------------
# use the argmax of each row to determine the dominating (predicted) class
#------------------------------------------------------------------------------------
pred_classes = tf.argmax( mrcnn_class,axis=-1,output_type = tf.int32)
pred_classes_exp = tf.to_float(tf.expand_dims(pred_classes ,axis=-1))
# print(' pred_classes : ', pred_classes.shape)
# print(pred_classes.eval())
# print(' pred_scores : ', pred_scores.shape ,'\n', pred_scores.eval())
# print(' pred_classes_exp : ', pred_classes_exp.shape)
gather_ind = tf.stack([batch_grid , bbox_grid, pred_classes],axis = -1)
pred_scores = tf.gather_nd(mrcnn_class, gather_ind)
pred_deltas = tf.gather_nd(mrcnn_bbox , gather_ind)
#------------------------------------------------------------------------------------
# 22-05-2018 - stopped using the following code as it was clipping too many bounding
# boxes to 0 or 128 causing zero area generation
##------------------------------------------------------------------------------------
## apply delta refinements to the rois, based on deltas provided by the mrcnn head
##------------------------------------------------------------------------------------
pred_deltas *= config.BBOX_STD_DEV
input_rois = tf.multiply(norm_input_rois , dup_scale )
# compute "refined rois" utils.apply_box_deltas_tf(input_rois, pred_deltas)
# input_rois = utils.apply_box_deltas_tf(input_rois, pred_deltas)
print(' input_rois.shape : ', type(input_rois), KB.int_shape(input_rois), input_rois.get_shape())
# print(' mrcnn_class : ', mrcnn_class.shape, mrcnn_class)
# print(' gather_ind : ', gather_ind.shape, gather_ind)
# print(' pred_scores : ', pred_scores.shape )
# print(' pred_deltas : ', pred_deltas.shape )
# print(' input_rois : ', input_rois.shape, input_rois)
# print(' refined rois: ', refined_rois.shape, refined_rois)
# ## Clip boxes to image window
# # for now we will consider the window [0,0, 128,128]
# # _, _, window, _ = parse_image_meta(image_meta)
# window = tf.constant([[0,0,128,128]], dtype =tf.float32)
# refined_rois = utils.clip_to_window_tf(window, refined_rois)
# print(' refined rois clipped: ', refined_rois.shape, refined_rois)
#------------------------------------------------------------------------------------
##------------------------------------------------------------------------------------
## Build Pred_Scatter: tensor of bounding boxes by Image / Class
##------------------------------------------------------------------------------------
# sequence id is used to preserve the order of rois as passed to this routine
# This may be important in the post matching process but for now it's not being used.
# sequence = tf.ones_like(pred_classes, dtype = tf.int32) * (bbox_grid[...,::-1] + 1)
# sequence = tf.to_float(tf.expand_dims(sequence, axis = -1))
# print(sequence.shape)
# print(sequence.eval())
# pred_array = tf.concat([ refined_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1), sequence], axis=-1)
#------------------------------------------------------------------------------------
pred_array = tf.concat([input_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1)], axis=-1)
scatter_ind = tf.stack([batch_grid , pred_classes, bbox_grid],axis = -1)
pred_scatt = tf.scatter_nd(scatter_ind, pred_array, [batch_size, num_classes, num_rois, pred_array.shape[-1]])
print(' pred_array : ', pred_array.shape)
print(' scatter_ind : ', type(scatter_ind), 'shape', scatter_ind.shape)
print(' pred_scatter : ', pred_scatt.get_shape())
##--------------------------------------------------------------------------------------------
## Apply a per class score normalization
##--------------------------------------------------------------------------------------------
normalizer = tf.reduce_max(pred_scatt[...,-1], axis = -1, keepdims=True)
normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
norm_score = tf.expand_dims(pred_scatt[...,-1]/normalizer, axis = -1)
pred_scatt = tf.concat([pred_scatt, norm_score],axis = -1)
print(' - Add normalized score --\n')
print(' normalizer : ', normalizer.shape)
print(' norm_score : ', norm_score.shape)
print(' pred_scatter : ', pred_scatt.get_shape())
##------------------------------------------------------------------------------------
## sort pred_scatter in each class dimension based on bbox scores (last column)
##------------------------------------------------------------------------------------
_, sort_inds = tf.nn.top_k(pred_scatt[...,-1], k=pred_scatt.shape[2])
# build indexes to gather rows from pred_scatter based on sort order
class_grid, batch_grid, roi_grid = tf.meshgrid(tf.range(num_classes),tf.range(batch_size), tf.range(num_rois))
roi_grid_exp = tf.to_float(tf.expand_dims(roi_grid, axis = -1))
gather_inds = tf.stack([batch_grid , class_grid, sort_inds],axis = -1)
pred_tensor = tf.gather_nd(pred_scatt, gather_inds[...,:det_per_class,:], name = 'pred_tensor')
# append an index to the end of each row --- commented out 30-04-2018
# pred_tensor = tf.concat([pred_tensor, roi_grid_exp], axis = -1)
print(' sort_inds : ', type(sort_inds) , ' shape ', sort_inds.shape)
print(' class_grid : ', type(class_grid) , ' shape ', class_grid.get_shape())
print(' batch_grid : ', type(batch_grid) , ' shape ', batch_grid.get_shape())
print(' roi_grid shape : ', type(roi_grid) , ' shape ', roi_grid.get_shape())
print(' roi_grid_exp : ', type(roi_grid_exp), ' shape ', roi_grid_exp.get_shape())
print(' gather_inds : ', type(gather_inds) , ' shape ', gather_inds.get_shape())
print(' pred_tensor : ', pred_tensor.get_shape())
return pred_tensor
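##----------------------------------------------------------------------------------------------------------------------
## Editor's illustration (not called by the model): the scatter/sort idiom above,
## reproduced with numpy on a toy example. Each ROI row is placed into the slot of
## its predicted class, and the rows of every class block are then sorted by score,
## which is what tf.scatter_nd + tf.nn.top_k + tf.gather_nd achieve per batch
## element in build_predictions(). Shapes and values here are made up.
##----------------------------------------------------------------------------------------------------------------------
def _illustrate_scatter_sort():
    num_classes, num_rois = 3, 4
    pred_classes = np.array([2, 1, 2, 1])                  # argmax class per ROI
    pred_scores  = np.array([0.9, 0.4, 0.6, 0.8])          # score of that class
    rois         = np.arange(num_rois * 4).reshape(num_rois, 4).astype(float)

    # scatter: one (num_rois, 6) block per class, rows left at their ROI index
    scatt = np.zeros((num_classes, num_rois, 6))
    for r in range(num_rois):
        scatt[pred_classes[r], r, :4] = rois[r]
        scatt[pred_classes[r], r, 4]  = pred_classes[r]
        scatt[pred_classes[r], r, 5]  = pred_scores[r]

    # sort each class block by descending score (last column)
    order = np.argsort(-scatt[..., -1], axis=-1)
    return np.take_along_axis(scatt, order[..., None], axis=1)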
##----------------------------------------------------------------------------------------------------------------------
## build_refined_predictions
##----------------------------------------------------------------------------------------------------------------------
def build_refined_predictions(norm_input_rois, mrcnn_class, mrcnn_bbox, config):
'''
Split output_rois by class id, and add class_id and class_score
output:
-------
pred_tensor: [ Batchsz, Num_Classes, Num_Rois, 7: (y1, x1, y2, x2, class_id, class_score, normalized class score)]
y1,x1, y2,x2 are in image dimension format
'''
batch_size = config.BATCH_SIZE
num_classes = config.NUM_CLASSES
h, w = config.IMAGE_SHAPE[:2]
# num_rois = config.TRAIN_ROIS_PER_IMAGE
num_rois = KB.int_shape(norm_input_rois)[1]
scale = tf.constant([h,w,h,w], dtype = tf.float32)
# dup_scale = tf.reshape(tf.tile(scale, [num_rois]),[num_rois,-1])
dup_scale = scale * tf.ones([batch_size, num_rois, 1], dtype = 'float32')
det_per_class = config.DETECTION_PER_CLASS
print()
print(' > build_refined_predictions()')
print(' num_rois : ', num_rois )
print(' norm_input_rois.shape : ', type(norm_input_rois), KB.int_shape(norm_input_rois))
print(' scale.shape : ', type(scale), KB.int_shape(scale), scale.get_shape())
print(' dup_scale.shape : ', type(dup_scale), KB.int_shape(dup_scale), dup_scale.get_shape())
print()
print(' mrcnn_class shape : ', KB.int_shape(mrcnn_class))
print(' mrcnn_bbox.shape : ', KB.int_shape(mrcnn_bbox), mrcnn_bbox.shape )
print(' config image shape : ', config.IMAGE_SHAPE, 'h:',h,'w:',w)
#---------------------------------------------------------------------------
# Build a meshgrid for image id and bbox to use in gathering of bbox delta information
#---------------------------------------------------------------------------
batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size, dtype=tf.int32),
tf.range(num_rois, dtype=tf.int32), indexing = 'ij' )
#------------------------------------------------------------------------------------
# use the argmax of each row to determine the dominating (predicted) class
#------------------------------------------------------------------------------------
pred_classes = tf.argmax( mrcnn_class,axis=-1,output_type = tf.int32)
pred_classes_exp = tf.to_float(tf.expand_dims(pred_classes ,axis=-1))
# print(' pred_classes : ', pred_classes.shape)
# print(pred_classes.eval())
# print(' pred_scores : ', pred_scores.shape ,'\n', pred_scores.eval())
# print(' pred_classes_exp : ', pred_classes_exp.shape)
gather_ind = tf.stack([batch_grid , bbox_grid, pred_classes],axis = -1)
pred_scores = tf.gather_nd(mrcnn_class, gather_ind)
pred_deltas = tf.gather_nd(mrcnn_bbox , gather_ind)
#------------------------------------------------------------------------------------
# 22-05-2018 - stopped using the following code as it was clipping too many bounding
# boxes to 0 or 128 causing zero area generation
##------------------------------------------------------------------------------------
## apply delta refinements to the rois, based on deltas provided by the mrcnn head
##------------------------------------------------------------------------------------
pred_deltas = tf.multiply(pred_deltas, config.BBOX_STD_DEV, name = 'pred_deltas')
input_rois = tf.multiply(norm_input_rois , dup_scale )
## compute "refined rois" utils.apply_box_deltas_tf(input_rois, pred_deltas)
refined_rois = utils.apply_box_deltas_tf(input_rois, pred_deltas)
## Clip boxes to image window
window = tf.constant([[0,0,h,w]], dtype = tf.float32)
refined_rois = utils.clip_to_window_tf( window, refined_rois)
print(' refined rois clipped : ', refined_rois.shape)
print(' input_rois.shape : ', type(input_rois), KB.int_shape(input_rois), input_rois.get_shape())
print(' refined_rois.shape : ', type(refined_rois), KB.int_shape(refined_rois), refined_rois.get_shape())
# print(' mrcnn_class : ', mrcnn_class.shape, mrcnn_class)
# print(' gather_ind : ', gather_ind.shape, gather_ind)
# print(' pred_scores : ', pred_scores.shape )
# print(' pred_deltas : ', pred_deltas.shape )
# print(' input_rois : ', input_rois.shape, input_rois)
# print(' refined rois: ', refined_rois.shape, refined_rois)
# ## Clip boxes to image window
# # for now we will consider the window [0,0, 128,128]
# # _, _, window, _ = parse_image_meta(image_meta)
# window = tf.constant([[0,0,128,128]], dtype =tf.float32)
# refined_rois = utils.clip_to_window_tf(window, refined_rois)
# print(' refined rois clipped: ', refined_rois.shape, refined_rois)
#------------------------------------------------------------------------------------
##------------------------------------------------------------------------------------
## Build Pred_Scatter: tensor of bounding boxes by Image / Class
##------------------------------------------------------------------------------------
# sequence id is used to preserve the order of rois as passed to this routine
# This may be important in the post matching process but for now it's not being used.
# sequence = tf.ones_like(pred_classes, dtype = tf.int32) * (bbox_grid[...,::-1] + 1)
# sequence = tf.to_float(tf.expand_dims(sequence, axis = -1))
# print(sequence.shape)
# print(sequence.eval())
# pred_array = tf.concat([ refined_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1), sequence], axis=-1)
#------------------------------------------------------------------------------------
pred_array = tf.concat([refined_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1)], axis=-1)
scatter_ind = tf.stack([batch_grid , pred_classes, bbox_grid],axis = -1)
pred_scatt = tf.scatter_nd(scatter_ind, pred_array, [batch_size, num_classes, num_rois, pred_array.shape[-1]])
print(' pred_array : ', pred_array.shape)
print(' scatter_ind : ', type(scatter_ind), 'shape', scatter_ind.shape)
print(' pred_scatter : ', pred_scatt.get_shape())
##--------------------------------------------------------------------------------------------
## Apply a per class score normalization
##--------------------------------------------------------------------------------------------
normalizer = tf.reduce_max(pred_scatt[...,-1], axis = -1, keepdims=True)
normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
norm_score = tf.expand_dims(pred_scatt[...,-1]/normalizer, axis = -1)
pred_scatt = tf.concat([pred_scatt, norm_score],axis = -1)
print(' - Add normalized score --\n')
print(' normalizer : ', normalizer.shape)
print(' norm_score : ', norm_score.shape)
print(' pred_scatter : ', pred_scatt.get_shape())
##------------------------------------------------------------------------------------
## sort pred_scatter in each class dimension based on bbox scores (last column)
##------------------------------------------------------------------------------------
_, sort_inds = tf.nn.top_k(pred_scatt[...,-1], k=pred_scatt.shape[2])
# build indexes to gather rows from pred_scatter based on sort order
class_grid, batch_grid, roi_grid = tf.meshgrid(tf.range(num_classes),tf.range(batch_size), tf.range(num_rois))
roi_grid_exp = tf.to_float(tf.expand_dims(roi_grid, axis = -1))
gather_inds = tf.stack([batch_grid , class_grid, sort_inds],axis = -1)
pred_tensor = tf.gather_nd(pred_scatt, gather_inds[...,:det_per_class,:], name = 'pred_refined_tensor')
# append an index to the end of each row --- commented out 30-04-2018
# pred_tensor = tf.concat([pred_tensor, roi_grid_exp], axis = -1)
print(' sort_inds : ', type(sort_inds) , ' shape ', sort_inds.shape)
print(' class_grid : ', type(class_grid) , ' shape ', class_grid.get_shape())
print(' batch_grid : ', type(batch_grid) , ' shape ', batch_grid.get_shape())
print(' roi_grid shape : ', type(roi_grid) , ' shape ', roi_grid.get_shape())
print(' roi_grid_exp : ', type(roi_grid_exp), ' shape ', roi_grid_exp.get_shape())
print(' gather_inds : ', type(gather_inds) , ' shape ', gather_inds.get_shape())
print(' pred_tensor : ', pred_tensor.get_shape())
return pred_tensor , pred_deltas
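##----------------------------------------------------------------------------------------------------------------------
## Editor's note: a small numpy illustration of the box refinement applied above,
## assuming utils.apply_box_deltas_tf follows the standard Mask R-CNN
## parametrization (dy, dx, log(dh), log(dw)) applied to (y1, x1, y2, x2) boxes.
## Not called by the model; shown only to make the refinement step concrete.
##----------------------------------------------------------------------------------------------------------------------
def _illustrate_apply_deltas(box, delta):
    y1, x1, y2, x2 = box
    dy, dx, dh, dw = delta
    h, w = y2 - y1, x2 - x1
    cy, cx = y1 + 0.5 * h, x1 + 0.5 * w
    cy, cx = cy + dy * h, cx + dx * w           # shift the box center
    h, w = h * np.exp(dh), w * np.exp(dw)       # rescale height / width
    return np.array([cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w])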
##----------------------------------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
def build_ground_truth(gt_class_ids, norm_gt_bboxes, config):
batch_size = config.BATCH_SIZE
num_classes = config.NUM_CLASSES
h, w = config.IMAGE_SHAPE[:2]
num_bboxes = KB.int_shape(norm_gt_bboxes)[1]
scale = tf.constant([h,w,h,w], dtype = tf.float32)
# dup_scale = tf.reshape(tf.tile(scale, [num_rois]),[num_rois,-1])
dup_scale = scale * tf.ones([batch_size, num_bboxes, 1], dtype = 'float32')
gt_bboxes = tf.multiply(norm_gt_bboxes , dup_scale )
det_per_class = config.DETECTION_PER_CLASS
# num of bounding boxes is determined by bbox_list.shape[1] instead of config.DETECTION_MAX_INSTANCES
# this routine is used for both input_gt_boxes and target_gt_deltas
if num_bboxes == config.DETECTION_MAX_INSTANCES:
tensor_name = "gt_tensor_max"
else:
tensor_name = "gt_tensor"
print('\n')
print(' > BUILD_GROUND TRUTH_TF()' )
print(' num_bboxes : ', num_bboxes, '(building ', tensor_name , ')' )
print(' gt_class_ids shape : ', gt_class_ids.get_shape(), ' ', KB.int_shape(gt_class_ids))
print(' norm_gt_bboxes.shape : ', norm_gt_bboxes.get_shape() , ' ', KB.int_shape(norm_gt_bboxes))
print(' gt_bboxes.shape : ', gt_bboxes.get_shape() , ' ', KB.int_shape(gt_bboxes))
#---------------------------------------------------------------------------
# use the argmax of each row to determine the dominating (predicted) class
# mask identifies class_ids > 0
#---------------------------------------------------------------------------
gt_classes_exp = tf.to_float(tf.expand_dims(gt_class_ids ,axis=-1))
print(' gt_classes_exp : ', gt_classes_exp.get_shape() )
ones = tf.ones_like(gt_class_ids)
zeros= tf.zeros_like(gt_class_ids)
mask = tf.greater(gt_class_ids , 0)
gt_scores = tf.where(mask, ones, zeros)
# pred_scores = tf.reduce_max(mrcnn_class ,axis=-1, keep_dims=True) # (32,)
gt_scores_exp = tf.to_float(KB.expand_dims(gt_scores, axis=-1))
print(' gt_scores_exp : ', gt_scores_exp.get_shape())
##------------------------------------------------------------------------------------
## Generate GT_ARRAY
## Note that we add gt_scores_exp TWICE so that the shape of gt_array matches
## pred_tensor generated in build_predictions
##
## sequence id is used to preserve the order of rois as passed to this routine
##------------------------------------------------------------------------------------
batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size, dtype=tf.int32),
tf.range(num_bboxes, dtype=tf.int32), indexing = 'ij' )
sequence = gt_scores * (bbox_grid[...,::-1] + 1)
sequence = tf.to_float(tf.expand_dims(sequence, axis = -1))
gt_array = tf.concat([gt_bboxes, gt_classes_exp, gt_scores_exp, gt_scores_exp, sequence ], axis=2)
# print(' batch_grid shape ', batch_grid.get_shape())
# print(' bbox_grid shape ', bbox_grid.get_shape())
# print(' sequence shape ', sequence.get_shape())
##------------------------------------------------------------------------------
## Create indicies to scatter rois out to multi-dim tensor by image id and class
## resulting tensor is batch size x num_classes x num_bboxes x 7 (num columns)
##------------------------------------------------------------------------------
scatter_ind = tf.stack([batch_grid , gt_class_ids, bbox_grid],axis = -1)
gt_scatter = tf.scatter_nd(scatter_ind, gt_array, [batch_size, num_classes, num_bboxes, gt_array.shape[-1] ])
print(' gt_array shape : ', gt_array.shape , gt_array.get_shape())
print(' scatter_ind shape : ', scatter_ind.shape, scatter_ind.get_shape())
print(' tf.shape(gt_array)[-1] : ', gt_array.shape[-1], KB.int_shape(gt_array))
print(' gt_scatter shape : ', gt_scatter.shape , gt_scatter.get_shape())
##-------------------------------------------------------------------------------
## sort in each class dimension based on on sequence number (last column)
## scatter_nd places bboxs in a sparse fashion --- this sort is to place all bboxes
## at the top of the class bbox array
##-------------------------------------------------------------------------------
_ , sort_inds = tf.nn.top_k(tf.abs(gt_scatter[:,:,:,-1]), k=gt_scatter.shape[2])
# build indexes to gather rows from pred_scatter based on sort order
class_grid, batch_grid, bbox_grid = tf.meshgrid(tf.range(num_classes),tf.range(batch_size), tf.range(num_bboxes))
bbox_grid_exp = tf.to_float(tf.expand_dims(bbox_grid, axis = -1))
gather_inds = tf.stack([batch_grid , class_grid, sort_inds],axis = -1)
gt_result = tf.gather_nd(gt_scatter[...,:-1], gather_inds[...,:det_per_class,:] , name = tensor_name)
# append an index to the end of each row --- commented out 30-04-2018
# gt_result = tf.concat([gt_result, bbox_grid_exp], axis = -1)
print(' sort_inds : ', type(sort_inds) , ' shape ', sort_inds.shape)
print(' class_grid : ', type(class_grid) , ' shape ', class_grid.get_shape())
print(' batch_grid : ', type(batch_grid) , ' shape ', batch_grid.get_shape())
print(' gather_inds : ', gather_inds.get_shape())
print(' gt_result.shape : ', KB.int_shape(gt_result), gt_result.get_shape())
return gt_result
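##----------------------------------------------------------------------------------------------------------------------
## Editor's illustration (not called by the model): after scatter_nd the GT rows
## sit at their original ROI positions inside each class block; sorting on the
## |sequence| column simply compacts the non-zero rows to the top of each block,
## as the comments above describe. Shown with numpy on a single class block.
##----------------------------------------------------------------------------------------------------------------------
def _illustrate_gt_compaction(class_block):
    # class_block: (num_bboxes, cols) with zero rows interleaved; the last
    # column holds the sequence number used purely for ordering.
    order = np.argsort(-np.abs(class_block[:, -1]), kind='stable')
    return class_block[order]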
##----------------------------------------------------------------------------------------------------------------------
## INPUTS :
## FCN_HEATMAP [ num_images x height x width x num classes ]
## PRED_HEATMAP_SCORES
##----------------------------------------------------------------------------------------------------------------------
def build_heatmap(in_tensor, config, names = None):
num_detections = config.DETECTION_MAX_INSTANCES
img_h, img_w = config.IMAGE_SHAPE[:2]
batch_size = config.BATCH_SIZE
num_classes = config.NUM_CLASSES
# rois per image is determined by size of input tensor
# detection mode: config.TRAIN_ROIS_PER_IMAGE
# ground_truth : config.DETECTION_MAX_INSTANCES
# strt_cls = 0 if rois_per_image == 32 else 1
rois_per_image = (in_tensor.shape)[2]
print('\n ')
print(' > NEW build_heatmap() for ', names )
print(' in_tensor shape : ', in_tensor.shape)
print(' num bboxes per class : ', rois_per_image )
#-----------------------------------------------------------------------------
## Stack non_zero bboxes from in_tensor into pt2_dense
#-----------------------------------------------------------------------------
# pt2_ind shape is [?, 3].
# pt2_ind[0] corresponds to image_index
# pt2_ind[1] corresponds to class_index
# pt2_ind[2] corresponds to roi row_index
# pt2_dense shape is [?, 6]
# pt2_dense[0] is image index
# pt2_dense[1:4] roi coordinates
# pt2_dense[5] is class id
#-----------------------------------------------------------------------------
pt2_sum = tf.reduce_sum(tf.abs(in_tensor[:,:,:,:-2]), axis=-1)
print(' pt2_sum shape ',pt2_sum.shape)
# print(pt2_sum[0].eval())
pt2_ind = tf.where(pt2_sum > 0)
## replaced the two operations below with the one above - 15-05-2018
# pt2_mask = tf.greater(pt2_sum , 0)
# pt2_ind = tf.where(pt2_mask)
# print(' pt2_mask shape ', pt2_mask.get_shape())
# print(pt2_mask.eval())
# print(' pt2_ind shape ', pt2_ind.get_shape())
# print(pt2_ind.eval())
pt2_dense = tf.gather_nd( in_tensor, pt2_ind)
print(' dense shape ',pt2_dense.get_shape())
#-----------------------------------------------------------------------------
## Build mesh-grid to hold pixel coordinates
#-----------------------------------------------------------------------------
X = tf.range(img_w, dtype=tf.int32)
Y = tf.range(img_h, dtype=tf.int32)
X, Y = tf.meshgrid(X, Y)
# duplicate (repeat) X and Y into a batch_size x rois_per_image tensor
print(' X/Y shapes :', X.get_shape(), Y.get_shape())
ones = tf.ones([tf.shape(pt2_dense)[0] , 1, 1], dtype = tf.int32)
rep_X = ones * X
rep_Y = ones * Y
print(' Ones: ', ones.shape)
print(' ones_exp * X', ones.shape, '*', X.shape, '= ',rep_X.shape)
print(' ones_exp * Y', ones.shape, '*', Y.shape, '= ',rep_Y.shape)
# # stack the X and Y grids
bef_pos = tf.to_float(tf.stack([rep_X,rep_Y], axis = -1))
print(' before transpose ', bef_pos.get_shape())
pos_grid = tf.transpose(bef_pos,[1,2,0,3])
print(' after transpose ', pos_grid.get_shape())
##-----------------------------------------------------------------------------
## Build mean and convariance tensors for Multivariate Normal Distribution
##-----------------------------------------------------------------------------
width = pt2_dense[:,3] - pt2_dense[:,1] # x2 - x1
height = pt2_dense[:,2] - pt2_dense[:,0]
cx = pt2_dense[:,1] + ( width / 2.0)
cy = pt2_dense[:,0] + ( height / 2.0)
means = tf.stack((cx,cy),axis = -1)
covar = tf.stack((width * 0.5 , height * 0.5), axis = -1)
covar = tf.sqrt(covar)
##-----------------------------------------------------------------------------
## Compute Normal Distribution for bounding boxes
##-----------------------------------------------------------------------------
tfd = tf.contrib.distributions
mvn = tfd.MultivariateNormalDiag( loc = means, scale_diag = covar)
prob_grid = mvn.prob(pos_grid)
print(' Prob_grid shape before transpose: ',prob_grid.get_shape())
prob_grid = tf.transpose(prob_grid,[2,0,1])
print(' Prob_grid shape after transpose: ',prob_grid.get_shape())
print(' >> input to MVN.PROB: pos_grid (meshgrid) shape: ', pos_grid.get_shape())
print(' << output probabilities shape:' , prob_grid.get_shape())
##--------------------------------------------------------------------------------
## IMPORTANT: kill distributions of NaN boxes (resulting from bboxes with height/width of zero,
## which cause singular sigma covariance matrices)
##--------------------------------------------------------------------------------
prob_grid = tf.where(tf.is_nan(prob_grid), tf.zeros_like(prob_grid), prob_grid)
##-------------------------------------------------------------------------------------
## scatter out the probability distributions based on class
##-------------------------------------------------------------------------------------
print('\n Scatter out the probability distributions based on class --------------')
gauss_scatt = tf.scatter_nd(pt2_ind, prob_grid, [batch_size, num_classes, rois_per_image, img_w, img_h])
print(' pt2_ind shape : ', pt2_ind.shape)
print(' prob_grid shape : ', prob_grid.shape)
print(' gauss_scatt : ', gauss_scatt.shape) # batch_sz , num_classes, num_rois, image_h, image_w
##-------------------------------------------------------------------------------------
## SUM : Reduce and sum up gauss_scattered by class
##-------------------------------------------------------------------------------------
print('\n Reduce sum based on class ---------------------------------------------')
gauss_sum = tf.reduce_sum(gauss_scatt, axis=2, name='pred_heatmap2')
# force small sums to zero - for now (09-11-18) commented out but could reintroduce based on test results
# gauss_sum = tf.where(gauss_sum < 1e-12, gauss_sum, tf.zeros_like(gauss_sum), name='Where1')
print(' gaussian_sum shape : ', gauss_sum.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_sum) )
### Normalize `gauss_sum` --> `gauss_norm`
#---------------------------------------------------------------------------------------------
# heatmap L2 normalization
# Normalization using the `gauss_sum` (batchsize , num_classes, height, width)
# 17-05-2018 (New method, replaced the previous method that used the transposed gauss sum)
# 17-05-2018 Replaced with normalization across the CLASS axis
#---------------------------------------------------------------------------------------------
# print('\n L2 normalization ------------------------------------------------------')
# gauss_L2norm = KB.l2_normalize(gauss_sum, axis = +1) # normalize along the CLASS axis
# print(' gauss L2 norm : ', gauss_L2norm.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_L2norm) )
#---------------------------------------------------------------------------------------------
##---------------------------------------------------------------------------------------------
## gauss_sum normalization
## normalizer is set to one when the max of class is zero
## this prevents elements of gauss_norm computing to nan
##---------------------------------------------------------------------------------------------
print('\n normalization ------------------------------------------------------')
normalizer = tf.reduce_max(gauss_sum, axis=[-2,-1], keepdims = True)
normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
gauss_norm = gauss_sum / normalizer
# gauss_norm = gauss_sum / tf.reduce_max(gauss_sum, axis=[-2,-1], keepdims = True)
# gauss_norm = tf.where(tf.is_nan(gauss_norm), tf.zeros_like(gauss_norm), gauss_norm, name = 'Where2')
print(' gauss norm : ', gauss_norm.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_norm) )
##--------------------------------------------------------------------------------------------
## generate score based on gaussian using bounding box masks
## NOTE: Score is generated on NORMALIZED gaussian distributions (GAUSS_NORM)
## If want to do this on NON-NORMALIZED, we need to apply it on GAUSS_SUM
##--------------------------------------------------------------------------------------------
# flatten guassian scattered and input_tensor, and pass on to build_bbox_score routine
in_shape = tf.shape(in_tensor)
print(' shape of in_tensor is : ', KB.int_shape(in_tensor))
# in_tensor_flattened = tf.reshape(in_tensor, [-1, in_shape[-1]]) <-- not a good reshape style!!
# replaced with following line:
in_tensor_flattened = tf.reshape(in_tensor, [-1, in_tensor.shape[-1]])
# bboxes = tf.to_int32(tf.round(in_tensor_flattened[...,0:4]))
print(' in_tensor : ', in_tensor.shape)
print(' in_tensor_flattened : ', in_tensor_flattened.shape)
print(' Rois per class : ', rois_per_image)
#--------------------------------------------------------------------------------------------------------------------------
# duplicate GAUSS_NORM <num_roi> times to pass along with bboxes to map_fn function
# Here we have a choice to calculate scores using the GAUSS_SUM (unnormalized) or GAUSS_NORM (normalized)
# after looking at the scores and ratios for each option, I decided to go with the normalized
# as the numbers are larger
#
# Examples>
# Using GAUSS_SUM
# [ 3.660313 3.513489 54.475536 52.747402 1. 0.999997 4.998889 2450. 0.00204 0.444867]
# [ 7.135149 1.310972 50.020126 44.779854 1. 0.999991 4.981591 1892. 0.002633 0.574077]
# [ 13.401865 0. 62.258957 46.636948 1. 0.999971 4.957398 2303. 0.002153 0.469335]
# [ 0. 0. 66.42349 56.123024 1. 0.999908 4.999996 3696. 0.001353 0.294958]
# [ 0. 0. 40.78952 60.404335 1. 0.999833 4.586552 2460. 0.001864 0.406513]
#
# Using GAUSS_NORM: class r-cnn scr
# [ 3.660313 3.513489 54.475536 52.747402 1. 0.999997 1832.9218 2450. 0.748131 0.479411]
# [ 7.135149 1.310972 50.020126 44.779854 1. 0.999991 1659.3965 1892. 0.877059 0.56203 ]
# [ 13.401865 0. 62.258957 46.636948 1. 0.999971 1540.4974 2303. 0.668909 0.428645]
# [ 0. 0. 66.42349 56.123024 1. 0.999908 1925.3267 3696. 0.520922 0.333813]
# [ 0. 0. 40.78952 60.404335 1. 0.999833 1531.321 2460. 0.622488 0.398898]
#
# to change the source, change the following line gauss_norm <--> gauss_sum
#---------------------------------------------------------------------------------------------------------------------------
##--------------------------------------------------------------------------------------------
## Generate scores :
## Testing demonstrated that the NORMALIZED scores generated from using GAUSS_SUM and GAUSS_NORM
## are the same.
## For now we will use GAUSS_SUM score and GAUSS_NORM heatmap. The reason being that the
## raw score generated in GAUSS_SUM is much smaller.
## We may need to change this base on the training results from FCN
##--------------------------------------------------------------------------------------------
##--------------------------------------------------------------------------------------------
## Generate scores using GAUSS_SUM
##--------------------------------------------------------------------------------------------
print('\n Scores from gauss_sum ----------------------------------------------')
temp = tf.expand_dims(gauss_sum, axis =2)
print(' temp expanded : ', temp.shape)
temp = tf.tile(temp, [1,1, rois_per_image ,1,1])
print(' temp tiled shape : ', temp.shape)
temp = KB.reshape(temp, (-1, temp.shape[-2], temp.shape[-1]))
print(' temp flattened : ', temp.shape)
print(' in_tensor_flattened : ', in_tensor_flattened.shape)
scores_from_sum = tf.map_fn(build_mask_routine, [temp, in_tensor_flattened], dtype=tf.float32)
print(' Scores_from_sum (after build mask routine) : ', scores_from_sum.shape) # [(num_batches x num_class x num_rois ), 3]
scores_shape = [in_tensor.shape[0], in_tensor.shape[1], in_tensor.shape[2], -1]
scores_from_sum = tf.reshape(scores_from_sum, scores_shape)
print(' reshaped scores : ', scores_from_sum.shape)
##--------------------------------------------------------------------------------------------
## tf.reduce_max(scores_from_sum[...,-1], axis = -1, keepdims=True) result is [num_imgs, num_class, 1]
##
## This is a regular normalization that moves everything between [0, 1].
## This causes negative values to move to -inf, which is a problem in FCN scoring.
## To address this a normalization between [-1 and +1] was introduced in FCN.
## Not sure how this will work with training tho.
##--------------------------------------------------------------------------------------------
normalizer = tf.reduce_max(scores_from_sum[...,-1], axis = -1, keepdims=True)
normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
norm_score = tf.expand_dims(scores_from_sum[...,-1]/normalizer, axis = -1)
# scores_from_sum = tf.concat([scores_from_sum, norm_score],axis = -1) <-- added to concat down below 18-9-18
'''
##--------------------------------------------------------------------------------------------
## Generate scores using normalized GAUSS_SUM (GAUSS_NORM)
##--------------------------------------------------------------------------------------------
print('==== Scores from gauss_norm ================')
temp = tf.expand_dims(gauss_norm, axis =2)
print(' temp expanded shape : ', temp.shape)
temp = tf.tile(temp, [1,1, rois_per_image ,1,1])
print(' temp tiled shape : ', temp.shape)
temp_reshape = KB.reshape(temp, (-1, temp.shape[-2], temp.shape[-1]))
print(' temp flattened shape : ', temp_reshape.shape)
print(' in_tensor_flattened : ', in_tensor_flattened.shape)
scores_from_norm = tf.map_fn(build_mask_routine_inf, [temp_reshape, in_tensor_flattened], dtype=tf.float32)
print(' Scores_from_norm (after build mask routine) : ', scores_from_norm.shape) # [(num_batches x num_class x num_rois ), 3]
scores_shape = [in_tensor.shape[0], in_tensor.shape[1],in_tensor.shape[2], -1]
scores_from_norm = tf.reshape(scores_from_norm, scores_shape)
print(' reshaped scores : ', scores_from_norm.shape)
##--------------------------------------------------------------------------------------------
## normalize score between [0, 1].
##--------------------------------------------------------------------------------------------
normalizer = tf.reduce_max(scores_from_norm[...,-1], axis = -1, keepdims=True)
normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
print(' normalizer : ',normalizer.shape)
norm_score = tf.expand_dims(scores_from_norm[...,-1]/normalizer, axis = -1)
scores_from_norm = tf.concat([scores_from_norm, norm_score],axis = -1)
print(' norm_score : ', norm_score.shape)
print(' scores_from_norm final: ', scores_from_norm.shape)
'''
##--------------------------------------------------------------------------------------------
## Append `in_tensor` and `scores_from_sum` to form `bbox_scores`
##--------------------------------------------------------------------------------------------
gauss_scores = tf.concat([in_tensor, scores_from_sum, norm_score], axis = -1,name = names[0]+'_scores')
print(' in_tensor : ', in_tensor.shape)
print(' scores_from_sum final : ', scores_from_sum.shape)
print(' norm_score : ', norm_score.shape)
print(' gauss_scores : ', gauss_scores.shape, ' name: ', gauss_scores.name)
print(' gauss_scores (FINAL) : ', gauss_scores.shape, ' Keras tensor ', KB.is_keras_tensor(gauss_scores) )
##--------------------------------------------------------------------------------------------
## //create heatmap Append `in_tensor` and `scores_from_sum` to form `bbox_scores`
##--------------------------------------------------------------------------------------------
# gauss_heatmap = KB.identity(tf.transpose(gauss_sum,[0,2,3,1]), name = names[0])
gauss_sum = tf.transpose(gauss_sum,[0,2,3,1], name = names[0])
gauss_norm = tf.transpose(gauss_norm,[0,2,3,1], name = names[0]+'_norm')
# print(' gauss_heatmap shape : ', gauss_heatmap.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap) )
# print(' gauss_heatmap_norm shape : ', gauss_heatmap_norm.shape,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap_norm) )
# print(gauss_heatmap)
# gauss_heatmap_norm = KB.identity(tf.transpose(gauss_norm,[0,2,3,1]), name = names[0]+'_norm')
# print(' gauss_heatmap_norm final shape : ', gauss_heatmap_norm.shape,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap_norm) )
# gauss_heatmap_L2norm = KB.identity(tf.transpose(gauss_L2norm,[0,2,3,1]), name = names[0]+'_L2norm')
print(' complete')
return gauss_norm, gauss_scores # , gauss_heatmap gauss_heatmap_L2norm # [gauss_sum, gauss_scatt, means, covar]
'''
17-9-2018 -- routine was cloned from chm_layer_inf, and this code was commented out as we dont use L2 normalization
kept for history
# consider the two new columns for reshaping the gaussian_bbox_scores
new_shape = tf.shape(in_tensor)+ [0,0,0, tf.shape(scores)[-1]]
bbox_scores = tf.concat([in_tensor_flattened, scores], axis = -1)
bbox_scores = tf.reshape(bbox_scores, new_shape)
# print(' new shape is : ', new_shape.eval())
print(' in_tensor_flattened : ', in_tensor_flattened.shape)
print(' Scores shape : ', scores.shape) # [(num_batches x num_class x num_rois ), 3]
print(' boxes_scores (rehspaed) : ', bbox_scores.shape)
##--------------------------------------------------------------------------------------------
## Normalize computed score above, and add it to the heatmap_score tensor as last column
##--------------------------------------------------------------------------------------------
scr_L2norm = tf.nn.l2_normalize(bbox_scores[...,-1], axis = -1) # shape (num_imgs, num_class, num_rois)
scr_L2norm = tf.expand_dims(scr_L2norm, axis = -1)
##--------------------------------------------------------------------------------------------
# shape of tf.reduce_max(bbox_scores[...,-1], axis = -1, keepdims=True) is (num_imgs, num_class, 1)
# This is a regular normalization that moves everything between [0, 1].
# This causes negative values to move to -inf, which is a problem in FCN scoring.
# To address this a normalization between [-1 and +1] was introduced in FCN.
# Not sure how this will work with training tho.
##--------------------------------------------------------------------------------------------
scr_norm = bbox_scores[...,-1]/ tf.reduce_max(bbox_scores[...,-1], axis = -1, keepdims=True)
scr_norm = tf.where(tf.is_nan(scr_norm), tf.zeros_like(scr_norm), scr_norm)
#--------------------------------------------------------------------------------------------
# this normalization moves values to [-1, +1] which we use in FCN, but not here.
#--------------------------------------------------------------------------------------------
# reduce_max = tf.reduce_max(bbox_scores[...,-1], axis = -1, keepdims=True)
# reduce_min = tf.reduce_min(bbox_scores[...,-1], axis = -1, keepdims=True) ## epsilon = tf.ones_like(reduce_max) * 1e-7
# scr_norm = (2* (bbox_scores[...,-1] - reduce_min) / (reduce_max - reduce_min)) - 1
scr_norm = tf.where(tf.is_nan(scr_norm), tf.zeros_like(scr_norm), scr_norm)
scr_norm = tf.expand_dims(scr_norm, axis = -1) # shape (num_imgs, num_class, 32, 1)
bbox_scores = tf.concat([bbox_scores, scr_norm, scr_L2norm], axis = -1)
gauss_heatmap = KB.identity(tf.transpose(gauss_sum,[0,2,3,1]), name = names[0])
gauss_heatmap_norm = KB.identity(tf.transpose(gauss_norm,[0,2,3,1]), name = names[0]+'_norm')
gauss_heatmap_L2norm = KB.identity(tf.transpose(gauss_L2norm,[0,2,3,1]), name = names[0]+'_L2norm')
gauss_scores = KB.identity(bbox_scores, name = names[0]+'_scores')
print(' gauss_heatmap final shape : ', gauss_heatmap.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap) )
print(' gauss_scores final shape : ', gauss_scores.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_scores) )
print(' complete')
return gauss_heatmap_norm, gauss_scores, gauss_heatmap,gauss_heatmap_L2norm # [gauss_sum, gauss_scatt, means, covar]
'''
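##----------------------------------------------------------------------------------------------------------------------
## Editor's illustration (not called by the model): what build_heatmap() does for a
## single bounding box, written with numpy/scipy (multivariate_normal is already
## imported at the top of this file). A diagonal-covariance Gaussian centered on
## the box, with per-axis variance width*0.5 / height*0.5, is evaluated over the
## pixel grid, matching the MultivariateNormalDiag call above (assumes a
## non-degenerate box, i.e. width and height > 0).
##----------------------------------------------------------------------------------------------------------------------
def _illustrate_single_box_heatmap(y1, x1, y2, x2, img_h=128, img_w=128):
    width, height = x2 - x1, y2 - y1
    cx, cy = x1 + width / 2.0, y1 + height / 2.0
    scale = np.sqrt([width * 0.5, height * 0.5])            # scale_diag above
    X, Y = np.meshgrid(np.arange(img_w), np.arange(img_h))
    pos = np.stack([X, Y], axis=-1)                         # (img_h, img_w, 2)
    mvn = multivariate_normal(mean=[cx, cy], cov=np.diag(scale ** 2))
    return mvn.pdf(pos)                                     # (img_h, img_w) heatmap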
##----------------------------------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
def build_mask_routine(input_list):
'''
Inputs:
-----------
heatmap_tensor : [ image height, image width ]
input_row : [y1, x1, y2, x2] in absolute (non-normalized) scale
Returns
-----------
gaussian_sum : sum of gaussian heatmap values over the area covered by the bounding box
bbox_area : bounding box area (in pixels)
weighted_sum : gaussian_sum * bbox_score
'''
heatmap_tensor, input_row = input_list
with tf.variable_scope('mask_routine'):
y_extent = tf.range(input_row[0], input_row[2])
x_extent = tf.range(input_row[1], input_row[3])
Y,X = tf.meshgrid(y_extent, x_extent)
bbox_mask = tf.stack([Y,X],axis=2)
mask_indices = tf.reshape(bbox_mask,[-1,2])
mask_indices = tf.to_int32(mask_indices)
mask_size = tf.shape(mask_indices)[0]
mask_updates = tf.ones([mask_size], dtype = tf.float32)
mask = tf.scatter_nd(mask_indices, mask_updates, tf.shape(heatmap_tensor))
# mask_sum = tf.reduce_sum(mask)
mask_applied = tf.multiply(heatmap_tensor, mask, name = 'mask_applied')
bbox_area = tf.to_float((input_row[2]-input_row[0]) * (input_row[3]-input_row[1]))
gaussian_sum = tf.reduce_sum(mask_applied)
# Multiply gaussian_sum by score to obtain weighted sum
weighted_sum = gaussian_sum * input_row[5]
# ratio = gaussian_sum / bbox_area
# ratio = tf.where(tf.is_nan(ratio), 0.0, ratio)
return tf.stack([gaussian_sum, bbox_area, weighted_sum], axis = -1)
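##----------------------------------------------------------------------------------------------------------------------
## Editor's illustration (not called by the model): the triple computed by
## build_mask_routine() is simply the heatmap mass inside the box, the box area,
## and the mass weighted by the ROI score -- shown here with plain numpy slicing
## instead of the scatter_nd mask used in the graph version.
##----------------------------------------------------------------------------------------------------------------------
def _illustrate_mask_score(heatmap, y1, x1, y2, x2, roi_score):
    gaussian_sum = heatmap[int(y1):int(y2), int(x1):int(x2)].sum()
    bbox_area = float((y2 - y1) * (x2 - x1))
    weighted_sum = gaussian_sum * roi_score
    return np.array([gaussian_sum, bbox_area, weighted_sum])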
##----------------------------------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
class CHMLayer(KE.Layer):
'''
Contextual Heatmap Layer (previously CHMLayerTF)
Receives the bboxes, their respective classifications and roi_outputs, and
builds the per_class tensor
Returns:
-------
The CHM layer returns the following tensors:
pred_tensor : [batch, NUM_CLASSES, TRAIN_ROIS_PER_IMAGE , (index, class_prob, y1, x1, y2, x2, class_id, old_idx)]
in normalized coordinates
pred_cls_cnt: [batch, NUM_CLASSES]
gt_tensor: [batch, NUM_CLASSES, DETECTION_MAX_INSTANCES, (index, class_prob, y1, x1, y2, x2, class_id, old_idx)]
gt_cls_cnt: [batch, NUM_CLASSES]
Note: Returned arrays might be zero padded if not enough target ROIs.
'''
def __init__(self, config=None, **kwargs):
super().__init__(**kwargs)
print('--------------------------------')
print('>>> CHM Layer ')
print('--------------------------------')
self.config = config
def call(self, inputs):
print(' > CHMLayer Call() ', len(inputs))
# mrcnn_class , mrcnn_bbox, output_rois, gt_class_ids, gt_bboxes, tgt_class_ids, tgt_deltas = inputs
mrcnn_class , mrcnn_bbox, output_rois, tgt_class_ids, tgt_bboxes = inputs
print(' mrcnn_class.shape :', mrcnn_class.shape, KB.int_shape( mrcnn_class ))
print(' mrcnn_bbox.shape :', mrcnn_bbox.shape, KB.int_shape( mrcnn_bbox ))
print(' output_rois.shape :', output_rois.shape, KB.int_shape( output_rois ))
print(' tgt_class_ids.shape :', tgt_class_ids.shape, KB.int_shape(tgt_class_ids ))
print(' tgt_bboxes.shape :', tgt_bboxes.shape, KB.int_shape( tgt_bboxes ))
# print(' tgt_deltas.shape :', tgt_deltas.shape, KB.int_shape( tgt_deltas ))
pred_tensor = build_predictions(output_rois, mrcnn_class, mrcnn_bbox, self.config)
pr_hm_norm, pr_hm_scores = build_heatmap(pred_tensor, self.config, names = ['pred_heatmap'])
# pred_cls_cnt = KL.Lambda(lambda x: tf.count_nonzero(x[:,:,:,-1],axis = -1), name = 'pred_cls_count')(pred_tensor)
pred_refined_tensor, pred_deltas = build_refined_predictions(output_rois, mrcnn_class, mrcnn_bbox, self.config)
pr_ref_hm_norm, pr_ref_hm_scores = build_heatmap(pred_refined_tensor, self.config, names = ['pred_refined_heatmap'])
gt_tensor = build_ground_truth (tgt_class_ids, tgt_bboxes, self.config)
gt_hm_norm, gt_hm_scores = build_heatmap(gt_tensor, self.config, names = ['gt_heatmap'])
# gt_cls_cnt = KL.Lambda(lambda x: tf.count_nonzero(x[:,:,:,-1],axis = -1), name = 'gt_cls_count')(gt_tensor)
print()
# print(' pred_cls_cnt shape : ', pred_cls_cnt.shape , 'Keras tensor ', KB.is_keras_tensor(pred_cls_cnt) )
# print(' gt_cls_cnt shape : ', gt_cls_cnt.shape , 'Keras tensor ', KB.is_keras_tensor(gt_cls_cnt) )
print(' pred_heatmap : ', pr_hm_norm.shape , 'Keras tensor ', KB.is_keras_tensor(pr_hm_norm))
print(' pred_heatmap_scores: ', pr_hm_scores.shape , 'Keras tensor ', KB.is_keras_tensor(pr_hm_scores))
print(' pred_refined_heatmap : ', pr_ref_hm_norm.shape , 'Keras tensor ', KB.is_keras_tensor(pr_ref_hm_norm))
        print(' pred_refined_heatmap_scores: ', pr_ref_hm_scores.shape , 'Keras tensor ', KB.is_keras_tensor(pr_ref_hm_scores))
print(' gt_heatmap : ', gt_hm_norm.shape , 'Keras tensor ', KB.is_keras_tensor(gt_hm_norm))
print(' gt_heatmap_scores : ', gt_hm_scores.shape , 'Keras tensor ', KB.is_keras_tensor(gt_hm_scores))
print(' complete')
return [ pr_hm_norm, pr_ref_hm_norm, gt_hm_norm , pr_hm_scores, pr_ref_hm_scores, gt_hm_scores, pred_refined_tensor, pred_deltas]
# pred_tensor , gt_tensor]
def compute_output_shape(self, input_shape):
# may need to change dimensions of first return from IMAGE_SHAPE to MAX_DIM
return [
(None, self.config.IMAGE_SHAPE[0], self.config.IMAGE_SHAPE[1], self.config.NUM_CLASSES) # pred_heatmap_norm
, (None, self.config.IMAGE_SHAPE[0], self.config.IMAGE_SHAPE[1], self.config.NUM_CLASSES) # pred_refined_heatmap_norm
, (None, self.config.IMAGE_SHAPE[0], self.config.IMAGE_SHAPE[1], self.config.NUM_CLASSES) # gt_heatmap_norm
, (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,11) # pred_heatmap_scores
, (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,11) # pred_refined_heatmap_scores
            , (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,11) # gt_heatmap_scores
# ----extra stuff for now ---------------------------------------------------------------------------------------------------
, (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,7) # pred_refined_tensor
, (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,4) # pred_deltas
# , (None, self.config.NUM_CLASSES , self.config.TRAIN_ROIS_PER_IMAGE ,10) # pred_heatmap_scores (expanded)
            # , (None, self.config.NUM_CLASSES , self.config.DETECTION_MAX_INSTANCES ,10) # gt_heatmap_scores  (expanded)
# , (None, self.config.NUM_CLASSES , self.config.TRAIN_ROIS_PER_IMAGE , 7) # pred_tensor
# , (None, self.config.NUM_CLASSES , self.config.DETECTION_MAX_INSTANCES , 7) # gt_tensor (expanded)
]
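##----------------------------------------------------------------------------------------------------------------------
## Hedged wiring sketch (illustration only): none of the tensors passed in below are defined in
## this file -- they are assumed to come from the surrounding MRCNN graph. The output tuple
## order mirrors the return statement of CHMLayer.call() above.
##----------------------------------------------------------------------------------------------------------------------
def _chm_layer_wiring_example(config, mrcnn_class, mrcnn_bbox, output_rois, tgt_class_ids, tgt_bboxes):
    outputs = CHMLayer(config=config, name='chm_layer')(
        [mrcnn_class, mrcnn_bbox, output_rois, tgt_class_ids, tgt_bboxes])
    (pr_hm_norm, pr_ref_hm_norm, gt_hm_norm,
     pr_hm_scores, pr_ref_hm_scores, gt_hm_scores,
     pred_refined_tensor, pred_deltas) = outputs
    return pr_hm_norm, gt_hm_norm, pr_hm_scores, gt_hm_scores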
##----------------------------------------------------------------------------------------------------------------------
##
##
##
##----------------------------------------------------------------------------------------------------------------------
##----------------------------------------------------------------------------------------------------------------------
## removed 17-05-2018 and replaced with version that calculates heatmap scores using the gauss_sum tensor instead
## of the gauss_scatter tensor -- this was done since the gauss_sum matches the output we have out of FCN
##----------------------------------------------------------------------------------------------------------------------
"""
def build_heatmap_old_2(in_tensor, config, names = None):
num_detections = config.DETECTION_MAX_INSTANCES
img_h, img_w = config.IMAGE_SHAPE[:2]
batch_size = config.BATCH_SIZE
num_classes = config.NUM_CLASSES
print('\n ')
print(' > NEW build_heatmap() for ', names )
    print(' original in_tensor shape : ', in_tensor.shape)
# rois per image is determined by size of input tensor
# detection mode: config.TRAIN_ROIS_PER_IMAGE
# ground_truth : config.DETECTION_MAX_INSTANCES
rois_per_image = (in_tensor.shape)[2]
# strt_cls = 0 if rois_per_image == 32 else 1
print(' num of bboxes per class is : ', rois_per_image )
#-----------------------------------------------------------------------------
## Stack non_zero bboxes from in_tensor into pt2_dense
#-----------------------------------------------------------------------------
# pt2_ind shape is [?, 3].
# pt2_ind[0] corresponds to image_index
# pt2_ind[1] corresponds to class_index
# pt2_ind[2] corresponds to roi row_index
# pt2_dense shape is [?, 6]
# pt2_dense[0] is image index
    # pt2_dense[1:4] roi coordinates
# pt2_dense[5] is class id
#-----------------------------------------------------------------------------
pt2_sum = tf.reduce_sum(tf.abs(in_tensor[:,:,:,:-2]), axis=-1)
print(' pt2_sum shape ',pt2_sum.shape)
# print(pt2_sum[0].eval())
pt2_ind = tf.where(pt2_sum > 0)
## replaced the two operations below with the one above - 15-05-2018
# pt2_mask = tf.greater(pt2_sum , 0)
# pt2_ind = tf.where(pt2_mask)
# print(' pt2_mask shape ', pt2_mask.get_shape())
# print(pt2_mask.eval())
# print(' pt2_ind shape ', pt2_ind.get_shape())
# print(pt2_ind.eval())
pt2_dense = tf.gather_nd( in_tensor, pt2_ind)
print(' dense shape ',pt2_dense.get_shape())
#-----------------------------------------------------------------------------
## Build mesh-grid to hold pixel coordinates
#-----------------------------------------------------------------------------
X = tf.range(img_w, dtype=tf.int32)
Y = tf.range(img_h, dtype=tf.int32)
X, Y = tf.meshgrid(X, Y)
# duplicate (repeat) X and Y into a batch_size x rois_per_image tensor
print(' X/Y shapes :', X.get_shape(), Y.get_shape())
ones = tf.ones([tf.shape(pt2_dense)[0] , 1, 1], dtype = tf.int32)
rep_X = ones * X
rep_Y = ones * Y
print(' Ones: ', ones.shape)
print(' ones_exp * X', ones.shape, '*', X.shape, '= ',rep_X.shape)
print(' ones_exp * Y', ones.shape, '*', Y.shape, '= ',rep_Y.shape)
# # stack the X and Y grids
bef_pos = tf.to_float(tf.stack([rep_X,rep_Y], axis = -1))
    print(' before transpose ', bef_pos.get_shape())
pos_grid = tf.transpose(bef_pos,[1,2,0,3])
print(' after transpose ', pos_grid.get_shape())
#-----------------------------------------------------------------------------
    ## Build mean and covariance tensors for Multivariate Normal Distribution
#-----------------------------------------------------------------------------
width = pt2_dense[:,3] - pt2_dense[:,1] # x2 - x1
height = pt2_dense[:,2] - pt2_dense[:,0]
cx = pt2_dense[:,1] + ( width / 2.0)
cy = pt2_dense[:,0] + ( height / 2.0)
means = tf.stack((cx,cy),axis = -1)
covar = tf.stack((width * 0.5 , height * 0.5), axis = -1)
covar = tf.sqrt(covar)
tfd = tf.contrib.distributions
mvn = tfd.MultivariateNormalDiag( loc = means, scale_diag = covar)
prob_grid = mvn.prob(pos_grid)
    print(' Prob_grid shape before transpose: ',prob_grid.get_shape())
    prob_grid = tf.transpose(prob_grid,[2,0,1])
    print(' Prob_grid shape after transpose: ',prob_grid.get_shape())
print(' >> input to MVN.PROB: pos_grid (meshgrid) shape: ', pos_grid.get_shape())
print(' << output probabilities shape:' , prob_grid.get_shape())
#--------------------------------------------------------------------------------
    ## IMPORTANT: kill distributions of NaN boxes (resulting from bboxes with height/width of zero,
    ## which cause singular sigma cov matrices)
#--------------------------------------------------------------------------------
prob_grid = tf.where(tf.is_nan(prob_grid), tf.zeros_like(prob_grid), prob_grid)
## scatter out the probability distributions based on class --------------------------
print('\n Scatter out the probability distributions based on class --------------')
gauss_scatt = tf.scatter_nd(pt2_ind, prob_grid, [batch_size, num_classes, rois_per_image, img_w, img_h])
print(' pt2_ind shape : ', pt2_ind.shape)
print(' prob_grid shape : ', prob_grid.shape)
print(' gauss_scatt : ', gauss_scatt.shape) # batch_sz , num_classes, num_rois, image_h, image_w
## heatmap: sum gauss_scattered based on class ---------------------------------------
print('\n Reduce sum based on class ---------------------------------------------')
gauss_sum = tf.reduce_sum(gauss_scatt, axis=2, name='pred_heatmap2')
gauss_sum = tf.where(gauss_sum > 1e-12, gauss_sum, tf.zeros_like(gauss_sum))
print(' gaussian_sum shape : ', gauss_sum.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_sum) )
# reshape to [img, class, height, width] ---> [img, height, width, class]
gauss_sum = tf.transpose(gauss_sum,[0,2,3,1], name = names[0])
print(' gaussian sum type/name : ', type(gauss_sum), gauss_sum.name, names[0])
print(' gaussian_sum shape : ', gauss_sum.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_sum) )
## heatmap: L2 normalization -----------------------------------------------------------------
print('\n L2 normalization ------------------------------------------------------')
heatmap_shape=KB.shape(gauss_sum)
print(' gauss-sum.shape:', gauss_sum.shape, 'tf.shape :', tf.shape(gauss_sum))
gauss_flatten = KB.reshape(gauss_sum, (heatmap_shape[0], -1, heatmap_shape[-1]) ) # reshape to image, class
output_norm = KB.l2_normalize(gauss_flatten, axis = 1)
gauss_norm = KB.identity(KB.reshape(output_norm, heatmap_shape ) , name = names[0]+'_norm')
print(' gauss_flatten : ', KB.int_shape(gauss_flatten) , gauss_flatten.get_shape(),' Keras tensor ', KB.is_keras_tensor(gauss_flatten) )
print(' gauss_norm1 : ', KB.int_shape(output_norm) , output_norm.get_shape(),' Keras tensor ', KB.is_keras_tensor(output_norm) )
print(' gauss_norm final : ', KB.int_shape(gauss_norm) , gauss_norm.get_shape(),' Keras tensor ', KB.is_keras_tensor(gauss_norm) )
##--------------------------------------------------------------------------------------------
    ## generate score based on gaussian using bounding box masks
## NOTE: Score is generated on NON-NORMALIZED gaussian distributions
## If want to do this on normalized, we need to apply normalization to gauss_scatt first
##--------------------------------------------------------------------------------------------
    # flatten gaussian scattered and input_tensor, and pass on to build_bbox_score routine
in_tensor_flattened = tf.reshape(in_tensor, [-1,6])
bboxes = tf.to_int32(tf.round(in_tensor_flattened[...,0:4]))
print(' in_tensor_flattened is ', in_tensor_flattened.shape)
print(' boxes shape ', bboxes.shape)
# DONT NEED THIS - was put there to try to avoid computing sum/area for zero bboxes.
# kept as reference for future generations .....
# bbox_sum = tf.reduce_max(in_tensor[...,0:3], axis = -1, name = 'bbox_sum')
# print(' bbox sum shape: ', bbox_sum.shape)
gauss_scatt_shape = KB.int_shape(gauss_scatt)
gauss_scatt_reshape = KB.reshape(gauss_scatt, (-1, gauss_scatt_shape[-2], gauss_scatt_shape[-1]))
print(' gaussian scatter shape : ', gauss_scatt_shape)
print(' gaussian scatter reshaped : ', gauss_scatt_reshape.shape)
# ones_map = tf.ones([384,128,128])
scores = tf.map_fn(build_mask_routine, [gauss_scatt_reshape, bboxes], dtype=tf.float32)
new_shape = tf.shape(in_tensor)+ [0,0,0,tf.shape(scores)[-1]]
gaussian_bbox_scores = tf.concat([in_tensor_flattened, scores], axis = -1)
print(' Scatter Flattened shape : ', in_tensor_flattened.shape)
print(' Scores shape : ', scores.shape)
print(' gaussian_boxes_scores initial shape: ', gaussian_bbox_scores.shape)
gaussian_bbox_scores = tf.reshape(gaussian_bbox_scores, new_shape, name = names[0]+'_scores')
##--------------------------------------------------------------------------------------------
## Normalize computed score above, and add it to the heatmap_score tensor as last column
##--------------------------------------------------------------------------------------------
scr = gaussian_bbox_scores[...,-2]/gaussian_bbox_scores[...,-1]
scr = tf.where(tf.is_nan(scr), tf.zeros_like(scr), scr)
scr_norm = tf.nn.l2_normalize(scr, axis = -1)
scr_norm = tf.expand_dims(scr_norm, axis = -1)
gaussian_bbox_scores = tf.concat([gaussian_bbox_scores, scr_norm], axis = -1)
print(' gaussian_bbox_scores final shape : ', gaussian_bbox_scores.shape)
print(' complete')
return gauss_norm, gaussian_bbox_scores # [gauss_sum, gauss_scatt, means, covar]
"""
| python |
from django.contrib import admin
from forums.models import Category
from guardian.admin import GuardedModelAdmin
class CategoryAdmin(GuardedModelAdmin):
list_display = ('title', 'parent', 'ordering')
list_display_links = ('title',)
admin.site.register(Category, CategoryAdmin)
| python |
# transaction_model.py
#
# ATM MVC program
#
# Team alroda
#
# Aldrich Huang A01026502 2B
# Robert Janzen A01029341 2B
# David Xiao A00725026 2B
import datetime
import os
class TransactionModel:
_TRANSACTION_COLUMNS = 'date,uid,account_type,account_number,transaction_type,amount'
def __init__(self):
pass
    def createNewEntry(self, uid, account_type, account_num, transaction_type, amount, date=None):
"""
Creates a new transaction log entry to be saved to file
Args:
uid:
UID of the user that owns the account that the transaction is initiated from
account_type:
The type of the account
account_num:
The account number of the account
transaction_type:
The type of transaction that was done
amount:
The dollar value involved in the transaction
date:
                The time and date when the transaction took place; defaults to the current time when omitted
Returns:
None
"""
        if date is None:
            date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        row = '{0},{1},{2},{3},{4},{5}'.format(date, uid, account_type, account_num, transaction_type, str(float(amount)))
self.saveTransaction(uid, row)
def saveTransaction(self, uid, row):
"""
Saves the new entry to the transaction log file
Args:
uid:
UID of the user who owns that account that initiated the transaction
row:
String containing the new entry to be added to the transaction log file
Returns:
None
"""
filename = 'model/logs/'+str(uid)+'-transactions.csv'
try:
if os.path.getsize(filename) > 0:
with open(filename, 'a') as csv_file:
csv_file.write('\n'+row)
else:
with open(filename, 'w') as csv_file:
output = self._TRANSACTION_COLUMNS + '\n' + row
csv_file.write(output)
except OSError:
with open(filename, 'w') as csv_file:
output = self._TRANSACTION_COLUMNS + '\n' + row
csv_file.write(output)
    def createNewActionEntry(self, uid, account_type, account_num, action_type, date=None):
"""
Creates a new account action entry in the transaction log
Args:
uid:
                UID of the user who owns the account
account_type:
type of the account
account_num:
account number of the account
action_type:
string describing the type of action done to the account
date:
                date of the action; defaults to the current time when omitted
Returns:
None
"""
        if date is None:
            date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        row = '{0},{1},{2},{3},{4}'.format(date, uid, account_type, account_num, action_type)
self.saveTransaction(uid, row)
def displayReport(self, uid):
"""
Displays a report of all recorded transactions for accounts owned by the User with the ID uid. The report
is displayed in the CLI
Args:
uid:
UID of the user
Returns:
            A list of report sections (one per account), or an error message string if the
            report could not be generated
"""
filename = 'model/logs/' + str(uid) + '-transactions.csv'
report_content = [['Comprehensive report for user no. ' + uid]]
try:
if os.path.getsize(filename) > 0:
with open(filename, 'r') as csv_file:
transaction_dic = {}
csv_file.readline()
full_file = csv_file.readlines()
account_list = []
for line in full_file:
line_data = line.rstrip('\n').split(',')
                        if line_data[3] in transaction_dic:
transaction_dic[line_data[3]].append(', '.join(line_data))
else:
transaction_dic[line_data[3]] = [', '.join(line_data)]
account_list = list(transaction_dic.keys())
account_list.sort(key=str)
for account_num in account_list:
acc_specific_entry = [('Transactions for account no.' + account_num)]
for entry in transaction_dic[account_num]:
acc_specific_entry.append(entry)
report_content.append(acc_specific_entry)
return report_content
        except Exception:
return 'Error Generating Report...'
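# Hedged usage sketch (illustration only; paths and values are made up): createNewEntry appends
# one CSV row per transaction to model/logs/<uid>-transactions.csv, e.g.
#
#   model = TransactionModel()
#   model.createNewEntry(uid=1, account_type='chequing', account_num=100001,
#                        transaction_type='withdrawal', amount=20)
#   # resulting row: 2024-01-01 12:00:00,1,chequing,100001,withdrawal,20.0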
if __name__ == '__main__':
test = TransactionModel()
test.displayReport('1')
| python |
#!/usr/bin/python3.5
"""
Command line utility to extract basic statistics from a gpx file
"""
import pdb
import sys as mod_sys
import logging as mod_logging
import math as mod_math
import gpxpy as mod_gpxpy
#hack for heart rate
import xml.etree.ElementTree as ET
#heart rate statistics
import numpy as np
import os
import sys
#mod_logging.basicConfig(level=mod_logging.DEBUG,
# format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
header = 'id, duration, avgHeartRate, maxHeartRate, dateOfTraining, elevation, uphill, downhill, length_2d, length_3d, moving_time, stopped_time'
def format_time(time_s):
if not time_s:
return 'n/a'
minutes = mod_math.floor(time_s / 60.)
hours = mod_math.floor(minutes / 60.)
return '%s:%s:%s' % (str(int(hours)).zfill(2), str(int(minutes % 60)).zfill(2), str(int(time_s % 60)).zfill(2))
def print_gpx_part_info(gpx_part, csvFile, heartRate, athleteId):
#multivariable returns
start_time, end_time = gpx_part.get_time_bounds()
moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx_part.get_moving_data()
uphill, downhill = gpx_part.get_uphill_downhill()
duration = gpx_part.get_duration()
avgHeartRate = round(np.mean(heartRate), 2)
maxHeartRate = np.max(heartRate)
dateOfTraining = start_time
elevation = round(uphill + downhill, 2)
uphill = round(uphill, 2)
downhill = round(downhill, 2)
length_2d = round(gpx_part.length_2d(), 2)
length_3d = round(gpx_part.length_3d(), 2)
#id is written seperately
data = [
duration,
avgHeartRate,
maxHeartRate,
dateOfTraining,
elevation,
uphill,
downhill,
length_2d,
length_3d,
moving_time,
stopped_time
]
csvFile.write('\n' + athleteId)
for d in data:
csvFile.write(", " + str(d))
def print_gpx_info(gpx, gpx_file, csvFile, heartRate, athleteId):
print('File: %s' % gpx_file)
if gpx.name:
print(' GPX name: %s' % gpx.name)
if gpx.description:
print(' GPX description: %s' % gpx.description)
if gpx.author_name:
print(' Author: %s' % gpx.author_name)
if gpx.author_email:
print(' Email: %s' % gpx.author_email)
    print_gpx_part_info(gpx, csvFile, heartRate, athleteId)
'''for track_no, track in enumerate(gpx.tracks):
for segment_no, segment in enumerate(track.segments):
print(' Track #%s, Segment #%s' % (track_no, segment_no))
print_gpx_part_info(segment, indentation=' ')'''
def parseHeartRate(file):
hrs = []
tree = ET.parse(file)
root = tree.getroot()
for hr in root.iter('{http://www.garmin.com/xmlschemas/TrackPointExtension/v1}hr'):
hrs.append(int(hr.text))
return hrs
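# Hedged illustration (made-up coordinates, not executed): parseHeartRate() above collects Garmin
# TrackPointExtension heart-rate samples, i.e. trackpoints shaped roughly like:
#
#   <trkpt lat="46.0" lon="14.5">
#     <extensions>
#       <gpxtpx:TrackPointExtension>
#         <gpxtpx:hr>142</gpxtpx:hr>
#       </gpxtpx:TrackPointExtension>
#     </extensions>
#   </trkpt>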
def run(gpx_files, csvFilePath, athleteId):
if not gpx_files:
print('No GPX files given')
mod_sys.exit(1)
csvFile = open(csvFilePath, "w")
csvFile.write(header)
i = 0
fLen = str(len(gpx_files))
for gpx_file in gpx_files:
sys.stdout.write("\rProgressing file " + str(i) + " out of " + fLen + " ")
#sys.stdout.write("\rDoing thing %i % i" % i, fLen)
sys.stdout.flush()
i += 1
try:
heartRate = parseHeartRate(gpx_file)
if not heartRate:
continue
gpx = mod_gpxpy.parse(open(gpx_file))
print_gpx_part_info(gpx, csvFile, heartRate, athleteId)
except Exception as e:
mod_logging.exception(e)
print('Error processing %s' % gpx_file)
            mod_sys.exit(1)
    csvFile.close()
def parserMain(directoryPath, outDirectoryPath):
for dir in os.listdir(directoryPath):
filePaths = os.listdir(directoryPath + dir)
for i in range(0, len(filePaths)):
filePaths[i] = directoryPath + dir + '/' + filePaths[i]
run(filePaths, outDirectoryPath + dir + ".csv", dir)
def joinFiles(dirPath, outFilePath):
outFile = open(outFilePath, "w")
outFile.write(header + '\n')
for fileName in os.listdir(dirPath):
with open(dirPath + fileName) as f:
f.readline() #throw away first line
content = f.readline()
while content != "":
outFile.write(content)
content = f.readline()
parserMain("../Data/Sport/", "../Data/Parsed/")
joinFiles('../Data/Parsed/', '../Data/summed.csv')
| python |
import matplotlib.pyplot as plt, sys
sys.path.insert(0, '..')
from Louis.misc import *
from Louis.ARC_data.objects import *
from Louis.grids import *
from Louis.unifying import *
def show_pb(name, n=17):
l, i = reversed(pickle_read(name)), 0
for pb, p, c_type in l:
i += 1
if i % n == 0:
print(p)
pb_to_grid(pb)
display_pb(pb, 'Solution : '+str(p)+'\nCohesion type : '+str(c_type))
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
plt.show(block=False)
if input() == '0': break
plt.close('all')
if __name__ == '__main__':
show_pb('../../espace partage remy louis/Louis/mutation.pickle', 1)
# l = pickle_read('../../espace partage remy louis/Louis/mutation.pickle')
# ans = [0] * 11
# i = 0
# j = 0
# for _, p, _ in l:
# if contains(p, 'singleton'): i += 1
# if contains(p, 'car'): j += 1
# _, d = analyze_var(p)
# ans[d] += 1
# print(ans, i, j)
# l = pickle_read('../../espace partage remy louis/diff_I_rand_5_25_10000.pickle')
# l = pickle_read('data_for_nn/problems/diff_I_5_1000.pickle')
# i = 0
# for pb, p, c_type in l:
# if i % 5 == 0:
# display_pb(pb, 'Solution : '+str(p)+'\nCohesion type : '+cohesion_types_corresp[c_type])
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
# plt.show(block=False)
# if input() == '0':
# break
# plt.close('all')
# i += 1
# mut_l = pickle_read('data_for_nn/problems/mutation_10mutants_1grid.pickle')
# for pb, p, c_type in mut_l:
# display_pb(pb, 'Solution : '+str(p)+'\nCohesion type : '+str(c_type))
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
# plt.show(block=False)
# if input() == '0':
# break
    # plt.close('all')
| python |
import re
import csv
from io import BytesIO
from zipfile import ZipFile
import requests
from ._version import __version__
URLHAUS_API_URL = 'https://urlhaus.abuse.ch/downloads/'
REGEX_CSV_HEADER = re.compile(r'^#\s((?:[a-z_]+,)+(?:[a-z_]+))\r', re.MULTILINE)
REGEX_HOSTFILE_DOMAIN = re.compile(r'^127\.0\.0\.1\t(.+)\r', re.MULTILINE)
class URLhaus(object):
def __init__(self, api_url=URLHAUS_API_URL):
'''
Prepare the URLhaus API
'''
# Save the API URL
self._api_url = api_url
# Get and prepare the session that will be used for all API calls
self._session = requests.session()
self._session.headers.update({
'User-Agent': f'abuse_ch-urlhaus-api/{__version__}',
})
def _request(self, path, **kwargs):
'''
        Internal method to handle API requests. Raises for HTTP errors, transparently
        extracts single-file zip responses, and returns the decoded text
'''
# Compose the full request URL
req_url = f'{self._api_url}{path}'
# Make the request
resp = self._session.get(req_url, **kwargs)
resp.raise_for_status()
# Determine what to do based on response content-type
content_type = resp.headers.get('content-type', None)
# Is it a zip?
if content_type == 'application/zip':
# Attempt to open the response as a zip file
sample_zip = ZipFile(BytesIO(resp.content))
# Get the file list and ensure it's just a single file
file_list = sample_zip.infolist()
assert len(file_list) == 1
# Extract the one file
resp = sample_zip.read(file_list[0].filename)
# Otherwise we're dealing with the raw content
else:
resp = resp.content
# Return the result
return resp.decode()
def _parse_csv(self, content):
# Attempt to find the CSV header
csv_header = REGEX_CSV_HEADER.search(content)
# We found the header
if csv_header is not None:
# Get the CSV columns
csv_columns = tuple(csv_header[1].split(','))
# Get the CSV data (minus comment lines)
csv_data = [row for row in content.splitlines() if not row.startswith('#')]
# Convert the CSV column names and data into a list of dicts
content = list(csv.DictReader(csv_data, fieldnames=csv_columns))
# Return the result
return content
def get_csv_urls_all(self, raw=False):
# Make the request
resp = self._request('csv/')
# Not returning a raw result? Parse it
if not raw:
resp = self._parse_csv(resp)
# Return the result
return resp
def get_csv_urls_recent(self, raw=False):
# Make the request
resp = self._request('csv_recent/')
# Not returning a raw result? Parse it
if not raw:
resp = self._parse_csv(resp)
# Return the result
return resp
def get_csv_urls_online(self, raw=False):
# Make the request
resp = self._request('csv_online/')
# Not returning a raw result? Parse it
if not raw:
resp = self._parse_csv(resp)
# Return the result
return resp
def get_text_urls_all(self):
# Make the request and return the result
resp = self._request('text/')
return resp
def get_text_urls_recent(self):
# Make the request and return the result
resp = self._request('text_recent/')
return resp
def get_text_urls_online(self):
# Make the request and return the result
resp = self._request('text_online/')
return resp
def get_hostfile(self):
# Make the request and return the result
resp = self._request('hostfile/')
return resp
def get_domains(self):
# Get the hostfile
hostfile = self.get_hostfile()
# Get the domains from the hostfile
domains = REGEX_HOSTFILE_DOMAIN.findall(hostfile)
# Return the result
return domains
    def get_payloads(self, raw=False):
        # Make the request
        resp = self._request('payloads/')
        # The payloads feed is CSV as well; parse it unless a raw result was requested
        # (_parse_csv falls back to the raw content if no CSV header is found)
        if not raw:
            resp = self._parse_csv(resp)
        # Return the result
        return resp
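# Hedged usage sketch (illustration only; needs network access, and the column names shown are
# whatever the current URLhaus CSV header provides -- 'url' and 'url_status' are assumptions):
#
#   client = URLhaus()
#   recent = client.get_csv_urls_recent()        # list of dicts keyed by the CSV header columns
#   for entry in recent[:5]:
#       print(entry['url'], entry['url_status'])
#   domains = client.get_domains()               # plain list of domains from the hostfile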
| python |
from enum import Enum
from typing import List, NewType
TeamID = NewType("TeamID", int)
class RoleType(Enum):
PLANNER = 0
OPERATOR = 1
LINKER = 2
KEYFARMING = 3
CLEANER = 4
FIELD_AGENT = 5
ITEM_SPONSOR = 6
KEY_TRANSPORT = 7
RECHARGING = 8
SOFTWARE_SUPPORT = 9
ANOMALY_TL = 10
TEAM_LEAD = 11
OTHER = 99
class TeamRole:
def __init__(self, id, name):
self._id = id
        role_translation = { # TODO: this should live in the class itself and call .value when sending it to the server
"Planner": RoleType.PLANNER,
"Operator": RoleType.OPERATOR,
"Linker": RoleType.LINKER,
"Keyfarming": RoleType.KEYFARMING,
"Cleaner": RoleType.CLEANER,
"Field Agent": RoleType.FIELD_AGENT,
"Item Sponser": RoleType.ITEM_SPONSOR,
"Key Transport": RoleType.KEY_TRANSPORT,
"Recharging": RoleType.RECHARGING,
"Software Support": RoleType.SOFTWARE_SUPPORT,
"Anomaly TL": RoleType.ANOMALY_TL,
"Team Lead": RoleType.TEAM_LEAD,
"Other": RoleType.OTHER
}
self._name = role_translation[name]
@property
def id(self):
return self._id
@property
def name(self):
return self._name
class Team:
def __init__(self, api_result):
self._teamid = api_result["teamid"]
self._team = api_result["team"]
self._roles = [TeamRole(r["id"], r["name"])
for r in api_result["roles"]]
@property
def teamid(self) -> int:
return self._teamid
@property
def team(self) -> str:
return self._team
@property
def roles(self) -> List[TeamRole]:
return self._roles
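# Hedged usage sketch: the api_result layout below is inferred from the Team constructor above,
# not from real API documentation, and the values are made up.
def _team_usage_example():
    api_result = {
        "teamid": 42,
        "team": "Example Team",
        "roles": [{"id": 1, "name": "Operator"}, {"id": 2, "name": "Linker"}],
    }
    team = Team(api_result)
    return team.team, [role.name for role in team.roles]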
| python |
#!/usr/bin/env python
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import boto
import boto.s3
command = sys.argv[1]
conn = boto.connect_s3()
if command == "upload":
bucketname = sys.argv[2]
keyname = sys.argv[3]
filename = sys.argv[4]
bucket = conn.get_bucket(bucketname)
key = bucket.new_key(keyname)
key.set_contents_from_filename(filename)
elif command == "download":
bucketname = sys.argv[2]
keyname = sys.argv[3]
filename = sys.argv[4]
bucket = conn.get_bucket(bucketname)
key = bucket.new_key(keyname)
key.get_contents_to_filename(filename)
elif command == "list":
bucketname = sys.argv[2]
bucket = conn.get_bucket(bucketname)
keys = bucket.get_all_keys()
for key in keys:
        print(key)
else:
raise Exception("unknown command: %s" % command)
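# Hedged CLI usage sketch (the script, bucket, key and file names are made up; credentials are
# resolved through boto's usual mechanisms such as environment variables or ~/.boto):
#
#   python s3_tool.py upload   my-bucket some/key.txt ./local.txt
#   python s3_tool.py download my-bucket some/key.txt ./local.txt
#   python s3_tool.py list     my-bucket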
| python |
import os
import shutil
import datetime
import functools
import subprocess
import xml.etree.ElementTree as ET
import numpy as np
import torch
import logging
from util.misc import all_gather
from collections import OrderedDict, defaultdict
class OWEvaluator:
def __init__(self, voc_gt, iou_types, args=None, use_07_metric=True, ovthresh=list(range(50, 100, 5))):
assert tuple(iou_types) == ('bbox',)
self.use_07_metric = use_07_metric
self.ovthresh = ovthresh
self.voc_gt = voc_gt
self.eps = torch.finfo(torch.float64).eps
self.num_classes = len(self.voc_gt.CLASS_NAMES)
self._class_names = self.voc_gt.CLASS_NAMES
self.AP = torch.zeros(self.num_classes, 1)
self.all_recs = defaultdict(list)
self.all_precs = defaultdict(list)
self.recs = defaultdict(list)
self.precs = defaultdict(list)
self.num_unks = defaultdict(list)
self.unk_det_as_knowns = defaultdict(list)
self.tp_plus_fp_cs = defaultdict(list)
self.fp_os = defaultdict(list)
self.coco_eval = dict(bbox=lambda: None)
self.coco_eval['bbox'].stats = torch.tensor([])
self.coco_eval['bbox'].eval = dict()
self.img_ids = []
self.lines = []
self.lines_cls = []
if args is not None:
self.prev_intro_cls = args.PREV_INTRODUCED_CLS
self.curr_intro_cls = args.CUR_INTRODUCED_CLS
self.total_num_class = args.num_classes
self.unknown_class_index = self.total_num_class - 1
self.num_seen_classes = self.prev_intro_cls + self.curr_intro_cls
self.known_classes = self._class_names[:self.num_seen_classes]
print("testing data details")
print(self.total_num_class)
print(self.unknown_class_index)
print(self.known_classes)
print(self.voc_gt.CLASS_NAMES)
def update(self, predictions):
for img_id, pred in predictions.items():
pred_boxes, pred_labels, pred_scores = [pred[k].cpu() for k in ['boxes', 'labels', 'scores']]
image_id = self.voc_gt.convert_image_id(int(img_id), to_string=True)
self.img_ids.append(img_id)
classes = pred_labels.tolist()
for (xmin, ymin, xmax, ymax), cls, score in zip(pred_boxes.tolist(), classes , pred_scores.tolist()):
xmin += 1
ymin += 1
self.lines.append(f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}")
self.lines_cls.append(cls)
def compute_avg_precision_at_many_recall_level_for_unk(self, precisions, recalls):
precs = {}
for r in range(1, 10):
r = r/10
p = self.compute_avg_precision_at_a_recall_level_for_unk(precisions, recalls, recall_level=r)
precs[r] = p
return precs
def compute_avg_precision_at_a_recall_level_for_unk(self, precisions, recalls, recall_level=0.5):
precs = {}
for iou, recall in recalls.items():
prec = []
for cls_id, rec in enumerate(recall):
if cls_id == self.unknown_class_index and len(rec)>0:
p = precisions[iou][cls_id][min(range(len(rec)), key=lambda i: abs(rec[i] - recall_level))]
prec.append(p)
if len(prec) > 0:
precs[iou] = np.mean(prec)
else:
precs[iou] = 0
return precs
def compute_WI_at_many_recall_level(self, recalls, tp_plus_fp_cs, fp_os):
wi_at_recall = {}
for r in range(1, 10):
r = r/10
wi = self.compute_WI_at_a_recall_level(recalls, tp_plus_fp_cs, fp_os, recall_level=r)
wi_at_recall[r] = wi
return wi_at_recall
def compute_WI_at_a_recall_level(self, recalls, tp_plus_fp_cs, fp_os, recall_level=0.5):
wi_at_iou = {}
for iou, recall in recalls.items():
tp_plus_fps = []
fps = []
for cls_id, rec in enumerate(recall):
if cls_id in range(self.num_seen_classes) and len(rec) > 0:
index = min(range(len(rec)), key=lambda i: abs(rec[i] - recall_level))
tp_plus_fp = tp_plus_fp_cs[iou][cls_id][index]
tp_plus_fps.append(tp_plus_fp)
fp = fp_os[iou][cls_id][index]
fps.append(fp)
if len(tp_plus_fps) > 0:
wi_at_iou[iou] = np.mean(fps) / np.mean(tp_plus_fps)
else:
wi_at_iou[iou] = 0
return wi_at_iou
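    # Hedged numeric illustration of the WI formula used above: at a given recall level, if the
    # known classes accumulate TP+FP = 200 closed-set detections and 20 detections at that
    # operating point actually fall on unknown objects, then WI = 20 / 200 = 0.1.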
def synchronize_between_processes(self):
self.img_ids = torch.tensor(self.img_ids, dtype=torch.int64)
self.lines_cls = torch.tensor(self.lines_cls, dtype=torch.int64)
self.img_ids, self.lines, self.lines_cls = self.merge(self.img_ids, self.lines, self.lines_cls)
def merge(self, img_ids, lines, lines_cls):
flatten = lambda ls: [s for l in ls for s in l]
all_img_ids = torch.cat(all_gather(img_ids))
all_lines_cls = torch.cat(all_gather(lines_cls))
all_lines = flatten(all_gather(lines))
return all_img_ids, all_lines, all_lines_cls
def accumulate(self):
for class_label_ind, class_label in enumerate(self.voc_gt.CLASS_NAMES):
lines_by_class = [l + '\n' for l, c in zip(self.lines, self.lines_cls.tolist()) if c == class_label_ind]
if len(lines_by_class) == 0:
lines_by_class = []
print(class_label + " has " + str(len(lines_by_class)) + " predictions.")
ovthresh = 50
ovthresh_ind, _ = map(self.ovthresh.index, [50, 75])
self.rec, self.prec, self.AP[class_label_ind, ovthresh_ind], self.unk_det_as_known, \
self.num_unk, self.tp_plus_fp_closed_set, self.fp_open_set = voc_eval(lines_by_class, \
self.voc_gt.annotations, self.voc_gt.image_set, class_label, ovthresh=ovthresh / 100.0, use_07_metric=self.use_07_metric, known_classes=self.known_classes) #[-1]
self.AP[class_label_ind, ovthresh_ind] = self.AP[class_label_ind, ovthresh_ind] * 100
self.all_recs[ovthresh].append(self.rec)
self.all_precs[ovthresh].append(self.prec)
self.num_unks[ovthresh].append(self.num_unk)
self.unk_det_as_knowns[ovthresh].append(self.unk_det_as_known)
self.tp_plus_fp_cs[ovthresh].append(self.tp_plus_fp_closed_set)
self.fp_os[ovthresh].append(self.fp_open_set)
try:
self.recs[ovthresh].append(self.rec[-1] * 100)
self.precs[ovthresh].append(self.prec[-1] * 100)
except:
self.recs[ovthresh].append(0.)
self.precs[ovthresh].append(0.)
def summarize(self, fmt='{:.06f}'):
o50, _ = map(self.ovthresh.index, [50, 75])
mAP = float(self.AP.mean())
mAP50 = float(self.AP[:, o50].mean())
print('detection mAP50:', fmt.format(mAP50))
print('detection mAP:', fmt.format(mAP))
print('---AP50---')
wi = self.compute_WI_at_many_recall_level(self.all_recs, self.tp_plus_fp_cs, self.fp_os)
print('Wilderness Impact: ' + str(wi))
avg_precision_unk = self.compute_avg_precision_at_many_recall_level_for_unk(self.all_precs, self.all_recs)
print('avg_precision: ' + str(avg_precision_unk))
total_num_unk_det_as_known = {iou: np.sum(x) for iou, x in self.unk_det_as_knowns.items()} #torch.sum(self.unk_det_as_knowns[:, o50]) #[np.sum(x) for x in self.unk_det_as_knowns[:, o50]]
total_num_unk = self.num_unks[50][0]
print('Absolute OSE (total_num_unk_det_as_known): ' + str(total_num_unk_det_as_known))
print('total_num_unk ' + str(total_num_unk))
print("AP50: " + str(['%.1f' % x for x in self.AP[:, o50]]))
print("Precisions50: " + str(['%.1f' % x for x in self.precs[50]]))
print("Recall50: " + str(['%.1f' % x for x in self.recs[50]]))
if self.prev_intro_cls > 0:
print("Prev class AP50: " + str(self.AP[:, o50][:self.prev_intro_cls].mean()))
print("Prev class Precisions50: " + str(np.mean(self.precs[50][:self.prev_intro_cls])))
print("Prev class Recall50: " + str(np.mean(self.recs[50][:self.prev_intro_cls])))
print("Current class AP50: " + str(self.AP[:, o50][self.prev_intro_cls:self.prev_intro_cls + self.curr_intro_cls].mean()))
print("Current class Precisions50: " + str(np.mean(self.precs[50][self.prev_intro_cls:self.prev_intro_cls + self.curr_intro_cls])))
print("Current class Recall50: " + str(np.mean(self.recs[50][self.prev_intro_cls:self.prev_intro_cls + self.curr_intro_cls])))
print("Known AP50: " + str(self.AP[:, o50][:self.prev_intro_cls + self.curr_intro_cls].mean()))
print("Known Precisions50: " + str(np.mean(self.precs[50][:self.prev_intro_cls + self.curr_intro_cls])))
print("Known Recall50: " + str(np.mean(self.recs[50][:self.prev_intro_cls + self.curr_intro_cls])))
print("Unknown AP50: " + str(self.AP[:, o50][-1]))
print("Unknown Precisions50: " + str(self.precs[50][-1]))
print("Unknown Recall50: " + str(self.recs[50][-1]))
for class_name, ap in zip(self.voc_gt.CLASS_NAMES, self.AP[:, o50].cpu().tolist()):
print(class_name, fmt.format(ap))
self.coco_eval['bbox'].stats = torch.cat(
[self.AP[:, o50].mean(dim=0, keepdim=True),
self.AP.flatten().mean(dim=0, keepdim=True), self.AP.flatten()])
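# Hedged evaluation-loop sketch (illustration only): `voc_gt`, `args` and the per-batch
# `predictions` dict of {image_id: {'boxes', 'labels', 'scores'}} tensors are produced by the
# surrounding detection pipeline and are not defined in this file.
#
#   evaluator = OWEvaluator(voc_gt, iou_types=('bbox',), args=args)
#   for predictions in prediction_batches:
#       evaluator.update(predictions)
#   evaluator.synchronize_between_processes()
#   evaluator.accumulate()
#   evaluator.summarize()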
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
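# Hedged worked example (synthetic numbers, illustration only): three detections sorted by
# confidence, of which the 1st and 3rd are true positives against 2 ground-truth boxes, give the
# cumulative (recall, precision) pairs below; the area-under-curve variant returns
# 0.5*1.0 + 0.5*(2/3) ~= 0.833.
def _voc_ap_example():
    rec = np.array([0.5, 0.5, 1.0])
    prec = np.array([1.0, 0.5, 2.0 / 3.0])
    return voc_ap(rec, prec, use_07_metric=False)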
@functools.lru_cache(maxsize=None)
def parse_rec(filename, known_classes):
""" Parse a PASCAL VOC xml file """
VOC_CLASS_NAMES_COCOFIED = [
"airplane", "dining table", "motorcycle",
"potted plant", "couch", "tv"
]
BASE_VOC_CLASS_NAMES = [
"aeroplane", "diningtable", "motorbike",
"pottedplant", "sofa", "tvmonitor"
]
tree = ET.parse(filename)
# import pdb;pdb.set_trace()
objects = []
for obj in tree.findall('object'):
obj_struct = {}
cls_name = obj.find('name').text
if cls_name in VOC_CLASS_NAMES_COCOFIED:
cls_name = BASE_VOC_CLASS_NAMES[VOC_CLASS_NAMES_COCOFIED.index(cls_name)]
if cls_name not in known_classes:
cls_name = 'unknown'
obj_struct['name'] = cls_name
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=0.5,
use_07_metric=False,
known_classes=None):
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
def iou(BBGT, bb):
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
return ovmax, jmax
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# read list of images
if isinstance(imagesetfile, list):
lines = imagesetfile
else:
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# import pdb;pdb.set_trace()
# load annots
recs = {}
if isinstance(annopath, list):
# print("hi")
for a in annopath:
imagename = os.path.splitext(os.path.basename(a))[0]
recs[imagename] = parse_rec(a, tuple(known_classes))
else:
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath.format(imagename), tuple(known_classes))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
if isinstance(detpath, list):
lines = detpath
else:
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
# import pdb;pdb.set_trace()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
if len(splitlines) == 0:
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
else:
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])#.reshape(-1, 4)
# if BB.size == 0:
# return 0, 0, 0
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
# import pdb;pdb.set_trace()
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
ovmax, jmax = iou(BBGT, bb)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
'''
Computing Absolute Open-Set Error (A-OSE) and Wilderness Impact (WI)
===========
Absolute OSE = # of unknown objects classified as known objects of class 'classname'
WI = FP_openset / (TP_closed_set + FP_closed_set)
'''
# logger = logging.getLogger(__name__)
# Finding GT of unknown objects
unknown_class_recs = {}
n_unk = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == 'unknown']
bbox = np.array([x["bbox"] for x in R])
difficult = np.array([x["difficult"] for x in R]).astype(np.bool)
det = [False] * len(R)
n_unk = n_unk + sum(~difficult)
unknown_class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
if classname == 'unknown':
return rec, prec, ap, 0., n_unk, None, None
# Go down each detection and see if it has an overlap with an unknown object.
# If so, it is an unknown object that was classified as known.
is_unk = np.zeros(nd)
for d in range(nd):
R = unknown_class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
is_unk[d] = 1.0
is_unk_sum = np.sum(is_unk)
tp_plus_fp_closed_set = tp+fp
fp_open_set = np.cumsum(is_unk)
# import pdb;pdb.set_trace()
return rec, prec, ap, is_unk_sum, n_unk, tp_plus_fp_closed_set, fp_open_set
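# Hedged illustration (paths and names are made up, so this is not runnable as-is): each
# detection line handed to voc_eval is "<image_id> <score> <xmin> <ymin> <xmax> <ymax>", e.g.
#
#   lines = ['000001 0.912 48.0 23.0 195.0 177.0']
#   rec, prec, ap, ose, n_unk, tp_fp_cs, fp_os = voc_eval(
#       lines, 'Annotations/{}.xml', ['000001'], 'person',
#       ovthresh=0.5, use_07_metric=False, known_classes=['person'])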
def bbox_nms(boxes, scores, overlap_threshold=0.4, score_threshold=0.0, mask=False):
def overlap(box1, box2=None, rectint=False, eps=1e-6):
area = lambda boxes=None, x1=None, y1=None, x2=None, y2=None: (boxes[..., 2] - boxes[..., 0]) * (
boxes[..., 3] - boxes[..., 1]) if boxes is not None else (x2 - x1).clamp(min=0) * (y2 - y1).clamp(
min=0)
if box2 is None and not isinstance(box1, list) and box1.dim() == 3:
return torch.stack(list(map(overlap, box1)))
b1, b2 = [(b if b.dim() == 2 else b.unsqueeze(0)).t().contiguous() for b in
[box1, (box2 if box2 is not None else box1)]]
xx1 = torch.max(b1[0].unsqueeze(1), b2[0].unsqueeze(0))
yy1 = torch.max(b1[1].unsqueeze(1), b2[1].unsqueeze(0))
xx2 = torch.min(b1[2].unsqueeze(1), b2[2].unsqueeze(0))
yy2 = torch.min(b1[3].unsqueeze(1), b2[3].unsqueeze(0))
inter = area(x1=xx1, y1=yy1, x2=xx2, y2=yy2)
return inter / (area(b1.t()).unsqueeze(1) + area(b2.t()).unsqueeze(0) - inter + eps) if not rectint else inter
O = overlap(boxes)
I = scores.sort(0)[1]
M = scores.gather(0, I).ge(score_threshold)
M = M if M.any() else M.fill_(1)
pick = []
for i, m in zip(I.t(), M.t()):
p = []
i = i[m]
while len(i) > 1:
p.append(i[-1])
m = O[:, i[-1]][i].lt(overlap_threshold)
m[-1] = 0
i = i[m]
pick.append(torch.tensor(p + i.tolist(), dtype=torch.int64))
return pick if not mask else torch.stack(
[torch.zeros(len(scores), dtype=torch.bool).scatter_(0, p, 1) for p in pick])
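# Hedged usage example (synthetic boxes, illustration only): two heavily overlapping boxes and
# one separate box, with a single class column of scores. Box 1 is suppressed by the
# higher-scoring box 0, so the returned per-class index list is [tensor([0, 2])].
def _bbox_nms_example():
    boxes = torch.tensor([[0., 0., 10., 10.],
                          [1., 1., 11., 11.],
                          [50., 50., 60., 60.]])
    scores = torch.tensor([[0.9], [0.8], [0.7]])   # shape [num_boxes, num_classes] with one class
    return bbox_nms(boxes, scores, overlap_threshold=0.4)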
def package_submission(out_dir, image_file_name, class_labels, VOCYEAR, SUBSET, TASK, tar=True, **kwargs):
def cls(file_path, class_label_ind, scores):
with open(file_path, 'w') as f:
f.writelines(map('{} {}\n'.format, image_file_name, scores[:, class_label_ind].tolist()))
def det(file_path, class_label_ind, scores, proposals, keep):
zipped = []
for example_idx, basename in enumerate(image_file_name):
I = keep[example_idx][class_label_ind]
zipped.extend((basename, s) + tuple(p) for s, p in zip(scores[example_idx][I, class_label_ind].tolist(),
proposals[example_idx][I, :4].add(1).tolist()))
with open(file_path, 'w') as f:
f.writelines(map('{} {} {:.0f} {:.0f} {:.0f} {:.0f} \n'.format, *zip(*zipped)))
task_a, task_b = TASK.split('_')
resdir = os.path.join(out_dir, 'results')
respath = os.path.join(resdir, VOCYEAR, 'Main', '%s_{}_{}_%s.txt'.format(task_b, SUBSET))
if os.path.exists(resdir):
shutil.rmtree(resdir)
os.makedirs(os.path.join(resdir, VOCYEAR, 'Main'))
for class_label_ind, class_label in enumerate(class_labels):
dict(det=det, cls=cls)[task_b](respath.replace('%s', '{}').format(task_a, class_label), class_label_ind,
**kwargs)
if tar:
subprocess.check_call(['tar', '-czf', 'results-{}-{}-{}.tar.gz'.format(VOCYEAR, TASK, SUBSET), 'results'],
cwd=out_dir)
return respath
def detection_mean_ap(out_dir, image_file_name, class_labels, VOCYEAR, SUBSET, VOC_DEVKIT_VOCYEAR, scores=None,
boxes=None, nms_score_threshold=1e-4, nms_overlap_threshold=0.4, tar=False, octave=False,
cmd='octave --eval', env=None, stdout_stderr=open(os.devnull, 'wb'), do_nms=True):
if scores is not None:
nms = list(map(lambda s, p: bbox_nms(p, s, overlap_threshold=nms_overlap_threshold,
score_threshold=nms_score_threshold), scores, boxes)) if do_nms else [
torch.arange(len(p)) for p in boxes]
else:
nms = torch.arange(len(class_labels)).unsqueeze(0).unsqueeze(-1).expand(len(image_file_name), len(class_labels),
1)
scores = torch.zeros(len(image_file_name), len(class_labels), len(class_labels))
imgsetpath = os.path.join(VOC_DEVKIT_VOCYEAR, 'ImageSets', 'Main', SUBSET + '.txt')
detrespath = package_submission(out_dir, image_file_name, class_labels, VOCYEAR, SUBSET, 'comp4_det', tar=tar,
scores=scores, proposals=boxes, nms=nms)
if octave:
imgsetpath_fix = os.path.join(out_dir, detection_mean_ap.__name__ + '.txt')
with open(imgsetpath_fix, 'w') as f:
f.writelines([line[:-1] + ' -1\n' for line in open(imgsetpath)])
procs = [subprocess.Popen(cmd.split() + [
"oldpwd = pwd; cd('{}/..'); addpath(fullfile(pwd, 'VOCcode')); VOCinit; cd(oldpwd); VOCopts.testset = '{}'; VOCopts.detrespath = '{}'; VOCopts.imgsetpath = '{}'; classlabel = '{}'; warning('off', 'Octave:possible-matlab-short-circuit-operator'); warning('off', 'Octave:num-to-str'); [rec, prec, ap] = VOCevaldet(VOCopts, 'comp4', classlabel, false); dlmwrite(sprintf(VOCopts.detrespath, 'resu4', classlabel), ap); quit;".format(
VOC_DEVKIT_VOCYEAR, SUBSET, detrespath, imgsetpath_fix, class_label)], stdout=stdout_stderr,
stderr=stdout_stderr, env=env) for class_label in class_labels]
res = list(map(lambda class_label, proc: proc.wait() or float(open(detrespath % ('resu4', class_label)).read()),
class_labels, procs))
else:
        # note: voc_eval() above takes no cachedir argument; element 2 of its return tuple is the AP
        res = [voc_eval(detrespath.replace('%s', '{}').format('comp4', '{}'),
                        os.path.join(VOC_DEVKIT_VOCYEAR, 'Annotations', '{}.xml'), imgsetpath, class_label,
                        use_07_metric=True)[2] for
               class_label in class_labels]
    return torch.tensor(res).mean(), res
| python |
# a method for obtaining a rough estimate of species richness on islands with transient dynamics
# check it gives reasonable estimates
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# check a range of parameter values
# ---
# where to save results
dir_results = '../../../results/verify/test_sampling/'
suffix = '_rough_estimate'
J = 10000 # number of individuals on an island
theta = 30 # fundamental biodiversity number
# immigration rates
mV = [0.0005, 0.005, 0.01, 0.05, 0.1]
# time in generations since an island separated from the mainland
TV = [50, 100, 500, 1000, 5000, 100000]
# for each parameter combination, make rough estimate of species richness
# ---
E_SM = list() # a place to store species richnesses
for T in TV:
E_SV = list()
for m in mV:
# find the expected number of founders using Chen & Chen's asymptotic approximation
W = J*m / (1-m)
alpha = T/2
beta = (W-1)*T/(2*J)
D = ( T*(W-1)/2 ) / ( alpha*(np.exp(beta)-1) + beta*np.exp(beta) )
D = int(round(D))
# expected number of ancestors given the number of founders
E_C = D + sum( W / (W+i) for i in range(D,J) )
E_C = int(round(E_C))
# expected number of species given the number of ancestors
E_S = sum( theta / (theta+i) for i in range(E_C) )
# store
E_SV.append(E_S)
# store
E_SM.append(E_SV)
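# Hedged refactoring sketch (not called below): the three-step estimate from the loop above,
# wrapped as a function so a single (m, T) pair can be checked in isolation.
def rough_richness_estimate(J, theta, m, T):
    # expected number of founders (Chen & Chen asymptotic approximation)
    W = J * m / (1 - m)
    alpha = T / 2
    beta = (W - 1) * T / (2 * J)
    D = int(round((T * (W - 1) / 2) / (alpha * (np.exp(beta) - 1) + beta * np.exp(beta))))
    # expected number of ancestors given the number of founders
    E_C = int(round(D + sum(W / (W + i) for i in range(D, J))))
    # expected number of species given the number of ancestors
    return sum(theta / (theta + i) for i in range(E_C))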
# for each parameter combination, average the species richnesses from the samples
# ---
# read in the dataframe
fname = dir_results + 'samples' + suffix + '.csv'
df = pd.read_csv(fname)
S_SM = list() # a place to store species richnesses from samples for each T
for T in TV:
# find the entries that match the T
df_sub = df[ df['T_0'] == T ]
# the islands are in the same order as mV (5 of them), so find the no species on each island
SV = df_sub['no_spp_S'].values
HV = df_sub['no_isles_H'].values
data_row_as_strV = df_sub['presence_absence_matrix_cols_(isles)_concatenated'].values
richness_islands = list()
for S, H, data_row_as_str in zip(SV, HV, data_row_as_strV):
isle_strings = [ data_row_as_str[i:i+S] for i in range(0, S*H, S) ]
richnesses = [ this_isle.count('p') for this_isle in isle_strings ]
richness_islands.append(richnesses)
S_SV = np.mean(np.array(richness_islands), axis=0)
S_SM.append(S_SV)
# plot it
# ---
colour_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
# add fake markers so I can label rough estimate versus sample
plt.plot([], [], color='black', alpha=0.5, label='rough estimate')
plt.scatter([], [], marker='o', color='black', alpha=0.5, label='average over 30 samples')
for T, E_SV, S_SV, colour in zip(TV, E_SM, S_SM, colour_cycle):
plt.plot(mV, E_SV, color=colour, alpha=0.5, label = r'$T = ' + str(T) + '$')
plt.scatter(mV, S_SV, marker='o', color=colour, alpha=0.5)
plt.legend(loc='best')
plt.xlabel('immigrant probability')
plt.ylabel('number of species')
plt.xscale('log')
plt.tight_layout()
plt.savefig(dir_results + 'check_rough_richness_estimate.pdf')
plt.close()
| python |
#!/usr/bin/env python3
import rospy
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import PoseStamped, Point, Quaternion, Twist
from controller_copy import Controller
class Test():
def __init__(self):
self.odom_topic = "/odom"
self.target_path = Path()
self.target_path.poses.append(PoseStamped())
self.goal = PoseStamped() # for testing
self.goal.pose.position.x = 3.0
self.goal.pose.position.y = 2.0
# self.target_path.poses.append(goal)
self.controller = Controller(self.target_path, odom_topic=self.odom_topic)
if __name__ == "__main__":
rospy.init_node("test")
test = Test()
velocity = Twist()
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
while not rospy.is_shutdown():
velocity = test.controller.get_velocity(test.goal)
pub.publish(velocity)
if test.controller.reached_intermediate_goal():
velocity.linear.x = 0.0
velocity.linear.y = 0.0
pub.publish(velocity)
print("goal reached")
break
else:
            continue
| python |