""" Unit tests for FeatureNormalizer """
import nose.tools
import sys
import numpy
sys.path.append('..')
from dcase_framework.features import FeatureNormalizer, FeatureContainer, FeatureExtractor
import os
def test_accumulate_finalize():
FeatureExtractor(store=True, overwrite=True).extract(
audio_file=os.path.join('material', 'test.wav'),
extractor_name='mfcc',
extractor_params={
'mfcc': {
'n_mfcc': 10
}
},
storage_paths={
'mfcc': os.path.join('material', 'test.mfcc.cpickle')
}
)
# Test 1
feature_container = FeatureContainer().load(filename=os.path.join('material', 'test.mfcc.cpickle'))
feature_normalizer = FeatureNormalizer().accumulate(feature_container=feature_container).finalize()
nose.tools.eq_(feature_normalizer['N'][0], 501)
numpy.testing.assert_array_equal(feature_normalizer['mean'][0][0],
numpy.mean(feature_container.feat[0], axis=0))
numpy.testing.assert_array_equal(feature_normalizer['S1'][0],
numpy.sum(feature_container.feat[0], axis=0))
numpy.testing.assert_array_equal(feature_normalizer['S2'][0],
numpy.sum(feature_container.feat[0]**2, axis=0))
# Test 2
feature_container = FeatureContainer().load(filename=os.path.join('material', 'test.mfcc.cpickle'))
feature_normalizer = FeatureNormalizer()
feature_normalizer.accumulate(feature_container=feature_container)
feature_normalizer.accumulate(feature_container=feature_container)
feature_normalizer.finalize()
nose.tools.eq_(feature_normalizer['N'][0], 501*2)
numpy.testing.assert_array_equal(feature_normalizer['mean'][0][0],
numpy.mean(feature_container.feat[0], axis=0))
numpy.testing.assert_array_equal(feature_normalizer['S1'][0],
numpy.sum(feature_container.feat[0], axis=0)*2)
numpy.testing.assert_array_equal(feature_normalizer['S2'][0],
numpy.sum(feature_container.feat[0] ** 2, axis=0)*2)
def test_with_statement():
FeatureExtractor(store=True, overwrite=True).extract(
audio_file=os.path.join('material', 'test.wav'),
extractor_name='mfcc',
extractor_params={
'mfcc': {
'n_mfcc': 10
}
},
storage_paths={
'mfcc': os.path.join('material', 'test.mfcc.cpickle')
}
)
feature_container = FeatureContainer().load(filename=os.path.join('material', 'test.mfcc.cpickle'))
with FeatureNormalizer() as feature_normalizer:
feature_normalizer.accumulate(feature_container)
nose.tools.eq_(feature_normalizer['N'][0], 501)
numpy.testing.assert_array_equal(feature_normalizer['mean'][0][0],
numpy.mean(feature_container.feat[0], axis=0))
numpy.testing.assert_array_equal(feature_normalizer['S1'][0],
numpy.sum(feature_container.feat[0], axis=0))
numpy.testing.assert_array_equal(feature_normalizer['S2'][0],
numpy.sum(feature_container.feat[0] ** 2, axis=0))
test_accumulate_finalize()
| python |
"""A package for computing Pfaffians"""
import cmath
import math
import numpy as np
import scipy.linalg as la
import scipy.sparse as sp
def householder_real(x):
"""(v, tau, alpha) = householder_real(x)
Compute a Householder transformation such that
(1-tau v v^T) x = alpha e_1
    where x and v are real vectors, tau is 0 or 2, and
alpha a real number (e_1 is the first unit vector)
"""
assert x.shape[0] > 0
sigma = np.dot(x[1:], x[1:])
if sigma == 0:
return (np.zeros(x.shape[0]), 0, x[0])
else:
norm_x = math.sqrt(x[0] ** 2 + sigma)
v = x.copy()
        # depending on whether x[0] is positive or negative
# choose the sign
if x[0] <= 0:
v[0] -= norm_x
alpha = +norm_x
else:
v[0] += norm_x
alpha = -norm_x
v = v / np.linalg.norm(v)
return (v, 2, alpha)
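# A quick numerical sanity check of householder_real (a hedged sketch, not part
# of the original package): (1 - tau v v^T) x should equal alpha * e_1.
if __name__ == "__main__":
    _x = np.random.rand(5)
    _v, _tau, _alpha = householder_real(_x)
    _Hx = _x - _tau * _v * np.dot(_v, _x)
    assert np.allclose(_Hx, _alpha * np.eye(5)[0])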
def householder_complex(x):
"""(v, tau, alpha) = householder_real(x)
Compute a Householder transformation such that
(1-tau v v^T) x = alpha e_1
    where x and v are complex vectors, tau is 0 or 2, and
alpha a complex number (e_1 is the first unit vector)
"""
assert x.shape[0] > 0
sigma = np.dot(np.conj(x[1:]), x[1:])
if sigma == 0:
return (np.zeros(x.shape[0]), 0, x[0])
else:
norm_x = cmath.sqrt(x[0].conjugate() * x[0] + sigma)
v = x.copy()
phase = cmath.exp(1j * math.atan2(x[0].imag, x[0].real))
v[0] += phase * norm_x
v /= np.linalg.norm(v)
return (v, 2, -phase * norm_x)
def skew_tridiagonalize(A, overwrite_a=False, calc_q=True):
"""T, Q = skew_tridiagonalize(A, overwrite_a, calc_q=True)
or
T = skew_tridiagonalize(A, overwrite_a, calc_q=False)
Bring a real or complex skew-symmetric matrix (A=-A^T) into
    tridiagonal form T (with zero diagonal) with an orthogonal
    (real case) or unitary (complex case) matrix Q such that
A = Q T Q^T
(Note that Q^T and *not* Q^dagger also in the complex case)
A is overwritten if overwrite_a=True (default: False), and
Q only calculated if calc_q=True (default: True)
"""
# Check if matrix is square
assert A.shape[0] == A.shape[1] > 0
# Check if it's skew-symmetric
assert abs((A + A.T).max()) < 1e-14
A = np.asarray(A) # the slice views work only properly for arrays
# Check if we have a complex data type
if np.issubdtype(A.dtype, np.complexfloating):
householder = householder_complex
elif not np.issubdtype(A.dtype, np.number):
raise TypeError("pfaffian() can only work on numeric input")
else:
householder = householder_real
if not overwrite_a:
A = A.copy()
if calc_q:
Q = np.eye(A.shape[0], dtype=A.dtype)
for i in range(A.shape[0] - 2):
# Find a Householder vector to eliminate the i-th column
v, tau, alpha = householder(A[i + 1 :, i])
A[i + 1, i] = alpha
A[i, i + 1] = -alpha
A[i + 2 :, i] = 0
A[i, i + 2 :] = 0
# Update the matrix block A(i+1:N,i+1:N)
w = tau * np.dot(A[i + 1 :, i + 1 :], v.conj())
A[i + 1 :, i + 1 :] += np.outer(v, w) - np.outer(w, v)
if calc_q:
# Accumulate the individual Householder reflections
# Accumulate them in the form P_1*P_2*..., which is
# (..*P_2*P_1)^dagger
y = tau * np.dot(Q[:, i + 1 :], v)
Q[:, i + 1 :] -= np.outer(y, v.conj())
if calc_q:
return (np.asmatrix(A), np.asmatrix(Q))
else:
return np.asmatrix(A)
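# Hedged self-check (not part of the original package): for a random real
# skew-symmetric A we should recover A = Q T Q^T up to rounding error.
if __name__ == "__main__":
    _B = np.random.rand(6, 6)
    _A = _B - _B.T
    _T, _Q = skew_tridiagonalize(_A)
    assert np.allclose(np.asarray(_Q) @ np.asarray(_T) @ np.asarray(_Q).T, _A)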
def skew_LTL(A, overwrite_a=False, calc_L=True, calc_P=True):
"""T, L, P = skew_LTL(A, overwrite_a, calc_q=True)
Bring a real or complex skew-symmetric matrix (A=-A^T) into
tridiagonal form T (with zero diagonal) with a lower unit
triangular matrix L such that
P A P^T= L T L^T
A is overwritten if overwrite_a=True (default: False),
L and P only calculated if calc_L=True or calc_P=True,
respectively (default: True).
"""
# Check if matrix is square
assert A.shape[0] == A.shape[1] > 0
# Check if it's skew-symmetric
assert abs((A + A.T).max()) < 1e-14
n = A.shape[0]
A = np.asarray(A) # the slice views work only properly for arrays
if not overwrite_a:
A = A.copy()
if calc_L:
L = np.eye(n, dtype=A.dtype)
if calc_P:
Pv = np.arange(n)
for k in range(n - 2):
# First, find the largest entry in A[k+1:,k] and
# permute it to A[k+1,k]
kp = k + 1 + np.abs(A[k + 1 :, k]).argmax()
# Check if we need to pivot
if kp != k + 1:
# interchange rows k+1 and kp
temp = A[k + 1, k:].copy()
A[k + 1, k:] = A[kp, k:]
A[kp, k:] = temp
# Then interchange columns k+1 and kp
temp = A[k:, k + 1].copy()
A[k:, k + 1] = A[k:, kp]
A[k:, kp] = temp
if calc_L:
# permute L accordingly
temp = L[k + 1, 1 : k + 1].copy()
L[k + 1, 1 : k + 1] = L[kp, 1 : k + 1]
L[kp, 1 : k + 1] = temp
if calc_P:
# accumulate the permutation matrix
temp = Pv[k + 1]
Pv[k + 1] = Pv[kp]
Pv[kp] = temp
# Now form the Gauss vector
if A[k + 1, k] != 0.0:
tau = A[k + 2 :, k].copy()
tau /= A[k + 1, k]
# clear eliminated row and column
A[k + 2 :, k] = 0.0
A[k, k + 2 :] = 0.0
# Update the matrix block A(k+2:,k+2)
A[k + 2 :, k + 2 :] += np.outer(tau, A[k + 2 :, k + 1])
A[k + 2 :, k + 2 :] -= np.outer(A[k + 2 :, k + 1], tau)
if calc_L:
L[k + 2 :, k + 1] = tau
if calc_P:
# form the permutation matrix as a sparse matrix
P = sp.csr_matrix((np.ones(n), (np.arange(n), Pv)))
if calc_L:
if calc_P:
return (np.asmatrix(A), np.asmatrix(L), P)
else:
return (np.asmatrix(A), np.asmatrix(L))
else:
if calc_P:
return (np.asmatrix(A), P)
else:
return np.asmatrix(A)
def pfaffian(A, overwrite_a=False, method="P"):
"""pfaffian(A, overwrite_a=False, method='P')
Compute the Pfaffian of a real or complex skew-symmetric
matrix A (A=-A^T). If overwrite_a=True, the matrix A
is overwritten in the process. This function uses
either the Parlett-Reid algorithm (method='P', default),
or the Householder tridiagonalization (method='H')
"""
# Check if matrix is square
assert A.shape[0] == A.shape[1] > 0
# Check if it's skew-symmetric
assert abs((A + A.T).max()) < 1e-14
# Check that the method variable is appropriately set
assert method == "P" or method == "H"
if method == "P":
return pfaffian_LTL(A, overwrite_a)
else:
return pfaffian_householder(A, overwrite_a)
def pfaffian_LTL(A, overwrite_a=False):
"""pfaffian_LTL(A, overwrite_a=False)
Compute the Pfaffian of a real or complex skew-symmetric
matrix A (A=-A^T). If overwrite_a=True, the matrix A
is overwritten in the process. This function uses
the Parlett-Reid algorithm.
"""
# Check if matrix is square
assert A.shape[0] == A.shape[1] > 0
# Check if it's skew-symmetric
assert abs((A + A.T).max()) < 1e-14
n, m = A.shape
# type check to fix problems with integer numbers
dtype = type(A[0, 0])
if dtype != np.complex128:
# the slice views work only properly for arrays
A = np.asarray(A, dtype=float)
# Quick return if possible
if n % 2 == 1:
return 0
if not overwrite_a:
A = A.copy()
pfaffian_val = 1.0
for k in range(0, n - 1, 2):
# First, find the largest entry in A[k+1:,k] and
# permute it to A[k+1,k]
kp = k + 1 + np.abs(A[k + 1 :, k]).argmax()
# Check if we need to pivot
if kp != k + 1:
# interchange rows k+1 and kp
temp = A[k + 1, k:].copy()
A[k + 1, k:] = A[kp, k:]
A[kp, k:] = temp
# Then interchange columns k+1 and kp
temp = A[k:, k + 1].copy()
A[k:, k + 1] = A[k:, kp]
A[k:, kp] = temp
# every interchange corresponds to a "-" in det(P)
pfaffian_val *= -1
# Now form the Gauss vector
if A[k + 1, k] != 0.0:
tau = A[k, k + 2 :].copy()
tau = tau / A[k, k + 1]
pfaffian_val *= A[k, k + 1]
if k + 2 < n:
# Update the matrix block A(k+2:,k+2)
A[k + 2 :, k + 2 :] = A[k + 2 :, k + 2 :] + np.outer(
tau, A[k + 2 :, k + 1]
)
A[k + 2 :, k + 2 :] = A[k + 2 :, k + 2 :] - np.outer(
A[k + 2 :, k + 1], tau
)
else:
# if we encounter a zero on the super/subdiagonal, the
# Pfaffian is 0
return 0.0
return pfaffian_val
def pfaffian_householder(A, overwrite_a=False):
"""pfaffian(A, overwrite_a=False)
Compute the Pfaffian of a real or complex skew-symmetric
matrix A (A=-A^T). If overwrite_a=True, the matrix A
is overwritten in the process. This function uses the
Householder tridiagonalization.
Note that the function pfaffian_schur() can also be used in the
real case. That function does not make use of the skew-symmetry
and is only slightly slower than pfaffian_householder().
"""
# Check if matrix is square
assert A.shape[0] == A.shape[1] > 0
# Check if it's skew-symmetric
assert abs((A + A.T).max()) < 1e-14
n = A.shape[0]
# type check to fix problems with integer numbers
dtype = type(A[0, 0])
if dtype != np.complex128:
# the slice views work only properly for arrays
A = np.asarray(A, dtype=float)
# Quick return if possible
if n % 2 == 1:
return 0
# Check if we have a complex data type
if np.issubdtype(A.dtype, np.complexfloating):
householder = householder_complex
elif not np.issubdtype(A.dtype, np.number):
raise TypeError("pfaffian() can only work on numeric input")
else:
householder = householder_real
A = np.asarray(A) # the slice views work only properly for arrays
if not overwrite_a:
A = A.copy()
pfaffian_val = 1.0
for i in range(A.shape[0] - 2):
# Find a Householder vector to eliminate the i-th column
v, tau, alpha = householder(A[i + 1 :, i])
A[i + 1, i] = alpha
A[i, i + 1] = -alpha
A[i + 2 :, i] = 0
A[i, i + 2 :] = 0
# Update the matrix block A(i+1:N,i+1:N)
w = tau * np.dot(A[i + 1 :, i + 1 :], v.conj())
A[i + 1 :, i + 1 :] = A[i + 1 :, i + 1 :] + np.outer(v, w) - np.outer(w, v)
if tau != 0:
pfaffian_val *= 1 - tau
if i % 2 == 0:
pfaffian_val *= -alpha
pfaffian_val *= A[n - 2, n - 1]
return pfaffian_val
def pfaffian_schur(A, overwrite_a=False):
"""Calculate Pfaffian of a real antisymmetric matrix using
the Schur decomposition. (Hessenberg would in principle be faster,
but scipy-0.8 messed up the performance for scipy.linalg.hessenberg()).
This function does not make use of the skew-symmetry of the matrix A,
but uses a LAPACK routine that is coded in FORTRAN and hence faster
than python. As a consequence, pfaffian_schur is only slightly slower
than pfaffian().
"""
assert np.issubdtype(A.dtype, np.number) and not np.issubdtype(
A.dtype, np.complexfloating
)
assert A.shape[0] == A.shape[1] > 0
assert abs(A + A.T).max() < 1e-14
# Quick return if possible
if A.shape[0] % 2 == 1:
return 0
(t, z) = la.schur(A, output="real", overwrite_a=overwrite_a)
l = np.diag(t, 1) # noqa: E741
return np.prod(l[::2]) * la.det(z)
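# Hedged self-check (not part of the original package): both Pfaffian
# algorithms should agree, Pf(A)^2 should equal det(A), and the Schur-based
# routine should match on real input.
if __name__ == "__main__":
    _B = np.random.rand(8, 8)
    _A = _B - _B.T
    _pf_P = pfaffian(_A, method="P")
    _pf_H = pfaffian(_A, method="H")
    assert np.allclose(_pf_P, _pf_H)
    assert np.allclose(_pf_P ** 2, np.linalg.det(_A))
    assert np.allclose(_pf_P, pfaffian_schur(_A))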
| python |
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.template import loader
from django.utils.safestring import mark_safe as _S
from django.utils.six.moves.urllib.parse import urlparse
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_permission_codename
from django.utils import six
if six.PY3:
import io
contents = io.BytesIO
else:
import StringIO
contents = StringIO.StringIO
import markdown
import json
from mdx_gfm import GithubFlavoredMarkdownExtension
from mimetypes import guess_type
from distutils.util import strtobool
import djclick as click
import re
import yaml
import time
import struct
import binascii
def get_absolute_url(instance, name='detail'):
return reverse(
'{0}_{1}_{2}'.format(
instance._meta.app_label, instance._meta.model_name, name),
kwargs={'id': instance.id})
def get_contenttype(instance_or_class):
if isinstance(instance_or_class, ContentType):
return instance_or_class
return ContentType.objects.get_for_model(instance_or_class)
def to_natural_key(instance_or_class):
return get_contenttype(instance_or_class).natural_key()
def to_natural_key_string(instance_or_class):
return ".".join(to_natural_key(instance_or_class))
def from_natual_key(app_lable, model_name, **queries):
ct = ContentType.objects.get_by_natural_key(app_lable, model_name)
if queries:
return ct.get_object_for_this_type(**queries)
return ct.model_class()
def from_natual_key_string(natural_key_string, **queries):
return from_natual_key(*natural_key_string.split('.'), **queries)
def get_permission(ct_or_model, codename):
ct = get_contenttype(ct_or_model)
return ct.permission_set.filter(codename=codename).first()
def get_perm_name(model, action):
    '''Permission name for the given action on the model'''
return "{}.{}".format(
model._meta.app_label, get_permission_codename(action, model._meta))
def to_admin_change_url_name(instance_or_class):
return "admin:{}_{}_change".format(instance_or_class._meta.app_label)
def to_admin_change_url(instance_or_class, id=None):
id = id or instance_or_class.id
return reverse(
to_admin_change_url_name(instance_or_class), args=[id])
def to_admin_changelist_url_name(instance_or_class):
return 'admin:{0}_changelist'.format(instance_or_class._meta.db_table)
def to_admin_changelist_url(instance_or_class):
return reverse(to_admin_changelist_url_name(instance_or_class))
def spaceless(src):
    '''Strip out whitespace (including full-width spaces)'''
    return re.sub(u'[\\s\u3000]', '', src or '')
def render(src, request=None, **ctx):
    '''Render a template given as a string'''
from django.template import engines
engine = engines['django']
request = request or None
return _S(engine.from_string(src).render(ctx, request=request))
def render_by(name, request=None, **ctx):
    '''Render a template given by file name'''
request = request or None
return _S(loader.get_template(name).render(ctx, request=request))
def echo(teml, fg="green", **kwargs):
    '''Write to the console using a template string'''
click.secho(render(teml, **kwargs), fg=fg)
def echo_by(name, fg="green", **kwargs):
    '''Write to the console using a template file'''
click.secho(render_by(name, **kwargs), fg=fg)
def force_bool(value):
    '''Force conversion to a boolean'''
try:
return strtobool(u"{}".format(value)) == 1
    except Exception:
pass
return False
def get_mimetype(file_name):
    '''Return the mimetype of the file as [type, subtype]'''
if not file_name or file_name.startswith('__MACOSX/'):
return [None, None]
name, _x = guess_type(file_name)
return name and name.split('/') or [None, None]
def list_to_choices(choices):
return tuple((choices.index(i), i) for i in choices)
def to_gfm(text, safe=True):
'''Github Favored Markdown'''
md = markdown.Markdown(extensions=[GithubFlavoredMarkdownExtension()])
return _S(md.convert(text)) if safe else md.convert(text)
def convert(source, format='yaml'):
if format in ['yaml', 'yml']:
        return yaml.safe_load(source)
if format == 'json':
return json.loads(source)
def load_template(name):
    '''Return the source of the template specified by name'''
return loader.get_template(name).template.source
def time_serial():
    '''Return the current time as a hex-encoded serial value'''
    return binascii.hexlify(struct.pack('d', time.time())).decode('ascii')
def url(url_string):
'''urlparse'''
return urlparse(url_string)
def permcode_items(perm_code):
p = re.split(r"[._]", perm_code) + [None, None, None]
return dict(zip(['app_label', 'action', 'model'], p[:3]))
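# Hedged demo of the pure helpers above (assumes this module is importable,
# i.e. Django and the other third-party dependencies are installed and configured).
if __name__ == "__main__":
    print(permcode_items("blog.add_post"))  # {'app_label': 'blog', 'action': 'add', 'model': 'post'}
    print(force_bool("yes"), force_bool("0"), force_bool(None))  # True False False
    print(get_mimetype("report.pdf"))  # ['application', 'pdf']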
| python |
"""
Evaluate the true Fourier coefficients of a given function x(1-x),
generate the domain based on that and define the model Q:\Lambda \to D
"""
import sympy
from inversefuns.utilities import get_coef, coef_domain, fourier_exp_vec
import numpy as np
param_len = 5
t=np.array((0.1,0.2,0.4,0.5,0.7))
period0 = 1.0
def true_param():
x = sympy.symbols('x')
# This will take some time because we are evaluating oscillatory function integration
an, bn = get_coef(expr=(1-x)*(x), vari=x, trun=(param_len-1), period = period0)
return an, bn
def my_model_domain(pow=-1,halfwidth0=0.5):
an = bn = np.zeros(param_len)
domain = coef_domain(an, bn, pow=pow, halfwidth0=halfwidth0)
return domain
def my_model(parameter_samples):
num_samples = parameter_samples.shape[0]
if t.shape:
QoI_samples = np.zeros((num_samples, t.shape[0]))
else:
QoI_samples = np.zeros((num_samples, 1))
an = parameter_samples[:, 0::2]
bn = parameter_samples[:, 1::2]
for i in range(0, num_samples):
QoI_samples[i, :] = fourier_exp_vec(t,an[i,:],bn[i,:])
return QoI_samples
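# Hedged usage sketch (assumes the inversefuns utilities behave as they are
# used above; no new API is introduced here):
#   domain = my_model_domain(pow=-1, halfwidth0=0.5)  # box bounds per Fourier coefficient
#   parameter samples drawn inside `domain` can then be passed to my_model(),
#   which returns one row of Fourier-series values per sample, evaluated at the fixed times t.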
| python |
"""Base Class for a Solver. This class contains the different methods that
can be used to solve an environment/problem. There are methods for
mini-batch training, control, etc...
The idea is that this class will contain all the methods that the different
algorithms would need. Then we can simply call this class in the solver scripts
and use its methods.
I'm still torn between using a class or just using a script.
"""
from .evaluator import Evaluator
from .interrogator import Interrogator
import torch
class Solver(object):
"""This class makes absolute sense because there are many types of training
depending on the task. For this reason, in the future, this class can easily
include all instances of such training routines. Of course, transparent to
the user -which is the ultimate goal, complete transparency-.
"""
def __init__(self, slv_params):
print("Creating Solver")
self.env = slv_params['environment']
self.alg = slv_params['algorithm']
self.logger = slv_params['logger']
self.evaluator = Evaluator()
self.interrogator = Interrogator()
def forward(self):
self.interrogator.set_inference(self.alg.model, self.env)
def backward(self):
self.evaluator.evaluate(self.env, self.interrogator.inference)
feedback = (self.evaluator.score)
self.alg.step(feedback)
self.alg.print_state()
def save(self, path=''):
"""Only works with my algorithms, not with SGD."""
fn = path+"model_elite.pth"
torch.save(self.alg.model.state_dict(), fn)
def save_pool_weights(self, models, path):
for i, model in enumerate(models):
fn = path+"model_"+str(i)+".pth"
torch.save(model.state_dict(), fn)
def save_elite_weights(self, path, name=''):
if name == '':
name = "model_elite.pth"
else:
name = name+'.pth'
fn = path+name
torch.save(self.alg.model.state_dict(), fn)
def load(self, path, name="model_elite"):
"""Only works with my algorithms, not with SGD."""
fn = path+name+".pth"
print("Loading weights in: " + fn)
self.alg.model.load_state_dict(torch.load(fn))
self.alg.model.eval()
#
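# Hedged usage sketch (the env/alg/logger objects below are hypothetical
# placeholders supplied by the calling script, not part of this module):
#
#   slv_params = {'environment': env, 'algorithm': alg, 'logger': logger}
#   solver = Solver(slv_params)
#   for _ in range(n_generations):
#       solver.forward()   # run inference on the environment
#       solver.backward()  # evaluate the inference and step the algorithm
#   solver.save(path='models/')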
| python |
import re
mystring='My ip address is 10.10.10.20 and by subnet mask is 255.255.255.255'
if (re.search("ip address",mystring)):
ipaddregex=re.search("ip address is \d+.\d+.\d+.\d+",mystring)
ipaddregex=ipaddregex.group(0)
ipaddress=ipaddregex.replace("ip address is ","")
print ("IP address is :",ipaddress)
if (re.search("subnet mask",mystring)):
ipaddregex=re.search("subnet mask is \d+.\d+.\d+.\d+",mystring)
ipaddregex=ipaddregex.group(0)
ipaddress=ipaddregex.replace("subnet mask is ","")
print ("Subnet mask is :",ipaddress)
| python |
import sys
import PyFBA.metabolism
class Reaction:
"""
A reaction is the central concept of metabolism and is the conversion of substrates to products.
    The reaction describes what we know. At a bare minimum we need a name for the reaction. The name can either be the
reaction id (e.g. modelSEED or KEGG id), or another name for this reaction.
A reaction is an object that describes how to get from one compound to another. We need to know what the compound(s)
on the left of the equation are, what the compounds on the right of the reaction are, and the probability that the
reaction proceeds in either direction. If the reaction is truly reversible the probability can be 1 in both cases.
If it is unidirectional the probability can be 0 in one direction.
The likelihood that a reaction completes will be some product of its delta G and its p. We could also do something
simpler, e.g. if there is a -ve delta G (favorable reaction) we can increase p and if there is a +ve delta G
(unfavorable reaction) we can decrease p.
The direction and reversible is the direction that the equation can run.
Acceptable values are:
====== ===========================
Value Meaning
====== ===========================
None We don't know the direction
> Left to right
< Right to left
= Bidirectional
====== ===========================
:ivar rctn_id: The reaction ID
:ivar readable_name: The name of the reaction
:ivar description: A description of the reaction
:ivar equation: The reaction equation
    :ivar direction: The direction of the reaction (<, =, >, or None)
:ivar gfdirection: The possible gapfilled direction
:ivar ntdirection: The non-template direction (before correcting for templates)
:ivar left_compounds: A set of CompoundWithLocations on the left side of the reaction
:ivar left_abundance: A dict of the CompoundWithLocations on the left and their abundance
:ivar right_compounds: The set of CompoundWithLocations on the right side of the equation
:ivar right_abundance: A dict of the CompoundWithLocations on the right and their abundance
:ivar lower_bound: The lower bound for the reaction
:ivar upper_bound: The upper bound for the reaction
:ivar pLR: The probability the reaction proceeds left to right
:ivar pRL: The probability the reaction proceeds right to left
:ivar enzymes: The enzyme complex IDs involved in the reaction
:ivar pegs: The protein-encoding genes involved in the reaction
:ivar deltaG: The delta G
:ivar deltaG_error: The error in the delta G
:ivar inp: Whether the reaction is an input reaction
:ivar outp: Whether the reaction is an output reaction
:ivar is_transport: Whether the reaction is a transport reaction (imports or exports something)
:ivar ran: Boolean to note whether the reaction ran
:ivar is_biomass_reaction: Boolean to note whether this is a biomass reaction
:ivar biomass_direction: If it is a biomass reaction, what is the direction
:ivar is_gapfilled: Boolean to note whether the reaction was gapfilled
:ivar gapfill_method: If the reaction was gapfilled, how was it gapfilled
:ivar is_uptake_secretion: Is the reaction involved in uptake of compounds or secretion of compounds.
"""
def __init__(self, rctn_id, readable_name=None, description=None, equation=None, direction=None):
"""
Instantiate a reaction
:param rctn_id: the reaction id
:param readable_name: a human readable name. This was refactored from name to make it more unique
:param description: a description of the reaction
:param equation: the equation for the reaction
:param direction: the direction of the reaction
"""
self.id = rctn_id
self.model_seed_id = rctn_id
self.readable_name = readable_name
self.description = description
self.equation = equation
self.direction = direction
self.gfdirection = direction # the gap filling direction
self.ntdirection = direction # the non-template driven direction
self.left_compounds = set() # type: set[PyFBA.metabolism.CompoundWithLocation]
self.left_abundance = {}
self.right_compounds = set() # type: set[PyFBA.metabolism.CompoundWithLocation]
self.right_abundance = {}
self.lower_bound = None
self.upper_bound = None
self.pLR = 0
self.pRL = 0
self.enzymes = set()
self.ec_numbers = []
self.pegs = set()
self.deltaG_error = 0
self.deltaG = 0
self.inp = False
self.outp = False
self.is_transport = False
self.ran = False
self.is_biomass_reaction = False
self.biomass_direction = False
self.is_gapfilled = False
self.gapfill_method = ""
self.is_uptake_secretion = False
self.aliases = []
def __eq__(self, other):
"""
Two reactions are the same if they have the same left and
right products, but not necessarily the same names or reactions.
Note that we don't care whether the left and right (the
directionality) is the same in our two comparisons
:param other: The other reaction
:type other: Reaction
:return: Boolean
:rtype: bool
"""
if isinstance(other, Reaction):
return (self.id == other.id or
(self.left_compounds, self.right_compounds) ==
(other.left_compounds, other.right_compounds) or
(self.left_compounds, self.right_compounds) ==
(other.right_compounds, other.left_compounds)
)
else:
raise NotImplementedError(f"Comparing Reaction with {type(other)} is not implemented")
def __cmp__(self, other):
"""
Compare whether two things are the same
:param other: The other reaction
:type other: Reaction
:return: an integer, zero if they are the same
:rtype: int
"""
if isinstance(other, Reaction):
            if self.__eq__(other):
return 0
else:
return 1
else:
raise NotImplementedError(f"Comparing Reaction with {type(other)} is not implemented")
def __ne__(self, other):
"""
Are these not equal?
:param other: The other reaction
:type other: Reaction
:return: Boolean
:rtype: bool
"""
try:
result = self.__eq__(other)
except NotImplementedError:
return True
return not result
def __hash__(self):
"""
The hash function is based on the name of the reaction.
:rtype: int
"""
return hash((self.id, self.readable_name))
def __str__(self):
"""
The string version of the reaction.
:rtype: str
"""
if self.readable_name:
return f"{self.id}: {self.readable_name}"
else:
return f"{self.id}: {self.equation}"
"""
Since we have complex data structures, we can't just pickle them and unpickle them with aplomb!
In fact, this is affecting deep/shallow copy, and we need to ensure that we use copy.deepcopy()
at all times, otherwise the data structures are not copied correctly.
These two methods correctly allow us to pickle the data structures. Note that we have
CompoundWithLocation objects, and we need both the object and its abundance to correctly create the pickle.
"""
def __getstate__(self):
"""
The state that the object is saved or copied as. We override the left/right compounds and abundances
with simple arrays of data. This is lossy - we are losing the connections between compounds and data
and we probably need to reconstruct that after pickling/unpickling the reactions.
:return:
"""
state = self.__dict__.copy()
state['left_compounds'] = []
state['right_compounds'] = []
state['left_abundance'] = {}
state['right_abundance'] = {}
for l in self.left_compounds:
state['left_compounds'].append([l.id, l.name, l.location])
state['left_abundance'][f"{l.id} :: {l.name} :: {l.location}"] = self.left_abundance[l]
for r in self.right_compounds:
state['right_compounds'].append([r.id, r.name, r.location])
state['right_abundance'][f"{r.id} :: {r.name} :: {r.location}"] = self.right_abundance[r]
return state
def __setstate__(self, state):
"""
Create a new reaction from a saved state. This is from __getstate__ eg. when pickled.
:param state: the state that was saved.
:return:
"""
left = set()
right = set()
left_abundance = {}
right_abundance = {}
for l in state['left_compounds']:
c = PyFBA.metabolism.CompoundWithLocation(id=l[0], name=l[1], location=l[2])
left.add(c)
left_abundance[c] = state['left_abundance'][f"{l[0]} :: {l[1]} :: {l[2]}"]
state['left_compounds'] = left
state['left_abundance'] = left_abundance
for r in state['right_compounds']:
c = PyFBA.metabolism.CompoundWithLocation(id=r[0], name=r[1], location=r[2])
right.add(c)
right_abundance[c] = state['right_abundance'][f"{r[0]} :: {r[1]} :: {r[2]}"]
state['right_compounds'] = right
state['right_abundance'] = right_abundance
self.__dict__.update(state)
def set_direction(self, direction):
"""
Set the direction of the reaction.
:param direction: The direction of the reaction
:type direction: str
:rtype: str
:return: The current direction
"""
allowable_directions = {'>', '<', '=', None}
if direction in allowable_directions:
self.direction = direction
if not self.gfdirection:
self.gfdirection = direction
else:
sys.stderr.write("Direction: " + str(direction) + " is not a permitted direction. Ignored\n")
self.direction = None
return self.direction
def add_left_compounds(self, cmpds):
"""
The compounds on the left are a set of compounds that the reaction typically uses as substrates.
:param cmpds: The compounds that should be added
:type cmpds: set[PyFBA.metabolism.CompoundWithLocation]
"""
if isinstance(cmpds, set):
# choose one element. next(iter(cmpds)) does not remove the element
if not isinstance(next(iter(cmpds)), PyFBA.metabolism.CompoundWithLocation):
raise TypeError(f"Starting with v.2 reactions need PyFBA.metabolism.CompoundWithLocation objects not {type(next(iter(cmpds)))}")
self.left_compounds.update(cmpds)
elif isinstance(cmpds, PyFBA.metabolism.CompoundWithLocation):
# add a single compound
self.left_compounds.add(cmpds)
else:
raise TypeError("Compounds must be a set of CompoundWithLocation")
def set_left_compound_abundance(self, cmpd, abundance):
"""
Set the abundance of a compound on the left side of the equation.
:param cmpd: The compound to set the abundance for
:type cmpd: PyFBA.metabolism.CompoundWithLocation
:param abundance: The amount of that abundance
:type abundance: float | int
"""
if cmpd not in self.left_compounds:
raise KeyError(f"{cmpd} is not in left compounds. Please add it before trying to set the abundance")
if isinstance(abundance, float):
self.left_abundance[cmpd] = abundance
elif isinstance(abundance, int):
self.left_abundance[cmpd] = float(abundance)
else:
raise TypeError("Abundance must be an int or a float")
def get_left_compound_abundance(self, cmpd):
"""
Get the abundance of the compound on the left side of the equation.
:param cmpd: The compound to get the abundance of
:type cmpd: PyFBA.metabolism.CompoundWithLocation
:return: The compounds abundance
:rtype: float
"""
if cmpd in self.left_abundance:
return self.left_abundance[cmpd]
else:
raise KeyError(f"In the reaction {self.readable_name} (reaction id: {self.id}), you do not have" +
f" {cmpd} on the left hand side of the equation: {self.equation}")
def number_of_left_compounds(self):
"""
The number of compounds on the left side of the equation.
:rtype: int
"""
return len(self.left_compounds)
def add_right_compounds(self, cmpds):
"""
        The compounds on the right are a set of compounds that the reaction typically produces as products.
:param cmpds: The compounds that should be added
:type cmpds: set[PyFBA.metabolism.CompoundWithLocation]
"""
if isinstance(cmpds, set):
# choose one element. next(iter(cmpds)) does not remove the element
if not isinstance(next(iter(cmpds)), PyFBA.metabolism.CompoundWithLocation):
raise TypeError("Starting with v.2 reactions need PyFBA.metabolism.CompoundWithLocation objects")
self.right_compounds.update(cmpds)
elif isinstance(cmpds, PyFBA.metabolism.CompoundWithLocation):
# add a single compound
self.right_compounds.add(cmpds)
else:
raise TypeError("Compounds must be a set of CompoundWithLocation")
def set_right_compound_abundance(self, cmpd, abundance):
"""
Set the abundance of a compound on the right side of the equation
:param cmpd: The compound to set the abundance for
:type cmpd: PyFBA.metabolism.CompoundWithLocation
:param abundance: The amount of that abundance
:type abundance: float | int
"""
if cmpd not in self.right_compounds:
raise KeyError(f"{cmpd} is not in right compounds. " + " Please add it before trying to set the abundance")
if isinstance(abundance, float):
self.right_abundance[cmpd] = abundance
elif isinstance(abundance, int):
self.right_abundance[cmpd] = float(abundance)
else:
raise TypeError("Abundance must be an int or a float")
def get_right_compound_abundance(self, cmpd):
"""
Get the abundance of the compound on the right side of the equation.
:param cmpd: The compound to get the abundance of
:type cmpd: Compound
:return: The compounds abundance
:rtype: float
"""
if cmpd in self.right_abundance:
return self.right_abundance[cmpd]
else:
raise KeyError(f"In the reaction {self.readable_name} (reaction id: {self.id}), you do not have" +
f" {cmpd} on the right hand side of the equation: {self.equation}")
def number_of_right_compounds(self):
"""
The number of compounds on the right side of the equation.
:rtype: int
"""
return len(self.right_compounds)
def all_compounds(self):
"""
Get all the compounds involved in this reaction.
:return: A set of all the compounds
:rtype: set
"""
return self.left_compounds.union(self.right_compounds)
def number_of_compounds(self):
"""
Get the total number of compounds involved in this reaction.
:rtype: int
"""
return len(self.all_compounds())
def has(self, cmpd):
"""
Does this reaction have a compound? Just returns true if the compound is present somewhere in the reaction.
:param cmpd: The compound to test for
:type cmpd: Compound
:rtype: bool
"""
return cmpd in self.left_compounds or cmpd in self.right_compounds
def opposite_sides(self, cmpd1, cmpd2):
"""
Are these two compounds on opposite sides of the reaction?
:param cmpd1: The first compound
:type cmpd1: Compound
:param cmpd2: The second compound
:type cmpd2: Compound
:return: Whether the compounds are on opposite sides
:rtype: bool
"""
if not self.has(cmpd1):
raise ValueError(str(cmpd1) + " is not in this reaction")
if not self.has(cmpd2):
raise ValueError(str(cmpd2) + " is not in this reaction")
if cmpd1 in self.left_compounds and cmpd2 in self.right_compounds:
return True
if cmpd1 in self.right_compounds and cmpd2 in self.left_compounds:
return True
return False
def set_probability_left_to_right(self, p):
"""
Set the probability of the reaction running left to right. Note you can also access this as reaction.pLR
:param p: The probablity
:type p: float
"""
if isinstance(p, float):
self.pLR = p
elif isinstance(p, int):
self.pLR = float(p)
else:
raise TypeError("The probability must be an int or a float")
def get_probability_left_to_right(self):
"""
Get the probability of the reaction running left to right. Note you can also access this as reaction.pLR
:return: The probablity
:rtype p: float
"""
return self.pLR
def set_probability_right_to_left(self, p):
"""
Set the probability of the reaction running right to left Note you can also access this as reaction.pRL
:param p: The probablity
:type p: float
"""
if isinstance(p, float):
self.pRL = p
elif isinstance(p, int):
self.pRL = float(p)
else:
raise TypeError("The probability must be an int or a float")
def get_probability_right_to_left(self):
"""
Get the probability of the reaction running right to left. Note you can also access this as reaction.pRL
:return: The probablity
:rtype p: float
"""
return self.pRL
def add_enzymes(self, enz):
"""
Add one or more enzymes that completes this reaction.
:param enz: A set of enzymes that you want to add
:type enz: set
"""
if isinstance(enz, set):
self.enzymes.update(enz)
else:
raise TypeError("You need to supply a set of enzymes")
def has_enzyme(self, enz):
"""
Check whether an enzyme is involved in this reaction.
:param enz: An Enzyme object
:type enz: Enzyme
:return: Whether we have this enzyme
:rtype: bool
"""
return enz in self.enzymes
def all_enzymes(self):
"""
Get all the enzymes involved in this reaction. Returns a set of complex IDs.
:rtype: set
"""
return self.enzymes
def number_of_enzymes(self):
"""
Gets the number of enzymes involved in this reaction.
:rtype: int
"""
return len(self.enzymes)
def add_pegs(self, pegs):
"""
Add one or more pegs to this reaction. Pegs must be a set.
:param pegs: The pegs to add to the reaction
:type pegs: set
"""
if isinstance(pegs, set):
self.pegs.update(pegs)
else:
raise TypeError("pegs must be a set")
def has_peg(self, peg):
"""
Check whether a peg is involved in this reaction.
:param peg: The peg to check for
:type peg: str
:rtype: bool
"""
return peg in self.pegs
def set_deltaG(self, dg):
"""
Set the value for delta G (Gibbs free energy) for this reaction. Recall -ve deltaG means the reaction is
favorable.
:param dg: The delta G of the reaction
:type dg: float
"""
if isinstance(dg, float):
self.deltaG = dg
elif isinstance(dg, int):
self.deltaG = float(dg)
else:
raise TypeError("The delta G must be an int or a float")
def get_deltaG(self):
"""
Get the value for delta G (Gibbs free energy) for this reaction.
:rtype: float
"""
return self.deltaG
def check_input_output(self):
"""
Check whether this reaction is an input or output reaction.
This is called when we ask is_input_reaction / is_output_reaction and both inp and outp are False
"""
# do we have external compounds on the left ... then it is an input reaction
for c in self.left_compounds:
if c.location == 'e':
self.inp = True
for c in self.right_compounds:
if c.location == 'e':
self.outp = True
def toggle_input_reaction(self):
"""
Set this reaction as an input reaction. This only applies to
this reaction, so if it is true we set it false, else we set
it true
"""
if self.inp:
self.inp = False
else:
self.inp = True
def is_input_reaction(self):
"""
Is this an input reaction?
:rtype: bool
"""
if self.inp is False and self.outp is False:
self.check_input_output()
return self.inp
def toggle_output_reaction(self):
"""
Set this reaction as an output reaction. This only applies to
this reaction, so if it is true we set it false, else we set
it true
"""
if self.outp:
self.outp = False
else:
self.outp = True
def is_output_reaction(self):
"""
Is this an output reaction?
:rtype: bool
"""
if self.inp is False and self.outp is False:
self.check_input_output()
return self.outp
def reverse_reaction(self):
"""
Reverse the reaction - move the left compounds to the right,
and vice versa. We also switch the abundances and the pLR and
pRL.
We also negate the deltaG, since that should be the other way
around now.
At the moment we don't switch input/output, not sure if we
need to do that.
"""
(self.left_compounds, self.right_compounds) = (self.right_compounds, self.left_compounds)
(self.left_abundance, self.right_abundance) = (self.right_abundance, self.left_abundance)
(self.inp, self.outp) = (self.outp, self.inp)
# we only need to reverse two directions
if self.direction == '>':
self.direction = '<'
elif self.direction == '<':
self.direction = '>'
# we only need to reverse two gfdirections
if self.gfdirection == '>':
self.gfdirection = '<'
elif self.gfdirection == '<':
self.gfdirection = '>'
        if self.lower_bound is not None and self.upper_bound is not None:
lbtemp = 0 - self.lower_bound
self.lower_bound = 0 - self.upper_bound
self.upper_bound = lbtemp
(self.pLR, self.pRL) = (self.pRL, self.pLR)
self.deltaG = -self.deltaG
def add_attribute(self, key, value):
"""
Add an attribute to this class
"""
setattr(self, key, value)
def get_attribute(self, key):
"""
Retrieve an attribute
"""
return getattr(self, key)
def reset_bounds(self):
"""
reset the bounds of this reaction. If we are using this in gapfilling, we need to reset the bounds
so we can calculate appropriately.
:return: None
"""
self.lower_bound = None
        self.upper_bound = None
| python |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.models.utils import interpolate_as
def test_interpolate_as():
    source = torch.rand((1, 5, 4, 4))
    target = torch.rand((1, 1, 16, 16))
# Test 4D source and target
result = interpolate_as(source, target)
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D target
result = interpolate_as(source, target.squeeze(0))
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D source
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
# Test type(target) == np.ndarray
target = np.random.rand(16, 16)
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
| python |
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
class Punto:
"""Representación de un punto en coordenadas polares.
:param x: coordenada x del punto.
:x type: int
:param y: coordenada y del punto.
:y type: int
"""
def __init__(self, x: int = 0, y: int = 0) -> None:
self.x = x
self.y = y
def cuadrante(self) -> str:
"""Devuelve el cuadrante en el que se encuentra el punto."""
return f"{self} se encuentra en el {self._posicion()}."
def _posicion(self) -> str:
if self.x > 0 and self.y > 0:
return "I° cuadrante"
elif self.x < 0 and self.y > 0:
return "II° cuadrante"
elif self.x < 0 and self.y < 0:
return "III° cuadrante"
elif self.x > 0 and self.y < 0:
return "IV° cuadrante"
elif self.x != 0 and self.y == 0:
return "eje X"
elif self.x == 0 and self.y != 0:
return "eje Y"
else:
return "origen"
def __repr__(self) -> str:
return f"({self.x}, {self.y})"
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pdb import set_trace
import re
def check_entitys(text):
ptrn = r"(&{1})([\w-]+)([;]{0,1})"
lst = []
for m in re.finditer(ptrn, text):
s = m.group()
g2 = m.groups()[2]
t = 0 if g2 == ';' else 1
lst.append({'s': s, 't': t})
return lst
def check_entity_brackets(text):
ptrn = r"([;(])(\S+)(\)*)"
lst = []
for m in re.finditer(ptrn, text):
s = m.group()
nop = s.count('(')
noc = s.count(')')
if nop+noc == 0:
continue
s = s if s.find(';') < 0 else s[1:]
t = 0 if nop == noc else 1
e = {'s': s, 't': t}
lst.append(e)
return lst
# check patterns of the "from ... to" kind (opening pattern po, closing pattern pc)
def check_overflow(text, po, pc):
lst = []
pc = re.compile(pc)
po = re.compile(po)
so_last = ""
c1_ = 0
for mo in re.finditer(po, text):
so = mo.group()
o0 = mo.start()
o1 = mo.end()
js = {'so': so,
'sc': '',
's': '',
't': 0}
if o0 < c1_:
l = len(lst)-1
lst[l]['s'] = so_last
lst[l]['t'] = 1
so_last = so
mc = re.search(pc, text[o1:])
if mc is None:
js['s'] = so
js['t'] = 1
lst.append(js)
continue
c0 = mc.start()
c1 = mc.end()
c1_ = o1+c0
s = text[o0:o1+c1]
js['s'] = s
js['sc'] = mc.group()
lst.append(js)
return lst
OVER_KEY_TYPE_LIST = (
('g3', '{3%',0),
('g2', '{2%',0),
('g1', '{1%',0),
('g0', '{0%',0),
('gu', '{_' ,0),
('qu', '[_' ,1),
('g', '{' ,0),
('q', '[' ,1)
)
def fill_tag_over_lst(tag_lst):
def find_over_key_type(tag_op):
k=None
t=None
for kpt in OVER_KEY_TYPE_LIST:
if tag_op==kpt[1]:
k=kpt[0]
t=kpt[2]
break
return k,t
lst=[]
for tag in tag_lst:
key,func_type=find_over_key_type(tag[1])
if key is None:
continue
po = tag[1]
pc = tag[2]
so=po
sc=pc
if po == "[":
po = po.replace('[', r'\[[^_]')
pc = pc.replace(']', r'[^_]\]')
elif po == "[_":
po = po.replace('[', r'\[')
pc = pc.replace(']', r'\]')
elif po == "{":
po = po.replace('{', r'\{[^_]\w')
pc = pc.replace('}', r'\w[^_]\}')
name = tag[0]
lst.append([func_type,name,so,sc,po,pc])
return lst
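# Hedged example run: check_entitys() keeps well-formed entities (t == 0) and
# flags entity-like tokens missing their closing ';' (t == 1).
if __name__ == "__main__":
    print(check_entitys("&amp; and &broken plus &nbsp;"))
    # [{'s': '&amp;', 't': 0}, {'s': '&broken', 't': 1}, {'s': '&nbsp;', 't': 0}]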
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-07-30 15:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0011_auto_20170718_2027'),
]
operations = [
migrations.AlterField(
model_name='course',
name='published_on',
field=models.DateField(blank=True, null=True),
),
migrations.AlterUniqueTogether(
name='coursemember',
unique_together=set([('course', 'member', 'role')]),
),
]
| python |
import collections.abc, json, pdb, os, numpy as np, cv2, threading, math, io
import torch
from torch.autograd import Variable
def open_image(fn):
""" Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
"""
flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
if not os.path.exists(fn):
raise OSError('No such file or directory: {}'.format(fn))
elif os.path.isdir(fn):
raise OSError('Is a directory: {}'.format(fn))
else:
try:
            im = cv2.imread(str(fn), flags)
            if im is None: raise OSError(f'File not recognized by opencv: {fn}')
            im = im.astype(np.float32)/255
print(f'Image shape is {im.shape}')
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
except Exception as e:
raise OSError('Error handling image at: {}'.format(fn)) from e
# getting val_tfms to work without fastai import
from enum import IntEnum
class TfmType(IntEnum):
""" Type of transformation.
Parameters
IntEnum: predefined types of transformations
NO: the default, y does not get transformed when x is transformed.
PIXEL: x and y are images and should be transformed in the same way.
Example: image segmentation.
COORD: y are coordinates (i.e bounding boxes)
CLASS: y are class labels (same behaviour as PIXEL, except no normalization)
"""
NO = 1
PIXEL = 2
COORD = 3
CLASS = 4
class CropType(IntEnum):
""" Type of image cropping.
"""
RANDOM = 1
CENTER = 2
NO = 3
GOOGLENET = 4
class ChannelOrder():
'''
changes image array shape from (h, w, 3) to (3, h, w).
tfm_y decides the transformation done to the y element.
'''
def __init__(self, tfm_y=TfmType.NO): self.tfm_y=tfm_y
def __call__(self, x, y):
x = np.rollaxis(x, 2)
#if isinstance(y,np.ndarray) and (len(y.shape)==3):
if self.tfm_y==TfmType.PIXEL: y = np.rollaxis(y, 2)
elif self.tfm_y==TfmType.CLASS: y = y[...,0]
return x,y
class Transforms():
def __init__(self, sz, tfms, normalizer, denorm, crop_type=CropType.CENTER,
tfm_y=TfmType.NO, sz_y=None):
if sz_y is None: sz_y = sz
self.sz,self.denorm,self.norm,self.sz_y = sz,denorm,normalizer,sz_y
crop_tfm = crop_fn_lu[crop_type](sz, tfm_y, sz_y)
self.tfms = tfms + [crop_tfm, normalizer, ChannelOrder(tfm_y)]
def __call__(self, im, y=None): return compose(im, y, self.tfms)
def __repr__(self): return str(self.tfms)
def A(*a): return np.array(a[0]) if len(a)==1 else [np.array(o) for o in a]
class Denormalize():
""" De-normalizes an image, returning it to original format.
"""
def __init__(self, m, s):
self.m=np.array(m, dtype=np.float32)
self.s=np.array(s, dtype=np.float32)
def __call__(self, x): return x*self.s+self.m
class Normalize():
""" Normalizes an image to zero mean and unit standard deviation, given the mean m and std s of the original image """
def __init__(self, m, s, tfm_y=TfmType.NO):
self.m=np.array(m, dtype=np.float32)
self.s=np.array(s, dtype=np.float32)
self.tfm_y=tfm_y
def __call__(self, x, y=None):
x = (x-self.m)/self.s
if self.tfm_y==TfmType.PIXEL and y is not None: y = (y-self.m)/self.s
return x,y
class Transform():
""" A class that represents a transform.
All other transforms should subclass it. All subclasses should override
do_transform.
Arguments
---------
tfm_y : TfmType
type of transform
"""
def __init__(self, tfm_y=TfmType.NO):
self.tfm_y=tfm_y
self.store = threading.local()
def set_state(self): pass
def __call__(self, x, y):
self.set_state()
x,y = ((self.transform(x),y) if self.tfm_y==TfmType.NO
else self.transform(x,y) if self.tfm_y in (TfmType.PIXEL, TfmType.CLASS)
else self.transform_coord(x,y))
return x, y
def transform_coord(self, x, y): return self.transform(x),y
def transform(self, x, y=None):
x = self.do_transform(x,False)
return (x, self.do_transform(y,True)) if y is not None else x
# @abstractmethod
# def do_transform(self, x, is_y): raise NotImplementedError
class CoordTransform(Transform):
""" A coordinate transform. """
@staticmethod
def make_square(y, x):
r,c,*_ = x.shape
y1 = np.zeros((r, c))
        y = y.astype(int)
y1[y[0]:y[2], y[1]:y[3]] = 1.
return y1
def map_y(self, y0, x):
y = CoordTransform.make_square(y0, x)
y_tr = self.do_transform(y, True)
return to_bb(y_tr, y)
def transform_coord(self, x, ys):
yp = partition(ys, 4)
y2 = [self.map_y(y,x) for y in yp]
x = self.do_transform(x, False)
return x, np.concatenate(y2)
class Scale(CoordTransform):
""" A transformation that scales the min size to sz.
Arguments:
sz: int
target size to scale minimum size.
tfm_y: TfmType
type of y transformation.
"""
def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
super().__init__(tfm_y)
self.sz,self.sz_y = sz,sz_y
def do_transform(self, x, is_y):
if is_y: return scale_min(x, self.sz_y, cv2.INTER_NEAREST)
else : return scale_min(x, self.sz, cv2.INTER_AREA )
class NoCrop(CoordTransform):
""" A transformation that resize to a square image without cropping.
This transforms (optionally) resizes x,y at with the same parameters.
Arguments:
targ: int
target size of the crop.
tfm_y (TfmType): type of y transformation.
"""
def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
super().__init__(tfm_y)
self.sz,self.sz_y = sz,sz_y
def do_transform(self, x, is_y):
if is_y: return no_crop(x, self.sz_y, cv2.INTER_NEAREST)
else : return no_crop(x, self.sz, cv2.INTER_AREA )
imagenet_stats = A([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
stats = imagenet_stats
tfm_norm = Normalize(*stats, TfmType.NO)
tfm_denorm = Denormalize(*stats)
def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0, crop_type=None,
tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT):
"""
Generate a standard set of transformations
Arguments
---------
normalizer :
image normalizing function
denorm :
image denormalizing function
sz :
size, sz_y = sz if not specified.
tfms :
iterable collection of transformation functions
max_zoom : float,
maximum zoom
pad : int,
padding on top, left, right and bottom
crop_type :
crop type
tfm_y :
y axis specific transformations
sz_y :
y size, height
pad_mode :
cv2 padding style: repeat, reflect, etc.
Returns
-------
type : ``Transforms``
transformer for specified image operations.
See Also
--------
Transforms: the transformer object returned by this function
"""
if tfm_y is None: tfm_y=TfmType.NO
if tfms is None: tfms=[]
    elif not isinstance(tfms, collections.abc.Iterable): tfms=[tfms]
if sz_y is None: sz_y = sz
scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None
else Scale(sz, tfm_y, sz_y=sz_y)]
if pad: scale.append(AddPadding(pad, mode=pad_mode))
if crop_type!=CropType.GOOGLENET: tfms=scale+tfms
return Transforms(sz, tfms, normalizer, denorm, crop_type,
tfm_y=tfm_y, sz_y=sz_y)
crop_fn_lu = {CropType.NO: NoCrop}
def compose(im, y, fns):
""" apply a collection of transformation functions fns to images
"""
for fn in fns:
#pdb.set_trace()
im, y =fn(im, y)
return im if y is None else (im, y)
def scale_min(im, targ, interpolation=cv2.INTER_AREA):
""" Scales the image so that the smallest axis is of size targ.
Arguments:
im (array): image
targ (int): target size
"""
r,c,*_ = im.shape
ratio = targ/min(r,c)
sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ))
return cv2.resize(im, sz, interpolation=interpolation)
def scale_to(x, ratio, targ):
'''
    Scale x by ratio (rounded down), but never return less than targ.
'''
return max(math.floor(x*ratio), targ)
def crop(im, r, c, sz):
'''
crop image into a square of size sz,
'''
return im[r:r+sz, c:c+sz]
def no_crop(im, min_sz=None, interpolation=cv2.INTER_AREA):
""" Returns a squared resized image """
r,c,*_ = im.shape
if min_sz is None: min_sz = min(r,c)
return cv2.resize(im, (min_sz, min_sz), interpolation=interpolation)
# -------- end val_tfms stuff
def write_test_image(img_bytes, path, file):
if os.path.exists(path):
print(f'Cleaning test dir: {path}')
for root, dirs, files in os.walk(path):
for f in files:
os.unlink(os.path.join(root, f))
else:
print(f'Creating test dir: {path}')
os.makedirs(path, exist_ok=True)
f = open(file, 'wb')
f.write(img_bytes)
def preproc_img(img, sz):
val_tfm = image_gen(tfm_norm, tfm_denorm, sz, pad=0, crop_type=CropType.NO, tfm_y=None, sz_y=None)
trans_img = val_tfm(img)
print(f'Image shape: {trans_img.shape}')
return Variable(torch.FloatTensor(trans_img)).unsqueeze_(0)
def get_file_with_ext(path, ext):
if type(ext) == list:
ext = tuple(ext)
if os.path.isdir(path):
for file in os.listdir(path):
if file.endswith(ext):
return os.path.join(path, file)
    return None
| python |
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modifed
# versions of this software, you first contact the authors at
# [email protected].
dirname = 'elements'
if not DIM_3:
clib = 'oof2engine'
else:
clib = 'oof3dengine'
cfiles = ['quad4.C', 'quad4_8.C', 'quad8.C',
# 'quad8_4.C',
'quad9.C',
'tri3.C', 'tri3_6.C', 'tri6.C', 'tri6_3.C',
'quad4shapefunction.C', 'quad8shapefunction.C',
'quad9shapefunction.C', 'tri3shapefunction.C',
'tri6shapefunction.C',
'edge2.C','edge2shapefunction.C',
'edge3.C','edge3shapefunction.C',
'edge3sub.C','edge3super.C']#Interface branch
hfiles = ['quad4shapefunction.h', 'quad8shapefunction.h',
'quad9shapefunction.h', 'tri3shapefunction.h',
'tri6shapefunction.h',
'edge2shapefunction.h','edge3shapefunction.h']#Interface branch
swigfiles = ['quad4.swg', 'quad4_8.swg', 'quad8.swg',
# 'quad8_4.swg',
'quad9.swg',
'tri3.swg', 'tri3_6.swg', 'tri6.swg', 'tri6_3.swg']
swigpyfiles = ['quad4.spy', 'quad4_8.spy', 'quad8.spy', 'quad8_4.spy',
'quad9.spy',
'tri3.spy', 'tri3_6.spy', 'tri6.spy', 'tri6_3.spy']
if DIM_3:
cfiles.extend(['tet4.C','tet4shapefunction.C',
'tet10.C', 'tet10shapefunction.C'])
hfiles.extend(['tet4shapefunction.h',
'tet10shapefunction.h'])
swigfiles.extend(['tet4.swg', 'tet10.swg'])
swigpyfiles.extend(['tet4.spy', 'tet10.spy'])
pyfiles = ['initialize.py']
| python |
# DP over (node, coins spent): dp[t][j] = minimum total time to reach node t
# with j gold coins spent; E[t] stores the incoming edges (source, cost, time).
N = int(input())
C = int(input())
V = int(input())
S = list(map(lambda x: int(x)-1, input().split()))
T = list(map(lambda x: int(x)-1, input().split()))
Y = list(map(int, input().split()))
M = list(map(int, input().split()))
E = [[] for _ in range(N)]
for f, t, cost, time in zip(S, T, Y, M):
E[t].append((f, cost, time))
INF = 10**7
dp = [[INF] * (C+1) for _ in range(N)]
for i in range(C+1):
dp[0][i] = 0
for t in range(N):
for j in range(C+1):
for f, cost, time in E[t]:
if j >= cost and dp[t][j] > dp[f][j-cost] + time:
dp[t][j] = dp[f][j-cost] + time
print(min(dp[N-1]) if min(dp[N-1]) != INF else -1)
| python |
import datetime
from django.conf import settings
from django.db import models
BLOOD_GROUP_STATUSES = (
('U', 'Urgente'),
('S', 'Stabile'),
('Z', 'Emergenza'),
('E', 'Eccedenza'),
('F', 'Fragile'),
)
class BloodGroup(models.Model):
groupid = models.CharField(max_length=3, unique=True) # AB+, B-, ...
status = models.CharField(
max_length=2,
choices=BLOOD_GROUP_STATUSES,
default='S',
) # choice between U, E ...
def __str__(self):
return self.groupid
class Log(models.Model):
datetime = models.DateTimeField(unique=True)
image = models.ImageField(
upload_to=settings.UPLOAD_METEO,
blank=True
)
twitter_done = models.BooleanField(default=False)
telegram_done = models.BooleanField(default=False)
facebook_done = models.BooleanField(default=False)
@property
def is_completed(self):
return self.twitter_done and self.telegram_done and self.facebook_done
def __str__(self):
if self.datetime:
return self.datetime.replace(microsecond=0).isoformat()
else:
return 'Bad Log entry'
| python |
from rest_framework import serializers
from rest_framework_recursive.fields import RecursiveField
from backend.blog.models import BlogCategory, Tag, Post
class BlogCategorySerializer(serializers.ModelSerializer):
"""Сериализация модели категорий"""
children = serializers.ListField(source='get_children', read_only=True,
child=RecursiveField(), )
class Meta:
model = BlogCategory
fields = ("id", "name", "children", "slug")
class SortPostCategorySerializer(serializers.ModelSerializer):
"""Сериализация категории сортировки постов"""
class Meta:
model = BlogCategory
fields = ("id", "name", "slug")
class TagSerializer(serializers.ModelSerializer):
"""Сериализация тегов"""
class Meta:
model = Tag
fields = ("id", "name")
class PostSerializer(serializers.ModelSerializer):
"""Сериализация списка статей"""
category = BlogCategorySerializer()
tag = TagSerializer(many=True)
class Meta:
model = Post
fields = ("id",
"title",
"mini_text",
"created_date",
"category",
"tag",
"viewed")
class SortPostSerializer(serializers.ModelSerializer):
"""Сериализация постов по категории"""
category = SortPostCategorySerializer()
tag = TagSerializer(many=True)
class Meta:
model = Post
fields = ("id",
"title",
"mini_text",
"created_date",
"category",
"tag",
"viewed")
class PostDetailSerializer(serializers.ModelSerializer):
"""Сериализация полной статьи"""
category = BlogCategorySerializer()
tag = TagSerializer(many=True)
class Meta:
model = Post
fields = ("id",
"author",
"title",
"text",
"image",
"created_date",
"category",
"tag",
"viewed")
| python |
#!/usr/bin/env python
# encoding: utf-8
# this part ensures that stdout, stderr = subprocess.Popen works.
import subprocess
# tussen word = "" kun je de tekst typen die de koe moet uitspreken.
# cowsay staat voor een koe, maar als je een ander karakter wilt zul je de code moeten aanpassen.
# van 'cowsay', naar 'cowsay' '-f' 'hier komt de naam van je karakter'
word="In de hal van kasteel Elseneur."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Stil nu! De schone Ophelia! Nimf, gedenk in uw gebeden al mijn zonden."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Edele heer, hoe gaat het u de laatste tijd?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ik dank u heel goed."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ik heb nog souvenirs van u, die ik al lang terug had willen geven. Hier... neemt u ze."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Nee, nee, ik niet ik heb u nimmer iets gegeven."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="U weet heel goed, heer, dat u 't wel gedaan hebt, en met zó zoete woorden dat hun waarde nog groter werd. Hun geur is nu vervlogen, neem ze dus terug; want voor een edele geest verbleekt de rijkste gift wanneer de gever zich arm aan liefde toont. Hier zijn ze, heer."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Aha! ben je kuis?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Heer"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ben je mooi?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Wat bedoelt uwe hoogheid?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Dat als je kuis en mooi bent, je kuisheid geen omgang met je schoonheid zou mogen toestaan."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Maar, heer, kan schoonheid ooit beter omgang hebben dan met kuisheid?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Jazeker, want de macht van de schoonheid zal de kuisheid eer der in een koppelaarster veranderen, dan dat kuisheid de schoonheid dwingen kan haar te gelijken. Dit was vroeger een paradox, maar nu wordt het door de tijd bewezen. Ik heb je eens liefgehad."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ja, heer, dat hebt u me doen geloven."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Je had me niet moeten geloven, want de deugd kan niet zó geënt worden op onze oude stam, dat er geen zweem van overblijft. Ik heb je niet liefgehad."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Dan ben ik des te meer bedrogen."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ga in een klooster! Waarom zou je zondaars fokken? Ik mag wel zeggen dat ik vrij deugdzaam ben, maar toch zou ik me kunnen beschuldigen van dingen waarom mijn moeder me beter niet had kunnen baren. Ik ben erg hoogmoedig, wraak zuchtig en eergierig, en ik heb meer wandaden voor 't grijpen dan gedachten om ze uit te drukken, verbeelding om ze vorm te geven of tijd om ze te begaan. Wat moeten kerels als ik ook rond kruipen tussen hemel en aarde? Wij zijn aartsschavuiten geloof niemand van ons. Maak dat je in een klooster komt! Waar is je vader?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Thuis, heer."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Laat dan de deuren achter hem dichtdoen, opdat hij nergens anders voor gek kan spelen dan in zijn eigen huis. Vaarwel."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="0 hemelse goedheid, help hem! "
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Mocht je trouwen, dan geef ik je deze vloek als bruidsschat mee, je kunt zo kuis als ijs, zo zuiver als sneeuw zijn, tóch ontkom je niet aan de laster. Ga in een klooster! Vaarwel. Of als je met alle geweld trouwen wilt, trouw dan een idioot, want mannen met hersens weten te goed wat voor monsters je van hen maakt. Naar een klooster en gauw! Vaarwel."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ik weet maar al te goed hoe jullie je beschildert. God heeft je een gezicht gegeven, maar jullie maakt je een ander. Je huppelt en trippelt, je geeft Gods schepselen bijnamen en laat je wulpsheid doorgaan voor argeloosheid. Ga weg, ik wil er niets meer van weten het heeft me gek gemaakt. Ik zeg je, dat er geen huwelijken meer moeten komen. De getrouwden mogen blijven leven op één na - en de ongetrouwden moeten blijven zoals ze zijn. Naar een klooster! Ga! "
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Wat een edele geest is hier verscheurd! Oog, tong en zwaard van hoveling, geleerde en krijgsman, hoop en bloem van onze staat, spiegel der zeden, toonbeeld van beschaving, door eerbetoon omringd... voorgoed verloren. En ik, rampzaligste van alle vrouwen, die honing zoog uit zijn welluidend woord, hoor nu de tonen van dat helder brein verward en schril als een ontstemde beiaard, en zie het ongeëvenaarde beeld van bloesemende jeugd, verdord door waanzin. 0, wee mij, die gezien heeft wat ik zag, zie wat ik zie!"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
def main():
    """ Open the script file and print the conversation. """
    # open the python script file and print the conversation it contains
    # ('conversation.py' is the assumed filename of this script)
    with open('conversation.py') as script:
        print(script.read())  # print the conversation from the script file
#if __name__ == '__main__':
# main()
#def main():
# word = .communicate()
# stdout, stderr = subprocess.Popen(
# ['cowsay', word]).communicate()
#if __name__ == '__main__':
# main() | python |
"""Functions for generating interactive visualizations of 3D models of
trees."""
import os
import numpy as np
import pandas as pd
import geopandas as gpd
import seaborn as sns
import ipyvolume as ipv
from ipywidgets import FloatSlider, VBox, HBox, Accordion, Text, Layout
from forest3d.geometry import make_tree_all_params, get_elevation, Tree
from forest3d.validate_data import tree_list_checker
import warnings
warnings.filterwarnings(
"ignore", message="invalid value encountered in double_scalars")
warnings.filterwarnings(
"ignore", message="invalid value encountered in greater_equal")
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings(
"ignore", message="invalid value encountered in true_divide")
def plot_tree_with_widgets():
"""Creates and interactive plot of a tree crown with widgets to control its
shape.
Returns
--------
tree_plot : ipywidgets HBox widget
widget containing the parameter widgets and a 3D scatter plot widget.
"""
# creating all the widgets for each parameter of the tree model
species = Text(value='Douglas-fir', description='Species')
dbh = FloatSlider(value=5.0, min=0, max=50, step=1.0, description='dbh')
height = FloatSlider(
value=75,
min=0,
max=150,
step=1.0,
description='height',
orientation='vertical')
stem_x = FloatSlider(value=0, min=-10, max=10, step=1.0, description='x')
stem_y = FloatSlider(value=0, min=-10, max=10, step=1.0, description='y')
stem_z = FloatSlider(value=0, min=-10, max=10, step=1.0, description='z')
lean_direction = FloatSlider(
min=0, max=360, step=1.0, description='direction')
lean_severity = FloatSlider(
min=0, max=89, step=1.0, description='severity')
crown_ratio = FloatSlider(
value=0.65,
min=0,
max=1.0,
step=0.01,
description='crown ratio',
orientation='vertical')
crown_radius_E = FloatSlider(
value=10, min=0, max=30, step=1.0, description='east')
crown_radius_N = FloatSlider(
value=10, min=0, max=30, step=1.0, description='north')
crown_radius_W = FloatSlider(
value=10, min=0, max=30, step=1.0, description='west')
crown_radius_S = FloatSlider(
value=10, min=0, max=30, step=1.0, description='south')
crown_edge_height_E = FloatSlider(
value=0.3,
min=0,
max=1,
step=0.01,
description='east',
orientation='vertical')
crown_edge_height_N = FloatSlider(
value=0.3,
min=0,
max=1,
step=0.01,
description='north',
orientation='vertical')
crown_edge_height_W = FloatSlider(
value=0.3,
min=0,
max=1,
step=0.01,
description='west',
orientation='vertical')
crown_edge_height_S = FloatSlider(
value=0.3,
min=0,
max=1,
step=0.01,
description='south',
orientation='vertical')
shape_top_E = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='top, east')
shape_top_N = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='top, north')
shape_top_W = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='top, west')
shape_top_S = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='top, south')
shape_bot_E = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, east')
shape_bot_N = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, north')
shape_bot_W = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, west')
shape_bot_S = FloatSlider(
value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, south')
# Group the parameter widgets into groups of controls
height_controls = HBox([height, crown_ratio])
edge_height_controls = HBox([
crown_edge_height_E, crown_edge_height_N, crown_edge_height_W,
crown_edge_height_S
])
location_controls = VBox([stem_x, stem_y, stem_z])
lean_controls = VBox([lean_direction, lean_severity])
radius_controls = VBox(
[crown_radius_E, crown_radius_N, crown_radius_W, crown_radius_S])
shape_controls = VBox([
shape_top_E, shape_top_N, shape_top_W, shape_top_S, shape_bot_E,
shape_bot_N, shape_bot_W, shape_bot_S
])
# create and expandable user interface
controls = Accordion([
location_controls, height_controls, lean_controls, radius_controls,
edge_height_controls, shape_controls
])
controls.set_title(0, 'Stem Location')
controls.set_title(1, 'Tree Height')
controls.set_title(2, 'Tree Lean')
controls.set_title(3, 'Crown Radius')
controls.set_title(4, 'Crown Edge Heights')
controls.set_title(5, 'Crown Shapes')
# create the 3D scatter widget
tree_scatter = ipv.quickscatter(
x=np.random.rand(100, ) * 100 - 50,
y=np.random.rand(100, ) * 100 - 50,
z=np.random.rand(100, ) * 170 - 10,
marker='sphere',
color='green',
size=1)
# define some visualization parameters of the scatter plot
tree_scatter.children[0].xlim = [-50, 50]
tree_scatter.children[0].ylim = [-50, 50]
tree_scatter.children[0].zlim = [-10, 160]
tree_scatter.children[0].camera.up = [0, 1, 0]
tree_scatter.children[0].camera.position = (-0.03944879903076046,
-3.097863509106879,
0.27417047137158385)
def on_value_change(*args):
"""Updates values of scatter plot when parameter widgets are updated.
"""
new_x, new_y, new_z = make_tree_all_params(
species.value, dbh.value, height.value, stem_x.value, stem_y.value,
stem_z.value, lean_direction.value, lean_severity.value,
crown_ratio.value, crown_radius_E.value, crown_radius_N.value,
crown_radius_W.value, crown_radius_S.value,
crown_edge_height_E.value, crown_edge_height_N.value,
crown_edge_height_W.value, crown_edge_height_S.value,
shape_top_E.value, shape_top_N.value, shape_top_W.value,
shape_top_S.value, shape_bot_E.value, shape_bot_N.value,
shape_bot_W.value, shape_bot_S.value)
tree_scatter.children[0].scatters[0].x = new_x
tree_scatter.children[0].scatters[0].y = new_y
tree_scatter.children[0].scatters[0].z = new_z
# set up all widgets to trigger update to scatter plot upon changed value
species.observe(on_value_change, 'value')
dbh.observe(on_value_change, 'value')
height.observe(on_value_change, 'value')
stem_x.observe(on_value_change, 'value')
stem_y.observe(on_value_change, 'value')
stem_z.observe(on_value_change, 'value')
lean_direction.observe(on_value_change, 'value')
lean_severity.observe(on_value_change, 'value')
crown_ratio.observe(on_value_change, 'value')
crown_radius_E.observe(on_value_change, 'value')
crown_radius_N.observe(on_value_change, 'value')
crown_radius_W.observe(on_value_change, 'value')
crown_radius_S.observe(on_value_change, 'value')
crown_edge_height_E.observe(on_value_change, 'value')
crown_edge_height_N.observe(on_value_change, 'value')
crown_edge_height_W.observe(on_value_change, 'value')
crown_edge_height_S.observe(on_value_change, 'value')
shape_top_E.observe(on_value_change, 'value')
shape_top_N.observe(on_value_change, 'value')
shape_top_W.observe(on_value_change, 'value')
shape_top_S.observe(on_value_change, 'value')
shape_bot_E.observe(on_value_change, 'value')
shape_bot_N.observe(on_value_change, 'value')
shape_bot_W.observe(on_value_change, 'value')
shape_bot_S.observe(on_value_change, 'value')
return HBox([controls, tree_scatter], layout=Layout(width='100%'))
def plot_tree_list(tree_list, dem=None, sample=None):
"""Plots an interactive 3D view of a tree list.
Parameters
-----------
tree_list : path to shapefile
shapefile containing trees with measured attributes
    dem : path to elevation raster
        raster readable by rasterio, will be used to calculate elevation on
        a grid and produce a ground surface underneath the trees
    sample : int, optional
        if provided, a random sample of this many trees is plotted
    """
if not tree_list_checker(tree_list):
raise TypeError('Tree list is not formatted appropriately.')
if type(tree_list) == pd.core.frame.DataFrame:
trees = tree_list
elif type(tree_list) == gpd.geodataframe.GeoDataFrame:
trees = tree_list
elif not os.path.isfile(tree_list):
raise FileNotFoundError('The file does not exist.')
else: # check file type and open with pandas or geopandas
file_type = os.path.basename(tree_list).split('.')[1]
if file_type == "csv":
trees = pd.read_csv(tree_list)
elif file_type == "shp":
trees = gpd.read_file(tree_list)
else:
raise TypeError('Unknown file type')
spp = pd.unique(trees.species)
palette = sns.color_palette('colorblind', len(spp))
# get elevation raster to display as surface underneath trees
if dem is not None:
# calculate z locations of the tree stems based on the dem
trees['stem_z'] = get_elevation(dem, trees['stem_x'], trees['stem_y'])
# calculate a dem to display as a surface in the plot
xs = np.linspace(trees.stem_x.min(), trees.stem_x.max(), 100)
ys = np.linspace(trees.stem_y.min(), trees.stem_y.max(), 100)
xx, yy = np.meshgrid(xs, ys)
elevation = get_elevation(dem, xx.flatten(), yy.flatten())
elevation_surface = elevation.reshape(xs.shape[0], ys.shape[0])
else:
if 'stem_z' not in trees.columns:
trees['stem_z'] = 0
else:
pass
if sample is not None:
trees = trees.sample(n=sample)
else:
pass
ipv.figure(width=800)
for idx, tree in trees.iterrows():
# calculate the tree's crown coordinates
x, y, z = Tree(
species=tree.species,
dbh=tree.dbh,
top_height=tree.top_height,
stem_x=tree.stem_x,
stem_y=tree.stem_y,
stem_z=tree.stem_z,
crown_ratio=tree.cr_ratio,
crown_radii=np.full(shape=4, fill_value=tree.cr_radius),
crown_shapes=np.full(shape=(2, 4), fill_value=2.0)).get_crown()
# find out the spp index to give it a unique color
spp_idx = np.where(spp == tree.species)[0][0]
# plot the tree crown
ipv.plot_surface(
x.reshape((50, 32)),
y.reshape((50, 32)),
z.reshape((50, 32)),
color=palette[spp_idx])
if dem is not None:
ipv.plot_surface(xx, yy, elevation_surface, color='brown')
else:
pass
ipv.xlim(trees.stem_x.min() - 20, trees.stem_x.max() + 20)
ipv.ylim(trees.stem_y.min() - 20, trees.stem_y.max() + 20)
ipv.zlim(trees.stem_z.min(),
trees.stem_z.min() + trees.top_height.max() + 20)
ipv.style.use('minimal')
ipv.squarelim()
ipv.show()
| python |
from js9 import j
def init_actions_(service, args):
dependencies = {
'list_disks': ['init'],
'get_consumption': ['install']
}
return dependencies
def init(job):
service = job.service
if 'g8client' not in service.producers:
raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue init of %s" % service)
users = service.model.data.accountusers
for user in users:
uservdc = service.aysrepo.serviceGet('uservdc', user.name)
service.consume(uservdc)
service.saveAll()
def authorization_user(account, service, g8client):
authorized_users = account.authorized_users
userslist = service.producers.get('uservdc', [])
if not userslist:
return
users = []
user_exists = True
for u in userslist:
if u.model.data.provider != '':
users.append(u.model.dbobj.name + "@" + u.model.data.provider)
else:
users.append(u.model.dbobj.name)
# Authorize users
for user in users:
if user not in authorized_users:
user_exists = False
for uvdc in service.model.data.accountusers:
if uvdc.name == user.split('@')[0]:
if user_exists:
for acl in account.model['acl']:
if acl['userGroupId'] == user and acl['right'] != uvdc.accesstype:
account.update_access(username=user, right=uvdc.accesstype)
else:
account.authorize_user(username=user, right=uvdc.accesstype)
# Unauthorize users not in the schema
for user in authorized_users:
if user not in users:
if user == g8client.model.data.login:
raise j.exceptions.Input("Can't remove current authenticating user: %s. To remove use another user for g8client service." % user)
account.unauthorize_user(username=user)
def get_user_accessright(username, service):
for u in service.model.data.accountusers:
if u.name == username:
return u.accesstype
def install(job):
service = job.service
if 'g8client' not in service.producers:
raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue install of %s" % service)
g8client = service.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
# Set limits
# if account does not exist, it will create it
account = cl.account_get(name=service.model.dbobj.name,
create=True,
maxMemoryCapacity=service.model.data.maxMemoryCapacity,
maxVDiskCapacity=service.model.data.maxDiskCapacity,
maxCPUCapacity=service.model.data.maxCPUCapacity,
maxNumPublicIP=service.model.data.maxNumPublicIP,
)
service.model.data.accountID = account.model['id']
service.model.save()
authorization_user(account, service, g8client)
# Unauthorize users not in the schema
# THIS FUNCTIONALITY IS DISABLED UNTIL OVC DOESN'T REQUIRE USERS TO BE ADMIN
# update capacity in case account already existed
account.model['maxMemoryCapacity'] = service.model.data.maxMemoryCapacity
account.model['maxVDiskCapacity'] = service.model.data.maxDiskCapacity
account.model['maxNumPublicIP'] = service.model.data.maxNumPublicIP
account.model['maxCPUCapacity'] = service.model.data.maxCPUCapacity
account.save()
def processChange(job):
service = job.service
if 'g8client' not in service.producers:
raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue processChange of %s" % service)
g8client = service.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
account = cl.account_get(name=service.model.dbobj.name, create=False)
args = job.model.args
category = args.pop('changeCategory')
if category == "dataschema" and service.model.actionsState['install'] == 'ok':
for key, value in args.items():
if key == 'accountusers':
# value is a list of (uservdc)
if not isinstance(value, list):
raise j.exceptions.Input(message="%s should be a list" % key)
if 'uservdc' in service.producers:
for s in service.producers['uservdc']:
if not any(v['name'] == s.name for v in value):
service.model.producerRemove(s)
for v in value:
accessRight = v.get('accesstype', '')
if v['name'] == s.name and accessRight != get_user_accessright(s.name, service) and accessRight:
name = s.name + '@' + s.model.data.provider if s.model.data.provider else s.name
account.update_access(name, v['accesstype'])
for v in value:
userservice = service.aysrepo.serviceGet('uservdc', v['name'])
if userservice not in service.producers.get('uservdc', []):
service.consume(userservice)
setattr(service.model.data, key, value)
authorization_user(account, service, g8client)
# update capacity
account.model['maxMemoryCapacity'] = service.model.data.maxMemoryCapacity
account.model['maxVDiskCapacity'] = service.model.data.maxDiskCapacity
account.model['maxNumPublicIP'] = service.model.data.maxNumPublicIP
account.model['maxCPUCapacity'] = service.model.data.maxCPUCapacity
account.save()
service.save()
def uninstall(job):
service = job.service
if 'g8client' not in service.producers:
raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue uninstall of %s" % service)
g8client = service.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = cl.account_get(service.model.dbobj.name)
acc.delete()
def list_disks(job):
service = job.service
g8client = service.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
account = cl.account_get(name=service.model.dbobj.name)
service.model.disks = account.disks
service.save()
def get_consumption(job):
import datetime
service = job.service
g8client = service.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
account = cl.account_get(name=service.model.dbobj.name)
if not service.model.data.consumptionFrom and not service.model.data.consumptionTo:
service.model.data.consumptionFrom = account.model['creationTime']
end = datetime.datetime.fromtimestamp(service.model.data.consumptionFrom) + datetime.timedelta(hours=1)
service.model.data.consumptionTo = end.timestamp()
service.model.data.consumptionData = account.get_consumption(service.model.data.consumptionFrom, service.model.data.consumptionTo)
| python |
import math
# Split the digits of the input number: print the last digit first, then the
# list of the remaining leading digits.
n = input()
r = list(map(int,n))
lastNum = r[-1]
l = r[:-1]
newArray = list(map(int,l))
#print(newArray)
print(lastNum)
print(newArray)
| python |
from __future__ import absolute_import
import logging
import time
from django.contrib.auth.models import User
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import get_object_or_404, render, render_to_response
import elasticapm
class MyException(Exception):
pass
class IgnoredException(Exception):
skip_elasticapm = True
def no_error(request):
resp = HttpResponse('')
resp['My-Header'] = 'foo'
return resp
def fake_login(request):
return HttpResponse('')
def django_exc(request):
return get_object_or_404(MyException, pk=1)
def raise_exc(request):
raise MyException(request.GET.get('message', 'view exception'))
def raise_ioerror(request):
raise IOError(request.GET.get('message', 'view exception'))
def decorated_raise_exc(request):
return raise_exc(request)
def template_exc(request):
return render_to_response('error.html')
def ignored_exception(request):
raise IgnoredException()
def logging_request_exc(request):
logger = logging.getLogger(__name__)
try:
raise Exception(request.GET.get('message', 'view exception'))
except Exception as e:
logger.error(e, exc_info=True, extra={'request': request})
return HttpResponse('')
def logging_view(request):
logger = logging.getLogger('logmiddleware')
logger.info("Just loggin'")
return HttpResponse('')
def render_template_view(request):
def something_expensive():
with elasticapm.capture_span("something_expensive", "code"):
return [User(username='Ron'), User(username='Beni')]
return render(request, "list_users.html",
{'users': something_expensive})
def render_jinja2_template(request):
return render(request, "jinja2_template.html")
def render_user_view(request):
def something_expensive():
with elasticapm.capture_span("something_expensive", "code"):
for i in range(100):
users = list(User.objects.all())
return users
return render(request, "list_users.html",
{'users': something_expensive})
def streaming_view(request):
def my_generator():
for i in range(5):
with elasticapm.capture_span('iter', 'code'):
time.sleep(0.01)
yield str(i)
resp = StreamingHttpResponse(my_generator())
return resp
def override_transaction_name_view(request):
elasticapm.set_transaction_name('foo')
elasticapm.set_transaction_result('okydoky')
return HttpResponse()
| python |
"""empty message
Revision ID: 878f67285c72
Revises: 122dd6a5c035
Create Date: 2019-05-29 12:57:36.544059
"""
# revision identifiers, used by Alembic.
revision = '878f67285c72'
down_revision = '122dd6a5c035'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
op.create_table('registration',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('offer_id', sa.Integer(), nullable=False),
sa.Column('registration_form_id', sa.Integer(), nullable=False),
sa.Column('confirmed', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('confirmation_email_sent_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['offer_id'], ['offer.id']),
sa.ForeignKeyConstraint(['registration_form_id'], ['registration_form.id']),
sa.PrimaryKeyConstraint('id')
)
op.create_table('registration_answer',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('registration_id', sa.Integer(), nullable=False),
sa.Column('registration_question_id', sa.Integer(), nullable=False),
sa.Column('value', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['registration_id'], ['registration.id']),
sa.ForeignKeyConstraint(['registration_question_id'], ['registration_question.id']),
sa.PrimaryKeyConstraint('id')
)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
op.drop_table('registration_answer')
op.drop_table('registration')
    # ### end Alembic commands ###
| python |
import numpy as np
import trimesh
from pdb import set_trace as bp
def write_off(file_path, verts, faces=None):
"""Export point cloud into .off file.
Positional arguments:
file_path: output path
verts: Nx3 array (float)
Kwargs:
faces: Mx3 array (int)
"""
off = open(file_path, 'w')
assert isinstance(verts, np.ndarray), "Invalid data type for vertices: %s" % type(verts)
assert len(verts.shape) == 2 and verts.shape[1] == 3, "Invalid array shape for vertices: %s" % str(verts.shape)
verts_count = verts.shape[0]
if faces is not None:
assert isinstance(faces, np.ndarray), "Invalid data type for faces: %s" % type(faces)
assert len(faces.shape) == 2 and faces.shape[1] == 3, "Invalid array shape for faces: %s" % str(faces.shape)
faces_count = faces.shape[0]
# write header
off.write('OFF\n')
if faces is not None:
off.write('%d %d 0\n' % (verts_count, faces_count))
else:
off.write('%d 0 0\n' % (verts_count))
# write vertices
np.savetxt(off, verts, fmt='%.6f')
# write faces
if faces is not None:
        augmented_faces = np.hstack((np.ones((faces.shape[0], 1), dtype=int)*3, faces))
np.savetxt(off, augmented_faces, fmt='%d')
off.close()
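# Minimal usage sketch (illustrative data only): writes a single triangle with
# the helper above. The output name 'triangle_example.off' is an arbitrary
# choice for this example and is not used elsewhere in the module.
def _write_off_example():
    verts = np.array([[0.0, 0.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])
    faces = np.array([[0, 1, 2]])
    write_off('triangle_example.off', verts, faces)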
## base function
NORM = np.linalg.norm
def lap_smooth(v,f,adj):
smoothed = v.copy()
for i in range(v.shape[0]):
neibour = adj[i]
base_point = v[i]
if 1:
laplacian = np.vstack((v[neibour]))
smoothed[i] = np.average(laplacian,0)
else:
laplacian = np.zeros_like((base_point))
edge_cost = 1/ NORM(v[neibour] - v[i],axis=1)
laplacian += np.sum(v[neibour] * edge_cost.reshape(-1,1),axis=0)
# laplacian += base_point
total_weight = np.sum(edge_cost)
if total_weight > 0:
smoothed[i] = laplacian/total_weight
# else:
return smoothed
def smooth2(v,f,adj,iteration):
for i in range(iteration):
v = lap_smooth(v,f,adj)
return v
def get_smoothed_mesh(v,f,iteration=5):
adj = get_adj(v,f)
smooth_verts = smooth2(v,f,adj,iteration)
tri_mesh = trimesh.Trimesh(vertices=smooth_verts,faces=f,process=False)
return tri_mesh
def get_adj(v,f):
adj = []
for i,vt in enumerate(v):
neibour = set(f[np.where(f==i)[0]].flatten())
# pdb.set_trace()
# print(neibour)
# print(i)
neibour.remove(i)
neibour = list(neibour)
adj.append(neibour)
return adj
def get_tagent_space_naive(mesh):
normals = mesh.vertex_normals
tangents = np.cross(normals,normals+[0,1,0])
tangents = tangents/np.linalg.norm(tangents,axis=1).reshape(-1,1)
bitangents = np.cross(normals,tangents)
bitangents = bitangents/np.linalg.norm(bitangents,axis=1).reshape(-1,1)
return tangents,normals,bitangents
def rotation_matrix_x(angle):
rad = angle * np.pi / 180
return np.array([[1,0,0],[0, np.cos(rad), -np.sin(rad)], [0, np.sin(rad), np.cos(rad)]])
def rotation_matrix_y(angle):
rad = angle * np.pi / 180
return np.array([[np.cos(rad), 0, np.sin(rad)],[0, 1, 0], [-np.sin(rad), 0, np.cos(rad)]])
def rotation_matrix_z(angle):
rad = angle * np.pi / 180
return np.array([[np.cos(rad), -np.sin(rad), 0], [np.sin(rad), np.cos(rad), 0], [0, 0, 1]])
def rotate_plane(vec1, vec2 ):
"""
giving two vector, return the rotation matrix
"""
#vec1 = vec1 / np.linalg.norm(vec1) #unit vector
norm = np.linalg.norm(vec1) * np.linalg.norm(vec2)
cos_theta = np.dot(vec1,vec2)/norm
sin_theta = np.linalg.norm(np.cross(vec1,vec2))/norm
if sin_theta == 0:
return np.eye(3)
k = np.cross(vec1,vec2) /(norm*sin_theta)
K = np.array([[0,-k[2],k[1]],
[k[2],0,-k[0]],
[-k[1],k[0],0]])
R = np.eye(3) + sin_theta*K +(1-cos_theta)*np.dot(K,K)
return R
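# Sanity sketch (not called anywhere in the module): rotating the x-axis onto
# the y-axis should produce a matrix that maps [1, 0, 0] to [0, 1, 0].
def _rotate_plane_example():
    vec1 = np.array([1.0, 0.0, 0.0])
    vec2 = np.array([0.0, 1.0, 0.0])
    R = rotate_plane(vec1, vec2)
    assert np.allclose(R @ vec1, vec2)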
def get_index_list(full,part):
idlist = []
for pt in part:
arr = NORM(full-pt,axis=1) < 0.001
id = np.where(arr)
idlist.append(id[0][0])
return idlist
def get_Rs(tangents,normals,bitangents):
return np.dstack(( tangents,normals,bitangents))
def get_delta_mushed_target(source_v,target_v,f):
smooth_time = 25
smoothed_source_mesh = get_smoothed_mesh(source_v,f,smooth_time)
st,sn,sb = get_tagent_space_naive(smoothed_source_mesh)
Rs = get_Rs(st,sn,sb)
vd = np.einsum('ijk,ik->ij' ,np.linalg.pinv(Rs),source_v-smoothed_source_mesh.vertices)
smoothed_target_mesh = get_smoothed_mesh(target_v,f,smooth_time)
tn = smoothed_target_mesh.vertex_normals
tt = np.zeros_like(tn)
tb = np.zeros_like(tn)
# key part: get rotated tangent space
for i,vec1 in enumerate(tn):
Rn = rotate_plane(sn[i],tn[i])
tt[i],tb[i] = Rn @ st[i], Rn @ sb[i]
Cs = get_Rs(tt,tn,tb)
deformed = np.einsum('ijk,ik->ij' ,Cs,vd) + smoothed_target_mesh.vertices
return deformed
def demo():
# load source mesh
source_mesh = trimesh.load_mesh('tube_r.off',process=False)
v,f = source_mesh.vertices,source_mesh.faces
# rotate part of tube
rotation_angle_y = 45
center = np.average(v,0)
select = np.where(v[:,0]>center[0]+1)
R = rotation_matrix_z(rotation_angle_y)
target = v.copy()
target[:,0] -= 1
target[select] = (R @ target[select].T).T
target[:,0] += 1
# get delta mushed target mesh
deformed = get_delta_mushed_target(v,target,f)
write_off('deformed.off',deformed,f)
if __name__ == '__main__':
demo() | python |
from PyQt5 import QtWidgets, QtCore, QtGui
import os
#from gui.export_widget import Ui_Form
from editable_list_widget import list_widget
from gui import build
from wizard.vars import defaults
from wizard.tools import log
from wizard.prefs.main import prefs
import options_widget
import dialog_comment
from wizard.tools.tx_from_files import tx_from_files
from wizard.prefs import project as project_prefs
logger = log.pipe_log(__name__)
prefs = prefs()
class Main(list_widget):
def __init__(self, asset, sanity, count):
super(Main, self).__init__()
self.sanity = sanity
self.count = count
self.asset = asset
self.init_ui()
self.connect_functions()
def init_ui(self):
self.export_widget_folder_pushButton = self.add_button(defaults._folder_icon_)
self.export_widget_comment_pushButton = self.add_button(defaults._comment_icon_)
self.export_widget_tx_pushButton = self.add_button(defaults._tx_icon_)
icon = defaults._export_list_neutral_icon_
export_prefs = prefs.asset(self.asset).export
self.export_widget_version_label = self.add_label(self.asset.export_version, "export_widget_version_label", 40)
self.export_widget_user_label = self.add_label(export_prefs.version_user, "export_widget_user_label", 120)
self.export_widget_date_label = self.add_label(export_prefs.version_date, "export_widget_date_label", 180)
self.export_widget_comment_label = self.add_label(export_prefs.version_comment, "export_widget_comment_label", 230, QtCore.Qt.AlignLeft)
try:
self.ui.export_widget_software_label.setText(f'From {export_prefs.version_software}')
except:
pass
if self.asset.stage != defaults._texturing_:
self.export_widget_tx_pushButton.setVisible(0)
self.update_sanity(self.sanity)
def update_sanity(self, sanity):
if sanity:
list_dir = os.listdir(prefs.asset(self.asset).export.version_folder)
if list_dir == [] or not list_dir:
icon = defaults._missing_file_export_list_icon_
else:
if prefs.asset(self.asset).software.extension in list_dir[0]:
icon = defaults._export_list_icon_
else:
icon = defaults._missing_file_export_list_icon_
else:
icon = defaults._export_list_neutral_icon_
self.set_icon(icon)
def open_folder(self):
file = prefs.asset(self.asset).export.version_folder
os.startfile(file)
def change_comment(self):
self.dialog_comment = dialog_comment.Main(self.asset)
if build.launch_dialog_comment(self.dialog_comment):
self.export_widget_comment_label.setText(self.dialog_comment.comment)
def make_tx(self):
folder = prefs.asset(self.asset).export.version_folder
file_names_list = os.listdir(folder)
files_list = []
extension = (project_prefs.get_custom_pub_ext_dic())[self.asset.stage][self.asset.software]
for file in file_names_list:
if file.endswith(extension):
files_list.append(os.path.join(folder, file))
tx_from_files(files_list)
def connect_functions(self):
self.export_widget_folder_pushButton.clicked.connect(self.open_folder)
self.export_widget_comment_pushButton.clicked.connect(self.change_comment)
self.export_widget_tx_pushButton.clicked.connect(self.make_tx)
def closeEvent(self, event):
event.ignore()
self.hide()
| python |
"""
PYTHON NUMBER SEQUENCE
"""
__author__ = 'Sol Amour - [email protected]'
__twitter__ = '@solamour'
__version__ = '1.0.0'
# SYNTAX: [ value * step for value in range( amount ) ]
# Step = This is the value we will multiply our range by
# Amount = How many total values we want
# NOTES:
# All parameters can be either integers or doubles
# All parameters can be positive or negative
# range( amount ) is the same as range( 0, amount )
# To achieve the same output as '0..10' in DesignScript, you must use
# 'range( 10 + 1 )' as the Stop value is not included in the range function
# The input ports
step = IN[0] # A number such as 20 (int) or 20.5 (float) demarcating the step
amount = IN[1] # A number such as 10 demarcating the amount
# The output port - In this case we use a list comprehension
OUT = [ value * step for value in range( amount ) ]
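# Worked example (values chosen for illustration): with step = 20 and
# amount = 10 the comprehension above yields
# [0, 20, 40, 60, 80, 100, 120, 140, 160, 180],
# i.e. ten values starting at 0 with a step of 20.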
| python |
# based on https://github.com/pypa/sampleproject
# MIT License
# Always prefer setuptools over distutils
from setuptools import setup, find_namespace_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='asreview-semantic-clustering',
description='Semantic clustering tool for the ASReview project',
version='0.1',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/asreview/semantic-clusters',
author='Utrecht University',
author_email='[email protected]',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Pick your license as you wish
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='asreview extension semantic clustering clusters visualization',
packages=find_namespace_packages(include=['asreviewcontrib.*']),
install_requires=[
"numpy",
"matplotlib",
"asreview",
"dash",
"plotly",
"sklearn",
"transformers",
"numpy",
"seaborn",
"torch",
],
extras_require={
},
entry_points={
"asreview.entry_points": [
"semantic_clustering = asreviewcontrib.semantic_clustering.main:SemClusEntryPoint", # noqa: E501
]
},
project_urls={
'Bug Reports':
"https://github.com/asreview/semantic-clusters/issues",
'Source':
"https://github.com/asreview/semantic-clusters",
},
)
| python |
from __future__ import unicode_literals
import datetime
from django.core.urlresolvers import reverse
from tracpro.polls.models import Answer, PollRun, Response
from tracpro.test.cases import TracProDataTest
from ..models import BaselineTerm
class TestBaselineTermCRUDL(TracProDataTest):
def setUp(self):
"""
There will be a set of results for 3 contacts, in 2 regions
self.contact1 and self.contact2 are in self.region1
self.contact4 is in self.region2
"""
super(TestBaselineTermCRUDL, self).setUp()
self.org = self.unicef
self.baselineterm = BaselineTerm.objects.create(
name='Baseline Term SetUp',
org=self.org,
start_date=datetime.date(2015, 5, 1),
end_date=datetime.date(2015, 5, 1),
baseline_poll=self.poll1,
baseline_question=self.poll1_question1,
follow_up_poll=self.poll1,
follow_up_question=self.poll1_question2
)
self.data = {
'name': 'Test Baseline Term',
'org': self.org.pk,
'start_date': 'May 1, 2015',
'end_date': 'May 1, 2015',
'baseline_poll': self.poll1.pk,
'baseline_question': self.poll1_question1.pk,
'follow_up_poll': self.poll1.pk,
'follow_up_question': self.poll1_question2.pk,
}
def test_list(self):
url_name = "baseline.baselineterm_list"
self.login(self.admin)
response = self.url_get('unicef', reverse(url_name))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 1)
def test_create(self):
url = reverse('baseline.baselineterm_create')
# Log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# Submit with no fields entered
response = self.url_post('unicef', url, {})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'name', 'This field is required.')
# Submit with form data
response = self.url_post('unicef', url, self.data)
self.assertEqual(response.status_code, 302)
# Check new BaselineTerm created successfully
baselineterm = BaselineTerm.objects.all().last()
self.assertEqual(baselineterm.name, "Test Baseline Term")
def test_delete(self):
# Log in as an org administrator
self.login(self.admin)
# Delete baselineterm from setUp()
response = self.url_post(
'unicef', reverse('baseline.baselineterm_delete', args=[self.baselineterm.pk]))
# This should delete the single BaselineTerm and redirect
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, 'http://unicef.testserver/indicators/', fetch_redirect_response=False)
self.assertEqual(BaselineTerm.objects.all().count(), 0)
def test_update(self):
# Log in as an org administrator
self.login(self.admin)
url = reverse('baseline.baselineterm_update', args=[self.baselineterm.pk])
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
self.data["name"] = "Baseline Term Updated"
response = self.url_post('unicef', url, self.data)
self.assertEqual(response.status_code, 302)
# Check updated BaselineTerm
baselineterm_updated = BaselineTerm.objects.get(pk=self.baselineterm.pk)
self.assertRedirects(
response,
'http://unicef.testserver/indicators/read/%d/' % self.baselineterm.pk,
fetch_redirect_response=False)
self.assertEqual(baselineterm_updated.name, "Baseline Term Updated")
def test_read(self):
# Log in as an org administrator
self.login(self.admin)
# Try to read the one BaselineTerm
response = self.url_get(
'unicef', reverse('baseline.baselineterm_read', args=[self.baselineterm.pk]))
self.assertEqual(response.status_code, 200)
# Try to view BaselineTerm that does not exist
fake_baselineterm_pk = self.baselineterm.pk + 100
response = self.url_get(
'unicef', reverse('baseline.baselineterm_read', args=[fake_baselineterm_pk]))
self.assertEqual(response.status_code, 404)
def test_data_spoof(self):
# Turn on show_spoof_data for this org
self.org.show_spoof_data = True
self.org.save()
url = reverse('baseline.baselineterm_data_spoof')
# Log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# Submit with no fields entered
response = self.url_post('unicef', url, {})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'contacts', 'This field is required.')
spoof_data = {
'contacts': [self.contact1.pk],
'start_date': "May 1, 2015",
'end_date': "May 2, 2015",
'baseline_question': self.poll1_question1.pk,
'follow_up_question': self.poll1_question2.pk,
'baseline_minimum': 30,
'baseline_maximum': 40,
'follow_up_minimum': 10,
'follow_up_maximum': 20
}
# Submit with valid form data
response = self.url_post('unicef', url, spoof_data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response,
'http://unicef.testserver/indicators/',
fetch_redirect_response=False)
# Check new spoofed data created successfully
# 3 PollRuns, Responses, and Answers
# for 1 Baseline Date and 2 Follow Up Dates
self.assertEqual(PollRun.objects.all().count(), 3)
self.assertEqual(Response.objects.all().count(), 3)
self.assertEqual(Answer.objects.all().count(), 3)
def test_data_spoof_hide(self):
# Turn off show_spoof_data for this org
self.org.show_spoof_data = False
self.org.save()
url = reverse('baseline.baselineterm_data_spoof')
# Log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
# We should not be able to spoof data
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response,
'http://unicef.testserver/indicators/',
fetch_redirect_response=False)
| python |
# Copyright (c) 2020 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
iso_map = {
"C": "Coruña, A",
"VI": "Araba/Álava",
"AB": "Albacete",
"A": "Alicante/Alacant",
"AL": "Almería",
"O": "Asturias",
"AV": "Ávila",
"BA": "Badajoz",
"PM": "Balears, Illes",
"B": "Barcelona",
"BI": "Bizkaia",
"BU": "Burgos",
"CC": "Cáceres",
"CA": "Cádiz",
"S": "Cantabria",
"CS": "Castellón/Castelló",
"CE": "Ceuta",
"CR": "Ciudad Real",
"CO": "Córdoba",
"CU": "Cuenca",
"SS": "Gipuzkoa",
"GI": "Girona",
"GR": "Granada",
"GU": "Guadalajara",
"H": "Huelva",
"HU": "Huesca",
"J": "Jaén",
"LO": "Rioja, La",
"GC": "Palmas, Las",
"LE": "León",
"L": "Lleida",
"LU": "Lugo",
"M": "Madrid",
"MA": "Málaga",
"ML": "Melilla",
"MU": "Murcia",
"NA": "Navarra",
"NC": "Navarra", # this is region's iso code, which appears by error in raw data of provinces
"OR": "Ourense",
"P": "Palencia",
"PO": "Pontevedra",
"SA": "Salamanca",
"TF": "Santa Cruz de Tenerife",
"SG": "Segovia",
"SE": "Sevilla",
"SO": "Soria",
"T": "Tarragona",
"TE": "Teruel",
"TO": "Toledo",
"V": "Valencia/València",
"VA": "Valladolid",
"ZA": "Zamora",
"Z": "Zaragoza",
}
def add_province_info(df_orig, df_prov):
df_orig.insert(1, "province id", 0)
df_orig.insert(2, "province", 0)
df_orig.insert(3, "region id", 0)
df_orig.insert(4, "region", 0)
# Homogenize codes, names, etc. using INE data
df_orig["province"] = df_orig["province iso"].apply(iso_map.get)
for p in df_orig["province"].unique():
# print("-", p)
df_orig.loc[
df_orig["province"] == p,
("province id", "region", "region id")
] = (
df_prov.loc[df_prov["provincia"] == p][
["id provincia", "autonomia", "id auto"]
].values[0]
)
del df_orig['province iso']
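# Minimal illustration with toy data (not the real INE table): df_prov is
# assumed to carry the columns used above ('provincia', 'id provincia',
# 'autonomia', 'id auto') and df_orig a 'province iso' column.
def _add_province_info_example():
    import pandas as pd
    df_orig = pd.DataFrame({"province iso": ["M", "GI"], "cases": [10, 5]})
    df_prov = pd.DataFrame({
        "provincia": ["Madrid", "Girona"],
        "id provincia": [28, 17],
        "autonomia": ["Madrid, Comunidad de", "Cataluña"],
        "id auto": [13, 9],
    })
    add_province_info(df_orig, df_prov)
    return df_orig  # now carries 'province id', 'province', 'region id', 'region'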
| python |
# flake8: noqa
# Copyright (c) 2015 - 2017 Holger Nahrstaedt
# Copyright (c) 2016-2017 The pyedflib Developers
# <https://github.com/holgern/pyedflib>
# See LICENSE for license details.
from __future__ import division, print_function, absolute_import
from ._extensions._pyedflib import *
from .edfwriter import *
from .edfreader import *
from . import highlevel
from . import data
from pyedflib.version import version as __version__
from numpy.testing import Tester
__all__ = [s for s in dir() if not s.startswith('_')]
try:
# In Python 2.x the name of the tempvar leaks out of the list
# comprehension. Delete it to not make it show up in the main namespace.
del s
except NameError:
pass
test = Tester().test
| python |
"""
Holds functions responsible for objects validation across FAT-Forensics.
"""
# Author: Kacper Sokol <[email protected]>
# License: new BSD
import warnings
from typing import Union
import numpy as np
import fatf.utils.tools as fut
__all__ = ['is_numerical_dtype',
'is_textual_dtype',
'is_base_dtype',
'is_flat_dtype',
'are_similar_dtypes',
'are_similar_dtype_arrays',
'is_numerical_array',
'is_textual_array',
'is_base_array',
'is_1d_array',
'is_2d_array',
'is_structured_row',
'is_1d_like',
'is_structured_array'] # yapf: disable
_NUMPY_VERSION = [int(i) for i in np.version.version.split('.')]
_NUMPY_1_13 = fut.at_least_verion([1, 13], _NUMPY_VERSION)
# Unsigned byte, Boolean, (signed) byte, unsigned integer,
# (signed) integer, floating-point and complex-floating point.
_NUMPY_NUMERICAL_KINDS = set('B?buifc')
# Unicode string
_NUMPY_TEXTUAL_KINDS = set('U')
# Zero-terminated bytes
_NUMPY_TEXTUAL_KINDS_UNSUPPORTED = set('Sa')
# O, M, m and V are considered complex objects
_NUMPY_BASE_KINDS = set('?buifcBSaU')
def is_numerical_dtype(dtype: np.dtype) -> bool:
"""
Determines whether a numpy dtype object is of numerical type.
Checks whether the ``dtype`` is of one of the following (numerical) types:
    unsigned byte, boolean, (signed) byte, unsigned integer,
(signed) integer, floating-point or complex-floating point.
Parameters
----------
dtype : numpy.dtype
The dtype to be checked.
Raises
------
TypeError
The input is not a numpy's dtype object.
ValueError
        The dtype is structured -- this function only accepts plain dtypes.
Returns
-------
is_numerical : boolean
True if the dtype is of a numerical type, False otherwise.
"""
if not isinstance(dtype, np.dtype):
raise TypeError('The input should be a numpy dtype object.')
# If the dtype is complex
if dtype.names is not None:
raise ValueError('The numpy dtype object is structured. '
                         'Only base dtypes are allowed.')
is_numerical = dtype.kind in _NUMPY_NUMERICAL_KINDS
return is_numerical
def is_textual_dtype(dtype: np.dtype) -> bool:
"""
Determines whether a numpy dtype object is of textual type.
Checks whether the ``dtype`` is a unicode string type (textual). The
zero-terminated bytes type is unsupported and not considered a textual
type.
Parameters
----------
dtype : numpy.dtype
The dtype to be checked.
Raises
------
TypeError
The input is not a numpy's dtype object.
ValueError
        The dtype is structured -- this function only accepts plain dtypes.
Returns
-------
is_textual : boolean
True if the dtype is of a textual type, False otherwise.
"""
if not isinstance(dtype, np.dtype):
raise TypeError('The input should be a numpy dtype object.')
# If the dtype is complex
if dtype.names is not None:
raise ValueError('The numpy dtype object is structured. '
                         'Only base dtypes are allowed.')
if dtype.kind in _NUMPY_TEXTUAL_KINDS_UNSUPPORTED:
warnings.warn(
'Zero-terminated bytes type is not supported and is not '
'considered to be a textual type. Please use any other textual '
'type.',
category=UserWarning)
is_textual = False
else:
is_textual = dtype.kind in _NUMPY_TEXTUAL_KINDS
return is_textual
def is_base_dtype(dtype: np.dtype) -> bool:
"""
Determines whether a numpy dtype object is one of base types.
Checks whether the ``dtype`` is of any type but ``numpy.void`` --
this usually happens when a numpy array holds objects instead of base
type entities.
Parameters
----------
dtype : numpy.dtype
The dtype to be checked.
Raises
------
TypeError
The input is not a numpy's dtype object.
ValueError
        The dtype is structured -- this function only accepts plain dtypes.
Returns
-------
is_basic : boolean
True if the dtype is of a base type, False otherwise.
"""
if not isinstance(dtype, np.dtype):
raise TypeError('The input should be a numpy dtype object.')
# If the dtype is complex
if dtype.names is not None:
raise ValueError('The numpy dtype object is structured. '
                         'Only base dtypes are allowed.')
is_basic = dtype.kind in _NUMPY_BASE_KINDS
return is_basic
def is_flat_dtype(dtype: np.dtype) -> bool:
"""
Determines whether a numpy dtype object is flat.
Checks whether the ``dtype`` just encodes one element or a shape. A dtype
can characterise an array of other base types, which can then be embedded
as an element of another array.
Parameters
----------
dtype : numpy.dtype
The dtype to be checked.
Raises
------
TypeError
The input is not a numpy's dtype object.
ValueError
        The dtype is structured -- this function only accepts plain dtypes.
Returns
-------
is_flat : boolean
True if the dtype is flat, False otherwise.
"""
if not isinstance(dtype, np.dtype):
raise TypeError('The input should be a numpy dtype object.')
# If the dtype is complex
if dtype.names is not None:
raise ValueError('The numpy dtype object is structured. '
                         'Only base dtypes are allowed.')
# pylint: disable=len-as-condition
if _NUMPY_1_13: # pragma: no cover
is_flat = not bool(dtype.ndim)
else: # pragma: no cover
is_flat = len(dtype.shape) == 0
return is_flat
def are_similar_dtypes(dtype_a: np.dtype,
dtype_b: np.dtype,
strict_comparison: bool = False) -> bool:
"""
Checks whether two numpy dtypes are similar.
If ``strict_comparison`` is set to True the both dtypes have to be exactly
the same. Otherwise, if both are either numerical or textual dtypes, they
are considered similar.
Parameters
----------
dtype_a : numpy.dtype
The first dtype to be compared.
dtype_b : numpy.dtype
The second dtype to be compared.
strict_comparison : boolean, optional (default=False)
When set to True the dtypes have to match exactly. Otherwise, if both
are either numerical or textual dtypes, they are considered similar.
Raises
------
TypeError
Either of the inputs is not a numpy's dtype object.
ValueError
Either of the input dtypes is structured -- this function only accepts
        plain dtypes.
Returns
-------
are_similar : boolean
True if both dtypes are similar, False otherwise.
"""
if not isinstance(dtype_a, np.dtype):
raise TypeError('dtype_a should be a numpy dtype object.')
if not isinstance(dtype_b, np.dtype):
raise TypeError('dtype_b should be a numpy dtype object.')
if dtype_a.names is not None:
raise ValueError('The dtype_a is a structured numpy dtype object. '
                         'Only base dtypes are allowed.')
if dtype_b.names is not None:
raise ValueError('The dtype_b is a structured numpy dtype object. '
                         'Only base dtypes are allowed.')
are_similar = False
if strict_comparison:
are_similar = dtype_a == dtype_b
else:
if ((is_numerical_dtype(dtype_a) and is_numerical_dtype(dtype_b))
or (is_textual_dtype(dtype_a) and is_textual_dtype(dtype_b))):
are_similar = True
else:
are_similar = dtype_a == dtype_b
return are_similar
def are_similar_dtype_arrays(array_a: np.ndarray,
array_b: np.ndarray,
strict_comparison: bool = False) -> bool:
"""
Determines whether two numpy array-like object have a similar data type.
If ``strict_comparison`` is set to True the dtypes of both arrays have to
be exactly the same. Otherwise, if both their dtypes are either numerical
or textual dtypes, they are considered similar.
If one of the arrays is a structured array and the other one is a classic
numpy array the function returns False.
Parameters
----------
array_a : numpy.ndarray
The first array to be checked.
array_b : numpy.ndarray
The second array to be checked.
strict_comparison : boolean, optional (default=False)
When set to True the dtypes have to match exactly. Otherwise, if both
are either numerical or textual dtypes, they are considered similar.
Raises
------
TypeError
Either of the inputs is not a numpy array-like object.
Returns
-------
are_similar : boolean
True if both arrays have a similar dtype, False otherwise.
"""
if not isinstance(array_a, np.ndarray):
raise TypeError('array_a should be a numpy array-like object.')
if not isinstance(array_b, np.ndarray):
raise TypeError('array_b should be a numpy array-like object.')
is_a_structured = is_structured_array(array_a)
is_b_structured = is_structured_array(array_b)
if is_a_structured and is_b_structured:
are_similar = True
if len(array_a.dtype) != len(array_b.dtype):
are_similar = False
# Check names and types.
if are_similar:
for i in range(len(array_a.dtype)):
are_similar = array_a.dtype.names[i] == array_b.dtype.names[i]
if not are_similar:
break
are_similar = are_similar_dtypes(
array_a.dtype[i], array_b.dtype[i], strict_comparison)
if not are_similar:
break
elif not is_a_structured and not is_b_structured:
are_similar = are_similar_dtypes(array_a.dtype, array_b.dtype,
strict_comparison)
else:
are_similar = False
return are_similar
def is_numerical_array(array: np.ndarray) -> bool:
"""
Determines whether a numpy array-like object has a numerical data type.
Checks whether the ``array`` is of one of the following (numerical) types:
    boolean, (signed) byte, unsigned integer, (signed) integer,
floating-point or complex-floating point.
Parameters
----------
array : numpy.ndarray
The array to be checked.
Raises
------
TypeError
The input array is not a numpy array-like object.
Returns
-------
is_numerical : boolean
True if the array has a numerical data type, False otherwise.
"""
if not isinstance(array, np.ndarray):
raise TypeError('The input should be a numpy array-like object.')
if is_structured_array(array):
is_numerical = True
for i in range(len(array.dtype)):
if not is_numerical_dtype(array.dtype[i]):
is_numerical = False
break
else:
is_numerical = is_numerical_dtype(array.dtype)
return is_numerical
def is_textual_array(array: np.ndarray) -> bool:
"""
Determines whether a numpy array-like object has a textual data type.
Checks whether the ``array`` is a unicode string type (textual). The
zero-terminated bytes type is unsupported and not considered a textual
type.
Parameters
----------
array : numpy.ndarray
The array to be checked.
Raises
------
TypeError
The input array is not a numpy array-like object.
Returns
-------
is_textual : boolean
True if the array has a textual data type, False otherwise.
"""
if not isinstance(array, np.ndarray):
raise TypeError('The input should be a numpy array-like object.')
if is_structured_array(array):
is_textual = True
for i in range(len(array.dtype)):
if not is_textual_dtype(array.dtype[i]):
is_textual = False
break
else:
is_textual = is_textual_dtype(array.dtype)
return is_textual
def is_base_array(array: np.ndarray) -> bool:
"""
Determines whether a numpy array-like object holds base data types.
Checks whether the ``array`` is of any type but ``numpy.void`` --
this usually happens when a numpy array holds objects instead of base
type entities.
Parameters
----------
array : numpy.ndarray
The array to be checked.
Raises
------
TypeError
The input array is not a numpy array-like object.
Returns
-------
is_basic : boolean
True if the array is of a base data type, False otherwise.
"""
if not isinstance(array, np.ndarray):
raise TypeError('The input should be a numpy array-like object.')
if is_structured_array(array):
is_basic = True
for i in range(len(array.dtype)):
if not is_base_dtype(array.dtype[i]):
is_basic = False
break
else:
is_basic = is_base_dtype(array.dtype)
return is_basic
def is_1d_array(array: np.ndarray) -> bool:
"""
Determines whether a numpy array-like object is 1-dimensional.
Parameters
----------
array : numpy.ndarray
The array to be checked.
Raises
------
TypeError
The input array is not a numpy array-like object.
Warns
-----
UserWarning
The input array is 1-dimensional but its components are 1D structured.
Returns
-------
is_1d : boolean
True if the array is 1-dimensional, False otherwise.
"""
if not isinstance(array, np.ndarray):
raise TypeError('The input should be a numpy array-like.')
if is_structured_array(array):
is_1d = False
if len(array.dtype) == 1 and len(array.shape) == 1:
message = ('Structured (pseudo) 1-dimensional arrays are not '
'acceptable. A 1-dimensional structured numpy array '
'can be expressed as a classic numpy array with a '
'desired type.')
warnings.warn(message, category=UserWarning)
else:
is_1d = len(array.shape) == 1
return is_1d
def is_2d_array(array: np.ndarray) -> bool:
"""
Determines whether a numpy array-like object has 2 dimensions.
Parameters
----------
array : numpy.ndarray
The array to be checked.
Raises
------
TypeError
The input array is not a numpy array-like object.
Warns
-----
UserWarning
The input array is 2-dimensional but its components are 1D structured.
Returns
-------
is_2d : boolean
True if the array is 2-dimensional, False otherwise.
"""
if not isinstance(array, np.ndarray):
raise TypeError('The input should be a numpy array-like.')
if is_structured_array(array):
# pylint: disable=len-as-condition
if len(array.shape) == 2 and len(array.dtype) == 1:
is_2d = False
message = ('2-dimensional arrays with 1D structured elements are '
'not acceptable. Such a numpy array can be expressed '
'as a classic 2D numpy array with a desired type.')
warnings.warn(message, category=UserWarning)
elif len(array.shape) == 1 and len(array.dtype) > 0:
is_2d = True
for name in array.dtype.names:
if not is_flat_dtype(array.dtype[name]):
# This is a complex (multi-dimensional) embedded dtype
is_2d = False
break
else:
is_2d = False
else:
is_2d = len(array.shape) == 2
return is_2d
def is_structured_row(structured_row: np.void) -> bool:
"""
Determines whether the input is a structured numpy array's row object.
Parameters
----------
structured_row : numpy.void
The object to be checked.
Raises
------
TypeError
The input is not a structured numpy array's row object.
Returns
-------
is_structured_row : boolean
        True if the input is a structured numpy array's row object,
False otherwise.
"""
if not isinstance(structured_row, np.void):
raise TypeError('The input should be a row of a structured numpy '
'array (numpy.void type).')
return len(structured_row.dtype) != 0
def is_1d_like(oned_like_object: Union[np.ndarray, np.void]) -> bool:
"""
Checks if the input is either a 1D numpy array or a structured numpy row.
Parameters
----------
oned_like_object : Union[numpy.ndarray, numpy.void]
The object to be checked.
Raises
------
TypeError
The input is neither a numpy ndarray -- array-like object -- nor a
numpy void -- a row of a structured numpy array.
Returns
-------
is_1d_like_array : boolean
True if the input is either a 1-dimensional numpy array or a row of a
structured numpy array, False otherwise.
"""
is_1d_like_array = False
if isinstance(oned_like_object, np.void):
is_1d_like_array = is_structured_row(oned_like_object)
elif isinstance(oned_like_object, np.ndarray):
is_1d_like_array = is_1d_array(oned_like_object)
else:
raise TypeError('The input should either be a numpy array-like object '
'(numpy.ndarray) or a row of a structured numpy array '
'(numpy.void).')
return is_1d_like_array
def is_structured_array(array: np.ndarray) -> bool:
"""
Determines whether a numpy array-like object is a structured array.
Parameters
----------
array : numpy.ndarray
The array to be checked.
Raises
------
TypeError
The input array is not a numpy array-like object.
Returns
-------
is_structured : boolean
True if the array is a structured array, False otherwise.
"""
if not isinstance(array, np.ndarray):
raise TypeError('The input should be a numpy array-like.')
return len(array.dtype) != 0
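# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal demonstration of the validators above; it assumes numpy is imported
# as ``np`` and that the dtype helpers used above (is_numerical_dtype,
# is_flat_dtype, ...) behave as their names suggest.
if __name__ == '__main__':
    plain = np.array([[1.0, 2.0], [3.0, 4.0]])
    structured = np.array([(1, 'a'), (2, 'b')], dtype=[('num', int), ('txt', 'U1')])
    assert is_numerical_array(plain)
    assert not is_numerical_array(structured)  # mixes numerical and textual fields
    assert is_2d_array(plain)
    assert is_2d_array(structured)  # flat structured rows are treated as 2D
    assert is_structured_array(structured)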
| python |
'''URL validation with OOP
Points to observe in a URL: standard characters → "?", "&", "https://", "http://", "www." '''
import re
class ExtratorURL:
def __init__(self, url):
self.url = self.clear_url(url)
self.url_validation()
def clear_url(self, url):
if type(url) == str:
return url.strip()
else:
return ''
def url_validation(self):
        if not self.url:  # check whether the URL is empty
            raise ValueError('The URL is empty')
padraoURL = re.compile('(http(s)?://)(www.)?bytebank.com(.br)?(/cambio)?')
match = padraoURL.match(self.url.lower().strip())
if not match:
            raise ValueError('The URL is NOT valid')
def get_url_base(self):
interrogacaoLocal = self.url.find('?')
urlBase = self.url[:interrogacaoLocal]
return urlBase
def get_url_parameter(self):
interrogacaoLocal = self.url.find('?')
urlParameter = self.url[interrogacaoLocal + 1:]
return urlParameter
def get_parameter_value(self, parameterName):
localParameter = self.get_url_parameter().find(parameterName)
        parameterIndex = localParameter + len(parameterName) + 1  # start of the parameter value
divParameter = self.get_url_parameter().find('&', parameterIndex)
if divParameter == -1:
return self.get_url_parameter()[parameterIndex:]
else:
return self.get_url_parameter()[parameterIndex:divParameter]
def __len__(self):
return len(self.url)
def __str__(self):
print()
        return f'URL: {self.url}\nBase: {self.get_url_base()}\nParameters: {self.get_url_parameter()}\n' \
               f'URL length: {len(self.url)} chars\n'
def __eq__(self, other):
return self.url == other.url
extratorURL = ExtratorURL(input('Paste or type the URL: ').lower().strip())
print(extratorURL)
parameterName = 'quantidade'
print(f'The parameter "{parameterName.upper()}" equals \033[1;33;40m{extratorURL.get_parameter_value(parameterName)}\033[m')
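# --- Usage sketch (illustrative, not part of the original script) ---
# A non-interactive example with a made-up bytebank URL.
sample = ExtratorURL('https://bytebank.com/cambio?quantidade=100&moedaOrigem=real&moedaDestino=dolar')
print(sample.get_url_base())                     # https://bytebank.com/cambio
print(sample.get_parameter_value('quantidade'))  # 100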
| python |
#!/usr/bin/env python3
project = "stories"
copyright = "2018, Artem Malyshev"
author = "Artem Malyshev"
version = "0.9"
release = "0.9"
templates_path = ["templates"]
source_suffix = ".rst"
master_doc = "index"
language = None
exclude_patterns = ["_build"]
pygments_style = "sphinx"
html_theme = "alabaster"
html_static_path = ["static"]
html_sidebars = {
"**": [
"sidebarlogo.html",
"stats.html",
"globaltoc.html",
"relations.html",
"updates.html",
"links.html",
"searchbox.html",
"image_popup.html",
"gitter_sidecar.html",
]
}
html_theme_options = {
"show_powered_by": False,
"show_related": True,
"show_relbars": True,
"description": "Business transaction DSL. It provides a simple way to define a complex business transaction that includes processing by many different objects.", # noqa: E501
"github_user": "dry-python",
"github_repo": "stories",
"github_type": "star",
"github_count": True,
"github_banner": True,
}
| python |
from transformers import AutoModelWithLMHead, AutoTokenizer
def run_gpt2(gpt2_input):
    """Generate a continuation of ``gpt2_input`` with GPT-2 and return only the newly generated text."""
    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    model = AutoModelWithLMHead.from_pretrained('gpt2')
    sequence = gpt2_input
    input_ids = tokenizer.encode(sequence, return_tensors='pt')  # renamed to avoid shadowing the built-in input()
    generated = model.generate(input_ids, max_length=250, do_sample=True)
    resulting_string = tokenizer.decode(generated.tolist()[0])
    return resulting_string.replace(sequence, '')
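# --- Usage sketch (illustrative; downloads the GPT-2 weights on first run and
# produces different text on every call because of sampling) ---
if __name__ == '__main__':
    print(run_gpt2('The meaning of life is'))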
| python |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A batched replay buffer of nests of Tensors which can be sampled uniformly.
- Each add assumes tensors have batch_size as first dimension, and will store
each element of the batch in an offset segment, so that each batch dimension has
its own contiguous memory. Within batch segments, behaves as a circular buffer.
The get_next function returns 'ids' in addition to the data. This is not really
needed for the batched replay buffer, but is returned to be consistent with
the API for a priority replay buffer, which needs the ids to update priorities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import numpy as np
import tensorflow as tf
from tf_agents.replay_buffers import replay_buffer
from tf_agents.replay_buffers import table
from tf_agents.specs import tensor_spec
from tf_agents.utils import common
BufferInfo = collections.namedtuple('BufferInfo',
['ids', 'probabilities'])
@gin.configurable
class TFUniformReplayBuffer(replay_buffer.ReplayBuffer):
"""A TFUniformReplayBuffer with batched adds and uniform sampling."""
def __init__(self,
data_spec,
batch_size,
max_length=1000,
scope='TFUniformReplayBuffer',
device='cpu:*',
table_fn=table.Table,
dataset_drop_remainder=False,
dataset_window_shift=None,
stateful_dataset=False):
"""Creates a TFUniformReplayBuffer.
The TFUniformReplayBuffer stores episodes in `B == batch_size` blocks of
size `L == max_length`, with total frame capacity
`C == L * B`. Storage looks like:
```
block1 ep1 frame1
frame2
...
ep2 frame1
frame2
...
<L frames total>
block2 ep1 frame1
frame2
...
ep2 frame1
frame2
...
<L frames total>
...
blockB ep1 frame1
frame2
...
ep2 frame1
frame2
...
<L frames total>
```
Multiple episodes may be stored within a given block, up to `max_length`
frames total. In practice, new episodes will overwrite old ones as the
block rolls over its `max_length`.
Args:
data_spec: A TensorSpec or a list/tuple/nest of TensorSpecs describing a
single item that can be stored in this buffer.
batch_size: Batch dimension of tensors when adding to buffer.
max_length: The maximum number of items that can be stored in a single
batch segment of the buffer.
scope: Scope prefix for variables and ops created by this class.
device: A TensorFlow device to place the Variables and ops.
table_fn: Function to create tables `table_fn(data_spec, capacity)` that
can read/write nested tensors.
dataset_drop_remainder: If `True`, then when calling
`as_dataset` with arguments `single_deterministic_pass=True` and
`sample_batch_size is not None`, the final batch will be dropped if it
does not contain exactly `sample_batch_size` items. This is helpful for
static shape inference as the resulting tensors will always have
leading dimension `sample_batch_size` instead of `None`.
dataset_window_shift: Window shift used when calling
`as_dataset` with arguments `single_deterministic_pass=True` and
`num_steps is not None`. This determines how the resulting frames are
windowed. If `None`, then there is no overlap created between frames
and each frame is seen exactly once. For example, if `max_length=5`,
`num_steps=2`, `sample_batch_size=None`, and
`dataset_window_shift=None`, then the datasets returned will have
frames `{[0, 1], [2, 3], [4]}`.
If `num_steps is not None`, then windows are created
with a window overlap of `dataset_window_shift` and you will see each
frame up to `num_steps` times. For example, if `max_length=5`,
`num_steps=2`, `sample_batch_size=None`, and `dataset_window_shift=1`,
then the datasets returned will have windows of shifted repeated frames:
        `{[0, 1], [1, 2], [2, 3], [3, 4], [4]}`.
For more details, see the documentation of `tf.data.Dataset.window`,
specifically for the `shift` argument.
The default behavior is to not overlap frames
(`dataset_window_shift=None`) but users often want to see all
combinations of frame sequences, in which case `dataset_window_shift=1`
is the appropriate value.
stateful_dataset: whether the dataset contains stateful ops or not.
"""
self._batch_size = batch_size
self._max_length = max_length
capacity = self._batch_size * self._max_length
super(TFUniformReplayBuffer, self).__init__(
data_spec, capacity, stateful_dataset)
self._id_spec = tensor_spec.TensorSpec([], dtype=tf.int64, name='id')
self._capacity_value = np.int64(self._capacity)
self._batch_offsets = (
tf.range(self._batch_size, dtype=tf.int64) * self._max_length)
self._scope = scope
self._device = device
self._table_fn = table_fn
self._dataset_drop_remainder = dataset_drop_remainder
self._dataset_window_shift = dataset_window_shift
with tf.device(self._device), tf.compat.v1.variable_scope(self._scope):
self._capacity = tf.constant(capacity, dtype=tf.int64)
self._data_table = table_fn(self._data_spec, self._capacity_value)
self._id_table = table_fn(self._id_spec, self._capacity_value)
self._last_id = common.create_variable('last_id', -1)
self._last_id_cs = tf.CriticalSection(name='last_id')
def variables(self):
return (self._data_table.variables() +
self._id_table.variables() +
[self._last_id])
@property
def device(self):
return self._device
@property
def table_fn(self):
return self._table_fn
@property
def scope(self):
return self._scope
# Methods defined in ReplayBuffer base class
def _num_frames(self):
num_items_single_batch_segment = self._get_last_id() + 1
total_frames = num_items_single_batch_segment * self._batch_size
return tf.minimum(total_frames, self._capacity)
def _add_batch(self, items):
"""Adds a batch of items to the replay buffer.
Args:
items: A tensor or list/tuple/nest of tensors representing a batch of
items to be added to the replay buffer. Each element of `items` must match
the data_spec of this class. Should be shape [batch_size, data_spec, ...]
Returns:
An op that adds `items` to the replay buffer.
Raises:
ValueError: If called more than once.
"""
tf.nest.assert_same_structure(items, self._data_spec)
with tf.device(self._device), tf.name_scope(self._scope):
id_ = self._increment_last_id()
write_rows = self._get_rows_for_id(id_)
write_id_op = self._id_table.write(write_rows, id_)
write_data_op = self._data_table.write(write_rows, items)
return tf.group(write_id_op, write_data_op)
def _get_next(self,
sample_batch_size=None,
num_steps=None,
time_stacked=True):
"""Returns an item or batch of items sampled uniformly from the buffer.
Sample transitions uniformly from replay buffer. When sub-episodes are
desired, specify num_steps, although note that for the returned items to
truly be sub-episodes also requires that experience collection be
single-threaded.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. See get_next() documentation.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. See get_next() documentation.
time_stacked: Bool, when true and num_steps > 1 get_next on the buffer
would return the items stack on the time dimension. The outputs would be
[B, T, ..] if sample_batch_size is given or [T, ..] otherwise.
Returns:
A 2 tuple, containing:
- An item, sequence of items, or batch thereof sampled uniformly
from the buffer.
- BufferInfo NamedTuple, containing:
- The items' ids.
- The sampling probability of each item.
Raises:
ValueError: if num_steps is bigger than the capacity.
"""
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('get_next'):
min_val, max_val = _valid_range_ids(
self._get_last_id(), self._max_length, num_steps)
rows_shape = () if sample_batch_size is None else (sample_batch_size,)
assert_nonempty = tf.compat.v1.assert_greater(
max_val,
min_val,
message='TFUniformReplayBuffer is empty. Make sure to add items '
'before sampling the buffer.')
with tf.control_dependencies([assert_nonempty]):
num_ids = max_val - min_val
probability = tf.cond(
pred=tf.equal(num_ids, 0),
true_fn=lambda: 0.,
false_fn=lambda: 1. / tf.cast(num_ids * self._batch_size, # pylint: disable=g-long-lambda
tf.float32))
ids = tf.random.uniform(
rows_shape, minval=min_val, maxval=max_val, dtype=tf.int64)
# Move each id sample to a random batch.
batch_offsets = tf.random.uniform(
rows_shape, minval=0, maxval=self._batch_size, dtype=tf.int64)
batch_offsets *= self._max_length
ids += batch_offsets
if num_steps is None:
rows_to_get = tf.math.mod(ids, self._capacity)
data = self._data_table.read(rows_to_get)
data_ids = self._id_table.read(rows_to_get)
else:
if time_stacked:
step_range = tf.range(num_steps, dtype=tf.int64)
if sample_batch_size:
step_range = tf.reshape(step_range, [1, num_steps])
step_range = tf.tile(step_range, [sample_batch_size, 1])
ids = tf.tile(tf.expand_dims(ids, -1), [1, num_steps])
else:
step_range = tf.reshape(step_range, [num_steps])
rows_to_get = tf.math.mod(step_range + ids, self._capacity)
data = self._data_table.read(rows_to_get)
data_ids = self._id_table.read(rows_to_get)
else:
data = []
data_ids = []
for step in range(num_steps):
steps_to_get = tf.math.mod(ids + step, self._capacity)
items = self._data_table.read(steps_to_get)
data.append(items)
data_ids.append(self._id_table.read(steps_to_get))
data = tuple(data)
data_ids = tuple(data_ids)
probabilities = tf.fill(rows_shape, probability)
buffer_info = BufferInfo(ids=data_ids,
probabilities=probabilities)
return data, buffer_info
@gin.configurable(
'tf_agents.tf_uniform_replay_buffer.TFUniformReplayBuffer.as_dataset')
def as_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None,
single_deterministic_pass=False):
return super(TFUniformReplayBuffer, self).as_dataset(
sample_batch_size, num_steps, num_parallel_calls,
single_deterministic_pass=single_deterministic_pass)
def _as_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None):
"""Creates a dataset that returns entries from the buffer in shuffled order.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. See as_dataset() documentation.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. See as_dataset() documentation.
num_parallel_calls: (Optional.) Number elements to process in parallel.
See as_dataset() documentation.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items or batch thereof
- Auxiliary info for the items (i.e. ids, probs).
"""
def get_next(_):
return self.get_next(sample_batch_size, num_steps, time_stacked=True)
dataset = tf.data.experimental.Counter().map(
get_next, num_parallel_calls=num_parallel_calls)
return dataset
def _single_deterministic_pass_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None):
"""Creates a dataset that returns entries from the buffer in fixed order.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. See as_dataset() documentation.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. See as_dataset() documentation.
num_parallel_calls: (Optional.) Number elements to process in parallel.
See as_dataset() documentation.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items or batch thereof
- Auxiliary info for the items (i.e. ids, probs).
Raises:
ValueError: If `dataset_drop_remainder` is set, and
`sample_batch_size > self.batch_size`. In this case all data will
be dropped.
"""
static_size = tf.get_static_value(sample_batch_size)
static_num_steps = tf.get_static_value(num_steps)
static_self_batch_size = tf.get_static_value(self._batch_size)
static_self_max_length = tf.get_static_value(self._max_length)
if (self._dataset_drop_remainder
and static_size is not None
and static_self_batch_size is not None
and static_size > static_self_batch_size):
raise ValueError(
'sample_batch_size ({}) > self.batch_size ({}) and '
'dataset_drop_remainder is True. In '
'this case, ALL data will be dropped by the deterministic dataset.'
.format(static_size, static_self_batch_size))
if (self._dataset_drop_remainder
and static_num_steps is not None
and static_self_max_length is not None
and static_num_steps > static_self_max_length):
raise ValueError(
'num_steps_size ({}) > self.max_length ({}) and '
'dataset_drop_remainder is True. In '
'this case, ALL data will be dropped by the deterministic dataset.'
.format(static_num_steps, static_self_max_length))
def get_row_ids(_):
"""Passed to Dataset.range(self._batch_size).flat_map(.), gets row ids."""
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('single_deterministic_pass_dataset'):
# Here we pass num_steps=None because _valid_range_ids uses
# num_steps to determine a hard stop when sampling num_steps starting
# from the returned indices. But in our case, we want all the indices
# and we'll use TF dataset's window() mechanism to get
# num_steps-length blocks. The window mechanism handles this stuff
# for us.
min_frame_offset, max_frame_offset = _valid_range_ids(
self._get_last_id(), self._max_length, num_steps=None)
# With auto-deps the top-level return of assert_less is not touched,
# even though the operation is executed. So we add a mark_used call.
tf.compat.v1.assert_less(
min_frame_offset,
max_frame_offset,
message='TFUniformReplayBuffer is empty. Make sure to add items '
'before asking the buffer for data.').mark_used()
min_max_frame_range = tf.range(min_frame_offset, max_frame_offset)
drop_remainder = self._dataset_drop_remainder
window_shift = self._dataset_window_shift
def group_windows(ds_):
return ds_.batch(num_steps, drop_remainder=drop_remainder)
if sample_batch_size is None:
def row_ids(b):
# Create a vector of shape [num_frames] and slice it along each
# frame.
ids = tf.data.Dataset.from_tensor_slices(
b * self._max_length + min_max_frame_range)
if num_steps is not None:
ids = (ids.window(num_steps, shift=window_shift)
.flat_map(group_windows))
return ids
return tf.data.Dataset.range(self._batch_size).flat_map(row_ids)
else:
def batched_row_ids(batch):
# Create a matrix of indices shaped [num_frames, batch_size]
# and slice it along each frame row to get groups of batches
# for frame 0, frame 1, ...
return tf.data.Dataset.from_tensor_slices(
(min_max_frame_range[:, tf.newaxis]
+ batch * self._max_length))
indices_ds = (
tf.data.Dataset.range(self._batch_size)
.batch(sample_batch_size, drop_remainder=drop_remainder)
.flat_map(batched_row_ids))
if num_steps is not None:
# We have sequences of num_frames rows shaped [sample_batch_size].
# Window and group these to rows of shape
# [num_steps, sample_batch_size], then
# transpose them to get index tensors of shape
# [sample_batch_size, num_steps].
indices_ds = (indices_ds.window(num_steps, shift=window_shift)
.flat_map(group_windows)
.map(tf.transpose))
return indices_ds
# Get our indices as a dataset; each time we reinitialize the iterator we
# update our min/max id bounds from the state of the replay buffer.
ds = tf.data.Dataset.range(1).flat_map(get_row_ids)
def get_data(id_):
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('single_deterministic_pass_dataset'):
data = self._data_table.read(id_ % self._capacity)
buffer_info = BufferInfo(ids=id_, probabilities=())
return (data, buffer_info)
# Deterministic even though num_parallel_calls > 1. Operations are
# run in parallel but then the results are returned in original stream
# order.
ds = ds.map(get_data, num_parallel_calls=num_parallel_calls)
return ds
def _gather_all(self):
"""Returns all the items in buffer, shape [batch_size, timestep, ...].
Returns:
All the items currently in the buffer.
"""
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('gather_all'):
# Make ids, repeated over batch_size. Shape [batch_size, num_ids, ...].
min_val, max_val = _valid_range_ids(
self._get_last_id(), self._max_length)
ids = tf.range(min_val, max_val)
ids = tf.stack([ids] * self._batch_size)
rows = tf.math.mod(ids, self._max_length)
# Make batch_offsets, shape [batch_size, 1], then add to rows.
batch_offsets = tf.expand_dims(
tf.range(self._batch_size, dtype=tf.int64) * self._max_length,
1)
rows += batch_offsets
# Expected shape is [batch_size, max_length, ...].
data = self._data_table.read(rows)
return data
def _clear(self, clear_all_variables=False):
"""Return op that resets the contents of replay buffer.
Args:
clear_all_variables: boolean indicating if all variables should be
cleared. By default, table contents will be unlinked from
replay buffer, but values are unmodified for efficiency. Set
`clear_all_variables=True` to reset all variables including Table
contents.
Returns:
op that clears or unlinks the replay buffer contents.
"""
table_vars = self._data_table.variables() + self._id_table.variables()
def _init_vars():
assignments = [self._last_id.assign(-1)]
if clear_all_variables:
assignments += [v.assign(tf.zeros_like(v)) for v in table_vars]
return tf.group(*assignments, name='clear')
return self._last_id_cs.execute(_init_vars)
# Helper functions.
def _increment_last_id(self, increment=1):
"""Increments the last_id in a thread safe manner.
Args:
increment: amount to increment last_id by.
Returns:
An op that increments the last_id.
"""
def _assign_add():
return self._last_id.assign_add(increment).value()
return self._last_id_cs.execute(_assign_add)
def _get_last_id(self):
def last_id():
return self._last_id.value()
return self._last_id_cs.execute(last_id)
def _get_rows_for_id(self, id_):
"""Make a batch_size length list of tensors, with row ids for write."""
id_mod = tf.math.mod(id_, self._max_length)
rows = self._batch_offsets + id_mod
return rows
def _valid_range_ids(last_id, max_length, num_steps=None):
"""Returns the [min_val, max_val) range of ids.
When num_steps is provided, [min_val, max_val+num_steps) are also valid ids.
Args:
last_id: The last id added to the buffer.
max_length: The max length of each batch segment in the buffer.
    num_steps: Optional way to specify how many consecutive ids need to be valid.
Returns:
A tuple (min_id, max_id) for the range [min_id, max_id) of valid ids.
"""
if num_steps is None:
num_steps = tf.constant(1, tf.int64)
min_id_not_full = tf.constant(0, dtype=tf.int64)
max_id_not_full = tf.maximum(last_id + 1 - num_steps + 1, 0)
min_id_full = last_id + 1 - max_length
max_id_full = last_id + 1 - num_steps + 1
return (tf.where(last_id < max_length, min_id_not_full, min_id_full),
tf.where(last_id < max_length, max_id_not_full, max_id_full))
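# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal sketch assuming TF2 eager execution; the spec and shapes below are
# made up for the demo.
if __name__ == '__main__':
  spec = tf.TensorSpec([], tf.float32, 'observation')
  rb = TFUniformReplayBuffer(data_spec=spec, batch_size=2, max_length=8)
  for step in range(4):
    # One frame per batch segment, shape [batch_size].
    rb.add_batch(tf.constant([float(step), float(step) + 0.5]))
  sample, info = rb.get_next(sample_batch_size=3)
  print(sample.shape, info.ids)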
| python |
import numpy as np
from protosc.model.utils import train_xvalidate, create_clusters, select_features
from protosc.model.filter import FilterModel
from protosc.simulation import create_correlated_data, create_independent_data
from protosc.feature_matrix import FeatureMatrix
def get_test_matrix(n_row=100, n_col=50):
X = np.zeros((n_row, n_col))
X = X + np.arange(n_row).reshape(n_row, 1)
X = X + np.arange(n_col).reshape(1, n_col)/1000
y = np.random.randint(2, size=n_row)
return FeatureMatrix(X), y
def test_select_fold():
n_fold = 5
n_row = 100
n_col = 50
X, y = get_test_matrix(n_row, n_col)
rng = np.random.default_rng()
for X_train, y_train, X_val, y_val in X.kfold(y, n_fold, rng, balance=False):
assert np.allclose(X_train.shape, ((n_fold-1)/n_fold*n_row, n_col))
assert len(y_train) == X_train.shape[0]
        assert np.allclose(X_val.shape, (1/n_fold*n_row, n_col))
assert len(y_val) == X_val.shape[0]
assert len(np.unique(X_train[:])) == X_train.size
assert len(np.unique(X_val[:])) == X_val.size
for X_train, y_train, X_val, y_val in X.kfold(y, n_fold, rng, balance=True):
assert np.sum(y_train) == len(y_train)/2
assert np.sum(y_val) == len(y_val)/2
assert len(np.unique(X_train[:])) == X_train.size
assert len(np.unique(X_val[:])) == X_val.size
assert isinstance(
train_xvalidate(X_train[:], y_train, X_val[:], y_val), float)
def test_select_clusters():
X, _, truth = create_correlated_data()
X = FeatureMatrix.from_matrix(X)
features_sorted = np.random.permutation(X.shape[1])
cluster_groups = create_clusters(features_sorted, X)
for cluster in cluster_groups:
assert np.all(np.array(
truth["clusters"][cluster]) == truth["clusters"][cluster][0])
def test_select_features():
X, y, _ = create_independent_data()
selected_features, clusters = select_features(X, y)
assert isinstance(selected_features, list)
assert isinstance(clusters, list)
| python |
# -*- coding: utf-8 -*-
import base64
import hashlib
import math
import time
from datetime import datetime
# from ccxt.base.errors import AuthenticationError, InvalidOrder
from ccxt.base.errors import ExchangeError
from ccxt.base.exchange import Exchange
class qtrade (Exchange):
def describe(self):
return self.deep_extend(super(qtrade, self).describe(), {
'id': 'qtrade',
'name': 'qTrade',
'countries': ['US'],
'rateLimit': 100,
# 'has': {
# 'fetchCurrencies': True,
# 'fetchTickers': True,
# 'fetchOpenOrders': True,
# 'fetchMyTrades': True,
# 'fetchDepositAddress': True,
# },
'urls': {
                'logo': 'https://qtrade.io/images/logo.png',
'api': 'https://api.qtrade.io/v1',
'www': 'https://qtrade.io/',
'doc': 'https://qtrade-exchange.github.io/qtrade-docs/',
'fees': 'https://qtrade.io/fees',
'referral': 'https://qtrade.io/?ref=AZCXUQ6P5KCG',
},
'api': {
'public': {
'get': [
'markets',
'market/{market_id}',
'currencies',
'tickers',
# 'ticker/{market_string}', # NOTE: dont implement
'ticker_by_id/{market_id}',
# 'orderbook/{market_string}', # NOTE: dont implement
                    'orderbook_by_id/{market_id}',
'market/{market_id}/ohlcv/{interval}',
],
},
'private': {
'get': [
# 'user/me', # NOTE: dont implement
'user/balances',
'user/market/{market_id}',
'user/orders',
'user/order/{order_id}',
'user/withdraws',
'user/withdraw/{withdraw_id}',
'user/deposits',
# 'user/deposit/{deposit_id}', # NOTE: This endpoint currently non-functional
'user/transfers' # NOTE: Returns a list of the user's Transfers and metadata.
],
'post': [
'user/cancel_order',
# 'user/deposit_address/{currency}' # NOTE: dont implement
'user/sell_limit',
'user/buy_limit',
],
},
},
# 'commonCurrencies': {
# 'EPC': 'Epacoin',
# },
'fees': {
'trading': {
'maker': 0.005,
'taker': 0.005,
},
},
'precision': {
'amount': 6,
'price': 8,
},
})
# def fetch_currencies(self, params={}):
# currencies = self.publicGetCurrencies(params)
# ids = list(currencies.keys())
# result = {}
# for i in range(0, len(ids)):
# id = ids[i]
# currency = currencies[id]
# precision = self.safe_integer(currency, 'decimal')
# uppercase = id.upper()
# code = self.common_currency_code(uppercase)
# active = self.safe_integer(currency, 'active') == 1
# maintenance = self.safe_integer(currency, 'under_maintenance')
# if maintenance != 0:
# active = False
# canWithdraw = self.safe_integer(currency, 'is_withdrawal_active') == 1
# canDeposit = self.safe_integer(currency, 'is_deposit_active') == 1
# if not canWithdraw or not canDeposit:
# active = False
# result[code] = {
# 'id': id,
# 'code': code,
# 'name': currency['name'],
# 'active': active,
# 'precision': precision,
# 'funding': {
# 'withdraw': {
# 'active': canWithdraw,
# 'fee': self.safe_float(currency, 'txWithdrawalFee'),
# },
# 'deposit': {
# 'active': canDeposit,
# 'fee': self.safe_float(currency, 'txDepositFee'),
# },
# },
# 'limits': {
# 'amount': {
# 'min': self.safe_float(currency, 'minAmountTrade'),
# 'max': math.pow(10, precision),
# },
# 'price': {
# 'min': math.pow(10, -precision),
# 'max': math.pow(10, precision),
# },
# 'cost': {
# 'min': None,
# 'max': None,
# },
# 'withdraw': {
# 'min': self.safe_float(currency, 'MinWithdrawal'),
# 'max': math.pow(10, precision),
# },
# 'deposit': {
# 'min': self.safe_float(currency, 'minDeposit'),
# 'max': None,
# },
# },
# 'info': currency,
# }
# return result
def fetch_markets(self, params={}):
markets = self.publicGetMarkets()['data']['markets']
result = []
for market in markets:
id = market['id']
baseId = market['market_currency']
quoteId = market['base_currency']
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
active = market['can_trade']
precision = self.precision
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
symbol = market['symbol']
timestamp = ticker['date']
ticker = ticker['ticker']
last = self.safe_float(ticker, 'last')
open_price = self.safe_float(ticker, 'open')
if last and open_price:
change = last - open_price
else:
change = None
if self.safe_float(ticker, 'day_change'):
percentage = self.safe_float(ticker, 'day_change') * 100
else:
percentage = None
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'day_high'),
'low': self.safe_float(ticker, 'day_low'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'day_open'),
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': self.safe_float(ticker, 'day_avg_price'),
'baseVolume': self.safe_float(ticker, 'day_volume_market'),
'quoteVolume': self.safe_float(ticker, 'day_volume_base'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
ticker = self.publicGetTickerByIdMarketId(self.extend({
'market_id': market['id'],
}, params))['data']
ticker = {
'date': self.milliseconds(),
'ticker': ticker,
}
return self.parse_ticker(ticker, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTickers(params)['data']['markets']
result = {}
timestamp = self.milliseconds()
for ticker in tickers:
market = self.markets_by_id[ticker['id']]
symbol = market['symbol']
ticker = {
'date': timestamp,
'ticker': ticker,
}
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
timestamp = self.milliseconds()
orderbook = self.publicGetOrderbookByIdMarketId(self.extend({
'market_id': self.market_id(symbol),
}, params))['data']
result = dict()
buy_orders = list()
for price, amount in orderbook['buy'].items():
buy_orders.append([float(price), float(amount)])
result['buy'] = sorted(buy_orders, key=lambda t: t[0], reverse=True)
sell_orders = list()
for price, amount in orderbook['sell'].items():
sell_orders.append([float(price), float(amount)])
result['sell'] = sorted(sell_orders, key=lambda t: t[0])
return self.parse_order_book(result, timestamp, 'buy', 'sell')
def fetch_balance(self, params={}):
self.load_markets()
balances = self.privateGetUserBalances(params)['data']['balances']
result = {'info': balances}
for balance in balances:
amount = balance['balance']
currency = self.common_currency_code(balance['currency'])
account = {
'free': float(amount),
'used': 0.0,
'total': float(amount),
}
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
def parse_trade(self, trade, market=None):
# Common fields
created_at = datetime.strptime(trade['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ")
timestamp = int(created_at.timestamp() * 1000)
symbol = market['symbol']
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'market_amount')
# Result
result = dict()
result['datetime'] = self.iso8601(timestamp)
result['timestamp'] = timestamp
result['symbol'] = symbol
result['price'] = price
result['amount'] = amount
result['info'] = trade['info']
# My trade info
trade_id = self.safe_string(trade, 'id')
if trade_id:
result['id'] = trade_id
result['order'] = self.safe_string(trade, 'order_id')
result['type'] = trade['type']
result['side'] = trade['side']
if trade['taker']:
result['takerOrMaker'] = 'taker'
else:
result['takerOrMaker'] = 'maker'
result['cost'] = self.safe_float(trade, 'base_amount')
fee = self.safe_float(trade, 'base_fee')
fee_currency = market['quote']
result['fee'] = {
'cost': fee,
'currency': fee_currency
}
return result
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
data = self.publicGetMarketMarketId(self.extend({
'market_id': market['id'],
}, params))['data']
trades = list()
for trade in data['recent_trades']:
trade_obj = trade.copy()
trade_obj['info'] = trade
trades.append(trade_obj)
return self.parse_trades(trades, market, since, limit)
def parse_order(self, order, market=None):
order_id = self.safe_string(order, 'id')
market = self.markets_by_id[order['market_id']]
symbol = market['symbol']
created_at = datetime.strptime(order['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ")
timestamp = int(created_at.timestamp() * 1000)
side, order_type = order['order_type'].split('_')
if order['open']:
status = 'open'
else:
status = 'closed'
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'market_amount')
remaining = self.safe_float(order, 'market_amount_remaining')
filled = amount - remaining
cost = filled * price
trades = list()
if order['trades']:
for trade in order['trades']:
trade_obj = trade.copy()
trade_obj['order_id'] = order_id
trade_obj['info'] = trade
trades.append(self.parse_trade(trade_obj, market=market))
return {
'id': order_id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': order_type,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'remaining': remaining,
'cost': cost,
'trades': trades,
'info': order['info'],
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
if side == 'buy':
method = 'privatePostUserBuyLimit'
else:
method = 'privatePostUserSellLimit'
data = getattr(self, method)(self.extend({
'market_id': market['id'],
'price': str(self.price_to_precision(symbol, price)),
'amount': str(self.amount_to_precision(symbol, amount)),
}, params))['data']['order']
# if not data:
# raise InvalidOrder(self.id + ' ' + self.json(response))
order_obj = data.copy()
order_obj['info'] = data
order = self.parse_order(order_obj)
id = order['id']
self.orders[id] = order
return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
result = self.privatePostUserCancelOrder(self.extend({
'id': int(id)
}, params))
return result
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
data = self.privateGetUserOrderOrderId(self.extend({
'order_id': id
}, params))['data']['order']
order_obj = data.copy()
order_obj['info'] = data
order = self.parse_order(order_obj)
return order
def _parse_raw_orders(self, raw_orders, market, since, limit):
order_objes = list()
for order in raw_orders:
order_obj = order.copy()
order_obj['info'] = order
order_objes.append(order_obj)
return self.parse_orders(order_objes, market, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if symbol:
market = self.market(symbol)
data = self.privateGetUserMarketMarketId(self.extend({
'market_id': int(market['id'])
}, params))['data']
raw_orders = data['closed_orders'] + data['open_orders']
else:
market = None
raw_orders = self.privateGetUserOrders(self.extend({}, params))['data']['orders']
return self._parse_raw_orders(raw_orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if symbol:
market = self.market(symbol)
data = self.privateGetUserMarketMarketId(self.extend({
'market_id': market['id']
}, params))['data']
raw_orders = data['open_orders']
orders = self._parse_raw_orders(raw_orders, market, since, limit)
else:
total_orders = self.fetch_orders(symbol, since, limit, params)
orders = self.filter_by(total_orders, 'status', 'open')
return orders
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if symbol:
market = self.market(symbol)
data = self.privateGetUserMarketMarketId(self.extend({
'market_id': market['id']
}, params))['data']
raw_orders = data['closed_orders']
orders = self._parse_raw_orders(raw_orders, market, since, limit)
else:
total_orders = self.fetch_orders(symbol, since, limit, params)
orders = self.filter_by(total_orders, 'status', 'closed')
return orders
# def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# self.load_markets()
# market = self.market(symbol)
# trades = self.privatePostTradeHistory(self.extend({
# 'market': market['id'],
# }, params))
# return self.parse_trades(trades['trade_history'], market, since, limit)
# def fetch_deposit_address(self, code, params={}):
# self.load_markets()
# currency = self.currency(code)
# response = self.privatePostDepositAddress(self.extend({
# 'currency': currency['id'],
# }, params))
# address = self.safe_string(response, 'deposit_address')
# self.check_address(address)
# tag = self.safe_string(response, 'payment_id')
# return {
# 'currency': code,
# 'address': address,
# 'tag': tag,
# 'info': response,
# }
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api']
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params)
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif query:
body = self.json(query)
timestamp = str(int(time.time()))
request_details = method + "\n"
request_details += '/v1/' + self.implode_params(path, params) + "\n"
request_details += timestamp + "\n"
if body:
request_details += body + "\n"
else:
request_details += "\n"
request_details += self.secret
hsh = hashlib.sha256(request_details.encode("utf8")).digest()
signature = base64.b64encode(hsh)
headers = {
"Authorization": "HMAC-SHA256 {}:{}".format(self.apiKey, signature.decode("utf8")),
"HMAC-Timestamp": timestamp,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
try:
response = self.fetch2(path, api, method, params, headers, body)
except Exception as e:
raise ExchangeError(e)
# if response:
# success = self.safe_integer(response, 'success')
# if success == 0:
# message = self.safe_string(response, 'message')
# if message == 'Invalid APIKey':
# raise AuthenticationError(message)
# raise ExchangeError(message)
return response
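# --- Usage sketch (illustrative, not part of the original class) ---
# Public endpoints need no credentials; output depends on the live exchange.
if __name__ == '__main__':
    exchange = qtrade()
    markets = exchange.fetch_markets()  # GET /v1/markets
    print([m['symbol'] for m in markets][:5])
    # Private calls additionally need: qtrade({'apiKey': '...', 'secret': '...'})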
| python |
import os
from typing import List
#
# get next filename under the [exchange directory]. if there is no folder for filename - the folder will be created
#
def get_next_report_filename(dir, filename_mask):
filename_mask2 = filename_mask % (dir, 0)
directory = os.path.dirname(filename_mask2)
try:
os.stat(directory)
    except OSError:
os.mkdir(directory)
print("New directory created:", directory)
deals_id = 0
while os.path.exists(filename_mask % (directory, deals_id)):
deals_id += 1
return deals_id
# get the next filename in an indexed way: if file.txt exists then file_0.txt will be created, and so on
def get_next_filename_index(path):
path = os.path.expanduser(path)
# if not os.path.exists(path):
# return path
root, ext = os.path.splitext(os.path.expanduser(path))
directory = os.path.dirname(root)
fname = os.path.basename(root)
candidate = fname+ext
index = 0
ls = set(os.listdir(directory))
while candidate in ls:
candidate = "{}_{}{}".format(fname,index,ext)
index += 1
return os.path.join(directory, candidate)
def dict_value_from_path(src_dict: dict, path: List[str], case_sensitive: bool = False):
"""
returns the value of dict field specified via "path" in form of a list of keys. By default the keys are matching
case insensitive way.
Example:
src_dict = {"level1:{"level2":{"level3:value}}}
list_of_keys = ["level1", "level2", "level3"]
:param src_dict: dict from where to extract data b
:param path: list of keys to specify the needed data
:param case_sensitive: case sensototy flag for matching keys of dict against path entries
:return: value of a dict branch
"""
s = src_dict.copy()
key_upper = dict()
key = ""
for p in path:
if not case_sensitive:
key_upper_key = {key.upper(): key for key in s.keys()}
key = key_upper_key[p.upper()] if p.upper() in key_upper_key else None
else:
key = p
try:
s = s[key]
except Exception as e:
s = None
break
return s
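# --- Usage sketch (illustrative, not part of the original module) ---
if __name__ == '__main__':
    cfg = {"Exchange": {"Limits": {"rate": 100}}}
    print(dict_value_from_path(cfg, ["exchange", "limits", "rate"]))                # 100 (case-insensitive)
    print(dict_value_from_path(cfg, ["Exchange", "missing"], case_sensitive=True))  # None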
| python |
#!/usr/bin/env python
# coding: utf-8
# This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Implement Fizz Buzz.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * What is fizz buzz?
# * Return the string representation of numbers from 1 to n
# * Multiples of 3 -> 'Fizz'
# * Multiples of 5 -> 'Buzz'
# * Multiples of 3 and 5 -> 'FizzBuzz'
# * Can we assume the inputs are valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# <pre>
# * None -> Exception
# * < 1 -> Exception
# * 15 ->
# [
# '1',
# '2',
# 'Fizz',
# '4',
# 'Buzz',
# 'Fizz',
# '7',
# '8',
# 'Fizz',
# 'Buzz',
# '11',
# 'Fizz',
# '13',
# '14',
# 'FizzBuzz'
# ]
# </pre>
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/arrays_strings/fizz_buzz/fizz_buzz_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
# In[ ]:
class Solution(object):
def fizz_buzz(self, num):
# TODO: Implement me
pass
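# In[ ]:
# One possible implementation sketch (not the official solution notebook's answer);
# it is kept separate from the Solution stub above so the challenge flow is unchanged.
class FizzBuzzSketch(object):
    def fizz_buzz(self, num):
        if num is None:
            raise TypeError('num cannot be None')
        if num < 1:
            raise ValueError('num cannot be less than one')
        results = []
        for i in range(1, num + 1):
            if i % 15 == 0:
                results.append('FizzBuzz')
            elif i % 3 == 0:
                results.append('Fizz')
            elif i % 5 == 0:
                results.append('Buzz')
            else:
                results.append(str(i))
        return results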
# ## Unit Test
# **The following unit test is expected to fail until you solve the challenge.**
# In[ ]:
# %load test_fizz_buzz.py
import unittest
class TestFizzBuzz(unittest.TestCase):
def test_fizz_buzz(self):
solution = Solution()
self.assertRaises(TypeError, solution.fizz_buzz, None)
self.assertRaises(ValueError, solution.fizz_buzz, 0)
expected = [
'1',
'2',
'Fizz',
'4',
'Buzz',
'Fizz',
'7',
'8',
'Fizz',
'Buzz',
'11',
'Fizz',
'13',
'14',
'FizzBuzz'
]
self.assertEqual(solution.fizz_buzz(15), expected)
print('Success: test_fizz_buzz')
def main():
test = TestFizzBuzz()
test.test_fizz_buzz()
if __name__ == '__main__':
main()
# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/arrays_strings/fizz_buzz/fizz_buzz_solution.ipynb) for a discussion on algorithms and code solutions.
| python |
#IP Address of the SQL server
host = "157.230.209.171"
#MySql username
user = "easley_1267"
#MySQL password
password = "ROY7iOUUQAt18r8qnsXf5jO3foUHgAbp" | python |
import pandas as pd
def convert_jh_global_time_series_to_long(df, name):
"""Converts JH global time series data from wide to long format"""
df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'],
var_name='date',
value_name=name)
# Convert to datetime
df['date'] = pd.to_datetime(df['date'], format="%m/%d/%y").dt.normalize()
# Rename columns
df.columns = ['province/state', 'country/region',
'latitude', 'longitude', 'date', name]
return df
def merge_dataframes(df1, df2, df3=None):
"""Merges JH global time series dataframes"""
    merged_df = pd.merge(df1, df2,
on=['Province/State', 'Country/Region',
'Lat', 'Long', 'date'],
how='inner')
    if df3 is not None:
merged_df = pd.merge(merged_df, df3,
on=['Province/State', 'Country/Region',
'Lat', 'Long', 'date'],
how='inner')
return merged_df
def consolidate_country_regions(df):
"""Selects the rows with overall country stats and drops region column"""
rtn_df = (df.loc[df['province/state'].isnull()]
.drop(columns=['province/state']))
return rtn_df
def get_top_n_countries(df, n, response):
"""
Returns a list of the top countries by response
:param df: pandas dataframe
:param n {int}: number of countries to select
:param response {string}: deaths, confirmed, or recovered
"""
top_df = df.loc[df['date'] == df['date'].max()]
top_df = top_df.sort_values(by=[response], ascending=False)
return list(top_df['country/region'].iloc[0:n])
def clean_country_names(df):
"""
Given a dataframe with only 1 column 'country/region'
cleans country names
"""
cleaned_df = df.replace({'country/region':
{'US': 'United States',
'Taiwan*': 'Taiwan',
'Korea, South': 'South Korea'}
})
return cleaned_df
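# --- Usage sketch (illustrative, not part of the original module) ---
# A tiny wide-format frame mimicking the JHU layout, converted and summarised.
if __name__ == '__main__':
    wide = pd.DataFrame({
        'Province/State': [None, None],
        'Country/Region': ['US', 'Italy'],
        'Lat': [37.1, 41.9], 'Long': [-95.7, 12.6],
        '3/1/20': [30, 1128], '3/2/20': [53, 1694],
    })
    confirmed = convert_jh_global_time_series_to_long(wide, 'confirmed')
    confirmed = consolidate_country_regions(confirmed)
    print(get_top_n_countries(confirmed, n=1, response='confirmed'))  # ['Italy']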
# Calculate Incidence, Prevalence, Morbidity, Mortality
# https://www.health.ny.gov/diseases/chronic/basicstat.htm
# Join Political Leanings
# https://www.cpds-data.org/
# Freedom Index
# https://rsf.org/en/ranking_table
# https://www.cato.org/sites/cato.org/files/human-freedom-index-files/human-freedom-index-2019.pdf
# - https://www.reddit.com/r/IntellectualDarkWeb/comments/b07on4/political_compass_of_countries_data_from_the/
# Air Pollutions
# https://projects.iq.harvard.edu/files/covid-pm/files/pm_and_covid_mortality.pdf
# https://ourworldindata.org/air-pollution
# https://ourworldindata.org/outdoor-air-pollution
# https://ourworldindata.org/indoor-air-pollution
# - https://github.com/owid/covid-19-data/tree/master/public/data
| python |
import time
def example(seconds):
print('Starting task')
for i in range(seconds):
print(i)
time.sleep(1)
print('Task completed')
if __name__ == '__main__':
example(10)
| python |
"""The wireless version of a connection"""
from Connection import Connection
class Wireless_Connection(Connection):
type = "Wireless_Connection"
def __init__(self, source, dest):
"""
Create a connection between wireless devices.
"""
Connection.__init__(self, source, dest)
| python |
from celery import shared_task
@shared_task
def add(a, b):
return (a+b)
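# --- Usage sketch (illustrative; needs a configured Celery app and a running
# broker/worker, so it is left as comments) ---
#   result = add.delay(2, 3)   # enqueue the task
#   result.get(timeout=10)     # -> 5, once a worker has processed it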
| python |
# This file is part of the History Store (histore).
#
# Copyright (C) 2018-2021 New York University.
#
# The History Store (histore) is released under the Revised BSD License. See
# file LICENSE for full license details.
"""Writer for archives that are materialized as Json files on the file
system.
"""
from typing import Optional
import json
from histore.archive.serialize.base import ArchiveSerializer
from histore.archive.serialize.default import DefaultSerializer
from histore.archive.row import ArchiveRow
from histore.archive.writer import ArchiveWriter
from histore.document.json.writer import JsonWriter
class ArchiveFileWriter(ArchiveWriter):
"""Archive writer that outputs rows in an archive as Json serialized rows
in a text file. Each row is stored in a separate line in the text file. The
output file is a Json array. The first and the last row of the file open
and close the array.
"""
def __init__(
self, filename: str, row_counter: Optional[int] = 0,
serializer: Optional[ArchiveSerializer] = None,
compression: Optional[str] = None,
encoder: Optional[json.JSONEncoder] = None
):
"""Initialize the output file, row counter, and the serializer that is
being used.
Parameters
----------
filename: string
Path to the output file.
row_counter: int, default=0
Counter that is used to generate unique internal row identifier.
The current value of the counter is the value for the next unique
identifier.
serializer: histore.archive.serialize.base.ArchiveSerializer, default=None
Implementation of the archive serializer interface that is used to
serialize rows that are written to file.
compression: string, default=None
String representing the compression mode for the output file.
encoder: json.JSONEncoder, default=None
Encoder used when writing archive rows as JSON objects to file.
"""
super(ArchiveFileWriter, self).__init__(row_counter)
# Use the default serializer if no serializer was given
self.serializer = serializer if serializer else DefaultSerializer()
# Create Json writer for the archive rows.
self.writer = JsonWriter(
filename=filename,
compression=compression,
encoder=encoder
)
def close(self):
"""Write the last row to the output file and close the output array and
the output file.
"""
self.writer.close()
def write_archive_row(self, row: ArchiveRow):
"""Add the given row to the output file.
Parameters
----------
row: histore.archive.row.ArchiveRow
Row in a new version of a dataset archive.
"""
self.write_buffer(row)
def write_buffer(self, row: Optional[ArchiveRow] = None):
"""Write the archive row in the internal buffer to the output file.
Replace the buffer with the given (next output row).
Parameters
----------
row: histore.archive.row.ArchiveRow, default=None
Next row in the output stream. This row will be kept in the
internal buffer and the previous row is being written to the
output file.
"""
self.writer.write(self.serializer.serialize_row(row))
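# --- Usage sketch (illustrative, not part of the original module) ---
# Rows are histore.archive.row.ArchiveRow objects, normally produced by an
# archive merge step; their construction is outside the scope of this sketch.
#
#   writer = ArchiveFileWriter(filename='archive.json')
#   for row in archive_rows:
#       writer.write_archive_row(row)
#   writer.close()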
| python |
#!/usr/bin/env python
from ALU import *
import numpy as np
import pandas as pd
import pickle
class Dataset():
def __init__(self, data_bits, path, label_bit_msk=None):
if label_bit_msk is None:
label_bit_msk = [True for _ in range(data_bits)]
elif(len(label_bit_msk) > data_bits):
raise Exception("unsupported label bit mask length")
self.path = path
self.data_bits = data_bits
self.label_bit_msk = [i!=0 for i in label_bit_msk]
self.alu = ALU(self.data_bits, ['x'])
self.data_dim = self.alu.data_dim
self.label_dim = min(self.alu.label_dim, sum(self.label_bit_msk))
self.filename = str()
def __iter__(self):
"""
only support generating the whole table now
If use this on tf.dataset.from_generator, plz at least suffle something
use ds.shuffle(cache = 1000)
"""
number, ops = self.alu.gen_range()
arr = lambda x : np.array(x, dtype = "uint8")
for op in ops:
for B in number:
for A in number:
data, label = self._get_data_label(A, B, op)
yield arr(data), arr(label)
def __call__(self, form = "csv", batch_size = 1000, shuffle = True):
if form is "csv":
self.path = self.path + "dataset_csv/3ops/"
self.filename = "xor_{}.csv".format(self.data_bits)
self._csv(shuffle)
elif form is "batch":
self.path = self.path + "dataset{}".format(self.data_bits)
number, ops = self.alu.gen_range()
datas = []
labels = []
operations = []
data_dim = self.data_dim
label_dim = self.label_dim
total_size = len(ops) * len(number)**2
i = 0
for op in ops:
for B in number:
for A in number:
data, label = self._get_data_label(A, B, op)
datas.append(data)
labels.append(label)
operations.append(op)
i = i + 1
                        if i % batch_size == 0 or i == total_size:
name = self.filename + "_"+ str(i//batch_size)
                            actual_size = batch_size if i % batch_size == 0 else i % batch_size
data_arr = np.array(datas, dtype= 'uint8').reshape((actual_size, data_dim))
label_arr = np.array(labels, dtype = 'uint8').reshape((actual_size, label_dim))
dataset = dict()
dataset["data"] = data_arr
dataset["label"] = label_arr
dataset["operations"] = operations
with open(self.path + name + '.batch', 'wb+') as f:
pickle.dump(dataset, f, protocol=pickle.HIGHEST_PROTOCOL)
datas = []
labels = []
operations = []
else:
raise Exception("Illegal format type")
def _csv(self, shuffle = False):
number, ops = self.alu.gen_range()
datas = []
labels = []
data_dim = self.alu.data_dim-1
label_dim = self.label_dim
total_size = len(ops) * len(number)**2
i = 0
for op in ops:
for B in number:
for A in number:
data, label = self._get_data_label(A, B, op)
datas.append(data)
labels.append(label)
data_arr = np.array(datas, dtype='uint8').reshape((total_size, data_dim))
label_arr = np.array(labels, dtype = 'uint8').reshape((total_size, label_dim))
df = pd.DataFrame(np.hstack((data_arr, label_arr)))
if shuffle:
df = df.sample(frac=1).reset_index(drop=True)
df.to_csv(self.path + self.filename, header=False, index=False)
def _get_data_label(self, A, B, op):
"""
return the list of data and label
"""
in1, in2, opc, out = self.alu(A, B, op)
data = list(in1) + list(in2)# + list(opc)
label = list(out)
label = [i for i,j in zip(label, self.label_bit_msk) if j]
return data, label
if __name__ == '__main__':
import os
script_path = os.path.abspath(__file__)
project_dir = script_path[:script_path.rfind("src")]
output_path = project_dir + "dataset/"
# import pathlib
# project_path = pathlib.Path(__file__).parent.parent.parent
# output_path = project_path / "dataset"
# ds = Dataset(6, "ALU-6-14_batch", output_path)
ds = Dataset(6, output_path, [True for i in range(6)])
ds()
# for data, label in iter(ds):
# print(data)
# print(label)
| python |
'''
Regrid the GBT data to match the VLA HI data.
'''
from spectral_cube import SpectralCube
from astropy.utils.console import ProgressBar
import numpy as np
import os
from cube_analysis.io_utils import create_huge_fits
from paths import fourteenB_HI_data_path, data_path
# Load the non-pb masked cube
vla_cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
gbt_path = os.path.join(data_path, "GBT")
cube = SpectralCube.read(os.path.join(gbt_path, "m33_gbt_vlsr_highres.fits"))
# Ta* to T_mb as per @low-sky
Tmb_conv = 1.052
save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088_spectralregrid.fits")
# Spectral interpolation, followed by reprojection.
if not os.path.exists(save_name):
cube = cube.spectral_interpolate(vla_cube.spectral_axis)
if cube._is_huge:
output_fits = create_huge_fits(save_name, cube.header, return_hdu=True)
for chan in ProgressBar(cube.shape[0]):
output_fits[0].data[chan] = cube[chan].value * Tmb_conv
output_fits.flush()
output_fits.close()
else:
(cube * Tmb_conv).write(save_name, overwrite=True)
else:
cube = SpectralCube.read(save_name)
# Make the reprojected header
new_header = cube.header.copy()
new_header["NAXIS"] = 3
new_header["NAXIS1"] = vla_cube.shape[2]
new_header["NAXIS2"] = vla_cube.shape[1]
new_header["NAXIS3"] = vla_cube.shape[0]
kwarg_skip = ['TELESCOP', 'BUNIT', 'INSTRUME']
for key in cube.header:
if key == 'HISTORY':
continue
if key in vla_cube.header:
if "NAXIS" in key:
continue
if key in kwarg_skip:
continue
new_header[key] = vla_cube.header[key]
new_header.update(cube.beam.to_header_keywords())
new_header["BITPIX"] = -32
# We're going to convert to Tmb below
new_header.comments['BUNIT'] = 'Tmb'
# Build up the reprojected cube per channel
save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088.fits")
output_fits = create_huge_fits(save_name, new_header, return_hdu=True)
targ_header = vla_cube[0].header
for chan in ProgressBar(cube.shape[0]):
reproj_chan = \
cube[chan].reproject(targ_header).value.astype(np.float32)
output_fits[0].data[chan] = reproj_chan
if chan % 200 == 0:
output_fits.flush()
output_fits.close()
# Now do it again from the native gridding size
cube = SpectralCube.read(os.path.join(gbt_path, "m33_gbt_vlsr.fits"))
# Ta* to T_mb as per @low-sky
Tmb_conv = 1.052
save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_Tmb_14B088_spectralregrid.fits")
# Spectral interpolation, followed by reprojection.
if not os.path.exists(save_name):
cube = cube.spectral_interpolate(vla_cube.spectral_axis)
if cube._is_huge:
output_fits = create_huge_fits(save_name, cube.header, return_hdu=True)
for chan in ProgressBar(cube.shape[0]):
output_fits[0].data[chan] = cube[chan].value * Tmb_conv
output_fits.flush()
output_fits.close()
else:
(cube * Tmb_conv).write(save_name, overwrite=True)
else:
cube = SpectralCube.read(save_name)
# Make the reprojected header
new_header = cube.header.copy()
new_header["NAXIS"] = 3
new_header["NAXIS1"] = vla_cube.shape[2]
new_header["NAXIS2"] = vla_cube.shape[1]
new_header["NAXIS3"] = vla_cube.shape[0]
kwarg_skip = ['TELESCOP', 'BUNIT', 'INSTRUME']
for key in cube.header:
if key == 'HISTORY':
continue
if key in vla_cube.header:
if "NAXIS" in key:
continue
if key in kwarg_skip:
continue
new_header[key] = vla_cube.header[key]
new_header.update(cube.beam.to_header_keywords())
new_header["BITPIX"] = -32
# We're going to convert to Tmb below
new_header.comments['BUNIT'] = 'Tmb'
# Build up the reprojected cube per channel
save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_Tmb_14B088.fits")
output_fits = create_huge_fits(save_name, new_header, return_hdu=True)
targ_header = vla_cube[0].header
for chan in ProgressBar(cube.shape[0]):
reproj_chan = \
cube[chan].reproject(targ_header).value.astype(np.float32)
output_fits[0].data[chan] = reproj_chan
if chan % 200 == 0:
output_fits.flush()
output_fits.close()
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TX():
def __init__(self):
self.txid = ''
self.inputs = []
self.outputs = []
self.block_height = 0
self.confirmations = 0
def print_tx(self):
print '\nblock ', str(self.block_height), "(" + str(self.confirmations) + " confirmations)", self.txid
print 'IN:', self.inputs
print 'OUT:', self.outputs
print 'primeInput:', self.prime_input_address()
def prime_input_address(self):
addresses = []
for tx_input in self.inputs:
addresses.append(tx_input['address'])
return sorted(addresses)[0]
def received_value(self, address):
value = 0
for output in self.outputs:
if output['address'] == address:
value += output['value']
return value
def is_receiving_tx(self, address):
received = True
for tx_input in self.inputs:
if tx_input['address'] == address:
received = False
return received
def sent_value(self, address):
value = 0
for tx_input in self.inputs:
if tx_input['address'] == address:
value += tx_input['value']
change = 0
for tx_output in self.outputs:
if tx_output['address'] == address:
change += tx_output['value']
return value-change
def is_sending_tx(self, address):
sending = False
for tx_input in self.inputs:
if tx_input['address'] == address:
sending = True
return sending
def to_dict(self, address):
tx_dict = {"txid": self.txid,
"prime_input_address": self.prime_input_address(),
"inputs": self.inputs,
"outputs": self.outputs,
"block_height": self.block_height,
"confirmations": self.confirmations,
"receiving": self.is_receiving_tx(address)}
if tx_dict["receiving"] is True:
tx_dict["receivedValue"] = self.received_value(address)
else:
tx_dict["sentValue"] = self.sent_value(address)
return tx_dict | python |
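# Minimal usage sketch (not part of the original class; txid, addresses and
# values below are made up for illustration):
#
#     tx = TX()
#     tx.txid = 'abc123'
#     tx.inputs = [{'address': '1Sender', 'value': 50000}]
#     tx.outputs = [{'address': '1Receiver', 'value': 40000},
#                   {'address': '1Sender', 'value': 9000}]
#     tx.is_receiving_tx('1Receiver')   # True
#     tx.sent_value('1Sender')          # 41000 (input value minus change)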
from series import fibonacci, lucas, sum_series
# Fibonacci tests
# Expected Outcome
def test_zero():
expected = 0
actual = fibonacci(0)
assert actual == expected
def test_one():
expected = 1
actual = fibonacci(1)
assert actual == expected
def test_15n():
expected = 610
actual = fibonacci(15)
assert actual == expected
# Edge Case
def test_negative():
expected = 0
actual = fibonacci(-4)
assert actual == expected
# Expected Failure
def test_letter():
expected = "Input should be a one integer"
actual = fibonacci("a")
assert actual == expected
def test_float():
expected = "Input should be a one integer"
actual = fibonacci(1.354)
assert actual == expected
# LUCAS TESTS
# Expected Outcome
def test_zero_lucas():
expected = 2
actual = lucas(0)
assert actual == expected
def test_one_lucas():
expected = 1
actual = lucas(1)
assert actual == expected
def test_three_lucas():
expected = 4
actual = lucas(3)
assert actual == expected
def test_four_lucas():
expected = 7
actual = lucas(4)
assert actual == expected
def test_15n_lucas():
expected = 1364
actual = lucas(15)
assert actual == expected
# Edge case
def test_negative_lucas():
expected = 2
actual = lucas(-4)
assert actual == expected
# expected failure
def test_letter_lucas():
expected = "Input should be a one integer"
actual = lucas("a")
assert actual == expected
# SUM_SERIES TESTS
# Expected Outcome
def test_zero_sum_series_fibonacci():
expected = 0
actual = sum_series(0)
assert actual == expected
def test_zero_sum_series_fibonacci_params():
expected = 0
actual = sum_series(0, 0, 1)
assert actual == expected
def test_zero_sum_series_lucas():
expected = 2
actual = sum_series(0, 2, 1)
assert actual == expected
def test_sum_series_new_sequence():
expected = 123
actual = sum_series(8, 3, 4)
assert actual == expected
# Edge Cases
def test_sum_series_new_sequence_negative():
expected = 3
actual = sum_series(-4, 3, 4)
assert actual == expected
def test_sum_series_new_sequence_negative_params():
expected = 6
actual = sum_series(4, -3, 4)
assert actual == expected
# expected failure
def test_sum_series_letters():
expected = "Input allows only integers"
actual = sum_series('a', 3, 4)
assert actual == expected
def test_sum_series_letters_in_params():
expected = "Input allows only integers"
actual = sum_series(5, 'a', 4)
assert actual == expected
| python |
from django.urls import path
from . import views
app_name = 'orders'
urlpatterns = [
path('create/', views.order_create, name='order_create'),
path(
'order_list/<str:username>/',
views.orderlist,
name='order_list'
),
path(
'order_list/<int:id>/detail/',
views.orderlistdetail,
name='order_list_detail'
),
path(
'my_sales/<str:username>/',
views.ordersales,
name='ordersales'
),
] | python |
'''
Python 3.6
This script contains functions to clean the text in the tweets.
Methods here are not called directly.
Instead, they are called from either "NLTK_clean_tweet_testing.py" or "TextBlob_clean_tweet_testing.py"
'''
print("Importing tweetCleaner...")
from bs4 import BeautifulSoup
import re
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
"""
Returns a list of stopwords called StopWordList.
The file containing the stopwords is titled "stopwords.txt".
"""
def StopWordListCreator():
StopWordList = []
with open("stopwords.txt","r",encoding="utf-8") as stopwords:
for stopword in stopwords.readlines():
StopWordList.append(stopword[:-1])
return StopWordList
def StopWordRemover(tweet):
'''
Removes all stopwords in the tweet, w.r.t. the StopWordList created above.
'''
    tweet_words = tweet.split()
    stop_words = StopWordListCreator()
    new_tweet = []
    for word in tweet_words:
        if word not in stop_words:
            new_tweet.append(word)
    return (" ").join(new_tweet)
def lowercase(tweet):
'''
Returns the tweet in lowercase.
'''
return tweet.lower()
def removeSpecialChars(tweet):
'''
Removes special characters which are specifically found in tweets.
'''
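    # For example (approximate; exact whitespace may differ):
    #   removeSpecialChars('@user check https://t.co/xyz #Python rocks')
    #   -> ' check Python rocks'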
#Converts HTML tags to the characters they represent
soup = BeautifulSoup(tweet, "html.parser")
tweet = soup.get_text()
#Convert www.* or https?://* to empty strings
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))','',tweet)
#Convert @username to empty strings
tweet = re.sub('@[^\s]+','',tweet)
#Remove additional white spaces
tweet = re.sub('[\s]+', ' ', tweet)
#Replace #word with word
tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
#Trims the tweet
tweet = tweet.strip('\'"')
return tweet
def removeAllNonAlpha(tweet):
'''
Remove all characters which are not alphabets, numbers or whitespaces.
'''
tweet = re.sub('[^A-Za-z0-9 ]+','', tweet)
return tweet
def lemmatizer(tweet):
'''
Attempts to replace every individual word with it's root word.
'''
word_list = []
for word in tweet.split():
word_list.append(wordnet_lemmatizer.lemmatize(word))
return (" ".join(word_list))
print("Finished importing tweetCleaner.") | python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import json
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
MAX_FLOAT = 1e30
MIN_FLOAT = -1e30
@register_criterion("coqa")
class CoqaCriterion(FairseqCriterion):
def __init__(self, task, ranking_head_name, save_predictions):
super().__init__(task)
self.ranking_head_name = ranking_head_name
self.start_n_top = 5 ##################################
self.end_n_top = 5
if save_predictions is not None:
self.prediction_h = True
else:
self.prediction_h = None
def __del__(self):
pass
#if self.prediction_h is not None:
# self.prediction_h.close()
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--save-predictions', metavar='FILE',
help='file to save predictions to')
parser.add_argument('--ranking-head-name',
default='coqa',
help='name of the classification head to use')
parser.add_argument('--n-best-size',
default=5,
help='n best size for predictions')
parser.add_argument('--start-n-top',
default=5,
help='Beam size for span start')
parser.add_argument('--end-n-top',
default=5,
help='Beam size for span end')
# fmt: on
def get_masked_data(self, data, mask):
return data * mask+MIN_FLOAT * (1-mask)
def tile(self, data, size):
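        # Repeats `data` along each dimension by the factor given in `size`
        # (via repeated concatenation); e.g. data of shape (b, 1, l) with
        # size (1, k, 1) becomes (b, k, l), similar to data.repeat(1, k, 1).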
for dim in range(-1, -1*len(size)-1, -1):
multiple_num = size[dim]
ori_data = data
for _ in range(multiple_num-1):
data = torch.cat([data, ori_data], dim=dim)
return data
    def forward(self, model, sample, reduce=True): ####fairseq_task.py line 430
"""Compute ranking loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
def compute_loss(label, predict, predict_mask, label_smoothing=0.0):
masked_predict = self.get_masked_data(predict, predict_mask)
masked_predict = predict #[b,l]
if label_smoothing > 1e-10:
onehot_label = F.one_hot(label, masked_predict.size(-1))
                onehot_label = (onehot_label * (1 - label_smoothing) +
                                label_smoothing / masked_predict.size(-1)) * predict_mask
log_likelihood = F.log_softmax(masked_predict, dim=-1)
loss = - (onehot_label*log_likelihood).sum(-1)
else:
CEL = torch.nn.CrossEntropyLoss()
loss = CEL(masked_predict, label)
return loss
assert (
hasattr(model, "classification_heads")
and self.ranking_head_name in model.classification_heads
), "model must provide sentence ranking head for --criterion=coqa"
logits, _ = model(
sample["net_input"],
classification_head_name=self.ranking_head_name,
)
p_mask = sample["net_input"]["p_mask"]
preds = {}
        target_exist = sample["start_position"] is not None
##start
start_result = logits["start_result"]
sample_size = start_result.size()[0]
start_result_mask = 1-p_mask
start_result = torch.squeeze(start_result, dim=-1)
start_result = self.get_masked_data(start_result, start_result_mask)
start_prob = F.softmax(start_result, dim=-1)
if not self.training:
start_top_prob, start_top_index = torch.topk(start_prob, k=self.start_n_top)
preds["start_prob"] = start_top_prob
preds["start_index"] = start_top_index
##end
end_result = logits["end_result"]
if self.training:
end_result_mask = 1-p_mask
end_result = torch.squeeze(end_result, dim=-1)
end_result = self.get_masked_data(end_result, end_result_mask)
end_prob = F.softmax(end_result, dim=-1)
else:
end_result_mask = torch.unsqueeze(1-p_mask, dim=1)
end_result_mask = self.tile(end_result_mask, (1, self.start_n_top, 1))
end_result = torch.transpose(torch.squeeze(end_result, dim=-1), 1, 2)
end_result = self.get_masked_data(end_result, end_result_mask)
end_prob = F.softmax(end_result, dim=-1)
end_top_prob, end_top_index = torch.topk(end_prob, k=self.start_n_top)
preds["end_prob"] = end_top_prob
preds["end_index"] = end_top_index
##unk
unk_result = logits["unk_result"]
unk_result_mask = torch.max(1-p_mask, dim=-1).values
unk_result = torch.squeeze(unk_result, dim=-1)
unk_result = self.get_masked_data(unk_result, unk_result_mask)
unk_prob = F.sigmoid(unk_result)
preds["unk_prob"] = unk_prob
##yes
yes_result = logits["yes_result"]
yes_result_mask = torch.max(1-p_mask, dim=-1).values
yes_result = torch.squeeze(yes_result, dim=-1)
yes_result = self.get_masked_data(yes_result, yes_result_mask)
yes_prob = F.sigmoid(yes_result)
preds["yes_prob"] = yes_prob
##no
no_result = logits["no_result"]
no_result_mask = torch.max(1-p_mask, dim=-1).values
no_result = torch.squeeze(no_result, dim=-1)
no_result = self.get_masked_data(no_result, no_result_mask)
no_prob = F.sigmoid(no_result)
preds["no_prob"] = no_prob
##num
num_result = logits["num_result"]
num_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values
num_result = self.get_masked_data(num_result, num_result_mask)
num_probs = F.softmax(num_result, dim=-1)
preds["num_probs"] = num_probs
##opt
opt_result = logits["opt_result"]
opt_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values
opt_result = self.get_masked_data(opt_result, opt_result_mask)
opt_probs = F.softmax(opt_result, dim=-1)
preds["opt_probs"] = opt_probs
if target_exist and self.training:
start_label = sample["start_position"]
start_loss = compute_loss(start_label, start_result, 1-p_mask) # [b],[b,l],[b,l]
end_label = sample["end_position"]
end_loss = compute_loss(end_label, end_result, 1-p_mask) # [b], [b,l], [b,l]
loss = torch.mean(start_loss + end_loss)
unk_label = sample["is_unk"]
unk_loss = F.binary_cross_entropy_with_logits(unk_result, unk_label.half())
loss += torch.mean(unk_loss)
yes_label = sample["is_yes"]
yes_loss = F.binary_cross_entropy_with_logits(yes_result, yes_label.half())
loss += torch.mean(yes_loss)
no_label = sample["is_no"]
no_loss = F.binary_cross_entropy_with_logits(no_result, no_label.half())
loss += torch.mean(no_loss)
num_label = sample["number"]
num_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values
num_loss = compute_loss(num_label, num_result, num_result_mask)
loss += torch.mean(num_loss)
opt_label = sample["option"]
opt_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values
opt_loss = compute_loss(opt_label, opt_result, opt_result_mask)
loss += torch.mean(opt_loss)
targets = sample
elif target_exist:
start_label = sample["start_position"]
start_loss = compute_loss(start_label, start_result, 1-p_mask) # [b],[b,l],[b,l]
end_label = sample["end_position"]
end_result = end_result[:,0,:]
end_loss = compute_loss(end_label, end_result, 1-p_mask) # [b],[b,k,l],[b,l]
loss = torch.mean(start_loss + end_loss)
unk_label = sample["is_unk"]
unk_loss = F.binary_cross_entropy_with_logits(unk_result, unk_label.half())
loss += torch.mean(unk_loss)
yes_label = sample["is_yes"]
yes_loss = F.binary_cross_entropy_with_logits(yes_result, yes_label.half())
loss += torch.mean(yes_loss)
no_label = sample["is_no"]
no_loss = F.binary_cross_entropy_with_logits(no_result, no_label.half())
loss += torch.mean(no_loss)
num_label = sample["number"]
num_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values
num_loss = compute_loss(num_label, num_result, num_result_mask)
loss += torch.mean(num_loss)
opt_label = sample["option"]
opt_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values
opt_loss = compute_loss(opt_label, opt_result, opt_result_mask)
loss += torch.mean(opt_loss)
targets = sample
else:
loss = torch.tensor(0.0, requires_grad=True)
targets = None
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if self.prediction_h is not None and not self.training:
predictions = []
for i in range(sample["nsentences"]):
pred = {}
pred["unique_id"] = sample["id"].tolist()[i]
pred["qas_id"] = sample["qas_id"].tolist()[i]
pred["start_prob"] = preds["start_prob"].tolist()[i]
pred["start_index"] = preds["start_index"].tolist()[i]
pred["end_prob"] = preds["end_prob"].tolist()[i]
pred["end_index"] = preds["end_index"].tolist()[i]
pred["unk_prob"] = preds["unk_prob"].tolist()[i]
pred["yes_prob"] = preds["yes_prob"].tolist()[i]
pred["no_prob"] = preds["no_prob"].tolist()[i]
pred["num_probs"] = preds["num_probs"].tolist()[i]
pred["opt_probs"] = preds["opt_probs"].tolist()[i]
prediction = json.dumps(pred)
predictions.append(prediction)
#self.prediction_h.write(prediction)
#self.prediction_h.write("\n")
return loss, predictions, sample_size, logging_output
return loss, sample_size, logging_output
    ### called once per batch
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| python |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import MyModel
def mymodel_list(request):
paginate_by = 24
qs = MyModel.objects.all()
paginator = Paginator(qs, paginate_by)
page_number = request.GET.get("page")
try:
page = paginator.page(page_number)
except PageNotAnInteger:
# If page parameter is not an integer, show first page.
page = paginator.page(1)
except EmptyPage:
# If page parameter is out of range, show last existing page.
page = paginator.page(paginator.num_pages)
context = {
'object_list': page,
}
return render(request, "{{ app_name }}/mymodel_list.html", context)
def mymodel_details(request, object_id):
instance = get_object_or_404(MyModel, pk=object_id)
context = {
'object': instance,
}
return render(request, "{{ app_name }}/mymodel_details.html", context)
| python |
import tkinter as tk
from sudokuUI import SudokuUI
root = tk.Tk()
#p = [ [0,i,i+1] for i in range(9) ] + [ [1,(i+3)% 9, i + 1] for i in range(9)] + [ [2,(i+6) % 9, i+1] for i in range(9)] + [[3,(i+1)%9,i+1] for i in range(9)] + [[4,(i+4)%9,i+1] for i in range(9)] + [[5, (i+7)% 9, i + 1] for i in range(9)] + [[6,(i+2)%9,i+1] for i in range(9)] + [[7,(i+5)%9,i+1] for i in range(9)] + [[8, (i+8)% 9, i + 1] for i in range(9)]
p = [ [0,i,i+1] for i in range(9) ] + [ [1,(i+3)% 9, i + 1] for i in range(9)] + [ [2,(i+6) % 9, i+1] for i in range(9)] + [[3,(i+1)%9,i+1] for i in range(9)] + [[4,(i+4)%9,i+1] for i in range(9)] + [[5, (i+7)% 9, i + 1] for i in range(9)] + [[6,(i+2)%9,i+1] for i in range(9)] + [[7,(i+5)%9,i+1] for i in range(9)]
s = SudokuUI(root, 60, p)
root.geometry("800x800")
root.mainloop() | python |
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 scmanjarrez. All rights reserved.
# This work is licensed under the terms of the MIT license.
from contextlib import closing
import sqlite3 as sql
DB = 'diptico.db'
def setup_db():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.executescript(
"""
CREATE TABLE IF NOT EXISTS parts (
part INTEGER,
volume INTEGER,
title TEXT,
url TEXT,
finished INTEGER DEFAULT 0,
PRIMARY KEY (part, volume)
);
CREATE TABLE IF NOT EXISTS chapters (
part INTEGER,
volume INTEGER,
title TEXT,
url TEXT,
new INTEGER DEFAULT 1,
FOREIGN KEY (part, volume) REFERENCES parts(part, volume),
PRIMARY KEY (part, volume, title)
);
CREATE TABLE IF NOT EXISTS mestionora (
title TEXT PRIMARY KEY
);
CREATE TABLE IF NOT EXISTS users (
uid INTEGER PRIMARY KEY,
notifications INTEGER DEFAULT 1
);
"""
)
def parts():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT part, volume, title, url FROM parts')
return cur.fetchall()
def name_part(part):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT DISTINCT(title) FROM parts WHERE part = ?',
[part])
return cur.fetchone()[0]
def total_parts():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT part, title '
'FROM parts '
'ORDER BY rowid')
ret = cur.fetchall()
group = [[(p, t) for p, t in ret if p == r]
for r in range(1, ret[-1][0] + 1)]
return [max(set(g), key=g.count) for g in group]
def n_parts():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT count(DISTINCT part) '
'FROM parts')
return cur.fetchone()[0]
def n_volumes(part):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT count(volume) '
'FROM parts '
'WHERE part = ?',
[part])
return cur.fetchone()[0]
def total_volumes(part):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT volume '
'FROM parts '
'WHERE part = ? '
'ORDER BY rowid',
[part])
return cur.fetchall()
def unfinished_part():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT part, volume, title, url '
'FROM parts '
'WHERE finished = 0')
return cur.fetchone()
def add_part(part, volume, title, url):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('INSERT INTO parts '
'(part, volume, title, url) '
'VALUES (?, ?, ?, ?)',
[part, volume, title, url])
db.commit()
def part_cached(part, volume):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute(
'SELECT EXISTS ('
'SELECT 1 FROM parts '
'WHERE part = ? AND volume = ?'
')',
[part, volume])
return cur.fetchone()[0]
def set_finished(part, volume):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE parts '
'SET finished = 1 '
'WHERE part = ? AND volume = ?',
[part, volume])
db.commit()
def chapters(part, volume):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT title, url '
'FROM chapters '
'WHERE part = ? AND volume = ? '
'ORDER BY rowid',
[part, volume])
return cur.fetchall()
def n_chapters(part, volume):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT count(title) '
'FROM chapters '
'WHERE part = ? AND volume = ?',
[part, volume])
            return cur.fetchone()[0]
def new_chapters():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT part, volume, title '
'FROM chapters '
'WHERE new = 1')
return cur.fetchall()
def add_chapter(part, volume, title, url):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('INSERT INTO chapters '
'(part, volume, title, url) '
'VALUES (?, ?, ?, ?)',
[part, volume, title, url])
db.commit()
def chapter_cached(part, volume, title):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute(
'SELECT EXISTS ('
'SELECT 1 FROM chapters '
'WHERE part = ? AND volume = ? AND title = ?)',
[part, volume, title])
return cur.fetchone()[0]
def unset_new(part, volume, title):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE chapters '
'SET new = 0 '
'WHERE part = ? AND volume = ? '
'AND title = ?',
[part, volume, title])
db.commit()
def add_mestionora(titles):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('DELETE FROM mestionora')
cur.executemany('INSERT INTO mestionora '
'VALUES (?)',
[(tit,) for tit in titles])
db.commit()
def mestionora_chapters():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT * FROM mestionora')
return [ch[0] for ch in cur.fetchall()]
def users():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT uid FROM users')
return cur.fetchall()
def cached(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute(
'SELECT EXISTS ('
'SELECT 1 FROM users WHERE uid = ?'
')',
[uid])
return cur.fetchone()[0]
def add_user(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('INSERT INTO users (uid) VALUES (?)',
[uid])
db.commit()
def del_user(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('DELETE FROM users '
'WHERE uid = ?',
[uid])
db.commit()
def notifications(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT notifications FROM users '
'WHERE uid = ?',
[uid])
return cur.fetchone()[0] # (x,)
def toggle_notifications(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET notifications = -notifications '
'WHERE uid = ?',
[uid])
db.commit()
def all_users_notify():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT uid FROM users '
'WHERE notifications = 1')
return cur.fetchall()
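# Minimal usage sketch (not part of the module; the uid below is hypothetical):
#
#     setup_db()
#     add_user(12345)
#     notifications(12345)         # -> 1 (enabled by default)
#     toggle_notifications(12345)
#     notifications(12345)         # -> -1 (disabled)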
| python |
########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import yaml
import logging
from distutils.version import StrictVersion
from cloudify.manager import get_rest_client
from cloudify.utils import get_admin_api_token
from cloudify.constants import EVENTS_EXCHANGE_NAME
from cloudify_agent.worker import (
CloudifyOperationConsumer,
)
logger = logging.getLogger('mgmtworker')
class HookConsumer(CloudifyOperationConsumer):
routing_key = 'events.hooks'
HOOKS_CONFIG_PATH = '/opt/mgmtworker/config/hooks.conf'
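    # Expected layout of hooks.conf as consumed below (a sketch: the keys are the
    # ones read in _get_hook/_get_task, the values are hypothetical):
    #
    #   hooks:
    #     - event_type: workflow_failed
    #       implementation: some_plugin.tasks.on_failure
    #       inputs:
    #         some_arg: some_value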
def __init__(self, queue_name, registry, max_workers=5):
super(HookConsumer, self).__init__(queue_name,
exchange_type='topic',
registry=registry,
threadpool_size=max_workers)
self.queue = queue_name
self.exchange = EVENTS_EXCHANGE_NAME
def handle_task(self, full_task):
event_type = full_task['event_type']
hook = self._get_hook(event_type)
if not hook:
return
logger.info(
'The hook consumer received `{0}` event and the hook '
'implementation is: `{1}`'.format(event_type,
hook.get('implementation'))
)
try:
task = self._get_task(full_task, hook)
result = super(HookConsumer, self).handle_task(task)
except Exception as e:
            result = {'ok': False, 'error': str(e)}
logger.error('{0!r}, while running the hook triggered by the '
'event: {1}'.format(e, event_type))
return result
def _get_hook(self, event_type):
if not os.path.exists(self.HOOKS_CONFIG_PATH):
logger.warn("The hook consumer received `{0}` event but the "
"hooks config file doesn't exist".format(event_type))
return None
with open(self.HOOKS_CONFIG_PATH) as hooks_conf_file:
try:
hooks_yaml = yaml.safe_load(hooks_conf_file)
hooks_conf = hooks_yaml.get('hooks', {}) if hooks_yaml else {}
except yaml.YAMLError:
logger.error(
"The hook consumer received `{0}` event but the hook "
"config file is invalid yaml".format(event_type)
)
return None
for hook in hooks_conf:
if hook.get('event_type') == event_type:
return hook
logger.info("The hook consumer received `{0}` event but didn't find a "
"compatible hook in the configuration".format(event_type))
return None
def _get_task(self, full_task, hook):
hook_context, operation_context = self._get_contexts(
full_task,
hook['implementation']
)
task = {
'cloudify_task': {
'kwargs': {
'__cloudify_context': operation_context
},
'args': [hook_context]
}
}
kwargs = hook.get('inputs') or {}
task['cloudify_task']['kwargs'].update(kwargs)
return task
def _get_contexts(self, full_task, implementation):
hook_context = full_task['context']
tenant = hook_context.pop('tenant')
tenant_name = tenant.get('name')
hook_context['tenant_name'] = tenant.get('name')
hook_context['event_type'] = full_task['event_type']
hook_context['timestamp'] = full_task['timestamp']
hook_context['arguments'] = full_task['message']['arguments']
operation_context = dict(
type='hook',
tenant=tenant,
no_ctx_kwarg=True,
task_target=self.queue,
tenant_name=tenant_name,
rest_token=hook_context.get('rest_token'),
plugin=self._get_plugin(tenant_name, implementation)
)
if operation_context['plugin']:
split_task_name = implementation.split('.')[1:]
operation_context['task_name'] = '.'.join(split_task_name)
else:
operation_context['task_name'] = implementation
return hook_context, operation_context
def _get_plugin(self, tenant_name, implementation):
package_name = implementation.split('.')[0]
filter_plugin = {'package_name': package_name}
admin_api_token = get_admin_api_token()
rest_client = get_rest_client(tenant=tenant_name,
api_token=admin_api_token)
plugins = rest_client.plugins.list(**filter_plugin)
if not plugins:
return {}
plugins.sort(key=lambda p: StrictVersion(p.package_version),
reverse=True)
return {
'package_name': package_name,
'package_version': plugins[0]['package_version'],
'visibility': plugins[0]['visibility']
}
| python |
# Generated by Django 2.2.3 on 2019-07-30 13:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20190730_0034'),
('profiles', '0002_profile_follows'),
]
operations = [
migrations.AddField(
model_name='profile',
name='favorites',
field=models.ManyToManyField(related_name='favorited_by', to='events.Event'),
),
]
| python |
import numpy as np
import warnings
from time import sleep
from main import get_prediction
from example_data_base import save_historical_data, get_historical_data
prediction_rates = {}
def get_random_array(n):
return np.random.randint(0, 10, n).tolist()
def convert_to_db_format(predictions):
cars = predictions.get('cars')
features = predictions.get('features')
hist_array = []
for c in cars:
car_rate = c['rate']
if car_rate in prediction_rates:
prediction_rates[car_rate] += 1
else:
prediction_rates[car_rate] = 1
car_record = []
car_record.extend(features)
car_record.append(c['name'])
car_record.append(get_rate(c['doubleRate']))
hist_array.append(car_record)
return hist_array
def get_rate(predict_rate):
if predict_rate > 0.39:
return 5.0
elif predict_rate > 0.29:
return 4.0
elif predict_rate > 0.19:
return 3.0
elif predict_rate > 0.09:
return 2.0
else:
return 1.0
def generate_data(n):
for i in range(n):
engine = get_random_array(5)
car_body = get_random_array(4)
costs = get_random_array(3)
car_details = get_random_array(3)
equipment = get_random_array(3)
driving_features = get_random_array(4)
arguments = {"engine": engine,
"car_body": car_body,
"costs": costs,
"car_details": car_details,
"equipment": equipment,
"driving_features": driving_features}
predictions = get_prediction(arguments)
db_records = convert_to_db_format(predictions)
save_historical_data(db_records, python_call=True)
print("Finished for [%d/%d]" % (i + 1, n))
sleep(1)
if __name__ == "__main__":
warnings.filterwarnings("ignore")
generate_data(150)
history = get_historical_data(python_call=True)
print(len(history))
for p in prediction_rates:
print(p, prediction_rates[p])
| python |
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/2057
#!/usr/bin/env python2.7
# encoding : utf-8
numero = sum([int(i) for i in raw_input().split(" ")])
if numero < 0:
print numero + 24
elif numero < 24:
print numero
else:
print numero-24
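# For example, an input line of "10 -10 -5" sums to -5, which is negative,
# so the program prints -5 + 24 = 19.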
| python |
"""Centralized setup of logging for the service."""
import logging.config
import sys
from os import path
def setup_logging(conf):
"""Create the services logger."""
if conf and path.isfile(conf):
logging.config.fileConfig(conf)
print("Configure logging, from conf:{}".format(conf), file=sys.stdout)
return logging.getLogger(__name__)
else:
print(
"Unable to configure logging, attempted conf:{}".format(conf),
file=sys.stderr,
)
def log_error(msg):
"""Log error."""
logging.error(msg)
def log_bpm_error(msg):
"""Log error."""
logging.error(msg)
    logging.error(
        "The connection between Python and the Camunda API is not configured properly. "
        "Ensure the required environment variables are set and that the listener "
        "(camunda-rest-api) is configured in Keycloak."
    )
def log_info(msg):
"""Log info."""
logging.info(msg)
| python |
import setuptools
setuptools.setup(
name='pytorch-nce2',
version='0.0.1',
author='Kaiyu Shi',
author_email='[email protected]',
description='An NCE implementation in pytorch',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/Stonesjtu/Pytorch-NCE',
packages=['nce'],
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| python |
import os
import errno
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
DATA_PATH = 'raw_data'
SAMPLE_RATE = 16000
DURATION = 2.5
OFFSET = 0.5
HOP_LENGTH = 512
# MFCC -> (n_mfcc, t)
# t = sample_rate * time / hop_length
MAX_LENGTH = int((SAMPLE_RATE * DURATION // HOP_LENGTH) + 1)
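# With the defaults above: 16000 * 2.5 = 40000 samples, 40000 // 512 = 78,
# so MAX_LENGTH = 78 + 1 = 79 frames.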
def preprocess_data():
dir_lists = os.listdir(DATA_PATH)
mfcc_vectors = []
labels = []
for dir_list in dir_lists:
if dir_list == '.DS_Store':
continue
file_path = os.path.join(DATA_PATH, dir_list)
files = os.listdir(file_path)
print("==================== {} ====================".format(dir_list))
for file in files:
if file == '.DS_Store':
continue
label = get_label(file.strip('.wav'))
mfcc = wav2mfcc(os.path.join(file_path, file), duration=DURATION, offset=OFFSET)
print(file, mfcc.shape, label.shape)
mfcc_vectors.append(mfcc)
labels.append(label)
mfcc_vectors = np.array(mfcc_vectors)
labels = np.array(labels)
np.savez('train_data.npz', x_train=mfcc_vectors, y_train=labels)
print(mfcc_vectors.shape, labels.shape)
def get_label(file_name):
''' Filename identifiers
Modality (01 = full-AV, 02 = video-only, 03 = audio-only).
Vocal channel (01 = speech, 02 = song).
Emotion (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised).
Emotional intensity (01 = normal, 02 = strong). NOTE: There is no strong intensity for the ‘neutral’ emotion.
Statement (01 = “Kids are talking by the door”, 02 = “Dogs are sitting by the door”).
Repetition (01 = 1st repetition, 02 = 2nd repetition).
Actor (01 to 24. Odd numbered actors are male, even numbered actors are female).
'''
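    # For example, '03-01-08-01-02-01-08' (see the commented-out example at the
    # bottom of this file) decodes per the scheme above as: audio-only, speech,
    # surprised, normal intensity, statement 2, 1st repetition, actor 08
    # (even -> female), so this function returns the label array [15].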
file_name = file_name.split('-')
label = []
if int(file_name[6])%2 != 0: # male
if file_name[2] == '01':
label.append(0)
elif file_name[2] == '02':
label.append(1)
elif file_name[2] == '03':
label.append(2)
elif file_name[2] == '04':
label.append(3)
elif file_name[2] == '05':
label.append(4)
elif file_name[2] == '06':
label.append(5)
elif file_name[2] == '07':
label.append(6)
elif file_name[2] == '08':
label.append(7)
else: # female
if file_name[2] == '01':
label.append(8)
elif file_name[2] == '02':
label.append(9)
elif file_name[2] == '03':
label.append(10)
elif file_name[2] == '04':
label.append(11)
elif file_name[2] == '05':
label.append(12)
elif file_name[2] == '06':
label.append(13)
elif file_name[2] == '07':
label.append(14)
elif file_name[2] == '08':
label.append(15)
label = np.array(label)
return label
def wav2mfcc(file_path, sr=None, offset=0.0, duration=None, n_mfcc=13, max_length=MAX_LENGTH):
data, sr = librosa.load(file_path, mono=True, sr=sr, offset=offset, duration=duration)
data = data[::3]
mfcc = librosa.feature.mfcc(data, sr=16000, n_mfcc=n_mfcc)
if (max_length > mfcc.shape[1]):
#print(max_length, mfcc.shape[1])
pad_width = max_length - mfcc.shape[1]
mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
else:
mfcc = mfcc[:, :max_length]
'''
# plot
plt.figure()
plt.subplot(2,1,1)
librosa.display.waveplot(data, sr=sr)
plt.subplot(2,1,2)
librosa.display.specshow(mfcc, x_axis='time')
#plt.colorbar()
plt.title('MFCC')
plt.tight_layout()
plt.show()
'''
return mfcc
def load_dataset(split_ratio=0.8, random_state=42):
data = np.load('train_data.npz')
x_train, y_train = data['x_train'], data['y_train']
data.close()
#y_train = np_utils.to_categorical(y_train, 16)
return train_test_split(x_train, y_train, test_size= (1 - split_ratio), random_state=random_state, shuffle=True)
def save_model(model, model_name):
file_path = 'model/{}.h5'.format(model_name)
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
model.save(file_path)
def plot_loss(history, file_name):
file_path = 'images/{}.png'.format(file_name)
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.savefig(file_path)
plt.show()
def plot_accuracy(history, file_name):
file_path = 'images/{}.png'.format(file_name)
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model train vs validation accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(file_path)
plt.show()
if __name__ == "__main__":
preprocess_data()
#file_path = 'raw_data/Actor_08/03-01-08-01-02-01-08.wav'
#file_name = '03-01-08-01-02-01-08'
#mfcc = wav2mfcc(file_path, sr=None, offset=0.5, duration=2.5, n_mfcc=13)
| python |
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import asynctest
from pathlib import Path
from irmacl_async.apiclient import AAPI
SAMPLES_DIR = Path(__file__).parent / "samples"
ZIP_SAMPLE = "zipbomb.zip"
class TestZipBomb(asynctest.TestCase):
async def test_zipbomb(self):
async with AAPI() as api:
probelist = await api.probes.list()
probe = 'Unarchive'
if probe not in probelist:
raise asynctest.SkipTest(
"Skipping {} not present".format(probe))
sample = SAMPLES_DIR / ZIP_SAMPLE
            scan = await api.scans.scan(
                [sample], linger=True, probes=[probe], force=True)
self.assertEqual(len(scan.results), 1)
self.assertEqual(scan.probes_finished, 1)
result = await api.scans.result(scan.results[0])
self.assertEqual(len(result.probe_results), 1)
probe_result = result.probe_results[0]
self.assertEqual(probe_result.status, -1)
self.assertIsNotNone(probe_result.error)
            self.assertIsNone(probe_result.results)
if __name__ == "__main__":
asynctest.main()
| python |
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
def __str__(self):
return self.name
class Page(models.Model):
category = models.ForeignKey(Category, on_delete=models.CASCADE)
title = models.CharField(max_length=128)
url = models.URLField()
views = models.IntegerField(default=0)
def __str__(self):
return self.title
| python |
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# mock, just outputs empty .h/.cpp files
import os
import sys
if len(sys.argv) == 2:
basename, ext = os.path.splitext(sys.argv[1])
with open('%s.h' % basename, 'w') as f:
f.write('// %s.h\n' % basename)
with open('%s.cpp' % basename, 'w') as f:
f.write('// %s.cpp\n' % basename)
| python |
from .unigram import UniGramModel
| python |
import os
import pandas as pd
jaea_fns_175 = pd.read_csv(os.path.join(__path__[0], "JAEA_FNS_175.csv")).set_index("E")
| python |
import torch
import torch.nn as nn
from utils import split_data,read_json_file, get_text
from dataset import my_dataset,my_collate_fn
from model import my_model,weights_init
from engine import train_fn,eval_fn
import cv2
from sklearn import model_selection
import pandas as pd
vocab="- !#$%&'()*+,./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`lr{|}~\""
num_cha=len(vocab)
print(num_cha)
data=read_json_file(path='../data/For_task_2/data.json')
img_paths=list(data.keys())
txt_paths=list(data.values())
batch_size=32
X_train, X_val, y_train, y_val = model_selection.train_test_split(img_paths, txt_paths, test_size=0.2, random_state=1)
train_dataset = my_dataset(X_train,y_train,vocab)
val_dataset = my_dataset(X_val,y_val,vocab)
#test_dataset = my_dataset(X_test,y_test)
print(len(train_dataset))
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size, shuffle=True, collate_fn=my_collate_fn,)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size, shuffle=False, collate_fn=my_collate_fn,)
#test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size, shuffle=False,collate_fn=my_collate_fn,)
model=my_model(num_cha)
model.apply(weights_init)
NUM_EPOCHS=50
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using ",device)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
MODEL_SAVE_PATH = './weights/my_model.pth'
# model.load_state_dict(torch.load(MODEL_SAVE_PATH))
def train(model,MODEL_SAVE_PATH ,NUM_EPOCHS,optimizer):
best_val_loss=999
print("Training...")
log=[]
for epoch in range(1,NUM_EPOCHS+1):
train_loss = train_fn(model, train_dataloader, optimizer,device)
val_loss = eval_fn(model, val_dataloader,device)
log_epoch = {"epoch": epoch, "train_loss": train_loss, "val_loss": val_loss}
log.append(log_epoch)
df = pd.DataFrame(log)
df.to_csv("./weights/logs2.csv")
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(model.state_dict(),MODEL_SAVE_PATH)
print("Epoch {} || epoch_train_loss: {:.4f} || Epoch_val_loss: {:.4f} ".format(epoch + 1,train_loss, val_loss))
train(model,MODEL_SAVE_PATH ,NUM_EPOCHS,optimizer) | python |
# creating tuples
# empty tuple
s1=()
print('s1 : ',s1)
# tuple with multiple elements and accessing it
s2=(2782,'thakur',99)
print('s2 : ',s2)
# another way to create tuples and access them
S3=(82,85,96,56,70,99)
print('S3 : ',S3)
s4=74,'sandeep',90
print('s4 : ',s4)
s3=(82)
print('s3=(82): ',s3)
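# note: (82) without a trailing comma is just the integer 82, not a tuple;
# use (82,) to create a one-element tuple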
# creating a new tuple and including previous tuple values in it
s5=s1,(27,'thakur')
print('s5=s1,(27,\'thakur\') : ',s5)
#1 slicing
print(S3[0])
print('s3[0] : ',S3[0])
print('s3[::-1] : ',S3[::-1])
print('s3[0:2] : ',S3[0:2])
#2 add i.e concatination
print('s3+s2 : ',S3+s2)
#3 replication
print('s3*5 : ',S3*5)
# some functions of tuples
print('min(s3) : ',min(S3))
print('max(s3) : ',max(S3))
print('len(s3) : ',len(S3))
| python |
P = 10
objects = [(5, 18),(2, 9), (4, 12), (6,25)]
print("Items available: ",objects)
print("***********************************")
objects = filter(lambda x: x[0]<=P, objects)
objects = sorted(objects, key=lambda x: x[1]/x[0], reverse=True)
weight, value, subset = 0, 0, []
print("Items filtered and sorted: ",objects)
print("***********************************")
for item in objects:
if weight + item[0] <= P:
weight = weight + item[0]
value = value + item[1]
subset.append(item)
print("Subset selected: ",subset)
print("Total value: " ,value)
print("Total weight: ",weight)
| python |
from setuptools import setup, find_packages
setup(
name="JsonDataManager",
license="MIT",
version="1.0",
author="PieSignal",
author_email="[email protected]",
url="https://github.com/PieSignal/JsonDataManager",
    install_requires=["typing >= 3.7.4.1, <4"],
packages=find_packages(),
)
| python |
import json
import re
import sys
from math import sin, cos, sqrt, atan2, radians
def main():
LAT_ORIGIN = radians(39.103119) # YOUR LOCATION LATITUDE IN ( )
LON_ORIGIN = radians(-84.512016) # YOUR LOCATION LONGITUDE IN ( )
radius_of_earth = 6378.0
results = []
with open("list.txt") as airports:
with open('airports.json') as json_file:
data = json.load(json_file)
for line in airports:
if line.strip():
regex = r"\((.*)\)"
matches = re.search(regex, line)
if matches:
DEST = "K" + matches.group(1)
#for airport in data:
airport = data[DEST]
#if DEST == airport:
lat2 = radians(airport["lat"])
lon2 = radians(airport["lon"])
dlon = lon2 - LON_ORIGIN
dlat = lat2 - LAT_ORIGIN
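                        # Haversine formula: great-circle distance between the
                        # two points on a sphere of radius_of_earth (6378 km).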
a = sin(dlat / 2)**2 + cos(LAT_ORIGIN) * \
cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
if (len(sys.argv) > 1):
if (sys.argv[1] == "-km"):
distance = radius_of_earth * c
else:
distance = radius_of_earth * c * .621371
else:
distance = radius_of_earth * c * .621371
result = {
"name": airport["name"],
"distance": distance
}
results.append(result)
results = [dict(t) for t in {tuple(d.items()) for d in results}]
results = sorted(results, key=lambda k: k['distance'])
for result in results:
print(result)
if __name__ == "__main__":
import time
start = time.time()
main()
end = time.time()
print(end-start)
| python |
import vcf
import argparse
from record import Record, PhaseSet, ChromosomoHaplotype
from stats import PhaseSetStats, HapStats
def get_phase_set_stats(template_phase_set:PhaseSet, phase_set:PhaseSet):
prev_record: Record
record: Record
t_record: Record
t_prev_record: Record
record_count = 0
switch_error_count = 0
mismatch_error_count = 0
total_record = len(phase_set.records_idx)
prev_switch_error = False
last_record_pos = 0
last_record_idx = 0
first_record_idx = 0
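    # A "switch error" is counted whenever the phase relation between two adjacent
    # records disagrees with the template; two consecutive switch errors at the
    # same position are re-counted below as a single "mismatch error" (point error).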
for record_pos in phase_set.records.keys():
record = phase_set.records[record_pos]
record_count += 1
t_record = template_phase_set.records[record_pos]
if record_count == total_record:
last_record_idx = record.idx
last_record_pos = record.pos
if record_count == 1:
prev_record = record
first_record_idx = record.idx
t_prev_record = t_record
else:
switched = record.switched(prev_record)
t_switched = t_record.switched(t_prev_record)
if switched != t_switched: # switch error
if record_count > 2 and record_count < total_record:
switch_error_count += 1
if prev_switch_error: # mismatch error
mismatch_error_count += 1
switch_error_count -= 2
prev_switch_error = False
else:
prev_switch_error = True
else: #no switch error for ajunct pos, reset
prev_switch_error = False
prev_record = record
t_prev_record = t_record
S50 = total_record
N50 = last_record_pos - phase_set.starting_pos
    spanned_record = last_record_idx - first_record_idx + 1
    AN50 = N50 / spanned_record * S50
    return AN50, S50, N50, switch_error_count, mismatch_error_count, spanned_record
def get_haplotype_stats_chromo(template_chromo:ChromosomoHaplotype, in_chromo:ChromosomoHaplotype, out, contig):
template_phase_set:PhaseSet
phase_set : PhaseSet
template_phase_set = list(template_chromo.chromo_phase_set.values() )[0]
chromo_snp_count = len(template_phase_set.records_idx)
chromo_span = max(template_phase_set.records_idx) - min(template_phase_set.records_idx)
hap_stats = HapStats(chromo_snp_count, chromo_span)
index = 0
for phase_set in in_chromo.chromo_phase_set.values():
AN50, S50, N50, switch_error_count, mismatch_error_count, spanned_snp = get_phase_set_stats(template_phase_set, phase_set)
phase_set_stats = PhaseSetStats(switch_error_count, mismatch_error_count, S50, N50, AN50, spanned_snp)
if S50 < 2:
continue
hap_stats.insert_phase_set_stats(0, phase_set_stats)
index += 1
out.write("%s\t%d\t%d\t%d\t%d\t%.8f\t%.8f\n" % (contig, phase_set_stats.get_AN50(), phase_set_stats.get_N50(), phase_set_stats.get_phased_snp(), spanned_snp, phase_set_stats.get_switch_error(), phase_set_stats.get_mismatch_error()))
out.write("%s\t%d\t%d\t%d\t%d\t%.8f\t%.8f\n" % (contig + "_total", hap_stats.get_AN50(), hap_stats.get_N50(), hap_stats.get_total_phased(), hap_stats.get_total_spanned(), hap_stats.get_switch_error(), hap_stats.get_mismatch_error()))
return hap_stats
def get_haplotype_stats(template_vcf:vcf.Reader, in_vcf:vcf.Reader, out):
contigs = in_vcf.contigs.keys()
hap_stats = HapStats()
for contig in contigs:
try:
template_vcf.fetch(contig)
template_chromo = ChromosomoHaplotype(template_vcf, contig)
in_chromo = ChromosomoHaplotype(in_vcf, contig)
chromo_hap_stats = get_haplotype_stats_chromo(template_chromo, in_chromo, out, contig)
hap_stats.insert_hap_stats(chromo_hap_stats)
except:
continue
out.write("%s\t%d\t%d\t%d\t%d\t%.8f\t%.8f\n" % ("total", hap_stats.get_AN50(), hap_stats.get_N50(), hap_stats.get_total_phased(), hap_stats.get_total_spanned(),hap_stats.get_switch_error(), hap_stats.get_mismatch_error()))
def main():
parser = argparse.ArgumentParser('phaseset_to_vcf.py')
parser.add_argument('-t', '--template', help='template vcf, indexed', required=True)
parser.add_argument('-v', '--vcf', help='input vcf, indexed', required=True)
parser.add_argument('-o', '--out', help='output stats', required=True)
options = parser.parse_args()
in_vcf = vcf.Reader(filename=options.vcf)
template_vcf = vcf.Reader(filename=options.template)
outf = open(options.out, 'w')
outf.write("Chromosome\tAN50\tN50\tphased_snp\ttotal_snp\tswitch_error_rate\tmismatch_error_rate\n")
get_haplotype_stats(template_vcf, in_vcf, outf)
outf.close()
return
if __name__ == '__main__':
main()
| python |
# File that prepares the transcripts into CSV for insertion into the database
# Created by Thomas Orth
import pandas as pd
import sys
# CHANGE THESE VALUES DEPENDING ON THE TRANSCRIPT
name = "Charles Terry"
summary = "Charles Terry is interviewed about his life in old trenton and other aspects such as working for the Board of Education."
audio_path = "https://archive.org/download/CharlesTerryInterview415115/Charles%20Terry%20Interview%204%EF%80%A215%EF%80%A215.MP3"
text_path = "charles.pdf"
title = "Charles Terry Interview Transcription"
content = ""
# Read raw transcript data
with open(sys.argv[1]) as f:
content = ''.join(f.readlines())
# Prepare the transcript csv
x = pd.DataFrame(columns=['title', 'text_file_path', 'audio_file_path', 'summary', 'text_content'], data=[[title, text_path, audio_path, summary, content.replace('"', '')]])
x.to_csv("insert_data_transcript.csv", sep="|", index=False)
# Prepare the participants csv
participants = [[name]]
p = pd.DataFrame(columns=['name'], data=participants)
p.to_csv("insert_data_participants.csv", sep="|", index=False)
# Prepare the locations CSV
locations = [["Mercer Street"]]
l = pd.DataFrame(columns=['street_name'], data=locations)
l.to_csv("insert_data_locations.csv", sep="|", index=False)
# Prepare the keywords CSV
keywords = [["charles"], ["neighborhood"]]
k = pd.DataFrame(columns=['keyword'], data=keywords)
k.to_csv('insert_data_keywords.csv', sep="|", index=False)
| python |
from __future__ import print_function
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class PQTNode:
"""PQT Node class"""
def __init__(self, bounds=[[0., 1.], [0., 1.]]):
self.children = []
self.bounds = bounds
self.content = defaultdict(list)
self.p = 0.
def __str__(self):
return "[{:.3},{:.3}]x[{:.3},{:.3}] ".format(self.bounds[0][0],
self.bounds[0][1],
self.bounds[1][0],
self.bounds[1][1]) \
+ "{} chldrn {:.3} prb".format(len(self.children), self.p)
def __repr__(self):
return "PQTNode({}, {})".format(self.bounds[0], self.bounds[1])
def split(self):
""" Adds children to the current node """
x0, x1 = self.bounds[0]
y0, y1 = self.bounds[1]
xc, yc = 0.5*(x0+x1), 0.5*(y0+y1)
# Add subcoordinates
self.children = [
PQTNode([[x0,xc],[y0,yc]]),
PQTNode([[xc,x1],[y0,yc]]),
PQTNode([[xc,x1],[yc,y1]]),
PQTNode([[x0,xc],[yc,y1]])
]
return self.children
def encloses(self, coord):
""" Checks if point passed is bounded
Parameters:
coord - tuple of point
Returns:
whether or not enclosing
"""
x0, x1 = self.bounds[0]
y0, y1 = self.bounds[1]
return x0 <= coord[0] < x1 \
and y0 <= coord[1] < y1
def draw(self, ax, show_prob=False, p_hat=0.01):
""" Draws a rectangle corresponding to the cell"""
x0, x1 = self.bounds[0]
y0, y1 = self.bounds[1]
ax.add_patch(patches.Rectangle((x0,y0), x1-x0, y1-y0,
fill=None, linewidth=0.5))
if show_prob:
ax.add_patch(patches.Rectangle((x0,y0), x1-x0, y1-y0,
linewidth=0.5, alpha=self.p/p_hat, facecolor="red"))
def center(self):
return [0.5*sum(self.bounds[0]), 0.5*sum(self.bounds[1])]
class PQTDecomposition:
"""PQT Decomposition data structure class"""
def __init__(self):
self.root = PQTNode()
self.leaves = []
def from_points(self, points=[], p_hat=0.01):
""" Initialize from points
Parameters:
points - list of sample point tuples,
p_hat - maximum probability of a leaf,
"""
n_pts = float(len(points))
# Check that atoms do not have probability higher than p_hat, if they
# are then we set p_hat to the probability of an atom.
atom_p = 1./n_pts
self.p_hat = atom_p if (atom_p > p_hat) else p_hat
def gen_pqt(node, pts):
node.p = len(pts)/n_pts
# The first condition is the subpartitioning rule for a pqt.
if node.p >= p_hat and len(pts) > 1:
# Add children to the current node
node.split()
# For each new node, generate from all points that fall inside
# the cell
for child in node.children:
gen_pqt(child, [pt for pt in pts if child.encloses(pt)])
else:
# Otherwise the node is a leaf, so add it
self.leaves.append(node)
# Start recursion through the root node
gen_pqt(self.root, points)
return self
def from_pdf(self, pdf, p_hat=0.01):
""" Initialize from pdf
Parameters:
pdf - function f(x,y) with compact support contained in
the bounding square
p_hat - maximum probability of a leaf
"""
from scipy.integrate import nquad
self.p_hat = p_hat
def gen_pqt(node):
# Compute the probability over the cell
node.p,_ = nquad(pdf, node.bounds)
# If the probability is too high then split the cell and generate
# sub-trees
if node.p >= p_hat:
node.split()
for child in node.children:
gen_pqt(child)
else:
# Otherwise the node is a leaf
self.leaves.append(node)
gen_pqt(self.root)
return self
    def __repr__(self):
return "PQTDecomposition()"
def __str__(self):
print_str = ""
# Store node, depth data on stack. Work through tree depth first
node_stack = [(self.root, 0)]
# If there are things on the stack
while node_stack:
node, depth = node_stack.pop()
i = None
for i in xrange(depth):
print_str += " "
else:
if i is not None:
print_str += "- "
print_str += str(node) + "\n"
# If the node has children then process them next on the stack
for child in node.children:
node_stack.append((child,depth+1))
return print_str
def enclosing_leaf(self, coords):
def _get_leaf(node):
# Check all children (if any)
for child in node.children:
# Search down branch if contains coord
if child.encloses(coords):
return _get_leaf(child)
return node
# Check if the point is enclosed by the pqt
if self.root.encloses(coords):
return _get_leaf(self.root)
return None
def add_point(self, coord, attr='pts'):
leaf = self.enclosing_leaf(coord)
if not leaf:
return False
leaf.content[attr].append(coord)
return True
def add_points(self, coords, attr='pts'):
        all_suc = True
        for coord in coords:
            all_suc &= self.add_point(coord, attr=attr)
        return all_suc
def draw(self, show_prob=False):
""" Draws the pqt using matplotlib
Parameters:
show_prob - whether or not probability should be displayed
as a shade
"""
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
for leaf in self.leaves:
leaf.draw(ax, show_prob=show_prob, p_hat=self.p_hat)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.plot()
plt.show()
if __name__ == "__main__":
from random import random
#n_pts = 1000
#pts = [(random(),random()) for i in xrange(n_pts)]
#decomp = PQTDecomposition().from_points(pts, p_hat=0.001, store=True)
def pdf(x, y):
return 3./4. * (2 - x**2 - y**2)
decomp = PQTDecomposition().from_pdf(pdf, p_hat=0.001)
empt_leaf = decomp.enclosing_leaf([0.9,0.9])
decomp.draw(show_prob=True)
| python |
''' '''
'''
ISC License
Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
#
# Basilisk Scenario Script and Integrated Test
#
# Purpose: Integrated test of the spacecraftPlus(), extForceTorque, simpleNav(),
# MRP_Feedback() with attitude navigation modules. Illustrates how
# attitude guidance behavior can be changed in a very modular manner.
# Author: Hanspeter Schaub
# Creation Date: Dec. 2, 2016
#
import pytest
import os
import numpy as np
# import general simulation support files
from Basilisk.utilities import SimulationBaseClass
from Basilisk.utilities import unitTestSupport # general support file with common unit test functions
import matplotlib.pyplot as plt
from Basilisk.utilities import macros
from Basilisk.utilities import orbitalMotion
from Basilisk.utilities import RigidBodyKinematics
# import simulation related support
from Basilisk.simulation import spacecraftPlus
from Basilisk.simulation import extForceTorque
from Basilisk.utilities import simIncludeGravBody
from Basilisk.simulation import simple_nav
# import FSW Algorithm related support
from Basilisk.fswAlgorithms import MRP_Feedback
from Basilisk.fswAlgorithms import hillPoint
from Basilisk.fswAlgorithms import attTrackingError
# import message declarations
from Basilisk.fswAlgorithms import fswMessages
# Plotting functions
def plot_attitude_error(timeLineSet, dataSigmaBR):
plt.figure(1)
fig = plt.gcf()
ax = fig.gca()
vectorData = unitTestSupport.pullVectorSetFromData(dataSigmaBR)
sNorm = np.array([np.linalg.norm(v) for v in vectorData])
plt.plot(timeLineSet, sNorm,
color=unitTestSupport.getLineColor(1, 3),
)
plt.xlabel('Time [min]')
    plt.ylabel(r'Attitude Error Norm $|\sigma_{B/R}|$')
ax.set_yscale('log')
def plot_control_torque(timeLineSet, dataLr):
plt.figure(2)
for idx in range(1, 4):
plt.plot(timeLineSet, dataLr[:, idx],
color=unitTestSupport.getLineColor(idx, 3),
label='$L_{r,' + str(idx) + '}$')
plt.legend(loc='lower right')
plt.xlabel('Time [min]')
plt.ylabel('Control Torque $L_r$ [Nm]')
def plot_rate_error(timeLineSet, dataOmegaBR):
plt.figure(3)
for idx in range(1, 4):
        plt.plot(timeLineSet, dataOmegaBR[:, idx],
                 color=unitTestSupport.getLineColor(idx, 3),
                 label=r'$\omega_{BR,' + str(idx) + '}$')
plt.legend(loc='lower right')
plt.xlabel('Time [min]')
plt.ylabel('Rate Tracking Error [rad/s] ')
return
def plot_orientation(timeLineSet, dataPos, dataVel, dataSigmaBN):
vectorPosData = unitTestSupport.pullVectorSetFromData(dataPos)
vectorVelData = unitTestSupport.pullVectorSetFromData(dataVel)
vectorMRPData = unitTestSupport.pullVectorSetFromData(dataSigmaBN)
data = np.empty([len(vectorPosData), 3])
for idx in range(0, len(vectorPosData)):
ir = vectorPosData[idx] / np.linalg.norm(vectorPosData[idx])
hv = np.cross(vectorPosData[idx], vectorVelData[idx])
ih = hv / np.linalg.norm(hv)
itheta = np.cross(ih, ir)
dcmBN = RigidBodyKinematics.MRP2C(vectorMRPData[idx])
data[idx] = [np.dot(ir, dcmBN[0]), np.dot(itheta, dcmBN[1]), np.dot(ih, dcmBN[2])]
plt.figure(4)
labelStrings = (r'$\hat\imath_r\cdot \hat b_1$'
, r'${\hat\imath}_{\theta}\cdot \hat b_2$'
, r'$\hat\imath_h\cdot \hat b_3$')
for idx in range(0, 3):
plt.plot(timeLineSet, data[:, idx],
color=unitTestSupport.getLineColor(idx + 1, 3),
label=labelStrings[idx])
plt.legend(loc='lower right')
plt.xlabel('Time [min]')
plt.ylabel('Orientation Illustration')
## \defgroup Tutorials_2_1
## @{
## How to use guidance modules to align the spacecraft frame to the orbit or Hill frame.
#
# Attitude Alignment with Hill Orbit Frame {#scenarioAttitudeGuidance}
# ====
#
# Scenario Description
# -----
# This script sets up a 6-DOF spacecraft which is orbiting the Earth. The scenario is
# setup to be run in two different setups:
# Setup | useAltBodyFrame
# ----- | -------------------
# 1 | False
# 2 | True
#
# To run the default scenario 1., call the python script through
#
# python scenarioAttitudeGuidance.py
#
# The simulation layout is shown in the following illustration. A single simulation process is created
# which contains both the spacecraft simulation modules, as well as the Flight Software (FSW) algorithm
# modules.
# 
#
# When the simulation completes 4 plots are shown for the MRP attitude history, the rate
# tracking errors, the control torque vector, as well as the projection of the body-frame B
# axes \f$\hat b_1\f$, b2 and b3 onto the respective Hill or Orbit frame axes \f$\hat\imath_r\f$,
# \f$\hat\imath_{\theta}\f$ and \f$\hat\imath_h\f$. This latter plot illustrates how the body
# is being aligned with respect to this Hill frame.
#
# The basic simulation setup is the same as the one used in
# [scenarioAttitudeFeedback.py](@ref scenarioAttitudeFeedback).
# The dynamics simulation is setup using a SpacecraftPlus() module to which a gravity
# effector is attached. Note that both the rotational and translational degrees of
# freedom of the spacecraft hub are turned on here to get a 6-DOF simulation. For more
# information on how to setup orbit, see [scenarioBasicOrbit.py](@ref scenarioBasicOrbit)
#
# However, instead of doing an inertial pointing maneuver, here the hillFrame() attitude guidance module
# is used:
# ~~~~~~~~~~~~~{.py}
# attGuidanceConfig = hillPoint.hillPointConfig()
# attGuidanceWrap = scSim.setModelDataWrap(attGuidanceConfig)
# attGuidanceWrap.ModelTag = "hillPoint"
# attGuidanceConfig.inputNavDataName = sNavObject.outputTransName
# attGuidanceConfig.inputCelMessName = earth.bodyInMsgName
# attGuidanceConfig.outputDataName = "guidanceOut"
# scSim.AddModelToTask(simTaskName, attGuidanceWrap, attGuidanceConfig)
# ~~~~~~~~~~~~~
#
# In contrast to the simple inertial pointing guidance module, this module also requires the
# spacecraft's position and velocity information. The planet ephemeris message relative to which the Hill pointing
# is achieved is specified by setting the `inputCelMessName` message.
# This is useful, for example, if orbiting the sun, and wanting to point the spacecraft back at the
# Earth which is also orbiting the sun. In this scenario, however, the spacecraft is to point at the
# Earth while already orbiting the Earth. Thus, this planet ephemeris input message is not set, which
# in return zeros the planets position and velocity vector states in the guidance module.
#
#
# Setup 1
# -----
#
# Which scenario is run is controlled at the bottom of the file in the code
# ~~~~~~~~~~~~~{.py}
# if __name__ == "__main__":
# run(
# True, # show_plots
# False # useAltBodyFrame
# )
# ~~~~~~~~~~~~~
# The first 2 arguments can be left as is. The remaining argument(s) control the
# simulation scenario flags to turn on or off certain simulation conditions. The
# default scenario shown has the `useAltBodyFrame` flag turned off. This means that we seek
# to align the body frame *B* with the Hill reference frame *R*. The
# resulting attitude and control torque histories are shown below. Note that the projections
# of the body frame axes onto the Hill frame axes all converge to +1, indicating that B becomes
# asymptotically aligned with R as desired.
# 
# 
# 
#
#
# Setup 2
# -----
#
# To run the second scenario, change the main routine at the bottom of the file to read:
# ~~~~~~~~~~~~~{.py}
# if __name__ == "__main__":
# run(
# True, # show_plots
# True # useAltBodyFrame
# )
# ~~~~~~~~~~~~~
# Here the control should not align the principal body frame *B* with *R*, but rather an alternate,
# corrected body frame *Bc*. For example, consider the Earth observing sensors to be mounted pointing in the
# positive \f$\hat b_1\f$ direction. In scenario 1 this sensor platform is actually pointing away from
# the Earth. Thus, we define the corrected body frame orientation as a 180 deg rotation about
# \f$\hat b_2\f$. This flips the orientation of the final first and third body axis. This is achieved
# through:
# ~~~~~~~~~~~~~{.py}
# attErrorConfig.sigma_R0R = [0,1,0]
# ~~~~~~~~~~~~~
# The DCM \f$[R_0R]\f$ is the same as the body to corrected body DCM \f$[B_cB]\f$.
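# As an illustrative check (not part of the original tutorial), the DCM that this MRP set
# produces can be evaluated directly with the already-imported RigidBodyKinematics module
# to confirm it is a 180 deg rotation about the 2nd body axis:
# ~~~~~~~~~~~~~{.py}
# RigidBodyKinematics.MRP2C([0, 1, 0])
# # returns approximately [[-1, 0, 0], [0, 1, 0], [0, 0, -1]]
# ~~~~~~~~~~~~~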
# The resulting attitude and control torque histories are shown below. Note that the projections
# of the 2nd body frame axis onto the 2nd Hill frame axis converges to +1, while the other
# projections converge to -1. This indicates that the desired asymptotic Earth observing attitude
# is achieved.
# 
# 
# 
#
## @}
def run(show_plots, useAltBodyFrame):
'''Call this routine directly to run the tutorial scenario.'''
# Create simulation variable names
simTaskName = "simTask"
simProcessName = "simProcess"
# Create a sim module as an empty container
scSim = SimulationBaseClass.SimBaseClass()
scSim.TotalSim.terminateSimulation()
# set the simulation time variable used later on
simulationTime = macros.min2nano(10.)
#
# create the simulation process
#
dynProcess = scSim.CreateNewProcess(simProcessName)
# create the dynamics task and specify the integration update time
simulationTimeStep = macros.sec2nano(0.1)
dynProcess.addTask(scSim.CreateNewTask(simTaskName, simulationTimeStep))
# if this scenario is to interface with the BSK Viz, uncomment the following lines
# unitTestSupport.enableVisualization(scSim, dynProcess, simProcessName, 'earth')
    # The Viz only supports 'earth', 'mars', or 'sun'
#
# setup the simulation tasks/objects
#
# initialize spacecraftPlus object and set properties
scObject = spacecraftPlus.SpacecraftPlus()
scObject.ModelTag = "spacecraftBody"
# define the simulation inertia
I = [900., 0., 0.,
0., 800., 0.,
0., 0., 600.]
scObject.hub.mHub = 750.0 # kg - spacecraft mass
scObject.hub.r_BcB_B = [[0.0], [0.0], [0.0]] # m - position vector of body-fixed point B relative to CM
scObject.hub.IHubPntBc_B = unitTestSupport.np2EigenMatrix3d(I)
# add spacecraftPlus object to the simulation process
scSim.AddModelToTask(simTaskName, scObject)
# clear prior gravitational body and SPICE setup definitions
gravFactory = simIncludeGravBody.gravBodyFactory()
# setup Earth Gravity Body
earth = gravFactory.createEarth()
earth.isCentralBody = True # ensure this is the central gravitational body
mu = earth.mu
# attach gravity model to spaceCraftPlus
    scObject.gravField.gravBodies = spacecraftPlus.GravBodyVector(list(gravFactory.gravBodies.values()))
#
# initialize Spacecraft States with initialization variables
#
# setup the orbit using classical orbit elements
oe = orbitalMotion.ClassicElements()
oe.a = 10000000.0 # meters
oe.e = 0.1
oe.i = 33.3 * macros.D2R
oe.Omega = 48.2 * macros.D2R
oe.omega = 347.8 * macros.D2R
oe.f = 85.3 * macros.D2R
rN, vN = orbitalMotion.elem2rv(mu, oe)
scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN) # m - r_CN_N
scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN) # m/s - v_CN_N
scObject.hub.sigma_BNInit = [[0.1], [0.2], [-0.3]] # sigma_BN_B
scObject.hub.omega_BN_BInit = [[0.001], [-0.01], [0.03]] # rad/s - omega_BN_B
# setup extForceTorque module
# the control torque is read in through the messaging system
extFTObject = extForceTorque.ExtForceTorque()
extFTObject.ModelTag = "externalDisturbance"
# use the input flag to determine which external torque should be applied
# Note that all variables are initialized to zero. Thus, not setting this
    # vector would leave its components all zero for the simulation.
scObject.addDynamicEffector(extFTObject)
scSim.AddModelToTask(simTaskName, extFTObject)
# add the simple Navigation sensor module. This sets the SC attitude, rate, position
# velocity navigation message
sNavObject = simple_nav.SimpleNav()
sNavObject.ModelTag = "SimpleNavigation"
scSim.AddModelToTask(simTaskName, sNavObject)
#
# setup the FSW algorithm tasks
#
# setup hillPoint guidance module
attGuidanceConfig = hillPoint.hillPointConfig()
attGuidanceWrap = scSim.setModelDataWrap(attGuidanceConfig)
attGuidanceWrap.ModelTag = "hillPoint"
attGuidanceConfig.inputNavDataName = sNavObject.outputTransName
attGuidanceConfig.inputCelMessName = earth.bodyInMsgName
attGuidanceConfig.outputDataName = "guidanceOut"
scSim.AddModelToTask(simTaskName, attGuidanceWrap, attGuidanceConfig)
# setup the attitude tracking error evaluation module
attErrorConfig = attTrackingError.attTrackingErrorConfig()
attErrorWrap = scSim.setModelDataWrap(attErrorConfig)
attErrorWrap.ModelTag = "attErrorInertial3D"
scSim.AddModelToTask(simTaskName, attErrorWrap, attErrorConfig)
attErrorConfig.outputDataName = "attErrorMsg"
if useAltBodyFrame:
attErrorConfig.sigma_R0R = [0, 1, 0]
attErrorConfig.inputRefName = attGuidanceConfig.outputDataName
attErrorConfig.inputNavName = sNavObject.outputAttName
# setup the MRP Feedback control module
mrpControlConfig = MRP_Feedback.MRP_FeedbackConfig()
mrpControlWrap = scSim.setModelDataWrap(mrpControlConfig)
mrpControlWrap.ModelTag = "MRP_Feedback"
scSim.AddModelToTask(simTaskName, mrpControlWrap, mrpControlConfig)
mrpControlConfig.inputGuidName = attErrorConfig.outputDataName
mrpControlConfig.vehConfigInMsgName = "vehicleConfigName"
mrpControlConfig.outputDataName = extFTObject.cmdTorqueInMsgName
mrpControlConfig.K = 3.5
mrpControlConfig.Ki = -1.0 # make value negative to turn off integral feedback
mrpControlConfig.P = 30.0
mrpControlConfig.integralLimit = 2. / mrpControlConfig.Ki * 0.1
mrpControlConfig.domega0 = [0.0, 0.0, 0.0]
#
# Setup data logging before the simulation is initialized
#
numDataPoints = 100
samplingTime = simulationTime / (numDataPoints - 1)
scSim.TotalSim.logThisMessage(mrpControlConfig.outputDataName, samplingTime)
scSim.TotalSim.logThisMessage(attErrorConfig.outputDataName, samplingTime)
scSim.TotalSim.logThisMessage(sNavObject.outputTransName, samplingTime)
scSim.TotalSim.logThisMessage(sNavObject.outputAttName, samplingTime)
#
# create simulation messages
#
# create the FSW vehicle configuration message
vehicleConfigOut = fswMessages.VehicleConfigFswMsg()
vehicleConfigOut.ISCPntB_B = I # use the same inertia in the FSW algorithm as in the simulation
unitTestSupport.setMessage(scSim.TotalSim,
simProcessName,
mrpControlConfig.vehConfigInMsgName,
vehicleConfigOut)
#
# initialize Simulation
#
scSim.InitializeSimulationAndDiscover()
#
    # configure a simulation stop time and execute the simulation run
#
scSim.ConfigureStopTime(simulationTime)
scSim.ExecuteSimulation()
#
# retrieve the logged data
#
dataLr = scSim.pullMessageLogData(mrpControlConfig.outputDataName + ".torqueRequestBody", range(3))
dataSigmaBR = scSim.pullMessageLogData(attErrorConfig.outputDataName + ".sigma_BR", range(3))
dataOmegaBR = scSim.pullMessageLogData(attErrorConfig.outputDataName + ".omega_BR_B", range(3))
dataPos = scSim.pullMessageLogData(sNavObject.outputTransName + ".r_BN_N", range(3))
dataVel = scSim.pullMessageLogData(sNavObject.outputTransName + ".v_BN_N", range(3))
dataSigmaBN = scSim.pullMessageLogData(sNavObject.outputAttName + ".sigma_BN", range(3))
np.set_printoptions(precision=16)
#
# plot the results
#
fileName = os.path.basename(os.path.splitext(__file__)[0])
timeLineSet = dataSigmaBR[:, 0] * macros.NANO2MIN
plt.close("all") # clears out plots from earlier test runs
plot_attitude_error(timeLineSet, dataSigmaBR)
figureList = {}
pltName = fileName + "1" + str(int(useAltBodyFrame))
figureList[pltName] = plt.figure(1)
plot_control_torque(timeLineSet, dataLr)
pltName = fileName + "2" + str(int(useAltBodyFrame))
figureList[pltName] = plt.figure(2)
plot_rate_error(timeLineSet, dataOmegaBR)
plot_orientation(timeLineSet, dataPos, dataVel, dataSigmaBN)
pltName = fileName + "4" + str(int(useAltBodyFrame))
figureList[pltName] = plt.figure(4)
if show_plots:
plt.show()
# close the plots being saved off to avoid over-writing old and new figures
plt.close("all")
return dataPos, dataSigmaBN, numDataPoints, figureList
#
# This statement below ensures that the unit test script can be run as a
# stand-alone python script
#
if __name__ == "__main__":
run(
True, # show_plots
False # useAltBodyFrame
)
| python |
class Matrix(object):
def __init__(self, matrix_string):
self.__matrix = [[int(el) for el in line.split()]
for line in matrix_string.splitlines()]
def row(self, index):
return self.__matrix[index-1].copy()
def column(self, index):
return [el[index-1] for el in self.__matrix]
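# Minimal usage sketch (not part of the original exercise solution): row() and
# column() use 1-based indices on a whitespace-separated matrix string.
if __name__ == "__main__":
    matrix = Matrix("1 2 3\n4 5 6\n7 8 9")
    print(matrix.row(1))     # -> [1, 2, 3]
    print(matrix.column(3))  # -> [3, 6, 9]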
| python |
def texto(num):
cores = {'Vermelho': '\033[31;1m', 'Azul': '\033[1;34m', 'Limpa': '\033[m'}
print(f'{cores["Vermelho"]}ERRO! "{cores["Azul"]}{num}{cores["Vermelho"]}" não é um valor válido!{cores["Limpa"]}')
def leiadinheiro(msg):
while True:
resp = str(input(msg)).strip()
resp1 = resp.replace(' ', '')
resp1 = resp1.replace(',', '.')
if '.' in resp1:
cont = 0
val = True
for pos, info in enumerate(resp1+' '):
if cont > 1:
texto(resp)
break
if str(info).isalpha():
val = False
texto(resp)
break
if info in '.':
cont += 1
if info == ' ':
if len(resp1) == 1:
texto(resp)
break
if cont == 1 and len(resp1) != 1 and val:
break
elif resp1.isnumeric():
break
else:
texto(resp)
return float(resp1)
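# Illustrative usage (not part of the original script): keeps prompting until a
# value such as "3,50" or "10" is accepted, then echoes it back.
if __name__ == '__main__':
    valor = leiadinheiro('Digite o preço: R$')
    print(f'O valor digitado foi R${valor:.2f}')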
| python |
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Static data and helper functions."""
import math
import re
import sys
import time
import boto
from third_party.retry_decorator.decorators import retry
# We don't use the oauth2 authentication plugin directly; importing it here
# ensures that it's loaded and available by default. Note: we made this static
# state instead of Command instance state because the top-level gsutil code
# needs to check it.
HAVE_OAUTH2 = False
try:
from oauth2_plugin import oauth2_helper
HAVE_OAUTH2 = True
except ImportError:
pass
TWO_MB = 2 * 1024 * 1024
NO_MAX = sys.maxint
# Binary exponentiation strings.
_EXP_STRINGS = [
(0, 'B', 'bit'),
(10, 'KB', 'kbit'),
(20, 'MB', 'Mbit'),
(30, 'GB', 'Gbit'),
(40, 'TB', 'Tbit'),
(50, 'PB', 'Pbit'),
]
# Detect platform types.
IS_WINDOWS = 'win32' in str(sys.platform).lower()
IS_LINUX = 'linux' in str(sys.platform).lower()
IS_OSX = 'darwin' in str(sys.platform).lower()
Retry = retry
# Enum class for specifying listing style.
class ListingStyle(object):
SHORT = 'SHORT'
LONG = 'LONG'
LONG_LONG = 'LONG_LONG'
def HasConfiguredCredentials():
"""Determines if boto credential/config file exists."""
config = boto.config
has_goog_creds = (config.has_option('Credentials', 'gs_access_key_id') and
config.has_option('Credentials', 'gs_secret_access_key'))
has_amzn_creds = (config.has_option('Credentials', 'aws_access_key_id') and
config.has_option('Credentials', 'aws_secret_access_key'))
has_oauth_creds = (HAVE_OAUTH2 and
config.has_option('Credentials', 'gs_oauth2_refresh_token'))
has_auth_plugins = config.has_option('Plugin', 'plugin_directory')
return (has_goog_creds or has_amzn_creds or has_oauth_creds
or has_auth_plugins)
def _RoundToNearestExponent(num):
i = 0
while i+1 < len(_EXP_STRINGS) and num >= (2 ** _EXP_STRINGS[i+1][0]):
i += 1
return i, round(float(num) / 2 ** _EXP_STRINGS[i][0], 2)
def MakeHumanReadable(num):
"""Generates human readable string for a number of bytes.
Args:
num: The number, in bytes.
Returns:
A string form of the number using size abbreviations (KB, MB, etc.).
"""
i, rounded_val = _RoundToNearestExponent(num)
return '%s %s' % (rounded_val, _EXP_STRINGS[i][1])
def MakeBitsHumanReadable(num):
"""Generates human readable string for a number of bits.
Args:
num: The number, in bits.
Returns:
A string form of the number using bit size abbreviations (kbit, Mbit, etc.)
"""
i, rounded_val = _RoundToNearestExponent(num)
return '%s %s' % (rounded_val, _EXP_STRINGS[i][2])
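# Illustrative values (not part of the original module):
#   MakeHumanReadable(2048)              -> '2.0 KB'
#   MakeBitsHumanReadable(2 * 1024 ** 2) -> '2.0 Mbit'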
def Percentile(values, percent, key=lambda x:x):
"""Find the percentile of a list of values.
Taken from: http://code.activestate.com/recipes/511478/
Args:
values: a list of numeric values. Note that the values MUST BE already
sorted.
percent: a float value from 0.0 to 1.0.
key: optional key function to compute value from each element of the list
of values.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(values[int(k)])
d0 = key(values[int(f)]) * (c-k)
d1 = key(values[int(c)]) * (k-f)
return d0 + d1
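# Illustrative values (not part of the original module): for the sorted list
# [1, 2, 3, 4], Percentile(values, 0.5) interpolates between the two middle
# elements and returns 2.5; Percentile([1, 2, 3], 0.5) returns 2.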
def ExtractErrorDetail(e):
"""Extract <Details> text from XML content.
Args:
e: The GSResponseError that includes XML to be parsed.
Returns:
(exception_name, d), where d is <Details> text or None if not found.
"""
exc_name_parts = re.split("[\.']", str(type(e)))
if len(exc_name_parts) < 2:
# Shouldn't happen, but have fallback in case.
exc_name = str(type(e))
else:
exc_name = exc_name_parts[-2]
if not hasattr(e, 'body'):
return (exc_name, None)
detail_start = e.body.find('<Details>')
detail_end = e.body.find('</Details>')
if detail_start != -1 and detail_end != -1:
return (exc_name, e.body[detail_start+9:detail_end])
return (exc_name, None)
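# Illustrative behavior (not part of the original module): for a GSResponseError
# whose body contains '<Details>Access denied.</Details>', ExtractErrorDetail
# returns ('GSResponseError', 'Access denied.').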
| python |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
QUERYENGINE_API_ROOT = "http://{host}:{port}/v3/queryengine".format(
host=os.environ["QUERYENGINE_API_HOST"], port=os.environ["QUERYENGINE_API_PORT"]
)
AUTH_API_ROOT = "http://{host}:{port}/v3/auth".format(
host=os.environ["AUTH_API_HOST"], port=os.environ["AUTH_API_PORT"]
)
META_API_ROOT = "http://{host}:{port}/v3/meta".format(
host=os.environ["META_API_HOST"], port=os.environ["META_API_PORT"]
)
DATALAB_API_ROOT = "http://{host}:{port}/v3/datalab".format(
host=os.environ["DATALAB_API_HOST"], port=os.environ["DATALAB_API_PORT"]
)
DATAFLOW_API_ROOT = "http://{host}:{port}/v3/dataflow".format(
host=os.environ["DATAFLOW_API_HOST"], port=os.environ["DATAFLOW_API_PORT"]
)
DATAHUB_API_ROOT = "http://{host}:{port}/v3".format(
host=os.environ["DATAHUB_API_HOST"], port=os.environ["DATAHUB_API_PORT"]
)
JUPYTERHUB_USER = os.environ["JUPYTERHUB_USER"]
| python |
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Cycle Task Entry RBAC Factory."""
from ggrc.models import all_models
from integration.ggrc import Api
from integration.ggrc.access_control.rbac_factories import base
from integration.ggrc.models import factories
class CycleTaskEntryRBACFactory(base.BaseRBACFactory):
"""Cycle Task Entry RBAC factory class."""
def __init__(self, user_id, acr, parent=None):
"""Set up objects for Cycle Task Entry permission tests.
Args:
user_id: Id of user under which all operations will be run.
acr: Instance of ACR that should be assigned for tested user.
parent: Model name in scope of which objects should be set up.
"""
# pylint: disable=unused-argument
self.setup_workflow_scope(user_id, acr)
self.api = Api()
self.create()
if user_id:
user = all_models.Person.query.get(user_id)
self.api.set_user(user)
def create(self):
"""Create new Cycle Task Entry object."""
cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
return self.api.post(all_models.CycleTaskEntry, {
"cycle_task_entry": {
"description": "New Comment",
"is_declining_review": "",
"context": None,
"cycle_task_group_object_task": {
"id": cycle_task.id,
"type": "CycleTaskGroupObjectTask",
},
"cycle": {
"id": cycle_task.cycle.id,
"type": "Cycle",
},
}
})
def read(self):
"""Read existing Cycle Task Entry object."""
cycle_task_entry = all_models.CycleTaskEntry.query.first()
return self.api.get(cycle_task_entry, cycle_task_entry.id)
def update(self):
"""Update title of existing Cycle Task Entry object."""
cycle_task_entry = all_models.CycleTaskEntry.query.first()
return self.api.put(
cycle_task_entry,
{"description": factories.random_str()}
)
def delete(self):
"""Delete Cycle Task Entry object."""
cycle_task_entry = all_models.CycleTaskEntry.query.first()
return self.api.delete(cycle_task_entry)
| python |
from simplecv.data import test_transforms as ttas
from albumentations import Compose, OneOf, Normalize
from albumentations import HorizontalFlip, VerticalFlip, RandomRotate90, RandomCrop
from simplecv.api.preprocess import albu
from albumentations.pytorch import ToTensorV2
import torch.nn as nn
config = dict(
model=dict(
type='GSiameseResNet',
params=dict(
backbone=dict(
resnet_type='resnext101_32x4d',
include_conv5=True,
batchnorm_trainable=True,
pretrained=True,
freeze_at=0,
# 16 or 32
output_stride=32,
with_cp=(False, False, False, False),
norm_layer=nn.BatchNorm2d,
),
neck=dict(
in_channels_list=(256, 512, 1024, 2048),
out_channels=256,
),
head=dict(
in_channels=256,
out_channels=256,
num_classes=5,
upsample_scale=4.0,
num_blocks=1,
bottleneck_channels=128
),
loss=dict(
cls_weight=1.0,
ignore_index=255,
dam=dict(
ohem=dict(
ratio=0.8
)
),
loc=dict(
tversky_loss=dict(alpha=0.7, beta=0.3),
bce_loss=dict(),
)
)
),
),
data=dict(
train=dict(
type='Xview2PairwiseDataLoader',
params=dict(
image_dir=('./xview2/train/images', './xview2/tier3/images'),
label_dir=('./xview2/train/labels', './xview2/tier3/labels'),
mode='segm',
include=('pre', 'post'),
CV=dict(
on=True,
cur_k=0,
k_fold=5,
),
transforms=Compose([
OneOf([
HorizontalFlip(True),
VerticalFlip(True),
RandomRotate90(True)
], p=0.75),
albu.RandomDiscreteScale([0.75, 1.25, 1.5], p=0.5),
RandomCrop(640, 640, True),
Normalize(mean=(0.485, 0.456, 0.406,
0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225,
0.229, 0.224, 0.225), max_pixel_value=255),
ToTensorV2(True),
]),
batch_size=4,
num_workers=4,
training=True
),
),
test=dict(
type='Xview2PairwiseDataLoader',
params=dict(
image_dir=('./xview2/train/images', './xview2/tier3/images'),
label_dir=('./xview2/train/labels', './xview2/tier3/labels'),
mode='segm',
include=('pre', 'post'),
CV=dict(
on=True,
cur_k=0,
k_fold=5,
),
transforms=Compose([
Normalize(mean=(0.485, 0.456, 0.406,
0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225,
0.229, 0.224, 0.225), max_pixel_value=255),
ToTensorV2(True),
]),
batch_size=1,
num_workers=0,
training=False
),
),
),
optimizer=dict(
type='sgd',
params=dict(
momentum=0.9,
weight_decay=0.0001
),
grad_clip=dict(
max_norm=35,
norm_type=2,
)
),
learning_rate=dict(
type='poly',
params=dict(
base_lr=0.03,
power=0.9,
max_iters=30000,
)),
train=dict(
forward_times=1,
num_iters=30000,
eval_per_epoch=False,
summary_grads=False,
summary_weights=False,
distributed=True,
apex_sync_bn=True,
sync_bn=False,
eval_after_train=True,
log_interval_step=50,
save_ckpt_interval_epoch=40,
eval_interval_epoch=40,
),
test=dict(
tta=[
ttas.Rotate90k(1),
ttas.Rotate90k(2),
ttas.Rotate90k(3),
ttas.HorizontalFlip(),
ttas.VerticalFlip(),
ttas.Transpose(),
ttas.Scale(scale_factor=0.75),
ttas.Scale(scale_factor=1.0),
ttas.Scale(scale_factor=1.25),
ttas.Scale(scale_factor=1.5),
]
),
)
| python |
from .logit_lens import LogitLens
| python |
"""
@author Huaze Shen
@date 2019-07-19
"""
def combination_sum_2(candidates, target):
results = []
if candidates is None or len(candidates) == 0:
return results
candidates = sorted(candidates)
combination = []
helper(results, combination, candidates, 0, target)
return results
def helper(results, combination, candidates, start_index, remain_target):
if remain_target == 0:
results.append(combination[:])
return
for i in range(start_index, len(candidates)):
if candidates[i] > remain_target:
return
if i > start_index and candidates[i] == candidates[i - 1]:
continue
combination.append(candidates[i])
helper(results, combination, candidates, i + 1, remain_target - candidates[i])
combination.pop()
if __name__ == '__main__':
candidates_ = [10, 1, 2, 7, 6, 1, 5]
target_ = 8
print(combination_sum_2(candidates_, target_))
| python |
from django.test import TestCase
from django.urls import reverse
from user.forms import (AssociatedEmailChoiceForm, AddEmailForm,
LoginForm, ProfileForm, RegistrationForm)
from user.models import User
class TestForms(TestCase):
def create_test_forms(self, FormClass, valid_dict, invalid_dict, user=None):
"""
Helper method to create a valid and invalid form of a certain form class.
Some forms require the user object
"""
if user:
self.valid_form = FormClass(user=user, data=valid_dict)
self.invalid_form = FormClass(user=user, data=invalid_dict)
else:
self.valid_form = FormClass(data=valid_dict)
self.invalid_form = FormClass(data=invalid_dict)
def run_test_forms(self, invalid_form_errors):
"""
Helper method to test the valid form and an invalid form.
Input the expected form error of the invalid form.
Remember, this method name cannot begin with 'test'
"""
self.assertTrue(self.valid_form.is_valid())
self.assertFalse(self.invalid_form.is_valid())
self.assertEqual(self.invalid_form.errors, invalid_form_errors)
def test_associated_email_choice_form(self):
"""
Choice field in form, cannot use create helper function
"""
user = User.objects.get(email='[email protected]')
self.valid_form = AssociatedEmailChoiceForm(user=user,
selection_type='primary', data={'associated_email':'[email protected]'})
self.invalid_form = AssociatedEmailChoiceForm(user=user,
selection_type='public', data={'associated_email':'[email protected]'})
self.run_test_forms({'associated_email':['Select a valid choice. That choice is not one of the available choices.']})
def test_associated_email_form(self):
self.create_test_forms(AddEmailForm, {'email':'[email protected]'},
{'email':'nonexistent'})
self.run_test_forms({'email': ['Enter a valid email address.']})
def test_login_form(self):
self.create_test_forms(LoginForm, {'username':'admin','password':'Tester11!'},
{'username':'admin', 'password':'wrong'})
self.run_test_forms({'__all__':['Please enter a correct username/email and password. Note that the password field is case-sensitive.']})
def test_profile_form(self):
self.create_test_forms(ProfileForm, {'first_names':'Tester Mid',
'last_name':'Bot',
'url':'http://physionet.org'},
{'first_names':'Tester Mid',
'last_name':'', 'phone':'0'})
self.run_test_forms({'last_name': ['This field is required.']})
def test_user_creation_form(self):
self.create_test_forms(RegistrationForm, {'email': '[email protected]',
'username': 'The-Tester', 'first_names': 'Tester Mid',
'last_name': 'Bot'}, {'email': '[email protected]',
'username': 'bot-net', 'first_names': '', 'last_name': 'Bot'})
self.run_test_forms({'first_names': ['This field is required.']})
| python |
# Copyright (c) 2015-2017 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
"""
This file should only include the version. Do not import any packages or
modules here because this file needs to be executed before SWITCH is
installed and executed in environments that don't have any dependencies
installed.
"""
__version__='2.0.1'
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .dependency import Dependency
from ..config import Configuration
from ..util.process import Process
from ..util.color import Color
import os
class Hashcat(Dependency):
dependency_required = False
dependency_name = 'hashcat'
dependency_url = 'https://hashcat.net/hashcat/'
@staticmethod
def should_use_force():
command = ['hashcat', '-I']
stderr = Process(command).stderr()
return 'No devices found/left' in stderr
@staticmethod
def crack_handshake(handshake, show_command=False):
# Generate hccapx
hccapx_file = HcxPcapTool.generate_hccapx_file(
handshake, show_command=show_command)
key = None
# Crack hccapx
for additional_arg in ([], ['--show']):
command = [
'hashcat',
'--quiet',
'-m', '2500',
hccapx_file,
Configuration.wordlist
]
if Hashcat.should_use_force():
command.append('--force')
command.extend(additional_arg)
if show_command:
Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command))
process = Process(command)
stdout, stderr = process.get_output()
if ':' not in stdout:
continue
else:
key = stdout.split(':', 5)[-1].strip()
break
if os.path.exists(hccapx_file):
os.remove(hccapx_file)
return key
@staticmethod
def crack_pmkid(pmkid_file, verbose=False):
'''
Cracks a given pmkid_file using the PMKID/WPA2 attack (-m 16800)
Returns:
Key (str) if found; `None` if not found.
'''
# Run hashcat once normally, then with --show if it failed
# To catch cases where the password is already in the pot file.
for additional_arg in ([], ['--show']):
command = [
'hashcat',
'--quiet', # Only output the password if found.
'-m', '16800', # WPA-PMKID-PBKDF2
'-a', '0', # Wordlist attack-mode
pmkid_file,
Configuration.wordlist
]
if Hashcat.should_use_force():
command.append('--force')
command.extend(additional_arg)
if verbose and additional_arg == []:
Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command))
# TODO: Check status of hashcat (%); it's impossible with --quiet
hashcat_proc = Process(command)
hashcat_proc.wait()
stdout = hashcat_proc.stdout()
if ':' not in stdout:
# Failed
continue
else:
# Cracked
key = stdout.strip().split(':', 1)[1]
return key
class HcxDumpTool(Dependency):
dependency_required = False
dependency_name = 'hcxdumptool'
dependency_url = 'https://github.com/ZerBea/hcxdumptool'
def __init__(self, target, pcapng_file):
# Create filterlist
filterlist = Configuration.temp('pmkid.filterlist')
with open(filterlist, 'w') as filter_handle:
filter_handle.write(target.bssid.replace(':', ''))
if os.path.exists(pcapng_file):
os.remove(pcapng_file)
command = [
'hcxdumptool',
'-i', Configuration.interface,
'--filterlist', filterlist,
'--filtermode', '2',
'-c', str(target.channel),
'-o', pcapng_file
]
self.proc = Process(command)
def poll(self):
return self.proc.poll()
def interrupt(self):
self.proc.interrupt()
class HcxPcapTool(Dependency):
dependency_required = False
dependency_name = 'hcxpcaptool'
dependency_url = 'https://github.com/ZerBea/hcxtools'
def __init__(self, target):
self.target = target
self.bssid = self.target.bssid.lower().replace(':', '')
self.pmkid_file = Configuration.temp('pmkid-%s.16800' % self.bssid)
@staticmethod
def generate_hccapx_file(handshake, show_command=False):
hccapx_file = Configuration.temp('generated.hccapx')
if os.path.exists(hccapx_file):
os.remove(hccapx_file)
command = [
'hcxpcaptool',
'-o', hccapx_file,
handshake.capfile
]
if show_command:
Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command))
process = Process(command)
stdout, stderr = process.get_output()
if not os.path.exists(hccapx_file):
raise ValueError('Failed to generate .hccapx file, output: \n%s\n%s' % (
stdout, stderr))
return hccapx_file
@staticmethod
def generate_john_file(handshake, show_command=False):
john_file = Configuration.temp('generated.john')
if os.path.exists(john_file):
os.remove(john_file)
command = [
'hcxpcaptool',
'-j', john_file,
handshake.capfile
]
if show_command:
Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command))
process = Process(command)
stdout, stderr = process.get_output()
if not os.path.exists(john_file):
raise ValueError('Failed to generate .john file, output: \n%s\n%s' % (
stdout, stderr))
return john_file
def get_pmkid_hash(self, pcapng_file):
if os.path.exists(self.pmkid_file):
os.remove(self.pmkid_file)
command = [
'hcxpcaptool',
'-z', self.pmkid_file,
pcapng_file
]
hcxpcap_proc = Process(command)
hcxpcap_proc.wait()
if not os.path.exists(self.pmkid_file):
return None
with open(self.pmkid_file, 'r') as f:
output = f.read()
# Each line looks like:
# hash*bssid*station*essid
# Note: The dumptool will record *anything* it finds, ignoring the filterlist.
# Check that we got the right target (filter by BSSID)
matching_pmkid_hash = None
for line in output.split('\n'):
fields = line.split('*')
if len(fields) >= 3 and fields[1].lower() == self.bssid:
# Found it
matching_pmkid_hash = line
break
os.remove(self.pmkid_file)
return matching_pmkid_hash
| python |
from PyQt5.QtWidgets import QWidget, \
QHBoxLayout,\
QVBoxLayout,\
QDialog,\
QLineEdit,\
QLabel,\
QPushButton
from PyQt5.QtCore import Qt
class NewFile(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.name = QLineEdit()
        self.name.setText("Untitled")
self.__btn_create_clicked = False
self.setup_ui()
self.show()
def setup_ui(self):
self.setWindowTitle("New File")
self.resize(300, 80)
self.setWindowModality(Qt.ApplicationModal)
main_lay = QVBoxLayout()
lay1 = QHBoxLayout()
lay2 = QHBoxLayout()
label = QLabel()
label.setText("File name:")
lay1.addWidget(label)
lay1.addWidget(self.name)
btn_ok = QPushButton("Create")
btn_ok.clicked.connect(self.__btn_ok_click)
btn_cancel = QPushButton("Cancel")
btn_cancel.clicked.connect(self.close)
lay2.addWidget(btn_ok)
lay2.addWidget(btn_cancel)
main_lay.addLayout(lay1)
main_lay.addLayout(lay2)
self.setLayout(main_lay)
def __btn_ok_click(self):
self.__btn_create_clicked = True
if self.name.text() == "":
            self.name.setText("Untitled")
self.close()
def is_create_clicked(self):
return self.__btn_create_clicked
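# Minimal manual-test sketch (not part of the original module); assumes a desktop
# session where a Qt event loop can run.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    dialog = NewFile()
    dialog.exec_()
    if dialog.is_create_clicked():
        print("Creating file:", dialog.name.text())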
| python |
'''
Base class for RTE test suite
'''
import abc
import numpy as np
class BaseTestRTE(object):
'''
base class to test all interfaces
'''
__metaclass__ = abc.ABCMeta
@property
@abc.abstractmethod
def _interface(self):
return None
def test_apply_bc_0(self):
'''
apply zero boundary condition
'''
downward_flux = np.random.randn(2, 20, 3)
out_array = self._interface.apply_zero_bc(downward_flux)
self.validate_bcs(out_array, 0)
def test_apply_inc_flux(self):
'''
apply incident flux
'''
downward_flux = np.random.randn(2, 20, 3)
incident_flux = 10*np.ones((2, 3), dtype=np.double)
out_array = self._interface.apply_gpoint_bc(
downward_flux, incident_flux)
self.validate_bcs(out_array, incident_flux)
def test_apply_scaled_inc_flux(self):
'''
apply incident flux
'''
downward_flux = np.random.randn(2, 20, 3)
incident_flux = 10*np.ones((2, 3), dtype=np.double)
scale_factor = np.arange(3, dtype=np.double)
out_array = self._interface.apply_scaled_gpoint_bc(
downward_flux, incident_flux, scale_factor)
self.validate_bcs(out_array, scale_factor*incident_flux)
def validate_bcs(self, array, target):
'''
check if bcs are applied correctly.
Args:
array (ndarray):
output from RTE.
target (ndarray or float):
what to check against.
'''
if self._interface.direction == 'top_to_bottom':
assert np.all(array[:, 0, :] == target)
else:
assert np.all(array[:, -1, :] == target)
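# Hypothetical sketch (not part of the original suite): a minimal fake interface
# illustrating the contract BaseTestRTE expects -- three boundary-condition
# methods plus a ``direction`` attribute. The names below are invented for
# illustration and do not refer to a real RTE implementation.
class _FakeRTEInterface(object):
    '''Applies boundary values to the first level of a (col, lev, gpt) array.'''
    direction = 'top_to_bottom'
    def apply_zero_bc(self, downward_flux):
        out = downward_flux.copy()
        out[:, 0, :] = 0.0
        return out
    def apply_gpoint_bc(self, downward_flux, incident_flux):
        out = downward_flux.copy()
        out[:, 0, :] = incident_flux
        return out
    def apply_scaled_gpoint_bc(self, downward_flux, incident_flux, scale_factor):
        out = downward_flux.copy()
        out[:, 0, :] = scale_factor * incident_flux
        return out
class TestFakeRTE(BaseTestRTE):
    '''Concrete suite exercising the fake interface above.'''
    @property
    def _interface(self):
        return _FakeRTEInterface()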
| python |
"""A client for Team Foundation Server."""
from __future__ import unicode_literals
import logging
import os
import re
import sys
import tempfile
import xml.etree.ElementTree as ET
from six.moves.urllib.parse import unquote
from rbtools.clients import RepositoryInfo, SCMClient
from rbtools.clients.errors import (InvalidRevisionSpecError,
SCMError,
TooManyRevisionsError)
from rbtools.utils.appdirs import user_data_dir
from rbtools.utils.checks import check_gnu_diff, check_install
from rbtools.utils.diffs import filename_match_any_patterns
from rbtools.utils.process import execute
class TFExeWrapper(object):
"""Implementation wrapper for using VS2017's tf.exe."""
REVISION_WORKING_COPY = '--rbtools-working-copy'
def __init__(self, config=None, options=None):
"""Initialize the wrapper.
Args:
config (dict, optional):
The loaded configuration.
options (argparse.Namespace, optional):
The command line options.
"""
self.config = config
self.options = options
def get_local_path(self):
"""Return the local path to the working tree.
Returns:
unicode:
The filesystem path of the repository on the client system.
"""
workfold = self._run_tf(['vc', 'workfold', os.getcwd()])
m = re.search('^Collection: (.*)$', workfold, re.MULTILINE)
if m:
return unquote(m.group(1))
logging.debug('Could not find the collection from "tf vc workfold"')
return None
def get_repository_info(self):
"""Return repository information for the current working tree.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure.
"""
path = self.get_local_path()
if path:
# Now that we know it's TFS, make sure we have GNU diff installed, and
# error out if we don't.
check_gnu_diff()
return RepositoryInfo(path=path, local_path=path)
return None
def parse_revision_spec(self, revisions):
"""Parse the given revision spec.
Args:
revisions (list of unicode):
A list of revisions as specified by the user. Items in the list
do not necessarily represent a single revision, since the user
can use the TFS-native syntax of ``r1~r2``. Versions passed in
can be any versionspec, such as a changeset number,
``L``-prefixed label name, ``W`` (latest workspace version), or
``T`` (latest upstream version).
Raises:
rbtools.clients.errors.TooManyRevisionsError:
Too many revisions were specified.
rbtools.clients.errors.InvalidRevisionSpecError:
The given revision spec could not be parsed.
Returns:
dict:
A dictionary with the following keys:
``base`` (:py:class:`unicode`):
A revision to use as the base of the resulting diff.
``tip`` (:py:class:`unicode`):
A revision to use as the tip of the resulting diff.
``parent_base`` (:py:class:`unicode`, optional):
The revision to use as the base of a parent diff.
These will be used to generate the diffs to upload to Review Board
(or print). The diff for review will include the changes in (base,
tip], and the parent diff (if necessary) will include (parent,
base].
If a single revision is passed in, this will return the parent of
that revision for "base" and the passed-in revision for "tip".
If zero revisions are passed in, this will return revisions
relevant for the "current change" (changes in the work folder which
have not yet been checked in).
"""
n_revisions = len(revisions)
if n_revisions == 1 and '~' in revisions[0]:
revisions = revisions[0].split('~')
n_revisions = len(revisions)
if n_revisions == 0:
# Most recent checked-out revision -- working copy
return {
'base': self._convert_symbolic_revision('W'),
'tip': self.REVISION_WORKING_COPY,
}
elif n_revisions == 1:
# Either a numeric revision (n-1:n) or a changelist
revision = self._convert_symbolic_revision(revisions[0])
return {
'base': revision - 1,
'tip': revision,
}
elif n_revisions == 2:
# Diff between two numeric revisions
return {
'base': self._convert_symbolic_revision(revisions[0]),
'tip': self._convert_symbolic_revision(revisions[1]),
}
else:
raise TooManyRevisionsError
return {
'base': None,
'tip': None,
}
def _convert_symbolic_revision(self, revision, path=None):
"""Convert a symbolic revision into a numeric changeset.
Args:
revision (unicode):
The TFS versionspec to convert.
path (unicode, optional):
The itemspec that the revision applies to.
Returns:
int:
The changeset number corresponding to the versionspec.
"""
# We pass results_unicode=False because that uses the filesystem
# encoding to decode the output, but the XML results we get should
# always be UTF-8, and are well-formed with the encoding specified. We
# can therefore let ElementTree determine how to decode it.
data = self._run_tf(['vc', 'history', '/stopafter:1', '/recursive',
'/format:detailed', '/version:%s' % revision,
path or os.getcwd()])
        m = re.search(r'^Changeset: (\d+)$', data, re.MULTILINE)
        if not m:
            logging.debug('Failed to parse output from "tf vc history":\n%s',
                          data)
            raise InvalidRevisionSpecError(
                '"%s" does not appear to be a valid versionspec' % revision)
        return int(m.group(1))
def diff(self, revisions, include_files, exclude_patterns, **kwargs):
"""Return the generated diff.
Args:
revisions (dict):
A dictionary containing ``base`` and ``tip`` keys.
include_files (list):
A list of file paths to include in the diff.
exclude_patterns (list):
A list of file paths to exclude from the diff.
**kwargs (dict, unused):
Unused keyword arguments.
Returns:
dict:
A dictionary containing the following keys:
``diff`` (:py:class:`bytes`):
The contents of the diff to upload.
            ``base_commit_id`` (:py:class:`unicode`, optional):
The ID of the commit that the change is based on, if available.
This is necessary for some hosting services that don't provide
individual file access.
"""
base = str(revisions['base'])
tip = str(revisions['tip'])
if tip == self.REVISION_WORKING_COPY:
# TODO: support committed revisions
return self._diff_working_copy(base, include_files,
exclude_patterns)
else:
raise SCMError('Posting committed changes is not yet supported '
'for TFS when using the tf.exe wrapper.')
def _diff_working_copy(self, base, include_files, exclude_patterns):
"""Return a diff of the working copy.
Args:
base (unicode):
The base revision to diff against.
include_files (list):
A list of file paths to include in the diff.
exclude_patterns (list):
A list of file paths to exclude from the diff.
Returns:
dict:
A dictionary containing ``diff``, ``parent_diff``, and
``base_commit_id`` keys. In the case of TFS, the parent diff key
will always be ``None``.
"""
# We pass results_unicode=False because that uses the filesystem
# encoding, but the XML results we get should always be UTF-8, and are
# well-formed with the encoding specified. We can therefore let
# ElementTree determine how to decode it.
status = self._run_tf(['vc', 'status', '/format:xml'],
results_unicode=False)
root = ET.fromstring(status)
diff = []
for pending_change in root.findall(
'./PendingSet/PendingChanges/PendingChange'):
action = pending_change.attrib['chg'].split(' ')
old_filename = \
pending_change.attrib.get('srcitem', '').encode('utf-8')
new_filename = pending_change.attrib['item'].encode('utf-8')
local_filename = pending_change.attrib['local']
old_version = \
pending_change.attrib.get('svrfm', '0').encode('utf-8')
file_type = pending_change.attrib['type']
encoding = pending_change.attrib['enc']
new_version = b'(pending)'
old_data = b''
new_data = b''
binary = (encoding == '-1')
copied = 'Branch' in action
if (not file_type or (not os.path.isfile(local_filename) and
'Delete' not in action)):
continue
if (exclude_patterns and
filename_match_any_patterns(local_filename,
exclude_patterns,
base_dir=None)):
continue
if 'Add' in action:
old_filename = b'/dev/null'
if not binary:
with open(local_filename, 'rb') as f:
new_data = f.read()
old_data = b''
elif 'Delete' in action:
old_data = self._run_tf(
['vc', 'view', '/version:%s' % old_version.decode('utf-8'),
old_filename.decode('utf-8')],
results_unicode=False)
new_data = b''
new_version = b'(deleted)'
elif 'Edit' in action:
if not binary:
old_data = self._run_tf(
['vc', 'view', old_filename.decode('utf-8'),
'/version:%s' % old_version.decode('utf-8')],
results_unicode=False)
with open(local_filename, 'rb') as f:
new_data = f.read()
old_label = b'%s\t%s' % (old_filename, old_version)
new_label = b'%s\t%s' % (new_filename, new_version)
if copied:
diff.append(b'Copied from: %s\n' % old_filename)
if binary:
if 'Add' in action:
old_filename = new_filename
diff.append(b'--- %s\n' % old_label)
diff.append(b'+++ %s\n' % new_label)
diff.append(b'Binary files %s and %s differ\n'
% (old_filename, new_filename))
elif old_filename != new_filename and old_data == new_data:
# Renamed file with no changes.
diff.append(b'--- %s\n' % old_label)
diff.append(b'+++ %s\n' % new_label)
else:
old_tmp = tempfile.NamedTemporaryFile(delete=False)
old_tmp.write(old_data)
old_tmp.close()
new_tmp = tempfile.NamedTemporaryFile(delete=False)
new_tmp.write(new_data)
new_tmp.close()
unified_diff = execute(
['diff', '-u',
'--label', old_label.decode('utf-8'),
'--label', new_label.decode('utf-8'),
old_tmp.name, new_tmp.name],
extra_ignore_errors=(1,),
log_output_on_error=False,
results_unicode=False)
diff.append(unified_diff)
os.unlink(old_tmp.name)
os.unlink(new_tmp.name)
return {
'diff': b''.join(diff),
'parent_diff': None,
'base_commit_id': base,
}
def _run_tf(self, args, **kwargs):
"""Run the "tf" command.
Args:
args (list):
A list of arguments to pass to rb-tfs.
**kwargs (dict):
Additional keyword arguments for the :py:meth:`execute` call.
Returns:
unicode:
The output of the command.
"""
command = ['tf'] + args + ['/noprompt']
if getattr(self.options, 'tfs_login', None):
command.append('/login:%s' % self.options.tfs_login)
return execute(command, ignore_errors=True, **kwargs)
class TEEWrapper(object):
"""Implementation wrapper for using Team Explorer Everywhere."""
REVISION_WORKING_COPY = '--rbtools-working-copy'
def __init__(self, config=None, options=None):
"""Initialize the wrapper.
Args:
config (dict, optional):
The loaded configuration.
options (argparse.Namespace, optional):
The command line options.
"""
self.config = config
self.options = options
self.tf = None
tf_locations = []
if options and getattr(options, 'tf_cmd', None):
tf_locations.append(options.tf_cmd)
if sys.platform.startswith('win'):
# First check in the system path. If that doesn't work, look in the
# two standard install locations.
tf_locations.extend([
'tf.cmd',
(r'%programfiles(x86)%\Microsoft Visual Studio 12.0\Common7'
r'\IDE\tf.cmd'),
(r'%programfiles%\Microsoft Team Foundation Server 12.0\Tools'
r'\tf.cmd'),
])
else:
tf_locations.append('tf')
for location in tf_locations:
location = os.path.expandvars(location)
if check_install([location, 'help']):
self.tf = location
break
def get_local_path(self):
"""Return the local path to the working tree.
Returns:
unicode:
The filesystem path of the repository on the client system.
"""
if self.tf is None:
logging.debug('Unable to execute "tf help": skipping TFS')
return None
workfold = self._run_tf(['workfold', os.getcwd()])
m = re.search('^Collection: (.*)$', workfold, re.MULTILINE)
if m:
return unquote(m.group(1))
logging.debug('Could not find the collection from "tf workfold"')
return None
def get_repository_info(self):
"""Return repository information for the current working tree.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure.
"""
path = self.get_local_path()
if path:
# Now that we know it's TFS, make sure we have GNU diff installed,
# and error out if we don't.
check_gnu_diff()
return RepositoryInfo(path=path, local_path=path)
return None
def parse_revision_spec(self, revisions):
"""Parse the given revision spec.
Args:
revisions (list of unicode):
A list of revisions as specified by the user. Items in the list
do not necessarily represent a single revision, since the user
can use the TFS-native syntax of ``r1~r2``. Versions passed in
can be any versionspec, such as a changeset number,
``L``-prefixed label name, ``W`` (latest workspace version), or
``T`` (latest upstream version).
Returns:
dict:
A dictionary with the following keys:
``base`` (:py:class:`unicode`):
A revision to use as the base of the resulting diff.
``tip`` (:py:class:`unicode`):
A revision to use as the tip of the resulting diff.
``parent_base`` (:py:class:`unicode`, optional):
The revision to use as the base of a parent diff.
These will be used to generate the diffs to upload to Review Board
(or print). The diff for review will include the changes in (base,
tip], and the parent diff (if necessary) will include (parent,
base].
If a single revision is passed in, this will return the parent of
that revision for "base" and the passed-in revision for "tip".
If zero revisions are passed in, this will return revisions
relevant for the "current change" (changes in the work folder which
have not yet been checked in).
Raises:
rbtools.clients.errors.TooManyRevisionsError:
Too many revisions were specified.
rbtools.clients.errors.InvalidRevisionSpecError:
The given revision spec could not be parsed.
"""
n_revisions = len(revisions)
if n_revisions == 1 and '~' in revisions[0]:
revisions = revisions[0].split('~')
n_revisions = len(revisions)
if n_revisions == 0:
# Most recent checked-out revision -- working copy
return {
'base': self._convert_symbolic_revision('W'),
'tip': self.REVISION_WORKING_COPY,
}
elif n_revisions == 1:
# Either a numeric revision (n-1:n) or a changelist
revision = self._convert_symbolic_revision(revisions[0])
return {
'base': revision - 1,
'tip': revision,
}
elif n_revisions == 2:
# Diff between two numeric revisions
return {
'base': self._convert_symbolic_revision(revisions[0]),
'tip': self._convert_symbolic_revision(revisions[1]),
}
else:
raise TooManyRevisionsError
return {
'base': None,
'tip': None,
}
def _convert_symbolic_revision(self, revision, path=None):
"""Convert a symbolic revision into a numeric changeset.
Args:
revision (unicode):
The TFS versionspec to convert.
path (unicode, optional):
The itemspec that the revision applies to.
Returns:
int:
The changeset number corresponding to the versionspec.
"""
args = ['history', '-stopafter:1', '-recursive', '-format:xml']
        # 'tf history -version:W' doesn't seem to work (even though it's
# supposed to). Luckily, W is the default when -version isn't passed,
# so just elide it.
if revision != 'W':
args.append('-version:%s' % revision)
args.append(path or os.getcwd())
# We pass results_unicode=False because that uses the filesystem
# encoding to decode the output, but the XML results we get should
# always be UTF-8, and are well-formed with the encoding specified. We
# can therefore let ElementTree determine how to decode it.
data = self._run_tf(args, results_unicode=False)
try:
root = ET.fromstring(data)
item = root.find('./changeset')
if item is not None:
return int(item.attrib['id'])
else:
raise Exception('No changesets found')
except Exception as e:
logging.debug('Failed to parse output from "tf history": %s\n%s',
e, data, exc_info=True)
raise InvalidRevisionSpecError(
'"%s" does not appear to be a valid versionspec' % revision)
def diff(self, revisions, include_files, exclude_patterns):
"""Return the generated diff.
Args:
revisions (dict):
A dictionary containing ``base`` and ``tip`` keys.
include_files (list):
A list of file paths to include in the diff.
exclude_patterns (list):
A list of file paths to exclude from the diff.
Returns:
dict:
A dictionary containing the following keys:
``diff`` (:py:class:`bytes`):
The contents of the diff to upload.
            ``base_commit_id`` (:py:class:`unicode`, optional):
The ID of the commit that the change is based on, if available.
This is necessary for some hosting services that don't provide
individual file access.
"""
base = str(revisions['base'])
tip = str(revisions['tip'])
if tip == self.REVISION_WORKING_COPY:
return self._diff_working_copy(base, include_files,
exclude_patterns)
else:
raise SCMError('Posting committed changes is not yet supported '
'for TFS when using the Team Explorer Everywhere '
'wrapper.')
def _diff_working_copy(self, base, include_files, exclude_patterns):
"""Return a diff of the working copy.
Args:
base (unicode):
The base revision to diff against.
include_files (list):
A list of file paths to include in the diff.
exclude_patterns (list):
A list of file paths to exclude from the diff.
Returns:
dict:
A dictionary containing ``diff``, ``parent_diff``, and
``base_commit_id`` keys. In the case of TFS, the parent diff key
will always be ``None``.
"""
# We pass results_unicode=False because that uses the filesystem
# encoding, but the XML results we get should always be UTF-8, and are
# well-formed with the encoding specified. We can therefore let
# ElementTree determine how to decode it.
status = self._run_tf(['status', '-format:xml'], results_unicode=False)
root = ET.fromstring(status)
diff = []
for pending_change in root.findall('./pending-changes/pending-change'):
action = pending_change.attrib['change-type'].split(', ')
new_filename = pending_change.attrib['server-item'].encode('utf-8')
local_filename = pending_change.attrib['local-item']
old_version = pending_change.attrib['version'].encode('utf-8')
file_type = pending_change.attrib.get('file-type')
new_version = b'(pending)'
old_data = b''
new_data = b''
copied = 'branch' in action
if (not file_type or (not os.path.isfile(local_filename) and
'delete' not in action)):
continue
if (exclude_patterns and
filename_match_any_patterns(local_filename,
exclude_patterns,
base_dir=None)):
continue
if 'rename' in action:
old_filename = \
pending_change.attrib['source-item'].encode('utf-8')
else:
old_filename = new_filename
if copied:
old_filename = \
pending_change.attrib['source-item'].encode('utf-8')
                old_version = (
                    '%d' % self._convert_symbolic_revision(
                        'W', old_filename.decode('utf-8'))
                ).encode('utf-8')
if 'add' in action:
old_filename = b'/dev/null'
if file_type != 'binary':
                    with open(local_filename, 'rb') as f:
new_data = f.read()
old_data = b''
elif 'delete' in action:
old_data = self._run_tf(
['print', '-version:%s' % old_version.decode('utf-8'),
old_filename.decode('utf-8')],
results_unicode=False)
new_data = b''
new_version = b'(deleted)'
elif 'edit' in action:
old_data = self._run_tf(
['print', '-version:%s' % old_version.decode('utf-8'),
old_filename.decode('utf-8')],
results_unicode=False)
                with open(local_filename, 'rb') as f:
new_data = f.read()
old_label = b'%s\t%s' % (old_filename, old_version)
new_label = b'%s\t%s' % (new_filename, new_version)
if copied:
diff.append(b'Copied from: %s\n' % old_filename)
if file_type == 'binary':
if 'add' in action:
old_filename = new_filename
diff.append(b'--- %s\n' % old_label)
diff.append(b'+++ %s\n' % new_label)
diff.append(b'Binary files %s and %s differ\n'
% (old_filename, new_filename))
elif old_filename != new_filename and old_data == new_data:
# Renamed file with no changes
diff.append(b'--- %s\n' % old_label)
diff.append(b'+++ %s\n' % new_label)
else:
old_tmp = tempfile.NamedTemporaryFile(delete=False)
old_tmp.write(old_data)
old_tmp.close()
new_tmp = tempfile.NamedTemporaryFile(delete=False)
new_tmp.write(new_data)
new_tmp.close()
unified_diff = execute(
['diff', '-u',
'--label', old_label.decode('utf-8'),
'--label', new_label.decode('utf-8'),
old_tmp.name, new_tmp.name],
extra_ignore_errors=(1,),
log_output_on_error=False,
results_unicode=False)
diff.append(unified_diff)
os.unlink(old_tmp.name)
os.unlink(new_tmp.name)
if len(root.findall('./candidate-pending-changes/pending-change')) > 0:
logging.warning('There are added or deleted files which have not '
'been added to TFS. These will not be included '
'in your review request.')
return {
'diff': b''.join(diff),
'parent_diff': None,
'base_commit_id': base,
}
def _run_tf(self, args, **kwargs):
"""Run the "tf" command.
Args:
args (list):
A list of arguments to pass to rb-tfs.
**kwargs (dict):
Additional keyword arguments for the :py:meth:`execute` call.
Returns:
unicode:
The output of the command.
"""
cmdline = [self.tf, '-noprompt']
if getattr(self.options, 'tfs_login', None):
cmdline.append('-login:%s' % self.options.tfs_login)
cmdline += args
# Use / style arguments when running on windows.
if sys.platform.startswith('win'):
for i, arg in enumerate(cmdline):
if arg.startswith('-'):
cmdline[i] = '/' + arg[1:]
return execute(cmdline, ignore_errors=True, **kwargs)
class TFHelperWrapper(object):
"""Implementation wrapper using our own helper."""
def __init__(self, helper_path, config=None, options=None):
"""Initialize the wrapper.
Args:
helper_path (unicode):
The path to the helper binary.
config (dict, optional):
The loaded configuration.
options (argparse.Namespace, optional):
The command line options.
"""
self.helper_path = helper_path
self.config = config
self.options = options
def get_local_path(self):
"""Return the local path to the working tree.
Returns:
unicode:
The filesystem path of the repository on the client system.
"""
rc, path, errors = self._run_helper(['get-collection'],
ignore_errors=True)
if rc == 0:
return path.strip()
return None
def get_repository_info(self):
"""Return repository information for the current working tree.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure.
"""
path = self.get_local_path()
if path:
return RepositoryInfo(path=path, local_path=path)
return None
def parse_revision_spec(self, revisions):
"""Parse the given revision spec.
Args:
revisions (list of unicode):
A list of revisions as specified by the user. Items in the list
do not necessarily represent a single revision, since the user
can use the TFS-native syntax of ``r1~r2``. Versions passed in
can be any versionspec, such as a changeset number,
``L``-prefixed label name, ``W`` (latest workspace version), or
``T`` (latest upstream version).
Returns:
dict:
A dictionary with the following keys:
``base`` (:py:class:`unicode`):
A revision to use as the base of the resulting diff.
``tip`` (:py:class:`unicode`):
A revision to use as the tip of the resulting diff.
``parent_base`` (:py:class:`unicode`, optional):
The revision to use as the base of a parent diff.
These will be used to generate the diffs to upload to Review Board
(or print). The diff for review will include the changes in (base,
tip], and the parent diff (if necessary) will include (parent,
base].
If a single revision is passed in, this will return the parent of
that revision for "base" and the passed-in revision for "tip".
If zero revisions are passed in, this will return revisions
relevant for the "current change" (changes in the work folder which
have not yet been checked in).
Raises:
rbtools.clients.errors.TooManyRevisionsError:
Too many revisions were specified.
rbtools.clients.errors.InvalidRevisionSpecError:
The given revision spec could not be parsed.
"""
if len(revisions) > 2:
raise TooManyRevisionsError
rc, revisions, errors = self._run_helper(
['parse-revision'] + revisions, split_lines=True)
if rc == 0:
return {
'base': revisions[0].strip(),
'tip': revisions[1].strip()
}
else:
raise InvalidRevisionSpecError('\n'.join(errors))
def diff(self, revisions, include_files, exclude_patterns):
"""Return the generated diff.
Args:
revisions (dict):
A dictionary containing ``base`` and ``tip`` keys.
include_files (list):
A list of file paths to include in the diff.
exclude_patterns (list):
A list of file paths to exclude from the diff.
Returns:
dict:
A dictionary containing the following keys:
``diff`` (:py:class:`bytes`):
The contents of the diff to upload.
            ``base_commit_id`` (:py:class:`unicode`, optional):
The ID of the commit that the change is based on, if available.
This is necessary for some hosting services that don't provide
individual file access.
Raises:
rbtools.clients.errors.SCMError:
Something failed when creating the diff.
"""
base = revisions['base']
tip = revisions['tip']
rc, diff, errors = self._run_helper(['diff', '--', base, tip],
ignore_errors=True,
results_unicode=False,
log_output_on_error=False)
if rc in (0, 2):
if rc == 2:
# Magic return code that means success, but there were
# un-tracked files in the working directory.
logging.warning('There are added or deleted files which have '
'not been added to TFS. These will not be '
'included in your review request.')
return {
'diff': diff,
'parent_diff': None,
'base_commit_id': None,
}
else:
raise SCMError(errors.strip())
def _run_helper(self, args, **kwargs):
"""Run the rb-tfs binary.
Args:
args (list):
A list of arguments to pass to rb-tfs.
**kwargs (dict):
Additional keyword arguments for the :py:meth:`execute` call.
Returns:
tuple:
A 3-tuple of return code, output, and error output. The output and
error output may be lists depending on the contents of ``kwargs``.
"""
if len(args) == 0:
raise ValueError('_run_helper called without any arguments')
cmdline = ['java']
cmdline += getattr(self.config, 'JAVA_OPTS', ['-Xmx2048M'])
cmdline += ['-jar', self.helper_path]
cmdline.append(args[0])
if self.options:
if self.options.debug:
cmdline.append('--debug')
if getattr(self.options, 'tfs_shelveset_owner', None):
cmdline += ['--shelveset-owner',
self.options.tfs_shelveset_owner]
if getattr(self.options, 'tfs_login', None):
cmdline += ['--login', self.options.tfs_login]
cmdline += args[1:]
return execute(cmdline,
with_errors=False,
results_unicode=False,
return_error_code=True,
return_errors=True,
**kwargs)
class TFSClient(SCMClient):
"""A client for Team Foundation Server."""
name = 'Team Foundation Server'
server_tool_names = 'Team Foundation Server'
supports_diff_exclude_patterns = True
supports_patch_revert = True
def __init__(self, config=None, options=None):
"""Initialize the client.
Args:
config (dict, optional):
The loaded configuration.
options (argparse.Namespace, optional):
The command line options.
"""
super(TFSClient, self).__init__(config, options)
# There are three different backends that can be used to access the
# underlying TFS repository. We try them in this order:
# - VS2017+ tf.exe
# - Our custom rb-tfs wrapper, built on the TFS Java SDK
# - Team Explorer Everywhere's tf command
use_tf_exe = False
try:
tf_vc_output = execute(['tf', 'vc', 'help'], ignore_errors=True,
none_on_ignored_error=True)
# VS2015 has a tf.exe but it's not good enough.
if (tf_vc_output and
'Version Control Tool, Version 15' in tf_vc_output):
use_tf_exe = True
except OSError:
pass
helper_path = os.path.join(user_data_dir('rbtools'), 'packages', 'tfs',
'rb-tfs.jar')
if use_tf_exe:
self.tf_wrapper = TFExeWrapper(config, options)
elif os.path.exists(helper_path):
self.tf_wrapper = TFHelperWrapper(helper_path, config, options)
else:
self.tf_wrapper = TEEWrapper(config, options)
def get_local_path(self):
"""Return the local path to the working tree.
Returns:
unicode:
The filesystem path of the repository on the client system.
"""
return self.tf_wrapper.get_local_path()
def get_repository_info(self):
"""Return repository information for the current working tree.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure.
"""
return self.tf_wrapper.get_repository_info()
def parse_revision_spec(self, revisions):
"""Parse the given revision spec.
Args:
revisions (list of unicode):
A list of revisions as specified by the user. Items in the list
do not necessarily represent a single revision, since the user
can use the TFS-native syntax of ``r1~r2``. Versions passed in
can be any versionspec, such as a changeset number,
``L``-prefixed label name, ``W`` (latest workspace version), or
``T`` (latest upstream version).
Returns:
dict:
A dictionary with the following keys:
``base`` (:py:class:`unicode`):
A revision to use as the base of the resulting diff.
``tip`` (:py:class:`unicode`):
A revision to use as the tip of the resulting diff.
``parent_base`` (:py:class:`unicode`, optional):
The revision to use as the base of a parent diff.
These will be used to generate the diffs to upload to Review Board
(or print). The diff for review will include the changes in (base,
tip], and the parent diff (if necessary) will include (parent,
base].
If a single revision is passed in, this will return the parent of
that revision for "base" and the passed-in revision for "tip".
If zero revisions are passed in, this will return revisions
relevant for the "current change" (changes in the work folder which
have not yet been checked in).
Raises:
rbtools.clients.errors.TooManyRevisionsError:
Too many revisions were specified.
rbtools.clients.errors.InvalidRevisionSpecError:
The given revision spec could not be parsed.
"""
return self.tf_wrapper.parse_revision_spec(revisions)
def diff(self, revisions, include_files=[], exclude_patterns=[],
no_renames=False, extra_args=[]):
"""Return the generated diff.
Args:
revisions (dict):
A dictionary containing ``base`` and ``tip`` keys.
include_files (list, optional):
A list of file paths to include in the diff.
exclude_patterns (list, optional):
A list of file paths to exclude from the diff.
extra_args (list, optional):
Unused.
Returns:
dict:
A dictionary containing the following keys:
``diff`` (:py:class:`bytes`):
The contents of the diff to upload.
            ``base_commit_id`` (:py:class:`unicode`, optional):
The ID of the commit that the change is based on, if available.
This is necessary for some hosting services that don't provide
individual file access.
"""
return self.tf_wrapper.diff(revisions, include_files, exclude_patterns)
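# --- Illustrative sketch (not part of RBTools) -------------------------------
# The wrappers above share two small pieces of logic that are easy to exercise
# in isolation: splitting a TFS "r1~r2" revision spec into its two halves, and
# rewriting "-flag" arguments to "/flag" style on Windows. The helper names
# below are hypothetical and exist only for this sketch.
def _split_revision_spec(revisions):
    # Mirrors the '~' handling in parse_revision_spec: a single "r1~r2"
    # argument is treated as two separate revisions.
    if len(revisions) == 1 and '~' in revisions[0]:
        return revisions[0].split('~')
    return list(revisions)


def _windows_style_args(cmdline):
    # Mirrors the argument rewriting in _run_tf for Windows platforms.
    return ['/' + arg[1:] if arg.startswith('-') else arg for arg in cmdline]


if __name__ == '__main__':
    print(_split_revision_spec(['100~105']))  # ['100', '105']
    print(_windows_style_args(['tf', '-noprompt', '-login:user']))
    # ['tf', '/noprompt', '/login:user']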
| python |
########### IMPORTING THE REQURIED LIBRARIES ###########
from __future__ import print_function
from bs4 import BeautifulSoup as soup
from random import choice
from terminaltables import AsciiTable
from .proxy import _proxy
from .utils import *
import requests
######## DECLARING THE CLASS FOR GETTING COVID-19 DATA ########
class Corona:
proxy = _proxy()
######## GETTING THE HTML PAGE THROUGH GET REQUEST ########
def getPageResponse( self, url ):
page = None
try:
resp = requests.get( url, timeout = MAX_TIMEOUT )
page = soup( resp.text, 'lxml' )
except requests.ConnectionError:
print( "\n###### STARTING RANDOM PROXIES #######\n" );
resp = self.proxy.loadDataByIPRotation( url )
page = soup( resp.text, 'lxml' )
return page
def extractCounts( self, page, choice = "w" ):
total_cases = None
total_deaths = None
total_cured = None
if( choice == "w" ):
total_cases = page.findAll( "div", {
"id": "maincounter-wrap"
} )[ 0 ].div.text.strip()
total_deaths = page.findAll( "div", {
"id": "maincounter-wrap"
} )[ 1 ].div.text.strip()
total_cured = page.findAll( "div", {
"id": "maincounter-wrap"
} )[ 2 ].div.text.strip()
elif( choice == "c" ):
total_cases = int( extractNumbers( page.findAll( "div",{
"class": "table-responsive"
} )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 1 ].text.strip() ) )
total_cases += int( page.findAll( "div",{
"class": "table-responsive"
} )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 2 ].text.strip() )
total_deaths = int( page.findAll( "div",{
"class": "table-responsive"
} )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 4 ].text.strip() )
total_cured = int( page.findAll( "div",{
"class": "table-responsive"
} )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 3 ].text.strip() )
counts = AsciiTable( [
[ "Total Cases", "Total Deaths", "Total Cured" ],
[ total_cases, total_deaths, total_cured ]
] )
return counts
########## EXTRACTING THE TABLE ###########
def extractTableData( self, page, choice = "w" ):
table = None
table_heading = None
table_content = None
if choice == "w":
try:
table = page.find( "table",{
"id": "main_table_countries_today"
} )
# table_heading = [ item.text.strip() for item in table.thead.tr if item != "\n" ]
table_heading = [ "Country", "Confirmed\nCases", "New Cases", "Confirmed\nDeaths", "New Deaths", "Recovered", "Active cases", "Serious/\nCritical cases" ];
table_content = []
for rows in table.tbody:
data = [ item.text.strip() for item in rows if item != "\n" ]
if data:
table_content.append( data[ : -2 ] )
table_content.insert( 0, table_heading )
table = AsciiTable( table_content )
            except Exception:
                print("\nSource page format has changed.")
                exit()
elif choice == "c":
try:
table = page.findAll( "div",{
"class": "table-responsive"
} )[ 7 ]
# table_heading = [ item.text.strip() for item in table.thead.tr if item != "\n" ]
table_heading = [ "Sl. No.", "States/\nUnion Territories", "Confirmed cases\n( Indian National )", "Confirmed cases\n( Foreign National )", "Cured/Discharged/\nMigrated", "Death" ];
table_content = []
for rows in table.tbody:
data = [ item.text.strip() for item in rows if item != "\n" ]
if data:
table_content.append( data )
table_content.insert( 0, table_heading )
table = AsciiTable( table_content[ : -2 ] )
            except Exception:
                print("\nSource page format has changed.")
                exit()
return table
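# --- Illustrative sketch (not part of this scraper) ---------------------------
# extractCounts() above returns a terminaltables.AsciiTable built from a header
# row and a single data row. A minimal standalone example of that pattern,
# using made-up numbers:
if __name__ == '__main__':
    example = AsciiTable([
        ["Total Cases", "Total Deaths", "Total Cured"],
        ["1,000", "10", "900"],
    ])
    print(example.table)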
| python |
#!/usr/bin/env python3
import sys
import re
# www.hackerrank.com
# http://www.hackerrank.com
# Regex_Pattern = r'^\w{3}\W{1}\w+\W{1}\w{3}$'
Regex_Pattern = r'^\d{1}\w{4}\.$'
print(str(bool(re.search(Regex_Pattern, input()))).lower())
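# Illustrative note (not part of the original exercise): the pattern above
# matches exactly one digit, four word characters and a trailing literal dot,
# so inputs such as "1qwer." or "12345." print "true", while
# "www.hackerrank.com" or "1qwer" print "false".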
| python |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re
from waflib import Utils,Options,Context
gnuopts='''
bindir, user commands, ${EXEC_PREFIX}/bin
sbindir, system binaries, ${EXEC_PREFIX}/sbin
libexecdir, program-specific binaries, ${EXEC_PREFIX}/libexec
sysconfdir, host-specific configuration, ${PREFIX}/etc
sharedstatedir, architecture-independent variable data, ${PREFIX}/com
localstatedir, variable data, ${PREFIX}/var
libdir, object code libraries, ${EXEC_PREFIX}/lib%s
includedir, header files, ${PREFIX}/include
oldincludedir, header files for non-GCC compilers, /usr/include
datarootdir, architecture-independent data root, ${PREFIX}/share
datadir, architecture-independent data, ${DATAROOTDIR}
infodir, GNU "info" documentation, ${DATAROOTDIR}/info
localedir, locale-dependent data, ${DATAROOTDIR}/locale
mandir, manual pages, ${DATAROOTDIR}/man
docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE}
htmldir, HTML documentation, ${DOCDIR}
dvidir, DVI documentation, ${DOCDIR}
pdfdir, PDF documentation, ${DOCDIR}
psdir, PostScript documentation, ${DOCDIR}
'''%Utils.lib64()
_options=[x.split(', ')for x in gnuopts.splitlines()if x]
def configure(conf):
def get_param(varname,default):
return getattr(Options.options,varname,'')or default
env=conf.env
env.LIBDIR=env.BINDIR=[]
env.EXEC_PREFIX=get_param('EXEC_PREFIX',env.PREFIX)
env.PACKAGE=getattr(Context.g_module,'APPNAME',None)or env.PACKAGE
complete=False
iter=0
while not complete and iter<len(_options)+1:
iter+=1
complete=True
for name,help,default in _options:
name=name.upper()
if not env[name]:
try:
env[name]=Utils.subst_vars(get_param(name,default).replace('/',os.sep),env)
except TypeError:
complete=False
if not complete:
lst=[x for x,_,_ in _options if not env[x.upper()]]
raise conf.errors.WafError('Variable substitution failure %r'%lst)
def options(opt):
inst_dir=opt.add_option_group('Installation prefix','By default, "waf install" will put the files in\
"/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\
than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"')
for k in('--prefix','--destdir'):
option=opt.parser.get_option(k)
if option:
opt.parser.remove_option(k)
inst_dir.add_option(option)
inst_dir.add_option('--exec-prefix',help='installation prefix for binaries [PREFIX]',default='',dest='EXEC_PREFIX')
dirs_options=opt.add_option_group('Installation directories')
for name,help,default in _options:
option_name='--'+name
str_default=default
str_help='%s [%s]'%(help,re.sub(r'\$\{([^}]+)\}',r'\1',str_default))
dirs_options.add_option(option_name,help=str_help,default='',dest=name.upper())
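# --- Illustrative sketch (not part of waf) ------------------------------------
# configure() above expands ${VAR} references iteratively until every
# installation directory is resolved. A standalone approximation of that
# substitution, using a hypothetical environment dict:
def _subst_vars_sketch(expr, env):
    return re.sub(r'\$\{([^}]+)\}', lambda m: env.get(m.group(1), ''), expr)

if __name__ == '__main__':
    env = {'PREFIX': '/usr/local', 'EXEC_PREFIX': '/usr/local'}
    env['DATAROOTDIR'] = _subst_vars_sketch('${PREFIX}/share', env)
    print(_subst_vars_sketch('${DATAROOTDIR}/man', env))  # /usr/local/share/man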
| python |
import logging
import numpy
import parse_cif_file
import os
import sys
from operator import itemgetter
def get_dihedral_angle1(p0,p1,p2,p3):
"""http://stackoverflow.com/q/20305272/1128289"""
    p = numpy.array([p0, p1, p2, p3])
    b = p[:-1] - p[1:]
    b[0] *= -1
    v = numpy.array([v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]]])
    # Normalize vectors
    v /= numpy.sqrt(numpy.einsum('...i,...i', v, v)).reshape(-1, 1)
    b1 = b[1] / numpy.linalg.norm(b[1])
    x = numpy.dot(v[0], v[1])
    m = numpy.cross(v[0], b1)
    y = numpy.dot(m, v[1])
    return numpy.degrees(numpy.arctan2(y, x))
def get_dihedral_angle2(p0,p1,p2,p3):
"""formula from Wikipedia article on "Dihedral angle"; formula was removed
from the most recent version of article (no idea why, the article is a
mess at the moment) but the formula can be found in at this permalink to
an old version of the article:
https://en.wikipedia.org/w/index.php?title=Dihedral_angle&oldid=689165217#Angle_between_three_vectors
uses 1 sqrt, 3 cross products"""
# p0 = p[0]
# p1 = p[1]
# p2 = p[2]
# p3 = p[3]
b0 = -1.0*(p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
b0xb1 = numpy.cross(b0, b1)
b1xb2 = numpy.cross(b2, b1)
b0xb1_x_b1xb2 = numpy.cross(b0xb1, b1xb2)
y = numpy.dot(b0xb1_x_b1xb2, b1)*(1.0/numpy.linalg.norm(b1))
x = numpy.dot(b0xb1, b1xb2)
return numpy.degrees(numpy.arctan2(y, x))
def get_dihedral_angle(p0, p1, p2, p3):
"""Praxeolitic formula
1 sqrt, 1 cross product"""
b0 = -1.0 * (p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= numpy.linalg.norm(b1)
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = b0 - numpy.dot(b0, b1) * b1
w = b2 - numpy.dot(b2, b1) * b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = numpy.dot(v, w)
y = numpy.dot(numpy.cross(b1, v), w)
return numpy.degrees(numpy.arctan2(y, x))
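# Illustrative worked example (not part of the original script): for the four
# points (0,0,0), (1,0,0), (1,1,0) and (1,1,1), the rejections are
# v = (-1,0,0) and w = (0,0,1), so x = 0, y = 1 and the dihedral angle is
# degrees(arctan2(1, 0)) = 90.0.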
def calculate_dihedral_angles(cif_file_name, in_dir, out_dir):
cif_file = '{}/{}'.format(in_dir, cif_file_name)
cif,bf,ent_id = parse_cif_file.get_coordinates(cif_file)
#cif= parse_cif_file.get_coordinates(cif_file)
outfilename = '{}/{}.csv'.format(out_dir, cif_file_name.split(".cif")[0])
fo = open(outfilename, 'w')
for model in cif.keys():
seq = sorted(list(set([(i[0], i[1], i[2]) for i in cif[model].keys()])),key=itemgetter(1, 0))
for r in range(1, len(seq) - 1):
phi_atoms = ((seq[r - 1][0], seq[r - 1][1], seq[r - 1][2], 'C'),
(seq[r][0], seq[r][1], seq[r][2], 'N'),
(seq[r][0], seq[r][1], seq[r][2], 'CA'),
(seq[r][0], seq[r][1], seq[r][2], 'C'))
psi_atoms = ((seq[r][0], seq[r][1], seq[r][2], 'N'),
(seq[r][0], seq[r][1], seq[r][2], 'CA'),
(seq[r][0], seq[r][1], seq[r][2], 'C'),
(seq[r + 1][0], seq[r + 1][1], seq[r + 1][2], 'N'))
try:
phi = get_dihedral_angle2(cif[model][phi_atoms[0]],
cif[model][phi_atoms[1]],
cif[model][phi_atoms[2]],
cif[model][phi_atoms[3]])
psi = get_dihedral_angle2(cif[model][psi_atoms[0]],
cif[model][psi_atoms[1]],
cif[model][psi_atoms[2]],
cif[model][psi_atoms[3]])
b=bf[model][phi_atoms[1]]
if seq[r+1][2] == 'PRO':
rtype='XPR'
elif r==1 or r==(len(seq)-2):
rtype='TER'
elif seq[r][2] == 'GLY':
rtype='GLY'
else:
rtype='REG'
fo.write('{},{},{},{},{},{},{}\n'.format(seq[r][0], seq[r][2], round(phi,4), round(psi,4),b,rtype,ent_id))
except KeyError:
logging.warning('Coordinate data not found for {}/{}'.format(phi_atoms, psi_atoms))
if __name__ == "__main__":
# calculate_dihedral_angles('4txr.cif','/Users/kumaran/Downloads','/Users/kumaran')
in_path = sys.argv[1]
out_path = sys.argv[2]
flist = [_ for _ in os.listdir(in_path) if _.endswith('.cif')]
for fname in flist:
print (fname)
logging.info('Working on {}'.format(fname))
calculate_dihedral_angles(fname, in_path, out_path)
| python |
# -*- coding: utf-8 -*-
"""
cdeweb.errors
~~~~~~~~~~~~~
Error views.
:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from flask import render_template, request, jsonify
from . import app
log = logging.getLogger(__name__)
def get_message(e):
if hasattr(e, 'data') and 'messages' in e.data:
return e.data['messages']
if hasattr(e, 'description'):
return e.description
elif hasattr(e, 'msg'):
return e.msg
elif hasattr(e, 'message'):
return e.message
else:
return repr(e)
@app.errorhandler(400)
def bad_request(e):
if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'bad request', 'message': get_message(e)})
response.status_code = 400
return response
return render_template('400.html', description=get_message(e)), 400
@app.errorhandler(403)
def forbidden(e):
if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'forbidden', 'message': get_message(e)})
response.status_code = 403
return response
return render_template('403.html', description=get_message(e)), 403
@app.errorhandler(404)
def page_not_found(e):
if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'not found', 'message': get_message(e)})
response.status_code = 404
return response
return render_template('404.html', description=get_message(e)), 404
@app.errorhandler(422)
def unprocessable_entity(e):
if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'unprocessable entity', 'message': get_message(e)})
response.status_code = 422
return response
return render_template('422.html', description=get_message(e)), 422
@app.errorhandler(500)
def internal_server_error(e):
if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'internal server error', 'message': get_message(e)})
response.status_code = 500
return response
return render_template('500.html', description=get_message(e)), 500
@app.errorhandler(503)
def service_unavailable(e):
if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'service unavailable', 'message': get_message(e)})
response.status_code = 503
return response
return render_template('503.html', description=get_message(e)), 503
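# --- Illustrative sketch (not part of cdeweb) ----------------------------------
# Every handler above applies the same content negotiation: respond with JSON
# when the client accepts JSON but not HTML, otherwise render an HTML template.
# A standalone rendering of that decision, using a namedtuple as a stand-in for
# flask's request.accept_mimetypes:
from collections import namedtuple

_Accept = namedtuple('_Accept', ['accept_json', 'accept_html'])


def _wants_json(accept):
    return accept.accept_json and not accept.accept_html


if __name__ == '__main__':
    print(_wants_json(_Accept(accept_json=True, accept_html=False)))  # True
    print(_wants_json(_Accept(accept_json=True, accept_html=True)))   # False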
| python |
from setuptools import setup
setup(
name='zipf',
version='0.1',
author='Amira Khan',
packages=['zipf'],
install_requires=[
'matplotlib',
'pandas',
'scipy',
'pyyaml',
'pytest'],
entry_points={
'console_scripts': [
'countwords = zipf.countwords:main',
'collate = zipf.collate:main',
'plotcounts = zipf.plotcounts:main']})
| python |
from abc import ABC, abstractmethod
import ccxt
from PySide6 import QtWidgets
# import ccxt.async_support as ccxt
from XsCore import xsIni
from ccxt import Exchange
class PluginBase(ABC):
name: str = ""
display_name: str = ""
info: str = ""
help_doc = "" # 不重写为没有文档 使用文档说明,为md文件,存放database的plugin_help下
def __init__(self):
self.exchange: Exchange = None
@abstractmethod
    def get_ui(self) -> QtWidgets.QVBoxLayout:
pass
def init_exchange(self, ex_name):
# config = {
# 'proxies': {
# 'http': 'http://127.0.0.1:41081',
# 'https': 'http://127.0.0.1:41081'
# },
# 'verbose': True
# }
config = {}
value = xsIni.getAppValue('api_key')
if value != '':
config['apiKey'] = value
value = xsIni.getAppValue('api_secret')
if value != '':
config['secret'] = value
value = xsIni.getAppValue('api_changepass')
if value != '':
config['password'] = value
http = xsIni.getAppValue('daiLi_http')
https = xsIni.getAppValue('daiLi_https')
if http != '' and https != '':
config['proxies'] = {
'http': http,
'https': https
}
value = xsIni.getAppBool('is_print_log')
if value:
config['verbose'] = True
self.exchange: Exchange = getattr(ccxt, ex_name)(config)
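# --- Illustrative sketch (not part of XsCore) ----------------------------------
# A concrete plugin is expected to fill in the class attributes and implement
# get_ui(). The subclass below is hypothetical and only demonstrates the shape
# of the API; it is never instantiated here.
class _ExamplePlugin(PluginBase):
    name = "example"
    display_name = "Example plugin"
    info = "Minimal illustrative subclass."

    def get_ui(self) -> QtWidgets.QVBoxLayout:
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(QtWidgets.QLabel(self.display_name))
        return layout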
| python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enable periodic transmission of DB and job-produced content to BigQuery."""
__author__ = [
'Michael Gainer ([email protected])',
]
import base64
import collections
import copy
import datetime
import logging
import os
import random
import re
import sys
import traceback
import apiclient
import httplib2
import oauth2client
from common import catch_and_log
from common import crypto
from common import schema_fields
from common import utils as common_utils
from controllers import sites
from controllers import utils
from models import analytics
from models import courses
from models import custom_modules
from models import data_sources
from models import jobs
from models import roles
from models import transforms
from modules.dashboard import dashboard
from modules.dashboard import tabs
from google.appengine.ext import db
from google.appengine.ext import deferred
# CourseBuilder setup strings
XSRF_ACTION_NAME = 'data_pump'
DASHBOARD_ACTION = 'data_pump'
# Separate permission to be able to push user data delegable to non-super-users
ACCESS_PERMISSION = 'push_data'
ACCESS_PERMISSION_DESCRIPTION = 'Can push user data outside CourseBuilder.'
# Connection parameters for discovering and auth to BigQuery.
BIGQUERY_RW_SCOPE = 'https://www.googleapis.com/auth/bigquery'
BIGQUERY_API_NAME = 'bigquery'
BIGQUERY_API_VERSION = 'v2'
# API endpoint for initiating a retryable upload.
BIGQUERY_API_UPLOAD_URL_PREFIX = (
'https://www.googleapis.com/upload/bigquery/v2/projects/')
# UI for BigQuery interactive queries
BIGQUERY_UI_URL_PREFIX = 'https://bigquery.cloud.google.com/table/'
# Max of about 20 min of retries (random exponential backoff from 2^1...2^MAX)
MAX_CONSECUTIVE_FAILURES = 10
MAX_RETRY_BACKOFF_SECONDS = 600
# Config for secret
PII_SECRET_LENGTH = 20
PII_SECRET_DEFAULT_LIFETIME = '30 days'
# Constants for accessing job context settings map
UPLOAD_URL = 'upload_url'
LAST_START_OFFSET = 'last_start_offset'
LAST_END_OFFSET = 'last_end_offset'
LAST_PAGE_SENT = 'last_page_sent'
LAST_PAGE_NUM_ITEMS = 'last_page_num_items'
CONSECUTIVE_FAILURES = 'consecutive_failures'
FAILURE_REASON = 'failure_reason'
ITEMS_UPLOADED = 'items_uploaded'
PII_SECRET = 'pii_secret'
# Constants for items within course settings schema
DATA_PUMP_SETTINGS_SCHEMA_SECTION = 'data_pump'
PROJECT_ID = 'project_id'
DATASET_NAME = 'dataset_name'
JSON_KEY = 'json_key'
TABLE_LIFETIME = 'table_lifetime'
PII_ENCRYPTION_TOKEN = 'pii_encryption_token'
def _get_data_source_class_by_name(name):
source_classes = data_sources.Registry.get_rest_data_source_classes()
for source_class in source_classes:
if source_class.__name__ == name and source_class.exportable():
return source_class
return None
class DataPumpJob(jobs.DurableJobBase):
@staticmethod
def get_description():
"""Job to push data from CourseBuilder to BigQuery.
The job operates from the deferred queue, and takes advantage of the
underlying TaskQueue retry and backoff support. One job is created
for each DataSource (see models/data_source). This job moves data
from the paginated data source up to Google BigQuery via the
retryable POST method.
        Jobs here run on the TaskQueue named "default" along with all other
CB deferred tasks because that queue has a reasonable set of config
parameters. However, there is nothing about these jobs that
requires interleaving with others if queue parameters need to be
tuned. Functional tests will need to be changed to have
execute_all_deferred_tasks() pass the name of the new queue.
"""
def __init__(self, app_context, data_source_class_name):
if not _get_data_source_class_by_name(data_source_class_name):
raise ValueError(
'No such data source "%s", or data source is not marked '
'as exportable.' % data_source_class_name)
super(DataPumpJob, self).__init__(app_context)
self._data_source_class_name = data_source_class_name
self._job_name = 'job-datapump-%s-%s' % (self._data_source_class_name,
self._namespace)
def non_transactional_submit(self):
"""Callback used when UI gesture indicates this job should start."""
sequence_num = super(DataPumpJob, self).non_transactional_submit()
deferred.defer(self.main, sequence_num)
return sequence_num
def _mark_job_canceled(self, job, message, duration):
"""Override default behavior of setting job.output to error string."""
if job.output:
job_context, data_source_context = self._load_state(
job, job.sequence_num)
else:
job_context = self._build_job_context(None, None)
data_source_context = self._build_data_source_context()
job_context[FAILURE_REASON] = message
self._save_state(jobs.STATUS_CODE_FAILED, job, job.sequence_num,
job_context, data_source_context,
use_transaction=False)
def _build_data_source_context(self):
"""Set up context class specific to data source type we pull from."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
context_class = data_source_class.get_context_class()
# TODO(mgainer): if we start getting timeout failures, perhaps learn
# proper chunk size from history, rather than using default.
default_chunk_size = data_source_class.get_default_chunk_size()
return context_class.build_blank_default({}, default_chunk_size)
def _build_job_context(self, upload_url, pii_secret):
"""Set up context object used to maintain this job's internal state."""
job_context = {
UPLOAD_URL: upload_url,
LAST_START_OFFSET: 0,
LAST_END_OFFSET: -1,
LAST_PAGE_SENT: -1,
LAST_PAGE_NUM_ITEMS: 0,
CONSECUTIVE_FAILURES: [],
FAILURE_REASON: '',
ITEMS_UPLOADED: 0,
PII_SECRET: pii_secret,
}
return job_context
def _load_state(self, job, sequence_num):
if job.sequence_num != sequence_num:
raise ValueError(
'Abandoning stale job with sequence %d; '
'there is a new job with sequence %d running.' % (
sequence_num, job.sequence_num))
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
content = transforms.loads(job.output)
job_context = content['job_context']
data_source_context_class = data_source_class.get_context_class()
data_source_context = data_source_context_class.build_from_dict(
content['data_source_context'])
return job_context, data_source_context
def _save_state(self, state, job, sequence_num, job_context,
data_source_context, use_transaction=True):
# Job context may have been made with blank values for these two items.
# Recover them from the previous context if they are not set (and if
# the previous context is present enough to have them)
try:
prev_job_context, _ = self._load_state(job, sequence_num)
if not job_context[PII_SECRET]:
job_context[PII_SECRET] = prev_job_context[PII_SECRET]
if not job_context[UPLOAD_URL]:
job_context[UPLOAD_URL] = prev_job_context[UPLOAD_URL]
except (ValueError, AttributeError):
pass
# Convert data source context object to plain dict.
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
context_class = data_source_class.get_context_class()
data_source_context_dict = context_class.save_to_dict(
data_source_context)
# Set job object state variables.
now = datetime.datetime.now()
job.output = transforms.dumps({
'job_context': job_context,
'data_source_context': data_source_context_dict,
})
job.status_code = state
job.execution_time_sec += int((now - job.updated_on).total_seconds())
job.updated_on = now
logging.info('Data pump job %s saving contexts: %s %s',
self._job_name, str(job_context), str(data_source_context))
# Using _update in DurableJobEntity
# pylint: disable=protected-access
if use_transaction:
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(
xg_on, jobs.DurableJobEntity._update, self._job_name,
sequence_num, job.status_code, job.output,
job.execution_time_sec)
else:
jobs.DurableJobEntity._update(self._job_name, sequence_num,
job.status_code, job.output,
job.execution_time_sec)
@classmethod
def _parse_pii_encryption_token(cls, token):
parts = token.split('/')
return (parts[0],
datetime.datetime(year=1970, month=1, day=1) +
datetime.timedelta(seconds=int(parts[1])))
@classmethod
def _is_pii_encryption_token_valid(cls, token):
try:
_, valid_until_date = cls._parse_pii_encryption_token(token)
return valid_until_date > datetime.datetime.now()
except ValueError:
return False
@classmethod
def _build_new_pii_encryption_token(cls, timedelta_string):
hmac_secret = base64.urlsafe_b64encode(
os.urandom(int(PII_SECRET_LENGTH * 0.75)))
table_lifetime_seconds = common_utils.parse_timedelta_string(
timedelta_string).total_seconds()
unix_epoch = datetime.datetime(year=1970, month=1, day=1)
now = datetime.datetime.now()
table_lifetime_timedelta = datetime.timedelta(
seconds=table_lifetime_seconds)
valid_until_timestamp = int(
(now - unix_epoch + table_lifetime_timedelta).total_seconds())
pii_encryption_token = '%s/%d' % (hmac_secret,
valid_until_timestamp)
return pii_encryption_token
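    # Illustrative token shape (values are made up): a token generated with a
    # 30-day lifetime looks like '<20 urlsafe base64 chars>/1456790400', where
    # the trailing integer is the Unix timestamp at which the secret expires.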
@classmethod
def _get_pii_token(cls, app_context):
"""Retrieve or generate and save a secret used to encrypt exported PII.
All PII data in objects exported to BigQuery is either suppressed
or transformed via a one-way hash using a secret value. The point
of the transformation is so that exported data cannot trivially be
correlated to any individual's data in CourseBuilder, but records
in exported data encoded using the same key can. (E.g., a user_id
is the key for students; this key should be usable to correlate a
user's language preference with his test scores.)
Once data has been exported from CourseBuilder to BigQuery, the
internal permissions from CourseBuilder no longer apply. To minimize
the ability of those with access to the data to perform long-term
correlations that might identify individuals, the secret used to
encode PII is automatically rotated on a period determined by the
course settings. We re-use the expiration period for tables, or
default to 30 days if no period is selected.
The format for the stored setting is a string composed of:
- A randomly-generated secret encoded as a base-64 string
- A slash character ('/')
- A Unix timestamp indicating the expiration date of the token.
The expiration date approach is chosen so that within the expiration
period, different data sources can be re-exported multiple times, but
still correlated with one another in BigQuery. Upon expiration, a
new token is generated and used. Data exported before and after the
changeover cannot be directly correlated. (It may be possible to
force a correlation if old versions of the data tables were downloaded
by comparing non-key fields in the old/new versions, if the non-key
fields are sufficiently discriminative)
Args:
app_context: Standard CB application context object.
Returns:
Secret string used for encoding PII data upon export.
"""
course_settings = app_context.get_environ()
pump_settings = course_settings.get(DATA_PUMP_SETTINGS_SCHEMA_SECTION,
{})
pii_encryption_token = pump_settings.get(PII_ENCRYPTION_TOKEN)
if (not pii_encryption_token or
not cls._is_pii_encryption_token_valid(pii_encryption_token)):
pii_encryption_token = cls._build_new_pii_encryption_token(
pump_settings.get(TABLE_LIFETIME,
PII_SECRET_DEFAULT_LIFETIME))
pump_settings[PII_ENCRYPTION_TOKEN] = pii_encryption_token
course = courses.Course(None, app_context=app_context)
course.save_settings(course_settings)
return pii_encryption_token
@classmethod
def _get_pii_secret(cls, app_context):
secret, _ = cls._parse_pii_encryption_token(
cls._get_pii_token(app_context))
return secret
def _get_bigquery_settings(self, app_context):
"""Pull settings necessary for using BigQuery from DB.
This is nice and verbose and paranoid, so that if there is any
misconfiguration, the end-user gets a nice message that's specific
about the particular problem, rather than just a KeyError or
ValueError.
Args:
app_context: The standard app context for the course in question.
Returns:
A namedtuple containing private_key, client_email, project_id
and dataset_id members. The first three are required to connect
to BigQuery, and the last is the dataset within BigQuery to
which the data pump will restrict itself for insert/write/delete
operations.
Raises:
ValueError: if any expected element is missing or malformed.
"""
pump_settings = app_context.get_environ().get(
DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
dataset_id = (
pump_settings.get(DATASET_NAME) or
re.sub('[^a-z_:-]', '', app_context.get_slug().lower()) or
'course')
project_id = pump_settings.get(PROJECT_ID)
if not project_id:
raise ValueError('Cannot pump data without a course settings value '
'for the target Google BigQuery project ID')
json_key = pump_settings.get(JSON_KEY)
if not json_key:
raise ValueError('Cannot pump data without a JSON client key '
'allowing access to the target Google BigQuery '
'project')
try:
json_key = transforms.loads(json_key)
except ValueError:
raise ValueError('Cannot decode JSON client key for the target '
'Google BigQuery project.')
if 'private_key' not in json_key or 'client_email' not in json_key:
raise ValueError('The JSON client key for the target Google '
'BigQuery project does not seem to be well '
'formed; either the "private_key" or '
'"client_email" field is missing.')
table_lifetime_seconds = common_utils.parse_timedelta_string(
pump_settings.get(TABLE_LIFETIME, '')).total_seconds()
Settings = collections.namedtuple('Settings', [
'private_key', 'client_email', PROJECT_ID, 'dataset_id',
'table_lifetime_seconds'])
return Settings(json_key['private_key'], json_key['client_email'],
project_id, dataset_id, table_lifetime_seconds)
def _get_bigquery_service(self, bigquery_settings):
"""Get BigQuery API client plus HTTP client with auth credentials."""
credentials = oauth2client.client.SignedJwtAssertionCredentials(
bigquery_settings.client_email, bigquery_settings.private_key,
BIGQUERY_RW_SCOPE)
http = httplib2.Http()
http = credentials.authorize(http)
return apiclient.discovery.build(BIGQUERY_API_NAME,
BIGQUERY_API_VERSION, http=http), http
def _maybe_create_course_dataset(self, service, bigquery_settings):
"""Create dataset within BigQuery if it's not already there."""
datasets = service.datasets()
try:
datasets.get(projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id).execute()
except apiclient.errors.HttpError, ex:
if ex.resp.status != 404:
raise
datasets.insert(projectId=bigquery_settings.project_id,
body={
'datasetReference': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id
}}).execute()
def _maybe_delete_previous_table(self, tables, bigquery_settings):
"""Delete previous version of table for data source, if it exists."""
# TODO(mgainer): Make clobbering old table and replacing optional.
# For now, we assume people will be writing queries in terms of
# a single table name, and will be irritated at having to change
# their queries all the time if we add a timestamp to the table
# name. And no, AFAICT, the BigQuery API does not permit renaming
# of tables, just creation and deletion.
table_name = self._data_source_class_name.replace('DataSource', '')
try:
tables.delete(projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id,
tableId=table_name).execute()
except apiclient.errors.HttpError, ex:
if ex.resp.status != 404:
raise
def _json_schema_member_to_bigquery_schema(self, name, structure):
item = {'name': name}
if 'description' in structure:
item['description'] = structure['description']
if 'properties' in structure: # It's a sub-registry.
item['type'] = 'RECORD'
item['mode'] = 'NULLABLE'
item['fields'] = self._json_schema_to_bigquery_schema(
structure['properties'])
elif 'items' in structure: # It's an array
if 'items' in structure['items']:
raise ValueError(
'BigQuery schema descriptions do not support nesting '
'arrays directly in other arrays. Instead, nest '
'structures in arrays; those structures may contain '
'sub-arrays. Problem arises trying to pump data for %s' %
self._data_source_class_name)
item = self._json_schema_member_to_bigquery_schema(
name, structure['items'])
item['mode'] = 'REPEATED'
else:
item['mode'] = ('NULLABLE' if structure.get('optional')
else 'REQUIRED')
if structure['type'] in ('string', 'text', 'html', 'url', 'file'):
item['type'] = 'STRING'
            elif structure['type'] == 'integer':
                item['type'] = 'INTEGER'
            elif structure['type'] == 'number':
                item['type'] = 'FLOAT'
            elif structure['type'] == 'boolean':
                item['type'] = 'BOOLEAN'
elif structure['type'] in ('date', 'datetime'):
item['type'] = 'TIMESTAMP'
else:
raise ValueError(
'Unrecognized schema scalar type "%s" '
'when trying to make schema for data-pumping %s' % (
structure['type'], self._data_source_class_name))
return item
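    # Illustrative mapping (not in the original): a member such as
    # {'type': 'string', 'description': 'Course title'} becomes
    # {'name': <name>, 'type': 'STRING', 'mode': 'REQUIRED',
    #  'description': 'Course title'}, while an array of records becomes a
    # REPEATED RECORD whose nested 'fields' are converted recursively.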
def _json_schema_to_bigquery_schema(self, json_schema_dict):
fields = []
for name, structure in json_schema_dict.iteritems():
fields.append(self._json_schema_member_to_bigquery_schema(
name, structure))
return fields
def _create_data_table(self, tables, bigquery_settings, schema):
"""Instantiate and provide schema for new BigQuery table."""
table_name = self._data_source_class_name.replace('DataSource', '')
request = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id,
'tableId': table_name,
},
'schema': {'fields': schema}
}
# If user has requested it, set the time at which table should be
# reclaimed (as milliseconds since Unix epoch).
if bigquery_settings.table_lifetime_seconds:
now = datetime.datetime.now()
expiration_delta = datetime.timedelta(
seconds=bigquery_settings.table_lifetime_seconds)
unix_epoch = datetime.datetime(year=1970, month=1, day=1)
expiration_ms = int(
(now + expiration_delta - unix_epoch).total_seconds()) * 1000
request['expirationTime'] = expiration_ms
# Allow exceptions from here to propagate; we don't expect any problems,
# so if we have any, the upload should abort.
tables.insert(
projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id,
body=request).execute()
def _create_upload_job(self, http, bigquery_settings):
"""Before uploading, we must create a job to handle the upload.
Args:
http: An HTTP client object configured to send our auth token
bigquery_settings: Configs for talking to bigquery.
Returns:
URL specific to this upload job. Subsequent PUT requests to send
pages of data must be sent to this URL.
Raises:
Exception: on unexpected responses from BigQuery API.
"""
uri = '%s%s/jobs?uploadType=resumable' % (
BIGQUERY_API_UPLOAD_URL_PREFIX, bigquery_settings.project_id)
headers = {
'Content-Type': 'application/json',
'X-Upload-Content-Type': 'application/octet-stream',
}
table_name = self._data_source_class_name.replace('DataSource', '')
body = transforms.dumps({
'kind': 'bigquery#job',
'configuration': {
'load': {
'createDisposition': 'CREATE_NEVER', # Already exists.
'destinationTable': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id,
'tableId': table_name,
},
'ignoreUnknownValues': False,
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
}
}
})
response, content = http.request(uri, method='POST',
body=body, headers=headers)
if int(response.get('status', 0)) != 200:
raise Exception('Got non-200 response when trying to create a '
                            'new upload job. Response was: "%s"; content '
'was "%s"' % (str(response), str(content)))
location = response.get('location')
if not location:
raise Exception('Expected response to contain a "location" item '
'giving a URL to send subsequent content to, but '
'instead got "%s"' % str(response))
return location
def _initiate_upload_job(self, bigquery_service, bigquery_settings, http,
app_context):
"""Coordinate table cleanup, setup, and initiation of upload job."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
catch_and_log_ = catch_and_log.CatchAndLog()
table_schema = data_source_class.get_schema(app_context, catch_and_log_)
schema = self._json_schema_to_bigquery_schema(table_schema)
tables = bigquery_service.tables()
self._maybe_create_course_dataset(bigquery_service, bigquery_settings)
self._maybe_delete_previous_table(tables, bigquery_settings)
self._create_data_table(tables, bigquery_settings, schema)
upload_url = self._create_upload_job(http, bigquery_settings)
return upload_url
def _note_retryable_failure(self, message, job_context):
"""Log a timestamped message into the job context object."""
timestamp = datetime.datetime.now().strftime(
utils.HUMAN_READABLE_DATETIME_FORMAT)
job_context[CONSECUTIVE_FAILURES].append(timestamp + ' ' + message)
def _randomized_backoff_timeout(self, job_context):
num_failures = len(job_context[CONSECUTIVE_FAILURES])
if not num_failures:
return 0
return min(MAX_RETRY_BACKOFF_SECONDS,
random.randrange(2 ** num_failures, 2 ** (num_failures + 1)))
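    # Illustrative backoff growth (not in the original): after one failure the
    # delay is drawn from [2, 4) seconds, after five failures from [32, 64),
    # and the MAX_RETRY_BACKOFF_SECONDS cap of 600 seconds takes over around
    # the ninth or tenth consecutive failure.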
def _check_upload_state(self, http, job_context):
"""Check with the BigQuery upload server to get state of our upload.
Due to various communication failure cases, we may not be aware of
the actual state of the upload as known to the server. Issue a blank
PUT request to evoke a response that will indicate:
- How far along we are in the upload
- Whether the upload has already completed
- Whether the upload job has taken too long and expired
Args:
http: An HTTP client object configured to send our auth token
job_context: Hash containing configuration for this upload job.
Returns:
A 2-tuple of next page to load (or None if no page should be
loaded), and the next jobs.STATUS_CODE_<X> to transition to.
"""
response, _ = http.request(job_context[UPLOAD_URL], method='PUT',
headers={'Content-Range': 'bytes */*'})
return self._handle_put_response(response, job_context, is_upload=False)
def _send_data_page_to_bigquery(self, data, is_last_chunk, next_page,
http, job, sequence_num, job_context,
data_source_context):
# BigQuery expects one JSON object per newline-delimed record,
# not a JSON array containing objects, so convert them individually.
# Less efficient, but less hacky than converting and then string
# manipulation.
lines = []
total_len = 0
for item in data:
line = transforms.dumps(item)
line += '\n'
total_len += len(line)
lines.append(line)
# Round data size up to next multiple of 256K, per
# https://cloud.google.com/bigquery/loading-data-post-request#chunking
padding_amount = 0
if not is_last_chunk:
round_to = 256 * 1024
if total_len % round_to:
padding_amount = round_to - (total_len % round_to)
lines.append(' ' * padding_amount)
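        # Illustrative padding arithmetic (not in the original): a 300,000 byte
        # page would be padded with 224,288 spaces so the chunk ends exactly on
        # the next 256 KiB boundary (524,288 bytes); only the final chunk may
        # have an arbitrary size.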
payload = ''.join(lines)
# We are either re-attempting to send a page, or sending a new page.
# Adjust the job_context's last-sent state to reflect this.
job_context[LAST_PAGE_NUM_ITEMS] = len(data)
if next_page == job_context[LAST_PAGE_SENT]:
job_context[LAST_END_OFFSET] = (
job_context[LAST_START_OFFSET] + len(payload) - 1)
elif next_page == job_context[LAST_PAGE_SENT] + 1:
job_context[LAST_PAGE_SENT] = next_page
job_context[LAST_START_OFFSET] = (
job_context[LAST_END_OFFSET] + 1)
job_context[LAST_END_OFFSET] = (
job_context[LAST_START_OFFSET] + len(payload) - 1)
else:
raise Exception(
'Internal error - unexpected condition in sending page. '
'next_page=%d last_page=%d, num_items=%d' % (
next_page, job_context[LAST_PAGE_SENT], len(data)))
logging.info(
'Sending to BigQuery. %d items; %d padding bytes; is-last: %s',
len(data), padding_amount, str(is_last_chunk))
headers = {
'Content-Range': 'bytes %d-%d/%s' % (
job_context[LAST_START_OFFSET],
job_context[LAST_END_OFFSET],
(job_context[LAST_END_OFFSET] + 1) if is_last_chunk else '*')
}
response, _ = http.request(job_context[UPLOAD_URL], method='PUT',
body=payload, headers=headers)
_, next_state = self._handle_put_response(response, job_context,
is_upload=True)
return next_state
def _handle_put_response(self, response, job_context, is_upload=True):
"""Update job_context state depending on response from BigQuery."""
status = int(response['status'])
logging.info('Response from bigquery: %d; %s', status, str(response))
next_page = None
next_status = jobs.STATUS_CODE_STARTED
if status == 308:
# Google's push-partial-data usurps the usual meaning of 308 to
# instead mean "partial request incomplete"; here, it's telling
# us that the request has partially completed, and it will give
# us a Range: header to indicate how far it thinks we've gone.
# We only care about the upper end of the range.
if 'range' not in response:
last_offset_received = -1
else:
last_offset_received = int(response['range'].split('-')[1])
if last_offset_received == job_context[LAST_END_OFFSET]:
# The nominal case; the reported index of the last byte
# received exactly matches what we think we sent. Tell our
# caller we are ready to try the next page, and count up
# the total number of items sent only now that we have seen
# the receiving side's acknowledgement.
next_page = job_context[LAST_PAGE_SENT] + 1
job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
job_context[LAST_PAGE_NUM_ITEMS] = 0
# Don't clear the list of failures if this is handling the
# pre-check done before uploading. Experiments show that
# persistent problems with our requests result in 503's on
# upload, but 308's (reporting no progress made) on check.
# We want to eventually fail out if we're constantly getting
# errors, so ignore the "success" on checking status.
if is_upload:
job_context[CONSECUTIVE_FAILURES] = []
elif (last_offset_received >= job_context[LAST_START_OFFSET] - 1 and
last_offset_received < job_context[LAST_END_OFFSET]):
# If the last offset received is not the same as the last offset
# sent, that's possibly OK; verify that the last offset received
# is sane. Here, "sane" means that we accept seeing the
# last offset of the previous page sent (last_start_offset-1)
# up to, but not including the last_end_offset (for the page
# we just sent). Anything lower means that our algorithm
# mistakenly skipped past a failure. Anything higher means
# that we have somehow become confused and decided to step
# backward (or BigQuery is lying to us).
prev_page_size = (job_context[LAST_END_OFFSET] -
job_context[LAST_START_OFFSET] + 1)
bytes_received = (last_offset_received -
job_context[LAST_START_OFFSET] + 1)
self._note_retryable_failure(
'Incomplete upload detected - %d of %d bytes received '
'for page %d' %
(bytes_received, prev_page_size,
job_context[LAST_PAGE_SENT]), job_context)
next_page = job_context[LAST_PAGE_SENT]
else:
raise ValueError(
'Uploaded byte count of %d does not fall in the range '
'%d to %d, the start/end range for previously-sent page '
'number %d. Abandoning upload.' % (
last_offset_received, job_context[LAST_START_OFFSET],
job_context[LAST_END_OFFSET],
job_context[LAST_PAGE_SENT]))
elif status in (200, 201):
# BigQuery confirms that it has seen the upload complete. (Note
# that this is *not* a promise that the upload has parsed
# correctly; there doesn't seem to be a clean way to ask about
# that other than to probe the table for number of rows uploaded
# until we see the desired number or time out. Ick.)
job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
job_context[LAST_PAGE_NUM_ITEMS] = 0
next_status = jobs.STATUS_CODE_COMPLETED
elif status == 404:
# Unlikely, but possible. For whatever reason, BigQuery has
# decided that our upload URL is no longer valid. (Docs say that
# we are allowed up to a day to get an upload done, but do not
# promise that this is the only reason a job may become invalid.)
# We need to start again from scratch. To start over, we will
# just skip uploading a data page this round, and set ourselves up
# to be called back again from the deferred-tasks queue. When the
# callback happens, STATUS_CODE_QUEUED will indicate we need to
# re-init everything from scratch.
next_status = jobs.STATUS_CODE_QUEUED
elif status in (500, 502, 503, 504):
# Server Error, Bad Gateway, Service Unavailable or Gateway Timeout.
# In all of these cases, we do a randomized exponential delay before
# retrying.
self._note_retryable_failure('Retryable server error %d' % status,
job_context)
else:
raise ValueError(
'Got unexpected status code %d from BigQuery in response %s' %
(status, str(response)))
return next_page, next_status
def _fetch_page_data(self, app_context, data_source_context, next_page):
"""Get the next page of data from the data source."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
catch_and_log_ = catch_and_log.CatchAndLog()
is_last_page = False
with catch_and_log_.propagate_exceptions('Loading page of data'):
schema = data_source_class.get_schema(app_context, catch_and_log_)
required_jobs = data_sources.utils.get_required_jobs(
data_source_class, app_context, catch_and_log_)
data, _ = data_source_class.fetch_values(
app_context, data_source_context, schema, catch_and_log_,
next_page, *required_jobs)
if (data_source_class.get_default_chunk_size() == 0 or
not hasattr(data_source_context, 'chunk_size') or
len(data) < data_source_context.chunk_size):
is_last_page = True
else:
# Here, we may have read to the end of the table and just
# happened to end up on an even chunk boundary. Attempt to
# read one more row so that we can discern whether we really
# are at the end.
# Don't use the normal data_source_context; we don't want it
# to cache a cursor for the next page that will only retrieve
# one row.
throwaway_context = copy.deepcopy(data_source_context)
throwaway_context.chunk_size = 1
next_data, actual_page = data_source_class.fetch_values(
app_context, throwaway_context, schema, catch_and_log_,
next_page + 1, *required_jobs)
if not next_data or actual_page == next_page:
is_last_page = True
return data, is_last_page
def _send_next_page(self, sequence_num, job):
"""Coordinate table setup, job setup, sending pages of data."""
# Gather necessary resources
app_context = sites.get_course_index().get_app_context_for_namespace(
self._namespace)
pii_secret = self._get_pii_secret(app_context)
bigquery_settings = self._get_bigquery_settings(app_context)
bigquery_service, http = self._get_bigquery_service(bigquery_settings)
# If this is our first call after job start (or we have determined
# that we need to start over from scratch), do initial setup.
# Otherwise, re-load context objects from saved version in job.output
if job.status_code == jobs.STATUS_CODE_QUEUED:
upload_url = self._initiate_upload_job(
bigquery_service, bigquery_settings, http, app_context)
job_context = self._build_job_context(upload_url, pii_secret)
data_source_context = self._build_data_source_context()
else:
job_context, data_source_context = self._load_state(
job, sequence_num)
if hasattr(data_source_context, 'pii_secret'):
data_source_context.pii_secret = pii_secret
logging.info('Data pump job %s loaded contexts: %s %s',
self._job_name, str(job_context), str(data_source_context))
# Check BigQuery's state. Based on that, choose the next page of data
# to push. Depending on BigQuery's response, we may or may not be
# able to send a page now.
next_page, next_state = self._check_upload_state(http, job_context)
if next_page is not None:
data, is_last_chunk = self._fetch_page_data(
app_context, data_source_context, next_page)
next_state = self._send_data_page_to_bigquery(
data, is_last_chunk, next_page,
http, job, sequence_num, job_context, data_source_context)
self._save_state(next_state, job, sequence_num, job_context,
data_source_context)
# If we are not done, enqueue another to-do item on the deferred queue.
if len(job_context[CONSECUTIVE_FAILURES]) >= MAX_CONSECUTIVE_FAILURES:
raise Exception('Too many consecutive failures; abandoning job.')
elif not job.has_finished:
backoff_seconds = self._randomized_backoff_timeout(job_context)
logging.info('%s re-queueing for subsequent work', self._job_name)
deferred.defer(self.main, sequence_num, _countdown=backoff_seconds)
else:
logging.info('%s complete', self._job_name)
def main(self, sequence_num):
"""Callback entry point. Manage namespaces, failures; send data."""
logging.info('%s de-queued and starting work.', self._job_name)
job = self.load()
if not job:
raise deferred.PermanentTaskFailure(
'Job object for %s not found!' % self._job_name)
if job.has_finished:
return # We have been canceled; bail out immediately.
with common_utils.Namespace(self._namespace):
try:
self._send_next_page(sequence_num, job)
except Exception, ex:
try:
# Log origin of exception to permit troubleshooting.
# Do this in try/finally block to conform to Python docs'
# recommendation to avoid circular reference to traceback
# object.
origin_traceback = sys.exc_info()[2]
logging.critical('%s: job abandoned due to fatal error %s',
self._job_name, str(ex))
logging.critical(''.join(
traceback.format_tb(origin_traceback)))
                finally:
                    del origin_traceback
# Log failure in job object as well.
if job.output:
job_context, data_source_context = self._load_state(
job, sequence_num)
else:
job_context = self._build_job_context(None, None)
                    data_source_context = self._build_data_source_context()
job_context[FAILURE_REASON] = str(ex)
self._save_state(jobs.STATUS_CODE_FAILED, job, sequence_num,
job_context, data_source_context)
# PermanentTaskFailure tells deferred queue to give up on us.
raise deferred.PermanentTaskFailure('Job %s failed: %s' % (
self._job_name, str(ex)))
def get_display_dict(self, app_context):
"""Set up dict for Jinja rendering on data_pump.html."""
ret = {
'name': self._data_source_class_name,
'status': 'Has Never Run',
'active': False,
}
job = self.load()
if job:
ret['status'] = jobs.STATUS_CODE_DESCRIPTION[job.status_code]
ret['active'] = not job.has_finished
ret['sequence_number'] = job.sequence_num
ret['updated_on'] = job.updated_on.strftime(
utils.HUMAN_READABLE_TIME_FORMAT)
if job.has_finished:
duration = job.execution_time_sec
else:
duration = int((datetime.datetime.now() -
                                job.updated_on).total_seconds())
ret['duration'] = datetime.timedelta(days=0, seconds=duration)
ret['last_updated'] = job.updated_on.strftime(
utils.HUMAN_READABLE_DATETIME_FORMAT)
bigquery_settings = self._get_bigquery_settings(app_context)
ret['bigquery_url'] = '%s%s:%s.%s' % (
BIGQUERY_UI_URL_PREFIX, bigquery_settings.project_id,
bigquery_settings.dataset_id,
self._data_source_class_name.replace('DataSource', ''))
try:
job_context, _ = self._load_state(job, job.sequence_num)
ret['job_context'] = job_context
current_secret = DataPumpJob._get_pii_secret(app_context)
if job_context[PII_SECRET] != current_secret:
ret['pii_secret_is_out_of_date'] = True
del job_context[PII_SECRET]
except (ValueError, AttributeError):
# When jobs framework catches a failure, it overwrites the
# job.output with the failure message as a string. We will
# get here if we fail to parse job.output as a JSON-packed
# object.
ret['message'] = job.output
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
ret['source_url'] = '%s/rest/data/%s/items?chunk_size=10' % (
app_context.get_slug(), data_source_class.get_name())
catch_and_log_ = catch_and_log.CatchAndLog()
ret['schema'] = data_source_class.get_schema(app_context,
catch_and_log_)
ret['generator_statuses'] = []
ret['available'] = True
ret['any_generator_running'] = False
required_generators = data_source_class.required_generators()
if not required_generators:
ret['generator_statuses'].append('(No dependencies)')
ret['has_any_generators'] = False
else:
ret['has_any_generators'] = True
for generator_class in required_generators:
generator = generator_class(app_context)
job = generator.load()
ret['generator_statuses'].append(
analytics.display.get_generator_status_message(
generator_class, job))
if not job or job.status_code != jobs.STATUS_CODE_COMPLETED:
ret['available'] = False
if job and not job.has_finished:
ret['any_generator_running'] = True
return ret
class DataPumpJobsDataSource(data_sources.SynchronousQuery):
"""Present DataPump job status as an analytic generated at page-render time.
This is a very mild hack. Since the data pump job controls show up as a
sub-tab under Dashboard -> Analytics, the easiest way to generate tab
content is to act as though we are an analytic. And we are, in a sense -
this analytic just happens to generate a table of data-pump job statuses,
rather than analytics about student performance. This also conveniently
re-uses all the mechanics for authorization, dispatch, page-painting, etc.
"""
@staticmethod
def required_generators():
return []
@staticmethod
def fill_values(app_context, template_values):
template_values['xsrf_token'] = (
crypto.XsrfTokenManager.create_xsrf_token(XSRF_ACTION_NAME))
source_classes = [
ds for ds in data_sources.Registry.get_rest_data_source_classes()
if ds.exportable()]
source_classes.sort(key=lambda c: c.__name__)
# pylint: disable=protected-access
template_values['pumps'] = []
for source_class in source_classes:
job = DataPumpJob(app_context, source_class.__name__)
template_values['pumps'].append(job.get_display_dict(app_context))
pump_settings = app_context.get_environ().get(
DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
        template_values['need_settings'] = (
            PROJECT_ID not in pump_settings or
            JSON_KEY not in pump_settings)
template_values[DATASET_NAME] = pump_settings.get(DATASET_NAME)
custom_module = None
class DashboardExtension(object):
"""Respond to UI run/cancel commands for individual data pump jobs."""
@classmethod
def register(cls):
# Register new permission for pushing student data to external location.
dashboard.DashboardHandler.add_external_permission(
ACCESS_PERMISSION, ACCESS_PERMISSION_DESCRIPTION)
# Register a new Analytics sub-tab for showing data pump status and
# start/stop buttons.
data_pump_visualization = analytics.Visualization(
'data_pumps', 'Data Pumps', 'data_pump.html',
data_source_classes=[DataPumpJobsDataSource])
tabs.Registry.register('analytics', 'data_pump', 'Data Pump',
[data_pump_visualization])
def post_action(handler):
cls(handler).post_data_pump()
dashboard.DashboardHandler.post_actions.append(DASHBOARD_ACTION)
setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION,
post_action)
dashboard.DashboardHandler.map_action_to_permission(
'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)
@classmethod
def unregister(cls):
dashboard.DashboardHandler.post_actions.remove(DASHBOARD_ACTION)
setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION, None)
dashboard.DashboardHandler.unmap_action_to_permission(
'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)
dashboard.DashboardHandler.remove_external_permission(ACCESS_PERMISSION)
roles.Roles.unregister_permissions(custom_module)
def post_data_pump(self):
source_name = self.handler.request.get('data_source')
data_source_class = _get_data_source_class_by_name(source_name)
if data_source_class:
data_pump_job = DataPumpJob(self.handler.app_context, source_name)
action = self.handler.request.get('pump_action')
if action == 'start_pump':
data_pump_job.submit()
elif action == 'cancel_pump':
data_pump_job.cancel()
elif action == 'run_generators':
for generator_class in data_source_class.required_generators():
generator_class(self.handler.app_context).submit()
elif action == 'cancel_generators':
for generator_class in data_source_class.required_generators():
generator_class(self.handler.app_context).cancel()
self.handler.redirect(self.handler.get_action_url(
'analytics', extra_args={'tab': 'data_pump'}, fragment=source_name))
def __init__(self, handler):
self.handler = handler
def register_module():
"""Adds this module to the registry. Called once at startup."""
project_id = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PROJECT_ID,
'Project ID', 'string',
description='The ID (not the name!) of the Project to which to '
'send data. See the list of projects and their IDs at '
'https://console.developers.google.com/project',
i18n=False)
dataset_name = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + DATASET_NAME,
'Dataset Name', 'string',
description='Name of the BigQuery dataset to which to pump tables. '
'If not set, this will default to the name of the course.',
optional=True, i18n=False)
json_key = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + JSON_KEY,
'JSON Key', 'text',
i18n=False,
description='Contents of a JSON key created in the Developers Console '
'for the instance where BigQuery is to be run. See '
# TODO(mgainer): Get CB location of instructions to get client key
# for destination application.
'the instructions at ')
table_lifetime = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + TABLE_LIFETIME,
'Table Lifetime', 'string',
optional=True, i18n=False,
description='Amount of time a table pushed to BigQuery will last. '
'After this amount of time, the table will be automatically deleted. '
'(This is useful if your data retention or privacy policy mandates '
'a limited time for analysis after which personal data must be '
'removed.) Leaving this field blank or setting it to zero will '
'cause BigQuery to indefinitely retain data. Supported units are: '
'"weeks", "days", "hours", "minutes", "seconds". Units may be '
'specified as their first letter, singular, or plural. Spaces '
'and commas may be used or omitted. E.g., both of the following '
'are equivalent: "3w1d7h", "3 weeks, 1 day, 7 hours"')
pii_encryption_token = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PII_ENCRYPTION_TOKEN,
'PII Encryption Token', 'string',
optional=True, i18n=False, editable=False,
description='Automatically generated encryption secret used to '
'obscure PII fields when these are pushed to BigQuery. This '
'key lasts only as long as the Table Lifetime setting above, or '
'30 days if the limit is not set. After this secret has expired, '
'a new secret will be generated. PII items with the same un-obscured '
'value which are obscured with different values for this secret will '
'have different values. Most importantly, this means that joins on '
'fields that should be the same (e.g., user ID) will not work.')
course_settings_fields = (
lambda c: project_id,
lambda c: json_key,
lambda c: dataset_name,
lambda c: table_lifetime,
lambda c: pii_encryption_token,
)
def on_module_enabled():
data_sources.Registry.register(DataPumpJobsDataSource)
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
DATA_PUMP_SETTINGS_SCHEMA_SECTION] += course_settings_fields
tabs.Registry.register('settings', 'data_pump', 'Data Pump',
DATA_PUMP_SETTINGS_SCHEMA_SECTION)
DashboardExtension.register()
def on_module_disabled():
for field in course_settings_fields:
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
DATA_PUMP_SETTINGS_SCHEMA_SECTION].remove(field)
DashboardExtension.unregister()
global custom_module
custom_module = custom_modules.Module(
'Data Pump', 'Pushes DB and generated content to a BigQuery project',
[], [],
notify_module_enabled=on_module_enabled,
notify_module_disabled=on_module_disabled)
return custom_module
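# The 'Table Lifetime' setting registered above accepts durations such as
# "3w1d7h" or "3 weeks, 1 day, 7 hours".  The module's real parser is not part
# of this excerpt; the function below is only a sketch of how such a string
# could be converted to seconds, following the units listed in the setting's
# description.  Its name and error handling are assumptions.
def _example_parse_table_lifetime(text):
    """Illustrative only: convert '3w1d7h'-style durations to seconds."""
    import re  # local import keeps the sketch self-contained
    seconds_per_unit = {
        'w': 7 * 24 * 3600, 'd': 24 * 3600, 'h': 3600, 'm': 60, 's': 1}
    if not text or not text.strip():
        return 0  # blank or zero means "retain indefinitely" per the setting
    total = 0
    # Accept "3w", "3 week", "3 weeks"; spaces and commas are optional.
    for amount, unit in re.findall(r'(\d+)\s*([a-zA-Z]+)', text):
        total += int(amount) * seconds_per_unit[unit[0].lower()]
    return total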
| python |
# link:https://leetcode.com/problems/design-browser-history/
class BrowserHistory:
def __init__(self, homepage: str):
        self.forw_memo = []  # stack of URLs ahead of the current page (used by forward())
        self.back_memo = []  # stack of URLs behind the current page (used by back())
self.curr_url = homepage
def visit(self, url: str) -> None:
self.back_memo.append(self.curr_url)
self.curr_url = url
        self.forw_memo = []  # visiting a new page clears the forward history
def back(self, steps: int) -> str:
while self.back_memo and steps >= 1:
self.forw_memo.append(self.curr_url)
pop_url = self.back_memo.pop()
self.curr_url = pop_url
steps -= 1
return self.curr_url
def forward(self, steps: int) -> str:
while self.forw_memo and steps >= 1:
self.back_memo.append(self.curr_url)
pop_url = self.forw_memo.pop()
self.curr_url = pop_url
steps -= 1
return self.curr_url
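# A short self-check of the two-stack design above, mirroring the example
# sequence from the LeetCode problem statement.  This demo block is an
# illustrative addition and is not part of the original solution.
if __name__ == '__main__':
    history = BrowserHistory("leetcode.com")
    history.visit("google.com")    # back stack: [leetcode.com]
    history.visit("facebook.com")  # back stack: [leetcode.com, google.com]
    history.visit("youtube.com")
    assert history.back(1) == "facebook.com"
    assert history.back(1) == "google.com"
    assert history.forward(1) == "facebook.com"
    history.visit("linkedin.com")  # clears the forward stack
    assert history.forward(2) == "linkedin.com"
    assert history.back(2) == "google.com"
    assert history.back(7) == "leetcode.com"
    print("BrowserHistory demo passed")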
| python |
'''
Models utility module.
'''
import tensorflow as tf
def dense(input_size,output_size,depth,size):
    '''Create a dense (fully connected) model with the given input_size, output_size, depth, and number of neurons per hidden layer.'''
layers = [tf.keras.layers.Flatten(input_shape=(input_size,input_size,3))]
for i in range(depth):
layers.append(tf.keras.layers.Dense(size,activation='relu'))
layers.append(tf.keras.layers.Dense(output_size))
return tf.keras.Sequential(layers)
def conv(input_size,output_size,depth,size):
    '''Create a convolutional model with the given input_size, output_size, depth, and number of filters/units per layer.'''
layers = [tf.keras.layers.Conv2D(size,(3, 3),activation='relu',input_shape=(input_size,input_size,3))]
for i in range(depth-1):
layers += [
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(size,(3, 3),activation='relu',padding='same')]
layers += [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(size,activation='relu'),
tf.keras.layers.Dense(output_size)]
return tf.keras.Sequential(layers)
def models(input_size,output_size):
    '''Yield (model, depth, size, kind) tuples for every architecture tested in the experiment.'''
    # dense models of increasing depth and width
for i in range(1,4):
for j in range(1,6):
yield dense(input_size,output_size,i,j*32),i,j*32,'dense'
    # convolutional models of increasing depth and width
for i in range(1,4):
for j in range(1,6):
yield conv(input_size,output_size,i,j*8),i,j*8,'conv'
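# A minimal usage sketch of the generator above.  The input/output sizes are
# assumptions (32x32 RGB images and 10 classes, CIFAR-10-like); the experiment
# that actually consumes these models is not shown here.
if __name__ == '__main__':
    for model, depth, size, kind in models(32, 10):
        print(kind, depth, size, model.count_params())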
| python |
# -*- coding: utf-8 -*-
"""
@file
@brief Helpers to time predictions of machine learning models.
"""
import timeit
import pandas
def unit(x):
"""
    Formats a duration, choosing an appropriate time unit.
.. runpython::
:showcode:
from jupytalk.benchmark.mlprediction import unit
print(unit(34))
print(unit(3.4))
print(unit(0.34))
print(unit(0.034))
print(unit(0.0034))
print(unit(0.00034))
print(unit(0.000034))
print(unit(0.0000034))
print(unit(0.00000034))
"""
if x >= 1:
return "%1.2f s" % x
elif x >= 1e-3:
return "%1.2f ms" % (x * 1000)
elif x >= 1e-6:
return "%1.2f µs" % (x * 1000**2)
elif x >= 1e-9:
return "%1.2f ns" % (x * 1000**3)
else:
return "%1.2g s" % x
def timeexec(legend, code, number=50, repeat=200, verbose=True, context=None):
"""
Measures the time for a given expression.
@param legend name of the experiment
@param code code to measure (as a string)
    @param number number of times to run the expression
(and then divide by this number to get an average)
@param repeat number of times to repeat the computation
of the above average
@param verbose print the time
    @param context execution context (usually equal to ``globals()``)
@return dictionary
.. runpython::
:showcode:
from jupytalk.benchmark.mlprediction import timeexec
code = "3 * 45535266234653452"
print(timeexec("multiplication", code))
"""
if context is None:
context = globals()
rep = timeit.repeat(code, number=number, repeat=repeat, globals=context)
ave = sum(rep) / (number * repeat)
std = (sum((x / number - ave)**2 for x in rep) / repeat)**0.5
fir = rep[0] / number
fir3 = sum(rep[:3]) / (3 * number)
las3 = sum(rep[-3:]) / (3 * number)
rep.sort()
mini = rep[len(rep) // 20] / number
maxi = rep[-len(rep) // 20] / number
if verbose:
print("Average: %s deviation %s (with %d runs) in [%s, %s]" % (
unit(ave), unit(std), number, unit(mini), unit(maxi)))
return dict(legend=legend, average=ave, deviation=std, first=fir, first3=fir3,
last3=las3, repeat=repeat, min5=mini, max5=maxi, code=code, run=number)
def make_dataframe(labels, arrays):
"""
Builds a dataframe from multiple arrays.
@param labels list of labels
@param arrays list of arrays (or one array)
    @return a single dataframe concatenating the labels and all feature columns
"""
if labels is not None:
df = [pandas.DataFrame(data={'Label': labels})]
else:
df = []
if isinstance(arrays, list):
for i, ar in enumerate(arrays):
d = pandas.DataFrame(
data=ar, columns=["F%d_%d" % (i, j) for j in range(ar.shape[1])])
df.append(d)
else:
ar = arrays
d = pandas.DataFrame(
data=ar, columns=["F%d" % j for j in range(ar.shape[1])])
df.append(d)
return pandas.concat(df, axis=1)
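# A small usage sketch of make_dataframe, combining a label column with two
# feature arrays.  The shapes and values are made up for illustration.
if __name__ == '__main__':
    import numpy
    labels = ['a', 'b', 'c']
    X1 = numpy.arange(6).reshape(3, 2)
    X2 = numpy.arange(3).reshape(3, 1) * 10.0
    df = make_dataframe(labels, [X1, X2])
    print(df)
    # Columns: Label, F0_0, F0_1, F1_0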
| python |