content | type |
---|---|
def get_sitekey(driver):
    # Selenium 3 API; with Selenium 4 this would be
    # driver.find_element(By.CLASS_NAME, "g-recaptcha").
    return driver.find_element_by_class_name("g-recaptcha").get_attribute(
        "data-sitekey"
    ) | python |
# Authors: James Bergstra
# License: MIT
import numpy as np
import time
import pyopencl as cl
mf = cl.mem_flags
PROFILING = 0
ctx = cl.create_some_context()
if PROFILING:
queue = cl.CommandQueue(
ctx,
properties=cl.command_queue_properties.PROFILING_ENABLE)
else:
queue = cl.CommandQueue(ctx)
_cache = {}
def pairwise_pyopencl_cpu_prepare(shp, dtype):
N, D = shp
ctype = {
'float32': 'float',
'float64': 'double',
}[str(dtype)]
odd_d = "" if 0 == D % 2 else """
__global %(ctype)s * a1 = (__global %(ctype)s*) (a);
%(ctype)s diff = a1[(n0 + 1) * %(D)s - 1] - a1[(m0 + 1) * %(D)s - 1];
buf.s0 += diff * diff;
"""
prg = cl.Program(ctx, """
__kernel void lower(__global %(ctype)s2 *a, __global %(ctype)s *c)
{
for(int n0 = get_global_id(0); n0 < %(N)s; n0 += get_global_size(0))
{
for(int m0 = get_global_id(1); m0 < %(N)s; m0 += get_global_size(1))
{
if (n0 < m0) continue;
__global %(ctype)s2 *an = a + n0 * %(D)s / 2;
__global %(ctype)s2 *am = a + m0 * %(D)s / 2;
%(ctype)s2 buf = 0;
for (int d = 0; d < %(D)s/2; ++d)
{
%(ctype)s2 diff = am[d] - an[d];
buf += diff * diff;
}
%(odd_d)s;
c[m0 * %(N)s + n0] = sqrt(buf.s0 + buf.s1);
}
}
}
__kernel void upper(__global %(ctype)s *a, __global %(ctype)s *c)
{
for(int n0 = get_global_id(0); n0 < %(N)s; n0 += get_global_size(0))
{
for(int m0 = get_global_id(1); m0 < %(N)s; m0 += get_global_size(1))
{
if (n0 >= m0) continue;
c[m0 * %(N)s + n0] = c[n0 * %(N)s + m0];
}
}
}
""" % locals()).build()
return prg.lower, prg.upper
comptimes = []
def pairwise_pyopencl_cpu(data):
data = np.asarray(data, order='C')
N, D = data.shape
try:
lower, upper = _cache[(data.shape, data.dtype)]
    except KeyError:
lower, upper = pairwise_pyopencl_cpu_prepare(data.shape, data.dtype)
_cache[(data.shape, data.dtype)] = lower, upper
data_buf = cl.Buffer(ctx, mf.COPY_HOST_PTR, hostbuf=data)
dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY, N * N * data.dtype.itemsize)
try:
rval, _ = cl.enqueue_map_buffer(queue, dest_buf, cl.map_flags.READ,
offset=0, shape=(N, N), dtype=data.dtype)
need_copy = False
except TypeError: #OSX's OCL needs this?
rval = np.empty((N, N), dtype=data.dtype)
need_copy = True
    ev = lower(queue, (N, 1), (1, 1), data_buf, dest_buf)  # keep the event for profiling
    upper(queue, (4, 4), (1, 1), data_buf, dest_buf)
    if need_copy:
        cl.enqueue_copy(queue, rval, dest_buf)
    else:
        queue.finish()
    if PROFILING:
        comptimes.append(1e-9 * (ev.profile.end - ev.profile.start))
        print('computation time', min(comptimes))
return rval
benchmarks = (
pairwise_pyopencl_cpu,
)
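# Hedged usage sketch (not part of the original benchmark module): exercise
# pairwise_pyopencl_cpu on random float32 data and compare against a NumPy
# reference; the shape (200, 16) and the even D are illustrative choices.
if __name__ == "__main__":
    X = np.random.rand(200, 16).astype(np.float32)
    dists = pairwise_pyopencl_cpu(X)  # full (200, 200) symmetric distance matrix
    ref = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(-1))
    print('max abs error vs numpy:', float(np.abs(dists - ref).max()))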
| python |
def one():
return 1
| python |
# Copyright (c) 2020 Xvezda <[email protected]>
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
__title__ = 'maskprocessor'
__version__ = '0.0.5'
| python |
from django.shortcuts import render
def index(request):
return render(request,'front_end/index.html')
def additional(request):
return render(request,'front_end/additional.html') | python |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from metautils.singleton import Singleton
class SingletonTestCase(TestCase):
def test_creates_instance(self):
class instance(object, metaclass=Singleton()):
pass
self.assertNotIsInstance(instance, type)
def test_has_methods(self):
class instance(object, metaclass=Singleton()):
def method(self):
return 'm'
self.assertEqual(instance.method(), 'm')
    def test_has_values(self):
class instance(object, metaclass=Singleton()):
a = 'a'
self.assertEqual(instance.a, 'a')
def test_single_instance_of_type(self):
class instance(object, metaclass=Singleton()):
pass
with self.assertRaises(TypeError):
type(instance)()
def test_new_erasure(self):
called = 0
def new(cls):
nonlocal called
called += 1
return object.__new__(cls)
class instance(object, metaclass=Singleton()):
__new__ = new
self.assertEqual(called, 1)
self.assertIsNot(instance.__new__, new)
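# A minimal sketch of a Singleton() metaclass factory consistent with the
# behaviour exercised above (an illustration, not the actual
# metautils.singleton implementation): the class statement evaluates to the
# single instance, any user-defined __new__ runs exactly once and is then
# replaced, and instantiating type(instance) again raises TypeError.
def _sketch_singleton():
    class _SingletonMeta(type):
        def __new__(mcls, name, bases, namespace):
            cls = super().__new__(mcls, name, bases, namespace)
            instance = cls()  # runs any user-supplied __new__/__init__ exactly once
            def _blocked(cls, *args, **kwargs):
                raise TypeError('cannot create another %r instance' % name)
            cls.__new__ = _blocked  # erase the user __new__; forbid re-instantiation
            return instance  # the class statement binds the instance, not the class
    return _SingletonMeta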
| python |
# ===================== exercise 4 =====================
'''
EXERCISE: Write a function that receives a collection object
and returns the largest number in that collection;
write another function that returns the smallest number in that collection.
'''
def maior(colecao):
maior_item = colecao[0]
for item in colecao:
if item > maior_item:
maior_item = item
return maior_item
def menor(colecao):
menor_item = colecao[0]
for item in colecao:
if item < menor_item:
menor_item = item
return menor_item
lista = [1, -2, 1.2, 87.2, 1289, -7, 0]
print(menor(lista))
print(maior(lista)) | python |
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for `btclib.curve` module."
import secrets
from typing import Dict
import pytest
from btclib.alias import INF, INFJ
from btclib.curve import CURVES, Curve, double_mult, mult, multi_mult, secp256k1
from btclib.curvegroup import _jac_from_aff
from btclib.numbertheory import mod_sqrt
from btclib.pedersen import second_generator
# FIXME Curve repr should use "dedbeef 00000000", not "0xdedbeef00000000"
# FIXME test curves when n>p
# test curves: very low cardinality
low_card_curves: Dict[str, Curve] = {}
# 13 % 4 = 1; 13 % 8 = 5
low_card_curves["ec13_11"] = Curve(13, 7, 6, (1, 1), 11, 1, False)
low_card_curves["ec13_19"] = Curve(13, 0, 2, (1, 9), 19, 1, False)
# 17 % 4 = 1; 17 % 8 = 1
low_card_curves["ec17_13"] = Curve(17, 6, 8, (0, 12), 13, 2, False)
low_card_curves["ec17_23"] = Curve(17, 3, 5, (1, 14), 23, 1, False)
# 19 % 4 = 3; 19 % 8 = 3
low_card_curves["ec19_13"] = Curve(19, 0, 2, (4, 16), 13, 2, False)
low_card_curves["ec19_23"] = Curve(19, 2, 9, (0, 16), 23, 1, False)
# 23 % 4 = 3; 23 % 8 = 7
low_card_curves["ec23_19"] = Curve(23, 9, 7, (5, 4), 19, 1, False)
low_card_curves["ec23_31"] = Curve(23, 5, 1, (0, 1), 31, 1, False)
all_curves: Dict[str, Curve] = {}
all_curves.update(low_card_curves)
all_curves.update(CURVES)
ec23_31 = low_card_curves["ec23_31"]
def test_exceptions() -> None:
# good curve
Curve(13, 0, 2, (1, 9), 19, 1, False)
with pytest.raises(ValueError, match="p is not prime: "):
Curve(15, 0, 2, (1, 9), 19, 1, False)
with pytest.raises(ValueError, match="negative a: "):
Curve(13, -1, 2, (1, 9), 19, 1, False)
with pytest.raises(ValueError, match="p <= a: "):
Curve(13, 13, 2, (1, 9), 19, 1, False)
with pytest.raises(ValueError, match="negative b: "):
Curve(13, 0, -2, (1, 9), 19, 1, False)
with pytest.raises(ValueError, match="p <= b: "):
Curve(13, 0, 13, (1, 9), 19, 1, False)
with pytest.raises(ValueError, match="zero discriminant"):
Curve(11, 7, 7, (1, 9), 19, 1, False)
err_msg = "Generator must a be a sequence\\[int, int\\]"
with pytest.raises(ValueError, match=err_msg):
Curve(13, 0, 2, (1, 9, 1), 19, 1, False) # type: ignore
with pytest.raises(ValueError, match="Generator is not on the curve"):
Curve(13, 0, 2, (2, 9), 19, 1, False)
with pytest.raises(ValueError, match="n is not prime: "):
Curve(13, 0, 2, (1, 9), 20, 1, False)
with pytest.raises(ValueError, match="n not in "):
Curve(13, 0, 2, (1, 9), 71, 1, False)
with pytest.raises(ValueError, match="INF point cannot be a generator"):
Curve(13, 0, 2, INF, 19, 1, False)
with pytest.raises(ValueError, match="n is not the group order: "):
Curve(13, 0, 2, (1, 9), 17, 1, False)
with pytest.raises(ValueError, match="invalid h: "):
Curve(13, 0, 2, (1, 9), 19, 2, False)
# n=p -> weak curve
# missing
with pytest.raises(UserWarning, match="weak curve"):
Curve(11, 2, 7, (6, 9), 7, 2, True)
def test_aff_jac_conversions() -> None:
for ec in all_curves.values():
# just a random point, not INF
q = 1 + secrets.randbelow(ec.n - 1)
Q = mult(q, ec.G, ec)
QJ = _jac_from_aff(Q)
assert Q == ec._aff_from_jac(QJ)
x_Q = ec._x_aff_from_jac(QJ)
assert Q[0] == x_Q
assert INF == ec._aff_from_jac(_jac_from_aff(INF))
# relevant for BIP340-Schnorr signature verification
assert not ec.has_square_y(INF)
with pytest.raises(ValueError, match="infinity point has no x-coordinate"):
ec._x_aff_from_jac(INFJ)
with pytest.raises(TypeError, match="not a point"):
ec.has_square_y("notapoint") # type: ignore
def test_add_double_aff() -> None:
"Test self-consistency of add and double in affine coordinates."
for ec in all_curves.values():
# add G and the infinity point
assert ec._add_aff(ec.G, INF) == ec.G
assert ec._add_aff(INF, ec.G) == ec.G
# double G
G2 = ec._add_aff(ec.G, ec.G)
assert G2 == ec._double_aff(ec.G)
# double INF
assert ec._add_aff(INF, INF) == INF
assert ec._double_aff(INF) == INF
# add G and minus G
assert ec._add_aff(ec.G, ec.negate(ec.G)) == INF
# add INF and "minus" INF
assert ec._add_aff(INF, ec.negate(INF)) == INF
def test_add_double_jac() -> None:
"Test self-consistency of add and double in Jacobian coordinates."
for ec in all_curves.values():
# add G and the infinity point
assert ec._jac_equality(ec._add_jac(ec.GJ, INFJ), ec.GJ)
assert ec._jac_equality(ec._add_jac(INFJ, ec.GJ), ec.GJ)
# double G
GJ2 = ec._add_jac(ec.GJ, ec.GJ)
assert ec._jac_equality(GJ2, ec._double_jac(ec.GJ))
# double INF
assert ec._jac_equality(ec._add_jac(INFJ, INFJ), INFJ)
assert ec._jac_equality(ec._double_jac(INFJ), INFJ)
# add G and minus G
assert ec._jac_equality(ec._add_jac(ec.GJ, ec.negate_jac(ec.GJ)), INFJ)
# add INF and "minus" INF
assert ec._jac_equality(ec._add_jac(INFJ, ec.negate_jac(INFJ)), INFJ)
def test_add_double_aff_jac() -> None:
"Test consistency between affine and Jacobian add/double methods."
for ec in all_curves.values():
# just a random point, not INF
q = 1 + secrets.randbelow(ec.n - 1)
Q = mult(q, ec.G, ec)
QJ = _jac_from_aff(Q)
# add Q and G
R = ec._add_aff(Q, ec.G)
RJ = ec._add_jac(QJ, ec.GJ)
assert R == ec._aff_from_jac(RJ)
# double Q
R = ec._double_aff(Q)
RJ = ec._double_jac(QJ)
assert R == ec._aff_from_jac(RJ)
assert R == ec._add_aff(Q, Q)
assert ec._jac_equality(RJ, ec._add_jac(QJ, QJ))
def test_ec_repr() -> None:
for ec in all_curves.values():
ec_repr = repr(ec)
if ec in low_card_curves.values() or ec.psize < 24:
ec_repr = ec_repr[:-1] + ", False)"
ec2 = eval(ec_repr)
assert str(ec) == str(ec2)
def test_is_on_curve() -> None:
for ec in all_curves.values():
with pytest.raises(ValueError, match="point must be a tuple"):
ec.is_on_curve("not a point") # type: ignore
with pytest.raises(ValueError, match="x-coordinate not in 0..p-1: "):
ec.y(ec.p)
# just a random point, not INF
q = 1 + secrets.randbelow(ec.n - 1)
Q = mult(q, ec.G, ec)
with pytest.raises(ValueError, match="y-coordinate not in 1..p-1: "):
ec.is_on_curve((Q[0], ec.p))
def test_negate() -> None:
for ec in all_curves.values():
# just a random point, not INF
q = 1 + secrets.randbelow(ec.n - 1)
Q = mult(q, ec.G, ec)
minus_Q = ec.negate(Q)
assert ec.add(Q, minus_Q) == INF
# Jacobian coordinates
QJ = _jac_from_aff(Q)
minus_QJ = ec.negate_jac(QJ)
assert ec._jac_equality(ec._add_jac(QJ, minus_QJ), INFJ)
# negate of INF is INF
minus_INF = ec.negate(INF)
assert minus_INF == INF
# negate of INFJ is INFJ
minus_INFJ = ec.negate_jac(INFJ)
assert ec._jac_equality(minus_INFJ, INFJ)
with pytest.raises(TypeError, match="not a point"):
ec.negate(ec.GJ) # type: ignore
with pytest.raises(TypeError, match="not a Jacobian point"):
ec.negate_jac(ec.G) # type: ignore
def test_symmetry() -> None:
"""Methods to break simmetry: quadratic residue, odd/even, low/high"""
for ec in low_card_curves.values():
# just a random point, not INF
q = 1 + secrets.randbelow(ec.n - 1)
Q = mult(q, ec.G, ec)
x_Q = Q[0]
y_odd = ec.y_odd(x_Q)
assert y_odd % 2 == 1
y_even = ec.y_odd(x_Q, False)
assert y_even % 2 == 0
assert y_even == ec.p - y_odd
y_low = ec.y_low(x_Q)
y_high = ec.y_low(x_Q, False)
assert y_low < y_high
assert y_high == ec.p - y_low
# compute quadratic residues
hasRoot = {1}
for i in range(2, ec.p):
hasRoot.add(i * i % ec.p)
if ec.p % 4 == 3:
quad_res = ec.y_quadratic_residue(x_Q)
not_quad_res = ec.y_quadratic_residue(x_Q, False)
# in this case only quad_res is a quadratic residue
assert quad_res in hasRoot
root = mod_sqrt(quad_res, ec.p)
assert quad_res == (root * root) % ec.p
root = ec.p - root
assert quad_res == (root * root) % ec.p
assert not_quad_res == ec.p - quad_res
assert not_quad_res not in hasRoot
with pytest.raises(ValueError, match="no root for "):
mod_sqrt(not_quad_res, ec.p)
else:
assert ec.p % 4 == 1
# cannot use y_quadratic_residue in this case
err_msg = "field prime is not equal to 3 mod 4: "
with pytest.raises(ValueError, match=err_msg):
ec.y_quadratic_residue(x_Q)
with pytest.raises(ValueError, match=err_msg):
ec.y_quadratic_residue(x_Q, False)
            # in this case, either both y_Q values are quadratic residues or neither is
neither = y_odd not in hasRoot and y_even not in hasRoot
both = y_odd in hasRoot and y_even in hasRoot
assert neither or both
if y_odd in hasRoot: # both have roots
root = mod_sqrt(y_odd, ec.p)
assert y_odd == (root * root) % ec.p
root = ec.p - root
assert y_odd == (root * root) % ec.p
root = mod_sqrt(y_even, ec.p)
assert y_even == (root * root) % ec.p
root = ec.p - root
assert y_even == (root * root) % ec.p
else:
err_msg = "no root for "
with pytest.raises(ValueError, match=err_msg):
mod_sqrt(y_odd, ec.p)
with pytest.raises(ValueError, match=err_msg):
mod_sqrt(y_even, ec.p)
# with the last curve
with pytest.raises(ValueError, match="low1high0 must be bool or 1/0"):
ec.y_low(x_Q, 2)
with pytest.raises(ValueError, match="odd1even0 must be bool or 1/0"):
ec.y_odd(x_Q, 2)
with pytest.raises(ValueError, match="quad_res must be bool or 1/0"):
ec.y_quadratic_residue(x_Q, 2)
@pytest.mark.fifth
def test_assorted_mult() -> None:
ec = ec23_31
H = second_generator(ec)
for k1 in range(-ec.n + 1, ec.n):
K1 = mult(k1, ec.G, ec)
for k2 in range(ec.n):
K2 = mult(k2, H, ec)
shamir = double_mult(k1, ec.G, k2, ec.G, ec)
assert shamir == mult(k1 + k2, ec.G, ec)
shamir = double_mult(k1, INF, k2, H, ec)
assert ec.is_on_curve(shamir)
assert shamir == K2
shamir = double_mult(k1, ec.G, k2, INF, ec)
assert ec.is_on_curve(shamir)
assert shamir == K1
shamir = double_mult(k1, ec.G, k2, H, ec)
assert ec.is_on_curve(shamir)
K1K2 = ec.add(K1, K2)
assert K1K2 == shamir
k3 = 1 + secrets.randbelow(ec.n - 1)
K3 = mult(k3, ec.G, ec)
K1K2K3 = ec.add(K1K2, K3)
assert ec.is_on_curve(K1K2K3)
boscoster = multi_mult([k1, k2, k3], [ec.G, H, ec.G], ec)
assert ec.is_on_curve(boscoster)
assert K1K2K3 == boscoster, k3
k4 = 1 + secrets.randbelow(ec.n - 1)
K4 = mult(k4, H, ec)
K1K2K3K4 = ec.add(K1K2K3, K4)
assert ec.is_on_curve(K1K2K3K4)
points = [ec.G, H, ec.G, H]
boscoster = multi_mult([k1, k2, k3, k4], points, ec)
assert ec.is_on_curve(boscoster)
assert K1K2K3K4 == boscoster, k4
assert K1K2K3 == multi_mult([k1, k2, k3, 0], points, ec)
assert K1K2 == multi_mult([k1, k2, 0, 0], points, ec)
assert K1 == multi_mult([k1, 0, 0, 0], points, ec)
assert INF == multi_mult([0, 0, 0, 0], points, ec)
err_msg = "mismatch between number of scalars and points: "
with pytest.raises(ValueError, match=err_msg):
multi_mult([k1, k2, k3, k4], [ec.G, H, ec.G], ec)
def test_double_mult() -> None:
H = second_generator(secp256k1)
G = secp256k1.G
# 0*G + 1*H
T = double_mult(1, H, 0, G)
assert T == H
T = multi_mult([1, 0], [H, G])
assert T == H
# 0*G + 2*H
exp = mult(2, H)
T = double_mult(2, H, 0, G)
assert T == exp
T = multi_mult([2, 0], [H, G])
assert T == exp
# 0*G + 3*H
exp = mult(3, H)
T = double_mult(3, H, 0, G)
assert T == exp
T = multi_mult([3, 0], [H, G])
assert T == exp
# 1*G + 0*H
T = double_mult(0, H, 1, G)
assert T == G
T = multi_mult([0, 1], [H, G])
assert T == G
# 2*G + 0*H
exp = mult(2, G)
T = double_mult(0, H, 2, G)
assert T == exp
T = multi_mult([0, 2], [H, G])
assert T == exp
# 3*G + 0*H
exp = mult(3, G)
T = double_mult(0, H, 3, G)
assert T == exp
T = multi_mult([0, 3], [H, G])
assert T == exp
# 0*G + 5*H
exp = mult(5, H)
T = double_mult(5, H, 0, G)
assert T == exp
T = multi_mult([5, 0], [H, G])
assert T == exp
# 0*G - 5*H
exp = mult(-5, H)
T = double_mult(-5, H, 0, G)
assert T == exp
T = multi_mult([-5, 0], [H, G])
assert T == exp
# 1*G - 5*H
exp = secp256k1.add(G, T)
T = double_mult(-5, H, 1, G)
assert T == exp
# FIXME
# T = multi_mult([-5, 1], [H, G])
# assert T == exp
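# Hedged cross-check sketch, using only names already imported above: Shamir's
# trick (double_mult) should agree with the naive two-multiplications-plus-add
# form on any curve; secp256k1 is just the default here.
def naive_double_mult(u, P, v, Q, ec=secp256k1):
    "u*P + v*Q computed the obvious way, for comparison with double_mult."
    return ec.add(mult(u, P, ec), mult(v, Q, ec))
# e.g. double_mult(5, secp256k1.G, 7, secp256k1.G) == naive_double_mult(5, secp256k1.G, 7, secp256k1.G)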
| python |
from gatco.response import json, text
from application.server import app
from application.database import db
from application.extensions import auth
from random import randint
from application.models.model import User, Role,TodoSchedule,TodoScheduleDetail,EmployeeRelTodo
# @app.route("/api/v1/todoschedule", methods=['POST']
# @app.route("/api/v1/test", methods=['GET'])
def pre_post_todo_schedule(request=None, Model=None, result=None, **kw):
param = request.json
currentUser = auth.current_user(request)
if (currentUser is None):
return json({"error_code":"SESSION_EXPIRED","error_message":"Hết phiên làm việc, vui lòng đăng nhập lại!"}, status=520)
if result['id'] is not None:
list_data_before_commit = []
start_time_working = result['start_time_working']
end_time_working = result['end_time_working']
todo_schedule_id = result['id']
for index in range(0,len(result["todoscheduledetail"])):
todoschedule_detail = TodoScheduleDetail.query.filter(TodoScheduleDetail.id == result['todoscheduledetail'][index]['id']).first()
todo_list = todoschedule_detail.todo
employee_list = todoschedule_detail.employee
for employee in employee_list:
for todo in todo_list:
data_before_commit = {'todo_schedule_id':todo_schedule_id,\
'employee_id':employee.id,\
'employee_name':employee.name,'employee' : employee,'todo_id':todo.id,\
'todo_name':todo.todo_name,'todo' : todo,\
'day_working':todoschedule_detail.day_working,\
'time_working':todoschedule_detail.time_working}
list_data_before_commit.append(data_before_commit)
group_data_before_commit = group_list_data_follow_employee(list_data_before_commit)
for data_commit in list_data_before_commit:
employee_assign = find_employee_be_assign(group_data_before_commit)
data_add = EmployeeRelTodo(
start_time_working=start_time_working,\
end_time_working = end_time_working,\
todo_schedule_id = todo_schedule_id,\
day_working=data_commit['day_working'],time_working=data_commit['time_working'],\
employee_id=data_commit['employee_id'],employee_name=data_commit['employee_name'],\
employee = data_commit['employee'],employee_assign_name = employee_assign.name,\
employee_assign_id = employee_assign.id,employee_assign=employee_assign,\
todo_id = data_commit['todo_id'],todo_name = data_commit['todo_name'],\
todo = data_commit['todo'])
group_data_before_commit = group_list_data_after_find(employee_assign,\
data_commit['todo'].level_diffcult,group_data_before_commit)
db.session.add(data_add)
db.session.commit()
# @app.route("/api/v1/test", methods=['POST'])
def group_list_data_follow_employee(list_data_before_commit):
# list_data_before_commit = request.json
group_data_before_commit = []
for data in list_data_before_commit:
check_id_match = False
for val in group_data_before_commit:
if val['employee'].id == data['employee'].id:
val['total_level_dif_todo'] += data['todo'].level_diffcult
check_id_match = True
if check_id_match is False:
group_data_before_commit.append({
'employee':data['employee'],
'total_level_dif_todo':data['todo'].level_diffcult
})
print('group_data_before_commit',group_data_before_commit)
return group_data_before_commit
def find_employee_be_assign(group_data_before_commit):
total_level_dif_todo_min = group_data_before_commit[0]['total_level_dif_todo']
employee_has_total_level_dif_todo_min = group_data_before_commit[0]['employee']
for val in group_data_before_commit:
if total_level_dif_todo_min > val['total_level_dif_todo']:
total_level_dif_todo_min = val['total_level_dif_todo']
employee_has_total_level_dif_todo_min = val['employee']
return employee_has_total_level_dif_todo_min
def group_list_data_after_find(employee_be_assign,level_diffcult,group_data_before_commit):
for data in group_data_before_commit:
if data['employee'].id == employee_be_assign.id:
data['total_level_dif_todo'] += level_diffcult
return group_data_before_commit
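# Illustrative walk-through (hypothetical lightweight objects, not the real
# SQLAlchemy models): the three helpers above form a greedy balancer -- each
# todo goes to the employee whose accumulated difficulty total is lowest.
#
#   from types import SimpleNamespace as NS
#   alice, bob = NS(id=1, name='alice'), NS(id=2, name='bob')
#   easy, hard = NS(id=10, level_diffcult=1), NS(id=11, level_diffcult=3)
#   rows = [{'employee': alice, 'todo': hard}, {'employee': bob, 'todo': easy}]
#   groups = group_list_data_follow_employee(rows)   # alice: 3, bob: 1
#   assignee = find_employee_be_assign(groups)       # -> bob (lowest total)
#   group_list_data_after_find(assignee, hard.level_diffcult, groups)  # bob: 1 + 3 = 4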
def pre_delete_todo_schedule(request=None, Model=None, result=None, **kw):
param = request.json
    if param['id'] is not None:  # PUT requests include param['id']; for POST it is None
employee_rel_todo_match = EmployeeRelTodo.query.filter(EmployeeRelTodo.todo_schedule_id == param['id']).delete()
else:
pass
def pre_put_todo_schedule(request=None, Model=None, result=None, **kw):
pre_delete_todo_schedule(request=request, Model=Model, result=result)
pre_post_todo_schedule(request=request, Model=Model, result=result) | python |
from django.conf.urls import url
from .views import message_list
from .views import message_read
urlpatterns = [
url(r'^list$', message_list),
url(r'^read/(?P<message_id>\d+)', message_read),
]
| python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AggregateConfigRuleArgs', 'AggregateConfigRule']
@pulumi.input_type
class AggregateConfigRuleArgs:
def __init__(__self__, *,
aggregate_config_rule_name: pulumi.Input[str],
aggregator_id: pulumi.Input[str],
config_rule_trigger_types: pulumi.Input[str],
resource_types_scopes: pulumi.Input[Sequence[pulumi.Input[str]]],
risk_level: pulumi.Input[int],
source_identifier: pulumi.Input[str],
source_owner: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
exclude_resource_ids_scope: Optional[pulumi.Input[str]] = None,
input_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
maximum_execution_frequency: Optional[pulumi.Input[str]] = None,
region_ids_scope: Optional[pulumi.Input[str]] = None,
resource_group_ids_scope: Optional[pulumi.Input[str]] = None,
tag_key_scope: Optional[pulumi.Input[str]] = None,
tag_value_scope: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a AggregateConfigRule resource.
:param pulumi.Input[str] aggregate_config_rule_name: The name of the rule.
:param pulumi.Input[str] aggregator_id: The Aggregator Id.
:param pulumi.Input[str] config_rule_trigger_types: The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] resource_types_scopes: Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
:param pulumi.Input[int] risk_level: The risk level of the resources that are not compliant with the rule. Valid values: `1`: critical `2`: warning `3`: info.
:param pulumi.Input[str] source_identifier: The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
:param pulumi.Input[str] source_owner: Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
:param pulumi.Input[str] description: The description of the rule.
        :param pulumi.Input[str] exclude_resource_ids_scope: The resource IDs to be excluded from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
:param pulumi.Input[Mapping[str, Any]] input_parameters: The settings map of the input parameters for the rule.
:param pulumi.Input[str] maximum_execution_frequency: The frequency of the compliance evaluations. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`. System default value is `TwentyFour_Hours` and valid when the `config_rule_trigger_types` is `ScheduledNotification`.
:param pulumi.Input[str] region_ids_scope: The rule monitors region IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[str] resource_group_ids_scope: The rule monitors resource group IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[str] tag_key_scope: The rule monitors the tag key, only applies to rules created based on managed rules.
:param pulumi.Input[str] tag_value_scope: The rule monitors the tag value, use with the TagKeyScope options. only applies to rules created based on managed rules.
"""
pulumi.set(__self__, "aggregate_config_rule_name", aggregate_config_rule_name)
pulumi.set(__self__, "aggregator_id", aggregator_id)
pulumi.set(__self__, "config_rule_trigger_types", config_rule_trigger_types)
pulumi.set(__self__, "resource_types_scopes", resource_types_scopes)
pulumi.set(__self__, "risk_level", risk_level)
pulumi.set(__self__, "source_identifier", source_identifier)
pulumi.set(__self__, "source_owner", source_owner)
if description is not None:
pulumi.set(__self__, "description", description)
if exclude_resource_ids_scope is not None:
pulumi.set(__self__, "exclude_resource_ids_scope", exclude_resource_ids_scope)
if input_parameters is not None:
pulumi.set(__self__, "input_parameters", input_parameters)
if maximum_execution_frequency is not None:
pulumi.set(__self__, "maximum_execution_frequency", maximum_execution_frequency)
if region_ids_scope is not None:
pulumi.set(__self__, "region_ids_scope", region_ids_scope)
if resource_group_ids_scope is not None:
pulumi.set(__self__, "resource_group_ids_scope", resource_group_ids_scope)
if tag_key_scope is not None:
pulumi.set(__self__, "tag_key_scope", tag_key_scope)
if tag_value_scope is not None:
pulumi.set(__self__, "tag_value_scope", tag_value_scope)
@property
@pulumi.getter(name="aggregateConfigRuleName")
def aggregate_config_rule_name(self) -> pulumi.Input[str]:
"""
The name of the rule.
"""
return pulumi.get(self, "aggregate_config_rule_name")
@aggregate_config_rule_name.setter
def aggregate_config_rule_name(self, value: pulumi.Input[str]):
pulumi.set(self, "aggregate_config_rule_name", value)
@property
@pulumi.getter(name="aggregatorId")
def aggregator_id(self) -> pulumi.Input[str]:
"""
The Aggregator Id.
"""
return pulumi.get(self, "aggregator_id")
@aggregator_id.setter
def aggregator_id(self, value: pulumi.Input[str]):
pulumi.set(self, "aggregator_id", value)
@property
@pulumi.getter(name="configRuleTriggerTypes")
def config_rule_trigger_types(self) -> pulumi.Input[str]:
"""
The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
"""
return pulumi.get(self, "config_rule_trigger_types")
@config_rule_trigger_types.setter
def config_rule_trigger_types(self, value: pulumi.Input[str]):
pulumi.set(self, "config_rule_trigger_types", value)
@property
@pulumi.getter(name="resourceTypesScopes")
def resource_types_scopes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
"""
return pulumi.get(self, "resource_types_scopes")
@resource_types_scopes.setter
def resource_types_scopes(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "resource_types_scopes", value)
@property
@pulumi.getter(name="riskLevel")
def risk_level(self) -> pulumi.Input[int]:
"""
The risk level of the resources that are not compliant with the rule. Valid values: `1`: critical `2`: warning `3`: info.
"""
return pulumi.get(self, "risk_level")
@risk_level.setter
def risk_level(self, value: pulumi.Input[int]):
pulumi.set(self, "risk_level", value)
@property
@pulumi.getter(name="sourceIdentifier")
def source_identifier(self) -> pulumi.Input[str]:
"""
The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
"""
return pulumi.get(self, "source_identifier")
@source_identifier.setter
def source_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "source_identifier", value)
@property
@pulumi.getter(name="sourceOwner")
def source_owner(self) -> pulumi.Input[str]:
"""
Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
"""
return pulumi.get(self, "source_owner")
@source_owner.setter
def source_owner(self, value: pulumi.Input[str]):
pulumi.set(self, "source_owner", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="excludeResourceIdsScope")
def exclude_resource_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
        The resource IDs to be excluded from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
"""
return pulumi.get(self, "exclude_resource_ids_scope")
@exclude_resource_ids_scope.setter
def exclude_resource_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exclude_resource_ids_scope", value)
@property
@pulumi.getter(name="inputParameters")
def input_parameters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The settings map of the input parameters for the rule.
"""
return pulumi.get(self, "input_parameters")
@input_parameters.setter
def input_parameters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "input_parameters", value)
@property
@pulumi.getter(name="maximumExecutionFrequency")
def maximum_execution_frequency(self) -> Optional[pulumi.Input[str]]:
"""
The frequency of the compliance evaluations. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`. System default value is `TwentyFour_Hours` and valid when the `config_rule_trigger_types` is `ScheduledNotification`.
"""
return pulumi.get(self, "maximum_execution_frequency")
@maximum_execution_frequency.setter
def maximum_execution_frequency(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "maximum_execution_frequency", value)
@property
@pulumi.getter(name="regionIdsScope")
def region_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors region IDs, separated by commas, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "region_ids_scope")
@region_ids_scope.setter
def region_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_ids_scope", value)
@property
@pulumi.getter(name="resourceGroupIdsScope")
def resource_group_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors resource group IDs, separated by commas, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "resource_group_ids_scope")
@resource_group_ids_scope.setter
def resource_group_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_ids_scope", value)
@property
@pulumi.getter(name="tagKeyScope")
def tag_key_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors the tag key, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "tag_key_scope")
@tag_key_scope.setter
def tag_key_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag_key_scope", value)
@property
@pulumi.getter(name="tagValueScope")
def tag_value_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors the tag value, use with the TagKeyScope options. only applies to rules created based on managed rules.
"""
return pulumi.get(self, "tag_value_scope")
@tag_value_scope.setter
def tag_value_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag_value_scope", value)
@pulumi.input_type
class _AggregateConfigRuleState:
def __init__(__self__, *,
aggregate_config_rule_name: Optional[pulumi.Input[str]] = None,
aggregator_id: Optional[pulumi.Input[str]] = None,
config_rule_trigger_types: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
exclude_resource_ids_scope: Optional[pulumi.Input[str]] = None,
input_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
maximum_execution_frequency: Optional[pulumi.Input[str]] = None,
region_ids_scope: Optional[pulumi.Input[str]] = None,
resource_group_ids_scope: Optional[pulumi.Input[str]] = None,
resource_types_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
risk_level: Optional[pulumi.Input[int]] = None,
source_identifier: Optional[pulumi.Input[str]] = None,
source_owner: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tag_key_scope: Optional[pulumi.Input[str]] = None,
tag_value_scope: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AggregateConfigRule resources.
:param pulumi.Input[str] aggregate_config_rule_name: The name of the rule.
:param pulumi.Input[str] aggregator_id: The Aggregator Id.
:param pulumi.Input[str] config_rule_trigger_types: The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
:param pulumi.Input[str] description: The description of the rule.
        :param pulumi.Input[str] exclude_resource_ids_scope: The resource IDs to be excluded from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
:param pulumi.Input[Mapping[str, Any]] input_parameters: The settings map of the input parameters for the rule.
:param pulumi.Input[str] maximum_execution_frequency: The frequency of the compliance evaluations. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`. System default value is `TwentyFour_Hours` and valid when the `config_rule_trigger_types` is `ScheduledNotification`.
:param pulumi.Input[str] region_ids_scope: The rule monitors region IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[str] resource_group_ids_scope: The rule monitors resource group IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[Sequence[pulumi.Input[str]]] resource_types_scopes: Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
:param pulumi.Input[int] risk_level: The risk level of the resources that are not compliant with the rule. Valid values: `1`: critical `2`: warning `3`: info.
:param pulumi.Input[str] source_identifier: The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
:param pulumi.Input[str] source_owner: Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
:param pulumi.Input[str] tag_key_scope: The rule monitors the tag key, only applies to rules created based on managed rules.
:param pulumi.Input[str] tag_value_scope: The rule monitors the tag value, use with the TagKeyScope options. only applies to rules created based on managed rules.
"""
if aggregate_config_rule_name is not None:
pulumi.set(__self__, "aggregate_config_rule_name", aggregate_config_rule_name)
if aggregator_id is not None:
pulumi.set(__self__, "aggregator_id", aggregator_id)
if config_rule_trigger_types is not None:
pulumi.set(__self__, "config_rule_trigger_types", config_rule_trigger_types)
if description is not None:
pulumi.set(__self__, "description", description)
if exclude_resource_ids_scope is not None:
pulumi.set(__self__, "exclude_resource_ids_scope", exclude_resource_ids_scope)
if input_parameters is not None:
pulumi.set(__self__, "input_parameters", input_parameters)
if maximum_execution_frequency is not None:
pulumi.set(__self__, "maximum_execution_frequency", maximum_execution_frequency)
if region_ids_scope is not None:
pulumi.set(__self__, "region_ids_scope", region_ids_scope)
if resource_group_ids_scope is not None:
pulumi.set(__self__, "resource_group_ids_scope", resource_group_ids_scope)
if resource_types_scopes is not None:
pulumi.set(__self__, "resource_types_scopes", resource_types_scopes)
if risk_level is not None:
pulumi.set(__self__, "risk_level", risk_level)
if source_identifier is not None:
pulumi.set(__self__, "source_identifier", source_identifier)
if source_owner is not None:
pulumi.set(__self__, "source_owner", source_owner)
if status is not None:
pulumi.set(__self__, "status", status)
if tag_key_scope is not None:
pulumi.set(__self__, "tag_key_scope", tag_key_scope)
if tag_value_scope is not None:
pulumi.set(__self__, "tag_value_scope", tag_value_scope)
@property
@pulumi.getter(name="aggregateConfigRuleName")
def aggregate_config_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the rule.
"""
return pulumi.get(self, "aggregate_config_rule_name")
@aggregate_config_rule_name.setter
def aggregate_config_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aggregate_config_rule_name", value)
@property
@pulumi.getter(name="aggregatorId")
def aggregator_id(self) -> Optional[pulumi.Input[str]]:
"""
The Aggregator Id.
"""
return pulumi.get(self, "aggregator_id")
@aggregator_id.setter
def aggregator_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aggregator_id", value)
@property
@pulumi.getter(name="configRuleTriggerTypes")
def config_rule_trigger_types(self) -> Optional[pulumi.Input[str]]:
"""
The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
"""
return pulumi.get(self, "config_rule_trigger_types")
@config_rule_trigger_types.setter
def config_rule_trigger_types(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_rule_trigger_types", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="excludeResourceIdsScope")
def exclude_resource_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
        The resource IDs to be excluded from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
"""
return pulumi.get(self, "exclude_resource_ids_scope")
@exclude_resource_ids_scope.setter
def exclude_resource_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exclude_resource_ids_scope", value)
@property
@pulumi.getter(name="inputParameters")
def input_parameters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The settings map of the input parameters for the rule.
"""
return pulumi.get(self, "input_parameters")
@input_parameters.setter
def input_parameters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "input_parameters", value)
@property
@pulumi.getter(name="maximumExecutionFrequency")
def maximum_execution_frequency(self) -> Optional[pulumi.Input[str]]:
"""
The frequency of the compliance evaluations. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`. System default value is `TwentyFour_Hours` and valid when the `config_rule_trigger_types` is `ScheduledNotification`.
"""
return pulumi.get(self, "maximum_execution_frequency")
@maximum_execution_frequency.setter
def maximum_execution_frequency(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "maximum_execution_frequency", value)
@property
@pulumi.getter(name="regionIdsScope")
def region_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors region IDs, separated by commas, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "region_ids_scope")
@region_ids_scope.setter
def region_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_ids_scope", value)
@property
@pulumi.getter(name="resourceGroupIdsScope")
def resource_group_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors resource group IDs, separated by commas, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "resource_group_ids_scope")
@resource_group_ids_scope.setter
def resource_group_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_ids_scope", value)
@property
@pulumi.getter(name="resourceTypesScopes")
def resource_types_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
"""
return pulumi.get(self, "resource_types_scopes")
@resource_types_scopes.setter
def resource_types_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "resource_types_scopes", value)
@property
@pulumi.getter(name="riskLevel")
def risk_level(self) -> Optional[pulumi.Input[int]]:
"""
The risk level of the resources that are not compliant with the rule. Valid values: `1`: critical `2`: warning `3`: info.
"""
return pulumi.get(self, "risk_level")
@risk_level.setter
def risk_level(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "risk_level", value)
@property
@pulumi.getter(name="sourceIdentifier")
def source_identifier(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
"""
return pulumi.get(self, "source_identifier")
@source_identifier.setter
def source_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_identifier", value)
@property
@pulumi.getter(name="sourceOwner")
def source_owner(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
"""
return pulumi.get(self, "source_owner")
@source_owner.setter
def source_owner(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_owner", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="tagKeyScope")
def tag_key_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors the tag key, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "tag_key_scope")
@tag_key_scope.setter
def tag_key_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag_key_scope", value)
@property
@pulumi.getter(name="tagValueScope")
def tag_value_scope(self) -> Optional[pulumi.Input[str]]:
"""
The rule monitors the tag value, use with the TagKeyScope options. only applies to rules created based on managed rules.
"""
return pulumi.get(self, "tag_value_scope")
@tag_value_scope.setter
def tag_value_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag_value_scope", value)
class AggregateConfigRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aggregate_config_rule_name: Optional[pulumi.Input[str]] = None,
aggregator_id: Optional[pulumi.Input[str]] = None,
config_rule_trigger_types: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
exclude_resource_ids_scope: Optional[pulumi.Input[str]] = None,
input_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
maximum_execution_frequency: Optional[pulumi.Input[str]] = None,
region_ids_scope: Optional[pulumi.Input[str]] = None,
resource_group_ids_scope: Optional[pulumi.Input[str]] = None,
resource_types_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
risk_level: Optional[pulumi.Input[int]] = None,
source_identifier: Optional[pulumi.Input[str]] = None,
source_owner: Optional[pulumi.Input[str]] = None,
tag_key_scope: Optional[pulumi.Input[str]] = None,
tag_value_scope: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Cloud Config Aggregate Config Rule resource.
For information about Cloud Config Aggregate Config Rule and how to use it, see [What is Aggregate Config Rule](https://help.aliyun.com/).
> **NOTE:** Available in v1.124.0+.
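        ## Example Usage
        A minimal sketch (the Python package alias, aggregator id, and managed-rule identifier below are illustrative placeholders, not validated values):
        ```python
        import pulumi_alicloud as alicloud
        example = alicloud.cfg.AggregateConfigRule("example",
            aggregate_config_rule_name="example-rule",
            aggregator_id="ca-xxxxxxxxxxxxxxxx",
            config_rule_trigger_types="ConfigurationItemChangeNotification",
            resource_types_scopes=["ACS::ECS::Instance"],
            risk_level=1,
            source_identifier="ecs-instances-in-vpc",
            source_owner="ALIYUN")
        ```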
## Import
Cloud Config Aggregate Config Rule can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cfg/aggregateConfigRule:AggregateConfigRule example <aggregator_id>:<config_rule_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] aggregate_config_rule_name: The name of the rule.
:param pulumi.Input[str] aggregator_id: The Aggregator Id.
:param pulumi.Input[str] config_rule_trigger_types: The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
:param pulumi.Input[str] description: The description of the rule.
        :param pulumi.Input[str] exclude_resource_ids_scope: The resource IDs to be excluded from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
:param pulumi.Input[Mapping[str, Any]] input_parameters: The settings map of the input parameters for the rule.
:param pulumi.Input[str] maximum_execution_frequency: The frequency of the compliance evaluations. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`. System default value is `TwentyFour_Hours` and valid when the `config_rule_trigger_types` is `ScheduledNotification`.
:param pulumi.Input[str] region_ids_scope: The rule monitors region IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[str] resource_group_ids_scope: The rule monitors resource group IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[Sequence[pulumi.Input[str]]] resource_types_scopes: Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
:param pulumi.Input[int] risk_level: The risk level of the resources that are not compliant with the rule. Valid values: `1`: critical `2`: warning `3`: info.
:param pulumi.Input[str] source_identifier: The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
:param pulumi.Input[str] source_owner: Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
:param pulumi.Input[str] tag_key_scope: The rule monitors the tag key, only applies to rules created based on managed rules.
:param pulumi.Input[str] tag_value_scope: The rule monitors the tag value, use with the TagKeyScope options. only applies to rules created based on managed rules.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AggregateConfigRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Cloud Config Aggregate Config Rule resource.
For information about Cloud Config Aggregate Config Rule and how to use it, see [What is Aggregate Config Rule](https://help.aliyun.com/).
> **NOTE:** Available in v1.124.0+.
## Import
Cloud Config Aggregate Config Rule can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cfg/aggregateConfigRule:AggregateConfigRule example <aggregator_id>:<config_rule_id>
```
:param str resource_name: The name of the resource.
:param AggregateConfigRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AggregateConfigRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aggregate_config_rule_name: Optional[pulumi.Input[str]] = None,
aggregator_id: Optional[pulumi.Input[str]] = None,
config_rule_trigger_types: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
exclude_resource_ids_scope: Optional[pulumi.Input[str]] = None,
input_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
maximum_execution_frequency: Optional[pulumi.Input[str]] = None,
region_ids_scope: Optional[pulumi.Input[str]] = None,
resource_group_ids_scope: Optional[pulumi.Input[str]] = None,
resource_types_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
risk_level: Optional[pulumi.Input[int]] = None,
source_identifier: Optional[pulumi.Input[str]] = None,
source_owner: Optional[pulumi.Input[str]] = None,
tag_key_scope: Optional[pulumi.Input[str]] = None,
tag_value_scope: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AggregateConfigRuleArgs.__new__(AggregateConfigRuleArgs)
if aggregate_config_rule_name is None and not opts.urn:
raise TypeError("Missing required property 'aggregate_config_rule_name'")
__props__.__dict__["aggregate_config_rule_name"] = aggregate_config_rule_name
if aggregator_id is None and not opts.urn:
raise TypeError("Missing required property 'aggregator_id'")
__props__.__dict__["aggregator_id"] = aggregator_id
if config_rule_trigger_types is None and not opts.urn:
raise TypeError("Missing required property 'config_rule_trigger_types'")
__props__.__dict__["config_rule_trigger_types"] = config_rule_trigger_types
__props__.__dict__["description"] = description
__props__.__dict__["exclude_resource_ids_scope"] = exclude_resource_ids_scope
__props__.__dict__["input_parameters"] = input_parameters
__props__.__dict__["maximum_execution_frequency"] = maximum_execution_frequency
__props__.__dict__["region_ids_scope"] = region_ids_scope
__props__.__dict__["resource_group_ids_scope"] = resource_group_ids_scope
if resource_types_scopes is None and not opts.urn:
raise TypeError("Missing required property 'resource_types_scopes'")
__props__.__dict__["resource_types_scopes"] = resource_types_scopes
if risk_level is None and not opts.urn:
raise TypeError("Missing required property 'risk_level'")
__props__.__dict__["risk_level"] = risk_level
if source_identifier is None and not opts.urn:
raise TypeError("Missing required property 'source_identifier'")
__props__.__dict__["source_identifier"] = source_identifier
if source_owner is None and not opts.urn:
raise TypeError("Missing required property 'source_owner'")
__props__.__dict__["source_owner"] = source_owner
__props__.__dict__["tag_key_scope"] = tag_key_scope
__props__.__dict__["tag_value_scope"] = tag_value_scope
__props__.__dict__["status"] = None
super(AggregateConfigRule, __self__).__init__(
'alicloud:cfg/aggregateConfigRule:AggregateConfigRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
aggregate_config_rule_name: Optional[pulumi.Input[str]] = None,
aggregator_id: Optional[pulumi.Input[str]] = None,
config_rule_trigger_types: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
exclude_resource_ids_scope: Optional[pulumi.Input[str]] = None,
input_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
maximum_execution_frequency: Optional[pulumi.Input[str]] = None,
region_ids_scope: Optional[pulumi.Input[str]] = None,
resource_group_ids_scope: Optional[pulumi.Input[str]] = None,
resource_types_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
risk_level: Optional[pulumi.Input[int]] = None,
source_identifier: Optional[pulumi.Input[str]] = None,
source_owner: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tag_key_scope: Optional[pulumi.Input[str]] = None,
tag_value_scope: Optional[pulumi.Input[str]] = None) -> 'AggregateConfigRule':
"""
Get an existing AggregateConfigRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] aggregate_config_rule_name: The name of the rule.
:param pulumi.Input[str] aggregator_id: The Aggregator Id.
:param pulumi.Input[str] config_rule_trigger_types: The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
:param pulumi.Input[str] description: The description of the rule.
        :param pulumi.Input[str] exclude_resource_ids_scope: The resource IDs to be excluded from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
:param pulumi.Input[Mapping[str, Any]] input_parameters: The settings map of the input parameters for the rule.
:param pulumi.Input[str] maximum_execution_frequency: The frequency of the compliance evaluations. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`. System default value is `TwentyFour_Hours` and valid when the `config_rule_trigger_types` is `ScheduledNotification`.
:param pulumi.Input[str] region_ids_scope: The rule monitors region IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[str] resource_group_ids_scope: The rule monitors resource group IDs, separated by commas, only applies to rules created based on managed rules.
:param pulumi.Input[Sequence[pulumi.Input[str]]] resource_types_scopes: Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
:param pulumi.Input[int] risk_level: The risk level of the resources that are not compliant with the rule. Valid values: `1`: critical `2`: warning `3`: info.
:param pulumi.Input[str] source_identifier: The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
:param pulumi.Input[str] source_owner: Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
        :param pulumi.Input[str] tag_key_scope: The tag key monitored by the rule. Only applies to rules created based on managed rules.
        :param pulumi.Input[str] tag_value_scope: The tag value monitored by the rule, used together with `tag_key_scope`. Only applies to rules created based on managed rules.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AggregateConfigRuleState.__new__(_AggregateConfigRuleState)
__props__.__dict__["aggregate_config_rule_name"] = aggregate_config_rule_name
__props__.__dict__["aggregator_id"] = aggregator_id
__props__.__dict__["config_rule_trigger_types"] = config_rule_trigger_types
__props__.__dict__["description"] = description
__props__.__dict__["exclude_resource_ids_scope"] = exclude_resource_ids_scope
__props__.__dict__["input_parameters"] = input_parameters
__props__.__dict__["maximum_execution_frequency"] = maximum_execution_frequency
__props__.__dict__["region_ids_scope"] = region_ids_scope
__props__.__dict__["resource_group_ids_scope"] = resource_group_ids_scope
__props__.__dict__["resource_types_scopes"] = resource_types_scopes
__props__.__dict__["risk_level"] = risk_level
__props__.__dict__["source_identifier"] = source_identifier
__props__.__dict__["source_owner"] = source_owner
__props__.__dict__["status"] = status
__props__.__dict__["tag_key_scope"] = tag_key_scope
__props__.__dict__["tag_value_scope"] = tag_value_scope
return AggregateConfigRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="aggregateConfigRuleName")
def aggregate_config_rule_name(self) -> pulumi.Output[str]:
"""
The name of the rule.
"""
return pulumi.get(self, "aggregate_config_rule_name")
@property
@pulumi.getter(name="aggregatorId")
def aggregator_id(self) -> pulumi.Output[str]:
"""
The Aggregator Id.
"""
return pulumi.get(self, "aggregator_id")
@property
@pulumi.getter(name="configRuleTriggerTypes")
def config_rule_trigger_types(self) -> pulumi.Output[str]:
"""
The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
"""
return pulumi.get(self, "config_rule_trigger_types")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="excludeResourceIdsScope")
def exclude_resource_ids_scope(self) -> pulumi.Output[Optional[str]]:
"""
        The IDs of resources excluded from monitoring, separated by commas. Only applies to rules created based on managed rules; for custom rules this field is empty.
"""
return pulumi.get(self, "exclude_resource_ids_scope")
@property
@pulumi.getter(name="inputParameters")
def input_parameters(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
The settings map of the input parameters for the rule.
"""
return pulumi.get(self, "input_parameters")
@property
@pulumi.getter(name="maximumExecutionFrequency")
def maximum_execution_frequency(self) -> pulumi.Output[str]:
"""
The frequency of the compliance evaluations. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`. System default value is `TwentyFour_Hours` and valid when the `config_rule_trigger_types` is `ScheduledNotification`.
"""
return pulumi.get(self, "maximum_execution_frequency")
@property
@pulumi.getter(name="regionIdsScope")
def region_ids_scope(self) -> pulumi.Output[Optional[str]]:
"""
The rule monitors region IDs, separated by commas, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "region_ids_scope")
@property
@pulumi.getter(name="resourceGroupIdsScope")
def resource_group_ids_scope(self) -> pulumi.Output[Optional[str]]:
"""
The rule monitors resource group IDs, separated by commas, only applies to rules created based on managed rules.
"""
return pulumi.get(self, "resource_group_ids_scope")
@property
@pulumi.getter(name="resourceTypesScopes")
def resource_types_scopes(self) -> pulumi.Output[Sequence[str]]:
"""
Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
"""
return pulumi.get(self, "resource_types_scopes")
@property
@pulumi.getter(name="riskLevel")
def risk_level(self) -> pulumi.Output[int]:
"""
The risk level of the resources that are not compliant with the rule. Valid values: `1`: critical `2`: warning `3`: info.
"""
return pulumi.get(self, "risk_level")
@property
@pulumi.getter(name="sourceIdentifier")
def source_identifier(self) -> pulumi.Output[str]:
"""
The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
"""
return pulumi.get(self, "source_identifier")
@property
@pulumi.getter(name="sourceOwner")
def source_owner(self) -> pulumi.Output[str]:
"""
Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
"""
return pulumi.get(self, "source_owner")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
return pulumi.get(self, "status")
@property
@pulumi.getter(name="tagKeyScope")
def tag_key_scope(self) -> pulumi.Output[Optional[str]]:
"""
        The tag key monitored by the rule. Only applies to rules created based on managed rules.
"""
return pulumi.get(self, "tag_key_scope")
@property
@pulumi.getter(name="tagValueScope")
def tag_value_scope(self) -> pulumi.Output[Optional[str]]:
"""
        The tag value monitored by the rule, used together with `tag_key_scope`. Only applies to rules created based on managed rules.
"""
return pulumi.get(self, "tag_value_scope")
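# --- Illustrative usage (a minimal sketch, not part of the generated code) ---
# Shows how an existing rule could be brought into a program with the ``get``
# method defined above. The logical name and the provider ID are placeholders,
# and the exact ID format is an assumption, not something stated in this file.
#
# import pulumi
# import pulumi_alicloud as alicloud
#
# existing = alicloud.cfg.AggregateConfigRule.get(
#     "imported-rule",
#     id="<aggregator_id>:<config_rule_id>",  # placeholder provider ID
# )
# pulumi.export("ruleRiskLevel", existing.risk_level)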
| python |
import setuptools, os
PACKAGE_NAME = ''
VERSION = ''
AUTHOR = ''
EMAIL = ''
DESCRIPTION = ''
GITHUB_URL = ''
parent_dir = os.path.dirname(os.path.realpath(__file__))
import_name = os.path.basename(parent_dir)
with open(f'{parent_dir}/README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name=PACKAGE_NAME,
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
url=GITHUB_URL,
packages=[
f'{import_name}',
f'{import_name}.models',
f'{import_name}.utils',
],
package_data={'': []},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[],
) | python |
import inspect
from pathlib import PurePath
from typing import List, Dict, Callable, Optional, Union, Tuple
from .. import util
from .calculator import Calculator
from .config_function import ConfigFunction
from .config_item import ConfigItem
from .parser import Parser, PropertyKeys
from ... import logger
from ...logger import Text
_CONFIG_PRINT_LEN = 40
def _is_class_method(func: Callable):
if not callable(func):
return False
spec: inspect.Signature = inspect.signature(func)
params: List[inspect.Parameter] = list(spec.parameters.values())
if len(params) != 1:
return False
p = params[0]
if p.kind != p.POSITIONAL_OR_KEYWORD:
return False
return p.name == 'self'
class Configs:
r"""
You should sub-class this class to create your own configurations
"""
_calculators: Dict[str, List[ConfigFunction]] = {}
_evaluators: Dict[str, List[ConfigFunction]] = {}
def __init_subclass__(cls, **kwargs):
configs = {}
for k, v in cls.__annotations__.items():
if not Parser.is_valid(k):
continue
configs[k] = ConfigItem(k,
True, v,
k in cls.__dict__, cls.__dict__.get(k, None))
evals = []
for k, v in cls.__dict__.items():
if not Parser.is_valid(k):
continue
if _is_class_method(v):
evals.append((k, v))
continue
configs[k] = ConfigItem(k,
k in cls.__annotations__, cls.__annotations__.get(k, None),
True, v)
for e in evals:
cls._add_eval_function(e[1], e[0], 'default')
for k, v in configs.items():
setattr(cls, k, v)
@classmethod
def _add_config_function(cls,
func: Callable,
name: Union[ConfigItem, List[ConfigItem]],
option: str, *,
is_append: bool
):
if PropertyKeys.calculators not in cls.__dict__:
cls._calculators = {}
calc = ConfigFunction(func, config_names=name, option_name=option, is_append=is_append)
if type(calc.config_names) == str:
config_names = [calc.config_names]
else:
config_names = calc.config_names
for n in config_names:
if n not in cls._calculators:
cls._calculators[n] = []
cls._calculators[n].append(calc)
@classmethod
def _add_eval_function(cls,
func: Callable,
name: str,
option: str):
if PropertyKeys.evaluators not in cls.__dict__:
cls._evaluators = {}
calc = ConfigFunction(func,
config_names=name,
option_name=option,
is_append=False,
check_string_names=False)
if name not in cls._evaluators:
cls._evaluators[name] = []
cls._evaluators[name].append(calc)
@classmethod
def calc(cls, name: Union[ConfigItem, List[ConfigItem]] = None,
option: str = None, *,
is_append: bool = False):
r"""
Use this as a decorator to register configuration options.
Arguments:
name: the configuration item or a list of items.
                If it is a list of items the function should return a
                tuple.
option (str, optional): name of the option.
If not provided it will be derived from the
function name.
"""
def wrapper(func: Callable):
cls._add_config_function(func, name, option, is_append=is_append)
return func
return wrapper
@classmethod
def list(cls, name: str = None):
return cls.calc(name, f"_{util.random_string()}", is_append=True)
@classmethod
def set_hyperparams(cls, *args: ConfigItem, is_hyperparam=True):
r"""
Identifies configuration as (or not) hyper-parameters
Arguments:
*args: list of configurations
is_hyperparam (bool, optional): whether the provided configuration
items are hyper-parameters. Defaults to ``True``.
"""
if PropertyKeys.hyperparams not in cls.__dict__:
cls._hyperparams = {}
for h in args:
cls._hyperparams[h.key] = is_hyperparam
@classmethod
def aggregate(cls, name: Union[ConfigItem, any], option: str,
*args: Tuple[Union[ConfigItem, any], str]):
r"""
Aggregate configs
Arguments:
name: name of the aggregate
option: aggregate option
*args: list of options
"""
assert args
if PropertyKeys.aggregates not in cls.__dict__:
cls._aggregates = {}
if name.key not in cls._aggregates:
cls._aggregates[name.key] = {}
pairs = {p[0].key: p[1] for p in args}
cls._aggregates[name.key][option] = pairs
class ConfigProcessor:
def __init__(self, configs, values: Dict[str, any] = None):
self.parser = Parser(configs, values)
self.calculator = Calculator(configs=configs,
options=self.parser.options,
evals=self.parser.evals,
types=self.parser.types,
values=self.parser.values,
list_appends=self.parser.list_appends,
aggregate_parent=self.parser.aggregate_parent)
def __call__(self, run_order: Optional[List[Union[List[str], str]]] = None):
self.calculator(run_order)
@staticmethod
def __is_primitive(value):
if value is None:
return True
if type(value) == str:
return True
if type(value) == int:
return True
if type(value) == bool:
return True
if type(value) == list and all([ConfigProcessor.__is_primitive(v) for v in value]):
return True
if type(value) == dict and all([ConfigProcessor.__is_primitive(v) for v in value.values()]):
return True
return False
@staticmethod
def __to_yaml(value):
if ConfigProcessor.__is_primitive(value):
return value
else:
return ConfigProcessor.__to_str(value)
@staticmethod
def __to_str(value):
if str(value) == ConfigProcessor.__default_repr(value):
if value.__class__.__module__ == '__main__':
return value.__class__.__name__
else:
return f"{value.__class__.__module__}.{value.__class__.__name__}"
else:
return str(value)
def save(self, configs_path: PurePath):
orders = {k: i for i, k in enumerate(self.calculator.topological_order)}
configs = {}
for k, v in self.parser.types.items():
configs[k] = {
'name': k,
'type': str(v),
'value': self.__to_yaml(self.parser.values.get(k, None)),
'order': orders.get(k, -1),
'options': list(self.parser.options.get(k, {}).keys()),
'computed': self.__to_yaml(getattr(self.calculator.configs, k, None)),
'is_hyperparam': self.parser.hyperparams.get(k, None),
'is_explicitly_specified': (k in self.parser.explicitly_specified)
}
with open(str(configs_path), "w") as file:
file.write(util.yaml_dump(configs))
@staticmethod
def __default_repr(value):
return '<%s.%s object at %s>' % (
value.__class__.__module__,
value.__class__.__name__,
hex(id(value))
)
def get_hyperparams(self):
order = self.calculator.topological_order.copy()
hyperparams = {}
for key in order:
if (self.parser.hyperparams.get(key, False) or
key in self.parser.explicitly_specified):
value = getattr(self.calculator.configs, key, None)
if key in self.parser.options:
value = self.parser.values[key]
if type(value) not in {int, float, str}:
value = ConfigProcessor.__to_str(value)
hyperparams[key] = value
return hyperparams
def __print_config(self, key, *, value=None, option=None,
other_options=None, is_ignored=False, is_list=False):
parts = ['\t']
if is_ignored:
parts.append((key, Text.subtle))
return parts
is_hyperparam = self.parser.hyperparams.get(key, None)
if is_hyperparam is None:
is_hyperparam = key in self.parser.explicitly_specified
if is_hyperparam:
parts.append((key, [Text.key, Text.highlight]))
else:
parts.append((key, Text.key))
if is_list:
parts.append(('[]', Text.subtle))
parts.append((' = ', Text.subtle))
if other_options is None:
other_options = []
if value is not None:
value_str = ConfigProcessor.__to_str(value)
value_str = value_str.replace('\n', '')
if len(value_str) < _CONFIG_PRINT_LEN:
parts.append((f"{value_str}", Text.value))
else:
parts.append((f"{value_str[:_CONFIG_PRINT_LEN]}...", Text.value))
parts.append('\t')
if option is not None:
if len(other_options) == 0:
parts.append((option, Text.subtle))
else:
parts.append((option, Text.none))
if value is None and option is None:
parts.append(("None", Text.value))
parts.append('\t')
if len(other_options) > 0:
parts.append(('\t[', Text.subtle))
for i, opt in enumerate(other_options):
if i > 0:
parts.append((', ', Text.subtle))
parts.append(opt)
parts.append((']', Text.subtle))
return parts
def print(self):
order = self.calculator.topological_order.copy()
order.sort()
added = set(order)
ignored = set()
for k in self.parser.types:
if k not in added:
added.add(k)
order.append(k)
ignored.add(k)
logger.log("Configs:", Text.heading)
for k in order:
computed = getattr(self.calculator.configs, k, None)
if k in ignored:
parts = self.__print_config(k, is_ignored=True)
elif k in self.parser.list_appends:
parts = self.__print_config(k,
value=computed,
is_list=True)
elif k in self.parser.options:
v = self.parser.values[k]
opts = self.parser.options[k]
lst = list(opts.keys())
if v in opts:
lst.remove(v)
else:
v = None
parts = self.__print_config(k,
value=computed,
option=v,
other_options=lst)
else:
parts = self.__print_config(k, value=computed)
logger.log(parts)
logger.log()
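# --- Illustrative usage (a minimal sketch, not part of this module) ---
# The config class, option function and values below are hypothetical; they
# only show how Configs, the calc decorator and ConfigProcessor fit together.
#
# class MyConfigs(Configs):
#     batch_size: int = 32
#     optimizer: str
#
# @MyConfigs.calc(MyConfigs.optimizer)
# def sgd(c: MyConfigs):
#     # the option name 'sgd' is derived from the function name
#     return 'sgd(batch_size=%d)' % c.batch_size
#
# processor = ConfigProcessor(MyConfigs(), {'optimizer': 'sgd'})
# processor()        # run the calculators in topological order
# processor.print()  # pretty-print the resolved configuration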
| python |
class InitError(Exception):
pass
class SendMsgError(Exception):
pass
class GetAccessTokenError(Exception):
pass
class GetUserTicketError(Exception):
pass
class APIValueError(Exception):
pass
class UploadTypeError(Exception):
pass
class UploadError(Exception):
pass
class SuiteTicketError(Exception):
pass
class CacheNotExistError(Exception):
pass
| python |
from jinja2 import Environment, FileSystemLoader
from http_server import Content, web_server
file_loader = FileSystemLoader('templates')
env = Environment(loader=file_loader)
data = {
"name": "HMTMCSE",
"age": 30,
"register_id": 12,
}
template = env.get_template('page.html')
output = template.render(data=data)
# Custom web server to view the output in a browser
Content.html = output
web_server.serve_forever()
# Open http://localhost:1212/ in the browser
| python |
"""
Tests of the Block class
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from neo.core.block import Block
class TestBlock(unittest.TestCase):
def test_init(self):
b = Block(name='a block')
self.assertEqual(b.name, 'a block')
self.assertEqual(b.file_origin, None)
if __name__ == "__main__":
unittest.main()
| python |
from django.shortcuts import render,redirect
from django.http import HttpResponse, JsonResponse
from django.http.response import HttpResponseRedirect
from django.contrib.auth import authenticate, logout
from django.contrib.auth import login as save_login
from django.contrib.auth.forms import AuthenticationForm as AF
from django import forms
from .form import *
from main.models import *
from django.contrib.auth.models import User
from post.form import Post
from post.form import Comments
from django.views.generic import TemplateView
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.contrib import messages
from django.template.loader import render_to_string
import redis
#import rcache
from django.core.cache import cache
from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT
#CACHE_TTL = getattr(settings, 'CACHE_TTL',DEFAULT_TIMEOUT)
redis_instance = redis.StrictRedis(host=settings.REDIS_HOST,port=settings.REDIS_PORT, db=0)
# Create your views here.
def login(request):
if request.method=="POST":
form = AF(request,data=request.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(username=username,password=password)
if user is not None:
save_login(request,user)
messages.success(request, "Logged in")
return redirect('/home/')
form = AF()
return render(request=request,template_name="login.html",context={"form":form})
def signup(request):
form = NewUserForm(request.POST or None)
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
save_login(request,user)
return redirect('/')
form = NewUserForm
return render(request=request,template_name="signup.html",context={"form":form})
def logout_usernow(request):
logout(request)
messages.success(request, "Logged Out!!!")
return redirect('/login/')
class home(TemplateView):
template_name = 'home.html'
def get( self, request):
#if 'postt' in redis_instance.keys("*"):
#posts = cache.get('postt')
# posts[key.decode("utf-8")] = redis_instance.get(key)
# args = {'form':form, 'posts':posts}
# return render(request,self.template_name,args)
#else:
#if not redisintance :
form = PostForm()
posts = Post.objects.all()#[:5]
#value = readytoset(posts)
args = {'form':form,'posts':posts}
return render(request,self.template_name,args)
def post(self,request):
form = PostForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
form = form.save(commit=False)
form.user = request.user
form.save()
form = PostForm()
args = {'form': form}
return redirect('/home/')
class profile(TemplateView):
template_name = 'profile.html'
def get(self,request):
posts = Post.objects.filter(user = request.user)
args = {'posts':posts}
print(posts)
return render(request,self.template_name,args)
class search(TemplateView):
template_name = 'search.html'
def get(self,request):
if request.method == 'GET':
query = request.GET.get('q')
submitbutton = request.GET.get('submit')
if query is not None:
lookups = Q(username=query)
results = User.objects.filter(username=query)
context = {'results':results,'submitbutton':submitbutton}
return render(request,self.template_name,context)
#else:
return render(request,self.template_name)
#else:
#return render(request,self.template_name)
class postshown(TemplateView):
template_name = 'post.html'
def get( self, request):
form = CommentForm()
button = False
idd = int(request.GET.get('postid'))
posts = Post.objects.get(post_id=idd)
cmt = Comments.objects.filter(post_id=idd)
comment = Comments.objects.filter(post_id=idd).count()
like_count = LikeDislike.objects.filter(post_id=idd).filter(value='1').count()
print(like_count)
dislike_count = LikeDislike.objects.filter(post_id=idd).filter(value='2').count()
if request.user == posts.user:
button = True
args = {'form':form, 'posts':posts,'cmt':cmt,'comment':comment,'like_count':like_count,'dislike_count':dislike_count,'button':button}
return render(request,self.template_name,args)
def post(self,request):
form = CommentForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
form =form.save(commit=False)
form.user = request.user
idd = int(request.GET.get('postid'))
form.post_id = idd
print(form.comment)
form.save()
form = CommentForm()
args = {'form':form}
return render(request,self.template_name,args)
def like(request):
postid = int(request.POST.get('postid'))
is_liked = False
if LikeDislike.objects.filter(post_id=postid,user=request.user):
if LikeDislike.objects.filter(post_id=postid,user=request.user,value='1'):
obj = LikeDislike.objects.filter(post_id=postid).filter(user=request.user).filter(value='1')
obj.delete()
else:
            # queryset.update() writes directly to the database and returns a row
            # count, so there is no model instance to call save() on afterwards
            LikeDislike.objects.filter(post_id=postid).filter(user=request.user).update(value='1')
else:
obj = LikeDislike(user=request.user,post_id=postid,value='1')
obj.save()
is_liked = True
like_count = LikeDislike.objects.filter(post_id=postid).filter(user=request.user).filter(value='1').count()
args = {'is_liked':is_liked,'like_count':like_count}
if request.is_ajax():
html = render_to_string('like_section.html',args,request=request)
return JsonResponse({'form':html})
def dislike(request):
postid = int(request.POST.get('postid'))
is_liked = False
if LikeDislike.objects.filter(post_id=postid,user=request.user):
if LikeDislike.objects.filter(post_id=postid,user=request.user,value='2'):
obj = LikeDislike.objects.filter(post_id=postid).filter(user=request.user).filter(value='2')
obj.delete()
else:
            # queryset.update() writes directly to the database and returns a row
            # count, so there is no model instance to call save() on afterwards
            LikeDislike.objects.filter(post_id=postid).filter(user=request.user).update(value='2')
else:
obj = LikeDislike(user=request.user,post_id=postid,value='2')
obj.save()
is_liked = True
dislike_count = LikeDislike.objects.filter(post_id=postid).filter(user=request.user).filter(value='2').count()
args = {'is_liked':is_liked,'dislike_count':dislike_count}
if request.is_ajax():
html = render_to_string('like_section.html',args,request=request)
return JsonResponse({'form':html})
def delete(request):
postid = int(request.GET.get('postid'))
uid = request.user.id
like = LikeDislike.objects.filter(post_id=postid)
like.delete()
comment = Comments.objects.filter(post_id=postid)
comment.delete()
post = Post.objects.get(post_id=postid)
post.delete()
return redirect('/home/')
| python |
import autoparse
@autoparse.program
def main(host, port=1234, *, verbose=False, lol: [1, 2, 3] = 1):
"""Do something.
Positional arguments:
host The hostname to connect to.
port The port to connect to.
Optional arguments:
--verbose Print more status messages.
--lol One of 1, 2 or 3.
"""
print('host:', repr(host))
print('port:', repr(port))
print('verbose:', repr(verbose))
print('lol:', repr(lol))
if __name__ == '__main__':
main()
| python |
#Program 4.5 = Phone bill with three price tiers
minutos = int(input("How many minutes did you use this month: "))
if minutos < 200:
    preco = 0.20
else:
    if minutos < 400:
        preco = 0.18
    else:
        preco = 0.15
print(f"You will pay this month: R$ {minutos * preco:6.2f}")
| python |
"""
Script reads in monthly data reanalysis (ERA-Interim or ERAi) on grid of
1.9 x 2.5 (latitude,longitude). Data was interpolated on the model grid using
a bilinear interpolation scheme.
Notes
-----
Author : Zachary Labe
Date : 19 February 2019
Usage
-----
[1] readDataR(variable,level,detrend,sliceeq)
[2] readDataRMeans(variable)
"""
def readDataR(variable,level,detrend,sliceeq):
"""
Function reads monthly data from ERA-Interim
Parameters
----------
variable : string
variable name to read
level : string
Height of variable (surface or profile)
detrend : binary
True/False whether to remove a linear trend at all grid points
sliceeq : binary
True/False whether to slice at the equator for only northern hemisphere
Returns
-------
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
time : 1d numpy array
standard time (months since 1979-1-1, 00:00:00)
lev : 1d numpy array
levels (17)
var : 4d numpy array or 5d numpy array
[year,month,lat,lon] or [year,month,level,lat,lon]
Usage
-----
lat,lon,time,lev,var = readDataR(variable,level,detrend)
"""
print('\n>>> Using readDataR function! \n')
###########################################################################
###########################################################################
###########################################################################
### Import modules
import numpy as np
from netCDF4 import Dataset
import calc_Detrend as DT
### Declare knowns
months = 12
years = np.arange(1979,2016+1,1)
if variable == 'SNC':
years = np.arange(1979,2015+1,1)
### Directory for experiments (remote server - Seley)
directorydata = '/seley/zlabe/ERAI/'
###########################################################################
###########################################################################
###########################################################################
### Read in lat,lon,time from known file
if level == 'surface': # 3d variables
if variable == 'SNC': # Snow data only through 2015!
dataq = Dataset(directorydata + 'SNC_1979-2016.nc') # 1979-2015
time = dataq.variables['time'][:]
lev = 'surface'
lat = dataq.variables['latitude'][:]
lon = dataq.variables['longitude'][:]
dataq.close()
else:
dataq = Dataset(directorydata + 'T2M_1979-2016.nc')
time = dataq.variables['time'][:]
lev = 'surface'
lat = dataq.variables['latitude'][:]
lon = dataq.variables['longitude'][:]
dataq.close()
###########################################################################
###########################################################################
if sliceeq == False:
### Create empty variable
varq = np.empty((time.shape[0],lat.shape[0],lon.shape[0]))
varq[:,:,:] = np.nan ### fill with nans
elif sliceeq == True:
### Slice for Northern Hemisphere
latq = np.where(lat >= 0)[0]
lat = lat[latq]
### Create empty variable
varq = np.empty((time.shape[0],lat.shape[0],lon.shape[0]))
varq[:,:,:] = np.nan ### fill with nans
print('SLICE for Northern Hemisphere!')
else:
print(ValueError('Selected wrong slicing!'))
###########################################################################
###########################################################################
elif level == 'profile': # 4d variables
dataq = Dataset(directorydata + 'TEMP_1979-2016.nc')
time = dataq.variables['time'][:]
lev = dataq.variables['level'][:]
lat = dataq.variables['latitude'][:]
lon = dataq.variables['longitude'][:]
dataq.close()
###########################################################################
###########################################################################
if sliceeq == False:
### Create empty variable
varq = np.empty((time.shape[0],lev.shape[0],
lat.shape[0],lon.shape[0]))
varq[:,:,:,:] = np.nan ### fill with nans
elif sliceeq == True:
### Slice for Northern Hemisphere
latq = np.where(lat >= 0)[0]
lat = lat[latq]
### Create empty variable
varq = np.empty((time.shape[0],lev.shape[0],
lat.shape[0],lon.shape[0]))
varq[:,:,:,:] = np.nan ### fill with nans
print('SLICE for Northern Hemisphere!')
else:
print(ValueError('Selected wrong slicing!'))
###########################################################################
###########################################################################
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
###########################################################################
###########################################################################
### Path name for file for each ensemble member
filename = directorydata + variable + '_1979-2016.nc'
###########################################################################
###########################################################################
### Read in Data
if sliceeq == False:
if level == 'surface': # 3d variables
data = Dataset(filename,'r')
            varq[:,:,:] = data.variables[variable][:]
            data.close()
            print('Completed: Read data %s!' % (variable))
elif level == 'profile': # 4d variables
data = Dataset(filename,'r')
varq[:,:,:,:] = data.variables[variable][:]
data.close()
print('Completed: Read data %s!' % (variable))
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
###########################################################################
###########################################################################
elif sliceeq == True:
if level == 'surface': # 3d variables
data = Dataset(filename,'r')
varq[:,:,:] = data.variables[variable][:,latq,:]
data.close()
print('Completed: Read data %s!' % (variable))
elif level == 'profile': # 4d variables
data = Dataset(filename,'r')
varq[:,:,:,:] = data.variables[variable][:,:,latq,:]
data.close()
print('Completed: Read data %s!' % (variable))
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
###########################################################################
###########################################################################
###########################################################################
### Reshape to split years and months
if level == 'surface': # 3d variables
var = np.reshape(varq,(varq.shape[0]//12,months,
lat.shape[0],lon.shape[0]))
elif level == 'profile': # 4d variables
var = np.reshape(varq,(varq.shape[0]//12,months,lev.shape[0],
lat.shape[0],lon.shape[0]))
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
print('\nCompleted: Reshaped %s array!' % (variable))
### Save computer memory
del varq
###########################################################################
###########################################################################
###########################################################################
### Convert units
if variable in ('TEMP','T2M'):
var = var - 273.15 # Kelvin to degrees Celsius
print('Completed: Changed units (K to C)!')
elif variable == 'SWE':
var = var*1000. # Meters to Millimeters
print('Completed: Changed units (m to mm)!')
elif variable in ('Z1000','Z850','Z700','Z500','Z300','Z200','Z50','Z30','THICK'):
var = var/9.80665 # m^2/s^2 divide by gravity m/s^2 to m
print('Completed: Changed units (m^2/s^2 to m)!')
elif variable == 'SLP':
var = var/100. # Pa to hPa
print('Completed: Changed units (Pa to hPa)!')
###########################################################################
###########################################################################
###########################################################################
### Missing data (fill value to nans)
var[np.where(var <= -8.99999987e+33)] = np.nan
var[np.where(var >= 8.99999987e+33)] = np.nan
print('Completed: Filled missing data to nan!')
### Detrend data if turned on
if detrend == True:
var = DT.detrendDataR(var,level,'monthly')
print('\n>>> Completed: Finished readDataR function!')
return lat,lon,time,lev,var
###############################################################################
def readDataRMeans(variable):
"""
Function reads monthly data from ERA-Interim. Average
is taken over the polar cap (65-90, 0-360) and weighted
by cosine of latitude. Variables are all 4d.
Parameters
----------
variable : string
variable name to read
Returns
-------
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
lev : 1d numpy array
levels (17)
var : 3d numpy array
[year,month,lev]
Usage
-----
    lat,lon,lev,var = readDataRMeans(variable)
"""
print('\n>>> Using readDataRMeans function! \n')
###########################################################################
###########################################################################
###########################################################################
### Import modules
import numpy as np
from netCDF4 import Dataset
import calc_Detrend as DT
### Declare knowns
months = 12
years = np.arange(1979,2016+1,1)
### Directory for experiments (remote server - Seley)
directorydata = '/seley/zlabe/ERAI/'
###########################################################################
###########################################################################
dataq = Dataset(directorydata + 'TEMP_1979-2016.nc')
time = dataq.variables['time'][:]
lev = dataq.variables['level'][:]
lat = dataq.variables['latitude'][:]
lon = dataq.variables['longitude'][:]
dataq.close()
###########################################################################
###########################################################################
varq = np.empty((time.shape[0],lev.shape[0]))
varq[:,:] = np.nan ### fill with nans
###########################################################################
###########################################################################
### Path name for file for each ensemble member
filename = directorydata + variable + '_mean_1979-2016.nc'
###########################################################################
###########################################################################
### Read in Data
data = Dataset(filename,'r')
varq[:,:] = data.variables[variable][:]
data.close()
###########################################################################
###########################################################################
###########################################################################
### Reshape to split years and months
var = np.reshape(varq,(varq.shape[0]//12,months,lev.shape[0]))
### Save computer memory
del varq
###########################################################################
###########################################################################
###########################################################################
### Convert units
if variable in ('TEMP','T2M'):
var = var - 273.15 # Kelvin to degrees Celsius
print('Completed: Changed units (K to C)!')
elif variable == 'SWE':
var = var*1000. # Meters to Millimeters
print('Completed: Changed units (m to mm)!')
elif variable in ('Z1000','Z850','Z700','Z500','Z300','Z200','Z50','Z30',
'GEOP'):
var = var/9.80665 # m^2/s^2 divide by gravity m/s^2 to m
print('Completed: Changed units (m^2/s^2 to m)!')
elif variable == 'SLP':
var = var/100. # Pa to hPa
print('Completed: Changed units (Pa to hPa)!')
###########################################################################
###########################################################################
###########################################################################
### Missing data (fill value to nans)
var[np.where(var <= -8.99999987e+33)] = np.nan
var[np.where(var >= 8.99999987e+33)] = np.nan
print('Completed: Filled missing data to nan!')
print('\n>>> Completed: Finished readDataRMeans function!')
return lat,lon,lev,var
#### Test function -- no need to use
#variable = 'Z500'
#level = 'surface'
#detrend = True
#sliceeq = False
#
#lat,lon,time,lev,var = readDataR(variable,level,detrend,sliceeq)
#lat,lon,lev,var = readDataRMeans('TEMP')
| python |
from argparse import ArgumentParser
from dataclasses import dataclass
from typing import Optional
from environs import Env
@dataclass
class Config:
SUPERUSER: str
DATABASE_PATH: str
PBKDF2_PWD_HASHER_HASH_FUNC: str
PBKDF2_PWD_HASHER_ITERATIONS: int
PBKDF2_PWD_HASHER_SALT_LENGTH: int
MAX_YEARS_OF_STATISTICS: int
LOGGING_CONFIG: dict
WEB_SECRET_KEY: str
WEB_RUN_ON_HOST: str
WEB_RUN_ON_PORT: int
TGBOT_TOKEN: Optional[str]
TGBOT_UPDATES_LIMIT: int
TGBOT_UPDATES_TIMEOUT: int
def init_config(env_path: Optional[str] = None) -> Config:
env = Env()
env.read_env(env_path)
with env.prefixed("MYFUNDS_"):
return Config(
SUPERUSER=env.str("SUPERUSER"),
DATABASE_PATH=env.str("DATABASE_PATH"),
PBKDF2_PWD_HASHER_HASH_FUNC=env.str("PBKDF2_PWD_HASHER_HASH_FUNC"),
PBKDF2_PWD_HASHER_ITERATIONS=env.int("PBKDF2_PWD_HASHER_ITERATIONS"),
PBKDF2_PWD_HASHER_SALT_LENGTH=env.int("PBKDF2_PWD_HASHER_SALT_LENGTH"),
MAX_YEARS_OF_STATISTICS=env.int("MAX_YEARS_OF_STATISTICS", 5),
LOGGING_CONFIG=env.json("LOGGING_CONFIG", "{}"),
WEB_SECRET_KEY=env.str("WEB_SECRET_KEY"),
WEB_RUN_ON_HOST=env.str("WEB_RUN_ON_HOST", "localhost"),
WEB_RUN_ON_PORT=env.int("WEB_RUN_ON_PORT", 8080),
TGBOT_TOKEN=env.str("TGBOT_TOKEN", None),
TGBOT_UPDATES_LIMIT=env.int("TGBOT_UPDATES_LIMIT", 10),
TGBOT_UPDATES_TIMEOUT=env.int("TGBOT_UPDATES_TIMEOUT", 20),
)
def init_env_parser() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument(
"--env", type=str, default=None, help="environment configuration file path"
)
return parser
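# --- Illustrative usage (a minimal sketch, not part of this module) ---
# The values below are placeholders; the variable names follow the MYFUNDS_
# prefix read above.
#
# Example .env file:
#   MYFUNDS_SUPERUSER=admin
#   MYFUNDS_DATABASE_PATH=/data/myfunds.sqlite3
#   MYFUNDS_PBKDF2_PWD_HASHER_HASH_FUNC=sha256
#   MYFUNDS_PBKDF2_PWD_HASHER_ITERATIONS=100000
#   MYFUNDS_PBKDF2_PWD_HASHER_SALT_LENGTH=16
#   MYFUNDS_WEB_SECRET_KEY=change-me
#
# Typical wiring in an entry point:
#   parser = init_env_parser()
#   args = parser.parse_args()
#   config = init_config(args.env)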
| python |
import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
#model = MMDataParallel(model, device_ids=[0])
example = torch.rand(2, 3, 320, 320)
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("model_cpp.pt")
if __name__ == '__main__':
main()
| python |
#! coding: utf-8
from django.utils.translation import ugettext_lazy as _, get_language
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.core.cache import cache
from log.models import AuditLog
from datetime import datetime
from django.db import models
from utils.models import Generic, Country
from error_reporting.models import ErrorReport
from main import choices
DECS = 'DeCS'
GENERAL = 'general'
PENDING = 0
# Auxiliar table Type of source [318]
class SourceType(Generic):
class Meta:
verbose_name = _("source type")
verbose_name_plural = _("source types")
acronym = models.CharField(_("Acronym"), max_length=25, blank=True)
language = models.CharField(_("Language"), max_length=10, choices=choices.LANGUAGES_CHOICES)
name = models.CharField(_("Name"), max_length=255)
def get_translations(self):
translation_list = ["%s^%s" % (self.language, self.name.strip())]
translation = SourceTypeLocal.objects.filter(source_type=self.id)
if translation:
other_languages = ["%s^%s" % (trans.language, trans.name.strip()) for trans in translation]
translation_list.extend(other_languages)
return translation_list
def __str__(self):
lang_code = get_language()
cache_id = "main_sourcetype-{}-{}".format(lang_code, self.id)
sourcetype_local = cache.get(cache_id)
if not sourcetype_local:
translation = SourceTypeLocal.objects.filter(source_type=self.id, language=lang_code)
if translation:
sourcetype_local = translation[0].name
else:
sourcetype_local = self.name
cache.set(cache_id, sourcetype_local, None)
return sourcetype_local
class SourceTypeLocal(models.Model):
class Meta:
verbose_name = _("Translation")
verbose_name_plural = _("Translations")
source_type = models.ForeignKey(SourceType, verbose_name=_("Source type"), on_delete=models.CASCADE)
language = models.CharField(_("language"), max_length=10, choices=choices.LANGUAGES_CHOICES)
name = models.CharField(_("name"), max_length=255)
# Auxiliar table Language of source [317]
class SourceLanguage(Generic):
class Meta:
verbose_name = _("Source language")
verbose_name_plural = _("Source languages")
acronym = models.CharField(_("Acronym"), max_length=25, blank=True)
language = models.CharField(_("Language"), max_length=10, choices=choices.LANGUAGES_CHOICES)
name = models.CharField(_("Name"), max_length=255)
def get_translations(self):
translation_list = ["%s^%s" % (self.language, self.name.strip())]
translation = SourceLanguageLocal.objects.filter(source_language=self.id)
if translation:
other_languages = ["%s^%s" % (trans.language, trans.name.strip()) for trans in translation]
translation_list.extend(other_languages)
return translation_list
def __str__(self):
lang_code = get_language()
cache_id = "main_sourcelanguage-{}-{}".format(lang_code, self.id)
sourcelanguage_local = cache.get(cache_id)
if not sourcelanguage_local:
translation = SourceLanguageLocal.objects.filter(source_language=self.id, language=lang_code)
if translation:
sourcelanguage_local = translation[0].name
else:
sourcelanguage_local = self.name
cache.set(cache_id, sourcelanguage_local, None)
return sourcelanguage_local
class SourceLanguageLocal(models.Model):
class Meta:
verbose_name = _("Translation")
verbose_name_plural = _("Translations")
source_language = models.ForeignKey(SourceLanguage, verbose_name=_("Source language"), on_delete=models.CASCADE)
language = models.CharField(_("Language"), max_length=10, choices=choices.LANGUAGES_CHOICES)
name = models.CharField(_("Name"), max_length=255)
# Auxiliar table LIS type [302]
class ThematicArea(Generic):
class Meta:
verbose_name = _("Thematic area")
verbose_name_plural = _("Thematic areas")
acronym = models.CharField(_("Acronym"), max_length=25, blank=True)
language = models.CharField(_("Language"), max_length=10, choices=choices.LANGUAGES_CHOICES)
name = models.CharField(_("Name"), max_length=255)
def get_translations(self):
translation_list = ["%s^%s" % (self.language, self.name.strip())]
translation = ThematicAreaLocal.objects.filter(thematic_area=self.id)
if translation:
other_languages = ["%s^%s" % (trans.language, trans.name.strip()) for trans in translation]
translation_list.extend(other_languages)
return translation_list
def __str__(self):
lang_code = get_language()
cache_id = "thematicarea-{}-{}".format(lang_code, self.id)
thematicarea_name_local = cache.get(cache_id)
if not thematicarea_name_local:
translation = ThematicAreaLocal.objects.filter(thematic_area=self.id, language=lang_code)
if translation:
thematicarea_name_local = translation[0].name
else:
thematicarea_name_local = self.name
cache.set(cache_id, thematicarea_name_local, None)
return thematicarea_name_local
class ThematicAreaLocal(models.Model):
class Meta:
verbose_name = _("Translation")
verbose_name_plural = _("Translations")
thematic_area = models.ForeignKey(ThematicArea, verbose_name=_("Thematic area"), on_delete=models.CASCADE)
language = models.CharField(_("Language"), max_length=10, choices=choices.LANGUAGES_CHOICES)
name = models.CharField(_("Name"), max_length=255)
# Relation resource -- thematic areas/ Field lis type (302)
class ResourceThematic(Generic, AuditLog):
STATUS_CHOICES = (
(0, _('Pending')),
(1, _('Admitted')),
(2, _('Refused')),
)
class Meta:
verbose_name = _("Thematic area")
verbose_name_plural = _("Thematic areas")
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType, related_name='thematics', on_delete=models.PROTECT)
content_object = GenericForeignKey('content_type', 'object_id')
thematic_area = models.ForeignKey(ThematicArea, related_name='+', on_delete=models.PROTECT)
status = models.SmallIntegerField(_('Status'), choices=STATUS_CHOICES, default=PENDING, blank=True)
def __str__(self):
return str(self.thematic_area.name)
# DeCS descriptors table
class Descriptor(Generic, AuditLog):
STATUS_CHOICES = (
(0, _('Pending')),
(1, _('Admitted')),
(2, _('Refused')),
)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType, related_name='descriptors', on_delete=models.PROTECT)
content_object = GenericForeignKey('content_type', 'object_id')
text = models.CharField(_('Descriptor'), max_length=255, blank=True)
code = models.CharField(_('Code'), max_length=50, blank=True)
status = models.SmallIntegerField(_('Status'), choices=STATUS_CHOICES, default=PENDING)
primary = models.BooleanField(_('Primary?'), default=False)
def __str__(self):
return str(self.text)
# Keywords table
class Keyword(Generic, AuditLog):
STATUS_CHOICES = (
(0, _('Pending')),
(1, _('Admitted')),
(2, _('Refused')),
)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType, related_name='keywords', on_delete=models.PROTECT)
content_object = GenericForeignKey('content_type', 'object_id')
text = models.CharField(_('Text'), max_length=255, blank=True)
status = models.SmallIntegerField(_('Status'), choices=STATUS_CHOICES, default=PENDING)
user_recomendation = models.BooleanField(_('User recomendation?'), default=False)
def __str__(self):
return str(self.text)
# Main table
class Resource(Generic, AuditLog):
class Meta:
verbose_name = _("Resource")
verbose_name_plural = _("Resources")
STATUS_CHOICES = (
(0, _('Pending')),
(1, _('Admitted')),
(2, _('Refused')),
(3, _('Deleted')),
)
# status (399)
status = models.SmallIntegerField(_('Status'), choices=STATUS_CHOICES, null=True, default=0)
# title (311)
title = models.CharField(_('Title'), max_length=510, blank=False, help_text=_("Transcribe as it appears on the internet resource. If there is no title, provide a brief, simple but explanatory title"))
# link (351)
link = models.TextField(_('Link'), blank=False)
# originator (313)
    originator = models.TextField(_('Originator'), blank=False, help_text=_("Institutional or personal name of the party responsible for the internet resource. Ex. Brazilian Society for Dental Research"))
# originator_location (314)
originator_location = models.ManyToManyField(Country, verbose_name=_('Originator location'), blank=False)
# author (315)
author = models.TextField(_('Authors'), blank=True, help_text=_("Enter one per line. Only filled if different from the originator of the resource"))
# language of resource (317)
source_language = models.ManyToManyField(SourceLanguage, verbose_name=_("Source language"), blank=False)
# source type (318)
source_type = models.ManyToManyField(SourceType, verbose_name=_("Source type"), blank=False)
# abstract (319)
abstract = models.TextField(_("Abstract"), blank=False, help_text=_("Include information on the content and operation of the internet resource"))
# time period (341)
time_period_textual = models.CharField(_('Temporal range'), max_length=255, blank=True)
# objective (361)
objective = models.TextField(_('Objective'), blank=True)
# responsible cooperative center
cooperative_center_code = models.CharField(_('Cooperative center'), max_length=55, blank=True)
# relations
error_reports = GenericRelation(ErrorReport)
thematics = GenericRelation(ResourceThematic)
descriptors = GenericRelation(Descriptor)
def get_fields(self):
return [(field.verbose_name, field.value_to_string(self)) for field in Resource._meta.fields]
def __str__(self):
return str(self.title)
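# --- Illustrative usage (a minimal sketch, not part of this module) ---
# Shows how the GenericRelation fields above can be used from a shell or a
# view. All field values are placeholders, and any extra fields required by
# the Generic/AuditLog base classes are assumed to be handled elsewhere.
#
# resource = Resource.objects.create(
#     title="Example internet resource",
#     link="http://example.org",
#     originator="Example Society",
#     abstract="Short description of the resource",
# )
# resource.descriptors.create(text="Dentistry", status=1)
# resource.thematics.create(thematic_area=ThematicArea.objects.first(), status=1)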
| python |
from nltk.tree import *
#import hobbs
dp1 = Tree('dp', [Tree('d', ['the']), Tree('np', ['dog'])])
dp2 = Tree('dp', [Tree('d', ['the']), Tree('np', ['cat'])])
vp = Tree('vp', [Tree('v', ['chased']), dp2])
tree = Tree('s', [dp1, vp])
#print(tree)
t=tree.treepositions()
#print(t)
#for i in tree:
# print('\n',i,'\n')
#tr=Tree.fromstring('(S(NP(DT the)(N castle)(PP in(NP (N camelot))))(VP remained(NP (DT the)(N residence(PP of(NP (DT the)(N king)))))(PP until(NP (CD 536)(WRB when(SBAR (-NONE- 0)(S (NP he)(VP moved (NP it)(PP to(NP (N london)))))))))))')
#tr.pretty_print()
trr=Tree.fromstring("(S(NP I)(VP(VP (V shot) (NP (Det an) (N elephant)))(PP (P in) (NP (Det my) (N pajamas)))))")
for pos in trr.treepositions():
if trr[pos] == 'PRP him':
print (pos)
#print(trr.productions())
trr.pretty_print()
tree7 =Tree.fromstring('(S(NP (DT the) (N castle) (PP in (NP (N camelot))))(VP remained(NP (DT the) (N residence (PP of (NP (DT the) (N king)))))(PP until(NP (CD 536) (WRB when (S (NP he) (VP moved (NP it) (PP to (NP (N london))))))))))')
print(tree7.productions())
tree7.pretty_print()
#for pos in tree7.treepositions():
#if tree7[pos] == 'he':
#print (pos)
#(S(NP (DT the)(JJ little)(JJ yellow)(NN dog))(VBD barked)(IN at)(NP (DT the)(NN cat)))
| python |
"""
Depth first traversal includes 3 traversing methods:
1. Inorder
2. Preorder
3. Postorder
"""
from typing import Optional
from binary_tree_node import Node # type: ignore
def inorder(root: Optional[Node]) -> None:
"""
In inorder traversal we recursively traverse in following manner:
1. We traverse the left subtree
2. We visit the current node
3. We traverse the right subtree
"""
if not root:
return None
inorder(root.left)
print(root.data, end=" ")
inorder(root.right)
def preorder(root: Optional[Node]) -> None:
"""
In preorder traversal we recursively traverse in the following manner:
1. Visit the current node
2. Traverse the left subtree
3. Traverse the right subtree
"""
if not root:
return None
print(root.data, end=" ")
preorder(root.left)
preorder(root.right)
def postorder(root: Optional[Node]) -> None:
"""
In postorder traversal we recursively traverse in the following manner:
1. Traverse the left subtree
2. Traverse the right subtree
3. Visit the current node
"""
if not root:
return None
postorder(root.left)
postorder(root.right)
print(root.data, end=" ")
if __name__ == "__main__":
"""
1
2 3
4 5
"""
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
print("inorder traversal:")
inorder(root)
print("\npreorder traversal:")
preorder(root)
print("\npostorder traversal:")
postorder(root)
print()
| python |
import unittest
from .solution import FreqStack
from ..utils import proxyCall
class TestCase(unittest.TestCase):
def setUp(self):
self.stack = FreqStack()
def test_example_one(self):
allCmds = ["push","push","push","push","push","push","pop","pop","pop","pop"]
allArgs = [[5],[7],[5],[7],[4],[5],[],[],[],[]]
output = [proxyCall(self.stack, cmd, args)
for cmd, args in zip(allCmds, allArgs)]
self.assertListEqual(output, [None,None,None,None,None,None,5,7,5,4])
| python |
num = 1
num = 2
num = -3
num = 4
num = 5
| python |
"""
Purpose: Stackoverflow answer
Date created: 2021-01-09
URL: https://stackoverflow.com/questions/65643483/bokeh-plot-is-empty/65643667#65643667
Contributor(s):
Mark M.
"""
import re
import pandas as pd
import bokeh
sample = """
2018-10-22 7468.629883 2.282400e+09 0.263123 NASDAQ
2018-10-23 7437.540039 2.735820e+09 -0.416272 NASDAQ
2018-10-24 7108.399902 2.935550e+09 -4.425390 NASDAQ
2018-10-25 7318.339844 2.741810e+09 2.953406 NASDAQ
2018-10-26 7167.209961 2.964780e+09 -2.065084 NASDAQ
""".strip()
lines = [re.split(r"\s+", line) for line in sample.split("\n")]
df = pd.DataFrame(data=lines)
df.columns = ["Date","Adj Close","Volume","Day_Perc_Change","Name"]
df.loc[: , "Date"] = pd.to_datetime(df.loc[: , "Date"], infer_datetime_format = True)
df.loc[: , "Adj Close"] = df.loc[: , "Adj Close"].astype(float) | python |
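# --- Illustrative continuation (a minimal sketch, not from the original answer) ---
# One way to plot the cleaned DataFrame with Bokeh; the column names match the
# DataFrame built above, everything else here is an assumption.
#
# from bokeh.plotting import figure, show
#
# p = figure(x_axis_type="datetime", title="Adjusted close")
# p.line(df["Date"], df["Adj Close"], line_width=2)
# show(p)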
import os
import traceback
from copy import deepcopy
from time import sleep
import django_rq
import kubernetes.stream as stream
import websocket
from django.utils import timezone
from kubernetes import client, config
from rq import get_current_job
from api.models import KubePod, ModelRun
from master.settings import MPI_COMMAND
MAX_POD_RETRIES = 20
service_template = client.V1Service(
api_version="v1",
kind="Service",
metadata=client.V1ObjectMeta(
name="",
labels={
"app": "mlbench",
"chart": "mlbench-2.0.0",
"component": "worker",
"release": os.environ.get("MLBENCH_KUBE_RELEASENAME"),
"heritage": "Helm",
"set": "",
},
),
spec=client.V1ServiceSpec(
selector={
"app": "mlbench",
"release": os.environ.get("MLBENCH_KUBE_RELEASENAME"),
"set": "",
},
cluster_ip="None",
ports=[client.V1ServicePort(name="dummy", port=22)],
),
)
statefulset_template = client.V1StatefulSet(
api_version="apps/v1",
kind="StatefulSet",
metadata=client.V1ObjectMeta(
name="",
labels={
"app": "mlbench",
"chart": "mlbench-2.0.0",
"component": "worker",
"release": os.environ.get("MLBENCH_KUBE_RELEASENAME"),
"heritage": "Helm",
"set": "",
},
),
spec=client.V1StatefulSetSpec(
replicas=0,
selector=client.V1LabelSelector(
match_labels={
"app": "mlbench",
"release": os.environ.get("MLBENCH_KUBE_RELEASENAME"),
"set": "",
}
),
service_name="",
pod_management_policy="Parallel",
update_strategy=client.V1StatefulSetUpdateStrategy(type="RollingUpdate"),
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={
"app": "mlbench",
"chart": "mlbench-2.0.0",
"component": "worker",
"release": os.environ.get("MLBENCH_KUBE_RELEASENAME"),
"heritage": "Helm",
"set": "",
}
),
spec=client.V1PodSpec(
service_account_name="mlbench-worker-sa",
affinity=client.V1Affinity(
pod_anti_affinity=client.V1PodAntiAffinity(
required_during_scheduling_ignored_during_execution=[
client.V1PodAffinityTerm(
label_selector=client.V1LabelSelector(
match_expressions=[
client.V1LabelSelectorRequirement(
key="component",
operator="In",
values=["worker"],
)
]
),
topology_key="kubernetes.io/hostname",
)
]
)
),
containers=[
client.V1Container(
name="",
image="",
image_pull_policy="Always",
stdin=True,
tty=True,
ports=[
client.V1ContainerPort(
name="ssh",
container_port=22,
host_port=16166,
protocol="TCP",
)
],
resources=client.V1ResourceRequirements(
limits={"cpu": "1", "nvidia.com/gpu": "0"}
),
volume_mounts=[
client.V1VolumeMount(
name="mlbench-ssh-key", mount_path="/ssh-key/root"
)
],
security_context=client.V1SecurityContext(privileged=True),
)
],
volumes=[
client.V1Volume(
name="mlbench-ssh-key",
secret=client.V1SecretVolumeSource(
secret_name="{}-ssh-key".format(
os.environ.get("MLBENCH_KUBE_RELEASENAME")
),
default_mode=256,
),
)
],
),
),
),
)
def create_statefulset(model_run, release_name, namespace, job=None):
"""Creates a stateful set from the given run.
    The stateful set will have the name [model_run.name]-mlbench-worker-[release-name] (lower-cased)
Args:
model_run (:obj:`ModelRun`): The model run with appropriate values
release_name (str): Release name
namespace (str): Kubernetes namespace
job: Job to write output to
Returns:
(str): Name of stateful set
"""
core = client.CoreV1Api()
kube_api = client.AppsV1Api()
statefulset_name = "{1}-mlbench-worker-{0}".format(
release_name, model_run.name
).lower()
# create service
service = deepcopy(service_template)
service.metadata.name = statefulset_name
service.metadata.labels["set"] = model_run.name
service.spec.selector["set"] = model_run.name
response = core.create_namespaced_service(namespace, service)
# create stateful set
statefulset = deepcopy(statefulset_template)
statefulset.metadata.name = statefulset_name
statefulset.metadata.labels["set"] = model_run.name
statefulset.spec.selector.match_labels["set"] = model_run.name
statefulset.spec.service_name = statefulset_name
statefulset.spec.replicas = int(model_run.num_workers)
container = statefulset.spec.template.spec.containers[0]
container.resources.limits["cpu"] = model_run.cpu_limit
if model_run.gpu_enabled:
container.resources.limits["nvidia.com/gpu"] = "1"
container.image = model_run.image
container.name = "{}-worker".format(model_run.name).lower()
statefulset.spec.template.spec.service_account_name = "{}-mlbench-worker-sa".format(
os.environ.get("MLBENCH_KUBE_RELEASENAME")
)
statefulset.spec.template.metadata.labels["set"] = model_run.name
response = kube_api.create_namespaced_stateful_set(namespace, statefulset)
if job is not None:
job.meta["stdout"].append("Waiting for pods to become available\n")
job.save()
# wait for StatefulSet to be created
while True:
response = kube_api.read_namespaced_stateful_set_status(
statefulset_name, namespace
)
s = response.status
if job is not None:
job.meta["stdout"].append(
"Waiting for workers: Current: {}/{}, Replicas: {}/{}, "
"Ready: {}, "
"Observed Gen: {}/{}".format(
s.current_replicas,
response.spec.replicas,
s.replicas,
response.spec.replicas,
s.ready_replicas,
s.observed_generation,
response.metadata.generation,
)
)
job.save()
if (
s.current_replicas == response.spec.replicas
and s.replicas == response.spec.replicas
and s.ready_replicas == response.spec.replicas
and s.observed_generation == response.metadata.generation
):
break
sleep(1)
return statefulset_name
def delete_statefulset(
statefulset_name, namespace, grace_period_seconds=5, in_cluster=True
):
"""Delete a stateful set in a given namespace
Args:
statefulset_name (str): Stateful set to delete
namespace (str): Namespace on which stateful set was deployed
grace_period_seconds (int): Grace period for deletion
in_cluster (bool): Running inside cluster or not. Default `True`
"""
if in_cluster:
config.load_incluster_config()
kube_api = client.AppsV1Api()
kube_api.delete_namespaced_stateful_set(
statefulset_name,
namespace,
pretty=True,
grace_period_seconds=grace_period_seconds,
propagation_policy="Foreground",
)
def delete_service(statefulset_name, namespace, in_cluster=True):
"""Deletes a service in a given namespace and stateful set
Args:
statefulset_name (str): Name of stateful set for service
namespace (str): Namespace on which it was deployed
in_cluster (bool): Running inside cluster or not. Default `True`
"""
if in_cluster:
config.load_incluster_config()
kube_api = client.CoreV1Api()
kube_api.delete_namespaced_service(
statefulset_name,
namespace,
body=client.V1DeleteOptions(
propagation_policy="Foreground",
),
)
def check_nodes_available_for_execution(model_run, job=None):
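    """Checks whether enough workers are free for this run to start.
    Workers used by currently running (STARTED) runs are counted against
    MLBENCH_MAX_WORKERS; the remaining capacity is then handed out to the
    pending (INITIALIZED) runs ordered by size, so this returns True only if
    `model_run` receives its workers before the capacity is exhausted.
    Args:
        model_run (:obj:`ModelRun`): The run waiting to be executed
        job: Job to write output to
    Returns:
        (bool): Whether the run can be started now
    """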
if job is not None:
job.meta["stdout"].append("Waiting for nodes to be available\n")
job.save()
max_workers = int(os.environ.get("MLBENCH_MAX_WORKERS"))
active_runs = ModelRun.objects.filter(state=ModelRun.STARTED)
utilized_workers = sum(r.num_workers for r in active_runs)
if utilized_workers == max_workers:
return False
available_workers = max_workers - utilized_workers
pending_runs = ModelRun.objects.filter(state=ModelRun.INITIALIZED).order_by(
"num_workers"
)
for r in pending_runs:
if r.num_workers > available_workers:
return False
if r.id == model_run.id:
return True
available_workers -= r.num_workers
return False # this should never be reached!
@django_rq.job("default", result_ttl=-1, timeout=-1, ttl=None)
def run_model_job(model_run):
"""RQ Job to execute OpenMPI
Arguments:
model_run {models.ModelRun} -- the database entry this job is
associated with
"""
release_name = os.environ.get("MLBENCH_KUBE_RELEASENAME")
ns = os.environ.get("MLBENCH_NAMESPACE")
job = get_current_job()
job.meta["stdout"] = []
job.meta["stderr"] = []
job.meta["stdout"].append("Initializing run")
job.meta["workhorse_pid"] = os.getpid()
job.save()
model_run.job_id = job.id
model_run.save()
set_name = ""
try:
while not check_nodes_available_for_execution(model_run, job):
sleep(30)
model_run.state = ModelRun.STARTED
model_run.save()
config.load_incluster_config()
v1 = client.CoreV1Api()
set_name = create_statefulset(model_run, release_name, ns, job)
job.meta["stdout"].append("Created stateful set, starting run.")
job.save()
# start run
ret = v1.list_namespaced_pod(
ns,
label_selector="component=worker,app=mlbench,release={0},set={1}".format(
release_name, model_run.name
),
)
retries = 0
while retries < MAX_POD_RETRIES:
if len(ret.items) == 0:
sleep(10)
ret = v1.list_namespaced_pod(
ns,
label_selector="component=worker,app=mlbench,release={0},set={1}".format(
release_name, model_run.name
),
)
continue
pods = []
db_pods = []
hosts = []
for i in ret.items:
pods.append(
(
i.status.pod_ip,
i.metadata.namespace,
i.metadata.name,
str(i.metadata.labels),
)
)
try:
db_pod = KubePod.objects.get(name=i.metadata.name)
db_pods.append(db_pod)
hosts.append("{}.{}".format(i.metadata.name, set_name))
except KubePod.DoesNotExist:
sleep(10)
retries += 1
break # wait for pods to be in DB
if len(hosts) > 0:
break
if retries == MAX_POD_RETRIES:
raise Exception("Couldn't find pods in db")
model_run.pods.set(db_pods)
model_run.save()
job.meta["pods"] = pods
job.meta["stdout"].append(str(hosts))
job.save()
# Write hostfile
max_gpu_per_worker = int(os.environ.get("MLBENCH_MAX_GPU_PER_WORKER", 0))
slots = max_gpu_per_worker or 1
hosts_with_slots = []
for host in hosts:
for _ in range(slots):
hosts_with_slots.append(host)
        # Use `question 22 <https://www.open-mpi.org/faq/?category=running#mpirun-hostfile>`_ to add slots # noqa: E501
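        # e.g. with MLBENCH_MAX_GPU_PER_WORKER=2, every hostname appears twice in the
        # host list, giving mpirun two slots (ranks) per worker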
exec_command = model_run.command.format(
hosts=",".join(hosts_with_slots),
run_id=model_run.id,
rank=0,
backend=model_run.backend,
)
# Add mpirun to run on mpi
cmd_prepend = ""
cmd_append = ""
if model_run.backend == "mpi":
cmd_prepend = MPI_COMMAND.format(hosts=",".join(hosts_with_slots))
if model_run.gpu_enabled:
cmd_append += " --gpu"
if model_run.light_target:
cmd_append += " --light"
if model_run.use_horovod:
cmd_append += "--horovod"
job.meta["command"] = cmd_prepend + exec_command + cmd_append
job.meta["master_name"] = ret.items[0].metadata.name
job.save()
streams = []
for i, n in enumerate(ret.items):
name = n.metadata.name
cmd = (
cmd_prepend
+ model_run.command.format(
hosts=",".join(hosts_with_slots),
run_id=model_run.id,
rank=i,
backend=model_run.backend,
)
+ cmd_append
).split(" ")
resp = stream.stream(
v1.connect_get_namespaced_pod_exec,
name,
ns,
command=cmd,
stderr=True,
stdin=False,
stdout=True,
tty=False,
_preload_content=False,
_request_timeout=None,
)
streams.append(resp)
if not model_run.run_on_all_nodes:
break
job.meta["stdout"].append("Started run.")
job.save()
# keep writing openmpi output to job metadata
cont = True
while any(s.is_open() for s in streams) and cont:
for s in streams:
try:
if not s.is_open():
# cont = False
continue
s.update(timeout=5)
if s.peek_stdout(timeout=5):
out = s.read_stdout()
if "Goal Reached!" in out:
cont = False
job.meta["stdout"] += out.splitlines()
if s.peek_stderr(timeout=5):
err = s.read_stderr()
job.meta["stderr"] += err.splitlines()
job.save()
except websocket.WebSocketConnectionClosedException:
# cont = False
job.meta["stderr"] += [
"Websocket exception",
traceback.format_exc(),
]
continue
except BrokenPipeError:
# Client closed connection prematurely
cont = False
job.meta["stderr"] += [
"Container closed connection " "prematurely",
"This could be "
"caused by an exception or by"
"training being finished",
]
continue
for s in streams:
s.close()
model_run.state = ModelRun.FINISHED
model_run.finished_at = timezone.now()
model_run.save()
except (Exception, BaseException):
model_run.state = ModelRun.FAILED
job.meta["stderr"].append("Run failed")
job.meta["stderr"].append(traceback.format_exc())
job.save()
model_run.save()
finally:
if set_name:
delete_statefulset(set_name, ns)
delete_service(set_name, ns)
| python |
# email_outbound/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.core.mail import EmailMultiAlternatives
from django.apps import apps
from django.db import models
from wevote_functions.functions import extract_email_addresses_from_string, generate_random_string, \
positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_email_integer, fetch_site_unique_id_prefix
FRIEND_ACCEPTED_INVITATION_TEMPLATE = 'FRIEND_ACCEPTED_INVITATION_TEMPLATE'
FRIEND_INVITATION_TEMPLATE = 'FRIEND_INVITATION_TEMPLATE'
GENERIC_EMAIL_TEMPLATE = 'GENERIC_EMAIL_TEMPLATE'
LINK_TO_SIGN_IN_TEMPLATE = 'LINK_TO_SIGN_IN_TEMPLATE'
VERIFY_EMAIL_ADDRESS_TEMPLATE = 'VERIFY_EMAIL_ADDRESS_TEMPLATE'
SEND_BALLOT_TO_SELF = 'SEND_BALLOT_TO_SELF'
SEND_BALLOT_TO_FRIENDS = 'SEND_BALLOT_TO_FRIENDS'
SIGN_IN_CODE_EMAIL_TEMPLATE = 'SIGN_IN_CODE_EMAIL_TEMPLATE'
KIND_OF_EMAIL_TEMPLATE_CHOICES = (
(GENERIC_EMAIL_TEMPLATE, 'Generic Email'),
(FRIEND_ACCEPTED_INVITATION_TEMPLATE, 'Accept an invitation to be a Friend'),
(FRIEND_INVITATION_TEMPLATE, 'Invite Friend'),
(LINK_TO_SIGN_IN_TEMPLATE, 'Link to sign in.'),
    (VERIFY_EMAIL_ADDRESS_TEMPLATE, "Verify Sender's Email Address"),
(SEND_BALLOT_TO_SELF, 'Send ballot to self'),
(SEND_BALLOT_TO_FRIENDS, 'Send ballot to friends'),
(SIGN_IN_CODE_EMAIL_TEMPLATE, 'Send code to verify sign in.'),
)
TO_BE_PROCESSED = 'TO_BE_PROCESSED'
BEING_ASSEMBLED = 'BEING_ASSEMBLED'
SCHEDULED = 'SCHEDULED'
ASSEMBLY_STATUS_CHOICES = (
(TO_BE_PROCESSED, 'Email to be assembled'),
(BEING_ASSEMBLED, 'Email being assembled with template'),
(SCHEDULED, 'Sent to the scheduler'),
)
WAITING_FOR_VERIFICATION = 'WAITING_FOR_VERIFICATION'
BEING_SENT = 'BEING_SENT'
SENT = 'SENT'
SEND_STATUS_CHOICES = (
(TO_BE_PROCESSED, 'Message to be processed'),
(BEING_SENT, 'Message being sent'),
(SENT, 'Message sent'),
)
class EmailAddress(models.Model):
"""
We give every email address its own unique we_vote_id for things like invitations
"""
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our data with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "email", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_email_integer
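    # e.g. with prefix "3v" a generated id would look like "wv3vemail123" (hypothetical integer)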
we_vote_id = models.CharField(
verbose_name="we vote id of this email address", max_length=255, default=None, null=True,
blank=True, unique=True)
voter_we_vote_id = models.CharField(
verbose_name="we vote id for the email owner", max_length=255, null=True, blank=True, unique=False)
# Until an EmailAddress has had its ownership verified, multiple voter accounts can try to use it
normalized_email_address = models.EmailField(
verbose_name='email address', max_length=255, null=False, blank=False, unique=False)
# Has this email been verified by the owner?
email_ownership_is_verified = models.BooleanField(default=False)
# Has this email had a permanent bounce? If so, we should not send emails to it.
email_permanent_bounce = models.BooleanField(default=False)
secret_key = models.CharField(
verbose_name="secret key to verify ownership of email", max_length=255, null=True, blank=True, unique=True)
deleted = models.BooleanField(default=False) # If email address is removed from person's account, mark as deleted
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this data came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_email_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "email" = tells us this is a unique id for a EmailAddress
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}email{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(EmailAddress, self).save(*args, **kwargs)
class EmailOutboundDescription(models.Model):
"""
Specifications for a single email we want to send. This data is used to assemble an EmailScheduled
"""
kind_of_email_template = models.CharField(max_length=50, choices=KIND_OF_EMAIL_TEMPLATE_CHOICES,
default=GENERIC_EMAIL_TEMPLATE)
sender_voter_name = models.CharField(
verbose_name='sender full name', max_length=255, null=True, blank=True, unique=False)
sender_voter_we_vote_id = models.CharField(
verbose_name="we vote id for the sender", max_length=255, null=True, blank=True, unique=False)
sender_voter_email = models.EmailField(
verbose_name='email address for sender', max_length=255, null=True, blank=True, unique=False)
recipient_voter_we_vote_id = models.CharField(
verbose_name="we vote id for the recipient if we have it", max_length=255, null=True, blank=True, unique=False)
recipient_email_we_vote_id = models.CharField(
verbose_name="email we vote id for recipient", max_length=255, null=True, blank=True, unique=False)
# We include this here for data monitoring and debugging
recipient_voter_email = models.EmailField(
verbose_name='email address for recipient', max_length=255, null=True, blank=True, unique=False)
template_variables_in_json = models.TextField(null=True, blank=True)
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
class EmailScheduled(models.Model):
"""
Used to tell the email server literally what to send. If an email bounces temporarily, we will
    want to trigger the EmailOutboundDescription to generate a new EmailScheduled entry.
"""
subject = models.CharField(verbose_name="email subject", max_length=255, null=True, blank=True, unique=False)
message_text = models.TextField(null=True, blank=True)
message_html = models.TextField(null=True, blank=True)
sender_voter_name = models.CharField(
verbose_name='sender full name', max_length=255, null=True, blank=True, unique=False)
sender_voter_we_vote_id = models.CharField(
verbose_name="we vote id for the sender", max_length=255, null=True, blank=True, unique=False)
sender_voter_email = models.EmailField(
verbose_name='sender email address', max_length=255, null=True, blank=True, unique=False)
recipient_voter_we_vote_id = models.CharField(
verbose_name="we vote id for the recipient", max_length=255, null=True, blank=True, unique=False)
recipient_email_we_vote_id = models.CharField(
verbose_name="we vote id for the email", max_length=255, null=True, blank=True, unique=False)
recipient_voter_email = models.EmailField(
verbose_name='recipient email address', max_length=255, null=True, blank=True, unique=False)
send_status = models.CharField(max_length=50, choices=SEND_STATUS_CHOICES, default=TO_BE_PROCESSED)
email_outbound_description_id = models.PositiveIntegerField(
verbose_name="the internal id of EmailOutboundDescription", default=0, null=False)
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
class EmailManager(models.Model):
def __unicode__(self):
return "EmailManager"
def clear_secret_key_from_email_address(self, email_secret_key):
"""
:param email_secret_key:
:return:
"""
email_address_found = False
email_address = None
status = ''
try:
if positive_value_exists(email_secret_key):
email_address = EmailAddress.objects.get(
secret_key=email_secret_key,
)
email_address_found = True
success = True
else:
email_address_found = False
success = False
status += "SECRET_KEY_MISSING "
except EmailAddress.DoesNotExist:
success = True
status += "EMAIL_ADDRESS_NOT_FOUND "
except Exception as e:
success = False
status += 'EMAIL_ADDRESS_DB_RETRIEVE_ERROR ' + str(e) + ' '
if email_address_found:
try:
email_address.secret_key = None
email_address.save()
except Exception as e:
success = False
status += 'EMAIL_ADDRESS_DB_SAVE_ERROR ' + str(e) + ' '
results = {
'success': success,
'status': status,
}
return results
def create_email_address_for_voter(self, normalized_email_address, voter, email_ownership_is_verified=False):
return self.create_email_address(normalized_email_address, voter.we_vote_id, email_ownership_is_verified)
def create_email_address(self, normalized_email_address, voter_we_vote_id='', email_ownership_is_verified=False,
make_primary_email=True):
secret_key = generate_random_string(12)
status = ""
normalized_email_address = str(normalized_email_address)
normalized_email_address = normalized_email_address.strip()
normalized_email_address = normalized_email_address.lower()
if not positive_value_exists(normalized_email_address):
email_address_object = EmailAddress()
results = {
'status': "EMAIL_ADDRESS_FOR_VOTER_MISSING_RAW_EMAIL ",
'success': False,
'email_address_object_saved': False,
'email_address_object': email_address_object,
}
return results
try:
email_address_object = EmailAddress.objects.create(
normalized_email_address=normalized_email_address,
voter_we_vote_id=voter_we_vote_id,
email_ownership_is_verified=email_ownership_is_verified,
secret_key=secret_key,
)
email_address_object_saved = True
success = True
status += "EMAIL_ADDRESS_FOR_VOTER_CREATED "
except Exception as e:
email_address_object_saved = False
email_address_object = EmailAddress()
success = False
status += "EMAIL_ADDRESS_FOR_VOTER_NOT_CREATED " + str(e) + ' '
results = {
'success': success,
'status': status,
'email_address_object_saved': email_address_object_saved,
'email_address_object': email_address_object,
}
return results
def create_email_outbound_description(
self, sender_voter_we_vote_id, sender_voter_email, sender_voter_name='',
recipient_voter_we_vote_id='',
recipient_email_we_vote_id='', recipient_voter_email='', template_variables_in_json='',
kind_of_email_template=''):
status = ""
if not positive_value_exists(kind_of_email_template):
kind_of_email_template = GENERIC_EMAIL_TEMPLATE
try:
email_outbound_description = EmailOutboundDescription.objects.create(
sender_voter_we_vote_id=sender_voter_we_vote_id,
sender_voter_email=sender_voter_email,
sender_voter_name=sender_voter_name,
recipient_voter_we_vote_id=recipient_voter_we_vote_id,
recipient_email_we_vote_id=recipient_email_we_vote_id,
recipient_voter_email=recipient_voter_email,
kind_of_email_template=kind_of_email_template,
template_variables_in_json=template_variables_in_json,
)
email_outbound_description_saved = True
success = True
status += "EMAIL_OUTBOUND_DESCRIPTION_CREATED "
except Exception as e:
email_outbound_description_saved = False
email_outbound_description = EmailOutboundDescription()
success = False
status += "EMAIL_OUTBOUND_DESCRIPTION_NOT_CREATED " + str(e) + " "
results = {
'success': success,
'status': status,
'email_outbound_description_saved': email_outbound_description_saved,
'email_outbound_description': email_outbound_description,
}
return results
def find_and_merge_all_duplicate_emails(self, voter_we_vote_id):
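        """
        Merges duplicate EmailAddress entries that belong to one voter.
        Entries with the same normalized_email_address are merged when their
        verification status matches; afterwards, unverified copies of an email
        that also exists in verified form are deleted.
        :param voter_we_vote_id:
        :return:
        """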
success = True
status = ''
already_merged_email_we_vote_ids = []
list_results = self.retrieve_voter_email_address_list(voter_we_vote_id)
if list_results['email_address_list_found']:
initial_email_address_list = list_results['email_address_list']
for email_address_object in initial_email_address_list:
for comparison_email_address_object in initial_email_address_list:
if comparison_email_address_object.we_vote_id in already_merged_email_we_vote_ids:
# If this email has already been merged, skip forward
continue
if email_address_object.normalized_email_address != \
comparison_email_address_object.normalized_email_address:
# If we are looking at different email addresses, skip forward
continue
if email_address_object.we_vote_id == comparison_email_address_object.we_vote_id:
# If we are looking at the same email entry, skip forward
continue
# Merge verified email addresses where both are verified
if email_address_object.email_ownership_is_verified \
and comparison_email_address_object.email_ownership_is_verified:
friend_results = update_friend_invitation_email_link_with_new_email(
comparison_email_address_object.we_vote_id, email_address_object.we_vote_id)
if not friend_results['success']:
status += friend_results['status']
merge_results = self.merge_two_duplicate_emails(
email_address_object, comparison_email_address_object)
status += merge_results['status']
already_merged_email_we_vote_ids.append(email_address_object.we_vote_id)
already_merged_email_we_vote_ids.append(comparison_email_address_object.we_vote_id)
                    # Merge email addresses where neither is verified
elif not email_address_object.email_ownership_is_verified \
and not comparison_email_address_object.email_ownership_is_verified:
friend_results = update_friend_invitation_email_link_with_new_email(
comparison_email_address_object.we_vote_id, email_address_object.we_vote_id)
if not friend_results['success']:
status += friend_results['status']
merge_results = self.merge_two_duplicate_emails(
email_address_object, comparison_email_address_object)
status += merge_results['status']
already_merged_email_we_vote_ids.append(email_address_object.we_vote_id)
already_merged_email_we_vote_ids.append(comparison_email_address_object.we_vote_id)
# Now look for the same emails where one is verified and the other isn't
list_results2 = self.retrieve_voter_email_address_list(voter_we_vote_id)
if list_results2['email_address_list_found']:
initial_email_address_list = list_results2['email_address_list']
for email_address_object in initial_email_address_list:
for comparison_email_address_object in initial_email_address_list:
if comparison_email_address_object.we_vote_id in already_merged_email_we_vote_ids:
# If this email has already been merged, skip forward
continue
if email_address_object.normalized_email_address != \
comparison_email_address_object.normalized_email_address:
# If we are looking at different email addresses, skip forward
continue
if email_address_object.we_vote_id == comparison_email_address_object.we_vote_id:
# If we are looking at the same email entry, skip forward
continue
# If here, the normalized_email_addresses match
if email_address_object.email_ownership_is_verified:
# Delete the comparison_email_address
try:
friend_results = update_friend_invitation_email_link_with_new_email(
comparison_email_address_object.we_vote_id, email_address_object.we_vote_id)
if not friend_results['success']:
status += friend_results['status']
already_merged_email_we_vote_ids.append(email_address_object.we_vote_id)
already_merged_email_we_vote_ids.append(comparison_email_address_object.we_vote_id)
comparison_email_address_object.delete()
except Exception as e:
status += "COULD_NOT_DELETE_UNVERIFIED_EMAIL " + str(e) + " "
results = {
'success': success,
'status': status,
}
return results
def merge_two_duplicate_emails(self, email_address_object1, email_address_object2):
"""
We assume that the checking to see if these are duplicates has been done outside of this function.
We will keep email_address_object1 and eliminate email_address_object2.
:param email_address_object1:
:param email_address_object2:
:return:
"""
success = True
status = ''
try:
test_we_vote_id = email_address_object1.we_vote_id
test_we_vote_id = email_address_object2.we_vote_id
except Exception as e:
status += 'PROBLEM_WITH_EMAIL1_OR_EMAIL2 ' + str(e) + ' '
success = False
results = {
'success': success,
'status': status,
}
return results
if email_address_object1.voter_we_vote_id != email_address_object2.voter_we_vote_id:
status += 'ONLY_MERGE_EMAILS_FROM_SAME_VOTER '
success = False
results = {
'success': success,
'status': status,
}
return results
if email_address_object1.normalized_email_address != email_address_object2.normalized_email_address:
status += 'ONLY_MERGE_EMAILS_WITH_SAME_NORMALIZED_EMAIL_ADDRESS '
success = False
results = {
'success': success,
'status': status,
}
return results
at_least_one_is_verified = email_address_object1.email_ownership_is_verified \
or email_address_object2.email_ownership_is_verified
both_are_bouncing = email_address_object1.email_permanent_bounce \
and email_address_object2.email_permanent_bounce
try:
email_address_object1.email_ownership_is_verified = at_least_one_is_verified
email_address_object1.email_permanent_bounce = both_are_bouncing
email_address_object1.save()
except Exception as e:
status += "COULD_NOT_SAVE_EMAIL1 " + str(e) + " "
# We don't need to handle repairing the primary email link here
# because it is done in heal_primary_email_data_for_voter
# Are there any scheduled emails for email_address_object2 waiting to send?
try:
email_address_object2.delete()
except Exception as e:
status += "COULD_NOT_DELETE_EMAIL2 " + str(e) + " "
success = False
results = {
'success': success,
'status': status,
}
return results
def parse_raw_emails_into_list(self, email_addresses_raw):
success = True
status = "EMAIL_MANAGER_PARSE_RAW_EMAILS"
email_list = extract_email_addresses_from_string(email_addresses_raw)
results = {
'success': success,
'status': status,
'at_least_one_email_found': True,
'email_list': email_list,
}
return results
def retrieve_email_address_object(self, normalized_email_address, email_address_object_we_vote_id='',
voter_we_vote_id=''):
"""
There are cases where we store multiple entries for the same normalized_email_address (prior to an email
address being verified)
:param normalized_email_address:
:param email_address_object_we_vote_id:
:param voter_we_vote_id:
:return:
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
email_address_object_found = False
email_address_object = EmailAddress()
email_address_object_id = 0
email_address_list_found = False
email_address_list = []
status = ""
try:
if positive_value_exists(email_address_object_we_vote_id):
if positive_value_exists(voter_we_vote_id):
email_address_object = EmailAddress.objects.get(
we_vote_id__iexact=email_address_object_we_vote_id,
voter_we_vote_id__iexact=voter_we_vote_id,
deleted=False
)
else:
email_address_object = EmailAddress.objects.get(
we_vote_id__iexact=email_address_object_we_vote_id,
deleted=False
)
email_address_object_id = email_address_object.id
email_address_object_we_vote_id = email_address_object.we_vote_id
email_address_object_found = True
success = True
status += "RETRIEVE_EMAIL_ADDRESS_FOUND_BY_WE_VOTE_ID "
elif positive_value_exists(normalized_email_address):
email_address_queryset = EmailAddress.objects.all()
if positive_value_exists(voter_we_vote_id):
email_address_queryset = email_address_queryset.filter(
normalized_email_address__iexact=normalized_email_address,
voter_we_vote_id__iexact=voter_we_vote_id,
deleted=False
)
else:
email_address_queryset = email_address_queryset.filter(
normalized_email_address__iexact=normalized_email_address,
deleted=False
)
                # We need the verified email at the top of the list
email_address_queryset = email_address_queryset.order_by('-email_ownership_is_verified')
email_address_list = email_address_queryset
if len(email_address_list):
if len(email_address_list) == 1:
# If only one email is found, return the results as a single email
email_address_object = email_address_list[0]
email_address_object_id = email_address_object.id
email_address_object_we_vote_id = email_address_object.we_vote_id
email_address_object_found = True
email_address_list_found = False
success = True
status += "RETRIEVE_EMAIL_ADDRESS_FOUND_BY_NORMALIZED_EMAIL_ADDRESS "
else:
success = True
email_address_list_found = True
status += 'RETRIEVE_EMAIL_ADDRESS_OBJECT-EMAIL_ADDRESS_LIST_RETRIEVED '
else:
success = True
email_address_list_found = False
status += 'RETRIEVE_EMAIL_ADDRESS_OBJECT-NO_EMAIL_ADDRESS_LIST_RETRIEVED '
else:
email_address_object_found = False
success = False
status += "RETRIEVE_EMAIL_ADDRESS_VARIABLES_MISSING "
except EmailAddress.DoesNotExist:
exception_does_not_exist = True
success = True
status += "RETRIEVE_EMAIL_ADDRESS_NOT_FOUND "
except Exception as e:
success = False
status += 'FAILED retrieve_email_address_object EmailAddress ' + str(e) + ' '
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'email_address_object_found': email_address_object_found,
'email_address_object_id': email_address_object_id,
'email_address_object_we_vote_id': email_address_object_we_vote_id,
'email_address_object': email_address_object,
'email_address_list_found': email_address_list_found,
'email_address_list': email_address_list,
}
return results
def retrieve_email_address_object_from_secret_key(self, email_secret_key):
"""
:param email_secret_key:
:return:
"""
email_address_object_found = False
email_address_object = EmailAddress()
email_address_object_id = 0
email_address_object_we_vote_id = ""
email_ownership_is_verified = False
status = ''
try:
if positive_value_exists(email_secret_key):
email_address_object = EmailAddress.objects.get(
secret_key=email_secret_key,
deleted=False
)
email_address_object_id = email_address_object.id
email_address_object_we_vote_id = email_address_object.we_vote_id
email_ownership_is_verified = email_address_object.email_ownership_is_verified
email_address_object_found = True
success = True
status += "RETRIEVE_EMAIL_ADDRESS_FOUND_BY_SECRET_KEY "
else:
email_address_object_found = False
success = False
status += "RETRIEVE_EMAIL_ADDRESS_BY_SECRET_KEY_VARIABLE_MISSING "
except EmailAddress.DoesNotExist:
success = True
status += "RETRIEVE_EMAIL_ADDRESS_BY_SECRET_KEY_NOT_FOUND "
except Exception as e:
success = False
status += 'FAILED retrieve_email_address_object_from_secret_key EmailAddress ' + str(e) + ' '
results = {
'success': success,
'status': status,
'email_address_object_found': email_address_object_found,
'email_address_object_id': email_address_object_id,
'email_address_object_we_vote_id': email_address_object_we_vote_id,
'email_address_object': email_address_object,
'email_ownership_is_verified': email_ownership_is_verified,
}
return results
def verify_email_address_object_from_secret_key(self, email_secret_key):
"""
:param email_secret_key:
:return:
"""
email_address_object_found = False
email_address_object = EmailAddress()
email_address_object_id = 0
email_address_object_we_vote_id = ""
status = ''
try:
if positive_value_exists(email_secret_key):
email_address_object = EmailAddress.objects.get(
secret_key=email_secret_key,
deleted=False
)
email_address_object_id = email_address_object.id
email_address_object_we_vote_id = email_address_object.we_vote_id
email_address_object_found = True
success = True
status += "VERIFY_EMAIL_ADDRESS_FOUND_BY_WE_VOTE_ID "
else:
email_address_object_found = False
success = False
status += "VERIFY_EMAIL_ADDRESS_VARIABLES_MISSING "
except EmailAddress.DoesNotExist:
success = True
status += "VERIFY_EMAIL_ADDRESS_NOT_FOUND "
except Exception as e:
success = False
            status += 'FAILED verify_email_address_object_from_secret_key EmailAddress ' + str(e) + ' '
email_ownership_is_verified = False
if email_address_object_found:
try:
# Note that we leave the secret key in place so we can find the owner we_vote_id in a subsequent call
email_address_object.email_ownership_is_verified = True
email_address_object.save()
email_ownership_is_verified = True
except Exception as e:
success = False
status += 'FAILED_TO_SAVE_EMAIL_OWNERSHIP_IS_VERIFIED ' + str(e) + " "
else:
status += 'EMAIL_ADDRESS_OBJECT_NOT_FOUND '
results = {
'success': success,
'status': status,
'email_address_object_found': email_address_object_found,
'email_address_object_id': email_address_object_id,
'email_address_object_we_vote_id': email_address_object_we_vote_id,
'email_address_object': email_address_object,
'email_ownership_is_verified': email_ownership_is_verified,
}
return results
def retrieve_voter_email_address_list(self, voter_we_vote_id):
"""
:param voter_we_vote_id:
:return:
"""
status = ""
if not positive_value_exists(voter_we_vote_id):
success = False
status += 'VALID_VOTER_WE_VOTE_ID_MISSING '
results = {
'success': success,
'status': status,
'voter_we_vote_id': voter_we_vote_id,
'email_address_list_found': False,
'email_address_list': [],
}
return results
email_address_list = []
try:
email_address_queryset = EmailAddress.objects.all()
email_address_queryset = email_address_queryset.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
deleted=False
)
email_address_queryset = email_address_queryset.order_by('-id') # Put most recent email at top of list
email_address_list = email_address_queryset
if len(email_address_list):
success = True
email_address_list_found = True
status += 'EMAIL_ADDRESS_LIST_RETRIEVED '
else:
success = True
email_address_list_found = False
status += 'NO_EMAIL_ADDRESS_LIST_RETRIEVED '
except EmailAddress.DoesNotExist:
# No data found. Not a problem.
success = True
email_address_list_found = False
status += 'NO_EMAIL_ADDRESS_LIST_RETRIEVED_DoesNotExist '
email_address_list = []
except Exception as e:
success = False
email_address_list_found = False
            status += 'FAILED retrieve_voter_email_address_list EmailAddress ' + str(e) + ' '
results = {
'success': success,
'status': status,
'voter_we_vote_id': voter_we_vote_id,
'email_address_list_found': email_address_list_found,
'email_address_list': email_address_list,
}
return results
def retrieve_primary_email_with_ownership_verified(self, voter_we_vote_id, normalized_email_address=''):
status = ""
email_address_list = []
email_address_list_found = False
email_address_object = EmailAddress()
email_address_object_found = False
try:
if positive_value_exists(voter_we_vote_id):
email_address_queryset = EmailAddress.objects.all()
email_address_queryset = email_address_queryset.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
email_ownership_is_verified=True,
deleted=False
)
email_address_queryset = email_address_queryset.order_by('-id') # Put most recent email at top of list
email_address_list = email_address_queryset
elif positive_value_exists(normalized_email_address):
email_address_queryset = EmailAddress.objects.all()
email_address_queryset = email_address_queryset.filter(
normalized_email_address__iexact=normalized_email_address,
email_ownership_is_verified=True,
deleted=False
)
email_address_queryset = email_address_queryset.order_by('-id') # Put most recent email at top of list
email_address_list = email_address_queryset
else:
email_address_list = []
if len(email_address_list):
success = True
email_address_list_found = True
status += 'RETRIEVE_PRIMARY_EMAIL_ADDRESS_OBJECT-EMAIL_ADDRESS_LIST_RETRIEVED '
else:
success = True
email_address_list_found = False
status += 'RETRIEVE_PRIMARY_EMAIL_ADDRESS_OBJECT-NO_EMAIL_ADDRESS_LIST_RETRIEVED '
except EmailAddress.DoesNotExist:
success = True
status += "RETRIEVE_PRIMARY_EMAIL_ADDRESS_NOT_FOUND "
except Exception as e:
success = False
status += 'FAILED retrieve_primary_email_with_ownership_verified EmailAddress ' + str(e) + " "
if email_address_list_found:
email_address_object_found = True
email_address_object = email_address_list[0]
results = {
'success': success,
'status': status,
'email_address_object_found': email_address_object_found,
'email_address_object': email_address_object,
}
return results
def fetch_primary_email_with_ownership_verified(self, voter_we_vote_id):
results = self.retrieve_primary_email_with_ownership_verified(voter_we_vote_id)
if results['email_address_object_found']:
email_address_object = results['email_address_object']
return email_address_object.normalized_email_address
return ""
def retrieve_scheduled_email_list_from_send_status(self, sender_voter_we_vote_id, send_status):
status = ""
scheduled_email_list = []
try:
email_scheduled_queryset = EmailScheduled.objects.all()
email_scheduled_queryset = email_scheduled_queryset.filter(
sender_voter_we_vote_id=sender_voter_we_vote_id,
send_status=send_status,
)
scheduled_email_list = email_scheduled_queryset
if len(scheduled_email_list):
success = True
scheduled_email_list_found = True
status += 'SCHEDULED_EMAIL_LIST_RETRIEVED '
else:
success = True
scheduled_email_list_found = False
status += 'NO_SCHEDULED_EMAIL_LIST_RETRIEVED '
except EmailScheduled.DoesNotExist:
# No data found. Not a problem.
success = True
scheduled_email_list_found = False
status += 'NO_SCHEDULED_EMAIL_LIST_RETRIEVED_DoesNotExist '
scheduled_email_list = []
except Exception as e:
success = False
scheduled_email_list_found = False
status += 'FAILED retrieve_scheduled_email_list_from_send_status EmailAddress ' + str(e) + " "
results = {
'success': success,
'status': status,
'scheduled_email_list_found': scheduled_email_list_found,
'scheduled_email_list': scheduled_email_list,
}
return results
def update_scheduled_email_with_new_send_status(self, email_scheduled_object, send_status):
try:
email_scheduled_object.send_status = send_status
email_scheduled_object.save()
return email_scheduled_object
except Exception as e:
return email_scheduled_object
def schedule_email(self, email_outbound_description, subject, message_text, message_html,
send_status=TO_BE_PROCESSED):
status = ''
try:
email_scheduled = EmailScheduled.objects.create(
sender_voter_name=email_outbound_description.sender_voter_name,
sender_voter_we_vote_id=email_outbound_description.sender_voter_we_vote_id,
sender_voter_email=email_outbound_description.sender_voter_email,
recipient_voter_we_vote_id=email_outbound_description.recipient_voter_we_vote_id,
recipient_email_we_vote_id=email_outbound_description.recipient_email_we_vote_id,
recipient_voter_email=email_outbound_description.recipient_voter_email,
message_html=message_html,
message_text=message_text,
email_outbound_description_id=email_outbound_description.id,
send_status=send_status,
subject=subject,
)
email_scheduled_saved = True
email_scheduled_id = email_scheduled.id
success = True
status += "SCHEDULE_EMAIL_CREATED "
except Exception as e:
email_scheduled_saved = False
email_scheduled = EmailScheduled()
email_scheduled_id = 0
success = False
status += "SCHEDULE_EMAIL_NOT_CREATED " + str(e) + ' '
results = {
'success': success,
'status': status,
'email_scheduled_saved': email_scheduled_saved,
'email_scheduled_id': email_scheduled_id,
'email_scheduled': email_scheduled,
}
return results
def send_scheduled_email(self, email_scheduled):
success = True
status = ""
# DALE 2016-11-3 sender_voter_email is no longer required, because we use a system email
# if not positive_value_exists(email_scheduled.sender_voter_email):
# status += "MISSING_SENDER_VOTER_EMAIL"
# success = False
if not positive_value_exists(email_scheduled.recipient_voter_email):
status += "MISSING_RECIPIENT_VOTER_EMAIL"
success = False
if not positive_value_exists(email_scheduled.subject):
status += "MISSING_EMAIL_SUBJECT "
success = False
# We need either plain text or HTML message
if not positive_value_exists(email_scheduled.message_text) and \
not positive_value_exists(email_scheduled.message_html):
status += "MISSING_EMAIL_MESSAGE "
success = False
if success:
return self.send_scheduled_email_via_sendgrid(email_scheduled)
else:
email_scheduled_sent = False
results = {
'success': success,
'status': status,
'email_scheduled_sent': email_scheduled_sent,
}
return results
def send_scheduled_email_via_sendgrid(self, email_scheduled):
"""
Send a single scheduled email
:param email_scheduled:
:return:
"""
status = ""
success = True
sendgrid_turned_off_for_testing = False
if sendgrid_turned_off_for_testing:
status += "SENDGRID_TURNED_OFF_FOR_TESTING "
results = {
'success': success,
'status': status,
'email_scheduled_sent': True,
}
return results
if positive_value_exists(email_scheduled.sender_voter_name):
# TODO DALE Make system variable
system_sender_email_address = "{sender_voter_name} via We Vote <[email protected]>" \
"".format(sender_voter_name=email_scheduled.sender_voter_name)
else:
system_sender_email_address = "We Vote <[email protected]>" # TODO DALE Make system variable
mail = EmailMultiAlternatives(
subject=email_scheduled.subject,
body=email_scheduled.message_text,
from_email=system_sender_email_address,
to=[email_scheduled.recipient_voter_email],
# headers={"Reply-To": email_scheduled.sender_voter_email}
)
# 2020-01-19 Dale commented out Reply-To header because with it, Gmail gives phishing warning
if positive_value_exists(email_scheduled.message_html):
mail.attach_alternative(email_scheduled.message_html, "text/html")
        email_scheduled_sent = False
        try:
            mail.send()
            status += "SENDING_VIA_SENDGRID "
            email_scheduled_sent = True
        except Exception as e:
            success = False
            status += "COULD_NOT_SEND_VIA_SENDGRID " + str(e) + ' '
results = {
'success': success,
'status': status,
'email_scheduled_sent': email_scheduled_sent,
}
return results
def send_scheduled_email_list(self, messages_to_send):
"""
Take in a list of scheduled_email_id's, and send them
:param messages_to_send:
:return:
"""
success = False
status = ""
results = {
'success': success,
'status': status,
'at_least_one_email_found': True,
}
return results
def send_scheduled_emails_waiting_for_verification(self, sender_we_vote_id, sender_name=''):
"""
        Searches the scheduled emails for the text "Your friend" (with three spaces) and replaces it with sender_name
:param sender_we_vote_id:
:param sender_name:
:return:
"""
at_least_one_email_found = False
save_scheduled_email = False
send_status = WAITING_FOR_VERIFICATION
success = True
status = ""
scheduled_email_results = self.retrieve_scheduled_email_list_from_send_status(
sender_we_vote_id, send_status)
status += scheduled_email_results['status']
if scheduled_email_results['scheduled_email_list_found']:
scheduled_email_list = scheduled_email_results['scheduled_email_list']
for scheduled_email in scheduled_email_list:
at_least_one_email_found = True
if positive_value_exists(sender_name):
# Check scheduled_email.message_text and scheduled_email.message_html
# if there is a variable that hasn't been filled in yet.
try:
if scheduled_email.message_text:
save_scheduled_email = True
scheduled_email.message_text = \
scheduled_email.message_text.replace('Your friend', sender_name)
except Exception as e:
status += "COULD_NOT_REPLACE_NAME_IN_MESSAGE_TEXT " + str(e) + " "
try:
if scheduled_email.message_html:
save_scheduled_email = True
scheduled_email.message_html = \
scheduled_email.message_html.replace('Your friend', sender_name)
except Exception as e:
status += "COULD_NOT_REPLACE_NAME_IN_HTML " + str(e) + " "
if save_scheduled_email:
try:
scheduled_email.save()
status += "SCHEDULED_EMAIL_SAVED "
except Exception as e:
status += "COULD_NOT_SAVE_SCHEDULED_EMAIL " + str(e) + " "
send_results = self.send_scheduled_email(scheduled_email)
email_scheduled_sent = send_results['email_scheduled_sent']
status += send_results['status']
if email_scheduled_sent:
# If scheduled email sent successfully change their status from WAITING_FOR_VERIFICATION to SENT
send_status = SENT
try:
scheduled_email.send_status = send_status
scheduled_email.save()
except Exception as e:
status += "FAILED_TO_UPDATE_SEND_STATUS: " + str(e) + ' '
results = {
'success': success,
'status': status,
'at_least_one_email_found': at_least_one_email_found,
}
return results
def update_email_address_with_new_secret_key(self, email_we_vote_id):
results = self.retrieve_email_address_object('', email_we_vote_id)
if results['email_address_object_found']:
email_address_object = results['email_address_object']
try:
email_address_object.secret_key = generate_random_string(12)
email_address_object.save()
return email_address_object.secret_key
except Exception as e:
return ""
else:
return ""
def update_email_address_object_as_verified(self, email_address_object):
try:
email_address_object.email_ownership_is_verified = True
email_address_object.save()
return email_address_object
except Exception as e:
return email_address_object
def update_friend_invitation_email_link_with_new_email(deleted_email_we_vote_id, updated_email_we_vote_id):
success = True
status = ""
try:
FriendInvitationEmailLink = apps.get_model('friend', 'FriendInvitationEmailLink')
try:
FriendInvitationEmailLink.objects.filter(recipient_email_we_vote_id=deleted_email_we_vote_id).\
update(recipient_email_we_vote_id=updated_email_we_vote_id)
except Exception as e:
status += "FAILED_TO_UPDATE-FriendInvitationEmailLink " + str(e) + ' '
except Exception as e:
status += "FAILED_TO_LOAD-FriendInvitationEmailLink " + str(e) + ' '
results = {
'success': success,
'status': status,
}
return results
| python |
from django.urls import path, include
urlpatterns = [
path('launches/', include('api_spacex.launches.urls'))
]
| python |
# TRAINS - Keras with Tensorboard example code, automatic logging model and Tensorboard outputs
#
# Train a simple deep NN on the MNIST dataset.
# Gets to 98.40% test accuracy after 20 epochs
# (there is *a lot* of margin for parameter tuning).
# 2 seconds per epoch on a K520 GPU.
from __future__ import print_function
import numpy as np
import tensorflow
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
from keras.models import load_model, save_model, model_from_json
from trains import Task
class TensorBoardImage(TensorBoard):
@staticmethod
def make_image(tensor):
import tensorflow as tf
from PIL import Image
tensor = np.stack((tensor, tensor, tensor), axis=2)
height, width, channels = tensor.shape
image = Image.fromarray(tensor)
import io
output = io.BytesIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
return tf.Summary.Image(height=height,
width=width,
colorspace=channels,
encoded_image_string=image_string)
def on_epoch_end(self, epoch, logs={}):
super(TensorBoardImage, self).on_epoch_end(epoch, logs)
import tensorflow as tf
images = self.validation_data[0] # 0 - data; 1 - labels
img = (255 * images[0].reshape(28, 28)).astype('uint8')
image = self.make_image(img)
summary = tf.Summary(value=[tf.Summary.Value(tag='image', image=image)])
self.writer.add_summary(summary, epoch)
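# TensorBoardImage extends the stock TensorBoard callback and additionally logs the
# first validation image as an image summary at the end of every epoch; to enable it,
# pass an instance of TensorBoardImage in the `callbacks` list of model.fit().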
batch_size = 128
nb_classes = 10
nb_epoch = 6
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.
X_test /= 255.
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
# model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
# model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model2 = Sequential()
model2.add(Dense(512, input_shape=(784,)))
model2.add(Activation('relu'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
# Connecting TRAINS
task = Task.init(project_name='examples', task_name='Keras with TensorBoard example')
# setting model outputs
labels = dict(('digit_%d' % i, i) for i in range(10))
task.set_model_label_enumeration(labels)
board = TensorBoard(histogram_freq=1, log_dir='/tmp/histogram_example', write_images=False)
model_store = ModelCheckpoint(filepath='/tmp/histogram_example/weight.{epoch}.hdf5')
# load previous model, if it is there
try:
model.load_weights('/tmp/histogram_example/weight.1.hdf5')
except:
pass
history = model.fit(X_train, Y_train,
batch_size=batch_size, epochs=nb_epoch,
callbacks=[board, model_store],
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| python |
from data.db.db import *
async def GivePlayerGold(interaction,arg1,arg2,owner_id):
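    # Owner-only admin command: adds `arg2` Gold to the PlayerEconomy row of user `arg1`.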
if interaction.user.id==owner_id:
execute(f"SELECT Gold FROM PlayerEconomy WHERE UserID = ?",arg1)
reply = cur.fetchall()
Gold = reply[0][0]
Gold = int(Gold) + int(arg2)
execute("UPDATE PlayerEconomy SET Gold = ? WHERE UserID = ?", Gold, arg1)
commit()
await interaction.response.send_message(f"Successfully given `{arg2}` Gold to <@{arg1}>")
else:
await interaction.response.send_message("No I dont think i will.")
async def TakePlayerGold(interaction,arg1,arg2,owner_id):
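    # Owner-only admin command: subtracts `arg2` Gold from the PlayerEconomy row of user `arg1`.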
if interaction.user.id==owner_id:
execute(f"SELECT Gold FROM PlayerEconomy WHERE UserID = ?",arg1)
reply = cur.fetchall()
Gold = reply[0][0]
Gold = int(Gold) - int(arg2)
execute("UPDATE PlayerEconomy SET Gold = ? WHERE UserID = ?", Gold, arg1)
commit()
await interaction.response.send_message(f"Successfully taken `{arg2}` Gold from <@{arg1}>")
else:
await interaction.response.send_message("No I dont think i will.") | python |
from .mongodbRepositorio import conexaoBanco, inserirDocumento
import datetime
from bson import ObjectId
import re
def salvarDoadorBD(registro, nome, dt_cadastro, cidade,
bairro, grupoabo, fatorrh, fone, celular, sexo,
dt_nascimento, dt_ultima_doacao, dt_proximo_doacao, mongodb):
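    # Builds a donor (doador) document from the given fields and inserts it into
    # the donor collection configured in the `mongodb` settings object.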
# mongodb
con = conexaoBanco(mongodb)
    # build the document in JSON format
docNovo = {
'registro': registro,
'nome': nome,
'dtreg': dt_cadastro,
'cidade': cidade,
'bairro': bairro,
'grupoabo': grupoabo,
'fatorrh': fatorrh,
'fone': fone,
'celular': celular,
'sexo': sexo,
'dtnasc': dt_nascimento,
'data_ultima_doacao': dt_ultima_doacao,
'data_proxima_doacao': dt_proximo_doacao,
'data_ultima_notificacao': ''
}
    # save to the collection
id_doc = inserirDocumento(con, docNovo, mongodb.collection_doador)
print('salvo no mongodb: ', id_doc)
def editarDoadorBD(registro, nome, dt_cadastro, cidade,
bairro, grupoabo, fatorrh, fone, celular, sexo,
dt_nascimento, dt_ultima_doacao, dt_proximo_doacao, data_ultima_notificacao, mongodb):
# mongodb
con = conexaoBanco(mongodb)
    # build the document in JSON format
docNovo = {
'registro': registro,
'nome': nome,
'dtreg': dt_cadastro,
'cidade': cidade,
'bairro': bairro,
'grupoabo': grupoabo,
'fatorrh': fatorrh,
'fone': fone,
'celular': celular,
'sexo': sexo,
'dtnasc': dt_nascimento,
'data_ultima_doacao': dt_ultima_doacao,
'data_proxima_doacao': dt_proximo_doacao,
'data_ultima_notificacao': data_ultima_notificacao
}
    # save to the collection
#id_doc = editarDocumentoDoador(con, docNovo, mongodb.collection_doador)
#print('editado no mongodb: ', id_doc)
def editarNotificacaoDoadorBD(registro, permissao, mongodb):
# mongodb
con = conexaoBanco(mongodb)
print('atualizando permissao de notificacao:', registro, permissao)
    # save to the collection
servico = con[mongodb.collection_doador]
id = servico.update_one({"registro": registro},
{"$set": {"permissao_notificacao": permissao}}, upsert=True)
def editarUltimaNotificacaoDoadorBD(registro, data, mongodb):
# mongodb
con = conexaoBanco(mongodb)
print('NOTIFICADO: atualizando data_ultima_notificacao:', registro, data)
    # save to the collection
servico = con[mongodb.collection_doador]
id = servico.update_one({"registro": registro},
{"$set": {"data_ultima_notificacao": (data)}}, upsert=True)
def listarDoadoresBD(mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
return list(collection.find())[0:100]
def listarDoadoresParaNotificarPrimeiraVezBD(mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
return list(collection.find({'data_ultima_notificacao': ''}))[0:100]
def listarDoadoresPorCodigos(codigos, mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
lista = list()
for cod in codigos:
print('cod:', cod)
lista.append(list(collection.find({'registro': cod })))
return lista
def listarDoadoresParaNotificaMasculinoBD(mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
return list(collection.find({'sexo': 'MASCULINO'}))[0:100]
def listarDoadoresParaNotificaFemininoBD(mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
#dataInicio = datetime.datetime.now() - datetime.timedelta(90)
#dataFim = datetime.datetime.now()
return list(collection.find({'sexo': 'FEMININO'}))[0:100]
#'data_ultima_notificacao': {'$gte': dataInicio, '$lt': dataFim}}
def listarDoadoresParaNotificarMasculinoBD(mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
dataInicio = datetime.datetime.now() - datetime.timedelta(60)
dataFim = datetime.datetime.now()
return list(collection.find({'sexo': 'MASCULINO',
'data_ultima_notificacao': {'$gte': dataInicio, '$lt': dataFim}}
))[0:100]
def listarDoadoresPorTipoBD(grupo, fator, mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
rgxGrupo = re.compile('.*'+grupo+'.*', re.IGNORECASE)
rgxFator = re.compile('.*'+fator+'.*', re.IGNORECASE)
return list(collection.find({'grupoabo': rgxGrupo, 'fatorrh': rgxFator}))
def listarDoadoresPorLocalidadeBD(cidade, bairro, mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
rgxCidade = re.compile('.*'+cidade+'.*', re.IGNORECASE)
rgxBairro = re.compile('.*'+bairro+'.*', re.IGNORECASE)
return list(collection.find({'cidade': rgxCidade, 'bairro': rgxBairro}))
def listarBairrosPorCidadeBD(cidade, mongodb):
con = conexaoBanco(mongodb)
collection = con[mongodb.collection_doador]
rgxCidade = re.compile('.*'+cidade+'.*', re.IGNORECASE)
# return list(collection.group(key={"bairro":1}, condition={'cidade':rgxCidade},
# initial={"count":0}, reduce={}))
return list( collection.aggregate([
{"$match": {"cidade": rgxCidade}},
{"$group": {"_id": {"bairro": "$bairro"}}},
{"$project": {
"_id": 0,
"bairro": "$_id.bairro"
}},
{"$sort": {"bairro": 1}}
])
)
#def listarDoadoresAptosParaNotificar(mongodb):
    # TODO: implementation
import zof
APP = zof.Application(__name__)
FLOW_MOD = zof.compile('''
type: FLOW_MOD
msg:
table_id: $table
command: ADD
match: []
instructions:
- instruction: APPLY_ACTIONS
actions:
- action: OUTPUT
port_no: $port
''')
@APP.message('CHANNEL_UP')
def channel_up(event):
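    # On every new switch connection, install a match-all flow in table 0 that
    # sends packets to the controller.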
FLOW_MOD.send(table=0, port='CONTROLLER')
if __name__ == '__main__':
zof.run()
| python |
#!/usr/bin/python3
# Grab data from the Riff.CC MySQL service and render it to the Curator's PostgreSQL database
# Credits:
# - https://stackoverflow.com/questions/10195139/how-to-retrieve-sql-result-column-value-using-column-name-in-python
# - https://github.com/PyMySQL/PyMySQL
# - https://stackoverflow.com/questions/37926717/psycopg2-unable-to-insert-into-specific-columns
# Import needed modules
from __future__ import with_statement
import os
import sys
import yaml
import pymysql.cursors
import psycopg2
# Dynamically load in our magic config files
configname = os.path.expanduser('~/.rcc-tools.yml')
config = yaml.safe_load(open(configname))
# Check if the config is empty
if config is None:
print("Failed to load configuration.")
sys.exit(1338)
# Get our Riff.CC credentials and load them in
sqlpassword = config["password"]
curator_user = config["curator_user"]
curator_pass = config["curator_pass"]
curator_host = config["curator_host"]
# Connect to the Unit3D database
connection = pymysql.connect(host='localhost',
user='unit3d',
password=sqlpassword,
database='unit3d',
cursorclass=pymysql.cursors.DictCursor)
# Connect to the Curator database
connpg = psycopg2.connect(host=curator_host,
database="collection",
user=curator_user,
password=curator_pass)
# create a cursor
cursorpg = connpg.cursor()
with connection:
with connection.cursor() as cursor:
# Ingest releases
# Read everything from Unit3D (traditional site), filtering for only valid torrents
sql = "SELECT * FROM `torrents` WHERE status=1"
cursor.execute(sql)
result_set = cursor.fetchall()
for row in result_set:
# For every existing release, gather relevant metadata and massage it into Curator.
release_id = row["id"]
name = row["name"]
slug = row["slug"]
description = row["description"]
mediainfo = row["mediainfo"]
category_id = row["category_id"]
uploader_id = row["user_id"]
featured = bool(row["featured"])
created_at = row["created_at"]
updated_at = row["updated_at"]
type_id = row["type_id"]
ipfs_hash = None
if row["stream_id"] is not None:
ipfs_hash = row["stream_id"]
resolution_id = row["resolution_id"]
print("Processing release id: " + str(release_id) + " (name: " + str(name) + ")")
# do this the right way - https://www.psycopg.org/docs/usage.html?highlight=escape#the-problem-with-the-query-parameters
SQL = '''INSERT INTO releases
(id, name, category_id, type_id, resolution_id, uploader_id, featured, created_at, updated_at, description, mediainfo, slug, ipfs_hash)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (id) DO UPDATE SET
(id, name, category_id, type_id, resolution_id, uploader_id, featured, created_at, updated_at, description, mediainfo, slug, ipfs_hash)
= (EXCLUDED.id, EXCLUDED.name, EXCLUDED.category_id, EXCLUDED.type_id, EXCLUDED.resolution_id, EXCLUDED.uploader_id, EXCLUDED.featured, EXCLUDED.created_at, EXCLUDED.updated_at, EXCLUDED.description, EXCLUDED.mediainfo, EXCLUDED.slug, EXCLUDED.ipfs_hash);'''
data = (release_id, name, category_id, type_id, resolution_id, uploader_id, featured, created_at, updated_at, description, mediainfo, slug, ipfs_hash)
cursorpg.execute(SQL, data)
# We could move this outside the loop and simply commit everything in one go.
# Write the data to the Curator.
connpg.commit()
# Reset any re-used params by setting them to empty strings, just in case.
name = ""
slug = ""
# Ingest categories from Unit3D
sql = "SELECT * FROM `categories`"
cursor.execute(sql)
result_set = cursor.fetchall()
for row in result_set:
print(row)
category_id = row["id"]
name = row["name"]
slug = row["slug"]
image = row["image"]
SQL = '''INSERT INTO categories
(id, name, slug, image)
VALUES (%s, %s, %s, %s)
ON CONFLICT (id) DO UPDATE SET
(id, name, slug, image)
= (EXCLUDED.id, EXCLUDED.name, EXCLUDED.slug, EXCLUDED.image);'''
data = (category_id, name, slug, image)
cursorpg.execute(SQL, data)
# We could move this outside the loop and simply commit everything in one go.
# Write the data to the Curator.
connpg.commit()
| python |
#!/usr/bin/env python
# Copyright (C) 2015 Dmitry Rodionov
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from ..dtrace.apicalls import apicalls
import inspect
from sets import Set
from os import sys, path
def choose_package_class(file_type, file_name, suggestion=None):
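    """ Picks the analysis package class for a target file.
    Uses `suggestion` when given, otherwise guesses the package name from the
    file type/name, imports modules.packages.<name> and returns the class whose
    name is the capitalized package name.
    """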
if suggestion is not None:
name = suggestion
else:
name = _guess_package_name(file_type, file_name)
if not name:
return None
full_name = "modules.packages.%s" % name
try:
# FIXME(rodionovd):
# I couldn't figure out how to make __import__ import anything from
# the (grand)parent package, so here I just patch the PATH
sys.path.append(path.abspath(path.join(path.dirname(__file__), '..', '..')))
# Since we don't know the package class yet, we'll just import everything
# from this module and then try to figure out the required member class
module = __import__(full_name, globals(), locals(), ['*'])
except ImportError:
raise Exception("Unable to import package \"{0}\": it does not "
"exist.".format(name))
try:
pkg_class = _found_target_class(module, name)
except IndexError as err:
raise Exception("Unable to select package class (package={0}): "
"{1}".format(full_name, err))
return pkg_class
def _found_target_class(module, name):
""" Searches for a class with the specific name: it should be
equal to capitalized $name.
"""
members = inspect.getmembers(module, inspect.isclass)
return [x[1] for x in members if x[0] == name.capitalize()][0]
def _guess_package_name(file_type, file_name):
if "Bourne-Again" in file_type or "bash" in file_type:
return "bash"
elif "Mach-O" in file_type and "executable" in file_type:
return "macho"
elif "directory" in file_type and (file_name.endswith(".app") or file_name.endswith(".app/")):
return "app"
elif "Zip archive" in file_type and file_name.endswith(".zip"):
return "zip"
else:
return None
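# Illustrative example (the file-type string and resolved class are assumptions based
# on the logic above): choose_package_class("Mach-O 64-bit executable x86_64", "sample")
# would guess the "macho" package and resolve it to modules.packages.macho.Macho.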
class Package(object):
""" Base analysis package """
# Our target may touch some files; keep an eye on them
touched_files = Set()
def __init__(self, target, host, **kwargs):
if not target or not host:
raise Exception("Package(): `target` and `host` arguments are required")
self.host = host
self.target = target
# Any analysis options?
self.options = kwargs.get("options", {})
# A timeout for analysis
self.timeout = kwargs.get("timeout", None)
# Command-line arguments for the target.
self.args = self.options.get("args", [])
# Choose an analysis method (or fallback to apicalls)
self.method = self.options.get("method", "apicalls")
# Should our target be launched as root or not
self.run_as_root = _string_to_bool(self.options.get("run_as_root", "False"))
def prepare(self):
""" Preparation routine. Do anything you want here. """
pass
def start(self):
""" Runs an analysis process.
This function is a generator.
"""
self.prepare()
if self.method == "apicalls":
self.apicalls_analysis()
else:
raise Exception("Unsupported analysis method. Try `apicalls`.")
def apicalls_analysis(self):
kwargs = {
'args': self.args,
'timeout': self.timeout,
'run_as_root': self.run_as_root
}
for call in apicalls(self.target, **kwargs):
# Send this API to Cuckoo host
self.host.send_api(call)
# Handle file IO APIs
self.handle_files(call)
def handle_files(self, call):
""" Remember what files our target has been working with during the analysis"""
def makeabs(filepath):
# Is it a relative path? Suppose it's relative to our dtrace working directory
if not path.isfile(filepath):
filepath = path.join(path.dirname(__file__), "..", "dtrace", filepath)
return filepath
if call.api in ["fopen", "freopen", "open"]:
self.open_file(makeabs(call.args[0]))
if call.api in ["rename"]:
self.move_file(makeabs(call.args[0]), makeabs(call.args[1]))
if call.api in ["copyfile"]:
self.copy_file(makeabs(call.args[0]), makeabs(call.args[1]))
if call.api in ["remove", "unlink"]:
self.remove_file(makeabs(call.args[0]))
def open_file(self, filepath):
self.touched_files.add(filepath)
def move_file(self, frompath, topath):
# Remove old reference if needed
if frompath in self.touched_files:
self.touched_files.remove(frompath)
self.touched_files.add(topath)
def copy_file(self, frompath, topath):
# Add both files to the watch list
self.touched_files.update([frompath, topath])
def remove_file(self, filepath):
# TODO(rodionovd): we're actually unable to dump this file
# because well, it was removed
self.touched_files.add(filepath)
def _string_to_bool(raw):
if not isinstance(raw, basestring):
raise Exception("Unexpected input: not a string :/")
return raw.lower() in ("yes", "true", "t", "1")
| python |
from PIL import Image, ImageDraw
from vk_bot.config import *
import io, requests, random, os
from vk_bot.core.modules.basicplug import BasicPlug
from vk_bot.core.modules.upload import Upload
class Quote(BasicPlug, Upload):
doc = "Фильтр Вьетнам"
command = ("вьетнам",)
def main(self):
url = self.event.object['attachments'][0]['photo']['sizes'][-1]['url']
img = requests.get(url).content
f = io.BytesIO(img)
image = Image.open(f)
draw = ImageDraw.Draw(image)
pix = image.load()
(width, height) = image.size
for i in range(width):
for j in range(height):
a = pix[i, j][0]
b = pix[i, j][1]
c = pix[i, j][2]
S = (a + b + c) // 3
draw.point((i, j), (S, S, S))
vietnam = Image.open('pics/u-s-_helicopters_vietnam.jpg')
resized_img = vietnam.resize((width, height), Image.ANTIALIAS)
#resized_img = ImageEnhance.Brightness(resized_img).enhance(1.2)
image.paste(resized_img.convert('RGB'), (0, 0), resized_img)
name = f"name{random.randint(0, 1000)}.jpg"
image.save(name)
try:
attachment = self.uploadphoto(name)
self.sendmsg("Дэржите фотку", attachment)
finally:
os.remove(name)
| python |
from data import Data
from projects.job import Job
import json
from .service import Service
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
class Firestore(Service):
def __init__(self, service_account_path_file, timestamp_name='timestamp', collection='default'):
cred = credentials.Certificate(service_account_path_file)
firebase_admin.initialize_app(cred)
self.db = firestore.client()
self.collection = self.db.collection(collection)
self.timestamp_name = timestamp_name
def connect(self, connection={}):
pass
    def db_collection(self):
        # Return the configured Firestore collection reference
        return self.collection
    def read(self, job: Job) -> Data:
        data = Data()
        docs = self.collection.where(self.timestamp_name, u'>=', job.from_time).where(self.timestamp_name, u'<', job.to_time).stream()
        for doc in docs:
            data.add_doc(doc.to_dict())
        return data
    def write(self, data: Data, job: Job) -> Data:
        docs = data.get_docs()
        batch = self.db.batch()
        for doc in docs:
            # Queue each document in the batch under an auto-generated document ID
            batch.set(self.collection.document(), doc)
        batch.commit()
        return data.set_docs(docs)
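# Usage sketch (file name, collection name and the `job` object are assumptions,
# not part of the original project):
#   fs = Firestore('service_account.json', timestamp_name='timestamp', collection='events')
#   data = fs.read(job)     # docs with job.from_time <= timestamp < job.to_time
#   fs.write(data, job)     # batch-write the docs back into the collection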
| python |
"""Prepare a lexical data file for spacy train."""
import gzip
import json
import math
import sys
import typer
from itertools import islice
from pathlib import Path
def main(
full_vocabulary_path: Path = typer.Argument(..., help='Path to the full vocabulary'),
input_vocabulary_path: Path = typer.Argument(..., help='Path to the input vocabulary')
):
probs, oov_prob = read_freqs(full_vocabulary_path, input_vocabulary_path)
out = sys.stdout
header = {'lang': 'fi', 'settings': {'oov_prob': oov_prob}}
write_json_line(header, out)
for orth, p in probs.items():
word_data = {'orth': orth, 'prob': p}
write_json_line(word_data, out)
def read_freqs(full_loc, freq_loc):
total = 0
n = 0
with gzip.open(full_loc, 'rt', encoding='utf-8') as f:
for i, line in enumerate(f):
n = i + 1
freq, token = line.strip().split(' ', 1)
freq = int(freq)
total += freq
log_total = math.log(total)
probs = {}
remaining_freq = total
with gzip.open(freq_loc, 'rt', encoding='utf-8') as f:
for line in f:
freq, token = line.strip().split(' ', 1)
freq = int(freq)
probs[token] = math.log(freq) - log_total
remaining_freq -= freq
# Our OOV estimate is the remaining probability mass distributed evenly on
# the excluded word types.
oov_prob = math.log(remaining_freq) - log_total - math.log(n - len(probs))
return probs, oov_prob
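# Worked example (assumed toy counts): with a full list of 3 word types
# {a: 70, b: 20, c: 10} (total 100) and only {a, b} kept in the input vocabulary,
# the remaining mass is 10 spread over 3 - 2 = 1 excluded type, so
# oov_prob = log(10) - log(100) - log(1) = -2.303 (approximately).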
def write_json_line(obj, fp):
json.dump(obj, fp=fp, ensure_ascii=False)
fp.write('\n')
if __name__ == '__main__':
typer.run(main)
| python |
# coding=utf-8
"""
Singular Value Decomposition Based Collaborative Filtering Recommender
[Rating Prediction]
Literature:
Badrul Sarwar , George Karypis , Joseph Konstan , John Riedl:
Incremental Singular Value Decomposition Algorithms for Highly Scalable Recommender Systems
Fifth International Conference on Computer and Information Science 2002.
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.7894
"""
# © 2018. Case Recommender (MIT License)
import numpy as np
from scipy.sparse.linalg import svds
from caserec.recommenders.rating_prediction.base_rating_prediction import BaseRatingPrediction
from caserec.utils.extra_functions import timed
__author__ = 'Arthur Fortes <[email protected]>'
class SVD(BaseRatingPrediction):
def __init__(self, train_file=None, test_file=None, output_file=None, factors=10, sep='\t', output_sep='\t',
random_seed=None):
"""
Matrix Factorization for rating prediction
Matrix factorization models map both users and items to a joint latent factor space of dimensionality f,
such that user-item interactions are modeled as inner products in that space.
Usage::
            >> SVD(train, test).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param factors: Number of latent factors per user/item
:type factors: int, default 10
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
:type random_seed: int, default None
"""
super(SVD, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file, sep=sep,
output_sep=output_sep)
self.recommender_name = 'SVD'
self.factors = factors
if random_seed is not None:
np.random.seed(random_seed)
# internal vars
self.feedback_triples = None
self.prediction_matrix = None
def init_model(self):
"""
Method to treat and initialize the model
"""
self.feedback_triples = []
# Map interaction with ids
for user in self.train_set['feedback']:
for item in self.train_set['feedback'][user]:
self.feedback_triples.append((self.user_to_user_id[user], self.item_to_item_id[item],
self.train_set['feedback'][user][item]))
self.create_matrix()
def fit(self):
"""
This method performs Singular Value Decomposition over the training data.
"""
u, s, vt = svds(self.matrix, k=self.factors)
s_diagonal_matrix = np.diag(s)
self.prediction_matrix = np.dot(np.dot(u, s_diagonal_matrix), vt)
def predict_score(self, u, i, cond=True):
"""
Method to predict a single score for a pair (user, item)
:param u: User ID
:type u: int
:param i: Item ID
:type i: int
:param cond: Use max and min values of train set to limit score
:type cond: bool, default True
:return: Score generate for pair (user, item)
:rtype: float
"""
rui = self.train_set["mean_value"] + self.prediction_matrix[u][i]
if cond:
if rui > self.train_set["max_value"]:
rui = self.train_set["max_value"]
elif rui < self.train_set["min_value"]:
rui = self.train_set["min_value"]
return rui
def predict(self):
"""
This method computes a final rating for unknown pairs (user, item)
"""
if self.test_file is not None:
for user in self.test_set['users']:
for item in self.test_set['feedback'][user]:
self.predictions.append((user, item, self.predict_score(self.user_to_user_id[user],
self.item_to_item_id[item], True)))
else:
            raise NotImplementedError
def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
"""
Extends compute method from BaseRatingPrediction. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation measures
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
:type table_sep: str, default '\t'
"""
super(SVD, self).compute(verbose=verbose)
if verbose:
self.init_model()
print("training_time:: %4f sec" % timed(self.fit))
if self.extra_info_header is not None:
print(self.extra_info_header)
print("prediction_time:: %4f sec" % timed(self.predict))
print('\n')
else:
# Execute all in silence without prints
self.init_model()
self.fit()
self.predict()
self.write_predictions()
if self.test_file is not None:
self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
| python |
#!/usr/bin/python
import numpy
from pylab import *
from numpy import *
from scipy import *
from scipy.stats import mode
from scipy.misc.common import factorial
from scipy.spatial.distance import correlation,euclidean
from math import log
import os
path=os.getenv('P_Dir')
#Mutual information
'''
Definition:
p(x,y)
I(X;Y) = sum sum p(x,y) log --------
x in X y in Y p(x)p(y)
'''
def log2(n): return log(n)*1.0/log(2)
def log10(n): return log(n)*1.0/log(10)
def mutual_info(x,y):
N=double(x.size)
I=0.0
eps = numpy.finfo(float).eps
for l1 in unique(x):
for l2 in unique(y):
#Find the intersections
l1_ids=nonzero(x==l1)[0]
l2_ids=nonzero(y==l2)[0]
pxy=(double(intersect1d(l1_ids,l2_ids).size)/N)+eps
I+=pxy*log2(pxy/((l1_ids.size/N)*(l2_ids.size/N)))
return I
#Normalized mutual information
def nmi(x,y):
N=x.size
I=mutual_info(x,y)
Hx=0
for l1 in unique(x):
l1_count=nonzero(x==l1)[0].size
Hx+=-(double(l1_count)/N)*log2(double(l1_count)/N)
Hy=0
for l2 in unique(y):
l2_count=nonzero(y==l2)[0].size
Hy+=-(double(l2_count)/N)*log2(double(l2_count)/N)
return I/((Hx+Hy)/2)
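# Quick sanity check (assumed toy labelings): for x = [0, 0, 1, 1] and y = [1, 1, 0, 0]
# the two labelings carry the same information, so mutual_info(x, y) is roughly 1 bit
# and nmi(x, y) is roughly 1.0 (up to the eps regularizer).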
PLV=loadtxt('%s/PLV_sync.dat' %path,unpack=True)
Corr=loadtxt('%s/Correlation_Sorted_By_Pairs.dat' %path,unpack=True)
XCorr=correlation(PLV[2],Corr[2])
print(XCorr)
| python |
from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
login = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password = db.Column(db.String(120))
last_login = db.Column(db.TIMESTAMP)
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def __repr__(self): # pragma: no cover
        return '<User %r>' % self.login
| python |
import tensorflow as tf
def sample_gumbel(shape, eps=1e-20):
"""Sample from Gumbel(0, 1)"""
U = tf.random_uniform(shape,minval=0,maxval=1)
return -tf.log(-tf.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
y = logits + sample_gumbel(tf.shape(logits))
return tf.nn.softmax( y / temperature)
def gumbel_softmax(logits, temperature, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probabilitiy distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
k = tf.shape(logits)[-1]
#y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,1,keep_dims=True)),y.dtype)
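        # Straight-through estimator: the forward pass uses the discrete y_hard,
        # while gradients flow through the soft sample y.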
y = tf.stop_gradient(y_hard - y) + y
    return y
| python |
# -*- coding: utf-8 -*-
'''
Tests neo_utils.core.
@author: Pierre Thibault (pierre.thibault1 -at- gmail.com)
@license: MIT
@since: 2010-11-10
'''
__docformat__ = "epytext en"
import unittest
from neo_utils.core import count
from neo_utils.core import every
from neo_utils.core import inverse_linked_list
from neo_utils.core import Prototype
from neo_utils.core import negate
from neo_utils.core import some
from neo_utils.core import transform
class TestNeoUtils(unittest.TestCase):
"""TestNeoUtils the methods of the module """
EMPTY_LIST = []
ALL_FALSE = [False, 0, []]
ALL_TRUE = [True, 1, -45, (1)]
SOME_TRUE = (0, False, [1], [])
@staticmethod
def indentity(p):
return p
def assert_linked_list_order(self, linked_list, sequence_order):
current_node = linked_list
index = 0
while current_node:
self.assertEqual(current_node, sequence_order[index])
current_node = current_node.next
index += 1
self.assertEqual(index, len(sequence_order))
def test_every(self):
self.assertTrue(every(TestNeoUtils.indentity,
TestNeoUtils.EMPTY_LIST))
self.assertFalse(every(TestNeoUtils.indentity,
TestNeoUtils.ALL_FALSE))
self.assertTrue(every(TestNeoUtils.indentity,
TestNeoUtils.ALL_TRUE))
self.assertFalse(every(TestNeoUtils.indentity,
TestNeoUtils.SOME_TRUE))
def test_count(self):
self.assertEqual(0, count(TestNeoUtils.indentity,
TestNeoUtils.EMPTY_LIST))
self.assertEqual(0, count(TestNeoUtils.indentity,
TestNeoUtils.ALL_FALSE))
self.assertEqual(4, count(TestNeoUtils.indentity,
TestNeoUtils.ALL_TRUE))
self.assertEqual(1, count(TestNeoUtils.indentity,
TestNeoUtils.SOME_TRUE))
def test_inverse_linked_list(self):
o1 = Prototype()
o2 = Prototype()
o3 = Prototype()
o1.next = o2
o2.next = o3
o3.next = None
self.assert_linked_list_order(inverse_linked_list(o1), (o3, o2, o1))
self.assert_linked_list_order(inverse_linked_list(None), tuple())
o1 = Prototype()
o2 = Prototype()
o1.next = o2
o2.next = None
self.assert_linked_list_order(inverse_linked_list(o1), (o2, o1))
def test_negate(self):
negation = negate(TestNeoUtils.indentity)
result = []
for i in TestNeoUtils.SOME_TRUE:
result.append(negation(i))
self.assertEqual(result, [True, True, False, True])
def test_some(self):
self.assertFalse(some(TestNeoUtils.indentity,
TestNeoUtils.EMPTY_LIST))
self.assertFalse(some(TestNeoUtils.indentity, TestNeoUtils.ALL_FALSE))
self.assertTrue(some(TestNeoUtils.indentity, TestNeoUtils.ALL_TRUE))
self.assertTrue(some(TestNeoUtils.indentity, TestNeoUtils.SOME_TRUE))
def test_transform(self):
l = [4, 5, 7]
transform(lambda x: x + 1, l)
self.assertEqual(l, [5, 6, 8])
l = []
transform(lambda x: x * x, l)
self.assertEqual(l, [])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'TestNeoUtils.testName']
    unittest.main()
| python |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import inspect
import six
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp import protocol
from autobahn.wamp.types import ComponentConfig
from autobahn.websocket.util import parse_url
from autobahn.twisted.websocket import WampWebSocketClientFactory
# new API
# from autobahn.twisted.connection import Connection
import txaio
txaio.use_twisted()
__all__ = [
'ApplicationSession',
'ApplicationSessionFactory',
'ApplicationRunner',
'Application',
'Service',
# new API
'Session'
]
try:
from twisted.application import service
except (ImportError, SyntaxError):
# Not on PY3 yet
service = None
__all__.pop(__all__.index('Service'))
class ApplicationSession(protocol.ApplicationSession):
"""
WAMP application session for Twisted-based applications.
"""
class ApplicationSessionFactory(protocol.ApplicationSessionFactory):
"""
WAMP application session factory for Twisted-based applications.
"""
session = ApplicationSession
"""
The application session class this application session factory will use. Defaults to :class:`autobahn.twisted.wamp.ApplicationSession`.
"""
class ApplicationRunner(object):
"""
This class is a convenience tool mainly for development and quick hosting
of WAMP application components.
It can host a WAMP application component in a WAMP-over-WebSocket client
connecting to a WAMP router.
"""
log = txaio.make_logger()
def __init__(self, url, realm, extra=None, serializers=None, ssl=None, proxy=None):
"""
:param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`)
:type url: unicode
:param realm: The WAMP realm to join the application session to.
:type realm: unicode
:param extra: Optional extra configuration to forward to the application component.
:type extra: dict
:param serializers: A list of WAMP serializers to use (or None for default serializers).
Serializers must implement :class:`autobahn.wamp.interfaces.ISerializer`.
:type serializers: list
:param ssl: (Optional). If specified this should be an
instance suitable to pass as ``sslContextFactory`` to
:class:`twisted.internet.endpoints.SSL4ClientEndpoint`` such
as :class:`twisted.internet.ssl.CertificateOptions`. Leaving
it as ``None`` will use the result of calling Twisted's
:meth:`twisted.internet.ssl.platformTrust` which tries to use
your distribution's CA certificates.
:type ssl: :class:`twisted.internet.ssl.CertificateOptions`
:param proxy: Explicit proxy server to use; a dict with ``host`` and ``port`` keys
:type proxy: dict or None
"""
assert(type(url) == six.text_type)
assert(realm is None or type(realm) == six.text_type)
assert(extra is None or type(extra) == dict)
assert(proxy is None or type(proxy) == dict)
self.url = url
self.realm = realm
self.extra = extra or dict()
self.serializers = serializers
self.ssl = ssl
self.proxy = proxy
def run(self, make, start_reactor=True):
"""
Run the application component.
:param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
:type make: callable
:param start_reactor: if True (the default) this method starts
the Twisted reactor and doesn't return until the reactor
stops. If there are any problems starting the reactor or
connect()-ing, we stop the reactor and raise the exception
back to the caller.
:returns: None is returned, unless you specify
``start_reactor=False`` in which case the Deferred that
connect() returns is returned; this will callback() with
an IProtocol instance, which will actually be an instance
of :class:`WampWebSocketClientProtocol`
"""
if start_reactor:
            # only select framework, set loop and start logging when we are asked to
            # start the reactor - otherwise we are running in a program that likely
            # already took care of all this.
from twisted.internet import reactor
txaio.use_twisted()
txaio.config.loop = reactor
txaio.start_logging(level='info')
isSecure, host, port, resource, path, params = parse_url(self.url)
        # factory for creating our ApplicationSession instance
def create():
cfg = ComponentConfig(self.realm, self.extra)
try:
session = make(cfg)
except Exception as e:
if start_reactor:
# the app component could not be created .. fatal
self.log.error("{err}", err=e)
reactor.stop()
else:
# if we didn't start the reactor, it's up to the
# caller to deal with errors
raise
else:
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = WampWebSocketClientFactory(create, url=self.url, serializers=self.serializers, proxy=self.proxy)
        # suppress pointless log noise like
        # "Starting factory <autobahn.twisted.websocket.WampWebSocketClientFactory object at 0x2b737b480e10>"
transport_factory.noisy = False
# if user passed ssl= but isn't using isSecure, we'll never
# use the ssl argument which makes no sense.
context_factory = None
if self.ssl is not None:
if not isSecure:
raise RuntimeError(
'ssl= argument value passed to %s conflicts with the "ws:" '
'prefix of the url argument. Did you mean to use "wss:"?' %
self.__class__.__name__)
context_factory = self.ssl
elif isSecure:
from twisted.internet.ssl import optionsForClientTLS
context_factory = optionsForClientTLS(host)
from twisted.internet import reactor
if self.proxy is not None:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
transport_factory.contextFactory = context_factory
elif isSecure:
from twisted.internet.endpoints import SSL4ClientEndpoint
assert context_factory is not None
client = SSL4ClientEndpoint(reactor, host, port, context_factory)
else:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, host, port)
d = client.connect(transport_factory)
# as the reactor shuts down, we wish to wait until we've sent
# out our "Goodbye" message; leave() returns a Deferred that
# fires when the transport gets to STATE_CLOSED
def cleanup(proto):
if hasattr(proto, '_session') and proto._session is not None:
if proto._session.is_attached():
return proto._session.leave()
elif proto._session.is_connected():
return proto._session.disconnect()
# when our proto was created and connected, make sure it's cleaned
# up properly later on when the reactor shuts down for whatever reason
def init_proto(proto):
reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
return proto
# if we connect successfully, the arg is a WampWebSocketClientProtocol
d.addCallback(init_proto)
# if the user didn't ask us to start the reactor, then they
# get to deal with any connect errors themselves.
if start_reactor:
# if an error happens in the connect(), we save the underlying
# exception so that after the event-loop exits we can re-raise
# it to the caller.
class ErrorCollector(object):
exception = None
def __call__(self, failure):
self.exception = failure.value
reactor.stop()
connect_error = ErrorCollector()
d.addErrback(connect_error)
# now enter the Twisted reactor loop
reactor.run()
# if we exited due to a connection error, raise that to the
# caller
if connect_error.exception:
raise connect_error.exception
else:
# let the caller handle any errors
return d
class _ApplicationSession(ApplicationSession):
"""
WAMP application session class used internally with :class:`autobahn.twisted.app.Application`.
"""
def __init__(self, config, app):
"""
:param config: The component configuration.
:type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`
:param app: The application this session is for.
:type app: Instance of :class:`autobahn.twisted.wamp.Application`.
"""
# noinspection PyArgumentList
ApplicationSession.__init__(self, config)
self.app = app
@inlineCallbacks
def onConnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onConnect`
"""
yield self.app._fire_signal('onconnect')
self.join(self.config.realm)
@inlineCallbacks
def onJoin(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`
"""
for uri, proc in self.app._procs:
yield self.register(proc, uri)
for uri, handler in self.app._handlers:
yield self.subscribe(handler, uri)
yield self.app._fire_signal('onjoined')
@inlineCallbacks
def onLeave(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`
"""
yield self.app._fire_signal('onleave')
self.disconnect()
@inlineCallbacks
def onDisconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onDisconnect`
"""
yield self.app._fire_signal('ondisconnect')
class Application(object):
"""
A WAMP application. The application object provides a simple way of
creating, debugging and running WAMP application components.
"""
log = txaio.make_logger()
def __init__(self, prefix=None):
"""
:param prefix: The application URI prefix to use for procedures and topics,
e.g. ``"com.example.myapp"``.
:type prefix: unicode
"""
self._prefix = prefix
# procedures to be registered once the app session has joined the router/realm
self._procs = []
# event handler to be subscribed once the app session has joined the router/realm
self._handlers = []
# app lifecycle signal handlers
self._signals = {}
# once an app session is connected, this will be here
self.session = None
def __call__(self, config):
"""
Factory creating a WAMP application session for the application.
:param config: Component configuration.
:type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`
:returns: obj -- An object that derives of
:class:`autobahn.twisted.wamp.ApplicationSession`
"""
assert(self.session is None)
self.session = _ApplicationSession(config, self)
return self.session
def run(self, url=u"ws://localhost:8080/ws", realm=u"realm1", start_reactor=True):
"""
Run the application.
:param url: The URL of the WAMP router to connect to.
:type url: unicode
:param realm: The realm on the WAMP router to join.
:type realm: unicode
"""
runner = ApplicationRunner(url, realm)
return runner.run(self.__call__, start_reactor)
def register(self, uri=None):
"""
Decorator exposing a function as a remote callable procedure.
The first argument of the decorator should be the URI of the procedure
to register under.
:Example:
.. code-block:: python
@app.register('com.myapp.add2')
def add2(a, b):
return a + b
Above function can then be called remotely over WAMP using the URI `com.myapp.add2`
the function was registered under.
If no URI is given, the URI is constructed from the application URI prefix
and the Python function name.
:Example:
.. code-block:: python
app = Application('com.myapp')
# implicit URI will be 'com.myapp.add2'
@app.register()
def add2(a, b):
return a + b
If the function `yields` (is a co-routine), the `@inlineCallbacks` decorator
will be applied automatically to it. In that case, if you wish to return something,
you should use `returnValue`:
:Example:
.. code-block:: python
from twisted.internet.defer import returnValue
@app.register('com.myapp.add2')
def add2(a, b):
res = yield stuff(a, b)
returnValue(res)
:param uri: The URI of the procedure to register under.
:type uri: unicode
"""
def decorator(func):
if uri:
_uri = uri
else:
assert(self._prefix is not None)
_uri = "{0}.{1}".format(self._prefix, func.__name__)
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._procs.append((_uri, func))
return func
return decorator
def subscribe(self, uri=None):
"""
Decorator attaching a function as an event handler.
The first argument of the decorator should be the URI of the topic
to subscribe to. If no URI is given, the URI is constructed from
the application URI prefix and the Python function name.
        If the function yields, it will be assumed that it's an asynchronous
process and inlineCallbacks will be applied to it.
:Example:
.. code-block:: python
@app.subscribe('com.myapp.topic1')
def onevent1(x, y):
print("got event on topic1", x, y)
:param uri: The URI of the topic to subscribe to.
:type uri: unicode
"""
def decorator(func):
if uri:
_uri = uri
else:
assert(self._prefix is not None)
_uri = "{0}.{1}".format(self._prefix, func.__name__)
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._handlers.append((_uri, func))
return func
return decorator
def signal(self, name):
"""
Decorator attaching a function as handler for application signals.
Signals are local events triggered internally and exposed to the
developer to be able to react to the application lifecycle.
        If the function yields, it will be assumed that it's an asynchronous
coroutine and inlineCallbacks will be applied to it.
Current signals :
- `onjoined`: Triggered after the application session has joined the
realm on the router and registered/subscribed all procedures
and event handlers that were setup via decorators.
- `onleave`: Triggered when the application session leaves the realm.
.. code-block:: python
@app.signal('onjoined')
def _():
                # do after the app has joined a realm
:param name: The name of the signal to watch.
:type name: unicode
"""
def decorator(func):
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._signals.setdefault(name, []).append(func)
return func
return decorator
@inlineCallbacks
def _fire_signal(self, name, *args, **kwargs):
"""
Utility method to call all signal handlers for a given signal.
:param name: The signal name.
:type name: str
"""
for handler in self._signals.get(name, []):
try:
# FIXME: what if the signal handler is not a coroutine?
# Why run signal handlers synchronously?
yield handler(*args, **kwargs)
except Exception as e:
# FIXME
self.log.info("Warning: exception in signal handler swallowed: {err}", err=e)
if service:
# Don't define it if Twisted's service support isn't here
class Service(service.MultiService):
"""
A WAMP application as a twisted service.
The application object provides a simple way of creating, debugging and running WAMP application
components inside a traditional twisted application
This manages application lifecycle of the wamp connection using startService and stopService
        Using services also allows creating integration tests that properly terminate their connections
It can host a WAMP application component in a WAMP-over-WebSocket client
connecting to a WAMP router.
"""
factory = WampWebSocketClientFactory
def __init__(self, url, realm, make, extra=None, context_factory=None):
"""
:param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`)
:type url: unicode
:param realm: The WAMP realm to join the application session to.
:type realm: unicode
:param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
:type make: callable
:param extra: Optional extra configuration to forward to the application component.
:type extra: dict
:param context_factory: optional, only for secure connections. Passed as contextFactory to
the ``listenSSL()`` call; see https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IReactorSSL.connectSSL.html
:type context_factory: twisted.internet.ssl.ClientContextFactory or None
You can replace the attribute factory in order to change connectionLost or connectionFailed behaviour.
The factory attribute must return a WampWebSocketClientFactory object
"""
self.url = url
self.realm = realm
self.extra = extra or dict()
self.make = make
self.context_factory = context_factory
service.MultiService.__init__(self)
self.setupService()
def setupService(self):
"""
Setup the application component.
"""
is_secure, host, port, resource, path, params = parse_url(self.url)
            # factory for creating our ApplicationSession instance
def create():
cfg = ComponentConfig(self.realm, self.extra)
session = self.make(cfg)
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = self.factory(create, url=self.url)
# setup the client from a Twisted endpoint
if is_secure:
from twisted.application.internet import SSLClient
ctx = self.context_factory
if ctx is None:
from twisted.internet.ssl import optionsForClientTLS
ctx = optionsForClientTLS(host)
client = SSLClient(host, port, transport_factory, contextFactory=ctx)
else:
if self.context_factory is not None:
raise Exception("context_factory specified on non-secure URI")
from twisted.application.internet import TCPClient
client = TCPClient(host, port, transport_factory)
client.setServiceParent(self)
# new API
class Session(ApplicationSession):
def onJoin(self, details):
return self.on_join(details)
def onLeave(self, details):
return self.on_leave(details)
def onDisconnect(self):
return self.on_disconnect()
    def on_join(self, details):
        pass
def on_leave(self, details):
self.disconnect()
def on_disconnect(self):
pass
| python |
#!/usr/bin/env python3
"""
Created on Fri Sep 20 12:37:07 2019
@author: mikhail-matrosov
"""
from pycoercer.basic_validator import BasicValidator
class Options():
def __init__(self,
allow_unknown=True,
purge_unknown=False,
require_all=False,
break_loops=True,
load_as_jsonschema=False,
validate_schemas=True,
**_):
self.allow_unknown = allow_unknown
self.purge_unknown = purge_unknown
self.require_all = require_all
self.break_loops = break_loops # Makes Phimera ~10-15% slower
self.load_as_jsonschema = load_as_jsonschema
self.validate_schemas = validate_schemas
def __eq__(self, other):
return self.__dict__ == other.__dict__
def replace(self, **kwargs):
'''Returns a new instance'''
data = self.__dict__.copy()
data.update(kwargs)
return Options(**data)
class Validator(BasicValidator):
def __init__(self, schemas: dict = None, options=None, **kwargs):
super().__init__()
self.registry = {}
self.options = (options or Options()).replace(**kwargs)
if schemas:
self.update(schemas)
def __getitem__(self, k):
return self.registry[k]
def __setitem__(self, key, schema: dict):
self.update({key: schema})
def update(self, schemas: dict, options=None, **kwargs):
options = (options or self.options).replace(**kwargs)
self.options, options_backup = options, self.options
if options.load_as_jsonschema:
schemas = {k: {'type': 'dict', 'schema': v}
for k, v in schemas.items()}
# Validate input schemas
if options.validate_schemas:
schemas, err = pycoercer_schema_validator(schemas)
if err:
raise ValueError(err)
self._schemas.update(schemas)
# Code generation
self.registry.update({
name: self.generate_function(schema, options, name)
for name, schema in schemas.items()
})
# Validate examples
try:
if options.validate_schemas:
self._test_examples()
finally: # even if exception
self._positive_examples.clear()
self._negative_examples.clear()
self.options = options_backup
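# Usage sketch (the schema and payload below are assumptions, not part of the library;
# generated validators appear to return a (value, error) pair, cf. the use of
# pycoercer_schema_validator in update() above):
#   v = Validator({'user': {'type': 'dict', 'items': {'name': {'type': 'str'}}}})
#   validated, error = v['user']({'name': 'Ada'})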
pycoercer_schema = {
'str': {'type': 'str'},
'int': {'type': 'int'},
'bool': {'type': 'bool'},
'rules': {
'type': 'dict',
'items': {
'title': None,
'description': None,
'examples': {'type': 'list'},
'negative_examples': {'type': 'list'},
'allow_unknown': 'bool',
'purge_unknown': 'bool',
'rename': 'str',
'synonyms': {'type': 'list'},
'required': 'bool',
'require_all': 'bool',
'nullable': 'bool',
'if_null': {},
'default': {},
'type': {
'nullable': True,
'type': 'str',
'map': {
'object': 'dict',
'array': 'list',
'string': 'str',
'integer': 'int',
'boolean': 'bool',
'None': None,
'null': None
},
'enum': ['dict', 'list', 'str', 'int', 'float', 'number',
'bool']
},
'coerce': 'str',
'map': {
'type': 'dict',
'allow_unknown': True
},
'enum': {
'type': 'list',
'synonyms': ['allowed']
},
'regex': {
'type': 'str',
'synonyms': ['pattern']
},
'items': {
'type': 'dict', # TODO: list notation for lists
'values': 'obj',
'synonyms': ['schema', 'properties']
},
'rules': {'type': 'str'},
'keys': {
'rules': 'obj',
'synonyms': ['keysrules']
},
'values': {
'rules': 'obj',
'synonyms': ['valuesrules']
},
'min': {},
'max': {},
'min_len': {
'type': 'int',
'synonyms': ['minLength', 'minlength']
},
'max_len': {
'type': 'int',
'synonyms': ['maxLength', 'maxlength']
},
'one_of': {
'type': 'list',
'values': 'obj',
'synonyms': ['oneOf', 'oneof']
},
'any_of': {
'type': 'list',
'values': 'obj',
'synonyms': ['anyOf', 'anyof']
},
'post_coerce': 'str'
# todo: if_invalid
}
},
'obj': {
'any_of': [
{'type': None},
'str',
'rules'
]
},
'obj_dict': {
'type': 'dict',
'values': 'obj'
}
}
_pcsv = Validator(
pycoercer_schema,
allow_unknown=False,
purge_unknown=False,
require_all=False,
break_loops=True,
load_as_jsonschema=False,
validate_schemas=False)
pycoercer_schema_validator = _pcsv['obj_dict']
| python |
""" owns all PlaybackController AVS namespace interaction
https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/playbackcontroller
"""
from __future__ import unicode_literals
class PlaybackController(object):
""" owns all PlaybackController AVS namespace interaction """
def __init__(self, connection):
self._connection = connection
def play_command_issued(self):
""" notifies AVS that user started/resumed playback """
header = {'namespace': 'PlaybackController',
'name': 'PlayCommandIssued'}
self._connection.send_event(header, include_state=True)
def pause_command_issued(self):
""" notifies AVS that user paused playback """
header = {'namespace': 'PlaybackController',
'name': 'PauseCommandIssued'}
self._connection.send_event(header, include_state=True)
def next_command_issued(self):
""" notifies AVS that user skips to next track """
header = {'namespace': 'PlaybackController',
'name': 'NextCommandIssued'}
self._connection.send_event(header, include_state=True)
def previous_command_issued(self):
""" notifies AVS that user skips to previous track """
header = {'namespace': 'PlaybackController',
'name': 'PreviousCommandIssued'}
self._connection.send_event(header, include_state=True)
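# Usage sketch (the `connection` object is provided by the surrounding AVS client code):
#   controller = PlaybackController(connection)
#   controller.play_command_issued()   # emits PlaybackController.PlayCommandIssued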
| python |
import logging
import json
import jsonpickle
from tqdm.autonotebook import tqdm
from seml.database import get_collection
from seml.settings import SETTINGS
States = SETTINGS.STATES
__all__ = ['get_results']
def parse_jsonpickle(db_entry):
import jsonpickle.ext.numpy as jsonpickle_numpy
jsonpickle_numpy.register_handlers()
try:
p = jsonpickle.pickler.Pickler(keys=False)
parsed = jsonpickle.loads(json.dumps(db_entry, default=p.flatten), keys=False)
except IndexError:
parsed = db_entry
return parsed
def get_results(db_collection_name, fields=None,
to_data_frame=False, mongodb_config=None,
states=None, filter_dict=None, parallel=False):
"""
Get experiment results from the MongoDB.
Parameters
----------
db_collection_name: str
Name of the MongoDB collection.
fields: list (optional).
Database attributes to extract. Default: ['config', 'result'].
to_data_frame: bool, default: False
Whether to convert the results into a Pandas DataFrame.
mongodb_config: dict (optional)
MongoDB credential dictionary. If None, uses the credentials specified by `seml configure`.
states: list of strings (optional)
Extract only experiments with certain states. Default: ['COMPLETED'].
filter_dict: dict (optional)
Custom dictionary for filtering results from the MongoDB.
parallel: bool, default: False
If True, unserialize entries in parallel. Use for very large experiment collections.
    Returns
    -------
    parsed: list of dicts or pandas.DataFrame
        The parsed database entries; a DataFrame if `to_data_frame` is True.
    """
import pandas as pd
if fields is None:
fields = ['config', 'result']
if states is None:
states = States.COMPLETED
if filter_dict is None:
filter_dict = {}
collection = get_collection(db_collection_name, mongodb_config=mongodb_config,)
if len(states) > 0:
if 'status' in filter_dict:
logging.warning("'states' argument is not empty and will overwrite 'filter_dict['status']'.")
filter_dict['status'] = {'$in': states}
cursor = collection.find(filter_dict, fields)
results = [x for x in tqdm(cursor, total=collection.count_documents(filter_dict))]
if parallel:
from multiprocessing import Pool
with Pool() as p:
parsed = list(tqdm(p.imap(parse_jsonpickle, results),
total=len(results)))
else:
parsed = [parse_jsonpickle(entry) for entry in tqdm(results)]
if to_data_frame:
parsed = pd.io.json.json_normalize(parsed, sep='.')
return parsed
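# Usage sketch (collection name and result fields are assumptions):
#   df = get_results('my_experiments', fields=['config', 'result'], to_data_frame=True)
#   print(df['result.accuracy'].mean())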
| python |
from airflow.decorators import dag
from airflow.providers.airbyte.operators.airbyte import AirbyteTriggerSyncOperator
from airflow.providers.airbyte.sensors.airbyte import AirbyteJobSensor
from airflow.utils.dates import days_ago
@dag(start_date=days_ago(1), schedule_interval=None, tags=["example"])
def airbyte():
"""Define an example Airbyte DAG which triggers an Airbyte sync operation."""
async_source_destination = AirbyteTriggerSyncOperator(
task_id="airbyte_trigger_async",
connection_id="{{ var.value.AIRBYTE_CONNECTION_ID }}",
asynchronous=True,
)
AirbyteJobSensor(
task_id="airbyte_job_sensor",
airbyte_job_id=async_source_destination.output,
)
dag = airbyte()
| python |
class Solution:
def partitionLabels(self, S):
"""
:type S: str
:rtype: List[int]
"""
idxes = dict(zip(S, range(len(S))))
ans, left, right = [], 0, 0
for i, ch in enumerate(S):
right = max(right, idxes[ch])
if right == i:
ans.append(right - left + 1)
left = right = i + 1
        return ans
| python |
from typing import AnyStr
from typing import Union
from typing import Type
from nezzle.graphics.edges.baseedge import BaseEdge
from nezzle.graphics.edges.edgefactory import EdgeClassFactory
class EdgeConverter(object):
@staticmethod
def convert(edge: BaseEdge, edge_type: Union[Type, AnyStr]):
if isinstance(edge_type, str):
edge_type = EdgeClassFactory.create(edge_type)
if type(edge) == edge_type:
return
attr = edge.to_dict()
attr["ITEM_TYPE"] = edge_type.ITEM_TYPE
new_edge = edge_type.from_dict(attr=attr, source=edge.source, target=edge.target)
return new_edge
| python |
# -*- coding: utf-8 -*-
import re
import os
class Config:
src = 'src/WS101.md'
dest = 'WS101.md'
pattern = '{{import\((.+)\)}}'
def import_resource(match):
if not match:
return ''
path = match.groups()[0]
return ('# file: ' + path + '\n' +
'# ' + ('-' * (6 + len(path))) + '\n\n' +
open(path).read())
def main():
raw = open(Config.src).read()
build = re.sub(Config.pattern, import_resource, raw)
open(Config.dest, 'w').write(build)
# required for git pre-commit hook
print(Config.dest)
if __name__ == '__main__':
main()
| python |
"""Public API for yq"""
load("//lib/private:yq.bzl", _is_split_operation = "is_split_operation", _yq_lib = "yq_lib")
_yq_rule = rule(
attrs = _yq_lib.attrs,
implementation = _yq_lib.implementation,
toolchains = ["@aspect_bazel_lib//lib:yq_toolchain_type"],
)
def yq(name, srcs, expression = ".", args = [], outs = None, **kwargs):
"""Invoke yq with an expression on a set of input files.
For yq documentation, see https://mikefarah.gitbook.io/yq.
To use this rule you must register the yq toolchain in your WORKSPACE:
```starlark
load("@aspect_bazel_lib//lib:repositories.bzl", "register_yq_toolchains")
register_yq_toolchains(version = "4.24.5")
```
Usage examples:
```starlark
load("@aspect_bazel_lib//lib:yq.bzl", "yq")
```
```starlark
# Remove fields
yq(
name = "safe-config",
srcs = ["config.yaml"],
expression = "del(.credentials)",
)
```
```starlark
# Merge two yaml documents
yq(
name = "ab",
srcs = [
"a.yaml",
"b.yaml",
],
expression = ". as $item ireduce ({}; . * $item )",
)
```
```starlark
# Split a yaml file into several files
yq(
name = "split",
srcs = ["multidoc.yaml"],
outs = [
"first.yml",
"second.yml",
],
args = [
"-s '.a'", # Split expression
"--no-doc", # Exclude document separator --
],
)
```
```starlark
# Convert a yaml file to json
yq(
name = "convert-to-json",
srcs = ["foo.yaml"],
args = ["-o=json"],
outs = ["foo.json"],
)
```
```starlark
# Convert a json file to yaml
yq(
name = "convert-to-yaml",
srcs = ["bar.json"],
args = ["-P"],
outs = ["bar.yaml"],
)
```
```starlark
# Call yq in a genrule
genrule(
name = "generate",
srcs = ["farm.yaml"],
outs = ["genrule_output.yaml"],
cmd = "$(YQ_BIN) '.moo = \"cow\"' $(location farm.yaml) > $@",
toolchains = ["@yq_toolchains//:resolved_toolchain"],
)
```
yq is capable of parsing and outputting to other formats. See their [docs](https://mikefarah.gitbook.io/yq) for more examples.
Args:
name: Name of the rule
srcs: List of input file labels
expression: yq expression (https://mikefarah.gitbook.io/yq/commands/evaluate). Defaults to the identity
expression "."
args: Additional args to pass to yq. Note that you do not need to pass _eval_ or _eval-all_ as this
            is handled automatically based on the number of `srcs`. Passing the output format or the parse format
is optional as these can be guessed based on the file extensions in `srcs` and `outs`.
outs: Name of the output files. Defaults to a single output with the name plus a ".yaml" extension, or
the extension corresponding to a passed output argment (e.g., "-o=json"). For split operations you
must declare all outputs as the name of the output files depends on the expression.
**kwargs: Other common named parameters such as `tags` or `visibility`
"""
args = args[:]
if not _is_split_operation(args):
# For split operations we can't predeclare outs because the name of the resulting files
# depends on the expression. For non-split operations, set a default output file name
# based on the name and the output format passed, defaulting to yaml.
if not outs:
outs = [name + ".yaml"]
if "-o=json" in args or "--outputformat=json" in args:
outs = [name + ".json"]
if "-o=xml" in args or "--outputformat=xml" in args:
outs = [name + ".xml"]
elif "-o=props" in args or "--outputformat=props" in args:
outs = [name + ".properties"]
elif "-o=c" in args or "--outputformat=csv" in args:
outs = [name + ".csv"]
elif "-o=t" in args or "--outputformat=tsv" in args:
outs = [name + ".tsv"]
elif outs and len(outs) == 1:
# If an output file with an extension was provided, try to set the corresponding output
# argument if it wasn't already passed.
if outs[0].endswith(".json") and "-o=json" not in args and "--outputformat=json" not in args:
args.append("-o=json")
elif outs[0].endswith(".xml") and "-o=xml" not in args and "--outputformat=xml" not in args:
args.append("-o=xml")
elif outs[0].endswith(".properties") and "-o=props" not in args and "--outputformat=props" not in args:
args.append("-o=props")
elif outs[0].endswith(".csv") and "-o=c" not in args and "--outputformat=csv" not in args:
args.append("-o=c")
elif outs[0].endswith(".tsv") and "-o=t" not in args and "--outputformat=tsv" not in args:
args.append("-o=t")
# If the input files are json or xml, set the parse flag if it isn't already set
if len(srcs) > 0:
if srcs[0].endswith(".json") and "-P" not in args:
args.append("-P")
elif srcs[0].endswith(".xml") and "-p=xml" not in args:
args.append("-p=xml")
_yq_rule(
name = name,
srcs = srcs,
expression = expression,
args = args,
outs = outs,
**kwargs
)
| python |
"""The output package contains the various output modules."""
from pathlib import Path
from typing import Any, Optional, Tuple
from tunable import Selectable, Tunable
from ..simulation.simulator import World
ShapeType = Tuple[int, int]
def ensure_path(path: str) -> str:
"""
    Ensures that the parent directory of the path exists.
:param path: Path
:return: the path
"""
path = Path(path)
if not path.parent.is_dir():
path.parent.mkdir(parents=True, exist_ok=True)
return str(path)
def ensure_extension(path: str, extension: str) -> str:
"""
Ensures that the path ends with extension, possibly adding it.
:param path: Path
:param extension: Extension
:return: Final path
"""
path = Path(path)
if not isinstance(extension, list):
extension = [extension]
if not path.suffix or path.suffix and path.suffix not in extension:
path = path.parent / (path.name + extension[0])
path = str(path)
if OutputIndividualFilesWildcard.value in path:
path = path.replace(OutputIndividualFilesWildcard.value, "")
return path
def ensure_path_and_extension(path: str, extension: str) -> str:
"""
    Ensures that the parent directory of the path exists,
    and that the path has the extension, possibly by adding it.
:param path: Path
:param extension: Extension
:return: Final path
"""
ensure_path(path)
return ensure_extension(path, extension)
def ensure_number(path: str, number: int, disable_individual: bool = False) -> str:
"""
Depending on configuration, add a number to the path for consecutive output files.
:param path: Path
:param number: Number
:param disable_individual: Possibility to disable adding of a number
:return: Path with number
"""
if OutputIndividualFiles.value and not disable_individual and number != -1:
path = Path(path)
stem = path.stem
if OutputIndividualFilesWildcard.value not in stem:
stem += OutputIndividualFilesWildcard.value
digits = OutputIndividualFilesZeros.value
stem = stem.replace(OutputIndividualFilesWildcard.value, f"{number:0>{digits}}")
path = path.parent / (stem + path.suffix)
path = str(path)
return path
def ensure_path_and_extension_and_number(
path: str, extension: str, number: int, disable_individual: bool = False
) -> str:
"""
Ensures that a path exists, has an extension and a number.
:param path: Path
:param extension: Extension
:param number: Number
:param disable_individual: Whether to disable adding of number
:return: Final path
"""
path = ensure_number(path, number, disable_individual=disable_individual)
return ensure_path_and_extension(path, extension)
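# Worked example (assumed values): with OutputIndividualFiles enabled and the default
# 3-digit wildcard, ensure_path_and_extension_and_number("out/result", ".tif", 7)
# yields "out/result007.tif" (and creates the "out/" directory if it is missing).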
def check_overwrite(path: str, overwrite: bool = False) -> str:
"""
Check if a path exists, if so raising a RuntimeError if overwriting is disabled.
:param path: Path
:param overwrite: Whether to overwrite
:return: Path
"""
if Path(path).is_file() and not overwrite:
raise RuntimeError(
f"Requested existing {path!r} as output, but overwriting is disabled."
)
return path
class OutputIndividualFiles(Tunable):
"""Output individual files"""
default: bool = True
class OutputIndividualFilesZeros(Tunable):
"""Amount of digits used for outputting the frame number of individual file names"""
default: int = 3
class OutputIndividualFilesWildcard(Tunable):
"""Pattern for individual file names"""
default: str = '{}'
class OutputReproducibleFiles(Tunable):
"""Output files in a reproducible manner"""
default: bool = True
class Output(Selectable, Selectable.Multiple):
"""
Base class of the Output classes.
"""
def output(self, world: World, **kwargs) -> Optional[Any]:
"""
Outputs the World, this function is usually called by either write or display.
:param world: World
:param kwargs: Additional arguments
:return:
"""
pass
def write(self, world: World, file_name: str, **kwargs) -> None:
"""
Output and write the World to file_name.
:param world: World
:param file_name: Filename to write output to
:param kwargs: Additional arguments
:return:
"""
pass
def display(self, world: World, **kwargs) -> None:
"""
Output and display the World, e.g. via a GUI window.
:param world: World
:param kwargs: Additional arguments
:return:
"""
raise RuntimeError("Not implemented")
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# Copyright (C) Benjamin D. McGinnes, 2013-2018
# [email protected]
# OpenPGP/GPG key: 0x321E4E2373590E5D
#
# Version: 0.1.2
#
# BTC: 1KvKMVnyYgLxU1HnLQmbWaMpDx3Dz15DVU
#
#
# Requirements:
#
# * Python 3.4 or later.
# * GPGME 1.10.0 or later with Python bindings.
#
# Options and notes:
#
# The config.py file must be customised prior to running either
# gen-auth.py or authinfo.py in order to set the correct path for the
# GPG configuration and adjust other settings.
#
# No longer requires PyCrypto, SimpleCrypt, python-gnupg or gconfig.py.
# Instead requires GPG and GPGME with Python bindings.
# Passphrase handled by gpg-agent.
#
# Python requirements raised due to GPGME requirements.
# May also work with Python 2.7, but untested.
#
##
from license import __author__
from license import __copyright__
from license import __copyrighta__
from license import __license__
from license import __bitcoin__
__version__ = "0.1.2"
import os
import os.path
import gpg
if os.path.exists("oauth.py.gpg") is True:
oauthy = "oauth.py.gpg"
elif os.path.exists("oauth.py.asc") is True:
oauthy = "oauth.py.asc"
else:
oauthy = None
if oauthy is not None:
with open(oauthy, "rb") as afile:
authdata = gpg.Context().decrypt(afile)
exec(authdata[0].decode("utf-8"))
else:
print("""
You must run gen-auth.py first.
""")
APP_KEY = oauth.APP_KEY
APP_SECRET = oauth.APP_SECRET
OAUTH_TOKEN = oauth.OAUTH_TOKEN
OAUTH_TOKEN_SECRET = oauth.OAUTH_TOKEN_SECRET
| python |
# temp.py
import os
import time
import RPi.GPIO as GPIO
import Adafruit_DHT as dht
sensor = dht.DHT11
temp_pin =4
red= 17
green= 27
GPIO.setmode(GPIO.BCM)
GPIO.setup(green, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(red,GPIO.OUT,initial=GPIO.LOW)
GPIO.setwarnings(False)
def printTemp():
h, t = dht.read_retry(sensor,temp_pin)
if h is not None and t is not None:
print("Temperature = {0:0.1f}*C Humidity = {1:0.1f}%".format(t, h))
else:
print('Read error')
printTemp()
GPIO.output(red,GPIO.HIGH)
GPIO.output(green,GPIO.HIGH)
print("on")
time.sleep(5)
GPIO.output(red, GPIO.LOW)
printTemp()
GPIO.output(green, GPIO.LOW)
print("off")
time.sleep(5)
| python |
import re
from datetime import datetime
from unittest.mock import patch
import pytest
from requests import Response
from requests.exceptions import RequestException
from http_nudger.monitor import url_check
URL = "https://google.com"
@pytest.fixture
def http_response():
resp = Response()
resp.status_code = 200
resp._content = b"ABC123"
return resp
@patch("requests.get")
def test_url_check(requests_get_mock, freezer, http_response):
now = datetime.utcnow()
requests_get_mock.return_value = http_response
url_status = url_check(URL, 5, None)
assert url_status.timestamp == now
assert url_status.url == URL
assert url_status.status_code == http_response.status_code
assert url_status.failure_reason is None
assert url_status.regexp is None
assert url_status.regexp_matched is False
requests_get_mock.side_effect = RequestException("Some reason")
url_status = url_check(URL, 5, None)
assert url_status.timestamp == now
assert url_status.url == URL
assert url_status.status_code == -1
assert url_status.failure_reason == "Some reason"
assert url_status.regexp is None
assert url_status.regexp_matched is False
@patch("requests.get")
def test_url_check_regexp_match(requests_get_mock, http_response):
regexp = re.compile("[0-9]+")
requests_get_mock.return_value = http_response
url_status = url_check(URL, 5, regexp)
assert url_status.regexp == regexp.pattern
assert url_status.regexp_matched is True
requests_get_mock.side_effect = RequestException("Some reason")
url_status = url_check(URL, 5, regexp)
assert url_status.regexp == regexp.pattern
assert url_status.regexp_matched is False
@patch("requests.get")
def test_url_check_regexp_not_match(requests_get_mock, http_response):
regexp = re.compile("DEF?")
requests_get_mock.return_value = http_response
url_status = url_check(URL, 5, regexp)
assert url_status.regexp == regexp.pattern
assert url_status.regexp_matched is False
requests_get_mock.side_effect = RequestException("Some reason")
url_status = url_check(URL, 5, regexp)
assert url_status.regexp == regexp.pattern
assert url_status.regexp_matched is False
| python |
import numpy as np
from itertools import combinations
from sklearn import gaussian_process
from from_fits import create_image_from_fits_file
from simulations import simulate
# First find best NCLEAN using cv_cc.py
# Plot covariance matrix of the residuals (not difmap, but, probably, AIPS?)
# Plot covariogramm, GP fit?
if False:
# Estimate correlation in image pixel values
# FIXME: Better use residuals image from difmap or AIPS
image_fits = '/home/ilya/code/vlbi_errors/vlbi_errors/residuals_3c273_15000.fits'
image = create_image_from_fits_file(image_fits)
slices = [slice(50 * i, 50 * (i+1)) for i in range(20)]
sigma2_list = list()
for slice1, slice2 in list(combinations(slices, 2))[:51]:
print "slices {} {}".format(slice1, slice2)
data = image.image[slice1, slice2]
X = list()
y = list()
for (i, j), val in np.ndenumerate(data):
X.append([i, j])
y.append(val)
Y = np.array(y).reshape(2500, 1)
gp = gaussian_process.GaussianProcess(thetaL=(0.01, 0.01),
thetaU=(100., 100.),
theta0=(1., 1.), nugget=0.0003**2,
storage_mode='full')
gpf = gp.fit(X, Y)
Y_pred = gpf.predict(X)
y_pred = Y_pred.reshape((50, 50))
fwhm = 2.355 * gpf.theta_
print "FWHM {}".format(fwhm)
# GP variance
sigma2 = gpf.sigma2
print "GP std {}".format(np.sqrt(sigma2))
sigma2_list.append((slice1, slice2, gpf.theta_))
# Simulate gradient of RM on MOJAVE frequencies. Get "observed" data & model
# images & model data (w/o noise)
from mojave import get_epochs_for_source
path_to_script = '/home/ilya/code/vlbi_errors/difmap/final_clean_nw'
base_dir = '/home/ilya/vlbi_errors/article'
# sources = ['1514-241', '1302-102', '0754+100', '0055+300', '0804+499',
# '1749+701', '0454+844']
mapsize_dict = {'x': (512, 0.1), 'y': (512, 0.1), 'j': (512, 0.1),
'u': (512, 0.1)}
mapsize_common = (512, 0.1)
source = '0454+844'
epoch = '2006_03_09'
max_jet_flux = 0.0015
epochs = get_epochs_for_source(source, use_db='multifreq')
simulate(source, epoch, ['x', 'y', 'j', 'u'],
n_sample=3, max_jet_flux=max_jet_flux, rotm_clim_sym=[-300, 300],
rotm_clim_model=[-300, 300],
path_to_script=path_to_script, mapsize_dict=mapsize_dict,
mapsize_common=mapsize_common, base_dir=base_dir,
rotm_value_0=0., rotm_grad_value=0., n_rms=2.,
download_mojave=False, spix_clim_sym=[-1.5, 1],
spix_clim_model=[-1.5, 1], qu_fraction=0.3)
| python |
def app_data_preparation(file_list, lock_period, impute):
'''
    Receives a list of data file names/paths in a fixed order:
    1) ICP DAS
    2) metering devices
    3) SVO
    4) VDNH
    5) COVID
    6) self-isolation index
    lock_period - can be specified as a tuple (start date, end date) in case a new lockdown is introduced
    impute=True - NaN values will be imputed using the KNN algorithm;
    impute=False - NaN values will be dropped
'''
# data processing and analysis
import os
import pandas as pd
# module with information about holidays
import holidays
from app_processing import app_icp_preprocess, app_meter_preprocess
from app_processing import app_svo_preprocess, app_vdnh_preprocess
from app_processing import app_isolation_preprocessing, app_covid_preprocessing, app_imputing_data
# -------------------------------------------------DATA-LOAD--------------------------------------------------------
# icp das
icp_features_url = os.path.join(os.getcwd(), 'data', 'building_features.pickle')
# metering device
metering_features_url = os.path.join(os.getcwd(), 'data', 'meter_features.pickle')
# ---------------------------------------------FEATURE-SELECTION----------------------------------------------------
# relevant icp_das features
icp_das = app_icp_preprocess(file_list[0], icp_features_url)
# relevant metering devices features
meter_dev = app_meter_preprocess(file_list[1], metering_features_url)
    # temperature, atmospheric pressure, cloudiness
svo = app_svo_preprocess(file_list[2], ['T', 'U', 'c'])
# precipitation
vdnh = app_vdnh_preprocess(file_list[3])
# covid cases
cov = app_covid_preprocessing(file_list[4])
# isolation index
iso = app_isolation_preprocessing(file_list[5])
# ---------------------------------------------MERGING-DATASETS-----------------------------------------------------
def merge_data(*args):
'''
merging datasets
'''
data = args[0]
for i in range(1, len(args)):
data = data.merge(args[i], how='left', on='time')
return data
data = merge_data(icp_das, meter_dev, svo, vdnh, cov, iso)
data = data.set_index('time')
# ----------------------------------------------ADD-COVID-CASES-----------------------------------------------------
# populating daily values
data['covid_cases'] = data['covid_cases'].groupby(pd.Grouper(freq='D')).ffill()
data['isolation_idx'] = data['isolation_idx'].groupby(pd.Grouper(freq='D')).ffill()
    # fill gaps before March 2020 (no COVID data yet) with zeros
    data.loc[:'2020-03', 'covid_cases'] = data.loc[:'2020-03', 'covid_cases'].fillna(0)
    data.loc[:'2020-03', 'isolation_idx'] = data.loc[:'2020-03', 'isolation_idx'].fillna(0)
# ----------------------------------------SPECIFY-WEEKDAYS-AND-MONTHS-----------------------------------------------
# add weekday
data['weekday'] = data.index.weekday
# add month
data['month'] = data.index.month
# add yearday
data['yearday'] = data.index.dayofyear
# add monthday
data['monthday'] = data.index.to_series().dt.day
# -----------------------------------------------ADD-HOLIDAYS-------------------------------------------------------
# add holidays
rus_holidays = holidays.Russia()
def holidays_selector(df, holidays_list):
res = []
for t in df.index:
if t in holidays_list:
res.append(1)
else:
res.append(0)
return pd.DataFrame({'time': df.index, 'holiday': res})
all_holidays = holidays_selector(data, rus_holidays)
# -----------------------------------------------ADD-LOCKDOWN-------------------------------------------------------
# set time of lockdown in Moscow
lockdown = pd.DataFrame(pd.date_range(start='2020-03-30 00:00',
end='2020-06-08 23:00', freq='H'), columns=['time'])
# set corresponding column to 1
lockdown['lockdown'] = 1
    # in case a new lockdown period is introduced
    if lock_period is not None:
        new_lockdown = pd.DataFrame(pd.date_range(start=lock_period[0],
                                                  end=lock_period[1], freq='H'), columns=['time'])
        new_lockdown['lockdown'] = 1
        # DataFrame.append returns a new frame, so keep the result
        lockdown = lockdown.append(new_lockdown)
# add lockdown periods
data = merge_data(data, all_holidays, lockdown).set_index('time')
# -----------------------------------------------FILL-NAs-----------------------------------------------------------
data['lockdown'] = data['lockdown'].fillna(0)
data['precipitation'] = data['precipitation'].fillna(0)
if impute:
# TODO: make user to decide which columns to impute
data = app_imputing_data(data)
return data
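# A minimal usage sketch for the function above. The file names are
# placeholders chosen for illustration and are not shipped with the project:
#
#   files = ['icp_das.csv', 'meter_devices.csv', 'svo_weather.csv',
#            'vdnh_precipitation.csv', 'covid_cases.csv', 'isolation_index.csv']
#   df = app_data_preparation(files, lock_period=None, impute=True)
#   # df: hourly DataFrame indexed by 'time' with building, weather, COVID,
#   # calendar and lockdown features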
| python |
"""
Boronic Acid Factory
====================
"""
from ..functional_groups import BoronicAcid
from .functional_group_factory import FunctionalGroupFactory
from .utilities import _get_atom_ids
class BoronicAcidFactory(FunctionalGroupFactory):
"""
Creates :class:`.BoronicAcid` instances.
Creates functional groups from substructures, which match the
``[*][B]([O][H])[O][H]`` functional group string.
Examples
--------
*Creating Functional Groups with the Factory*
You want to create a building block which has :class:`.BoronicAcid`
functional groups. You want the boron atom in those functional
groups to be the *bonder* atom and the OH groups to be *deleter*
atoms.
.. testcode:: creating-functional-groups-with-the-factory
import stk
building_block = stk.BuildingBlock(
smiles='OB(O)CCCB(O)O',
functional_groups=(stk.BoronicAcidFactory(), ),
)
.. testcode:: creating-functional-groups-with-the-factory
:hide:
assert all(
isinstance(functional_group, stk.BoronicAcid)
for functional_group
in building_block.get_functional_groups()
)
assert building_block.get_num_functional_groups() == 2
*Changing the Bonder and Deleter Atoms*
You want to create a building block which has :class:`.BoronicAcid`
functional groups. You want the oxygen atoms to be treated as
*bonder* atoms, and the hydrogen atoms to be treated as *deleter*
atoms.
.. testcode:: changing-the-bonder-and-deleter-atoms
import stk
boronic_acid_factory = stk.BoronicAcidFactory(
# The indices of the oxygen atoms in the functional
# group string (see docstring) are 2 and 4.
bonders=(2, 4),
# The indices of the hydrogen atoms in the
# functional group string (see docstring) are 3 and 5.
deleters=(3, 5),
)
building_block = stk.BuildingBlock(
smiles='OB(O)CCCB(O)O',
functional_groups=(boronic_acid_factory, ),
)
.. testcode:: changing-the-bonder-and-deleter-atoms
:hide:
fg1, fg2 = building_block.get_functional_groups()
assert fg1.get_num_bonders() == 2
assert sum(1 for _ in fg1.get_deleters()) == 2
assert fg2.get_num_bonders() == 2
assert sum(1 for _ in fg2.get_deleters()) == 2
assert all(
isinstance(atom, stk.O)
for functional_group
in building_block.get_functional_groups()
for atom
in functional_group.get_bonders()
)
assert all(
isinstance(atom, stk.H)
for functional_group
in building_block.get_functional_groups()
for atom
in functional_group.get_deleters()
)
See Also
--------
:class:`.GenericFunctionalGroup`
Defines *bonders* and *deleters*.
"""
def __init__(
self,
bonders=(1, ),
deleters=(2, 3, 4, 5),
placers=None,
):
"""
Initialize a :class:`.BoronicAcidFactory` instance.
Parameters
----------
bonders : :class:`tuple` of :class:`int`
The indices of atoms in the functional group string, which
are *bonder* atoms.
deleters : :class:`tuple` of :class:`int`
The indices of atoms in the functional group string, which
are *deleter* atoms.
placers : :class:`tuple` of :class:`int`, optional
The indices of atoms in the functional group string, which
are *placer* atoms. If ``None``, `bonders` will be used.
"""
self._bonders = bonders
self._deleters = deleters
self._placers = bonders if placers is None else placers
def get_functional_groups(self, molecule):
ids = _get_atom_ids('[*][B]([O][H])[O][H]', molecule)
for atom_ids in ids:
atoms = tuple(molecule.get_atoms(atom_ids))
yield BoronicAcid(
boron=atoms[1],
oxygen1=atoms[2],
hydrogen1=atoms[3],
oxygen2=atoms[4],
hydrogen2=atoms[5],
atom=atoms[0],
bonders=tuple(atoms[i] for i in self._bonders),
deleters=tuple(atoms[i] for i in self._deleters),
placers=tuple(atoms[i] for i in self._placers),
)
| python |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 2 10:36:23 2019
@author: Bahman
"""
import csv
import math
import numpy as np
import random
from matplotlib import pyplot as plt
def readMyCSVData(fileName):
with open(fileName, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
data = []
label = []
for row in reader:
data.append([float(row[0]), float(row[2]), float(row[4]), float(row[10]), float(row[11]), float(row[12])])
if len(row) == 15:
if row[14] == ' <=50K':
label.append(-1)
elif row[14] == ' >50K':
label.append(+1)
else:
print("Data Error!!")
csvfile.close()
return data, label
def average(listNumbers):
return sum(listNumbers)/float(len(listNumbers))
def standarDeviation(listNumbers):
avgerage = average(listNumbers)
return math.sqrt(sum([pow(x-avgerage,2) for x in listNumbers])/float(len(listNumbers)-1))
def dataStandardization(data):
print("Scaling the variables:", end="")
normalParameters = [(average(feature), standarDeviation(feature)) for feature in zip(*data)]
for row in data:
for i in range(len(row)):
row[i] = (row[i] - normalParameters[i][0]) / normalParameters[i][1]
print("...OK")
def splitDataTrainTest(dataX, dataY, percentage):
dataLen = len(dataX)
testLen = round(percentage * dataLen)
trainX = dataX.copy()
trainY = dataY.copy()
testX = []
testY = []
for k in range(testLen):
i = random.randrange(len(trainX))
testX.append(trainX[i])
testY.append(trainY[i])
trainX.pop(i)
trainY.pop(i)
return trainX, trainY, testX, testY
def predictBySVM(a, b, data):
results = []
for xV in data:
value = np.dot(xV, a) + b
if value > 0.0:
results.append(+1)
else:
results.append(-1)
return results
def accuracy(predictedData, testData):
correct = 0
for i in range(len(testData)):
if testData[i] == predictedData[i]:
correct += 1
return correct/float(len(testData))
def vectorMagnitude(data):
return math.sqrt(sum([i ** 2 for i in data]))
#//////Main
originalTrainX, originalTrainY = readMyCSVData('train.txt')
originalTestX, originalTestY = readMyCSVData('test.txt')
print("Training data read: ", len(originalTrainX))
print("Testing data read: ", len(originalTestX))
dataStandardization(originalTrainX)
dataStandardization(originalTestX)
regularizations = [1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 1e-1, 1]
seasons = 1000
kStep = 30
steps = 4000
# initialise the SVM coefficients randomly
a = [random.uniform(0, 1) for _ in range(len(originalTrainX[0]))]
b = random.uniform(0, 1)
trainX, trainY, testX, testY = splitDataTrainTest(originalTrainX, originalTrainY, 0.1)
dicAccuracylanda = {}
dicCofALanda = {}
dicCofBLanda = {}
dicCofAllLanda = {}
for landa in regularizations:
accuracySeason = {}
coefficientASeason = {}
coefficientBSeason = {}
coefficientMagnitudeSeason = {}
for season in range(seasons):
stepLength = 1.0 / (0.1 * season + 100) #etaa
seasonTrainX, seasonTrainY, heldOutvalidationX, heldOutvalidationY = splitDataTrainTest(trainX, trainY, 0.1)
for step in range(steps):
k = random.randrange(len(trainX)) #Nb = 1 #number of batch items
if trainY[k]*(np.dot(trainX[k], a) + b) >= 1:
for feature in range(len(trainX[k])):
a[feature] = a[feature] - stepLength * landa * a[feature]
else:
for feature in range(len(trainX[k])):
a[feature] = a[feature] - stepLength * (landa * a[feature] - trainY[k] * trainX[k][feature])
b = b + stepLength * trainY[k]
if step % kStep == 0:
accuracyS = accuracy(predictBySVM(a, b, heldOutvalidationX), heldOutvalidationY)
accuracySeason[step] = accuracyS
magnitudeA = vectorMagnitude(a)
coefficientASeason[step] = magnitudeA
coefficientBSeason[step] = b
coefficientMagnitudeSeason[step] = math.sqrt(magnitudeA*magnitudeA + b*b)
dicAccuracylanda[landa] = accuracySeason
dicCofALanda[landa] = coefficientASeason
dicCofBLanda[landa] = coefficientBSeason
dicCofAllLanda[landa] = coefficientMagnitudeSeason
#select the best landa
bestLanda = -0.1
maxAccuracy = 0.0
for landa in dicAccuracylanda:
    items = sorted(dicAccuracylanda[landa])
    final_accuracy = dicAccuracylanda[landa][items[-1]]  # avoid shadowing the accuracy() function
    if final_accuracy > maxAccuracy:
        maxAccuracy = final_accuracy
        bestLanda = landa
#Cof a and b with the best landa
for season in range(seasons):
stepLength = 1.0 / (0.1 * season + 100) #etaa
for step in range(steps):
k = random.randrange(len(originalTrainX)) #Nb = 1 #number of batch items
if originalTrainY[k]*(np.dot(originalTrainX[k], a) + b) >= 1:
for feature in range(len(originalTrainX[k])):
a[feature] = a[feature] - stepLength * bestLanda * a[feature]
else:
for feature in range(len(originalTrainX[k])):
a[feature] = a[feature] - stepLength * (bestLanda * a[feature] - originalTrainY[k] * originalTrainX[k][feature])
b = b + stepLength * originalTrainY[k]
print("Cof. a = ", a)
print("Cof. b = ", b)
for item in sorted(dicAccuracylanda):
lists = sorted(dicAccuracylanda[item].items())
x, y = zip(*lists)
plt.plot(x, y, label = "landa = " + str(item))
plt.legend(loc='upper left')
plt.xlabel('Season Step')
plt.ylabel('Accuracy')
plt.show()
for item in sorted(dicCofAllLanda):
lists = sorted(dicCofAllLanda[item].items())
x, y = zip(*lists)
plt.plot(x, y, label = "landa = " + str(item))
plt.legend(loc='upper left')
plt.xlabel('Season Step')
plt.ylabel('Magnitude of Cof. Vector')
plt.show()
for item in sorted(dicCofALanda):
lists = sorted(dicCofALanda[item].items())
x, y = zip(*lists)
plt.plot(x, y, label = "landa = " + str(item))
plt.legend(loc='upper left')
plt.xlabel('Season Step')
plt.ylabel('Magnitude of Cof. "a" vector')
plt.show()
for item in sorted(dicCofBLanda):
lists = sorted(dicCofBLanda[item].items())
x, y = zip(*lists)
plt.plot(x, y, label = "landa = " + str(item))
plt.legend(loc='upper left')
axes = plt.gca()
axes.set_ylim([-2.0,0.0])
plt.xlabel('Season Step')
plt.ylabel('Cof. "b"')
plt.show()
predictedLabels = predictBySVM(a, b, originalTestX)
with open("submission.txt", "w") as text_file:
for item in predictedLabels:
if item == -1:
print('<=50K', file=text_file)
elif item == 1:
print('>50K', file=text_file)
else:
print("Data Error2!")
text_file.close()
| python |
from opendc.models.scenario import Scenario
from opendc.models.portfolio import Portfolio
from opendc.util.rest import Response
def GET(request):
"""Get this Scenario."""
request.check_required_parameters(path={'scenarioId': 'string'})
scenario = Scenario.from_id(request.params_path['scenarioId'])
scenario.check_exists()
scenario.check_user_access(request.google_id, False)
return Response(200, 'Successfully retrieved scenario.', scenario.obj)
def PUT(request):
"""Update this Scenarios name."""
request.check_required_parameters(path={'scenarioId': 'string'}, body={'scenario': {
'name': 'string',
}})
scenario = Scenario.from_id(request.params_path['scenarioId'])
scenario.check_exists()
scenario.check_user_access(request.google_id, True)
scenario.set_property('name',
request.params_body['scenario']['name'])
scenario.update()
return Response(200, 'Successfully updated scenario.', scenario.obj)
def DELETE(request):
"""Delete this Scenario."""
request.check_required_parameters(path={'scenarioId': 'string'})
scenario = Scenario.from_id(request.params_path['scenarioId'])
scenario.check_exists()
scenario.check_user_access(request.google_id, True)
scenario_id = scenario.get_id()
portfolio = Portfolio.from_id(scenario.obj['portfolioId'])
portfolio.check_exists()
if scenario_id in portfolio.obj['scenarioIds']:
portfolio.obj['scenarioIds'].remove(scenario_id)
portfolio.update()
old_object = scenario.delete()
return Response(200, 'Successfully deleted scenario.', old_object)
| python |
from pdb_util import get_interatomic_distance
from gcd_pdb import read_pdb
from pdb_parsing_tools import get_resname, get_atom, isatom
# rename atoms of a particular residue according to a pair of templates
def rename_atoms_of_selected_residue(
pdbfile, resname, template_pdb_start, template_pdb_target, newfilename):
# first step is to construct the mapping from start to target template
# for this we need to get the closest atom in template_pdb_target to each
# atom in template_pdb_start. Assume templates are aligned.
_, resis, ligands, solvent, ions, _ = read_pdb(template_pdb_start)
records_start = [r for r in (resis + ligands + solvent + ions) if r['resname'] == resname]
_, resis, ligands, solvent, ions, _ = read_pdb(template_pdb_target)
records_target = [r for r in (resis + ligands + solvent + ions) if r['resname'] == resname]
distance_matrix = []
for rtarget in records_target:
matrix_row = []
for rstart in records_start:
matrix_row.append(get_interatomic_distance(rtarget['xyz'], rstart['xyz']))
distance_matrix.append(matrix_row)
match_indices = [row.index(min(row)) for row in distance_matrix]
records_match = [records_start[i] for i in match_indices]
lookup = {}
for i in range(len(records_match)):
rtarget = records_target[i]
rmatch = records_match[i]
lookup[rmatch['atom']] = rtarget['atom']
print('replacing all instances of %s with %s' % (rmatch['atom'], rtarget['atom']))
def update_record(record):
new_atom = lookup[get_atom(record)]
new_record = record[:12] + ("% 4s" % new_atom) + record[16:]
return new_record
with open(pdbfile, 'r') as oldfile:
with open(newfilename, 'w') as newfile:
count = 0
for record in oldfile.readlines():
if isatom(record) and get_resname(record) == resname.strip():
newfile.write(update_record(record))
count += 1
else:
newfile.write(record)
print('updated %i atom names' % count)
print('updated file written to %s' % newfilename)
if __name__ == "__main__":
import sys
rename_atoms_of_selected_residue(*sys.argv[1:6])
| python |
__author__ = "Alex Rudy"
__version__ = "0.6.0"
| python |
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ProcessPoolExecutor
from .models import Timelapse
from . import appbuilder
import cv2
import os
HOST_URL = appbuilder.app.config['HOST_URL']
jobstores = {
'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
}
executors = {
'default': ProcessPoolExecutor(4)
}
scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors)
scheduler.start()
def schedule_recording(timelapse):
scheduler.add_job(
capture_frame,
trigger='interval',
max_instances=999999,
misfire_grace_time=None,
start_date=timelapse.start_date,
end_date=timelapse.end_date,
seconds=timelapse.frequency,
args=(timelapse.id, timelapse.url, timelapse.folder_name),
)
scheduler.add_job(
render_timelapse,
trigger='date',
misfire_grace_time=None,
run_date=timelapse.end_date,
args=(timelapse.id, timelapse.folder_name, timelapse.framerate),
)
session = appbuilder.get_session()
timelapse.status = 'queued'
session.add(timelapse)
session.commit()
def capture_frame(id, url, folder):
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f")
image_path = './timelapses/{}/{}.jpg'.format(folder, timestamp)
    capture = cv2.VideoCapture(url)
    status, frame = capture.read()
    capture.release()
    # only write the frame if the stream actually delivered one
    if status:
        cv2.imwrite(image_path, frame)
session = appbuilder.get_session()
timelapse = session.query(Timelapse).get(id)
if timelapse.status == 'queued':
timelapse.status = 'recording'
timelapse.progress += 1
timelapse.preview = '{}/preview/{}/{}.jpg'.format(HOST_URL, folder, timestamp)
session.add(timelapse)
session.commit()
def render_timelapse(id, folder, framerate):
session = appbuilder.get_session()
timelapse = session.query(Timelapse).get(id)
timelapse.status = 'rendering'
session.add(timelapse)
session.commit()
path = './timelapses/' + folder
images = sorted(list(os.listdir(path)))
frame = cv2.imread(os.path.join(path, images[0]))
height, width, layers = frame.shape
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video = cv2.VideoWriter(path + '.mp4', fourcc, framerate, (width, height))
for image in images:
video.write(cv2.imread(os.path.join(path, image)))
video.release()
timelapse.status = 'done'
timelapse.video = HOST_URL + '/video/' + folder + '.mp4'
session.add(timelapse)
session.commit()
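# Rough usage sketch (assumes a Timelapse row already exists; the id used here
# is a placeholder):
#
#   session = appbuilder.get_session()
#   timelapse = session.query(Timelapse).get(1)
#   schedule_recording(timelapse)  # queues periodic capture_frame jobs and one final render_timelapse job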
| python |
# Given a string and a pattern, find all anagrams of the pattern in the given string.
# Anagram is actually a Permutation of a string.
# Example:
# Input: String="ppqp", Pattern="pq"
# Output: [1, 2]
# Explanation: The two anagrams of the pattern in the given string are "pq" and "qp".
# Input: String="abbcabc", Pattern="abc"
# Output: [2, 3, 4]
# Explanation: The three anagrams of the pattern in the given string are "bca", "cab", and "abc".
# Sliding window: time O(N + M), where N is the length of the string and M the length of the pattern.
# Space: O(K), where K is the number of distinct characters in the pattern (O(M) in the worst case).
def string_anagram(str, pattern):
window_start, matched = 0, 0
result = []
char_pattern = dict()
for char in pattern:
if char not in char_pattern:
char_pattern[char] = 0
char_pattern[char] += 1
for window_end in range(len(str)):
right_char = str[window_end]
if right_char in char_pattern:
char_pattern[right_char] -= 1
if char_pattern[right_char] == 0:
matched += 1
if matched == len(char_pattern):
result.append(window_start)
if window_end >= len(pattern) -1:
left_char = str[window_start]
window_start += 1
if left_char in char_pattern:
if char_pattern[left_char] == 0:
matched -= 1
char_pattern[left_char] += 1
return result
print(string_anagram("ppqp","pq"))
print(string_anagram("abbcabc","abc")) | python |
import numpy as np
import imageio
import cv2
import sys, os
#Processing Original Image
def process_img(location_img):
image = imageio.imread(location_img)
image = image.astype(np.float32)/255
return image
#Load and construct Ground Truth
def read_gt(location_gt):
entries = os.listdir(location_gt)
gt_images = []
#Collect all human labelled images
for entry in entries:
ground_truth = imageio.imread(location_gt+entry)
ground_truth = ground_truth.astype(np.float64)/255
gt_images.append(ground_truth)
return gt_images
#Construct Ground Truth representation from all human labelled images
def construct_gt(location_gt):
gt_images = read_gt(location_gt)
size = gt_images[0].shape[:2]
pixels = np.zeros((size))
for k in range(len(gt_images)):
ret, bw_img = cv2.threshold(gt_images[k],0.0001,1,cv2.THRESH_BINARY)
for i in range(size[0]):
for j in range(size[1]):
if(bw_img[i,j][0]>0 and bw_img[i,j][1]==0 and bw_img[i,j][2]==0):
pixels[i][j] += 1
#Each pixel is in foreground if N-1 out of N humans labelled the pixel in the foreground, else in the background
pixels = np.where(pixels >=len(gt_images)-1, 1., 0.)
F = len(np.where(pixels>0)[0])
B = len(np.where(pixels==0)[0])
print("Foreground area of constructed Ground Truth is %d pixels"% F)
print("Background area of constructed Ground Truth is %d pixels\n"% B)
return pixels, F
| python |
import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class HomePageHandler(webapp2.RequestHandler):
def get(self):
template_values = {}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class BrowseHandler(webapp2.RequestHandler):
def get(self):
template_values = {}
template = JINJA_ENVIRONMENT.get_template('vis.html')
self.response.write(template.render(template_values))
application = webapp2.WSGIApplication([
('/', HomePageHandler),
('/vis', BrowseHandler),
], debug=True) | python |
import typing as t
from dataclasses import (
InitVar,
dataclass,
field,
)
from .container import (
DIContext,
get_di_container,
)
from .errors import (
ConfigError,
DIErrors,
)
@dataclass(frozen=True)
class Inject:
"""
A class that can serve as:
* a descriptor for a `Component` class
* a default value of a function argument
that should be used to mark a place for injecting dependencies as an attribute or an argument
of a function.
"""
context: DIContext = field(init=False)
name: InitVar[str] = None
interface: InitVar[t.Type] = None
qualifier: InitVar[t.Any] = None
get_qualifier: InitVar[t.Callable[[t.Any], t.Any]] = None
label: str = None
annotation: t.Type = None
def __post_init__(
self,
name: str,
interface: t.Type,
qualifier: t.Any,
get_qualifier: t.Callable[[t.Any], t.Any] = None,
):
object.__setattr__(
self,
"context",
DIContext(
name=name, interface=interface, qualifier=qualifier, get_qualifier=get_qualifier
),
)
def __set_name__(self, owner, name: str) -> None:
annotation = owner.__annotations__.get(name) if hasattr(owner, "__annotations__") else None
# supporting object's immutability
object.__setattr__(self, "label", name)
if annotation:
object.__setattr__(self.context, "interface", annotation)
def __get__(self, instance: t.Any, owner: t.Type) -> t.Any:
if instance is None:
return self
container = get_di_container(instance)
if not container:
raise DIErrors.NO_CONTAINER_PROVIDED.with_params(
class_name=instance.__class__.__qualname__, attribute=self.label
)
context = self.context.determine(instance)
try:
return context.get(container=container)
except ConfigError as e:
raise e.with_params(
class_name=instance.__class__.__qualname__,
attribute=self.label,
context=e.params.get("context"),
)
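# A minimal, hypothetical sketch of Inject used as a descriptor. The component
# class, the `UserRepo` interface and the container wiring are assumptions made
# for illustration and are not defined in this module:
#
#   class UserService:
#       repo = Inject(interface=UserRepo)  # resolved through the DI container on attribute access
#
#       def find(self, user_id):
#           return self.repo.get(user_id)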
| python |
#!/usr/bin/env python
import sys
from os import path, makedirs
from argparse import ArgumentParser
import pickle
import math
from random import sample
import numpy as np
from time import time
from scipy.signal import gaussian
from skimage import io
from skimage.feature import ORB, match_descriptors, plot_matches
from skimage.measure import ransac
from skimage import transform as tf
try:
from mpi4py import MPI
except:
print("mpi4py could not be loaded")
def main(argv):
"""Generate matching point-pairs for stack registration."""
# parse arguments
parser = ArgumentParser(description="""
Generate matching point-pairs for stack registration.""")
parser.add_argument('imgdir',
help='a directory with images')
parser.add_argument('outputdir',
help='directory to write results')
parser.add_argument('-u', '--pairs',
help='pickle with pairs to process')
parser.add_argument('-c', '--connectivityfile',
help='file containing connectivity specification')
parser.add_argument('-t', '--n_tiles', type=int, default=4,
help='the number of tiles in the montage')
parser.add_argument('-f', '--overlap_fraction', type=float, nargs=2,
default=[0.1, 0.1],
help='section overlap in [y,x]')
parser.add_argument('-o', '--offsets', type=int, default=1,
help='the number of sections in z to consider')
parser.add_argument('-d', '--downsample_factor', type=int, default=1,
help='the factor to downsample the images by')
parser.add_argument('-w', '--transformname', default="EuclideanTransform",
help='scikit-image transform class name')
parser.add_argument('-k', '--n_keypoints', type=int, default=10000,
help='the number of initial keypoints to generate')
parser.add_argument('-r', '--residual_threshold', type=float, default=2,
help='inlier threshold for ransac')
parser.add_argument('-n', '--num_inliers', type=int, default=None,
help='the number of ransac inliers to look for')
parser.add_argument('-p', '--plotpairs', action='store_true',
help='create plots of point-pairs')
parser.add_argument('-m', '--usempi', action='store_true',
help='use mpi4py')
args = parser.parse_args()
imgdir = args.imgdir
outputdir = args.outputdir
if not path.exists(outputdir):
makedirs(outputdir)
confilename = args.connectivityfile
n_tiles = args.n_tiles
overlap_fraction = args.overlap_fraction
offsets = args.offsets
ds = args.downsample_factor
transformname = args.transformname
n_keypoints = args.n_keypoints
residual_threshold = args.residual_threshold
num_inliers = args.num_inliers
plotpairs = args.plotpairs
usempi = args.usempi & ('mpi4py' in sys.modules)
# get the image collection (reshaped to n_slcs x n_tiles)
imgs = io.ImageCollection(path.join(imgdir, '*.tif'))
n_slcs = int(len(imgs) / n_tiles)
imgs = [imgs[(slc + 1) * n_tiles - n_tiles:slc * n_tiles + n_tiles]
for slc in range(0, n_slcs)]
# determine which pairs of images to process
connectivities = read_connectivities(confilename)
unique_pairs = generate_unique_pairs(n_slcs, offsets, connectivities)
upairstring = 'unique_pairs' + '_c' + str(offsets) + '_d' + str(ds)
pairfile = path.join(outputdir, upairstring + '.pickle')
with open(pairfile, 'wb') as f:
pickle.dump(unique_pairs, f)
if args.pairs:
try:
with open(args.pairs, 'rb') as f:
pairs = pickle.load(f)
except:
pairs = find_missing_pairs(outputdir, unique_pairs, offsets, ds)
# pairs = find_small_pairs(outputdir, unique_pairs, offsets, ds, npairs=10)
# pairs = find_failed_pairs(outputdir, unique_pairs, offsets, ds)
else:
pairs = unique_pairs
# get the feature class
orb = ORB(n_keypoints=n_keypoints, fast_threshold=0.08,
n_scales=8, downscale=1.2)
if usempi:
# start the mpi communicator
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# scatter the pairs
local_nrs = scatter_series(len(pairs), comm, size, rank,
MPI.SIGNED_LONG_LONG)
else:
local_nrs = np.array(range(0, len(pairs)), dtype=int)
# process the assigned pairs
allpairs = []
for i in local_nrs:
pair = get_pair(outputdir, imgs, pairs[i], offsets,
ds, overlap_fraction, orb, plotpairs,
residual_threshold, num_inliers, transformname)
# FIXME: handle case where get_pair fails
allpairs.append(pair)
return allpairs
# ========================================================================== #
# function defs
# ========================================================================== #
def scatter_series(n, comm, size, rank, SLL):
"""Scatter a series of jobnrs over processes."""
nrs = np.array(range(0, n), dtype=int)
    local_n = np.ones(size, dtype=int) * (n // size)
local_n[0:n % size] += 1
local_nrs = np.zeros(local_n[rank], dtype=int)
displacements = tuple(sum(local_n[0:r]) for r in range(0, size))
comm.Scatterv([nrs, tuple(local_n), displacements,
SLL], local_nrs, root=0)
return local_nrs
def read_connectivities(confilename):
"""Read pair connectivities from file.
specified for each pair per line as:
type imno1 imno2
where type is one of x y tlbr trbl
connectivities = [['z', 0, 0], ['z', 1, 1], ['z', 2, 2], ['z', 3, 3],
['y', 0, 2], ['y', 1, 3],
['x', 0, 1], ['x', 2, 3],
['tlbr', 0, 3], ['trbl', 1, 2]]
# NOTE: ['trbl', 1, 2] non-overlapping for M3 dataset
"""
with open(confilename) as f:
con = [line.rstrip('\n').split() for line in f]
con = [[c[0], int(c[1]), int(c[2])] for c in con]
return con
def generate_pairstring(offsets, ds, p):
"""Get the pair identifier."""
pairstring = 'pair' + \
'_c' + str(offsets) + \
'_d' + str(ds) + \
'_s' + str(p[0][0]).zfill(4) + \
'-t' + str(p[0][1]) + \
'_s' + str(p[1][0]).zfill(4) + \
'-t' + str(p[1][1])
return pairstring
def generate_unique_pairs(n_slcs, offsets, connectivities):
"""Get a list of unique pairs with certain connectivity.
list is of the form [[slcIm1, tileIm1], [slcIm2, tileIm2], 'type']
"""
all_pairs = [[[slc, c[1]], [slc+o, c[2]], c[0]]
for slc in range(0, n_slcs)
for o in range(0, offsets+1)
for c in connectivities]
unique_pairs = []
for pair in all_pairs:
if (([pair[1], pair[0], pair[2]] not in unique_pairs) &
(pair[0] != pair[1]) &
(pair[1][0] != n_slcs)):
unique_pairs.append(pair)
return unique_pairs
def find_missing_pairs(directory, unique_pairs, offsets, ds):
"""Get a list of missing pairs.
list is of the form [[slcIm1, tileIm1], [slcIm2, tileIm2], 'type']
"""
missing_pairs = []
for p in unique_pairs:
pairstring = generate_pairstring(offsets, ds, p)
pairfile = path.join(directory, pairstring + ".pickle")
try:
open(pairfile, 'rb')
except:
missing_pairs.append(p)
return missing_pairs
def find_small_pairs(directory, unique_pairs, offsets, ds, npairs=100):
"""Get a list of failed pairs.
list is of the form [[slcIm1, tileIm1], [slcIm2, tileIm2], 'type']
"""
failed_pairs = []
for p in unique_pairs:
pairstring = generate_pairstring(offsets, ds, p)
pairfile = path.join(directory, pairstring + ".pickle")
p, src, _, model, _ = pickle.load(open(pairfile, 'rb'))
population = range(0, src.shape[0])
try:
sample(population, npairs)
except ValueError:
failed_pairs.append(p)
return failed_pairs
def find_failed_pairs(directory, unique_pairs, offsets, ds):
"""Get a list of failed pairs.
list is of the form [[slcIm1, tileIm1], [slcIm2, tileIm2], 'type']
"""
failed_pairs = []
for p in unique_pairs:
pairstring = generate_pairstring(offsets, ds, p)
pairfile = path.join(directory, pairstring + ".pickle")
p, _, _, model, _ = pickle.load(open(pairfile, 'rb'))
if np.isnan(model.params).any():
failed_pairs.append(p)
return failed_pairs
def downsample_images(p, imgs, ds):
"""Subsample images with downsample_factor"""
if ds > 1:
full_im1 = tf.rescale(imgs[p[0][0]][p[0][1]], 1./ds)
full_im2 = tf.rescale(imgs[p[1][0]][p[1][1]], 1./ds)
else:
full_im1 = imgs[p[0][0]][p[0][1]]
full_im2 = imgs[p[1][0]][p[1][1]]
return full_im1, full_im2
def select_imregions(ptype, full_im1, full_im2, overlap_pixels):
"""Select image regions to extract keypoints from."""
if ptype == 'z':
im1 = full_im1
im2 = full_im2
elif ptype in 'y':
y1 = full_im1.shape[0] - overlap_pixels[0]
y2 = overlap_pixels[0]
im1 = full_im1[y1:, :]
im2 = full_im2[:y2, :]
elif ptype in 'x':
x1 = full_im1.shape[1] - overlap_pixels[1]
x2 = overlap_pixels[1]
im1 = full_im1[:, x1:]
im2 = full_im2[:, :x2]
elif ptype in 'tlbr': # TopLeft - BottomRight
x1 = full_im1.shape[1] - 2 * overlap_pixels[1]
y1 = full_im1.shape[0] - 2 * overlap_pixels[0]
x2 = 2 * overlap_pixels[1]
y2 = 2 * overlap_pixels[0]
im1 = full_im1[y1:, x1:]
im2 = full_im2[:y2, :x2]
elif ptype in 'trbl': # TopRight - BottomLeft
x1 = full_im1.shape[1] - 2 * overlap_pixels[1]
y1 = 2 * overlap_pixels[0]
x2 = 2 * overlap_pixels[1]
y2 = full_im2.shape[0] - 2 * overlap_pixels[0]
im1 = full_im1[:y1, x1:]
im2 = full_im2[y2:, :x2]
return im1, im2
def get_keypoints(orb, im):
"""Get matching keypoints."""
orb.detect_and_extract(im)
kp = orb.keypoints
ds = orb.descriptors
return kp, ds
def reset_imregions(ptype, kp_im1, kp_im2, overlap_pixels, imshape):
"""Transform keypoints back to full image space."""
if ptype in 'z':
pass
elif ptype in 'y':
kp_im1[:, 0] += imshape[0] - overlap_pixels[0]
elif ptype in 'x':
kp_im1[:, 1] += imshape[1] - overlap_pixels[1]
elif ptype in 'tlbr': # TopLeft - BottomRight
kp_im1[:, 0] += imshape[0] - 2 * overlap_pixels[0]
kp_im1[:, 1] += imshape[1] - 2 * overlap_pixels[1]
elif ptype in 'trbl': # TopRight - BottomLeft
kp_im1[:, 0] += imshape[0] - 2 * overlap_pixels[0]
kp_im2[:, 1] += imshape[1] - 2 * overlap_pixels[1]
return kp_im1, kp_im2
def plot_pair_ransac(outputdir, pairstring, p, full_im1, full_im2,
kp_im1, kp_im2, matches, inliers):
"""Create plots of orb keypoints vs. ransac inliers."""
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1)
plot_matches(ax1, full_im1, full_im2, kp_im1, kp_im2,
matches, only_matches=True)
ax1.axis('off')
plot_matches(ax2, full_im1, full_im2, kp_im1, kp_im2,
matches[inliers], only_matches=True)
ax2.axis('off')
plotdir = path.join(outputdir, 'plotpairs')
if not path.exists(plotdir):
makedirs(plotdir)
fig.savefig(path.join(plotdir, pairstring))
plt.close(fig)
def get_pair(outputdir, imgs, p, offsets, ds,
overlap_fraction, orb,
plotpairs=0, res_th=10, num_inliers=100,
transformname="EuclideanTransform"):
"""Create inlier keypoint pairs."""
pair_tstart = time()
overlap_pixels = [int(math.ceil(d * of * 1/ds))
for d, of in zip(imgs[0][0].shape, overlap_fraction)]
f1, f2 = downsample_images(p, imgs, ds)
p1, p2 = select_imregions(p[2], f1, f2, overlap_pixels)
kp1, de1 = get_keypoints(orb, p1)
kp2, de2 = get_keypoints(orb, p2)
kp1, kp2 = reset_imregions(p[2], kp1, kp2, overlap_pixels, f1.shape)
matches = match_descriptors(de1, de2, cross_check=True)
dst = kp1[matches[:, 0]][:, ::-1]
src = kp2[matches[:, 1]][:, ::-1]
transform = eval("tf.%s" % transformname)
model, inliers = ransac((src, dst), transform, min_samples=4,
residual_threshold=res_th,
max_trials=1000, stop_sample_num=num_inliers)
# get the weighing kernel in z
k = gaussian(offsets*2+1, 1, sym=True)
w = k[offsets - (p[1][0] - p[0][0])]
# transform from downsampled space to full
S = np.array([[ds, 0, 0],
[0, ds, 0],
[0, 0, 1]])
s = np.c_[src, np.ones(src.shape[0])].dot(S)[inliers, :2]
d = np.c_[dst, np.ones(dst.shape[0])].dot(S)[inliers, :2]
pair = (p, s, d, model, w)
pairstring = generate_pairstring(offsets, ds, p)
pairfile = path.join(outputdir, pairstring + '.pickle')
pickle.dump(pair, open(pairfile, 'wb'))
if plotpairs:
plot_pair_ransac(outputdir, pairstring, p,
f1, f2, kp1, kp2, matches, inliers)
print('%s done in: %6.2f s; matches: %05d; inliers: %05d'
% (pairstring, time() - pair_tstart, len(matches), np.sum(inliers)))
return pair
if __name__ == "__main__":
main(sys.argv)
| python |
from typing import List, Dict
from .exceptions import ProductsNotFound
from .interfaces import CartProduct
from ...repositories.interfaces import AbstractRepository
def dict_to_products(
requested_products: List[Dict], product_repository: AbstractRepository
) -> List[CartProduct]:
requested_ids = {p["id"] for p in requested_products}
products = product_repository.find_by_ids(list(requested_ids))
if len(requested_ids) != len(products):
found_ids = {p["id"] for p in products}
raise ProductsNotFound(requested_ids.difference(found_ids))
grouped_products = {p["id"]: p for p in products}
return [
CartProduct(
**{
"id": p["id"],
"quantity": p["quantity"],
"unit_amount": grouped_products[p["id"]]["amount"],
"total_amount": grouped_products[p["id"]]["amount"]
* p["quantity"],
"discount": 0,
"is_gift": False,
}
)
for p in requested_products
]
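# A minimal sketch of the expected call shape. The in-memory repository below
# is a stand-in for a concrete AbstractRepository and is not part of this package:
#
#   class _FakeRepo:
#       def find_by_ids(self, ids):
#           return [{"id": 1, "amount": 10.0}]
#
#   items = dict_to_products([{"id": 1, "quantity": 3}], _FakeRepo())
#   # -> [CartProduct(id=1, quantity=3, unit_amount=10.0, total_amount=30.0,
#   #                 discount=0, is_gift=False)]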
| python |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from pandas import *
import os, os.path
import sys
sys.path.append('/home/will/HIVReportGen/AnalysisCode/')
# <codecell>
store = HDFStore('/home/will/HIVReportGen/Data/SplitRedcap/2013-01-16/EntireCohort.hdf')
# <codecell>
redcap_data = store['redcap']
seq_data = store['seq_data']
visit_data = store['visit_redcap']
pat_data = store['pat_redcap']
# <codecell>
ofields = ['Latest viral load', 'Latest CD4 count (cells/uL)', 'Total Modified Hopkins Dementia Score']
wanted_fields = ['CalcAge', 'Gender', 'Drug User Classification', 'Hepatitis C status (HCV)', 'Predicted-R5']
seq_fields = ['LTR', 'Vpr', 'Tat', 'V3']
# <codecell>
have_seq = seq_data[seq_fields].apply(lambda x: x.notnull()).fillna(False)
pat_fields = visit_data
all_fields = concat([pat_fields, have_seq], axis = 1)
all_fields['Predicted-R5'] = all_fields['Predicted-R5']>=0.8
# <codecell>
def check_fun(df):
wanted_drugs = ["Current ART (choice='%s')" % d for d in ['TDF', 'Truvada', 'Atripla']]
start_niave = df['Current ART status'][0] == 'naive'
on_therapy = (df['Current ART status'] == 'on').any()
on_wanted = df[wanted_drugs].any().any()
return start_niave & on_therapy & on_wanted
wanted_drugs = ["Current ART (choice='%s')" % d for d in ['TDF', 'Truvada', 'Atripla']]
tdata = all_fields[['Current ART status'] + wanted_drugs]
res = tdata.groupby(level = 0).apply(check_fun)
# <codecell>
all_fields.index.names = ['Patient ID', 'Visit Number']
output = merge(all_fields[[]].reset_index(), DataFrame({'result':res}), left_on = 'Patient ID', right_index = True)
print output[['Patient ID', 'Visit Number', 'result']].head(n = 20).to_string()
# <codecell>
output.to_csv('/home/will/tmpstuf/drugged_data.csv')
# <codecell>
all_fields.fillna(False).to_csv('/home/will/HIVSystemsBio/NewPatientInfo_extreme.csv')
# <codecell>
ols?
# <codecell>
mask = redcap_data['Patient ID'] == 'A0008'
ofields = ['Latest viral load', 'Latest CD4 count (cells/uL)', 'Total Modified Hopkins Dementia Score']
other_fields = ['Gender', 'Current ART status', 'Age', 'Hepatitis C status (HCV)', 'Hepatitis B status (HBV)', 'Years seropositive', 'HIV seropositive date']
race_fields = ["Race (choice='Asian')",
"Race (choice='American Indian/Alaska Native')",
"Race (choice='Black or African American')",
"Race (choice='Native Hawaiian or other Pacific Islander')",
"Race (choice='White')",
"Race (choice='More than one race')",
"Race (choice='Unknown')",
]
drug_fields = [
'Amphetamines',
'Barbiturates',
'Benzodiazepines',
'Cannabinoid',
'Cocaine + metabolite',
'Opiates',
'Phencyclidine']
print redcap_data[['Patient visit number', 'Date of visit']+ other_fields][mask].to_string(), '\n\n\n\n'
print redcap_data[['Patient visit number', 'Date of visit']+ ofields][mask].to_string(), '\n\n\n\n'
print redcap_data[['Patient visit number', 'Date of visit']+ race_fields][mask].T.to_string(), '\n\n\n\n'
print redcap_data[['Patient visit number', 'Date of visit']+ drug_fields][mask].to_string(), '\n\n\n\n'
# <codecell>
t = redcap_data['Event Name'].apply(lambda x: int(x.split(' - ')[0][1:]))
t.unique()
redcap_data['VisitNum'] = redcap_data['VisitNum'].combine_first(t)
# <codecell>
t = all_fields['Event Name'].dropna().apply(lambda x: int(x.split(' - ')[0][1:]))
all_fields['VisitNum'] = all_fields['VisitNum'].combine_first(t)
# <codecell>
all_fields['Drug User Classification'].unique()
# <codecell>
drug_fields = [
'Cocaine + metabolite',
'Amphetamines',
'Barbiturates',
'Benzodiazepines',
'Cannabinoid',
'Opiates',
'Phencyclidine']
drug_fields[1:]
# <codecell>
drug_fields = [
'Cocaine + metabolite',
'Amphetamines',
'Barbiturates',
'Benzodiazepines',
'Cannabinoid',
'Opiates',
'Phencyclidine']
admit_fields = [
"Drugs used (choice='Marijuana')",
"Drugs used (choice='Cocaine (crack, nasal, smoke, inject)')",
"Drugs used (choice='Heroin (nasal, inject)')",
"Drugs used (choice='Methamphetamine (smoke, nasal, inject)')",
"Drugs used (choice='Benzodiazapine (i.e. valium, ativan, xanax, klonipin, etc)')",
"Drugs used (choice='Narcotics')",
"Drugs used (choice='Ecstasy')",
"Drugs used (choice='PCP')",
"Drugs used (choice='Ritalin')",
"Drugs used (choice='Other')"]
tmp = all_fields[drug_fields + admit_fields +['LTR']].reset_index()
def check_PN(df):
any_pos = df[drug_fields].any().any()
any_admit = df[admit_fields].any().any()
return (any_admit | any_pos)
def check_PC(df):
pos_coc = df[drug_fields[0]].any()
pos_other = df[drug_fields[1:]].any().any()
return pos_coc and ~pos_other
def check_mdu(df):
num_pos = df[drug_fields].any().sum()
return num_pos > 1
def check_ltr(df):
return df['LTR'].values[-1]
#print tmp
checks = {'LTR': check_ltr,
'PN': check_PN,
'PC': check_PC,
'MDU': check_mdu,}
nchecks = list(checks.items())
res = []
valid_visits = tmp['Visit Number']=='A'
for visit in range(10):
visit_str = 'R%02i' % visit
visit_mask = tmp['Visit Number'] == visit_str
valid_visits |= visit_mask
res.append(('#Patients', visit_str, visit_mask.sum()))
ntmp = tmp.ix[valid_visits]
pats = ntmp.groupby('Patient ID')
for pat, ndf in pats:
for name, func in checks.items():
nres = func(ndf)
print nres
raise KeyError
#df = DataFrame(res, columns = ['Header', 'VisitNum', 'Value'])
#res = pivot_table(df, rows = ['VisitNum'], cols='Header', values= 'Value')
#print res
# <codecell>
tmp = read_csv('/home/will/HIVSystemsBio/NewCytokineAnalysis/CytoPatData.csv', sep = '\t')
wanted_pats = tmp['Patient ID']
wanted_data = {}
wanted_visits = dict([(p, v) for p,v in zip(tmp['Patient ID'].values, tmp['VisitNum'].values)])
for key, group in redcap_data.groupby('Patient ID'):
if key in wanted_visits:
vname = wanted_visits[key]
wnum = int(vname[1:])
wdata = group['VisitNum']<= wnum
res = group[drug_fields].ix[wdata].mean(axis = 0)
wanted_data[key] = res
print wanted_data.keys()[:5]
drug_mean = DataFrame(wanted_data).T.rename(columns = dict([(col, 'TOSample-'+col) for col in drug_fields]))
drug_mean.ix[wanted_pats].to_csv('/home/will/HIVSystemsBio/NewCytokineAnalysis/ToSampledrug.csv')
# <codecell>
from itertools import groupby
import csv
def missing_test(visit_nums, visit_dates, check_ser):
for v, date, val in zip(visit_nums, visit_dates, check_ser):
if val != val:
yield v, date, 'Missing Value', 1
def consistency_test(visit_nums, visit_dates, check_ser):
#print t
if len(check_ser.dropna().unique())>1:
for v, date, val in zip(visit_nums, visit_dates, check_ser):
yield v, date, 'Inconsitent Value', 1
def diagnose_test(visit_nums, visit_dates, check_ser, debug = False):
tmp = DataFrame({'Visit':visit_nums, 'Date':visit_dates, 'Check':check_ser}).dropna()
#print tmp
tmp.sort(columns = 'Date')
is_sick = False
for _, row in tmp.iterrows():
if (row['Check'] == False) and (is_sick == True):
yield row['Visit'], row['Date'], 'Inconsistent Diagnosis', 1
is_sick |= row['Check']==1
def nearby_date(check_dates, visit_dates):
(check_dates - visit_dates).weeks
with open('/home/will/tmpstuf/test_smells.csv') as handle:
junk = handle.next()
check_rules = [row for row in csv.reader(handle, delimiter = '\t') if row[3].strip()]
messages = []
for patid, df in redcap_data.groupby('Patient ID'):
for col, report_col, _, testfun in check_rules:
if (testfun == 'consistency_test') or (testfun == 'date_consistency'):
msgs = list(consistency_test(df['Patient visit number'], df['Date of visit'], df[col]))
elif testfun == 'diagnose_test':
#if col == 'Hepatitis C status (HCV)':
#print col, df[col]
#print len(list(diagnose_test(df['Patient visit number'], df['Date of visit'], df[col], debug = True)))
#raise KeyError
msgs = list(diagnose_test(df['Patient visit number'], df['Date of visit'], df[col]))
else:
msgs = list(missing_test(df['Patient visit number'], df['Date of visit'], df[col]))
for v, date, msg, score in msgs:
messages.append((col, report_col, patid, v, date, msg, score))
# <codecell>
tmp = DataFrame(messages, columns = ['Colname', 'Grouping', 'Patient ID', 'Visit', 'VisitDate', 'Message', 'Wrongness'])
print tmp.head(n= 100).to_string()
# <codecell>
res = pivot_table(tmp, rows = 'VisitDate', cols = 'Message', values = 'Wrongness', aggfunc=np.sum)
#res['Inconsitent Value'].dropna()
plt.figure(figsize = (10,10))
rolling_mean(res, 30, min_periods=2).plot(ax = plt.gca())
# <codecell>
tmp.groupby(['Patient ID']).sum().min()
# <codecell>
redcap_data['Hepatitis C status (HCV)'].dropna()
# <codecell>
| python |
# Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import abc
import types
import weakref
from .manager import WidgetManager
class AwaitingListeners(list):
def next(self):
for item in self:
try:
next(item)
except StopIteration:
pass
__next__ = next
class BaseWidget(object):
"""
  Base class for C4D native widgets. Widgets are usually bound to a
  #WidgetManager; only then can they allocate IDs and take part in the
  dialog layout.
  # Members
id (str): The ID of the widget. May be #None.
manager (WidgetManager): A #WidgetManager (internally stored as a weak
reference). If this member is set to #None, the widget is "unbound".
Unbound widgets can not allocate IDs and are not part of any dialog.
enabled (bool): Whether the widget is enabled.
visible (bool): Whether the widget is visible.
parent (Widget): The parent #Widget (internally stored as a weak reference).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, id=None):
self.id = id
self._manager = None
self._allocated_ids = []
self._free_id_offset = 0 # Index of the next free ID in _allocated_ids
self._named_ids = {}
self._render_dirty = 0 # Dirty-count after rendering, set by WidgetManager
self._enabled = self._enabled_temp = True
self._visible = self._visible_temp = True
self._parent = None
self._listeners = {}
@property
def manager(self):
if self._manager is None:
manager = None
else:
manager = self._manager()
if manager is None:
raise RuntimeError('lost reference to WidgetManager')
return manager
@manager.setter
def manager(self, manager):
if manager is not None and not isinstance(manager, WidgetManager):
raise TypeError('expected WidgetManager')
# Remove the widget from the previous manager.
old = self._manager() if self._manager is not None else None
if old:
old._id_widget_map.pop(self.id, None)
if manager is None:
self._manager = None
else:
self._manager = weakref.ref(manager)
manager._id_widget_map[self.id] = weakref.ref(self)
  @property
  def dialog(self):
    manager = self.manager
    if manager:
      return manager.dialog()
    return None
@property
def enabled(self):
return self._enabled_temp
@enabled.setter
def enabled(self, value):
self._enabled_temp = bool(value)
manager = self.manager
if self._enabled_temp != self._enabled and manager:
manager.layout_changed()
@property
def visible(self):
while self:
if not self._visible_temp:
return False
self = self.parent
return True
@visible.setter
def visible(self, value):
self._visible_temp = bool(value)
manager = self.manager
if self._visible_temp != self._visible and manager:
manager.layout_changed()
@property
def parent(self):
if self._parent is None:
return None
else:
parent = self._parent()
if parent is None:
raise RuntimeError('lost reference to parent')
return parent
@parent.setter
def parent(self, parent):
if parent is not None and not isinstance(parent, BaseGroupWidget):
raise TypeError('expected BaseGroupWidget')
if parent is None:
self._parent = None
else:
self._parent = weakref.ref(parent)
@property
def previous_sibling(self):
parent = self.parent
if parent:
index = parent._children.index(self) - 1
if index < 0: return None
return parent._children[index]
return None
@property
def next_sibling(self):
parent = self.parent
if parent:
index = parent._children.index(self) + 1
if index >= len(parent._children): return None
return parent._children[index]
return None
def remove(self):
"""
Removes the widget from the hierarchy.
"""
parent = self.parent
if parent is not None:
parent._children.remove(self)
parent.layout_changed()
self._parent = None
def alloc_id(self, name=None):
"""
Allocates a new, unused ID for a dialog element. If a *name* is specified,
the returned ID will be saved under that name and can be retrieved using
#get_named_id().
"""
manager = self.manager
if self._free_id_offset < len(self._allocated_ids):
# Re-use existing IDs.
result = self._allocated_ids[self._free_id_offset]
self._free_id_offset += 1
else:
result = manager.alloc_id()
self._allocated_ids.append(result)
self._free_id_offset = len(self._allocated_ids)
if name is not None:
self._named_ids[name] = result
return result
def get_named_id(self, name, default=NotImplemented):
"""
Returns the value of a named ID previously created with #alloc_id().
Raises a #KeyError if the named ID does not exist. If *default* is
specified, it will be returned instead of a #KeyError being raised.
"""
try:
return self._named_ids[name]
except KeyError:
if default is NotImplemented:
raise
return default
def add_event_listener(self, name, func=None):
"""
Adds an event listener. If *func* is omitted, returns a decorator.
"""
def decorator(func):
self._listeners.setdefault(name, []).append(func)
return func
if func is not None:
decorator(func)
return None
else:
return decorator
def send_event(self, __name, *args, **kwargs):
"""
Sends an event to all listeners listening to that event. If any listener
returns a value evaluating to #True, the event is no longer propagated
to any other listeners and #True will be returned. If no listener returns
#True, #False is returned from this function.
A listener may return a generator object in which case the first yielded
value is used as the True/False response. The initiator of the event may
query the generator a second time (usually resulting in #StopIteration).
Returns an #AwaitingListeners object and the result value.
"""
awaiting_listeners = AwaitingListeners()
result = False
for listener in self._listeners.get(__name, []):
obj = listener(*args, **kwargs)
if isinstance(obj, types.GeneratorType):
awaiting_listeners.append(obj)
obj = next(obj)
if obj:
result = True
break
return awaiting_listeners, result
def save_state(self):
"""
Save the state and value of the widget so it can be restored in the
same way the next time the widget is rendered.
"""
pass
def on_render_begin(self):
"""
This method is called on all widgets that are about to be rendered.
"""
# We don't flush already allocated IDs, but we want to be able to
# re-use them.
self._free_id_offset = 0
# Also flush the named IDs mapping.
self._named_ids.clear()
@abc.abstractmethod
def render(self, dialog):
"""
Called to render the widget into the #c4d.gui.GeDialog. Widgets that
encompass multiple Cinema 4D dialog elements should enclose them in
their own group, unless explicitly documented for the widget.
Not doing so can mess up layouts in groups that have more than one
column and/or row.
# Example
```python
def render(self, dialog):
id = self.alloc_id(name='edit_field')
dialog.AddEditNumberArrows(id, c4d.BFH_SCALEFIT)
```
"""
pass
def init_values(self, dialog):
pass
def command_event(self, id, bc):
"""
Called when a Command-event is received. Returns #True to mark the
    event as handled and avoid further propagation.
"""
pass
def input_event(self, bc):
"""
Called when an Input-event is received. Returns #True to mark the
    event as handled and avoid further propagation.
"""
pass
def layout_changed(self):
"""
Should be called after a widget changed its properties. The default
implementation will simply call the parent's #layout_changed() method,
if there is a parent. The #WidgetManager will also be notified. At the
next possible chance, the widget will be re-rendered (usually requiring
a re-rendering of the whole parent group).
"""
manager = self.manager
if manager is not None:
manager.layout_changed()
parent = self.parent
if parent is not None:
parent.layout_changed()
def update_state(self, dialog):
"""
This function is called from #update() by default. It should perform a
non-recursive update of the dialog. The default implementation updates
the enabled and visibility state of the allocated widget IDs.
"""
changed = False
parent = self.parent
parent_id = parent.get_named_id('group', None) if isinstance(parent, Group) else None
awaiting_listeners = AwaitingListeners()
if self._enabled_temp != self._enabled:
awaiting_listeners = self.send_event('enabling-changed', self)[0]
changed = True
self._enabled = self._enabled_temp
for v in self._allocated_ids:
dialog.Enable(v, self._enabled)
if self._visible_temp != self._visible:
awaiting_listeners = self.send_event('visibility-changed', self)[0]
changed = True
self._visible = self._visible_temp
for v in self._allocated_ids:
dialog.HideElement(v, not self._visible)
if parent_id is None: # Notify the elements themselves
dialog.queue_layout_changed(v)
if changed and parent_id is not None:
dialog.queue_layout_changed(parent_id)
if awaiting_listeners:
dialog.widgets.queue(next, awaiting_listeners)
def update(self, dialog):
"""
Called to update the visual of the element. Groups will use this to
re-render their contents when their layout has changed.
"""
self.update_state(dialog)
class BaseGroupWidget(BaseWidget):
def __init__(self, id=None):
BaseWidget.__init__(self, id)
self._children = []
self._forward_events = set(['enabling-changed', 'visibility-changed'])
@property
def children(self):
return self._children
def pack(self, widget):
"""
Adds a child widget.
"""
if not isinstance(widget, BaseWidget):
raise TypeError('expected BaseWidget')
widget.remove()
widget.parent = self
widget.manager = self.manager
self._children.append(widget)
self.layout_changed()
def flush_children(self):
"""
Removes all children.
"""
for child in self._children[:]:
assert child.parent is self, (child, child.parent)
child.remove()
assert len(self._children) == 0
# BaseWidget overrides
@BaseWidget.manager.setter
def manager(self, manager):
# Propagate the new manager to child widgets.
for child in self._children:
child.manager = manager
BaseWidget.manager.__set__(self, manager)
def on_render_begin(self):
BaseWidget.on_render_begin(self)
for child in self._children:
child.on_render_begin()
def render(self, dialog):
for child in self._children:
child.render(dialog)
def init_values(self, dialog):
for child in self._children:
child.init_values(dialog)
def command_event(self, id, bc):
for child in self._children:
if child.command_event(id, bc):
return True
return False
def input_event(self, bc):
for child in self._children:
if child.input_event(bc):
return True
return False
def update(self, dialog):
BaseWidget.update(self, dialog)
for child in self._children:
child.update(dialog)
def save_state(self):
for child in self._children:
child.save_state()
def send_event(self, __name, *args, **kwargs):
awaiting_listeners, result = super(BaseGroupWidget, self).send_event(
__name, *args, **kwargs)
if __name in self._forward_events:
for child in self._children:
awaiting_listeners += child.send_event(__name, *args, **kwargs)[0]
return awaiting_listeners, result
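# A minimal usage sketch for the event API above; `widget` and the listener
# name are illustrative and not part of this module:
#
#   @widget.add_event_listener('visibility-changed')
#   def on_visibility(sender):
#       ...  # react to the change
#
#   awaiting, handled = widget.send_event('visibility-changed', widget)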
from .widgets import Group
| python |
from __future__ import annotations
import toolsql
contract_creation_blocks_schema: toolsql.DBSchema = {
'tables': {
'contract_creation_blocks': {
'columns': [
{
'name': 'address',
'type': 'Text',
'primary': True,
},
{
'name': 'block_number',
'type': 'Integer',
'index': True,
},
],
},
},
}
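# Illustrative row shape implied by the schema above (values are made up):
#   {'address': '0x0123abcd...', 'block_number': 1234567}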
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from gspread import authorize
from json import dumps
from oauth2client.service_account import ServiceAccountCredentials
from copy import deepcopy
prefix_github = 'https://github.com/'
prefix_mirror = 'FIWARE-GEs/'
scope = ['https://spreadsheets.google.com/feeds']
ws_c = 'Catalog'
ws_g = 'GitHub'
ws_d = 'Docker'
c_output = 'enablers_clair.json'
r_output = 'reposynchronizer.json'
p_output = 'prcloser.json'
a_output = 'apispectransformer.json'
tm_output = 'metrics_endpoints.json'
te_output = 'enablers_tsc.json'
columns_c = ['GE Tech Name',
'GE Full Name',
'Status',
'Chapter',
'Owner',
'HelpDesk',
'Academy',
'Read the Docs',
'Stack Overflow',
'Q&A',
'Academy-Legacy',
'Catalog-Legacy',
'Type-Legacy',
'Coverall']
columns_d = ['GE Tech Name',
'Entry Full Name',
'Entry Tech Name',
'Docker Image',
'Repository']
columns_g = ['GE Tech Name',
'Entry Full Name',
'Entry Tech Name',
'Repository',
'API',
'Transform']
tsc_dashboard_template = {
'enabler': '',
'catalogue': '',
'academy': '',
'readthedocs': '',
'helpdesk': '',
'coverall': '',
'github': list(),
'docker': list()
}
tsc_enablers_template = {
'name': '',
'status': '',
'chapter': '',
'type': '',
'owner': ''
}
# Returns the row index of a GE in the given sheet, needed e.g. to check whether its status is deprecated
def get_id(f_array, f_index, f_entry):
for row in range(1, len(f_array)):
if f_array[row][f_index] == f_entry:
return row
return None
# Fills in empty cells
def normalize(f_array, f_index):
for row in range(1, len(f_array)):
if f_array[row][f_index] == '':
f_array[row][f_index] = f_array[row - 1][f_index]
return f_array
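# Illustrative example (not from the original sheets):
#   normalize([['Name'], ['orion'], ['']], 0) -> [['Name'], ['orion'], ['orion']]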
# Returns the column index for a given column name
def return_index(f_index, f_array):
if f_index in f_array[0]:
return f_array[0].index(f_index)
return None
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--id', required=True, help='ID of google doc', action="store")
parser.add_argument('-c', help='FIWARE Clair', action="store_true")
parser.add_argument('-r', help='Repository Synchronizer', action="store_true")
parser.add_argument('-p', help='Pull Request Closer', action="store_true")
parser.add_argument('-a', help='API Specifications Transformer', action="store_true")
parser.add_argument('-tm', help='FIWARE TSC Dashboard - metrics', action="store_true")
parser.add_argument('-te', help='FIWARE TSC Dashboard - enablers', action="store_true")
args = parser.parse_args()
result = dict()
index_c = dict()
index_g = dict()
index_d = dict()
f = None
print("Started")
# Download the content (sheets -> raw values)
credentials = ServiceAccountCredentials.from_json_keyfile_name('auth.json', scope)
gc = authorize(credentials)
ws_c = gc.open_by_key(args.id).worksheet(ws_c)
values_c = ws_c.get_all_values()
ws_g = gc.open_by_key(args.id).worksheet(ws_g)
values_g = ws_g.get_all_values()
ws_d = gc.open_by_key(args.id).worksheet(ws_d)
values_d = ws_d.get_all_values()
# Find indexes of columns (sheet can be reorganized in different ways) and fill in empty cells
for el in columns_c:
index_c[el] = return_index(el, values_c)
if index_c[el] is None:
print('Column "' + el + '" not found in the doc')
else:
values_c = normalize(values_c, index_c[el])
for el in columns_g:
index_g[el] = return_index(el, values_g)
if index_g[el] is None:
print('Column "' + el + '" not found in the doc')
else:
values_g = normalize(values_g, index_g[el])
for el in columns_d:
index_d[el] = return_index(el, values_d)
if index_d[el] is None:
print('Column "' + el + '" not found in the doc')
else:
values_d = normalize(values_d, index_d[el])
# FIWARE Clair
if args.c:
result['enablers'] = list()
for el in range(1, len(values_d)):
if values_d[el][index_d['Docker Image']] not in ['-', '?']:
# check status
el_c = get_id(values_c, index_c['GE Tech Name'], values_d[el][index_d['GE Tech Name']])
if values_c[el_c][index_c['Status']] in ['deprecated']:
continue
# fill in entity
item = {'name': values_c[el_c][index_c['GE Tech Name']],
'image': values_d[el][index_d['Docker Image']]}
if values_d[el][index_d['Entry Tech Name']] != '-':
item['name'] += '.' + values_d[el][index_d['Entry Tech Name']]
result['enablers'].append(item)
result['enablers'] = sorted(result['enablers'], key=lambda k: k['name'])
f = open(c_output, 'w')
# Repository Synchronizer
if args.r:
result['repositories'] = list()
for el in range(1, len(values_g)):
if values_g[el][index_g['Repository']] not in ['-', '?']:
# check status
el_c = get_id(values_c, index_c['GE Tech Name'], values_g[el][index_g['GE Tech Name']])
if values_c[el_c][index_c['Status']] in ['deprecated']:
continue
# fill in entity
item = {'source': values_g[el][index_g['Repository']],
'target': prefix_mirror + values_g[el][index_g['GE Tech Name']]}
if values_g[el][index_g['Entry Tech Name']] != '-':
item['target'] += '.' + values_g[el][index_g['Entry Tech Name']]
result['repositories'].append(item)
result['repositories'] = sorted(result['repositories'], key=lambda k: k['target'])
f = open(r_output, 'w')
# Pull Request Closer
if args.p:
result['repositories'] = list()
for el in range(1, len(values_g)):
if values_g[el][index_g['Repository']] not in ['-', '?']:
# check status
el_c = get_id(values_c, index_c['GE Tech Name'], values_g[el][index_g['GE Tech Name']])
if values_c[el_c][index_c['Status']] in ['deprecated']:
continue
# fill in entity
item = prefix_mirror + values_g[el][index_g['GE Tech Name']]
if values_g[el][index_g['Entry Tech Name']] != '-':
item += '.' + values_g[el][index_g['Entry Tech Name']]
result['repositories'].append(item)
result['repositories'] = sorted(result['repositories'])
f = open(p_output, 'w')
# API Specifications Transformer
if args.a:
result = {'repositories': list(),
'format': 'swagger20',
'branches': ['master', 'gh-pages']}
for el in range(1, len(values_g)):
if values_g[el][index_g['API']] not in ['-', '?']:
# check status
el_c = get_id(values_c, index_c['GE Tech Name'], values_g[el][index_g['GE Tech Name']])
if values_c[el_c][index_c['Status']] in ['deprecated']:
continue
# fill in entity
item = {'target': 'Fiware/specifications',
'source': 'FIWARE-GEs/' + values_g[el][index_g['GE Tech Name']],
'files': list()}
if values_g[el][index_g['Entry Tech Name']] != '-':
item['source'] += '.' + values_g[el][index_g['Entry Tech Name']]
file = {'source': values_g[el][index_g['API']],
'target': 'OpenAPI/' + values_g[el][index_g['GE Tech Name']] + '/openapi.json',
'transform': True}
if values_g[el][index_g['Transform']] == 'FALSE':
file['transform'] = False
item['files'].append(file)
result['repositories'].append(item)
f = open(a_output, 'w')
# FIWARE TSC Dashboard - metrics
if args.tm:
result = list()
for el in range(1, len(values_c)):
item = deepcopy(tsc_dashboard_template)
item['enabler'] = values_c[el][index_c['GE Full Name']]
if values_c[el][index_c['Catalog-Legacy']] not in ['-']:
item['catalogue'] = values_c[el][index_c['Catalog-Legacy']]
if values_c[el][index_c['Academy-Legacy']] not in ['-']:
item['academy'] = values_c[el][index_c['Academy-Legacy']]
if values_c[el][index_c['Read the Docs']] not in ['-']:
item['readthedocs'] = values_c[el][index_c['Read the Docs']]
if values_c[el][index_c['HelpDesk']] not in ['?', '-']:
item['helpdesk'] = values_c[el][index_c['HelpDesk']]
if values_c[el][index_c['Coverall']] not in ['?', '-']:
item['coverall'] = values_c[el][index_c['Coverall']]
for el_g in range(1, len(values_g)):
if values_g[el_g][index_g['GE Tech Name']] == values_c[el][index_c['GE Tech Name']]:
if values_g[el_g][index_g['Repository']] not in ['?', '-']:
item['github'].append(values_g[el_g][index_g['Repository']])
for el_d in range(1, len(values_d)):
if values_d[el_d][index_d['GE Tech Name']] == values_c[el][index_c['GE Tech Name']]:
if values_d[el_d][index_d['Docker Image']] not in ['?', '-']:
item['docker'].append(values_d[el_d][index_d['Docker Image']])
result.append(item)
result = sorted(result, key=lambda k: k['enabler'])
f = open(tm_output, 'w')
# FIWARE TSC Dashboard - enablers
if args.te:
result = list()
for el in range(1, len(values_c)):
item = deepcopy(tsc_enablers_template)
item['name'] = values_c[el][index_c['GE Full Name']]
item['status'] = values_c[el][index_c['Status']]
if values_c[el][index_c['Chapter']] not in ['-']:
item['chapter'] = values_c[el][index_c['Chapter']]
if values_c[el][index_c['Type-Legacy']] not in ['-']:
item['type'] = values_c[el][index_c['Type-Legacy']]
item['owner'] = values_c[el][index_c['Owner']]
result.append(item)
result = sorted(result, key=lambda k: k['name'])
f = open(te_output, 'w')
f.write(dumps(result, indent=4, ensure_ascii=False) + '\n')
print("Finished")
| python |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.spiking_util as spiking
"""
Relevant literature:
- Zenke et al. 2018: "SuperSpike: Supervised Learning in Multilayer Spiking Neural Networks"
- Bellec et al. 2020: "A solution to the learning dilemma for recurrent networks of spiking neurons"
- Fang et al. 2020: "Incorporating Learnable Membrane Time Constant to Enhance Learning of Spiking Neural Networks"
- Ledinauskas et al. 2020: "Training Deep Spiking Neural Networks"
- Perez-Nieves et al. 2021: "Neural heterogeneity promotes robust learning"
- Yin et al. 2021: "Accurate and efficient time-domain classification with adaptive spiking recurrent neural networks"
- Zenke et al. 2021: "The Remarkable Robustness of Surrogate Gradient Learning for Instilling Complex Function in Spiking Neural Networks"
- Fang et al. 2021: "Spike-based Residual Blocks"
- Paredes-Valles et al. 2020: "Unsupervised Learning of a Hierarchical Spiking Neural Network for Optical Flow Estimation: From Events to Global Motion Perception"
"""
class ConvLIF(nn.Module):
"""
Convolutional spiking LIF cell.
Design choices:
- Arctan surrogate grad (Fang et al. 2021)
- Hard reset (Ledinauskas et al. 2020)
- Detach reset (Zenke et al. 2021)
- Multiply previous voltage with leak; incoming current with (1 - leak) (Fang et al. 2020)
- Make leak numerically stable with sigmoid (Fang et al. 2020)
- Learnable threshold instead of bias
- Per-channel leaks normally distributed (Yin et al. 2021)
- Residual added to spikes (Fang et al. 2021)
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
stride=1,
activation="arctanspike",
act_width=10.0,
leak=(-4.0, 0.1),
thresh=(0.8, 0.0),
learn_leak=True,
learn_thresh=True,
hard_reset=True,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, stride=stride, padding=padding, bias=False)
if learn_leak:
self.leak = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak[1] + leak[0])
else:
self.register_buffer("leak", torch.randn(hidden_size, 1, 1) * leak[1] + leak[0])
if learn_thresh:
self.thresh = nn.Parameter(torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
else:
self.register_buffer("thresh", torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
# weight init
w_scale = math.sqrt(1 / input_size)
nn.init.uniform_(self.ff.weight, -w_scale, w_scale)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
# norm
if norm == "weight":
self.ff = nn.utils.weight_norm(self.ff)
self.norm = None
elif norm == "group":
groups = min(1, input_size // 4) # at least instance norm
self.norm = nn.GroupNorm(groups, input_size)
else:
self.norm = None
def forward(self, input_, prev_state, residual=0):
# input current
if self.norm is not None:
input_ = self.norm(input_)
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(2, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z = prev_state # unbind op, removes dimension
# clamp thresh
thresh = self.thresh.clamp_min(0.01)
# get leak
leak = torch.sigmoid(self.leak)
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak * (1 - z) + (1 - leak) * ff
else:
v_out = v * leak + (1 - leak) * ff - z * thresh
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out + residual, torch.stack([v_out, z_out])
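# Minimal usage sketch for ConvLIF (shapes and values are illustrative):
#   cell = ConvLIF(input_size=2, hidden_size=32, kernel_size=3)
#   x = torch.randn(4, 2, 64, 64)        # (batch, channels, H, W)
#   state = None
#   for _ in range(10):                  # unroll over time steps
#       spikes, state = cell(x, state)   # spikes: (4, 32, 64, 64)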
class ConvPLIF(nn.Module):
"""
Convolutional spiking LIF cell with adaptation based on pre-synaptic trace.
Adapted from Paredes-Valles et al. 2020.
Design choices: see ConvLIF.
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
stride=1,
activation="arctanspike",
act_width=10.0,
leak_v=(-4.0, 0.1),
leak_pt=(-4.0, 0.1),
add_pt=(-2.0, 0.1),
thresh=(0.8, 0.0),
learn_leak=True,
learn_thresh=True,
hard_reset=True,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, stride=stride, padding=padding, bias=False)
self.pool = nn.AvgPool2d(kernel_size, stride, padding=padding)
if learn_leak:
self.leak_v = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.leak_pt = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
self.add_pt = nn.Parameter(torch.randn(hidden_size, 1, 1) * add_pt[1] + add_pt[0])
else:
self.register_buffer("leak_v", torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.register_buffer("leak_pt", torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
self.register_buffer("add_pt", torch.randn(hidden_size, 1, 1) * add_pt[1] + add_pt[0])
if learn_thresh:
self.thresh = nn.Parameter(torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
else:
self.register_buffer("thresh", torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
# weight init
w_scale = math.sqrt(1 / input_size)
nn.init.uniform_(self.ff.weight, -w_scale, w_scale)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
def forward(self, input_, prev_state, residual=0):
# input current
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(3, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z, pt = prev_state # unbind op, removes dimension
# clamp thresh
thresh = self.thresh.clamp_min(0.01)
# get leaks
leak_v = torch.sigmoid(self.leak_v)
leak_pt = torch.sigmoid(self.leak_pt)
# get pt scaling
add_pt = torch.sigmoid(self.add_pt)
# pre-trace update: decay, add
# mean of incoming channels, avg pooling over receptive field
pt_out = pt * leak_pt + (1 - leak_pt) * self.pool(input_.abs().mean(1, keepdim=True))
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak_v * (1 - z) + (1 - leak_v) * (ff - add_pt * pt_out)
else:
v_out = v * leak_v + (1 - leak_v) * (ff - add_pt * pt_out) - z * thresh
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out + residual, torch.stack([v_out, z_out, pt_out])
class ConvALIF(nn.Module):
"""
Convolutional spiking ALIF cell.
Design choices:
- Adaptive threshold (Bellec et al. 2020, Yin et al. 2021)
- Parameters from Yin et al. 2021
- Arctan surrogate grad (Fang et al. 2021)
- Soft reset (Ledinauskas et al. 2020, Yin et al. 2021)
- Detach reset (Zenke et al. 2021)
- Multiply previous voltage with leak; incoming current with (1 - leak) (Fang et al. 2020)
- Make leak numerically stable with sigmoid (Fang et al. 2020)
- Per-channel leaks normally distributed (Yin et al. 2021)
- Residual added to spikes (Fang et al. 2021)
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
stride=1,
activation="arctanspike",
act_width=10.0,
leak_v=(-4.0, 0.1),
leak_t=(-4.0, 0.1),
t0=(0.01, 0.0),
t1=(1.8, 0.0),
learn_leak=True,
learn_thresh=False,
hard_reset=False,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, stride=stride, padding=padding, bias=False)
if learn_leak:
self.leak_v = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.leak_t = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_t[1] + leak_t[0])
else:
self.register_buffer("leak_v", torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.register_buffer("leak_t", torch.randn(hidden_size, 1, 1) * leak_t[1] + leak_t[0])
if learn_thresh:
self.t0 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.t1 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
else:
self.register_buffer("t0", torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.register_buffer("t1", torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
# weight init
w_scale = math.sqrt(1 / input_size)
nn.init.uniform_(self.ff.weight, -w_scale, w_scale)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
def forward(self, input_, prev_state, residual=0):
# input current
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(3, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z, t = prev_state # unbind op, removes dimension
# clamp thresh
t0 = self.t0.clamp_min(0.01)
t1 = self.t1.clamp_min(0)
# get leaks
leak_v = torch.sigmoid(self.leak_v)
leak_t = torch.sigmoid(self.leak_t)
# threshold update: decay, add
t_out = t * leak_t + (1 - leak_t) * z
# threshold: base + adaptive
thresh = t0 + t1 * t_out
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak_v * (1 - z) + (1 - leak_v) * ff
else:
v_out = v * leak_v + (1 - leak_v) * ff - z * (t0 + t1 * t)
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out + residual, torch.stack([v_out, z_out, t_out])
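# For reference, the soft-reset ALIF update implemented above:
#   t[k]      = sigmoid(leak_t) * t[k-1] + (1 - sigmoid(leak_t)) * z[k-1]
#   thresh[k] = t0 + t1 * t[k]
#   v[k]      = sigmoid(leak_v) * v[k-1] + (1 - sigmoid(leak_v)) * I[k] - z[k-1] * (t0 + t1 * t[k-1])
#   z[k]      = spike_fn(v[k], thresh[k], act_width)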
class ConvXLIF(nn.Module):
"""
Convolutional spiking LIF cell with threshold adaptation based on pre-synaptic trace.
Crossing between PLIF and ALIF.
Design choices: see ConvALIF.
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
stride=1,
activation="arctanspike",
act_width=10.0,
leak_v=(-4.0, 0.1),
leak_pt=(-4.0, 0.1),
t0=(0.01, 0.0),
t1=(1.8, 0.0),
learn_leak=True,
learn_thresh=False,
hard_reset=False,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, stride=stride, padding=padding, bias=False)
self.pool = nn.AvgPool2d(kernel_size, stride, padding=padding)
if learn_leak:
self.leak_v = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.leak_pt = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
else:
self.register_buffer("leak_v", torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.register_buffer("leak_pt", torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
if learn_thresh:
self.t0 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.t1 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
else:
self.register_buffer("t0", torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.register_buffer("t1", torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
# weight init
w_scale = math.sqrt(1 / input_size)
nn.init.uniform_(self.ff.weight, -w_scale, w_scale)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
def forward(self, input_, prev_state, residual=0):
# input current
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(3, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z, pt = prev_state # unbind op, removes dimension
# clamp thresh
t0 = self.t0.clamp_min(0.01)
t1 = self.t1.clamp_min(0)
# get leaks
leak_v = torch.sigmoid(self.leak_v)
leak_pt = torch.sigmoid(self.leak_pt)
# pre-trace update: decay, add
# mean of incoming channels, avg pooling over receptive field
pt_out = pt * leak_pt + (1 - leak_pt) * self.pool(input_.abs().mean(1, keepdim=True))
# threshold: base + adaptive
thresh = t0 + t1 * pt_out
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak_v * (1 - z) + (1 - leak_v) * ff
else:
v_out = v * leak_v + (1 - leak_v) * ff - z * (t0 + t1 * pt)
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out + residual, torch.stack([v_out, z_out, pt_out])
class ConvLIFRecurrent(nn.Module):
"""
Convolutional recurrent spiking LIF cell.
Design choices:
- Arctan surrogate grad (Fang et al. 2021)
- Hard reset (Ledinauskas et al. 2020)
- Detach reset (Zenke et al. 2021)
- Multiply previous voltage with leak; incoming current with (1 - leak) (Fang et al. 2020)
- Make leak numerically stable with sigmoid (Fang et al. 2020)
- Learnable threshold instead of bias
- Per-channel leaks normally distributed (Yin et al. 2021)
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
activation="arctanspike",
act_width=10.0,
leak=(-4.0, 0.1),
thresh=(0.8, 0.0),
learn_leak=True,
learn_thresh=True,
hard_reset=True,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, padding=padding, bias=False)
self.rec = nn.Conv2d(hidden_size, hidden_size, kernel_size, padding=padding, bias=False)
if learn_leak:
self.leak = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak[1] + leak[0])
else:
self.register_buffer("leak", torch.randn(hidden_size, 1, 1) * leak[1] + leak[0])
if learn_thresh:
self.thresh = nn.Parameter(torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
else:
self.register_buffer("thresh", torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
# weight init
w_scale_ff = math.sqrt(1 / input_size)
w_scale_rec = math.sqrt(1 / hidden_size)
nn.init.uniform_(self.ff.weight, -w_scale_ff, w_scale_ff)
nn.init.uniform_(self.rec.weight, -w_scale_rec, w_scale_rec)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
# norm
if norm == "weight":
self.ff = nn.utils.weight_norm(self.ff)
self.rec = nn.utils.weight_norm(self.rec)
self.norm_ff = None
self.norm_rec = None
elif norm == "group":
groups_ff = min(1, input_size // 4) # at least instance norm
groups_rec = min(1, hidden_size // 4) # at least instance norm
self.norm_ff = nn.GroupNorm(groups_ff, input_size)
self.norm_rec = nn.GroupNorm(groups_rec, hidden_size)
else:
self.norm_ff = None
self.norm_rec = None
def forward(self, input_, prev_state):
# input current
if self.norm_ff is not None:
input_ = self.norm_ff(input_)
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(2, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z = prev_state # unbind op, removes dimension
# recurrent current
if self.norm_rec is not None:
z = self.norm_rec(z)
rec = self.rec(z)
# clamp thresh
thresh = self.thresh.clamp_min(0.01)
# get leak
leak = torch.sigmoid(self.leak)
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak * (1 - z) + (1 - leak) * (ff + rec)
else:
v_out = v * leak + (1 - leak) * (ff + rec) - z * thresh
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out, torch.stack([v_out, z_out])
class ConvPLIFRecurrent(nn.Module):
"""
Convolutional recurrent spiking LIF cell with adaptation based on pre-synaptic trace.
Adapted from Paredes-Valles et al. 2020.
Design choices: see ConvLIFRecurrent.
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
activation="arctanspike",
act_width=10.0,
leak_v=(-4.0, 0.1),
leak_pt=(-4.0, 0.1),
add_pt=(-2.0, 0.1),
thresh=(0.8, 0.0),
learn_leak=True,
learn_thresh=True,
hard_reset=True,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, padding=padding, bias=False)
self.rec = nn.Conv2d(hidden_size, hidden_size, kernel_size, padding=padding, bias=False)
self.pool = nn.AvgPool2d(kernel_size, stride=1, padding=padding)
if learn_leak:
self.leak_v = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.leak_pt = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
self.add_pt = nn.Parameter(torch.randn(hidden_size, 1, 1) * add_pt[1] + add_pt[0])
else:
self.register_buffer("leak_v", torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.register_buffer("leak_pt", torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
self.register_buffer("add_pt", torch.randn(hidden_size, 1, 1) * add_pt[1] + add_pt[0])
if learn_thresh:
self.thresh = nn.Parameter(torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
else:
self.register_buffer("thresh", torch.randn(hidden_size, 1, 1) * thresh[1] + thresh[0])
# weight init
w_scale_ff = math.sqrt(1 / input_size)
w_scale_rec = math.sqrt(1 / hidden_size)
nn.init.uniform_(self.ff.weight, -w_scale_ff, w_scale_ff)
nn.init.uniform_(self.rec.weight, -w_scale_rec, w_scale_rec)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
def forward(self, input_, prev_state, residual=0):
# input current
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(3, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z, pt = prev_state # unbind op, removes dimension
# recurrent current
rec = self.rec(z)
# clamp thresh
thresh = self.thresh.clamp_min(0.01)
# get leaks
leak_v = torch.sigmoid(self.leak_v)
leak_pt = torch.sigmoid(self.leak_pt)
# get pt scaling
add_pt = torch.sigmoid(self.add_pt)
# pre-trace update: decay, add
# mean of incoming channels, avg pooling over receptive field
pt_out = pt * leak_pt + (1 - leak_pt) * self.pool(input_.abs().mean(1, keepdim=True))
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak_v * (1 - z) + (1 - leak_v) * (ff + rec - add_pt * pt_out)
else:
v_out = v * leak_v + (1 - leak_v) * (ff + rec - add_pt * pt_out) - z * thresh
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out + residual, torch.stack([v_out, z_out, pt_out])
class ConvALIFRecurrent(nn.Module):
"""
Convolutional recurrent spiking ALIF cell.
Design choices:
- Adaptive threshold (Bellec et al. 2020, Yin et al. 2021)
- Parameters from Yin et al. 2021
- Arctan surrogate grad (Fang et al. 2021)
- Soft reset (Ledinauskas et al. 2020, Yin et al. 2021)
- Detach reset (Zenke et al. 2021)
- Multiply previous voltage with leak; incoming current with (1 - leak) (Fang et al. 2020)
- Make leak numerically stable with sigmoid (Fang et al. 2020)
- Per-channel leaks normally distributed (Yin et al. 2021)
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
activation="arctanspike",
act_width=10.0,
leak_v=(-4.0, 0.1),
leak_t=(-4.0, 0.1),
t0=(0.01, 0.0),
t1=(1.8, 0.0),
learn_leak=True,
learn_thresh=False,
hard_reset=False,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, padding=padding, bias=False)
self.rec = nn.Conv2d(hidden_size, hidden_size, kernel_size, padding=padding, bias=False)
if learn_leak:
self.leak_v = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.leak_t = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_t[1] + leak_t[0])
else:
self.register_buffer("leak_v", torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.register_buffer("leak_t", torch.randn(hidden_size, 1, 1) * leak_t[1] + leak_t[0])
if learn_thresh:
self.t0 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.t1 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
else:
self.register_buffer("t0", torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.register_buffer("t1", torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
# weight init
w_scale_ff = math.sqrt(1 / input_size)
w_scale_rec = math.sqrt(1 / hidden_size)
nn.init.uniform_(self.ff.weight, -w_scale_ff, w_scale_ff)
nn.init.uniform_(self.rec.weight, -w_scale_rec, w_scale_rec)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
def forward(self, input_, prev_state):
# input current
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(3, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z, t = prev_state # unbind op, removes dimension
# recurrent current
rec = self.rec(z)
# clamp thresh
t0 = self.t0.clamp_min(0.01)
t1 = self.t1.clamp_min(0)
# get leaks
leak_v = torch.sigmoid(self.leak_v)
leak_t = torch.sigmoid(self.leak_t)
# threshold update: decay, add
t_out = t * leak_t + (1 - leak_t) * z
# threshold: base + adaptive
thresh = t0 + t1 * t_out
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak_v * (1 - z) + (1 - leak_v) * (ff + rec)
else:
v_out = v * leak_v + (1 - leak_v) * (ff + rec) - z * (t0 + t1 * t)
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out, torch.stack([v_out, z_out, t_out])
class ConvXLIFRecurrent(nn.Module):
"""
Convolutional recurrent spiking LIF cell with threshold adaptation based on pre-synaptic trace.
Crossing between PLIF and ALIF.
Design choices: see ConvALIFRecurrent.
"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
stride=1,
activation="arctanspike",
act_width=10.0,
leak_v=(-4.0, 0.1),
leak_pt=(-4.0, 0.1),
t0=(0.01, 0.0),
t1=(1.8, 0.0),
learn_leak=True,
learn_thresh=False,
hard_reset=False,
detach=True,
norm=None,
):
super().__init__()
# shapes
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
# parameters
self.ff = nn.Conv2d(input_size, hidden_size, kernel_size, stride=stride, padding=padding, bias=False)
self.rec = nn.Conv2d(hidden_size, hidden_size, kernel_size, padding=padding, bias=False)
self.pool = nn.AvgPool2d(kernel_size, stride, padding=padding)
if learn_leak:
self.leak_v = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.leak_pt = nn.Parameter(torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
else:
self.register_buffer("leak_v", torch.randn(hidden_size, 1, 1) * leak_v[1] + leak_v[0])
self.register_buffer("leak_pt", torch.randn(hidden_size, 1, 1) * leak_pt[1] + leak_pt[0])
if learn_thresh:
self.t0 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.t1 = nn.Parameter(torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
else:
self.register_buffer("t0", torch.randn(hidden_size, 1, 1) * t0[1] + t0[0])
self.register_buffer("t1", torch.randn(hidden_size, 1, 1) * t1[1] + t1[0])
# weight init
w_scale_ff = math.sqrt(1 / input_size)
w_scale_rec = math.sqrt(1 / hidden_size)
nn.init.uniform_(self.ff.weight, -w_scale_ff, w_scale_ff)
nn.init.uniform_(self.rec.weight, -w_scale_rec, w_scale_rec)
# spiking and reset mechanics
assert isinstance(
activation, str
), "Spiking neurons need a valid activation, see models/spiking_util.py for choices"
self.spike_fn = getattr(spiking, activation)
self.register_buffer("act_width", torch.tensor(act_width))
self.hard_reset = hard_reset
self.detach = detach
def forward(self, input_, prev_state):
# input current
ff = self.ff(input_)
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = torch.zeros(3, *ff.shape, dtype=ff.dtype, device=ff.device)
v, z, pt = prev_state # unbind op, removes dimension
# recurrent current
rec = self.rec(z)
# clamp thresh
t0 = self.t0.clamp_min(0.01)
t1 = self.t1.clamp_min(0)
# get leaks
leak_v = torch.sigmoid(self.leak_v)
leak_pt = torch.sigmoid(self.leak_pt)
# pre-trace update: decay, add
# mean of incoming channels, avg pooling over receptive field
pt_out = pt * leak_pt + (1 - leak_pt) * self.pool(input_.abs().mean(1, keepdim=True))
# threshold: base + adaptive
thresh = t0 + t1 * pt_out
# detach reset
if self.detach:
z = z.detach()
# voltage update: decay, reset, add
if self.hard_reset:
v_out = v * leak_v * (1 - z) + (1 - leak_v) * (ff + rec)
else:
v_out = v * leak_v + (1 - leak_v) * (ff + rec) - z * (t0 + t1 * pt)
# spike
z_out = self.spike_fn(v_out, thresh, self.act_width)
return z_out, torch.stack([v_out, z_out, pt_out])
class SpikingRecurrentConvLayer(nn.Module):
"""
Layer comprised of a convolution followed by a recurrent convolutional block,
both spiking. Default: no bias, arctanspike, no downsampling, no norm, LIF.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
recurrent_block_type="lif",
activation_ff="arctanspike",
activation_rec="arctanspike",
**kwargs,
):
super().__init__()
assert recurrent_block_type in ["lif", "alif", "plif", "xlif"]
if recurrent_block_type == "lif":
FeedforwardBlock = ConvLIF
RecurrentBlock = ConvLIFRecurrent
elif recurrent_block_type == "alif":
FeedforwardBlock = ConvALIF
RecurrentBlock = ConvALIFRecurrent
elif recurrent_block_type == "plif":
FeedforwardBlock = ConvPLIF
RecurrentBlock = ConvPLIFRecurrent
else:
FeedforwardBlock = ConvXLIF
RecurrentBlock = ConvXLIFRecurrent
kwargs.pop("spiking_feedforward_block_type", None)
self.conv = FeedforwardBlock(
in_channels,
out_channels,
kernel_size,
stride,
activation_ff,
**kwargs,
)
self.recurrent_block = RecurrentBlock(
out_channels, out_channels, kernel_size, activation=activation_rec, **kwargs
)
def forward(self, x, prev_state):
if prev_state is None:
prev_state = [None, None]
ff, rec = prev_state # unbind op, removes dimension
x1, ff = self.conv(x, ff)
x2, rec = self.recurrent_block(x1, rec)
return x2, torch.stack([ff, rec])
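# Usage sketch (illustrative sizes): the layer keeps one state per sub-block
# and returns them stacked, so callers can simply thread `state` through time:
#   layer = SpikingRecurrentConvLayer(2, 32, kernel_size=3, stride=2)
#   state = None
#   for x_t in sequence:                 # x_t: (B, 2, H, W)
#       y_t, state = layer(x_t, state)   # y_t: (B, 32, H/2, W/2)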
class SpikingResidualBlock(nn.Module):
"""
Spiking residual block as in "Spike-based Residual Blocks", Fang et al. 2021.
Default: no bias, arctanspike, no downsampling, no norm, LIF.
"""
def __init__(
self,
in_channels,
out_channels,
stride=1,
spiking_feedforward_block_type="lif",
activation="arctanspike",
**kwargs,
):
super().__init__()
assert spiking_feedforward_block_type in ["lif", "alif", "plif", "xlif"]
if spiking_feedforward_block_type == "lif":
FeedforwardBlock = ConvLIF
elif spiking_feedforward_block_type == "alif":
FeedforwardBlock = ConvALIF
elif spiking_feedforward_block_type == "plif":
FeedforwardBlock = ConvPLIF
else:
FeedforwardBlock = ConvXLIF
self.conv1 = FeedforwardBlock(
in_channels, out_channels, kernel_size=3, stride=stride, activation=activation, **kwargs
)
self.conv2 = FeedforwardBlock(
out_channels, out_channels, kernel_size=3, stride=1, activation=activation, **kwargs
)
def forward(self, x, prev_state):
if prev_state is None:
prev_state = [None, None]
conv1, conv2 = prev_state # unbind op, removes dimension
residual = x
x1, conv1 = self.conv1(x, conv1)
x2, conv2 = self.conv2(x1, conv2, residual=residual) # add res inside
return x2, torch.stack([conv1, conv2])
class SpikingUpsampleConvLayer(nn.Module):
"""
Upsampling spiking layer (bilinear interpolation + Conv2d) to increase spatial resolution (x2) in a decoder.
Default: no bias, arctanspike, no downsampling, no norm, LIF.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
spiking_feedforward_block_type="lif",
activation="arctanspike",
**kwargs,
):
super().__init__()
assert spiking_feedforward_block_type in ["lif", "alif", "plif", "xlif"]
if spiking_feedforward_block_type == "lif":
FeedforwardBlock = ConvLIF
elif spiking_feedforward_block_type == "alif":
FeedforwardBlock = ConvALIF
elif spiking_feedforward_block_type == "plif":
FeedforwardBlock = ConvPLIF
else:
FeedforwardBlock = ConvXLIF
self.conv2d = FeedforwardBlock(
in_channels, out_channels, kernel_size, stride=stride, activation=activation, **kwargs
)
def forward(self, x, prev_state):
x_up = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
x1, state = self.conv2d(x_up, prev_state)
return x1, state
class SpikingTransposedConvLayer(nn.Module):
"""
Transposed spiking convolutional layer to increase spatial resolution (x2) in a decoder.
Default: no bias, arctanspike, no downsampling, no norm, LIF.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
spiking_feedforward_block_type="lif",
activation="arctanspike",
**kwargs,
):
raise NotImplementedError
| python |
################################################################################
# Project : AuShadha
# Description : Surgical History Views
# Author : Dr.Easwar T.R
# Date : 16-09-2013
# License : GNU-GPL Version 3,Please see AuShadha/LICENSE.txt for details
################################################################################
# General Module imports-----------------------------------
import importlib
from datetime import datetime, date, time
# General Django Imports----------------------------------
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template import RequestContext
#from django.core.context_processors import csrf
from django.contrib.auth.models import User
import json
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
# Application Specific Model Imports-----------------------
import AuShadha.settings as settings
from AuShadha.settings import APP_ROOT_URL
from AuShadha.core.serializers.data_grid import generate_json_for_datagrid
from AuShadha.utilities.forms import aumodelformerrorformatter_factory
from AuShadha.apps.ui.ui import ui as UI
#from patient.models import PatientDetail
from history.surgical_history.models import SurgicalHistory, SurgicalHistoryForm
PatientDetail = UI.get_module("PatientRegistration")
# Views start here -----------------------------------------
@login_required
def surgical_history_json(request, patient_id = None):
try:
if patient_id:
patient_id = int(patient_id)
else:
action = unicode(request.GET.get('action'))
patient_id = int(request.GET.get('patient_id'))
if action == 'add':
return surgical_history_add(request, patient_id)
patient_detail_obj = PatientDetail.objects.get(pk=patient_id)
surgical_history_obj = SurgicalHistory.objects.filter(
patient_detail=patient_detail_obj)
jsondata = generate_json_for_datagrid(surgical_history_obj)
return HttpResponse(jsondata, content_type="application/json")
except(AttributeError, NameError, TypeError, ValueError, KeyError):
raise Http404("ERROR:: Bad request.Invalid arguments passed")
except(PatientDetail.DoesNotExist):
raise Http404("ERROR:: Patient requested does not exist.")
@login_required
def surgical_history_add(request, patient_id = None):
success = True
error_message = None
form_errors = None
addData = None
if request.user:
user = request.user
try:
if patient_id:
patient_id = int(patient_id)
else:
patient_id = int(request.GET.get('patient_id'))
patient_detail_obj = PatientDetail.objects.get(pk=patient_id)
#patient_detail_obj.generate_urls()
if not getattr(patient_detail_obj, 'urls', None):
patient_detail_obj.save()
p_urls = patient_detail_obj.urls
surgical_history_obj = SurgicalHistory(patient_detail=patient_detail_obj)
except (TypeError, ValueError, AttributeError):
raise Http404("BadRequest")
except PatientDetail.DoesNotExist:
raise Http404("BadRequest: Patient Data Does Not Exist")
if request.method == "GET" and request.is_ajax():
surgical_history_form = SurgicalHistoryForm( instance=surgical_history_obj, auto_id = False )
variable = RequestContext(request,
{"user": user,
"patient_detail_obj": patient_detail_obj,
"surgical_history_form": surgical_history_form,
"surgical_history_obj": surgical_history_obj,
'addUrl' : p_urls['add']['surgical_history']
})
return render_to_response('surgical_history/add.html', variable)
elif request.method == 'POST' and request.is_ajax():
surgical_history_form = SurgicalHistoryForm(request.POST,
instance=surgical_history_obj)
if surgical_history_form.is_valid():
surgical_history_obj = surgical_history_form.save()
#surgical_history_obj.generate_urls()
m_urls = surgical_history_obj.urls
print "Surgical History URLS: "
print m_urls
#patient_detail_obj.generate_urls()
p_urls = patient_detail_obj.urls
fields_list = [field for field in surgical_history_obj._meta.fields if field.serialize]
success = True
error_message = "Surgical History Data Edited Successfully"
form_errors = None
addData = {f.name:f.value_to_string(surgical_history_obj) for f in fields_list}
addData['add'] = p_urls['add']['surgical_history']
addData['json']= p_urls['json']['surgical_history']
addData['edit']= m_urls['edit']
addData['del'] = m_urls['del']
else:
success = False
error_message = aumodelformerrorformatter_factory(surgical_history_form)
form_errors = True
addData = None
data = {
'success': success,
'error_message': error_message,
"form_errors": None,
"addData": addData
}
jsondata = json.dumps(data)
return HttpResponse(jsondata, content_type='application/json')
else:
raise Http404("BadRequest: Unsupported Request Method")
else:
raise Http404("You need to Login")
@login_required
def surgical_history_edit(request, surgical_history_id = None):
if request.user:
user = request.user
try:
surgical_history_id = int(surgical_history_id)
surgical_history_obj = SurgicalHistory.objects.get(pk= surgical_history_id)
#surgical_history_obj.generate_urls()
if not getattr(surgical_history_obj, 'urls', None):
surgical_history_obj.save()
m_urls = surgical_history_obj.urls
except (TypeError, ValueError, AttributeError):
raise Http404("BadRequest")
except SurgicalHistory.DoesNotExist:
raise Http404("BadRequest: Patient Data Does Not Exist")
if request.method == "GET" and request.is_ajax():
print "Received request for Editing Surgical History"
print "Surgical History URLS is, ", m_urls
surgical_history_form = SurgicalHistoryForm(instance=surgical_history_obj, auto_id = False )
variable = RequestContext(request,
{ "user": user,
"patient_detail_obj" : surgical_history_obj.patient_detail,
"surgical_history_form": surgical_history_form,
"surgical_history_obj" : surgical_history_obj,
'editUrl' : m_urls['edit'],
'delUrl' : m_urls['del'],
})
return render_to_response('surgical_history/edit.html', variable)
elif request.method == 'POST' and request.is_ajax():
surgical_history_form = SurgicalHistoryForm(request.POST,
instance=surgical_history_obj)
if surgical_history_form.is_valid():
surgical_history_obj = surgical_history_form.save()
#surgical_history_obj.generate_urls()
m_urls = surgical_history_obj.urls
#surgical_history_obj.patient_detail.generate_urls()
patient_detail_obj = surgical_history_obj.patient_detail
if not getattr(patient_detail_obj, 'urls', None):
patient_detail_obj.save()
p_urls = patient_detail_obj.urls
fields_list = [field for field in surgical_history_obj._meta.fields if field.serialize]
success = True
error_message = "Surgical History Data Edited Successfully"
form_errors = None
addData = {f.name:f.value_to_string(surgical_history_obj) for f in fields_list}
addData['add'] = p_urls['add']['surgical_history']
addData['json']= p_urls['json']['surgical_history']
addData['edit']= m_urls['edit']
addData['del'] = m_urls['del']
else:
success = False
error_message = aumodelformerrorformatter_factory(surgical_history_form)
form_errors = True
addData = None
data = {
'success': success,
'error_message': error_message,
"form_errors": None,
"addData": addData
}
jsondata = json.dumps(data)
return HttpResponse(jsondata, content_type='application/json')
else:
raise Http404("BadRequest: Unsupported Request Method")
else:
raise Http404("You need to Login")
@login_required
def surgical_history_del(request, surgical_history_id = None):
user = request.user
if request.user and user.is_superuser:
if request.method == "GET":
try:
if surgical_history_id:
surgical_history_id = int(surgical_history_id)
else:
surgical_history_id = int(request.GET.get('surgical_history_id'))
surgical_history_obj = SurgicalHistory.objects.get(pk=surgical_history_id)
patient_detail_obj = surgical_history_obj.patient_detail
except (TypeError, ValueError, AttributeError):
raise Http404("BadRequest")
except SurgicalHistory.DoesNotExist:
raise Http404(
"BadRequest: Surgical History Data Does Not Exist")
surgical_history_obj.delete()
success = True
error_message = "Surgical History Data Deleted Successfully"
data = {'success': success, 'error_message': error_message}
jsondata = json.dumps(data)
return HttpResponse(jsondata, content_type='application/json')
else:
raise Http404("BadRequest: Unsupported Request Method")
else:
raise Http404("Server Error: No Permission to delete.")
| python |
"""First_Django_Project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path
import Farmer_Hand.views
from accounts.views import (login_view, register_view, logout_view)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', Farmer_Hand.views.index, name='index_page'),
url(r'^post/(?P<pk>[0-9]+)$', Farmer_Hand.views.view_post, name='view_post') ,
url(r'^login/',login_view, name='login'),
url(r'^post/', Farmer_Hand.views.view_post, name='post'),
url(r'^register/',register_view, name='register'),
url(r'^logout/',logout_view, name='logout'),
]
| python |
# -*- coding: utf-8 -*-
""" ADDL: Alzheimer's Disease Deep Learning Tool
Preprocess Pipeline:
Required arguments:
-P, --preprocess Data preprocess pipeline flag
--P_input_data_dir P_INPUT_DATA_DIR
Input directory containing original NIfTI files
--P_train_list P_TRAIN_LIST
Training data list file
--P_test_list P_TEST_LIST
Test data list file
--label_file LABEL_FILE
Label file
--output_dir OUTPUT_DIR
Output directory to contain all results
Optional arguments:
--P_png_low_index P_PNG_LOW_INDEX
Png file index from which to select, include.
default 10
--P_png_high_index P_PNG_HIGH_INDEX
Png file index till which to select, exclude.
default 72
Train Pipeline:
Required arguments:
-T, --train Model training flag
--T_input_data_dir T_INPUT_DATA_DIR
Input directory containing packed binary data
--T_run_id T_RUN_ID Name of tensorboard log file
--output_dir OUTPUT_DIR
Output directory to contain all results
Optional arguments:
--T_epoch T_EPOCH Epoch to train network. default 300
--T_batch_size T_BATCH_SIZE
Batch size. default 128
--T_tensorboard_verbose T_TENSORBOARD_VERBOSE
Tensorboard verbose level, 0 | 1 | 2 | 3.
default 3
--T_tensorboard_dir T_TENSORBOARD_DIR
Directory to contain tensorboard log file.
default /tmp/tflearn_logs/
Inference Pipeline:
Required arguments:
-I, --inference Subject level inference flag
--I_input_test_png_dir I_INPUT_TEST_PNG_DIR
Input directory containing testing set png files
--I_input_model I_INPUT_MODEL
Trained model
--label_file LABEL_FILE
Label file
--output_dir OUTPUT_DIR
Output directory to contain all results
Preprocess and Train Pipeline:
Required arguments:
-P, --preprocess Data preprocess pipeline flag
-T, --train Model training flag
--P_input_data_dir P_INPUT_DATA_DIR
Input directory containing original NIfTI files
--P_train_list P_TRAIN_LIST
Training data list file
--P_test_list P_TEST_LIST
Test data list file
--label_file LABEL_FILE
Label file
--T_run_id T_RUN_ID Name of tensorboard log file
--output_dir OUTPUT_DIR
Output directory to contain all results
Optional arguments:
--T_epoch T_EPOCH Epoch to train network. default 300
--T_batch_size T_BATCH_SIZE
Batch size. default 128
--T_tensorboard_verbose T_TENSORBOARD_VERBOSE
Tensorboard verbose level, 0 | 1 | 2 | 3.
default 3
--T_tensorboard_dir T_TENSORBOARD_DIR
Directory to contain tensorboard log file.
default /tmp/tflearn_logs/
Preprocess and Inference Pipeline:
Required arguments:
-P, --preprocess Data preprocess pipeline flag
-I, --inference Subject level inference flag
--P_input_data_dir P_INPUT_DATA_DIR
Input directory containing original NIfTI files
--P_study_specific_template P_STUDY_SPECIFIC_TEMPLATE
Study specific template file
--I_input_model I_INPUT_MODEL
Trained model
--output_dir OUTPUT_DIR
Output directory to contain all results
Structure of output_dir:
output_dir/ // Output directory specified in command line
├── data/ // Original data to preprocess
│ ├── struc/ // Preprocessed data and intermediate result
├── png/ // Decomposed PNG files
├── png_split/ // PNG files split into train and test set
│ ├── train/
│ ├── test/
├── data_binary/ // Packed train and test data in binary
├── data_binary_subject/ // Packed test data in binary by subject
├── model/ // Trained model parameters
├── ADSCReport.csv // Subject level test report
"""
from __future__ import division, print_function, absolute_import
import os
import argparse
parser = argparse.ArgumentParser(
description='Alzheimer\'s Disease Classification Tool')
parser.add_argument('-P', '--preprocess', action='store_true',
help='Data preprocess pipeline flag')
parser.add_argument('--P_input_data_dir',
help='Input directory containing original NIfTI files')
parser.add_argument('--P_train_list',
help='Training data list file')
parser.add_argument('--P_test_list',
help='Test data list file')
parser.add_argument('--P_study_specific_template',
help='Study specific template file')
parser.add_argument('--P_png_low_index', type=int, default=10,
help='Png file index from which to select, include. \
default 10')
parser.add_argument('--P_png_high_index', type=int, default=72,
help='Png file index till which to select, exclude. \
default 72')
parser.add_argument('-T', '--train', action='store_true',
help='Model training flag')
parser.add_argument('--T_input_data_dir',
help='Input directory containing packed binary data')
parser.add_argument('--T_run_id',
help='Name of tensorboard log file')
parser.add_argument('--T_epoch', type=int, default=300,
help='Epoch to train network. default 300')
parser.add_argument('--T_batch_size', type=int, default=128,
help='Batch size. default 128')
parser.add_argument('--T_tensorboard_verbose', type=int, default=3,
help='Tensorboard verbose level, 0 | 1 | 2 | 3. default 3')
parser.add_argument('--T_tensorboard_dir',
default='/tmp/tflearn_logs/',
help='Directory to contain tensorboard log file. \
default /tmp/tflearn_logs/')
parser.add_argument('-I', '--inference', action='store_true',
help='Subject level inference flag')
parser.add_argument('--I_input_test_png_dir',
help='Input directory containing testing set png files')
parser.add_argument('--I_input_model',
help='Trained model')
parser.add_argument('--label_file',
help='Label file')
parser.add_argument('--output_dir',
help='Output directory to contain all results')
args = parser.parse_args()
preprocess = args.preprocess
P_input_data_dir = args.P_input_data_dir
P_train_list = args.P_train_list
P_test_list = args.P_test_list
P_study_specific_template = args.P_study_specific_template
P_png_low_index = args.P_png_low_index
P_png_high_index = args.P_png_high_index
train = args.train
T_input_data_dir = args.T_input_data_dir
T_run_id = args.T_run_id
T_epoch = args.T_epoch
T_batch_size = args.T_batch_size
T_tensorboard_verbose = args.T_tensorboard_verbose
T_tensorboard_dir = args.T_tensorboard_dir
inference = args.inference
I_input_test_png_dir = args.I_input_test_png_dir
I_input_model = args.I_input_model
label_file = args.label_file
output_dir = args.output_dir
assert (preprocess or train or inference), \
"At least one behavior must be specified"
assert not (train and inference), "Train and inference unsupported."
g_dict_behavior = {
1 : 'Preprocess',
2 : 'Train',
4 : 'Inference',
3 : 'Preprocess and train',
5 : 'Preprocess and inference'
}
g_behavior = 0
if preprocess: g_behavior += 1
if train : g_behavior += 2
if inference : g_behavior += 4
##### Command line argument validity checking
def cli_check():
## Preprocess
dict_behavior1_required_argument = {
'P_input_data_dir' : P_input_data_dir,
'P_train_list' : P_train_list,
'P_test_list' : P_test_list,
'label_file' : label_file,
'output_dir' : output_dir
}
## Train
dict_behavior2_required_argument = {
'T_input_data_dir' : T_input_data_dir,
'T_run_id' : T_run_id,
'output_dir' : output_dir
}
## Inference
dict_behavior4_required_argument = {
'I_input_test_png_dir' : I_input_test_png_dir,
'I_input_model' : I_input_model,
'label_file' : label_file,
'output_dir' : output_dir
}
## Preprocessing and train
dict_behavior3_required_argument = {
'P_input_data_dir' : P_input_data_dir,
'P_train_list' : P_train_list,
'P_test_list' : P_test_list,
'T_run_id' : T_run_id,
'label_file' : label_file,
'output_dir' : output_dir
}
## Preprocess and inference
dict_behavior5_required_argument = {
'P_input_data_dir' : P_input_data_dir,
'P_study_specific_template' : P_study_specific_template,
'I_input_model' : I_input_model,
'output_dir' : output_dir
}
list_dict_behavior_required_argument = [
{},
dict_behavior1_required_argument,
dict_behavior2_required_argument,
dict_behavior3_required_argument,
dict_behavior4_required_argument,
dict_behavior5_required_argument
]
assert g_behavior in g_dict_behavior
print('\nBehavior:', g_dict_behavior[g_behavior])
for k, v in list_dict_behavior_required_argument[g_behavior].items():
assert v != None, 'missing required argument: ' + k
cli_check()
if P_input_data_dir != None and P_input_data_dir[-1] != '/':
P_input_data_dir += '/'
if T_input_data_dir != None and T_input_data_dir[-1] != '/':
T_input_data_dir += '/'
if T_tensorboard_dir != None and T_tensorboard_dir[-1] != '/':
T_tensorboard_dir += '/'
if I_input_test_png_dir != None and I_input_test_png_dir[-1] != '/':
I_input_test_png_dir += '/'
if output_dir != None and output_dir[-1] != '/':
output_dir += '/'
##### Tools
g_binSelectData = '../tools/data_acquire/pickupNiftiByDatalist.py'
g_dirPreprocess = './1.DataPreprocessing/'
g_binPreprocess = g_dirPreprocess + 'preprocess.py'
g_binPreprocessI = g_dirPreprocess + 'preprocessI.py'
g_DirDecomp = './2.NIfTI2PNG/'
g_binDecomp = g_DirDecomp + 'nii2Png.py'
g_binDecompNoLab = g_DirDecomp + 'nii2PngNoLabel.py'
g_binSplit = './2.NIfTI2PNG/splitTrainTestSet.py'
g_binBinData = './3.PNG2Binary/png2pkl.py'
g_binBinTestData = './3.PNG2Binary/png2pkl_sbjtest.py'
g_binModelTrain = './4.ModelTrainTest/residual_network_2classes.py'
g_binInference = './4.ModelTrainTest/residual_network_sbjrecognize_2classes.py'
##### Output directories
g_dataDir = output_dir + 'data/'
g_dataPrepDir = g_dataDir + 'struc/'
g_pngDir = output_dir + 'png/'
g_pngSplitDir = output_dir + 'png_split/'
g_pngSplitTrainDir = g_pngSplitDir + 'train/'
g_pngSplitTestDir = g_pngSplitDir + 'test/'
g_binDataDir = output_dir + 'data_binary/'
g_binTestDataDir = output_dir + 'data_binary_subject_testset/'
g_modelDir = output_dir + 'model/'
g_testReport = output_dir + 'ADSCReport.csv'
##### Execute cmd as Linux shell command
def exec_cmd(cmd):
print('exec_cmd(): cmd = ', cmd)
ret = os.system(cmd)
if ret != 0:
print('!!!FAILED!!!, exit.')
exit(-1)
cntEqual = 30
##### Preprocess function when only -P or -P -T are specified
def preprocess():
##### Stage1: Select Data
print('\n' + '='*cntEqual + ' ADDL Preprocess Stage1: Select Data ' + \
'='*cntEqual)
if os.path.exists(g_dataDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -rf ' + g_dataDir + '*')
cmd = 'python ' + g_binSelectData + ' ' + P_input_data_dir + ' '
cmd += P_train_list + ' ' + P_test_list + ' ' + g_dataDir
exec_cmd(cmd)
exec_cmd('touch ' + g_dataDir + 'DONE')
##### Stage2: Preprocess
print('\n' + '='*cntEqual + ' ADDL Preprocess Stage2: Preprocessing ' + \
'='*cntEqual)
if os.path.exists(g_dataPrepDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -f ' + g_dataPrepDir + '*')
cmd = 'python ' + g_binPreprocess + ' '
cmd += g_dataDir + ' --scriptsDir ' + g_dirPreprocess
exec_cmd(cmd)
exec_cmd('touch ' + g_dataPrepDir + 'DONE')
##### Stage3: Decompose Preprocessed Data into PNG Files
print('\n' + '='*cntEqual + \
' ADDL Preprocess Stage3: Decompose into PNG Files ' + '='*cntEqual)
if os.path.exists(g_pngDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -rf ' + g_pngDir + '*')
cmd = 'python ' + g_binDecomp + ' '
cmd += g_dataPrepDir + ' ' + g_pngDir + ' '
cmd += str(P_png_low_index) + ' ' + str(P_png_high_index) + ' '
cmd += label_file + ' --scriptsDir ' + g_DirDecomp
exec_cmd(cmd)
exec_cmd('touch ' + g_pngDir + 'DONE')
##### Stage4: Split PNG files into Training and Testing Set
print('\n' + '='*cntEqual + \
' ADDL Preprocess Stage4: Split into Training and Testing Set ' + \
'='*cntEqual)
if os.path.exists(g_pngSplitDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -rf ' + g_pngSplitDir + '*')
cmd = 'python ' + g_binSplit + ' ' + g_pngDir + ' '
cmd += P_train_list + ' ' + P_test_list + ' ' + g_pngSplitDir
exec_cmd(cmd)
exec_cmd('touch ' + g_pngSplitDir + 'DONE')
##### Stage5: Pack Training and Testing Data into Binary
print('\n' + '='*cntEqual + \
' ADDL Preprocess Stage5: Pack Data into Binary ' + '='*cntEqual)
if os.path.exists(g_binDataDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -f ' + g_binDataDir + '*')
cmd = 'python ' + g_binBinData + ' ' + g_pngSplitTrainDir + ' '
cmd += g_binDataDir + ' ' + label_file + ' train_'
exec_cmd(cmd)
cmd = 'python ' + g_binBinData + ' ' + g_pngSplitTestDir + ' '
cmd += g_binDataDir + ' ' + label_file + ' test_'
exec_cmd(cmd)
exec_cmd('touch ' + g_binDataDir + 'DONE')
##### Preprocess function when -P -I are specified
def preprocessI():
##### Stage1: Preprocess
print('\n' + '='*cntEqual + ' ADDL PreprocessI Stage1: Preprocessing ' + \
'='*cntEqual)
if os.path.exists(g_dataPrepDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('cp -r ' + P_input_data_dir + '* ' + g_dataDir)
exec_cmd('rm -f ' + g_dataPrepDir + '*')
cmd = 'python ' + g_binPreprocessI + ' ' + g_dataDir + ' '
cmd += P_study_specific_template + ' --scriptsDir ' + g_dirPreprocess
exec_cmd(cmd)
exec_cmd('touch ' + g_dataPrepDir + 'DONE')
##### Stage2: Decompose Preprocessed Data into PNG Files
print('\n' + '='*cntEqual + \
' ADDL PreprocessI Stage2: Decompose into PNG Files ' + '='*cntEqual)
if os.path.exists(g_pngDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -rf ' + g_pngDir + '*')
cmd = 'python ' + g_binDecompNoLab + ' '
cmd += g_dataPrepDir + ' ' + g_pngDir + ' '
cmd += str(P_png_low_index) + ' ' + str(P_png_high_index) + ' '
cmd += ' --scriptsDir ' + g_DirDecomp
exec_cmd(cmd)
exec_cmd('touch ' + g_pngDir + 'DONE')
##### Model training function
def train():
print('\n' + '='*cntEqual + ' ADDL Train Stage1: Training Model ' + \
'='*cntEqual)
if os.path.exists(g_modelDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -f ' + g_modelDir + '*')
cmd = 'python ' + g_binModelTrain + ' ' + T_input_data_dir + ' '
cmd += str(T_epoch) + ' ' + str(T_batch_size) + ' '
cmd += g_modelDir + ' ' + T_run_id
cmd += ' --tensorboardVerbose ' + str(T_tensorboard_verbose)
cmd += ' --tensorboardDir ' + T_tensorboard_dir
exec_cmd(cmd)
cmd = 'mv ' + g_modelDir[:-1] + '-* ' + g_modelDir
exec_cmd(cmd)
exec_cmd('touch ' + g_modelDir + 'DONE')
##### Subject level classification function
def inference(input_test_png_dir):
##### Stage1: Pack Testing Data into Binary
print('\n' + '='*cntEqual + \
' ADDL Inference Stage1: Pack Data into Binary by Subject ' + \
'='*cntEqual)
if os.path.exists(g_binTestDataDir + 'DONE'):
print('Already done. Skip.')
else:
exec_cmd('rm -rf ' + g_binTestDataDir + '*')
cmd = 'python ' + g_binBinTestData + ' '
cmd += input_test_png_dir + ' ' + g_binTestDataDir
if label_file != None:
cmd += ' --labelFile ' + label_file
exec_cmd(cmd)
exec_cmd('touch ' + g_binTestDataDir + 'DONE')
##### Stage2: Subject Level Classification
print('\n' + '='*cntEqual + \
' ADDL Inference Stage2: Subject Level Classification ' + \
'='*cntEqual)
if os.path.exists(g_testReport):
print('Already done. Skip.')
else:
cmd = 'python ' + g_binInference + ' '
cmd += g_binTestDataDir + ' ' + I_input_model + ' ' + g_testReport
exec_cmd(cmd)
print('\nCheck \'%s\' for test report.' % (g_testReport))
##### main()
## Initialize output directory
g_dirs = list([
output_dir,
g_dataDir,
g_pngDir,
g_pngSplitDir,
g_binDataDir,
g_binTestDataDir,
g_modelDir
])
for dd in g_dirs:
if not os.path.exists(dd): exec_cmd('mkdir ' + dd)
if 1 == g_behavior:
preprocess()
elif 2 == g_behavior:
train()
elif 4 == g_behavior:
inference(I_input_test_png_dir)
elif 3 == g_behavior:
preprocess()
T_input_data_dir = g_binDataDir
train()
elif 5 == g_behavior:
preprocessI()
inference(g_pngDir)
else:
print('\nImpossible\n')
exit(0)
| python |
from .serverless import ServerlessHandler
| python |
#!/usr/bin/env python
# Script in development for sending commands to multiple switches at once through Telnet.
import telnetlib
import time
TELNET_PORT = 23
TELNET_TIMEOUT = 6
def send_command(remote_conn, cmd):
cmd = cmd.rstrip()
remote_conn.write(cmd + '\n')
time.sleep(6)
return remote_conn.read_very_eager()
def login(remote_conn, username, password):
output = remote_conn.read_until("ername:", TELNET_TIMEOUT)
remote_conn.write(username + '\n')
output = remote_conn.read_until("ssword", TELNET_TIMEOUT)
remote_conn.write(password + '\n')
return output
def main():
ip_addrs = ['172.16.1.78','172.16.1.79','172.16.1.80','172.16.1.81']
for ip_addr in ip_addrs:
remote_conn = telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
username = 'cisco'
password = 'cisco'
output = login(remote_conn, username, password)
output = send_command(remote_conn, 'terminal length 0')
output = send_command(remote_conn, 'sh ip int br')
print output
if __name__ == "__main__":
main()
| python |
# Import library
import os
# Variable declarations
clientes = []
numCuentas = 0
opcion = 0
# Function definitions
def crearCuenta(clientes):
global numCuentas
    # This function creates a bank account
nombre = input('Introduzca nombre: ')
apellido = input('Introduzca apellido: ')
    # Create a dictionary where the keys are the field names
cuenta = {'nombre': nombre, 'apellido': apellido, 'cuenta': {'saldo': 0, 'numeroCuenta': numCuentas}}
clientes.append(cuenta)
numCuentas += 1
print('Cuenta creada ---> ' + str(numCuentas))
input('Pulse Enter para continuar...')
return clientes, numCuentas
def hacerDeposito(clientes):
    # This function increases the account balance (deposit)
if len(clientes) > 0:
cuenta = input('Inidique la cuenta al cual realizará el depósito: ')
cantidad = input('Indique la cantidad a depositar: ')
saldoActual = clientes[int(cuenta)]['cuenta']['saldo']
clientes[int(cuenta)]['cuenta']['saldo'] = saldoActual + int(cantidad)
print('Se ha realizado el depósito')
else:
print('No existen cuentas')
input('Pulse Enter para continuar...')
def verCuentas(clientes):
    # This function displays all accounts
if len(clientes) > 0:
for cliente in clientes:
print('Nombre: ' + cliente['nombre'])
print('Apellido: ' + cliente['apellido'])
print('N° Cuenta: ' + str(cliente['cuenta']['numeroCuenta']))
print('\n')
else:
print('No existen cuentas')
input('Pulse Enter para continuar...')
def consultarSaldo(clientes):
    # This function shows the balance of an account
if len(clientes) > 0:
cuenta = input('Inidique la cuenta que desea consultar: ')
print('El saldo de la cuenta ' + cuenta + ' es de: ' + str(clientes[int(cuenta)]['cuenta']['saldo']) + ' Dólares.')
else:
print('No existen cuentas')
input('Pulse Enter para continuar...')
def hacerRetiro(clientes):
    # This function subtracts from the account balance (withdrawal)
if len(clientes) > 0:
cuenta = input('Inidique la cuenta al cual realizará el retiro: ')
cantidad = input('Indique la cantidad a retirar: ')
saldoActual = clientes[int(cuenta)]['cuenta']['saldo']
clientes[int(cuenta)]['cuenta']['saldo'] = saldoActual - int(cantidad)
print('Se realizó el retiro')
else:
print('No existen cuentas')
input('Pulse Enter para continuar...')
while ('6' != opcion):
opcion = input('''Seleccione la operación a realizar:
1. Ver Cuentas
2. Crear Cuenta
3. Ver Saldo
4. Hacer Depósito
5. Hacer Retiro
6. Salir
''')
print('\n')
if opcion == '1':
verCuentas(clientes)
elif opcion == '2':
crearCuenta(clientes)
elif opcion == '3':
consultarSaldo(clientes)
elif opcion == '4':
hacerDeposito(clientes)
elif opcion == '5':
hacerRetiro(clientes)
os.system("CLS")
print('Fin del Programa')
| python |
from kedro.pipeline import Pipeline
from kedro_mlflow.pipeline.pipeline_ml import PipelineML
def pipeline_ml(
training: Pipeline, inference: Pipeline, input_name: str = None,
) -> PipelineML:
pipeline = PipelineML(
nodes=training.nodes, inference=inference, input_name=input_name
)
return pipeline
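# Example usage (a minimal, illustrative sketch -- the node functions and dataset names
# below are hypothetical, not part of this module):
#   from kedro.pipeline import node
#   training = Pipeline([node(train_model, inputs="training_data", outputs="model")])
#   inference = Pipeline([node(predict, inputs=["model", "instances"], outputs="predictions")])
#   ml_pipeline = pipeline_ml(training, inference, input_name="instances")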
| python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#http://www.cnblogs.com/way_testlife/archive/2011/04/17/2019013.html
import Image
im = Image.open("a.jpg")
# Print the image's original format, its (width, height) size, and its color mode
print im.format, im.size, im.mode
# Display the image
im.show()
| python |
# ----------------------------------
# CLEES DirectControl
# Author : Tompa
# ----------------------------------
# --- General libs
import json
# --- Private Libs
import clees_mqtt
# VAR ---
Dircntl = []
Repmsg = []
def init():
global Dircntl
global Repmsg
with open('clees_directcontrol.json') as f:
Dircntl = json.load(f)
Repmsg = Dircntl['reportmessages']
    # loop through all report-message entries and prepend the MQTT topic prefix
pretxt = clees_mqtt.getpretopic()
for i in range (0,len(Repmsg)):
Repmsg[i]['listenfor'] = pretxt +'/'+ Repmsg[i]['listenfor']
Repmsg[i]['sendto'] = pretxt +'/'+ Repmsg[i]['sendto']
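# Illustrative structure of clees_directcontrol.json, inferred from how init() and
# process() read it (the topic and message values are made-up examples):
# {
#   "reportmessages": [
#     { "listenfor": "sensor/door", "whenmsg": "open",
#       "sendto": "actuator/lamp", "withmsg": "on" }
#   ]
# }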
def process(topic,msg):
global Repmsg
for i in range (0,len(Repmsg)):
if Repmsg[i]['listenfor'] == topic:
if Repmsg[i]['whenmsg'] == msg:
clees_mqtt.publish(Repmsg[i]['sendto'],Repmsg[i]['withmsg'])
| python |
from flask import Flask, request, jsonify
import json
import requests
import shutil
import logging
import boto3
from botocore.exceptions import ClientError
import os
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from datetime import datetime
app = Flask(__name__)
# example mysql connection string: mysql://scott:tiger@localhost/foo
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ["MYSQL_Connection"]
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class Images(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
original_url = db.Column(db.String(128))
path = db.Column(db.String(128))
timestamp = db.Column(db.String(128))
def __init__(self, name, url, path):
self.name = name
self.original_url = url
self.path = path
self.timestamp = datetime.now()
@app.route("/")
def index():
return "FriendsChallenge!"
@app.route("/api/image",methods=["POST"])
def save_image():
image_url = request.get_json().get("image")
print("[+] downloading image")
image_file = requests.get(image_url, stream=True)
image_name = image_url.split("/")[-1]
s3_client = boto3.client("s3")
print("[+] saving image locally")
with open(image_name,"wb") as f:
image_file.raw.decode_content = True
shutil.copyfileobj(image_file.raw, f)
print("[+] Sending to s3")
s3_client.upload_file(image_name,os.environ["S3_BUCKET"],image_name)
os.remove(image_name)
image_db = Images(image_name,image_url,os.environ["S3_BUCKET"]+"/"+image_name)
db.session.add(image_db)
db.session.commit()
return jsonify({"message":"task completed!"}), 200
@app.route("/api/image",methods=["GET"])
def get_images():
all_images = db.session.query(Images).all()
list_images = []
for image in all_images:
list_images.append({"name":image.name,"path":image.path})
return jsonify({"images":list_images}), 200
if __name__ == "__main__":
app.run(debug=True,host="0.0.0.0") | python |
# -*- coding: utf-8 -*-
"""
Created on Tue May 29 11:23:10 2018
@author: eemeg
"""
def ICASAR(n_comp, spatial_data = None, temporal_data = None, figures = "window",
bootstrapping_param = (200,0), ica_param = (1e-4, 150), tsne_param = (30,12), hdbscan_param = (35,10),
out_folder = './ICASAR_results/', ica_verbose = 'long', inset_axes_side = {'x':0.1, 'y':0.1},
create_all_ifgs_flag = False, max_n_all_ifgs = 1000, load_fastICA_results = False):
"""
Perform ICASAR, which is a robust way of applying sICA to data. As PCA is also performed as part of this,
    the sources and time courses found by PCA are also returned. Note that this can be run with either 1d data (e.g. time series for a GPS station),
or on 2d data (e.g. a time series of interferograms) by providing a 'mask', that is used to convert 1d row vectors to 2d masked arrays.
A note on reference areas/pixels:
ICASAR requires each interferogram to be mean centered (ie the mean of all the pixels for a single interferogram is 0).
Therefore, when the time series is reconstructed using the result of ICASAR (i.e. tcs * sources), these will produce
the mean centered time series. If you wish to work
Inputs:
        n_comp | int | Number of components that are retained from PCA and used as the input for ICA.
spatial_data | dict or None | Required:
displacement_r2 | rank 2 array | row vectors of the ifgs
                                      mask | rank 2 array | mask to convert the row vectors to rank 2 masked arrays.
Optional (ie don't have to exist in the dictionary):
ifg_dates | list | dates of the interferograms in the form YYYYMMDD_YYYYMMDD. If supplied, IC strength vs temporal baseline plots will be produced.
lons | rank 2 array | lons of each pixel in the image. Changed to rank 2 in version 2.0, from rank 1 in version 1.0 . If supplied, ICs will be geocoded as kmz.
lats | rank 2 array | lats of each pixel in the image. Changed to rank 2 in version 2.0, from rank 1 in version 1.0
dem | rank 2 array | height in metres of each pixel in the image. If supplied, IC vs dem plots will be produced.
temporal_data | dict or None | contains 'mixtures_r2' as time signals as row vectors and 'xvals' which are the times for each item in the time signals.
        figures | string, "window" / "png" / "none" / "png+window" | controls if figures are produced; note that 'none' is the string 'none', not the NoneType None
bootstrapping_param | tuple | (number of ICA runs with bootstrap, number of ICA runs without bootstrapping ) e.g. (100,10)
ica_param | tuple | Used to control ICA, (ica_tol, ica_maxit)
hdbscan_param | tuple | Used to control the clustering (min_cluster_size, min_samples)
tsne_param | tuple | Used to control the 2d manifold learning (perplexity, early_exaggeration)
out_folder | string | if desired, can set the name of the folder results are saved to. Should end with a /
ica_verbose | 'long' or 'short' | if long, full details of ICA runs are given. If short, only the overall progress
inset_axes_side | dict | inset axes side length as a fraction of the full figure, in x and y direction in the 2d figure of clustering results.
create_all_ifgs_flag | boolean | If spatial_data contains incremental ifgs (i.e. the daisy chain), these can be recombined to create interferograms
between all possible acquisitions to improve performance with lower magnitude signals (that are hard to see in
in short temporal baseline ifgs).
e.g. for 3 interferogams between 4 acquisitions: a1__i1__a2__i2__a3__i3__a4
This option would also make: a1__i4__a3, a1__i5__a4, a2__i6__a4
max_n_all_ifgs | If after creating all the ifgs there are more than this number, select only this many at random. Useful as the number of ifgs created grows with the square of the number of ifgs.
        load_fastICA_results | boolean | The multiple runs of FastICA are slow, so if no parameters are being changed here, previous runs can be reloaded.
Outputs:
S_best | rank 2 array | the recovered sources as row vectors (e.g. 5 x 1230)
mask | rank 2 boolean | Same as inputs, but useful to save. mask to convert the ifgs as rows into rank 2 masked arrays. Used for figure outputs, an
tcs | rank 2 array | the time courses for the recoered sources (e.g. 17 x 5)
source_residuals | ? | the residual when each input mixture is reconstructed using the sources and time courses
Iq_sorted | ?| the cluster quality index for each centrotype
n_clusters | int | the number of clusters found. Doens't include noise, so is almost always 1 less than the length of Iq
S_all_info | dictionary| useful for custom plotting. Sources: all the sources in a rank 3 array (e.g. 500x500 x1200 for 6 sources recovered 200 times)
                                labels: label for each source
                                xy: x and y coordinates for the 2d representation of all sources
        phUnw_mean | r2 array | the mean for each interferogram. Subtract from (tcs * sources) to get back the original ifgs.
History:
2018/06/?? | MEG | Written
2019/11/?? | MEG | Rewrite to be more robust and readable
2020/06/03 | MEG | Update figure outputs.
2020/06/09 | MEG | Add a raise Exception so that data cannot have nans in it.
2020/06/12 | MEG | Add option to name outfolder where things are saved, and save results there as a pickle.
2020/06/24 | MEG | Add the ica_verbose option so that ICASAR can be run without generating too many terminal outputs.
2020/09/09 | MEG | Major update to now handle temporal data (as well as spatial data)
2020/09/11 | MEG | Small update to allow an argument to be passed to plot_2d_interactive_fig to set the size of the inset axes.
        2020/09/16 | MEG | Update to clarify the names of whether variables contain mixtures or sources.
2021/04/13 | MEG | Update so that lons and lats are now rank2 tensors (ie matrices with a lon or lat for each pixel)
2021/04/13 | MEG | Add option to create_all_ifgs_from_incremental
2021_10_07 | MEG | Add option to limit the number of ifgs created from incremental. (e.g. if 5000 are generated but default value of 1000 is used, 1000 will be randomly chosen from the 5000)
2021_10_20 | MEG | Also save the 2d position of each source, and its HDBSSCAN label in the .pickle file.
Stack overview:
PCA_meg2 # do PCA
            maps_tcs_rescale              # rescale spatial maps from PCA so they have the same range, then rescale the time courses so the product is unchanged (makes comparison easier)
pca_variance_line # plot of variance for each PC direction
component_plot with PCA sources
bootstrap_ICA with bootstrapping
bootstrap_ICA and without bootstrapping
bootstrapped_sources_to_centrotypes # run HDBSCAN (clustering), TSNE (2d manifold) and make figure showing this. Choose source most representative of each cluster (centrotype).
        plot_2d_interactive_fig           # interactive figure showing clustering and 2d manifold representation.
        bss_components_inversion          # inversion to get time courses for each centrotype.
component_plot # with ICASAR sources
r2_arrays_to_googleEarth # geocode spatial sources and make a .kmz for use with Google Earth.
"""
# external functions
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import shutil # used to make/remove folders etc
import os # ditto
import pickle # to save outputs.
from pathlib import Path
# internal functions
from icasar.blind_signal_separation import PCA_meg2
from icasar.aux import bss_components_inversion, maps_tcs_rescale, r2_to_r3, r2_arrays_to_googleEarth, dem_and_temporal_source_figure
from icasar.aux import plot_spatial_signals, plot_temporal_signals, plot_pca_variance_line
from icasar.aux import prepare_point_colours_for_2d, prepare_legends_for_2d, create_all_ifgs, signals_to_master_signal_comparison, plot_source_tc_correlations
from icasar.aux2 import plot_2d_interactive_fig, baseline_from_names, update_mask_sources_ifgs
# -10: Check for an unusual combination of inputs:
if (create_all_ifgs_flag) and ('ifg_dates' not in spatial_data.keys()):
raise Exception(f"'ifg_dates' (in the form yyyymmdd_yyyymmdd) are usually optional, but not if the 'create_all_ifgs_flag' is set to True. Exiting. " )
# -9 Check inputs, unpack either spatial or temporal data, and check for nans
if temporal_data is None and spatial_data is None: # check inputs
raise Exception("One of either spatial or temporal data must be supplied. Exiting. ")
if temporal_data is not None and spatial_data is not None:
raise Exception("Only either spatial or temporal data can be supplied, but not both. Exiting. ")
if spatial_data is not None: # if we have spatial data
mixtures = spatial_data['mixtures_r2'] # these are the mixtures we'll perform PCA and ICA on
mask = spatial_data['mask'] # the mask that converts row vector mixtures into 2d (rank 2) arrays.
if 'ifg_dates' in spatial_data: # dates the ifgs span is optional.
ifg_dates = spatial_data['ifg_dates']
else:
ifg_dates = None # set to None if there are none.
spatial = True
if temporal_data is not None: # if we have temporal data
mixtures = temporal_data['mixtures_r2'] # these are the mixture we'll perform PCA and ICA on.
xvals = temporal_data['xvals']
spatial = False
if np.max(np.isnan(mixtures)):
raise Exception("Unable to proceed as the data ('phUnw') contains Nans. ")
#-8: sort out various things for figures, and check input is of the correct form
if type(out_folder) == str:
print(f"Trying to conver the 'out_folder' arg which is a string to a pathlib Path. ")
out_folder = Path(out_folder)
fig_kwargs = {"figures" : figures}
if figures == "png" or figures == "png+window": # if figures will be png, make
fig_kwargs['png_path'] = out_folder # this will be passed to various figure plotting functions
elif figures == 'window' or figures == 'none':
pass
else:
raise ValueError("'figures' should be 'window', 'png', 'png+window', or 'None'. Exiting...")
# -7: Check argument
if ica_verbose == 'long':
fastica_verbose = True
elif ica_verbose == 'short':
fastica_verbose = False
else:
print(f"'ica_verbose should be either 'long' or 'short'. Setting to 'short' and continuing. ")
ica_verbose = 'short'
fastica_verbose = False
# -6: Determine if we have both lons and lats and so can geocode the ICs (ge_kmz = True), and check both rank 2
if spatial_data is not None: # if we're working with spatial data, we should check lons and lats as they determine if the ICs will be geocoded.
if ('lons' in spatial_data) and ('lats' in spatial_data): #
print(f"As 'lons' and 'lats' have been provided, the ICs will be geocoded. ")
if (len(spatial_data['lons'].shape) != 2) or (len(spatial_data['lats'].shape) != 2):
raise Exception(f"'lons' and 'lats' should be rank 2 tensors (i.e. matrices with a lon or lat for each pixel in the interferogram. Exiting... ")
ge_kmz = True
elif ('lons' in spatial_data) and ('lats' not in spatial_data):
raise Exception(f"Either both or neither of 'lons' and 'lats' should be provided, but only 'lons' was. Exiting... ")
elif ('lons' not in spatial_data) and ('lats' in spatial_data):
raise Exception(f"Either both or neither of 'lons' and 'lats' should be provided, but only 'lats' was. Exiting... ")
else:
ge_kmz = False
else:
ge_kmz = False # if there's no spatial data, assume that we must be working with temporal.
# -5: Check the temporal dimension of the time series and the ifg_dates agree
if spatial_data is not None: # if we're working with spatial data, we should check the ifgs and acq dates are the correct lengths as these are easy to confuse.
if ifg_dates is not None:
n_ifgs = spatial_data['mixtures_r2'].shape[0] # get the number of incremental ifgs
if n_ifgs != len(spatial_data['ifg_dates']): # and check it's equal to the list of ifg dates (YYYYMMDD_YYYYMMDD)
raise Exception(f"There should be an equal number of incremental interferogram and dates (in the form YYYYMMDD_YYYYMMDD), but they appear to be different. Exiting...")
# -4: Check the sizes of the spatial data inputs, and assign None to the DEM if it doesn't exist
if spatial_data is not None: # if we're working with spatial data
spatial_data_r2_arrays = ['mask', 'dem', 'lons', 'lats'] # we need to check the spatial data is the correct resolution (ie all the same)
spatial_data_r2_arrays_present = list(spatial_data.keys()) # we alse need to determine which of these spatial data we actually have.
spatial_data_r2_arrays = [i for i in spatial_data_r2_arrays if i in spatial_data_r2_arrays_present] # remove any from the check list incase they're not provided.
for spatial_data_r2_array1 in spatial_data_r2_arrays: # first loop through each spatial data
for spatial_data_r2_array2 in spatial_data_r2_arrays: # second loo through each spatial data
if spatial_data[spatial_data_r2_array1].shape != spatial_data[spatial_data_r2_array2].shape: # check the size is equal
raise Exception(f"All the spatial data should be the same size, but {spatial_data_r2_array1} is of shape {spatial_data[spatial_data_r2_array1].shape}, "
f"and {spatial_data_r2_array2} is of shape {spatial_data[spatial_data_r2_array2].shape}. Exiting.")
if 'dem' not in spatial_data_r2_arrays_present: # the dem is not compulsory
spatial_data['dem'] = None # so set it to None if not available.
# -3: Possibly change the matplotlib backend.
if figures == 'png':
plt.switch_backend('agg') # with this backend, no windows are created during figure creation.
# -2: create a folder that will be used for outputs
if os.path.exists(out_folder): # see if the folder we'll write to exists.
if load_fastICA_results: # we will need the .pkl of results from a previous run, so can't just delete the folder.
existing_files = os.listdir(out_folder) # get all the ICASAR outputs.
print(f"As 'load_fastICA' is set to True, all but the FastICA_results.pkl file will be deleted. ")
for existing_file in existing_files:
if existing_file == 'FastICA_results.pkl': # if it's the results from the time consuming FastICA runs...
pass # ignore it
else:
os.remove(out_folder / existing_file) # but if not, delete it.
else:
print("Removing the existing outputs directory and creating a new empty one... ", end = '') # if we don't care about the FastICA results file, just delete the folder and then make a new one.
shutil.rmtree(out_folder) # try to remove folder
os.mkdir(out_folder)
print("Done.")
else:
os.mkdir(out_folder) # if it never existed, make it.
n_converge_bootstrapping = bootstrapping_param[0] # unpack input tuples
n_converge_no_bootstrapping = bootstrapping_param[1]
# -1: Possibly create all interferograms from incremental
if create_all_ifgs_flag:
print(f"Creating all possible interferogram pairs from the incremental interferograms...", end = '')
mixtures_incremental = np.copy(mixtures) # make a copy of the originals that we can use to calculate the time courses.
mixtures_incremental_mc = mixtures_incremental - np.mean(mixtures_incremental, axis = 1)[:, np.newaxis] # mean centre the mixtures (i.e. the mean of each image is 0, so removes the effect of a reference pixel)
mixtures, ifg_dates = create_all_ifgs(mixtures_incremental, spatial_data['ifg_dates'], max_n_all_ifgs) # if ifg_dates is None, None is also returned.
print(" Done!")
# 0: Mean centre the mixtures
mixtures_mean = np.mean(mixtures, axis = 1)[:,np.newaxis] # get the mean for each ifg (ie along rows. )
mixtures_mc = mixtures - mixtures_mean # mean centre the data (along rows)
n_mixtures = np.size(mixtures_mc, axis = 0)
# 1: do sPCA once (and possibly create a figure of the PCA sources)
print('Performing PCA to whiten the data....', end = "")
PC_vecs, PC_vals, PC_whiten_mat, PC_dewhiten_mat, x_mc, x_decorrelate, x_white = PCA_meg2(mixtures_mc, verbose = False)
if spatial:
x_decorrelate_rs, PC_vecs_rs = maps_tcs_rescale(x_decorrelate[:n_comp,:], PC_vecs[:,:n_comp]) # rescale to new desicred range, and truncate to desired number of components.
else:
x_decorrelate_rs = x_decorrelate[:n_comp,:] # truncate to desirec number of components
PC_vecs_rs = PC_vecs[:,:n_comp]
print('Done!')
if fig_kwargs['figures'] != "none":
plot_pca_variance_line(PC_vals, title = '01_PCA_variance_line', **fig_kwargs)
if spatial:
plot_spatial_signals(x_decorrelate_rs.T, mask, PC_vecs_rs.T, mask.shape, title = '02_PCA_sources_and_tcs', shared = 1, **fig_kwargs) # the usual plot of the sources and their time courses (ie contributions to each ifg)
if ifg_dates is not None: # if we have ifg_dates
temporal_baselines = baseline_from_names(ifg_dates) # we can use these to calcaulte temporal baselines
spatial_data_temporal_info_pca = {'temporal_baselines' : temporal_baselines, 'tcs' : PC_vecs_rs} # and use them in the following figure
else:
spatial_data_temporal_info_pca = None # but we might also not have them
dem_and_temporal_source_figure(x_decorrelate_rs, spatial_data['mask'], fig_kwargs, spatial_data['dem'], # also compare the sources to the DEM, and the correlation between their time courses and the temporal baseline of each interferogram.
spatial_data_temporal_info_pca, fig_title = '03_PCA_source_correlations')
else:
plot_temporal_signals(x_decorrelate_rs, '02_PCA_sources', **fig_kwargs)
# 2: Make or load the results of the multiple ICA runs.
if load_fastICA_results:
print(f"Loading the results of multiple FastICA runs. ")
try:
with open(out_folder / 'FastICA_results.pkl', 'rb') as f:
S_hist = pickle.load(f)
A_hist = pickle.load(f)
except:
print(f"Failed to open the results from the previous runs of FastICA. Switching 'load_fastICA_results' to False and trying to continue anyway. ")
load_fastICA_results = False
if not load_fastICA_results:
print(f"No results were found for the multiple ICA runs, so these will now be performed. ")
S_hist, A_hist = perform_multiple_ICA_runs(n_comp, mixtures_mc, bootstrapping_param, ica_param,
x_white, PC_dewhiten_mat, ica_verbose)
with open(out_folder / 'FastICA_results.pkl', 'wb') as f:
pickle.dump(S_hist, f)
pickle.dump(A_hist, f)
# 3: Convert the sources from lists from each run to a single matrix.
if spatial:
sources_all_r2, sources_all_r3 = sources_list_to_r2_r3(S_hist, mask) # convert to more useful format. r2 one is (n_components x n_runs) x n_pixels, r3 one is (n_components x n_runs) x ny x nx, and a masked array
else:
sources_all_r2 = S_hist[0] # get the sources recovered by the first run
for S_hist_one in S_hist[1:]: # and then loop through the rest
sources_all_r2 = np.vstack((sources_all_r2, S_hist_one)) # stacking them vertically.
# 4: Do clustering and 2d manifold representation, plus get centrotypes of clusters, and make an interactive plot.
S_best, labels_hdbscan, xy_tsne, clusters_by_max_Iq_no_noise, Iq = bootstrapped_sources_to_centrotypes(sources_all_r2, hdbscan_param, tsne_param) # do the clustering and project to a 2d plane. clusters_by_max_Iq_no_noise is an array of which cluster number is best (ie has the highest Iq)
labels_colours = prepare_point_colours_for_2d(labels_hdbscan, clusters_by_max_Iq_no_noise) # make a list of colours so that each point with the same label has the same colour, and all noise points are grey
legend_dict = prepare_legends_for_2d(clusters_by_max_Iq_no_noise, Iq)
marker_dict = {'labels' : np.ravel(np.hstack((np.zeros((1, n_comp*n_converge_bootstrapping)), np.ones((1, n_comp*n_converge_no_bootstrapping)))))} # boostrapped are labelled as 0, and non bootstrapped as 1
marker_dict['styles'] = ['o', 'x'] # bootstrapped are 'o's, and non-bootstrapped are 'x's
plot_2d_labels = {'title' : '04_clustering_and_manifold_results',
'xlabel' : 'TSNE dimension 1',
'ylabel' : 'TSNE dimension 2'}
if spatial:
plot_2d_labels['title']
spatial_data_S_all = {'images_r3' : sources_all_r3} # spatial data stored in rank 3 format (ie n_imaces x height x width)
plot_2d_interactive_fig(xy_tsne.T, colours = labels_colours, spatial_data = spatial_data_S_all, # make the 2d interactive plot
labels = plot_2d_labels, legend = legend_dict, markers = marker_dict, inset_axes_side = inset_axes_side,
fig_filename = plot_2d_labels['title'], **fig_kwargs)
else:
temporal_data_S_all = {'tcs_r2' : sources_all_r2,
'xvals' : temporal_data['xvals'] } # make a dictionary of the sources recovered from each run
plot_2d_interactive_fig(xy_tsne.T, colours = labels_colours, temporal_data = temporal_data_S_all, # make the 2d interactive plot
labels = plot_2d_labels, legend = legend_dict, markers = marker_dict, inset_axes_side = inset_axes_side,
fig_filename = plot_2d_labels['title'], **fig_kwargs)
Iq_sorted = np.sort(Iq)[::-1]
n_clusters = S_best.shape[0] # the number of sources/centrotypes is equal to the number of clusters
# 5: Make time courses using centrotypes (i.e. S_best, the spatial patterns found by ICA)
if create_all_ifgs_flag:
inversion_results = bss_components_inversion(S_best, [mixtures_incremental_mc, mixtures_mc]) # invert to fit both the incremental and all possible ifgs.
tcs_all = inversion_results[1]['tcs'].T
else:
inversion_results = bss_components_inversion(S_best, [mixtures_mc]) # invert to fit the incremetal ifgs.
tcs_all = inversion_results[0]['tcs'].T
source_residuals = inversion_results[0]['residual']
tcs = inversion_results[0]['tcs'].T
# 6: Possibly make figure of the centrotypes (chosen sources) and time courses.
if fig_kwargs['figures'] != "none":
if spatial:
plot_spatial_signals(S_best.T, mask, tcs.T, mask.shape, title = '05_ICASAR_sourcs_and_tcs', shared = 1, **fig_kwargs) # plot the chosen sources
else:
plot_temporal_signals(S_best, '04_ICASAR_sources', **fig_kwargs)
# 7: Possibly geocode the recovered sources and make a Google Earth file.
if ge_kmz:
#import pdb; pdb.set_trace()
print('Creating a Google Earth .kmz of the geocoded independent components... ', end = '')
S_best_r3 = r2_to_r3(S_best, mask)
r2_arrays_to_googleEarth(S_best_r3, spatial_data['lons'], spatial_data['lats'], 'IC', out_folder = out_folder) # note that lons and lats should be rank 2 (ie an entry for each pixel in the ifgs)
print('Done!')
# 8: Calculate the correlations between the DEM and the ICs, and the ICs time courses and the temporal baselines of the interferograms.
if (spatial_data is not None):
if ifg_dates is not None: # if we have ifg_dates
            spatial_data_temporal_info_ica = {'temporal_baselines' : temporal_baselines, 'tcs' : tcs_all}    # use them in the following figure. Note that the time courses here are from ICA.
else:
spatial_data_temporal_info_ica = None # but we might also not have them
dem_and_temporal_source_figure(S_best, spatial_data['mask'], fig_kwargs, spatial_data['dem'], # also compare the sources to the DEM, and the correlation between their time courses and the temporal baseline of each interferogram.
spatial_data_temporal_info_ica, fig_title = '06_ICA_source_correlations')
# 11: Save the results:
print('Saving the key results as a .pkl file... ', end = '') # note that we don't save S_all_info as it's a huge file.
if spatial:
with open(out_folder / 'ICASAR_results.pkl', 'wb') as f:
pickle.dump(S_best, f)
pickle.dump(mask, f)
pickle.dump(tcs, f)
pickle.dump(source_residuals, f)
pickle.dump(Iq_sorted, f)
pickle.dump(n_clusters, f)
pickle.dump(xy_tsne, f)
pickle.dump(labels_hdbscan, f)
f.close()
print("Done!")
else: # if temporal data, no mask to save
with open(out_folder / 'ICASAR_results.pkl', 'wb') as f:
pickle.dump(S_best, f)
pickle.dump(tcs, f)
pickle.dump(source_residuals, f)
pickle.dump(Iq_sorted, f)
pickle.dump(n_clusters, f)
pickle.dump(xy_tsne, f)
pickle.dump(labels_hdbscan, f)
f.close()
print("Done!")
S_all_info = {'sources' : sources_all_r2, # package into a dict to return
'labels' : labels_hdbscan,
'xy' : xy_tsne }
return S_best, tcs, source_residuals, Iq_sorted, n_clusters, S_all_info, mixtures_mean
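# Example call (a minimal sketch; the variable names below are illustrative and assume
# 'displacement_r2' and 'tbaseline_info' have already been prepared, e.g. by LiCSBAS_to_ICASAR):
#   spatial_data = {'mixtures_r2' : displacement_r2['incremental'],
#                   'mask'        : displacement_r2['mask'],
#                   'ifg_dates'   : tbaseline_info['ifg_dates']}
#   outputs = ICASAR(n_comp = 5, spatial_data = spatial_data, out_folder = './ICASAR_results/')
#   S_best, tcs, residuals, Iq_sorted, n_clusters, S_all_info, mixtures_mean = outputs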
#%%
def LiCSBAS_to_ICASAR(LiCSBAS_out_folder, filtered = False, figures = False, n_cols=5, crop_pixels = None, return_r3 = False,
ref_area = False):
""" A function to prepare the outputs of LiCSBAS for use with LiCSALERT.
LiCSBAS uses nans for masked areas - here these are converted to masked arrays. Can also create three figures: 1) The Full LiCSBAS ifg, and the area
that it has been cropped to 2) The cumulative displacement 3) The incremental displacement.
Inputs:
h5_file | string | path to h5 file. e.g. cum_filt.h5
figures | boolean | if True, make figures
n_cols | int | number of columns for figures. May want to lower if plotting a long time series
crop_pixels | tuple | coords to crop images to. x then y, 00 is top left. e.g. (10, 500, 600, 900).
x_start, x_stop, y_start, y_stop, No checking that inputted values make sense.
                        Note, generally better to have cropped (clipped in LiCSBAS language) to the correct area in LiCSBAS_for_LiCSAlert
        return_r3 | boolean | if True, the rank 3 data is also returned (n_ifgs x height x width). Not used by ICASAR, so default is False
ref_area | boolean | If True, the reference area (in pixels, x then y) used by LiCSBAS is extracted and returned to the user.
Outputs:
        displacement_r3 | dict | Keys: cumulative, incremental. Stored as masked arrays. Mask should be consistent through time/interferograms
                                 Also lons and lats, which are the lons and lats of all pixels in the images (ie rank2, and not column or row vectors)
                                 Also DEM, mask, and E N U (look vector components in the east, north and up directions)
        displacement_r2 | dict | Keys: cumulative, incremental, mask. Stored as row vectors in arrays.
                                 Also lons and lats, which are the lons and lats of all pixels in the images (ie rank2, and not column or row vectors)
                                 Also DEM, mask, and E N U (look vector components in the east, north and up directions)
tbaseline_info | dict| imdates : acquisition dates as strings
daisy_chain : names of the daisy chain of ifgs, YYYYMMDD_YYYYMMDD
baselines : temporal baselines of incremental ifgs
2019/12/03 | MEG | Written
2020/01/13 | MEG | Update depreciated use of dataset.value to dataset[()] when working with h5py files from LiCSBAS
2020/02/16 | MEG | Add argument to crop images based on pixel, and return baselines etc
2020/11/24 | MEG | Add option to get lons and lats of pixels.
2021/04/15 | MEG | Update lons and lats to be packaged into displacement_r2 and displacement_r3
2021_04_16 | MEG | Add option to also open the DEM that is in the .hgt file.
2021_05_07 | MEG | Change the name of baseline_info to tbaseline_info to be consistent with LiCSAlert
        2021_09_22 | MEG | Add functionality to extract the look vector components (ENU files)
2021_09_23 | MEG | Add option to extract where the LiCSBAS reference area is.
2021_09_28 | MEG | Fix cropping option.
"""
import h5py as h5
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import os
import re
import pathlib
#from pathlib import Path
from icasar.aux2 import add_square_plot
from icasar.aux import col_to_ma
def rank3_ma_to_rank2(ifgs_r3, consistent_mask = False):
"""A function to take a time series of interferograms stored as a rank 3 array,
and convert it into the ICA(SAR) friendly format of a rank 2 array with ifgs as
row vectors, and an associated mask.
For use with ICA, the mask must be consistent (ie the same pixels are masked throughout the time series).
Inputs:
ifgs_r3 | r3 masked array | ifgs in rank 3 format
consistent_mask | boolean | If True, areas of incoherence are consistent through the whole stack
                                        If False, a consistent mask will be made. N.b. this step can reduce the number of pixels dramatically.
"""
n_ifgs = ifgs_r3.shape[0]
# 1: Deal with masking
mask_coh_water = ifgs_r3.mask #get the mask as a rank 3, still boolean
if consistent_mask:
mask_coh_water_consistent = mask_coh_water[0,] # if all ifgs are masked in the same way, just grab the first one
else:
mask_coh_water_sum = np.sum(mask_coh_water, axis = 0) # sum to make an image that shows in how many ifgs each pixel is incoherent
mask_coh_water_consistent = np.where(mask_coh_water_sum == 0, np.zeros(mask_coh_water_sum.shape),
np.ones(mask_coh_water_sum.shape)).astype(bool) # make a mask of pixels that are never incoherent
ifgs_r3_consistent = ma.array(ifgs_r3, mask = ma.repeat(mask_coh_water_consistent[np.newaxis,], n_ifgs, axis = 0)) # mask with the new consistent mask
# 2: Convert from rank 3 to rank 2
n_pixs = ma.compressed(ifgs_r3_consistent[0,]).shape[0] # number of non-masked pixels
ifgs_r2 = np.zeros((n_ifgs, n_pixs))
for ifg_n, ifg in enumerate(ifgs_r3_consistent):
ifgs_r2[ifg_n,:] = ma.compressed(ifg)
return ifgs_r2, mask_coh_water_consistent
def ts_quick_plot(ifgs_r3, title):
"""
A quick function to plot a rank 3 array of ifgs.
Inputs:
title | string | title
"""
n_ifgs = ifgs_r3.shape[0]
n_rows = int(np.ceil(n_ifgs / n_cols))
fig1, axes = plt.subplots(n_rows,n_cols)
fig1.suptitle(title)
for n_ifg in range(n_ifgs):
ax=np.ravel(axes)[n_ifg] # get axes on it own
matrixPlt = ax.imshow(ifgs_r3[n_ifg,],interpolation='none', aspect='equal') # plot the ifg
ax.set_xticks([])
ax.set_yticks([])
fig1.colorbar(matrixPlt,ax=ax)
ax.set_title(f'Ifg: {n_ifg}')
for axe in np.ravel(axes)[(n_ifgs):]: # delete any unused axes
axe.set_visible(False)
def daisy_chain_from_acquisitions(acquisitions):
"""Given a list of acquisiton dates, form the names of the interferograms that would create a simple daisy chain of ifgs.
Inputs:
            acquisitions | list | list of acquisition dates in form YYYYMMDD
Returns:
daisy_chain | list | names of daisy chain ifgs, in form YYYYMMDD_YYYYMMDD
History:
2020/02/16 | MEG | Written
"""
daisy_chain = []
n_acqs = len(acquisitions)
for i in range(n_acqs-1):
daisy_chain.append(f"{acquisitions[i]}_{acquisitions[i+1]}")
return daisy_chain
def baseline_from_names(names_list):
"""Given a list of ifg names in the form YYYYMMDD_YYYYMMDD, find the temporal baselines in days_elapsed
Inputs:
names_list | list | in form YYYYMMDD_YYYYMMDD
Returns:
baselines | list of ints | baselines in days
History:
2020/02/16 | MEG | Documented
"""
from datetime import datetime
baselines = []
for file in names_list:
master = datetime.strptime(file.split('_')[-2], '%Y%m%d')
slave = datetime.strptime(file.split('_')[-1][:8], '%Y%m%d')
baselines.append(-1 *(master - slave).days)
return baselines
def create_lon_lat_meshgrids(corner_lon, corner_lat, post_lon, post_lat, ifg):
""" Return a mesh grid of the longitudes and latitues for each pixels. Not tested!
I think Corner is the top left, but not sure this is always the case
"""
ny, nx = ifg.shape
x = corner_lon + (post_lon * np.arange(nx))
y = corner_lat + (post_lat * np.arange(ny))
xx, yy = np.meshgrid(x,y)
geocode_info = {'lons_mg' : xx,
'lats_mg' : yy}
return geocode_info
def get_param_par(mlipar, field):
"""
Get parameter from mli.par or dem_par file. Examples of fields are;
- range_samples
- azimuth_lines
- range_looks
- azimuth_looks
- range_pixel_spacing (m)
- azimuth_pixel_spacing (m)
- radar_frequency (Hz)
"""
import subprocess as subp
value = subp.check_output(['grep', field,mlipar]).decode().split()[1].strip()
return value
def read_img(file, length, width, dtype=np.float32, endian='little'):
"""
Read image data into numpy array.
endian: 'little' or 'big' (not 'little' is regarded as 'big')
"""
if endian == 'little':
data = np.fromfile(file, dtype=dtype).reshape((length, width))
else:
data = np.fromfile(file, dtype=dtype).byteswap().reshape((length, width))
return data
# -1: Check for common argument errors:
if not isinstance(LiCSBAS_out_folder, pathlib.PurePath):
raise Exception(f"'LiCSBAS_out_folder' must be a pathlib Path, but instead is a {type(LiCSBAS_out_folder)}. Exiting. ")
# 0: Work out the names of LiCSBAS folders - not tested exhaustively!
LiCSBAS_folders = {}
LiCSBAS_folders['all'] = os.listdir(LiCSBAS_out_folder)
for LiCSBAS_folder in LiCSBAS_folders['all']:
        if bool(re.match(re.compile('TS_.'), LiCSBAS_folder)):    # the timeseries output, which is named depending on multilooking and clipping.
LiCSBAS_folders['TS_'] = LiCSBAS_folder
else:
pass
if re.match(re.compile('GEOCml.+clip'), LiCSBAS_folder): # see if there is a folder of multilooked and clipped
LiCSBAS_folders['ifgs'] = LiCSBAS_folder
        elif re.match(re.compile('GEOCml.+'), LiCSBAS_folder):    # or a folder that is multilooked but not clipped
LiCSBAS_folders['ifgs'] = LiCSBAS_folder
        elif re.match(re.compile('GEOC'), LiCSBAS_folder):    # or fall back to the plain GEOC folder (no multilooking or clipping)
LiCSBAS_folders['ifgs'] = LiCSBAS_folder
else:
pass
if 'TS_' not in LiCSBAS_folders:
raise Exception(f"Unable to find the TS_* folder that contains the .h5 files with the LiCSBAS results. Exiting. ")
# 1: Open the h5 file with the incremental deformation in.
displacement_r3 = {} # here each image will 1 x width x height stacked along first axis
displacement_r2 = {} # here each image will be a row vector 1 x pixels stacked along first axis
tbaseline_info = {}
if filtered:
cumh5 = h5.File(LiCSBAS_out_folder / LiCSBAS_folders['TS_'] / 'cum_filt.h5' ,'r') # either open the filtered file from LiCSBAS
else:
cumh5 = h5.File(LiCSBAS_out_folder / LiCSBAS_folders['TS_'] / 'cum.h5' ,'r') # or the non filtered file from LiCSBAS
tbaseline_info["acq_dates"] = cumh5['imdates'][()].astype(str).tolist() # get the acquisition dates
cumulative = cumh5['cum'][()] # get cumulative displacements as a rank3 numpy array
cumulative *= 0.001 # LiCSBAS default is mm, convert to m
if ref_area:
ref_str = cumh5['refarea'][()]
ref_xy = {'x_start' : int(ref_str.split('/')[0].split(':')[0]), # convert the correct part of the string to an integer
'x_stop' : int(ref_str.split('/')[0].split(':')[1]),
'y_start' : int(ref_str.split('/')[1].split(':')[0]),
'y_stop' : int(ref_str.split('/')[1].split(':')[1])}
# 2: Mask the data
mask_coh_water = np.isnan(cumulative) # get where masked
displacement_r3["cumulative"] = ma.array(cumulative, mask=mask_coh_water) # rank 3 masked array of the cumulative displacement
displacement_r3["incremental"] = np.diff(displacement_r3['cumulative'], axis = 0) # displacement between each acquisition - ie incremental
if displacement_r3["incremental"].mask.shape == (): # in the case where no pixels are masked, the diff operation on the mask collapses it to nothing.
displacement_r3["incremental"].mask = mask_coh_water[1:] # in which case, we can recreate the mask from the rank3 mask, but dropping one from the first dimension as incremental is always one smaller than cumulative.
n_im, length, width = displacement_r3["cumulative"].shape
# if figures:
# ts_quick_plot(displacement_r3["cumulative"], title = 'Cumulative displacements')
# ts_quick_plot(displacement_r3["incremental"], title = 'Incremental displacements')
displacement_r2['cumulative'], displacement_r2['mask'] = rank3_ma_to_rank2(displacement_r3['cumulative']) # convert from rank 3 to rank 2 and a mask
displacement_r2['incremental'], _ = rank3_ma_to_rank2(displacement_r3['incremental']) # also convert incremental, no need to also get mask as should be same as above
    # 3: work with the acquisition dates to produce the names of the daisy chain ifgs, and the baselines
tbaseline_info["ifg_dates"] = daisy_chain_from_acquisitions(tbaseline_info["acq_dates"])
tbaseline_info["baselines"] = baseline_from_names(tbaseline_info["ifg_dates"])
tbaseline_info["baselines_cumulative"] = np.cumsum(tbaseline_info["baselines"]) # cumulative baslines, e.g. 12 24 36 48 etc
# 4: get the lons and lats of each pixel in the ifgs
geocode_info = create_lon_lat_meshgrids(cumh5['corner_lon'][()], cumh5['corner_lat'][()],
cumh5['post_lon'][()], cumh5['post_lat'][()], displacement_r3['incremental'][0,:,:]) # create meshgrids of the lons and lats for each pixel
displacement_r2['lons'] = geocode_info['lons_mg'] # add to the displacement dict
displacement_r2['lats'] = geocode_info['lats_mg']
displacement_r3['lons'] = geocode_info['lons_mg'] # add to the displacement dict (rank 3 one)
displacement_r3['lats'] = geocode_info['lats_mg']
# 4: Open the parameter file to get the number of pixels in width and height (though this should agree with above)
try:
width = int(get_param_par(LiCSBAS_out_folder / LiCSBAS_folders['ifgs'] / 'slc.mli.par', 'range_samples'))
length = int(get_param_par(LiCSBAS_out_folder / LiCSBAS_folders['ifgs'] / 'slc.mli.par', 'azimuth_lines'))
except:
print(f"Failed to open the 'slc.mli.par' file, so taking the width and length of the image from the h5 file and trying to continue. ")
(_, length, width) = cumulative.shape
# 5: get the DEM
try:
dem = read_img(LiCSBAS_out_folder / LiCSBAS_folders['ifgs'] / 'hgt', length, width)
displacement_r2['dem'] = dem # and added to the displacement dict in the same was as the lons and lats
displacement_r3['dem'] = dem #
except:
print(f"Failed to open the DEM from the hgt file for this volcano, but trying to continue anyway.")
# 6: Get the E N U files (these are the components of the ground to satellite look vector in east north up directions. )
try:
for component in ['E', 'N', 'U']:
look_vector_component = read_img(LiCSBAS_out_folder / LiCSBAS_folders['ifgs'] / f"{component}.geo", length, width)
displacement_r2[component] = look_vector_component
displacement_r3[component] = look_vector_component
except:
print(f"Failed to open the E N U files (look vector components), but trying to continue anyway.")
if crop_pixels is not None:
print(f"Cropping the images in x from {crop_pixels[0]} to {crop_pixels[1]} "
f"and in y from {crop_pixels[2]} to {crop_pixels[3]} (NB matrix notation - 0,0 is top left). ")
if figures:
ifg_n_plot = 1 # which number ifg to plot. Shouldn't need to change.
title = f'Cropped region, ifg {ifg_n_plot}'
fig_crop, ax = plt.subplots()
fig_crop.canvas.set_window_title(title)
ax.set_title(title)
ax.imshow(col_to_ma(displacement_r2['incremental'][ifg_n_plot,:], displacement_r2['mask']),
interpolation='none', aspect='auto') # plot the uncropped ifg
#import pdb; pdb.set_trace()
for product in displacement_r3:
if len(displacement_r3[product].shape) == 2: # if it's a rank 2, assume only x, y
resized_r2 = displacement_r3[product][crop_pixels[2]:crop_pixels[3], crop_pixels[0]:crop_pixels[1]] # and crop
displacement_r2[product] = resized_r2
displacement_r3[product] = resized_r2
elif len(displacement_r3[product].shape) == 3: # if it's a rank 3, assume times, x, y
resized_r3 = displacement_r3[product][:, crop_pixels[2]:crop_pixels[3], crop_pixels[0]:crop_pixels[1]] # and crop only last two dimensions
displacement_r3[product] = resized_r3
displacement_r2[product], displacement_r2['mask'] = rank3_ma_to_rank2(resized_r3) # convert from rank 3 to rank 2 and a mask
else:
pass
# for product in displacement_r3:
# print(f"{product} : {displacement_r3[product].shape}")
# import pdb; pdb.set_trace()
# for disp_dict in [displacement_r2, displacement_r3]:
# for product in disp_dict:
# if len(disp_dict[product].shape) == 2: # if it's a rank 2, assume only x, y
# disp_dict[product] = disp_dict[product][crop_pixels[2]:crop_pixels[3], crop_pixels[0]:crop_pixels[1]] # and crop
# elif len(disp_dict[product].shape) == 3: # if it's a rank 3, assume times, x, y
# disp_dict[product] = disp_dict[product][:, crop_pixels[2]:crop_pixels[3], crop_pixels[0]:crop_pixels[1]] # and crop only last two dimensions
# else:
# pass
if figures:
add_square_plot(crop_pixels[0], crop_pixels[1], crop_pixels[2], crop_pixels[3], ax) # draw a box showing the cropped region
if return_r3:
if ref_area:
return displacement_r3, displacement_r2, tbaseline_info, ref_xy
else:
return displacement_r3, displacement_r2, tbaseline_info
else:
if ref_area:
return displacement_r2, tbaseline_info, ref_xy
else:
return displacement_r2, tbaseline_info
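# Example (a minimal sketch; the path below is illustrative):
#   from pathlib import Path
#   displacement_r2, tbaseline_info = LiCSBAS_to_ICASAR(Path('./LiCSBAS_out/'), figures = False)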
#%%
def update_mask_sources_ifgs(mask_sources, sources, mask_ifgs, ifgs):
""" Given two masks of pixels, create a mask of pixels that are valid for both. Also return the two sets of data with the new masks applied.
Inputs:
mask_sources | boolean rank 2| original mask
sources | r2 array | sources as row vectors
mask_ifgs | boolean rank 2| new mask
ifgs | r2 array | ifgs as row vectors
Returns:
ifgs_new_mask
sources_new_mask
        mask_both | boolean rank 2 | mask of pixels that are valid (unmasked) in both mask_sources and mask_ifgs
History:
2020/02/19 | MEG | Written
2020/06/26 | MEG | Major rewrite.
2021_04_20 | MEG | Add check that sources and ifgs are both rank 2 (use row vectors if only one source, but it must be rank2 and not rank 1)
"""
import numpy as np
import numpy.ma as ma
from icasar.aux import col_to_ma
def apply_new_mask(ifgs, mask_old, mask_new):
"""Apply a new mask to a collection of ifgs (or sources) that are stored as row vectors with an accompanying mask.
Inputs:
ifgs | r2 array | ifgs as row vectors
mask_old | r2 array | mask to convert a row of ifg into a rank 2 masked array
mask_new | r2 array | the new mask to be applied. Note that it must not unmask any pixels that are already masked.
Returns:
ifgs_new_mask | r2 array | as per ifgs, but with a new mask.
History:
2020/06/26 | MEG | Written
"""
n_pixs_new = len(np.argwhere(mask_new == False))
ifgs_new_mask = np.zeros((ifgs.shape[0], n_pixs_new)) # initiate an array to store the modified sources as row vectors
for ifg_n, ifg in enumerate(ifgs): # Loop through each source
ifg_r2 = col_to_ma(ifg, mask_old) # turn it from a row vector into a rank 2 masked array
ifg_r2_new_mask = ma.array(ifg_r2, mask = mask_new) # apply the new mask
ifgs_new_mask[ifg_n, :] = ma.compressed(ifg_r2_new_mask) # convert to row vector and places in rank 2 array of modified sources
return ifgs_new_mask
    # check some inputs. Not exhaustive!
if (len(sources.shape) != 2) or (len(ifgs.shape) != 2):
raise Exception(f"Both 'sources' and 'ifgs' must be rank 2 arrays (even if they are only a single source). Exiting. ")
mask_both = ~np.logical_and(~mask_sources, ~mask_ifgs) # make a new mask for pixels that are in the sources AND in the current time series
n_pixs_sources = len(np.argwhere(mask_sources == False)) # masked pixels are 1s, so invert with 1- bit so that non-masked are 1s, then sum to get number of pixels
n_pixs_new = len(np.argwhere(mask_ifgs == False)) # ditto for new mask
n_pixs_both = len(np.argwhere(mask_both == False)) # ditto for the mutual mask
print(f"Updating masks and ICA sources. Of the {n_pixs_sources} in the sources and {n_pixs_new} in the current LiCSBAS time series, "
f"{n_pixs_both} are in both and can be used in this iteration of LiCSAlert. ")
ifgs_new_mask = apply_new_mask(ifgs, mask_ifgs, mask_both) # apply the new mask to the old ifgs and return the non-masked elemts as row vectors.
sources_new_mask = apply_new_mask(sources, mask_sources, mask_both) # ditto for the sources.
return ifgs_new_mask, sources_new_mask, mask_both
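
#%%

# A minimal usage sketch for update_mask_sources_ifgs (illustrative only: the array shapes,
# the random data and the two hand-made masks below are assumptions and not part of ICASAR).
def _example_update_mask_sources_ifgs():
    import numpy as np
    mask_sources = np.zeros((10, 10), dtype=bool)                                           # mask used when the sources were made...
    mask_sources[0, :] = True                                                               # ... masked along the top row
    mask_ifgs = np.zeros((10, 10), dtype=bool)                                              # mask of the new ifgs...
    mask_ifgs[:, 0] = True                                                                  # ... masked along the left column
    sources = np.random.randn(3, np.count_nonzero(~mask_sources))                           # 3 sources as row vectors
    ifgs = np.random.randn(5, np.count_nonzero(~mask_ifgs))                                 # 5 ifgs as row vectors
    ifgs_new, sources_new, mask_both = update_mask_sources_ifgs(mask_sources, sources, mask_ifgs, ifgs)
    assert ifgs_new.shape[1] == sources_new.shape[1] == np.count_nonzero(~mask_both)        # both now share the combined mask
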
#%%
def bootstrapped_sources_to_centrotypes(sources_r2, hdbscan_param, tsne_param):
""" Given the products of the bootstrapping, run the 2d manifold and clustering algorithms to create centrotypes.
Inputs:
        sources_r2 | rank 2 array | all the sources recovered after bootstrapping. If 5 components and 100 bootstrapped runs, this will be 500 x n_pixels (or n_times)
hdbscan_param | tuple | Used to control the clustering (min_cluster_size, min_samples)
tsne_param | tuple | Used to control the 2d manifold learning (perplexity, early_exaggeration)
Returns:
S_best | rank 2 array | the recovered sources as row vectors (e.g. 5 x 1230)
        labels_hdbscan | rank 1 array | the cluster number for each of the sources in sources_r2, e.g. 1000
        xy_tsne | rank 2 array | the x and y coordinates of where each source is in the 2D space. e.g. 1000x2
clusters_by_max_Iq_no_noise | rank 1 array | clusters ranked by quality index (Iq). e.g. 3,0,1,4,2
Iq | list | cluster quality index for each cluster. Entry 0 is Iq (cluster quality index) for the first cluster
History:
2020/08/26 | MEG | Created from a script.
2021_04_16 | MEG | Remove unused figure arguments.
"""
import numpy as np
import hdbscan # used for clustering
from sklearn.manifold import TSNE # t-distributed stochastic neighbour embedding
perplexity = tsne_param[0] # unpack tuples
early_exaggeration = tsne_param[1]
min_cluster_size = hdbscan_param[0]
min_samples = hdbscan_param[1]
# 1: Create the pairwise comparison matrix
print('\nStarting to compute the pairwise distance matrices....', end = '')
D, S = pairwise_comparison(sources_r2)
print('Done!')
# 2: Clustering with all the recovered sources
print('Starting to cluster the sources using HDBSCAN....', end = "")
clusterer_precom = hdbscan.HDBSCAN(metric = 'precomputed', min_cluster_size = min_cluster_size,
min_samples = min_samples, cluster_selection_method = 'leaf')
labels_hdbscan = clusterer_precom.fit_predict(D) # D is n_samples x n_samples, then returns a rank 1 which is the cluster number (ie label) for each source
Iq = cluster_quality_index(labels_hdbscan, S) # calculate the cluster quality index, using S (n_samples x n_samples), and the label for each one
# note that Iq is ordered by cluster, so the first value is the cluster quality index for 1st cluster (which is usually labelled -1 and the noise points)
if np.min(labels_hdbscan) == (-1): # if HDBSCAN has identified noise
Iq = Iq[1:] # delete the first entry, as this is the Iq of the noise (which isn't a cluster)
clusters_by_max_Iq_no_noise = np.argsort(Iq)[::-1] # clusters by best Iqfirst (ie cluster)
print('Done!')
# 3: 2d manifold with all the recovered sources
print('Starting to calculate the 2D manifold representation....', end = "")
manifold_tsne = TSNE(n_components = 2, metric = 'precomputed', perplexity = perplexity, early_exaggeration = early_exaggeration)
xy_tsne = manifold_tsne.fit(D).embedding_
print('Done!' )
# 4: Determine the number of clusters from HDBSCAN
    if np.min(labels_hdbscan) == (-1):                                                  # if we have noise (which is labelled as -1 by HDBSCAN),
n_clusters = np.size(np.unique(labels_hdbscan)) - 1 # noise doesn't count as a cluster so we -1 from number of clusters
else:
n_clusters = np.size(np.unique(labels_hdbscan)) # but if no noise, number of clusters is just number of different labels
if n_clusters == 0:
print("No clusters have been found. Often, this is caused by running the FastICA algorithm too few times, or setting"
"the hdbscan_param 'min_cluster_size' too high. ")
return None, labels_hdbscan, xy_tsne, clusters_by_max_Iq_no_noise, Iq
else:
# 4: Centrotypes (object that is most similar to all others in the cluster)
print('Calculating the centrotypes and associated time courses...', end = '')
S_best_args = np.zeros((n_clusters, 1)).astype(int)
for i, clust_number in enumerate(clusters_by_max_Iq_no_noise): # loop through each cluster in order of how good they are (i.e. highest Iq first)
source_index = np.ravel(np.argwhere(labels_hdbscan == clust_number)) # get the indexes of sources in this cluster
S_this_cluster = np.copy(S[source_index, :][:, source_index]) # similarities for just this cluster
in_cluster_arg = np.argmax(np.sum(S_this_cluster, axis = 1)) # the sum of a column of S_this... is the similarity between 1 source and all the others. Look for the column that's the maximum
            S_best_args[i,0] = source_index[in_cluster_arg]                                     # convert the number in the cluster to the number overall (ie 2nd in cluster is actually 120th source)
S_best = np.copy(sources_r2[np.ravel(S_best_args),:]) # these are the centrotype sources
print('Done!' )
return S_best, labels_hdbscan, xy_tsne, clusters_by_max_Iq_no_noise, Iq
#%%
def perform_multiple_ICA_runs(n_comp, mixtures_mc, bootstrapping_param, ica_param,
mixtures_white = None, dewhiten_matrix = None, ica_verbose = 'long'):
"""
    ICASAR requires ICA to be run many times, either with or without bootstrapping. This function performs this.
Inputs:
n_comp | int | the number of souces we aim to recover.
        mixtures_mc | rank 2 array | mixtures as rows, mean centered along rows. I.e. of size n_variables x n_observations.
bootstrapping_param | tuple | (number of ICA runs with bootstrap, number of ICA runs without bootstrapping ) e.g. (100,10)
ica_param | tuple | Used to control ICA, (ica_tol, ica_maxit)
        mixtures_white | rank 2 | mean centered and decorrelated and unit variance in each dimension (ie whitened). As per mixtures, row vectors.
dewhiten_matrix | rank 2 | n_comp x n_comp. mixtures_mc = dewhiten_matrix @ mixtures_white
ica_verbose | 'long' or 'short' | if long, full details of ICA runs are given. If short, only the overall progress
Returns:
        S_hist | list of rank 2 arrays | the sources from each run of the FastICA algorithm, n_comp x n_pixels. Bootstrapped ones first, non-bootstrapped second.
A_hist | list of rank 2 arrays | the time courses from each run of the FastICA algorithm. n_ifgs x n_comp. Bootstrapped ones first, non-bootstrapped second.
History:
2021_04_23 | MEG | Written
"""
# 1: unpack a tuple and check a few inputs.
n_converge_bootstrapping = bootstrapping_param[0] # unpack input tuples
n_converge_no_bootstrapping = bootstrapping_param[1]
if (n_converge_no_bootstrapping > 0) and ((mixtures_white is None) or (dewhiten_matrix is None)):
raise Exception(f"If runs without bootstrapping are to be performed, the whitened data and the dewhitening matrix must be provided, yet one "
f"or more of these are 'None'. This is as PCA is performed to whiten the data, yet if bootstrapping is not being used "
f"the data don't change, so PCA doesn't need to be run (and it can be computationally expensive). Exiting. ")
# 2: do ICA multiple times
# First with bootstrapping
    A_hist_BS = []                                                 # initiate to store time courses with bootstrapping
    S_hist_BS = []                                                 # and recovered sources
n_ica_converge = 0
n_ica_fail = 0
if ica_verbose == 'short' and n_converge_bootstrapping > 0: # if we're only doing short version of verbose, and will be doing bootstrapping
print(f"FastICA progress with bootstrapping: ", end = '')
while n_ica_converge < n_converge_bootstrapping:
S, A, ica_converged = bootstrap_ICA(mixtures_mc, n_comp, bootstrap = True, ica_param = ica_param, verbose = ica_verbose) # note that this will perform PCA on the bootstrapped samples, so can be slow.
if ica_converged:
n_ica_converge += 1
A_hist_BS.append(A) # record results
S_hist_BS.append(S) # record results
else:
n_ica_fail += 1
if ica_verbose == 'long':
print(f"sICA with bootstrapping has converged {n_ica_converge} of {n_converge_bootstrapping} times. \n") # longer (more info) update to terminal
else:
print(f"{int(100*(n_ica_converge/n_converge_bootstrapping))}% ", end = '') # short update to terminal
# and without bootstrapping
A_hist_no_BS = [] # initiate to store time courses without bootstrapping
S_hist_no_BS = [] # and recovered sources
n_ica_converge = 0 # reset the counters for the second lot of ica
n_ica_fail = 0
if ica_verbose == 'short' and n_converge_no_bootstrapping > 0: # if we're only doing short version of verbose, and are actually doing ICA with no bootstrapping
print(f"FastICA progress without bootstrapping: ", end = '')
while n_ica_converge < n_converge_no_bootstrapping:
S, A, ica_converged = bootstrap_ICA(mixtures_mc, n_comp, bootstrap = False, ica_param = ica_param,
X_whitened = mixtures_white, dewhiten_matrix = dewhiten_matrix, verbose = ica_verbose) # no bootstrapping, so PCA doesn't need to be run each time and we can pass it the whitened data.
if ica_converged:
n_ica_converge += 1
A_hist_no_BS.append(A) # record results
S_hist_no_BS.append(S) # record results
else:
n_ica_fail += 1
if ica_verbose == 'long':
print(f"sICA without bootstrapping has converged {n_ica_converge} of {n_converge_no_bootstrapping} times. \n",)
else:
print(f"{int(100*(n_ica_converge/n_converge_no_bootstrapping))}% ", end = '')
# 3: change data structure for sources, and compute similarities and distances between them.
A_hist = A_hist_BS + A_hist_no_BS # list containing the time courses from each run. i.e. each is: times x n_components
    S_hist = S_hist_BS + S_hist_no_BS                              # list containing the sources from each run. i.e. each is n_components x n_pixels
return S_hist, A_hist
#%%
def bootstrap_ICA(X, n_comp, bootstrap = True, ica_param = (1e-4, 150),
X_whitened = None, dewhiten_matrix = None, verbose = True):
""" A function to perform ICA either with or without boostrapping.
If not performing bootstrapping, performance can be imporoved by passing the whitened data and the dewhitening matrix
(so that PCA does not have to be peroformed).
Inputs:
X | rank2 array | data as row vectors (ie n_variables x n_samples)
n_comp | int | number of sources to recover
        bootstrap | boolean | If True, a bootstrapped sample of the data is drawn (with replacement) before PCA and ICA are run.
        ica_param | tuple | Used to control ICA, (ica_tol, ica_maxit)
        X_whitened | rank2 array | data as row vectors (e.g. 10 x 20,000 for 10 ifgs of 20000 pixels), but whitened. Useful to pass to function if not bootstrapping as
                                    this can then be calculated only once.
        dewhiten_matrix | rank2 array | Converts the time courses recovered when using whitened data back to unwhitened.
size is n_ifgs x n_sources.
X_white = A x S
X = dewhiten x A x S
Needed if not bootstrapping and don't want to do PCA each time (as above)
verbose | boolean | If True, the FastICA algorithm returns how many times it took to converge (or if it didn't converge)
Returns:
S | rank2 array | sources as row vectors (ie n_sources x n_samples)
A | rank 2 array | time courses as columns (ie n_ifgs x n_sources)
        ica_success | boolean | True if the FastICA algorithm converges.
History:
2020/06/05 | MEG | Written
        2020/06/09 | MEG | Update to be able to handle the case in which PCA fails (normally to do with finding the inverse of a matrix)
"""
import numpy as np
from icasar.blind_signal_separation import PCA_meg2, fastica_MEG
from icasar.aux import maps_tcs_rescale
n_loop_max = 1000 # when trying to make bootstrapped samples, if one can't be found after this many attempts, raise an error. Best left high.
n_ifgs = X.shape[0]
# 0: do the bootstrapping and determine if we need to do PCA
if bootstrap:
pca_needed = True # PCA will always be needed if bootstrapping
input_ifg_args = np.arange(n_comp-1) # initiate as a crude way to get into the loop
n_loop = 0 # to count how many goes it takes to generate a good bootstrap sample
        while len(np.unique(input_ifg_args)) < n_comp and n_loop < n_loop_max:                  # try making a list of samples to bootstrap with, providing it has enough unique items for subsequent pca to work
input_ifg_args = np.random.randint(0, n_ifgs, n_ifgs) # generate indexes of samples to select for bootstrapping
n_loop += 1
        if n_loop == n_loop_max:                                                                # if we exited because we were stuck in a loop, error message and stop
            raise Exception(f'Unable to bootstrap the data as the number of training data must be sufficiently'
                            f' bigger than "n_components" sought that there are "n_components" unique items in'
                            f' a bootstrapped sample. ')                                        # error message
        X = X[input_ifg_args, :]                                                                # bootstrapped sample
else: # if we're not bootstrapping, need to work out if we actually need to do PCA
if X_whitened is not None and dewhiten_matrix is not None:
pca_needed = False
else:
pca_needed = True
print(f"Even though bootstrapping is not being used, PCA is being performed. "
f"This step could be sped up significantly by running PCA beforehand and "
f"computing 'X_whiten' and 'dewhiten_matrix' only once. ")
# 1 get whitened data using PCA, if we need to (ie if X_whitened and dewhiten_matrix aren't provided)
if pca_needed:
try:
pca_vecs, _, _, dewhiten_matrix, _, _, X_whitened = PCA_meg2(X, verbose = False) # pca on bootstrapped data
pca_success = True
except:
pca_success = False
else:
pca_success = True
    if pca_success:                                                                             # If PCA was a success, do ICA (note, if not needed, success is set to True)
X_whitened = X_whitened[:n_comp,] # reduce dimensionality
W, S, A_white, _, _, ica_success = fastica_MEG(X_whitened, n_comp=n_comp, algorithm="parallel",
whiten=False, maxit=ica_param[1], tol = ica_param[0], verbose = verbose) # do ICA
        A = dewhiten_matrix[:,0:n_comp] @ A_white                                               # turn the ICA mixing matrix back into time courses (ie dewhiten)
        S, A = maps_tcs_rescale(S, A)                                                           # rescale so spatial maps have a range of 1 (so easy to compare)
return S, A, ica_success
else: # or if not a success, say that
ica_success = False
return None, None, ica_success
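
#%%

# A hedged usage sketch for bootstrap_ICA (illustrative only: the mixtures below are a random
# synthetic blend of non-Gaussian sources, and FastICA is not guaranteed to converge on any given run).
def _example_bootstrap_ICA():
    import numpy as np
    S_true = np.random.laplace(size = (4, 2000))                            # 4 synthetic non-Gaussian sources as row vectors
    A_true = np.random.randn(12, 4)                                         # mixing matrix for 12 synthetic ifgs
    X = A_true @ S_true                                                     # mixtures, 12 x 2000 (ie n_variables x n_samples)
    S, A, ica_success = bootstrap_ICA(X, n_comp = 4, bootstrap = True)      # PCA then FastICA on one bootstrapped sample
    if ica_success:
        print(f"Recovered sources: {S.shape}, time courses: {A.shape}")     # per the docstring: (4, 2000) and (12, 4)
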
#%%
def pairwise_comparison(sources_r2):
""" Compte the pairwise distances and similarities for ICA sources.
Note that this uses the absolute value of the similarities, so is invariant to sign flips of the data.
Inputs:
sources_r2 | rank 2 array | sources as row vectors
"""
import numpy as np
S = np.corrcoef(sources_r2) # Similarity matrix
    S = np.abs(S)                                                      # correlations of 1 and -1 are equivalent for our case
D = 1 - S # convert to dissimilarity
return D, S
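
#%%

# A short sanity-check sketch for pairwise_comparison (illustrative only: the sources below are
# random and their number and length are arbitrary).
def _example_pairwise_comparison():
    import numpy as np
    sources_r2 = np.random.randn(20, 1000)                                  # e.g. 20 recovered sources as row vectors of 1000 pixels
    D, S = pairwise_comparison(sources_r2)                                  # both are 20 x 20
    assert np.allclose(np.diag(S), 1.) and np.allclose(np.diag(D), 0.)      # every source is identical to itself
    assert np.allclose(D, D.T)                                              # and the dissimilarity matrix is symmetric
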
#%%
def sources_list_to_r2_r3(sources, mask = None):
"""A function to convert a list of the outputs of multiple ICA runs (which are lists) into rank 2 and rank 3 arrays.
Inputs:
sources | list | list of runs of ica (e.g. 10, or 20 etc.), each item would be n_sources x n_pixels
mask | boolean | Only needed for two_d. Converts row vector back to masked array.
Outputs:
sources_r2 | rank 2 array | each source as a row vector (e.g. n_sources_total x n_pixels)
        sources_r3 | rank 3 masked array | each source as a rank 2 image. (e.g. n_sources_total x source_height x source_width )
History:
2018_06_29 | MEG | Written
2020/08/27 | MEG | Update to handle both 1d and 2d signals.
2020/09/11 | MEG | Change sources_r3 so that it's now a masked array (sources_r3_ma)
"""
import numpy as np
import numpy.ma as ma
from icasar.aux import col_to_ma
n_converge_needed = len(sources)
n_comp = np.size(sources[0], axis = 0)
n_pixels = np.size(sources[0], axis = 1)
sources_r2 = np.zeros(((n_converge_needed * n_comp), n_pixels)) # convert from list to one big array
for i in range(n_converge_needed):
sources_r2[i*n_comp:((i*n_comp) + n_comp), :] = sources[i]
n_sources_total = np.size(sources_r2, axis = 0)
if mask is not None:
sources_r3 = ma.zeros((col_to_ma(sources_r2[0,:], mask).shape))[np.newaxis, :, :] # get the size of one image (so rank 2)
sources_r3 = ma.repeat(sources_r3, n_sources_total, axis = 0) # and then extend to make rank 3
for i in range(n_sources_total):
sources_r3[i,:,:] = col_to_ma(sources_r2[i,:], mask)
else:
sources_r3 = None
return sources_r2, sources_r3
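
#%%

# A minimal usage sketch for sources_list_to_r2_r3 (illustrative only: three fake ICA runs of
# random sources, and no mask, so only the rank 2 output is produced).
def _example_sources_list_to_r2_r3():
    import numpy as np
    sources = [np.random.randn(5, 1000) for run in range(3)]                # 3 runs, each recovering 5 sources of 1000 pixels
    sources_r2, sources_r3 = sources_list_to_r2_r3(sources, mask = None)
    assert sources_r2.shape == (15, 1000)                                   # all runs stacked as row vectors
    assert sources_r3 is None                                               # no mask was given, so no rank 3 masked array
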
#%%
def cluster_quality_index(labels, S):
"""
A function to calculate the cluster quality index (Iq). If a cluster has only one element in it,
the cluster quality index is set to nan (np.nan)
Inputs:
labels | rank 1 array | label number for each data point
        S | rank 2 array | similarity between each pair of data points
Returns:
Iq | list | cluster quality index
2018_05_28 | written
2018_05_30 | if clusters have only one point in them, set Iq to 0
"""
import numpy as np
Iq = [] # initiate cluster quality index
for i in np.unique(labels): # loop through each label (there will be as many loops here as there are clusters)
labels_1cluster = np.ravel(np.argwhere(labels == i))
if np.size(labels_1cluster) < 2: # check if cluster has only one point in it
Iq_temp = np.nan
else:
            S_intra = np.copy(S[labels_1cluster, :][:,labels_1cluster])                     # The similarities between the items in the cluster
            S_intra = np.where(np.eye(np.size(S_intra, axis = 0)) == 1, np.nan, S_intra)    # change the diagonals to nans
            S_inter = np.copy(S[labels_1cluster, :])                                        # The similarities between the items in the cluster and those out of the cluster
            S_inter = np.delete(S_inter, labels_1cluster, axis = 1)                         # horizontal axis remove similarities with itself
            Iq_temp = np.nanmean(S_intra) - np.mean(S_inter)                                # Iq is the difference between the mean of the similarities inside the cluster, and the mean similarity between items in the cluster and those out of the cluster
Iq.append(Iq_temp) # append whichever value of Iq (np.nan or a numeric value)
return Iq
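
#%%

# A worked sketch of cluster_quality_index on a tiny hand-made similarity matrix (illustrative only).
# Points 0 and 1 form one tight cluster and point 2 is a singleton, so the first Iq should be the
# within-cluster mean similarity minus the out-of-cluster mean, and the second should be nan.
def _example_cluster_quality_index():
    import numpy as np
    S = np.array([[1.0, 0.9, 0.1],
                  [0.9, 1.0, 0.2],
                  [0.1, 0.2, 1.0]])                     # similarity between the 3 points
    labels = np.array([0, 0, 1])                        # points 0 and 1 in cluster 0, point 2 alone in cluster 1
    Iq = cluster_quality_index(labels, S)
    assert np.isclose(Iq[0], 0.9 - 0.15)                # within-cluster mean (0.9) minus out-of-cluster mean ((0.1 + 0.2) / 2)
    assert np.isnan(Iq[1])                              # single-point clusters get nan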
| python |
#!/usr/bin/env python3
"""
Aggregate machine ads into time bins by site
"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import elasticsearch
import elasticsearch_dsl as edsl
import datetime
import dateutil
import re
import logging
import time
from urllib.parse import urlparse, urlunparse
def parse_timedelta(time_str):
parts = re.match(
r"((?P<days>(\d+?\.?\d*))d)?((?P<hours>(\d+?\.?\d*))h)?((?P<minutes>(\d+?\.?\d*))m)?((?P<seconds>(\d+?\.?\d*))s)?",
time_str,
)
if not parts:
raise ValueError
parts = parts.groupdict()
if not any([v is not None for v in list(parts.values())]):
raise ValueError
time_params = {}
for (name, param) in parts.items():
if param:
time_params[name] = float(param)
return datetime.timedelta(**time_params)
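
# Illustrative examples of the compact duration strings parse_timedelta accepts (a hedged sketch,
# not part of the original script): components can be combined and floats are allowed.
def _example_parse_timedelta():
    assert parse_timedelta("20m") == datetime.timedelta(minutes=20)
    assert parse_timedelta("2d") == datetime.timedelta(days=2)
    assert parse_timedelta("1d12h") == datetime.timedelta(days=1, hours=12)
    assert parse_timedelta("0.5h") == datetime.timedelta(minutes=30)
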
def get_datetime(value):
try:
return datetime.datetime.utcnow() - parse_timedelta(value)
except ValueError:
return dateutil.parser.parse(value)
def snap_to_interval(dt, interval):
ts = time.mktime(dt.timetuple())
ts = ts - (ts % int(interval.total_seconds()))
return datetime.datetime.utcfromtimestamp(ts)
def parse_index(url_str):
url = urlparse(url_str)
return {
"host": urlunparse(url._replace(path="", params="", query="", fragment="")),
"index": url.path[1:],
}
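
# A quick hedged illustration of what parse_index returns; the URL is the script's own default
# input index.
def _example_parse_index():
    parsed = parse_index("http://elk-1.icecube.wisc.edu:9200/condor_status")
    assert parsed["host"] == "http://elk-1.icecube.wisc.edu:9200"
    assert parsed["index"] == "condor_status"
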
parser = ArgumentParser(
description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--after", default="2d", help="maximum time to look back", type=get_datetime,
)
parser.add_argument(
"--before", default="0d", help="minimum time to look back", type=get_datetime,
)
parser.add_argument(
"--interval", default="20m", help="aggregation interval", type=parse_timedelta,
)
parser.add_argument(
"-y",
"--dry-run",
default=False,
action="store_true",
help="query status, but do not ingest into ES",
)
parser.add_argument(
"-v",
"--verbose",
default=False,
action="store_true",
help="use verbose logging in ES",
)
parser.add_argument(
"-i",
"--input-index",
type=parse_index,
default="http://elk-1.icecube.wisc.edu:9200/condor_status",
)
parser.add_argument(
"-o",
"--output-index",
type=parse_index,
default="http://elk-1.icecube.wisc.edu:9200/glidein_resources",
)
options = parser.parse_args()
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)s %(name)s : %(message)s"
)
if options.verbose:
logging.getLogger("elasticsearch").setLevel("DEBUG")
# round time range to nearest interval
after = snap_to_interval(options.after, options.interval)
# ...only if last bin is far enough in the past to be complete
if datetime.datetime.utcnow() - options.before > options.interval:
before = snap_to_interval(options.before, options.interval)
else:
before = options.before
if not before > after:
parser.error("--before must be > --after")
# note different capitalization conventions for GPU and Cpu
RESOURCES = ("GPUs", "Cpus", "Memory", "Disk")
STATUSES = ("evicted", "removed", "finished", "failed")
# Accumulate offered and claimed resources in time bins, weighting by the
# fraction of each bin that intersects the glidein lifetime
summarize_resources = edsl.A(
"scripted_metric",
init_script="""
state.interval = (Long)(params.interval);
HashMap metrics = new HashMap();
for (resource in params.RESOURCES) {
for (status in params.STATUSES) {
String key = "claimed."+status+"."+resource;
metrics.put(key, 0.0);
}
metrics.put("offered."+resource, 0.0);
}
state.metrics = metrics;
""",
map_script="""
// The time range of each item intersects one or more buckets, but does not
// necessarily overlap each completely. Ideally we would use the exact overlap
// fraction to weight contributions to each bucket, but since Elastic does not
// give us access to the bucket key, we have to settle for the average overlap
// fraction.
long left = doc[params.left].value.toInstant().toEpochMilli();
long right = doc[params.right].value.toInstant().toEpochMilli();
long total_interval = (state.interval*((right+params.interval)/state.interval-left/state.interval));
double active_fraction = (right-left).doubleValue()/total_interval.doubleValue();
HashMap metrics = state.metrics;
for (resource in params.RESOURCES) {
if (!doc.containsKey("Total"+resource)) {
continue;
}
double capacity = doc["Total"+resource].value.doubleValue();
for (status in params.STATUSES) {
String source = "occupancy."+status+"."+resource;
String dest = "claimed."+status+"."+resource;
if (doc.containsKey(source)) {
metrics[dest] += active_fraction*doc[source].value*capacity;
}
}
metrics["offered."+resource] += active_fraction*capacity;
}
""",
combine_script="""
return state.metrics;
""",
reduce_script="""
Map aggregate = new HashMap();
for (state in states) {
if (state == null) {
continue;
}
for (entry in state.entrySet()) {
if (aggregate.containsKey(entry.getKey())) {
aggregate[entry.getKey()] += entry.getValue();
} else {
aggregate[entry.getKey()] = entry.getValue();
}
}
}
return aggregate;
""",
params={
"left": "DaemonStartTime",
"right": "LastHeardFrom",
"interval": int(options.interval.total_seconds() * 1000),
"RESOURCES": RESOURCES,
"STATUSES": STATUSES + ("total",),
},
)
def scan_aggs(search, source_aggs, inner_aggs={}, size=10):
"""
Helper function used to iterate over all possible bucket combinations of
``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
``composite`` aggregation under the hood to perform this.
"""
def run_search(**kwargs):
s = search[:0]
s.aggs.bucket("comp", "composite", sources=source_aggs, size=size, **kwargs)
for agg_name, agg in inner_aggs.items():
s.aggs["comp"][agg_name] = agg
return s.execute()
response = run_search()
while response.aggregations.comp.buckets:
for b in response.aggregations.comp.buckets:
yield b
if "after_key" in response.aggregations.comp:
after = response.aggregations.comp.after_key
else:
after = response.aggregations.comp.buckets[-1].key
response = run_search(after=after)
def resource_summaries(host, index, after, before, interval):
by_site = [
{k: edsl.A("terms", field=k + ".keyword")}
for k in ("site", "country", "institution", "resource")
]
# split sites into GPU/CPU partitions
by_site.append(
{"slot_type": edsl.A("terms", script='doc.TotalGPUs.value > 0 ? "GPU" : "CPU"')}
)
# NB: @timestamp is not included in the composite aggregation, as this
# buckets documents for _every_ combination of the source values, meaning
# that a document will be added to the bucket N times if N of its
# @timestamp values fall into the time range. To emulate ES 7.x range
# semantics (one doc falls in many buckets, each bucket sees only one copy
# of each doc), we split date_histogram off into a sub-aggregation.
by_timestamp = edsl.A(
"date_histogram",
field="@timestamp",
interval=int(interval.total_seconds() * 1000),
)
by_timestamp.bucket("resources", summarize_resources)
buckets = scan_aggs(
(
edsl.Search()
.using(elasticsearch.Elasticsearch(host))
.index(index)
.filter("range", **{"@timestamp": {"gte": after, "lt": before}})
),
by_site,
{"timestamp": by_timestamp},
size=1,
)
for site in buckets:
for bucket in site.timestamp.buckets:
# Filter buckets to query time range. This should be possible to do
# in the query DSL, but bucket_selector does not support
# date_histogram buckets, and the corresponding ticket has been
# open for years:
# https://github.com/elastic/elasticsearch/issues/23874
timestamp = datetime.datetime.utcfromtimestamp(bucket.key / 1000)
if timestamp >= after and timestamp < before and bucket.doc_count > 0:
data = bucket.resources.value.to_dict()
data["count"] = bucket.doc_count
data["_keys"] = site.key.to_dict()
data["_keys"]["timestamp"] = timestamp.strftime("%Y-%m-%dT%H:%M:%S")
yield data
buckets = resource_summaries(
options.input_index["host"],
options.input_index["index"],
after,
before,
options.interval,
)
def make_insert(
generator,
index=options.output_index["index"],
id_keys=["timestamp", "resource", "site", "slot_type"],
):
for entry in generator:
data = dict(entry)
data["_index"] = index
data["_type"] = "resource_summary"
key = data.pop("_keys")
data["_id"] = ".".join([key[k] for k in id_keys])
data.update(key)
yield data
if options.dry_run:
import json
import sys
for bucket in make_insert(buckets):
json.dump(bucket, sys.stdout)
sys.stdout.write("\n")
else:
es = elasticsearch.Elasticsearch(hosts=options.output_index["host"], timeout=5000)
index = options.output_index["index"]
success, _ = elasticsearch.helpers.bulk(
es, make_insert(buckets), max_retries=20, initial_backoff=2, max_backoff=3600,
)
| python |
# file: asynchronous-inquiry.py
# auth: Albert Huang <[email protected]>
# desc: demonstration of how to do asynchronous device discovery by subclassing
# the DeviceDiscoverer class
# $Id: asynchronous-inquiry.py 405 2006-05-06 00:39:50Z albert $
#
# XXX Linux only (5/5/2006)
import bluetooth
import select
class MyDiscoverer(bluetooth.DeviceDiscoverer):
def pre_inquiry(self):
self.done = False
def device_discovered(self, address, device_class, rssi, name):
print("%s - %s" % (address, name))
# get some information out of the device class and display it.
# voodoo magic specified at:
#
# https://www.bluetooth.org/foundry/assignnumb/document/baseband
major_classes = ( "Miscellaneous",
"Computer",
"Phone",
"LAN/Network Access point",
"Audio/Video",
"Peripheral",
"Imaging" )
major_class = (device_class >> 8) & 0xf
if major_class < 7:
print(" %s" % major_classes[major_class])
else:
print(" Uncategorized")
print(" services:")
service_classes = ( (16, "positioning"),
(17, "networking"),
(18, "rendering"),
(19, "capturing"),
(20, "object transfer"),
(21, "audio"),
(22, "telephony"),
(23, "information"))
for bitpos, classname in service_classes:
if device_class & (1 << (bitpos-1)):
print(" %s" % classname)
print(" RSSI: " + str(rssi))
def inquiry_complete(self):
self.done = True
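
# A hedged illustration (not part of the original example) of the major-class decoding used in
# device_discovered above: 0x5a020c is a commonly quoted class-of-device value for a smartphone
# and is only an example; bits 8-11 select the index into major_classes.
def _example_major_class_decode():
    device_class = 0x5a020c                     # hypothetical class-of-device value
    major_class = (device_class >> 8) & 0xf
    assert major_class == 2                     # index 2 of major_classes is "Phone"
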
d = MyDiscoverer()
d.find_devices(lookup_names = True)
readfiles = [ d, ]
while True:
rfds = select.select( readfiles, [], [] )[0]
if d in rfds:
d.process_event()
if d.done: break
| python |
#!/usr/bin/env python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, window, asc, desc, lead, lag, udf, hour, month, stddev, lit
from pyspark.sql.window import Window
from pyspark.sql.types import FloatType, IntegerType, DateType
from pyspark import SparkConf
import yaml
import datetime
import os
conf = SparkConf()
conf.set("spark.jars", os.getenv("HOME") + "/.ivy2/jars/org.postgresql_postgresql-42.1.1.jar")
conf.set("spark.executor.extrajavaoptions", "-Xmx15000m")
conf.set("spark.executor.memory", "15g")
conf.set("spark.driver.memory", "15g")
conf.set("spark.storage.memoryFraction", "0")
spark = SparkSession.builder \
.config(conf=conf) \
.master("local[4]") \
.appName("Wifi Drop on Outage Calculator") \
.getOrCreate()
config = open('config.yaml')
config = yaml.load(config)
#connect to the database
pw_df = spark.read.jdbc("jdbc:postgresql://timescale.lab11.eecs.umich.edu/powerwatch", "pw_dedupe",
properties={"user": config['user'], "password": config['password'],"driver":"org.postgresql.Driver"})
#read the data that we care about
pw_df = pw_df.select(pw_df['core_id'],pw_df['time'],pw_df['product_id'])
pw_df = pw_df.filter("product_id = 7008 OR product_id = 7009")
pw_df = pw_df.withColumn("packet", lit(255)) #this is the max amount of data per packet
pw_df = pw_df.groupBy("core_id",month("time")).sum()
#pw_df.repartition(1).write.format("com.databricks.spark.csv").option("header", "true").save("monthly_data_usage")
pw_df.groupBy("core_id").agg(stddev("sum(packet)")).show(200)
| python |
from django.core.exceptions import ValidationError
from cyder.base.tests import ModelTestMixin
from cyder.core.ctnr.models import Ctnr
from cyder.core.system.models import System
from cyder.cydhcp.constants import STATIC
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.range.models import Range
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydns.cname.models import CNAME
from cyder.cydns.domain.models import Domain
from cyder.cydns.ip.utils import ip_to_reverse_name
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydns.ptr.models import PTR
from cyder.cydns.soa.models import SOA
from cyder.cydns.tests.utils import create_zone, DNSTest
class NSTestsModels(DNSTest, ModelTestMixin):
def setUp(self):
super(NSTestsModels, self).setUp()
self.r = Domain.objects.create(name="ru")
self.f_r = Domain.objects.create(name="foo.ru")
self.b_f_r = Domain.objects.create(name="bar.foo.ru")
Domain.objects.create(name="asdf")
for d in (self.r, self.f_r, self.b_f_r):
self.ctnr.domains.add(d)
create_zone('128.in-addr.arpa')
self.s = System.objects.create(name='test_system')
self.net1 = Network.objects.create(network_str='128.193.0.0/17')
self.sr1 = Range.objects.create(
network=self.net1, range_type=STATIC, start_str='128.193.99.2',
end_str='128.193.99.14')
self.sr2 = Range.objects.create(
network=self.net1, range_type=STATIC, start_str='128.193.1.1',
end_str='128.193.1.14')
self.net2 = Network.objects.create(network_str='14.10.1.0/30')
self.sr3 = Range.objects.create(
network=self.net2, range_type=STATIC, start_str='14.10.1.1',
end_str='14.10.1.2')
for r in (self.sr1, self.sr2, self.sr3):
self.ctnr.ranges.add(r)
def create_zone(self, name):
domain = create_zone(name)
self.ctnr.domains.add(domain)
return domain
@property
def objs(self):
"""Create objects for test_create_delete."""
return (
Nameserver.objects.create(
domain=self.r, server='ns2.moot.ru'),
Nameserver.objects.create(
domain=self.r, server='ns5.moot.ru'),
Nameserver.objects.create(
domain=self.r, server=u'ns3.moot.ru'),
Nameserver.objects.create(
domain=self.b_f_r, server='n1.moot.ru'),
Nameserver.objects.create(
domain=self.b_f_r, server='ns2.moot.ru'),
Nameserver.objects.create(
domain=self.r, server='asdf.asdf'),
)
def test_add_invalid(self):
self.assertRaises(
ValidationError, Nameserver.objects.create,
domain=self.f_r, server='ns3.foo.ru', ctnr=self.ctnr)
def testtest_add_ns_in_domain(self):
# Use an A record as a glue record.
glue = AddressRecord.objects.create(
label='ns2', ctnr=self.ctnr, domain=self.r, ip_str='128.193.1.10',
ip_type='4')
ns = Nameserver.objects.create(domain=self.r, server='ns2.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.server, ns.glue.fqdn)
self.assertRaises(ValidationError, glue.delete)
glue = AddressRecord.objects.create(
label='ns3', ctnr=self.ctnr, domain=self.f_r,
ip_str='128.193.1.10', ip_type='4')
ns = Nameserver.objects.create(domain=self.f_r, server='ns3.foo.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.server, ns.glue.fqdn)
def test_disallow_name_update_of_glue_A(self):
# Glue records should not be allowed to change their name.
glue = AddressRecord.objects.create(
label='ns39', ctnr=self.ctnr, domain=self.f_r,
ip_str='128.193.1.77', ip_type='4')
ns = Nameserver.objects.create(domain=self.f_r, server='ns39.foo.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.glue, glue)
glue.label = "ns22"
self.assertRaises(ValidationError, glue.save)
def test_disallow_name_update_of_glue_Intr(self):
# Glue records should not be allowed to change their name.
glue = StaticInterface.objects.create(
label='ns24', domain=self.f_r, ctnr=self.ctnr,
ip_str='128.193.99.10', ip_type='4', system=self.s,
mac="11:22:33:44:55:66")
ns = Nameserver.objects.create(domain=self.f_r, server='ns24.foo.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.glue, glue)
glue.label = "ns22"
self.assertRaises(ValidationError, glue.save)
def test_disallow_delete_of_glue_intr(self):
# Interface glue records should not be allowed to be deleted.
glue = StaticInterface.objects.create(
label='ns24', domain=self.f_r, ctnr=self.ctnr,
ip_str='128.193.99.10', ip_type='4', system=self.s,
mac="11:22:33:44:55:66")
ns = Nameserver.objects.create(domain=self.f_r, server='ns24.foo.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.glue, glue)
self.assertRaises(ValidationError, glue.delete)
def test_manual_assign_of_glue(self):
        # Test that assigning a different glue record doesn't get overridden by
        # the auto assigning during the Nameserver's clean function.
glue = StaticInterface.objects.create(
label='ns25', domain=self.f_r, ctnr=self.ctnr,
ip_str='128.193.99.10', ip_type='4', system=self.s,
mac="11:22:33:44:55:66")
ns = Nameserver.objects.create(domain=self.f_r, server='ns25.foo.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.glue, glue)
glue2 = AddressRecord.objects.create(
label='ns25', ctnr=self.ctnr, domain=self.f_r,
ip_str='128.193.1.78', ip_type='4')
ns.full_clean()
# Make sure things didn't get overridden.
self.assertEqual(ns.glue, glue)
ns.glue = glue2
ns.save()
# Refresh the object.
ns = Nameserver.objects.get(pk=ns.pk)
# Again, make sure things didn't get overridden.
self.assertEqual(ns.glue, glue2)
# Make sure we still can't delete.
self.assertRaises(ValidationError, glue2.delete)
self.assertRaises(ValidationError, ns.glue.delete)
        # We should be able to delete the other one.
glue.delete()
def testtest_add_ns_in_domain_intr(self):
# Use an Interface as a glue record.
glue = StaticInterface.objects.create(
label='ns232', domain=self.r, ctnr=self.ctnr,
ip_str='128.193.99.10', ip_type='4', system=self.s,
mac="12:23:45:45:45:45")
ns = Nameserver.objects.create(domain=self.r, server='ns232.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.server, ns.glue.fqdn)
self.assertRaises(ValidationError, glue.delete)
glue = StaticInterface.objects.create(
label='ns332', domain=self.f_r, ctnr=self.ctnr,
ip_str='128.193.1.10', ip_type='4', system=self.s,
mac="11:22:33:44:55:66")
ns = Nameserver.objects.create(domain=self.f_r, server='ns332.foo.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.server, ns.glue.fqdn)
def test_add_ns_outside_domain(self):
ns = Nameserver.objects.create(domain=self.f_r, server='ns2.ru')
self.assertFalse(ns.glue)
def test_update_glue_to_no_intr(self):
glue = StaticInterface.objects.create(
label='ns34', domain=self.r, ctnr=self.ctnr, ip_str='128.193.1.10',
ip_type='4', system=self.s, mac="11:22:33:44:55:66")
data = {'domain': self.r, 'server': 'ns34.ru'}
ns = Nameserver.objects.create(domain=self.r, server='ns34.ru')
self.assertTrue(ns.glue)
ns.server = "ns4.wee"
ns.save()
self.assertTrue(ns.glue is None)
def test_update_glue_record_intr(self):
# Glue records can't change their name.
glue = StaticInterface.objects.create(
label='ns788', domain=self.r, ctnr=self.ctnr,
ip_str='128.193.1.10', ip_type='4', system=self.s,
mac="11:22:33:44:55:66")
ns = Nameserver.objects.create(domain=self.r, server='ns788.ru')
self.assertTrue(ns.glue)
glue.label = "asdfasdf"
self.assertRaises(ValidationError, glue.save)
def test_update_glue_to_no_glue(self):
glue = AddressRecord.objects.create(
label='ns3', ctnr=self.ctnr, domain=self.r, ip_str='128.193.1.10',
ip_type='4')
ns = Nameserver.objects.create(domain=self.r, server='ns3.ru')
self.assertTrue(ns.glue)
ns.server = "ns4.wee"
ns.save()
self.assertTrue(ns.glue is None)
def test_delete_ns(self):
glue = AddressRecord.objects.create(
label='ns4', ctnr=self.ctnr, domain=self.f_r,
ip_str='128.196.1.10', ip_type='4')
ns = Nameserver.objects.create(domain=self.f_r, server='ns4.foo.ru')
self.assertTrue(ns.glue)
self.assertEqual(ns.server, ns.glue.fqdn)
ns.delete()
self.assertFalse(Nameserver.objects.filter(
server='ns2.foo.ru', domain=self.f_r).exists())
def test_invalid_create(self):
glue = AddressRecord.objects.create(
label='ns2', ctnr=self.ctnr, domain=self.r, ip_str='128.193.1.10',
ip_type='4')
glue.save()
self.assertRaises(
ValidationError, Nameserver.objects.create,
domain=self.r, server='ns2 .ru', ctnr=self.ctnr)
self.assertRaises(
ValidationError, Nameserver.objects.create,
domain=self.r, server='ns2$.ru', ctnr=self.ctnr)
self.assertRaises(
ValidationError, Nameserver.objects.create,
domain=self.r, server='ns2..ru', ctnr=self.ctnr)
self.assertRaises(
ValidationError, Nameserver.objects.create,
domain=self.r, server='ns2.ru ', ctnr=self.ctnr)
self.assertRaises(
ValidationError, Nameserver.objects.create,
domain=self.r, server='', ctnr=self.ctnr)
def test_add_dup(self):
def x():
Nameserver.objects.create(domain=self.r, server='ns2.moot.ru')
x()
self.assertRaises(ValidationError, x)
def _get_post_data(self, random_str):
"""Return a valid set of data"""
return {
'root_domain': '{0}.oregonstate.com'.format(random_str),
'soa_primary': 'ns1.oregonstate.com',
'soa_contact': 'noc.oregonstate.com',
'nameserver_1': 'ns1.oregonstate.com',
'ttl_1': '1234'
}
def test_bad_nameserver_soa_state_case_1_0(self):
# This is Case 1
root_domain = self.create_zone('asdf10.asdf')
for ns in root_domain.nameserver_set.all():
ns.delete()
# At this point we should have a domain at the root of a zone with no
# other records in it.
# Adding a record shouldn't be allowed because there is no NS record on
# the zone's root domain.
self.assertRaises(
ValidationError, AddressRecord.objects.create,
label='', ctnr=self.ctnr, domain=root_domain, ip_type="6",
ip_str="1::")
self.assertRaises(
ValidationError, CNAME.objects.create,
label='', ctnr=self.ctnr, domain=root_domain, target="asdf")
def test_bad_nameserver_soa_state_case_1_1(self):
# This is Case 1
root_domain = self.create_zone('asdf111.asdf')
for ns in root_domain.nameserver_set.all():
ns.delete()
# At this point we should have a domain at the root of a zone with no
# other records in it.
# Let's create a child domain and try to add a record there.
cdomain = Domain.objects.create(name="test." + root_domain.name)
# Adding a record shouldn't be allowed because there is no NS record on
# the zone's root domain.
self.assertRaises(
ValidationError, AddressRecord.objects.create,
label='', ctnr=self.ctnr, domain=cdomain, ip_type="6",
ip_str="1::")
self.assertRaises(
ValidationError, CNAME.objects.create,
label='', ctnr=self.ctnr, domain=cdomain, target="asdf")
def test_bad_nameserver_soa_state_case_1_2(self):
# This is Case 1 ... with ptr's
root_domain = self.create_zone('12.in-addr.arpa')
for ns in root_domain.nameserver_set.all():
ns.delete()
# At this point we should have a domain at the root of a zone with no
# other records in it.
# Adding a record shouldn't be allowed because there is no NS record on
# the zone's root domain.
self.assertRaises(
ValidationError, PTR.objects.create,
ctnr=self.ctnr, fqdn="asdf", ip_str="12.10.1.1", ip_type="4")
def test_bad_nameserver_soa_state_case_1_3(self):
# This is Case 1 ... with ptr's
root_domain = self.create_zone('13.in-addr.arpa')
for ns in root_domain.nameserver_set.all():
ns.delete()
# At this point we should have a domain at the root of a zone with no
# other records in it.
# Let's create a child domain and try to add a record there.
cdomain = Domain.objects.create(name="10.13.in-addr.arpa")
# Adding a record shouldn't be allowed because there is no NS record on
# the zone's root domain.
self.assertRaises(
ValidationError, PTR.objects.create,
ctnr=self.ctnr, fqdn="asdf", ip_str="13.10.1.1", ip_type="4")
def test_bad_nameserver_soa_state_case_1_4(self):
# This is Case 1 ... with StaticInterfaces's
reverse_root_domain = self.create_zone('14.in-addr.arpa')
root_domain = self.create_zone('asdf14.asdf')
for ns in root_domain.nameserver_set.all():
ns.delete()
# At this point we should have a domain at the root of a zone with no
# other records in it.
# Let's create a child domain and try to add a record there.
cdomain = Domain.objects.create(name="10.14.in-addr.arpa")
# Adding a record shouldn't be allowed because there is no NS record on
# the zone's root domain.
self.assertRaises(
ValidationError, StaticInterface.objects.create,
label="asdf", domain=root_domain, ip_str="14.10.1.1", ip_type="4",
mac="11:22:33:44:55:66", system=self.s, ctnr=self.ctnr)
# See record.tests for the case a required view is deleted.
def test_bad_nameserver_soa_state_case_2_0(self):
# This is Case 2
root_domain = self.create_zone('asdf20.asdf')
self.assertEqual(root_domain.nameserver_set.count(), 1)
ns = root_domain.nameserver_set.all()[0]
# At this point we should have a domain at the root of a zone with one
# NS record associated to the domain.
AddressRecord.objects.create(
label='', ctnr=self.ctnr, domain=root_domain, ip_type="6",
ip_str="1::")
self.assertRaises(ValidationError, ns.delete)
def test_bad_nameserver_soa_state_case_2_1(self):
# This is Case 2
root_domain = self.create_zone('asdf21.asdf')
self.assertEqual(root_domain.nameserver_set.count(), 1)
ns = root_domain.nameserver_set.all()[0]
# At this point we should have a domain at the root of a zone with one
# NS record associated to the domain.
# Let's create a child domain and add a record there, then try to
# delete the NS record
cdomain = Domain.objects.create(name="test." + root_domain.name)
self.ctnr.domains.add(cdomain)
AddressRecord.objects.create(
label='', ctnr=self.ctnr, domain=cdomain, ip_type="6",
ip_str="1::")
self.assertRaises(ValidationError, ns.delete)
def test_bad_nameserver_soa_state_case_2_2(self):
# This is Case 2 ... with PTRs
root_domain = self.create_zone('14.in-addr.arpa')
self.assertEqual(root_domain.nameserver_set.count(), 1)
ns = root_domain.nameserver_set.all()[0]
# At this point we should have a domain at the root of a zone with one
# NS record associated to the domain.
PTR.objects.create(
ctnr=self.ctnr, fqdn="bloo.asdf", ip_str="14.10.1.1", ip_type="4")
self.assertRaises(ValidationError, ns.delete)
def test_bad_nameserver_soa_state_case_2_3(self):
# This is Case 2 ... with PTRs
Domain.objects.create(name='14.in-addr.arpa')
root_domain = self.create_zone('10.14.in-addr.arpa')
self.assertEqual(root_domain.nameserver_set.count(), 1)
ns = root_domain.nameserver_set.all()[0]
# At this point we should have a domain at the root of a zone with one
# NS record associated to the domain.
# Let's create a child domain and add a record there, then try to
# delete the NS record.
cdomain = Domain.objects.create(name="test." + root_domain.name)
PTR.objects.create(
ctnr=self.ctnr, fqdn="bloo.asdf", ip_str="14.10.1.1", ip_type="4")
self.assertRaises(ValidationError, ns.delete)
def test_bad_nameserver_soa_state_case_3_0(self):
# This is Case 3
root_domain = self.create_zone('asdf30.asdf')
for ns in root_domain.nameserver_set.all():
ns.delete()
ns.domain.soa.delete()
root_domain = Domain.objects.get(pk=root_domain.pk)
# At this point we should have a domain pointed at no SOA record with
# no records attached to it. It also has no child domains.
# Add a record to the domain.
AddressRecord.objects.create(
label='', ctnr=self.ctnr, domain=root_domain, ip_type="6",
ip_str="1::")
self.assertRaises(
ValidationError, SOA.objects.create,
primary="asdf.asdf", contact="asdf.asdf", description="asdf",
root_domain=root_domain)
def test_bad_nameserver_soa_state_case_3_1(self):
# This is Case 3
root_domain = self.create_zone('asdf31.asdf')
# Try case 3 but add a record to a child domain of root_domain.
bad_root_domain = Domain.objects.create(
name="below." + root_domain.name)
cdomain = Domain.objects.create(name="test." + bad_root_domain.name)
self.ctnr.domains.add(cdomain)
# Add a record to the domain.
AddressRecord.objects.create(
label='', ctnr=self.ctnr, domain=cdomain, ip_type="6",
ip_str="1::")
# Now try to add the domain to the zone that has no NS records at its
# root.
self.assertRaises(
ValidationError, SOA.objects.create,
root_domain=bad_root_domain, contact="a", primary='b')
def test_bad_nameserver_soa_state_case_3_2(self):
# This is Case 3 ... with PTRs
root_domain = create_zone('14.in-addr.arpa')
for ns in root_domain.nameserver_set.all():
ns.delete()
root_domain.soa.delete()
root_domain = Domain.objects.get(pk=root_domain.pk)
self.assertIsNone(root_domain.soa)
# At this point we should have a domain pointed at no SOA record with
# no records attached to it. It also has no child domains.
# Add a record to the domain.
self.assertRaises(
ValidationError, PTR.objects.create,
ctnr=self.ctnr, fqdn="bloo.asdf", ip_str="14.10.1.1", ip_type="4")
def test_bad_nameserver_soa_state_case_3_3(self):
# This is Case 3 ... with PTRs
root_domain = create_zone('14.in-addr.arpa')
bad_root_domain = Domain.objects.create(name="10." + root_domain.name)
cdomain = Domain.objects.create(name="1.10.14.in-addr.arpa")
PTR.objects.create(
fqdn=('eh.' + cdomain.name), ctnr=self.ctnr, ip_type="4",
ip_str="14.10.1.1")
# Now try to add the domain to the zone that has no NS records at its
# root.
self.assertRaises(
ValidationError, SOA.objects.create,
root_domain=bad_root_domain, contact="a", primary='b')
| python |
"""
This file is part of the opendrive-beamng project.
--------------------------------------------------------------------------------
Server class - deals with initialization, configuring of the environment, sim
launch and socket comms.
Notes:
- Set `BNG_HOME` env variable to beamNG.tech path
TODO:
- Switch to select / non-blocking
--------------------------------------------------------------------------------
Copyright 2021 David Pescariu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = '1.0.0'
import socket
import pickle
import time
from datetime import datetime
from typing import Dict
from beamngpy import BeamNGpy, Scenario, Vehicle
from beamngpy.sensors import Lidar, Camera
from ..utils.logger import Log
class Server:
def __init__(self, options: Dict[str, str], host: str = '', port: int = 6555) -> None:
"""
Initialize the Server
Args:
options (Dict[str, str]): Options / Characteristics used to construct
the vehicle, scenario, and different sensors
host (str, optional): IP/Hostname that the server listens for, defaults
to '' - loopback / all.
port (int, optional): Port that the server listens for, defaults to 6555.
"""
Log.info("Init")
self.HOST = host
self.PORT = port
self.OPTIONS = options
Log.info("Starting & Initializing BeamNG")
self.beamng = BeamNGpy('localhost', 64256) # Using BNG_HOME env var
self.beamng.open(launch=True)
Log.info("Connection successful")
self._init_beamNG()
Log.done("Starting & Initializing BeamNG")
def _init_beamNG(self) -> None:
"""
Initialize beamNG:
Create the scenario, vehicle, sensors, and load everything
"""
self.scenario = Scenario(
self.OPTIONS['scenario_map'],
self.OPTIONS['scenario_name'],
description=self.OPTIONS['scenario_desc']
)
self.vehicle = Vehicle(
self.OPTIONS['vehicle_name'],
model=self.OPTIONS['vehicle_model'],
license=self.OPTIONS['vehicle_license']
)
self.lidar_sensor = Lidar(max_dist=180, vres=24, vangle=25)
self.vehicle.attach_sensor('lidar', self.lidar_sensor)
self.front_camera = Camera(
self.OPTIONS['f_cam_pos'],
self.OPTIONS['f_cam_dir'],
self.OPTIONS['f_cam_fov'],
self.OPTIONS['f_cam_res'],
colour=True, annotation=True
)
self.vehicle.attach_sensor('front_camera', self.front_camera)
self.scenario.add_vehicle(
self.vehicle,
self.OPTIONS['vehicle_pos'],
self.OPTIONS['vehicle_rot'],
self.OPTIONS['vehicle_rot_quat']
)
self.scenario.make(self.beamng)
self.beamng.load_scenario(self.scenario)
def start_socket(self, send_delay: float = 0.369) -> None:
"""
Initialize the socket and await (blocking) connections
Args:
send_delay (float, optional): How long to wait before sending a new
packet. Defaults to 0.369.
Packet data - List:
[0]: vehicle_state
[1]: lidar_data
[2]: front_camera_data
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((self.HOST, self.PORT))
s.listen()
Log.info("Socket ready")
while True:
try:
conn, addr = s.accept()
with conn:
Log.done(f"New connection {addr}")
while conn:
self.vehicle.poll_sensors()
self._points = self.lidar_sensor.data['points']
self._camera = self.front_camera.data['colour']
self._packet = [
self.vehicle.state,
self._points,
self._camera
]
conn.send(pickle.dumps(self._packet))
Log.info(f"Sent data! @ {datetime.now()}")
time.sleep(send_delay)
except ConnectionResetError:
Log.warn("Lost connection")
                    if 'y' in input('quit? (y/n)'):     # str.find() returns -1 (truthy) when 'y' is absent, so test membership instead
                        break
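

# A hedged usage sketch (not part of the original module): the dictionary keys below are the ones
# read in _init_beamNG(), but every value shown is a placeholder assumption and would need to match
# content actually installed in BeamNG.tech.
def _example_run_server():
    options = {
        'scenario_map': 'west_coast_usa',           # placeholder map name
        'scenario_name': 'lidar_demo',
        'scenario_desc': 'Streams vehicle state, lidar and camera data over a socket',
        'vehicle_name': 'ego',
        'vehicle_model': 'etk800',                  # placeholder vehicle model
        'vehicle_license': 'OPEN-DRIVE',
        'f_cam_pos': (-0.3, 1, 1),                  # placeholder camera position, direction, fov and resolution
        'f_cam_dir': (0, 1, 0),
        'f_cam_fov': 120,
        'f_cam_res': (512, 512),
        'vehicle_pos': (-717, 101, 118),            # placeholder spawn position and rotation
        'vehicle_rot': None,
        'vehicle_rot_quat': (0, 0, 0.38, 0.92),
    }
    server = Server(options, host='', port=6555)
    server.start_socket(send_delay=0.369)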
| python |
import discord
import gspread
from discord.ext import commands
from oauth2client.service_account import ServiceAccountCredentials
from gspread.exceptions import CellNotFound
class Gsheets:
@classmethod
def start(cls):
"""Starts gsheets API instance."""
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('google_secret.json', scope)
return gspread.authorize(creds)
class Tournaments(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name='tornei')
@commands.cooldown(1, 60, commands.BucketType.user)
async def tornei(self, ctx):
"""Searches on the gsheets for tournaments counter."""
client = Gsheets.start()
sh = client.open('Tornei Brawlhalla').sheet1
embed = discord.Embed(title='Classifica tornei Brawlhalla',
url='https://docs.google.com/spreadsheets/d/1q9Hr8qrAUVpdq5OyV1SF4b7n5C2j0QGQg-JXXSJ1B8s'
'/edit?usp=sharing',
colour=discord.Colour(0x00ff07))
embed.set_footer(text='Powered by Google Drive API', icon_url='http://icons.iconarchive.com/icons'
'/marcus-roberto/google-play/128/Google-Drive-icon.png')
cell = 3
while True:
player_cell = sh.acell('A' + str(cell))
if player_cell.value == '':
break
embed.add_field(name="**{}**".format(player_cell.value),
value=f"Tornei vinti: {sh.cell(player_cell.row, 2).value}",
inline=True)
cell += 1
await ctx.send(embed=embed)
@commands.command(name='tornei_add')
@commands.is_owner()
async def add_tourn(self, ctx, user: str):
"""Add one win to user."""
client = Gsheets.start()
sh = client.open("Tornei Brawlhalla").sheet1
try:
cell = sh.find(user)
value = int(sh.cell(cell.row, 2).value)
sh.update_cell(cell.row, 2, value + 1)
await ctx.send("Fatto! Congratulazioni a {}".format(ctx.message.content[12:]))
except CellNotFound:
await ctx.send("Utente non trovato.")
def setup(client):
client.add_cog(Tournaments(bot=client))
| python |
import sys
import os
import numpy as np
import time
from PIL import Image
APS = 100;
TileFolder = sys.argv[1] + '/';
heat_map_out = 'patch-level-color.txt';
def whiteness(png):
wh = (np.std(png[:,:,0].flatten()) + np.std(png[:,:,1].flatten()) + np.std(png[:,:,2].flatten())) / 3.0;
return wh;
def blackness(png):
bk = np.mean(png);
return bk;
def redness(png):
rd = np.mean((png[:,:,0] >= 190) * (png[:,:,1] <= 100) * (png[:,:,2] <= 100));
return rd;
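
# A hedged sanity-check sketch (not part of the original script): a uniform pure-red patch has no
# per-channel variation, a channel mean of (255 + 0 + 0) / 3, and every pixel passes the redness
# thresholds.
def _example_colour_metrics():
    patch = np.zeros((APS, APS, 3), dtype=np.uint8);
    patch[:, :, 0] = 255;
    assert whiteness(patch) == 0.0;
    assert blackness(patch) == 85.0;
    assert redness(patch) == 1.0;
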
def load_data():
X = np.zeros(shape=(1000000, 3), dtype=np.float32);
coor = np.zeros(shape=(1000000, 2), dtype=np.int32);
ind = 0;
for fn in os.listdir(TileFolder):
full_fn = TileFolder + '/' + fn;
if not os.path.isfile(full_fn):
continue;
if len(fn.split('_')) < 4:
continue;
x_off = float(fn.split('_')[0]);
y_off = float(fn.split('_')[1]);
svs_pw = float(fn.split('_')[2]);
png_pw = float(fn.split('_')[3].split('.png')[0]);
png = np.array(Image.open(full_fn).convert('RGB'));
for x in range(0, png.shape[1], APS):
if x + APS > png.shape[1]:
continue;
for y in range(0, png.shape[0], APS):
if y + APS > png.shape[0]:
continue;
X[ind, 0] = whiteness(png[y:y+APS, x:x+APS, :]);
X[ind, 1] = blackness(png[y:y+APS, x:x+APS, :]);
X[ind, 2] = redness(png[y:y+APS, x:x+APS, :]);
coor[ind, 0] = np.int32(x_off + (x + APS/2) * svs_pw / png_pw);
coor[ind, 1] = np.int32(y_off + (y + APS/2) * svs_pw / png_pw);
ind += 1;
X = X[0:ind];
coor = coor[0:ind];
return X, coor;
def split_validation():
Wh, coor = load_data();
fid = open(TileFolder + '/' + heat_map_out, 'w');
for idx in range(0, Wh.shape[0]):
fid.write('{} {} {} {} {}\n'.format(coor[idx][0], coor[idx][1], Wh[idx][0], Wh[idx][1], Wh[idx][2]));
fid.close();
def main():
split_validation();
if __name__ == "__main__":
main();
| python |
# see: https://github.com/gabrielfalcao/HTTPretty/issues/242#issuecomment-160942608
from httpretty import HTTPretty as OriginalHTTPretty
try:
from requests.packages.urllib3.contrib.pyopenssl \
import inject_into_urllib3, extract_from_urllib3
pyopenssl_override = True
except ImportError:
pyopenssl_override = False
class MyHTTPretty(OriginalHTTPretty):
""" pyopenssl monkey-patches the default ssl_wrap_socket() function in the 'requests' library,
but this can stop the HTTPretty socket monkey-patching from working for HTTPS requests.
Our version extends the base HTTPretty enable() and disable() implementations to undo
and redo the pyopenssl monkey-patching, respectively.
"""
@classmethod
def enable(cls):
OriginalHTTPretty.enable()
if pyopenssl_override:
# Take out the pyopenssl version - use the default implementation
extract_from_urllib3()
@classmethod
def disable(cls):
OriginalHTTPretty.disable()
if pyopenssl_override:
# Put the pyopenssl version back in place
inject_into_urllib3()
| python |
import contextlib
import random
import time
from sorting import (
bubble_sort,
selection_sort,
insertion_sort,
merge_sort,
)
@contextlib.contextmanager
def timeit(name):
start = time.time()
yield
end = time.time()
took = end - start
print(f"The {name} took {took:.4f}s")
def nearly_sorted_array(size):
array = [i for i in range(0, size + 1)]
for i in range(10, size, 10):
array[i], array[i - 1] = array[i - 1], array[i]
return array
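
# A small hedged illustration (not part of the original benchmark) of what nearly_sorted_array
# produces: every tenth element is swapped with its predecessor, so the array stays a permutation
# that is almost in order.
def _example_nearly_sorted_array():
    array = nearly_sorted_array(20)
    assert array[9] == 10 and array[10] == 9        # the pair around index 10 is swapped
    assert sorted(array) == list(range(21))         # but it is still a permutation of 0..20
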
if __name__ == '__main__':
number_of_items = 5001
normal_array = [random.randint(0, number_of_items)
for i in range(number_of_items)]
random.shuffle(normal_array)
nearly_sorted = nearly_sorted_array(number_of_items)
reversed_array = sorted(normal_array, reverse=True)
sorted_array = sorted(normal_array)
algorithms = {
#"bubble_sort": bubble_sort.sort,
#"selection_sort": selection_sort.sort,
"insertion_sort": insertion_sort.sort,
"merge_sort": merge_sort.sort,
}
print("Sorting random array")
print("-" * 50)
for name, sort in algorithms.items():
copy_array = list(normal_array)
with timeit(name):
sort(copy_array)
assert copy_array == sorted(normal_array)
print("\n\nSorting nearly sorted array")
print("-" * 50)
for name, sort in algorithms.items():
copy_array = list(nearly_sorted)
with timeit(name):
sort(copy_array)
assert copy_array == sorted(nearly_sorted)
print("\n\nSorting reversed sorted array")
print("-" * 50)
for name, sort in algorithms.items():
copy_array = list(reversed_array)
with timeit(name):
sort(copy_array)
assert copy_array == sorted(reversed_array)
| python |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from TreeNode import *
class Solution:
def recoverFromPreorder(self, S: str) -> TreeNode:
s = S.split("-") # s = ['1', '2', '', '3', '', '4', '5', '', '6', '', '7']
result = TreeNode(s[0])
s = s[1:]
left = []
right = []
state = False
for i, j in enumerate(s):
if j.isdigit():
if state:
# Another branch
left = s[:i]
right = s[i:]
break
else:
state = True
else:
state = False
if (not left and not right) and s:
left = s
# left = ['2', '', '3', '', '4']
# right = ['5', '', '6', '', '7']
left = ["-" if i == "" else i for i in left]
right = ["-" if i == "" else i for i in right]
left_s = "".join(left)
right_s = "".join(right)
# left_s = "2-3-4"
# right_s = "5-6-7"
if left_s != "":
result.left = self.recoverFromPreorder(left_s)
if right_s != "":
result.right = self.recoverFromPreorder(right_s)
return result | python |
# Generated by Django 3.0.2 on 2020-01-12 12:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newshows', '0002_setting_profile'),
]
operations = [
migrations.AddField(
model_name='setting',
name='addmonitored',
field=models.BooleanField(default=True),
),
]
| python |
import requests
import json
#Assignment Object
#Properties: TOKEN, id, name, description, created_at, updated_at, due_at
#Functions:
class Assignment:
def __init__(self, TOKEN, assignment_id, assignment_name, assignment_description, assignment_created_at, assignment_updated_at, assignment_due_at):
self.TOKEN = TOKEN
self.id = assignment_id
self.name = assignment_name
self.description = assignment_description
self.created_at = assignment_created_at
self.updated_at = assignment_updated_at
self.due_at = assignment_due_at
| python |