# Copyright 2016, FBPIC contributors
# Authors: Remi Lehe, Manuel Kirchen
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines the structure necessary to implement the moving window.
"""
from fbpic.utils.threading import njit_parallel, prange
# Check if CUDA is available, then import CUDA functions
from fbpic.utils.cuda import cuda_installed
if cuda_installed:
from fbpic.utils.cuda import cuda, cuda_tpb_bpg_2d
class MovingWindow(object):
"""
Class that contains the moving window's variables and methods
"""
def __init__( self, comm, dt, v, time ):
"""
Initializes a moving window object.
Parameters
----------
comm: a BoundaryCommunicator object
Contains information about the MPI decomposition
and about the longitudinal boundaries
dt: float
The timestep of the simulation.
v: float (meters per second), optional
The speed of the moving window
time: float (seconds)
The time (in the simulation) at which the moving
window was initialized
"""
# Check that the boundaries are open
if ((comm.rank == comm.size-1) and (comm.right_proc is not None)) \
or ((comm.rank == 0) and (comm.left_proc is not None)):
raise ValueError('The simulation is using a moving window, but '
'the boundaries are periodic.\n Please select open '
'boundaries when initializing the Simulation object.')
# Attach moving window speed
self.v = v
# Attach time of last move
self.t_last_move = time - dt
# Get the positions of the global physical domain
zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(
local=False, with_damp=False, with_guard=False )
# Attach reference position of moving window (only for the first proc)
# (Determines by how many cells the window should be moved)
if comm.rank == 0:
self.zmin = zmin_global_domain
def move_grids(self, fld, ptcl, comm, time):
"""
Calculate by how many cells the moving window should be moved.
If this is non-zero, shift the fields on the interpolation grid,
and increment the positions between which the continuously-injected
particles will be generated.
Parameters
----------
fld: a Fields object
Contains the fields data of the simulation
ptcl: a list of Particles objects
This is passed in order to increment the positions between
which the continuously-injected particles will be generated
comm: an fbpic BoundaryCommunicator object
Contains the information on the MPI decomposition
time: float (seconds)
The global time in the simulation
This is used in order to determine how much the window should move
"""
# To avoid discrepancies between processors, only the first proc
# decides whether to send the data, and broadcasts the information.
dz = comm.dz
if comm.rank==0:
# Move the continuous position of the moving window object
self.zmin += self.v * (time - self.t_last_move)
# Find the number of cells by which the window should move
zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(
local=False, with_damp=False, with_guard=False )
n_move = int( (self.zmin - zmin_global_domain)/dz )
else:
n_move = None
# Broadcast the information to all proc
if comm.size > 1:
n_move = comm.mpi_comm.bcast( n_move )
# Move the grids
if n_move != 0:
# Move the global domain
comm.shift_global_domain_positions( n_move*dz )
# Shift the fields
Nm = len(fld.interp)
for m in range(Nm):
# Modify the values of the corresponding z's
fld.interp[m].zmin += n_move*fld.interp[m].dz
fld.interp[m].zmax += n_move*fld.interp[m].dz
# Shift/move fields by n_move cells in spectral space
self.shift_spect_grid( fld.spect[m], n_move )
# Because the grids have just been shifted, there is a shift
# in the cell indices that are used for the prefix sum.
for species in ptcl:
if species.use_cuda:
species.prefix_sum_shift += n_move
# This quantity is reset to 0 whenever prefix_sum is recalculated
# Prepare the positions of injection for the particles
# (The actual creation of particles is done when the routine
# exchange_particles of boundary_communicator.py is called)
if comm.rank == comm.size-1:
for species in ptcl:
if species.continuous_injection:
# Increment the positions for the generation of particles
# (Particles are generated when `generate_particles` is called)
species.injector.increment_injection_positions(
self.v, time-self.t_last_move )
# Change the time of the last move
self.t_last_move = time
def shift_spect_grid( self, grid, n_move,
shift_rho=True, shift_currents=True ):
"""
Shift the spectral fields by n_move cells (with respect to the
spatial grid). Shifting is done either on the CPU or the GPU,
if use_cuda is True. (Typically n_move is positive, and the
fields are shifted backwards)
Parameters
----------
grid: a SpectralGrid corresponding to one given azimuthal mode
Contains the values of the fields in spectral space,
and is modified by this function.
n_move: int
The number of cells by which the grid should be shifted
shift_rho: bool, optional
Whether to also shift the charge density
Default: True, since rho is only recalculated from
scratch when the particles are exchanged
shift_currents: bool, optional
Whether to also shift the currents
Default: True. (The currents are recalculated from scratch
at each PIC cycle, so shifting them is not always required.)
"""
if grid.use_cuda:
shift = grid.d_field_shift
# Get a 2D CUDA grid of the size of the grid
tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )
# Shift all the fields on the GPU
shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )
if shift_rho:
shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )
if shift_currents:
shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )
else:
shift = grid.field_shift
# Shift all the fields on the CPU
shift_spect_array_cpu( grid.Ep, shift, n_move )
shift_spect_array_cpu( grid.Em, shift, n_move )
shift_spect_array_cpu( grid.Ez, shift, n_move )
shift_spect_array_cpu( grid.Bp, shift, n_move )
shift_spect_array_cpu( grid.Bm, shift, n_move )
shift_spect_array_cpu( grid.Bz, shift, n_move )
if shift_rho:
shift_spect_array_cpu( grid.rho_prev, shift, n_move )
if shift_currents:
shift_spect_array_cpu( grid.Jp, shift, n_move )
shift_spect_array_cpu( grid.Jm, shift, n_move )
shift_spect_array_cpu( grid.Jz, shift, n_move )
@njit_parallel
def shift_spect_array_cpu( field_array, shift_factor, n_move ):
"""
Shift the field 'field_array' by n_move cells on CPU.
This is done in spectral space and corresponds to multiplying the
fields with the factor exp(i*kz_true*dz)**n_move .
Parameters
----------
field_array: 2darray of complex numbers
Contains the values of the fields, and is modified by
this function
shift_factor: 1darray of complex numbers
Contains the shift array, which is multiplied with the fields in
spectral space to shift them by one cell in real space
( exp(i*kz_true*dz) )
n_move: int
The number of cells by which the grid should be shifted
"""
Nz, Nr = field_array.shape
# Loop over the 2D array (in parallel over z if threading is enabled)
for iz in prange( Nz ):
power_shift = 1. + 0.j
# Calculate the shift factor (raising to the power n_move ;
# for negative n_move, we take the complex conjugate, since
# shift_factor is of the form e^{i k dz})
for i in range( abs(n_move) ):
power_shift *= shift_factor[iz]
if n_move < 0:
power_shift = power_shift.conjugate()
# Shift the fields
for ir in range( Nr ):
field_array[iz, ir] *= power_shift
if cuda_installed:
@cuda.jit
def shift_spect_array_gpu( field_array, shift_factor, n_move ):
"""
Shift the field 'field_array' by n_move cells on the GPU.
This is done in spectral space and corresponds to multiplying the
fields with the factor exp(i*kz_true*dz)**n_move .
Parameters
----------
field_array: 2darray of complex numbers
Contains the values of the fields, and is modified by
this function
shift_factor: 1darray of complex numbers
Contains the shift array, which is multiplied with the fields in
spectral space to shift them by one cell in real space
( exp(i*kz_true*dz) )
n_move: int
The number of cells by which the grid should be shifted
"""
# Get a 2D CUDA grid
iz, ir = cuda.grid(2)
# Only access values that are actually in the array
if ir < field_array.shape[1] and iz < field_array.shape[0]:
power_shift = 1. + 0.j
# Calculate the shift factor (raising to the power n_move ;
# for negative n_move, we take the complex conjugate, since
# shift_factor is of the form e^{i k dz})
for i in range( abs(n_move) ):
power_shift *= shift_factor[iz]
if n_move < 0:
power_shift = power_shift.conjugate()
# Shift fields
field_array[iz, ir] *= power_shift
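# --- Illustrative-only check (not part of FBPIC) -------------------------------
# The kernels above implement the spectral shift theorem: multiplying the
# longitudinal Fourier transform of a field by exp(i*kz*dz) once per cell
# shifts the field backwards by that many cells. The guarded block below is a
# minimal 1D numpy sketch of that identity, independent of FBPIC's own grids;
# the array sizes and the test profile are arbitrary assumptions.
if __name__ == "__main__":
    import numpy as np
    Nz, dz, n_move = 64, 0.1, 3
    z = dz * np.arange(Nz)
    f = np.exp(-((z - 0.5 * Nz * dz) / (5 * dz)) ** 2)   # smooth test profile
    kz = 2 * np.pi * np.fft.fftfreq(Nz, d=dz)
    shift_factor = np.exp(1j * kz * dz)                  # one-cell shift factor
    f_shifted = np.fft.ifft(np.fft.fft(f) * shift_factor ** n_move).real
    # Shifting backwards by n_move cells is a circular roll by -n_move
    assert np.allclose(f_shifted, np.roll(f, -n_move))
    print("spectral shift matches np.roll by -n_move cells")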
|
python
|
from django.shortcuts import render
from swpp.models import Profile
from swpp.serializers import ProfileSerializer
from rest_framework import generics, mixins, permissions
from swpp.permissions import IsOwnerOrReadOnly
class ProfileList(generics.ListAPIView):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class ProfileDetails(generics.RetrieveUpdateAPIView):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
permission_classes = (IsOwnerOrReadOnly, )
def put(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
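# --- Illustrative routing sketch (not part of the original views module) --------
# The views above might be wired up in a urls.py roughly as follows; the module
# path and URL names are assumptions, not taken from this project.
#
#   from django.urls import path
#   from swpp import views
#
#   urlpatterns = [
#       path('profiles/', views.ProfileList.as_view(), name='profile-list'),
#       path('profiles/<int:pk>/', views.ProfileDetails.as_view(), name='profile-details'),
#   ]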
|
python
|
KIND = {
'JOB': 'job',
'DEPLOYMENT': 'deployment'
}
COMMAND = {
'DELETE': 'delete',
'CREATE': 'create'
}
|
python
|
# Copied from http://www.djangosnippets.org/snippets/369/
import re
import unicodedata
from htmlentitydefs import name2codepoint
from django.utils.encoding import smart_unicode, force_unicode
from slughifi import slughifi
def slugify(s, entities=True, decimal=True, hexadecimal=True, model=None, slug_field='slug', pk=None):
s = smart_unicode(s)
# we don't want a string > 40 characters
if len(s) > 40:
s = s[:40]
s = slughifi(s)
slug = s
if model:
# return unique slug for a model (appending integer counter)
def get_query():
query = model.objects.filter(**{ slug_field: slug })
if pk:
query = query.exclude(pk=pk)
return query
counter = 2
while get_query():
slug = "%s-%s" % (s, counter)
counter += 1
return slug
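# --- Illustrative usage sketch (not part of the original snippet) ---------------
# `Article` below is a hypothetical Django model with a `slug` field, used only
# to show the intended behaviour of the `model` / `pk` arguments.
#
#   slugify(u"Hello World!")                      # -> e.g. "hello-world"
#   slugify(title, model=Article)                 # unique slug, e.g. "my-title-2"
#   slugify(title, model=Article, pk=article.pk)  # excludes `article` from the check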
|
python
|
from typing import Optional
from typing import Tuple
import attr
@attr.s(auto_attribs=True)
class SlotAttentionParams:
# model configs
resolution: Tuple[int, int] = (128, 128) # since we are not using ViT
# Slot Attention module params
num_slots: int = 7 # at most 6 obj per image/video
# dim of slots embedding
slot_size: int = 64
num_iterations: int = 3
# MLP hidden size in Slot Attention
slot_mlp_size: int = 128 # FFN after cross attention
# whether treat bg slot separately
use_bg_sep_slot: bool = False
# setting about sem-pos separate model
use_sempos_sep: bool = True
# encoder params
# UNet as encoder
use_unet: bool = False
# StackedResBlocks as encoder
use_resnet: bool = False
# Conv encoder-decoder
out_features: int = 64
kernel_size: int = 5
enc_pos_size: int = 64 # number of dims for positional information
enc_channels: Tuple[int, ...] = (3, 64, 64, 64, 64)
enc_resolution: Tuple[int, int] = resolution # image size
enc_norm: str = ''
# decoder params
dec_pos_size: Optional[int] = None # if an int, concatenate positional info instead of adding it
dec_resolution: Tuple[int, int] = (8, 8)
dec_channels: Tuple[int, ...] = (64, 64, 64, 64, 64)
dec_norm: str = ''
# use self-entropy loss to masks
use_entropy_loss: bool = False
entropy_loss_w: float = 1e-3
# architecture of CLIP pre-trained model
use_clip_vision: bool = False
clip_arch: str = 'ViT-B/32'
clip_vision_channel: int = 64
clip_text_channel: int = 512
# Text2Slot model
text_encoder: str = 'clip'
context_len: int = 0
use_text2slot: bool = True
text2slot_arch: str = 'MLP' # or 'Transformer' or 'DETR'
# for MLP
text2slot_hidden_sizes: Tuple[int] = (512, )
normalize_slots: bool = True
# data
# data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/CLEVR_viewpoint_video_4obj"
# data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/CLEVR_viewpoint_video"
data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/clevr_video/train/"
shuffle_obj: bool = False
prompt: str = 'a {color} {shape}'
pad_text: str = 'background'
# Normalization for natural images vs. the original Slot Attention one
simple_normalize: bool = True # since we are not using ViT
center_crop: Optional[Tuple[int, int]] = None # e.g. (128, 128)
# training settings
gpus: int = 4
batch_size: int = 64 * 4
val_batch_size: int = 64 * 4
max_epochs: int = 16
num_sanity_val_steps: int = 1
num_train_images: Optional[int] = None
num_val_images: Optional[int] = None
is_logger_enabled: bool = True
is_verbose: bool = True
num_workers: int = 6
n_samples: int = 5
# optimization settings
cosine_decay: bool = True
lr: float = 0.0008
warmup_steps_pct: float = 0.025
decay_steps_pct: float = 0.2
scheduler_gamma: float = 0.5
weight_decay: float = 0.0
grad_clip_norm: float = 0.2
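# --- Illustrative usage sketch (not part of the original config file) -----------
# A minimal check that the attrs class above can be instantiated with keyword
# overrides; the chosen values are arbitrary assumptions.
if __name__ == '__main__':
    params = SlotAttentionParams(num_slots=5, gpus=1, batch_size=64, val_batch_size=64)
    print(params.resolution, params.num_slots, params.slot_size, params.lr)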
|
python
|
from verifai.simulators.car_simulator.examples.control_utils.LQR_computation import *
from verifai.simulators.car_simulator.simulator import *
from verifai.simulators.car_simulator.lane import *
from verifai.simulators.car_simulator.car_object import *
from verifai.simulators.car_simulator.client_car_sim import *
import numpy as np
from dotmap import DotMap
def controller(x_trajectory, u_trajectory, control_params):
x, y, v, theta = x_trajectory[-1]
wheelbase = control_params.wheelbase
a_star = control_params.a_star
v_star = control_params.v_star
control_freq = control_params.control_freq
dt = control_params.dt
Q = control_params.Q
R = control_params.R
A, B = extract_AB(speed=v, dt=dt, wheelbase=wheelbase)
if len(u_trajectory)%control_freq == 0:
a = a_star if np.linalg.norm(v - v_star) > 0.1 else 0.0
c = np.array([0.0, a * control_freq * dt, 0.0])
K, k = discrete_LQR(A, B, Q, R, c)
u = K.dot(np.array([[x],
[v - v_star],
[-theta+np.pi/2]])) + k
u = min(float(u), np.pi / 4.)
u = max(float(u), -np.pi / 4.)
control = np.array([u, a])
else:
control = u_trajectory[-1]
return control
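# --- Illustrative-only sketch (not part of the original example) ----------------
# The controller above delegates the gain computation to extract_AB /
# discrete_LQR imported from verifai's control_utils; their exact signatures are
# not reproduced here. The helper below is a generic discrete-time LQR gain
# obtained by iterating the Riccati equation (it ignores the affine offset term
# `c` that discrete_LQR also handles), shown only to clarify the kind of
# computation involved. numpy is already imported above as np.
def _lqr_gain_sketch(A, B, Q, R, n_iter=500):
    """Return K such that u = -K.dot(x) is the infinite-horizon LQR policy
    for x' = A x + B u with stage cost x'Qx + u'Ru (when the iteration converges)."""
    P = Q.copy()
    for _ in range(n_iter):
        BtP = B.T.dot(P)
        # K = (R + B'PB)^{-1} B'PA ;  P <- Q + A'PA - A'PB K
        K = np.linalg.solve(R + BtP.dot(B), BtP.dot(A))
        P = Q + A.T.dot(P).dot(A) - A.T.dot(P).dot(B).dot(K)
    return K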
def lanekeeping(sample, control_params, width=0.13):
x_init = sample.init_conditions.x_init[0]
theta_init = -sample.init_conditions.theta_init[0] + np.pi/2
v_init = 0.0
y_init = 0.0
x0 = np.array([x_init, y_init, v_init, theta_init])
a_star = control_params.a_star
u_domain = {'omega':[-np.pi/4, np.pi/4],
'acc':[-a_star, a_star]}
compute_control = lambda x, u: controller(x_trajectory=x, u_trajectory=u,
control_params=control_params)
car = bicycle_model(x0=x0, u_domain=u_domain, compute_control=compute_control,
wheelbase = control_params.wheelbase, dt = control_params.dt,
color='red')
lanes = []
lanes.append(straight_lane([0., -1.], [0., 1.], width))
lanes.append(lanes[0].shifted(1))
lanes.append(lanes[0].shifted(-1))
world = simulator_world(lanes=lanes, cars=[car])
sim = simulator(dt=control_params.dt, iters = 100, sprite_scale=control_params.sprite_scale,
window_size = control_params.window_size, magnify =0.25)
sim.init_simulator(world=world, task_name='Lane Keeping')
sim.run()
sim.exit_simulation()
traj_x, traj_y, _, _ = np.array(car.trajectory).T
data = {}
traj = {}
traj['xdeviation'] = [(j * control_params.dt, 0.5 - np.abs(v)) for j, v in enumerate(traj_x)]
return traj
def lanekeeping_simulator(sample):
print(sample)
control_params = DotMap()
control_params.wheelbase =2.995
control_params.a_star = 3.968
control_params.v_star = sample.init_conditions.cruising_speed[0]*5./18.
control_params.dt = 0.032
control_params.control_freq = 2
control_params.R = 50*np.identity(1)
control_params.Q= np.diag([100.0, 0.0, 5.0])
control_params.sprite_scale = 1/800
control_params.window_size = 800
width = 1.0
return lanekeeping(sample, control_params, width)
PORT = 8888
BUFSIZE = 4096
N_SIM_STEPS = 100
simulation_data = DotMap()
simulation_data.port = PORT
simulation_data.bufsize = BUFSIZE
simulation_data.simulation = lanekeeping_simulator
client_task = ClientCar(simulation_data)
while True:
if not client_task.run_client():
print("End of all simulations")
break
|
python
|
import numpy as np
from mesostat.metric.impl.mar import unstack_factor, rel_err
def predict(x, alpha, u, beta):
# return np.einsum('ai,ijk', alpha, x) + np.einsum('ai,ijk', beta, u)
return x.dot(alpha.T) + u.dot(beta.T)
def fit_mle(x, y, u):
# # Construct linear system for transition matrices
# M11 = np.einsum('ajk,bjk', x, y)
# M12 = np.einsum('ajk,bjk', x, x)
# M13 = np.einsum('ajk,bjk', x, u)
# M21 = np.einsum('ajk,bjk', u, y)
# M22 = M13.T #np.einsum('ajk,bjk', u, x)
# M23 = np.einsum('ajk,bjk', u, u)
# Construct linear system for transition matrices
# NOTE: In this form, trials and timesteps are concatenated, so there is no explicit trial dimension
M11 = x.T.dot(y)
M12 = x.T.dot(x)
M13 = x.T.dot(u)
M21 = u.T.dot(y)
M22 = M13.T
M23 = u.T.dot(u)
# Solve system
M12INV = np.linalg.inv(M12)
M23INV = np.linalg.inv(M23)
TMP11 = M11 - M13.dot(M23INV.dot(M21))
TMP12 = M12 - M13.dot(M23INV.dot(M22))
TMP21 = M21 - M22.dot(M12INV.dot(M11))
TMP22 = M23 - M22.dot(M12INV.dot(M13))
alpha = np.linalg.inv(TMP12).dot(TMP11).T
beta = np.linalg.inv(TMP22).dot(TMP21).T
return alpha, beta
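# --- Illustrative self-check (not part of the original module) ------------------
# fit_mle above solves the normal equations of the linear model
#   y = x.dot(alpha.T) + u.dot(beta.T)
# with trials/timesteps concatenated along the first axis. The guarded block
# below is a minimal sketch that verifies recovery of known coefficients on
# noiseless synthetic data; the sizes are arbitrary assumptions.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_obs, n_ch, n_src = 500, 4, 2            # samples, channels, input sources
    alpha_true = rng.normal(size=(n_ch, n_ch))
    beta_true = rng.normal(size=(n_ch, n_src))
    x = rng.normal(size=(n_obs, n_ch))
    u = rng.normal(size=(n_obs, n_src))
    y = predict(x, alpha_true, u, beta_true)  # exact, noiseless targets
    alpha_est, beta_est = fit_mle(x, y, u)
    assert np.allclose(alpha_est, alpha_true)
    assert np.allclose(beta_est, beta_true)
    print("fit_mle recovered alpha and beta on synthetic data")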
|
python
|
'''
The MIT License (MIT)
Copyright © 2021 Opentensor.ai
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the “Software”), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
'''
"""
DPN synapse
Bittensor endpoint trained on PIL images to detect objects using DPN.
"""
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from types import SimpleNamespace
from munch import Munch
import bittensor
from routers.pkm import PKMRouter
from bittensor.utils.batch_transforms import Normalize
class DPNSynapse(bittensor.synapse.Synapse):
""" Bittensor endpoint trained on PIL images to detect objects using an DPN.
"""
def __init__( self, config: Munch = None, **kwargs):
r""" Init a new DPN synapse module.
Args:
config (:obj: `munch.Munch`, `required`)
munch namespace config item.
"""
super(DPNSynapse, self).__init__(config = config, **kwargs)
if config is None:
config = DPNSynapse.default_config()
bittensor.config.Config.update_with_kwargs(config.synapse, kwargs)
DPNSynapse.check_config(config)
self.config = config
in_planes, out_planes = config.synapse.in_planes, config.synapse.out_planes
num_blocks, dense_depth = config.synapse.num_blocks, config.synapse.dense_depth
# Transform Network
""" Transform network.
Layers take in image inputs, normalize them, and apply
four convolutional layers.
Image encoder: transforms PIL-encoded tensors to a common shape.
[batch_size, channels, rows, cols] -> [batch_size, -1, -1, -1]
Output: [batch_size, self.transform_dim (9728)]
"""
self.transform = Normalize((0.1307,), (0.3081,), device=self.device)
self.adaptive_pool = nn.AdaptiveAvgPool2d((32, 32))
self.transform_conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.transform_bn1 = nn.BatchNorm2d(64)
self.last_planes = 64
self.transform_layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
self.transform_layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
self.transform_layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=1)
self.transform_layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
self.transform_dim = (out_planes[3] * 4)+(((num_blocks[3]+1) * 4)*dense_depth[3])
# dendrite: (PKM layer) queries network using pooled embeddings as context.
# [batch_size, -1] -> topk * [batch_size, bittensor.__network_dim__]
self.router = PKMRouter(config, query_dim = self.transform_dim)
# Context layers.
"""
Distillation model for remote context. This layer takes input
coming from transform layer, and runs it through 3 linear layers,
projecting it to bittensor.__network_dim__.
"""
self.context_layer1 = nn.Linear(self.transform_dim, 512)
self.context_layer2 = nn.Linear(512, 256)
self.context_layer3 = nn.Linear(256, bittensor.__network_dim__)
# hidden layer.
self.hidden_layer1 = nn.Linear(self.transform_dim + bittensor.__network_dim__, 512)
self.hidden_layer2 = nn.Linear(512, 256)
self.hidden_layer3 = nn.Linear(256, bittensor.__network_dim__)
# Layers to project target down to target size passed by config
# (number of classes)
self.target_layer1 = nn.Linear(bittensor.__network_dim__, 128)
self.target_layer2 = nn.Linear(128, self.config.synapse.target_dim)
self.to(self.device)
@staticmethod
def default_config() -> Munch:
parser = argparse.ArgumentParser()
DPNSynapse.add_args(parser)
config = bittensor.config.Config.to_config(parser)
return config
@staticmethod
def add_args(parser: argparse.ArgumentParser):
r""" This function adds the configuration items for the DPN synapse.
These args are use to instantiate a Dual Path model.
Instantiating a configuration with the defaults will yield a "shallow" DPN-26 configuration.
For deeper network configurations, it is possible to set the num_blocks parameter to (3, 4, 20, 3) for a
DPN-92.
For DPN-98 set the following:
in_planes: (160, 320, 640, 1280)
out_planes: (256, 512, 1024, 2048)
num_blocks: (3, 6, 20, 3)
dense_depth: (16, 32, 32, 128)
"""
def to_list(arg):
return [int(i) for i in arg.split(",")]
parser.add_argument('--synapse.in_planes', default='160, 320, 640, 1280', action="append", type=to_list)
parser.add_argument('--synapse.out_planes', default='256, 512, 1024, 2048', action="append", type=to_list)
parser.add_argument('--synapse.num_blocks', default='3, 6, 20, 3', action="append", type=to_list)
parser.add_argument('--synapse.dense_depth', default='16, 32, 32, 128', action="append", type=to_list)
parser.add_argument('--synapse.target_dim', default=10, type=int, help='Final logit layer dimension. i.e. 10 for CIFAR-10.')
parser = PKMRouter.add_args(parser)
@staticmethod
def check_config(config: Munch):
assert isinstance(config.synapse.in_planes, list), 'synapse.in_planes must be a list, got {}'.format(config.synapse.in_planes)
assert isinstance(config.synapse.out_planes, list), 'synapse.out_planes must be a list, got {}'.format(config.synapse.out_planes)
assert isinstance(config.synapse.num_blocks, list), 'synapse.num_blocks must be a list, got {}'.format(config.synapse.num_blocks)
assert isinstance(config.synapse.dense_depth, list), 'synapse.dense_depth must be a list, got {}'.format(config.synapse.dense_depth)
assert all(isinstance(el, int) for el in config.synapse.in_planes), 'synapse.in_planes must be a list of ints, got {}'.format(config.synapse.in_planes)
assert all(isinstance(el, int) for el in config.synapse.out_planes), 'synapse.out_planes must be a list of ints, got {}'.format(config.synapse.out_planes)
assert all(isinstance(el, int) for el in config.synapse.num_blocks), 'synapse.num_blocks must be a list of ints, got {}'.format(config.synapse.num_blocks)
assert all(isinstance(el, int) for el in config.synapse.dense_depth), 'synapse.dense_depth must be a list of ints, got {}'.format(config.synapse.dense_depth)
def forward_image ( self, images: torch.Tensor):
r""" Forward image inputs through the DPN synapse .
Args:
images (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_dim, channels, rows, cols)`, `required`):
Image tensors produced by calling PIL.toTensor() and with sequence dimension.
Returns:
hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_dim, bittensor.__network_dim__)`, `required`):
Hidden layer encoding produced by using local_context.
"""
# images: remove sequence dimension from images.
# images.shape = [batch_size, channels, rows, cols]
images = images.view(images.shape[0] * images.shape[1], images.shape[2], images.shape[3], images.shape[4])
# hidden: hidden layer using local context for local computation only.
# hidden.shape = [batch_size, __network_dim__]
hidden = self.forward (images = images.to(self.device), remote = False).local_hidden
# hidden: re-add sequence dimension to outputs.
# hidden.shape = [batch_size, sequence_dim, __network_dim__]
hidden = torch.unsqueeze(hidden, 1)
return hidden
def local_forward ( self, images: torch.Tensor, targets: torch.Tensor = None ) -> SimpleNamespace:
r""" Forward pass non-sequential image inputs and targets through the DPN Synapse.
Args:
images (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, channels, rows, cols)`, `required`):
PIL.toTensor() encoded images.
targets (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.target_size)`, `optional`):
Image labels.
Returns:
SimpleNamespace (
local_context (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `required`):
Pre-Hidden layer context, trained to match the remote context.
local_hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `required`):
Hidden layer produced from the context.
local_target (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_dim)`, `optional`):
FFNN Target predictions using local_context.
local_target_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
FFNN Classification loss using local_context.
local_accuracy (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
Accuracy of target predictions.
transform (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, transform_dim)`, `optional`):
transformation of various sized images to batch-size transform dim.
)
"""
# Return vars to be filled.
output = SimpleNamespace ()
r"""
Transform the images into a common shape (32x32)
"""
# transform: transform images to common shape.
# transform.shape = [batch_size, self.transform_dim]
transform = self.transform(images)
transform = self.adaptive_pool(transform)
transform = F.relu(self.transform_bn1(self.transform_conv1(transform.detach())))
transform = self.transform_layer1(transform)
transform = self.transform_layer2(transform)
transform = self.transform_layer3(transform)
transform = self.transform_layer4(transform)
transform = F.avg_pool2d(transform, 4)
output.transform = torch.flatten(transform, start_dim=1)
# local_context: distillation model for remote_context.
# local_context.shape = [batch_size, bittensor.__network_dim__]
local_context = self.context_layer1(output.transform.detach())
local_context = self.context_layer2(local_context)
output.local_context = self.context_layer3(local_context)
# local_hidden: hidden layer encoding using local_context.
# local_hidden.shape = [batch_size, bittensor.__network_dim__]
local_hidden = torch.cat([output.transform, output.local_context], dim=1)
local_hidden = self.hidden_layer1(local_hidden)
local_hidden = self.hidden_layer2(local_hidden)
output.local_hidden = self.hidden_layer3(local_hidden)
if targets is not None:
# local_target: projection of local_hidden onto target dimension.
# local_target.shape = [batch_size, target_dim]
targets = targets.to(self.device)
local_target = self.target_layer1(output.local_hidden)
local_target = self.target_layer2(local_target)
output.local_target = F.log_softmax(local_target, dim=1)
# local_target_loss: loss between local_target and passed targets.
# local_target_loss.shape = [1]
output.local_target_loss = F.nll_loss(output.local_target, targets)
# Record extra metadata accuracy.
max_logit = local_target.data.max(1, keepdim=True)[1]
correct = max_logit.eq( targets.data.view_as(max_logit) ).sum()
output.local_accuracy = (100.0 * correct) / targets.shape[0]
return output
def remote_forward(self, neuron: bittensor.neuron.Neuron, images: torch.Tensor, targets: torch.Tensor = None) -> SimpleNamespace:
"""
Forward pass non-sequential image inputs and targets through the synapse. Makes RPC queries to downstream neurons.
Args:
neuron (:obj: `bittensor.neuron.Neuron`, `required`):
Bittensor neuron, used for making queries to the remote network.
images (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, channels, rows, cols)`, `required`):
PIL.toTensor() encoded images.
targets (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_dim)`, `optional`, defaults to None):
Image labels.
Returns:
self.local_forward() + SimpleNamespace (
router (:obj:`SimpleNamespace`, `required`):
Outputs from the pkm dendrite remote call.
distillation_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
Distillation loss between the local and remote context.
remote_hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `optional`):
Hidden layer encoding produced using the remote context.
remote_target (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_dim)`, `optional`):
FFNN Target predictions using the remote_context.
remote_target_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
FFNN Classification loss using the remote_context.
)
"""
# Call the local forward pass.
# output = bittensor.SynapseOutput
output = self.local_forward( images, targets )
# Make remote queries using the PKMRouter.
# remote_context: responses from a bittensor remote network call.
# remote_context.shape = [batch_size, bittensor.__network_dim__]
images = torch.unsqueeze(images, 1)
output.router = self.router.forward_image( neuron, images, output.transform )
remote_context = torch.squeeze( output.router.response, 1 ).to(self.device)
# Distill the local context to match the remote context.
# distillation_loss: distillation loss between local_context and remote_context
# distillation_loss.shape = [1]
output.distillation_loss = F.mse_loss(output.local_context, remote_context.detach() )
# remote_hidden: hidden layer encoding using remote_context.
# remote_hidden.shape = [batch_size, bittensor.__network_dim__]
remote_hidden = torch.cat([output.transform, remote_context], dim=1)
remote_hidden = self.hidden_layer1(remote_hidden)
remote_hidden = self.hidden_layer2(remote_hidden)
output.remote_hidden = self.hidden_layer3(remote_hidden)
if targets is not None:
# remote_target: projection of remote_hidden onto target dimension.
# remote_target.shape = [batch_size, config.target_size]
remote_target = self.target_layer1(output.remote_hidden)
remote_target = self.target_layer2(remote_target)
output.remote_target = F.log_softmax(remote_target, dim=1)
# remote_target_loss: loss between remote_target and passed targets.
# remote_target_loss.shape = [1]
output.remote_target_loss = F.nll_loss(output.remote_target, targets)
return output
def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
""" Generates a sequential container containing Bottleneck layers.
Args:
in_planes (tuple):
4-element tuple describing the in_planes config.
out_planes (tuple):
4-element tuple describing the out_planes config.
num_blocks (tuple):
4-element tuple describing the number of blocks at this layer.
dense_depth (tuple):
4-element tuple describing the depth of this layer.
stride (int):
Convolutional stride length.
Returns:
nn.Sequential: A torch.nn sequential container containing the layers outlined in the inputs.
"""
strides = [stride] + [1]*(num_blocks-1)
layers = []
for i,stride in enumerate(strides):
layers.append(self.Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
self.last_planes = out_planes + (i+2) * dense_depth
return nn.Sequential(*layers)
class Bottleneck(nn.Module):
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
super(DPNSynapse.Bottleneck, self).__init__()
self.out_planes = out_planes
self.dense_depth = dense_depth
self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)
self.shortcut = nn.Sequential()
if first_layer:
self.shortcut = nn.Sequential(
nn.Conv2d(last_planes, out_planes + dense_depth, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_planes + dense_depth)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
x = self.shortcut(x)
d = self.out_planes
out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1)
out = F.relu(out)
return out
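# --- Illustrative-only sketch (not part of the original synapse) ----------------
# The Bottleneck above implements the "dual path": the first `out_planes`
# channels form a residual path (x + out), while the remaining channels of both
# tensors are concatenated as a densely-connected path. The guarded block below
# only checks that channel bookkeeping with dummy tensors (torch is imported at
# the top of this file); the sizes are arbitrary assumptions.
if __name__ == "__main__":
    out_planes, dense_depth, extra_dense = 8, 4, 4
    x = torch.randn(2, out_planes + extra_dense, 16, 16)    # shortcut output
    out = torch.randn(2, out_planes + dense_depth, 16, 16)  # conv-branch output
    d = out_planes
    merged = torch.cat([x[:, :d] + out[:, :d], x[:, d:], out[:, d:]], 1)
    # residual channels (d) + dense channels carried over + new dense channels
    assert merged.shape[1] == d + extra_dense + dense_depth
    print("dual-path merge channels:", merged.shape[1])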
|
python
|
from srf.io.listmode import save_h5
import numpy as np
data = np.fromfile("normal_scan_true.txt", dtype=np.float32).reshape(-1,7)
result = {'fst': data[:, :3], 'snd': data[:, 3:6], 'weight': np.ones_like(data[:,0])}
save_h5('input.h5', result)
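# --- Illustrative note (not an original comment) ---------------------------------
# Assumed layout of the 7-column listmode file, based on the slicing above:
# columns 0-2 become 'fst', columns 3-5 become 'snd', and column 6 is discarded
# in favour of unit weights. Adjust the slices if the actual file layout differs.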
|
python
|
from flask import Flask
from . import api, web
app = Flask(
__name__,
static_url_path='/assets',
static_folder='static',
template_folder='templates')
app.config['SECRET_KEY'] = 'secret' # this is fine if running locally
app.register_blueprint(api.bp)
app.register_blueprint(web.bp)
|
python
|
default_app_config = 'user_deletion.apps.UserDeletionConfig'
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Copyright 2012-2021 Smartling, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from smartlingApiSdk.ApiV2 import ApiV2
class JobBatchesV2Api(ApiV2):
def __init__(self, userIdentifier, userSecret, projectId, proxySettings=None, permanentHeaders={}, env='prod'):
ApiV2.__init__(self, userIdentifier, userSecret, projectId, proxySettings, permanentHeaders=permanentHeaders, env=env)
def createJobBatchV2(self, authorize, translationJobUid, fileUris, localeWorkflows, **kwargs):
"""
method : POST
api url : /job-batches-api/v2/projects/{projectId}/batches
as curl : curl -X POST "https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches" -H "Authorization: Bearer $smartlingToken" -H "Content-Type: application/json" -d '{"translationJobUid": "$translationJobUid", "authorize": true, "fileUris": ["example.json", "test.xml"]}'
Responses:
200 : OK
404 : provided translationJobUid is not found in the TMS
details : https://api-reference.smartling.com/#operation/createJobBatchV2
"""
kw = {
'authorize':authorize,
'translationJobUid':translationJobUid,
'fileUris':fileUris,
'localeWorkflows':localeWorkflows,
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches', **kwargs)
response, status = self.commandJson('POST', url, kw)
return response, status
def getJobBatchesListV2(self, translationJobUid='', status='', sortBy='createdDate', orderBy='desc', offset=0, limit=20, **kwargs):
"""
method : GET
api url : /job-batches-api/v2/projects/{projectId}/batches
as curl : curl -X GET 'https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches?translationJobUid={translationJobUid}&status={status}&sortBy=createdDate&orderBy=desc&offset=0&limit=20' -H "Authorization: Bearer $smartlingToken"
Responses:
200 : OK
details : https://api-reference.smartling.com/#operation/getJobBatchesListV2
"""
kw = {
'translationJobUid':translationJobUid,
'status':status,
'sortBy':sortBy,
'orderBy':orderBy,
'offset':offset,
'limit':limit,
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches', **kwargs)
response, status = self.command('GET', url, kw)
return response, status
def getJobBatchStatusV2(self, batchUid, **kwargs):
"""
method : GET
api url : /job-batches-api/v2/projects/{projectId}/batches/{batchUid}
Responses:
200 : OK
404 : Batch provided in path is not found
details : https://api-reference.smartling.com/#operation/getJobBatchStatusV2
"""
kw = {
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches/{batchUid}', batchUid=batchUid, **kwargs)
response, status = self.command('GET', url, kw)
return response, status
def processBatchActionV2(self, batchUid, action, fileUri, reason, **kwargs):
"""
method : PUT
api url : /job-batches-api/v2/projects/{projectId}/batches/{batchUid}
as curl : curl -X PUT 'https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches/$batchUid' -H "Authorization: Bearer $smartlingToken" -H "Content-Type: application/json" -d '{ "action": "CANCEL_FILE", "fileUri": "file1.xml", "reason": "Requested asset doesn't exist in Zendesk" }'
Responses:
200 : SUCCESS
404 : Batch provided in path is not found
details : https://api-reference.smartling.com/#operation/processBatchActionV2
"""
kw = {
'action':action,
'fileUri':fileUri,
'reason':reason,
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches/{batchUid}', batchUid=batchUid, **kwargs)
response, status = self.commandJson('PUT', url, kw)
return response, status
def uploadFileToJobBatchV2(self, batchUid, file, fileUri, fileType, authorize=False, localeIdsToAuthorize=[], callbackUrl='', directives={}, **kwargs):
"""
method : POST
api url : /job-batches-api/v2/projects/{projectId}/batches/{batchUid}/file
as curl : curl -X POST 'https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches/{batchUid}/file' -H "Authorization: Bearer $smartlingToken" -F "[email protected];type=text/plain" -F "fileUri=file.properties" -F "fileType=javaProperties" -F "localeIdsToAuthorize[]=fr-FR" -F "localeIdsToAuthorize[]=ru-RU"
Responses:
202 : ACCEPTED
404 : Batch provided in path is not found
details : https://api-reference.smartling.com/#operation/uploadFileToJobBatchV2
"""
kw = {
'file':self.processFile(file),
'fileUri':fileUri,
'fileType':fileType,
'authorize':authorize,
'localeIdsToAuthorize':localeIdsToAuthorize,
'callbackUrl':callbackUrl,
}
self.addLibIdDirective(kw)
self.processDirectives(kw, directives)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches/{batchUid}/file', batchUid=batchUid)
return self.uploadMultipart(url, kw)
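# --- Illustrative usage sketch (not part of the SDK file) ------------------------
# The guarded block below shows how this class might be called; the credentials,
# project id, job uid and locale workflows are placeholders, not real values.
if __name__ == "__main__":
    api = JobBatchesV2Api(
        userIdentifier="<userIdentifier>",
        userSecret="<userSecret>",
        projectId="<projectId>",
    )
    response, status = api.createJobBatchV2(
        authorize=True,
        translationJobUid="<translationJobUid>",
        fileUris=["example.json", "test.xml"],
        localeWorkflows=[],
    )
    print(status, response)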
|
python
|
# ===============================================================================
# NAME: SerialHVisitor.py
#
# DESCRIPTION: A visitor responsible for the generation of header file
# for each serializable class.
#
# AUTHOR: reder
# EMAIL: [email protected]
# DATE CREATED : June 4, 2007
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import datetime
import logging
import os
import sys
from getpass import getuser
from fprime_ac.generators import formatters
# from fprime_ac.utils import DiffAndRename
from fprime_ac.generators.visitors import AbstractVisitor
#
# Python extension modules and custom interfaces
#
# from Cheetah import Template
# from fprime_ac.utils import version
from fprime_ac.utils import ConfigManager, DictTypeConverter
#
# Import precompiled templates here
#
try:
from fprime_ac.generators.templates.serialize import SerialHeader
from fprime_ac.generators.templates.serialize import SerialImport
from fprime_ac.generators.templates.serialize import SerialBody
except ImportError:
print("ERROR: must generate python templates first.")
sys.exit(-1)
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
typelist = ["U8", "I8", "U16", "I16", "U32", "I32", "U64", "I64", "F32", "F64", "bool"]
#
# Module class or classes go here.
class SerializableVisitor(AbstractVisitor.AbstractVisitor):
"""
A visitor class responsible for generation of component header
classes in C++.
"""
__instance = None
__config = None
__fp = None
__form = None
__form_comment = None
def __init__(self):
"""
Constructor.
"""
super().__init__()
self.__config = ConfigManager.ConfigManager.getInstance()
self.__form = formatters.Formatters()
self.__form_comment = formatters.CommentFormatters()
DEBUG.info("SerializableVisitor: Instanced.")
self.bodytext = ""
self.prototypetext = ""
def _get_args_string(self, obj):
"""
Return a string of (type, name) args, comma separated
for use in templates that generate prototypes.
"""
arg_str = ""
for (name, mtype, size, format, comment) in obj.get_members():
if isinstance(mtype, tuple):
arg_str += "{} {}, ".format(mtype[0][1], name)
elif mtype == "string":
arg_str += "const {}::{}String& {}, ".format(obj.get_name(), name, name)
elif mtype not in typelist:
arg_str += "const {}& {}, ".format(mtype, name)
elif size is not None:
arg_str += "const {}* {}, ".format(mtype, name)
arg_str += "NATIVE_INT_TYPE %sSize, " % (name)
else:
arg_str += "{} {}".format(mtype, name)
arg_str += ", "
arg_str = arg_str.strip(", ")
return arg_str
def _get_conv_mem_list(self, obj):
"""
Return a list of port argument tuples
"""
arg_list = list()
for (name, mtype, size, format, comment) in obj.get_members():
typeinfo = None
if isinstance(mtype, tuple):
mtype = mtype[0][1]
typeinfo = "enum"
elif mtype == "string":
mtype = "{}::{}String".format(obj.get_name(), name)
typeinfo = "string"
elif mtype not in typelist:
typeinfo = "extern"
arg_list.append((name, mtype, size, format, comment, typeinfo))
return arg_list
def _get_enum_string_list(self, enum_list):
""""""
enum_tuple = enum_list[0]
enum_list = enum_list[1]
enum_str_list = []
for e in enum_list:
# No value, No comment
if (e[1] is None) and (e[2] is None):
s = "%s," % (e[0])
# No value, With comment
elif (e[1] is None) and (e[2] is not None):
s = "{}, // {}".format(e[0], e[2])
# With value, No comment
elif (e[1] is not None) and (e[2] is None):
s = "{} = {},".format(e[0], e[1])
# With value and comment
elif (e[1] is not None) and (e[2] is not None):
s = "%s = %s, // %s" % (e)
else:
pass
enum_str_list.append(s)
enum_str_list[-1] = enum_str_list[-1].replace(",", "")
return (enum_tuple, enum_str_list)
def _writeTmpl(self, c, visit_str):
"""
Wrapper to write tmpl to files desc.
"""
DEBUG.debug("SerializableVisitor:%s" % visit_str)
DEBUG.debug("===================================")
DEBUG.debug(c)
self.__fp.writelines(c.__str__())
DEBUG.debug("===================================")
def initFilesVisit(self, obj):
"""
Defined to generate files for generated code products.
@param obj: the instance of the concrete element to operate on.
"""
# Build filename here...
# file location will be based on namespace
namespace = obj.get_namespace()
dict_dir = os.environ["DICT_DIR"]
if namespace is None:
output_dir = "%s/serializable/" % (dict_dir)
else:
output_dir = "{}/serializable/{}".format(
dict_dir, namespace.replace("::", "/")
)
# make directory
if not (os.path.isdir(output_dir)):
os.makedirs(output_dir)
pyfile = output_dir + "/" + obj.get_name() + ".py"
# make empty __init__.py
open("{}/{}".format(output_dir, "__init__.py"), "w").close()
# Open file for writing here...
DEBUG.info("Open file: %s" % pyfile)
self.__fp = open(pyfile, "w")
if self.__fp is None:
raise Exception("Could not open %s file." % pyfile)
DEBUG.info("Completed")
def startSourceFilesVisit(self, obj):
"""
Defined to generate header for command python class.
@param obj: the instance of the command model to visit.
"""
c = SerialHeader.SerialHeader()
d = datetime.datetime.now()
c.date = d.strftime("%A, %d %B %Y")
c.user = getuser()
c.source = obj.get_xml_filename()
self._writeTmpl(c, "startSourceFilesVisit")
def includes1Visit(self, obj):
"""
Defined to generate includes within a file.
Usually used for the base classes but also for Serial types
@param args: the instance of the concrete element to operate on.
"""
c = SerialImport.SerialImport()
self._writeTmpl(c, "includes1Visit")
def includes2Visit(self, obj):
pass
def namespaceVisit(self, obj):
pass
def publicVisit(self, obj):
"""
Defined to generate public stuff within a class.
@param args: the instance of the concrete element to operate on.
"""
c = SerialBody.SerialBody()
c.name = obj.get_name()
c.mem_list = list()
for (n, t, s, f, comment) in obj.get_members():
# convert XML types to Python classes
(
type_string,
dontcare,
type_name,
use_size,
) = DictTypeConverter.DictTypeConverter().convert(t, s)
if type_name == "enum":
format_string = DictTypeConverter.DictTypeConverter().format_replace(
f, 0, "d", "s"
)
# check for an error
if format_string is None:
PRINT.info(
'Member %s in serializable %s had error processing format specifier "%s"'
% (n, c.name, f)
)
sys.exit(-1)
else:
f = format_string
c.mem_list.append((n, type_string, f, int(s) if use_size else 1))
self._writeTmpl(c, "publicVisit")
def protectedVisit(self, obj):
pass
def privateVisit(self, obj):
pass
def finishSourceFilesVisit(self, obj):
self.__fp.close()
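# --- Illustrative note (not part of the original visitor) ------------------------
# The generator framework presumably drives this visitor through its hooks in
# roughly the order they are defined above:
#   initFilesVisit -> startSourceFilesVisit -> includes1Visit -> includes2Visit
#   -> namespaceVisit -> publicVisit -> protectedVisit -> privateVisit
#   -> finishSourceFilesVisit
# Only initFilesVisit, startSourceFilesVisit, includes1Visit, publicVisit and
# finishSourceFilesVisit emit output here; the remaining hooks are no-ops.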
|
python
|
import time
import os
import binascii
import re
from datetime import datetime
from bson.json_util import dumps, loads
from flask.helpers import get_template_attribute
from flask import render_template
from init import app, rdb
from utils.jsontools import *
from utils.dbtools import makeUserMeta
from db import tagdb, db
from utils.crypto import *
from utils.exceptions import UserError
from bson import ObjectId
import redis_lock
from config import UserConfig
from utils.logger import log, log_ne
from services.tcb import filterOperation
from services.emailSender import send_noreply
from services.comment import listThread
def query_user_basic_info(uid) :
obj = db.users.find_one({"_id": ObjectId(uid)})
if obj is None :
return None
return obj['profile']
def verify_session(sid, stype) :
session_obj = loads(rdb.get(sid).decode('utf-8'))
if isinstance(stype, list) :
ret = session_obj['type'] in stype
else :
ret = session_obj['type'] == stype
return ret, session_obj
def login_auth_qq(openid, nickname) :
user_obj = db.users.find_one({'profile.openid_qq': openid})
if user_obj is not None :
sid, _ = do_login(user_obj)
return True, sid
else :
reg_sid = require_session('LOGIN_OR_SIGNUP_OPENID_QQ', openid_qq = openid)
return False, reg_sid
def bind_qq_openid(user, openid) :
binded_user = db.users.find_one({'profile.openid_qq': openid})
if binded_user is not None :
if str(binded_user['_id']) == str(user['_id']) :
return True
else :
return False
db.users.update_one({'_id': ObjectId(user['_id'])}, {'$set': {'profile.openid_qq': openid}})
return True
def require_session(session_type, **kwargs) :
# TODO: add challenge code to redis
if session_type not in ['LOGIN', 'SIGNUP', 'LOGIN_OR_SIGNUP_OPENID_QQ'] :
raise UserError('INCORRECT_SESSION_TYPE')
sid = binascii.hexlify(bytearray(random_bytes(16))).decode()
session_obj = {
'type': session_type,
'openid_qq': kwargs['openid_qq'] if session_type == 'LOGIN_OR_SIGNUP_OPENID_QQ' else ''
}
rdb.set(sid, dumps(session_obj), ex = UserConfig.SESSION_EXPIRE_TIME)
log(obj = {'sid': sid})
return sid
def logout(redis_user_key) :
common_user_obj = rdb.get(redis_user_key)
log(obj = {'redis_user_key': redis_user_key, 'user': common_user_obj})
rdb.delete(redis_user_key)
def do_login(user_obj) :
user_id = str(user_obj['_id'])
redis_user_key_lookup_key = f"user-{user_id}"
redis_user_key = rdb.get(redis_user_key_lookup_key)
logged_in = False
if redis_user_key :
# user already logged in on some other machines
redis_user_obj_json_str = rdb.get(redis_user_key)
if redis_user_obj_json_str :
logged_in = True
# reset expire time
rdb.set(redis_user_key, redis_user_obj_json_str, ex = UserConfig.LOGIN_EXPIRE_TIME)
rdb.set(redis_user_key_lookup_key, redis_user_key, ex = UserConfig.LOGIN_EXPIRE_TIME)
if logged_in :
profile = user_obj['profile']
profile['access_control_status'] = user_obj['access_control']['status']
return redis_user_key, profile
openid_qq = user_obj['profile']['openid_qq'] if 'openid_qq' in user_obj['profile'] else None
common_user_obj = {
'_id': user_obj['_id'],
'profile': {
'uid': str(user_obj['_id']),
'username': user_obj['profile']['username'],
'image': user_obj['profile']['image'],
'desc': user_obj['profile']['desc'],
'email': user_obj['profile']['email'],
'bind_qq': True if openid_qq else False
},
'access_control': user_obj['access_control'],
'settings': user_obj['settings']
}
redis_user_value = dumps(common_user_obj)
redis_user_key = binascii.hexlify(bytearray(random_bytes(16))).decode()
redis_user_key_lookup_key = f"user-{user_obj['_id']}"
rdb.set(redis_user_key, redis_user_value, ex = UserConfig.LOGIN_EXPIRE_TIME)
rdb.set(redis_user_key_lookup_key, redis_user_key, ex = UserConfig.LOGIN_EXPIRE_TIME)
log(obj = {'redis_user_key': redis_user_key, 'user': common_user_obj})
profile = common_user_obj['profile']
profile['access_control_status'] = user_obj['access_control']['status']
return redis_user_key, profile
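# --- Illustrative note (not part of the original service) ------------------------
# do_login keeps two redis entries per logged-in user, so repeated logins from
# different machines reuse one shared session token:
#   "user-<uid>"      -> <redis_user_key>          (per-user lookup key)
#   <redis_user_key>  -> dumps(common_user_obj)    (the actual session payload)
# Both entries are written with LOGIN_EXPIRE_TIME and refreshed together on a
# repeat login. The key names above summarize the scheme used in the code; the
# angle-bracket values are placeholders, not literal stored values.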
def unbind_qq(user) :
def updater(obj) :
obj['profile']['bind_qq'] = False
return obj
db.users.update_one({'_id': ObjectId(user['_id'])}, {'$set': {'profile.openid_qq': ''}})
_updateUserRedisValue(user['_id'], updater)
# we allow the same user to log in multiple times, and all of their login sessions are valid
def login(username, password, challenge, login_session_id) :
log(obj = {'username': username, 'challenge': challenge, 'login_session_id': login_session_id})
if len(username) > UserConfig.MAX_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_LONG')
if len(username) < UserConfig.MIN_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_SHORT')
if len(password) > UserConfig.MAX_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_LONG')
if len(password) < UserConfig.MIN_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_SHORT')
session_verified, session_obj = verify_session(login_session_id, ['LOGIN', 'LOGIN_OR_SIGNUP_OPENID_QQ'])
if session_verified :
user_obj = db.users.find_one({'profile.username': username})
if not user_obj :
user_obj = db.users.find_one({'profile.email': username.lower()})
if not user_obj :
log(level = 'SEC', obj = {'msg': 'USER_NOT_EXIST'})
raise UserError('INCORRECT_LOGIN')
crypto_method = user_obj['crypto']['crypto_method']
if crypto_method == 'PBKDF2' :
if not verify_password_PBKDF2(password, user_obj['crypto']['salt1'], user_obj['crypto']['password_hashed']) :
log(level = 'SEC', obj = {'msg': 'WRONG_PASSWORD'})
raise UserError('INCORRECT_LOGIN')
# update crypto to Argon2
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(password)
db.users.update_one({'_id': user_obj['_id']}, {'$set': {'crypto': {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}}})
elif crypto_method == 'Argon2' :
if not verify_password_Argon2(password, user_obj['crypto']['salt1'], user_obj['crypto']['password_hashed']) :
log(level = 'SEC', obj = {'msg': 'WRONG_PASSWORD'})
raise UserError('INCORRECT_LOGIN')
# bind QQ OpenID if present
if session_obj['type'] == 'LOGIN_OR_SIGNUP_OPENID_QQ' :
openid_qq = session_obj['openid_qq']
bind_qq_openid(user_obj, openid_qq)
return do_login(user_obj)
raise UserError('INCORRECT_SESSION')
def query_user_batch(uids) :
uids = [ObjectId(i) for i in uids]
return list(db.users.aggregate([
{'$match': {'_id': {'$in': uids}}},
{'$project': {'profile.username': 1, 'profile.desc': 1, 'profile.image': 1, '_id': 1}}
]))
def query_user(uid) :
try :
obj = db.users.find_one({'_id': ObjectId(uid)})
del obj['access_control']
del obj['crypto']
del obj['settings']
if 'email' in obj['profile'] and obj['profile']['email'] :
em: str = obj['profile']['email']
gravatar = md5(em.strip().lower())
obj['profile']['gravatar'] = gravatar
del obj['profile']['email']
if 'openid_qq' in obj['profile'] :
del obj['profile']['openid_qq']
except :
raise UserError('USER_NOT_EXIST')
return obj
def queryBlacklist(user, language) :
if 'blacklist' in user['settings'] :
if isinstance(user['settings']['blacklist'], list) :
return tagdb.translate_tag_ids_to_user_language(user['settings']['blacklist'], language)[0]
else :
return 'default'
else :
return 'default'
def queryUsername(username) :
user_obj_find = db.users.find_one({'profile.username': username})
if user_obj_find is None :
raise UserError('USER_NOT_EXIST')
del user_obj_find['access_control']
del user_obj_find['crypto']
del user_obj_find['settings']
del user_obj_find['profile']['email']
del user_obj_find['profile']['openid_qq']
return user_obj_find
def checkIfUserExists(username) :
user_obj_find = db.users.find_one({'profile.username': username})
if user_obj_find is not None :
return True
return False
def checkIfEmailExists(email: str) :
user_obj_find = db.users.find_one({'profile.email': email.lower()})
if user_obj_find is not None :
return True
return False
def checkIsAuthorized(user, op) :
filterOperation(op, user)
def signup(username, password, email, challenge, signup_session_id) :
log(obj = {'username': username, 'email': email, 'challenge': challenge, 'signup_session_id': signup_session_id})
if len(username) > UserConfig.MAX_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_LONG')
if len(username) < UserConfig.MIN_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_SHORT')
if len(password) > UserConfig.MAX_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_LONG')
if len(password) < UserConfig.MIN_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_SHORT')
session_verified, session_obj = verify_session(signup_session_id, 'SIGNUP')
if session_verified :
if session_obj['type'] == 'LOGIN_OR_SIGNUP_OPENID_QQ' :
openid_qq = session_obj['openid_qq']
else :
openid_qq = None
if email :
if len(email) > UserConfig.MAX_EMAIL_LENGTH or not re.match(r"[^@]+@[^@]+\.[^@]+", email):
raise UserError('INCORRECT_EMAIL')
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(password)
with redis_lock.Lock(rdb, 'signup:' + username) :
user_obj_find = db.users.find_one({'profile.username': username})
if user_obj_find is not None :
raise UserError('USER_EXIST')
if email :
user_obj_email = db.users.find_one({'profile.email': email.lower()})
if user_obj_email is not None :
raise UserError('EMAIL_EXIST')
if openid_qq :
binded_user = db.users.find_one({'profile.openid_qq': openid_qq})
if binded_user is not None :
raise UserError('QQ_ALREADY_BIND')
user_obj = {
'profile': {
'username': username,
'desc': 'Write something here',
'pubkey': '',
'image': 'default',
'email': email,
'openid_qq': openid_qq if openid_qq else '' # bind if present
},
'crypto': {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
},
'access_control': {
'status': 'normal',
'access_mode': 'blacklist',
'allowed_ops': [],
'denied_ops': []
},
'settings': {
'blacklist': 'default'
},
'meta': {
'created_at': datetime.now()
}
}
uid = db.users.insert_one(user_obj).inserted_id
log(obj = {'uid': uid, 'profile': user_obj['profile']})
return uid
raise UserError('INCORRECT_SESSION')
def update_userphoto(redis_user_key, user_id, file_key) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'file_key': file_key})
photo_file = None
if file_key.startswith("upload-image-") :
filename = rdb.get(file_key)
if filename :
photo_file = filename.decode('ascii')
if photo_file is None :
raise UserError('NO_PHOTO')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_photo_file': obj['profile']['image'], 'photo_file': photo_file})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.image': photo_file}})
def updater(obj) :
obj['profile']['image'] = photo_file
return obj
_updateUserRedisValue(user_id, updater)
return photo_file
def update_desc(redis_user_key, user_id, new_desc) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'new_desc': new_desc})
if len(new_desc) > UserConfig.MAX_DESC_LENGTH :
raise UserError('DESC_TOO_LONG')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_desc': obj['profile']['desc']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.desc': new_desc}})
def updater(obj) :
obj['profile']['desc'] = new_desc
return obj
_updateUserRedisValue(user_id, updater)
def update_username(redis_user_key, user_id, new_name) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'new_name': new_name})
if len(new_name) > UserConfig.MAX_USERNAME_LENGTH or len(new_name) < UserConfig.MIN_USERNAME_LENGTH :
raise UserError('NAME_LENGTH')
user_obj_find = db.users.find_one({'profile.username': new_name})
if user_obj_find is not None :
raise UserError('USER_ALREADY_EXIST')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_name': obj['profile']['username']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.username': new_name}})
def updater(obj) :
obj['profile']['username'] = new_name
return obj
_updateUserRedisValue(user_id, updater)
def update_email(redis_user_key, user_id, new_email) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'new_email': new_email})
if len(new_email) > UserConfig.MAX_EMAIL_LENGTH or not re.match(r"[^@]+@[^@]+\.[^@]+", new_email):
raise UserError('INCORRECT_EMAIL')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
user_obj_email = db.users.find_one({'profile.email': new_email})
if user_obj_email is not None and str(user_obj_email['_id']) != str(obj['_id']) :
raise UserError('EMAIL_EXIST')
log(obj = {'old_email': obj['profile']['email']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.email': new_email}})
def updater(obj) :
obj['profile']['email'] = new_email
return obj
_updateUserRedisValue(user_id, updater)
def update_blacklist(redis_user_key, user_id, blacklist) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'blacklist': blacklist})
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_blacklist': obj['settings']['blacklist']})
if isinstance(blacklist, str) :
blacklist = 'default'
elif isinstance(blacklist, list) :
blacklist = tagdb.filter_and_translate_tags(blacklist)
else :
raise UserError('INCORRECT_BLACKLIST')
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'settings.blacklist': blacklist}})
def updater(obj) :
obj['settings']['blacklist'] = blacklist
return obj
_updateUserRedisValue(user_id, updater)
def update_password(user_id, old_pass, new_pass) :
if len(old_pass) > UserConfig.MAX_PASSWORD_LENGTH or len(old_pass) < UserConfig.MIN_PASSWORD_LENGTH:
raise UserError('PASSWORD_LENGTH')
if len(new_pass) > UserConfig.MAX_PASSWORD_LENGTH or len(new_pass) < UserConfig.MIN_PASSWORD_LENGTH:
raise UserError('PASSWORD_LENGTH')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'username': obj['profile']['username']})
crypto_method = obj['crypto']['crypto_method']
if crypto_method == 'PBKDF2' :
if not verify_password_PBKDF2(old_pass, obj['crypto']['salt1'], obj['crypto']['password_hashed']) :
raise UserError('INCORRECT_PASSWORD')
# generate a new Argon2 security context
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(new_pass)
crypto = {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}
elif crypto_method == 'Argon2' :
if not verify_password_Argon2(old_pass, obj['crypto']['salt1'], obj['crypto']['password_hashed']) :
raise UserError('INCORRECT_PASSWORD')
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = update_crypto_Argon2(old_pass, new_pass, obj['crypto']['salt2'], obj['crypto']['master_key_encryptyed'])
crypto = {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'crypto': crypto}})
def request_password_reset(email, user_language) :
user_obj = db.users.find_one({'profile.email': email})
if user_obj is None :
raise UserError('EMAIL_NOT_EXIST')
reset_key = random_bytes_str(16)
rdb.set('passreset-' + reset_key, email)
if user_language not in ['CHS', 'ENG'] :
user_language = 'ENG'
template_file = f'PatchyVideo-passreset-{user_language}.html'
title = get_template_attribute(template_file, 'get_title')
html_doc = render_template(template_file, key = reset_key)
send_noreply(email, str(title()), html_doc, mime = 'html')
def reset_password(reset_key, new_pass) :
if len(new_pass) > UserConfig.MAX_PASSWORD_LENGTH or len(new_pass) < UserConfig.MIN_PASSWORD_LENGTH:
raise UserError('PASSWORD_LENGTH')
reset_key_content = rdb.get('passreset-' + reset_key)
try :
email = reset_key_content.decode('ascii')
assert len(email) > 0
obj = db.users.find_one({'profile.email': email})
assert obj is not None
except :
raise UserError('INCORRECT_KEY')
# generate a new Argon2 security context
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(new_pass)
crypto = {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}
db.users.update_one({'_id': obj['_id']}, {'$set': {'crypto': crypto}})
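# _updateUserRedisValue refreshes the cached copy of a user object in Redis, if one exists: it
# looks up the session key stored under "user-<id>", applies the given updater to the cached JSON
# object and writes it back while preserving the remaining TTL of that key.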
def _updateUserRedisValue(user_id, updater) :
redis_user_key_lookup_key = f"user-{str(user_id)}"
redis_user_key_ttl = rdb.ttl(redis_user_key_lookup_key)
redis_user_key = rdb.get(redis_user_key_lookup_key)
if redis_user_key :
redis_user_obj_json = rdb.get(redis_user_key)
if redis_user_obj_json :
redis_user_obj = loads(redis_user_obj_json)
redis_user_obj = updater(redis_user_obj)
rdb.set(redis_user_key, dumps(redis_user_obj), ex = redis_user_key_ttl)
def whoAmI(user) :
return user['access_control']['status']
def updateUserRole(user_id, role, user) :
filterOperation('updateUserRole', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_role': role, 'old_role': old_user_obj['access_control']['status']})
if role not in ['normal', 'admin'] :
raise UserError('INCORRECT_ROLE')
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.status': role}})
def updater(obj) :
obj['access_control']['status'] = role
return obj
_updateUserRedisValue(user_id, updater)
def updateUserAccessMode(user_id, mode, user) :
filterOperation('updateUserAccessMode', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_mode': mode, 'old_mode': old_user_obj['access_control']['access_mode']})
if mode not in ['blacklist', 'whitelist'] :
raise UserError('INCORRECT_ACCESS_MODE')
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.access_mode': mode}})
def updater(obj) :
		obj['access_control']['access_mode'] = mode
return obj
_updateUserRedisValue(user_id, updater)
def getUserAllowedOps(user_id, user) :
filterOperation('getUserAllowedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
return old_user_obj['access_control']['allowed_ops']
def updateUserAllowedOps(user_id, allowed_ops, user) :
filterOperation('updateUserAllowedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_ops': allowed_ops, 'old_ops': old_user_obj['access_control']['allowed_ops']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.allowed_ops': allowed_ops}})
def updater(obj) :
obj['access_control']['allowed_ops'] = allowed_ops
return obj
_updateUserRedisValue(user_id, updater)
def getUserDeniedOps(user_id, user) :
filterOperation('getUserDeniedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
return old_user_obj['access_control']['denied_ops']
def updateUserDeniedOps(user_id, denied_ops, user) :
filterOperation('updateUserDeniedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_ops': denied_ops, 'old_ops': old_user_obj['access_control']['denied_ops']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.denied_ops': denied_ops}})
def updater(obj) :
obj['access_control']['denied_ops'] = denied_ops
return obj
_updateUserRedisValue(user_id, updater)
def listUsers(user, offset, limit, query = None, order = 'latest') :
filterOperation('listUsers', user)
if order not in ['latest', 'oldest'] :
raise UserError('INCORRECT_ORDER')
if query :
query = re.escape(query)
query = f'^.*{query}.*$'
query_obj = {'profile.username': {'$regex': query}}
else :
query_obj = {}
result = db.users.find(query_obj)
if order == 'latest':
result = result.sort([("meta.created_at", -1)])
if order == 'oldest':
result = result.sort([("meta.created_at", 1)])
items = result.skip(offset).limit(limit)
count = items.count()
items = [i for i in items]
for i in range(len(items)) :
del items[i]["crypto"]
return items, count
def viewOpinion(user) :
uobj = db.users.find_one({'_id': user['_id']})
if 'comment_thread' in uobj :
return listThread(uobj['comment_thread'])
else :
return None, None
|
python
|
import socket
import threading
class Server:
def __init__(self, ip, port):
self.sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sck.bind((ip, port))
self.sck.listen()
self.conCallback = None
self.clientThrCallback = None
self.disconCallback = None
self.clients = { }
self.nextClID = 0
def acceptClients(self):
while True:
sck, address = self.sck.accept()
if self.conCallback is not None:
self.conCallback(sck, address, self.nextClID)
thr = threading.Thread(target=self.clientThr, args=(sck, address, self.nextClID))
self.nextClID += 1
thr.start()
def acceptClientsAsync(self):
thr = threading.Thread(target=self.acceptClients)
thr.start()
return thr
def clientThr(self, sck, address, id):
self.clients[id] = (sck, address)
try:
if self.clientThrCallback is not None:
self.clientThrCallback(sck, address, id)
except Exception as e:
del self.clients[id]
sck.close()
raise e
if self.disconCallback is not None:
self.disconCallback(sck, address, id)
del self.clients[id]
sck.close()
def send(self, client, data):
if type(client) == int:
self.clients[client][0].send(data)
else:
client.send(data)
def broadcast(self, data):
for id, cl in self.clients.items():
cl[0].send(data)
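# Minimal usage sketch (illustrative only, not part of the original file):
#   server = Server('0.0.0.0', 9000)
#   server.clientThrCallback = lambda sck, address, id: server.send(id, sck.recv(1024))  # echo once
#   server.acceptClientsAsync()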
|
python
|
from math import pi, cos, log, floor
from torch.optim.lr_scheduler import _LRScheduler
class CosineWarmupLR(_LRScheduler):
'''
Cosine lr decay function with warmup.
Ref: https://github.com/PistonY/torch-toolbox/blob/master/torchtoolbox/optimizer/lr_scheduler.py
https://github.com/Randl/MobileNetV3-pytorch/blob/master/cosine_with_warmup.py
Lr warmup is proposed by
    `Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour`
`https://arxiv.org/pdf/1706.02677.pdf`
Cosine decay is proposed by
`Stochastic Gradient Descent with Warm Restarts`
`https://arxiv.org/abs/1608.03983`
Args:
optimizer (Optimizer): optimizer of a model.
iter_in_one_epoch (int): number of iterations in one epoch.
epochs (int): number of epochs to train.
        lr_min (float): minimum (final) lr.
        warmup_epochs (int): warmup epochs before cosine decay.
        last_epoch (int): index of the last iteration; despite the name, this counts iterations, not epochs.
Attributes:
niters (int): number of iterations of all epochs.
warmup_iters (int): number of iterations of all warmup epochs.
cosine_iters (int): number of iterations of all cosine epochs.
'''
def __init__(self, optimizer, epochs, iter_in_one_epoch, lr_min=0, warmup_epochs=0, last_epoch=-1):
self.lr_min = lr_min
self.niters = epochs * iter_in_one_epoch
self.warmup_iters = iter_in_one_epoch * warmup_epochs
self.cosine_iters = iter_in_one_epoch * (epochs - warmup_epochs)
super(CosineWarmupLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch < self.warmup_iters:
return [(self.lr_min + (base_lr - self.lr_min) * self.last_epoch / self.warmup_iters) for base_lr in self.base_lrs]
else:
return [(self.lr_min + (base_lr - self.lr_min) * (1 + cos(pi * (self.last_epoch - self.warmup_iters) / self.cosine_iters)) / 2) for base_lr in self.base_lrs]
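# Usage sketch for CosineWarmupLR (illustrative; the training-loop names are placeholders).
# The scheduler is stepped once per iteration, so iter_in_one_epoch = len(dataloader):
#   scheduler = CosineWarmupLR(optimizer, epochs=90, iter_in_one_epoch=len(dataloader),
#                              lr_min=1e-5, warmup_epochs=5)
#   for epoch in range(90):
#       for batch in dataloader:
#           train_step(batch)
#           optimizer.step()
#           scheduler.step()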
class CosineAnnealingWarmRestarts(_LRScheduler):
    r'''
copied from https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#CosineAnnealingWarmRestarts
Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}`
is the number of epochs since the last restart and :math:`T_{i}` is the number
of epochs between two warm restarts in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{i}}\pi))
When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
When :math:`T_{cur}=0`(after restart), set :math:`\eta_t=\eta_{max}`.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_0 (int): Number of iterations for the first restart.
T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
eta_min (float, optional): Minimum learning rate. Default: 0.
last_epoch (int, optional): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
'''
def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1, warmup_epochs=0, decay_rate=0.5):
if T_0 <= 0 or not isinstance(T_0, int):
raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
if T_mult < 1 or not isinstance(T_mult, int):
raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
if warmup_epochs < 0 or not isinstance(warmup_epochs, int):
raise ValueError("Expected positive integer warmup_epochs, but got {}".format(warmup_epochs))
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
self.eta_min = eta_min
self.warmup_epochs = warmup_epochs
self.decay_rate = decay_rate
self.decay_power = 0
super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
self.T_cur = self.last_epoch
def get_lr(self):
if self.last_epoch < self.warmup_epochs:
return [(self.eta_min + (base_lr - self.eta_min) * self.T_cur / self.warmup_epochs) for base_lr in self.base_lrs]
else:
return [self.eta_min + (base_lr * (self.decay_rate**self.decay_power) - self.eta_min) * (1 + cos(pi * self.T_cur / self.T_i)) / 2
for base_lr in self.base_lrs]
def step(self, epoch=None):
'''Step could be called after every batch update
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> iters = len(dataloader)
>>> for epoch in range(20):
>>> for i, sample in enumerate(dataloader):
>>> inputs, labels = sample['inputs'], sample['labels']
>>> scheduler.step(epoch + i / iters)
>>> optimizer.zero_grad()
>>> outputs = net(inputs)
>>> loss = criterion(outputs, labels)
>>> loss.backward()
>>> optimizer.step()
This function can be called in an interleaved way.
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> for epoch in range(20):
>>> scheduler.step()
>>> scheduler.step(26)
>>> scheduler.step() # scheduler.step(27), instead of scheduler(20)
'''
if epoch is None:
epoch = self.last_epoch + 1
self.T_cur = self.T_cur + 1
if self.T_cur >= self.T_i:
self.T_cur = self.T_cur - self.T_i
self.T_i = self.T_i * self.T_mult
else:
if epoch < 0:
raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
if epoch < self.warmup_epochs:
self.T_cur = epoch
else:
epoch_cur = epoch - self.warmup_epochs
if epoch_cur >= self.T_0:
if self.T_mult == 1:
self.T_cur = epoch_cur % self.T_0
self.decay_power = epoch_cur // self.T_0
else:
n = int(log((epoch_cur / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
self.T_cur = epoch_cur - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
self.T_i = self.T_0 * self.T_mult ** (n)
self.decay_power = n
else:
self.T_i = self.T_0
self.T_cur = epoch_cur
self.last_epoch = floor(epoch)
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
|
python
|
from construct import *
from construct.lib import *
switch_integers__opcode = Struct(
'code' / Int8ub,
'body' / Switch(this.code, {1: Int8ub, 2: Int16ul, 4: Int32ul, 8: Int64ul, }),
)
switch_integers = Struct(
'opcodes' / GreedyRange(LazyBound(lambda: switch_integers__opcode)),
)
_schema = switch_integers
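# Quick sanity check (illustrative, not part of the original module): each opcode is a width byte
# followed by an unsigned integer of that width, so parsing b"\x01\x07\x02\x34\x12" yields two
# opcodes with body values 7 and 0x1234.
#   parsed = _schema.parse(b"\x01\x07\x02\x34\x12")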
|
python
|
# -*- coding: utf-8 -*-
# bricks.py: utility collections.
#
# Copyright (C) 2009, 2010 Raymond Hettinger <[email protected]>
# Copyright (C) 2010 Lukáš Lalinský <[email protected]>
# Copyright (C) 2010 Yesudeep Mangalapilly <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utility collections or "bricks".
:module: watchdog.utils.bricks
:author: Yesudeep Mangalapilly <[email protected]>
:author: Lukáš Lalinský <[email protected]>
:author: Raymond Hettinger <[email protected]>
Classes
=======
.. autoclass:: OrderedSetQueue
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: OrderedSet
"""
import sys
import collections
try:
import queue
except ImportError:
import Queue as queue
class OrderedSetQueue(queue.Queue):
"""Thread-safe implementation of an ordered set queue.
Disallows adding a duplicate item while maintaining the
order of items in the queue. The implementation leverages
locking already implemented in the base class
redefining only the primitives. Since the internal queue
is not replaced, the order is maintained. The set is used
merely to check for the existence of an item.
Queued items must be immutable and hashable so that they can be used
as dictionary keys. You must implement **only read-only properties** and
the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and
:meth:`Item.__ne__()` methods for items to be hashable.
An example implementation follows::
class Item(object):
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def _key(self):
return (self._a, self._b)
def __eq__(self, item):
return self._key() == item._key()
def __ne__(self, item):
return self._key() != item._key()
def __hash__(self):
return hash(self._key())
:author: Lukáš Lalinský <[email protected]>
:url: http://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue
"""
def _init(self, maxsize):
queue.Queue._init(self, maxsize)
self._set_of_items = set()
def _put(self, item):
if item not in self._set_of_items:
queue.Queue._put(self, item)
self._set_of_items.add(item)
def _get(self):
item = queue.Queue._get(self)
self._set_of_items.remove(item)
return item
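# Usage sketch (illustrative): duplicates are silently dropped while FIFO order is preserved.
#   q = OrderedSetQueue()
#   q.put('a'); q.put('b'); q.put('a')
#   # q.qsize() == 2; q.get() returns 'a' then 'b'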
if sys.version_info >= (2, 6, 0):
KEY, PREV, NEXT = range(3)
class OrderedSet(collections.MutableSet):
"""
Implementation based on a doubly-linked link and an internal dictionary.
This design gives :class:`OrderedSet` the same big-Oh running times as
regular sets including O(1) adds, removes, and lookups as well as
O(n) iteration.
.. ADMONITION:: Implementation notes
Runs on Python 2.6 or later (and runs on Python 3.0 or later
without any modifications).
:author: Raymond Hettinger <[email protected]>
:url: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, _next = self.map.pop(key)
prev[NEXT] = _next
_next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
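# Usage sketch (illustrative): insertion order is preserved and duplicates are ignored, e.g.
#   OrderedSet('abracadabra')  -> OrderedSet(['a', 'b', 'r', 'c', 'd'])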
|
python
|
# -*- coding: utf-8 -*-
"""
PyEVO reCAPTCHA API module
===============================================
.. module:: pyevo.api.recaptcha
:platform: Unix, Windows
:synopsis: PyEVO reCAPTCHA API module
.. moduleauthor:: (C) 2012 Oliver Gutiérrez
TODO: Check recaptcha API module for incomplete class method get_challenge
"""
# Python imports
import urllib2, urllib
RECAPTCHA_API_SERVER='https://www.google.com/recaptcha/api'
RECAPTCHA_VERIFY_SERVER='http://www.google.com/recaptcha/api/verify'
class RECAPTCHAHelper(object):
"""
reCAPTCHA API helper
"""
def __init__(self,public_key,private_key,api_server=RECAPTCHA_API_SERVER,verify_server=RECAPTCHA_VERIFY_SERVER,fail_silently=True):
"""
Class initialization
"""
self.public_key=public_key
self.private_key=private_key
        self.api_server=api_server
        self.verify_server=verify_server
self.fail_silently=fail_silently
def verify(self,captcharesp,challenge):
"""
Recaptcha verification
"""
if not (captcharesp and challenge and len(captcharesp) and len(challenge)):
return False
# Generate request to recaptcha servers
verifreq = urllib2.Request (
url = self.verify_server,
data = urllib.urlencode ({
'privatekey': self.private_key,
'remoteip' : None,
'challenge': challenge.encode('utf-8'),
'response' : captcharesp.encode('utf-8'),
}),
headers = {
'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'Python'
}
)
# Do request
try:
resp=urllib2.urlopen(verifreq)
except:
# In case of connection error return fail_silently as value for the verification
return self.fail_silently
# Check captcha response
return_values=resp.read().splitlines();
resp.close();
return_code=return_values[0]
if (return_code=='true'):
return True
# Failed verification
return False
# def get_challenge(self):
# """
# TODO: Get reCAPTCHA image and challenge data
# """
# challenge=
# imgurl='http://www.google.com/recaptcha/api/image?c=%s' % challenge
# pass
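# Usage sketch (keys and form-field names below are placeholders, not part of the original module):
#   helper = RECAPTCHAHelper('public-key', 'private-key')
#   ok = helper.verify(form['recaptcha_response_field'], form['recaptcha_challenge_field'])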
|
python
|
from ._container import AadModelContainer
from onnxconverter_common.topology import Topology
from onnxconverter_common.data_types import FloatTensorType
from ad_examples.aad.forest_aad_detector import AadForest
def _get_aad_operator_name(model):
# FIXME: not all possible AAD models are currently supported
if not isinstance(model, AadForest):
raise ValueError("No proper operator name found for '%s'" % type(model))
return "AadForest"
def _parse_aad(scope, model, inputs):
this_operator = scope.declare_local_operator(_get_aad_operator_name(model), model)
this_operator.inputs = inputs
# FIXME: probably another variable is required for anomality label
score_variable = scope.declare_local_variable('score', FloatTensorType())
this_operator.outputs.append(score_variable)
return this_operator.outputs
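# parse_aad builds an onnxconverter-common Topology for an AAD model: it declares one input
# variable per entry of initial_types, registers those inputs and the operator outputs produced
# by _parse_aad on the raw model container, and returns the topology for the later conversion step.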
def parse_aad(model, initial_types=None, target_opset=None,
custom_conversion_functions=None, custom_shape_calculators=None):
raw_model_container = AadModelContainer(model)
topology = Topology(raw_model_container, default_batch_size='None',
initial_types=initial_types, target_opset=target_opset,
custom_conversion_functions=custom_conversion_functions,
custom_shape_calculators=custom_shape_calculators)
scope = topology.declare_scope('__root__')
inputs = []
for var_name, initial_type in initial_types:
inputs.append(scope.declare_local_variable(var_name, initial_type))
for variable in inputs:
raw_model_container.add_input(variable)
outputs = _parse_aad(scope, model, inputs)
for variable in outputs:
raw_model_container.add_output(variable)
return topology
|
python
|
#import matplotlib.pyplot as plt
from flask import Flask, render_template, jsonify
import requests
import json
import numpy as np
import time
app = Flask(__name__)
@app.route('/')
def index():
r = requests.get("http://127.0.0.1:5000/chain").text
r = json.loads(r)
# Fetch the chain length
chain_length = len(r["chain"])
blocks_data = []
for i in range(1,chain_length):
block_dict = {}
transaction_length = len(r["chain"][i]["transactions"])
block_dict["Block Number"] = r["chain"][i]["index"]
block_dict["Previous Hash"] = r["chain"][i]["previous_hash"]
block_dict["Timestamp"] = r["chain"][i]["timestamp"]
block_dict["Total Transactions"] = transaction_length
blocks_data.append(block_dict)
return render_template("graph.html", data=blocks_data)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True)
|
python
|
import numpy as np
import lsst.afw.table as afwTable
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.geom as geom
import lsst.sphgeom as sphgeom
from lsst.meas.base.forcedPhotCcd import ForcedPhotCcdTask, ForcedPhotCcdConfig
from .forcedPhotDia import DiaSrcReferencesTask
__all__ = ("ForcedPhotCoaddDiaSrcConfig", "ForcedPhotCoaddDiaSrcTask")
class ForcedPhotCoaddDiaSrcConfig(ForcedPhotCcdConfig):
coaddName = pexConfig.Field(dtype=str, default='deep',
doc="Name of coadd")
def setDefaults(self):
ForcedPhotCcdTask.ConfigClass.setDefaults(self)
self.references.retarget(DiaSrcReferencesTask)
self.measurement.copyColumns = {"id": "id", "coord_ra": "coord_ra",
"coord_dec": "coord_dec",
"base_PsfFlux_instFlux":"diaSrc_base_PsfFlux_instFlux",
"base_PsfFlux_instFluxErr": "diaSrc_base_PsfFlux_instFluxErr",
}
self.measurement.plugins.names = ['base_SdssShape', 'base_DiaTransformedCentroid',
'base_PsfFlux', 'base_LocalBackground',
'base_PixelFlags', 'base_CircularApertureFlux']
self.measurement.slots.centroid = 'base_DiaTransformedCentroid'
self.measurement.slots.shape = 'base_SdssShape'
self.measurement.slots.apFlux = None
self.measurement.slots.modelFlux = None
self.measurement.slots.psfFlux = None
self.measurement.slots.calibFlux = None
# These radii were chosen because they are among the default measured in the pipeline. If the default
# changes then these will not be able to be copied.
radii = [3., 6., 9., 12.]
for radius in radii:
base = int(radius)
decimal = int((radius - int(radius))*10)
input_name = f"base_CircularApertureFlux_{base}_{decimal}_instFlux"
output_name = f"diaSrc_base_CircularApertureFlux_{base}_{decimal}_instFlux"
self.measurement.copyColumns[input_name] = output_name
input_name = f"base_CircularApertureFlux_{base}_{decimal}_instFluxErr"
output_name = f"diaSrc_base_CircularApertureFlux_{base}_{decimal}_instFluxErr"
self.measurement.copyColumns[input_name] = output_name
self.measurement.plugins["base_CircularApertureFlux"].radii = radii
# Use a large aperture to be independent of seeing in calibration
self.measurement.plugins["base_CircularApertureFlux"].maxSincRadius = 12.0
class ForcedPhotCoaddDiaSrcTask(ForcedPhotCcdTask):
"""!
A command-line driver for performing forced measurement on Coadd images from DIASrc catalogs.
"""
ConfigClass = ForcedPhotCoaddDiaSrcConfig
RunnerClass = pipeBase.ButlerInitializedTaskRunner
_DefaultName = "forcedPhotCoaddDiaSrc"
dataPrefix = "deepCoadd_"
def __init__(self, butler=None, refSchema=None, **kwds):
"""Initialize the task.
ForcedPhotImageTask takes two keyword arguments beyond the usual CmdLineTask arguments:
- refSchema: the Schema of the reference catalog, passed to the constructor of the references
subtask
- butler: a butler that will be passed to the references subtask to allow it to load its Schema
from disk
At least one of these arguments must be present; if both are, schema takes precedence.
"""
super(ForcedPhotCcdTask, self).__init__(butler, refSchema, **kwds)
self.primaryKey = self.measurement.schema.addField("detect_isPrimary", type="Flag", doc="set to True if inside inner patch and tract region")
def writeOutput(self, dataRef, sources):
"""!Write source table
@param dataRef Data reference from butler
@param sources SourceCatalog to save
"""
dataRef.put(sources, "deepDiff_forced_template_diaSrc",
flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)
def _getConfigName(self):
"""!Return the name of the config dataset. Forces config comparison from run-to-run
"""
return "forcedPhotCoaddDiaSrc_config"
def _getMetadataName(self):
"""!Return the name of the metadata dataset. Forced metadata to be saved
"""
return None
@classmethod
def _makeArgumentParser(cls):
parser = pipeBase.ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument("--id", "deepDiff_differenceExp", help="data ID with raw CCD keys, "
                               "e.g. --id visit=12345 ccd")
return parser
def runDataRef(self, dataRef):
"""!Measure a single exposure using forced detection for a reference catalog.
@param[in] dataRef An lsst.daf.persistence.ButlerDataRef
@param[in] psfCache Size of PSF cache, or None. The size of the PSF cache can have
a significant effect upon the runtime for complicated PSF models.
"""
exposure = dataRef.get('deepDiff_differenceExp')
catalog = dataRef.get('deepDiff_diaSrc')
expWcs = exposure.getWcs()
butler = dataRef.butlerSubset.butler
# I need to get the template images/catalogs for all overlapping tracts
skyMap = butler.get(datasetType=self.config.coaddName + "Coadd_skyMap")
skyCorners = [expWcs.pixelToSky(geom.Point2D(pixPos)) for pixPos in exposure.getBBox().getCorners()]
imagePoly = sphgeom.ConvexPolygon.convexHull([coord.getVector() for coord in skyCorners])
tractPatchList = skyMap.findTractPatchList(skyCorners)
allMeasCat = None
for tract, patchList in tractPatchList:
for patch in patchList:
self.log.info('Processing patch %s from tract %s' % (patch.getIndex(),tract))
patchPoly = patch.getOuterSkyPolygon(tract.getWcs())
                if not patchPoly.intersects(imagePoly):
self.log.info('No intersection with the boundary patch.')
continue
validObject = np.array(
[patchPoly.contains(sphgeom.UnitVector3d(sphgeom.LonLat.fromRadians(s.getRa().asRadians(),
s.getDec().asRadians())))
for s in catalog])
refCat = catalog[validObject]
expCorners = [tract.getWcs().skyToPixel(pos) for pos in skyCorners]
expBBox = geom.Box2I()
for corner in expCorners:
expBBox.include(geom.Point2I(corner))
overlapBox = geom.Box2I(patch.getOuterBBox())
overlapBox.clip(expBBox)
patchArgDict = dict(
datasetType=self.dataPrefix+ "calexp_sub",
bbox=overlapBox,
tract=tract.getId(),
patch="%s,%s" % (patch.getIndex()[0], patch.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = butler.get(**patchArgDict)
# I need to filter out objects whose parents are not in the bounding box
refCatIdDict = {ref.getId(): ref.getParent() for ref in refCat}
# Add 0 for objects without a parent
refCatIdDict[0] = 0
parentGood = np.array([refCatIdDict[ref.getId()] in refCatIdDict for ref in refCat])
if np.sum(parentGood==False) > 1:
self.log.info("Removing %d/%d objects without parents" % (np.sum(parentGood==False),
len(parentGood)))
refCat = refCat.copy(deep=True)[parentGood]
if len(refCat) == 0:
self.log.info('No references available.')
continue
measCat = self.measurement.generateMeasCat(coaddPatch, refCat, expWcs,
idFactory=self.makeIdFactory(dataRef))
self.log.info("Performing forced measurement on %s" % (patchArgDict))
self.attachFootprints(measCat, refCat, coaddPatch, expWcs, dataRef)
exposureId = self.getExposureId(dataRef)
self.measurement.run(measCat, coaddPatch, refCat, expWcs, exposureId=exposureId)
# Label primary objects
innerBox = geom.Box2D(patch.getInnerBBox())
insideBox = np.array([innerBox.contains(s.getCentroid()) for s in measCat])
primaryTract = np.array([skyMap.findTract(s.getCoord())==tract for s in measCat])
primary = (insideBox) & (primaryTract)
# I can't set the whole array, so I do it one item at a time
for s,p in zip(measCat, primary):
s.set(self.primaryKey,p)
if self.config.doApCorr:
self.applyApCorr.run(
catalog=measCat,
apCorrMap=coaddPatch.getInfo().getApCorrMap()
)
self.catalogCalculation.run(measCat)
if allMeasCat is None:
allMeasCat = measCat
else:
allMeasCat.extend(measCat)
if allMeasCat is not None:
self.writeOutput(dataRef, allMeasCat)
|
python
|
# Generated by Django 2.1 on 2018-09-08 14:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_market'),
]
operations = [
migrations.AlterUniqueTogether(
name='market',
unique_together={('name', 'exchange')},
),
]
|
python
|
import serial
import struct
import time
def init_Serial():
print("Opening Serial Port COM 10")
ser = serial.Serial(
port='COM10',
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
return ser
def wait_for_Pi(ser_i):
print("Wait until Raspberry Pi is ready")
cnt = 0
while cnt < 3:
x = ser_i.read()
print(": %s" %x.encode("hex"))
if x == '\x03':
cnt = cnt + 1
print("Raspberry is ready")
def open_Kernel():
print("Opening File")
    fid = open(r"..\05_uart0\kernel8.img", "rb")
# Get binaries
data = fid.read()
# Get file size
f_size = fid.tell()
print("Filesize: %d" % f_size)
fid.close();
return f_size, data
def send_Kernel_size(ser_i, size):
print("Send Kernel size to RPI")
data = struct.pack('<i',size)
for i in data:
ser_i.write(i)
print("Waiting for Acknowledgment")
recv = ser_i.read(2)
if recv == "OK":
print("Received Acknowledgment")
else:
print("Error after sending size")
print("restart")
return False
return True
def send_Kernel(ser_i, kernel_data):
print("sending Kernel...")
print(len(kernel_data))
for tmp, byte in enumerate(kernel_data):
ser_i.write(byte)
print(tmp+1)
        # Check if sent byte == received byte
#recv = ser_i.read(1)
#print(": %s == %s" % (recv.encode("hex"), byte.encode("hex")))
#check the size
"""recv = ser_i.read(4)
print(": %s" % recv[0].encode("hex"))
print(": %s" % recv[1].encode("hex"))
print(": %s" % recv[2].encode("hex"))
print(": %s" % recv[3].encode("hex"))"""
print("finished sending")
    # check if the Raspberry Pi is sending a character after the while-loop
#test = ser_i.read(1)
#print(": %s" % test.encode("hex"))
#print("Successfull")
return True
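# Overall flow (as implemented above): wait until the Raspberry Pi has sent three 0x03 bytes,
# transmit the kernel size as a 4-byte little-endian integer, expect the two-byte "OK"
# acknowledgment, then stream the kernel image over the serial link byte by byte.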
def main():
print("Serial Transmit Kernel.img")
ser_i = init_Serial()
wait_for_Pi(ser_i)
size, kernel_data = open_Kernel()
guard = send_Kernel_size(ser_i, size)
    if guard:
send_Kernel(ser_i, kernel_data)
if __name__ == "__main__":
main()
|
python
|
from .Updater import Converters
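# _DataManager converts between nested objects and plain dicts: unpack() recursively flattens an
# object's __dict__ into dictionaries, while pack() writes dict values back onto an existing
# object, recursing into attributes that are themselves objects rather than dicts.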
class _DataManager:
def unpack(self, data):
return self.unpackItems(data.items())
def unpackItems(self, items):
return {key: self.itemsToDict(value) for key, value in items}
def itemsToDict(self, data):
if hasattr(data, "__dict__"):
return self.unpackItems(data.__dict__.items())
else:
return data
def pack(self, dataObject, data):
for key, value in data.items():
if hasattr(dataObject, key):
if not isinstance(getattr(dataObject, key), dict) and isinstance(value, dict):
self.pack(getattr(dataObject, key), value)
else:
setattr(dataObject, key, value)
return dataObject
class _DatabaseLoader(_DataManager):
def load(self, database, data):
try:
self.database = database
self.data = data
for converter in Converters.getConverters(self.getOldVersion()):
self.data = converter(self.data)
return self.pack(self.database, self.data)
except:
return database
def getOldVersion(self):
try:
return self.data["version"]
except:
return ""
DatabaseLoader = _DatabaseLoader()
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('mct_watchdog')
import rospy
import threading
import functools
import numpy
import mct_introspection
import yaml
import cv
import Image as PILImage
import ImageDraw as PILImageDraw
import ImageFont as PILImageFont
from cv_bridge.cv_bridge import CvBridge
from mct_utilities import file_tools
# Messages and Services
from std_srvs.srv import Empty
from std_srvs.srv import EmptyResponse
from mct_msg_and_srv.msg import FramesDropped
from sensor_msgs.msg import Image
class FrameDropWatchdog(object):
"""
Frame drop watchdog monitors the number of frames dropped by the system.
"""
def __init__(self,max_seq_age=150):
rospy.init_node('frame_drop_watchdog')
self.max_seq_age = max_seq_age
self.lock = threading.Lock()
self.frames_dropped = {}
self.latest_seq = None
self.ready = False
camera_assignment = file_tools.read_camera_assignment()
self.number_of_cameras = len(camera_assignment)
self.bridge = CvBridge()
self.info_image_size = (400,90)
self.font = PILImageFont.truetype("/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)
# Subscribe to camera info topics
self.frames_dropped_sub = {}
frames_dropped_topics = self.wait_for_topics()
for topic in frames_dropped_topics:
camera = get_camera_from_topic(topic)
handler = functools.partial(self.frames_dropped_handler, camera)
self.frames_dropped_sub[camera] = rospy.Subscriber(topic, FramesDropped, handler)
# Setup total frames dropped service
self.total_dropped_pub = rospy.Publisher('total_frames_dropped', FramesDropped)
# Setup reset service
self.reset_srv = rospy.Service('frame_drop_watchdog_reset', Empty, self.reset_handler)
# Create watchdog info image
self.image_watchdog_pub = rospy.Publisher('image_frame_drop_watchdog', Image)
self.ready = True
def wait_for_topics(self):
"""
Wait for the frames_dropped topics to be published.
"""
while 1:
frames_dropped_topics = mct_introspection.find_topics_w_ending('frames_dropped')
if len(frames_dropped_topics) == self.number_of_cameras:
break
rospy.sleep(0.25)
return frames_dropped_topics
def reset_handler(self,req):
"""
        Handler for the node's reset service - empties the frames_dropped buffer.
"""
with self.lock:
self.frames_dropped = {}
self.latest_seq = None
return EmptyResponse()
def frames_dropped_handler(self, camera, data):
if not self.ready:
return
with self.lock:
try:
self.frames_dropped[data.seq][camera] = data.frames_dropped
except KeyError:
self.frames_dropped[data.seq] = {camera:data.frames_dropped}
self.update_latest_seq(data.seq)
def update_latest_seq(self,seq):
if self.latest_seq is None:
self.latest_seq = seq
else:
self.latest_seq = max([seq, self.latest_seq])
def publish_watchdog_image(self, seq, total_frames_dropped, cameras_w_drops):
"""
Publish image for GUI w/ seq #, total frames dropped, other info?
"""
pil_info_image = PILImage.new('RGB', self.info_image_size,(255,255,255))
draw = PILImageDraw.Draw(pil_info_image)
info_items = [
('seq', seq),
('dropped', total_frames_dropped),
('cameras', cameras_w_drops),
]
text_x, text_y, step_y = 10, 10, 20
for i, item in enumerate(info_items):
label, value = item
label_text = '{0}:'.format(label)
if type(value) == float:
value_text = '{0:<1.6f}'.format(value)
else:
value_text = '{0}'.format(value)
draw.text( (text_x,text_y+step_y*i), label_text, font=self.font, fill=(0,0,0))
draw.text( (text_x+100,text_y+step_y*i), value_text, font=self.font, fill=(0,0,0))
cv_info_image = cv.CreateImageHeader(pil_info_image.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(cv_info_image, pil_info_image.tostring())
# Convert to a rosimage and publish
info_rosimage = self.bridge.cv_to_imgmsg(cv_info_image,'rgb8')
self.image_watchdog_pub.publish(info_rosimage)
def run(self):
"""
Node, main loop. While the node
"""
while not rospy.is_shutdown():
with self.lock:
for seq, data in sorted(self.frames_dropped.items()):
if len(data) == self.number_of_cameras:
total_frames_dropped = sum(data.values())
self.total_dropped_pub.publish(seq,total_frames_dropped)
cameras_w_drops = [c for c, n in data.iteritems() if n > 0]
cameras_w_drops = [int(c.split('_')[1]) for c in cameras_w_drops]
del self.frames_dropped[seq]
self.publish_watchdog_image(seq, total_frames_dropped, cameras_w_drops)
else:
if self.latest_seq - seq > self.max_seq_age:
del self.frames_dropped[seq]
# Utility functions
# ----------------------------------------------------------------------------
def get_camera_from_topic(topic):
camera = topic.split('/')[2]
return camera
# -----------------------------------------------------------------------------
if __name__ == '__main__':
node = FrameDropWatchdog()
node.run()
|
python
|
#!/usr/bin/python
#
# Example of a complex graph representing a network
#
import gvgen
# Creates the new graph instance
graph = gvgen.GvGen(None, "overlap=\"scale\";\nlabelfloat=\"true\";\nsplines=\"true\";")
# We define different styles
graph.styleAppend("router", "shapefile", "router.png")
graph.styleAppend("router", "color", "white")
graph.styleAppend("router", "label", "")
# Creates items
insidenet = graph.newItem("Inside network")
internet = graph.newItem("Internet")
win1 = graph.newItem("Windows", insidenet)
win2 = graph.newItem("Windows", insidenet)
linux = graph.newItem("Linux", insidenet)
hurd = graph.newItem("GNU/Hurd", insidenet)
sun = graph.newItem("Sun", internet)
router = graph.newItem("Router")
# Time to apply styles and set some properties
graph.styleApply("router", router)
graph.propertyAppend(win1, "shapefile", "wingdows.png")
graph.propertyAppend(win2, "shapefile", "wingdows.png")
graph.propertyAppend(linux, "shapefile", "linux.png")
graph.propertyAppend(hurd, "shapefile", "hurd.png")
graph.propertyAppend(sun, "shapefile", "sun.png")
graph.propertyAppend(win1, "label", "")
graph.propertyAppend(win2, "label", "")
graph.propertyAppend(linux, "label", "")
graph.propertyAppend(hurd, "label", "")
graph.propertyAppend(sun, "label", "")
# Links from "foo" to "bar"
graph.newLink(win1, router)
graph.newLink(win2, router)
graph.newLink(linux, router)
graph.newLink(hurd, router)
graph.newLink(router, sun)
# Outputs the graphviz code
graph.dot()
|
python
|
"""
Functions related to calculating the rotational energy of asymmetric
molecules. Townes and Schawlow, Ch. 4
"""
from pylab import poly1d, linspace, zeros, plot, text, axis, rcParams, legend, title, xlabel, ylabel, show
def asymmetry (A,B,C):
"""
Ray's asymmetry parameter for molecular rotation.
For a prolate symmetric top (B = C), kappa = -1.
For an oblate symmetric top (B = A), kappa = +1.
See Townes and Schawlow, Ch. 4.
"""
return (2.*B - A - C)/(A - C)
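# For example, with the water constants used in test() below (A, B, C ~ 835.84, 435.35, 278.14 GHz),
# asymmetry(A, B, C) ~ -0.44, i.e. closer to the prolate limit of -1 than to the oblate limit of +1.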
def b_prolate(kappa):
"""
0 = prolate <= b_P <= -1 = oblate
Townes and Schawlow, Ch. 4
"""
return (kappa+1.)/(kappa-3.)
def b_oblate(kappa):
"""
-1 = oblate <= b_O <= 0 = prolate
Townes and Schawlow, Ch. 4
"""
return (kappa-1.)/(kappa+3.)
def asym_quantum_factor(J,b):
"""
This takes the places of K^2 in calculating the energy levels
for asymmetric rotators. Townes and Schawlow, Ch. 4. For
J > 6 this returns an empty tuple. Note that it doesn't matter which version of
b is used since b_prolate(kappa) = b_oblate(-kappa) and the equations are
symmetric in b or depend on b**2.
"""
roots = ()
if J == 0:
roots = (0,)
elif J == 1:
roots = (0., 1+b, 1-b)
elif J == 2:
roots = ( 4., 1-3*b, 1+3*b)
p = poly1d([1, -4, -12*b**2])
roots = roots + tuple(p.r)
elif J == 3:
roots = (4.,)
p = poly1d([1, -4, -60*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -10+6*b, 9-54*b-15*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -10-6*b, 9+54*b-15*b**2])
roots = roots + tuple(p.r)
elif J == 4:
p = poly1d([1, -10*(1-b), 9-90*b-63*b**2])
roots = tuple(p.r)
p = poly1d([1, -10*(1+b), 9+90*b-63*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -20, 64-28*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -20, 64-208*b**2, 2880*b**2])
roots = roots + tuple(p.r)
elif J == 5:
p = poly1d([1, -20, 64-108*b**2])
roots = tuple(p.r)
p = poly1d([1, -20, 64-528*b**2,6720*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -35+15*b, 259-510*b-213*b**2, -225+3375*b+4245*b**2-675*b**3])
roots = roots + tuple(p.r)
p = poly1d([1, -35-15*b, 259+510*b-213*b**2, -225-3375*b+4245*b**2+675*b**3])
roots = roots + tuple(p.r)
elif J == 6:
p = poly1d([1, -35+21*b, 259-714*b-525*b**2, -225+4725*b+9165*b**2-3465*b**3])
roots = tuple(p.r)
p = poly1d([1, -35-21*b, 259+714*b-525*b**2, -225-4725*b+9165*b**2+3465*b**3])
roots = roots + tuple(p.r)
p = poly1d([1, -56, 784-336*b**2, -2304+9984*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -56, 784-1176*b**2, -2304+53664*b**2, -483840*b**2+55440*b**4])
roots = roots + tuple(p.r)
else:
roots = ()
return roots
def Ejk(A,B,C,J):
"""
Rotational energy of an asymmetric molecule using Eq. 4-4 and 4-5 in
Townes and Schawlow. Returns energies in units used for A,B and C
"""
kappa = asymmetry(A,B,C)
# assume prolate form
b_P = b_prolate(kappa)
ws = asym_quantum_factor(J,b_P)
# print "w's=",ws
result = []
for w in ws:
result.append( (B+C)*J*(J+1)/2. + (A - (B+C)/2. )*w )
return result
def plot_E_vs_kappa(Amax,C,maxJ):
"""
Plots diagram showing how energy of an asymmetric rotor depends on its
asymmetry as it varies between prolate and oblate, and how the J(-K,+K)
labelling arises. Townes and Schawlow , Ch. 4.
This assumes that the volume of the moment of inertia ellipsoid is a
constant::
Ia*Ib*Ic = 3*V/(2*pi)
or::
(h/8 pi**2)**3(ABC) = 3*V/(2*pi)
or::
A*B*C = K, a constant
The ellipsoid's minimum semi-axis C is also a constant. So in the prolate case,
B=C and K = Amax*C*C. In the oblate case, Amin=B and K = A*A*C.
The constraints are then::
2*B = (1+kappa)*A + (1-kappa)*C
and::
A = Amax*C/B
"""
n_kappas = 21
kappas = linspace(-1,1,n_kappas)
for J in range(maxJ):
n_columns = 2*J+1
        # create a matrix of n_kappas rows and n_columns columns
E = zeros((n_kappas,n_columns),float)
for i in range(n_kappas):
kappa = kappas[i]
p = poly1d([2,(kappa-1)*C,-(kappa+1)*Amax*C])
if p.r[0] > 0.:
B = p.r[0]
else:
B = p.r[1]
print B
A = Amax*C/B
# This should yield n_columns of energy values for this kappa
Es = Ejk(A,B,C,J)
E[i,:] = Es
# Now we have a 2D array of energies to plot
for k in range(n_columns):
# select a line style and plot
if J%3 == 0:
ls = "-"
elif J%3 == 1:
ls = "--"
elif J%3 == 2:
ls = ":"
else:
ls = "-."
plot(kappas,E[:,k],label=r"$J_{\tau}="+str(J)+"_{"+str(k-J)+"}$",ls=ls,lw=2)
# label the lines
for K in range(J+1):
# For prolate, B=C
E_prolate = C*J*(J+1)+(Amax-C)*K**2
# For oblate, B=A, using the last value of A
E_oblate = A*J*(J+1)+(C-A)*K**2
text(-0.98+0.07*(J%2),E_prolate,r"$"+str(J)+"_{"+str(K)+"}$")
text( 0.93-0.07*(J%2),E_oblate,r"$"+str(J)+"_{"+str(K)+"}$")
def test(A,B,C,maxJ):
"""
Checks the calculation of energy levels. For example, for water::
>>> test(835.83910,435.347353,278.139826,5)
0 [0.0]
1 [713.48717899999997, 1113.978926, 1271.186453]
2 [4056.8435790000003, 2855.3683380000002, 2383.7457570000001,
4094.7813912145548, 2102.5237247854457]
3 [6197.3051160000005, 6374.3863667070382, 4103.8418232929625,
8620.1335561107444, 5204.2902778892558, 8614.2071531433267, 4266.9715188566752]
4 [11569.54077149916, 8277.1955485008384, 11529.522215260266, 6745.1388347397333,
14830.335414585266, 9021.318375414734, 14831.098979756451, 9490.7431163792135,
6664.6834838643363]
Divide by 29.997 to convert GHz to 1/cm
"""
for J in range(maxJ):
print J,Ejk(A,B,C,J)
if __name__ == "__main__":
C=25
A=125
maxK = 4
plot_E_vs_kappa(A,C,maxK)
a = axis()
b = [a[0], a[1], -10, a[3]]
axis(b)
rcParams.update({'legend.fontsize': 10})
legend(loc=9)
    title(r"$\mathrm{Asymmetric~rotor,~A}_{\mathrm{max}}=$"+str(A)+r"$,~\mathrm{C}=$"+str(C))
xlabel(r"$\mathrm{Asymmetry}$")
ylabel(r'$E(J,\tau)/h~(GHz)$')
show()
|
python
|
import matplotlib.pyplot as plt
import numpy as np
import torch
# from scipy.special import softmax
# from mpl_toolkits.axes_grid1 import make_axes_locatable
def compare_distogram(outputs, targets):
plt.figure(num=1, figsize=[15, 10])
plt.clf()
# names = ['Distance','Omega','Phi','Theta']
names = ['dNN','dCaCa','dCbCb','dNCa','dNCb','dCaCb']
n = len(targets)
for i,(output,target,name) in enumerate(zip(outputs,targets,names)):
if isinstance(output, torch.Tensor):
output = torch.squeeze(output[-1,:,:]).cpu().detach().numpy()
if isinstance(target, torch.Tensor):
target = torch.squeeze(target[-1,:,:]).cpu().detach().numpy()
mask = target > 0
        plt.subplot(n,3, i*3+1)
plt.imshow(output, vmin=0)
plt.colorbar()
tit = name + "(prediction)"
plt.title(tit)
        plt.subplot(n,3, i*3+2)
plt.imshow(target, vmin=0)
plt.colorbar()
tit = name + "(target)"
plt.title(tit)
        plt.subplot(n, 3, i * 3 + 3)
plt.imshow(np.abs(mask * output - target), vmin=0)
plt.colorbar()
tit = name + "(diff)"
plt.title(tit)
plt.pause(0.5)
return
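# plotfullprotein overlays two sets of three coordinate arrays (each 3 x N, rows are x/y/z):
# t1/t2/t3 are drawn in red and p1/p2/p3 in blue. t1 (resp. p1) is traced as a chain, and for
# every column a segment is drawn from t1 to t2 and from t1 to t3 (likewise for p1..p3).
# Presumably the red set is the target and the blue set the prediction, mirroring compare_distogram.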
def plotfullprotein(p1,p2,p3,t1,t2,t3):
plt.figure(num=2, figsize=[15, 10])
plt.clf()
n = t1.shape[1]
axes = plt.axes(projection='3d')
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.set_zlabel("z")
line1 = axes.plot3D(t1[0, :], t1[1, :], t1[2, :], 'red', marker='x')
a = t1[0,:].T
b = t2[0,:].T
tx = np.concatenate((a[:,None],b[:,None]),axis=1)
a = t1[1,:].T
b = t2[1,:].T
ty = np.concatenate((a[:,None],b[:,None]),axis=1)
a = t1[2,:].T
b = t2[2,:].T
tz = np.concatenate((a[:,None],b[:,None]),axis=1)
for i in range(n):
line2 = axes.plot3D(tx[i,:], ty[i,:], tz[i,:], 'red', marker='d')
a = t1[0,:].T
b = t3[0,:].T
tx = np.concatenate((a[:,None],b[:,None]),axis=1)
a = t1[1,:].T
b = t3[1,:].T
ty = np.concatenate((a[:,None],b[:,None]),axis=1)
a = t1[2,:].T
b = t3[2,:].T
tz = np.concatenate((a[:,None],b[:,None]),axis=1)
for i in range(n):
line3 = axes.plot3D(tx[i,:], ty[i,:], tz[i,:], 'red', marker='o')
line1 = axes.plot3D(p1[0, :], p1[1, :], p1[2, :], 'blue', marker='x')
a = p1[0,:].T
b = p2[0,:].T
tx = np.concatenate((a[:,None],b[:,None]),axis=1)
a = p1[1,:].T
b = p2[1,:].T
ty = np.concatenate((a[:,None],b[:,None]),axis=1)
a = p1[2,:].T
b = p2[2,:].T
tz = np.concatenate((a[:,None],b[:,None]),axis=1)
for i in range(n):
line2 = axes.plot3D(tx[i,:], ty[i,:], tz[i,:], 'blue', marker='d')
a = p1[0,:].T
b = p3[0,:].T
tx = np.concatenate((a[:,None],b[:,None]),axis=1)
a = p1[1,:].T
b = p3[1,:].T
ty = np.concatenate((a[:,None],b[:,None]),axis=1)
a = p1[2,:].T
b = p3[2,:].T
tz = np.concatenate((a[:,None],b[:,None]),axis=1)
for i in range(n):
line3 = axes.plot3D(tx[i,:], ty[i,:], tz[i,:], 'blue', marker='o')
plt.pause(0.5)
return
def plotcoordinates(pred,target):
plt.figure(num=1, figsize=[15, 10])
plt.clf()
axes = plt.axes(projection='3d')
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.set_zlabel("z")
line = axes.plot3D(pred[0,:],pred[1,:], pred[2,:], 'green', marker='x')
line2 = axes.plot3D(target[0,:],target[1,:], target[2,:], 'red', marker='x')
plt.pause(2.5)
return
|
python
|
import functools
import requests
import suds.transport as transport
import traceback
try:
import cStringIO as StringIO
except ImportError:
import StringIO
__all__ = ['RequestsTransport']
def handle_errors(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except requests.HTTPError as e:
buf = StringIO.StringIO(e.response.content)
raise transport.TransportError(
'Error in requests\n' + traceback.format_exc(),
e.response.status_code,
buf,
)
except requests.RequestException:
raise transport.TransportError(
'Error in requests\n' + traceback.format_exc(),
000,
)
return wrapper
class RequestsTransport(transport.Transport):
def __init__(self, session=None):
transport.Transport.__init__(self)
self._session = session or requests.Session()
@handle_errors
def open(self, request):
resp = self._session.get(request.url)
resp.raise_for_status()
return StringIO.StringIO(resp.content)
@handle_errors
def send(self, request):
resp = self._session.post(
request.url,
data=request.message,
headers=request.headers,
)
if resp.headers.get('content-type') not in ('text/xml',
'application/soap+xml'):
resp.raise_for_status()
return transport.Reply(
resp.status_code,
resp.headers,
resp.content,
)
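# Usage sketch (illustrative; the WSDL URL is a placeholder):
#   from suds.client import Client
#   client = Client('http://example.com/service?wsdl', transport=RequestsTransport())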
|
python
|
'''
@author: Frank
'''
import zstacklib.utils.http as http
import zstacklib.utils.log as log
import zstacklib.utils.plugin as plugin
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.daemon as daemon
import zstacklib.utils.iptables as iptables
import os.path
import functools
import traceback
import pprint
logger = log.get_logger(__name__)
TESTAGENT_PORT = 9393
class TestAgentError(Exception):
''' test agent failed '''
class TestAgent(plugin.Plugin):
pass
class TestAgentServer(object):
http_server = http.HttpServer(port=TESTAGENT_PORT)
http_server.logfile_path = log.get_logfile_path()
def __init__(self):
self.plugin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plugins')
self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)
def start(self, in_thread=True):
self.plugin_rgty.configure_plugins({})
self.plugin_rgty.start_plugins()
if in_thread:
self.http_server.start_in_thread()
else:
self.http_server.start()
def stop(self):
self.plugin_rgty.stop_plugins()
self.http_server.stop()
class AgentResponse(object):
def __init__(self, success=True, error=None):
self.success = success
self.error = error if error else ''
class AgentCommand(object):
def __init__(self):
pass
def replyerror(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
rsp = AgentResponse()
rsp.success = False
rsp.error = err
logger.warn(err)
raise TestAgentError('500 Internal Server Error: %s' % err)
return wrap
class TestAgentDaemon(daemon.Daemon):
def __init__(self, pidfile):
super(TestAgentDaemon, self).__init__(pidfile)
def run(self):
self.agent = TestAgentServer()
self.agent.start(False)
def build_http_path(ip, path):
return 'http://%s:%s/%s' % (ip, str(TESTAGENT_PORT), path.lstrip('/'))
|
python
|
#a POC of a queue manager with two buttons to increment and decrement the current value and broadcast it over wifi
import machine
from machine import I2C, Pin
import time
import network
#set to True to enable a display, False to disable it
use_display=True
if use_display:
#display setup, i have a 128x32 oled on this board, tune the values to your display
import ssd1306
rst = Pin(16, Pin.OUT)
rst.value(1)
scl = Pin(5, Pin.OUT, Pin.PULL_UP)
sda = Pin(4, Pin.OUT, Pin.PULL_UP)
i2c = I2C(scl=scl, sda=sda, freq=450000)
oled = ssd1306.SSD1306_I2C(128, 32, i2c, addr=0x3c)
#service function to redraw a display with a string
def draw_display(text):
if use_display:
oled.fill(0)
oled.text(str(text),5,15)
oled.show()
else:
        print('You are at:', text)
#service function to generate the network name
def essid_rename(actual_counter):
essid=essid_base+str(actual_counter)
ap_if.config(essid=essid, authmode=network.AUTH_WPA_WPA2_PSK, password='some random char 12345678900000**')
ap_if.active(True)
#setup the button up to pin 12
button_up = machine.Pin(12, machine.Pin.IN, machine.Pin.PULL_UP)
#setup the button down to pin 0
button_down = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
#seconds of sleep between consecutive button presses, to avoid multiple readings
button_pause=0.1
#counter value
counter=0
#combo button status, to avoid increment and decrement counter after a combo pressure
combo=False
#setup wireless interface
ap_if = network.WLAN(network.AP_IF)
#configure a string for the essid base name
essid_base="It's the turn of:"
#just clean the oled
draw_display("Press a button")
print("Press a button...")
#let's start an infinite loop to keep checking the status of the buttons
while True:
#reset function, pressing both buttons will reset the counter to 0
if not button_up.value() and not button_down.value():
print('Combo Button pressed!', counter)
counter=0
combo=True
draw_display('Reset complete')
time.sleep(2)
draw_display('We serve:'+str(counter))
essid_rename(counter)
if not button_up.value() and not combo:#up button counter
counter+=1
print('Button up pressed!', counter)
draw_display('We serve:'+str(counter))
essid_rename(counter)
time.sleep(button_pause)
if not button_down.value() and not combo:#down button counter plus negative number check
if counter>0:
counter-=1
else:
counter=0
print('Button down pressed!', counter)
draw_display('We serve:'+str(counter))
essid_rename(counter)
time.sleep(button_pause)
#reset combo button status
combo=False
|
python
|
import sys
import PyNexusZipCrawler
# GET THE NEXUS MODS FILE ID FROM USER INPUT
f_id = raw_input("Enter the Nexus Mods File ID you want to crawl: ")
# CRAWL IT
PyNexusZipCrawler.crawl_zip_content(f_id, "110")
|
python
|
import logging
import os
import pyaudio, wave, pylab
import numpy as np
import librosa, librosa.display
import matplotlib.pyplot as plt
from scipy.io.wavfile import write
from setup_logging import setup_logging
INPUT_DEVICE = 0
MAX_INPUT_CHANNELS = 1 # Max input channels
DEFAULT_SAMPLE_RATE = 44100 # Default sample rate of microphone or recording device
DURATION = 5 # Recording duration in seconds
CHUNK_SIZE = 1024
logger = logging.getLogger('sound')
class Sound(object):
def __init__(self):
# Set default configurations for recording device
# sd.default.samplerate = DEFAULT_SAMPLE_RATE
# sd.default.channels = DEFAULT_CHANNELS
self.format = pyaudio.paInt16
self.channels = MAX_INPUT_CHANNELS
self.sample_rate = DEFAULT_SAMPLE_RATE
self.chunk = CHUNK_SIZE
self.duration = DURATION
self.path = os.path.join(os.getcwd(), "recorded0.wav")
self.device = INPUT_DEVICE
self.frames = []
self.audio = pyaudio.PyAudio()
self.device_info()
print()
logger.info("Audio device configurations currently used")
logger.info(f"Default input device index = {self.device}")
logger.info(f"Max input channels = {self.channels}")
logger.info(f"Default samplerate = {self.sample_rate}")
def device_info(self):
num_devices = self.audio.get_device_count()
keys = ['name', 'index', 'maxInputChannels', 'defaultSampleRate']
logger.info(f"List of System's Audio Devices configurations:")
logger.info(f"Number of audio devices: {num_devices}")
for i in range(num_devices):
info_dict = self.audio.get_device_info_by_index(i)
logger.info([(key, value) for key, value in info_dict.items() if key in keys])
def record(self):
# start Recording
self.audio = pyaudio.PyAudio()
stream = self.audio.open(
format=self.format,
channels=self.channels,
rate=self.sample_rate,
input=True,
frames_per_buffer=self.chunk,
input_device_index=self.device)
logger.info(f"Recording started for {self.duration} seconds")
self.frames = []
for i in range(0, int(self.sample_rate / self.chunk * self.duration)):
data = stream.read(self.chunk)
self.frames.append(data)
        logger.info("Recording completed")
# stop Recording
stream.stop_stream()
stream.close()
self.audio.terminate()
self.save()
def save(self):
waveFile = wave.open(self.path, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.sample_rate)
waveFile.writeframes(b''.join(self.frames))
waveFile.close()
logger.info(f"Recording saved to {self.path}")
sound = Sound()
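# --- Usage sketch (not part of the original module) ---
# Assumes a working input device at INPUT_DEVICE and that logging has been
# configured (e.g. via setup_logging); records DURATION seconds and saves the
# result to recorded0.wav in the current directory.
if __name__ == "__main__":
    sound.record()
    print(f"Recording written to {sound.path}")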
|
python
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/cox.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def cox(X: Matrix,
TE: Matrix,
F: Matrix,
R: Matrix,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
"""
    This script fits a Cox proportional hazards regression model.
    The Breslow method is used for handling ties, and the regression parameters
    are computed using a trust-region Newton method with conjugate gradient.
:param X: Location to read the input matrix X containing the survival data
containing the following information
1: timestamps
2: whether an event occurred (1) or data is censored (0)
3: feature vectors
:param TE: Column indices of X as a column vector which contain timestamp
(first row) and event information (second row)
:param F: Column indices of X as a column vector which are to be used for
fitting the Cox model
:param R: If factors (categorical variables) are available in the input matrix
X, location to read matrix R containing the start and end indices of
the factors in X
R[,1]: start indices
R[,2]: end indices
Alternatively, user can specify the indices of the baseline level of
each factor which needs to be removed from X; in this case the start
and end indices corresponding to the baseline level need to be the same;
if R is not provided by default all variables are considered to be continuous
:param alpha: Parameter to compute a 100*(1-alpha)% confidence interval for the betas
:param tol: Tolerance ("epsilon")
:param moi: Max. number of outer (Newton) iterations
:param mii: Max. number of inner (conjugate gradient) iterations, 0 = no max
:return: A D x 7 matrix M, where D denotes the number of covariates, with the following schema:
M[,1]: betas
M[,2]: exp(betas)
M[,3]: standard error of betas
M[,4]: Z
M[,5]: P-value
M[,6]: lower 100*(1-alpha)% confidence interval of betas
M[,7]: upper 100*(1-alpha)% confidence interval of betas
:return: Two matrices containing a summary of some statistics of the fitted model:
1 - File S with the following format
- row 1: no. of observations
- row 2: no. of events
- row 3: log-likelihood
- row 4: AIC
- row 5: Rsquare (Cox & Snell)
- row 6: max possible Rsquare
2 - File T with the following format
- row 1: Likelihood ratio test statistic, degree of freedom, P-value
- row 2: Wald test statistic, degree of freedom, P-value
- row 3: Score (log-rank) test statistic, degree of freedom, P-value
:return: Additionally, the following matrices are stored (needed for prediction)
1- A column matrix RT that contains the order-preserving recoded timestamps from X
2- Matrix XO which is matrix X with sorted timestamps
3- Variance-covariance matrix of the betas COV
4- A column matrix MF that contains the column indices of X with the baseline factors removed (if available)
"""
params_dict = {'X': X, 'TE': TE, 'F': F, 'R': R}
params_dict.update(kwargs)
vX_0 = Matrix(X.sds_context, '')
vX_1 = Matrix(X.sds_context, '')
vX_2 = Matrix(X.sds_context, '')
vX_3 = Matrix(X.sds_context, '')
vX_4 = Matrix(X.sds_context, '')
vX_5 = Matrix(X.sds_context, '')
output_nodes = [vX_0, vX_1, vX_2, vX_3, vX_4, vX_5, ]
op = MultiReturn(X.sds_context, 'cox', output_nodes, named_input_nodes=params_dict)
vX_0._unnamed_input_nodes = [op]
vX_1._unnamed_input_nodes = [op]
vX_2._unnamed_input_nodes = [op]
vX_3._unnamed_input_nodes = [op]
vX_4._unnamed_input_nodes = [op]
vX_5._unnamed_input_nodes = [op]
return op
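# --- Usage sketch (not autogenerated; module paths and all values below are assumptions) ---
# Assumes the standard SystemDS Python bindings (SystemDSContext, from_numpy, compute)
# and that this wrapper is exposed as systemds.operator.algorithm.cox; the order of the
# six returned matrices is not documented in this file.
if __name__ == "__main__":
    import numpy as np
    from systemds.context import SystemDSContext
    from systemds.operator.algorithm import cox

    n = 100
    with SystemDSContext() as sds:
        X = sds.from_numpy(np.column_stack([
            np.random.exponential(10.0, n),             # col 1: survival times
            np.random.randint(0, 2, n).astype(float),   # col 2: event (1) / censored (0)
            np.random.rand(n, 3)                        # cols 3-5: covariates
        ]))
        TE = sds.from_numpy(np.array([[1.0], [2.0]]))        # timestamp / event column indices
        F = sds.from_numpy(np.array([[3.0], [4.0], [5.0]]))  # feature column indices
        R = sds.from_numpy(np.array([[0.0, 0.0]]))           # placeholder: no categorical factors (guess)
        results = cox(X, TE, F, R, tol=1e-9, moi=20).compute()  # list with the six output matrices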
|
python
|
import json
import numpy as np
from flask import Flask, jsonify, render_template
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html", title="SQLAlchemy API Homework with Navigation")
@app.route("/api/v1.0/precipitation")
def precipitation():
session = Session(engine)
results = session.query(Measurement.date,Measurement.prcp).\
order_by(Measurement.date).all()
session.close()
precipitation = list(np.ravel(results))
precipitation = {precipitation[i]: precipitation[i + 1] for i in range(0, len(precipitation), 2)}
return render_template('index2.html', jsonfile=json.dumps(precipitation))
@app.route("/api/v1.0/precipitation2")
def precipitation2():
session = Session(engine)
results = session.query(Measurement.date,Measurement.prcp).\
order_by(Measurement.date).all()
session.close()
precipitation = list(np.ravel(results))
precipitation = {precipitation[i]: precipitation[i + 1] for i in range(0, len(precipitation), 2)}
return jsonify(precipitation)
@app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
results = session.query(Station.station).\
order_by(Station.station).all()
session.close()
stations = list(np.ravel(results))
return render_template('index2.html', jsonfile=json.dumps(stations))
@app.route("/api/v1.0/stations2")
def stations2():
session = Session(engine)
results = session.query(Station.station).\
order_by(Station.station).all()
session.close()
stations = list(np.ravel(results))
return jsonify(stations)
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
results = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date >= '2016-08-23').\
order_by(Measurement.date).all()
session.close()
tobs = list(np.ravel(results))
tobs = {tobs[i]: tobs[i + 1] for i in range(0, len(tobs), 2)}
return render_template('index2.html', jsonfile=json.dumps(tobs))
@app.route("/api/v1.0/tobs2")
def tobs2():
session = Session(engine)
results = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date >= '2016-08-23').\
order_by(Measurement.date).all()
session.close()
tobs = list(np.ravel(results))
tobs = {tobs[i]: tobs[i + 1] for i in range(0, len(tobs), 2)}
return jsonify(tobs)
@app.route("/api/v1.0/<start_date>")
def data_start_date(start_date):
session = Session(engine)
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).all()
session.close()
start_date = []
for min, avg, max in results:
start_date2 = {}
start_date2["Minimum_Temp"] = min
start_date2["AVG_Temp"] = avg
start_date2["Max_Temp"] = max
start_date.append(start_date2)
return jsonify(start_date)
@app.route("/api/v1.0/<start_date>/<end_date>")
def data_start_end_date(start_date, end_date):
session = Session(engine)
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
session.close()
end_date = []
for min, avg, max in results:
end_date2 = {}
end_date2["Minimum_Temp"] = min
end_date2["AVG_Temp"] = avg
end_date2["Max_Temp"] = max
end_date.append(end_date2)
return jsonify(end_date)
if __name__ == "__main__":
app.run(debug=True)
|
python
|
class User():
def __init__(self, first_name, last_name, gender, email):
self.first_name = first_name
self.last_name = last_name
self.gender = gender
self.email = email
self.login_attempts = 0
def describe_user(self):
print(self.first_name)
print(self.last_name)
print(self.gender)
print(self.email)
def greet_user(self):
print("Hello "+ self.first_name.title())
def increment_login_attempts(self):
self.login_attempts += 1
def reset_login_attempts(self):
self.login_attempts = 0
user1 = User('Karandeep', 'Bhardwaj', 'male', '[email protected]')
user2 = User('Jaya', 'Sachdeva', 'female', '[email protected]')
user3 = User('Megha', 'Bhardwaj', 'female', '[email protected]')
def print_for_user(o):
o.describe_user()
class Privileges():
    def __init__(self, privileges_list):
        self.privileges = privileges_list
def show_privileges(self):
for privilege in self.privileges:
print(privilege)
class Admin(User):
    def __init__(self, first_name, last_name, gender, email):
        super().__init__(first_name, last_name, gender, email)
        # keep the privilege list behind a single Privileges instance
        self.privileges = Privileges(['can add post', 'can delete post', 'can ban user', 'can reset the password for user'])
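# --- Usage sketch (not part of the original exercise; the e-mail is a hypothetical placeholder) ---
admin = Admin('Megha', 'Bhardwaj', 'female', 'admin@example.com')
admin.greet_user()                   # Hello Megha
admin.increment_login_attempts()
print(admin.login_attempts)          # 1
admin.privileges.show_privileges()   # prints each privilege on its own line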
|
python
|
#!/usr/bin/env python
import pod
import sys, binascii
from StringIO import StringIO
def ConvertMac(dotted):
    # accept either a plain hex string ("001122334455") or the colon-separated form
    if dotted.find(":") == -1:
        raw = binascii.unhexlify(dotted)
    else:
        raw = "".join([chr(int(i, 16)) for i in dotted.split(":")])
    if len(raw) != 6:
        raise ValueError("Not a MAC address")
    return raw
def Help():
print "Usage: getmac.py"
print "Copies mac to mac.bin"
sys.exit(-1)
if __name__ == "__main__":
p = pod.Pod("turbo")
p.GetMac()
p.Close()
|
python
|
from typing import List
from ..codes import *
from dataclasses import dataclass
@dataclass(repr=True, eq=True)
class OppoCommand:
"""Represents a command to an OppoDevice"""
code: OppoCodeType
_parameters: List[str]
_response_codes: List[str]
def __init__(self, code: OppoCodeType, parameters: List[str] = None, response_codes: List[str] = None):
if parameters is None:
parameters = []
if response_codes is None:
response_codes = []
self.code = self._translate(code)
self._parameters = parameters
self._response_codes = response_codes + [self.code.value]
def encode(self):
params = ""
if len(self._parameters) > 0:
params = " " + " ".join(list(map(str, self._parameters)))
return f"#{self.code.value}{params}\r".encode()
@property
def expected_response_codes(self):
return self._response_codes
def _translate(self, code: OppoCodeType):
if isinstance(code, str):
return OppoCode(code)
return code
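# --- Usage sketch (not part of the original module) ---
# "PON" is a hypothetical code string; _translate() resolves it through the
# OppoCode enum, so this only works for values that enum actually defines.
if __name__ == "__main__":
    cmd = OppoCommand("PON", parameters=["1"])
    print(cmd.encode())                 # e.g. b'#PON 1\r'
    print(cmd.expected_response_codes)  # always ends with the command's own code value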
|
python
|
from django.urls import path
from rest_framework import routers
from .views import *
router = routers.DefaultRouter()
router.register('notes', NoteViewSet, basename='notes')
router.register('projects', ProjectViewSet, basename='projects')
router.register('habits', HabitViewSet, basename='habits')
urlpatterns = router.urls + [
path('subtasks/', SubtaskViewSet.as_view({'post': 'create'})),
path('subtasks/<int:pk>/', SubtaskViewSet.as_view({'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'}))
]
|
python
|
import torch.nn as nn
import torch
from Postional import PositionalEncoding
class TransAm(nn.Module):
def __init__(self, feature_size=200, num_layers=1, dropout=0.1):
super(TransAm, self).__init__()
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(feature_size)
self.encoder_layer = nn.TransformerEncoderLayer(d_model=feature_size, nhead=10, dropout=dropout)
self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
self.decoder = nn.Linear(feature_size, 1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask) # , self.src_mask)
output = self.decoder(output)
return output
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
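# --- Usage sketch (not part of the original model file) ---
# Assumes PositionalEncoding (from Postional.py) accepts tensors shaped
# (seq_len, batch, features), which is also the layout nn.TransformerEncoder
# expects here with batch_first left at its default.
if __name__ == "__main__":
    model = TransAm(feature_size=200)
    src = torch.randn(50, 8, 200)   # (sequence length, batch size, features)
    out = model(src)
    print(out.shape)                # expected: torch.Size([50, 8, 1])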
|
python
|
# just a package
|
python
|
class Solution:
    def isPalindrome(self, x: int) -> bool:
        temp = str(x)
        length = len(temp)
        # compare characters from both ends; stop at the first mismatch
        for i in range(length // 2):
            if temp[i] != temp[length - 1 - i]:
                return False
        return True
|
python
|
from django.contrib import admin
from .models import Pokemon
admin.site.register(Pokemon)
|
python
|
#!/usr/local/bin/python3
#-*- encoding: utf-8 -*-
from flask import Flask
from flask_restx import Api
from setting import config
from app.api.client import worker_client
def run():
app = Flask(__name__)
api = Api(
app,
version='dev_0.1',
title='Integrated Worker Server API',
        description='Job command server',  # original (Korean): 작업 명령 서버
terms_url="/",
contact="[email protected]",
license="MIT",
url_scheme='http'
)
api.add_namespace(worker_client, '/worker/client')
app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, use_reloader=False)
# api.add_namespace(command_check, '/command/check')
# api.add_namespace(command_check, '/client/check')
# api.add_namespace(order_worker_client_clientOrder_check, '/order/client')
# api.add_namespace(ra_command_sync_client_device, '/save/client/device')
|
python
|
# ==============================CS-199==================================
# FILE: MyAI.py
#
# AUTHOR: Vaibhav Yengul
#
# DESCRIPTION: This file contains the MyAI class. You will implement your
# agent in this file. You will write the 'getAction' function,
# the constructor, and any additional helper functions.
#
# NOTES: - MyAI inherits from the abstract AI class in AI.py.
#
# - DO NOT MAKE CHANGES TO THIS FILE.
# ==============================CS-199==================================
from AI import AI
from Action import Action
import random
class TileInfo:
def __init__(self, numbr, _uncover):
self.number = numbr
self.uncover = _uncover
self.voteNumber = 0
class MyAI(AI):
def __init__(self, rowDimension, colDimension, totalMines, startX, startY):
self.rows = colDimension
self.cols = rowDimension
self.totalMines = totalMines
self.minesLeft = totalMines
self.prev_x = startX
self.prev_y = startY
self.Tiles = [[TileInfo(-10, False) for j in range(self.cols)] for i in range(self.rows)]
self.queue = []
self.voteq = []
self.debug = False
self.uncoverCount = 0
    def getAction(self, number: int) -> "Action Object":
        # 'number' is the result of the previous action: the adjacent-mine count of
        # the tile that was uncovered, or -1 when the previous action was a FLAG.
        newx, newy = self.prev_x, self.prev_y
(self.Tiles[newx][newy]).number = number
(self.Tiles[newx][newy]).uncover = True
self.uncoverCount += 1
top_left = (newx - 1, newy + 1)
top_right = (newx + 1, newy + 1)
top = (newx, newy + 1)
left = (newx - 1, newy)
right = (newx + 1, newy)
bt_left = (newx - 1, newy - 1)
bt = (newx, newy - 1)
bt_right = (newx + 1, newy - 1)
listof = [top, top_left, top_right, left, right, bt, bt_left, bt_right];
if number == 0:
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols:
self.Tiles[move[0]][move[1]].voteNumber = -1
elif number > 0:
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols and self.Tiles[move[0]][move[1]].voteNumber!=-1:
self.Tiles[move[0]][move[1]].voteNumber += 1
if number == -1:
self.minesLeft -= 1
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols:
if self.Tiles[move[0]][move[1]].number > 0:
self.Tiles[move[0]][move[1]].number -= 1
elif number > 0:
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols:
if self.Tiles[move[0]][move[1]].number == -1:
self.Tiles[newx][newy].number -= 1
queue2 = []
if number == 0:
for x1 in range(newx - 1, newx + 2):
for y1 in range(newy - 1, newy + 2):
if 0 <= x1 < self.rows and 0 <= y1 < self.cols:
if x1 == newx and y1 == newy:
continue
queue2.append([x1, y1, AI.Action.UNCOVER])
"""
if number == 1:
for x in range(newx-2, newx+3):
queue2.append([x, newy-2])
queue2.append([x, newy + 2])
queue2.extend([[newx-2, newy-1], [newx-2, newy], [newx-2, newy+1], [newx+2, newy-1],[newx+2, newy], [newx+2, newy+1]]);
"""
queue3 = []
for c in queue2:
if self.rows > c[0] >= 0 and self.cols > c[1] >= 0 and not (self.Tiles[c[0]][c[1]]).uncover:
queue3.append(c);
for a in queue3:
found = False
for item in self.queue:
if (a[0] == item[0] and a[1] == item[1]):
found = True
break;
if not found:
self.queue.append(a);
# print(" ; ".join(str(i) for i in self.queue))
if self.debug:
self.printBoard();
action = -10
inval = 0
while action == -10 and inval < 10:
action = self.getNextAct(action)
inval += 1
if (action == -10):
cnt, ctb = 0, 0
nx, ny, nnx, nny = -1, -1, -1, -1
for x in range(self.rows):
for y in range(self.cols):
if self.Tiles[x][y].number == -1:
ctb += 1
nnx, nny = x, y
if self.Tiles[x][y].number == -10:
cnt += 1
nx, ny = x, y
if cnt == 1:
self.prev_x = nx
self.prev_y = ny
action = AI.Action.UNCOVER if ctb == self.totalMines else AI.Action.FLAG
if self.debug:
print(action, self.prev_x,self.prev_y,"\n")
return Action(action, nx, ny);
if cnt == 0:
self.prev_x = nnx
self.prev_y = nny
action = AI.Action.UNCOVER
if self.debug:
print(action, self.prev_x, self.prev_y, "\n")
return Action(AI.Action.UNCOVER, nnx, nny)
portion = 2/3
if self.rows == 30:
portion = 4/5
if(action == -10 and self.uncoverCount > (portion * self.rows*self.cols)):
if not self.Tiles[self.rows-1][self.cols-1].uncover:
self.prev_x = self.rows-1
self.prev_y = self.cols - 1
action = AI.Action.UNCOVER
elif not self.Tiles[self.rows-1][0].uncover:
self.prev_x = self.rows-1
self.prev_y = 0
action = AI.Action.UNCOVER
elif not self.Tiles[0][self.cols-1].uncover:
self.prev_x = 0
self.prev_y = self.cols - 1
action = AI.Action.UNCOVER
elif not self.Tiles[0][0].uncover:
self.prev_x = 0
self.prev_y = 0
action = AI.Action.UNCOVER
if (action == -10):
# add voting mechanism
self.recalculateVotes()
a = random.choice(self.voteq)
self.prev_x = a[0]
self.prev_y = a[1]
action = a[2]
if self.debug:
print(action, self.prev_x,self.prev_y,"\n")
return Action(action, self.prev_x, self.prev_y);
def recalculateVotes(self):
self.voteq.clear()
if self.debug:
self.printVoteBoard()
max = -100
min = 100
xmax, ymax = [], []
xmin, ymin = [], []
for a in range(self.rows):
for b in range(self.cols):
if self.Tiles[a][b].number != -10 or self.Tiles[a][b].uncover: continue
if self.Tiles[a][b].voteNumber > max:
max = self.Tiles[a][b].voteNumber
xmax = [a]
ymax = [b]
elif self.Tiles[a][b].voteNumber == max:
xmax.append(a)
ymax.append(b)
if self.Tiles[a][b].voteNumber ==0:
continue
if self.Tiles[a][b].voteNumber < min :
min = self.Tiles[a][b].voteNumber
xmin = [a]
ymin = [b]
elif self.Tiles[a][b].voteNumber == min:
xmin.append(a)
ymin.append(b)
for i in range(len(xmax)):
self.voteq.append([xmax[i], ymax[i], AI.Action.FLAG])
break;
def printBoard(self):
print("\n")
for i in range(self.rows):
print("\t".join([str(x.number) for x in self.Tiles[i]]))
print("\n")
def printVoteBoard(self):
print("\n")
for i in range(self.rows):
vb = [str(x.voteNumber) for x in self.Tiles[i]]
vb = [str(t) if self.Tiles[i][j].number == -10 else str(-1) for j, t in enumerate(vb)]
print("\t".join(vb))
print("\n")
def getNextAct(self, action):
if (len(self.queue) and action == -10):
a = self.queue.pop(0)
self.prev_x = a[0]
self.prev_y = a[1]
if self.Tiles[a[0]][a[1]].uncover:
action = -10
else:
action = a[2]
if action == -10 and len(self.queue) == 0:
self.fillqueue()
queue3 = []
for c in self.queue:
if self.rows > c[0] >= 0 and self.cols > c[1] >= 0 and not (self.Tiles[c[0]][c[1]]).uncover:
queue3.append(c)
self.queue = queue3
if (len(self.queue)):
a = self.queue.pop(0);
self.prev_x = a[0]
self.prev_y = a[1]
action = a[2]
return action;
def fillqueue(self):
for y in range(1, self.cols - 1):
if self.Tiles[self.rows - 2][y].number == -10 or self.Tiles[self.rows - 2][y].number == -1 or \
self.Tiles[self.rows - 2][y].number == 0: continue
self.identifyPatterns(self.rows - 2, y)
if not self.queue:
for y in range(1, self.cols - 1):
if self.Tiles[1][y].number == -10 or self.Tiles[1][y].number == 0 or self.Tiles[1][
y].number == -1: continue
self.identifyPatterns2(1, y)
if not self.queue:
for x in range(1, self.rows - 1):
if self.Tiles[x][1].number == -10 or self.Tiles[x][1].number == 0 or self.Tiles[x][1].number == -1:
continue
self.identifyPatterns4(x, 1)
if not self.queue:
for x in range(1, self.rows - 1):
if self.Tiles[x][self.cols - 2].number == -10 or self.Tiles[x][self.cols - 2].number == 0 or \
self.Tiles[x][self.cols - 2].number == -1: continue
self.identifyPatterns5(x, self.cols - 2)
if not self.queue:
for x in range(1, self.rows - 1):
for y in range(1, self.cols - 1):
if self.Tiles[x][y].number == -10 or self.Tiles[x][y].number == 0 or self.Tiles[x][
y].number == -1: continue
self.identifyPatterns3(x, y)
if not self.queue:
for y in range(1, self.cols - 1):
if self.Tiles[0][y].number == -10 or self.Tiles[0][y].number == 0 or self.Tiles[0][
y].number == -1: continue
# row 0
if self.Tiles[0][y].number == 1 and [t.uncover for t in self.Tiles[1][y - 1:y + 2]] == [True, True,
True] and \
self.Tiles[0][y - 1].uncover and not self.Tiles[0][y + 1].uncover:
self.queue.append([0, y + 1, AI.Action.FLAG])
elif self.Tiles[0][y].number == 1 and [t.uncover for t in self.Tiles[1][y - 1:y + 2]] == [True, True,
True] and \
self.Tiles[0][y + 1].uncover and not self.Tiles[0][y - 1].uncover:
self.queue.append([0, y - 1, AI.Action.FLAG])
for y in range(1, self.cols - 1):
g = self.rows - 1
if self.Tiles[g][y].number == -10 or self.Tiles[g][y].number == 0 or self.Tiles[g][
y].number == -1: continue
if self.Tiles[g][y].number == 1 and [t.uncover for t in self.Tiles[g - 1][y - 1:y + 2]] == [True, True,
True] and \
self.Tiles[g][y - 1].uncover and not self.Tiles[g][y + 1].uncover:
self.queue.append([g, y + 1, AI.Action.FLAG])
elif self.Tiles[g][y].number == 1 and [t.uncover for t in self.Tiles[g - 1][y - 1:y + 2]] == [True,
True,
True] and \
self.Tiles[g][y + 1].uncover and not self.Tiles[g][y - 1].uncover:
self.queue.append([g, y - 1, AI.Action.FLAG])
for x in range(1, self.rows - 1):
            if self.Tiles[x][0].number == -10 or self.Tiles[x][0].number == 0 or \
                    self.Tiles[x][0].number == -1: continue
# print([t[0].uncover for t in self.Tiles[x - 1:x + 2]])
# col-0
if self.Tiles[x][0].number == 1 and [t[0].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x + 1][1].uncover and self.Tiles[x][1].uncover and not self.Tiles[x - 1][1].uncover:
self.queue.append([x - 1, 1, AI.Action.FLAG])
elif self.Tiles[x][0].number == 1 and [t[0].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x - 1][1].uncover and self.Tiles[x][1].uncover and not self.Tiles[x + 1][1].uncover:
self.queue.append([x + 1, 1, AI.Action.FLAG])
for x in range(1, self.rows - 1):
g = self.cols - 1
# col-last
# print([t[g].uncover for t in self.Tiles[x - 1:x + 2]])
if self.Tiles[x][g].number == 1 and [t[g].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x + 1][g - 1].uncover and self.Tiles[x][g - 1].uncover and not self.Tiles[x - 1][
g - 1].uncover:
self.queue.append([x - 1, g - 1, AI.Action.FLAG])
elif self.Tiles[x][g].number == 1 and [t[g].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x - 1][g - 1].uncover and self.Tiles[x][g - 1].uncover and not self.Tiles[x + 1][
g - 1].uncover:
self.queue.append([x + 1, g - 1, AI.Action.FLAG])
if not self.queue:
self.fillqueue2()
if not self.queue:
corners = {"tl":[1,1], "tr":[1, self.cols-2], "bl":[self.rows-2, 1], "br":[self.rows-2, self.cols-2]}
for c in corners.keys():
self.identifyCornerPatters(c, corners[c][0], corners[c][1]);
def fillqueue2(self):
for x1 in range(self.rows):
for y1 in range(self.cols):
if self.Tiles[x1][y1].uncover and self.Tiles[x1][y1].number == 0:
top_left = (x1 - 1, y1 + 1)
top_right = (x1 + 1, y1 + 1)
top = (x1, y1 + 1)
left = (x1 - 1, y1)
right = (x1 + 1, y1)
bt_left = (x1 - 1, y1 - 1)
bt = (x1, y1 - 1)
bt_right = (x1 + 1, y1 - 1)
listof = [top, top_left, top_right, left, right, bt, bt_left, bt_right];
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols and self.Tiles[move[0]][
move[1]].number == -10 and not self.Tiles[move[0]][move[1]].uncover \
and self.Tiles[move[0]][move[1]].number != -1:
self.queue.append([move[0], move[1], AI.Action.UNCOVER])
def identifyCornerPatters(self, corner, x, y):
if self.minesLeft> 2:
return
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if corner=="bl" and pat2==[[True,True,True], [False,False,True], [False,False,True]]:
if (pat[1][2]==1 or pat[2][2]==1) and (pat[0][0]==1 or pat[0][1]==1) and self.minesLeft==2:
self.queue.append([x, y-1, AI.Action.FLAG])
self.queue.append([x+1, y, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
elif corner=="tr" and pat2==[[True,False,False], [True,False,False], [True,True,True]]:
if (pat[2][1]==1 or pat[2][2]==1) and (pat[0][0]==1 or pat[1][0]==1) and self.minesLeft==2:
self.queue.append([x-1, y, AI.Action.FLAG])
self.queue.append([x, y+1, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
elif corner=="br" and pat2==[[True,True,True], [True,False,False], [True,False,False]]:
if (pat[1][0]==1 or pat[2][0]==1) and (pat[0][1]==1 or pat[0][2]==1) and self.minesLeft==2:
self.queue.append([x+1, y, AI.Action.FLAG])
self.queue.append([x, y+1, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
elif corner=="tl" and pat2==[[False,False,True], [False,False,True], [True,True,True]]:
if (pat[1][2]==1 or pat[0][2]==1) and (pat[2][0]==1 or pat[2][1]==1) and self.minesLeft==2:
self.queue.append([x+1, y, AI.Action.FLAG])
self.queue.append([x, y-1, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
def isValidTile(self, a, b):
return 0<=a<self.rows and 0<=b<self.cols
def identifyPatterns3(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
# 1-2-1 pattern
if pat[1] == [1, 2, 1] and pat2[2] == [True, True, True] and not pat2[0][0] and not pat2[0][2]:
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif pat[1] == [1, 2, 1] and pat2[0] == [True, True, True] and not pat2[2][0] and not pat2[2][2]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
elif [t[1] for t in pat] == [1, 2, 1] and [t[0] for t in pat2] == [True, True, True] and not pat2[0][
2] and not pat2[2][2]:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif [t[1] for t in pat] == [1, 2, 1] and [t[2] for t in pat2] == [True, True, True] and not pat2[0][
0] and not pat2[2][0]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
#mirror done
elif pat[1][1]==2 and pat[1][2]==1 and pat2[2] == [True, True, True] and pat2[1][0] and \
pat2[0]==[False,False,False]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
elif pat[1][1]==2 and pat[1][0]==1 and pat2[2] == [True, True, True] and pat2[1][2] and \
pat2[0]==[False,False,False]:
self.queue.append([x-1, y+1, AI.Action.FLAG])
#mirror done
elif pat[1][1]==2 and pat[2][1]==1 and [t[0] for t in pat2] == [True, True, True] and pat2[0][1] and \
[t[2] for t in pat2]==[False,False,False]:
self.queue.append([x-1, y+1, AI.Action.FLAG])
elif pat[1][1]==2 and pat[0][1]==1 and [t[0] for t in pat2] == [True, True, True] and pat2[2][1] and \
[t[2] for t in pat2]==[False,False,False]:
self.queue.append([x+1, y+1, AI.Action.FLAG])
#mirror done
elif pat[1][1]==2 and pat[0][1]==1 and [t[2] for t in pat2] == [True, True, True] and pat2[2][1] and \
[t[0] for t in pat2]==[False,False,False]:
self.queue.append([x+1, y-1, AI.Action.FLAG])
elif pat[1][1]==2 and pat[2][1]==1 and [t[2] for t in pat2] == [True, True, True] and pat2[0][1] and \
[t[0] for t in pat2]==[False,False,False]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
elif pat[1][1] == 2 and pat[1][2] == 1 and pat2[0] == [True, True, True] and pat2[1][0] and \
pat2[2] == [False, False, False]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
elif pat[1][1] == 2 and pat[1][0] == 1 and pat2[0] == [True, True, True] and pat2[1][2] and \
pat2[2] == [False, False, False]:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
elif pat[1][1] == 1 and pat[1][2] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][1] and not pat2[0][2] and \
self.isValidTile(x-1, y+2) and not self.Tiles[x-1][y+2].uncover:
self.queue.append([x - 1, y + 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[1][2] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[2][1] and not pat2[2][2] and \
self.isValidTile(x+1, y+2) and not self.Tiles[x+1][y+2].uncover:
self.queue.append([x + 1, y + 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[2][1] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[1][2] and not pat2[2][2] and \
self.isValidTile(x+2, y+1) and not self.Tiles[x+2][y+1].uncover:
self.queue.append([x + 2, y + 1, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[2][1] == 1 and [t[2] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[1][0] and not pat2[2][0] and \
self.isValidTile(x+2, y-1) and not self.Tiles[x+2][y-1].uncover:
self.queue.append([x + 2, y - 1, AI.Action.UNCOVER])
##
elif pat[1][1] == 1 and pat[1][0] == 1 and [t[2] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[2][0] and not pat2[2][1] and \
self.isValidTile(x+1, y-2) and not self.Tiles[x+1][y-2].uncover:
self.queue.append([x + 1, y - 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[1][0] == 1 and [t[2] for t in pat2] == [True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][0] and not pat2[0][1] and \
self.isValidTile(x - 1, y - 2) and not self.Tiles[x - 1][y - 2].uncover:
self.queue.append([x - 1, y - 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[0][1] == 1 and [t[2] for t in pat2]==[True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][0] and not pat2[1][0] and \
self.isValidTile(x-2, y-1) and not self.Tiles[x-2][y-1].uncover:
self.queue.append([x - 2, y - 1, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[0][1] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][2] and not pat2[1][2] and \
self.isValidTile(x-2, y+1) and not self.Tiles[x-2][y+1].uncover:
self.queue.append([x - 2, y + 1, AI.Action.UNCOVER])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
def identifyPatterns(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
# print("\nPattern printing:\n");
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and pat[1][0] == -10 and pat[2][1] == 1 and pat[2][0] == -10):
if not self.Tiles[x - 1][y - 1].uncover:
self.queue.append([x - 1, y - 1, AI.Action.UNCOVER])
elif (pat[1][1] == 1 and pat[1][2] == -10 and pat[2][1] == 1 and pat[2][2] == -10):
if not self.Tiles[x - 1][y + 1].uncover:
self.queue.append([x - 1, y + 1, AI.Action.UNCOVER])
elif (pat[1] == [1, 2, 1] and pat2[2] == [False, False, False]):
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][2] == -10 and pat[2][1] == 2 and pat[2][2] == -10):
if not self.Tiles[x + 1][y + 1].uncover:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
if not self.Tiles[x - 1][y - 1].uncover:
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][0] == -10 and pat[2][1] == 2 and pat[2][0] == -10):
if not self.Tiles[x + 1][y - 1].uncover:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
if not self.Tiles[x][y - 1].uncover:
self.queue.append([x, y - 1, AI.Action.FLAG])
elif pat[1] == [1,2,1] and pat2[0] == [False, False, False] and pat2[2] == [True, True, True]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif (pat[0][1] == 2 and pat[1][1] == 2 and pat[1][2] == -10 and pat[2][1] == 1 and pat[2][2] == -10 and pat[0][
2] == -10):
if not self.Tiles[x - 1][y + 1].uncover:
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif (pat[1][0] == 2 and pat[1][1] == 2 and pat[1][2] == 1 and pat[2][0] == -10 and pat[2][1] == -10 and pat[2][
2] == 1):
if not self.Tiles[x + 1][y - 1].uncover:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
if not self.Tiles[x + 1][y].uncover:
self.queue.append([x + 1, y, AI.Action.FLAG])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][0] == 1 and pat[1][1] == 2 and pat[1][2] == 2 and pat[2][0] == 1 and pat[2][1] == -10 and pat[2][
2] == -10):
if not self.Tiles[x + 1][y + 1].uncover:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
if not self.Tiles[x + 1][y].uncover:
self.queue.append([x + 1, y, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][0] == 1 and pat[2][1] == 1 and pat2[0] == [False, False, False] and not pat2[1][
2] and not pat2[2][2]):
self.queue.append([x - 1, y + 1, AI.Action.UNCOVER])
def identifyPatterns2(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
# print("\nPattern printing:\n");
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
if 0 <= x + i < self.rows and 0 <= y + j < self.cols:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if [t[0] for t in pat2]==[False,False,False] and [t[2] for t in pat2]==[True, True, True] and [t[1] for t in pat] == [1,2,1]:
self.queue.append([x-1,y-1,AI.Action.FLAG])
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
elif (pat[1][1] == 1 and pat[1][0] == -10 and pat[0][1] == 1 and pat[0][0] == -10):
            if self.Tiles[x + 1][y - 1].number < -99:
self.queue.append([x + 1, y - 1, AI.Action.UNCOVER])
elif (pat[1][1] == 1 and pat[1][2] == -10 and pat[0][1] == 1 and pat[0][2] == -10):
if self.Tiles[x + 1][y + 1].number < -99:
self.queue.append([x + 1, y + 1, AI.Action.UNCOVER])
elif (pat[1][1] == 2 and pat[1][2] == -10 and pat[0][1] == 2 and pat[0][2] == -10):
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
self.queue.append([x, y + 1, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][0] == -10 and pat[0][1] == 2 and pat[0][0] == -10 and pat2[2] == [True, True, True]
and [t[2] for t in pat2]== [True, True, True]):
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
self.queue.append([x, y - 1, AI.Action.FLAG])
        elif (pat[0][1] == 2 and pat[1][1] == 2 and pat[2][1] == 1 and
              pat[0][2] == -10 and pat[1][2] == -10 and pat[2][2] == -10):  # 2 -10
self.queue.append([x - 1, y + 1, AI.Action.FLAG]) # "2" -10
self.queue.append([x, y + 1, AI.Action.FLAG]) # 1 -10
elif pat[1] == [1,2,1] and pat2[0] == [False, False, False] and pat2[2] == [True, True, True]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif (pat[0][0] == -10 and pat[1][0] == -10 and pat[2][0] == -10 and pat[0][1] == 1 and pat[1][1] == 2 and
pat[2][1] == 2 and pat[0][2] != -10 and pat[1][2] != -10 and pat[2][2] != -10):
self.queue.append([x + 1, y - 1, AI.Action.FLAG]) # -10 2
elif (pat[1][0] == 1 and pat[1][1] == 2 and pat[1][2] == 2 and pat[0][0] == 1 and pat[0][1] == -10 and pat[0][
2] == -10):
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
self.queue.append([x - 1, y, AI.Action.FLAG])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
# for i in [-1, 0, 1]:
# print("\t".join([str(pat[i+1][0]), str(pat[1+i][1]), str(pat[i+1][2])]))
def identifyPatterns4(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if [t[0] for t in pat2] == [False, False, False] and [t[1] for t in pat] == [1, 2, 1] and [t[2] for t in
pat2] == [True, True,
True]:
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
elif pat2[2] == [False, False, False] and pat2[0] == [True,True,True] and pat[1][0]==1 and pat[1]==[1, 1, 1]:
self.queue.append([x + 1, y + 1, AI.Action.UNCOVER])
def identifyPatterns5(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif [t[2] for t in pat2] == [False, False, False] and [t[1] for t in pat] == [1, 2, 1] and [t[0] for t in
pat2] == [True, True,
True]:
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
elif (pat[1] == [2,1,1] or pat[1]==[1,1,1]) and pat2[0] == [False, False, False] and pat2[2]==[True, True, True]:
self.queue.append([x-1, y-1, AI.Action.UNCOVER])
elif (pat[1] == [2,1,1] or pat[1]==[1,1,1]) and pat2[2] == [False, False, False] and pat2[0]==[True, True, True]:
self.queue.append([x+1, y-1, AI.Action.UNCOVER])
elif pat[1] == [1,2,1] and pat2[0] == [False, False, False] and pat2[2]==[True, True, True]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif pat[1] == [1,2,1] and pat2[2] == [False, False, False] and pat2[0]==[True, True, True]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from datetime import datetime
from TwitterEngine import instances, BackendChooser
def parseargs(name, argv):
date = datetime.now()
execute = False
try:
        opts, _args = getopt.getopt(argv, 'hed:', ['execute', 'date='])
except getopt.GetoptError:
print('%s [-h]' % name)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print '%s [-d "YYYY-MM-DD [HH:mm:SS]"]' % name
sys.exit()
elif opt in ('-e', '--execute'):
execute = True
elif opt in ('-d', '--date'):
try:
if len(arg) > 10:
date = datetime.strptime(arg, '%Y-%m-%d %H:%M:%S')
else:
date = datetime.strptime(arg, '%Y-%m-%d')
except ValueError as e:
print "Date format accepted: YYYY-MM-DD [HH:mm:SS]"
raise e
return (date, execute)
if __name__ == '__main__':
try:
engine_config = instances.INSTANCES[0]
(max_date, execute) = parseargs(sys.argv[0], sys.argv[1:])
except ValueError:
sys.exit(1)
backend = BackendChooser.GetBackend(engine_config)
print "Calling delete with parameters max_date = %s, execute = %s." % (max_date, execute)
backend.RemoveOldTweets(max_date, execute)
|
python
|
from django.contrib import admin
from django.contrib.auth.decorators import login_required, permission_required
from django.urls import path
from . import views
from .api import views as api_views
app_name = 'main_app'
urlpatterns = [
path('api/sites/', api_views.SitesListCreateAPIView.as_view(), name='sites_rest_api'),
path('api/sites/<uuid>', view=api_views.SitesRetrieveUpdateDestroyAPIView.as_view(),
name='sites_rest_api'),
path('api/devices/', api_views.DevicesListCreateAPIView.as_view(), name='devices_rest_api'),
path('api/devices/<uuid>', api_views.DevicesRetrieveUpdateDestroyAPIView.as_view(),
name='devices_rest_api'),
path('', views.HomeTemplateView.as_view(), name='home'),
path('sites', views.SitesListView.as_view(), name='viewsites'),
path('sites/<int:pk>', views.SitesDetailView.as_view(), name='sitesdetail'),
path('updatesite/<int:pk>', views.SiteUpdateView.as_view(), name='updatesite'),
path('deletesite/<int:pk>', views.SiteDeleteView.as_view(), name='deletesite'),
path('devices', views.DeviceListView.as_view(), name='viewdevices'),
path('create/', views.SiteCreateView.as_view(), name='createsite'),
path('create_device/', views.DeviceCreateView.as_view(), name='createdevice'),
path('devices/<int:pk>', views.DeviceDetailView.as_view(), name='devicedetail'),
path('devices/config/<int:pk>', views.DeviceConfigDetailView.as_view(), name='deviceconfig'),
path('devices/script/<int:pk>', views.DeviceScriptDetailView.as_view(), name='devicescript'),
path('updatedevice/<int:pk>', views.DeviceUpdateView.as_view(), name='updatedevice'),
path('deletedevice/<int:pk>', views.DeviceDeleteView.as_view(), name='deletedevice'),
path('deleteconfig/config/<int:pk>', views.DeviceConfigDeleteView.as_view(), name='deleteconfig'),
path('devices/syncconfig/<deviceip>&<deviceid>', views.sync_configuration, name='configsync'),
path('devices/platformsync/<deviceip>&<deviceid>', views.get_platform_detail, name='platformsync'),
path('search/', views.device_search_function, name='search'),
path('devices/syncvlans/<deviceip>&<deviceid>', views.sync_device_vlans, name='vlanssync'),
path('devices/tasks/vlanchange/<deviceip>&<deviceid>', views.port_vlan_assignment, name='vlanchange'),
]
|
python
|
import numpy as np
from features.DetectorDescriptorTemplate import DetectorDescriptorBundle
from features.cv_sift import cv_sift
class SiftDetectorDescriptorBundle(DetectorDescriptorBundle):
def __init__(self, descriptor):
sift = cv_sift()
super(SiftDetectorDescriptorBundle, self).__init__(sift, descriptor)
self.is_detector = True
self.is_descriptor = True
self.is_both = True
self.csv_flag = False
self.patch_input = True
def detect_feature(self, image):
return self.detector.detect_feature_cv_kpt(image)
def extract_descriptor(self, image, feature):
return self.descriptor.extract_descriptor(image, feature)
    def extract_all(self, image):
        feature = self.detector.detect_feature_cv_kpt(image)
        descriptor_vector = self.descriptor.extract_descriptor(image, feature)
        return feature, descriptor_vector
|
python
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from transformers import BertModel
# Helper function to compute the output length of the convolution / max-pooling layers
def out_size(sequence_length, filter_size, padding = 0, dilation = 1, stride = 1):
length = sequence_length + 2 * padding - dilation * (filter_size - 1) - 1
length = int(length/stride)
return length + 1
class CNN(torch.nn.Module):
def __init__(self, params, gat = None):
super(CNN, self).__init__()
self.bert = BertModel.from_pretrained('bert-base-uncased')
self.conv_layers = nn.ModuleList()
self.pool_layers = nn.ModuleList()
poolingLayer_out_size = 0
self.dropout = params['cnn_dropout']
self.filter_size = params['cnn_filter_sizes']
if bool(self.dropout[0]) :
self.drp1 = nn.Dropout(p = self.dropout[0])
if bool(self.dropout[1]) :
self.drp2 = nn.Dropout(p = self.dropout[1])
for fsz in self.filter_size :
l_conv = nn.Conv1d(params['embedding_dim'], params['cnn_out_channels'], fsz, stride = params['cnn_conv_stride'])
torch.nn.init.xavier_uniform_(l_conv.weight)
l_pool = nn.MaxPool1d(params['cnn_pool_stride'], stride = params['cnn_pool_stride'])
l_out_size = out_size(params['sequence_length'], fsz, stride = params['cnn_conv_stride'])
pool_out_size = int(l_out_size * params['cnn_out_channels'] / params['cnn_pool_stride'])
poolingLayer_out_size += pool_out_size
self.conv_layers.append(l_conv)
self.pool_layers.append(l_pool)
self.linear1 = nn.Linear(poolingLayer_out_size, params['cnn_hidden_dim1'])
self.linear2 = nn.Linear(params['cnn_hidden_dim1'], params['classes'])
torch.nn.init.xavier_uniform_(self.linear1.weight)
torch.nn.init.xavier_uniform_(self.linear2.weight)
def forward(self, texts):
texts = self.bert(texts)[0].detach_()
texts = texts.permute(0, 2, 1)
if bool(self.dropout[0]):
texts = self.drp1(texts)
conv_out = []
for i in range(len(self.filter_size)) :
outputs = self.conv_layers[i](texts)
outputs = outputs.view(outputs.shape[0], 1, outputs.shape[1] * outputs.shape[2])
outputs = self.pool_layers[i](outputs)
outputs = nn.functional.relu(outputs)
outputs = outputs.view(outputs.shape[0], -1)
conv_out.append(outputs)
del outputs
if len(self.filter_size) > 1 :
outputs = torch.cat(conv_out, 1)
else:
outputs = conv_out[0]
outputs = self.linear1(outputs)
outputs = nn.functional.relu(outputs)
if bool(self.dropout[1]) :
outputs = self.drp2(outputs)
outputs = self.linear2(outputs)
return outputs
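# --- Usage sketch (not part of the original module; all parameter values are assumptions) ---
# The keys mirror the ones read in __init__; embedding_dim must equal BERT's hidden
# size (768 for bert-base-uncased) and sequence_length must match the token length
# of the input. Running this downloads the pretrained BERT weights.
if __name__ == "__main__":
    params = {
        'embedding_dim': 768,
        'sequence_length': 128,
        'cnn_filter_sizes': [3, 4, 5],
        'cnn_out_channels': 100,
        'cnn_conv_stride': 1,
        'cnn_pool_stride': 2,
        'cnn_dropout': [0.1, 0.1],
        'cnn_hidden_dim1': 256,
        'classes': 5,
    }
    model = CNN(params)
    token_ids = torch.randint(0, 30522, (2, 128))  # (batch, seq_len); 30522 = bert-base-uncased vocab size
    logits = model(token_ids)
    print(logits.shape)  # expected: torch.Size([2, 5])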
|
python
|
from pi import KafkaProducerClient
class LogProducer(object):
    # TODO: Implement parallel processing
    # NOTE: the loop below runs at class-definition time (not inside a method); it
    # builds a sample request/response payload and can push each one to Kafka via
    # the commented-out producer.send_message() call at the bottom.
    producer = KafkaProducerClient()
for idx in range(1000):
data = {
"res": {
"body": {
"success": False,
"code": "INTERNAL_SERVER_ERROR",
"message": "There is an error trying to process your transaction at the moment. Please try again in a while.",
"data": {}
},
"_headers": {
"set-cookie": "id-mercury=; Path=/apis/v1; Expires=Thu, 01 Jan 1970 00:00:00 GMT",
"x-accel-buffering": "no",
"access-control-allow-headers": "undefined",
"access-control-allow-credentials": "true",
"access-control-expose-headers": "id-mercury",
"x-server-timestamp": "1559037314590",
"content-type": "application/json; charset=utf-8",
"content-length": "167",
"etag": "W/\"a7-e+mYDAtUpp7U59+za+6pr7UE294\"",
"x-response-time": "97.723ms"
}
},
"req": {
"body": {
"merchantId": "MORESUPERMARKET",
"transactionId": "12781910260852152512",
"merchantOrderId": "1278-1910260852",
"amount": 28208,
"instrumentType": "MOBILE",
"instrumentReference": "9154548181",
"message": "Collect for Order Id:1278-1910260852",
"email": "",
"expiresIn": 180,
"shortName": "",
"subMerchant": "",
"storeId": "1278",
"terminalId": "J1910"
},
"headers": {
"host": "mercury.traefik.prod.phonepe.com",
"user-agent": "Go-http-client/1.1",
"content-length": "454",
"content-type": "application/json",
"x-client-ip": "103.39.0.112",
"x-forwarded-by": "103.243.35.246:443",
"x-forwarded-for": "103.39.0.112, 10.85.22.27",
"x-forwarded-host": "mercury.traefik.prod.phonepe.com",
"x-forwarded-port": "80",
"x-forwarded-proto": "http",
"x-forwarded-server": "prd-traefik101",
"x-real-ip": "10.85.22.27",
"x-verify": "1ca27036776dbb3d41316e13b82b046e50d8bf3d9d2e96ebc473076f8ab18d11",
"accept-encoding": "gzip",
"authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9.eyJpc3MiOiJwaG9uZXBlLWFwaSIsImV4cCI6MzMwODcxODQ2MzgsImlhdCI6MTUzMDI3NTgzOCwic3ViIjoiTU9SRVNVUEVSTUFSS0VUIiwicm9sZSI6Im1lcmNoYW50IiwidHlwZSI6InN0YXRpYyJ9.106JWEJDuEKEpb0VodD_F5JTbjUoi6O8JHGWz0T4N2CE9gm4_MIoJnq69J5MB0ZEqpNtD-XcwNl6m2Va5IKjFA",
"x-salt-index": "1",
"x-auth-mode": "dummy"
}
},
"responseTime": 98
}
# producer.send_message(data=data)
|
python
|
#!/usr/bin/env python
import os, sys
from typing import Union, List
import pprint
pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
from google.protobuf.compiler import plugin_pb2 as plugin
from google.protobuf.descriptor_pool import DescriptorPool
from google.protobuf.descriptor import Descriptor, FieldDescriptor, FileDescriptor
from gen_decoder import gen_decoder_section
from gen_encoder import gen_encoder_section
import gen_util as util
import gen_sol_constants as sol_constants
import solidity_protobuf_extensions_pb2 as solpbext
def gen_fields(msg: Descriptor) -> str:
return '\n'.join(map((lambda f: (" {type} {name};").format(type = util.gen_fieldtype(f), name = f.name)), msg.fields))
def gen_map_fields_decl_for_field(f: FieldDescriptor) -> str:
return (sol_constants.MAP_FIELD_DEFINITION).format(
name = f.name,
key_type = util.gen_global_type_name_from_field(f.message_type.fields[0]),
container_type = util.gen_global_type_name_from_field(f)
)
def gen_map_fields(msg: Descriptor) -> str:
map_fields = list(filter(lambda f: f.message_type and f.message_type.GetOptions().map_entry, msg.fields))
return '\n'.join(map(gen_map_fields_decl_for_field, map_fields))
# below gen_* codes for generating external library
def gen_struct_definition(msg: Descriptor) -> str:
"""Generates the following part.
struct Data {
...
}
"""
map_fields = gen_map_fields(msg)
if map_fields.strip():
map_fields = "\n //non serialized fields" + map_fields
else:
map_fields = ""
fields = gen_fields(msg)
if (fields or map_fields):
return (sol_constants.STRUCT_DEFINITION).format(
fields = fields,
map_fields = map_fields
)
else:
return (sol_constants.STRUCT_DEFINITION).format(
fields = " bool x;",
map_fields = map_fields
)
def gen_enums(msg: Union[Descriptor, FileDescriptor]) -> str:
return '\n'.join(map(util.gen_enumtype, msg.enum_types_by_name.values()))
# below gen_* codes for generating internal library
def gen_enum_definition(msg: Union[Descriptor, FileDescriptor]) -> str:
"""Generates the following parts.
enum Foo { ... }
function encode_Foo(...) { ... }
function decode_Foo(...) { ... }
enum Bar { ... }
function encode_Bar(...) { ... }
function decode_Bar(...) { ... }
...
"""
enums = gen_enums(msg)
if enums.strip():
return (sol_constants.ENUMS_DEFINITION).format(
enums = gen_enums(msg)
)
else:
return ""
# below gen_* codes for generating internal library
def gen_utility_functions(msg: Descriptor) -> str:
return (sol_constants.UTILITY_FUNCTION).format(
name = util.gen_internal_struct_name(msg)
)
def gen_map_insert_on_store(f: FieldDescriptor, parent_msg: Descriptor) -> str:
for nt in parent_msg.nested_types:
if nt.GetOptions().map_entry:
if f.message_type and f.message_type is nt:
return ('output._size_{name} = input._size_{name};\n').format(name = f.name)
return ''
def gen_store_code_for_field(f: FieldDescriptor, msg: Descriptor) -> str:
tmpl = ""
if util.field_is_message(f) and util.field_is_repeated(f):
tmpl = sol_constants.STORE_REPEATED
elif util.field_is_message(f):
tmpl = sol_constants.STORE_MESSAGE
else:
return (sol_constants.STORE_OTHER).format(
field = f.name
)
libname = util.gen_struct_codec_lib_name_from_field(f)
return tmpl.format(
i = f.number,
field = f.name,
lib = libname,
map_insert_code = gen_map_insert_on_store(f, msg)
)
def gen_store_codes(msg: Descriptor) -> str:
return ''.join(map((lambda f: gen_store_code_for_field(f, msg)), msg.fields))
def gen_store_function(msg: Descriptor) -> str:
"""Generates the following.
function store(Data memory input, Data storage output) internal {
...
}
"""
return (sol_constants.STORE_FUNCTION).format(
name = util.gen_internal_struct_name(msg),
store_codes = gen_store_codes(msg)
)
def gen_value_copy_code(value_field, dst_flagment):
if util.field_is_message(value_field):
return ("{struct_name}.store(value, {dst}.value);").format(
struct_name = util.gen_struct_codec_lib_name_from_field(value_field),
dst = dst_flagment
)
else:
return ("{dst}.value = value;").format(dst = dst_flagment)
def gen_map_helper_codes_for_field(f: FieldDescriptor, nested_type: Descriptor) -> str:
kf = nested_type.fields[0]
vf = nested_type.fields[1]
key_type = util.gen_global_type_name_from_field(kf)
value_type = util.gen_global_type_name_from_field(vf)
field_type = util.gen_global_type_name_from_field(f)
if util.is_complex_type(value_type):
value_storage_type = "memory"
else:
value_storage_type = ""
return (sol_constants.MAP_HELPER_CODE).format(
name = util.to_camel_case(f.name),
val_name = "self.{0}".format(f.name),
map_name = "self._size_{0}".format(f.name),
key_type = key_type,
value_type = value_type,
field_type = field_type,
value_storage_type = value_storage_type,
key_storage_type = "memory" if util.is_complex_type(key_type) else "",
container_type = util.gen_global_type_name_from_field(f)
)
def gen_array_helper_codes_for_field(f: FieldDescriptor) -> str:
field_type = util.gen_global_type_name_from_field(f)
return (sol_constants.ARRAY_HELPER_CODE).format(
name = util.to_camel_case(f.name),
val_name = "self.{0}".format(f.name),
field_type = field_type,
field_storage_type = "memory" if util.is_complex_type(field_type) else ""
)
def gen_map_helper(nested_type: Descriptor, parent_msg: Descriptor, all_map_fields: List[FieldDescriptor]) -> str:
if nested_type.GetOptions().map_entry:
map_fields = list(filter(
lambda f: f.message_type and f.message_type is nested_type,
parent_msg.fields))
all_map_fields.extend(map_fields)
return ''.join(map(lambda f: gen_map_helper_codes_for_field(f, nested_type), map_fields))
else:
return ''
def gen_map_helpers(msg: Descriptor, all_map_fields: List[FieldDescriptor]) -> str:
return ''.join(map((lambda nt: gen_map_helper(nt, msg, all_map_fields)), msg.nested_types))
def gen_array_helpers(msg: Descriptor, all_map_fields: List[FieldDescriptor]) -> str:
array_fields = filter(lambda t: util.field_is_repeated(t) and t not in all_map_fields, msg.fields)
return ''.join(map(lambda f: gen_array_helper_codes_for_field(f), array_fields))
def gen_codec(msg: Descriptor, delegate_codecs: List[str]):
delegate_lib_name = util.gen_delegate_lib_name(msg)
all_map_fields = []
# delegate codec
delegate_codecs.append(sol_constants.CODECS.format(
delegate_lib_name = delegate_lib_name,
enum_definition = gen_enum_definition(msg),
struct_definition = gen_struct_definition(msg),
decoder_section = gen_decoder_section(msg),
encoder_section = gen_encoder_section(msg),
store_function = gen_store_function(msg),
map_helper = gen_map_helpers(msg, all_map_fields),
array_helper = gen_array_helpers(msg, all_map_fields),
utility_functions = gen_utility_functions(msg)
))
for nested in msg.nested_types:
nested = nested if not util.ALLOW_RESERVED_KEYWORDS else util.MessageWrapper(nested)
gen_codec(nested, delegate_codecs)
def gen_global_enum(file: FileDescriptor, delegate_codecs: List[str]):
"""Generates the following parts.
library FILE_NAME_GLOBAL_ENUMS {
enum Foo { ... }
function encode_Foo(...) { ... }
function decode_Foo(...) { ... }
enum Bar { ... }
function encode_Bar(...) { ... }
function decode_Bar(...) { ... }
...
}
"""
delegate_codecs.append(sol_constants.GLOBAL_ENUM_CODECS.format(
delegate_lib_name = util.gen_global_enum_name(file),
enum_definition = gen_enum_definition(file),
))
RUNTIME_FILE_NAME = "ProtoBufRuntime.sol"
PROTOBUF_ANY_FILE_NAME = "GoogleProtobufAny.sol"
GEN_RUNTIME = False
COMPILE_META_SCHEMA = False
def apply_options(params_string):
global GEN_RUNTIME
params = util.parse_urllike_parameter(params_string)
if 'gen_runtime' in params and 'use_runtime' in params:
raise ValueError('"gen_runtime" and "use_runtime" cannot be used together')
if "gen_runtime" in params:
GEN_RUNTIME = True
change_runtime_file_names(params["gen_runtime"])
if "use_runtime" in params:
GEN_RUNTIME = False
change_runtime_file_names(params["use_runtime"])
if "ignore_protos" in params:
util.set_ignored_protos(params["ignore_protos"])
if "pb_libname" in params:
util.change_pb_libname_prefix(params["pb_libname"])
if "for_linking" in params:
        sys.stderr.write("warning: the for_linking option is still experimental due to the slow pace of Solidity development\n")
util.set_library_linking_mode()
if "gen_internal_lib" in params:
util.set_internal_linking_mode()
if "use_builtin_enum" in params:
        sys.stderr.write("warning: the use_builtin_enum option is still experimental because values cannot be assigned to Solidity enum members\n")
util.set_enum_as_constant(True)
if "compile_meta_schema" in params:
global COMPILE_META_SCHEMA
COMPILE_META_SCHEMA = True
if "solc_version" in params:
util.set_solc_version(params["solc_version"])
if "allow_reserved_keywords" in params:
util.set_allow_reserved_keywords(True)
def change_runtime_file_names(name: str):
if not name.endswith(".sol"):
        raise ValueError('Only *.sol files are accepted, but {0} was specified'.format(name))
global RUNTIME_FILE_NAME, PROTOBUF_ANY_FILE_NAME
RUNTIME_FILE_NAME = name
# GoogleProtobufAny.sol and ProtoBufRuntime.sol must be put together in the same directory
PROTOBUF_ANY_FILE_NAME = os.path.join(
os.path.dirname(RUNTIME_FILE_NAME),
os.path.basename(PROTOBUF_ANY_FILE_NAME))
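    # e.g. (hypothetical name) passing "lib/MyRuntime.sol" sets RUNTIME_FILE_NAME to
    # "lib/MyRuntime.sol" and PROTOBUF_ANY_FILE_NAME to "lib/GoogleProtobufAny.sol".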
def gen_output_path(dependency: FileDescriptor) -> str:
dirname = os.path.dirname(dependency.name)
basename = os.path.basename(dependency.name).replace('.proto', '.sol')
if dependency.GetOptions().HasExtension(solpbext.file_options):
opts = dependency.GetOptions().Extensions[solpbext.file_options]
if opts.dirpath:
dirname = opts.dirpath
if dirname:
return '{0}/{1}'.format(dirname, basename)
else:
return '{0}'.format(basename)
def gen_relative_import_path(target: str, start: str) -> str:
target = os.path.join('root', target)
start = os.path.join('root', start)
d = os.path.relpath(os.path.dirname(target), os.path.dirname(start))
if not d.startswith('.'):
d = os.path.join('.', d)
return os.path.join(d, os.path.basename(target))
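# Worked example for gen_relative_import_path (hypothetical paths): a target of
# "ProtoBufRuntime.sol" with start "contracts/Foo.sol" yields "../ProtoBufRuntime.sol",
# while a target in the same directory as start yields "./<basename>".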
def generate_code(request, response):
pool = DescriptorPool()
for f in request.proto_file:
pool.Add(f)
generated = 0
apply_options(request.parameter)
for proto_file in map(lambda f: pool.FindFileByName(f.name), request.proto_file):
# skip google.protobuf namespace
if (proto_file.package == "google.protobuf") and (not COMPILE_META_SCHEMA):
continue
# skip native solidity type definition
if proto_file.package == "solidity":
continue
# skip descriptors listed by ignored_protos
if util.ignores_proto(proto_file.name):
continue
# main output
output = []
output_path = gen_output_path(proto_file)
# generate sol library
# prologue
output.append('// SPDX-License-Identifier: Apache-2.0\npragma solidity ^{0};'.format(util.SOLIDITY_VERSION))
for pragma in util.SOLIDITY_PRAGMAS:
output.append('{0};'.format(pragma))
if GEN_RUNTIME:
output.append('import "{0}";'.format(gen_relative_import_path(RUNTIME_FILE_NAME, output_path)))
output.append('import "{0}";'.format(gen_relative_import_path(PROTOBUF_ANY_FILE_NAME, output_path)))
else:
output.append('import "{0}";'.format(RUNTIME_FILE_NAME))
output.append('import "{0}";'.format(PROTOBUF_ANY_FILE_NAME))
for dep in proto_file.dependencies:
if dep.package == "solidity":
continue
if (dep.package == "google.protobuf") and (not COMPILE_META_SCHEMA):
continue
if util.ignores_proto(dep.name):
continue
dep_output_path = gen_output_path(dep)
output.append('import "{0}";'.format(gen_relative_import_path(dep_output_path, output_path)))
# generate per message codes
delegate_codecs = []
for msg in proto_file.message_types_by_name.values():
msg = msg if not util.ALLOW_RESERVED_KEYWORDS else util.MessageWrapper(msg)
gen_codec(msg, delegate_codecs)
if len(proto_file.enum_types_by_name):
gen_global_enum(proto_file, delegate_codecs)
# epilogue
output = output + delegate_codecs
if len(delegate_codecs) > 0: # if it has any contents, output pb.sol file
# Fill response
f = response.file.add()
f.name = output_path
f.content = '\n'.join(output)
# increase generated file count
generated = generated + 1
if generated > 0 and GEN_RUNTIME:
try:
with open(os.path.dirname(os.path.realpath(__file__)) + '/runtime/ProtoBufRuntime.sol', 'r') as runtime:
rf = response.file.add()
rf.name = RUNTIME_FILE_NAME
rf.content = '// SPDX-License-Identifier: Apache-2.0\npragma solidity ^{0};\n'.format(util.SOLIDITY_VERSION) + runtime.read()
except Exception as e:
            sys.stderr.write(
                "failed to generate the Solidity runtime at {}: could not open the bundled runtime file ({})\n".format(
                    RUNTIME_FILE_NAME, e
                )
            )
try:
with open(os.path.dirname(os.path.realpath(__file__)) + '/runtime/GoogleProtobufAny.sol', 'r') as runtime:
rf = response.file.add()
rf.name = PROTOBUF_ANY_FILE_NAME
rf.content = '// SPDX-License-Identifier: Apache-2.0\npragma solidity ^{0};\n'.format(util.SOLIDITY_VERSION) + runtime.read()
except Exception as e:
            sys.stderr.write(
                "failed to generate the Solidity runtime at {}: could not open the bundled runtime file ({})\n".format(
                    PROTOBUF_ANY_FILE_NAME, e
                )
            )
if __name__ == '__main__':
# Read request message from stdin
if hasattr(sys.stdin, 'buffer'):
data = sys.stdin.buffer.read()
else:
data = sys.stdin.read()
# Parse request
request = plugin.CodeGeneratorRequest()
request.ParseFromString(data)
# pp.pprint(request)
# Create response
response = plugin.CodeGeneratorResponse()
# Generate code
generate_code(request, response)
# Serialise response message
output = response.SerializeToString()
# Write to stdout
    if hasattr(sys.stdout, 'buffer'):
sys.stdout.buffer.write(output)
else:
sys.stdout.write(output)
|
python
|
import json
import os
import toml
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_FILE = 'config.toml'
CONFIG_FILE_DIR = os.path.join(BASE_DIR, CONFIG_FILE)
CONFIG_DATA = toml.load(CONFIG_FILE_DIR)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = CONFIG_DATA['settings']['DEBUG']
ALLOWED_HOSTS = CONFIG_DATA['settings']['ALLOWED_HOSTS']
# Application definition
INSTALLED_APPS = [
'modeltranslation',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'ckeditor_uploader',
'mptt',
'snowpenguin.django.recaptcha3',
'debug_toolbar',
'settings.apps.SettingsConfig',
'users.apps.UsersConfig',
'news.apps.NewsConfig',
'email_notification.apps.EmailSendConfig',
'comments.apps.CommentsConfig',
]
AUTH_USER_MODEL = 'users.UserModel'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_currentuser.middleware.ThreadLocalUserMiddleware',
]
DOMAIN_URL = CONFIG_DATA['settings']['DOMAIN_URL']
INTERNAL_IPS = CONFIG_DATA['settings']['INTERNAL_IPS']
ROOT_URLCONF = 'django_news.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_news.wsgi.application'
# REDIS & CELERY
REDIS_HOST = os.environ['REDIS_HOST']
REDIS_PORT = os.environ['REDIS_PORT']
REDIS_URL = 'redis://' + REDIS_HOST + ':' + REDIS_PORT + '/0'
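# e.g. REDIS_HOST=localhost and REDIS_PORT=6379 give 'redis://localhost:6379/0'.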
CELERY_BROKER_URL = REDIS_URL
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_URL,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
}
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
gettext = lambda s: s
LANGUAGES = (
('ru', gettext('Russian')),
('en', gettext('English')),
)
LOCALE_PATHS = (os.path.join(BASE_DIR, 'locale'),)
MODELTRANSLATION_DEFAULT_LANGUAGE = 'en'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_CONFIGS = {
'default': {
'skin': 'moono-lisa',
'toolbar_Basic': [
['Source', '-', 'Bold', 'Italic']
],
'toolbar_YourCustomToolbarConfig': [
{'name': 'document', 'items': ['Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-', 'Templates']},
{'name': 'clipboard', 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo']},
{'name': 'editing', 'items': ['Find', 'Replace', '-', 'SelectAll']},
{'name': 'forms',
'items': ['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton',
'HiddenField']},
'/',
{'name': 'basicstyles',
'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
{'name': 'paragraph',
'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv', '-',
'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl',
'Language']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'insert',
'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak', 'Iframe']},
'/',
{'name': 'styles', 'items': ['Styles', 'Format', 'Font', 'FontSize']},
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
{'name': 'about', 'items': ['About']},
'/', # put this to force next toolbar on new line
{'name': 'yourcustomtools', 'items': [
# put the name of your editor.ui.addButton here
'Preview',
'Maximize',
]},
],
'toolbar': 'YourCustomToolbarConfig', # put selected toolbar config here
'tabSpaces': 4,
'extraPlugins': ','.join([
'uploadimage', # the upload image feature
# your extra plugins here
'div',
'autolink',
'autoembed',
'embedsemantic',
'autogrow',
# 'devtools',
'widget',
'lineutils',
'clipboard',
'dialog',
'dialogui',
'elementspath'
]),
}
}
# STATIC & MEDIA
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'news/static')]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# SMTP
EMAIL_HOST = CONFIG_DATA['smtp']['EMAIL_HOST']
EMAIL_USE_TLS = CONFIG_DATA['smtp']['EMAIL_USE_TLS']
EMAIL_USE_SSL = CONFIG_DATA['smtp']['EMAIL_USE_SSL']
EMAIL_PORT = CONFIG_DATA['smtp']['EMAIL_PORT']
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
# RECAPTCHA
RECAPTCHA_PUBLIC_KEY = os.getenv('RECAPTCHA_PUBLIC_KEY')
RECAPTCHA_PRIVATE_KEY = os.getenv('RECAPTCHA_PRIVATE_KEY')
RECAPTCHA_DEFAULT_ACTION = 'generic'
RECAPTCHA_SCORE_THRESHOLD = 0.5
SITE_ID = 1
|
python
|
from location import GeoCoordinate, geo_to_cartesian
import time
class Value:
def __init__(self, value, unit):
self.value = value
self.unit = unit
class Measurement:
def __init__(self, row):
self.parameter = row["parameter"]
self.value = Value(row["value"], row["unit"])
self.location_geo = GeoCoordinate(row["latitude"], row["longitude"])
self.location = self.location_geo
self.source = row["source"]
self.time = time.strptime(row['date'], "%Y-%m-%dT%H:%M:%S.%fZ")
self.confidence = row['confidence']
def convert_location_to_cartesian(self):
self.location_cart = geo_to_cartesian(self.location_geo)
self.location = self.location_cart
def convert_location_to_geo(self):
self.location = self.location_geo
|
python
|
"""
LC89. Gray Code
The gray code is a binary numeral system where two successive values differ in only one bit.
Given a non-negative integer n representing the total number of bits in the code, print the sequence of gray code. A gray code sequence must begin with 0.
Example 1:
Input: 2
Output: [0,1,3,2]
Explanation:
00 - 0
01 - 1
11 - 3
10 - 2
For a given n, a gray code sequence may not be uniquely defined.
For example, [0,2,3,1] is also a valid gray code sequence.
00 - 0
10 - 2
11 - 3
01 - 1
Example 2:
Input: 0
Output: [0]
Explanation: We define the gray code sequence to begin with 0.
A gray code sequence of n has size 2^n, so for n = 0 the size is 2^0 = 1.
Therefore, for n = 0 the gray code sequence is [0].
"""
# Runtime: 40 ms, faster than 22.57% of Python3 online submissions for Gray Code.
# Memory Usage: 14.7 MB, less than 5.26% of Python3 online submissions for Gray Code.
from typing import List
class Solution:
def grayCode(self, n: int) -> List[int]:
if n == 0:
return [0]
res = {}
curr = "0" * n
self.dfs(res, curr, n, 0)
return [int(key, 2) for key,_ in sorted(res.items(), key=lambda x:x[1])]
def dfs(self, res, curr, n, index):
res[curr] = index
for i in range(n):
if curr[i] == "0":
tmp = curr[:i] + "1" + curr[i+1:]
else:
tmp = curr[:i] + "0" + curr[i+1:]
if tmp in res:
continue
self.dfs(res, tmp, n, index+1)
break
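# A minimal alternative sketch (not part of the submission above): the i-th Gray
# code can be computed directly as i ^ (i >> 1), which yields the canonical
# sequence starting at 0 without any search.
def gray_code_bitwise(n: int) -> List[int]:
    # For n bits there are 2 ** n codes; consecutive values differ in exactly one bit.
    return [i ^ (i >> 1) for i in range(1 << n)]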
|
python
|
"""The IPython HTML Notebook"""
import os
# Packagers: modify this line if you store the notebook static files elsewhere
DEFAULT_STATIC_FILES_PATH = os.path.join(os.path.dirname(__file__), "static")
del os
from .nbextensions import install_nbextension
|
python
|
import unittest
import pytest
from anchore_engine.db import Image, get_thread_scoped_session
from anchore_engine.services.policy_engine.engine.tasks import ImageLoadTask
from anchore_engine.services.policy_engine.engine.policy.gate import ExecutionContext
from anchore_engine.services.policy_engine import _init_distro_mappings
from test.integration.services.policy_engine.fixtures import cls_test_data_env2, cls_anchore_db
from anchore_engine.subsys import logger
@pytest.fixture(scope='class')
def cls_fully_loaded_test_env(cls_test_data_env2, request):
"""
Load the test env, including a feed sync and image analysis. Places the env in the class's test_env and test_image vars
    :param cls_test_data_env2:
:param request:
:return:
"""
_init_distro_mappings()
from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask
t = FeedsUpdateTask()
t.execute()
for image_id, path in request.cls.test_env.image_exports():
logger.info(('Ensuring loaded: image id: {} from file: {}'.format(image_id, path)))
t = ImageLoadTask(image_id=image_id, user_id='0', url='file://' + path)
t.execute()
db = get_thread_scoped_session()
test_image = db.query(Image).get((request.cls.test_env.get_images_named(request.cls.__default_image__)[0][0], '0'))
request.cls.test_image = test_image
db.rollback()
@pytest.fixture(scope='class')
def cls_no_feeds_test_env(cls_test_data_env2, request):
"""
Same as fully_loaded_test_env but does not sync feeds
    :param cls_test_data_env2:
:param request:
:return:
"""
_init_distro_mappings()
for image_id, path in request.cls.test_env.image_exports():
logger.info(('Ensuring loaded: image id: {} from file: {}'.format(image_id, path)))
t = ImageLoadTask(image_id=image_id, user_id='0', url='file://' + path)
t.execute()
db = get_thread_scoped_session()
test_image = db.query(Image).get((request.cls.test_env.get_images_named(request.cls.__default_image__)[0][0], '0'))
request.cls.test_image = test_image
db.rollback()
class GateUnitTest(unittest.TestCase):
__default_image__ = 'node'
gate_clazz = None
def get_initialized_trigger(self, trigger_name, config=None, **kwargs):
clazz = self.gate_clazz.get_trigger_named(trigger_name)
trigger = clazz(self.gate_clazz, **kwargs)
context = ExecutionContext(db_session=get_thread_scoped_session(), configuration=config)
gate = trigger.gate_cls()
return trigger, gate, context
|
python
|
#!/usr/bin/python
import csv
import pycurl
import json
def insertToElasticSearch(data):
esData={'year' : data[0],
'week': data[1],
'state' : data[2],
'area': data[3],
'location' : data[4],
'totalCase' : data[5],
'durationInDays': data[6],
'geo' : {
'lat' : data[7],
'lon' : data[8],
}
}
# 1 to 4 inclusive
#server = str(random.randrange(1,5))
server = "localhost"
c = pycurl.Curl()
url = 'http://' + server + ':9200/govmy/dengue/?pretty'
c.setopt(c.URL, url)
c.setopt(c.POSTFIELDS, json.dumps(esData))
c.perform()
with open('lokalitihotspot2015.csv', 'rb') as csvfile:
#with open('test.csv', 'rb') as csvfile:
propreader = csv.reader(csvfile)
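    # Advance the underlying file object by one line to skip the CSV header row.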
next(csvfile)
for row in propreader:
insertToElasticSearch(row)
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
networking_calico.plugins.ml2.drivers.calico.test.lib
~~~~~~~~~~~
Common code for Neutron driver UT.
"""
import eventlet
import eventlet.queue
import inspect
import logging
import mock
import sys
# When you're working on a test and need to see logging - both from the test
# code and the code _under_ test - uncomment the following line.
#
# logging.basicConfig(level=logging.DEBUG)
_log = logging.getLogger(__name__)
sys.modules['etcd'] = m_etcd = mock.MagicMock()
sys.modules['neutron'] = m_neutron = mock.MagicMock()
sys.modules['neutron.agent'] = m_neutron.agent
sys.modules['neutron.agent.rpc'] = m_neutron.agent.rpc
sys.modules['neutron.common'] = m_neutron.common
sys.modules['neutron.common.exceptions'] = m_neutron.common.exceptions
sys.modules['neutron.db'] = m_neutron.db
sys.modules['neutron.db.models'] = m_neutron.db.models
sys.modules['neutron.db.models.l3'] = m_neutron.db.models.l3
sys.modules['neutron.openstack'] = m_neutron.openstack
sys.modules['neutron.openstack.common'] = m_neutron.openstack.common
sys.modules['neutron.openstack.common.db'] = m_neutron.openstack.common.db
sys.modules['neutron.plugins'] = m_neutron.plugins
sys.modules['neutron.plugins.ml2'] = m_neutron.plugins.ml2
sys.modules['neutron.plugins.ml2.drivers'] = m_neutron.plugins.ml2.drivers
sys.modules['neutron.plugins.ml2.rpc'] = m_neutron.plugins.ml2.rpc
sys.modules['sqlalchemy'] = m_sqlalchemy = mock.Mock()
sys.modules['sqlalchemy.orm'] = m_sqlalchemy.orm
sys.modules['sqlalchemy.orm.exc'] = m_sqlalchemy.orm.exc
sys.modules['networking_calico.compat'] = m_compat = mock.MagicMock()
port1 = {'binding:vif_type': 'tap',
'binding:host_id': 'felix-host-1',
'id': 'DEADBEEF-1234-5678',
'network_id': 'calico-network-id',
'device_id': 'instance-1',
'device_owner': 'compute:nova',
'fixed_ips': [{'subnet_id': 'subnet-id-10.65.0--24',
'ip_address': '10.65.0.2'}],
'mac_address': '00:11:22:33:44:55',
'admin_state_up': True,
'security_groups': ['SGID-default'],
'status': 'ACTIVE'}
port2 = {'binding:vif_type': 'tap',
'binding:host_id': 'felix-host-1',
'id': 'FACEBEEF-1234-5678',
'network_id': 'calico-network-id',
'device_id': 'instance-2',
'device_owner': 'compute:nova',
'fixed_ips': [{'subnet_id': 'subnet-id-10.65.0--24',
'ip_address': '10.65.0.3'}],
'mac_address': '00:11:22:33:44:66',
'admin_state_up': True,
'security_groups': ['SGID-default'],
'status': 'ACTIVE'}
# Port with an IPv6 address.
port3 = {'binding:vif_type': 'tap',
'binding:host_id': 'felix-host-2',
'id': 'HELLO-1234-5678',
'network_id': 'calico-network-id',
'device_id': 'instance-3',
'device_owner': 'compute:nova',
'fixed_ips': [{'subnet_id': 'subnet-id-2001:db8:a41:2--64',
'ip_address': '2001:db8:a41:2::12'}],
'mac_address': '00:11:22:33:44:66',
'admin_state_up': True,
'security_groups': ['SGID-default'],
'status': 'ACTIVE'}
floating_ports = [{'fixed_port_id': 'DEADBEEF-1234-5678',
'fixed_ip_address': '10.65.0.2',
'floating_ip_address': '192.168.0.1'}]
class EtcdException(Exception):
pass
class EtcdKeyNotFound(EtcdException):
pass
class EtcdClusterIdChanged(EtcdException):
pass
class EtcdEventIndexCleared(EtcdException):
pass
class EtcdValueError(EtcdException):
pass
class EtcdDirNotEmpty(EtcdValueError):
pass
m_etcd.EtcdException = EtcdException
m_etcd.EtcdKeyNotFound = EtcdKeyNotFound
m_etcd.EtcdClusterIdChanged = EtcdClusterIdChanged
m_etcd.EtcdEventIndexCleared = EtcdEventIndexCleared
m_etcd.EtcdValueError = EtcdValueError
m_etcd.EtcdDirNotEmpty = EtcdDirNotEmpty
class DBError(Exception):
pass
m_compat.db_exc.DBError = DBError
class NoResultFound(Exception):
pass
m_sqlalchemy.orm.exc.NoResultFound = NoResultFound
# Define a stub class, that we will use as the base class for
# CalicoMechanismDriver.
class DriverBase(object):
def __init__(self, agent_type, vif_type, vif_details):
pass
# Define another stub class that mocks out leader election: assume we're always
# the leader. This is a fake elector: it never votes (get it!?).
class GrandDukeOfSalzburg(object):
def __init__(self, *args, **kwargs):
pass
def master(self):
return True
def stop(self):
pass
# Replace Neutron's SimpleAgentMechanismDriverBase - which is the base class
# that CalicoMechanismDriver inherits from - with this stub class.
m_neutron.plugins.ml2.drivers.mech_agent.SimpleAgentMechanismDriverBase = \
DriverBase
# Import all modules used by the mechanism driver so we can hook their logging.
from networking_calico import datamodel_v3
from networking_calico import etcdutils
from networking_calico import etcdv3
from networking_calico.plugins.ml2.drivers.calico import election
from networking_calico.plugins.ml2.drivers.calico import endpoints
from networking_calico.plugins.ml2.drivers.calico import mech_calico
from networking_calico.plugins.ml2.drivers.calico import policy
from networking_calico.plugins.ml2.drivers.calico import status
from networking_calico.plugins.ml2.drivers.calico import subnets
from networking_calico.plugins.ml2.drivers.calico import syncer
# Replace the elector.
mech_calico.Elector = GrandDukeOfSalzburg
REAL_EVENTLET_SLEEP_TIME = 0.01
# Value used to indicate 'timeout' in poll and sleep processing.
TIMEOUT_VALUE = object()
class Lib(object):
# Ports to return when the driver asks the OpenStack database for all
# current ports.
osdb_ports = []
# Subnets that the OpenStack database knows about.
osdb_subnets = []
def setUp(self):
# Announce the current test case.
_log.info("TEST CASE: %s", self.id())
# Mock calls to sys.exit.
self.sys_exit_p = mock.patch("sys.exit")
self.sys_exit_p.start()
# Hook eventlet.
self.setUp_eventlet()
# Hook logging.
self.setUp_logging()
# If an arg mismatch occurs, we want to see the complete diff of it.
self.maxDiff = None
# Create an instance of CalicoMechanismDriver.
mech_calico.mech_driver = None
self.driver = mech_calico.CalicoMechanismDriver()
# Hook the (mock) Neutron database.
self.db = mech_calico.plugin_dir.get_plugin()
self.db_context = mech_calico.ctx.get_admin_context()
self.db_context.session.query.return_value.filter_by.side_effect = (
self.port_query
)
# Arrange what the DB's get_ports will return.
self.db.get_ports.side_effect = self.get_ports
self.db.get_port.side_effect = self.get_port
# Arrange DB's get_subnet and get_subnets calls.
self.db.get_subnet.side_effect = self.get_subnet
self.db.get_subnets.side_effect = self.get_subnets
# Arrange what the DB's get_security_groups query will return (the
# default SG).
self.db.get_security_groups.return_value = [
{'id': 'SGID-default',
'security_group_rules': [
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv4',
'port_range_min': -1},
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv6',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'ethertype': 'IPv4',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'ethertype': 'IPv6',
'port_range_min': -1}
]}
]
self.db.get_security_group_rules.return_value = [
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv4',
'security_group_id': 'SGID-default',
'port_range_min': -1},
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv6',
'security_group_id': 'SGID-default',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'ethertype': 'IPv4',
'security_group_id': 'SGID-default',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'security_group_id': 'SGID-default',
'ethertype': 'IPv6',
'port_range_min': -1}
]
self.db._get_port_security_group_bindings.side_effect = (
self.get_port_security_group_bindings
)
self.port_security_group_bindings = [
{'port_id': 'DEADBEEF-1234-5678',
'security_group_id': 'SGID-default'},
{'port_id': 'FACEBEEF-1234-5678',
'security_group_id': 'SGID-default'},
{'port_id': 'HELLO-1234-5678',
'security_group_id': 'SGID-default'},
]
def setUp_eventlet(self):
"""setUp_eventlet
Setup to intercept sleep calls made by the code under test, and hence
to (i) control when those expire, and (ii) allow time to appear to pass
(to the code under test) without actually having to wait for that time.
"""
# Reset the simulated time (in seconds) that has passed since the
# beginning of the test.
self.current_time = 0
# Make time.time() return current_time.
self.old_time = sys.modules['time'].time
sys.modules['time'].time = lambda: self.current_time
# Reset the dict of current sleepers. In each dict entry, the key is
# an eventlet.Queue object and the value is the time at which the sleep
# should complete.
self.sleepers = {}
# Reset the list of spawned eventlet threads.
self.threads = []
# Replacement for eventlet.sleep: sleep for some simulated passage of
# time (as directed by simulated_time_advance), instead of for real
# elapsed time.
def simulated_time_sleep(secs=None):
if secs is None:
# Thread just wants to yield to any other waiting thread.
self.give_way()
return
# Create a new queue.
queue = eventlet.Queue(1)
queue.stack = inspect.stack()[1][3]
# Add it to the dict of sleepers, together with the waking up time.
self.sleepers[queue] = self.current_time + secs
_log.info("T=%s: %s: Start sleep for %ss until T=%s",
self.current_time,
queue.stack,
secs,
self.sleepers[queue])
# Do a zero time real sleep, to allow other threads to run.
self.real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
# Block until something is posted to the queue.
queue.get(True)
# Wake up.
return None
# Replacement for eventlet.spawn: track spawned threads so that we can
# kill them all when a test case ends.
def simulated_spawn(*args):
# Do the real spawn.
thread = self.real_eventlet_spawn(*args)
# Remember this thread.
self.threads.append(thread)
_log.info("New thread %s", thread)
# Also return it.
return thread
def simulated_spawn_after(secs, fn, *args):
def sleep_then_run():
simulated_time_sleep(secs)
fn(*args)
return simulated_spawn(sleep_then_run)
# Hook sleeping.
self.real_eventlet_sleep = eventlet.sleep
eventlet.sleep = simulated_time_sleep
# Similarly hook spawning.
self.real_eventlet_spawn = eventlet.spawn
eventlet.spawn = simulated_spawn
self.real_eventlet_spawn_after = eventlet.spawn_after
eventlet.spawn_after = simulated_spawn_after
def setUp_logging(self):
"""Setup to intercept and display logging by the code under test.
To see this logging, you also need to uncomment the logging.basicConfig
call near the top of this file.
"""
import logging
for module in [
election,
endpoints,
mech_calico,
policy,
status,
subnets,
syncer,
datamodel_v3,
etcdutils,
etcdv3,
]:
module.LOG = logging.getLogger("\t%-15s\t" %
module.__name__.split('.')[-1])
# Tear down after each test case.
def tearDown(self):
_log.info("Clean up remaining green threads...")
for thread in self.threads:
_log.info("Kill thread %s", thread)
thread.kill()
_log.info("All threads killed")
# Stop hooking eventlet.
self.tearDown_eventlet()
# Stop mocking sys.exit.
self.sys_exit_p.stop()
def tearDown_eventlet(self):
# Restore the real eventlet.sleep and eventlet.spawn.
eventlet.sleep = self.real_eventlet_sleep
eventlet.spawn = self.real_eventlet_spawn
eventlet.spawn_after = self.real_eventlet_spawn_after
# Repair time.time()
sys.modules['time'].time = self.old_time
# Method for the test code to call when it wants to advance the simulated
# time.
def simulated_time_advance(self, secs):
while (secs > 0):
_log.info("T=%s: Want to advance by %s", self.current_time, secs)
# Determine the time to advance to in this iteration: either the
# full time that we've been asked for, or the time at which the
# next sleeper should wake up, whichever of those is earlier.
wake_up_time = self.current_time + secs
for queue in self.sleepers.keys():
if self.sleepers[queue] < wake_up_time:
# This sleeper will wake up before the time that we've been
# asked to advance to.
wake_up_time = self.sleepers[queue]
# Advance to the determined time.
secs -= (wake_up_time - self.current_time)
self.current_time = wake_up_time
_log.info("T=%s", self.current_time)
# Wake up all sleepers that should now wake up.
            # Iterate over a snapshot of the keys, since sleepers are deleted below.
            for queue in list(self.sleepers.keys()):
if self.sleepers[queue] <= self.current_time:
_log.info("T=%s >= %s: %s: Wake up!",
self.current_time,
self.sleepers[queue],
queue.stack)
del self.sleepers[queue]
queue.put_nowait(TIMEOUT_VALUE)
# Allow woken (and possibly other) threads to run.
self.real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
def give_way(self):
"""give_way
Method for test code to call when it wants to allow other eventlet
threads to run.
"""
self.real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
def check_update_port_status_called(self, context):
self.db.update_port_status.assert_called_once_with(
context._plugin_context,
context._port['id'],
mech_calico.constants.PORT_STATUS_ACTIVE)
self.db.update_port_status.reset_mock()
def get_port(self, context, port_id):
return self.get_ports(context, filters={'id': [port_id]})[0]
def get_ports(self, context, filters=None):
if filters is None:
return self.osdb_ports
        assert list(filters.keys()) == ['id']
allowed_ids = set(filters['id'])
return [p for p in self.osdb_ports if p['id'] in allowed_ids]
def get_subnet(self, context, id):
matches = [s for s in self.osdb_subnets if s['id'] == id]
if matches and len(matches) == 1:
return matches[0]
elif ':' in id:
return {'gateway_ip': '2001:db8:a41:2::1'}
else:
return {'gateway_ip': '10.65.0.1'}
def get_subnets(self, context, filters=None):
if filters:
self.assertTrue('id' in filters)
matches = [s for s in self.osdb_subnets
if s['id'] in filters['id']]
else:
matches = [s for s in self.osdb_subnets]
return matches
def notify_security_group_update(self, id, rules, port, type):
"""Notify a new or changed security group definition."""
# Prep appropriate responses for next get_security_group and
# _get_port_security_group_bindings calls.
self.db.get_security_group.return_value = {
'id': id,
'security_group_rules': rules
}
if port is None:
self.db._get_port_security_group_bindings.return_value = []
else:
self.db._get_port_security_group_bindings.return_value = [
{'port_id': port['id']}
]
self.db.get_port.return_value = port
if type == 'rule':
# Call security_groups_rule_updated with the new or changed ID.
mech_calico.security_groups_rule_updated(
mock.MagicMock(), mock.MagicMock(), [id]
)
def get_port_security_group_bindings(self, context, filters):
if filters is None:
return self.port_security_group_bindings
        assert list(filters.keys()) == ['port_id']
allowed_ids = set(filters['port_id'])
return [b for b in self.port_security_group_bindings
if b['port_id'] in allowed_ids]
def port_query(self, **kw):
if kw.get('port_id', None):
for port in self.osdb_ports:
if port['id'] == kw['port_id']:
return port['fixed_ips']
elif kw.get('fixed_port_id', None):
fips = []
for fip in floating_ports:
if fip['fixed_port_id'] == kw['fixed_port_id']:
fips.append(fip)
return fips
else:
raise Exception("port_query doesn't know how to handle kw=%r" % kw)
return None
class FixedUUID(object):
def __init__(self, uuid):
self.uuid = uuid
self.uuid4_p = mock.patch('uuid.uuid4')
def __enter__(self):
guid = mock.MagicMock()
guid.get_hex.return_value = self.uuid
guid.__str__.return_value = self.uuid
uuid4 = self.uuid4_p.start()
uuid4.return_value = guid
def __exit__(self, type, value, traceback):
self.uuid4_p.stop()
|
python
|
# vim: fdm=marker
'''
author: Fabio Zanini
date: 11/12/14
content: Get trees of haplotype alignments.
'''
# Modules
import os
import argparse
from operator import itemgetter, attrgetter
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from Bio import Phylo
from hivwholeseq.patients.patients import load_patients, Patient
from hivwholeseq.utils.sequence import align_muscle
from hivwholeseq.utils.tree import build_tree_fasttree
from hivwholeseq.utils.argparse import RoiAction
from hivwholeseq.store.store_tree_consensi import annotate_tree
from hivwholeseq.utils.nehercook.ancestral import ancestral_sequences
from hivwholeseq.utils.tree import tree_to_json, filter_rare_leaves
from hivwholeseq.utils.generic import write_json
# Functions
def load_alignments(filename):
'''Load alignments from website file'''
import zipfile, zlib
from Bio import AlignIO
import StringIO
alis = []
with zipfile.ZipFile(filename, 'r') as zf:
for fn in zf.namelist():
f = StringIO.StringIO(zf.read(fn))
ali = {'time': float(fn.split('_')[0]),
'ali': AlignIO.read(f, 'fasta')}
alis.append(ali)
return alis
def get_region_count_trajectories(patient, region, VERBOSE=0, countmin=5):
'''Get haplotype trajectories in a region (from the website alignments)'''
import numpy as np
from hivwholeseq.website.filenames import get_precompiled_alignments_filename
filename = get_precompiled_alignments_filename(patient.code, region)
alis = load_alignments(filename)
seqs_set = set()
for ali in alis:
seqs_set |= set([''.join(seq).replace('-', '')
for seq in ali['ali']
if int(seq.name.split('_')[1]) >= countmin])
seqs_set = list(seqs_set)
hct = np.zeros((len(seqs_set), len(alis)), int)
for it, ali in enumerate(alis):
for seq in ali['ali']:
s = ''.join(seq).replace('-', '')
count = int(seq.name.split('_')[1])
if count < countmin:
continue
iseq = seqs_set.index(s)
hct[iseq, it] = count
seqs_set = np.array(seqs_set, 'S'+str(np.max(map(len, seqs_set))))
times = np.array(map(itemgetter('time'), alis))
ind = np.array([i for i, t in enumerate(patient.times) if t in times])
# Filter out all time points without any counts
ind_keep = hct.any(axis=0)
ind = ind[ind_keep]
hct = hct[:, ind_keep]
return (hct.T, ind, seqs_set)
def annotate_tree_for_plot(tree, minfreq=0.02):
'''Add annotations for plotting'''
from matplotlib import cm
cmap = cm.jet
last_tp = max(leaf.DSI for leaf in tree.get_terminals())
def get_color(node):
return map(int, np.array(cmap(node.DSI/last_tp*0.9)[:-1]) * 255)
# Annotate leaves
for leaf in tree.get_terminals():
leaf.color = get_color(leaf)
if leaf.frequency >= minfreq:
leaf.label = ('t = '+str(int(leaf.DSI))+
', f = '+'{:1.2f}'.format(leaf.frequency))
else:
leaf.label = ''
# Color internal branches
for node in tree.get_nonterminals(order='postorder'):
node.label = ''
node.DSI = np.mean([c.DSI for c in node.clades])
node.color = get_color(node)
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Get local trees',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--patients', nargs='+',
help='Patients to analyze')
parser.add_argument('--roi', required=True, action=RoiAction,
help='Region of interest (e.g. F1 300 350 or V3 0 +oo)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-4]')
parser.add_argument('--maxreads', type=int, default=-1,
help='Number of reads analyzed per sample')
parser.add_argument('--plot', action='store_true',
help='Plot local haplotype trajectories')
parser.add_argument('--freqmin', type=float, default=0.01,
help='Minimal frequency to keep the haplotype')
args = parser.parse_args()
pnames = args.patients
roi = args.roi
VERBOSE = args.verbose
maxreads = args.maxreads
use_plot = args.plot
freqmin = args.freqmin
patients = load_patients()
if pnames is not None:
patients = patients.loc[pnames]
for pname, patient in patients.iterrows():
patient = Patient(patient)
if VERBOSE >= 1:
print pname
if os.path.isfile(patient.get_local_tree_filename(roi[0], format='json')):
if VERBOSE >= 2:
print 'Get tree'
region = roi[0]
tree = patient.get_local_tree(region)
elif os.path.isfile(patient.get_local_tree_filename(' '.join(map(str, roi)), format='json')):
if VERBOSE >= 2:
print 'Get tree'
region = ' '.join(map(str, roi))
tree = patient.get_local_tree(region)
else:
raise IOError('Tree file not found')
if VERBOSE >= 2:
print 'Filter out too rare leaves'
filter_rare_leaves(tree, freqmin, VERBOSE=VERBOSE)
if use_plot:
if VERBOSE >= 2:
print 'Annotate tree for plotting'
annotate_tree_for_plot(tree, minfreq=0.1)
if VERBOSE >= 2:
print 'Plot'
fig, ax = plt.subplots()
ax.set_title(patient.code+', '+region)
Phylo.draw(tree, axes=ax, do_show=False, label_func=attrgetter('label'),
show_confidence=False)
ax.grid(True)
ax.set_ylim(ax.get_ylim()[0] * 1.04, -ax.get_ylim()[0] * 0.04)
plt.tight_layout()
plt.ion()
plt.show()
|
python
|
#!/usr/bin/env python
from csvkit.unicsv import UnicodeCSVReader, UnicodeCSVWriter
class CSVKitReader(UnicodeCSVReader):
"""
A unicode-aware CSV reader with some additional features.
"""
pass
class CSVKitWriter(UnicodeCSVWriter):
"""
A unicode-aware CSV writer with some additional features.
"""
def __init__(self, f, encoding='utf-8', line_numbers=False, **kwargs):
self.row_count = 0
self.line_numbers = line_numbers
UnicodeCSVWriter.__init__(self, f, encoding, lineterminator='\n', **kwargs)
def _append_line_number(self, row):
if self.row_count == 0:
row.insert(0, 'line_number')
else:
row.insert(0, self.row_count)
self.row_count += 1
def writerow(self, row):
if self.line_numbers:
row = list(row)
self._append_line_number(row)
# Convert embedded Mac line endings to unix style line endings so they get quoted
row = [i.replace('\r', '\n') if isinstance(i, basestring) else i for i in row]
UnicodeCSVWriter.writerow(self, row)
def writerows(self, rows):
for row in rows:
self.writerow(row)
|
python
|
#! /usr/bin/env python
from tkinter import NoDefaultRoot, Tk, ttk, filedialog
from _tkinter import getbusywaitinterval
from tkinter.constants import *
from math import sin, pi
import base64, zlib, os
################################################################################
ICON = b'eJxjYGAEQgEBBiApwZDBzMAgxsDAoAHEQCEGBQaIOAwkQDE2UOSkiUM\
Gp/rlyd740Ugzf8/uXROxAaA4VvVAqcfYAFCcoHqge4hR/+btWwgCqoez8aj//fs\
XWiAARfCrhyCg+XA2HvV/YACoHs4mRj0ywKWe1PD//p+B4QMOmqGeMAYAAY/2nw=='
################################################################################
class GUISizeTree(ttk.Frame):
@classmethod
def main(cls):
# Create the application's root.
NoDefaultRoot()
root = Tk()
# Restrict sizing and add title.
root.minsize(350, 175)
root.title('Directory Size')
# Create the application's icon.
with open('tree.ico', 'wb') as file:
file.write(zlib.decompress(base64.b64decode(ICON)))
root.iconbitmap('tree.ico')
os.remove('tree.ico')
# Configure the SizeTree object.
view = cls(root)
view.grid(row=0, column=0, sticky=NSEW)
# Setup the window for resizing.
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
# Enter the GUI main event loop.
root.mainloop()
def __init__(self, master=None, **kw):
super().__init__(master, **kw)
# Configure the progressbar.
self.__progress = ttk.Progressbar(self, orient=HORIZONTAL)
self.__progress.grid(row=0, column=0, columnspan=4, sticky=EW)
# Configure the tree.
self.__tree = ttk.Treeview(self, selectmode=BROWSE,
columns=('d_size', 'f_size', 'path'))
self.__tree.heading('#0', text=' Name', anchor=W)
self.__tree.heading('d_size', text=' Total Size', anchor=W)
self.__tree.heading('f_size', text=' File Size', anchor=W)
self.__tree.heading('path', text=' Path', anchor=W)
self.__tree.column('#0', minwidth=80, width=160)
self.__tree.column('d_size', minwidth=80, width=160)
self.__tree.column('f_size', minwidth=80, width=160)
self.__tree.column('path', minwidth=80, width=160)
self.__tree.grid(row=1, column=0, columnspan=3, sticky=NSEW)
# Configure the scrollbar.
self.__scroll = ttk.Scrollbar(self, orient=VERTICAL,
command=self.__tree.yview)
self.__tree.configure(yscrollcommand=self.__scroll.set)
self.__scroll.grid(row=1, column=3, sticky=NS)
# Configure the path button.
self.__label = ttk.Button(self, text='Path:', command=self.choose)
self.__label.bind('<Return>', self.choose)
self.__label.grid(row=2, column=0)
# Configure the directory dialog.
head, tail = os.getcwd(), True
while tail:
head, tail = os.path.split(head)
self.__dialog = filedialog.Directory(self, initialdir=head)
# Configure the path entry box.
self.__path = ttk.Entry(self, cursor='xterm')
self.__path.bind('<Control-Key-a>', self.select_all)
self.__path.bind('<Control-Key-/>', lambda event: 'break')
self.__path.bind('<Return>', self.search)
self.__path.grid(row=2, column=1, sticky=EW)
self.__path.focus_set()
# Configure the execution button.
self.__run = ttk.Button(self, text='Search', command=self.search)
self.__run.bind('<Return>', self.search)
self.__run.grid(row=2, column=2)
# Configure the sizegrip.
self.__grip = ttk.Sizegrip(self)
self.__grip.grid(row=2, column=3, sticky=SE)
# Configure the grid.
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(1, weight=1)
# Configure root item in tree.
self.__root = None
def choose(self, event=None):
# Get a directory path via a dialog.
path = self.__dialog.show()
if path:
# Fill entry box with user path.
self.__path.delete(0, END)
self.__path.insert(0, os.path.abspath(path))
def select_all(self, event):
# Select the contents of the widget.
event.widget.selection_range(0, END)
return 'break'
def search(self, event=None):
if self.__run['state'].string == NORMAL:
# Show background work progress.
self.__run['state'] = DISABLED
path = os.path.abspath(self.__path.get())
if os.path.isdir(path):
self.__progress.configure(mode='indeterminate', maximum=100)
self.__progress.start()
# Search while updating display.
if self.__root is not None:
self.__tree.delete(self.__root)
tree = SizeTree(self.update, path)
nodes = tree.total_nodes + 1
# Build user directory treeview.
self.__progress.stop()
self.__progress.configure(mode='determinate', maximum=nodes)
self.__root = self.__tree.insert('', END, text=tree.name)
self.build_tree(self.__root, tree)
# Indicate completion of search.
self.__run['state'] = NORMAL
else:
self.shake()
def shake(self):
# Check frame rate.
assert getbusywaitinterval() == 20, 'Values are hard-coded for 50 FPS.'
# Get application root.
root = self
while not isinstance(root, Tk):
root = root.master
# Schedule beginning of animation.
self.after_idle(self.__shake, root, 0)
def __shake(self, root, frame):
frame += 1
# Get the window's location and update X value.
x, y = map(int, root.geometry().split('+')[1:])
x += int(sin(pi * frame / 2.5) * sin(pi * frame / 50) * 5)
root.geometry('+{}+{}'.format(x, y))
# Schedule next frame or restore search button.
if frame < 50:
self.after(20, self.__shake, root, frame)
else:
self.__run['state'] = NORMAL
def build_tree(self, node, tree):
# Make changes to the treeview and progress bar.
text = 'Unknown!' if tree.dir_error else convert(tree.total_size)
self.__tree.set(node, 'd_size', text)
text = 'Unknown!' if tree.file_error else convert(tree.file_size)
self.__tree.set(node, 'f_size', text)
self.__tree.set(node, 'path', tree.path)
self.__progress.step()
# Update the display and extract any child node.
self.update()
for child in tree.children:
subnode = self.__tree.insert(node, END, text=child.name)
self.build_tree(subnode, child)
################################################################################
class SizeTree:
"Create a tree structure outlining a directory's size."
def __init__(self, callback, path):
callback()
self.path = path
head, tail = os.path.split(path)
self.name = tail or head
self.children = []
self.file_size = 0
self.total_size = 0
self.total_nodes = 0
self.file_error = False
self.dir_error = False
try:
dir_list = os.listdir(path)
except OSError:
self.dir_error = True
else:
for name in dir_list:
path_name = os.path.join(path, name)
if os.path.isdir(path_name):
size_tree = SizeTree(callback, path_name)
self.children.append(size_tree)
self.total_size += size_tree.total_size
self.total_nodes += size_tree.total_nodes + 1
elif os.path.isfile(path_name):
try:
self.file_size += os.path.getsize(path_name)
except OSError:
self.file_error = True
self.total_size += self.file_size
################################################################################
def convert(number):
"Convert bytes into human-readable representation."
if not number:
return '0 Bytes'
assert 0 < number < 1 << 110, 'number out of range'
ordered = reversed(tuple(format_bytes(partition_number(number, 1 << 10))))
cleaned = ', '.join(item for item in ordered if item[0] != '0')
return cleaned
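# Worked example: convert(1536) partitions 1536 bytes into 512 bytes plus
# 1 kilobyte, so it returns '1 Kilobyte, 512 Bytes'.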
def partition_number(number, base):
"Continually divide number by base until zero."
div, mod = divmod(number, base)
yield mod
while div:
div, mod = divmod(div, base)
yield mod
def format_bytes(parts):
"Format partitioned bytes into human-readable strings."
for power, number in enumerate(parts):
yield '{} {}'.format(number, format_suffix(power, number))
def format_suffix(power, number):
"Compute the suffix for a certain power of bytes."
return (PREFIX[power] + 'byte').capitalize() + ('s' if number != 1 else '')
PREFIX = ' kilo mega giga tera peta exa zetta yotta bronto geop'.split(' ')
################################################################################
if __name__ == '__main__':
GUISizeTree.main()
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
from typing import Tuple
import torch
try:
torch.classes.load_library(
f"{os.environ['CONDA_PREFIX']}/lib/libtorchscript_pinocchio.so"
)
except OSError:
print(
"Warning: Failed to load 'libtorchscript_pinocchio.so' from CONDA_PREFIX, loading from default build directory 'polymetis/build' instead..."
)
project_root_dir = (
subprocess.run(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
.stdout.strip()
.decode("ascii")
)
torch.classes.load_library(
os.path.join(
project_root_dir,
"polymetis/build/libtorchscript_pinocchio.so",
)
)
class RobotModelPinocchio(torch.nn.Module):
"""
A robot model able to compute kinematics & dynamics of a robot given an urdf.
Implemented as a ``torch.nn.Module`` wrapped around a C++ custom class that leverages
`Pinocchio <https://github.com/stack-of-tasks/pinocchio>`_ -
a C++ rigid body dynamics library.
"""
def __init__(self, urdf_filename: str, ee_joint_name: str):
super().__init__()
self.model = torch.classes.torchscript_pinocchio.RobotModelPinocchio(
urdf_filename, ee_joint_name
)
def get_joint_angle_limits(self) -> torch.Tensor:
return self.model.get_joint_angle_limits()
def get_joint_velocity_limits(self) -> torch.Tensor:
return self.model.get_joint_velocity_limits()
def forward_kinematics(
self, joint_positions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Computes end-effector position and orientation from a given joint position.
Args:
joint_positions: A given set of joint angles.
Returns:
Tuple[torch.Tensor, torch.Tensor]: End-effector position, end-effector orientation as quaternion
"""
pos, quat = self.model.forward_kinematics(joint_positions)
return pos.to(joint_positions), quat.to(joint_positions)
def compute_jacobian(self, joint_positions: torch.Tensor) -> torch.Tensor:
return self.model.compute_jacobian(joint_positions).to(joint_positions)
def inverse_dynamics(
self,
joint_positions: torch.Tensor,
joint_velocities: torch.Tensor,
joint_accelerations: torch.Tensor,
) -> torch.Tensor:
"""Computes the desired torques to achieve a certain joint acceleration from
given joint positions and velocities.
Returns:
torch.Tensor: desired torques
"""
return self.model.inverse_dynamics(
joint_positions, joint_velocities, joint_accelerations
).to(joint_positions)
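# A minimal usage sketch (hypothetical URDF path and joint name, assuming the
# torchscript_pinocchio extension loaded above and a 7-DoF arm):
if __name__ == "__main__":
    robot_model = RobotModelPinocchio("panda.urdf", "panda_joint8")  # placeholder names
    joint_positions = torch.zeros(7)
    ee_pos, ee_quat = robot_model.forward_kinematics(joint_positions)
    print(ee_pos, ee_quat, robot_model.compute_jacobian(joint_positions).shape)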
|
python
|
from unittest.mock import patch
from datetime import datetime
import httpx
import pytest
from src.zever_local.inverter import (
Inverter,
InverterData,
ZeversolarError,
ZeversolarTimeout,
)
_registry_id = "EAB241277A36"
_registry_key = "ZYXTBGERTXJLTSVS"
_hardware_version = "M11"
_software_version = "18625-797R+17829-719R"
_time = "16:22"
_date = "20/02/2022"
_serial_number = "ZS150045138C0104"
_content = f"1\n1\n{_registry_id}\n{_registry_key}\n{_hardware_version}\n{_software_version}\n{_time} {_date}\n1\n1\n{_serial_number}\n1234\n8.9\nOK\nError"
_content2 = f"1\n1\n{_registry_id}\n{_registry_key}\n{_hardware_version}\n{_software_version}\n{_time} {_date}\n1\n1\n{_serial_number}\n1234\n1.23\nOK\nError"
_byte_content = _content.encode()
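# The newline-separated payload mirrors the inverter's local API response; the field
# order matches the InverterData accessors asserted in test_InverterData below.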
async def test_async_connect():
"""Fetch the inverter info."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", "https://test.t"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
await my_inverter.async_connect()
mac_address = my_inverter.mac_address
serial_number = my_inverter.serial_number
assert mac_address == "EA-B2-41-27-7A-36"
assert serial_number == _serial_number
async def test_async_get_data():
"""Fetch inverter data."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
my_inverter_data = await my_inverter.async_get_data()
energy_today_KWh = my_inverter_data.energy_today_KWh
assert energy_today_KWh == 8.09
async def test_async_get_data_ZeversolarError():
    """Fetching inverter data throws an error."""
url = "test"
with pytest.raises(ZeversolarError):
my_inverter = Inverter(url)
await my_inverter.async_get_data()
async def test_async_get_data_ZeversolarTimeout():
    """Fetching inverter data times out."""
url = "test"
with pytest.raises(ZeversolarTimeout):
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.side_effect = httpx.TimeoutException("Timeout")
my_inverter = Inverter(url)
await my_inverter.async_get_data()
async def test_async_connect_ZeversolarError():
    """Connecting to the inverter throws an error."""
url = "test"
with pytest.raises(ZeversolarError):
my_inverter = Inverter(url)
await my_inverter.async_connect()
async def test_async_connect_ZeversolarTimeout():
    """Connecting to the inverter times out."""
url = "test"
with pytest.raises(ZeversolarTimeout):
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.side_effect = httpx.TimeoutException("Timeout")
my_inverter = Inverter(url)
await my_inverter.async_connect()
def test_InverterData():
"""Test the inverter data class."""
my_inverter_data = InverterData(_content2.split('\n'))
energy_today_KWh = my_inverter_data.energy_today_KWh
unknown0 = my_inverter_data.unknown0
unknown1 = my_inverter_data.unknown1
registry_id = my_inverter_data.registry_id
registry_key = my_inverter_data.registry_key
hardware_version = my_inverter_data.hardware_version
software_version = my_inverter_data.software_version
my_datetime = my_inverter_data.datetime
communication_status = my_inverter_data.communication_status
unknown8 = my_inverter_data.unknown8
serial_number = my_inverter_data.serial_number
pac_watt = my_inverter_data.pac_watt
energy_today_KWh = my_inverter_data.energy_today_KWh
status = my_inverter_data.status
unknown13 = my_inverter_data.unknown13
mac_address = my_inverter_data.mac_address
assert unknown0 == '1'
assert unknown1 == '1'
assert registry_id == _registry_id
assert registry_key == _registry_key
assert hardware_version == _hardware_version
assert software_version == _software_version
assert datetime(2022, 2, 20, 16, 22) == my_datetime
assert communication_status == '1'
assert unknown8 == '1'
assert serial_number == _serial_number
assert pac_watt == 1234
assert energy_today_KWh == 1.23
assert status == 'OK'
assert unknown13 == "Error"
assert mac_address == "EA-B2-41-27-7A-36"
def test_InverterData_bugfix():
"""Test the inverter data class fixing the energy bug."""
my_inverter_data = InverterData(_content.split('\n'))
energy_today_KWh = my_inverter_data.energy_today_KWh
assert energy_today_KWh == 8.09
async def test_Inverter_power_on():
"""Power on inverter."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
my_inverter = Inverter(url)
await my_inverter.async_connect()
with patch("src.zever_local.inverter.httpx.AsyncClient.post") as mock_device_info:
mock_device_info.return_value = mock_response
my_result = await my_inverter.power_on()
assert my_result
async def test_Inverter_power_off():
"""Power on inverter."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
my_inverter = Inverter(url)
await my_inverter.async_connect()
with patch("src.zever_local.inverter.httpx.AsyncClient.post") as mock_device_info:
mock_device_info.return_value = mock_response
my_result = await my_inverter.power_off()
assert my_result
async def test_Inverter_power_on_ZeversolarError():
"""Power off inverter."""
url = "test"
my_inverter = Inverter(url)
with pytest.raises(ZeversolarError):
my_inverter = Inverter(url)
await my_inverter.power_on()
async def test_Inverter_power_on_ZeversolarTimeout():
"""Power off inverter has a timeout."""
url = "test"
my_inverter = Inverter(url)
with pytest.raises(ZeversolarTimeout):
with patch("src.zever_local.inverter.httpx.AsyncClient.post") as mock_device_info:
mock_device_info.side_effect = httpx.TimeoutException("Timeout")
my_inverter = Inverter(url)
await my_inverter.power_on()
async def test_async_connect():
"""Fetch the inverter info."""
url = ""
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
await my_inverter.async_connect()
mac_address = my_inverter.mac_address
serial_number = my_inverter.serial_number
assert mac_address == "EA-B2-41-27-7A-36"
assert serial_number == _serial_number
|
python
|
"""
Tests for the vertex enumeration algorithm
"""
import unittest
import numpy as np
from nashpy.algorithms.vertex_enumeration import vertex_enumeration
class TestVertexEnumeration(unittest.TestCase):
"""
Tests for the vertex enumeration algorithm
"""
def test_three_by_two_vertex_enumeration(self):
A = np.array([[3, 3], [2, 5], [0, 6]])
B = np.array([[3, 2], [2, 6], [3, 1]])
expected_equilibria = sorted(
[
(np.array([1, 0, 0]), np.array([1, 0])),
(np.array([0, 1 / 3, 2 / 3]), np.array([1 / 3, 2 / 3])),
(np.array([4 / 5, 1 / 5, 0]), np.array([2 / 3, 1 / 3])),
],
key=lambda a: list(np.round(a[0], 4)),
)
equilibria = sorted(
vertex_enumeration(A, B), key=lambda a: list(np.round(a[0], 4))
)
for equilibrium, expected_equilibrium in zip(equilibria, expected_equilibria):
for strategy, expected_strategy in zip(equilibrium, expected_equilibrium):
self.assertTrue(all(np.isclose(strategy, expected_strategy)))
def test_with_negative_utilities(self):
A = np.array([[1, -1], [-1, 1]])
B = -A
expected_equilibrium = (np.array([0.5, 0.5]), np.array([0.5, 0.5]))
equilibrium = next(vertex_enumeration(A, B))
for strategy, expected_strategy in zip(equilibrium, expected_equilibrium):
assert all(np.isclose(strategy, expected_strategy)), strategy
|
python
|
#!/usr/bin/env python3
__author__ = "Will Kamp"
__copyright__ = "Copyright 2013, Matrix Mariner Inc."
__license__ = "BSD"
__email__ = "[email protected]"
__status__ = "Development" # "Prototype", "Development", or "Production"
'''This is the wrapper program that ties it all together to complete this set of programs'
task of compiling charts into the MX Mariner format.
'''
import sys
import os
import inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from mxmcc import regions
from mxmcc import catalog
from mxmcc import tilebuilder
from mxmcc import tilesmerge
from mxmcc import gemf
from mxmcc import zdata
from mxmcc import verify
from mxmcc import tiles_opt
from mxmcc.checkpoint import *
from mxmcc import encryption_shim
from mxmcc import config
import mbutil as mb
import re
import shutil
PROFILE_MX_R = 'MX_REGION' # (default) renders standard MX Mariner gemf + zdat
PROFILE_MB_C = 'MB_CHARTS' # renders each chart as mbtiles file
PROFILE_MB_R = 'MB_REGION' # renders entire region as mbtiles file
def _build_catalog(checkpoint_store, profile, region):
# build catalog
point = CheckPoint.CHECKPOINT_CATALOG
if checkpoint_store.get_checkpoint(region, profile) < point:
print('building catalog for:', region)
if not regions.is_valid_region(region):
region_dir = regions.find_custom_region_path(region)
if region_dir is not None:
catalog.build_catalog_for_bsb_directory(region_dir, region)
else:
raise Exception('custom region: %s does not have a directory' % region)
else:
catalog.build_catalog_for_region(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _create_tiles(checkpoint_store, profile, region):
# create tiles
point = CheckPoint.CHECKPOINT_TILE_VERIFY
if checkpoint_store.get_checkpoint(region, profile) < point:
print('building tiles for:', region)
tilebuilder.build_tiles_for_catalog(region)
# verify
if not verify.verify_catalog(region):
raise Exception(region + ' was not verified... ' + verify.error_message)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _merge_tiles(checkpoint_store, profile, region):
# merge
point = CheckPoint.CHECKPOINT_MERGE
if checkpoint_store.get_checkpoint(region, profile) < point:
print('merging tiles for:', region)
tilesmerge.merge_catalog(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _optimize_tiles(checkpoint_store, profile, region, base_dir=config.merged_tile_dir):
# optimize
point = CheckPoint.CHECKPOINT_OPT
if checkpoint_store.get_checkpoint(region, profile) < point:
# if platform.system() == 'Windows':
# tiles_opt.set_nothreads()
tiles_opt.optimize_dir(os.path.join(base_dir, region))
# verify all optimized tiles are there
if not verify.verify_opt(region, base_dir=base_dir):
raise Exception(region + ' was not optimized fully')
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _should_encrypt(region):
encrypted_providers = {regions.provider_wavey_lines, regions.provider_ukho}
return regions.provider_for_region(region) in encrypted_providers
def _encrypt_region(checkpoint_store, profile, region):
print('encrypting tiles for region:', region)
# encryption
point = CheckPoint.CHECKPOINT_ENCRYPTED
if checkpoint_store.get_checkpoint(region, profile) < point:
if not encryption_shim.encrypt_region(region):
raise Exception('encryption failed!')
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _create_gemf(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_ARCHIVE
if checkpoint_store.get_checkpoint(region, profile) < point:
print('archiving gemf for region:', region)
should_encrypt = _should_encrypt(region)
if should_encrypt:
name = region + '.enc'
else:
name = region + '.opt'
gemf.generate_gemf(name, add_uid=should_encrypt)
#if should_encrypt:
# encryption_shim.generate_token(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _create_zdat(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_METADATA
if checkpoint_store.get_checkpoint(region, profile) < point:
print('building zdat metadata archive for:', region)
zdata.generate_zdat_for_catalog(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _fill_tiles(region):
# fill
# print('filling tile \"holes\"', region)
# filler.fill_all_in_region(region)
print(region, 'fill skipped')
def _create_region_mb_tiles(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_ARCHIVE
if checkpoint_store.get_checkpoint(region, profile) < point:
print('archiving mbtiles for region:', region)
region_dir = os.path.join(config.merged_tile_dir, region + '.opt')
mbtiles_file = os.path.join(config.compiled_dir, region + '.mbtiles')
if os.path.isfile(mbtiles_file):
os.remove(mbtiles_file)
mb.disk_to_mbtiles(region_dir, mbtiles_file, format='png', scheme='xyz')
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def __create_chart_mb_tiles(region):
region_charts_dir = os.path.join(config.unmerged_tile_dir, region + '.opt')
for chart in os.listdir(region_charts_dir):
print('archiving mbtiles for chart:', chart)
chart_dir = os.path.join(region_charts_dir, chart)
prefix = re.sub(r'\W+', '_', chart).lower()
mbtiles_file = os.path.join(config.compiled_dir, prefix + '.mbtiles')
if os.path.isfile(mbtiles_file):
os.remove(mbtiles_file)
mb.disk_to_mbtiles(chart_dir, mbtiles_file, format='png', scheme='xyz')
def _create_chart_mb_tiles(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_ARCHIVE
if checkpoint_store.get_checkpoint(region, profile) < point:
__create_chart_mb_tiles(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _skip_zoom(region):
tile_path = os.path.join(config.unmerged_tile_dir, region)
for chart in os.listdir(tile_path):
zs = []
for z_dir in os.listdir(os.path.join(tile_path, chart)):
try:
z = int(z_dir)
zs.append(z)
except ValueError:
pass
zs.sort(reverse=True)
if len(zs) > 1 and (zs[0] - zs[1]) == 1:
i = 0
for z in zs:
if i % 2:
p = os.path.join(tile_path, chart, str(z))
shutil.rmtree(p)
i += 1
def compile_region(region, profile=PROFILE_MX_R, perform_clean=True):
region = region.upper()
profile = profile.upper()
checkpoint_store = CheckPointStore()
_build_catalog(checkpoint_store, profile, region)
_create_tiles(checkpoint_store, profile, region)
if 'REGION' in profile:
_merge_tiles(checkpoint_store, profile, region)
_fill_tiles(region)
_optimize_tiles(checkpoint_store, profile, region)
if 'MX_' in profile:
should_encrypt = _should_encrypt(region)
if should_encrypt:
_encrypt_region(checkpoint_store, profile, region)
_create_gemf(checkpoint_store, profile, region)
_create_zdat(checkpoint_store, profile, region)
if 'MB_' in profile:
_create_region_mb_tiles(checkpoint_store, profile, region)
elif 'CHARTS' in profile and 'MB_' in profile:
_skip_zoom(region)
_optimize_tiles(checkpoint_store, profile, region, base_dir=config.unmerged_tile_dir)
_create_chart_mb_tiles(checkpoint_store, profile, region)
print('final checkpoint', checkpoint_store.get_checkpoint(region, profile))
if perform_clean and checkpoint_store.get_checkpoint(region, profile) > CheckPoint.CHECKPOINT_ENCRYPTED:
cleanup(region, config.unmerged_tile_dir)
cleanup(region, config.merged_tile_dir)
def cleanup(region, base_dir):
for ea in os.listdir(base_dir):
if region in ea:
abs_path = os.path.join(base_dir, ea)
print('clean', abs_path)
for root, dirs, files in os.walk(abs_path, topdown=False):
for name in files:
p = os.path.join(root, name)
try:
os.remove(p)
                    except OSError:
print('failed to delete', p)
for name in dirs:
os.rmdir(os.path.join(root, name))
def print_usage():
print('usage:\n$python mxmcc.py <region> <optional profile>')
if __name__ == "__main__":
if config.check_dirs():
args = sys.argv
if len(args) < 2:
print_usage()
else:
rgn = args[1]
if len(args) >= 3:
prof = args[2]
else:
prof = PROFILE_MX_R
compile_region(rgn, prof)
else:
print('Your mxmcc directory structure is not ready\n' +
'Please edit the top portion of config.py, run config.py,\n' +
'and place charts in their corresponding directories.')
|
python
|
import time
import numpy as np
import dolfin as df
from finmag.energies import Demag
from finmag.field import Field
from finmag.util.meshes import sphere
import matplotlib.pyplot as plt
radius = 5.0
maxhs = [0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 1.0]
unit_length = 1e-9
m_0 = (1, 0, 0)
Ms = 1
H_ref = np.array((- Ms / 3.0, 0, 0))
vertices = []
solvers = ["FK", "FK", "GCR", "Treecode"]
solvers_label = ["FK", "FK opt", "GCR", "Treecode"]
timings = [[], [], [], []]
errors = [[], [], [], []]
for maxh in maxhs:
mesh = sphere(r=radius, maxh=maxh, directory="meshes")
vertices.append(mesh.num_vertices())
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m_function = df.Function(S3)
m_function.assign(df.Constant(m_0))
m = Field(S3, m_function)
for i, solver in enumerate(solvers):
demag = Demag(solver)
if solver == "FK":
if i == 0:
demag.parameters["phi_1_solver"] = "default"
demag.parameters["phi_1_preconditioner"] = "default"
demag.parameters["phi_2_solver"] = "default"
demag.parameters["phi_2_preconditioner"] = "default"
if i == 1:
demag.parameters["phi_1_solver"] = "cg"
demag.parameters["phi_1_preconditioner"] = "ilu"
demag.parameters["phi_2_solver"] = "cg"
demag.parameters["phi_2_preconditioner"] = "ilu"
demag.setup(m, Ms, unit_length)
start = time.time()
        for j in range(10):
H = demag.compute_field()
elapsed = (time.time() - start) / 10.0
H = H.reshape((3, -1)).mean(axis=1)
error = abs(H[0] - H_ref[0]) / abs(H_ref[0])
timings[i].append(elapsed)
errors[i].append(error)
fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_title("Runtime")
for i, solver in enumerate(solvers):
ax.plot(vertices, timings[i], label=solvers_label[i])
ax.legend(loc=2)
ax.set_xlabel("vertices")
ax.set_ylabel("time (s)")
ax = fig.add_subplot(212)
ax.set_title("Inaccuracy")
for i, solver in enumerate(solvers):
ax.plot(vertices, errors[i], label=solvers_label[i])
ax.legend(loc=2)
ax.set_xlabel("vertices")
ax.set_ylabel("relative error (%)")
fig.tight_layout()
fig.savefig("benchmark.png")
|
python
|
from datetime import datetime, timezone
from hamcrest.core.string_description import StringDescription
from pytest import mark, raises
from preacher.core.datetime import DatetimeWithFormat
from preacher.core.verification.hamcrest import after, before
ORIGIN = datetime(2019, 12, 15, 12, 34, 56, tzinfo=timezone.utc)
@mark.parametrize('value', [
None,
1,
1.2,
complex(1, 2),
'str',
])
def test_datetime_matcher_invalid_creation(value):
with raises(TypeError):
before(value)
with raises(TypeError):
after(value)
@mark.parametrize('item', [None, 1])
def test_datetime_matcher_invalid_validation(item):
matcher = before(ORIGIN)
with raises(TypeError):
matcher.matches(item)
matcher = after(ORIGIN)
with raises(TypeError):
matcher.matches(item)
@mark.parametrize(('value', 'item', 'before_expected', 'after_expected'), [
(ORIGIN, '2019-12-15T12:34:55Z', True, False),
(ORIGIN, '2019-12-15T12:34:56Z', False, False),
(ORIGIN, '2019-12-15T12:34:57Z', False, True),
(DatetimeWithFormat(ORIGIN), '2019-12-15T12:34:55Z', True, False),
(DatetimeWithFormat(ORIGIN), '2019-12-15T12:34:56Z', False, False),
(DatetimeWithFormat(ORIGIN), '2019-12-15T12:34:57Z', False, True),
])
def test_datetime_matcher(value, item, before_expected, after_expected):
    matcher = before(value)
assert matcher.matches(item) == before_expected
description = StringDescription()
matcher.describe_to(description)
assert str(description).startswith('a value before <')
description = StringDescription()
matcher.describe_mismatch(item, description)
assert str(description).startswith('was <')
matcher = after(value)
assert matcher.matches(item) == after_expected
description = StringDescription()
matcher.describe_to(description)
assert str(description).startswith('a value after <')
description = StringDescription()
matcher.describe_mismatch(item, description)
assert str(description).startswith('was <')
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import dbbackup
def get_requirements():
return open('requirements.txt').read().splitlines()
def get_test_requirements():
return open('requirements-tests.txt').read().splitlines()
keywords = [
'django', 'database', 'media', 'backup',
    'amazon', 's3', 'dropbox',
]
setup(
name='django-dbbackup',
version=dbbackup.__version__,
description=dbbackup.__doc__,
author=dbbackup.__author__,
author_email=dbbackup.__email__,
install_requires=get_requirements(),
tests_require=get_test_requirements(),
license='BSD',
url=dbbackup.__url__,
keywords=keywords,
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Environment :: Console',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database',
'Topic :: System :: Archiving',
'Topic :: System :: Archiving :: Backup',
'Topic :: System :: Archiving :: Compression'
],
)
|
python
|
from utils.rooster_utils import prediction, get_model, load_configurations, set_seed
import torch
import sys
TARGET_SR = 44100
settings = load_configurations(mode="detector")
if(settings == -1):
print("Error: Failed while loading configurations")
sys.exit()
set_seed(settings["globals"]["seed"])
melspectrogram_parameters = settings["dataset"]["params"]["melspectrogram_parameters"]
device = torch.device(settings["globals"]["device"])
model = get_model(settings["model"])
model = model.to(device)
model.train(False)
prediction = prediction(test_audio_path="test_audio/rooster_competition.wav",
model_config=model,
mel_params=melspectrogram_parameters,
target_sr=TARGET_SR,
                        threshold=0.4, batch_size=120, period=0.5, steps=4)
print("Total number of roosters", len(prediction))
standings = prediction.sort_values(by='crow_length_msec', ascending=False)
#print(standings)
print("Duration of crow from each rooster in milliseconds")
for index, rooster in prediction.iterrows():
print(rooster["rooster_id"], ":", rooster["crow_length_msec"] )
print('\n')
rank = 1
print("Ranking of roosters by crow length")
for index, rooster in standings.iterrows():
print(rank, ":", int(rooster["rooster_id"]))
rank += 1
print("All prediction data")
print(prediction)
#print(standings)
|
python
|
# Given an integer array nums, find the contiguous
# subarray (containing at least one number) which
# has the largest sum and return its sum.
# Example:
# Input: [-2,1,-3,4,-1,2,1,-5,4],
# Output: 6
# Explanation: [4,-1,2,1] has the largest sum = 6.
# Follow up:
# If you have figured out the O(n) solution, try coding
# another solution using the divide and conquer approach,
# which is more subtle.
# EXERCISE ==> https://leetcode.com/problems/maximum-product-subarray/
# Note: the statement above describes the maximum *sum* subarray problem,
# while the linked exercise (and the solution below) is the maximum *product*
# variant.
class Solution(object):
    def maxProduct(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Track the largest and smallest products ending at the current index;
        # a negative number can turn the smallest product into the largest.
        best = cur_max = cur_min = nums[0]
        for num in nums[1:]:
            candidates = (num, cur_max * num, cur_min * num)
            cur_max = max(candidates)
            cur_min = min(candidates)
            best = max(best, cur_max)
        return best
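# Illustrative sketch (not part of the original file): the O(n) answer to the
# maximum *sum* subarray statement quoted at the top (Kadane's algorithm).
def max_subarray_sum(nums):
    """Return the largest sum over all contiguous, non-empty subarrays."""
    best = current = nums[0]
    for num in nums[1:]:
        # Either extend the running subarray or start over at `num`.
        current = max(num, current + num)
        best = max(best, current)
    return best

# Example from the statement above:
# max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6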
|
python
|
from funcx_endpoint.endpoint.utils.config import Config
from parsl.providers import LocalProvider
config = Config(
scaling_enabled=True,
provider=LocalProvider(
init_blocks=1,
min_blocks=1,
max_blocks=1,
),
max_workers_per_node=2,
funcx_service_address='https://api.funcx.org/v1'
)
# For now, visible_to must be a list of URNs for globus auth users or groups, e.g.:
# urn:globus:auth:identity:{user_uuid}
# urn:globus:groups:id:{group_uuid}
meta = {
"name": "$name",
"description": "",
"organization": "",
"department": "",
"public": False,
"visible_to": []
}
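# Illustrative only (placeholder UUIDs, not real identities or groups):
# restricting visibility to specific Globus users/groups would look like
#   meta["visible_to"] = [
#       "urn:globus:auth:identity:00000000-0000-0000-0000-000000000000",
#       "urn:globus:groups:id:00000000-0000-0000-0000-000000000000",
#   ]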
|
python
|
import abc
from numpy import ndarray
class AbstractGAN(abc.ABC):
def __init__(self, run_dir: str, outputs_dir: str, model_dir: str, generated_datasets_dir: str,
resolution: int, channels: int, epochs: int, output_save_frequency: int,
model_save_frequency: int, loss_save_frequency: int,
latent_space_save_frequency: int, dataset_generation_frequency: int, dataset_size: int,
latent_dim: int, latent_space_rows: int = 6, latent_space_columns: int = 6, outputs_rows: int = 6,
outputs_columns: int = 6):
self._run_dir = run_dir
self._outputs_dir = outputs_dir
self._model_dir = model_dir
self._generated_datasets_dir = generated_datasets_dir
self._resolution = resolution
self._channels = channels
self._epochs = epochs
self._output_save_frequency = output_save_frequency
self._model_save_frequency = model_save_frequency
self._loss_save_frequency = loss_save_frequency
self._latent_space_save_frequency = latent_space_save_frequency
self._latent_dim = latent_dim
self._dataset_generation_frequency = dataset_generation_frequency
self._dataset_size = dataset_size
self._latent_space_rows = latent_space_rows
self._latent_space_columns = latent_space_columns
self._outputs_rows = outputs_rows
self._outputs_columns = outputs_columns
self._epoch = 0
@abc.abstractmethod
def _build_models(self) -> None:
pass
@abc.abstractmethod
def train(self, dataset: ndarray, classes: ndarray) -> list:
pass
@abc.abstractmethod
def _save_models_architectures(self) -> None:
pass
@abc.abstractmethod
def _save_outputs(self) -> None:
pass
@abc.abstractmethod
def _save_latent_space(self) -> None:
pass
@abc.abstractmethod
def _save_losses(self) -> None:
pass
@abc.abstractmethod
def _save_models(self) -> None:
pass
@abc.abstractmethod
def _generate_dataset(self) -> None:
pass
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from comnetsemu.cli import CLI, spawnXtermDocker
from comnetsemu.net import Containernet, VNFManager
from mininet.link import TCLink
from mininet.log import info, setLogLevel
from mininet.node import Controller, RemoteController
if __name__ == "__main__":
# Only used for auto-testing.
AUTOTEST_MODE = os.environ.get("COMNETSEMU_AUTOTEST_MODE", 0)
# Create template host, switch, and link
hconfig = {"inNamespace": True}
http_link_config = {"bw": 1}
video_link_config = {"bw": 10}
host_link_config = {}
setLogLevel("info")
net = Containernet(
controller=Controller,
link=TCLink,
xterms=False,
autoSetMacs=True,
autoStaticArp=True,
)
mgr = VNFManager(net)
info("*** Add controller\n")
controller = RemoteController("c1", ip="127.0.0.1", port=6633)
net.addController(controller)
info("*** Creating hosts\n")
h1 = net.addDockerHost(
"h1",
dimage="dev_test",
ip="10.0.0.1",
docker_args={"hostname": "h1"},
)
h2 = net.addDockerHost(
"h2",
dimage="dev_test",
ip="10.0.0.2",
docker_args={"hostname": "h2"},
)
h3 = net.addDockerHost(
"h3",
dimage="dev_test",
ip="10.0.0.3",
docker_args={"hostname": "h3"},
)
h4 = net.addDockerHost(
"h4",
dimage="dev_test",
ip="10.0.0.4",
docker_args={"hostname": "h4"},
)
h5 = net.addDockerHost(
"h5",
dimage="dev_test",
ip="10.0.0.5",
docker_args={"hostname": "h5"},
)
h6 = net.addDockerHost(
"h6",
dimage="dev_test",
ip="10.0.0.6",
docker_args={"hostname": "h6"},
)
h7 = net.addDockerHost(
"h7",
dimage="dev_test",
ip="10.0.0.7",
docker_args={"hostname": "h7"},
)
h8 = net.addDockerHost(
"h8",
dimage="dev_test",
ip="10.0.0.8",
docker_args={"hostname": "h8"},
)
info("*** Adding switch and links\n")
for i in range(7):
sconfig = {"dpid": "%016x" % (i + 1)}
net.addSwitch("s%d" % (i + 1), protocols="OpenFlow10", **sconfig)
# s1 = net.addSwitch("s1")
# s2 = net.addSwitch("s2")
# s3 = net.addSwitch("s3")
# s4 = net.addSwitch("s4")
# s5 = net.addSwitch("s5")
# s6 = net.addSwitch("s6")
# s7 = net.addSwitch("s7")
# Add switch links
net.addLink("s1", "s3", **http_link_config)
net.addLink("s1", "s4", **http_link_config)
net.addLink("s2", "s4", **http_link_config)
net.addLink("s2", "s5", **http_link_config)
net.addLink("s3", "s6", **http_link_config)
net.addLink("s4", "s6", **http_link_config)
net.addLink("s4", "s7", **http_link_config)
net.addLink("s5", "s7", **http_link_config)
# Add host links
net.addLink("h1", "s1", **host_link_config)
net.addLink("h2", "s1", **host_link_config)
net.addLink("h3", "s2", **host_link_config)
net.addLink("h4", "s2", **host_link_config)
net.addLink("h5", "s6", **host_link_config)
net.addLink("h6", "s6", **host_link_config)
net.addLink("h7", "s7", **host_link_config)
net.addLink("h8", "s7", **host_link_config)
info("\n*** Starting network\n")
net.start()
srv4 = mgr.addContainer(
"srv4",
"h4",
"echo_server",
"python /home/server.py",
docker_args={},
)
srv7 = mgr.addContainer(
"srv7",
"h7",
"echo_server",
"python /home/server.py",
docker_args={},
)
srv8 = mgr.addContainer(
"srv8",
"h8",
"echo_server",
"python /home/server.py",
docker_args={},
)
srv1 = mgr.addContainer("srv1", "h1", "dev_test", "bash", docker_args={})
srv2 = mgr.addContainer("srv2", "h2", "dev_test", "bash", docker_args={})
srv3 = mgr.addContainer("srv3", "h3", "dev_test", "bash", docker_args={})
srv5 = mgr.addContainer("srv5", "h5", "dev_test", "bash", docker_args={})
srv6 = mgr.addContainer("srv6", "h6", "dev_test", "bash", docker_args={})
if not AUTOTEST_MODE:
        # Cannot spawn xterms for srv4, srv7 and srv8 since BASH is not
        # installed in their image (echo_server); srv3 runs the dev_test image.
spawnXtermDocker("srv3")
CLI(net)
mgr.removeContainer("srv1")
mgr.removeContainer("srv2")
mgr.removeContainer("srv3")
mgr.removeContainer("srv4")
mgr.removeContainer("srv5")
mgr.removeContainer("srv6")
mgr.removeContainer("srv7")
mgr.removeContainer("srv8")
net.stop()
mgr.stop()
|
python
|
import streamlit as st
import pandas as pd
import joblib
model = joblib.load('/content/drive/MyDrive/models/cc_foodrcmdns.pkl')
df = pd.read_csv('dataset/indianfoodMAIN.csv')
recp_name = st.selectbox("Select Recipe", df['recp_name'].values)
st.write(recp_name)
def findRcmdn(value):
data = []
index = df[df['recp_name'] == value].index[0]
distances = sorted(list(enumerate(model[index])),reverse=True,key = lambda x: x[1])
for i in distances[1:6]:
# print(df.iloc[i[0]].translatedrecipename,df.iloc[i[0]].cuisine)
print(f"{df.iloc[i[0]]['recp_name'] } , Cuisin : {df.iloc[i[0]].cuisine}")
allvalues = { "recp_name": df.iloc[i[0]]['recp_name'],
"cuisine": df.iloc[i[0]]['cuisine'],
"image-url": df.iloc[i[0]]['image-url'],
"url": df.iloc[i[0]]["url"],
}
data.append(allvalues)
return data
def custom_markdown(name, img_url, URL,csn):
mymark = f"""
<div class="w3-container w3-red">
<h1> {name} </h1>
<h5>Cuisine: {csn}</h5>
</div>
    <img src="{img_url}" alt="" style="width:50%">
<div class="w3-container">
    <p> Recipe Instructions: <a href="{URL}" target="_blank">Read...</a> </p>
</div>
<div class="w3-container w3-red">
</div>
"""
return mymark
if st.button("Show"):
st.text("Recipe Recommendations....")
recommendations = findRcmdn(recp_name)
for result in recommendations:
st.markdown(custom_markdown(name= result['recp_name'], img_url=result['image-url'], URL=result["url"],csn=result["cuisine"] ), True )
# st.info(result['recp_name'])
|
python
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
# Some utility functions
def average_norm_clip(grad, clip_val):
'''
Compute the norm and clip it if necessary.
The first dimension will be batchsize.
Args:
grad(Tensor): the gradient
clip_val(float): value to clip to
'''
batchsize = grad.size(0)
avg_l2_norm = 0.0
for i in range(batchsize):
avg_l2_norm += grad[i].data.norm()
avg_l2_norm /= batchsize
if avg_l2_norm > clip_val:
# print("l2_norm: %.5f clipped to %.5f" % (avg_l2_norm, clip_val))
grad *= clip_val / avg_l2_norm
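# Illustrative usage (hypothetical parameter name, not from this file): for a
# gradient of shape (batchsize, ...), the per-sample L2 norms are averaged and,
# if the average exceeds ``clip_val``, the whole tensor is rescaled in place:
#   average_norm_clip(policy_head.weight.grad, clip_val=5.0)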
def accumulate(acc, new):
''' accumulate by the same key in a list of dicts
Args:
acc(dict): the dict to accumulate to
new(dict): new dict entry
Returns:
A new dict containing the accumulated sums of each key.
'''
ret = { k: new[k] if a is None else a + new[k] for k, a in acc.items() if k in new }
ret.update({ k : v for k, v in new.items() if not (k in acc) })
return ret
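# Illustrative example (not part of the original code):
#   accumulate({'loss': 1.0, 'acc': None}, {'loss': 0.5, 'acc': 0.9})
#   -> {'loss': 1.5, 'acc': 0.9}
# Keys present only in ``new`` are copied over; keys of ``acc`` that are
# missing from ``new`` are dropped from the result.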
def add_err(overall_err, new_err):
''' Add ``new_err`` to ``overall_err``
Args:
overall_err(float): summed overall error
new_err(float): new error
'''
if overall_err is None:
return new_err
else:
overall_err += new_err
return overall_err
def add_stats(stats, key, value):
''' Feed ``value`` to ``stats[key]``'''
if stats:
stats[key].feed(value)
def check_terminals(has_terminal, batch):
''' Check if the environment sent a terminal signal '''
# Block backpropagation if we go pass a terminal node.
for i, terminal in enumerate(batch["terminal"]):
if terminal: has_terminal[i] = True
def check_terminals_anyT(has_terminal, batch, T):
''' Check if any of ``batch[t], t <= T`` is terminal'''
for t in range(T):
check_terminals(has_terminal, batch[t])
|
python
|
import gc
import numpy as np
import pandas as pd
from datetime import datetime
from functools import partial
import tensorflow as tf
from sklearn import preprocessing
from .. import utils
from ..config import cfg
from .base_model import BaseModel
class Graph():
'''Container class for tf.Graph and associated variables.
'''
def __init__(self):
self.graph = None
class FFNModel(BaseModel):
def __init__(self, X_train, y_train, X_test, params_file=None, folds_lookup=None,
prefix=None, tf_path=None, logger=None):
BaseModel.__init__(self, X_train, y_train, X_test, params_file, folds_lookup,
prefix, logger)
self.graph = None
self.n_inputs = None
self.n_outputs = None
self.initializer = None
self.regularizer = None
self.activation = None
self.tf_path = tf_path
self.logdir = None
self.output_suffix = '_dnn_pred'
def preprocess(self, imputer_strategy='mean'):
'''Mean-fill NaN, center, and scale inputs
'''
train_idx = self.X_train.index
test_idx = self.X_test.index
cols_in = self.X_train.columns
train_len = self.X_train.shape[0]
X = np.concatenate([self.X_train.values, self.X_test.values], axis=0)
imputer = preprocessing.Imputer(strategy=imputer_strategy, axis=0, verbose=1)
self.logger.info('filling NaN...')
X[X == np.inf] = np.nan
X[X == -np.inf] = np.nan
X = imputer.fit_transform(X)
self.logger.info('standardizing inputs...')
X = preprocessing.scale(X)
self.X_train = pd.DataFrame(X[:train_len, :], index=train_idx, columns=cols_in)
self.X_test = pd.DataFrame(X[train_len:, :], index=test_idx, columns=cols_in)
del X
self.logger.info('preprocessing complete.')
def init_hparams(self):
'''interpret params.yaml file to set tf.Graph params
'''
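        # Illustrative params.yaml fragment (hypothetical values; the keys are
        # the ones looked up below and in build_graph/train_eval). Note that
        # every optimizer-related key is read when the ``optimizers`` dict is
        # built, so all of them should be present:
        #   n_outputs: 1
        #   init_mode: FAN_AVG
        #   regularizer: l2
        #   l2_reg_weight: 1.0e-4
        #   activation: elu
        #   optimizer: adam
        #   eta: 0.001
        #   momentum: 0.9
        #   adam_beta1: 0.9
        #   adam_beta2: 0.999
        #   adam_epsilon: 1.0e-8
        #   adam_weight_decay: 0.0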
self.n_inputs = self.X_train.shape[1]
if 'n_outputs' in self.params:
self.n_outputs = self.params['n_outputs']
else:
self.n_outputs = 1
if 'init_mode' in self.params:
init_mode = self.params['init_mode']
else:
init_mode = 'FAN_AVG'
if 'init_uniform' in self.params:
init_uniform = self.params['init_uniform']
else:
init_uniform = True
self.initializer = (
tf.contrib.layers
.variance_scaling_initializer(mode=init_mode,
uniform=init_uniform))
if 'l1_reg_weight' in self.params:
l1_reg = float(self.params['l1_reg_weight'])
else:
l1_reg = 0.0
if 'l2_reg_weight' in self.params:
l2_reg = float(self.params['l2_reg_weight'])
else:
l2_reg = 0.0
reg={'None': None,
'l1': tf.contrib.layers.l1_regularizer(scale=l1_reg),
'l2': tf.contrib.layers.l2_regularizer(scale=l2_reg),
'l1-l2': tf.contrib.layers.l1_l2_regularizer(
scale_l1=l1_reg, scale_l2=l2_reg)}
if 'regularizer' in self.params:
self.regularizer = reg[self.params['regularizer']]
else:
self.regularizer = None
act={'elu': tf.nn.elu,
'relu': tf.nn.relu,
'leaky-relu': tf.nn.leaky_relu,
'selu': tf.nn.selu,
'crelu': tf.nn.crelu,
'tanh': tf.tanh,
'sigmoid': tf.sigmoid}
if 'activation' in self.params:
self.activation = act[self.params['activation']]
else:
self.activation = tf.nn.relu
self.logger.info(f'Activation not specified in params. ' +
f'Using ReLU.')
optimizers = {
'sgd': tf.train.GradientDescentOptimizer,
'momentum': partial(tf.train.MomentumOptimizer,
momentum=float(self.params['momentum'])),
'adam': partial(tf.train.AdamOptimizer,
beta1=float(self.params['adam_beta1']),
beta2=float(self.params['adam_beta2']),
epsilon=float(self.params['adam_epsilon'])),
'adagrad': tf.train.AdagradOptimizer,
'adadelta': tf.train.AdadeltaOptimizer,
'adamw': partial(tf.contrib.opt.AdamWOptimizer,
beta1=float(self.params['adam_beta1']),
beta2=float(self.params['adam_beta2']),
epsilon=float(self.params['adam_epsilon']),
weight_decay=float(self.params['adam_weight_decay']))}
if 'optimizer' in self.params:
self.optimizer = optimizers[self.params['optimizer']]
else:
self.optimizer = tf.train.GradientDescentOptimizer
self.logger.info(f'Optimizer not specified in params. ' +
f'Using GradientDescentOptimizer')
def _shuffle_idx(self, X):
'''Shuffle batch order when training with minibatches.
'''
idx = X.index.values
rng = np.random.RandomState(datetime.now().microsecond)
return rng.permutation(idx)
def get_batch(self, X_in, y_in, idx, batch_size, batch_no):
'''Used in train_mode='minibatch', i.e. each epoch trains against
full training set (shuffled).
'''
idx_batch = idx[batch_size * (batch_no-1):batch_size * batch_no]
X_batch = X_in.reindex(idx_batch).values
y_batch = y_in.reindex(idx_batch).values
return X_batch, y_batch
def get_sample(self, X_in, y_in, batch_size):
rng = np.random.RandomState(datetime.now().microsecond)
idx_in = X_in.index.values
idx_sample = rng.choice(idx_in, size=batch_size, replace=False)
X_batch = X_in.loc[idx_sample, :].values
y_batch = y_in.loc[idx_sample].values
return X_batch, y_batch
def init_tensorboard(self):
'''set directory and filename for tensorboard logs and checkpoint file
'''
now = datetime.now().strftime("%m%d-%H%M")
comment = self.prefix + ''
self.logdir = f'{self.tf_path}/tensorboard_logs/{now}{comment}/'
self.ckpt_file = f'{self.tf_path}/sessions/{self.prefix}_tf_model.ckpt'
def ff_layer(self, g, layer_in, layer_no):
with g.graph.as_default():
layer = tf.layers.dropout(layer_in,
rate=self.params['drop_rates'][layer_no],
training=g.train_flag,
name='drop_' + str(layer_no + 1))
layer = tf.layers.dense(layer,
self.params['layers'][layer_no],
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='dense_' + str(layer_no + 1))
if self.params['use_batch_norm'][layer_no]:
layer = tf.layers.batch_normalization(
layer, training=g.train_flag,
momentum=self.params['batch_norm_momentum'],
name='bn_' + str(layer_no + 1))
layer = self.activation(layer, name='act_' + str(layer_no + 1))
return g, layer
def _grid_cv_fold(self, fold):
params_grid, keys = self._get_cv_params_grid()
columns_list = ['fold_no', *keys]
for met in self.metrics:
columns_list.extend(['best_' + met, 'rnd_' + met])
fold_results_list = []
X_train, y_train, X_val, y_val = self._get_fold_data(fold)
for i, param_set in enumerate(params_grid):
params_str = ''
for j in range(len(param_set)):
self.params[keys[j]] = param_set[j]
params_str += f'{keys[j]}={self.params[keys[j]]} '
self.logger.info(params_str)
self.init_hparams()
self.train_eval(X_train, y_train, X_val, y_val)
self.sess.close()
best_evals = self.best_eval_multi()
for eval in best_evals:
self.logger.info(f' best val {eval[0]}: {eval[1]:.4f}, ' +
f'round {eval[2]}')
self.logger.info('')
results_row = [fold, *(str(k) for k in param_set)]
for eval in best_evals:
results_row.extend([eval[1], eval[2]])
round_results = pd.DataFrame([results_row], columns=columns_list, index=[i])
fold_results_list.append(round_results)
return pd.concat(fold_results_list, axis=0)
def grid_cv(self, val_rounds):
        '''Grid cross-validation. Permutes params/values in self.cv_grid (dict).
        Args: val_rounds, integer: number of CV rounds
              (minimum: 1, maximum: number of folds)
Returns: no return; updates self.cv_results with grid CV results
'''
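        # Illustrative only: a hypothetical ``self.cv_grid`` such as
        #   {'eta': [0.001, 0.01], 'pos_weight': [1.0, 2.0]}
        # would be expanded into a 2x2 grid of parameter combinations, each
        # evaluated on every fold by _grid_cv_fold().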
self.load_hparams()
keys = [*self.cv_grid.keys()]
columns = []
for met in self.metrics:
columns.extend(['best_' + met, 'rnd_' + met])
results_list = []
self.logger.info(f'starting grid CV.')
self.logger.info(f'base params: {self.params}')
for fold in range(1, val_rounds + 1):
self.logger.info(f'------------ FOLD {fold} OF {val_rounds} ------------')
fold_results = self._grid_cv_fold(fold)
results_list.append(fold_results)
self.cv_results = pd.concat(results_list, axis=0)
# display/log grid CV summary
groupby = [self.cv_results[key] for key in keys]
summ_df = self.cv_results[columns].groupby(groupby).mean()
self.logger.info(self.parse_summ_df(summ_df))
# reset/reload all params from params file
self.load_hparams()
def cv_predictions(self):
'''Generate fold-by-fold predictions. For each fold k, train on all other
folds and make predictions for k. For test set, train on the full training
dataset.
Loads all hyperparameters from the params.yaml file. Will overwrite any/all
instance.params settings.
Args: none.
Returns: pandas DataFrame with predictions for each fold in the training set,
combined with predictions for the test set.
'''
self.logger.info(f'starting predictions for CV outputs...')
self.load_hparams()
self.logger.info(f'all params restored from {self.params_file}.')
train_preds = []
for fold in range(1, self.n_folds + 1):
_, val_idx = self._get_fold_indices(fold)
X_train, y_train, X_val, y_val = self._get_fold_data(fold)
fold_outputs = self.train_eval(X_train, y_train, X_val, y_val, return_preds=True)
self.sess.close()
preds_ser = pd.Series(fold_outputs, index=val_idx)
train_preds.append(preds_ser)
self.logger.info(f'fold {fold} CV outputs complete.')
train_preds = pd.concat(train_preds)
        return train_preds.rename(self.prefix + self.output_suffix)
def test_predictions(self):
test_preds = self.train_eval(self.X_train, self.y_train,
self.X_test, None, return_preds=True)
self.sess.close()
test_preds = pd.Series(test_preds, index=self.X_test.index)
self.logger.info(f'test set outputs complete.')
        return test_preds.rename(self.prefix + self.output_suffix)
class DNNRegressor(FFNModel):
def __init__(self, X_train, y_train, X_test, params_file=None, folds_lookup=None,
                 prefix=None, weights=None, tf_path=None, logger=None):
FFNModel.__init__(self, X_train, y_train, X_test, params_file, folds_lookup,
prefix, tf_path, logger)
self.metrics = ['MSE', 'MAE']
self.n_outputs = 1
def init_hparams(self):
FFNModel.init_hparams(self)
self.n_outputs = 1
def best_eval_multi(self):
'''Return the minimum value for MSE and MAE
'''
return FFNModel.best_eval_multi(self, 'min')
def build_graph(self):
self.init_hparams()
self.init_tensorboard()
g = Graph()
g.graph = tf.Graph()
with g.graph.as_default():
g.X = tf.placeholder(tf.float32, shape=(None,
self.n_inputs),
name='X')
g.y = tf.placeholder(tf.float32, shape=(None), name='y')
g.train_flag = tf.placeholder_with_default(False, shape=(), name='training')
g.stack = [g.X]
for layer_no, layer in enumerate(self.params['layers']):
g, layer_out = self.ff_layer(g, g.stack[-1], layer_no)
g.stack.append(layer_out)
g.drop = tf.layers.dropout(g.stack[-1],
rate=self.params['drop_rates'][-1],
training=g.train_flag,
name='drop_before_logits')
g.dnn_outputs = tf.layers.dense(g.drop, 1, activation=None)
with tf.name_scope('loss'):
g.MAE = tf.reduce_mean(tf.abs(g.dnn_outputs - g.y))
g.MSE = tf.reduce_mean(tf.square(g.dnn_outputs - g.y))
g.exp_error = tf.reduce_mean(tf.subtract(tf.exp(tf.abs(g.dnn_outputs - g.y)), 1))
g.reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
with tf.name_scope('train'):
g.optimizer = self.optimizer(learning_rate=float(self.params['eta']))
objective = self.params['objective']
if objective == 'MAE':
g.loss=tf.add_n([g.MAE] + g.reg_losses, name='combined_loss')
elif objective == 'MSE':
g.loss=tf.add_n([g.MSE] + g.reg_losses, name='combined_loss')
elif objective == 'exp_error':
g.loss=tf.add_n([g.exp_error] + g.reg_losses, name='combined_loss')
g.training_op = g.optimizer.minimize(g.loss)
g.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if self.params['regularizer'] != 'None':
with tf.name_scope('reg_losses'):
g.train_reg_loss = tf.summary.scalar('train', tf.add_n(g.reg_losses))
g.val_reg_loss = tf.summary.scalar('val', tf.add_n(g.reg_losses))
with tf.name_scope('MSE'):
g.train_mse = tf.summary.scalar('train', g.MSE)
g.val_mse = tf.summary.scalar('val', g.MSE)
with tf.name_scope('MAE'):
g.train_mae = tf.summary.scalar('train', g.MAE)
g.val_mae = tf.summary.scalar('val', g.MAE)
g.file_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())
g.saver = tf.train.Saver()
return g
def train_eval(self, X_train, y_train, X_val, y_val, return_preds=False, save_ckpt=False):
g = self.build_graph()
self.evals_out = {'round': [],
'train': {'MSE': [], 'MAE': []},
'val': {'MSE': [], 'MAE': []}}
train_batch_size = self.params['train_batch_size']
val_batch_size = self.params['val_batch_size']
n_val_batches = self.params['n_val_batches']
if not return_preds:
# add header for logger
self.logger.info(f' RND TRAIN | VAL')
self.logger.info(f' MSE MAE | MSE MAE')
self.sess = tf.InteractiveSession(graph=g.graph,
config=tf.ConfigProto(allow_soft_placement=True))
self.sess.run(tf.global_variables_initializer())
if self.params['train_mode'] == 'minibatch':
n_batches = (X_train.shape[0] // train_batch_size) + 1
train_batch_size = X_train.shape[0] // n_batches
self.logger.info(f'CV batch size scaled: {train_batch_size} n_batches {n_batches}')
self.params['tboard_evals_step'] = 1
self.params['log_evals_step'] = 1
self.logger.info(f'evals set to every epoch')
for epoch in range(self.params['n_epochs']):
if self.params['train_mode'] == 'minibatch':
idx = self._shuffle_idx(X_train)
for batch in range(1, n_batches+1):
X_train_batch, y_train_batch = self.get_batch(X_train,
y_train, idx, train_batch_size, batch)
train_op_dict = {g.X: X_train_batch, g.y: y_train_batch, g.train_flag:True}
self.sess.run([g.training_op, g.extra_update_ops], feed_dict=train_op_dict)
elif self.params['train_mode'] == 'sgd':
X_train_batch, y_train_batch = self.get_sample(
                    X_train, y_train, train_batch_size)
train_op_dict = {g.X: X_train_batch,
g.y: y_train_batch,
g.train_flag:True}
self.sess.run([g.training_op, g.extra_update_ops], feed_dict=train_op_dict)
if ((epoch + 1) % self.params['tboard_evals_step'] == 0
and not return_preds):
train_mse_summ = g.train_mse.eval(
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
train_mae_summ = g.train_mae.eval(
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
g.file_writer.add_summary(train_mse_summ, epoch+1)
g.file_writer.add_summary(train_mae_summ, epoch+1)
X_val_batch, y_val_batch = self.get_sample(X_val, y_val, val_batch_size)
val_mse_summ = g.val_mse.eval(
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
                val_mae_summ = g.val_mae.eval(
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
g.file_writer.add_summary(val_mse_summ, epoch+1)
g.file_writer.add_summary(val_mae_summ, epoch+1)
if self.params['regularizer'] in ['l1', 'l2', 'l1-l2']:
train_reg_loss_summ = g.train_reg_loss.eval(
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
g.file_writer.add_summary(train_reg_loss_summ, epoch)
val_reg_loss_summ = g.val_reg_loss.eval(
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
g.file_writer.add_summary(val_reg_loss_summ, epoch)
if ((epoch + 1) % self.params['log_evals_step'] == 0
and not return_preds):
round_evals = {'train': {'MSE': [], 'MAE': []},
'val': {'MSE': [], 'MAE': []}}
for i in range(n_val_batches):
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
round_evals['train']['MSE'].append(
g.MSE.eval(feed_dict={g.X: X_train_batch,
g.y: y_train_batch,
g.train_flag:False}))
round_evals['train']['MAE'].append(
g.MAE.eval(feed_dict={g.X: X_train_batch,
g.y: y_train_batch,
g.train_flag:False}))
X_val_batch, y_val_batch = self.get_sample(
X_val, y_val, val_batch_size)
round_evals['val']['MSE'].append(
g.MSE.eval(feed_dict={g.X: X_val_batch,
g.y: y_val_batch,
g.train_flag:False}))
round_evals['val']['MAE'].append(
g.MAE.eval(feed_dict={g.X: X_val_batch,
g.y: y_val_batch,
g.train_flag:False}))
train_mse_ = sum(round_evals['train']['MSE']) / n_val_batches
train_mae_ = sum(round_evals['train']['MAE']) / n_val_batches
eval_mse_ = sum(round_evals['val']['MSE']) / n_val_batches
eval_mae_ = sum(round_evals['val']['MAE']) / n_val_batches
# add round results for logger
self.logger.info(f' {str(epoch + 1):>4} {train_mse_:>10.4f} ' +
f'{train_mae_:>10.4f} | ' +
f'{eval_mse_:>10.4f} {eval_mae_:>10.4f}')
self.evals_out['round'].append(epoch + 1)
self.evals_out['train']['MSE'].append(train_mse_)
self.evals_out['train']['MAE'].append(train_mae_)
self.evals_out['val']['MSE'].append(eval_mse_)
self.evals_out['val']['MAE'].append(eval_mae_)
if save_ckpt:
save_path = g.saver.save(self.sess, self.ckpt_file)
g.file_writer.close()
self.logger.info(f'checkpoint saved as \'{self.ckpt_file}\'.')
if return_preds:
chunk_size = int(self.params['predict_chunk_size'])
n_chunks = X_val.shape[0] // chunk_size + 1
fold_preds = []
for i in range(n_chunks):
                feed_dict = {g.train_flag: False,
                             g.X: X_val.iloc[(i*chunk_size):((i+1)*chunk_size), :].values}
preds_chunk = g.dnn_outputs.eval(feed_dict=feed_dict)
fold_preds.extend(preds_chunk.ravel())
return fold_preds
class FFNClassifier(FFNModel):
def __init__(self, X_train, y_train, X_test, params_file=None, folds_lookup=None,
prefix=None, weights=None, tf_path=None, logger=None):
        FFNModel.__init__(self, X_train, y_train, X_test, params_file, folds_lookup,
prefix, tf_path, logger)
self.y_train = self.y_train.astype(int)
self.metrics = ['AUC', 'acc', 'precision', 'recall']
        self.ckpt_file = None  # set properly by init_tensorboard() when a graph is built
def init_hparams(self):
FFNModel.init_hparams(self)
if 'pos_weight' not in self.params:
self.params['pos_weight'] = 1.0
def best_eval_multi(self):
'''Return the maximum round result for all metrics
(AUC, accuracy, precision, and recall)
'''
return FFNModel.best_eval_multi(self, 'max')
def build_graph(self):
self.init_hparams()
self.init_tensorboard()
g = Graph()
g.graph = tf.Graph()
with g.graph.as_default():
g.X = tf.placeholder(tf.float32, shape=(None, int(self.n_inputs)), name='X')
if self.n_outputs == 1:
g.y = tf.placeholder(tf.int32,
shape=(None),
name='y')
g.y_2d = tf.one_hot(g.y, 2, axis=-1)
else:
g.y = tf.placeholder(tf.int32,
shape=(None, int(self.n_outputs)),
name='y')
g.y_2d = tf.identity(g.y, name='y_passthru')
g.train_flag = tf.placeholder_with_default(False, shape=(), name='training')
g.stack = [g.X]
for layer_no, layer in enumerate(self.params['layers']):
g, layer_out = self.ff_layer(g, g.stack[-1], layer_no)
g.stack.append(layer_out)
g.drop_final = tf.layers.dropout(g.stack[-1],
rate=self.params['drop_rates'][-1],
training=g.train_flag,
name='drop_before_logits')
if self.n_outputs == 1:
g.logits = tf.layers.dense(g.drop_final, 2, name='logits')
else:
g.logits = tf.layers.dense(g.drop_final, int(self.n_outputs), name='logits')
with tf.name_scope('predictions'):
g.soft_preds_sparse = tf.nn.softmax(g.logits, name='soft_preds_sparse')
# TODO: adjust for multi-class
g.soft_preds_scalar = g.soft_preds_sparse[:, 1]
#g.soft_preds = tf.slice(g.soft_preds, [0, 1], [-1, 1])
g.hard_preds_scalar = tf.argmax(g.logits, axis=-1, name='hard_preds_scalar')
if self.n_outputs == 1:
g.hard_preds_sparse = tf.one_hot(g.hard_preds_scalar, 2,
name='hard_preds_sparse')
else:
g.hard_preds_sparse = tf.one_hot(g.hard_preds_scalar,
self.n_outputs,
name='hard_preds_sparse')
with tf.name_scope('loss'):
g.xentropy = tf.nn.weighted_cross_entropy_with_logits(g.y_2d,
logits=g.logits, pos_weight=self.params['pos_weight'])
g.xentropy_mean=tf.reduce_mean(g.xentropy, name='xentropy')
g.reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
g.combined_loss=tf.add_n([g.xentropy_mean] + g.reg_losses, name='combined_loss')
# BINARY classification: tf.metrics 'accuracy', 'auc', 'precision', and 'recall'
if self.n_outputs == 1:
with tf.name_scope('binary_metrics'):
g.train_acc_val, g.train_acc_op = tf.metrics.accuracy(
labels=g.y, predictions=g.hard_preds_scalar)
g.train_auc_val, g.train_auc_op = tf.metrics.auc(
labels=g.y, predictions=g.soft_preds_scalar)
g.train_precision_val, g.train_precision_op = tf.metrics.precision(
labels=g.y, predictions=g.hard_preds_scalar)
g.train_recall_val, g.train_recall_op = tf.metrics.recall(
labels=g.y, predictions=g.hard_preds_scalar)
g.val_acc_val, g.val_acc_op = tf.metrics.accuracy(
labels=g.y, predictions=g.hard_preds_scalar)
g.val_auc_val, g.val_auc_op = tf.metrics.auc(
labels=g.y, predictions=g.soft_preds_scalar)
g.val_precision_val, g.val_precision_op = tf.metrics.precision(
labels=g.y, predictions=g.hard_preds_scalar)
g.val_recall_val, g.val_recall_op = tf.metrics.recall(
labels=g.y, predictions=g.hard_preds_scalar)
# EXPERIMENTAL: tf.metrics 'mean_per_class_accuracy', 'precision_at_k',
# and 'recall_at_k' for multi- classification
k = 1 # top-1 scores
if self.n_outputs > 2:
with tf.name_scope('multiclass_metrics'):
g.train_acc_val, g.train_acc_op = tf.metrics.mean_per_class_accuracy(
g.y, g.hard_preds_scalar, num_classes=self.n_outputs)
g.train_precision_val, g.train_precision_op = tf.metrics.precision_at_k(
g.y_2d, g.hard_preds_sparse, k)
g.train_recall_val, g.train_recall_op = tf.metrics.recall_at_k(
g.y_2d, g.hard_preds_sparse, k)
g.val_acc_val, g.val_acc_op = tf.metrics.mean_per_class_accuracy(
g.y, g.hard_preds_scalar, num_classes=self.n_outputs)
g.val_precision_val, g.val_precision_op = tf.metrics.precision_at_k(
g.y_2d, g.hard_preds_sparse, k)
g.val_recall_val, g.val_recall_op = tf.metrics.recall_at_k(
g.y_2d, g.hard_preds_sparse, k)
with tf.name_scope('train'):
g.optimizer = self.optimizer(learning_rate=float(self.params['eta']))
g.training_op = g.optimizer.minimize(g.combined_loss)
g.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.name_scope('xentropy'):
g.train_xentropy = tf.summary.scalar('train', g.xentropy_mean)
g.val_xentropy = tf.summary.scalar('val', g.xentropy_mean)
if self.params['regularizer'] != 'None':
with tf.name_scope('reg_losses'):
g.train_reg_loss = tf.summary.scalar('train', tf.add_n(g.reg_losses))
g.val_reg_loss = tf.summary.scalar('val', tf.add_n(g.reg_losses))
with tf.name_scope('ROC_AUC'):
g.train_auc = tf.summary.scalar('train', g.train_auc_val)
g.val_auc = tf.summary.scalar('val', g.val_auc_val)
with tf.name_scope('accuracy'):
g.train_acc = tf.summary.scalar('train', g.train_acc_val)
g.val_acc = tf.summary.scalar('val', g.val_acc_val)
g.file_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())
g.saver = tf.train.Saver()
return g
def train_eval(self, X_train, y_train, X_val, y_val, return_preds=False,
save_ckpt=False):
g = self.build_graph()
self.evals_out = {'round': [],
'train': {'AUC': [], 'acc': [], 'precision': [], 'recall': []},
'val': {'AUC': [], 'acc': [], 'precision': [], 'recall': []}}
train_batch_size = self.params['train_batch_size']
val_batch_size = self.params['val_batch_size']
n_val_batches = self.params['n_val_batches']
if not return_preds:
self.logger.info(f' RND TRAIN | VAL')
self.logger.info(f' acc auc prec recall | acc auc prec recall')
self.sess = tf.InteractiveSession(graph=g.graph,
config=tf.ConfigProto(allow_soft_placement=True))
self.sess.run(tf.global_variables_initializer())
if self.params['train_mode'] == 'minibatch':
n_train_batches = (X_train.shape[0] // train_batch_size) + 1
train_batch_size = X_train.shape[0] // n_train_batches
self.logger.info(f'CV batch size scaled: {train_batch_size} n_train_batches {n_train_batches}')
self.params['tboard_evals_step'] = 1
self.params['log_evals_step'] = 1
self.logger.info(f'evals set to every epoch')
for epoch in range(self.params['n_epochs']):
if self.params['train_mode'] == 'minibatch':
idx = self._shuffle_idx(X_train)
                for batch in range(1, n_train_batches+1):
X_train_batch, y_train_batch = self.get_batch(X_train,
y_train, idx, train_batch_size, batch)
train_op_dict = {g.X: X_train_batch, g.y: y_train_batch, g.train_flag:True}
self.sess.run([g.training_op, g.extra_update_ops],
feed_dict=train_op_dict)
elif self.params['train_mode'] == 'sgd':
X_train_batch, y_train_batch = self.get_sample(
                    X_train, y_train, train_batch_size)
                train_op_dict = {g.X: X_train_batch,
                                 g.y: y_train_batch,
                                 g.train_flag: True}
self.sess.run([g.training_op, g.extra_update_ops],
feed_dict=train_op_dict)
# Tensorboard evals
if ((epoch + 1) % self.params['tboard_evals_step'] == 0
and not return_preds):
self.sess.run(tf.local_variables_initializer())
train_eval_dict = {g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False}
self.sess.run(g.train_acc_op, feed_dict=train_eval_dict)
train_xent_summ, train_acc_summ =\
self.sess.run([g.train_xentropy, g.train_acc],
feed_dict=train_eval_dict)
g.file_writer.add_summary(train_xent_summ, epoch+1)
g.file_writer.add_summary(train_acc_summ, epoch+1)
X_val_batch, y_val_batch = self.get_sample(X_val, y_val, val_batch_size)
val_eval_dict = {g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False}
self.sess.run(g.val_acc_op, feed_dict=val_eval_dict)
val_xent_summ, val_acc_summ =\
self.sess.run([g.val_xentropy, g.val_acc],
feed_dict=val_eval_dict)
g.file_writer.add_summary(val_xent_summ, epoch+1)
g.file_writer.add_summary(val_acc_summ, epoch+1)
# eval AUC for binary classification only
if self.n_outputs == 1:
self.sess.run(g.train_auc_op, feed_dict=train_eval_dict)
train_auc_summ = self.sess.run(g.train_auc, feed_dict=train_eval_dict)
g.file_writer.add_summary(train_auc_summ, epoch+1)
self.sess.run(g.val_auc_op, feed_dict=train_eval_dict)
val_auc_summ = self.sess.run(g.val_auc, feed_dict=val_eval_dict)
g.file_writer.add_summary(val_auc_summ, epoch+1)
if self.params['regularizer'] in ['l1', 'l2', 'l1-l2']:
train_reg_loss_summ = g.train_reg_loss.eval(
feed_dict=train_eval_dict)
g.file_writer.add_summary(train_reg_loss_summ, epoch)
val_reg_loss_summ = g.val_reg_loss.eval(
feed_dict=val_eval_dict)
g.file_writer.add_summary(val_reg_loss_summ, epoch)
# logger evals for BINARY classification
if ((epoch + 1) % self.params['log_evals_step'] == 0
and self.n_outputs == 1 and not return_preds):
self.sess.run(tf.local_variables_initializer())
for i in range(n_val_batches):
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
self.sess.run([g.train_acc_op, g.train_auc_op, g.train_precision_op, g.train_recall_op],
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
X_val_batch, y_val_batch = self.get_sample(
X_val, y_val, val_batch_size)
self.sess.run([g.val_acc_op, g.val_auc_op, g.val_precision_op, g.val_recall_op],
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
train_acc_, train_auc_, train_precision_, train_recall_ =\
self.sess.run([g.train_acc_val, g.train_auc_val, g.train_precision_val, g.train_recall_val])
val_acc_, val_auc_, val_precision_, val_recall_ =\
self.sess.run([g.val_acc_val, g.val_auc_val, g.val_precision_val, g.val_recall_val])
# log evals
self.logger.info(f' {str(epoch + 1):>4} {train_acc_:.4f} {train_auc_:.4f} ' +
f'{train_precision_:.4f} {train_recall_:.4f} | ' +
f'{val_acc_:.4f} {val_auc_:.4f} ' +
f'{val_precision_:.4f} {val_recall_:.4f}')
# record evals to self.evals_out (for plot_results)
self.evals_out['round'].append(epoch + 1)
self.evals_out['train']['acc'].append(train_acc_)
self.evals_out['train']['AUC'].append(train_auc_)
self.evals_out['train']['precision'].append(train_precision_)
self.evals_out['train']['recall'].append(train_recall_)
self.evals_out['val']['acc'].append(val_acc_)
self.evals_out['val']['AUC'].append(val_auc_)
self.evals_out['val']['precision'].append(val_precision_)
self.evals_out['val']['recall'].append(val_recall_)
# logger evals for MULTICLASS classification
if ((epoch + 1) % self.params['log_evals_step'] == 0
and self.n_outputs > 2 and not return_preds):
self.sess.run(tf.local_variables_initializer())
for i in range(n_val_batches):
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
self.sess.run([g.train_acc_op, g.train_precision_op, g.train_recall_op],
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
X_val_batch, y_val_batch = self.get_sample(
X_val, y_val, val_batch_size)
self.sess.run([g.val_acc_op, g.val_precision_op, g.val_recall_op],
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
train_acc_, train_precision_, train_recall_ =\
self.sess.run([g.train_acc_val, g.train_precision_val, g.train_recall_val])
val_acc_, val_precision_, val_recall_ =\
self.sess.run([g.val_acc_val, g.val_precision_val, g.val_recall_val])
# log evals
self.logger.info(f' {str(epoch + 1):>4} {train_acc_:.4f} ' +
f'{train_precision_:.4f} {train_recall_:.4f} | ' +
f'{val_acc_:.4f} ' +
f'{val_precision_:.4f} {val_recall_:.4f}')
# record evals for plot_results()
self.evals_out['round'].append(epoch + 1)
self.evals_out['train']['acc'].append(train_acc_)
self.evals_out['train']['precision'].append(train_precision_)
self.evals_out['train']['recall'].append(train_recall_)
self.evals_out['val']['acc'].append(val_acc_)
self.evals_out['val']['precision'].append(val_precision_)
self.evals_out['val']['recall'].append(val_recall_)
if save_ckpt:
            save_path = g.saver.save(self.sess, self.ckpt_file)
            g.file_writer.close()
self.logger.info(f'checkpoint saved as \'{self.ckpt_file}\'.')
#------- TODO: ADD SUPPORT FOR MULTI-CLASS -------
if return_preds and self.n_outputs == 1:
chunk_size = int(self.params['predict_chunk_size'])
n_chunks = X_val.shape[0] // chunk_size + 1
fold_preds = []
for i in range(n_chunks):
                feed_dict={g.train_flag: False,
                           g.X: X_val.iloc[(i*chunk_size):((i+1)*chunk_size), :].values}
preds_chunk = g.soft_preds_scalar.eval(feed_dict=feed_dict)
fold_preds.extend(preds_chunk.ravel())
return fold_preds
|
python
|
import cosypose
import os
import yaml
from joblib import Memory
from pathlib import Path
import getpass
import socket
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
hostname = socket.gethostname()
username = getpass.getuser()
PROJECT_ROOT = Path(cosypose.__file__).parent.parent
PROJECT_DIR = PROJECT_ROOT
DATA_DIR = PROJECT_DIR / 'data'
LOCAL_DATA_DIR = PROJECT_DIR / 'local_data'
TEST_DATA_DIR = LOCAL_DATA_DIR
DASK_LOGS_DIR = LOCAL_DATA_DIR / 'dasklogs'
SYNT_DS_DIR = LOCAL_DATA_DIR / 'synt_datasets'
BOP_DS_DIR = LOCAL_DATA_DIR / 'bop_datasets'
BOP_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_cosypose'
BOP_CHALLENGE_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_challenge'
EXP_DIR = LOCAL_DATA_DIR / 'experiments'
RESULTS_DIR = LOCAL_DATA_DIR / 'results'
DEBUG_DATA_DIR = LOCAL_DATA_DIR / 'debug_data'
DEPS_DIR = PROJECT_DIR / 'deps'
CACHE_DIR = LOCAL_DATA_DIR / 'joblib_cache'
assert LOCAL_DATA_DIR.exists()
CACHE_DIR.mkdir(exist_ok=True)
TEST_DATA_DIR.mkdir(exist_ok=True)
DASK_LOGS_DIR.mkdir(exist_ok=True)
SYNT_DS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(exist_ok=True)
DEBUG_DATA_DIR.mkdir(exist_ok=True)
ASSET_DIR = DATA_DIR / 'assets'
MEMORY = Memory(CACHE_DIR, verbose=2)
CONDA_PREFIX = os.environ['CONDA_PREFIX']
if 'CONDA_PREFIX_1' in os.environ:
CONDA_BASE_DIR = os.environ['CONDA_PREFIX_1']
CONDA_ENV = os.environ['CONDA_DEFAULT_ENV']
else:
CONDA_BASE_DIR = os.environ['CONDA_PREFIX']
CONDA_ENV = 'base'
cfg = yaml.load((PROJECT_DIR / 'config_yann.yaml').read_text(), Loader=yaml.FullLoader)
SLURM_GPU_QUEUE = cfg['slurm_gpu_queue']
SLURM_QOS = cfg['slurm_qos']
DASK_NETWORK_INTERFACE = cfg['dask_network_interface']
# Kwai path
KWAI_PATH = "/data2/cxt/kwai/IMG_3486"
|
python
|
from django.contrib.auth.models import User
from django.core import mail
from django.test import TestCase
from hc.api.models import Check
from hc.test import BaseTestCase
class LogoutTestCase(BaseTestCase):
def test_it_logs_out_users(self):
form = {'email': '[email protected]', 'password': 'password'}
# make sure a user is logged in successfully
response = self.client.post("/accounts/login/", form)
        self.assertEqual(response.status_code, 302)
# logout user and test it redirects to index
r = self.client.get("/accounts/logout", follow=True)
self.assertEqual(r.status_code, 200)
        self.assertTemplateUsed(r, 'front/welcome.html')
|
python
|
"""
Human-explainable AI.
This is the class and function reference of FACET for advanced model selection,
inspection, and simulation.
"""
__version__ = "1.2.0"
__logo__ = (
r"""
_ ____ _ _ ___ __ ___ _____
_-´ _- / ___\ / \ /\ /\ /\ /\ / \ | /\ / ` | |
| ,-´ , | | | __ / _ \ / \/ \ / \/ \ / _ \ |___ / \ | |__ |
| | | | | | |_] |/ ___ \/ /\ /\ \ /\ /\ \/ ___ \ | /----\| | |
`'-| ' \____/_/ \_\/ \/ \_\ \/ \_\/ \_\ | / \\__, |___ |
"""
)[1:]
|
python
|
"""LiveSimulator: This class reads in various Bro IDS logs. The class utilizes
the BroLogReader and simply loops over the static bro log
file, replaying rows and changing any time stamps
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""
from __future__ import print_function
import os
import time
import datetime
import itertools
# Third party
import numpy as np
# Local Imports
from brothon import bro_log_reader
from brothon.utils import file_utils
class LiveSimulator(object):
"""LiveSimulator: This class reads in various Bro IDS logs. The class utilizes the
BroLogReader and simply loops over the static bro log file
replaying rows at the specified EPS and changing timestamps to 'now()'
"""
def __init__(self, filepath, eps=10, max_rows=None):
"""Initialization for the LiveSimulator Class
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""
# Compute EPS timer
# Logic:
# - Normal distribution centered around 1.0/eps
# - Make sure never less than 0
# - Precompute 1000 deltas and then just cycle around
self.eps_timer = itertools.cycle([max(0, delta) for delta in np.random.normal(1.0/float(eps), .5/float(eps), size=1000)])
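        # Example (with the default eps=10): deltas are drawn from a normal
        # distribution with mean 1.0/10 = 0.1 s and std 0.5/10 = 0.05 s, negative
        # draws are clipped to 0, and the 1000 precomputed deltas cycle forever.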
# Initialize the Bro log reader
self.log_reader = bro_log_reader.BroLogReader(filepath, tail=False)
# Store max_rows
self.max_rows = max_rows
def readrows(self):
"""Using the BroLogReader this method yields each row of the log file
replacing timestamps, looping and emitting rows based on EPS rate
"""
# Loop forever or until max_rows is reached
num_rows = 0
while True:
# Yield the rows from the internal reader
for row in self.log_reader.readrows():
yield self.replace_timestamp(row)
# Sleep and count rows
time.sleep(next(self.eps_timer))
num_rows += 1
# Check for max_rows
if self.max_rows and (num_rows >= self.max_rows):
return
@staticmethod
def replace_timestamp(row):
"""Replace the timestamp with now()"""
if 'ts' in row:
row['ts'] = datetime.datetime.utcnow()
return row
def test():
"""Test for LiveSimulator Python Class"""
# Grab a test file
data_path = file_utils.relative_dir(__file__, '../data')
test_path = os.path.join(data_path, 'conn.log')
print('Opening Data File: {:s}'.format(test_path))
# Create a LiveSimulator reader
reader = LiveSimulator(test_path, max_rows=10)
for line in reader.readrows():
print(line)
print('Read with max_rows Test successful!')
if __name__ == '__main__':
# Run the test for easy testing/debugging
test()
|
python
|
from unittest import TestCase
from dynamic_fixtures.fixtures.basefixture import BaseFixture
class BaseFixtureTestCase(TestCase):
def test_load_not_implemented(self):
"""
Case: load is not implemented
Expected: Error get raised
"""
fixture = BaseFixture("Name", "Module")
with self.assertRaises(NotImplementedError):
fixture.load()
|
python
|
from empregado import Empregado
class Operario(Empregado):
def __init__(self, nome, endereco, telefone, codigo_setor, salario_base, imposto, valor_producao, comissao):
super().__init__(nome, endereco, telefone, codigo_setor, salario_base, imposto)
self._valor_producao = valor_producao
self._comissao = comissao
@property
def valor_producao(self):
return self._valor_producao
@valor_producao.setter
def valor_producao(self, valor):
if valor >= 0:
self._valor_producao = valor
@property
def comissao(self):
return self._comissao
@comissao.setter
def comissao(self, comissao):
if 0 <= comissao <= 100:
self._comissao = comissao
def calcular_salario(self):
return super().calcular_salario() + (self.comissao / 100 * self.valor_producao)
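# Hedged usage sketch (illustrative only): the base salary computed by
# Empregado.calcular_salario() is not shown in this file, so only the commission
# term is spelled out. All argument values below are hypothetical.
#
#   op = Operario('Ana', 'Rua X, 1', '9999-0000', codigo_setor=10,
#                 salario_base=2000.0, imposto=10.0,
#                 valor_producao=5000.0, comissao=5.0)
#   op.calcular_salario()  # Empregado's salary plus 5 / 100 * 5000.0 = 250.0 of commission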
|
python
|
import inspect
import typing
from chia import instrumentation
class Factory:
name_to_class_mapping: typing.Optional[typing.Dict] = None
default_section: typing.Optional[str] = None
i_know_that_var_args_are_not_supported = False
@classmethod
def create(cls, config: dict, observers=(), **kwargs):
if not hasattr(config, "keys"):
config = {"name": config}
unused_config_keys = set(config.keys())
temp_observable = instrumentation.NamedObservable(cls.__name__)
for observer in observers:
temp_observable.register(observer)
if isinstance(cls.name_to_class_mapping, dict):
name = config["name"]
unused_config_keys -= {"name"}
target_class = cls.name_to_class_mapping[name]
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{cls.__name__}.name",
name,
source="config_dict",
)
)
else:
# If mapping is not a dict, interpret as type directly
target_class = cls.name_to_class_mapping
name = target_class.__name__
init_method_signature = inspect.signature(target_class)
call_spec_kwargs = dict()
for parameter, param_spec in init_method_signature.parameters.items():
# Sanity check
if (
param_spec.kind == inspect.Parameter.POSITIONAL_ONLY
or param_spec.kind == inspect.Parameter.VAR_KEYWORD
or param_spec.kind == inspect.Parameter.VAR_POSITIONAL
):
if not cls.i_know_that_var_args_are_not_supported:
raise ValueError(
f"Unsupported kind of constructor parameter {parameter}"
)
else:
# Skip the unsupported parameters
continue
# Try to find it
if parameter in kwargs.keys():
# Parameter-given config keys are not "unused", just overridden
unused_config_keys -= {parameter}
param_value = kwargs[parameter]
elif parameter in config.keys():
unused_config_keys -= {parameter}
param_value = config[parameter]
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{target_class.__name__}.{parameter}",
param_value,
source="config_dict",
)
)
elif f"{name}_userdefaults.{parameter}" in config.keys():
param_value = config[f"{name}_userdefaults.{parameter}"]
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{target_class.__name__}.{parameter}",
param_value,
source="userdefaults",
)
)
elif param_spec.default != inspect.Signature.empty:
param_value = param_spec.default
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{target_class.__name__}.{parameter}",
param_value,
source="default",
)
)
else:
raise ValueError(
f"Could not find a value for constructor parameter {parameter}"
)
call_spec_kwargs[parameter] = param_value
# Call constructor
instance = target_class(**call_spec_kwargs)
# Register observers if possible
if isinstance(instance, instrumentation.Observable):
for observer in observers:
instance.register(observer)
# Warn about unused config keys
for unused_config_key in unused_config_keys:
temp_observable.log_warning(
f"Config key {target_class.__name__}.{unused_config_key} unused"
)
for observer in observers:
temp_observable.unregister(observer)
return instance
class ContainerFactory(Factory):
@classmethod
def create(cls, config: dict, **kwargs):
name = config["name"]
target_class = cls.name_to_class_mapping[name]
return target_class(config, **kwargs)
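# --- Hedged usage sketch (not part of the original module) ---
# Shows how a concrete factory is typically wired up: subclass Factory, point
# name_to_class_mapping at the constructible classes, and let create() resolve
# constructor parameters from the config dict, kwargs or defaults. FooModel and
# its 'hidden_units' parameter are hypothetical, purely for illustration.
if __name__ == "__main__":
    class FooModel:
        def __init__(self, hidden_units: int = 16):
            self.hidden_units = hidden_units

    class FooModelFactory(Factory):
        name_to_class_mapping = {"foo": FooModel}

    # "name" selects the class; "hidden_units" is matched against the constructor.
    model = FooModelFactory.create({"name": "foo", "hidden_units": 32})
    assert model.hidden_units == 32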
|
python
|
from useintest.modules.consul.consul import ConsulServiceController, consul_service_controllers, \
Consul1_0_0ServiceController, Consul0_8_4ServiceController, ConsulDockerisedService
|
python
|
import math
# S1: A quick brown dog jumps over the lazy fox.
# S2: A quick brown fox jumps over the lazy dog.
# With the two sentences above, the functions below compute a word-order
# similarity value from the word-position vectors of the two sentences.
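# Worked example for S1 vs S2 (this reproduces the first assertion in test_word_order):
#   c1 = [a, quick, brown, dog, jumps, over, the, lazy, fox]  ->  v1 = [1..9]
#   position of each c1 word in S2                            ->  v2 = [1, 2, 3, 9, 5, 6, 7, 8, 4]
#   |v1 - v2| = [0, 0, 0, 5, 0, 0, 0, 0, 5]        -> magnitude = sqrt(50)    ~   7.07
#   |v1 * v2| = [1, 4, 9, 36, 25, 36, 49, 64, 36]  -> magnitude = sqrt(11108) ~ 105.39
#   word_order(S1, S2) = 7.07 / 105.39 ~ 0.0671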
def magnitude(v1):
vResult = [abs(a * b) for a, b in zip(v1, v1)]
mag = math.sqrt(sum(vResult, 0))
return mag
def word_order(s1, s2):
c1 = str.split(s1[:-1].lower())
c2 = str.split(s2[:-1].lower())
v1 = list(range(1,len(c1)+1))
v2 = list()
for word in range(len((c1))):
for val in range(len(c2)):
if(c1[word] == c2[val]):
v2.append(val+1)
vResult = [abs(a - b) for a, b in zip(v1, v2)]
vResult2 = [abs(a * b) for a, b in zip(v1, v2)]
mag1 = magnitude(vResult)
mag2 = magnitude(vResult2)
if(mag2 != 0):
val = mag1 / mag2
elif((mag1 + mag2) == 0):
val = 1
else:
val = 0
return val
def isclose(a, b, rel_tol=1e-04, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_word_order():
s1 = "A quick brown dog jumps over the lazy fox."
s2 = "A quick brown fox jumps over the lazy dog."
s3 = "A quick brown cat jumps over the lazy dog."
s4 = "The fat bird runs across a green bog."
s5 = "Big fat bird runs across an orange bog."
s6 = "Big fat bird is an orange bog."
assert isclose(0.067091, word_order(s1, s2))
assert isclose(0.000000, word_order(s1, s1))
assert isclose(0.806225, word_order(s3, s4))
assert isclose(1, word_order(s3, s5))
assert isclose(1, word_order(s3, s6))
|
python
|
import time
import asyncio
from typing import List
import threading
import numpy as np
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from eventhook import EventHook
class NotPlayingError(Exception):
def __init__(self):
self.message = "Spotify not playing"
class MonitorConfig:
def __init__(
self,
refresh_accuracy_seconds: float = 1.0,
refresh_max_delay_seconds: float = 30.0,
refresh_next_event_divisor: float = 1.5,
not_playing_refresh_seconds: float = 5.0,
tick_accuracy_seconds: float = 0.25,
tick_max_delay_seconds: float = 10.0,
tick_next_event_divisor: float = 2.0,
section_offset_seconds: float = 0.25,
):
self.refresh_accuracy_seconds = refresh_accuracy_seconds
self.refresh_max_delay_seconds = refresh_max_delay_seconds
self.refresh_next_event_divisor = refresh_next_event_divisor
self.not_playing_refresh_seconds = not_playing_refresh_seconds
self.tick_accuracy_seconds = tick_accuracy_seconds
self.tick_max_delay_seconds = tick_max_delay_seconds
self.tick_next_event_divisor = tick_next_event_divisor
self.section_offset_seconds = section_offset_seconds
class spotifyMonitor:
def __init__(
self,
config: MonitorConfig = MonitorConfig(),
debug: bool = False,
) -> None:
self.config = config
self.sp = self._generate_spotify_auth()
self.on_track_change = EventHook()
self.on_section_change = EventHook()
self.on_stop = EventHook()
self.current_track = {"id": None, "progress": 0.0, "sections": []}
self.current_section = {"id": None, "track_id": None}
self.next_section = {"id": None, "track_id": None}
self._loop = asyncio.get_event_loop()
self._last_tick = self._get_tick_time()
self.debug = debug
self._ticking = False
self._playing = True
def start(self):
try:
self._loop.call_soon(self._refresh)
self._loop.run_forever()
finally:
self._loop.run_until_complete(self._loop.shutdown_asyncgens())
self._loop.close()
def stop(self):
self._loop.stop()
def _generate_spotify_auth(self) -> spotipy.Spotify:
scope = "user-read-playback-state"
return spotipy.Spotify(
auth_manager=SpotifyOAuth(
scope=scope,
client_id="397df7bde7e64245bf93014ce0d36b4f",
client_secret="5d7d498988714957990b45afa47fdd36",
redirect_uri="http://127.0.0.1:9090",
)
)
def _refresh(self):
try:
self._refresh_track_status()
self._playing = True
if self.debug:
print(" Refresh {}".format(self.current_track["progress"]))
if self._ticking == False:
self._last_tick = self._get_tick_time()
self._ticking = True
self._loop.call_soon(self._tick)
delay = (
self.current_track["duration"] - self.current_track["progress"]
) / self.config.refresh_next_event_divisor
if delay > self.config.refresh_max_delay_seconds:
delay = self.config.refresh_max_delay_seconds
elif delay < self.config.refresh_accuracy_seconds:
delay = self.config.refresh_accuracy_seconds
except NotPlayingError:
if self._playing:
self._playing = False
self.on_stop.fire()
            delay = self.config.not_playing_refresh_seconds
if self.debug:
print(" Refresh (not playing)")
self._loop.call_later(delay=delay, callback=self._refresh)
def _tick(self):
if self._playing:
this_tick = self._get_tick_time()
self.current_track["progress"] += (this_tick - self._last_tick) / 1000
self._last_tick = this_tick
if self.debug:
print(" Tick {}".format(self.current_track["progress"]))
current_section_id = self._calculate_current_section_id(self.current_track)
if current_section_id != self.current_section["id"]:
section_info = self._calculate_section_info(
self.current_track, current_section_id
)
self._trigger_section_change(self.current_track, section_info)
self.current_section = section_info["current_section"]
self.next_section = section_info["next_section"]
delay = (
self.next_section["start"] - self.current_track["progress"]
) / self.config.tick_next_event_divisor
if delay > self.config.tick_max_delay_seconds:
delay = self.config.tick_max_delay_seconds
elif delay < self.config.tick_accuracy_seconds:
delay = self.next_section["start"] - self.current_track["progress"]
if delay < 0:
delay = self.config.tick_accuracy_seconds
self._loop.call_later(delay=delay, callback=self._tick)
else:
self._ticking = False
def _get_tick_time(self) -> float:
return time.time_ns() // 1000000
def _refresh_track_status(self):
current_track = self._get_current_track_status()
track_change = self.current_track["id"] != current_track["id"]
section_info = self._calculate_section_info(current_track)
section_change = (
self.current_section["id"] != section_info["current_section"]["id"]
or self.current_section["track_id"]
!= section_info["current_section"]["track_id"]
)
if track_change:
self._trigger_track_change(current_track, section_info)
elif section_change:
self._trigger_section_change(current_track, section_info)
self.current_track = current_track
self._last_tick = self._get_tick_time()
self.current_section = section_info["current_section"]
self.next_section = section_info["next_section"]
    def _trigger_track_change(self, track, section_info):
        # Pass fire as the thread target (with kwargs) instead of calling it here,
        # so the handlers run on the worker thread rather than blocking the loop
        nth = threading.Thread(
            target=self.on_track_change.fire,
            kwargs=dict(
                previous_track=self.current_track,
                current_track=track,
                current_section=section_info["current_section"],
                next_section=section_info["next_section"],
            ),
        )
        nth.start()
    def _trigger_section_change(self, track, section_info):
        nth = threading.Thread(
            target=self.on_section_change.fire,
            kwargs=dict(
                current_track=track,
                current_section=section_info["current_section"],
                next_section=section_info["next_section"],
            ),
        )
        nth.start()
def _get_current_track_status(self) -> dict:
track = self._get_spotify_currently_playing()
if track["id"] != self.current_track["id"]:
track_info = self._get_spotify_track_info(track_id=track["id"])
track_features = self._get_spotify_track_features(track_id=track["id"])
current_track = {**track, **track_info, **track_features}
else:
current_track = self.current_track
current_track["progress"] = track["progress"]
return current_track
def _calculate_section_info(self, track, current_section_id: int = None) -> dict:
if not current_section_id:
current_section_id = self._calculate_current_section_id(track)
track_sections = track["sections"]
section = {
**{"id": current_section_id, "track_id": track["id"]},
**track_sections[current_section_id],
}
if current_section_id + 1 < len(track_sections):
next_section = track_sections[current_section_id + 1]
else:
next_section = {
"id": 0,
"track_id": None,
"tempo": None,
"loudness": None,
"start": track["duration"],
}
return {"current_section": section, "next_section": next_section}
def _calculate_current_section_id(self, track) -> int:
current_section_id = 0
for index, section in enumerate(track["sections"]):
if section["start"] < track["progress"]:
current_section_id = index
if section["start"] > track["progress"]:
break
return current_section_id
def _get_spotify_currently_playing(self) -> dict:
# print(" CALL to currently_playing")
try:
result = self.sp.currently_playing()
if result:
if result["is_playing"]:
return {
"id": result["item"]["id"],
"name": result["item"]["name"],
"artist": result["item"]["artists"][0]["name"],
"duration": result["item"]["duration_ms"] / 1000,
"progress": result["progress_ms"] / 1000,
}
else:
raise NotPlayingError
else:
raise NotPlayingError
# FIXME - Add 401 error here
except ValueError:
return {
"id": None,
"name": None,
"artist": None,
"duration": None,
"progress": None,
}
def _get_spotify_track_info(self, track_id) -> dict:
# print(" CALL to audio_analysis")
try:
result = self.sp.audio_analysis(track_id=track_id)
for section in result["sections"]:
section["start"] = section["start"] - self.config.section_offset_seconds
loudnesses = [
section["loudness"]
for section in result["sections"]
if "loudness" in section
]
return {
"id": track_id,
"duration": result["track"]["duration"],
"tempo": result["track"]["tempo"],
"loudness": result["track"]["loudness"],
"key": result["track"]["key"],
"sections": result["sections"],
"sections_loudness_mean": np.mean(loudnesses),
"sections_loudness_upperq": np.quantile(loudnesses, 0.75),
}
# FIXME - Add 401 error here
except ValueError:
return {"tempo": None, "loudness": None, "sections": List()}
def _get_spotify_track_features(self, track_id) -> dict:
try:
result = self.sp.audio_features(tracks=[track_id])
return {
"danceability": result[0]["danceability"],
"energy": result[0]["energy"],
"key": result[0]["key"],
"loudness": result[0]["loudness"],
"speechiness": result[0]["speechiness"],
"acousticness": result[0]["acousticness"],
"instrumentalness": result[0]["instrumentalness"],
"liveness": result[0]["liveness"],
"valence": result[0]["valence"],
"tempo": result[0]["tempo"],
"time_signature": result[0]["time_signature"],
}
# FIXME - Add 401 error here
except ValueError:
return {"tempo": None, "loudness": None, "sections": List()}
def _get_playlist(self, playlist_id) -> dict:
try:
result = self.sp.playlist(playlist_id=playlist_id)
tracks = []
for item in result["tracks"]["items"]:
tracks.append(
{
"playlist_name": result["name"],
"playlist_id": result["id"],
"id": item["track"]["id"],
"name": item["track"]["name"],
"duration": item["track"]["duration_ms"] / 1000,
}
)
return tracks
except ValueError:
return []
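# --- Hedged usage sketch (illustrative only) ---
# Constructing spotifyMonitor authenticates against the Spotify API, so this only
# works with valid credentials/tokens for the configured app. Handlers would be
# attached to the EventHook instances exposed as on_track_change, on_section_change
# and on_stop (the subscription syntax depends on the EventHook implementation and
# is not shown here).
if __name__ == "__main__":
    monitor = spotifyMonitor(config=MonitorConfig(tick_accuracy_seconds=0.5), debug=True)
    monitor.start()  # blocks, driving the asyncio loop until monitor.stop() is called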
|
python
|
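# Read two space-separated numbers, concatenate their decimal digits, and report
# whether the concatenation is a perfect square.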
a, b = input().split()
print("Yes" if (int(a + b) ** (1 / 2)).is_integer() else "No")
|
python
|
# pip3 install PySocks
import socks
import socket
from urllib import request
from urllib.error import URLError
socks.set_default_proxy(socks.SOCKS5, '127.0.0.1', 9742)
socket.socket = socks.socksocket
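# From here on every new socket created through the standard library is routed
# through the SOCKS5 proxy configured above, so the urllib request below is proxied.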
try:
response = request.urlopen('http://httpbin.org/get')
print(response.read().decode('utf-8'))
except URLError as e:
print(e.reason)
|
python
|
import torch
from kondo import Spec
from torchrl.experiments import BaseExperiment
from torchrl.utils.storage import TransitionTupleDataset
from torchrl.contrib.controllers import DDPGController
class DDPGExperiment(BaseExperiment):
def __init__(self, actor_lr=1e-4, critic_lr=1e-3, gamma=0.99,
tau=1e-2, batch_size=32, buffer_size=1000,
n_ou_reset_interval=100000, **kwargs):
self._controller_args = dict(
actor_lr=actor_lr,
critic_lr=critic_lr,
gamma=gamma,
tau=tau,
n_reset_interval=n_ou_reset_interval,
)
self.buffer = TransitionTupleDataset(size=buffer_size)
self.batch_size = batch_size
super().__init__(**kwargs)
def store(self, transition_list):
self.buffer.extend(transition_list)
def build_controller(self):
return DDPGController(self.envs.observation_space.shape[0],
self.envs.action_space.shape[0],
self.envs.action_space.low,
self.envs.action_space.high,
**self._controller_args,
device=self.device)
def train(self):
if len(self.buffer) < self.batch_size:
return {}
b_idx = torch.randperm(len(self.buffer))[:self.batch_size]
b_transition = [b.to(self.device) for b in self.buffer[b_idx]]
return self.controller.learn(*b_transition)
@staticmethod
def spec_list():
return [
Spec(
group='ddpg',
params=dict(
env_id=['Pendulum-v0'],
gamma=.99,
n_train_interval=1,
n_frames=30000,
batch_size=128,
buffer_size=int(1e6),
actor_lr=1e-4,
critic_lr=1e-3,
tau=1e-2,
# n_ou_reset_interval=10000,
# ou_mu = 0.0
# ou_theta = 0.15
# ou_sigma = 0.2
),
exhaustive=True
)
]
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numap import NuMap
def hello_world(element, *args, **kwargs):
print "Hello element: %s " % element,
print "Hello args: %s" % (args,),
print "Hello kwargs: %s" % (kwargs,)
return element
ELEMENTS = ('element_0', 'element_1', 'element_2', 'element_3', 'element_4')
result_iterator = NuMap(hello_world, ELEMENTS,
args=('arg_0', 'arg_1'),
kwargs={'kwarg_0':'val_0', 'kwarg_1':'val_1'})
results = tuple(result_iterator)
assert results == ('element_0', 'element_1', 'element_2', 'element_3', 'element_4')
|
python
|
import collections
import itertools
import json
from pathlib import Path
import re
import sqlite3
import string
import attr
import nltk
import numpy as np
def clamp(value, abs_max):
value = max(-abs_max, value)
value = min(abs_max, value)
return value
def to_dict_with_sorted_values(d, key=None):
return {k: sorted(v, key=key) for k, v in d.items()}
@attr.s
class SpiderItem:
text = attr.ib()
code = attr.ib()
schema = attr.ib()
orig = attr.ib()
orig_schema = attr.ib()
@attr.s
class Column:
id = attr.ib()
table = attr.ib()
name = attr.ib()
unsplit_name = attr.ib()
orig_name = attr.ib()
type = attr.ib()
foreign_key_for = attr.ib(default=None)
@attr.s
class Table:
id = attr.ib()
name = attr.ib()
unsplit_name = attr.ib()
orig_name = attr.ib()
columns = attr.ib(factory=list)
primary_keys = attr.ib(factory=list)
@attr.s
class Schema:
db_id = attr.ib()
tables = attr.ib()
columns = attr.ib()
foreign_key_graph = attr.ib()
orig = attr.ib()
connection = attr.ib(default=None)
@attr.s
class PreprocessedSchema:
column_names = attr.ib(factory=list)
table_names = attr.ib(factory=list)
table_bounds = attr.ib(factory=list)
column_to_table = attr.ib(factory=dict)
table_to_columns = attr.ib(factory=dict)
foreign_keys = attr.ib(factory=dict)
foreign_keys_tables = attr.ib(factory=lambda: collections.defaultdict(set))
primary_keys = attr.ib(factory=list)
STOPWORDS = set(nltk.corpus.stopwords.words("english"))
PUNKS = set(a for a in string.punctuation)
class EncPreproc:
# def __init__(self) -> None:
def __init__(
self,
tables_file,
dataset_path,
include_table_name_in_column,
fix_issue_16_primary_keys,
qq_max_dist,
cc_max_dist,
tt_max_dist,
):
self._tables_file = tables_file
self._dataset_path = dataset_path
self.include_table_name_in_column = include_table_name_in_column
self.fix_issue_16_primary_keys = fix_issue_16_primary_keys
self.texts = collections.defaultdict(list)
self.counted_db_ids = set()
self.preprocessed_schemas = {}
self.qq_max_dist = qq_max_dist
self.cc_max_dist = cc_max_dist
self.tt_max_dist = tt_max_dist
self.relation_ids = {}
def add_relation(name):
self.relation_ids[name] = len(self.relation_ids)
def add_rel_dist(name, max_dist):
for i in range(-max_dist, max_dist + 1):
add_relation((name, i))
add_rel_dist("qq_dist", qq_max_dist)
add_rel_dist("cc_dist", cc_max_dist)
add_rel_dist("tt_dist", tt_max_dist)
rel_names = [
"qc_default",
"qt_default",
"cq_default",
"cc_default",
"cc_foreign_key_forward",
"cc_foreign_key_backward",
"cc_table_match",
"ct_default",
"ct_foreign_key",
"ct_primary_key",
"ct_table_match",
"ct_any_table",
"tq_default",
"tc_default",
"tc_primary_key",
"tc_table_match",
"tc_any_table",
"tc_foreign_key",
"tt_default",
"tt_foreign_key_forward",
"tt_foreign_key_backward",
"tt_foreign_key_both",
"qcCEM",
"cqCEM",
"qtTEM",
"tqTEM",
"qcCPM",
"cqCPM",
"qtTPM",
"tqTPM",
"qcNUMBER",
"cqNUMBER",
"qcTIME",
"cqTIME",
"qcCELLMATCH",
"cqCELLMATCH",
]
for rel in rel_names:
add_relation(rel)
self.schemas = None
self.eval_foreign_key_maps = None
print("before load_trees")
self.schemas, self.eval_foreign_key_maps = self.load_tables([self._tables_file])
print("before connecting")
for db_id, schema in self.schemas.items():
sqlite_path = Path(self._dataset_path) / db_id / f"{db_id}.sqlite"
source: sqlite3.Connection
with sqlite3.connect(sqlite_path) as source:
dest = sqlite3.connect(":memory:")
dest.row_factory = sqlite3.Row
source.backup(dest)
schema.connection = dest
def get_desc(self, tokenized_utterance, db_id):
item = SpiderItem(
text=[x.text for x in tokenized_utterance[1:-1]],
code=None,
schema=self.schemas[db_id],
orig=None,
orig_schema=self.schemas[db_id].orig,
)
return self.preprocess_item(item, "train")
def compute_relations(
self, desc, enc_length, q_enc_length, c_enc_length, c_boundaries, t_boundaries
):
sc_link = desc.get("sc_link", {"q_col_match": {}, "q_tab_match": {}})
cv_link = desc.get("cv_link", {"num_date_match": {}, "cell_match": {}})
# Catalogue which things are where
loc_types = {}
for i in range(q_enc_length):
loc_types[i] = ("question",)
c_base = q_enc_length
for c_id, (c_start, c_end) in enumerate(zip(c_boundaries, c_boundaries[1:])):
for i in range(c_start + c_base, c_end + c_base):
loc_types[i] = ("column", c_id)
t_base = q_enc_length + c_enc_length
for t_id, (t_start, t_end) in enumerate(zip(t_boundaries, t_boundaries[1:])):
for i in range(t_start + t_base, t_end + t_base):
loc_types[i] = ("table", t_id)
relations = np.empty((enc_length, enc_length), dtype=np.int64)
for i, j in itertools.product(range(enc_length), repeat=2):
def set_relation(name):
relations[i, j] = self.relation_ids[name]
i_type, j_type = loc_types[i], loc_types[j]
if i_type[0] == "question":
if j_type[0] == "question":
set_relation(("qq_dist", clamp(j - i, self.qq_max_dist)))
elif j_type[0] == "column":
# set_relation('qc_default')
j_real = j - c_base
if f"{i},{j_real}" in sc_link["q_col_match"]:
set_relation("qc" + sc_link["q_col_match"][f"{i},{j_real}"])
elif f"{i},{j_real}" in cv_link["cell_match"]:
set_relation("qc" + cv_link["cell_match"][f"{i},{j_real}"])
elif f"{i},{j_real}" in cv_link["num_date_match"]:
set_relation("qc" + cv_link["num_date_match"][f"{i},{j_real}"])
else:
set_relation("qc_default")
elif j_type[0] == "table":
j_real = j - t_base
if f"{i},{j_real}" in sc_link["q_tab_match"]:
set_relation("qt" + sc_link["q_tab_match"][f"{i},{j_real}"])
else:
set_relation("qt_default")
elif i_type[0] == "column":
if j_type[0] == "question":
i_real = i - c_base
if f"{j},{i_real}" in sc_link["q_col_match"]:
set_relation("cq" + sc_link["q_col_match"][f"{j},{i_real}"])
elif f"{j},{i_real}" in cv_link["cell_match"]:
set_relation("cq" + cv_link["cell_match"][f"{j},{i_real}"])
elif f"{j},{i_real}" in cv_link["num_date_match"]:
set_relation("cq" + cv_link["num_date_match"][f"{j},{i_real}"])
else:
set_relation("cq_default")
elif j_type[0] == "column":
col1, col2 = i_type[1], j_type[1]
if col1 == col2:
set_relation(("cc_dist", clamp(j - i, self.cc_max_dist)))
else:
set_relation("cc_default")
if desc["foreign_keys"].get(str(col1)) == col2:
set_relation("cc_foreign_key_forward")
if desc["foreign_keys"].get(str(col2)) == col1:
set_relation("cc_foreign_key_backward")
if (
desc["column_to_table"][str(col1)]
== desc["column_to_table"][str(col2)]
):
set_relation("cc_table_match")
elif j_type[0] == "table":
col, table = i_type[1], j_type[1]
set_relation("ct_default")
if self.match_foreign_key(desc, col, table):
set_relation("ct_foreign_key")
col_table = desc["column_to_table"][str(col)]
if col_table == table:
if col in desc["primary_keys"]:
set_relation("ct_primary_key")
else:
set_relation("ct_table_match")
elif col_table is None:
set_relation("ct_any_table")
elif i_type[0] == "table":
if j_type[0] == "question":
i_real = i - t_base
if f"{j},{i_real}" in sc_link["q_tab_match"]:
set_relation("tq" + sc_link["q_tab_match"][f"{j},{i_real}"])
else:
set_relation("tq_default")
elif j_type[0] == "column":
table, col = i_type[1], j_type[1]
set_relation("tc_default")
if self.match_foreign_key(desc, col, table):
set_relation("tc_foreign_key")
col_table = desc["column_to_table"][str(col)]
if col_table == table:
if col in desc["primary_keys"]:
set_relation("tc_primary_key")
else:
set_relation("tc_table_match")
elif col_table is None:
set_relation("tc_any_table")
elif j_type[0] == "table":
table1, table2 = i_type[1], j_type[1]
if table1 == table2:
set_relation(("tt_dist", clamp(j - i, self.tt_max_dist)))
else:
set_relation("tt_default")
forward = table2 in desc["foreign_keys_tables"].get(
str(table1), ()
)
backward = table1 in desc["foreign_keys_tables"].get(
str(table2), ()
)
if forward and backward:
set_relation("tt_foreign_key_both")
elif forward:
set_relation("tt_foreign_key_forward")
elif backward:
set_relation("tt_foreign_key_backward")
return relations
@classmethod
def match_foreign_key(cls, desc, col, table):
foreign_key_for = desc["foreign_keys"].get(str(col))
if foreign_key_for is None:
return False
foreign_table = desc["column_to_table"][str(foreign_key_for)]
return desc["column_to_table"][str(col)] == foreign_table
def validate_item(self, item, section):
return True, None
def preprocess_item(self, item, validation_info):
question, question_for_copying = item.text, item.text
question = [x.replace("Ġ", "") for x in question]
question_for_copying = [x.replace("Ġ", "") for x in question_for_copying]
preproc_schema = self._preprocess_schema(item.schema)
assert preproc_schema.column_names[0][0].startswith("<type:")
column_names_without_types = [col[1:] for col in preproc_schema.column_names]
sc_link = self.compute_schema_linking(
question, column_names_without_types, preproc_schema.table_names
)
# print(sc_link)
cv_link = self.compute_cell_value_linking(question, item.schema)
# if cv_link['cell_match']:
# print(question)
return {
"raw_question": question,
"question": question,
"question_for_copying": question_for_copying,
"db_id": item.schema.db_id,
"sc_link": sc_link,
"cv_link": cv_link,
"columns": preproc_schema.column_names,
"tables": preproc_schema.table_names,
"table_bounds": preproc_schema.table_bounds,
"column_to_table": preproc_schema.column_to_table,
"table_to_columns": preproc_schema.table_to_columns,
"foreign_keys": preproc_schema.foreign_keys,
"foreign_keys_tables": preproc_schema.foreign_keys_tables,
"primary_keys": preproc_schema.primary_keys,
}
def _preprocess_schema(self, schema):
if schema.db_id in self.preprocessed_schemas:
return self.preprocessed_schemas[schema.db_id]
result = self.preprocess_schema_uncached(
schema,
self._tokenize,
self.include_table_name_in_column,
self.fix_issue_16_primary_keys,
)
self.preprocessed_schemas[schema.db_id] = result
return result
def _tokenize(self, presplit, unsplit):
return presplit
def _tokenize_for_copying(self, presplit, unsplit):
return presplit, presplit
# schema linking, similar to IRNet
@classmethod
def compute_schema_linking(cls, question, column, table):
def partial_match(x_list, y_list):
x_str = " ".join(x_list)
y_str = " ".join(y_list)
if x_str in STOPWORDS or x_str in PUNKS:
return False
if re.match(rf"\b{re.escape(x_str)}\b", y_str):
assert x_str in y_str
return True
else:
return False
def exact_match(x_list, y_list):
x_str = " ".join(x_list)
y_str = " ".join(y_list)
if x_str == y_str:
return True
else:
return False
q_col_match = dict()
q_tab_match = dict()
col_id2list = dict()
for col_id, col_item in enumerate(column):
if col_id == 0:
continue
col_id2list[col_id] = col_item
tab_id2list = dict()
for tab_id, tab_item in enumerate(table):
tab_id2list[tab_id] = tab_item
# 5-gram
n = 5
while n > 0:
for i in range(len(question) - n + 1):
n_gram_list = question[i : i + n]
n_gram = " ".join(n_gram_list)
if len(n_gram.strip()) == 0:
continue
# exact match case
for col_id in col_id2list:
if exact_match(n_gram_list, col_id2list[col_id]):
for q_id in range(i, i + n):
q_col_match[f"{q_id},{col_id}"] = "CEM"
for tab_id in tab_id2list:
if exact_match(n_gram_list, tab_id2list[tab_id]):
for q_id in range(i, i + n):
q_tab_match[f"{q_id},{tab_id}"] = "TEM"
# partial match case
for col_id in col_id2list:
if partial_match(n_gram_list, col_id2list[col_id]):
for q_id in range(i, i + n):
if f"{q_id},{col_id}" not in q_col_match:
q_col_match[f"{q_id},{col_id}"] = "CPM"
for tab_id in tab_id2list:
if partial_match(n_gram_list, tab_id2list[tab_id]):
for q_id in range(i, i + n):
if f"{q_id},{tab_id}" not in q_tab_match:
q_tab_match[f"{q_id},{tab_id}"] = "TPM"
n -= 1
return {"q_col_match": q_col_match, "q_tab_match": q_tab_match}
@classmethod
def load_tables(cls, paths):
schemas = {}
eval_foreign_key_maps = {}
for path in paths:
schema_dicts = json.load(open(path))
for schema_dict in schema_dicts:
tables = tuple(
Table(
id=i,
name=name.split(),
unsplit_name=name,
orig_name=orig_name,
)
for i, (name, orig_name) in enumerate(
zip(
schema_dict["table_names"],
schema_dict["table_names_original"],
)
)
)
columns = tuple(
Column(
id=i,
table=tables[table_id] if table_id >= 0 else None,
name=col_name.split(),
unsplit_name=col_name,
orig_name=orig_col_name,
type=col_type,
)
for i, (
(table_id, col_name),
(_, orig_col_name),
col_type,
) in enumerate(
zip(
schema_dict["column_names"],
schema_dict["column_names_original"],
schema_dict["column_types"],
)
)
)
# Link columns to tables
for column in columns:
if column.table:
column.table.columns.append(column)
for column_id in schema_dict["primary_keys"]:
# Register primary keys
column = columns[column_id]
column.table.primary_keys.append(column)
foreign_key_graph = None
for source_column_id, dest_column_id in schema_dict["foreign_keys"]:
# Register foreign keys
source_column = columns[source_column_id]
dest_column = columns[dest_column_id]
source_column.foreign_key_for = dest_column
db_id = schema_dict["db_id"]
assert db_id not in schemas
schemas[db_id] = Schema(
db_id, tables, columns, foreign_key_graph, schema_dict
)
eval_foreign_key_maps[db_id] = cls.build_foreign_key_map(schema_dict)
for db_id, schema_el in schemas.items():
san2orig = {}
orig2san = {}
for table_el in schema_el.tables:
sanitized_name = f"{'_'.join(table_el.name)}".lower()
orig_name = f"{table_el.orig_name}".lower()
san2orig[sanitized_name] = orig_name
orig2san[orig_name] = sanitized_name
for col_el in table_el.columns:
sanitized_name = (
f"{'_'.join(col_el.table.name)}.{'_'.join(col_el.name)}".lower()
)
orig_name = f"{col_el.table.orig_name}.{col_el.orig_name}".lower()
san2orig[sanitized_name] = orig_name
orig2san[orig_name] = sanitized_name
schema_el.san2orig = san2orig
schema_el.orig2san = orig2san
return schemas, eval_foreign_key_maps
@classmethod
def build_foreign_key_map(cls, entry):
cols_orig = entry["column_names_original"]
tables_orig = entry["table_names_original"]
# rebuild cols corresponding to idmap in Schema
cols = []
for col_orig in cols_orig:
if col_orig[0] >= 0:
t = tables_orig[col_orig[0]]
c = col_orig[1]
cols.append("__" + t.lower() + "." + c.lower() + "__")
else:
cols.append("__all__")
def keyset_in_list(k1, k2, k_list):
for k_set in k_list:
if k1 in k_set or k2 in k_set:
return k_set
new_k_set = set()
k_list.append(new_k_set)
return new_k_set
foreign_key_list = []
foreign_keys = entry["foreign_keys"]
for fkey in foreign_keys:
key1, key2 = fkey
key_set = keyset_in_list(key1, key2, foreign_key_list)
key_set.add(key1)
key_set.add(key2)
foreign_key_map = {}
for key_set in foreign_key_list:
sorted_list = sorted(list(key_set))
midx = sorted_list[0]
for idx in sorted_list:
foreign_key_map[cols[idx]] = cols[midx]
return foreign_key_map
@classmethod
def compute_cell_value_linking(cls, tokens, schema):
def isnumber(word):
try:
float(word)
return True
except:
return False
def db_word_match(word, column, table, db_conn):
# return False #fixme
cursor = db_conn.cursor()
word = word.replace("'", "")
p_str = (
f"select {column} from {table} where {column} like '{word} %' or {column} like '% {word}' or "
f"{column} like '% {word} %' or {column} like '{word}'"
)
# return False # TODO: fixmes
# print("hi")
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
if len(p_res) == 0:
return False
else:
return p_res
except sqlite3.OperationalError as e:
# print(p_str)
return False
num_date_match = {}
cell_match = {}
for q_id, word in enumerate(tokens):
if len(word.strip()) == 0:
continue
if word in STOPWORDS or word in PUNKS:
continue
num_flag = isnumber(word)
CELL_MATCH_FLAG = "CELLMATCH"
for col_id, column in enumerate(schema.columns):
if col_id == 0:
assert column.orig_name == "*"
continue
# word is number
if num_flag:
if column.type in ["number", "time"]: # TODO fine-grained date
num_date_match[f"{q_id},{col_id}"] = column.type.upper()
else:
ret = db_word_match(
word,
column.orig_name,
column.table.orig_name,
schema.connection,
)
if ret:
# print(word, ret)
cell_match[f"{q_id},{col_id}"] = CELL_MATCH_FLAG
cv_link = {"num_date_match": num_date_match, "cell_match": cell_match}
return cv_link
@classmethod
def preprocess_schema_uncached(
cls,
schema,
tokenize_func,
include_table_name_in_column,
fix_issue_16_primary_keys,
):
r = PreprocessedSchema()
last_table_id = None
for i, column in enumerate(schema.columns):
col_toks = tokenize_func(column.name, column.unsplit_name)
# assert column.type in ["text", "number", "time", "boolean", "others"]
type_tok = f"<type: {column.type}>"
column_name = [type_tok] + col_toks
if include_table_name_in_column:
if column.table is None:
table_name = ["<any-table>"]
else:
table_name = tokenize_func(
column.table.name, column.table.unsplit_name
)
column_name += ["<table-sep>"] + table_name
r.column_names.append(column_name)
table_id = None if column.table is None else column.table.id
r.column_to_table[str(i)] = table_id
if table_id is not None:
columns = r.table_to_columns.setdefault(str(table_id), [])
columns.append(i)
if last_table_id != table_id:
r.table_bounds.append(i)
last_table_id = table_id
if column.foreign_key_for is not None:
r.foreign_keys[str(column.id)] = column.foreign_key_for.id
r.foreign_keys_tables[str(column.table.id)].add(
column.foreign_key_for.table.id
)
r.table_bounds.append(len(schema.columns))
assert len(r.table_bounds) == len(schema.tables) + 1
for i, table in enumerate(schema.tables):
table_toks = tokenize_func(table.name, table.unsplit_name)
r.table_names.append(table_toks)
last_table = schema.tables[-1]
r.foreign_keys_tables = to_dict_with_sorted_values(r.foreign_keys_tables)
r.primary_keys = (
[column.id for table in schema.tables for column in table.primary_keys]
if fix_issue_16_primary_keys
else [
column.id
for column in last_table.primary_keys
for table in schema.tables
]
)
return r
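# --- Hedged usage sketch (illustrative only; the paths, database id and
# max-distance values below are hypothetical, not prescribed by this module) ---
#
#   preproc = EncPreproc(
#       tables_file="spider/tables.json",
#       dataset_path="spider/database",
#       include_table_name_in_column=True,
#       fix_issue_16_primary_keys=True,
#       qq_max_dist=2, cc_max_dist=2, tt_max_dist=2,
#   )
#   # tokenized_utterance is the tokenizer output (objects with a .text attribute,
#   # including the leading/trailing special tokens that get_desc strips off).
#   desc = preproc.get_desc(tokenized_utterance, db_id="concert_singer")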
|
python
|
# SPDX-FileCopyrightText: 2022 Eva Herrada for Adafruit Industries
# SPDX-License-Identifier: MIT
import board
from kmk.kmk_keyboard import KMKKeyboard as _KMKKeyboard
from kmk.matrix import DiodeOrientation
class KMKKeyboard(_KMKKeyboard):
row_pins = (board.D10, board.MOSI, board.MISO, board.D8)
col_pins = (
board.D4,
board.D7,
board.SCK,
)
diode_orientation = DiodeOrientation.COLUMNS
i2c = board.I2C
|
python
|
# -*- coding: utf-8 -*-
counter = 100 # An integer assignment
miles = 1000.0 # A floating point
name = "John" # A string
print (counter)
print (miles)
print (name)
|
python
|
from .tests import _index
def main():
suites = _index.suites
passes = 0
fails = 0
for s in suites:
s.run()
print(s)
passes += s.passes
fails += s.fails
print(f'###################\nSUMMARY OF ALL TEST SUITES\nTotal Passing Tests: {passes}\nTotal Failing Tests: {fails}\nPercent Passing: {(passes/(passes+fails)) * 100}%')
|
python
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os, subprocess
import numpy as np
import GenericUsefulScripts as GUS
from astropy import units as u
from astropy.io import ascii, fits
from astropy.convolution import convolve
from astropy.stats import SigmaClip
from astropy.coordinates import SkyCoord
from photutils.background import MedianBackground, Background2D
from skimage.transform import resize
import multiprocessing
import ChrisFuncs
import pandas as pd
space = ' '
def data_reduction(galaxy_name, path_fits_input = 'standard'):
# ---------------------------------------------------------------------------
    # Galaxy Aperture Stuff, from Dustpedia (for masking and bkg evaluation purposes)
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
subtable = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
ra, dec = subtable['ra'].values[0], subtable['dec'].values[0]
ap_cen_coord = SkyCoord(ra*u.deg, dec*u.deg, frame = 'fk5')
semimaj = subtable['semimaj_arcsec'].values[0]
axial_ratio, pos_angle = subtable['axial_ratio'].values[0], subtable['pos_angle'].values[0]
# ---------------------------------------------------------------------------
subprocess.call('mkdir ../'+galaxy_name+'/_ReducedMaps/', shell = True)
list_data = []
if path_fits_input == 'standard': path_fits_input = '../'+galaxy_name+'/Caapr/Temp/Processed_Maps'
else: path_fits_input = '../'+galaxy_name+'/'+path_fits_input
header_fits = '../'+galaxy_name+'/Caapr/Maps/'
print('Reading original maps...')
filelist = [x for x in os.listdir('Caapr/Maps') if x.endswith('.fits')]
for file in filelist:
if file.endswith('Thumbnail.fits'): continue # Don't work with thumbnails
elif file.endswith('Error.fits'): continue # Don't work with Errors
signal_path = path_fits_input+'/'+file
list_data.append(GUS.FitsUtils(signal_path))
print(space+signal_path+' read')
print('...done!')
print()
for data in list_data:
if os.path.exists('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits'):
print(data.bandname+'.fits already reduced, skipping to next band')
continue
else: print('Processing band', data.bandname)
        # Galaxy Aperture Stuff, from Dustpedia (for masking and bkg evaluation purposes)
centre_x, centre_y = ap_cen_coord.to_pixel(data.wcs)
pixel_scale = (data.get_pixel_scale()*u.deg).to('arcsec').value
Gal_Ap_Stuff = centre_x, centre_y, semimaj/pixel_scale, axial_ratio, pos_angle
# Reduce band
signal_reduced = reduce(data, Gal_Ap_Stuff)
# Save fits
hdu = fits.PrimaryHDU(signal_reduced)
hdu.header = data.hdr
hdu.writeto('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits')
print()
print('Data reduction phase over.')
print()
return
def data_reduction_parallel(galaxy_name, processes = 5, path_fits_input = 'standard'):
from itertools import repeat
# ---------------------------------------------------------------------------
    # Galaxy Aperture Stuff, from Dustpedia (for masking and bkg evaluation purposes)
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
subtable = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
ra, dec = subtable['ra'].values[0], subtable['dec'].values[0]
ap_cen_coord = SkyCoord(ra*u.deg, dec*u.deg, frame = 'fk5')
semimaj = subtable['semimaj_arcsec'].values[0]
axial_ratio, pos_angle = subtable['axial_ratio'].values[0], subtable['pos_angle'].values[0]
# ---------------------------------------------------------------------------
subprocess.call('mkdir ../'+galaxy_name+'/_ReducedMaps/', shell = True)
list_data = []
if path_fits_input == 'standard': path_fits_input = '../'+galaxy_name+'/Caapr/Temp/Processed_Maps'
else: path_fits_input = '../'+galaxy_name+'/'+path_fits_input
header_fits = '../'+galaxy_name+'/Caapr/Maps/'
print('Reading original maps...')
filelist = [x for x in os.listdir('Caapr/Maps') if x.endswith('.fits')]
for file in filelist:
if file.endswith('Thumbnail.fits'): continue # Don't work with thumbnails
elif file.endswith('Error.fits'): continue # Don't work with Errors
signal_path = path_fits_input+'/'+file
list_data.append(GUS.FitsUtils(signal_path))
print(space+signal_path+' read')
print('...done!')
print()
    with multiprocessing.Pool(processes=processes) as pool:
func = zip(list_data, repeat(galaxy_name), \
repeat(ap_cen_coord), repeat(semimaj), repeat(axial_ratio), repeat(pos_angle))
pool.starmap(reduction_loop_parallel, func)
print()
print('Data reduction phase over.')
print()
return
def reduction_loop_parallel(data, galaxy_name, ap_cen_coord, semimaj, axial_ratio, pos_angle):
if os.path.exists('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits'):
print(data.bandname+'.fits already reduced, skipping to next band')
return
else: print('Processing band', data.bandname)
    # Galaxy Aperture Stuff, from Dustpedia (for masking and bkg evaluation purposes)
centre_x, centre_y = ap_cen_coord.to_pixel(data.wcs)
pixel_scale = (data.get_pixel_scale()*u.deg).to('arcsec').value
Gal_Ap_Stuff = centre_x, centre_y, semimaj/pixel_scale, axial_ratio, pos_angle
# Reduce band
signal_reduced = reduce(data, Gal_Ap_Stuff)
# Save fits
hdu = fits.PrimaryHDU(signal_reduced)
hdu.header = data.hdr
hdu.writeto('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits')
return
def reduce(data, Gal_Ap_Stuff, psf_degrad = True, sky_sub = True):
#if data.bandname[:7] == 'Spitzer':
# print
# print(space+"Spitzer bands usually have a problem with sky subtraction")
# print(space+"Evaluated background average is "+str(bkg_average)+". Perhaps it's too low.")
# print(space+"Do you want to insert the bkg average by hand? (insert value or n)")
# answer = raw_input()
# if answer == 'n': pass
# else: bkg_average = float(answer)
#else: pass
ok_nan = np.where(np.nan_to_num(data.signal_with_nans-1) == 0) # I know, can't do anything 'bout it
if sky_sub:
# Sky subtraction
print(space+'Sky subtraction for '+data.bandname+' ...')
# 1) Flatten the background
signal_flat, check_sub = sky_flattening(data, Gal_Ap_Stuff)
# 2) If check_sub is sub, the sky has already been flattened + removed
# if not, remove the average background
if check_sub == 'sub':
signal_skysub = signal_flat.copy()
elif check_sub == 'unsub':
bkg_average = evaluate_bkg_avg(signal_flat, Gal_Ap_Stuff)
if bkg_average < 0:
print(space+"Evaluated background average is lower than 0. Returning original map.")
signal_skysub = signal_flat.copy()
else:
print(space+"Evaluated background average is {0:.2E}".format(bkg_average))
signal_skysub = signal_flat - bkg_average
else:
print(space+'No sky flattening + subtraction requested. Hey, whatever you want.')
signal_skysub = data.signal.copy()
if psf_degrad:
print(space+'PSF degradation for '+data.bandname+' ...')
if data.bandname == 'SPIRE_350':
return signal_skysub
else:
try:
kernel_path = '../_kernels/Kernel_LoRes_'+data.bandname+'_to_SPIRE_350.fits'
kernel = fits.getdata(kernel_path)
kernel_resized = resize(kernel, (101, 101), preserve_range = True)
signal_conv = convolve(signal_skysub, kernel = kernel_resized, boundary = None, preserve_nan = True)
signal_conv[ok_nan] = np.nan
except:
print(space+'No LowResolution kernel, switching to (slower) HighResolution.')
kernel_path = '../_kernels/Kernel_HiRes_'+data.bandname+'_to_SPIRE_350.fits'
kernel = fits.getdata(kernel_path)
kernel_resized = resize(kernel, (101, 101), preserve_range = True)
signal_conv = convolve(signal_skysub, kernel = kernel_resized, boundary = None, preserve_nan = True)
signal_conv[ok_nan] = np.nan
return signal_conv
else:
print(space+'No PSF degradation requested. I beg you to reconsider.')
signal_skysub[ok_nan] = np.nan
return signal_skysub
def sky_flattening(data, Gal_Ap_Stuff):
from astropy.modeling.polynomial import Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
from scipy.ndimage.interpolation import zoom
# 1) Read data, get pixel scale
image = data.signal_with_nans
pix_size = (data.get_pixel_scale()*u.deg).to('arcsec').value
bandname = data.bandname
# 2) If image has pixels smaller than some limit, downsample image to improve processing time
pix_size_limit = 2.0
if pix_size<pix_size_limit: downsample_factor = int(np.ceil(pix_size_limit/pix_size))
else: downsample_factor = 1
image_ds = GUS.Downsample(image, downsample_factor)
# 3) Sigma clip the downsampled image
clip_value = GUS.SigmaClip(image_ds, tolerance=0.01, sigma_thresh=3.0, median=True)
noise_value = clip_value[0]
field_value = clip_value[1]
cutoff_sigma = 2.0
cutoff = field_value + ( cutoff_sigma * noise_value )
# 4) Mask the image removing galaxy emission...
image_masked = image_ds.copy()
centre_i, centre_j, mask_semimaj_pix, mask_axial_ratio, mask_angle = Gal_Ap_Stuff
ellipse_mask = EllipseMask(image_ds, mask_semimaj_pix/downsample_factor, mask_axial_ratio, mask_angle, centre_i/downsample_factor, centre_j/downsample_factor)
image_masked[ np.where( ellipse_mask==1 ) ] = np.nan
# ...and image pixels identified as having high SNR
image_masked[ np.where( image_masked>cutoff ) ] = np.nan
# 5) Use astropy to set up 2-dimensional polynomial to the image
image_masked[ np.where( np.isnan(image_masked)==True ) ] = field_value
poly_model = Polynomial2D(degree=5)
i_coords, j_coords = np.mgrid[:image_masked.shape[0], :image_masked.shape[1]]
fitter = LevMarLSQFitter()
i_coords = i_coords.flatten()
j_coords = j_coords.flatten()
image_flattened = image_masked.flatten()
good = np.where(np.isnan(image_flattened)==False)
i_coords = i_coords[good]
j_coords = j_coords[good]
# 6) Attempt polynomial fit; if insufficient data then skip onwards
image_flattened = image_flattened[good]
try:
fit = fitter(poly_model, i_coords, j_coords, image_flattened)
except:
        print(space+'Error fitting polynomial sky model. Returning unaltered image.')
return image
# 7) Create final polynomial filter (undoing downsampling using lorenzoriano GitHub script)
i_coords, j_coords = np.mgrid[:image_ds.shape[0], :image_ds.shape[1]]
poly_fit = fit(i_coords, j_coords)
poly_full = zoom(poly_fit, [ float(image.shape[0])/float(poly_fit.shape[0]), \
float(image.shape[1])/float(poly_fit.shape[1]) ], mode='nearest')
# 8) Establish background variation before application of filter
sigma_thresh = 3.0
clip_in = GUS.SigmaClip(image, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_in = image[ np.where( image<clip_in[1] ) ]
spread_in = np.mean( np.abs( bg_in - clip_in[1] ) )
# 9) How much reduction in background variation there was due to application of the filter
image_sub = image - poly_full
clip_sub = GUS.SigmaClip(image_sub, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_sub = image_sub[ np.where( image_sub < clip_sub[1] ) ]
spread_sub = np.mean( np.abs( bg_sub - clip_sub[1] ) )
spread_diff = spread_in / spread_sub
# If the filter made significant difference, apply to image and return it; otherwise, just return the unaltered map
if spread_diff>1.1:
print(space+bandname+' background is significantly variable; removing polynomial background fit.')
return image_sub, 'sub'
else:
print(space+bandname+' background is not significantly variable; leaving image unaltered.')
return image, 'unsub'
def evaluate_bkg_avg(image, Gal_Ap_Stuff):
'''
Function to evaluate the mean background in an elliptical annulus between 1.25 and 1.601 times the galaxy semimajor axis (from DustPedia photometric table).
    Args: image array, and the galaxy aperture tuple (centre x & y in pix, semi-major axis in pix, axial ratio, position angle in deg)
    Returns: The sigma-clipped mean background per pixel (scalar).
'''
centre_x, centre_y, semimaj_pix, axial_ratio, pos_angle = Gal_Ap_Stuff
# =========
# Evaluate pixels in background annulus
bg_inner_semimaj_pix = semimaj_pix * 1.25
bg_width = (semimaj_pix * 1.601) - bg_inner_semimaj_pix
bg_calc = AnnulusSum(image, bg_inner_semimaj_pix, bg_width, axial_ratio, pos_angle, centre_x, centre_y)
bg_clip = GUS.SigmaClip(bg_calc[2], median=False, sigma_thresh=3.0)
# =========
return bg_clip[1]
def check_Dustpedia(galaxy_name, working_bands):
'''
    Function to check whether the DustPedia photometric fluxes and the ones measured in the same apertures on our reduced maps are compatible.
    Args: Galaxy name, list of working bands (Galactic extinction corrections are applied automatically).
Returns: Nothing, generates a plot in Reduction folder.
'''
import os, subprocess
from astropy.io import fits, ascii
from astropy import units as u
import pandas as pd
import numpy as np
from photutils import SkyEllipticalAperture, SkyEllipticalAnnulus, aperture_photometry
from astropy.coordinates import SkyCoord
from matplotlib import pyplot as plt
subprocess.call('mkdir ../'+galaxy_name+'/Reduction/', shell = True)
path_galaxy_photometry = '../'+galaxy_name+'/Reduction/'+galaxy_name+'_photometry.dat'
# =========
# Read DustPedia Photometric Table
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
    # Rearrange in order of increasing effective wavelength
right_order = [u'name', u'ra', u'dec', u'semimaj_arcsec', u'axial_ratio', u'pos_angle', u'global_flag',
u'GALEX_FUV', u'GALEX_FUV_err', u'GALEX_FUV_flag', u'GALEX_NUV', u'GALEX_NUV_err', u'GALEX_NUV_flag',
u'SDSS_u', u'SDSS_u_err', u'SDSS_u_flag', u'SDSS_g', u'SDSS_g_err', u'SDSS_g_flag',
u'SDSS_r', u'SDSS_r_err', u'SDSS_r_flag', u'SDSS_i', u'SDSS_i_err', u'SDSS_i_flag',
u'SDSS_z', u'SDSS_z_err', u'SDSS_z_flag',
u'2MASS_J', u'2MASS_J_err', u'2MASS_J_flag', u'2MASS_H', u'2MASS_H_err', u'2MASS_H_flag',
u'2MASS_Ks', u'2MASS_Ks_err', u'2MASS_Ks_flag',
u'WISE_3.4', u'WISE_3.4_err', u'WISE_3.4_flag', u'Spitzer_3.6', u'Spitzer_3.6_err', u'Spitzer_3.6_flag',
u'Spitzer_4.5', u'Spitzer_4.5_err', u'Spitzer_4.5_flag', u'WISE_4.6', u'WISE_4.6_err', u'WISE_4.6_flag',
u'Spitzer_5.8', u'Spitzer_5.8_err', u'Spitzer_5.8_flag', u'Spitzer_8.0', u'Spitzer_8.0_err', u'Spitzer_8.0_flag',
u'WISE_12', u'WISE_12_err', u'WISE_12_flag', u'WISE_22', u'WISE_22_err', u'WISE_22_flag',
u'Spitzer_24', u'Spitzer_24_err', u'Spitzer_24_flag', u'Spitzer_70', u'Spitzer_70_err', u'Spitzer_70_flag',
u'PACS_70', u'PACS_70_err', u'PACS_70_flag', u'PACS_100', u'PACS_100_err', u'PACS_100_flag',
u'PACS_160', u'PACS_160_err', u'PACS_160_flag', u'Spitzer_160', u'Spitzer_160_err', u'Spitzer_160_flag',
u'SPIRE_250', u'SPIRE_250_err', u'SPIRE_250_flag', u'SPIRE_350', u'SPIRE_350_err', u'SPIRE_350_flag',
u'SPIRE_500', u'SPIRE_500_err', u'SPIRE_500_flag']
DustPedia_Photom = DustPedia_Photom[right_order]
gal_phot = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
    # First, remove the _flag columns
to_remove = gal_phot.columns.str.contains('flag', case=False)
gal_phot = gal_phot.loc[:,~to_remove]
# Extract ra, dec, semimaj, axial ratio and pos_angle, then remove them
ra, dec = gal_phot['ra'].values[0], gal_phot['dec'].values[0]
semimaj, axial_ratio, pos_angle = gal_phot['semimaj_arcsec'].values[0], gal_phot['axial_ratio'].values[0], gal_phot['pos_angle'].values[0]
to_remove = ['name', 'ra', 'dec', 'semimaj_arcsec', 'axial_ratio', 'pos_angle']
gal_phot = gal_phot.drop(columns=to_remove)
    # And remove empty columns
#gal_phot = gal_phot.dropna(axis='columns')
# Extract working bands fluxes and errors
gal_phot_flux = gal_phot[working_bands]
gal_phot_flux = gal_phot_flux.transpose()
working_bands_err = [t+'_err' for t in working_bands]
gal_phot_err = gal_phot[working_bands_err]
gal_phot_err = gal_phot_err.transpose()
galaxy_photometry = pd.DataFrame(np.concatenate((gal_phot_flux.values, gal_phot_err.values), axis=1))
galaxy_photometry.columns = ['Flux', 'Error']
galaxy_photometry.index = working_bands
galaxy_photometry = galaxy_photometry.fillna(0) # Fill NaN entries with zeroes
# Save
galaxy_photometry.index.names = ['Band'] # Name the index column "Band"
galaxy_photometry.to_csv(path_galaxy_photometry, sep='\t', index = True) # Write the index so the band names are saved alongside Flux and Error
# =========
# =========
# APERTURES
# Read the apertures + radii
positions = SkyCoord(ra*u.deg, dec*u.deg, frame='icrs')
DustPedia_aperture = SkyEllipticalAperture(positions, a=semimaj*u.arcsec, b=semimaj*u.arcsec/axial_ratio, theta=pos_angle*u.deg)
# Background annulus between 1.25 and 1.601 semi-major axes; b_out scales with a_out so the annulus keeps the aperture axial ratio
DustPedia_annulus = SkyEllipticalAnnulus(positions, a_in=semimaj*u.arcsec*1.25, a_out=semimaj*u.arcsec*1.601, \
b_out=semimaj*u.arcsec*1.601/axial_ratio, theta=pos_angle*u.deg)
# =========
# =========
# Galactic Extinction Correction dictionary
GalCorr_path = '../'+galaxy_name+'/galactic_extinction_correction.txt'
if not os.path.exists(GalCorr_path): GalExtCorr(galaxy_name, working_bands, ra, dec)
GalCorrection_dictionary = dict(zip(ascii.read(GalCorr_path)['Band'].data, \
ascii.read(GalCorr_path)['Correction'].data))
# =========
# =========
# Read reduced data and perform photometry
path_fits = '../'+galaxy_name+'/_ReducedMaps/'
list_data = []
for file in os.listdir(path_fits):
if not file.endswith('.fits'): continue
elif file.startswith('In'): continue
list_data.append(GUS.FitsUtils(path_fits+file))
list_fluxes = []
for data in list_data:
# Perform photometry
phot_table = aperture_photometry(data.signal, DustPedia_aperture, wcs = data.wcs)
phot_table['aperture_sum'].info.format = '%.4g'
# Put results in a single file
phot = GUS.round_arr(phot_table['aperture_sum'].data, 2)
# Galactic extinction correction
phot *= GalCorrection_dictionary[data.bandname]
list_fluxes.append(abs(phot))
fluxes = np.array(list_fluxes)
# Sort with respect to wavelength
list_wvl = [t.get_wavelength() for t in list_data]
list_band = [t.bandname for t in list_data]
wvl, fluxes, bandnames = zip(*sorted(zip(list_wvl, fluxes, list_band)))
wvl, fluxes = np.array(wvl), np.array(fluxes)[:,0]
# Save the results
ascii.write([bandnames, GUS.round_arr(wvl,2), GUS.round_arr(fluxes, 2)], '../'+galaxy_name+'/Reduction/'+galaxy_name+'_fluxes.txt', \
names = ['Band', 'Wvl', 'Fluxes'], overwrite=True)
# =========
# =========
# Re-read Dustpedia Photometry
data_CAAPR = ascii.read(path_galaxy_photometry)
fluxes_CAAPR, errors_CAAPR = data_CAAPR['Flux'].data, data_CAAPR['Error'].data
# Compatibility in units of the DustPedia flux uncertainty; assumes working_bands is listed in order of increasing wavelength, matching the sorted measurements
compatibility = np.abs(np.array(fluxes_CAAPR) - np.array(fluxes))/np.sqrt(np.array(errors_CAAPR)**2)
ascii.write([GUS.round_arr(compatibility,2)], '../'+galaxy_name+'/Reduction/'+galaxy_name+'_comp.txt', format='fixed_width_two_line', \
names = ['Comp'], overwrite=True)
# =========
# =========
# Plot
xmin, xmax = np.array(wvl).min(), np.array(wvl).max()
DustpediaCheckPlot = plt.figure(figsize=(15,5))
plt.subplot(2,1,1)
plt.plot(np.array(wvl), np.array(fluxes_CAAPR), \
linestyle = 'None', marker = '.', color = 'navy', label = 'CAAPR+Literature Photometry')
plt.plot(wvl, fluxes, linestyle = 'None', marker = '.', color = 'red', label = 'My Photometry')
plt.xscale('log'), plt.yscale('log')
plt.ylabel(r'Flux (Jy)')
plt.legend()
plt.subplot(2,1,2)
plt.axhline(5, color = 'r', linestyle = '-')
plt.plot(wvl, compatibility, ms = 10.0, linestyle = 'None', color = 'k', marker = '.')
for i in range(len(wvl)):
plt.text(wvl[i], 0.5, bandnames[i], rotation = 90)
plt.xscale('log'), plt.yscale('log')
plt.xlabel(r'Wavelength ($\mu$m)'), plt.ylabel(r'Compatibility $\lambda$')
plt.subplots_adjust(hspace=0.,wspace=0.)
DustpediaCheckPlot.savefig('../'+galaxy_name+'/Reduction/'+galaxy_name+'_SED.pdf', bbox_inches = 'tight')
# =========
return
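# Hedged usage sketch (not part of the original pipeline): the galaxy name and
# band subset below are placeholders; the call assumes the '../<galaxy>/' and
# '../DustPedia_Tables/' directory layout used above is already in place.
def _demo_check_dustpedia():
    working_bands = ['GALEX_FUV', 'SDSS_r', 'SPIRE_250']  # illustrative subset of DustPedia band names
    check_Dustpedia('NGC0628', working_bands)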
def GalExtCorr(galaxy_name, list_band, ra, dec):
'''
Function to evaluate the Galactic extinction correction factor for each band and write it to a text file.
Args: Galaxy name, list of bands, RA and Dec (deg) of the galaxy centre
Returns: Nothing, writes galactic_extinction_correction.txt in the galaxy folder.
'''
from astropy.io import ascii
import ChrisFuncs
list_correction = []
for band in list_band:
try:
if band == 'Spitzer_3.6': band = 'IRAC1'
elif band == 'Spitzer_4.5': band = 'IRAC2'
elif band == 'Spitzer_5.8': band = 'IRAC3'
elif band == 'Spitzer_8.0': band = 'IRAC4'
elif band == 'WISE_3.4': band = 'WISE1'
elif band == 'WISE_4.6': band = 'WISE2'
# Note: the triple-r spelling 'ExtCorrrct' matches the actual function name in ChrisFuncs
correction = ChrisFuncs.ExtCorrrct(ra, dec, band, verbose = False)
list_correction.append(correction)
except Exception: list_correction.append(1) # Fall back to no correction if the lookup fails
ascii.write([list_band, list_correction], \
'../'+galaxy_name+'/galactic_extinction_correction.txt', names = ['Band', 'Correction'])
return
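# Hedged usage sketch: coordinates are approximately those of NGC 628 and the
# band list is illustrative; running it requires the ChrisFuncs package and
# network access to the Galactic dust service it queries.
def _demo_galextcorr():
    GalExtCorr('NGC0628', ['GALEX_FUV', 'SDSS_r', 'Spitzer_3.6'], 24.174, 15.784)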
########################################
# HERE I SHAMELESSLY COPY FROM CLARK   #
########################################
def AnnulusSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre):
'''
Function to sum all elements in an annulus centred upon the middle of the given array
Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
Returns: List containing the sum of the pixel values in the annulus, the number of pixels counted, an array of the pixel values, and the indices of NaN-valued pixels inside the annulus
'''
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-(rad_inner+width)])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+(rad_inner+width)])))
j_cutout_min = int(np.floor(max([0, j_centre-(rad_inner+width)])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+(rad_inner+width)])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
# Only flag the mismatch when neither pixel is NaN (NaN != NaN is always True)
if np.isnan(array[int(i_centre),int(j_centre)])==False and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)])==False:
import pdb
print('SEVERE ERROR: AnnulusSum check failed.')
pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj_inner = float(rad_inner)
semi_min_inner = float(semi_maj_inner) / float(axial_ratio)
semi_maj_outer = float(rad_inner) + float(width)
semi_min_outer = float(semi_maj_outer) / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within inner ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_inner = (j_trans**2 / semi_maj_inner**2) + (i_trans**2 / semi_min_inner**2 )
# Use meshgrids to create array identifying which coordinates lie within outer ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_outer = (j_trans**2 / semi_maj_outer**2) + (i_trans**2 / semi_min_outer**2 )
# Calculate flux & pixels in aperture, and store pixel values
annulus_where = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==False) )
annulus_tot = sum( array[ annulus_where ] )
annulus_count = annulus_where[0].shape[0]
annulus_pix = array[ annulus_where ]
annulus_nan = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==True) )
# Return results
return [annulus_tot, annulus_count, annulus_pix, annulus_nan]
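# Minimal sketch exercising AnnulusSum on a synthetic flat image: with unit
# pixel values the returned sum equals the pixel count, which gives a quick
# sanity check of the elliptical-annulus selection.
def _demo_annulus_sum():
    import numpy as np
    image = np.ones((101, 101))
    tot, count, pix, nan_idx = AnnulusSum(image, 20.0, 5.0, 1.5, 30.0, 50.0, 50.0)
    assert tot == count  # flat unit image: sum of pixels equals number of pixels
    return tot, count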
def EllipseMask(array, rad, axial_ratio, angle, i_centre, j_centre):
'''
Function to return a mask identifying all pixels within an ellipse of given parameters
Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
Returns: Mask array of same dimensions as input array where pixels that lie within ellipse have value 1
'''
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad)
semi_min = float(rad) / float(axial_ratio)
# Convert the input angle to radians as a plain float; accept bare numbers, numpy floats, or astropy Quantities
try:
if angle.unit == 'rad': angle = float(angle.value)
else: angle = np.radians(float(angle.value)) # Quantity assumed to be in degrees
except AttributeError: angle = np.radians(float(angle)) # Bare number: assumed to be in degrees
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check = (j_trans**2 / semi_maj**2) + (i_trans**2 / semi_min**2 )
# Create ellipse mask
ellipse_mask = np.zeros([array.shape[0], array.shape[1]])
ellipse_mask[ np.where( ellipse_check<=1 ) ] = 1.0
# Return array
return ellipse_mask
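# Minimal sketch: build an elliptical mask on an empty image and compare the
# masked pixel count with the analytic ellipse area (pi*a*b, roughly 157 pixels
# for a=10, b=5); small deviations are expected from pixelisation.
def _demo_ellipse_mask():
    import numpy as np
    mask = EllipseMask(np.zeros((64, 64)), 10.0, 2.0, 45.0, 32.0, 32.0)
    return int(mask.sum())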
def CircleSum(fits, i_centre, j_centre, r):
'''
Function to sum all pixel elements inside a given circle... the old-fashioned way
Args: Array to be used, i & j coordinates of centre of circle, radius of circle
Returns: Sum of elements within circle, number of pixels within circle
'''
i_centre, j_centre, r = int(i_centre), int(j_centre), int(r)
ap_sum = 0.0
ap_pix = 0.0
ap_values = []
for i in range(-r, r+1):
for j in range(-r, r+1):
if i**2.0 + j**2.0 <= r**2.0:
try:
ap_sum += fits[i_centre+i, j_centre+j]
ap_pix += 1.0
ap_values.append(fits[i_centre+i, j_centre+j])
except IndexError:
continue # Pixel falls outside the array; note that negative indices wrap around silently
return [ap_sum, ap_pix, ap_values]
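# Minimal sketch: sum a circular aperture on a flat image of value 2; the
# returned sum should be twice the number of pixels inside the circle.
def _demo_circle_sum():
    import numpy as np
    ap_sum, ap_pix, ap_values = CircleSum(np.full((51, 51), 2.0), 25, 25, 10)
    return ap_sum, ap_pix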
|
python
|
def model_to_dicts(Schema, model):
# If the result comes from a paginator, pass in model.items instead of the paginator itself
common_schema = Schema(many=True) # Instantiate the custom schema (a subclass of ma.ModelSchema) as the serializer
output = common_schema.dump(model) # Produce the serializable output
return output
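# Hedged usage sketch: the real project passes schemas derived from
# ma.ModelSchema; a plain marshmallow Schema is substituted here purely to show
# the call pattern (pass paginator.items when the query result is paginated).
def _demo_model_to_dicts():
    from marshmallow import Schema, fields

    class ItemSchema(Schema):
        id = fields.Int()
        name = fields.Str()

    rows = [{"id": 1, "name": "first"}, {"id": 2, "name": "second"}]
    return model_to_dicts(ItemSchema, rows)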
|
python
|
"""OsservaPrezzi class for aio_osservaprezzi."""
from .const import ENDPOINT, REGIONS
from .models import Station
from .exceptions import (
RegionNotFoundException,
StationsNotFoundException,
OsservaPrezziConnectionError,
OsservaPrezziException,
)
from typing import Any
import asyncio
import aiohttp
import async_timeout
class OsservaPrezzi:
def __init__(
self,
parameters,
session: aiohttp.ClientSession = None,
request_timeout: int = 8,
) -> None:
"""Initialize connection with OsservaPrezzi API."""
self._session = session
self._close_session = False
self.request_timeout = request_timeout
try:
# Build the POST body with implicit string concatenation so no indentation whitespace ends up in the parameters
self._parameters = (
f"region={REGIONS[parameters['region']]}"
f"&province={parameters['province']}"
f"&town={parameters['town']}"
"&carb="
)
except KeyError as exception:
raise RegionNotFoundException(
"Error occurred while trying to find the region."
) from exception
async def _request(self) -> Any:
"""Handle a request to OsservaPrezzi API."""
method = "POST"
headers = {"Content-Type": "application/x-www-form-urlencoded"}
if self._session is None:
self._session = aiohttp.ClientSession()
self._close_session = True
try:
async with async_timeout.timeout(self.request_timeout):
response = await self._session.request(
method, ENDPOINT, data=self._parameters, headers=headers,
)
response.raise_for_status()
except asyncio.TimeoutError as exception:
raise OsservaPrezziConnectionError(
"Timeout occurred while connecting to OsservaPrezzi."
) from exception
except (aiohttp.ClientError, aiohttp.ClientResponseError) as exception:
raise OsservaPrezziConnectionError(
"Error occurred while connecting to OsservaPrezzi."
) from exception
if "application/json" not in response.headers.get("Content-Type", ""):
raise OsservaPrezziException("Unexpected response from OsservaPrezzi.")
return (await response.json())["array"]
async def get_stations(self):
"""Return the list of stations for the configured location."""
data = await self._request()
return [Station.from_dict(s) for s in data]
async def get_station_by_id(self, id):
"""Return the station with the given id, or raise StationsNotFoundException."""
stations = await self.get_stations()
try:
return next(filter(lambda d: d.id == id, stations))
except StopIteration:
raise StationsNotFoundException("Couldn't find specified station.")
async def close(self) -> None:
"""Close the session."""
if self._close_session and self._session:
await self._session.close()
async def __aenter__(self) -> "OsservaPrezzi":
"""Async enter."""
return self
async def __aexit__(self, *exc_info) -> None:
"""Async exit."""
await self.close()
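# Hedged usage sketch: the region/province/town values below are placeholders
# and must correspond to keys understood by REGIONS and the OsservaPrezzi API.
async def _demo_osservaprezzi():
    params = {"region": "Lazio", "province": "RM", "town": "Roma"}
    async with OsservaPrezzi(params) as client:
        return await client.get_stations()

# e.g. asyncio.run(_demo_osservaprezzi())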
|
python
|
import telebot
import os
TOKEN = os.getenv('TELE_TOKEN')
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start'])
def start_message(message):
markup = telebot.types.ReplyKeyboardMarkup()
# start_btn = telebot.types.KeyboardButton("/start")
help_btn = telebot.types.KeyboardButton("/help")
markup.add(help_btn)
bot.send_message(message.chat.id, 'Hi! This is a super cool bot!', reply_markup=markup)
@bot.message_handler(commands=['help'])
def help_message(message):
bot.send_message(message.chat.id, 'Enter a date in the format YYYY-MM-DD')
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
msg = message.text
bot.send_message(message.from_user.id, msg)
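# Hedged sketch (not in the original bot): the /help prompt asks for a
# YYYY-MM-DD date, so a validator along these lines could be wired into
# get_text_messages before echoing the text back.
def _parse_date(text):
    from datetime import datetime
    try:
        return datetime.strptime(text, '%Y-%m-%d').date()
    except ValueError:
        return None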
bot.polling(none_stop=True, interval=0)
|
python
|
# [h] paint and arrange groups
'''Paint each group of glyphs in the font with a different color.'''
# debug
import hTools2
reload(hTools2)
if hTools2.DEBUG:
import hTools2.modules.color
reload(hTools2.modules.color)
# import
from hTools2.modules.color import paint_groups
# run
f = CurrentFont()
paint_groups(f)
|