content | type
---|---
import datetime
import sqlalchemy
# noinspection PyPackageRequirements
from models.model_base import ModelBase
class Roll(ModelBase):
__tablename__ = 'rolls'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
created = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.datetime.now)
name = sqlalchemy.Column(sqlalchemy.String, unique=True, nullable=False)
|
python
|
import json
import os
class Const:
    def __init__(self, logger=None):
        # Store the logger before the constants are initialized.
        self.logger = logger
        self.init_const()
controllerAddr = "NOT SET"
controllerPort = "NOT SET"
BASE_PATH = "/home/mmeinen/polybox/code/DC-MONDRIAN" #TODO: needs to be set whenever run somewhere else... (not elegant but works...)
PATH_TO_CONFIG_FILE = "Endpoint_TP/config/config.json"
tpAddr = "NOT SET"
    # Timeouts are in seconds and 0 means it never times out
    IDLE_TIMEOUT = 0  # 10*60
    HARD_TIMEOUT = 0  # 60*60
endpointTPPort = "6633"
TCP_PROTO = "TCP"
UDP_PROTO = "UDP"
ENDPOINT_TP_PREFIX = "[EndpointTP] "
TRANSFER_MODULE_PREFIX = ENDPOINT_TP_PREFIX+"[Transfer Module] "
FETCHER_PREFIX = ENDPOINT_TP_PREFIX+"[Fetcher] "
CONNECTION_STATE = ENDPOINT_TP_PREFIX+"[Connection State] "
    @classmethod
    def init_const(cls):
        '''
        Initialize the constants according to the config.json file
        '''
        Const.PATH_TO_CONFIG_FILE = os.path.join(Const.BASE_PATH, Const.PATH_TO_CONFIG_FILE)
        try:
            with open(Const.PATH_TO_CONFIG_FILE, "r") as jsonFile:
                data = json.load(jsonFile)
            Const.controllerAddr = data["controllerAddr"]
            Const.controllerPort = data["controllerPort"]
            Const.tpAddr = data["tpAddr"]
            Const.IDLE_TIMEOUT = data["idleTimeout"]
            Const.HARD_TIMEOUT = data["hardTimeout"]
            Const.endpointTPPort = data["endpointTPPort"]
        except (OSError, json.JSONDecodeError, KeyError):
            # The logger is an instance attribute and not available in this classmethod,
            # so fall back to a plain print before exiting.
            print("[Const] ERROR: Reading config.json failed!")
            exit(1)
|
python
|
donations = [
{
'price': 1,
'thanks': True,
'col_size': 12,
},
{
'price': 5,
'thanks': True,
'link': True,
'col_size': 12,
},
{
'price': 10,
'thanks': True,
'link': True,
'status': 'SUPPORTER',
'col_size': 6,
},
{
'price': 25,
'thanks': True,
'link': True,
'postcard': True,
'status': 'SUPPORTER',
'col_size': 4,
},
{
'price': 50,
'thanks': True,
'link': True,
'postcard': True,
'status': 'LOVER',
'col_size': 4,
},
{
'price': 100,
'thanks': True,
'link': True,
'postcard': True,
'status': 'AMBASSADOR',
'col_size': 4,
},
{
'price': 200,
'thanks': True,
'link': True,
'postcard': True,
'status': 'AMBASSADOR',
'box': True,
'col_size': 3,
},
{
'price': 500,
'thanks': True,
'link': True,
'postcard': True,
'status': 'PRODUCER',
'feature': True,
'box': True,
'homepage_link': True,
'col_size': 2,
},
{
'price': 800,
'thanks': True,
'link': True,
'postcard': True,
'status': 'DEVOTEE',
'homepage_link': True,
'homepage_character': True,
'col_size': 3,
},
]
|
python
|
def merge_the_tools(string, n):
    # Split the string into consecutive substrings of length n.
    out = [string[k:k + n] for k in range(0, len(string), n)]
    for x in out:
        # Print each substring with duplicate characters removed, keeping first-occurrence order.
        print(''.join(sorted(set(x), key=x.index)))
|
python
|
# Write the pseudo code for a program that reads a target csv file and adds the correct typing to its contents.
# After that it should transpose the resulting csv file
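#
# A minimal sketch of one possible answer (a pandas-based approach; the function name
# and file paths below are illustrative assumptions, not part of the original prompt):
import pandas as pd

def type_and_transpose(in_path="target.csv", out_path="target_transposed.csv"):
    # Read the target csv file; pandas infers a dtype for each column.
    df = pd.read_csv(in_path)
    # Tighten the inferred types where possible (e.g. object -> nullable Int64/boolean).
    df = df.convert_dtypes()
    # Transpose the typed table and write the result back out.
    df.T.to_csv(out_path, header=False)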
|
python
|
# Author: Simon Liedtke <[email protected]>
#
# This module was developed with funding provided by
# the Google Summer of Code (2013).
"""
Overview
^^^^^^^^
The database package exports the following classes and exceptions:
:classes:
- Database
:exceptions:
- EntryAlreadyAddedError
- NoSuchEntryError
- EntryAlreadyStarredError
- EntryAlreadyUnstarredError
- EntryNotFoundError
- TagAlreadyAssignedError
- NoSuchTagError
- NonRemovableTagError
:functions:
- disable_undo
"""
from __future__ import absolute_import
from sunpy.database.database import Database, EntryAlreadyAddedError,\
EntryAlreadyStarredError, EntryAlreadyUnstarredError, NoSuchTagError,\
EntryNotFoundError, TagAlreadyAssignedError, disable_undo, split_database
from sunpy.database.commands import NoSuchEntryError, NonRemovableTagError
__all__ = [
'Database', 'EntryAlreadyAddedError', 'NoSuchEntryError', 'NoSuchTagError',
'NonRemovableTagError', 'EntryAlreadyStarredError',
'EntryAlreadyUnstarredError', 'EntryNotFoundError',
'TagAlreadyAssignedError', 'disable_undo', 'split_database']
|
python
|
import numpy as np
import torch
import matplotlib.pyplot as plt
from collections import OrderedDict
def linear_size(output):
output_size = np.array(output.size())
h, w = output_size[2], output_size[3]
size = int(h * w)
return size
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
conv_normal_mean = 0.0
conv_normal_sd = 0.02
bnorm_mean = 1.0
bnorm_sd = 0.02
bnorm_fill = 0
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(conv_normal_mean,
conv_normal_sd)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(bnorm_mean,
bnorm_sd)
m.bias.data.fill_(bnorm_fill)
def aduc(x, use_gpu=None):
if use_gpu is None:
use_gpu = torch.cuda.is_available()
if use_gpu:
return x.cuda()
else:
return x
def set_names(discr_a, discr_b,
gener_a, gener_b,
opt_gener_a, opt_gener_b,
opt_discr_a, opt_discr_b):
discr_a.__doc__ = 'discr_a'
discr_b.__doc__ = 'discr_b'
gener_a.__doc__ = 'gener_a'
gener_b.__doc__ = 'gener_b'
opt_gener_a.__doc__ = 'opt_gener_a'
opt_gener_b.__doc__ = 'opt_gener_b'
opt_discr_a.__doc__ = 'opt_discr_a'
opt_discr_b.__doc__ = 'opt_discr_b'
pass
def train_stage(*args):
for arg in args:
arg.train()
def create_checkpoint(*args):
checkpoint = OrderedDict()
for net in args:
name = net.__doc__
checkpoint[name] = net.state_dict()
return checkpoint
def exp_moving_mean(data, window=250):
if not isinstance(data, np.ndarray):
data = np.array(data)
alpha = 2 / (window + 1.0)
alpha_rev = 1 - alpha
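    # Vectorized exponential moving average: out[t] = alpha*data[t] + (1-alpha)*out[t-1],
    # computed below with cumulative sums of rescaled terms instead of a Python loop.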
n = data.shape[0]
pows = alpha_rev**(np.arange(n + 1))
scale_arr = 1 / pows[:-1]
offset = data[0] * pows[1:]
pw0 = alpha * alpha_rev**(n - 1)
mult = data * pw0 * scale_arr
cumsums = mult.cumsum()
out = offset + cumsums * scale_arr[::-1]
return out
def visualize_loss(da_loss_log, db_loss_log,
ga_loss_log, gb_loss_log,
exp_window=None):
if exp_window is not None:
da_loss_log = exp_moving_mean(da_loss_log, exp_window)
db_loss_log = exp_moving_mean(db_loss_log, exp_window)
ga_loss_log = exp_moving_mean(ga_loss_log, exp_window)
gb_loss_log = exp_moving_mean(gb_loss_log, exp_window)
plt.figure(figsize=(10, 4))
plt.tight_layout()
plt.subplot(1, 2, 1)
plt.plot(ga_loss_log, label="gener_a")
plt.plot(gb_loss_log, label="gener_b")
plt.xlabel("train step")
plt.ylabel("MSE")
plt.title("generators loss")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(da_loss_log, label="discr_a")
plt.plot(db_loss_log, label="discr_b")
plt.tight_layout()
plt.xlabel("train step")
plt.ylabel("MSE")
plt.title("discriminators loss")
plt.legend()
plt.tight_layout()
plt.show()
def plot_geners(sample_a, sample_b,
gener_a, gener_b):
gener_a.eval()
gener_b.eval()
plt.figure(figsize=(8, 6))
plt.subplot(2, 3, 1)
plt.imshow(sample_b.cpu().view(1, 1, 28, 28).data[0]
.numpy().reshape((28, 28)),
cmap='binary')
plt.title("b")
plt.subplot(2, 3, 2)
plt.imshow(gener_a(sample_b.view(1, 1, 28, 28)).cpu().data[0]
.numpy().reshape((28, 28)),
cmap='binary')
plt.title("gener_a(b)")
plt.subplot(2, 3, 3)
plt.imshow(gener_b(gener_a(sample_b.view(1, 1, 28, 28))).cpu().data[0]
.numpy().reshape((28, 28)),
cmap='binary')
plt.title("gener_b(gener_a(b))")
plt.subplot(2, 3, 4)
plt.imshow(sample_a.cpu().view(1, 1, 28, 28).data[0]
.numpy().reshape((28, 28)),
cmap='binary')
plt.title("a")
plt.subplot(2, 3, 5)
plt.imshow(gener_b(sample_a.view(1, 1, 28, 28)).cpu().data[0]
.numpy().reshape((28, 28)),
cmap='binary')
plt.title("gener_b(a)")
plt.subplot(2, 3, 6)
plt.imshow(gener_a(gener_b(sample_a.view(1, 1, 28, 28))).cpu().data[0]
.numpy().reshape((28, 28)),
cmap='binary')
plt.title("gener_a(gener_b(a))")
plt.tight_layout()
plt.show()
def grad_norm(model, norm_type=2):
total_norm = 0
for param in model.parameters():
param_norm = param.grad.data.norm(norm_type)
total_norm += param_norm ** norm_type
total_norm = total_norm ** (1. / norm_type)
return total_norm
def plot_grad_norms(da_grad_log, db_grad_log,
ga_grad_log, gb_grad_log):
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(ga_grad_log, label="gener_a")
plt.plot(gb_grad_log, label="gener_b")
plt.xlabel("step")
plt.ylabel("grad norm")
plt.legend()
plt.subplot(1, 2, 2)
    plt.plot(da_grad_log, label="discr_a")
plt.plot(db_grad_log, label="discr_b")
plt.xlabel("step")
plt.ylabel("grad norm")
plt.legend()
plt.tight_layout()
plt.show()
|
python
|
"""treelstm.py - TreeLSTM RNN models
Written by Riddhiman Dasgupta (https://github.com/dasguptar/treelstm.pytorch)
Rewritten in 2018 by Long-Huei Chen <[email protected]>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
import torch
import torch.nn as nn
class TreeLSTMBase(nn.Module):
@staticmethod
def extract_tree(parse):
"""
Args:
line: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
Returns:
A sequence of words, a sequence of features, and num of features.
"""
if parse is None:
return [], [], -1
parents = parse.cpu().numpy()
trees = dict()
root = None
for i in range(1, len(parents) + 1):
if i - 1 not in trees.keys() and parents[i - 1] != -1:
idx = i
prev = None
while True:
parent = parents[idx - 1]
if parent == -1:
break
tree = Tree()
if prev is not None:
tree.add_child(prev)
trees[idx - 1] = tree
tree.idx = idx - 1
if parent - 1 in trees.keys():
trees[parent - 1].add_child(tree)
break
elif parent == 0:
root = tree
break
else:
prev = tree
idx = parent
return root
class ChildSumTreeLSTM(TreeLSTMBase):
def __init__(self, rnn_type, input_size, hidden_size, bias=True):
super(ChildSumTreeLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.ioux = nn.Linear(input_size, 3 * self.hidden_size, bias=bias)
self.iouh = nn.Linear(
self.hidden_size, 3 * self.hidden_size, bias=bias)
self.fx = nn.Linear(input_size, self.hidden_size, bias=bias)
self.fh = nn.Linear(self.hidden_size, self.hidden_size, bias=bias)
def forward(self, parses, embeds):
states_c, states_h = zip(*[
self.tree_forward(
ChildSumTreeLSTM.extract_tree(parses[:, j]), embeds[:, j, :])
for j in range(parses.size(1))
])
states_c = torch.cat(states_c, dim=1)
states_h = torch.cat(states_h, dim=1)
return (states_c, states_h)
def tree_forward(self, tree, embed):
for idx in range(tree.num_children):
self.tree_forward(tree.children[idx], embed)
if tree.num_children > 0:
child_c, child_h = zip(*map(lambda x: x.state, tree.children))
child_c = torch.cat(child_c, dim=0)
child_h = torch.cat(child_h, dim=0)
else: # leaf nodes
child_c = embed[0].detach().new_zeros(
1, self.hidden_size).requires_grad_()
child_h = embed[0].detach().new_zeros(
1, self.hidden_size).requires_grad_()
tree.state = self.node_forward(embed[tree.idx], child_c, child_h)
return tree.state
def node_forward(self, embeds, child_c, child_h):
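        # Child-Sum TreeLSTM cell: the i/o/u gates use the sum of the children's
        # hidden states, while a separate forget gate is computed per child.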
child_h_sum = torch.sum(child_h, dim=0, keepdim=True)
iou = self.ioux(embeds) + self.iouh(child_h_sum)
i, o, u = torch.chunk(iou, 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = self.fh(child_h) + self.fx(embeds).repeat(len(child_h), 1)
fc = torch.mul(torch.sigmoid(f), child_c)
c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
h = torch.mul(o, torch.tanh(c))
return c, h
class BinaryTreeLSTM(TreeLSTMBase):
def __init__(self, rnn_type, hidden_size, bias=False):
super(BinaryTreeLSTM, self).__init__()
self.hidden_size = hidden_size
self.iou0 = nn.Linear(
self.hidden_size, 3 * self.hidden_size, bias=bias)
self.iou1 = nn.Linear(
self.hidden_size, 3 * self.hidden_size, bias=bias)
self.f0 = nn.Linear(self.hidden_size, self.hidden_size, bias=bias)
self.f1 = nn.Linear(self.hidden_size, self.hidden_size, bias=bias)
def forward(self, child_c, child_h):
iou = self.iou0(child_h[0]) + self.iou1(child_h[1])
i, o, u = torch.chunk(iou, 3, dim=2)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.cat((self.f0(child_h[0]), self.f1(child_h[1])), dim=0)
fc = torch.mul(torch.sigmoid(f), torch.cat(child_c, dim=0)).sum(
dim=0, keepdim=True)
c = torch.mul(i, u) + fc
h = torch.mul(o, torch.tanh(c))
return c, h
class Tree():
def __init__(self):
self.parent = None
self.num_children = 0
self.children = list()
self.state = None
self.idx = None
def add_child(self, child):
child.parent = self
self.num_children += 1
self.children.append(child)
    def __len__(self):
        # getattr needs a default here, otherwise the first call raises AttributeError.
        if getattr(self, '_size', None) is not None:
            return self._size
        count = 1
        for i in range(self.num_children):
            count += len(self.children[i])
        self._size = count
        return self._size
    def depth(self):
        if getattr(self, '_depth', None) is not None:
            return self._depth
count = 0
if self.num_children > 0:
for i in range(self.num_children):
child_depth = self.children[i].depth()
if child_depth > count:
count = child_depth
count += 1
self._depth = count
return self._depth
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : losses.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 10/04/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
import torch
import torch.nn as nn
import torch.nn.functional as F
import jactorch
__all__ = ['SigmoidCrossEntropy', 'MultilabelSigmoidCrossEntropy']
class SigmoidCrossEntropy(nn.Module):
def __init__(self, one_hot=False):
super().__init__()
self.one_hot = one_hot
self.bce = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input, target):
if not self.one_hot:
target = jactorch.one_hot_nd(target, input.size(-1))
return self.bce(input, target).sum(dim=-1).mean()
class MultilabelSigmoidCrossEntropy(nn.Module):
def __init__(self, one_hot=False):
super().__init__()
self.one_hot = one_hot
self.bce = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input, labels):
if type(labels) in (tuple, list):
labels = torch.tensor(labels, dtype=torch.int64, device=input.device)
assert input.dim() == 1
if not self.one_hot:
with torch.no_grad():
mask = torch.zeros_like(input)
if labels.size(0) > 0:
ones = torch.ones_like(labels, dtype=torch.float32)
mask.scatter_(0, labels, ones)
labels = mask
return self.bce(input, labels).sum(dim=-1).mean()
class MultitaskLossBase(nn.Module):
def __init__(self):
super().__init__()
self._sigmoid_xent_loss = SigmoidCrossEntropy()
self._multilabel_sigmoid_xent_loss = MultilabelSigmoidCrossEntropy()
    def _mse_loss(self, pred, label):
        # Note: despite the name, this returns the element-wise absolute (L1) error.
        return (pred - label).abs()
def _bce_loss(self, pred, label):
return -( jactorch.log_sigmoid(pred) * label + jactorch.log_sigmoid(-pred) * (1 - label) ).mean()
def _xent_loss(self, pred, label):
logp = F.log_softmax(pred, dim=-1)
return -logp[label].mean()
|
python
|
from typing import List
class Solution1:
def rotate(self, matrix: List[List[int]]) -> None:
n = len(matrix)
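        # Rotate layer by layer with a four-way swap; ~x equals -x - 1, so
        # matrix[~i] is row n-1-i counted from the end.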
for i in range(n//2):
for j in range(n-n//2):
matrix[i][j], matrix[~j][i], matrix[~i][~j], matrix[j][~i] = \
matrix[~j][i], matrix[~i][~j], matrix[j][~i], matrix[i][j]
class Solution2:
def rotate(self, matrix):
        # Convert the zipped tuples back to lists so the rows remain lists.
        matrix[:] = map(list, zip(*matrix[::-1]))
|
python
|
# -*- coding: utf-8 -*-
from .darts import *
from .priors import *
from .posterior import *
from .plotting import *
from .utils import *
from .plot_system_evolution import *
__version__ = "1.1.0"
# try:
# __DART_BOARD_SETUP__
# except NameError:
# __DART_BOARD_SETUP__ = False
#
# if not __DART_BOARD_SETUP__:
# __all__ = ["DartBoard"]
|
python
|
# Input: a comma-separated list of integers, representing a program and its data
TEST = "1,9,10,3,2,3,11,0,99,30,40,50"
with open('day02.txt', 'r') as infile:
PUZZLE = infile.read()
def part1(data):
"""Return the value at position 0 after running the program.
Put 12 and 2 in positions 1 and 2 before running. Instructions are:
1, x, y, r - add the values at positions x and y and put result at r
2, x, y, r - multiply instead of adding
99 - halt the execution
"""
program = data.split(',')
for index in range(len(program)):
program[index] = int(program[index])
# Test data has no position 12 and shouldn't be changed
if len(program) > 12:
program[1] = 12
program[2] = 2
index = 0
operation = program[index]
while operation != 99:
input1 = program[index + 1]
input2 = program[index + 2]
output = program[index + 3]
if operation == 1:
program[output] = program[input1] + program[input2]
else:
program[output] = program[input1] * program[input2]
index += 4
operation = program[index]
return program[0]
assert part1(TEST) == 3500
assert part1("1,0,0,0,99") == 2 # p[0] = p[0] + p[0] = 1 + 1 = 2
assert part1("2,3,0,3,99") == 2 # p[3] = p[3] * p[0] = 3 * 2 = 6
assert part1("1,1,1,4,99,5,6,0,99") == 30 # p[4] = p[1] + p[1] = 1 + 1 = 2
# p[0] = p[5] * p[6] = 5 * 6 = 30
print(part1(PUZZLE))
def part2(data):
"""Return 100 * x + y where x and y in positions 1 and 2
make the program halt with 19690720 in position 0.
In the problem description, x and y are called noun and verb.
The values will be between 0 and 99, inclusive.
"""
def result(memory, noun, verb):
memory[1] = noun
memory[2] = verb
ip = 0 # instruction pointer
opcode = memory[ip]
while opcode != 99:
parameter1 = memory[ip+1]
parameter2 = memory[ip+2]
parameter3 = memory[ip+3]
if opcode == 1:
memory[parameter3] = memory[parameter1] + memory[parameter2]
else:
memory[parameter3] = memory[parameter1] * memory[parameter2]
ip += 4
opcode = memory[ip]
return memory[0]
program = data.split(',')
for index in range(len(program)):
program[index] = int(program[index])
# exhaustive search for the two values
for noun in range(100):
for verb in range(100):
# each run modifies the program, so use a copy of the original one
if result(program.copy(), noun, verb) == 19690720:
return 100*noun + verb
print(part2(PUZZLE))
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 18:28:55 2019
@author: yoelr
"""
from . import Facility
from ..decorators import cost
from thermosteam import Stream
import numpy as np
from ... import HeatUtility
# from copy import copy
__all__ = ('CoolingTower',) #'CoolingTowerWithPowerDemand')
@cost('Flow rate', 'Cooling water pump',
S=557183, kW=1021, cost=283671, CE=551, n=0.8, BM=3.1)
@cost('Flow rate', 'Cooling tower',
S=557183, kW=1598, cost=1375e3, CE=551, n=0.7, BM=1.5)
class CoolingTower(Facility):
"""Create a cooling tower that is cost based on flow rate of cooling water."""
_units = {'Flow rate': 'kmol/hr'}
_N_heat_utilities = 1
_N_outs = _N_ins = 2
evaporation = 0.01
blowdown = 0.001
def __init__(self, ID=''):
thermo = HeatUtility.cooling_agents['Cooling water'].thermo
self.makeup_water = makeup_water = Stream('cooling_tower_makeup_water',
thermo=thermo)
loss = makeup_water.flow_proxy()
loss.ID = 'evaporation_and_blowdown'
super().__init__(ID, ('return_cooling_water', makeup_water),
('cooling_water', loss), thermo=thermo)
self.cooling_water_utilities = set()
def _design(self):
cwu = self.cooling_water_utilities
if not cwu:
for u in self.system.units:
if u is self: continue
for hu in u.heat_utilities:
if hu.ID == 'Cooling water':
cwu.add(hu)
used = self._ins[0]
#: Cooling water flow rate (kmol/hr)
used.mol[0] = \
self.design_results['Flow rate'] = \
self.cooling_water = sum([i.flow for i in cwu])
hu = self.heat_utilities[0]
cw = hu.cooling_agents['Cooling water']
self._outs[0].T = cw.T
hu.ID = 'Cooling water'
hu.cost = -self.cooling_water*cw.price_kmol
self.makeup_water.mol[0] = self.cooling_water * (self.evaporation + self.blowdown)
CoolingTower._N_outs = CoolingTower._N_ins = 2
# class CoolingTowerWithPowerDemand(CoolingTower):
# _has_power_utility = True
# _N_heat_utilities = 1
# cost_options = copy(CoolingTower.cost_items)
# cost_options['Cooling tower'].kW = 0.1
# def _cost(self):
# super()._cost()
# q = self._molar_flow # kmol/hr
# hu = self.heat_utilities[0]
# cw = hu.cooling_agents['Cooling water']
# hu.ID = 'Cooling water'
# hu.flow = -q
# hu.cost = -q*cw.price_kmol
|
python
|
# Determine which CRU grid points correspond to
# the Cuenca del Valle de México.
import os
import pandas as pd
import numpy as np
import geoviews as gv
import geopandas as gpd
gv.extension("matplotlib")
gv.output(size = 150)
fdir_d = os.getcwd() + "/data/Cuencas/Regiones_Hidrologicas_Administrativas/"
fdir_r = os.getcwd() + "/results/sequia/"
fname = "rha250kgw.shp"
# Create the results folder if it does not exist.
if not os.path.exists(fdir_r):
os.mkdir(fdir_r)
# Load the hydrological administrative regions.
gdf = gpd.read_file(fdir_d + fname)
# Get the boundary of the basins.
gdf["boundary"] = gdf.boundary
# Select the Cuenca del Valle de México.
cuenca = gv.Path(
gdf[gdf["ORG_CUENCA"] == "Aguas del Valle de México"]).opts(
color = "black")
# Number of grid points to check along longitude and latitude.
n = 4
lon = np.empty((1, n))
lat = np.empty((n, 1))
lonp = np.empty((1, n))
latp = np.empty((n, 1))
# Initial lon and lat pivots.
lon_0 = -99.75
lat_0 = 20.75
# Grid point number with respect to the mexico_cru files.
lonp_0 = 38
latp_0 = 12
# Generate the list of longitudes and latitudes every 0.5°.
for i in range(0,n):
lon[0, i] = lon_0 + 0.5 * i
lat[i, 0] = lat_0 - 0.5 * i
lonp[0, i] = lonp_0 + 1 * i
latp[i, 0] = latp_0 - 1 * i
# Build the mesh.
lons = np.repeat(lon, n, axis = 0)
lats = np.repeat(lat, n, axis = 1)
lons_p = np.repeat(lonp, n, axis = 0)
lats_p = np.repeat(latp, n, axis = 1)
# Concatenate and build lon/lat pairs over the whole mesh.
points = list(zip(lons.flatten(), lats.flatten(),
lons_p.flatten(), lats_p.flatten()))
# Convert them to geoviews points.
points_gv = [gv.Points(x).opts(color = "black") for x in points]
carre = []
# Build the WKT string for the perimeter of each grid point's area of influence.
for i, element in enumerate(points):
carre.append("POLYGON(("
+ str(element[0] - 0.25) + " " + str(element[1] - 0.25) + ", "
+ str(element[0] - 0.25) + " " + str(element[1] + 0.25) + ", "
+ str(element[0] + 0.25) + " " + str(element[1] + 0.25) + ", "
+ str(element[0] + 0.25) + " " + str(element[1] - 0.25) + ", "
+ str(element[0] - 0.25) + " " + str(element[1] - 0.25) + "))")
# Create a GeoDataFrame with geometry built from the WKT strings.
df_poly = pd.DataFrame({"geometry": carre})
df_poly["geometry"] = gpd.GeoSeries.from_wkt(df_poly["geometry"])
gdf_poly = gpd.GeoDataFrame(
df_poly, geometry = df_poly.geometry, crs = "epsg:4326")
# Project the geometry to UTM and compute the area.
gdf_poly["Area"] = gdf_poly.to_crs("epsg:32633").area
gdf_poly["Intersect"] = 0
for i in gdf_poly.index:
    # Compute the intersection area between the influence areas and the basin.
overlay = gpd.overlay(
gdf[gdf["ORG_CUENCA"] == "Aguas del Valle de México"],
gdf_poly[gdf_poly.index == i], how = "intersection"
)["geometry"].to_crs("epsg:32633").area
    # Make sure the GeoDataFrame keeps zeros when there is no
    # intersection.
if len(overlay.index) > 0:
gdf_poly.loc[i, "Intersect"] = overlay[0]
# Compute the intersection percentage between the influence areas
# and the basin.
gdf_poly["Per_intersect"] = gdf_poly["Intersect"] / gdf_poly["Area"]
# Compute the percentage of the basin area covered by each
# influence area.
gdf_poly["Per_cuenca"] = (
gdf_poly["Intersect"] /
gdf[gdf["ORG_CUENCA"] == "Aguas del Valle de México"].to_crs(
"epsg:32633").area.iloc[0]
)
opts_yes = {"alpha": 0.3, "edgecolor": "black",
"facecolor": "blue", "linewidth": 1.5}
opts_no = {"alpha": 0.3, "edgecolor": "black",
"facecolor": "red", "linewidth": 1.5}
graph = cuenca
columns = ["lon", "lat", "lonp", "latp", "Per_intersect", "Per_cuenca"]
df_cuenca = pd.DataFrame(columns = columns)
# Overlay the basin plot with the grid points.
for element in points_gv:
graph *= element
# Overlay the influence area of each grid point on the plot.
for i in range(0, len(gdf_poly)):
    # If the intersection percentage is below 0.5, the influence
    # area is drawn in red.
if gdf_poly.Per_intersect[i] < 0.5:
graph *= gv.Shape(gdf_poly.geometry[i]).opts(**opts_no)
    # If the intersection percentage is 0.5 or greater, the
    # influence area is drawn in blue.
else:
graph *= gv.Shape(gdf_poly.geometry[i]).opts(**opts_yes)
        # Add the information of the grid points to be used to the
        # DataFrame.
df_cuenca = df_cuenca.append({
columns[0]: points[i][0], columns[1]: points[i][1],
columns[2]: points[i][2], columns[3]: points[i][3],
columns[4]: gdf_poly.at[i, columns[4]],
columns[5]: gdf_poly.at[i, columns[5]]},
ignore_index = True)
# Make sure the grid point numbers with respect to the mexico_cru
# files are integers.
df_cuenca = df_cuenca.astype({columns[2]: "int32", columns[3]: "int32"})
# Compute the cumulative sum of the basin area covered by each
# influence area.
df_cuenca["Per_cuenca_cumsum"] = df_cuenca.Per_cuenca.cumsum()
graph.opts(
title = "Región Hidrológico Administrativa\nAguas del Valle de México",
fontsize = 18)
gv.save(graph, fdir_r + "cuenca_grid.png")
df_cuenca.to_csv(fdir_r + "grid_points.csv", index = False)
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
# Import individual competition history
from django.core.management.base import BaseCommand
from HistComp.models import HistCompetitie, HistCompetitieIndividueel
from NhbStructuur.models import NhbVereniging
from Sporter.models import Sporter
import argparse
TOEGESTANE_KLASSEN = ('Recurve', 'Compound', 'Barebow', 'Longbow', 'Instinctive Bow')
class Command(BaseCommand):
help = "Importeer historische competitie uitslag, individueel"
verbose = False
def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
super().__init__(stdout, stderr, no_color, force_color)
self._count_not6scores = 0
self._count_noname = 0
self._count_skip = 0
self._count_error = 0
self._count_added = 0
self._count_dupe = 0
self._count_dupe_bow = 0
self._boogtype2histcomp = dict() # [boogtype] = HistCompetitie
def add_arguments(self, parser):
parser.add_argument('filename', nargs=1, type=argparse.FileType("r"),
help="in te lezen file")
parser.add_argument('seizoen', nargs=1,
help="competitie seizoen: 20xx/20yy")
parser.add_argument('comptype', nargs=1, choices=('18', '25'),
help="competitie type: 18 of 25")
parser.add_argument('--verbose', action='store_true')
@staticmethod
def make_or_find_histcompetitie(seizoen, comp_type, klasse):
# check if the record already exists
objs = HistCompetitie.objects.filter(
seizoen=seizoen,
comp_type=comp_type,
klasse=klasse,
is_team=False)
if len(objs):
# return existing object
histcompetitie = objs[0]
else:
# create new object
histcompetitie = HistCompetitie()
histcompetitie.seizoen = seizoen
histcompetitie.comp_type = comp_type
histcompetitie.klasse = klasse
histcompetitie.is_team = False
histcompetitie.save()
return histcompetitie
def _verwijder_eerdere_import(self, seizoen, comptype):
objs = HistCompetitie.objects.filter(seizoen=seizoen, comp_type=comptype)
if len(objs):
objs.delete()
@staticmethod
def _convert_scores(scores):
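        # Convert the score strings to integers; also return how many of them
        # are non-zero and their total.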
aantal = 0
totaal = 0
getallen = list()
for score in scores:
getal = int(score)
getallen.append(getal)
if getal:
aantal += 1
totaal += getal
# for
return getallen, aantal, totaal
def _import(self, lines, seizoen, comptype):
        # sanity check for the whole file
linenr = 0
for line in lines:
linenr += 1
spl = line.split(';')
if len(spl) != 11:
self.stderr.write("[ERROR] Fout in regel %s: niet 11 kolommen" % linenr)
self._count_error += 1
            if linenr > 1 and spl[2] not in TOEGESTANE_KLASSEN:
                self.stderr.write('[ERROR] Regel %s: onbekende klasse %s' % (linenr, repr(spl[2])))
self._count_error += 1
# for
if lines[0].split(";")[0] != "bondsnummer":
self.stderr.write("[ERROR] Eerste regels bevat geen headers: %s" % repr(lines[0]))
self._count_error += 1
del lines[0]
if self._count_error > 0:
return
histcomps = dict() # ['klasse'] = HistCompetitie()
        indiv_scores = list()  # (gemiddelde, scores, insertion index, HistCompetitieIndividueel)
bulk = list()
line_nr = 0
for line in lines:
line_nr += 1
spl = line.strip().split(";")
            # spl = [lid_nr, ver_nr, klasse, score1..7, gemiddelde]
lid_nr = spl[0]
ver_nr = spl[1]
klasse = spl[2] # boogtype
try:
histcompetitie = histcomps[klasse]
except KeyError:
                # new klasse
histcomps[klasse] = histcompetitie = self.make_or_find_histcompetitie(seizoen, comptype, klasse)
                # derive a reasonable boogtype for each klasse
if "Recurve" in klasse:
boogtype = "R"
elif "Compound" in klasse:
boogtype = "C"
elif "Barebow" in klasse:
boogtype = "BB"
elif "Longbow" in klasse:
boogtype = "LB"
elif "Instinctive" in klasse:
boogtype = "IB"
else:
self.stdout.write('[WARNING] Onzeker welk boogtype voor klasse %s' % repr(klasse))
self._count_skip += 1
continue
self._boogtype2histcomp[boogtype] = histcompetitie
            # skip if there are not at least 6 scores
scores, _, totaal = self._convert_scores(spl[3:3+7])
            # look up the member's name (the spelling in the CRM is leading)
try:
sporter = Sporter.objects.get(lid_nr=lid_nr)
except Sporter.DoesNotExist:
                # cannot find the name right now - import anyway and fill in later
print("[WARNING] Kan naam niet vinden bij NHB nummer %s" % repr(lid_nr))
sporter = None
self._count_noname += 1
            # look up and store the name of the vereniging
try:
ver = NhbVereniging.objects.get(ver_nr=ver_nr)
ver_naam = ver.naam
except NhbVereniging.DoesNotExist:
                # fall-back for recently removed verenigingen
if ver_nr == '1026':
ver_naam = 'Victoria'
elif ver_nr == '1058':
ver_naam = 'Willem Tell'
elif ver_nr == '1093':
ver_naam = 'De Bosjagers'
elif ver_nr == '1147':
ver_naam = 'Diana'
elif ver_nr == '1152':
ver_naam = 'Ons Genoegen'
elif ver_nr == '1170':
ver_naam = 'Batavieren Treffers'
elif ver_nr == '1191':
ver_naam = 'Eendracht St Sebast'
elif ver_nr == '1226':
ver_naam = 'Centaur Asten'
else:
ver_naam = '?'
self.stdout.write('[WARNING] Kan geen naam opzoeken voor verwijderde vereniging %s' % ver_nr)
gemiddelde = float(spl[10].replace(',', '.')) # 9,123 --> 9.123
hist = HistCompetitieIndividueel()
hist.histcompetitie = histcompetitie
hist.rank = 0
hist.schutter_nr = lid_nr
if sporter:
hist.schutter_naam = " ".join([sporter.voornaam, sporter.achternaam])
hist.boogtype = boogtype
hist.vereniging_nr = ver_nr
hist.vereniging_naam = ver_naam
hist.score1 = scores[0]
hist.score2 = scores[1]
hist.score3 = scores[2]
hist.score4 = scores[3]
hist.score5 = scores[4]
hist.score6 = scores[5]
hist.score7 = scores[6]
scores.sort(reverse=True)
lowest = scores[-1]
if hist.score7 == lowest:
hist.laagste_score_nr = 7
elif hist.score6 == lowest:
hist.laagste_score_nr = 6
elif hist.score5 == lowest:
hist.laagste_score_nr = 5
elif hist.score4 == lowest:
hist.laagste_score_nr = 4
elif hist.score3 == lowest:
hist.laagste_score_nr = 3
elif hist.score2 == lowest:
hist.laagste_score_nr = 2
else:
hist.laagste_score_nr = 1
hist.gemiddelde = gemiddelde
hist.totaal = totaal - lowest
# check if the record already exists
dupe = HistCompetitieIndividueel.objects.filter(
histcompetitie=hist.histcompetitie,
schutter_nr=hist.schutter_nr,
vereniging_nr=hist.vereniging_nr)
if len(dupe) > 0:
tup = (gemiddelde, scores, len(indiv_scores), dupe[0])
indiv_scores.append(tup)
self._count_dupe += 1
else:
tup = (gemiddelde, scores, len(indiv_scores), hist)
indiv_scores.append(tup)
bulk.append(hist)
self._count_added += 1
if len(bulk) >= 100:
HistCompetitieIndividueel.objects.bulk_create(bulk)
bulk = list()
# for
if len(bulk):
HistCompetitieIndividueel.objects.bulk_create(bulk)
        # hand out the rank numbers again
ranks = dict() # ['boogtype'] = int
indiv_scores.sort(reverse=True)
for gem, scores, nr, hist in indiv_scores:
try:
ranks[hist.boogtype] += 1
hist.rank = ranks[hist.boogtype]
except KeyError:
hist.rank = 1
ranks[hist.boogtype] = hist.rank
hist.save()
# for
def _delete_dupes(self):
""" Sommige BB/IB/LB schutters staan in de geïmporteerde data OOK genoemd
in de recurve klasse, met exact dezelfde scores.
Dit was nodig in het oude programma voor het team schieten waarbij een
Recurve team ook BB/IB/LB schutters mag bevatten.
Andere schutters schieten zowel de R als C klasse of the BB en R klasse
en hebben dan niet dezelfde scores.
Zoek de NHB nummers van R schutters die ook in de BB/IB/LB voorkomen
Als de scores ook overeen komen, verwijder dan het records in de R klasse.
"""
self.stdout.write("[INFO] Removing duplicates from Recurve results (dupe with BB/IB/LB)")
try:
histcomp_r = self._boogtype2histcomp['R']
except KeyError:
return
        # loop over the smallest klassen (BB/IB/LB)
for boogtype in ('BB', 'IB', 'LB'):
for houtobj in HistCompetitieIndividueel.objects.filter(boogtype=boogtype,
histcompetitie=self._boogtype2histcomp[boogtype]):
                # look up this number in the Recurve klasse
try:
robj = HistCompetitieIndividueel.objects.get(boogtype='R',
histcompetitie=histcomp_r,
schutter_nr=houtobj.schutter_nr)
except HistCompetitieIndividueel.DoesNotExist:
pass
else:
if houtobj.totaal == robj.totaal:
                        # check that all scores match
if (houtobj.score1 == robj.score1 and houtobj.score2 == robj.score2 and
houtobj.score3 == robj.score3 and houtobj.score4 == robj.score4 and
houtobj.score5 == robj.score5 and houtobj.score6 == robj.score6 and
houtobj.score7 == robj.score7):
                            # found
                            # delete the recurve object
                            # unfortunately this leaves a gap in the ranking
self._count_dupe_bow += 1
robj.delete()
# print("lid_nr:%s, hout:%s, totaal_1:%s, totaal_2:%s" % (houtobj.schutter_nr, houtobj.boogtype, houtobj.totaal, robj.totaal))
# if
# for
# for
def handle(self, *args, **options):
# self.stderr.write("import individuele competitie historie. args=%s, options=%s" % (repr(args), repr(options)))
self.verbose = options['verbose']
comptype = options['comptype'][0]
seizoen = options['seizoen'][0]
if len(seizoen) != 9 or seizoen[4] != "/":
self.stderr.write("[ERROR] Seizoen moet het formaat 'jaar/jaar+1' hebben, bijvoorbeeld '2010/2011' (was %s)" % repr(seizoen))
return
try:
lines = options['filename'][0].readlines()
except UnicodeDecodeError as exc:
self.stderr.write("File has format issues (%s)" % str(exc))
return
linecount = len(lines)
        # remove the previously imported results
self._verwijder_eerdere_import(seizoen, comptype)
self._import(lines, seizoen, comptype)
self._delete_dupes()
self.stdout.write("Read %s lines; skipped %s dupes; %s skipped;"
" %s skip with errors; %s skip dupe bow score; added %s records;"
" %s without name" % (linecount, self._count_dupe, self._count_skip,
self._count_error, self._count_dupe_bow,
self._count_added - self._count_dupe_bow,
self._count_noname))
# end of file
|
python
|
#!/usr/bin/env python2
# Copyright (c) 2017, ESET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from Crypto.Cipher import CAST
import sys
import argparse
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-e", "--encrypt", help="encrypt carbon file", required=False)
parser.add_argument("-d", "--decrypt", help="decrypt carbon file", required=False)
try:
args = parser.parse_args()
except IOError as e:
parser.error(e)
return 0
if len(sys.argv) != 3:
parser.print_help()
return 0
key = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0\xFE\xFC\xBA\x98\x76\x54\x32\x10"
iv = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
cipher = CAST.new(key, CAST.MODE_OFB, iv)
if args.encrypt:
plaintext = open(args.encrypt, "rb").read()
while len(plaintext) % 8 != 0:
plaintext += "\x00"
data = cipher.encrypt(plaintext)
open(args.encrypt + "_encrypted", "wb").write(data)
else:
ciphertext = open(args.decrypt, "rb").read()
while len(ciphertext) % 8 != 0:
ciphertext += "\x00"
data = cipher.decrypt(ciphertext)
open(args.decrypt + "_decrypted", "wb").write(data)
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Joe Gao ([email protected])
import os
import numpy as np
from fastapi import FastAPI
from predictor import main
from storages import DIC_ZODB, milvusDB
app = FastAPI()
collection_cosine = 'dependency'
partition_tags_cosine = '202103'
collection_words = 'words'
partition_tags_subjects = 'subjects'
partition_tags_objects = 'objects'
zodb_code = 'dependency'
db = DIC_ZODB.get(zodb_code)
db.open()
'''ZODB API
# from pydantic import BaseModel
# class ZodbItem(BaseModel):
# key: int
# subj: str
# obj: str
# score: float
# text: str
# @app.post("/zodb_insert")
# async def zodb_insert(item: ZodbItem, db_code: str='dependency'):
# db = DIC_ZODB.get(db_code)
# key = item.key
# value = {'text': item.text, 'subject': item.subj, 'object': item.obj, 'score': item.score}
# msg = db.insert(key, value)
# return {'result': msg}
# @app.post("/zodb_search")
# async def zodb_search(key: int, db_code: str='dependency'):
# db = DIC_ZODB.get(db_code)
# return {'result': db.search(key)}
'''
def normalization(x):
return (x - np.min(x)) * 1 / (np.max(x) - np.min(x)) + 1e-9
def _get_rels(text, rels):
vecs_s, vecs_o, values = [], [], []
for r in rels:
subj = r.get('from_word')
pos_subj = r.get('from_pos')
obj = r.get('to_word')
pos_obj = r.get('to_pos')
score = r.get('score')
tensors = r.get('tensors')
tensor_s = np.array(tensors.get('subject'))
tensor_o = np.array(tensors.get('object'))
vecs_s.append(tensor_s)
vecs_o.append(tensor_o)
values.append({
'text': text,
'subject': subj,
'subject_pos': pos_subj,
'object': obj,
'object_pos': pos_obj,
'score': score,
})
return vecs_s, vecs_o, values
def _get_qqp(text, _text_candidate):
_r_qqp = main('qqp', text, _text_candidate)
_is_match, _score = _r_qqp.get('result'), _r_qqp.get('score')
return _score
@app.get("/{api_name}")
async def pred(api_name: str, input1: str, input2: str=None):
rst = main(api_name, input1, input2)
return rst
# @app.post("/ke_insert")
# async def ke_insert(
# input1: str,
# input2: str=None,
# dim: int=2048,
# model: str='test',
# db_code: str='dependency',
# partition_tag: str='202103',
# index_file_size: int=1024,
# ):
# rst = main(model, input1, input2)
# text, rels = rst.get('text'), rst.get('rels')
# vecs, values = _get_rels(text, rels)
# ids = milvusDB.insert(
# vecs,
# dim=dim,
# collection_name=db_code,
# partition_tag=partition_tag,
# index_file_size=index_file_size,
# )
# milvusDB.commit()
# db = DIC_ZODB.get(db_code)
# msg = db.insert(ids, values)
# return {'result': msg}
def search_cosine(_cosines, values, top_k=5):
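    # Query Milvus for the nearest stored relation vectors, look up the matching
    # records in ZODB and attach them to each value as distance-sorted candidates.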
rst = []
simis_cosine = milvusDB.search(
_cosines,
collection_name=collection_cosine,
partition_tags=partition_tags_cosine,
top_k=top_k
)
simis_cosine = [[(s.id, s.distance) for s in simi] for simi in simis_cosine]
for simi, value in zip(simis_cosine, values):
_milvus_candidates = []
for _id, _distance in simi:
_milvus_search_rst = db.root.dic_cosines[_id]
_milvus_search_rst['distance'] = _distance
_text_candidate = _milvus_search_rst.get('text')
# qqp_score = _get_qqp(text, _text_candidate)
# _milvus_search_rst['text_match_score'] = qqp_score
_milvus_candidates.append(_milvus_search_rst)
_milvus_candidates = list(sorted(_milvus_candidates, key=lambda x: x.get('distance')))
value['candidates'] = _milvus_candidates
del value['text']
rst.append(value)
return rst
def search_pairs(_vs_s, _vs_o, _cosines, values, top_k=5):
rst = []
simis_subjects = milvusDB.search(np.mean(np.array(_vs_s), axis=1), collection_name=collection_words,
partition_tags=partition_tags_subjects, top_k=top_k)
simis_subjects = [[(s.id, s.distance) for s in simi] for simi in simis_subjects]
simis_subjects = list(sorted(simis_subjects, key=lambda x: x[1]))
simis_objects = milvusDB.search(np.mean(np.array(_vs_o), axis=1), collection_name=collection_words,
partition_tags=partition_tags_objects, top_k=top_k)
simis_objects = [[(s.id, s.distance) for s in simi] for simi in simis_objects]
simis_objects = list(sorted(simis_objects, key=lambda x: x[1]))
all_ids_cosine = []
for simis_subject, simis_object, _cosine, value in zip(simis_subjects, simis_objects, _cosines, values):
_cosine_candidates = []
for simi_s, simi_o in zip(simis_subject, simis_object):
id_s, dist_s = simi_s
id_o, dist_o = simi_o
_key = (id_s, id_o)
if _key in db.root.dic_pairs.keys():
id_cosine = db.root.dic_pairs[_key]
_entity = milvusDB.db.get_entity_by_id(collection_cosine, [id_cosine])[1][0]
cosine_simi = np.mean(np.cos(_cosine, np.array(_entity)))
_value = db.root.dic_cosines[id_cosine]
_value['distance'] = cosine_simi
_cosine_candidates.append(_value)
if len(_cosine_candidates):
value['candidates'] = _cosine_candidates
del value['text']
rst.append(value)
return rst
@app.post("/ke_search")
async def ke_search(
input1: str,
input2: str=None,
search_mod: str='cosine',
model: str='test',
top_k: int=5,
):
rst = main(model, input1, input2, from_api=False)
text, words, rels = rst.get('text'), rst.get('words'), rst.get('rels')
vecs_s, vecs_o, values = _get_rels(text, rels)
_maxlen = max(max(len(_s), len(_o)) for _s, _o in zip(vecs_s, vecs_o))
_vs_s = [np.pad(_v, ((0, _maxlen - len(_v)), (0, 0)), 'mean') for _v in vecs_s]
_vs_o = [np.pad(_v, ((0, _maxlen - len(_v)), (0, 0)), 'mean') for _v in vecs_o]
_cosines = np.mean(np.cos(np.array(_vs_s), np.array(_vs_o)), axis=1)
if search_mod == 'cosine':
rst = search_cosine(_cosines, values, top_k=top_k)
else:
rst = search_pairs(_vs_s, _vs_o, _cosines, values, top_k=top_k)
# if not len(rst):
# rst = search_cosine(_cosines, values, top_k=top_k)
return {'result': {'text': text, 'words': words, 'pairs': rst}}
|
python
|
# -*- coding: utf-8 -*-
import json
from flask_script import Manager
from app import app
import config
app.config.from_object(config.DevelopmentConfig)
manager = Manager(app)
@manager.command
def index_data():
print("Indexing...")
'''try:
app.elasticsearch.indices.create(index='big-one', ignore=400)
app.elasticsearch.cluster.health(wait_for_status='yellow', request_timeout=10)
# Index the data
with open('presentation-data.json') as data_file:
data = json.load(data_file)
for entry in data:
app.elasticsearch.index(
index="big-one",
doc_type="pizza",
id=entry['id'],
body=entry['name'])
    except Exception as e:
        print("Indexing error: ", str(e))'''
if __name__ == '__main__':
manager.run()
|
python
|
# GOMC Example for the Gibbs Ensemble (GEMC) using MoSDeF [1, 2, 5-10, 13-17]
# Note: In this specific example, we will be using the GEMC_NVT ensemble.
# Import the required packages and specify the force field (FF) being used.
# Note: For GOMC, the residue names are treated as molecules, so the residue names must be unique for each different molecule. [1, 2, 13-17]
# Note: Each residue can be set to a different FF, which is done by setting the residue name to a FF in a dictionary (FF_Dict). The FF selection can be a FF name (set from foyer FF repositor) or a specified FF xml file. [1, 2, 13-17]
import shutil
import pathlib
import random
from pathlib import Path
import os
WolfDefaultKind = "VlugtWIntraCutoff"
WolfDefaultPotential = "DSF"
WolfDefaultAlpha = [0.21]
WolfCutoffBoxList = [0]
WolfCutoffCoulombLowerBoundList = [10]
WolfCutoffCoulombUpperBoundList = [15]
WolfCutoffCoulombIntervalList = [0.5]
WolfAlphaLowerBoundList = [0.0]
WolfAlphabUpperBoundList = [0.5]
WolfAlphaIntervalList = [0.01]
shellFile = "cal.sh"
wolfCalFreq = 100
for root, dirs, files in os.walk(".", topdown=False):
for name in files:
if(name == "NVT_Cal_water_ethanol_fe.conf"):
shutil.copy2(shellFile, root)
path2File = os.path.join(root, name)
with open(path2File, "a") as myfile:
defPotLine = "Wolf\tTrue\t{pot}\n".format(pot=WolfDefaultPotential)
myfile.write(defPotLine)
defKindLine = "WolfKind\t{kind}\n".format(kind=WolfDefaultKind)
myfile.write(defKindLine)
defPotLine = "WolfCalibrationFreq\tTrue\t{freq}\n".format(freq=wolfCalFreq)
myfile.write(defPotLine)
for box, wolfCutoffLower, wolfCutoffUpper, wolfCutoffInterval, wolfAlphaLower, wolfAlphaUpper, wolfAlphaInterval, defaultAlpha \
in zip(WolfCutoffBoxList, WolfCutoffCoulombLowerBoundList, WolfCutoffCoulombUpperBoundList, WolfCutoffCoulombIntervalList, \
WolfAlphaLowerBoundList, WolfAlphabUpperBoundList, WolfAlphaIntervalList, WolfDefaultAlpha):
defAlphaLine = "WolfAlpha\t{box}\t{val}\n".format(box=box, val=defaultAlpha)
myfile.write(defAlphaLine)
CutoffLine = "WolfCutoffCoulombRange\t{box}\t{lb}\t{ub}\t{inter}\n".format(box=box, lb=wolfCutoffLower, ub=wolfCutoffUpper, inter=wolfCutoffInterval)
myfile.write(CutoffLine)
alphaLine = "WolfAlphaRange\t{box}\t{lb}\t{ub}\t{inter}\n".format(box=box, lb=wolfAlphaLower, ub=wolfAlphaUpper, inter=wolfAlphaInterval)
myfile.write(alphaLine)
|
python
|
class Solution:
def largestValues(self, root):
maxes = []
if not root:
return maxes
stack = [(root, 0)]
while stack:
node, level = stack.pop()
if level >= len(maxes):
                # first time reaching this level,
# append directly.
maxes.append(node.val)
else:
# update maxes if node.val is larger.
maxes[level] = max(node.val, maxes[level])
left, right = node.left, node.right
if left:
stack.append((left, level+1))
if right:
stack.append((right, level+1))
return maxes
|
python
|
import math
import pickle
import torch
from torch import distributed as dist
from torch.utils.data.sampler import Sampler
from torch.nn import SyncBatchNorm
import numpy as np
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def synchronize():
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def all_gather(data):
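    # Gather arbitrary picklable data from all ranks: serialize to a byte tensor,
    # pad every rank's tensor to the largest size, all_gather, then unpickle.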
world_size = get_world_size()
if world_size == 1:
return [data]
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to('cuda')
local_size = torch.IntTensor([tensor.numel()]).to('cuda')
size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
tensor = torch.cat((tensor, padding), 0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_loss_dict(loss_dict):
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
keys = []
losses = []
for k in sorted(loss_dict.keys()):
keys.append(k)
losses.append(loss_dict[k])
losses = torch.stack(losses, 0)
dist.reduce(losses, dst=0)
if dist.get_rank() == 0:
losses /= world_size
reduced_losses = {k: v for k, v in zip(keys, losses)}
return reduced_losses
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
# def sync_batchnorm(model):
# world_size = get_world_size()
# process_ids = list(range(world_size))
# process_group = dist.new_group(process_ids)
# model = SyncBatchNorm.convert_sync_batchnorm(model,process_group)
# return model
def simple_group_split(world_size, rank, num_groups):
# world_size: number of all processes
# rank: current process ID
# num_groups: number of groups in total, e.g. world_size=8 and you want to use 4 GPUs in a syncBN group, so num_groups=2
groups = []
rank_list = np.split(np.arange(world_size), num_groups)
rank_list = [list(map(int, x)) for x in rank_list]
for i in range(num_groups):
groups.append(dist.new_group(rank_list[i]))
group_size = world_size // num_groups
print ("Rank no.{} start sync BN on the process group of {}".format(rank, rank_list[rank//group_size]))
return groups[rank//group_size]
def convert_sync_bn(model, process_group, gpu=None):
# convert all BN layers in the model to syncBN
for _, (child_name, child) in enumerate(model.named_children()):
if isinstance(child, torch.nn.modules.batchnorm._BatchNorm):
m = torch.nn.SyncBatchNorm.convert_sync_batchnorm(child, process_group)
if (gpu is not None):
m = m.cuda(gpu)
setattr(model, child_name, m)
else:
convert_sync_bn(child, process_group, gpu)
|
python
|
"""
Contains special test cases that fall outside the scope of remaining test files.
"""
import textwrap
from unittest.mock import patch
from flake8_type_checking.checker import ImportVisitor
from flake8_type_checking.codes import TC001, TC002
from tests import REPO_ROOT, _get_error, mod
class TestFoundBugs:
def test_mixed_errors(self):
example = textwrap.dedent(
f"""
import {mod}
import pytest
from x import y
"""
)
assert _get_error(example) == {
'2:0 ' + TC001.format(module=f'{mod}'),
'3:0 ' + TC002.format(module='pytest'),
'4:0 ' + TC002.format(module='x.y'),
}
def test_type_checking_block_imports_dont_generate_errors(self):
example = textwrap.dedent(
"""
import x
from y import z
if TYPE_CHECKING:
import a
# arbitrary whitespace
from b import c
def test():
pass
"""
)
assert _get_error(example) == {
'2:0 ' + TC002.format(module='x'),
'3:0 ' + TC002.format(module='y.z'),
}
def test_model_declarations_dont_trigger_error(self):
"""
Initially found false positives in Django project, because name
visitor did not capture the SomeModel usage in the example below.
"""
example = textwrap.dedent(
"""
from django.db import models
from app.models import SomeModel
class LoanProvider(models.Model):
fk: SomeModel = models.ForeignKey(
SomeModel,
on_delete=models.CASCADE,
)
"""
)
assert _get_error(example) == set()
def test_all_list_declaration(self):
"""
__all__ declarations originally generated false positives.
"""
example = textwrap.dedent(
"""
from app.models import SomeModel
from another_app.models import AnotherModel
__all__ = [
'SomeModel',
'AnotherModel'
]
"""
)
assert _get_error(example) == set()
def test_all_tuple_declaration(self):
"""
__all__ declarations originally generated false positives.
"""
example = textwrap.dedent(
"""
from app.models import SomeModel
from another_app.models import AnotherModel
__all__ = (
'SomeModel',
'AnotherModel'
)
"""
)
assert _get_error(example) == set()
def test_callable_import(self):
"""
        Imports that are only used inside a call expression originally generated false positives.
"""
example = textwrap.dedent(
"""
from x import y
class X:
def __init__(self):
self.all_sellable_models: list[CostModel] = y(
country=self.country
)
"""
)
assert _get_error(example) == set()
def test_ellipsis(self):
example = textwrap.dedent(
"""
x: Tuple[str, ...]
"""
)
assert _get_error(example) == set()
def test_literal(self):
example = textwrap.dedent(
"""
from __future__ import annotations
x: Literal['string']
"""
)
assert _get_error(example) == set()
def test_conditional_import(self):
example = textwrap.dedent(
"""
version = 2
if version == 2:
import x
else:
import y as x
var: x
"""
)
assert _get_error(example) == {"7:4 TC002 Move third-party import 'x' into a type-checking block"}
def test_import_is_local():
"""
Check that if ValueErrors are raised in _import_is_local, we bump it into the TC002 bucket.
"""
def raise_value_error(*args, **kwargs):
raise ValueError('test')
visitor = ImportVisitor(REPO_ROOT, False, False, False, [])
assert visitor._import_is_local(mod) is True
patch('flake8_type_checking.checker.find_spec', raise_value_error).start()
assert visitor._import_is_local(mod) is False
patch.stopall()
|
python
|
from .resources import BaseResource, BaseWithSubclasses
class SwitchConnection:
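    """Context manager that yields a temporary subclass of the given resource
    bound to a different connection alias, leaving the original class untouched."""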
def __init__(self, resource, connection_alias):
self.resource = resource
self._connection_alias = connection_alias
def __enter__(self):
if issubclass(self.resource, BaseWithSubclasses):
modified = self._create_modified_base_with_subclasses()
elif issubclass(self.resource, BaseResource):
modified = self._create_modified_base()
else:
            raise ValueError("'{}' is not a mass_api_client resource.".format(self.resource))
modified.__name__ = "Modified{}".format(self.resource.__name__)
return modified
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def _create_modified_base(self):
class ModifiedResource(self.resource):
_connection_alias = self._connection_alias
return ModifiedResource
def _create_modified_base_with_subclasses(self):
class ModifiedResource(self.resource):
_connection_alias = self._connection_alias
_unmodified_cls = self.resource
@classmethod
def _create_instance_from_data(cls, data):
subcls = cls._unmodified_cls._search_subclass(data['_cls'])
return subcls(cls._connection_alias, **data)
@classmethod
def _deserialize(cls, data, many=False):
if many:
return [cls._deserialize(item) for item in data]
subcls = cls._unmodified_cls._search_subclass(data['_cls'])
return subcls._deserialize(data, many)
return ModifiedResource
|
python
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Connector to generate connect, engine and session object
based on parameters defined in config.yaml
"""
__author__ = 'Laurent.Chen'
__date__ = '2019/7/15'
__version__ = '1.0.0'
import os
import yaml
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
with open(os.path.join(os.path.dirname(__file__), 'config.yaml'), 'r', encoding='utf-8') as f:
config_dict = yaml.full_load(f)
class Connector(object):
"""
MySQL connector
:param str schema: name of target schema
"""
host = config_dict['host']
port = config_dict['port']
user = config_dict['user']
password = config_dict['password']
charset = config_dict['charset']
def __init__(self, schema):
if not isinstance(schema, str):
raise ValueError('schema must be str type')
self.schema = schema
def get_engine(self):
"""
get SQLAlchemy engine
:return: SQLAlchemy engine
:rtype: sqlalchemy.engine.base.Engine
"""
        engine = create_engine(f"mysql+mysqldb://{self.user}:{self.password}@{self.host}:{self.port}/{self.schema}"
                               f"?charset={self.charset}")
return engine
def get_connect(self):
"""
get SQLAlchemy connect
:return: SQLAlchemy connect
:rtype: sqlalchemy.engine.base.Connection
"""
conn = self.get_engine().connect()
return conn
def get_session(self):
"""
get SQLAlchemy session
:return: SQLAlchemy session
"""
session = sessionmaker(bind=self.get_engine())
return session()
if __name__ == '__main__':
pass
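# Usage sketch (illustrative only; assumes config.yaml holds valid credentials
# and that a schema named 'test_db' exists):
#
#     connector = Connector('test_db')
#     engine = connector.get_engine()      # run queries via the engine or a connection
#     session = connector.get_session()    # ORM work
#     session.close()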
|
python
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from app1.models import MyUser
# Register your models here.
admin.site.register(MyUser, UserAdmin)
|
python
|
from typing import List
class Solution:
def countPairs(self, nums: List[int], k: int) -> int:
result=0
for i in range(len(nums)-1):
for j in range(i+1, len(nums)):
if nums[i]==nums[j] and (i*j)%k==0:
result+=1
return result
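# Worked example (not part of the original submission): for nums = [3, 1, 2, 2, 2, 1, 3]
# and k = 2, the equal-value index pairs are (0, 6), (1, 5), (2, 3), (2, 4), (3, 4);
# every pair except (1, 5) has i*j divisible by 2, so countPairs returns 4.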
|
python
|
from django.conf.urls import url
from ..views.admin import SubmissionRejudgeAPI, ClassSubmissionListAPI
urlpatterns = [
url(r"^submission/rejudge?$", SubmissionRejudgeAPI.as_view(), name="submission_rejudge_api"),
url(r"^class_submission/?$", ClassSubmissionListAPI.as_view(), name="class_submission_api"),
]
|
python
|
# -*- coding: utf-8 -*-
"""Core settings and configuration."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from os import getcwd
from os.path import join
DEBUG = False
TESTING = False
# General
MUD_NAME = "Clockwork"
MUD_NAME_FULL = "Clockwork MUD Server"
# Networking
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 4000
IDLE_TIME = 180 # seconds
IDLE_TIME_MAX = 600 # seconds
# Logging
LOG_PATH = join(getcwd(), "logs", "mud.log")
LOG_TIME_FORMAT_CONSOLE = "%H:%M:%S,%F"
LOG_TIME_FORMAT_FILE = "%Y-%m-%d %a %H:%M:%S,%F"
LOG_ROTATE_WHEN = "midnight"
LOG_ROTATE_INTERVAL = 1
LOG_UTC_TIMES = False
# Storage
DATA_DIR = join(getcwd(), "data")
# Optional modules
CONTRIB_MODULES = [
# These should be import paths relative to the `contrib` package.
# ".my_contrib_module",
]
GAME_MODULES = [
# These should be import paths relative to the `game` package.
# ".my_game_module",
]
# Advanced
FORCE_GC_COLLECT = False
|
python
|
from FSMConfig import FSMConfig
class GraphicsMouseManager:
def __init__(self):
self.leftDown = False
self.middleDown = False
self.rightDown = False
self.gcLocal = FSMConfig()
self.prevDragX = None
self.prevDragY = None
self.draggedObject = None
def downHandler(self, event):
if(event.num == 1):
self.leftDown = True
self.prevDragX = event.x
self.prevDragY = event.y
for state in self.gcLocal.allStates.values():
if(state.graphic.checkBodyIntersect(event.x, event.y)):
self.draggedObject = state.graphic
break
elif(event.num == 2):
self.middleDown = True
elif(event.num == 3):
self.rightDown = True
#print("Click! {},{} on {}".format(event.x, event.y, event.num))
def upHandler(self, event):
if(event.num == 1):
self.leftDown = False
self.prevDragX = None
self.prevDragY = None
self.draggedObject = None
elif(event.num == 2):
self.middleDown = False
elif(event.num == 3):
self.rightDown = False
#print("Release! {},{} on {}".format(event.x, event.y, event.num))
def motionHandler(self, event):
#print("Move! {},{}".format(event.x, event.y))
for state in self.gcLocal.allStates.values():
state.graphic.unhighlightBody()
state.graphic.unhighlightEdge()
if(self.leftDown):
if(self.draggedObject is not None):
deltaX = event.x - self.prevDragX
deltaY = event.y - self.prevDragY
self.draggedObject.moveBy(deltaX, deltaY)
self.draggedObject.highlightBody()
self.prevDragX = event.x
self.prevDragY = event.y
else:
for state in self.gcLocal.allStates.values():
if(state.graphic.checkBodyIntersect(event.x, event.y)):
state.graphic.highlightBody()
break
for state in self.gcLocal.allStates.values():
if(state.graphic.checkEdgeIntersect(event.x, event.y)):
state.graphic.highlightEdge()
break
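# Wiring sketch (illustrative; assumes a Tkinter Canvas named `canvas` exists
# elsewhere in the application):
#
#     manager = GraphicsMouseManager()
#     canvas.bind("<ButtonPress>", manager.downHandler)
#     canvas.bind("<ButtonRelease>", manager.upHandler)
#     canvas.bind("<Motion>", manager.motionHandler)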
|
python
|
#!/usr/bin/env python3.8
# Copyright 2021 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--binary',
required=True,
help=(
'The path to the binary in the base directory to list files for. This file'
'is not included in the output'))
parser.add_argument(
'--dest_root', required=True, help="destination path root.")
parser.add_argument(
'--output', required=True, help='The path to the output file.')
parser.add_argument(
'--meta_out', required=True, help='path to metadata for tool.')
parser.add_argument(
'--name', required=True, help='name of host tool in metadata.')
args = parser.parse_args()
directory = os.path.dirname(args.binary)
binary_path = os.path.join(
args.dest_root, os.path.relpath(args.binary, directory))
# the main binary should be first in the list.
dest_files = [binary_path]
with open(args.output, 'w') as f:
print(f'{binary_path}={args.binary}', file=f)
for path, dirs, files in os.walk(os.path.abspath(directory)):
for filename in files:
source_filepath = os.path.join(path, filename)
filepath = os.path.join(
args.dest_root, os.path.relpath(source_filepath, directory))
if binary_path != filepath:
dest_files += [filepath]
print(f'{filepath}={source_filepath}', file=f)
metadata = {
'files': dest_files,
'name': args.name,
'root': 'tools',
'type': 'companion_host_tool'
}
with open(args.meta_out, 'w') as f:
print(json.dumps(metadata, sort_keys=True, indent=2), file=f)
if __name__ == "__main__":
sys.exit(main())
|
python
|
# Angus Dempster, Francois Petitjean, Geoff Webb
#
# @article{dempster_etal_2020,
# author = {Dempster, Angus and Petitjean, Fran\c{c}ois and Webb, Geoffrey I},
# title = {ROCKET: Exceptionally fast and accurate time classification using random convolutional kernels},
# year = {2020},
# journal = {Data Mining and Knowledge Discovery},
# doi = {https://doi.org/10.1007/s10618-020-00701-z}
# }
#
# https://arxiv.org/abs/1910.13051 (preprint)
import os
import argparse
import numpy as np
import pandas as pd
import time
import torch, torch.nn as nn, torch.optim as optim
from rocket_functions import apply_kernels, generate_kernels
# == notes =====================================================================
# Reproduce the scalability experiments.
#
# Arguments:
# -path --data_path   : directory containing satellite_train.npy and satellite_test.npy
# -o    --output_path : path for results
# -k    --num_kernels : number of kernels
# -seed --seed        : random seed
# == parse arguments ===========================================================
parser = argparse.ArgumentParser()
parser.add_argument("-path", "--data_path", required = True)
parser.add_argument("-o", "--output_path", required = True)
parser.add_argument("-k", "--num_kernels", type = int)
parser.add_argument("-seed", "--seed", type=int)
arguments = parser.parse_args()
# == training function =========================================================
def train(X,
Y,
X_validation,
Y_validation,
kernels,
num_features,
num_classes,
minibatch_size = 256,
max_epochs = 100,
patience = 2, # x10 minibatches; reset if loss improves
tranche_size = 2 ** 11,
cache_size = 2 ** 14): # as much as possible
# -- init ------------------------------------------------------------------
def init(layer):
if isinstance(layer, nn.Linear):
nn.init.constant_(layer.weight.data, 0)
nn.init.constant_(layer.bias.data, 0)
# -- model -----------------------------------------------------------------
model = nn.Sequential(nn.Linear(num_features, num_classes)) # logistic / softmax regression
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor = 0.5, min_lr = 1e-8)
model.apply(init)
# -- run -------------------------------------------------------------------
minibatch_count = 0
best_validation_loss = np.inf
stall_count = 0
stop = False
num_examples = len(X)
    num_tranches = int(np.ceil(num_examples / tranche_size))
cache = np.zeros((min(cache_size, num_examples), num_features))
cache_count = 0
for epoch in range(max_epochs):
if epoch > 0 and stop:
break
for tranche_index in range(num_tranches):
if epoch > 0 and stop:
break
a = tranche_size * tranche_index
b = a + tranche_size
Y_tranche = Y[a:b]
# if cached, use cached transform; else transform and cache the result
if b <= cache_count:
X_tranche_transform = cache[a:b]
else:
X_tranche = X[a:b]
X_tranche = (X_tranche - X_tranche.mean(axis = 1, keepdims = True)) / X_tranche.std(axis = 1, keepdims = True) # normalise time series
X_tranche_transform = apply_kernels(X_tranche, kernels)
if epoch == 0 and tranche_index == 0:
# per-feature mean and standard deviation (estimated on first tranche)
f_mean = X_tranche_transform.mean(0)
f_std = X_tranche_transform.std(0) + 1e-8
# normalise and transform validation data
X_validation = (X_validation - X_validation.mean(axis = 1, keepdims = True)) / X_validation.std(axis = 1, keepdims = True) # normalise time series
X_validation_transform = apply_kernels(X_validation, kernels)
X_validation_transform = (X_validation_transform - f_mean) / f_std # normalise transformed features
X_validation_transform = torch.FloatTensor(X_validation_transform)
Y_validation = torch.LongTensor(Y_validation)
X_tranche_transform = (X_tranche_transform - f_mean) / f_std # normalise transformed features
if b <= cache_size:
cache[a:b] = X_tranche_transform
cache_count = b
X_tranche_transform = torch.FloatTensor(X_tranche_transform)
Y_tranche = torch.LongTensor(Y_tranche)
minibatches = torch.randperm(len(X_tranche_transform)).split(minibatch_size)
for minibatch_index, minibatch in enumerate(minibatches):
if epoch > 0 and stop:
break
# abandon undersized minibatches
if minibatch_index > 0 and len(minibatch) < minibatch_size:
break
# -- (optional) minimal lr search ------------------------------
# default lr for Adam may cause training loss to diverge for a
# large number of kernels; lr minimising training loss on first
# update should ensure training loss converges
if epoch == 0 and tranche_index == 0 and minibatch_index == 0:
candidate_lr = 10 ** np.linspace(-1, -6, 6)
best_lr = None
best_training_loss = np.inf
for lr in candidate_lr:
lr_model = nn.Sequential(nn.Linear(num_features, num_classes))
lr_optimizer = optim.Adam(lr_model.parameters())
lr_model.apply(init)
for param_group in lr_optimizer.param_groups:
param_group["lr"] = lr
# perform a single update
lr_optimizer.zero_grad()
Y_tranche_predictions = lr_model(X_tranche_transform[minibatch])
training_loss = loss_function(Y_tranche_predictions, Y_tranche[minibatch])
training_loss.backward()
lr_optimizer.step()
Y_tranche_predictions = lr_model(X_tranche_transform)
training_loss = loss_function(Y_tranche_predictions, Y_tranche).item()
if training_loss < best_training_loss:
best_training_loss = training_loss
best_lr = lr
for param_group in optimizer.param_groups:
param_group["lr"] = best_lr
# -- training --------------------------------------------------
optimizer.zero_grad()
Y_tranche_predictions = model(X_tranche_transform[minibatch])
training_loss = loss_function(Y_tranche_predictions, Y_tranche[minibatch])
training_loss.backward()
optimizer.step()
minibatch_count += 1
if minibatch_count % 10 == 0:
Y_validation_predictions = model(X_validation_transform)
validation_loss = loss_function(Y_validation_predictions, Y_validation)
scheduler.step(validation_loss)
if validation_loss.item() >= best_validation_loss:
stall_count += 1
if stall_count >= patience:
stop = True
else:
best_validation_loss = validation_loss.item()
if not stop:
stall_count = 0
return model, f_mean, f_std
# == run =======================================================================
# -- run through dataset sizes -------------------------------------------------
np.random.seed(arguments.seed)
torch.manual_seed(arguments.seed)
all_num_training_examples = [900000]
results = pd.DataFrame(index = all_num_training_examples,
columns = ["accuracy", "time_training_seconds"],
data = 0)
results.index.name = "num_training_examples"
print(f" {arguments.num_kernels:,} Kernels ".center(80, "="))
for num_training_examples in all_num_training_examples:
if num_training_examples == all_num_training_examples[0]:
print("Number of training examples:" + f"{num_training_examples:,}".rjust(75 - 28 - 5, " ") + ".....", end = "", flush = True)
else:
print(f"{num_training_examples:,}".rjust(75 - 5, " ") + ".....", end = "", flush = True)
# -- read training and validation data -------------------------------------
# if training data does not fit in memory, it is possible to load the
# training data inside the train(...) function, using the *chunksize*
# argument for pandas.read_csv(...) (and roughly substituting chunks for
# tranches); similarly, if the cache does not fit in memory, consider
# caching the transformed features on disk
path = arguments.data_path
train_file = os.path.join(path, 'satellite_train.npy')
test_file = os.path.join(path, 'satellite_test.npy')
X_training, Y_training = np.load(train_file, allow_pickle=True)[()]['data'], np.load(train_file,allow_pickle=True)[()]['label']
X_validation, Y_validation = np.load(test_file, allow_pickle=True)[()]['data'], np.load(test_file, allow_pickle=True)[()]['label']
Y_training = Y_training - 1
Y_validation = Y_validation - 1
print(np.unique(Y_training))
# -- generate kernels ------------------------------------------------------
kernels = generate_kernels(X_training.shape[1], arguments.num_kernels)
# -- train -----------------------------------------------------------------
time_a = time.perf_counter()
model, f_mean, f_std = train(X_training,
Y_training,
X_validation,
Y_validation,
kernels,
arguments.num_kernels * 2,
num_classes = 24)
time_b = time.perf_counter()
results.loc[num_training_examples, "time_training_seconds"] = time_b - time_a
# -- test ------------------------------------------------------------------
# read test data (here, we test on a subset of the full test data)
X_test, Y_test = X_validation, Y_validation
# normalise and transform test data
X_test = (X_test - X_test.mean(axis = 1, keepdims = True)) / X_test.std(axis = 1, keepdims = True) # normalise time series
X_test_transform = apply_kernels(X_test, kernels)
X_test_transform = (X_test_transform - f_mean) / f_std # normalise transformed features
# predict
model.eval()
Y_test_predictions = model(torch.FloatTensor(X_test_transform))
results.loc[num_training_examples, "accuracy"] = (Y_test_predictions.max(1)[1].numpy() == Y_test).mean()
print("Done.")
print(f" FINISHED ".center(80, "="))
results.to_csv(f"{arguments.output_path}/results_scalability_k={arguments.num_kernels}.csv")
|
python
|
from django.contrib import admin
from django.urls import path
from . import views
app_name = 'sbadmin'
urlpatterns = [
path('table/', views.TableView.as_view(), name='table'),
path('chart/', views.ChartView.as_view(), name='chart'),
path('', views.IndexView.as_view(), name='index'),
path('home/', views.home, name='home')
]
|
python
|
# -*- coding: utf-8 -*-
# library imports
import os
import datetime
# counter variable for scanned folders and a separator
czysazdjecia = countope = 0
lines_seen = set()
# current date and time
czasstart = datetime.datetime.now()
print("~~~~~~START~~~~~~\t" + str(czasstart).split(".")[0])
# remove this if a hard-coded rootdir is used instead; then pass 'rootdir' to os.walk()
print("\nPodaj ścieżkę do sprawdzania wykonawców:")
sprwyk = input()
print("\nPodaj ścieżkę dla ew. pliku z błędami:")
sciezka = input()
bledny = (
sciezka
+ "\\"
+ os.path.basename(os.path.normpath(sciezka))
+ "_"
+ czasstart.strftime("%Y-%m-%d")
+ ".txt"
)
print("\nPlik zostanie umieszczony w:\n" + bledny)
input("\nWciśnij ENTER aby kontynuować...")
with open(
r"V:\Dane robocze\maciej\regexy_formuly_skrypty_polecenia\spis_wykonawcow_zambrowski.txt", # noqa
"r",
) as spiswyk:
for line in spiswyk:
lines_seen.add(line.rstrip("\n"))
# for _, dirnames, _ in os.walk(sprwyk):
# countope += len(dirnames)
for subdir, dirs, files in os.walk(sprwyk):
print(countope)
countope += 1
for file in files:
if file == "opis.txt":
opisek = os.path.join(subdir, file)
with open(opisek, "r") as opis:
for line in opis:
if line.startswith("X:"):
if line.rstrip("\n") not in lines_seen:
with open(bledny, "a") as bl:
bl.write(line)
# total script runtime
czaskoniec = datetime.datetime.now()
roznicaczas = czaskoniec - czasstart
czastrwania = roznicaczas.total_seconds() / 60
print("\nCałość zajęła (minuty):")
print("%.2f" % czastrwania)
print("\n~~~~~~KONIEC~~~~~~\t" + str(czaskoniec).split(".")[0])
input("Wciśnij ENTER aby wyjść...")
|
python
|
import os
import sys
import requests
import ConnectWindow, ConnectedWindow, Driver
from PySide2 import QtCore
from PySide2.QtUiTools import QUiLoader
from PySide2.QtWidgets import QApplication, QLineEdit, QPushButton, QTabWidget, QWidget
class LoginWindow(QtCore.QObject):
def __init__(self, ui_file, driver_window, parent=None):
super(LoginWindow, self).__init__(parent)
self.driver_window = driver_window
ui_file = QtCore.QFile(ui_file)
ui_file.open(QtCore.QFile.ReadOnly)
loader = QUiLoader()
self.window = loader.load(ui_file)
ui_file.close()
self.tab_controller = self.window.findChild(QTabWidget)
cancel_button = self.tab_controller.findChild(QPushButton, 'cancel_button')
cancel_button.clicked.connect(self.cancel_button_clicked)
cancel_button_su = self.tab_controller.findChild(QPushButton, 'cancel_button_su')
cancel_button_su.clicked.connect(self.cancel_button_clicked)
login_button = self.tab_controller.findChild(QPushButton, 'login_button')
login_button.clicked.connect(self.login_button_clicked)
sign_up_button = self.tab_controller.findChild(QPushButton, 'sign_up_button')
sign_up_button.clicked.connect(self.sign_up_button_clicked)
self.window.show()
def login_button_clicked(self):
login_button = self.tab_controller.findChild(QPushButton, 'login_button')
login_button.setEnabled(False)
username = self.tab_controller.findChild(QLineEdit, 'username_field')
password = self.tab_controller.findChild(QLineEdit, 'password_field')
url = 'http://127.0.0.1:5000/login'
data = {
'username' : username.text(),
'password' : password.text()
}
result = requests.post(url = url, data = data)
if (result.text == 'Invalid Password' or result.text == 'Username Does Not Exist'):
self.window.statusBar().showMessage(result.text)
login_button.setEnabled(True)
else:
self.window.statusBar().showMessage(result.text)
self.driver_window.LoginSignal()
def sign_up_button_clicked(self):
sign_up_button = self.tab_controller.findChild(QPushButton, 'sign_up_button')
sign_up_button.setEnabled(False)
username = self.tab_controller.findChild(QLineEdit, 'username_input_su')
email = self.tab_controller.findChild(QLineEdit, 'email_input_su')
password = self.tab_controller.findChild(QLineEdit, 'password_input_su')
password_conf = self.tab_controller.findChild(QLineEdit, 'confirm_password_su')
url = 'http://127.0.0.1:5000/signup'
data = {
'username' : username.text(),
'email' : email.text(),
'password' : password.text(),
'password_conf' : password_conf.text()
}
result = requests.post(url = url, data = data)
if (result.text == 'A User Already Exists With That Email Address' or
result.text == 'A User Already Exists With That Username' or
result.text == 'One or More Fields Were Left Blank'):
self.window.statusBar().showMessage(result.text)
sign_up_button.setEnabled(True)
else:
self.window.statusBar().showMessage(result.text)
def cancel_button_clicked(self):
self.window.close()
def get_mainwindow(driver_window):
window = LoginWindow('secureshellinterface.ui', driver_window)
return window
if __name__ == '__main__':
os.system('python Driver.py')
|
python
|
__author__ = 'joon'
import sys
sys.path.insert(0, 'ResearchTools')
from util.construct_filenames import create_token
from util.construct_controls import subcontrol
from util.ios import mkdir_if_missing, save_to_cache, load_from_cache
from util.maths import Jsoftmax, proj_lp, proj_lf, compute_percentiles
from util.dict_with_dot import Map
from util.time_debugging import debug_show_time_elapsed
from util.images import load_image_PIL
from util.construct_args import control2list
from util.parallel import apply_async_wrapper, Sum
from vis.imshow import fpim, vis_seg
from image.mask_box import mask2bbox, bbox_area, bbox_ratio, carve_bbox_to_im
from image.cc import compute_cc
from image.bw_to_rgb import bw_to_rgb
from image.crop import random_crop, random_translation
from image.iou import compute_iou
|
python
|
"""Backend for rendering multi-frame images using PIL.
These are internal APIs and subject to change at any time.
"""
try:
    import PIL.Image
except ImportError:
    PIL = None
from .shared import Backend, BackendError, check_output
class PILMultiframeBackend(Backend):
"""Backend for rendering multi-frame images.
This backend is used to render image formats supporting multiple
frames in a single file, such as GIF and TIFF.
Note: For performance reasons, support for rendering single-frame
images is built into the DocViewer widget.
This backend requires the PIL module.
"""
__slots__ = ["im"]
def __init__(self, input_path, **kw):
"""Return a new rendering backend."""
Backend.__init__(self, input_path, **kw)
if PIL:
self.im = PIL.Image.open(input_path)
else:
raise BackendError(
"Could not render {0} because PIL is not available "
"on your system."
.format(input_path)
)
def page_count(self):
"""Return the number of pages in the input file."""
        if hasattr(self.im, "n_frames"):
            # This attribute is available for some formats, like TIFF
            return self.im.n_frames
else:
# Count the number of pages manually
pc = 1
self.im.seek(0)
try:
while True:
self.im.seek(self.im.tell() + 1)
pc += 1
except (EOFError):
# We've seen every frame in the image
return pc
def render_page(self, page_num):
"""Render the specified page of the input file."""
self.im.seek(page_num - 1)
return self.im.copy()
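# Usage sketch (illustrative; assumes Pillow is installed and "animation.gif"
# is a multi-frame image):
#
#     backend = PILMultiframeBackend("animation.gif")
#     for page in range(1, backend.page_count() + 1):
#         frame = backend.render_page(page)   # a copy of that frame as a PIL image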
|
python
|
import jwt.exceptions
import pytest
from okay.jwt import main, decode
__author__ = "Cesar Alvernaz"
__copyright__ = "Cesar Alvernaz"
__license__ = "MIT"
from fixtures.jwt_fixtures import VALID_TOKEN, SECRET, \
EXPECTED_TOKEN_PAYLOAD, INVALID_SECRET, VALID_RS256_TOKEN, \
EXPECTED_TOKEN_RS256_PAYLOAD
def test_decode_without_signature_verification():
assert decode(VALID_TOKEN, SECRET) == EXPECTED_TOKEN_PAYLOAD
def test_decode_with_signature_verification():
assert decode(VALID_TOKEN, SECRET, verify_signature=True) == \
EXPECTED_TOKEN_PAYLOAD
def test_decode_with_invalid_secret():
with pytest.raises(jwt.exceptions.InvalidSignatureError):
decode(VALID_TOKEN, INVALID_SECRET, verify_signature=True)
def test_decode_with_invalid_secret_no_validation():
assert decode(VALID_TOKEN, INVALID_SECRET, verify_signature=False) == \
EXPECTED_TOKEN_PAYLOAD
def test_decode_with_invalid_token_type_with_validation():
with pytest.raises(jwt.exceptions.InvalidAlgorithmError):
decode(VALID_RS256_TOKEN, SECRET, verify_signature=True)
def test_decode_with_invalid_token_type_with_no_validation():
assert decode(VALID_RS256_TOKEN, SECRET, verify_signature=False) == \
EXPECTED_TOKEN_RS256_PAYLOAD
# def test_main(capsys):
# """CLI Tests"""
# # capsys is a pytest fixture that allows asserts agains stdout/stderr
# # https://docs.pytest.org/en/stable/capture.html
# main(["7"])
# captured = capsys.readouterr()
# assert "The 7-th Fibonacci number is 13" in captured.out
|
python
|
import os,re, sys
from byo.track import Track, load_track
from byo.io.genome_accessor import GenomeCache, RemoteCache
from byo.io.annotation import AnnotationAccessor
#from byo.io.lazytables import NamedTupleImporter as Importer
import byo.config
import logging
class LazyTranscriptLoader(object):
def __init__(self,system = None):
self.transcripts = None
self.system = system
self.logger = logging.getLogger("LazyTranscriptLoader(system={system.name})".format(system=self.system) )
def __getitem__(self,txname):
if not self.transcripts:
self.transcripts = self.load_transcript_catalogs()
return self.transcripts[txname]
def load_transcript_catalogs(self):
from byo.gene_model import transcripts_from_UCSC
import glob
path = os.path.join(self.system.root, "annotation", self.system.name, "*.ucsc.gz")
sources = glob.glob(path)
if sources:
self.logger.debug('loading {0}'.format(sources[0]))
T = transcripts_from_UCSC(sources[0],system = self.system)
for s in sources[1:]:
self.logger.debug('loading {0}'.format(s))
T.load(s)
self.logger.info("loaded {0} transcript models from {1} source(s)".format(len(T), len(sources)))
return T
else:
self.logger.error("no transcript models found in path '{0}'".format(path))
return {}
class ModelSystem(object):
def __init__(self, name, genome = None, transcript_models = None, root = byo.config.system_root):
self.name = name
self.root = root
        if genome is None:
if getattr(byo.config,"genome_server",None):
self.genome = RemoteCache(byo.config.genome_server)[name]
else:
self.genome = GenomeCache(os.path.join(root,"genomes"))[name]
else:
self.genome = genome
        if transcript_models is None:
self.transcript_models = LazyTranscriptLoader(system=self)
else:
self.transcript_models = transcript_models
# Fails before first access due to lazy loading of genome
#self.chr_sizes = self.genome.data.chrom_stats
    def get_annotations_track(self, path="", accessor=AnnotationAccessor, **kwargs):
if not path:
path = os.path.join(self.root,"annotation",self.name,"compiled")
return Track(path,accessor,**kwargs)
def load_track(self, path, **kwargs):
return load_track(path, system=self, **kwargs)
def get_refGenes(self):
return self.transcript_models
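# Usage sketch (illustrative; 'hg19' and the transcript ID are placeholders and
# assume the corresponding genome/annotation data exists under the system root):
#
#     hg19 = ModelSystem('hg19')
#     tx = hg19.transcript_models['NM_000546']   # triggers lazy loading of the *.ucsc.gz catalogs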
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2018/5/28 22:03
# @Author : ddvv
# @Site : http://ddvv.life
# @File : xiaomistorespider.py
# @Software: PyCharm
"""
第三方依赖库: Crypto
功能:
1. 获取小米商店应用评论
消息说明:
1. "AppSpider-0010-001" : 应用评论
"""
import scrapy
from appspider.commonapis import *
CONST_INFO = {
'app_name': 'com.xiaomi.market',
'app_version': 'R.1.4.5',
'spider_author': 'ddvv'
}
class xiaomistorecommentsspider(scrapy.Spider):
"""
爬取中国裁判文书APP
"""
    # spider name
name = 'xiaomistorecommentsspider'
def __init__(self, appid, **kwargs):
super().__init__(**kwargs)
self.header = {
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 7.0; HUAWEI NXT-AL10 Build/HUAWEINXT-AL10)",
"Host": "app.market.xiaomi.com",
"Accept-Encoding": "gzip, deflate"
}
self.appid = appid
    # spider entry point; issues the requests
def start_requests(self):
"""
"""
appid = self.appid
header = self.header
burl = "https://app.market.xiaomi.com/apm/comment/list/{" \
"appid}?channel=market_100_1_android&clientId=70a40c54102b9be2da4664cd819bbc32&co=CN" \
"&densityScaleFactor=3.0&imei=6066eb90c6d80f6e8eaa7afd48256483&la=zh&marketVersion=147&model=HUAWEI" \
"+NXT-AL10&os=C00B577&page={page}&resolution=1080*1812&sdk=24&session=2jmj7l5rSw0yVb_v"
for page in range(0, 10):
url = burl.format(appid=appid, page=page)
yield scrapy.Request(url=url,
headers=header,
method='GET',
callback=self.parse)
    # parse the response and push the item to the pipeline
def parse(self, response):
"""
:param response: 爬取的数据返回值。
"""
try:
js = json.loads(response.body.decode())
js['appid'] = self.appid
item = setappspideritem('AppSpider-0009-001', 'json', js, **CONST_INFO)
yield item
except Exception as e:
logger.error(str(e))
|
python
|
#!/usr/bin/python
# Simple tcp fuzz against a target
import socket
from sys import exit,argv
if len(argv) < 2:
print "Performs a simple fuzz against a target"
print "Usage: %s <Target IP Address/hostname> <Target Port>" % str(argv[0])
exit(1)
# Create an array of buffers: a single "A", then strings of 100 bytes growing by 200 bytes per step (30 buffers in total).
buffer=["A"]
counter=100
while len(buffer) <= 30:
buffer.append("A"*counter)
counter=counter+200
for string in buffer:
print "Fuzzing %s:%s with %s bytes" % (str(argv[1]),int(argv[2]),len(string))
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect=s.connect((str(argv[1]),int(argv[2])))
# This next part depends on whatever the RFC is for what you're trying to
# exploit. Up to you to put the 'string' in the right place. Be sure to
# receive bytes after sending anything.
s.recv(1024) # Grab the banner, do not remove
s.send(string + "\r\n") # Sends your evil buffer as 'string'
s.send('QUIT\r\n') # Replace 'QUIT' with whatever ends your session
s.close()
|
python
|
#!/usr/bin/python
import sys
import re
threshold = float(sys.argv[1])
tp = 0
fp = 0
fn = 0
typePr = {}
for line in sys.stdin:
if re.search(r'^\d', line):
fields = line.rstrip('\n').split(' ')
gold = fields[1]
(predicted, conf) = fields[2].split(':')
print "%s\t%s\t%s" % (gold, predicted, conf)
conf = float(conf)
if not typePr.has_key(gold):
typePr[gold] = [0.0, 0.0, 0.0] #tp, fp, fn
if conf > threshold and predicted != 'error':
if predicted == gold:
tp += 1.0
typePr[gold][0] += 1.0
else:
fp += 1.0
typePr[gold][1] += 1.0
fn += 1.0
typePr[gold][2] += 1.0
elif gold != 'error':
fn += 1
print "tp=%s\tfp=%s\tfn=%s" % (tp, fp, fn)
p = float(tp) / float(tp + fp)
r = float(tp) / float(tp + fn)
f = 2 * p * r / (p + r)
print "p=%s\tr=%s\tf=%s" % (p,r,f)
for t in typePr.keys():
tp = typePr[t][0]
fp = typePr[t][1]
fn = typePr[t][2]
if tp + fp > 0:
p = float(tp) / float(tp + fp)
else:
p = 0.0
if tp + fn > 0:
r = float(tp) / float(tp + fn)
else:
r = 0.0
if p + r > 0:
f = 2 * p * r / (p + r)
else:
f = 0.0
print "%s\tp=%s\tr=%s\tf=%s" % (t,p,r,f)
|
python
|
import datetime as dt
import re
from collections import namedtuple
from pathlib import Path
import pytest
import ravenpy
from ravenpy.config.commands import (
BasinStateVariablesCommand,
EvaluationPeriod,
GriddedForcingCommand,
HRUStateVariableTableCommand,
)
from ravenpy.config.rvs import OST, RVC, RVH, RVI, RVP, RVT, Config
from ravenpy.extractors import (
RoutingProductGridWeightExtractor,
RoutingProductShapefileExtractor,
)
from ravenpy.utilities.testdata import get_local_testdata
class TestRV:
def test_end_date(self):
rvi = RVI(None)
rvi.run_name = "test"
rvi.start_date = dt.datetime(2000, 1, 1)
rvi.end_date = dt.datetime(2000, 1, 11)
assert 10 == rvi.duration
rvi.duration = 11
assert dt.datetime(2000, 1, 12) == rvi.end_date
def test_evaluation_metrics(self):
rvi = RVI(None)
rvi.evaluation_metrics = "LOG_NASH"
with pytest.raises(ValueError):
rvi.evaluation_metrics = "JIM"
def test_evaluation_periods(self):
rvi = RVI(None)
assert rvi.evaluation_periods == ""
rvi.evaluation_periods = [
EvaluationPeriod("dry", "1980-01-01", "1989-12-31"),
EvaluationPeriod("wet", "1990-01-01", "2000-12-31"),
]
out = rvi.evaluation_periods
assert len(out.split("\n")) == 2
assert out.startswith(":EvaluationPeriod")
# Check date input
d = EvaluationPeriod("dry", dt.date(1980, 1, 1), dt.date(1989, 12, 31))
assert str(d) == str(rvi.evaluation_periods.splitlines()[0])
class TestOst:
def test_random(self):
o = OST(None)
assert o.random_seed == ""
o.random_seed = 0
assert o.random_seed == "RandomSeed 0"
def test_evaluation_metric_multiplier(self):
config = Config(model=None)
config.rvi.evaluation_metrics = ["RMSE", "NASH_SUTCLIFFE"]
assert config.ost.evaluation_metric_multiplier == 1
with pytest.raises(ValueError):
config.rvi.evaluation_metrics = ["PCT_BIAS"]
config.ost.evaluation_metric_multiplier
class TestRVI:
def test_supress_output(self):
rvi = RVI(None)
rvi.suppress_output = True
assert rvi.suppress_output == ":SuppressOutput\n:DontWriteWatershedStorage"
rvi = RVI(None)
rvi.suppress_output = False
assert rvi.suppress_output == ""
class TestRVC:
@classmethod
def setup_class(self):
sol = open(get_local_testdata("gr4j_cemaneige/solution.rvc")).read()
self.rvc = RVC.create_solution(sol)
def test_parse(self):
assert len(self.rvc.hru_states) == 1
assert self.rvc.hru_states[1].atmosphere == 821.98274
assert self.rvc.hru_states[1].atmos_precip == -1233.16
assert len(self.rvc.basin_states) == 1
assert self.rvc.basin_states[1].channel_storage == 0
assert self.rvc.basin_states[1].qout == (1, 13.21660, 13.29232)
def test_format(self):
rv = self.rvc.to_rv()
assert ":BasinIndex 1 watershed" in rv
class TestRVH:
@classmethod
def setup_class(self):
shp = get_local_testdata("raven-routing-sample/finalcat_hru_info.zip")
extractor = RoutingProductShapefileExtractor(shp)
config = extractor.extract()
self.rvh = RVH(None)
for k, v in config.items():
if k != "channel_profiles":
self.rvh.update(k, v)
def test_import_process(self):
assert len(self.rvh.subbasins) == 46
assert len(self.rvh.land_subbasin_ids) == 41
assert len(self.rvh.lake_subbasin_ids) == 5
assert len(self.rvh.reservoirs) == 5
assert len(self.rvh.hrus) == 51
def test_format(self):
res = self.rvh.to_rv()
sbs = (
re.search(":SubBasins(.+):EndSubBasins", res, re.MULTILINE | re.DOTALL)
.group(1)
.split("\n")
)
sbs = list(filter(None, sbs)) # remove whitespaces
assert len(sbs) == len(self.rvh.subbasins) + 2
assert res.count("ZERO-") == len(self.rvh.reservoirs)
hrus = (
re.search(":HRUs(.+):EndHRUs", res, re.MULTILINE | re.DOTALL)
.group(1)
.split("\n")
)
hrus = list(filter(None, hrus)) # remove whitespaces
assert len(hrus) == len(self.rvh.hrus) + 2
assert res.count(":Reservoir") == len(self.rvh.reservoirs)
class TestRVP:
@classmethod
def setup_class(self):
shp = get_local_testdata("raven-routing-sample/finalcat_hru_info.zip")
extractor = RoutingProductShapefileExtractor(shp)
config = extractor.extract()
self.rvp = RVP(None)
self.rvp.tmpl = "{channel_profiles}"
self.rvp.channel_profiles = config["channel_profiles"]
def test_import_process(self):
assert len(self.rvp.channel_profiles) == 46
def test_format(self):
res = self.rvp.to_rv()
assert res.count(":ChannelProfile") == 46
assert res.count(":EndChannelProfile") == 46
class TestRVT:
@classmethod
def setup_class(self):
input_file = get_local_testdata("raven-routing-sample/VIC_streaminputs.nc")
routing_file = get_local_testdata("raven-routing-sample/finalcat_hru_info.zip")
extractor = RoutingProductGridWeightExtractor(input_file, routing_file)
gws = extractor.extract()
self.gfc = GriddedForcingCommand(grid_weights=gws)
def test_import_process(self):
res = self.gfc.to_rv()
assert ":NumberHRUs 51" in res
assert ":NumberGridCells 100" in res
# FIXME: This test is not superb.
assert len(res.split("\n")) == 226
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate, get_last_day, add_days
from erpnext.assets.doctype.asset.test_asset import create_asset_data
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import make_purchase_receipt
from erpnext.assets.doctype.asset_value_adjustment.asset_value_adjustment import get_current_asset_value
class TestAssetValueAdjustment(unittest.TestCase):
def setUp(self):
create_asset_data()
def test_current_asset_value(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset_doc = frappe.get_doc('Asset', asset_name)
month_end_date = get_last_day(nowdate())
purchase_date = nowdate() if nowdate() != month_end_date else add_days(nowdate(), -15)
asset_doc.available_for_use_date = purchase_date
asset_doc.purchase_date = purchase_date
asset_doc.calculate_depreciation = 1
asset_doc.append("finance_books", {
"expected_value_after_useful_life": 200,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": month_end_date
})
asset_doc.submit()
current_value = get_current_asset_value(asset_doc.name)
self.assertEqual(current_value, 100000.0)
def test_asset_depreciation_value_adjustment(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset_doc = frappe.get_doc('Asset', asset_name)
asset_doc.calculate_depreciation = 1
month_end_date = get_last_day(nowdate())
purchase_date = nowdate() if nowdate() != month_end_date else add_days(nowdate(), -15)
asset_doc.available_for_use_date = purchase_date
asset_doc.purchase_date = purchase_date
asset_doc.calculate_depreciation = 1
asset_doc.append("finance_books", {
"expected_value_after_useful_life": 200,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": month_end_date
})
asset_doc.submit()
current_value = get_current_asset_value(asset_doc.name)
adj_doc = make_asset_value_adjustment(asset = asset_doc.name,
current_asset_value = current_value, new_asset_value = 50000.0)
adj_doc.submit()
expected_gle = (
("_Test Accumulated Depreciations - _TC", 0.0, 50000.0),
("_Test Depreciations - _TC", 50000.0, 0.0)
)
gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type='Journal Entry' and voucher_no = %s
order by account""", adj_doc.journal_entry)
self.assertEqual(gle, expected_gle)
def make_asset_value_adjustment(**args):
args = frappe._dict(args)
doc = frappe.get_doc({
"doctype": "Asset Value Adjustment",
"company": args.company or "_Test Company",
"asset": args.asset,
"date": args.date or nowdate(),
"new_asset_value": args.new_asset_value,
"current_asset_value": args.current_asset_value,
"cost_center": args.cost_center or "Main - _TC"
}).insert()
return doc
|
python
|
import logging
import os
from ...utils import import_export_content
from ...utils import paths
from ...utils import transfer
from kolibri.core.tasks.management.commands.base import AsyncCommand
logger = logging.getLogger(__name__)
class Command(AsyncCommand):
def add_arguments(self, parser):
node_ids_help_text = """
Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.
Make sure to call this near the end of the argument list.
e.g.
kolibri manage importcontent network <channel id> --node_ids <id1>,<id2>, [<ids>,...]
"""
parser.add_argument(
"--node_ids", "-n",
# Split the comma separated string we get, into a list of strings
type=lambda x: x.split(","),
default=[],
required=False,
dest="node_ids",
help=node_ids_help_text,
)
exclude_node_ids_help_text = """
Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.
Make sure to call this near the end of the argument list.
e.g.
kolibri manage importcontent network <channel id> --exclude_node_ids <id1>,<id2>, [<ids>,...]
"""
parser.add_argument(
"--exclude_node_ids",
type=lambda x: x.split(","),
default=[],
required=False,
dest="exclude_node_ids",
help=exclude_node_ids_help_text
)
parser.add_argument("channel_id", type=str)
parser.add_argument("destination", type=str)
def handle_async(self, *args, **options):
channel_id = options["channel_id"]
data_dir = os.path.realpath(options["destination"])
node_ids = options["node_ids"]
exclude_node_ids = options["exclude_node_ids"]
logger.info("Exporting content for channel id {} to {}".format(channel_id, data_dir))
files, total_bytes_to_transfer = import_export_content.get_files_to_transfer(
channel_id, node_ids, exclude_node_ids, True)
exported_files = []
with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update:
for f in files:
if self.is_cancelled():
break
filename = f.get_filename()
srcpath = paths.get_content_storage_file_path(filename)
dest = paths.get_content_storage_file_path(filename, datafolder=data_dir)
# if the file already exists, add its size to our overall progress, and skip
if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size:
overall_progress_update(f.file_size)
continue
copy = transfer.FileCopy(srcpath, dest)
with copy:
with self.start_progress(total=copy.total_size) as file_cp_progress_update:
for chunk in copy:
if self.is_cancelled():
copy.cancel()
break
length = len(chunk)
overall_progress_update(length)
file_cp_progress_update(length)
else:
exported_files.append(dest)
if self.is_cancelled():
# Cancelled, clean up any already downloading files.
for dest in exported_files:
os.remove(dest)
self.cancel()
|
python
|
import os
import mimetypes
import json
from plantcv.plantcv import fatal_error
# Process results. Parse individual image output files.
###########################################
def process_results(job_dir, json_file):
"""Get results from individual files. Parse the results and recompile for SQLite.
Args:
job_dir: Intermediate file output directory.
json_file: Json data table filehandle object.
:param job_dir: str
:param json_file: obj
"""
if os.path.exists(json_file):
with open(json_file, 'r') as datafile:
try:
data = json.load(datafile)
if "variables" not in data or "entities" not in data:
fatal_error("Invalid JSON file")
except:
fatal_error("Invalid JSON file")
else:
# Data dictionary
data = {"variables": {}, "entities": []}
# Walk through the image processing job directory and process data from each file
for (dirpath, dirnames, filenames) in os.walk(job_dir):
for filename in filenames:
# Make sure file is a text or json file
if 'text/plain' in mimetypes.guess_type(filename) or 'application/json' in mimetypes.guess_type(filename):
# Open results file
with open(os.path.join(dirpath, filename)) as results:
obs = json.load(results)
data["entities"].append(obs)
# Keep track of all metadata variables stored
for vars in obs["metadata"]:
data["variables"][vars] = {"category": "metadata", "datatype": "<class 'str'>"}
# Keep track of all observations variables stored
for othervars in obs["observations"]:
data["variables"][othervars] = {"category": "observations",
"datatype": obs["observations"][othervars]["datatype"]}
# Write out json file with info from all images
with open(json_file, 'w') as datafile:
json.dump(data, datafile)
###########################################
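# Example invocation (paths are illustrative): process_results("./job_output", "results.json")
# merges every per-image text/JSON result found under ./job_output into results.json,
# creating that file if it does not already exist.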
|
python
|
import logging
import traceback
import urllib
import datetime
import mimetypes
import os
import sys
import zlib
import gzip
import StringIO
import json
from pylons import request, response, session, tmpl_context as c
from pylons import app_globals
from pypesvds.lib.base import BaseController, render
from pypesvds.lib.packet import Packet
from pypesvds.lib.utils import abort
log = logging.getLogger(__name__)
mimes = os.path.join(os.path.dirname(__file__), 'mime.types')
mimetypes.init([mimes])
class DataController(BaseController):
def _decompress(self, encoding, data):
""" decompress data if it is gzipped """
filedata = data
if encoding == 'gzip':
log.debug('Found gzipped data, decompressing')
# gzip files have a header preceding the zlib stream.
# try with zlib (streams compressed on the fly) and if
# that fails, try the gzip module
try:
filedata = zlib.decompress(data)
except:
gz_data = StringIO.StringIO(data)
filedata = gzip.GzipFile(fileobj=gz_data).read()
return filedata
def create(self, route=None, id=None):
status = {}
try:
content_encoding = request.headers.get('Content-Encoding', None)
content_type = request.headers.get('Content-Type', None)
content_length = request.headers.get('Content-Length', None)
log.debug('content_encoding: %s' % content_encoding)
log.debug('content_type: %s' % content_type)
log.debug('content_length: %s' % content_length)
except Exception as e:
log.error('Controller Exception: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
log.debug(traceback.print_exc())
abort(500, str(e))
else:
# bad content-type
if content_type == 'application/x-www-form-urlencoded':
abort(415, "Invalid or Unspecified Content-Type")
try:
packet = Packet()
# indicates a file upload
if content_type.startswith('multipart/form-data;'):
log.debug('found multipart form data, attempting to find source filename')
part = request.POST['document']
if part.filename:
fname = unicode(part.filename.lstrip(os.sep))
packet.set_meta('filename', fname)
# update content type based on filename
content_type = unicode(mimetypes.guess_type(fname)[0])
data = part.value
else:
data = request.body
# decompress if compressed
filedata = self._decompress(content_encoding, data)
# update content length since we might be decompressed now
content_length = len(filedata)
if content_length > 0:
packet.add('data', filedata)
else:
abort(400, 'Empty Request')
# set optional user provided id
if id is not None:
log.debug('id: %s' % id)
packet.set_meta('id', id)
# set optional user provided routing info
if route is not None:
log.debug('route: %s' % route)
packet.set_meta('route', route)
# set some common meta attributes on the packet
packet.set_meta('requestmethod', request.method)
packet.set_meta('contentlength', content_length)
packet.set_meta('mimetype', content_type)
packet.set_meta('processingtime', unicode(
datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')))
status = app_globals.dfg.send(packet)
# calls into pypes core are asynchronous so we respond as such
if status['status'] == 'success':
response.status = 202
except Exception as e:
log.error('Controller Exception: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
log.debug(traceback.print_exc())
abort(500, str(e))
# return empty body on success otherwise return status object
return None if status['status'] == 'success' else json.dumps(status)
def delete(self, route, id):
status = {}
try:
packet = Packet()
# set packet meta attributes
packet.set_meta('id', id)
packet.set_meta('requestmethod', request.method)
packet.set_meta('processingtime', unicode(
datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')))
status = app_globals.dfg.send(packet)
# calls into pypes core are asynchronous so we respond as such
if status['status'] == 'success':
response.status = 202
except Exception as e:
status = 'An Undefined Error Has Occurred'
log.error('Controller Exception: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
log.debug(traceback.print_exc())
abort(500, str(e))
# return empty body on success otherwise return status object
return None if status['status'] == 'success' else json.dumps(status)
def get(self, route=None, id=None):
status = {}
try:
packet = Packet()
# set packet meta attributes
if id is not None:
packet.set_meta('id', id)
# set optional user provided routing info
if route is not None:
packet.set_meta('route', route)
packet.set_meta('requestmethod', request.method)
packet.set_meta('processingtime', unicode(
datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')))
status = app_globals.dfg.send(packet)
# calls into pypes core are asynchronous so we respond as such
if status['status'] == 'success':
response.status = 202
except Exception as e:
status = 'An Undefined Error Has Occurred'
log.error('Controller Exception: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
log.debug(traceback.print_exc())
response.content_type = 'application/json'
response.status = 500
status['error'] = str(e)
abort(500, str(e))
# return empty body on success otherwise return status object
return None if status['status'] == 'success' else json.dumps(status)
|
python
|
# Generated by Django 2.0.2 on 2018-05-16 11:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('project', '0019_auto_20180516_1034'),
]
operations = [
migrations.AlterField(
model_name='project',
name='allocation_systems',
field=models.ManyToManyField(through='project.ProjectSystemAllocation', to='system.System', verbose_name='Allocation systems'),
),
migrations.AlterField(
model_name='project',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='project.ProjectCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='project',
name='created_time',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created time'),
),
migrations.AlterField(
model_name='project',
name='department',
field=models.CharField(blank=True, max_length=128, verbose_name='Department'),
),
migrations.AlterField(
model_name='project',
name='economic_user',
field=models.BooleanField(default=False, verbose_name='Economic user'),
),
migrations.AlterField(
model_name='project',
name='end_date',
field=models.DateField(verbose_name='End date'),
),
migrations.AlterField(
model_name='project',
name='funding_source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.ProjectFundingSource', verbose_name='Funding source'),
),
migrations.AlterField(
model_name='project',
name='institution',
field=models.ForeignKey(help_text='Institution project is based', on_delete=django.db.models.deletion.CASCADE, to='institution.Institution', verbose_name='Institution'),
),
migrations.AlterField(
model_name='project',
name='members',
field=models.ManyToManyField(through='project.ProjectUserMembership', to=settings.AUTH_USER_MODEL, verbose_name='Members'),
),
migrations.AlterField(
model_name='project',
name='modified_time',
field=models.DateTimeField(auto_now=True, verbose_name='Modified time'),
),
migrations.AlterField(
model_name='project',
name='notes',
field=models.TextField(blank=True, help_text='Internal project notes', max_length=512, verbose_name='Notes'),
),
migrations.AlterField(
model_name='project',
name='requirements_gateways',
field=models.TextField(help_text='Web gateway or portal name and versions', max_length=512, verbose_name='Requirements gateways'),
),
migrations.AlterField(
model_name='project',
name='requirements_onboarding',
field=models.TextField(max_length=512, verbose_name='Requirements onboarding'),
),
migrations.AlterField(
model_name='project',
name='requirements_software',
field=models.TextField(help_text='Software name and versions', max_length=512, verbose_name='Requirements software'),
),
migrations.AlterField(
model_name='project',
name='requirements_training',
field=models.TextField(max_length=512, verbose_name='Requirements training'),
),
migrations.AlterField(
model_name='project',
name='start_date',
field=models.DateField(verbose_name='Start date'),
),
migrations.AlterField(
model_name='project',
name='status',
field=models.PositiveSmallIntegerField(choices=[(1, 'Awaiting Approval'), (2, 'Approved'), (3, 'Declined'), (4, 'Revoked'), (5, 'Suspended'), (6, 'Closed')], default=1, verbose_name='Status'),
),
]
|
python
|
from typing import Tuple, Iterable
from rlp.utils import str_to_bytes
from state.util import utils
from storage.kv_store import KeyValueStorage
# log = get_logger('db')
databases = {}
class KeyValueStorageInMemory(KeyValueStorage):
def __init__(self):
self._dict = {}
def get(self, key):
if isinstance(key, str):
key = key.encode()
return self._dict[key]
def put(self, key, value):
if isinstance(key, str):
key = key.encode()
if isinstance(value, str):
value = value.encode()
self._dict[key] = value
def remove(self, key):
if isinstance(key, str):
key = key.encode()
del self._dict[key]
def setBatch(self, batch: Iterable[Tuple]):
for key, value in batch:
self.put(key, value)
def do_ops_in_batch(self, batch: Iterable[Tuple]):
for op, key, value in batch:
if op == self.WRITE_OP:
self.put(key, value)
elif op == self.REMOVE_OP:
self.remove(key)
else:
raise ValueError('Unknown operation')
def open(self):
pass
def close(self):
pass
def drop(self):
self._dict = {}
def reset(self):
self._dict = {}
def iterator(self, start=None, end=None, include_key=True, include_value=True, prefix=None):
if not (include_key or include_value):
raise ValueError("At least one of includeKey or includeValue "
"should be true")
def filter(key, start, end):
if start and end:
return key in range(start, end)
if start:
return key >= start
if end:
return key <= end
if include_key and include_value:
if start or end:
return {k: v for k, v in self._dict.items() if filter(k, start, end)}
return self._dict.items()
if include_key:
if start or end:
return (k for k in self._dict.keys() if filter(k, start, end))
return self._dict.keys()
if include_value:
if start or end:
return (v for k, v in self._dict.items() if filter(k, start, end))
return self._dict.values()
def closed(self):
return False
def is_byte(self):
return False
def db_path(self) -> str:
return ""
def __eq__(self, other):
return isinstance(other, self.__class__) and self._dict == other._dict
def __hash__(self):
return utils.big_endian_to_int(str_to_bytes(self.__repr__()))
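if __name__ == "__main__":
    # Minimal usage sketch (illustrative; assumes the module's own imports resolve).
    # str keys/values are encoded to bytes by put(), so get() returns bytes.
    store = KeyValueStorageInMemory()
    store.put("greeting", "hello")
    assert store.get("greeting") == b"hello"
    store.setBatch([("a", "1"), ("b", "2")])
    print(sorted(store.iterator(include_value=False)))  # [b'a', b'b', b'greeting']
    store.remove("a")
    store.drop()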
|
python
|
#!/usr/bin/env python
# coding: utf-8
## define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
# 224-5/1 + 1 = 220
# output tensor: (32, 220, 220)
# after maxpool: (32, 110, 110)
self.conv1 = nn.Conv2d(1, 32, 5)
# 110-3/1 + 1 = 108
# output tensor: (64, 108, 108)
# after maxpool: (64, 54, 54)
self.conv2 = nn.Conv2d(32, 64, 3)
self.pool = nn.MaxPool2d(2,2)
self.drop_layer1 = nn.Dropout(p=0.4)
self.drop_layer2 = nn.Dropout(p=0.2)
self.lin1 = nn.Linear(64*54*54, 3)
def forward(self, x):
## Define the feedforward behavior of this model
x = self.pool(F.relu(self.conv1(x)))
x = self.drop_layer1(x)
x = self.pool(F.relu(self.conv2(x)))
x = self.drop_layer2(x)
# prepare for linear layer by flattening
x = x.view(x.size(0), -1)
x = self.lin1(x)
return x
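if __name__ == '__main__':
    # Quick shape sanity check (illustrative): one 224x224 grayscale image should
    # come out as a batch of 3 raw class scores, matching the sizes worked out in
    # the layer comments above.
    net = Net()
    dummy = torch.randn(1, 1, 224, 224)
    print(net(dummy).shape)  # expected: torch.Size([1, 3])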
|
python
|
from .CTCModel import *
|
python
|
'''
Module containing all the requisite classes to perform test steps.
Adding new actions
-------------------
Creating new simple actions in the code is designed to be fairly straightforward, and only
requires three steps:
1. Add an entry for the action on the ``enums`` module
2. Create a function to perform the actual step under the ``TestStep`` class
3. Add an entry to the selector with the enum as a key and the function as a value
Keep in mind that the step function should also validate any required data, and that
updating the schema for proper json validation is essential.
If the parameters for the new action are expected to be enums, you must also add the logic
for converting the parameter from string to enum in the ``UIValidation`` class.
'''
from typing import (
Optional,
Dict,
Any,
)
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from quilla.ctx import Context
from quilla.common.enums import (
UITestActions,
)
from quilla.steps.base_steps import (
BaseStepFactory,
BaseStep
)
# Steps classes
class TestStep(BaseStep, BaseStepFactory):
'''
Class that contains the definition of a single test step.
Used for setting up validations
Args:
ctx: The runtime context of the application
action: The action enum for this step
target: What the target for this step is, if applicable
parameters: Extra options for certain actions
aggregator: The parent object holding this step
driver: The browser driver
Attributes:
selector: A dictionary that maps action enums to the action function
'''
required_params = [
'action',
]
optional_params = [
'target',
'parameters',
]
@classmethod
def from_dict(
cls,
ctx: Context,
action_dict,
driver: Optional[WebDriver] = None
) -> 'TestStep':
'''
Factory method to extract needed parameters from a dictionary
'''
for item in cls.required_params:
if item not in action_dict:
raise AttributeError('Missing one or more required parameters')
params: Dict[str, Any] = {}
for param in cls.required_params:
params[param] = action_dict[param]
for param in cls.optional_params:
if param in action_dict:
params[param] = action_dict[param]
return TestStep(ctx, **params, driver=driver)
def __init__(
self,
ctx: Context,
action: UITestActions,
target: Optional[str] = None,
parameters: Optional[dict] = None,
driver: Optional[WebDriver] = None,
):
super().__init__(ctx, action, target=target, parameters=parameters, driver=driver)
self.selector = {
UITestActions.CLICK: self._click,
UITestActions.CLEAR: self._clear,
UITestActions.SEND_KEYS: self._send_keys,
UITestActions.NAVIGATE_TO: self._navigate_to,
UITestActions.WAIT_FOR_VISIBILITY: self._wait_for_visibility,
UITestActions.WAIT_FOR_EXISTENCE: self._wait_for_existence,
UITestActions.NAVIGATE_BACK: self._navigate_back,
UITestActions.NAVIGATE_FORWARD: self._navigate_forward,
UITestActions.HOVER: self._hover,
UITestActions.REFRESH: self._refresh,
UITestActions.SET_BROWSER_SIZE: self._set_browser_size,
UITestActions.ADD_COOKIES: self._add_cookies,
UITestActions.SET_COOKIES: self._set_cookies,
UITestActions.CLEAR_COOKIES: self._clear_cookies,
UITestActions.REMOVE_COOKIE: self._remove_cookie,
}
def copy(self) -> 'TestStep':
'''
Creates a shallow copy of the TestStep object
This is used so that each browser can have an independent copy of
the steps, in case any script would want to edit individual browser
steps
'''
return TestStep(
self.ctx,
self.action,
self._target, # Make sure it's passed in raw
self._parameters, # Make sure it's passed in raw
self._driver
)
def perform(self):
'''
Runs the specified action. Wrapper for selecting proper inner method
'''
perform_action = self.selector[self.action]
return perform_action()
def _click(self):
self._verify_target()
self.element.click()
def _clear(self):
self._verify_target()
self.element.clear()
def _send_keys(self):
self._verify_parameters('data')
self.element.send_keys(self.parameters['data'])
def _navigate_to(self):
self._verify_target()
self.driver.get(self.target)
def _wait_for(self, condition):
self._verify_parameters('timeoutInSeconds')
WebDriverWait(self.driver, self.parameters['timeoutInSeconds']).until(condition)
def _wait_for_visibility(self):
self._verify_target()
self._wait_for(EC.visibility_of_element_located(self.locator))
def _wait_for_existence(self):
self._verify_target()
self._wait_for(EC.presence_of_element_located(self.locator))
def _navigate_back(self):
self.driver.back()
def _navigate_forward(self):
self.driver.forward()
def _refresh(self):
self.driver.refresh()
def _set_browser_size(self):
self._verify_parameters('width', 'height')
width = self._parameters['width']
height = self._parameters['height']
self.driver.set_window_size(width, height)
def _set_cookies(self):
self._clear_cookies()
self._add_cookies()
def _add_cookies(self):
self._verify_parameters('cookieJar')
self.driver.add_cookie(self.parameters['cookieJar'])
def _remove_cookie(self):
self._verify_parameters('cookieName')
self.driver.delete_cookie(self.parameters['cookieName'])
def _clear_cookies(self):
self.driver.delete_all_cookies()
def _hover(self):
self._verify_target()
ActionChains(self.driver).move_to_element(self.element).perform()
def _set_zoom_level(self):
self._verify_parameters('zoomLevel')
zoom_level = self._parameters['zoomLevel']
self.driver.execute_script(f'document.body.style.zoom="{zoom_level}%"')
|
python
|
execfile('<%= @tmp_dir %>/common.py')
# weblogic node params
WLHOME = '<%= @weblogic_home_dir %>'
JAVA_HOME = '<%= @java_home_dir %>'
WEBLOGIC_VERSION = '<%= @version %>'
# domain params
DOMAIN_PATH = '<%= @domain_dir %>'
DOMAIN = '<%= @domain_name %>'
APP_PATH = '<%= @app_dir %>'
# adminserver params
ADMIN_SERVER_NAME = '<%= @adminserver_name %>'
ADMIN_SERVER_LISTEN_ADDRESS = '<%= @adminserver_listen_address %>'
MACHINE_NAME = 'LocalMachine'
ESS_SERVER_STARTUP_ARGUMENTS = '<%= @ess_server_startup_arguments %>'
ESS_SERVER_LISTEN_PORT = 8201
ESS_CLUSTER = '<%= @ess_cluster %>'
SOA_CLUSTER = '<%= @soa_cluster %>'
OSB_CLUSTER = '<%= @osb_cluster %>'
BAM_CLUSTER = '<%= @bam_cluster %>'
# templates
WLS_EM_TEMPLATE = '<%= @wls_em_template %>'
WLS_ESS_EM_TEMPLATE = '<%= @wls_ess_em_template %>'
WLS_ESS_TEMPLATE = '<%= @wls_ess_template %>'
# repository
REPOS_DBURL = '<%= @repository_database_url %>'
REPOS_DBUSER_PREFIX = '<%= @repository_prefix %>'
REPOS_DBPASSWORD = sys.argv[2]
readDomain(DOMAIN_PATH)
cd('/')
setOption( "AppDir", APP_PATH )
print 'Adding EM Template'
try:
addTemplate(WLS_EM_TEMPLATE)
except:
print "Probably already added error:", sys.exc_info()[0]
print 'Adding ESS Template'
addTemplate(WLS_ESS_TEMPLATE)
addTemplate(WLS_ESS_EM_TEMPLATE)
if not ESS_CLUSTER:
    print 'change ess_server1'
    cd('/')
    changeManagedServer('ess_server1', MACHINE_NAME, ADMIN_SERVER_LISTEN_ADDRESS, ESS_SERVER_LISTEN_PORT, ESS_SERVER_STARTUP_ARGUMENTS, JAVA_HOME)
print 'Change datasources'
print 'Change datasource LocalSvcTblDataSource'
changeDatasource('LocalSvcTblDataSource', REPOS_DBUSER_PREFIX+'_STB', REPOS_DBPASSWORD, REPOS_DBURL)
print 'Call getDatabaseDefaults which reads the service table'
getDatabaseDefaults()
# changeDatasourceToXA('EssDS')
print 'end datasources'
print 'Add server groups WSM-CACHE-SVR WSMPM-MAN-SVR JRF-MAN-SVR to AdminServer'
serverGroup = ["WSM-CACHE-SVR" , "WSMPM-MAN-SVR" , "JRF-MAN-SVR"]
setServerGroups(ADMIN_SERVER_NAME, serverGroup)
serverGroup = ["ESS-MGD-SVRS"]
if ESS_CLUSTER:
print 'Add server group ESS-MGD-SVRS to cluster'
cd('/')
setServerGroups('ess_server1', [])
essServers = getClusterServers(ESS_CLUSTER, ADMIN_SERVER_NAME)
cd('/')
for i in range(len(essServers)):
print "Add server group ESS-MGD-SVRS to " + essServers[i]
setServerGroups(essServers[i] , serverGroup)
print 'Assign cluster to defaultCoherenceCluster'
cd('/')
assign('Cluster',ESS_CLUSTER,'CoherenceClusterSystemResource','defaultCoherenceCluster')
cd('/CoherenceClusterSystemResource/defaultCoherenceCluster')
AllArray = []
if SOA_CLUSTER:
AllArray.append(SOA_CLUSTER)
if BAM_CLUSTER:
AllArray.append(BAM_CLUSTER)
if OSB_CLUSTER:
AllArray.append(OSB_CLUSTER)
if ESS_CLUSTER:
AllArray.append(ESS_CLUSTER)
All = ','.join(AllArray)
set('Target', All)
if 'ess_server1' in essServers:
pass
else:
print "delete ess_server1"
cd('/')
delete('ess_server1', 'Server')
if WEBLOGIC_VERSION == '12.2.1':
updateDomain()
dumpStack()
closeDomain()
readDomain(DOMAIN_PATH)
cleanJMS('UMSJMSSystemResource', 'UMSJMSServer_auto', 'UMSJMSFileStore_auto')
recreateUMSJms12c(ADMIN_SERVER_NAME, SOA_CLUSTER, OSB_CLUSTER, BAM_CLUSTER, ESS_CLUSTER, All)
else:
print 'Add server group ESS-MGD-SVRS to ess_server1'
setServerGroups('ess_server1', serverGroup)
print 'end server groups'
updateDomain()
dumpStack()
closeDomain()
print('Exiting...')
exit()
|
python
|
import random
import sys
min = 1
max = 1000
if len(sys.argv) > 1 :
max = int(sys.argv[1])
number = random.randint(min, max)
print('I have selected a number between %d and %d' % (min, max))
print('Please try to guess my number.')
guess_count = 0
while True :
guess = input('Your guess: ')
try :
guess = int(guess)
    except ValueError :
print("That doesn't look like a number. Try again.")
continue
guess_count += 1
if guess == number :
        print('You guessed my number in %d guesses!' % (guess_count))
break
elif guess > number :
print('Your guess was too high. Try again.')
else :
print('Your guess was too low. Try again.')
|
python
|
import os
import unittest
from monty.json import MontyDecoder
from monty.serialization import loadfn
from robocrys.util import load_condensed_structure_json
class RobocrysTest(unittest.TestCase):
"""Base test class providing access to common test data. """
_module_dir = os.path.dirname(os.path.abspath(__file__))
_structures_dir = os.path.join(_module_dir, "structures")
_condensed_structures_dir = os.path.join(
_module_dir, "condensed_structures")
_test_structures = {}
for _fn in os.listdir(_structures_dir):
if ".json.gz" in _fn:
_test_structures[_fn.split(".")[0]] = loadfn(os.path.join(
_structures_dir, _fn), cls=MontyDecoder)
_test_condensed_structures = {}
for _fn in os.listdir(_condensed_structures_dir):
if ".json.gz" in _fn:
_test_condensed_structures[_fn.split(".")[0]] = \
load_condensed_structure_json(os.path.join(
_condensed_structures_dir, _fn))
@classmethod
def get_structure(cls, name):
return cls._test_structures[name].copy()
@classmethod
def get_condensed_structure(cls, name):
return cls._test_condensed_structures[name].copy()
|
python
|
'''
Created on Jan. 24, 2018
@author Andrew Habib
'''
from statistics import mean
from collections import Counter
import os
from Util import load_parsed_ep, load_parsed_inf, load_parsed_sb, load_json_list, get_list_of_uniq_jsons
def display_min_max_avg_warnings_per_bug_total():
print("\nMin, Max, Avg (warnings per bug) and Total number of warnings")
print("\nBuggy versions:\n")
rel_path = './b/'
ep_all = load_parsed_ep(rel_path + 'ep_parsed.json')
inf_all = load_parsed_inf(rel_path + 'inf_parsed.json')
sb_all = load_parsed_sb(rel_path + 'sb_parsed.json')
print("Tool Min. Max. Avg. Total")
print("Errorprone", get_min_max_avg_warnings_per_bug_total(ep_all))
print("Infer", get_min_max_avg_warnings_per_bug_total(inf_all))
print("Spotbugs", get_min_max_avg_warnings_per_bug_total(sb_all))
print("\nTotal number of warnings by all tools:",
get_min_max_avg_warnings_per_bug_total(ep_all)[3] + get_min_max_avg_warnings_per_bug_total(inf_all)[3] + get_min_max_avg_warnings_per_bug_total(sb_all)[3])
''''''
print("\nFixed versions:\n")
rel_path = './f/'
ep_all = load_parsed_ep(rel_path + 'ep_parsed.json')
inf_all = load_parsed_inf(rel_path + 'inf_parsed.json')
sb_all = load_parsed_sb(rel_path + 'sb_parsed.json')
    print("Tool Min. Max. Avg. Total")
print("Errorprone", get_min_max_avg_warnings_per_bug_total(ep_all))
print("Infer", get_min_max_avg_warnings_per_bug_total(inf_all))
print("Spotbugs", get_min_max_avg_warnings_per_bug_total(sb_all))
print("\nTotal number of warnings by all tools:",
get_min_max_avg_warnings_per_bug_total(ep_all)[3] + get_min_max_avg_warnings_per_bug_total(inf_all)[3] + get_min_max_avg_warnings_per_bug_total(sb_all)[3])
def get_min_max_avg_warnings_per_bug_total(warnings):
count = [w.proj for w in warnings]
counter = Counter(count)
return min(counter.values()), max(counter.values()), mean(counter.values()), len(count)
def get_warnings_bugs_from_each_approach():
print("\nWarnings and bugs from each automatic matching approach")
print("** warnings for combined approach are not unique (duplicates exist) **\n")
rel_path = './diffs_warnings/'
ep_res1 = load_parsed_ep(rel_path + "ep_warnings.json")
inf_res1 = load_parsed_inf(rel_path + "inf_warnings.json")
sb_res1 = load_parsed_sb(rel_path + "sb_warnings.json")
rel_path = './removed_warnings/'
ep_res2 = load_parsed_ep(rel_path + "ep_warnings.json")
inf_res2 = load_parsed_inf(rel_path + "inf_warnings.json")
sb_res2 = load_parsed_sb(rel_path + "sb_warnings.json")
_all_b = []
print("Tool Diff-based Fixed-based Combined")
print(" W B W B W B")
all_b = []
b_diff = get_bugs_from_warnings(ep_res1)
b_fixed = get_bugs_from_warnings(ep_res2)
all_b.extend(b_diff)
all_b.extend(b_fixed)
_all_b.extend(all_b)
print("Error Prone ", len(ep_res1), len(b_diff), len(ep_res2), len(b_fixed), len(ep_res1) + len(ep_res2), len(b_diff | b_fixed))
all_b = []
b_diff = get_bugs_from_warnings(inf_res1)
b_fixed = get_bugs_from_warnings(inf_res2)
all_b.extend(b_diff)
all_b.extend(b_fixed)
_all_b.extend(all_b)
print("Infer ", len(inf_res1), len(b_diff), len(inf_res2), len(b_fixed), len(inf_res1) + len(inf_res2), len(b_diff | b_fixed))
all_b = []
b_diff = get_bugs_from_warnings(sb_res1)
b_fixed = get_bugs_from_warnings(sb_res2)
all_b.extend(b_diff)
all_b.extend(b_fixed)
_all_b.extend(all_b)
print("SpotBugs ", len(sb_res1), len(b_diff), len(sb_res2), len(b_fixed), len(sb_res1) + len(sb_res2), len(b_diff | b_fixed))
    print("\nUnique warnings from each approach and the combined approach:\n")
rel_path = './diffs_warnings/'
ep_res1 = load_json_list(rel_path + "ep_warnings.json")
inf_res1 = load_json_list(rel_path + "inf_warnings.json")
sb_res1 = load_json_list(rel_path + "sb_warnings.json")
rel_path = './removed_warnings/'
ep_res2 = load_json_list(rel_path + "ep_warnings.json")
inf_res2 = load_json_list(rel_path + "inf_warnings.json")
sb_res2 = load_json_list(rel_path + "sb_warnings.json")
print("Ep ", len(ep_res1), len(ep_res2), len(get_list_of_uniq_jsons(ep_res1 + ep_res2)))
print("Inf", len(inf_res1), len(inf_res2), len(get_list_of_uniq_jsons(inf_res1 + inf_res2)))
print("Sb ", len(sb_res1), len(sb_res2), len(get_list_of_uniq_jsons(sb_res1 + sb_res2)))
print("\nUnique bugs from combined approach: ", len(set(_all_b)))
def get_bugs_from_warnings(warnings):
bugs = set(w.proj for w in warnings)
return bugs
def count_bugs_from_warnings(warnings):
bugs = set(w.proj for w in warnings)
    return len(bugs)
def get_manually_inspected_warnings_bugs():
print("\nManual inspection of warnings aggregated on warnings and bugs levels")
print("\nDiffs-based approach:\n")
rel_path = './diffs_warnings/'
ep_res = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ = load_parsed_ep(rel_path + "ep_succ.json")
ep_part = load_parsed_ep(rel_path + "ep_part.json")
ep_fail = load_parsed_ep(rel_path + "ep_fail.json")
inf_res = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ = load_parsed_inf(rel_path + "inf_succ.json")
inf_part = load_parsed_inf(rel_path + "inf_part.json")
inf_fail = load_parsed_inf(rel_path + "inf_fail.json")
sb_res = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ = load_parsed_sb(rel_path + "sb_succ.json")
sb_part = load_parsed_sb(rel_path + "sb_part.json")
sb_fail = load_parsed_sb(rel_path + "sb_fail.json")
print("Warnings:\n")
print('Tool "Full match" "Partial match" Mismatch Total')
print('"Error Prone"', len(ep_succ), len(ep_part), len(ep_fail), len(ep_res))
print("Infer", len(inf_succ), len(inf_part), len(inf_fail), len(inf_res))
print("Spotbugs", len(sb_succ), len(sb_part), len(sb_fail), len(sb_res))
print("\nBugs:\n")
print('Tool "Full match" "Partial match" Mismatch Total')
print('"Error Prone"', count_bugs_from_warnings(ep_succ), count_bugs_from_warnings(ep_part), count_bugs_from_warnings(ep_fail), count_bugs_from_warnings(ep_res))
print("Infer", count_bugs_from_warnings(inf_succ), count_bugs_from_warnings(inf_part), count_bugs_from_warnings(inf_fail), count_bugs_from_warnings(inf_res))
print("Spotbugs", count_bugs_from_warnings(sb_succ), count_bugs_from_warnings(sb_part), count_bugs_from_warnings(sb_fail), count_bugs_from_warnings(sb_res))
print("\nFixed warnings approach\n")
rel_path = './removed_warnings/'
ep_res = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ = load_parsed_ep(rel_path + "ep_succ.json")
ep_part = load_parsed_ep(rel_path + "ep_part.json")
ep_fail = load_parsed_ep(rel_path + "ep_fail.json")
inf_res = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ = load_parsed_inf(rel_path + "inf_succ.json")
inf_part = load_parsed_inf(rel_path + "inf_part.json")
inf_fail = load_parsed_inf(rel_path + "inf_fail.json")
sb_res = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ = load_parsed_sb(rel_path + "sb_succ.json")
sb_part = load_parsed_sb(rel_path + "sb_part.json")
sb_fail = load_parsed_sb(rel_path + "sb_fail.json")
print("Warnings:\n")
print('Tool "Full match" "Partial match" Mismatch Total')
print('"Error Prone"', len(ep_succ), len(ep_part), len(ep_fail), len(ep_res))
print("Infer", len(inf_succ), len(inf_part), len(inf_fail), len(inf_res))
print("Spotbugs", len(sb_succ), len(sb_part), len(sb_fail), len(sb_res))
print("\nBugs:\n")
print('Tool "Full match" "Partial match" Mismatch Total')
print('"Error Prone"', count_bugs_from_warnings(ep_succ), count_bugs_from_warnings(ep_part), count_bugs_from_warnings(ep_fail), count_bugs_from_warnings(ep_res))
print("Infer", count_bugs_from_warnings(inf_succ), count_bugs_from_warnings(inf_part), count_bugs_from_warnings(inf_fail), count_bugs_from_warnings(inf_res))
print("Spotbugs", count_bugs_from_warnings(sb_succ), count_bugs_from_warnings(sb_part), count_bugs_from_warnings(sb_fail), count_bugs_from_warnings(sb_res))
get_manually_inspected_warnings_bugs_combined_approach()
def get_manually_inspected_warnings_bugs_combined_approach():
print("\nCombined approach\n")
rel_path = './diffs_warnings/'
ep_succ1 = load_json_list(rel_path + "ep_succ.json")
ep_part1 = load_json_list(rel_path + "ep_part.json")
ep_fail1 = load_json_list(rel_path + "ep_fail.json")
inf_succ1 = load_json_list(rel_path + "inf_succ.json")
inf_part1 = load_json_list(rel_path + "inf_part.json")
inf_fail1 = load_json_list(rel_path + "inf_fail.json")
sb_succ1 = load_json_list(rel_path + "sb_succ.json")
sb_part1 = load_json_list(rel_path + "sb_part.json")
sb_fail1 = load_json_list(rel_path + "sb_fail.json")
rel_path = './removed_warnings/'
ep_succ2 = load_json_list(rel_path + "ep_succ.json")
ep_part2 = load_json_list(rel_path + "ep_part.json")
ep_fail2 = load_json_list(rel_path + "ep_fail.json")
inf_succ2 = load_json_list(rel_path + "inf_succ.json")
inf_part2 = load_json_list(rel_path + "inf_part.json")
inf_fail2 = load_json_list(rel_path + "inf_fail.json")
sb_succ2 = load_json_list(rel_path + "sb_succ.json")
sb_part2 = load_json_list(rel_path + "sb_part.json")
sb_fail2 = load_json_list(rel_path + "sb_fail.json")
    # combined data #
ep_succ = get_list_of_uniq_jsons(ep_succ1 + ep_succ2)
ep_part = get_list_of_uniq_jsons(ep_part1 + ep_part2)
ep_fail = get_list_of_uniq_jsons(ep_fail1 + ep_fail2)
inf_succ = get_list_of_uniq_jsons(inf_succ1 + inf_succ2)
inf_part = get_list_of_uniq_jsons(inf_part1 + inf_part2)
inf_fail = get_list_of_uniq_jsons(inf_fail1 + inf_fail2)
sb_succ = get_list_of_uniq_jsons(sb_succ1 + sb_succ2)
sb_part = get_list_of_uniq_jsons(sb_part1 + sb_part2)
sb_fail = get_list_of_uniq_jsons(sb_fail1 + sb_fail2)
print("Warnings:\n")
print('Tool "Full match" "Partial match" Mismatch Total')
print('"Error Prone"', len(ep_succ), len(ep_part), len(ep_fail), len(ep_succ) + len(ep_part) + len(ep_fail))
print('Infer', len(inf_succ), len(inf_part), len(inf_fail), len(inf_succ) + len(inf_part) + len(inf_fail))
print('SpotBugs', len(sb_succ), len(sb_part), len(sb_fail), len(sb_succ) + len(sb_part) + len(sb_fail))
print("\nBugs:\n")
print('Tool "Full match" "Partial match" Mismatch Total')
b_succ, b_part, b_fail = len(Counter(p[' Proj'] for p in ep_succ)), len(Counter(p[' Proj'] for p in ep_part)), len(Counter(p[' Proj'] for p in ep_fail))
print('"Error Prone"', b_succ, b_part, b_fail, b_succ + b_part + b_fail)
b_succ, b_part, b_fail = len(Counter(p[' Proj'] for p in inf_succ)), len(Counter(p[' Proj'] for p in inf_part)), len(Counter(p[' Proj'] for p in inf_fail))
print('Infer', b_succ, b_part, b_fail, b_succ + b_part + b_fail)
b_succ, b_part, b_fail = len(Counter(p[' Proj'] for p in sb_succ)), len(Counter(p[' Proj'] for p in sb_part)), len(Counter(p[' Proj'] for p in sb_fail))
print('SpotBugs', b_succ, b_part, b_fail, b_succ + b_part + b_fail)
def get_cand_detected_bugs_tools_sets():
print("\nCandidate and detected bugs by each tool and each approach")
rel_path = './diffs_warnings/'
ep_res1 = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ1 = load_parsed_ep(rel_path + "ep_succ.json")
ep_part1 = load_parsed_ep(rel_path + "ep_part.json")
inf_res1 = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ1 = load_parsed_inf(rel_path + "inf_succ.json")
inf_part1 = load_parsed_inf(rel_path + "inf_part.json")
sb_res1 = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ1 = load_parsed_sb(rel_path + "sb_succ.json")
sb_part1 = load_parsed_sb(rel_path + "sb_part.json")
rel_path = './removed_warnings/'
ep_res2 = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ2 = load_parsed_ep(rel_path + "ep_succ.json")
ep_part2 = load_parsed_ep(rel_path + "ep_part.json")
inf_res2 = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ2 = load_parsed_inf(rel_path + "inf_succ.json")
inf_part2 = load_parsed_inf(rel_path + "inf_part.json")
sb_res2 = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ2 = load_parsed_sb(rel_path + "sb_succ.json")
sb_part2 = load_parsed_sb(rel_path + "sb_part.json")
print("\nCandidate bugs:\n")
print("Tool Diff-based Fixed-based Both")
ep_cand_diff = get_bugs_from_warnings(ep_res1)
ep_cand_fixed = get_bugs_from_warnings(ep_res2)
print('"Error Prone"', len(ep_cand_diff), len(ep_cand_fixed), len(ep_cand_diff & ep_cand_fixed))
inf_cand_diff = get_bugs_from_warnings(inf_res1)
inf_cand_fixed = get_bugs_from_warnings(inf_res2)
print("Infer", len(inf_cand_diff), len(inf_cand_fixed), len(inf_cand_diff & inf_cand_fixed))
sb_cand_diff = get_bugs_from_warnings(sb_res1)
sb_cand_fixed = get_bugs_from_warnings(sb_res2)
print("Spotbugs", len(sb_cand_diff), len(sb_cand_fixed), len(sb_cand_diff & sb_cand_fixed))
print("\nTrue bugs (fully or partially flagged)\n")
print("Tool Diff-based Fixed-based Both")
ep_succ_diff = get_bugs_from_warnings(ep_succ1) | get_bugs_from_warnings(ep_part1)
ep_succ_fixed = get_bugs_from_warnings(ep_succ2) | get_bugs_from_warnings(ep_part2)
print('"Error Prone"', len(ep_succ_diff), len(ep_succ_fixed), len(ep_succ_diff & ep_succ_fixed))
inf_succ_diff = get_bugs_from_warnings(inf_succ1) | get_bugs_from_warnings(inf_part1)
inf_succ_fixed = get_bugs_from_warnings(inf_succ2) | get_bugs_from_warnings(inf_part2)
print("Infer", len(inf_succ_diff), len(inf_succ_fixed), len(inf_succ_diff & inf_succ_fixed))
sb_succ_diff = get_bugs_from_warnings(sb_succ1) | get_bugs_from_warnings(sb_part1)
sb_succ_fixed = get_bugs_from_warnings(sb_succ2) | get_bugs_from_warnings(sb_part2)
print("Spotbugs", len(sb_succ_diff), len(sb_succ_fixed), len(sb_succ_diff & sb_succ_fixed))
print("\nTrue bugs found by all tools\n")
ep_succ = get_bugs_from_warnings(ep_succ1) | get_bugs_from_warnings(ep_succ2) | get_bugs_from_warnings(ep_part1) | get_bugs_from_warnings(ep_part2)
print("Ep:", len(ep_succ))
inf_succ = get_bugs_from_warnings(inf_succ1) | get_bugs_from_warnings(inf_succ2) | get_bugs_from_warnings(inf_part1) | get_bugs_from_warnings(inf_part2)
print("Inf:", len(inf_succ))
sb_succ = get_bugs_from_warnings(sb_succ1) | get_bugs_from_warnings(sb_succ2) | get_bugs_from_warnings(sb_part1) | get_bugs_from_warnings(sb_part2)
print("Sb:", len(sb_succ))
print("Ep & Inf:", len(ep_succ & inf_succ))
print("Ep & Sb:", len(ep_succ & sb_succ))
print("Inf & Sb:", len(inf_succ & sb_succ))
print("Ep & Inf & Sb:", len(ep_succ & inf_succ & sb_succ))
def get_cand_detected_bugs_tools_table():
print("\nAll candidate and detected bugs by each tool and each approach\n")
rel_path = './diffs_warnings/'
ep_res1 = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ1 = load_parsed_ep(rel_path + "ep_succ.json")
ep_part1 = load_parsed_ep(rel_path + "ep_part.json")
ep_fail1 = load_parsed_ep(rel_path + "ep_fail.json")
inf_res1 = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ1 = load_parsed_inf(rel_path + "inf_succ.json")
inf_part1 = load_parsed_inf(rel_path + "inf_part.json")
inf_fail1 = load_parsed_inf(rel_path + "inf_fail.json")
sb_res1 = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ1 = load_parsed_sb(rel_path + "sb_succ.json")
sb_part1 = load_parsed_sb(rel_path + "sb_part.json")
sb_fail1 = load_parsed_sb(rel_path + "sb_fail.json")
rel_path = './removed_warnings/'
ep_res2 = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ2 = load_parsed_ep(rel_path + "ep_succ.json")
ep_part2 = load_parsed_ep(rel_path + "ep_part.json")
ep_fail2 = load_parsed_ep(rel_path + "ep_fail.json")
inf_res2 = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ2 = load_parsed_inf(rel_path + "inf_succ.json")
inf_part2 = load_parsed_inf(rel_path + "inf_part.json")
inf_fail2 = load_parsed_inf(rel_path + "inf_fail.json")
sb_res2 = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ2 = load_parsed_sb(rel_path + "sb_succ.json")
sb_part2 = load_parsed_sb(rel_path + "sb_part.json")
sb_fail2 = load_parsed_sb(rel_path + "sb_fail.json")
bugs = []
bugs.extend(w.proj for w in ep_res1)
bugs.extend(w.proj for w in inf_res1)
bugs.extend(w.proj for w in sb_res1)
bugs.extend(w.proj for w in ep_res2)
bugs.extend(w.proj for w in inf_res2)
bugs.extend(w.proj for w in sb_res2)
bugs = sorted(list(set(bugs)))
print(" Removed Warnings Diffs-based Combined")
print("Tool Ep Inf SB Ep Inf SB Ep Inf SB")
for b in bugs:
entry = b + " "
#####################################
if b in get_bugs_from_warnings(ep_succ1):
entry += "& F "
elif b in get_bugs_from_warnings(ep_part1):
entry += "& P "
elif b in get_bugs_from_warnings(ep_fail1):
entry += "& M "
else:
entry += "& - "
if b in get_bugs_from_warnings(inf_succ1):
entry += "& F "
elif b in get_bugs_from_warnings(inf_part1):
entry += "& P "
elif b in get_bugs_from_warnings(inf_fail1):
entry += "& M "
else:
entry += "& - "
if b in get_bugs_from_warnings(sb_succ1):
entry += "& F "
elif b in get_bugs_from_warnings(sb_part1):
entry += "& P "
elif b in get_bugs_from_warnings(sb_fail1):
entry += "& M "
else:
entry += "& - "
#####################################
if b in get_bugs_from_warnings(ep_succ2):
entry += "& F "
elif b in get_bugs_from_warnings(ep_part2):
entry += "& P "
elif b in get_bugs_from_warnings(ep_fail2):
entry += "& M "
else:
entry += "& - "
if b in get_bugs_from_warnings(inf_succ2):
entry += "& F "
elif b in get_bugs_from_warnings(inf_part2):
entry += "& P "
elif b in get_bugs_from_warnings(inf_fail2):
entry += "& M "
else:
entry += "& - "
if b in get_bugs_from_warnings(sb_succ2):
entry += "& F "
elif b in get_bugs_from_warnings(sb_part2):
entry += "& P "
elif b in get_bugs_from_warnings(sb_fail2):
entry += "& M "
else:
entry += "& - "
#####################################
if b in get_bugs_from_warnings(ep_succ1) or b in get_bugs_from_warnings(ep_succ2):
entry += "& F "
elif b in get_bugs_from_warnings(ep_part1) or b in get_bugs_from_warnings(ep_part2):
entry += "& P "
elif b in get_bugs_from_warnings(ep_fail1) or b in get_bugs_from_warnings(ep_fail2):
entry += "& M "
else:
entry += "& - "
if b in get_bugs_from_warnings(inf_succ1) or b in get_bugs_from_warnings(inf_succ2):
entry += "& F "
elif b in get_bugs_from_warnings(inf_part1) or b in get_bugs_from_warnings(inf_part2):
entry += "& P "
elif b in get_bugs_from_warnings(inf_fail1) or b in get_bugs_from_warnings(inf_fail2):
entry += "& M "
else:
entry += "& - "
if b in get_bugs_from_warnings(sb_succ1) or b in get_bugs_from_warnings(sb_succ2):
entry += "& F "
elif b in get_bugs_from_warnings(sb_part1) or b in get_bugs_from_warnings(sb_part2):
entry += "& P "
elif b in get_bugs_from_warnings(sb_fail1) or b in get_bugs_from_warnings(sb_fail2):
entry += "& M "
else:
entry += "& - "
entry += "\\\\"
print(entry)
print()
def get_true_detected_bugs_by_each_tool():
rel_path = './diffs_warnings/'
ep_res1 = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ1 = load_parsed_ep(rel_path + "ep_succ.json")
ep_part1 = load_parsed_ep(rel_path + "ep_part.json")
inf_res1 = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ1 = load_parsed_inf(rel_path + "inf_succ.json")
inf_part1 = load_parsed_inf(rel_path + "inf_part.json")
sb_res1 = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ1 = load_parsed_sb(rel_path + "sb_succ.json")
sb_part1 = load_parsed_sb(rel_path + "sb_part.json")
rel_path = './removed_warnings/'
ep_res2 = load_parsed_ep(rel_path + "ep_warnings.json")
ep_succ2 = load_parsed_ep(rel_path + "ep_succ.json")
ep_part2 = load_parsed_ep(rel_path + "ep_part.json")
inf_res2 = load_parsed_inf(rel_path + "inf_warnings.json")
inf_succ2 = load_parsed_inf(rel_path + "inf_succ.json")
inf_part2 = load_parsed_inf(rel_path + "inf_part.json")
sb_res2 = load_parsed_sb(rel_path + "sb_warnings.json")
sb_succ2 = load_parsed_sb(rel_path + "sb_succ.json")
sb_part2 = load_parsed_sb(rel_path + "sb_part.json")
print("\nTrue bugs found by each tool\n")
ep_succ = get_bugs_from_warnings(ep_succ1) | get_bugs_from_warnings(ep_succ2) | get_bugs_from_warnings(ep_part1) | get_bugs_from_warnings(ep_part2)
print("Ep:", len(ep_succ))
with open(os.path.join(os.getcwd(), "ep_detected"), 'w') as f:
f.write("\n".join(i for i in ep_succ))
inf_succ = get_bugs_from_warnings(inf_succ1) | get_bugs_from_warnings(inf_succ2) | get_bugs_from_warnings(inf_part1) | get_bugs_from_warnings(inf_part2)
print("Inf:", len(inf_succ))
with open(os.path.join(os.getcwd(), "inf_detected"), 'w') as f:
f.write("\n".join(i for i in inf_succ))
sb_succ = get_bugs_from_warnings(sb_succ1) | get_bugs_from_warnings(sb_succ2) | get_bugs_from_warnings(sb_part1) | get_bugs_from_warnings(sb_part2)
print("Sb:", len(sb_succ))
with open(os.path.join(os.getcwd(), "sb_detected"), 'w') as f:
f.write("\n".join(i for i in sb_succ))
print()
''' this script has to be run from the results/ directory '''
if __name__ == '__main__':
# display_min_max_avg_warnings_per_bug_total()
# get_warnings_bugs_from_each_approach()
# get_manually_inspected_warnings_bugs()
# get_cand_detected_bugs_tools_sets()
# get_cand_detected_bugs_tools_table()
get_true_detected_bugs_by_each_tool()
|
python
|
from graphene_sqlalchemy import SQLAlchemyObjectType
import graphene
from ..database import db_session
from ..models import ModelFridge
from ..lib.utils import input_to_dictionary
from importlib import import_module
from flask_jwt_extended import jwt_required
class FridgeAttributes:
ingredient_id = graphene.List(graphene.String)
class Fridge(SQLAlchemyObjectType, FridgeAttributes):
ingredients = graphene.List(lambda: import_module('.ingredient', "babylon.schemas").Ingredient)
@graphene.resolve_only_args
def resolve_ingredients(self):
return [ingredient for ingredient in self.ingredients]
class Meta:
model = ModelFridge
interfaces = (graphene.relay.Node,)
class CreateFridgeInput(graphene.InputObjectType, FridgeAttributes):
pass
class CreateFridge(graphene.Mutation):
recipe = graphene.Field(lambda: Fridge, description="Recipe created by this mutation")
class Arguments:
input = CreateFridgeInput(required=True)
@jwt_required
def mutate(self, info, input):
# TODO: Add this
pass
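        # A minimal sketch of what this mutation might eventually do, kept as comments
        # since the TODO above is intentional (the assumption here is that ModelFridge
        # accepts the decoded input fields as constructor keyword arguments):
        #
        #   data = input_to_dictionary(input)
        #   fridge = ModelFridge(**data)
        #   db_session.add(fridge)
        #   db_session.commit()
        #   return CreateFridge(recipe=fridge)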
class UpdateFridgeInput(graphene.InputObjectType, FridgeAttributes):
id = graphene.ID(required=True, description="Global ID of the recipe")
class UpdateFridge(graphene.Mutation):
recipe = graphene.Field(lambda: Fridge, description="Recipe created by this mutation")
class Arguments:
        input = UpdateFridgeInput(required=True)
@jwt_required
def mutate(self, info, input):
data = input_to_dictionary(input)
fridge = db_session.query(ModelFridge).filter_by(id=data["id"])
fridge.update(data)
db_session.commit()
recipe = db_session.query(ModelFridge).filter_by(id=data["id"]).first()
return UpdateFridge(recipe=recipe)
|
python
|
while True:
try:
pilha = input()
correct = 1
par = 0
i = 0
while i < len(pilha) and correct:
if pilha[i] == '(':
par += 1
#print('1', i, par)
if pilha[i] == ')':
if par == 0:
correct = 0
#print('2', i, par)
else:
par -= 1
#print('3', i, par)
if i == len(pilha)-1:
                if par != 0:
correct = 0
#print('5', i, par)
i += 1
if correct:
print('correct')
else:
print('incorrect')
except EOFError:
break
|
python
|
import os
import logging
import argparse
from tqdm import tqdm
import torch
PAD_token = 1
SOS_token = 3
EOS_token = 2
UNK_token = 0
MODE = 'en'
data_version = 'init' # processed
if torch.cuda.is_available():
USE_CUDA = True
else:
USE_CUDA = False
MAX_LENGTH = 10
parser = argparse.ArgumentParser(description='TRADE Multi-Domain DST')
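# NOTE: the stub class below defines a parser whose parse_args() is a no-op and
# attaches it as an attribute of `parser`; presumably this is so that code expecting
# a `parser.ArgumentParser()` call (e.g. when this config module is imported
# interactively) gets a harmless stub instead of re-parsing the command line.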
class EmptyParser():
def parse_args(self):
return
parser.ArgumentParser = EmptyParser
# Training Setting
parser.add_argument('-ds','--dataset', help='dataset', required=False, default="multiwoz")
parser.add_argument('-t','--task', help='Task Number', required=False, default="dst")
parser.add_argument('-path','--path', help='path of the file to load', required=False)
parser.add_argument('-sample','--sample', help='Number of Samples', required=False,default=None)
parser.add_argument('-patience','--patience', help='', required=False, default=6, type=int)
parser.add_argument('-es','--earlyStop', help='Early Stop Criteria, BLEU or ENTF1', required=False, default='BLEU')
parser.add_argument('-all_vocab','--all_vocab', help='', required=False, default=1, type=int)
parser.add_argument('-imbsamp','--imbalance_sampler', help='', required=False, default=0, type=int)
parser.add_argument('-data_ratio','--data_ratio', help='', required=False, default=100, type=int)
parser.add_argument('-um','--unk_mask', help='mask out input token to UNK', type=int, required=False, default=1)
parser.add_argument('-bsz','--batch', help='Batch_size', required=False, type=int)
# Testing Setting
parser.add_argument('-rundev','--run_dev_testing', help='', required=False, default=0, type=int)
parser.add_argument('-viz','--vizualization', help='vizualization', type=int, required=False, default=0)
## model predictions
parser.add_argument('-gs','--genSample', help='Generate Sample', type=int, required=False, default=0) #### change this when testing
parser.add_argument('-evalp','--evalp', help='evaluation period', required=False, default=1)
parser.add_argument('-an','--addName', help='An add name for the model folder', required=False, default='')
parser.add_argument('-eb','--eval_batch', help='Evaluation Batch_size', required=False, type=int, default=0)
# Model architecture
parser.add_argument('-gate','--use_gate', help='', required=False, default=1, type=int)
parser.add_argument('-le','--load_embedding', help='', required=False, default=0, type=int)
parser.add_argument('-femb','--fix_embedding', help='', required=False, default=0, type=int)
parser.add_argument('-paral','--parallel_decode', help='', required=False, default=0, type=int)
# Model Hyper-Parameters
parser.add_argument('-dec','--decoder', help='decoder model', required=False)
parser.add_argument('-hdd','--hidden', help='Hidden size', required=False, type=int, default=400)
parser.add_argument('-lr','--learn', help='Learning Rate', required=False, type=float)
parser.add_argument('-dr','--drop', help='Drop Out', required=False, type=float)
parser.add_argument('-lm','--limit', help='Word Limit', required=False,default=-10000)
parser.add_argument('-clip','--clip', help='gradient clipping', required=False, default=10, type=int)
parser.add_argument('-tfr','--teacher_forcing_ratio', help='teacher_forcing_ratio', type=float, required=False, default=0.5)
# parser.add_argument('-l','--layer', help='Layer Number', required=False)
# Unseen Domain Setting
parser.add_argument('-l_ewc','--lambda_ewc', help='regularization term for EWC loss', type=float, required=False, default=0.01)
parser.add_argument('-fisher_sample','--fisher_sample', help='number of sample used to approximate fisher mat', type=int, required=False, default=0)
parser.add_argument("--all_model", action="store_true")
parser.add_argument("--domain_as_task", action="store_true")
parser.add_argument('--run_except_4d', help='', required=False, default=1, type=int)
parser.add_argument("--strict_domain", action="store_true")
parser.add_argument('-exceptd','--except_domain', help='', required=False, default="", type=str)
parser.add_argument('-onlyd','--only_domain', help='', required=False, default="", type=str)
args = vars(parser.parse_known_args(args=[])[0])
if args["load_embedding"]:
args["hidden"] = 400
print("[Warning] Using hidden size = 400 for pretrained word embedding (300 + 100)...")
if args["fix_embedding"]:
args["addName"] += "FixEmb"
if args["except_domain"] != "":
args["addName"] += "Except"+args["except_domain"]
if args["only_domain"] != "":
args["addName"] += "Only"+args["only_domain"]
|
python
|
DIMINISHING_BRIGHTNESS = 0.8
def run(led_wire, string_length, running_time, sleep_time, num_pulses, time_between_pulse, colour, staggered):
pass
## TODO
# start_time = time.time()
# if colour == "random":
# colour_list = [red, dim_orange, dim_yellow, dim_light_green,
# green, dim_turquoise, blue, dim_pink]
# current_colour = list(random.choice(colour_list))
# while (time.time() - start_time) < running_time:
# pulse_start = random.randint(0, string_length)
# led_wire.setPixelColor(pulse_start, Color(current_colour[1],
# current_colour[0], current_colour[2]))
# for i in (1, 2, 3):
# for c in current_colour:
# c * DIMINISHING_BRIGHTNESS
# for j in (-i, i):
# if pulse_start + j > 0 or pulse_start + j < string_length:
# led_wire.setPixelColor(pulse_start - j, Color(current_colour[1],
# current_colour[0], current_colour[2]))
# for i in range(100):
# led_wire.setPixelColor(i, Color(current_colour[1],
# current_colour[0], current_colour[2]))
# led_wire.show()
# time.sleep(sleep_time)
|
python
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""basic data structure wrapper for tensor in paddle.
like stack, array.
"""
import sys
import os
import traceback
import logging
from collections import namedtuple
import numpy as np
from paddle import fluid
from paddle.fluid import layers
from text2sql.utils import fluider
from text2sql.utils import nn_utils
ArrayData = namedtuple("ArrayData", "data pos")
StackData = namedtuple("StackData", "data pos")
class Array(object):
"""Array function simulator"""
def __init__(self):
"""init of class """
super(Array, self).__init__()
@classmethod
def push(cls, array_data, updates, in_place=True):
"""append udpates to array_data.data on array_data.pos
Args:
array_data (TYPE): NULL
updates (TYPE): NULL
            in_place (bool): Default is True.
Returns: None
Raises: NULL
"""
new_data = nn_utils.batch_scatter(array_data.data, array_data.pos, updates, overwrite=True, in_place=in_place)
new_pos = fluider.increment(array_data.pos, value=1, in_place=in_place)
if in_place:
return array_data
else:
return ArrayData(new_data, new_pos)
class Stack(object):
"""Stack function simulator"""
def __init__(self):
"""init of class """
super(Stack, self).__init__()
@classmethod
def pop(cls, stack_data, mask=True, in_place=True):
"""pop data in stack_data
Args:
stack_data (StackData): (data, pos) with shape ([batch_size, stack_len], [batch_size, 1])
            mask (bool): whether to mask the values returned for stacks that are already empty. Default is True
            in_place (bool): Default is True
        Returns: (Variable1, Variable2)
            Variable1: the popped values
                       dtype=stack_data.data.dtype
                       shape=[-1]
            Variable2: whether the value at each position is valid. For stacks that were already empty on input, this is False.
dtype=bool
shape=[-1]
Raises: NULL
"""
data = stack_data.data
pos = stack_data.pos
        # only non-empty stacks can be popped (i.e. the pop is valid)
valid_pos = layers.logical_not(cls.empty(stack_data))
new_pos_delta = layers.cast(valid_pos, dtype=pos.dtype)
new_pos = layers.elementwise_sub(pos, new_pos_delta)
# shape = [batch_size]
output = nn_utils.batch_gather(data, new_pos)
        # mask the values returned for empty stacks
if mask:
# shape = [batch_size, 1]
mask_tag = layers.cast(new_pos_delta, dtype=data.dtype) if data.dtype != pos.dtype else new_pos_delta
mask_tag = layers.squeeze(mask_tag, [1])
output = layers.elementwise_mul(output, mask_tag)
        # zero out the original position after popping
updates = layers.zeros_like(output)
new_data = nn_utils.batch_scatter(data, new_pos, updates, overwrite=True, in_place=in_place)
if in_place:
layers.assign(new_pos, pos)
return output, valid_pos, stack_data
else:
return output, valid_pos, StackData(new_data, new_pos)
@classmethod
def push(cls, stack_data, updates, in_place=True):
"""push udpates to stack_data
Args:
stack_data (TYPE): NULL
updates (TYPE): NULL
            in_place (bool): Default is True.
Returns: None
Raises: NULL
"""
new_data = nn_utils.batch_scatter(stack_data.data, stack_data.pos, updates, overwrite=True, in_place=in_place)
new_pos = fluider.increment(stack_data.pos, value=1, in_place=in_place)
if in_place:
return stack_data
else:
return StackData(new_data, new_pos)
@classmethod
def empty(cls, stack_data, dtype='bool'):
"""Return True if stack is empty(pos == 0)
Args:
stack_data (TYPE): NULL
dtype (str): result dtype. Default is bool.
Returns: Variable
shape=[-1], dtype=params<dtype>
Raises: NULL
"""
zeros = layers.zeros_like(stack_data.pos)
output = layers.equal(stack_data.pos, zeros)
if dtype != 'bool':
output = layers.cast(output, dtype=dtype)
return output
if __name__ == "__main__":
"""run some simple test cases"""
pass
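    # A rough usage sketch of the Stack simulator, kept as comments because running
    # it requires a full fluid Program/Executor setup (the tensor shapes below are
    # assumptions, not requirements of the API):
    #
    #   data = layers.fill_constant(shape=[4, 8], dtype='float32', value=0)
    #   pos = layers.fill_constant(shape=[4, 1], dtype='int64', value=0)
    #   stack = StackData(data, pos)
    #   stack = Stack.push(stack, updates=layers.ones(shape=[4], dtype='float32'), in_place=False)
    #   top, valid, stack = Stack.pop(stack, in_place=False)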
|
python
|
# -*- coding: utf-8 -*-
import copy
import os
import unittest
from mlcomp.utils import TemporaryDirectory
from mlcomp.report import (ReportSaver, ReportObject, Resource,
default_report_types, Report)
from .helper import to_config
class MyReportObject(ReportObject):
def __init__(self, value=None, children=None, name=None, name_scope=None):
super(MyReportObject, self).__init__(name=name, name_scope=name_scope)
self.value = value
if children:
children = list(children)
self.children = children
def gather_children(self):
if self.children:
ret = copy.copy(self.children)
else:
ret = []
ret.extend(super(MyReportObject, self).gather_children())
return ret
class PersistTestCase(unittest.TestCase):
def test_ReportSaver(self):
report = Report(
children=[
Resource(data=b'123'),
MyReportObject(
Resource(data=b'456'),
children=[
Resource(data=b'789')
]
)
]
)
with default_report_types({'MyReport': MyReportObject}), \
TemporaryDirectory() as tempdir:
# test writing
saver = ReportSaver(tempdir + '/1')
saver.save(report)
report2 = saver.load()
self.assertEqual(
to_config(report),
to_config(report2)
)
self.assertEqual(report.children[0].data,
report2.children[0].data)
self.assertEqual(report.children[1].value.data,
report2.children[1].value.data)
self.assertEqual(report.children[1].children[0].data,
report2.children[1].children[0].data)
# writing to exist dir will be refused
with self.assertRaises(IOError):
saver.save(report)
# test writing to exist but empty dir
os.makedirs(tempdir + '/2')
saver = ReportSaver(tempdir + '/2')
saver.save(report)
# test force writing
saver = ReportSaver(tempdir + '/2', overwrite=True)
saver.save(report)
# test the `save` and `load` method of Report
report.save(tempdir + '/3')
report2 = Report.load(tempdir + '/3')
self.assertEqual(
to_config(report),
to_config(report2)
)
self.assertEqual(report.children[0].data,
report2.children[0].data)
self.assertEqual(report.children[1].value.data,
report2.children[1].value.data)
self.assertEqual(report.children[1].children[0].data,
report2.children[1].children[0].data)
with self.assertRaises(IOError):
report.save(tempdir + '/3')
if __name__ == '__main__':
unittest.main()
|
python
|
from operator import attrgetter
from ubuntui.utils import Padding
from ubuntui.widgets.hr import HR
from urwid import Columns, Text
from conjureup.app_config import app
from conjureup.ui.views.base import NEXT_SCREEN, BaseView
from conjureup.ui.widgets.selectors import CheckList
class AddonsView(BaseView):
title = 'Add-on Selection'
subtitle = 'Choose one or more additional components to add to your spell'
footer = ('Select zero or more add-ons using SPACE, then press ENTER '
'or select CONTINUE to continue')
def __init__(self, next, back):
self.next = next
self.choices = CheckList()
self.extend_command_map({
'enter': NEXT_SCREEN,
})
super().__init__(back)
def build_widget(self):
self.choices.append(HR())
for addon in sorted(app.addons.values(), key=attrgetter('name')):
self.choices.append_option(label=addon.friendly_name,
value=addon.name)
self.choices.append(Padding.line_break(""))
self.choices.append(
Columns([
('fixed', 3, Text('')),
Text(addon.description)
], dividechars=5)
)
self.choices.append(HR())
if app.addons:
self.choices.focus_position = 1
return self.choices
def build_buttons(self):
return [
self.button('CONTINUE', lambda btn: self.next())
]
@property
def selected(self):
return self.choices.selected
|
python
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Utils for manipulation with directories and files."""
import csv
import os
import time
from collections import defaultdict
from lib import constants
def wait_file_downloaded(
    path_to_csv,
    timeout=constants.ux.MAX_USER_WAIT_SECONDS,
    poll_frequency=constants.ux.POLL_FREQUENCY
):
  """Wait until the file exists or an IOError is raised."""
end_time = time.time() + timeout
while not os.path.exists(path_to_csv):
time.sleep(poll_frequency)
if time.time() > end_time:
raise IOError(
"No such file {} or directory after waiting for {} sec.".format(
path_to_csv, timeout))
file_size = os.path.getsize(path_to_csv)
while True:
current_file_size = os.path.getsize(path_to_csv)
if current_file_size == file_size and file_size != 0:
break
file_size = current_file_size
time.sleep(poll_frequency)
if time.time() > end_time:
raise IOError(
"File {} not changed size from {} bytes during {} sec of "
"waiting.".format(path_to_csv, current_file_size, timeout))
def get_list_objs_scopes_from_csv(path_to_csv):
  """Open the CSV file at 'path_to_csv', which is expected to contain exported
  objects, parse the CSV file's structure and return a mapping of object types
  to lists of object scopes (dicts with exportable field names as keys and the
  corresponding field values of the instance as values).
  """
wait_file_downloaded(path_to_csv)
with open(path_to_csv) as csv_file:
rows = csv.reader(csv_file)
object_type = None
keys = []
results = defaultdict(list)
for columns in rows:
if not any(columns):
continue
if columns[0] == "Object type":
# new block started
object_type = None
keys = []
continue
if object_type is None:
keys = columns[1:]
object_type = columns[0]
continue
columns = [unicode(val) for val in columns]
results[object_type].append(dict(zip(keys, columns[1:])))
return results
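# For reference, a hedged example of the exported CSV layout this parser handles
# (the object type and column values below are made up):
#
#   Object type,,
#   Program,Code,Title
#   ,PROGRAM-1,My Program
#   ,PROGRAM-2,Other Program
#
# which would be returned as:
#   {"Program": [{"Code": "PROGRAM-1", "Title": "My Program"},
#                {"Code": "PROGRAM-2", "Title": "Other Program"}]}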
|
python
|
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Utilities to load and save onnx models """
from typing import Union, List, Tuple, Dict
import os
import copy
from collections import defaultdict
import torch
import torch.nn as nn
import torch.onnx.symbolic_caffe2
import onnx
from aimet_common.utils import AimetLogger
import aimet_torch.utils
import aimet_torch.elementwise_ops as elementwise_ops
from aimet_torch.defs import OpToIOTensors
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
recurrent_onnx_optypes = ['LSTM', 'GRU', 'RNN']
# This is a dict that maps a PyTorch module type to the corresponding ONNX op type (as a string)
map_torch_types_to_onnx = {
nn.Conv2d: ['Conv'],
nn.Dropout: ['Dropout'],
nn.Dropout2d: ['Dropout'],
nn.BatchNorm1d: ['BatchNormalization'],
nn.BatchNorm2d: ['BatchNormalization'],
nn.ReLU: ['Relu'],
nn.ReLU6: ['Clip'],
nn.MaxPool2d: ['MaxPool'],
nn.Linear: ['Gemm', 'MatMul'],
nn.AdaptiveAvgPool2d: ['GlobalAveragePool', 'AveragePool'],
nn.AvgPool2d: ['AveragePool'],
nn.LogSoftmax: ['LogSoftmax'],
nn.RNN: ['RNN'],
nn.LSTM: ['LSTM'],
nn.GRU: ['GRU'],
nn.ConvTranspose2d: ['ConvTranspose'],
nn.Sigmoid: ['Sigmoid'],
nn.Upsample: ['Upsample'],
nn.PReLU: ['PRelu'],
nn.LeakyReLU: ['LeakyRelu'],
nn.Flatten: ['Flatten'],
elementwise_ops.Add: ['Add'],
elementwise_ops.Subtract: ['Sub'],
elementwise_ops.Multiply: ['Mul'],
elementwise_ops.Divide: ['Div'],
elementwise_ops.Concat: ['Concat']
}
# Maps pytorch functional op string names to corresponding onnx types.
pytorch_functional_name_to_onnx_dict = {
'add': 'Add',
'cat': 'Concat',
'mul': 'Mul',
'div': 'Div'
}
onnx_subgraph_op_to_pytorch_module_param_name = {
torch.nn.GroupNorm:
{
# '#depth', 'op_type': {input_index: torch module parameter name}
('#2', 'Mul'): {1: 'weight'},
('#3', 'Add'): {1: 'bias'}
}
}
class OnnxExportApiArgs:
"""
configuration for torch onnx export api invocation
"""
def __init__(self, opset_version: int = None, input_names: List[str] = None, output_names: List[str] = None):
"""
Refer torch documentation https://pytorch.org/docs/1.7.1/onnx.html?highlight=onnx%20export#torch.onnx.export
:param opset_version: onnx opset version to use to export the model
:param input_names: names to assign to the input nodes of the onnx graph, in order
:param output_names: names to assign to the output nodes of the graph, in order
"""
self.opset_version = opset_version
self.input_names = input_names
self.output_names = output_names
@property
def kwargs(self):
"""
formats all override options into kwarg format to appended to onnx export call
"""
return {'opset_version': self.opset_version,
'input_names': self.input_names,
'output_names': self.output_names}
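# A hedged usage sketch (the model, dummy_input and file path are placeholders):
#
#   export_args = OnnxExportApiArgs(opset_version=11,
#                                   input_names=['input'],
#                                   output_names=['output'])
#   OnnxSaver.set_node_names('./model.onnx', model, dummy_input,
#                            onnx_export_args=export_args)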
class OnnxSaver:
"""
Utilities to save/load onnx models
"""
@classmethod
def set_node_names(cls, onnx_model_path: str, pytorch_model: torch.nn.Module,
dummy_input: Union[torch.Tensor, Tuple],
onnx_export_args: OnnxExportApiArgs = OnnxExportApiArgs()):
"""
This utility loads a given onnx model file and set the names of all the nodes (ops) to equivalent
pytorch module names given the corresponding pytorch model.
:param onnx_model_path: Path to the ONNX model file
:param pytorch_model: Equivalent PyTorch model instance
:param dummy_input: Dummy input to the model. Used to parse model graph.
:param onnx_export_args: override options for torch.onnx.export call
:return:
"""
onnx_model = cls._map_onnx_nodes_to_pytorch_modules(pytorch_model, dummy_input,
onnx_model_path, onnx_export_args)
onnx.save(onnx_model, onnx_model_path)
@staticmethod
def _create_map_of_tensor_to_node(onnx_model: onnx.ModelProto) -> Tuple[Dict[str, List[onnx.NodeProto]],
Dict[str, onnx.NodeProto]]:
"""
Create and return two dicts
1. Tensor -> list of nodes that consume this tensor
2. Tensor -> node that produces this tensor
:param onnx_model: ONNX model object
:return: The two dicts described above
Note: The list in #1 is ordered exactly in the order that pytorch trace reaches these nodes. This is important
because later on we will use pytorch layer hooks to match these nodes with the equivalent PyTorch modules.
The expectation is that PyTorch trace and PyTorch hooks follow the same execution sequence
"""
map_input_tensor_to_node = {}
map_output_tensor_to_node = {}
for node in onnx_model.graph.node:
for in_tensor in node.input:
if in_tensor in map_input_tensor_to_node:
map_input_tensor_to_node[in_tensor].append(node)
else:
map_input_tensor_to_node[in_tensor] = [node]
for output in node.output:
assert output not in map_output_tensor_to_node, 'More than one node produces the same tensor'
map_output_tensor_to_node[output] = node
return map_output_tensor_to_node, map_input_tensor_to_node
@classmethod
def _add_markers(cls, starting_module, module_name_map):
"""Recursively add marker layers
"""
class CustomMarkerFunc(torch.autograd.Function):
"""
This function helps add a custom layer when exporting to ONNX
Note the input tensor has a trivial operation performed on it (clamp). This is needed to force
pytorch trace to not ignore the function.
"""
@staticmethod
def symbolic(g, inp, identifier, start):
"""
Magic method that helps with exporting a custom ONNX node
"""
return g.op('CustomMarker', inp, id_s=identifier, start_s=start)
@staticmethod
def forward(ctx, inp, _identifier, _start): # pylint: disable=arguments-differ
return inp.clamp(0)
@staticmethod
def backward(ctx, _grad): # pylint: disable=arguments-differ
raise NotImplementedError()
class CustomMarker(torch.nn.Module):
"""
This is a temporary layer that in inserted next to a real layer to distinguish the real layer in the
exported ONNX format
"""
def __init__(self, module, identifier):
super(CustomMarker, self).__init__()
self.marked_module = module
self.identifier = identifier
def forward(self, *inputs):
"""
Forward method for this CustomMarker layer
"""
output = []
for t in inputs:
if isinstance(t, torch.Tensor):
t = CustomMarkerFunc.apply(t, self.identifier, 'True')
output.append(t)
x = self.marked_module(*output)
if isinstance(x, torch.Tensor):
x = [x]
output = []
for t in x:
if isinstance(t, torch.Tensor):
t = CustomMarkerFunc.apply(t, self.identifier, 'False')
output.append(t)
if len(output) == 1:
output = output[0]
else:
output = tuple(output)
return output
for module_name, module_ref in starting_module.named_children():
if aimet_torch.utils.is_leaf_module(module_ref):
marker_layer = CustomMarker(module_ref, module_name_map[module_ref])
setattr(starting_module, module_name, marker_layer)
# recursively call children modules
else:
cls._add_markers(module_ref, module_name_map)
@classmethod
def _map_onnx_nodes_to_pytorch_modules(cls, pt_model, dummy_input, onnx_model_path, onnx_export_args):
"""
Exports an onnx model, maps the nodes in the onnx model to corresponding pytorch modules and names
them accordingly
:param pt_model: PyTorch model
:param dummy_input: Dummy input to run a fwd pass on @pt_model
:param onnx_model_path: Path to the saved ONNX model
:param onnx_export_args: override options for torch.onnx.export call
"""
working_dir = os.path.dirname(onnx_model_path)
onnx_model = cls._create_onnx_model_with_markers(dummy_input, pt_model, working_dir, onnx_export_args)
model_output_names = [output.name for output in onnx_model.graph.output] # pylint: disable=no-member
# Parse the ONNX model and create mapping from input and output tensors to corresponding nodes
map_output_tensor_to_node, map_input_tensor_to_node = cls._create_map_of_tensor_to_node(onnx_model)
# Find all marker nodes
end_marker_map, start_marker_map = cls._create_map_of_marker_nodes(onnx_model)
# Set names
cls._set_onnx_node_names(map_input_tensor_to_node, start_marker_map)
# Remove markers
for markers in start_marker_map.values():
for marker in markers:
cls._detach_start_marker_node(map_input_tensor_to_node, map_output_tensor_to_node, marker)
for markers in end_marker_map.values():
for marker in markers:
cls._detach_end_marker_node(onnx_model, map_input_tensor_to_node, map_output_tensor_to_node, marker)
# Make sure we rename the model outputs to original names
cls._set_output_names(onnx_model, model_output_names, map_output_tensor_to_node, map_input_tensor_to_node)
# Clean up the detached nodes
onnx_model = cls._remove_detached_nodes_from_onnx_graph(onnx_model)
cls._fix_param_names(onnx_model)
cls._fix_initializer_names(onnx_model, pt_model)
return onnx_model
@classmethod
def _fix_initializer_names(cls, onnx_model: onnx.NodeProto, pt_model: torch.nn.Module):
"""
Parameter names in some case do not have reflect the torch param names. This method updates the onnx model
with param names using a custom mapping.
:param onnx_model: Onnx Model
:param pt_model: PyTorch Model
"""
initializer_names = [initializer.name for initializer in onnx_model.graph.initializer]
onnx_node_map = {(node.name, node.op_type): node for node in onnx_model.graph.node}
for module_name, module_ref in pt_model.named_modules():
if not isinstance(module_ref, tuple(onnx_subgraph_op_to_pytorch_module_param_name.keys())):
continue
for (node_suffix, op_type), replace_pairs in \
onnx_subgraph_op_to_pytorch_module_param_name[type(module_ref)].items():
node = onnx_node_map[module_name + node_suffix, op_type]
cls._replace_param_name(initializer_names, module_name, node, replace_pairs)
for index, initializer in enumerate(onnx_model.graph.initializer):
if initializer_names[index] != initializer.name:
initializer.name = initializer_names[index]
@classmethod
def _replace_param_name(cls, initializer_names: List[str], module_name: str,
node: onnx.NodeProto, replace_pairs: Dict[int, str]):
"""
helper method to replace parameter names at the corresponding input tensor index
:param initializer_names: List of model initializer names
:param module_name: PyTorch module name
:param node: Onnx node part of sub-graph that maps to the torch module
:param replace_pairs: dictionary of input tensor indices and param names
"""
for input_index, param_name in replace_pairs.items():
new_param_name = module_name + '.' + param_name
inp_tensor = node.input[input_index]
node.input.remove(inp_tensor)
node.input.insert(input_index, new_param_name)
initializer_index = initializer_names.index(inp_tensor)
initializer_names.remove(inp_tensor)
initializer_names.insert(initializer_index, new_param_name)
@classmethod
def _fix_param_names(cls, onnx_model):
"""
Parameter names have an additional level due to the name of the Marker module itself. This method removes that.
:param onnx_model: Onnx Model
"""
# Rename initializers
for ini in onnx_model.graph.initializer:
if 'marked_module' in ini.name:
name = ini.name
name = name.replace('marked_module.', '')
ini.name = name
# Change the references to initializers in each node
for node in onnx_model.graph.node:
indices_to_replace = []
for index, inp_tensor in enumerate(node.input):
if 'marked_module' in inp_tensor:
indices_to_replace.append(index)
for index in indices_to_replace:
param_name = node.input[index]
node.input.remove(param_name)
node.input.insert(index, param_name.replace('marked_module.', ''))
@classmethod
def _remove_detached_nodes_from_onnx_graph(cls, onnx_model):
"""
        Given an ONNX model, removes any detached nodes from the graph
:return: Updated onnx model
"""
marker_nodes = [node for node in onnx_model.graph.node if node.op_type == 'CustomMarker']
for node in marker_nodes:
onnx_model.graph.node.remove(node)
return onnx_model
@classmethod
def _set_onnx_node_names(cls, map_input_tensor_to_node, start_marker_map):
"""
Set names of the ONNX nodes using the identifier fields in the marker layers
:param map_input_tensor_to_node: Map of tensor to node consuming that tensor
:param start_marker_map: Map of start marker nodes in the ONNX graph
:return:
"""
def set_name_for_downstream_nodes(starting_nodes, name, depth):
for node in starting_nodes:
if node.op_type == 'CustomMarker': # Recursion end condition
return
if depth == 0:
node.name = name
else:
node.name = name + "#" + str(depth)
for tensor in node.output:
downstream_nodes = map_input_tensor_to_node.get(tensor, [])
set_name_for_downstream_nodes(downstream_nodes, name, depth + 1)
for node_name, markers in start_marker_map.items():
for marker in markers:
out_tensor = marker.output[0]
downstream_nodes = map_input_tensor_to_node.get(out_tensor, [])
set_name_for_downstream_nodes(downstream_nodes, node_name, 0)
@classmethod
def _create_map_of_marker_nodes(cls, onnx_model):
"""
Creates and returns maps of start and end marker nodes
:param onnx_model: Onnx model
:return: Map of end marker node, Map of start marker nodes
"""
start_marker_map = defaultdict(list)
end_marker_map = defaultdict(list)
for node in onnx_model.graph.node:
if node.op_type == 'CustomMarker':
identifier = node.attribute[0].s.decode()
is_start_marker = node.attribute[1].s.decode()
if is_start_marker == 'True':
start_marker_map[identifier].append(node)
else:
end_marker_map[identifier].append(node)
print(start_marker_map.keys())
print(end_marker_map.keys())
return end_marker_map, start_marker_map
@classmethod
def _create_onnx_model_with_markers(cls, dummy_input, pt_model, working_dir, onnx_export_args) -> onnx.ModelProto:
"""
Exports an onnx model with marker nodes inserted
:param dummy_input: Dummy input
:param pt_model: PyTorch model
:param working_dir: Working directory for storing the exported onnx model
:param onnx_export_args: override options for torch.onnx.export call
:return: Onnx model with marker layers
"""
model = copy.deepcopy(pt_model).cpu()
module_name_map = {}
for module_name, module_ref in model.named_modules():
if aimet_torch.utils.is_leaf_module(module_ref):
module_name_map[module_ref] = module_name
cls._add_markers(model, module_name_map)
temp_file = os.path.join(working_dir, 'temp_onnx_model_with_markers.onnx')
torch.onnx.export(model, dummy_input, temp_file, enable_onnx_checker=False, **onnx_export_args.kwargs)
onnx_model = onnx.load(temp_file)
return onnx_model
@classmethod
def _detach_start_marker_node(cls, map_input_tensor_to_node, map_output_tensor_to_node, start_marker):
"""
        Given an ONNX start_marker node, detach it from the graph
:param map_input_tensor_to_node: Map of tensor to node consuming the tensor
:param map_output_tensor_to_node: Map of tensor to node producing the tensor
:param start_marker: Reference to the ONNX node to detach
"""
assert len(start_marker.input) == 1
assert len(start_marker.output) == 1
input_tensor = start_marker.input[0]
output_tensor = start_marker.output[0]
for next_node in map_input_tensor_to_node[output_tensor]:
index = list(next_node.input).index(output_tensor)
next_node.input.remove(output_tensor)
next_node.input.insert(index, input_tensor)
map_input_tensor_to_node[input_tensor].append(next_node)
map_input_tensor_to_node[input_tensor].remove(start_marker)
del map_output_tensor_to_node[output_tensor] # No node should produce output tensor anymore
del map_input_tensor_to_node[output_tensor] # No node should consume output tensor anymore
start_marker.input.pop()
start_marker.output.pop()
@classmethod
def _detach_end_marker_node(cls, onnx_model, map_input_tensor_to_node, map_output_tensor_to_node, end_marker):
"""
        Given an ONNX end_marker node, detach it from the graph
:param onnx_model: ONNX model instance
:param map_input_tensor_to_node: Map of tensor to node consuming the tensor
:param map_output_tensor_to_node: Map of tensor to node producing the tensor
:param end_marker: Reference to the ONNX node to detach
"""
assert len(end_marker.input) == 1
assert len(end_marker.output) == 1
input_tensor = end_marker.input[0]
output_tensor = end_marker.output[0]
model_outputs = [output.name for output in onnx_model.graph.output]
if output_tensor in model_outputs:
# Degenerate case: somebody did a "return y, y" at the end of the model or something similar
for index, model_output in enumerate(model_outputs):
if model_output == output_tensor:
onnx_model.graph.output[index].name = input_tensor
else:
for next_node in map_input_tensor_to_node[output_tensor]:
index = list(next_node.input).index(output_tensor)
next_node.input.remove(output_tensor)
next_node.input.insert(index, input_tensor)
map_input_tensor_to_node[input_tensor].append(next_node)
map_input_tensor_to_node[input_tensor].remove(end_marker)
if not map_input_tensor_to_node[input_tensor]:
del map_input_tensor_to_node[input_tensor]
del map_output_tensor_to_node[output_tensor] # No node should produce output tensor anymore
if output_tensor in map_input_tensor_to_node:
del map_input_tensor_to_node[output_tensor] # No node should consume output tensor anymore
end_marker.input.pop()
end_marker.output.pop()
@staticmethod
def _set_output_names(onnx_model: onnx.ModelProto, desired_model_output_names,
map_output_tensor_to_node, map_input_tensor_to_node):
# Iterate over the model outputs
for index, output in enumerate(onnx_model.graph.output):
new_tensor = desired_model_output_names[index]
old_tensor = output.name
if old_tensor == new_tensor: # Nothing to do
continue
if old_tensor in map_input_tensor_to_node:
# Degenerate case: model output tensor also is an intermediate tensor that inputs into other nodes
for consumer in map_input_tensor_to_node[old_tensor]:
index = list(consumer.input).index(old_tensor)
consumer.input.remove(old_tensor)
consumer.input.insert(index, new_tensor)
if new_tensor not in map_input_tensor_to_node:
map_input_tensor_to_node[new_tensor] = []
map_input_tensor_to_node[new_tensor].append(consumer)
del map_input_tensor_to_node[old_tensor] # No node should consume old tensor anymore
producer = map_output_tensor_to_node[old_tensor]
output.name = new_tensor
index = list(producer.output).index(old_tensor)
producer.output.remove(old_tensor)
producer.output.insert(index, new_tensor)
del map_output_tensor_to_node[old_tensor]
map_output_tensor_to_node[new_tensor] = producer
# If there were duplicate outputs with the same name, they need to be updated
for output_node in onnx_model.graph.output:
# Ugly double loop - cannot avoid
if output_node.name == old_tensor:
output_node.name = new_tensor
@staticmethod
def _collate_io_tensors_for_multi_layer_recurrent_nodes(onnx_model: onnx.NodeProto,
node_to_io_tensor_name_map: Dict):
"""
Given an ONNX model and corresponding node-tensor map, consolidate multi-layer recurrent nodes
into single map entries
"""
recurrent_nodes = []
for node in onnx_model.graph.node:
if node.op_type in recurrent_onnx_optypes:
recurrent_nodes.append(node.name)
# Collection of recurrent nodes that includes only the first layer nodes
recurrent_root_nodes = [node for node in recurrent_nodes if '#' not in node]
for root_node in recurrent_root_nodes:
# Find nodes corresponding to all other layers of the recurrent node
other_layers = [node for node in recurrent_nodes if node.startswith(root_node + '#')]
# sort the other layers using the depth value following the '#'
other_layers = sorted(other_layers, key=lambda layer: int(layer.split('#')[1]))
# Append the io_tensors for all layers for the current root recurrent node, in order
io_tensor_list = [node_to_io_tensor_name_map[root_node]]
for layer in other_layers:
io_tensor_list.append(node_to_io_tensor_name_map[layer])
del node_to_io_tensor_name_map[layer]
node_to_io_tensor_name_map[root_node] = io_tensor_list
@classmethod
def get_onnx_node_to_io_tensor_names_map(cls, onnx_model: onnx.NodeProto) -> \
(Dict[str, Union[OpToIOTensors, List[OpToIOTensors]]], set):
"""
        Given an ONNX model, gets the input and output tensor names for each node in the model.
        If multiple onnx nodes have the same name, the nodes are provided as a list of input and output tensor
        names, one for each onnx node.
:param onnx_model: The ONNX model instance
:return: Dictionary of ONNX node name and corresponding input and output tensor names and a set with all valid
param names in model
"""
node_to_io_tensor_name_map = {}
valid_param_set = set()
initializer_names = {initializer.name for initializer in onnx_model.graph.initializer}
for node in onnx_model.graph.node:
if node.name:
onnx_node_io_tensors = OpToIOTensors(list(node.input), list(node.output))
if (node.name not in node_to_io_tensor_name_map) or node.op_type in recurrent_onnx_optypes:
node_to_io_tensor_name_map[node.name] = onnx_node_io_tensors
# update valid params list
for input_tensor in list(node.input):
if input_tensor in initializer_names:
valid_param_set.add(input_tensor)
cls._collate_io_tensors_for_multi_layer_recurrent_nodes(onnx_model, node_to_io_tensor_name_map)
return node_to_io_tensor_name_map, valid_param_set
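# --- Illustrative summary note (not part of the original module) ---
# Overall flow of the marker-based export implemented above:
#   1. _create_onnx_model_with_markers wraps leaf modules with CustomMarker layers and exports to ONNX
#   2. _create_map_of_marker_nodes collects the start/end markers per module identifier
#   3. _set_onnx_node_names propagates the module identifiers to the downstream ONNX nodes
#   4. _detach_start_marker_node / _detach_end_marker_node splice the markers out of the graph,
#      and _set_output_names restores the original model output names
#   5. _remove_detached_nodes_from_onnx_graph, _fix_param_names and _fix_initializer_names clean up
#   6. get_onnx_node_to_io_tensor_names_map exposes the node -> (inputs, outputs) mapping for consumers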
|
python
|
import os
import boto3
from boto3.dynamodb.conditions import Key
dynamodb = boto3.resource('dynamodb')
def put_atcoder_info(line_message_info):
TABLE = dynamodb.Table(os.environ["ATCODER_INFO_TABLE"])
for atcoder_id in line_message_info:
accepted_count = line_message_info[atcoder_id]["accepted_count"]
new_ac = line_message_info[atcoder_id]["new_ac"]
rated_point_sum = line_message_info[atcoder_id]["rated_point_sum"]
        #put only entries whose new_ac is non-zero (a value of 0 would leave the DB contents unchanged)
if(new_ac != 0):
TABLE.put_item(
Item={
"atcoder_id": atcoder_id,
"accepted_count": accepted_count,
"new_ac": new_ac,
"rated_point_sum": rated_point_sum
}
)
print("succeed put DB " + str(atcoder_id))
|
python
|
import struct
from logger import Logger
class ClRequestBase:
def __init__(self, payload):
self.message_id = struct.unpack("<B", payload[0:1])[0]
self.message_unique_id = struct.unpack("<H", payload[1:3])[0]
self.payload = payload
Logger.log("processing: " + type(self).__name__)
self.parse()
def parse(self):
""" OVERRIDE THIS TO IMPLEMENT """
        raise NotImplementedError
def execute(self, client_id):
""" OVERRIDE THIS TO IMPLEMENT """
        raise NotImplementedError
def response(self):
""" OVERRIDE THIS TO IMPLEMENT """
        raise NotImplementedError
def broadcast(self):
""" OVERRIDE THIS TO IMPLEMENT """
return None
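# --- Illustrative sketch (not part of the original module) ---
# A hypothetical subclass showing how the template methods above are meant to be
# overridden; the payload layout assumed here (one extra byte after the 3-byte
# header) is purely for demonstration.
class ClPingRequest(ClRequestBase):
    def parse(self):
        # assumed layout: message_id (1 byte), message_unique_id (2 bytes), value (1 byte)
        self.value = struct.unpack("<B", self.payload[3:4])[0]

    def execute(self, client_id):
        Logger.log("ping from client {} with value {}".format(client_id, self.value))

    def response(self):
        # echo the header fields and value back to the sender
        return struct.pack("<BHB", self.message_id, self.message_unique_id, self.value)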
|
python
|
from torch import nn
from torch.nn import functional as F
class QNet(nn.Module):
def __init__(self, input_channel=4, num_actions=18):
"""
Create a MLP Q network as described in DQN paper
"""
super(QNet, self).__init__()
self.conv1 = nn.Conv2d(input_channel, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(7*7*64, 512)
self.fc2 = nn.Linear(512, num_actions)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.fc1(x.flatten(start_dim=1)))
x = self.fc2(x)
return x
class DuelQNet(nn.Module):
def __init__(self, input_channel=4, num_actions=18):
"""
Create a Dueling Q network for atari
"""
super(DuelQNet, self).__init__()
self.conv1 = nn.Conv2d(input_channel, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(7*7*64, 512)
self.fc_a = nn.Linear(512, num_actions)
self.fc_v = nn.Linear(512, 1)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.fc1(x.flatten(start_dim=1)))
V = self.fc_v(x)
A = self.fc_a(x)
Q = V + (A - A.mean(dim=1).view(-1, 1))
return Q
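# --- Illustrative usage sketch (not part of the original module) ---
# Both networks expect stacked Atari frames of shape (batch, 4, 84, 84); after the
# three conv layers the spatial size shrinks to 7x7, matching the 7*7*64 fc1 input.
if __name__ == "__main__":
    import torch
    frames = torch.randn(2, 4, 84, 84)            # dummy batch of 4 stacked 84x84 frames
    q_net = QNet(input_channel=4, num_actions=6)
    duel_q_net = DuelQNet(input_channel=4, num_actions=6)
    print(q_net(frames).shape, duel_q_net(frames).shape)   # both -> torch.Size([2, 6])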
|
python
|
from typing import List, Callable
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from nlpretext.social.preprocess import (
remove_html_tags, remove_mentions, remove_emoji, remove_hashtag)
from nlpretext.basic.preprocess import normalize_whitespace, remove_eol_characters, fix_bad_unicode
class Preprocessor():
def __init__(
self):
"""
Initialize preprocessor object to apply all text transformation
"""
self.__operations = []
self.pipeline = None
def pipe(self, operation: Callable, args: dict = None):
"""
Add an operation and its arguments to pipe in the preprocessor
Parameters
----------
operation : callable
text preprocessing function
args : dict of arguments
"""
self.__operations.append({
'operation': operation,
'args': args
})
@staticmethod
def build_pipeline(operation_list: List[dict]) -> Pipeline:
"""
Build sklearn pipeline from a operation list
Parameters
----------
operation_list : iterable
list of __operations of preprocessing
Returns
-------
sklearn.pipeline.Pipeline
"""
return Pipeline(
steps=[
(
operation['operation'].__name__,
FunctionTransformer(operation['operation'], kw_args=operation['args'])
)
for operation in operation_list])
def run(self, text: str) -> str:
"""
Apply pipeline to text
Parameters
----------
text : string
text to preprocess
Returns
-------
string
"""
operations = self.__operations
if operations == []:
operations_to_pipe = (
remove_html_tags, remove_mentions, remove_emoji, remove_hashtag,
remove_eol_characters, fix_bad_unicode, normalize_whitespace
)
operations = [{'operation': operation, 'args': None} for operation in operations_to_pipe]
self.pipeline = self.build_pipeline(operations)
text = self.pipeline.fit_transform(text)
return text
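# --- Illustrative usage sketch (not part of the original module) ---
# Operations are applied in the order they are piped; with no piped operations,
# run() falls back to the default social-text cleaning chain defined above.
if __name__ == "__main__":
    preprocessor = Preprocessor()
    preprocessor.pipe(remove_mentions)
    preprocessor.pipe(normalize_whitespace)
    print(preprocessor.run("Hello   @user,   welcome!"))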
|
python
|
# produce list of genes in GRCm38
import pandas as pd
import json
# open refgene
refGeneFilename = '../gtex/gtex_mouse/refGene_mouse.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
# open biomart
biomartFilename = 'mart_export_mus_2.txt'
biomart = pd.read_csv(biomartFilename, sep="\t")
seen = {}
results = []
total_len = len(refGene)
for index, row in refGene.iterrows():
ensembl_id = row['name']
if ensembl_id not in seen:
the_loc = biomart.loc[biomart['Gene ID'] == ensembl_id]
gene_name = list(the_loc['Associated Gene Name'])[0]
entrez = list(the_loc['EntrezGene ID'])[0]
if pd.isnull(entrez):
entrez = ''
            print(ensembl_id, gene_name, 'has no entrez')
else:
entrez = str(int(entrez))
if pd.isnull(gene_name):
gene_name = ''
            print(ensembl_id, 'has no gene_name')
results.append({
'name': gene_name,
'ensembl_id': ensembl_id,
'entrez_id': entrez,
'description': ""
})
seen[ensembl_id] = True
with open('genes_list_GRCm38_processed.txt', 'w') as output:
json.dump(results, output)
with open('genes_list_GRCm38.txt', 'w') as output:
json.dump(results, output)
|
python
|
import numpy as np
def run_env(
env,
episode_count=100,
n_samples_per_omega=100,
policy=None,
grid=False,
omega_min=0,
omega_max=10,
bins=100,
total_n_samples=500,
):
"""
Simple runner, takes an environment, run a random policy and records everything
"""
if not grid:
inputs = np.zeros((bins * n_samples_per_omega, env.observation_space_size + 1))
targets = np.zeros((bins * n_samples_per_omega, env.observation_space_size))
i = 0
for omega in np.linspace(omega_min, omega_max, bins):
env.set_params(omega)
state = np.array(env.reset())
for t in range(n_samples_per_omega):
# sample one action from policy network or at random
if policy is None:
action = env.action_space.sample()
else:
action = policy.pi(state[np.newaxis, :], log=False)
if env.n_actions == 2:
action = action * 2 - 1
else:
action = action - 1
force = action * omega
# save the current state action in the training set
inputs[i, :] = np.hstack((state, force))
# observe the next state, reward etc
newState, reward, done, info = env.step(action)
newState = np.array(newState)
# compute the delta to be added in the target
delta = np.matrix((newState - state))
targets[i, :] = delta
state = newState
i += 1
if done:
state = np.array(env.reset())
env.close()
else:
low_pos, low_vel = env.low
high_pos, high_vel = env.high
# actions = np.random.randint(low=0, high=env.n_actions, size=timestep)# [1.0/env.n_actions]*env.n_actions)
actions = np.random.uniform(
low=-omega_max, high=omega_max, size=total_n_samples
)
positions = np.random.uniform(low=low_pos, high=high_pos, size=total_n_samples)
velocities = np.random.uniform(low=low_vel, high=high_vel, size=total_n_samples)
start_states = list(zip(positions, velocities, actions))
inputs = np.matrix(start_states)
next_states = list()
action = 1
for state in start_states:
x, x_dot, a = state
env.set_params(a)
newState, reward, done, info = env._step(action, (x, x_dot))
# append delta state
next_states.append(newState - np.array([x, x_dot]))
targets = np.matrix(next_states)
return inputs, targets
# # modify actions:
# if env.n_actions == 3:
# inputs[:,2] = inputs[:,2] - 1
# else:
# inputs[:,2] = 2*inputs[:,2] - 1
# subsampling
ind = np.arange(0, np.shape(inputs)[0])
selected_ind = np.random.choice(ind, size=total_n_samples, replace=True)
inputs = inputs[selected_ind, :]
targets = targets[selected_ind, :]
print("Collected data points: ", inputs.shape)
return inputs, targets
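# --- Illustrative usage sketch (not part of the original module) ---
# run_env expects a gym-style environment that additionally exposes set_params(omega),
# n_actions and observation_space_size; a typical random-policy data-collection call:
#
#     inputs, targets = run_env(env, bins=50, n_samples_per_omega=20,
#                               omega_min=0.5, omega_max=5.0, total_n_samples=500)
#     # inputs hold (state, force) rows and targets the corresponding one-step state deltas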
|
python
|
# http header
API_URL = 'https://www.okex.com'
CONTENT_TYPE = 'Content-Type'
OK_ACCESS_KEY = 'OK-ACCESS-KEY'
OK_ACCESS_SIGN = 'OK-ACCESS-SIGN'
OK_ACCESS_TIMESTAMP = 'OK-ACCESS-TIMESTAMP'
OK_ACCESS_PASSPHRASE = 'OK-ACCESS-PASSPHRASE'
ACEEPT = 'Accept'
COOKIE = 'Cookie'
LOCALE = 'Locale='
APPLICATION_JSON = 'application/json'
GET = "GET"
POST = "POST"
DELETE = "DELETE"
SERVER_TIMESTAMP_URL = '/api/general/v3/time'
# account
CURRENCIES_INFO = '/api/account/v3/currencies'
WALLET_INFO = '/api/account/v3/wallet'
CURRENCY_INFO = '/api/account/v3/wallet/'
COIN_TRANSFER = '/api/account/v3/transfer'
COIN_WITHDRAW = '/api/account/v3/withdrawal'
COIN_FEE = '/api/account/v3/withdrawal/fee'
COINS_WITHDRAW_RECORD = '/api/account/v3/withdrawal/history'
COIN_WITHDRAW_RECORD = '/api/account/v3/withdrawal/history/'
LEDGER_RECORD = '/api/account/v3/ledger'
TOP_UP_ADDRESS = '/api/account/v3/deposit/address'
COIN_TOP_UP_RECORDS = '/api/account/v3/deposit/history'
COIN_TOP_UP_RECORD = '/api/account/v3/deposit/history/'
# spot
SPOT_ACCOUNT_INFO = '/api/spot/v3/accounts'
SPOT_COIN_ACCOUNT_INFO = '/api/spot/v3/accounts/'
SPOT_LEDGER_RECORD = '/api/spot/v3/accounts/'
SPOT_ORDER = '/api/spot/v3/orders'
SPOT_ORDERS = '/api/spot/v3/batch_orders'
SPOT_REVOKE_ORDER = '/api/spot/v3/cancel_orders/'
SPOT_REVOKE_ORDERS = '/api/spot/v3/cancel_batch_orders/'
SPOT_ORDERS_LIST = '/api/spot/v3/orders'
SPOT_ORDERS_PENDING = '/api/spot/v3/orders_pending'
SPOT_ORDER_INFO = '/api/spot/v3/orders/'
SPOT_FILLS = '/api/spot/v3/fills'
SPOT_COIN_INFO = '/api/spot/v3/instruments'
SPOT_DEPTH = '/api/spot/v3/instruments/'
SPOT_TICKER = '/api/spot/v3/instruments/ticker'
SPOT_SPECIFIC_TICKER = '/api/spot/v3/instruments/'
SPOT_DEAL = '/api/spot/v3/instruments/'
SPOT_KLINE = '/api/spot/v3/instruments/'
# lever
LEVER_ACCOUNT = '/api/margin/v3/accounts'
LEVER_COIN_ACCOUNT = '/api/margin/v3/accounts/'
LEVER_LEDGER_RECORD = '/api/margin/v3/accounts/'
LEVER_CONFIG = '/api/margin/v3/accounts/availability'
LEVER_SPECIFIC_CONFIG = '/api/margin/v3/accounts/'
LEVER_BORROW_RECORD = '/api/margin/v3/accounts/'
LEVER_SPECIFIC_BORROW_RECORD = '/api/margin/v3/accounts/'
LEVER_BORROW_COIN = '/api/margin/v3/accounts/borrow'
LEVER_REPAYMENT_COIN = '/api/margin/v3/accounts/repayment'
LEVER_ORDER = '/api/margin/v3/orders'
LEVER_ORDERS = '/api/margin/v3/batch_orders'
LEVER_REVOKE_ORDER = '/api/margin/v3/cancel_orders/'
LEVER_REVOKE_ORDERS = '/api/margin/v3/cancel_batch_orders'
LEVER_ORDER_LIST = '/api/margin/v3/orders'
LEVEL_ORDERS_PENDING = '/api/margin/v3/orders_pending'
LEVER_ORDER_INFO = '/api/margin/v3/orders/'
LEVER_FILLS = '/api/margin/v3/fills'
FF = '/api/futures/v3/orders'
# future
FUTURE_POSITION = '/api/futures/v3/position'
FUTURE_SPECIFIC_POSITION = '/api/futures/v3/'
FUTURE_ACCOUNTS = '/api/futures/v3/accounts'
FUTURE_COIN_ACCOUNT = '/api/futures/v3/accounts/'
FUTURE_GET_LEVERAGE = '/api/futures/v3/accounts/'
FUTURE_SET_LEVERAGE = '/api/futures/v3/accounts/'
FUTURE_LEDGER = '/api/futures/v3/accounts/'
FUTURE_DELETE_POSITION = '/api/futures/v3/close_all_orders'
FUTURE_ORDER = '/api/futures/v3/order'
FUTURE_ORDERS = '/api/futures/v3/orders'
FUTURE_REVOKE_ORDER = '/api/futures/v3/cancel_order/'
FUTURE_REVOKE_ORDERS = '/api/futures/v3/cancel_batch_orders/'
FUTURE_ORDERS_LIST = '/api/futures/v3/orders'
FUTURE_ORDER_INFO = '/api/futures/v3/orders/'
FUTURE_FILLS = '/api/futures/v3/fills'
FUTURE_PRODUCTS_INFO = '/api/futures/v3/instruments'
FUTURE_DEPTH = '/api/futures/v3/instruments/'
FUTURE_TICKER = '/api/futures/v3/instruments/ticker'
FUTURE_SPECIFIC_TICKER = '/api/futures/v3/instruments/'
FUTURE_TRADES = '/api/futures/v3/instruments/'
FUTURE_KLINE = '/api/futures/v3/instruments/'
FUTURE_INDEX = '/api/futures/v3/instruments/'
FUTURE_RATE = '/api/futures/v3/rate'
FUTURE_ESTIMAT_PRICE = '/api/futures/v3/instruments/'
FUTURE_HOLDS = '/api/futures/v3/instruments/'
FUTURE_LIMIT = '/api/futures/v3/instruments/'
FUTURE_LIQUIDATION = '/api/futures/v3/instruments/'
FUTURE_MARK = '/api/futures/v3/instruments/'
HOLD_AMOUNT = '/api/futures/v3/accounts/'
#CURRENCY_LIST = '/api/futures/v3/instruments/currencies/'
# ETT
ETT_ACCOUNTS = '/api/ett/v3/accounts'
ETT_ACCOUNT = '/api/ett/v3/accounts/'
ETT_LEDGER = '/api/ett/v3/accounts/'
ETT_ORDER = '/api/ett/v3/orders'
ETT_REVOKE = '/api/ett/v3/orders/'
ETT_ORDER_LIST = '/api/ett/v3/orders'
ETT_SPECIFIC_ORDER = '/api/ett/v3/orders/'
ETT_CONSTITUENTS = '/api/ett/v3/constituents/'
ETT_DEFINE = '/api/ett/v3/define-price/'
# SWAP
SWAP_POSITIONS = '/api/swap/v3/position'
SWAP_POSITION = '/api/swap/v3/'
SWAP_ACCOUNTS = '/api/swap/v3/accounts'
SWAP_ACCOUNT = '/api/swap/v3/'
SWAP_ORDER = '/api/swap/v3/order'
SWAP_ORDERS = '/api/swap/v3/orders'
SWAP_CANCEL_ORDER = '/api/swap/v3/cancel_order/'
SWAP_CANCEL_ORDERS = '/api/swap/v3/cancel_batch_orders/'
SWAP_FILLS = '/api/swap/v3/fills'
SWAP_INSTRUMENTS = '/api/swap/v3/instruments'
SWAP_TICKETS = '/api/swap/v3/instruments/ticker'
SWAP_RATE = '/api/swap/v3/rate'
|
python
|
from django.conf.urls import url
from .views import classify
from .views import delete_conversation
app_name = "classification"
urlpatterns = [
url(r"^classify/$", classify, name="classify"),
url(r"^delete/$", delete_conversation, name="delete"),
]
|
python
|
from django.urls import path
from user.views import CreateUserView
from user.views import CreateTokenView
from user.views import ManageUserView
app_name = 'user'
urlpatterns = [
path(
'create/',
CreateUserView.as_view(),
name='create',
),
path(
'token/',
CreateTokenView.as_view(),
name='token',
),
path(
'me/',
ManageUserView.as_view(),
name='me',
),
]
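# --- Illustrative note (not part of the original module) ---
# With app_name = 'user', these routes are reversed through the namespace, e.g.:
#
#     from django.urls import reverse
#     reverse('user:create')   # resolves to the path this URLconf is mounted under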
|
python
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.template import loader
from django.core.mail import send_mail
class ContactForm(forms.Form):
subject = forms.CharField(label=_('Subject'), max_length=100)
message = forms.CharField(label=_('Message'), widget=forms.Textarea)
sender = forms.EmailField(label=_('Email address'), help_text=_('A valid e-mail address, please.'))
cc_myself = forms.BooleanField(label=_('Send a copy to yourself?'), required=False)
    def save(self, recipients=None):
        # avoid the mutable default argument: copy the caller's list (or start empty)
        # so repeated calls do not accumulate recipients
        recipients = list(recipients) if recipients else []
        if self.errors:
            raise ValueError("The ContactForm could not be saved because "
                             "the data didn't validate.")
        if self.cleaned_data['cc_myself']:
            recipients.append(self.cleaned_data['sender'])
body = loader.render_to_string('contact/contact_email.txt',
dict(self.cleaned_data, recipients=recipients))
send_mail(self.cleaned_data['subject'], body,
self.cleaned_data['sender'], recipients)
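# --- Illustrative usage sketch (not part of the original module) ---
# Typical view-level flow (the recipient address is a placeholder):
#
#     form = ContactForm(request.POST)
#     if form.is_valid():
#         form.save(recipients=['[email protected]'])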
|
python
|
from flask import Flask
from flask_cors import CORS
from .config import config
app = Flask(__name__)
app.secret_key = config['app']['secret_key']
dburi = 'postgresql://{username}:{password}@{host}:{port}/{database}'.format(**config['db'])
app.config.update(
{
'SQLALCHEMY_DATABASE_URI': dburi,
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
}
)
CORS(app, supports_credentials=True)
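# --- Illustrative note (not part of the original module) ---
# During development the app object created here can be served with Flask's CLI, e.g.:
#
#     FLASK_APP=<package_name> flask run
#
# (<package_name> is a placeholder for the package that contains this module)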
|
python
|
import tkinter as tk
import math
showString=''
def output(string):
global showString
showString=str(showString)+str(string)
displayLabel['text']=showString
def calculate():
global showString
showString=str(eval(showString))
displayLabel['text']=showString
def pi():
global showString
showString=''
output(math.pi)
def finde():
global showString
showString=''
output(math.e)
def findFact():
global showString
showString=math.factorial(int(showString))
displayLabel['text']=showString
def delete():
global showString
showString=showString[:-1]
displayLabel['text']=showString
def clearAll():
global showString
showString=''
displayLabel['text']=showString
def findLog():
global showString
showString=math.log(float(showString))
displayLabel['text']=showString
def sq(power):
global showString
showString=math.pow(float(showString),power)
displayLabel['text']=showString
def sin():
global showString
showString=float(showString)
showString=round(math.sin(showString),4)
showString=str(showString)
displayLabel['text']=showString
def cos():
global showString
showString=float(showString)
showString=round(math.cos(showString),4)
showString=str(showString)
displayLabel['text']=showString
def tan():
global showString
showString=float(showString)
showString=round(math.tan(showString),4)
showString=str(showString)
displayLabel['text']=showString
def cosec():
global showString
showString=float(showString)
showString=round(math.sin(showString),4)
showString=round((1/showString),4)
showString=str(showString)
displayLabel['text']=showString
def sec():
global showString
showString=float(showString)
showString=round(math.cos(showString),4)
showString=round(1/showString,4)
showString=str(showString)
displayLabel['text']=showString
def cot():
global showString
showString=float(showString)
showString=round(math.tan(showString),4)
showString=round(1/showString,4)
showString=str(showString)
displayLabel['text']=showString
root=tk.Tk()
root.title('Calculator')
root.minsize(width=250,height=250)
display=tk.Frame(root)
displayLabel=tk.Label(display,text=0,font='Verdana 15')
display.grid(row=0)
displayLabel.pack()
buttons=tk.Frame(root)
nineButton=tk.Button(buttons,text=9,width=4,background='DarkOrange1',command=lambda:output('9'))
nineButton.grid(row=3,column=3)
eightButton=tk.Button(buttons,text=8,width=4,background='DarkOrange1',command=lambda:output('8'))
eightButton.grid(row=3,column=2)
sevenButton=tk.Button(buttons,text=7,width=4,background='DarkOrange1',command=lambda:output('7'))
sevenButton.grid(row=3,column=1)
sixButton=tk.Button(buttons,text=6,width=4,background='DarkOrange1',command=lambda:output('6'))
sixButton.grid(row=4,column=3)
fiveButton=tk.Button(buttons,text=5,width=4,background='DarkOrange1',command=lambda:output('5'))
fiveButton.grid(row=4,column=2)
fourButton=tk.Button(buttons,text=4,width=4,background='DarkOrange1',command=lambda:output('4'))
fourButton.grid(row=4,column=1)
threeButton=tk.Button(buttons,text=3,width=4,background='DarkOrange1',command=lambda:output('3'))
threeButton.grid(row=5,column=3)
twoButton=tk.Button(buttons,text=2,width=4,background='DarkOrange1',command=lambda:output('2'))
twoButton.grid(row=5,column=2)
oneButton=tk.Button(buttons,text=1,width=4,background='DarkOrange1',command=lambda:output('1'))
oneButton.grid(row=5,column=1)
zeroButton=tk.Button(buttons,text=0,width=4,background='DarkOrange1',command=lambda:output('0'))
zeroButton.grid(row=6,column=2)
delButton=tk.Button(buttons,text='del',width=4,background='DarkOrange1',command=lambda:delete())
delButton.grid(row=6,column=3)
clearAllButton=tk.Button(buttons,text='CE',width=4,background='DarkOrange1',command=lambda:clearAll())
clearAllButton.grid(row=6,column=1)
addButton=tk.Button(buttons,text='+',width=4,background='DarkOrange1',command=lambda:output('+'))
addButton.grid(row=2,column=4)
subButton=tk.Button(buttons,text='-',width=4,background='DarkOrange1',command=lambda:output('-'))
subButton.grid(row=3,column=4)
divButton=tk.Button(buttons,text='/',width=4,background='DarkOrange1',command=lambda:output('/'))
divButton.grid(row=4,column=4)
multiplyButton=tk.Button(buttons,text='*',width=4,background='DarkOrange1',command=lambda:output('*'))
multiplyButton.grid(row=5,column=4)
remButton=tk.Button(buttons,text='%',width=4,background='DarkOrange1',command=lambda:output('%'))
remButton.grid(row=2,column=1)
calculateButton=tk.Button(buttons,text='=',width=4,background='DarkOrange1',command=lambda:calculate())
calculateButton.grid(row=6,column=4)
squareButton=tk.Button(buttons,text='x^2',width=4,background='DarkOrange1',command=lambda:sq(2))
squareButton.grid(row=2,column=3)
squareRootButton=tk.Button(buttons,text='x^1/2',width=4,background='DarkOrange1',command=lambda:sq(0.5))
squareRootButton.grid(row=2,column=2)
piButton=tk.Button(buttons,text='pi',width=4,background='DarkOrange1',command=lambda:pi())
piButton.grid(row=1,column=3)
eButton=tk.Button(buttons,text='e',width=4,background='DarkOrange1',command=lambda:finde())
eButton.grid(row=1,column=2)
factButton=tk.Button(buttons,text='n!',width=4,background='DarkOrange1',command=lambda:findFact())
factButton.grid(row=1,column=1)
logButton=tk.Button(buttons,text='ln',width=4,background='DarkOrange1',command=lambda:findLog())
logButton.grid(row=1,column=4)
sinButton=tk.Button(buttons,text='sin',width=4,background='DarkOrange1',command=sin)
sinButton.grid(row=0,column=1)
cosButton=tk.Button(buttons,text='cos',width=4,background='DarkOrange1',command=cos)
cosButton.grid(row=0,column=2)
tanButton=tk.Button(buttons,text='tan',width=4,background='DarkOrange1',command=tan)
tanButton.grid(row=0,column=3)
cosecButton=tk.Button(buttons,text='cosec',width=4,background='DarkOrange1',command=cosec)
cosecButton.grid(row=1,column=1)
secButton=tk.Button(buttons,text='sec',width=4,background='DarkOrange1',command=sec)
secButton.grid(row=1,column=2)
cotButton=tk.Button(buttons,text='cot',width=4,background='DarkOrange1',command=cot)
cotButton.grid(row=1,column=3)
powerButton=tk.Button(buttons,text='^(**)',width=4,background='DarkOrange1',command=lambda:output('**'))
powerButton.grid(row=0,column=4)
display.pack(anchor='center',pady=5)
buttons.pack()
root.mainloop()
|
python
|
class Solution:
def findSmallestSetOfVertices(self, n, edges):
ind = [0] * n
for e in edges:
ind[e[1]] += 1
return [i for i, d in enumerate(ind) if d == 0]
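# --- Illustrative usage sketch (not part of the original file) ---
# Vertices with indegree zero form the minimal set from which all nodes are reachable.
if __name__ == "__main__":
    edges = [[0, 1], [0, 2], [2, 5], [3, 4], [4, 2]]
    print(Solution().findSmallestSetOfVertices(6, edges))   # -> [0, 3]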
|
python
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'IPDB'
db.create_table('switch_ipdb', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('mac', self.gf('django.db.models.fields.CharField')(max_length=18)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('archived', self.gf('django.db.models.fields.BooleanField')(default=False)),
('updated', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('switch', ['IPDB'])
def backwards(self, orm):
# Deleting model 'IPDB'
db.delete_table('switch_ipdb')
models = {
'switch.ipdb': {
'Meta': {'object_name': 'IPDB'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'updated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'switch.macdb': {
'Meta': {'object_name': 'MacDB'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'port': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Port']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'switch.office': {
'Meta': {'object_name': 'Office'},
'office_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'})
},
'switch.physicalport': {
'Meta': {'ordering': "['name']", 'object_name': 'PhysicalPort'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'port': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Port']"}),
'remarks': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Room']", 'null': 'True', 'blank': 'True'})
},
'switch.port': {
'Meta': {'ordering': "['switch__switch_name', 'number']", 'unique_together': "(('switch', 'number'),)", 'object_name': 'Port'},
'default_vlan': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'switch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Switch']"}),
'vlan': ('django.db.models.fields.IntegerField', [], {'default': '3', 'null': 'True', 'blank': 'True'})
},
'switch.portlog': {
'Meta': {'ordering': "['-created']", 'object_name': 'PortLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Port']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'switch.room': {
'Meta': {'object_name': 'Room'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Office']"})
},
'switch.roomlog': {
'Meta': {'ordering': "['-created']", 'object_name': 'RoomLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Room']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'switch.switch': {
'Meta': {'object_name': 'Switch'},
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Office']"}),
'only_snmp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'snmp_community': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'switch_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'primary_key': 'True'}),
'switch_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'switch_password': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'switch_username': ('django.db.models.fields.CharField', [], {'default': "'admin'", 'max_length': '50'})
}
}
complete_apps = ['switch']
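# --- Illustrative note (not part of the original file) ---
# This South schema migration is applied from the project root with, e.g.:
#
#     python manage.py migrate switch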
|
python
|
import numpy as np, pandas as pd, os
from .. import *
from ..utils.utils_traj import unwrap_traj_and_center
from ..measure.compute_msd_simple import msd_fft
#simple routine for computation of individual mean squared displacements
# Programmer: Tim Tyree
# 7.20.2021
def compute_individual_mean_squared_displacement(df,dft1,dft2,DT,pid,pid_col,t_col='t',max_lagtime=None,**kwargs):
'''
Example Usage:
lagt_values,msd_values=compute_individual_mean_squared_displacement(df,dft1,dft2,DT,pid,pid_col='pid_explicit')
'''
#extract the trajectory as a DataFrame instance
t1=float(dft1[dft1.index==pid].values[0])
t2=float(dft2[dft2.index==pid].values[0])
# print(f"computing msd for particle {pid} from times {t1} to {t2} ms...")
#extract the trajectory as a DataFrame instance
boo = df[pid_col]==pid
boo&= df[t_col]>=t1
boo&= df[t_col]<=t2
dff=df[boo]
#extract r from dff
my_r=dff[['x','y']].values
msd_values=msd_fft(my_r)
lagt_values=DT*(np.arange(msd_values.shape[0]))
return lagt_values,msd_values
    #trackpy's scaling is unavoidably deprecated; the original trackpy-based version is kept below for reference
# fps = 1./DT #output time units is in same time units as inputs
# if max_lagtime is None:
# max_lagtime=dff.index.values.shape[0]
# # Input units are pixels and frames. Output units are microns and seconds.
# df_out=trackpy.motion.msd(
# traj=dff,
# mpp=1.,#does nothing
# fps=fps,
# max_lagtime=max_lagtime,
# detail=False
# )
# lagt_values,msd_values=df_out[['lagt','msd']].values.T
# return lagt_values,msd_values
def comp_each_mean_squared_displacement_particle(df,input_fn,DT,ds,width,
minimum_lifetime,crop_start_by,crop_end_by,
pid_col,t_col,max_lagtime=None,use_unwrap=False,
**kwargs):
DS = ds / width
height=width
# df = pd.read_csv(input_fn)
# DT = get_DT(df, t_col=t_col, pid_col=pid_col)
if use_unwrap is True:
#unwrap trajectories
pid_lst = sorted(set(df[pid_col].values))
        #(duplicates are filtered earlier in the full-model pipeline and are unnecessary in the particle model with explicit tracking); filter_duplicate_trajectory_indices is slow and could likely be accelerated with a vectorized pandas expression
# pid_lst_filtered = filter_duplicate_trajectory_indices(pid_lst,df)
df = pd.concat([
unwrap_traj_and_center(df[df[pid_col] == pid],
width=width,
height=height,
**kwargs) for pid in pid_lst
])
#compute t0 and tf for each particle
dft = df.groupby(pid_col)[t_col].describe()
dft0 = dft['min']
dftf = dft['max']
#compute t1 and t2 for each particle
dft1 = dft0 + crop_start_by
dft2 = dftf - crop_end_by
#get the list of particles dft2-dft1 \ge minimum_lifetime
dflifetime_considered = dft2 - dft1
pid_values_to_consider = dflifetime_considered[
dflifetime_considered >= minimum_lifetime].index.values
#compute number of num_individuals
# pid_lst=sorted(set(df[pid_col].values))
num_individuals = len(list(pid_values_to_consider))
# print(f'Computing msd values for {num_individuals} particles...')
#for each particle, set lagt equal to the zero'd time
event_id_lst = sorted(set(df[pid_col].values))
for pid in pid_values_to_consider:
boo = df[pid_col] == pid
tbirth = df.loc[boo, 't'].min()
df.loc[boo, 'lagt'] = df.loc[boo, 't'] - tbirth
df['msd'] = (df['x']**2 + df['y']**2) * DS**2
df['pid'] = df[pid_col]
df_msd = df[['pid', 'lagt', 'msd']].copy()
df_msd.dropna(inplace=True)
return df_msd
def comp_each_mean_squared_displacement(df,input_fn,DT,ds,width,
minimum_lifetime,crop_start_by,crop_end_by,
pid_col,t_col,max_lagtime=None,use_unwrap=False,
**kwargs):
'''
output is in length units of ds/width and duration units of DT.
computes the mean squared displacements for each trajectory listed in input_fn
input_fn gives the location of a trajectory file with columns x,y,frames, and some pid_col.
    The trajectories may have periodic boundary conditions on a square domain.
Example Usage:
input_fn=''
df_msd=comp_each_mean_squared_displacement(df,input_fn,DT,ds,width,
minimum_lifetime,crop_start_by,crop_end_by,
pid_col,t_col,max_lagtime=None,
**kwargs)
'''
height=width
DS=ds/width
if use_unwrap:
#unwrap trajectories
pid_lst = sorted(set(df[pid_col].values))
#(duplicates filtered earlier in full model pipeline. Unnecessary in particle model with explicit tracking... filter_duplicate_trajectory_indices is slow (and can probably be accelerated with a sexy pandas one liner)
# pid_lst = filter_duplicate_trajectory_indices(pid_lst,df)
df = pd.concat([unwrap_traj_and_center(df[df[pid_col]==pid], width=width, height=height, **kwargs) for pid in pid_lst])
#compute t0 and tf for each particle
dft=df.groupby(pid_col)[t_col].describe()
dft0=dft['min']
dftf=dft['max']
#compute t1 and t2 for each particle
dft1=dft0+crop_start_by
dft2=dftf-crop_end_by
#get the list of particles dft2-dft1 \ge minimum_lifetime
dflifetime_considered=dft2-dft1
pid_values_to_consider=dflifetime_considered[dflifetime_considered>=minimum_lifetime].index.values
#compute number of num_individuals
# pid_lst=sorted(set(df[pid_col].values))
num_individuals=len(list(pid_values_to_consider))
# print(f'Computing msd values for {num_individuals} particles...')
#how long does it take 1 core to compute the msd's for every particle in this trial?
lagt_out_lst=[];msd_out_lst=[];pid_out_lst=[]
for pid in pid_values_to_consider:
#compute output
lagt_values,msd_values=compute_individual_mean_squared_displacement(df,dft1,dft2,DT,pid,pid_col=pid_col)
        pid_values=np.full(msd_values.shape[0],pid) #one pid entry per lag time
#record output
pid_out_lst.extend(pid_values) #indices that identify the particles
lagt_out_lst.extend(lagt_values) #ms
msd_out_lst.extend(DS**2*msd_values) #units of ds
df_out=pd.DataFrame({'pid':pid_out_lst,'lagt':lagt_out_lst,'msd':msd_out_lst})
return df_out
def compute_each_mean_squared_displacement(input_fn,DT,ds,width,
minimum_lifetime,crop_start_by,crop_end_by,
pid_col,t_col,max_lagtime=None,use_unwrap=False,use_particle_avg=True,
**kwargs):
'''
computes the mean squared displacements for each trajectory listed in input_fn
input_fn gives the location of a trajectory file with columns x,y,frames, and some pid_col.
    The trajectories may have periodic boundary conditions on a square domain.
'''
df=pd.read_csv(input_fn)
if not use_particle_avg:
return comp_each_mean_squared_displacement(df,input_fn,DT,ds,width,
minimum_lifetime,crop_start_by,crop_end_by,
pid_col=pid_col,t_col=t_col,max_lagtime=max_lagtime,use_unwrap=use_unwrap,
**kwargs)
else:
return comp_each_mean_squared_displacement_particle(df,input_fn,DT,ds,width,
minimum_lifetime,crop_start_by,crop_end_by,
pid_col=pid_col,t_col=t_col,use_unwrap=use_unwrap,#max_lagtime=max_lagtime,
**kwargs)
def routine_compute_imsd(input_fn,save_folder=None,use_unwrap=False,**kwargs):
#compute results
df_msd=compute_each_mean_squared_displacement(input_fn,use_unwrap=use_unwrap,**kwargs)
#save results
folder_name=os.path.dirname(input_fn)
dirname = folder_name.split('/')[-1]
if save_folder is None:
save_folder = folder_name.replace(dirname,'msd')
if not os.path.exists(save_folder):
os.mkdir(save_folder)
os.chdir(save_folder)
output_fn=os.path.basename(input_fn).replace('.csv','_emsd.csv')
df_msd.to_csv(output_fn, index=False)
return os.path.abspath(output_fn)
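# --- Illustrative usage sketch (not part of the original module) ---
# Typical call for a trajectory csv with columns x, y, t and a particle id column
# (the file name and numeric values below are placeholders):
#
#     out_fn = routine_compute_imsd('trajectories.csv', DT=1.0, ds=5.0, width=200,
#                                   minimum_lifetime=50., crop_start_by=10., crop_end_by=10.,
#                                   pid_col='particle', t_col='t')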
|
python
|
import pickle
import requests
import streamlit as st
from requests.auth import HTTPBasicAuth
import os
import json
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
API_URL = "https://ml-api-phn4j6lmdq-uc.a.run.app"
BASIC_AUTH_USERNAME = os.getenv("BASIC_AUTH_USERNAME")
BASIC_AUTH_PASSWORD = os.getenv("BASIC_AUTH_PASSWORD")
# defining the function which will make the prediction using the data which the user inputs
def prediction(Gender, Married, ApplicantIncome, LoanAmount, Credit_History):
# Pre-processing user input
if Gender == "Masculino":
Gender = 0
else:
Gender = 1
if Married == "Solteiro":
Married = 0
else:
Married = 1
if Credit_History == "Com dividas":
Credit_History = 0
else:
Credit_History = 1
LoanAmount = LoanAmount / 1000
data = {
"Gender": Gender,
"Married": Married,
"ApplicantIncome": ApplicantIncome,
"LoanAmount": LoanAmount,
"Credit_History": Credit_History
}
    response = requests.post(API_URL + '/score', json=data, auth=HTTPBasicAuth(BASIC_AUTH_USERNAME, BASIC_AUTH_PASSWORD))
    # parse the returned score; this assumes the API responds with the bare 0/1 score as its JSON body
    prediction = response.json()
    if prediction == 0:
        pred = 'Rejected'
    else:
        pred = 'Approved'
return pred
# this is the main function in which we define our webpage
def main():
# front end elements of the web page
html_temp = """
<div style ="background-color:yellow;padding:13px">
<h1 style ="color:black;text-align:center;">Consulta de emprestimo</h1>
</div>
"""
# display the front end aspect
st.markdown(html_temp, unsafe_allow_html = True)
# following lines create boxes in which user can enter data required to make prediction
Gender = st.selectbox('Gender',("Masculino","Feminino"))
Married = st.selectbox('Marital Status',("Solteiro","Casado"))
ApplicantIncome = st.number_input("Renda Mensal")
LoanAmount = st.number_input("Valor Emprestimo")
Credit_History = st.selectbox('Credit_History',("Com dividas","Sem dividas"))
result =""
# when 'Predict' is clicked, make the prediction and store it
if st.button("Predict"):
result = prediction(Gender, Married, ApplicantIncome, LoanAmount, Credit_History)
st.success('Your loan is {}'.format(result))
if __name__=='__main__':
main()
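# --- Illustrative note (not part of the original file) ---
# This front end is meant to be launched with Streamlit (the file name is a placeholder):
#
#     streamlit run loan_app.py
#
# BASIC_AUTH_USERNAME / BASIC_AUTH_PASSWORD are read from a local .env via python-dotenv.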
|
python
|
#!/usr/bin/env python
"""
test functions for datacubes with raster labels
...
"""
import os
import shutil
import numpy as np
import rasterio
import json
from pathlib import Path
from icecube.bin.config import CubeConfig
from icecube.bin.labels_cube.labels_cube_generator import LabelsDatacubeGenerator
from icecube.bin.generate_cube import IceyeProcessGenerateCube
from icecube.bin.datacube import Datacube
from icecube.bin.datacube_variables import NAME_LABELS_BAND
res_abspath = os.path.join(Path(__file__).parent, "resources")
grd_raster_dir = os.path.join(res_abspath, "grd_stack")
cube_save_dir = os.path.join(res_abspath, "temp")
masks_raster_dir = os.path.join(res_abspath, "masks")
masks_labels_fpath = os.path.join(res_abspath, "labels/dummy_mask_labels.json")
cube_save_fpath = os.path.join(cube_save_dir, "temp.nc")
def create_run_time_masks_labels():
"""
    Generated masks contain absolute file paths according to the local system.
For github actions, dynamic generation must take place
"""
from icecube.bin.labels_cube.create_json_labels import CreateLabels
masks_names = [
"ICEYE_GRD_54549_20210427T215124_hollow_10x10pixels_fake_0.png",
"ICEYE_GRD_54549_20210427T215124_hollow_10x10pixels_fake_1.png",
"ICEYE_GRD_54549_20210427T215124_hollow_10x10pixels_fake_2.png",
]
raster_names = [
"ICEYE_GRD_54549_20210427T215124_hollow_10x10pixels_fake_0.tif",
"ICEYE_GRD_54549_20210427T215124_hollow_10x10pixels_fake_1.tif",
"ICEYE_GRD_54549_20210427T215124_hollow_10x10pixels_fake_2.tif",
]
masks_fpaths = [os.path.join(masks_raster_dir, fpath) for fpath in masks_names]
raster_mask_dict = {}
for raster_name, mask_fpath in zip(raster_names, masks_fpaths):
raster_mask_dict[raster_name] = mask_fpath
create_labels = CreateLabels("raster")
for product_name, mask_fpath in raster_mask_dict.items():
seg_mask = create_labels.create_instance_segmentation(mask_fpath)
create_labels.populate_labels(product_name, seg_mask)
create_labels.write_labels_to_json(masks_labels_fpath)
def delete_temporary_cube_dir(cube_dir):
shutil.rmtree(cube_dir)
def create_temporary_cube_dir(cube_dir):
if os.path.exists(cube_dir):
delete_temporary_cube_dir(cube_dir)
os.mkdir(cube_dir)
def read_json(json_fpath):
with open(json_fpath) as f:
return json.load(f)
def confirm_masks_values_in_cube(cube_save_fpath):
dc = Datacube().read_cube(cube_save_fpath)
assert dc.xrdataset[NAME_LABELS_BAND].attrs # make sure attributes exist
all_products = dc.get_all_products(dc.get_xrarray(NAME_LABELS_BAND))
valid_products = [
product_name for product_name in all_products if product_name != "None"
]
for product_file in valid_products:
mask_local_fpath = os.path.join(
masks_raster_dir, product_file.replace(".tif", ".png")
)
mask_values = rasterio.open(mask_local_fpath).read(1)
cube_mask_values = dc.get_product_values(
product_file, dc.get_xrarray(NAME_LABELS_BAND)
)
assert (
mask_values.all() == cube_mask_values.all()
), "mask values should be same in cube as well"
# Similarly create a check for "None" rasters too.
invalid_indices = [
i for i, product_name in enumerate(all_products) if product_name == "None"
]
gt_zeros = np.zeros((10, 10))
gt_np_nans = np.empty((10, 10))
gt_np_nans[:] = np.nan
for i in invalid_indices:
dummy_values = dc.get_index_values(i, dc.get_xrarray(NAME_LABELS_BAND))
if str(dummy_values.dtype) == "float32" or str(dummy_values.dtype) == "float64":
assert dummy_values.all() == gt_np_nans.all()
else:
assert dummy_values.all() == gt_zeros.all()
def get_product_labels_from_json(product_file, json_labels):
for _, raster_label in enumerate(json_labels):
if raster_label["product_file"] == product_file:
return raster_label["labels"]
raise ValueError(f"Could not find the labels for product_file: {product_file}")
def test_grd_masks_labels_default_config():
"""
Given default configuration of user, create segmentation masks.
"""
create_run_time_masks_labels()
product_type = "GRD"
cc = CubeConfig()
cc.load_config(None)
labels_datacube = LabelsDatacubeGenerator.build(
cc, product_type, masks_labels_fpath, grd_raster_dir
)
# test saving the cube and delete then.
create_temporary_cube_dir(cube_save_dir)
labels_datacube.to_file(cube_save_fpath)
confirm_masks_values_in_cube(cube_save_fpath)
delete_temporary_cube_dir(cube_save_dir)
def test_grd_masks_labels_custom_config():
"""
Given custom configuration of user, create segmentation masks.
"""
cube_config_fpath = os.path.join(res_abspath, "json_config/config_use_case4.json")
create_run_time_masks_labels()
product_type = "GRD"
cc = CubeConfig()
cc.load_config(cube_config_fpath)
labels_datacube = LabelsDatacubeGenerator.build(
cc, product_type, masks_labels_fpath, grd_raster_dir
)
create_temporary_cube_dir(cube_save_dir)
labels_datacube.to_file(cube_save_fpath)
dc = Datacube().read_cube(cube_save_fpath)
assert (
len(dc.get_all_products(dc.get_xrarray(NAME_LABELS_BAND))) == 1
), "Cannot have more than one images with given configuration"
confirm_masks_values_in_cube(cube_save_fpath)
delete_temporary_cube_dir(cube_save_dir)
def test_grd_masks_labels_custom_config2():
"""
Given custom configuration of user, create segmentation masks.
"""
cube_config_fpath = os.path.join(res_abspath, "json_config/config_use_case5.json")
create_run_time_masks_labels()
product_type = "GRD"
cc = CubeConfig()
cc.load_config(cube_config_fpath)
labels_datacube = LabelsDatacubeGenerator.build(
cc, product_type, masks_labels_fpath, grd_raster_dir
)
create_temporary_cube_dir(cube_save_dir)
labels_datacube.to_file(cube_save_fpath)
dc = Datacube().read_cube(cube_save_fpath)
assert (
len(dc.get_all_products(dc.get_xrarray(NAME_LABELS_BAND))) == 6
), "Must contain 3 products with given configuration"
confirm_masks_values_in_cube(cube_save_fpath)
delete_temporary_cube_dir(cube_save_dir)
def test_cube_generator_with_raster_labels():
"""
test end-end workflow with sample raster labels
"""
cube_config_fpath = os.path.join(res_abspath, "json_config/config_use_case4.json")
_ = IceyeProcessGenerateCube.create_cube(
grd_raster_dir, cube_config_fpath, masks_labels_fpath
)
def test_mask_dtype():
"""
Given custom configuration of user, create segmentation masks.
"""
cube_config_fpath = os.path.join(res_abspath, "json_config/config_use_case4.json")
product_type = "GRD"
cc = CubeConfig()
cc.load_config(cube_config_fpath)
labels_datacube = LabelsDatacubeGenerator.build(
cc, product_type, masks_labels_fpath, grd_raster_dir
)
assert str(labels_datacube.xrdataset[NAME_LABELS_BAND].dtype) == "uint8"
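# --- Illustrative note (not part of the original file) ---
# These tests are written for pytest and can be run from the repository root, e.g.:
#
#     pytest -q <path_to_this_test_file>
#
# (<path_to_this_test_file> is a placeholder for wherever this module lives in the repo)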
|
python
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
class LSCnew(object):
def __init__(self, W,A,L,Bd,dBt):
self.W = W
self.A = A
self.L = L
self.Bd = Bd
self.dBt = dBt
self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
def create(self, pc):
self.diag = None
kspLAMG = PETSc.KSP()
kspLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspLAMG.getPC()
kspLAMG.setType('preonly')
pc.setType('lu')
# pc.setFactorSolverPackage("pastix")
OptDB = PETSc.Options()
OptDB['pc_factor_shift_amount'] = .1
OptDB['pc_factor_mat_ordering_type'] = 'rcm'
OptDB['pc_factor_mat_solver_package'] = 'mumps'
# kspLAMG.setFromOptions()
# kspLAMG.max_it = 1
kspLAMG.setFromOptions()
self.kspLAMG = kspLAMG
# print kspLAMG.view()
nsp = PETSc.NullSpace().create(constant=True)
kspLAMG.setNullSpace(nsp)
kspNLAMG = PETSc.KSP()
kspNLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspNLAMG.getPC()
kspNLAMG.setType('preonly')
pc.setType('lu')
# pc.setFactorSolverPackage("pastix")
# kspNLAMG.max_it = 1
kspNLAMG.setFromOptions()
kspLAMG.setFromOptions()
self.kspNLAMG = kspNLAMG
# print kspNLAMG.view()
def setUp(self, pc):
# self.P = P
F = self.A.getSubMatrix(self.u_is,self.u_is)
self.Bt = self.A.getSubMatrix(self.u_is,self.p_is)
self.kspNLAMG.setOperators(F)
self.P = self.Bd*F*self.dBt
self.kspLAMG.setOperators(self.L)
def apply(self, pc, x, y):
# print 1000
# self.kspLAMG.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
yOut = y2.duplicate()
# self.kspNLAMG.solve(x1, y1)
self.kspLAMG.solve(-x2, y2)
yy2 = self.P*y2
self.kspLAMG.solve(yy2, yOut)
x1 = x1 - self.Bt*yOut
self.kspNLAMG.solve(x1, y1)
y.array = (np.concatenate([y1.array, yOut.array]))
class LSC(object):
def __init__(self, W,A,P,L):
self.W = W
self.A = A
self.P = P
self.L = L
self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
def create(self, pc):
self.diag = None
kspLAMG = PETSc.KSP()
kspLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspLAMG.getPC()
kspLAMG.setType('preonly')
pc.setType('lu')
# pc.setFactorSolverPackage("pastix")
kspLAMG.max_it = 1
kspLAMG.setFromOptions()
self.kspLAMG = kspLAMG
# print kspLAMG.view()
kspNLAMG = PETSc.KSP()
kspNLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspNLAMG.getPC()
kspNLAMG.setType('preonly')
pc.setType('lu')
# pc.setFactorSolverPackage("pastix")
# kspNLAMG.max_it = 1
kspNLAMG.setFromOptions()
self.kspNLAMG = kspNLAMG
# print kspNLAMG.view()
def setUp(self, pc):
# self.P = P
F = self.A.getSubMatrix(self.u_is,self.u_is)
self.Bt = self.A.getSubMatrix(self.u_is,self.p_is)
B = self.A.getSubMatrix(self.p_is,self.u_is)
Q = self.P.getSubMatrix(self.u_is,self.u_is)
self.kspNLAMG.setOperators(F)
Pdiag = Q.getVecLeft()
Q.getDiagonal(Pdiag)
ones,invDiag = Q.getVecs()
ones.set(1)
invDiag.pointwiseDivide(ones,Pdiag)
invDiag = Pdiag
        print(F.view())
F.diagonalScale(invDiag)
self.Bt.diagonalScale(invDiag)
# self.PP =PETSc.Mat().create()
# self.PP.setSizes([self.W.sub(0).dim(),self.W.sub(0).dim()])
# FBt =PETSc.Mat().create()
# FBt.setSizes([self.W.sub(1).dim(),self.W.sub(0).dim()])
# self.P1 =PETSc.Mat().create()
# self.P.setSizes([self.W.sub(0).dim(),self.W.sub(0).dim()])
FBt = F.matMult(self.Bt)
self.P1 = B.matMult(self.Bt)
self.PP = B.matMult(self.Bt)
self.P1 = B*F*self.Bt
self.PP = B*self.Bt
self.kspLAMG.setOperators(self.PP)
def apply(self, pc, x, y):
# self.kspLAMG.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
yOut = y2.duplicate()
self.kspNLAMG.solve(x1, y1)
self.kspLAMG.solve(x2, y2)
yy2 = self.P1*y2
self.kspLAMG.solve(yy2, yOut)
# y1 = y1 - self.Bt*yOut
y.array = (np.concatenate([y1.array, yOut.array]))
class PCD(object):
def __init__(self, W, Q,F,L):
self.W = W
self.Q = Q
self.F = F
self.L = L
self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
def create(self, pc):
self.diag = None
kspLAMG = PETSc.KSP()
kspLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspLAMG.getPC()
kspLAMG.setType('richardson')
pc.setType('hypre')
# pc.setFactorSolverPackage("pastix")
# OptDB = PETSc.Options()
# OptDB['pc_factor_shift_amount'] = .1
# OptDB['pc_factor_mat_ordering_type'] = 'rcm'
# OptDB['pc_factor_mat_solver_package'] = 'umfpack'
kspLAMG.max_it = 1
kspLAMG.setFromOptions()
self.kspLAMG = kspLAMG
# print kspLAMG.view()
kspNLAMG = PETSc.KSP()
kspNLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspNLAMG.getPC()
kspNLAMG.setType('richardson')
pc.setType('hypre')
# pc.setFactorSolverPackage("pastix")
kspNLAMG.max_it = 1
kspNLAMG.setFromOptions()
self.kspNLAMG = kspNLAMG
# print kspNLAMG.view()
kspQCG = PETSc.KSP()
kspQCG.create(comm=PETSc.COMM_WORLD)
pc = kspQCG.getPC()
kspQCG.setType('cg')
pc.setType('jacobi')
# pc.setType('icc')
# pc.setFactorSolverPackage("pastix")
# kspQCG.max_it = 4
kspQCG.setFromOptions()
self.kspQCG = kspQCG
def setUp(self, pc):
A, P, flag = pc.getOperators()
# self.P = P
self.Bt = P.getSubMatrix(self.u_is,self.p_is)
F = P.getSubMatrix(self.u_is,self.u_is)
del A, P
self.kspNLAMG.setOperators(F)
self.kspLAMG.setOperators(self.L)
self.kspQCG.setOperators(self.Q)
def apply(self, pc, x, y):
# self.kspLAMG.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
yOut = y2.duplicate()
self.kspLAMG.solve(x2, y2)
yy2 = self.F*y2
self.kspQCG.solve(yy2, yOut)
x1 = x1 - self.Bt*yOut
self.kspNLAMG.solve(x1, y1)
y.array = (np.concatenate([y1.array, yOut.array]))
class PCDdirect(object):
def __init__(self, W, Q,F,L):
self.W = W
self.Q = Q
self.F = F
self.L = L
self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
def create(self, pc):
self.diag = None
kspLAMG = PETSc.KSP()
kspLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspLAMG.getPC()
kspLAMG.setType('preonly')
pc.setType('cholesky')
# pc.setFactorSolverPackage("pastix")
OptDB = PETSc.Options()
OptDB['pc_factor_shift_amount'] = .1
OptDB['pc_factor_mat_ordering_type'] = 'rcm'
OptDB['pc_factor_mat_solver_package'] = 'mumps'
# kspLAMG.max_it = 1
kspLAMG.setFromOptions()
self.kspLAMG = kspLAMG
# print kspLAMG.view()
kspNLAMG = PETSc.KSP()
kspNLAMG.create(comm=PETSc.COMM_WORLD)
pc = kspNLAMG.getPC()
kspNLAMG.setType('preonly')
pc.setType('lu')
# pc.setFactorSolverPackage("pastix")
# kspNLAMG.max_it = 1
kspNLAMG.setFromOptions()
self.kspNLAMG = kspNLAMG
# print kspNLAMG.view()
kspQCG = PETSc.KSP()
kspQCG.create(comm=PETSc.COMM_WORLD)
pc = kspQCG.getPC()
kspQCG.setType('preonly')
pc.setType('lu')
# pc.setType('icc')
# pc.setFactorSolverPackage("pastix")
# kspQCG.max_it = 4
kspQCG.setFromOptions()
self.kspQCG = kspQCG
def setUp(self, pc):
A, P, flag = pc.getOperators()
# self.P = P
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
F = A.getSubMatrix(self.u_is,self.u_is)
self.kspNLAMG.setOperators(F)
self.kspLAMG.setOperators(self.L)
self.kspQCG.setOperators(self.Q)
def apply(self, pc, x, y):
# self.kspLAMG.setOperators(self.B)
x1 = x.getSubVector(self.u_is)
y1 = x1.duplicate()
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
yOut = y2.duplicate()
self.kspLAMG.solve(x2, y2)
yy2 = self.F*y2
self.kspQCG.solve(yy2, yOut)
x1 = x1 - self.Bt*yOut
self.kspNLAMG.solve(x1, y1)
y.array = (np.concatenate([y1.array, yOut.array]))
# print y.array
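

# --- Usage sketch (added for illustration, not part of the original solver) ---
# The classes above are contexts for PETSc "python"-type preconditioners.  A
# minimal, hypothetical way to attach one of them with petsc4py is shown below;
# `A_mat`, `W`, `Q`, `F`, `L` stand for the assembled system matrix, the mixed
# function space and the auxiliary operators that the surrounding code builds.
def attach_pcd_preconditioner(A_mat, W, Q, F, L):
    ksp = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
    ksp.setOperators(A_mat)
    pc = ksp.getPC()
    pc.setType('python')              # delegate the preconditioner action to Python
    pc.setPythonContext(PCDdirect(W, Q, F, L))
    ksp.setFromOptions()
    return ksp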
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from flask import Blueprint, jsonify, redirect, render_template, request, url_for
from gator import app, db
from gator.models import Media, News
import time
# create blueprint
core = Blueprint("core", __name__, template_folder="templates")
@core.route("/")
@core.route("/lastnews/")
@core.route("/lastnews/<string:delta>/")
def index(delta=None):
if delta == "today":
end_time = datetime.now()
start_time = end_time.replace(hour=0, minute=0, second=0, microsecond=0)
elif delta == "yesterday":
end_time = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start_time = end_time - timedelta(days=1)
elif delta == "week":
end_time = datetime.now()
start_time = end_time.replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=7)
else:
end_time = datetime.now()
start_time = end_time - timedelta(days=1)
news = News.objects(created_at__gt=start_time, created_at__lte=end_time)\
.order_by("-shares__count")[:(app.config["LINKS_PER_PAGE"] * 2)]
if request.is_xhr:
return jsonify(news=news, delta=delta)
else:
return render_template("lastnews.html", news=news, delta=delta)
@core.route("/timeline/")
@core.route("/timeline/<int:stamp>/")
def timeline(stamp=None):
page = int(request.args.get("page", 1))
search = request.args.get("search", "")
if not stamp:
news = News.objects()
else:
if page == 1:
news = News.objects(created_at__gt=datetime.utcfromtimestamp(stamp))
else:
news = News.objects(created_at__lte=datetime.utcfromtimestamp(stamp))
if search:
news = news.filter(text__icontains=search)
news = news.paginate(page=page, per_page=app.config["LINKS_PER_PAGE"])
stamp = time.time()
if request.is_xhr:
return jsonify(news=news.items, search=search, stamp=stamp)
else:
return render_template("timeline.html", news=news.items, search=search, stamp=stamp)
@core.route("/status/")
def status():
medialist = Media.objects.all()
return render_template("status.html", medialist=medialist)
|
python
|
"""
Python 3.9.10 (tags/v3.9.10:f2f3f53, Jan 17 2022, 15:14:21) [MSC v.1929 64 bit (AMD64)] on win32
Данный модуль отвечает за детекцию движения
"""
import cv2 # Импортируем модуль OpenCV
import time
import os
def corrector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,
lab_o_proc, window, frame_shift, play_speed, but_start, but_pause):
"""Данная функция восстанавливает файл с поврежденной временной шкалой и запускает детектор.
name_file - Имя файла, который передается в обработку
play_speed - Скорость воспроизведения (Пока не работает)
chk_video_det - Флаг отображения окна воспроизведения при поиске
xy_coord - Список координат зоны поиска
frame_zoom - Коэффициент сжатия видео при отображении
size_detect - Размер детектируемого объекта
lab_o_proc - Ссылка на метку для отображения прогресса
window - Ссылка на окно
frame_shift - Сдвиг фреймов при обнаружении движения
play_speed - Пропуск фреймов для ускорения
but_start - Кнопка Старт
but_pause - Кнопка Пауза
"""
if os.path.exists("ffmpeg.exe"):
os.system(f'ffmpeg -i "{name_file}" -map 0:v -vcodec copy -bsf:v h264_mp4toannexb -y "{name_file[:-4]}_source-video.h264"')
os.system(f'ffmpeg -fflags +genpts -r 25 -i "{name_file[:-4]}_source-video.h264" -vcodec copy -y "{name_file[:-4]}_recovered.avi"')
os.remove(f'{name_file[:-4]}_source-video.h264')
return detector(f'{name_file[:-4]}_recovered.avi', chk_video_det, xy_coord, frame_zoom, size_detect,
lab_o_proc, window, frame_shift, play_speed, but_start, but_pause)
else:
return 'Ffmpeg'
def detector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,
lab_o_proc, window, frame_shift, play_speed, but_start, but_pause) -> str:
"""Данная функция производит поиск движения в заданной области, в текущем файле.
name_file - Имя файла, который передается в обработку
chk_video_det - Флаг отображения окна воспроизведения при поиске
xy_coord - Список координат зоны поиска
frame_zoom - Коэффициент сжатия видео при отображении
size_detect - Размер детектируемого объекта
lab_o_proc - Ссылка на метку для отображения прогресса
window - Ссылка на окно
frame_shift - Сдвиг фреймов при обнаружении движения
play_speed - Пропуск фреймов для ускорения
but_start - Кнопка Старт
but_pause - Кнопка Пауза
"""
if but_start['text'] == 'Старт':
return "OK"
    none_frame: int = 0  # counter used to track empty frames
    start_detect = time.time()  # time when processing of the video file started
    cap = cv2.VideoCapture(name_file)  # open the video file
    # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'))
    off_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # total number of frames
    frame_width_det = (cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # size of the source video
    frame_height_det = (cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    output = cv2.VideoWriter(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:],
                             cv2.VideoWriter_fourcc('H', '2', '6', '4'), 20,
                             (int(frame_width_det), int(frame_height_det)))  # output codec options: MJPG PIM1 XVID
    if chk_video_det:
        cv2.namedWindow(name_file, 0)  # create the output window
        _, x_win, y_win = window.geometry().split('+')
        cv2.moveWindow(name_file, int(x_win)+350, int(y_win))
    while True:  # frames are processed in a loop
        if but_pause['text'] == 'Продолжить':
            cap.release()
            output.release()
            cv2.destroyAllWindows()
            return 'Pause'
        if but_start['text'] == 'Старт':
            cap.release()
            output.release()
            cv2.destroyAllWindows()
            break
        ret1, frame1 = cap.read()
        # This offset groups the outline of a moving object together
        for _ in range(frame_shift):
            cap.read()
        ret2, frame2 = cap.read()
        # This offset is used to speed up processing
        for _ in range(play_speed):
            cap.read()
        if cap.get(cv2.CAP_PROP_POS_FRAMES) == off_frames:
            break
        if not ret1 * ret2:
            none_frame += 1
            if none_frame > 10:
                print('The allowed number of empty frames was exceeded. File recovery started.')
                output.release()  # close the output file
                cv2.destroyAllWindows()
                os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')  # and delete it
                return 'Correct'  # flag that file recovery has to be run
            continue
        # frame1=frame1[y1_search:y2_search,x1_search:x2_search]  # crop the frame to the required size; may be useful
        # frame2=frame2[y1_search:y2_search,x1_search:x2_search]
        # Report progress as a percentage
        lab_o_proc["text"] = str(cap.get(cv2.CAP_PROP_POS_FRAMES) * 100 // off_frames + 1) + " %"
        window.update()  # refresh the window so the progress is drawn
        if ret2:
            if chk_video_det:
                # Visualize the processed frames
                frame1 = algorithm_detector_1(frame1, frame2, xy_coord, frame_zoom, size_detect, output)
                cv2.imshow(name_file, frame1)
                cv2.resizeWindow(name_file, int(frame_width_det) // 2,
                                 int(frame_height_det) // 2)  # set the size of the output window
        else:
            break
        if chk_video_det and cv2.getWindowProperty(name_file, 1) == 1:  # exit when the window is closed
            break
        if cv2.waitKey(2) == 27:  # exit on ESC
            break
    cap.release()
    output.release()
    # Check how many frames were saved
    output = cv2.VideoCapture(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:])
    frames_output = int(output.get(cv2.CAP_PROP_FRAME_COUNT))
    output.release()
    cv2.destroyAllWindows()
    if frames_output == 0:  # if no frames were saved, delete the output file
        os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')
    end_detect = time.time()  # time when processing of the video file finished
    # Print the time spent processing the file
    print(name_file, '->', str(time.strftime("%M:%S", time.localtime(end_detect - start_detect))))
    return 'OK'
def algorithm_detector_1(frame1, frame2, xy_coord: list, frame_zoom: int, size_detect: int, output):
x1_search = xy_coord[0][0] * frame_zoom
y1_search = xy_coord[0][1] * frame_zoom
x2_search = xy_coord[1][0] * frame_zoom
y2_search = xy_coord[1][1] * frame_zoom
    # Process the video frames to detect motion
    diff_frame = cv2.absdiff(frame1, frame2)  # subtract one frame from the other
    gray_frame = cv2.cvtColor(diff_frame, cv2.COLOR_BGR2GRAY)  # convert the difference to grayscale
    blur_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)  # filter out spurious contours
    _, thresh_frame = cv2.threshold(blur_frame, 20, 255,
                                    cv2.THRESH_BINARY)  # highlight the object edges in white:
    # any value above 20 becomes white (255)
    dilated_frame = cv2.dilate(thresh_frame, None, iterations=3)  # grow the white regions
    '''
    dilation is the opposite of erosion(), i.e. of eroding the object,
    and expands the region highlighted in the previous step
    '''
    contours, _ = cv2.findContours(dilated_frame, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)  # (cv2.RETR_TREE also possible) extract the contour points
    cv2.rectangle(frame1, (x1_search, y1_search), (x2_search, y2_search), (255, 0, 0), 2)  # search area
for contour in contours:
(x, y, w, h) = cv2.boundingRect(
contour)
        '''
        boundingRect converts the contour array from the previous step into a
        tuple of four coordinates; contourArea() could likewise be applied to the
        contour points to get the area of the detected object at each moment
        '''
        if (w * h) < ((x2_search - x1_search) * (y2_search - y1_search) * int(size_detect) // 100):
            continue
        if not (x + w > x1_search and x < x2_search and y + h > y1_search and y < y2_search):
            continue
        output.write(frame2)  # write the unmodified frame
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)  # draw the rectangle built from the tuple
        # Draw a red dot
        # cv2.circle(frame1, (int(frame_width_det) - 50, int(frame_height_det) - 40), 10, (0, 0, 255),-1)
        # The object contour could also simply be drawn instead
        # cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)
return frame1
|
python
|
class Const(object):
'''
Bibliography:
[1] VideoRay Example Code [Online]
Available: https://github.com/videoray/Thruster/blob/master/thruster.py
'''
# VRCSR protocol defines
sync_request = 0x5ff5
sync_response = 0x0ff0
protocol_vrcsr_header_size = 6
protocol_vrcsr_xsum_size = 4
# CSR address for sending an application specific custom command
addr_custom_command = 0xf0
propulsion_command = 0xaa
# Flag for the standard thruster response which contain
response_thruster_standard = 0x2
response_ask_nothing = 0x00
# Standard response is the device type followed by 4 32-bit floats and 1 byte
response_thruster_standard_length = 1 + 4 * 4 + 1
thrust_response_length = (
protocol_vrcsr_header_size +
protocol_vrcsr_xsum_size +
response_thruster_standard_length +
protocol_vrcsr_xsum_size
)
    # Add the payload size of the specific response to this base length
response_normal_length = (
protocol_vrcsr_header_size +
protocol_vrcsr_xsum_size +
protocol_vrcsr_xsum_size
)
# TODO: Get R/W flags
csr_address = {
'undervoltage_trigger': (0xa5, 1),
'overvoltage_trigger': (0xa6, 1),
'overcurrent_trigger': (0xa7, 1),
'temp_trigger': (0xa8, 1),
'stall_count_max': (0xa9, 1),
'fault_control': (0xa4, 1),
'fault': (0x14, 4),
'save_settings': (0xee, 2),
'undervoltage_err_cnt': (0xac, 4),
'overvoltage_err_cnt': (0xb0, 4),
'overcurrent_err_cnt': (0xb4, 4),
'temp_err_cnt': (0xb8, 4),
'stall_err_cnt': (0xbc, 4),
}
format_char_map = {
1: 'B', # unsigned char integer 1
2: 'H', # unsigned short integer 2
4: 'I', # unsigned int integer 4
8: 'Q', # unsigned long long integer 8
}
if __name__ == '__main__':
addr, size = Const.csr_address['stall_err_cnt']
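    # Illustrative addition: pick the struct format character that matches the
    # register size so a response payload could be decoded.  Little-endian byte
    # order is assumed here purely for the example; the VRCSR framing around the
    # payload is not shown.
    import struct
    fmt = '<' + Const.format_char_map[size]
    print('stall_err_cnt CSR address: 0x%02x, struct format: %s' % (addr, fmt))
    print('example decode:', struct.unpack(fmt, b'\x01\x00\x00\x00')[0])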
|
python
|
src_l = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]
res_for_check = [23, 1, 3, 10, 4, 11]
res = [el for el in src_l if src_l.count(el) == 1]
print(res == res_for_check)
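
# A linear-time alternative (illustrative): count every element once with
# collections.Counter instead of calling list.count() inside the comprehension.
from collections import Counter
counts = Counter(src_l)
res_counter = [el for el in src_l if counts[el] == 1]
print(res_counter == res_for_check)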
|
python
|
#!/usr/bin/env python
# coding: utf-8
# In[27]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
from Funcoes_Bib import splitPlusMinus
df = pd.read_excel(r'C:\Users\observer\Desktop\Ensaios_e_Caracterizacoes\Planilhas\Ganho_EM\HSS10MHz\Propagado.xlsm')
columns = pd.DataFrame(df)
gainR1 = columns['EM Gain'][1:11]
noiseR1 = columns['Noise (ADU)'][1:11]
medianR1 = np.median(noiseR1)
errorR1 = columns['Error (ADU)'][1:11]#np.median(abs(GAIN['noise'] - median))/0.67449
#print temp['value'], temp['noise'], temp['error']
# Build a straight-line model
x1 = np.linspace(10,300,100)
f1 = lambda x, a, b: a*x + b
# Fit each regime with this model
R1popt, R1pcov = curve_fit(f1, gainR1, noiseR1)
residuos1 = noiseR1 - f1(gainR1, R1popt[0],R1popt[1])
gainR2 = columns['EM Gain'][0:2]
noiseR2 = columns['Noise (ADU)'][0:2]
medianR2 = np.median(noiseR2)
errorR2 = columns['Error (ADU)'][0:2]
# Build the model for the second regime (same straight line)
x2 = np.linspace(2,10,50)
# Fit each regime with this model
R2popt, R2pcov = curve_fit(f1, gainR2, noiseR2)
residuos2 = noiseR2 - f1(gainR2, R2popt[0],R2popt[1])
# In[26]:
fontsize = 14
fig = plt.figure(figsize=(14, 4))
ax = fig.add_subplot(121)
ax.errorbar(gainR1, noiseR1, errorR1, marker='o', c='blue',linewidth=1.0)
ax.errorbar(gainR2, noiseR2, errorR2, marker='o', c='blue',linewidth=1.0)
ax.plot(x1, f1(x1,R1popt[0],R1popt[1]), '--', c='red')
ax.plot(x2, f1(x2,R2popt[0],R2popt[1]), '--', c='red')
plt.xlabel(r'$\mathtt{Ganho \quad EM}$', size=fontsize)
plt.ylabel(r'$\mathtt{Ru \acute{\i} do (ADU)}$', size=fontsize)
plt.title(r'$\mathtt{Gr \acute{a}fico \quad do \quad ru \acute{\i} do \quad em \quad fun \c c \tilde{a} o \quad do \quad Ganho \quad EM}$', size=fontsize)
#plt.ylim(15, 23)
#plt.xlim(-5, 305)
string1 = r'$\mathtt{f(x) = %.3f x + %.2f, \quad x < 10}$'%(R2popt[0],R2popt[1])
string2 = r'$\mathtt{%.4f x + %.2f, \quad x \geq 10}$'%(R1popt[0],R1popt[1])
#string2 = r'$\mathtt{\sigma^2 = %.2e,}$'%(R1pcov[0][0]) + r'$\mathtt{%.2e,}$'%(R1pcov[1][1])
ax.text(0.35, 0.3, string1, ha='left',va='center', transform=ax.transAxes, size=fontsize)
ax.text(0.475, 0.1, string2, ha='left',va='center', transform=ax.transAxes, size=fontsize)
ax = fig.add_subplot(122)
ax.errorbar(gainR1,residuos1,errorR1, marker='o', c='blue',linewidth=1.0)
ax.errorbar(gainR2[0], residuos2[0], errorR2[0], marker='o', c='blue',linewidth=1.0)
plt.xlabel(r'$\mathtt{Ganho \quad EM}$', size=fontsize)
plt.ylabel(r'$\mathtt{Ru \acute{\i} do (ADU)}$', size=fontsize)
plt.title(r'$\mathtt{Gr \acute{a}fico \quad dos \quad res \acute{\i} duos }$', size=fontsize)
plt.show()
|
python
|
import sklearn.svm
from autotabular.pipeline.components.regression.liblinear_svr import LibLinear_SVR
from .test_base import BaseRegressionComponentTest
class SupportVectorComponentTest(BaseRegressionComponentTest):
__test__ = True
res = dict()
res['default_boston'] = 0.6768297818275556
res['default_boston_places'] = 2
res['default_boston_iterative'] = None
res['default_boston_sparse'] = 0.12626519114138912
res['default_boston_sparse_places'] = 2
res['default_boston_iterative_sparse'] = None
res['default_diabetes'] = 0.39152218711865661
res['default_diabetes_iterative'] = None
res['default_diabetes_sparse'] = 0.18704323088631891
res['default_diabetes_iterative_sparse'] = None
sk_mod = sklearn.svm.LinearSVR
module = LibLinear_SVR
|
python
|
#!/usr/bin/env python
import requests
__author__ = "Likhit Jain and Yashita P Jain"
__copyright__ = "Copyright 2019, Kaleyra"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "[email protected]"
__status__ = "Production"
class Klient:
"""
"""
def __init__(self, url):
"""
Initialises the attributes of Klient class.
"""
self.url = url
def response(self):
"""
Makes a HTTP GET request to the server.
:return:response in JSON format
"""
try:
response = requests.get(self.url)
response_json = response.json()
return response_json
except Exception as e:
print(e)
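

# Usage sketch (illustrative): the URL below is a placeholder, not a documented
# Kaleyra endpoint; Klient simply issues the GET request and returns the parsed
# JSON (or None if the request fails).
if __name__ == '__main__':
    demo = Klient('https://example.com/api/v4/?method=sms.status')
    print(demo.response())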
|
python
|
import argparse
from pathlib import Path
import lpips
import torch as th
import wandb
from PIL import Image
import torchvision.transforms as tvt
from tqdm.auto import tqdm
from cgd import losses
from cgd import clip_util
from cgd import script_util
# Define necessary functions
def clip_guided_diffusion(
image_size: int = 128,
num_cutouts: int = 16,
prompts: "list[str]" = [],
image_prompts: "list[str]" = [],
clip_guidance_scale: int = 1000,
tv_scale: float = 150,
range_scale: float = 50,
sat_scale: float = 0,
init_scale: float = 0,
batch_size: int = 1,
init_image: Path = None,
class_cond: bool = True,
cutout_power: float = 1.0,
timestep_respacing: str = "1000",
seed: int = 0,
diffusion_steps: int = 1000,
skip_timesteps: int = 0,
checkpoints_dir: str = script_util.MODEL_PATH,
clip_model_name: str = "ViT-B/32",
randomize_class: bool = True,
prefix_path: Path = Path('./results'),
save_frequency: int = 25,
noise_schedule: str = "linear",
dropout: float = 0.0,
device: str = '',
wandb_project: str = None,
wandb_entity: str = None,
use_augs: bool = False, # enables augmentation, mostly better for timesteps <= 100
use_magnitude: bool = False, # enables magnitude of the gradient
height_offset: int = 0,
width_offset: int = 0,
progress: bool = True,
):
if len(device) == 0:
device = 'cuda' if th.cuda.is_available() else 'cpu'
print(f"Using device {device}. You can specify a device manually with `--device/-dev`")
else:
print(f"Using device {device}")
fp32_diffusion = (device == 'cpu')
wandb_run = None
if wandb_project is not None:
# just use local vars for config
wandb_run = wandb.init(project=wandb_project, entity=wandb_entity, config=locals())
else:
print(f"--wandb_project not specified. Skipping W&B integration.")
th.manual_seed(seed)
    if not use_magnitude and image_size == 64:
use_magnitude = True
tqdm.write("Enabling magnitude for 64x64 checkpoints.")
use_saturation = sat_scale != 0
Path(prefix_path).mkdir(parents=True, exist_ok=True)
Path(checkpoints_dir).mkdir(parents=True, exist_ok=True)
diffusion_path = script_util.download_guided_diffusion(image_size=image_size, checkpoints_dir=checkpoints_dir, class_cond=class_cond)
# Load CLIP model/Encode text/Create `MakeCutouts`
embeds_list = []
weights_list = []
clip_model, clip_size = clip_util.load_clip(clip_model_name, device)
for prompt in prompts:
text, weight = script_util.parse_prompt(prompt)
text, weight = clip_util.encode_text_prompt(text, weight, clip_model_name, device)
embeds_list.append(text)
weights_list.append(weight)
for image_prompt in image_prompts:
img, weight = script_util.parse_prompt(image_prompt)
image_prompt, batched_weight = clip_util.encode_image_prompt(
img, weight, image_size, num_cutouts=num_cutouts, clip_model_name=clip_model_name, device=device)
embeds_list.append(image_prompt)
weights_list.extend(batched_weight)
target_embeds = th.cat(embeds_list)
weights = th.tensor(weights_list, device=device)
if weights.sum().abs() < 1e-3: # smart :)
raise RuntimeError('The weights must not sum to 0.')
weights /= weights.sum().abs()
    if use_augs:
        tqdm.write("Augmentations enabled.")
make_cutouts = clip_util.MakeCutouts(cut_size=clip_size, num_cutouts=num_cutouts,
cutout_size_power=cutout_power, use_augs=use_augs)
# Load initial image (if provided)
init_tensor = None
if init_image:
pil_image = Image.open(script_util.fetch(init_image)).convert('RGB').resize((image_size, image_size))
init_tensor = tvt.ToTensor()(pil_image)
init_tensor = init_tensor.to(device).unsqueeze(0).mul(2).sub(1)
# Class randomization requires a starting class index `y`
model_kwargs = {}
if class_cond:
model_kwargs["y"] = th.zeros(
[batch_size], device=device, dtype=th.long)
# Load guided diffusion
gd_model, diffusion = script_util.load_guided_diffusion(
checkpoint_path=diffusion_path,
image_size=image_size, class_cond=class_cond,
diffusion_steps=diffusion_steps,
timestep_respacing=timestep_respacing,
use_fp16=(not fp32_diffusion),
device=device,
noise_schedule=noise_schedule,
dropout=dropout,
)
# This is initialized lazily as it can use a bit of VRAM
if init_tensor is not None and init_scale != 0:
lpips_vgg = lpips.LPIPS(net='vgg').to(device)
current_timestep = None
def cond_fn(x, t, out, y=None):
log = {}
n = x.shape[0]
fac = diffusion.sqrt_one_minus_alphas_cumprod[current_timestep]
sigmas = 1 - fac
x_in = out["pred_xstart"] * fac + x * sigmas
if wandb_project is not None:
log[f'Generations - {timestep_respacing}'] = [
wandb.Image(x, caption=f"Noisy Sample"),
wandb.Image(out['pred_xstart'],
caption=f"Denoised Prediction"),
wandb.Image(x_in, caption=f"Blended (what CLIP sees)"),
]
clip_in = clip_util.CLIP_NORMALIZE(make_cutouts(x_in.add(1).div(2)))
cutout_embeds = clip_model.encode_image(
clip_in).float().view([num_cutouts, n, -1])
dists = losses.spherical_dist_loss(
cutout_embeds.unsqueeze(0), target_embeds.unsqueeze(0))
dists = dists.view([num_cutouts, n, -1])
clip_losses = dists.mul(weights).sum(2).mean(0)
range_losses = losses.range_loss(out["pred_xstart"])
tv_losses = losses.tv_loss(x_in)
clip_losses = clip_losses.sum() * clip_guidance_scale
range_losses = range_losses.sum() * range_scale
tv_losses = tv_losses.sum() * tv_scale
log['CLIP Loss'] = clip_losses.item()
log['Range Loss'] = range_losses.item()
log['TV Loss'] = tv_losses.item()
loss = clip_losses + tv_losses + range_losses
if use_saturation:
sat_losses = th.abs(x_in - x_in.clamp(min=-1, max=1)).mean()
sat_losses = sat_losses.sum() * sat_scale
log['Saturation Loss'] = sat_losses.item()
loss = loss + sat_losses
if init_tensor is not None and init_scale != 0:
init_losses = lpips_vgg(x_in, init_tensor)
init_losses = init_losses.sum() * init_scale
log['Init VGG Loss'] = init_losses.item()
loss = loss + init_losses
log['Total Loss'] = loss.item()
final_loss = -th.autograd.grad(loss, x)[0] # negative gradient
if use_magnitude:
magnitude = final_loss.square().mean().sqrt() # TODO experimental clamping?
log["Magnitude"] = magnitude.item()
final_loss = final_loss * magnitude.clamp(max=0.05) / magnitude
log['Grad'] = final_loss.mean().item()
if progress:
tqdm.write(
"\t".join([f"{k}: {v:.3f}" for k, v in log.items() if "loss" in k.lower()]))
if wandb_project is not None:
wandb_run.log(log)
return final_loss
# Choose between normal or DDIM
if timestep_respacing.startswith("ddim"):
diffusion_sample_loop = diffusion.ddim_sample_loop_progressive
else:
diffusion_sample_loop = diffusion.p_sample_loop_progressive
# def denoised_fn(image): return image
try:
cgd_samples = diffusion_sample_loop(
gd_model,
(batch_size, 3, image_size + height_offset, image_size + width_offset),
clip_denoised=False,
model_kwargs=model_kwargs,
cond_fn=cond_fn,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_tensor,
randomize_class=randomize_class,
cond_fn_with_grad=True,
# denoised_fn=denoised_fn,
)
# Gather generator for diffusion
current_timestep = diffusion.num_timesteps - 1
for step, sample in enumerate(cgd_samples):
current_timestep -= 1
if step % save_frequency == 0 or current_timestep == -1:
for batch_idx, image_tensor in enumerate(sample["pred_xstart"]):
yield batch_idx, script_util.log_image(image_tensor, prefix_path, prompts, step, batch_idx)
# if wandb_project is not None: wandb.log({"image": wandb.Image(image_tensor, caption="|".join(prompts))})
# for batch_idx in range(batch_size):
# script_util.create_gif(prefix_path, prompts, batch_idx)
except (RuntimeError, KeyboardInterrupt) as runtime_ex:
if "CUDA out of memory" in str(runtime_ex):
print(f"CUDA OOM error occurred.")
print(
f"Try lowering --image_size/-size, --batch_size/-bs, --num_cutouts/-cutn")
print(
f"--clip_model/-clip (currently {clip_model_name}) can have a large impact on VRAM usage.")
print(f"'RN50' will use the least VRAM. 'ViT-B/32' the second least and is good for its memory/runtime constraints.")
else:
raise runtime_ex
def main():
p = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
p.add_argument("--prompts", "-txts", type=str, default='',
help="the prompt/s to reward paired with weights. e.g. 'My text:0.5|Other text:-0.5' ")
p.add_argument("--image_prompts", "-imgs", type=str, default='',
help="the image prompt/s to reward paired with weights. e.g. 'img1.png:0.5,img2.png:-0.5'")
p.add_argument("--image_size", "-size", type=int, default=128,
help="Diffusion image size. Must be one of [64, 128, 256, 512].")
p.add_argument("--init_image", "-init", type=str, default='',
help="Blend an image with diffusion for n steps")
p.add_argument("--init_scale", "-is", type=int, default=0,
help="(optional) Perceptual loss scale for init image. ")
p.add_argument("--skip_timesteps", "-skip", type=int, default=0,
help="Number of timesteps to blend image for. CLIP guidance occurs after this.")
p.add_argument("--prefix", "-dir", default="results",
type=Path, help="output directory")
p.add_argument("--checkpoints_dir", "-ckpts", default=script_util.MODEL_PATH,
type=Path, help="Path subdirectory containing checkpoints.")
p.add_argument("--batch_size", "-bs", type=int,
default=1, help="the batch size")
p.add_argument("--clip_guidance_scale", "-cgs", type=float, default=1000,
help="Scale for CLIP spherical distance loss. Values will need tinkering for different settings.",)
p.add_argument("--tv_scale", "-tvs", type=float,
default=150., help="Controls the smoothness of the final output.",)
p.add_argument("--range_scale", "-rs", type=float,
default=50., help="Controls how far out of RGB range values may get.",)
p.add_argument("--sat_scale", "-sats", type=float, default=0.,
help="Controls how much saturation is allowed. Used for ddim. From @nshepperd.",)
p.add_argument("--seed", "-seed", type=int,
default=0, help="Random number seed")
p.add_argument("--save_frequency", "-freq", type=int,
default=1, help="Save frequency")
p.add_argument("--diffusion_steps", "-steps", type=int,
default=1000, help="Diffusion steps")
p.add_argument("--timestep_respacing", "-respace", type=str,
default="1000", help="Timestep respacing")
p.add_argument("--num_cutouts", "-cutn", type=int, default=16,
help="Number of randomly cut patches to distort from diffusion.")
p.add_argument("--cutout_power", "-cutpow", type=float,
default=1.0, help="Cutout size power")
p.add_argument("--clip_model", "-clip", type=str, default="ViT-B/32",
help=f"clip model name. Should be one of: {clip_util.CLIP_MODEL_NAMES} or a checkpoint filename ending in `.pt`")
p.add_argument("--uncond", "-uncond", action="store_true",
help='Use finetuned unconditional checkpoints from OpenAI (256px) and Katherine Crowson (512px)')
p.add_argument("--noise_schedule", "-sched", default='linear', type=str,
help="Specify noise schedule. Either 'linear' or 'cosine'.")
p.add_argument("--dropout", "-drop", default=0.0, type=float,
help="Amount of dropout to apply. ")
p.add_argument("--device", "-dev", default='', type=str,
help="Device to use. Either cpu or cuda.")
p.add_argument('--wandb_project', '-proj', default=None,
help='Name W&B will use when saving results.\ne.g. `--wandb_project "my_project"`')
p.add_argument('--wandb_entity', '-ent', default=None,
help='(optional) Name of W&B team/entity to log to.')
p.add_argument('--height_offset', '-ht', default=0, type=int, help='Height offset for image')
p.add_argument('--width_offset', '-wd', default=0, type=int, help='Width offset for image')
p.add_argument('--use_augs', '-augs', action='store_true', help="Uses augmentations from the `quick` clip guided diffusion notebook")
p.add_argument('--use_magnitude', '-mag', action='store_true', help="Uses magnitude of the gradient")
p.add_argument('--quiet', '-q', action='store_true',
help='Suppress output.')
args = p.parse_args()
_class_cond = not args.uncond
prefix_path = args.prefix
Path(prefix_path).mkdir(exist_ok=True)
if len(args.prompts) > 0:
prompts = args.prompts.split('|')
else:
prompts = []
if len(args.image_prompts) > 0:
image_prompts = args.image_prompts.split('|')
else:
image_prompts = []
cgd_generator = clip_guided_diffusion(
prompts=prompts,
image_prompts=image_prompts,
batch_size=args.batch_size,
tv_scale=args.tv_scale,
init_scale=args.init_scale,
range_scale=args.range_scale,
sat_scale=args.sat_scale,
image_size=args.image_size,
class_cond=_class_cond,
randomize_class=(_class_cond),
save_frequency=args.save_frequency,
clip_guidance_scale=args.clip_guidance_scale,
cutout_power=args.cutout_power,
num_cutouts=args.num_cutouts,
timestep_respacing=args.timestep_respacing,
seed=args.seed,
diffusion_steps=args.diffusion_steps,
skip_timesteps=args.skip_timesteps,
init_image=args.init_image,
checkpoints_dir=args.checkpoints_dir,
clip_model_name=args.clip_model,
noise_schedule=args.noise_schedule,
dropout=args.dropout,
device=args.device,
prefix_path=prefix_path,
wandb_project=args.wandb_project,
wandb_entity=args.wandb_entity,
use_augs=False,
use_magnitude=False,
height_offset=args.height_offset,
width_offset=args.width_offset,
progress=not args.quiet,
)
list(enumerate(cgd_generator)) # iterate over generator
if __name__ == "__main__":
main()
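
# Example invocation (illustrative; script name, prompt and settings are placeholders):
#   python cgd_script.py --prompts "a watercolor lighthouse:1.0" -size 128 \
#       -cutn 16 --timestep_respacing ddim50 --prefix results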
|
python
|
from scipy.interpolate import interp1d
from math import cos, pi
import _rrtm_radiation_fortran
from numpy import ndarray
INPUTS = [
# 'do_sw', # 0 Shortwave switch (integer) 1 1 / 0 => do / do not compute SW
# 'do_lw', # 0 Longwave switch (integer) 1 1 / 0 => do / do not compute LW
'p', # 1-3 Atmospheric pressure mb Default is equispaced 0-ps. p[0] is top level
'lev',
'T', # 1-3 Temperature K 283.15 Isothermal
'Tbound',
'ps', # 0-2 Surface pressure mb 1000.
'Ts', # 0-2 Surface temperature K 283.15
'q', # 1-3 Specific humidity g/kg 1.e-5
'h2o',
'o3', # 1-3 Ozone mass mix. rat. kg/kg Default obtained by interpolating a tropical data profile
'co2', # 0 CO2 ppmv 330.
'ch4', # 0 CH4 ppmv 0.
'n2o', # 0 N2O ppmv 0.
'o2', # 0 O2 volume mixing ratio
'cfc11', # 0 CFC11 ppmv 0.
'cfc12', # 0 CFC12 ppmv 0.
'cfc22', # 0 CFC22 ppmv 0.
'ccl4', # CCl4 volume mixing ratio
'aldif', # 0-2 Diffuse near-IR (SW) albedo (frac) 0.07
'aldir', # 0-2 Direct near-IR (SW) albedo (frac) 0.07
'asdif', # 0-2 Diffuse UV+vis alb (frac) 0.07
'asdir', # 0-2 Direct UV+vis alb (frac) 0.07
'lw_surface_emissivity', # should have len(LW_BANDS) members...see above
'zen', # 0-2 Solar zenith angle dgr 72.2 Daily-mean on equator at equinox
# 'calday', # 0 Calendar day (float) 80.5 Insolation computed at specified
# 'orb_yr', # 0 Orbital year (integer) 1995 Year used to compute orbital params
# 'avg', # 0 Insolation average (string) 'daily' Choices are: 'inst', 'daily', 'annual'
# 'lat', # 0-1 Latitude dgr 0. day and lat/lon if solin
# 'lon', # 0-1 Longitude dgr 0. and zen are NOT specified
# 'solin', # 0-2 Insolation W/m2 417.4 Daily-mean on equator at equinox
'scon', # 0 Solar constant W m-2 1367.
# 'tauvis', # 0 Aerosol opt. depth (float) 0. CCM3 only
# 'tau_inf', # 0 Total opt. depth - 1. Greygas scheme only
# 'alpha_greygas', # 0 Tau shape parameter - 1. Greygas scheme only
'cldf', # 1-3 Cloud fraction frac 0.:
# 'in_cld', # 0 Cloud water path flag - 0 0 / 1 => grid avg / in-cloud water paths (CAM3 only)
'cloud_single_scattering_albedo',
'cloud_asymmetry_parameter',
'cloud_forward_scattering_fraction',
'r_liq', # 1-3 Drop radius, liquid micron 10.
'r_ice', # 1-3 Drop radius, ice micron 30.
'clwp', # 1-3 Cloud liquid water path g/m2 0.
'ciwp', # 1-3 Cloud ice water path g/m2 -99. If not passed explicitly, ice frac computed internally (CAM3 only)
# 'flus' # 1-3 Upwelling LW at surface W/m2 -99. If not passed explicitly, computed from Ts using emiss=1 (CAM3 only)
'tauaer_sw', # Aerosol optical depth (iaer=10 only), Dimensions: (ncol,nlay,nbndsw), (non-delta scaled)
'ssaaer_sw', # Aerosol single scattering albedo (iaer=10 only), Dimensions: (ncol,nlay,nbndsw), (non-delta scaled)
'asmaer_sw', # Aerosol asymmetry parameter (iaer=10 only), Dimensions: (ncol,nlay,nbndsw), (non-delta scaled)
'tauaer_lw', # Aerosol optical depth (iaer=10 only), Dimensions: (ncol,nlay,nbndlw), (non-delta scaled)
'Cpd',
'tauc_lw'
]
OUTPUTS = ['swuflx', 'swdflx', 'lwuflx', 'lwdflx', 'SwToa', 'LwToa', 'lwflx', 'swflx', 'hr']
def driver(*args):
# wavenumber bands used by RRTM:
SW_BANDS = range(14)
LW_BANDS = range(16)
# gotta translate between the APIs:
climt_inputs = dict(zip(INPUTS, args))
number_of_layers = len(climt_inputs['T'])
if not climt_inputs['Tbound']: climt_inputs['Tbound'] = climt_inputs['T']
climt_inputs['pbound'] = climt_inputs['lev'].tolist() + climt_inputs['ps'][0].tolist()
climt_inputs['pbound'][0] = 1.e-9 # enforce TOA is at p=0
climt_inputs['Tbound'] = [T[0][0] for T in climt_inputs['Tbound']] + [climt_inputs['Ts'][0][0]]
interpolated_p = interp1d(range(number_of_layers + 1), climt_inputs['pbound'])
interpolated_T = interp1d(range(number_of_layers + 1), climt_inputs['Tbound'])
# import sys; sys.stderr.write(str(climt_inputs['cldf']))
for key in ['co2', 'ch4', 'n2o', 'o2', 'cfc11', 'cfc12', 'cfc22', 'ccl4']:
if not hasattr(climt_inputs[key], '__iter__'):
climt_inputs[key] = [climt_inputs[key]] * number_of_layers
for key in ['lw_surface_emissivity']:
if not hasattr(climt_inputs[key], '__iter__'):
climt_inputs[key] = [climt_inputs[key]] * len(LW_BANDS)
# import pdb; pdb.set_trace()
if 'h2o' in climt_inputs and climt_inputs['h2o']:
h2o_concentration = [[h2o[0][0] for h2o in climt_inputs['h2o']]]
else:
h2o_concentration = [[(((q/1000.)/(1. - (q/1000.)))*1.607793)[0][0] for q in climt_inputs['q']]]
for key in ['tauaer_sw', 'ssaaer_sw', 'asmaer_sw', 'tauaer_lw']:
if not climt_inputs[key]:
climt_inputs[key] = [[0] * len(locals()[key[-2:].upper() + '_BANDS']) for value in climt_inputs['lev']]
else:
if not hasattr(climt_inputs[key][0], '__iter__'):
climt_inputs[key] = [[value] * len(locals()[key[-2:].upper() + '_BANDS']) for value in climt_inputs[key]]
rrtm_inputs = [
# GENERAL, used in both SW and LW
['icld', 1 if 'cldf' in climt_inputs else 0], # Cloud overlap method, 0: Clear only, 1: Random, 2, Maximum/random] 3: Maximum
['permuteseed_sw', 150], # used for monte carlo clouds; must differ from permuteseed_lw by number of subcolumns
['permuteseed_lw', 300], # learn about these later...
['irng', 1], # more monte carlo stuff
['idrv', 0], # whether to also calculate the derivative of flux with respect to surface temp
['cpdair', climt_inputs['Cpd']],
['play', [[interpolated_p(i + .5) for i in range(number_of_layers)]]], # pressure in each layer
['plev', [climt_inputs['pbound']]], # pressure at boundaries of each layer
['tlay', [[interpolated_T(i + .5) for i in range(number_of_layers)]]], # temperature in each layer
['tlev', [climt_inputs['Tbound']]], # temperature at boundaries of each layer
['tsfc', [climt_inputs['Ts']]], # surface temperature
# GASES, used in both SW and LW
['h2ovmr', h2o_concentration],
['o3vmr', [[o3[0][0] * 0.603428 for o3 in climt_inputs['o3']]]], # convert from kg/kg to volume mixing ratio using molecular weight of dry air / ozone
['co2vmr', [[co2 * 1.e-6 for co2 in climt_inputs['co2']]]],
['ch4vmr', [[ch4 * 1.e-6 for ch4 in climt_inputs['ch4']]]],
['n2ovmr', [[n2o * 1.e-6 for n2o in climt_inputs['n2o']]]],
['o2vmr', [climt_inputs['o2']]],
['cfc11vmr', [[cfc11 * 1.e-6 for cfc11 in climt_inputs['cfc11']]]],
['cfc12vmr', [[cfc12 * 1.e-6 for cfc12 in climt_inputs['cfc12']]]],
['cfc22vmr', [[cfc22 * 1.e-6 for cfc22 in climt_inputs['cfc22']]]],
['ccl4vmr', [climt_inputs['ccl4']]],
# SURFACE OPTICAL PROPERTIES
# SW
['aldif', [climt_inputs['aldif'][0][0]]],
['aldir', [climt_inputs['aldir'][0][0]]],
['asdif', [climt_inputs['asdif'][0][0]]],
['asdir', [climt_inputs['asdir'][0][0]]],
# LW
['emis', [[1. or 1 - emis for emis in climt_inputs['lw_surface_emissivity']]]],
# THE SUN - SW
['coszen', [cos(climt_inputs['zen'][0][0] * 2 * pi / 360.)]], # cosine of the solar zenith angle
['adjes', 1.], # flux adjustment for earth/sun distance (if not dyofyr)
['dyofyr', 0], # day of the year used to get Earth/Sun distance (if not adjes)
['scon', climt_inputs['scon']], # solar constant
# CLOUDS, SW see http://www.arm.gov/publications/proceedings/conf16/extended_abs/iacono_mj.pdf
['inflgsw', 2], # Flag for cloud optical properties
# INFLAG = 0 direct specification of optical depths of clouds;
# cloud fraction and cloud optical depth (gray) are
# input for each cloudy layer
# = 1 calculation of combined ice and liquid cloud optical depths (gray)
# as in CCM2; cloud fraction and cloud water path are input for
# each cloudy layer.
# = 2 calculation of separate ice and liquid cloud optical depths, with
# parameterizations determined by values of ICEFLAG and LIQFLAG.
# Cloud fraction, cloud water path, cloud ice fraction, and
# effective ice radius are input for each cloudy layer for all
# parameterizations. If LIQFLAG = 1, effective liquid droplet radius
# is also needed.
['inflglw', 2], #
['iceflgsw', 1], # # Flag for ice particle specification
# ICEFLAG = 0 the optical depths (gray) due to ice clouds are computed as in CCM3.
# = 1 the optical depths (non-gray) due to ice clouds are computed as closely as
# possible to the method in E.E. Ebert and J.A. Curry, JGR, 97, 3831-3836 (1992).
# = 2 the optical depths (non-gray) due to ice clouds are computed by a method
# based on the parameterization used in the radiative transfer model Streamer
# (reference, J. Key, Streamer User's Guide, Technical Report 96-01] Boston
# University, 85 pp. (1996)), which is closely related to the parameterization
# of water clouds due to Hu and Stamnes (see below).
# = 3 the optical depths (non-gray) due to ice clouds are computed by a method
# based on the parameterization given in Fu et al., J. Clim.,11,2223-2237 (1998).
['iceflgslw', 1], #
['liqflgsw', 1], # # Flag for liquid droplet specification
# LIQFLAG = 0 the optical depths (gray) due to water clouds are computed as in CCM3.
# = 1 the optical depths (non-gray) due to water clouds are computed by a method
# based on the parameterization of water clouds due to Y.X. Hu and K. Stamnes,
# J. Clim., 6, 728-742 (1993).
['liqflglw', 1], #
['tauc_sw', [[[0.]* number_of_layers]] * len(SW_BANDS)], # In-cloud optical depth [IS THIS ONE NEEDED GIVEN THE OTHERS?]
['tauc_lw', [[climt_inputs['tauc_lw'] or [0.] * number_of_layers]] * len(LW_BANDS)], # in-cloud optical depth
['cldfrac', [[c[0][0] for c in climt_inputs['cldf']]]], # # layer cloud fraction
['ssac_sw', [[climt_inputs['cloud_single_scattering_albedo'] or [0.] * number_of_layers]] * len(SW_BANDS)], # # In-cloud single scattering albedo
['asmc_sw', [[climt_inputs['cloud_asymmetry_parameter'] or [0.] * number_of_layers]] * len(SW_BANDS)], # # In-cloud asymmetry parameter
['fsfc_sw', [[climt_inputs['cloud_forward_scattering_fraction'] or [0.] * number_of_layers]] * len(SW_BANDS)], ## In-cloud forward scattering fraction (delta function pointing forward "forward peaked scattering")
['ciwp', [[c[0][0] for c in climt_inputs['ciwp']]]], # # in-cloud ice water path (g/m2)
['clwp', [[c[0][0] for c in climt_inputs['clwp']]]], # # in-cloud liquid water path (g/m2)
['reic', [[c[0][0] for c in climt_inputs['r_ice']]]], # # Cloud ice particle effective size (microns)
# specific definition of reicmcl depends on setting of iceflglw:
# iceflglw = 0, ice effective radius, r_ec, (Ebert and Curry, 1992)]
# r_ec must be >= 10.0 microns
# iceflglw = 1, ice effective radius, r_ec, (Ebert and Curry, 1992)]
# r_ec range is limited to 13.0 to 130.0 microns
# iceflglw = 2, ice effective radius, r_k, (Key, Streamer Ref. Manual] 1996)
# r_k range is limited to 5.0 to 131.0 microns
# iceflglw = 3, generalized effective size, dge, (Fu, 1996)]
# dge range is limited to 5.0 to 140.0 microns
# [dge = 1.0315 * r_ec]
['relq', [[c[0][0] for c in climt_inputs['r_liq']]]], # # Cloud water drop effective radius (microns)
# AEROSOLS
# SW
['tauaer_sw', [climt_inputs['tauaer_sw'] or [[0.] * len(SW_BANDS)] * number_of_layers]], # Aerosol optical depth (iaer=10 only), Dimensions, (ncol,nlay,nbndsw)] # (non-delta scaled)
['ssaaer_sw', [climt_inputs['ssaaer_sw'] or [[0.] * len(SW_BANDS)] * number_of_layers]], # Aerosol single scattering albedo (iaer=10 only), Dimensions, (ncol,nlay,nbndsw)] # (non-delta scaled)
['asmaer_sw', [climt_inputs['asmaer_sw'] or [[0.] * len(SW_BANDS)] * number_of_layers]], # Aerosol asymmetry parameter (iaer=10 only), Dimensions, (ncol,nlay,nbndsw)] # (non-delta scaled)
['ecaer_sw', [[[0.] * 6] * number_of_layers]], # Aerosol optical depth at 0.55 micron (iaer=6 only), Dimensions, (ncol,nlay,naerec)] # (non-delta scaled)
['tauaer_lw', [climt_inputs['tauaer_lw'] or [[0.] * len(LW_BANDS)] * number_of_layers]] #
]
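    # Reverse every nested list whose length matches the number of layers or
    # layer interfaces: the CliMT-style arrays above are ordered top-down
    # (p[0] is the top level), while the Fortran RRTM driver expects
    # surface-to-top profiles; the outputs are flipped back further below.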
for pair in rrtm_inputs:
r_0 = pair[1]
if hasattr(r_0, '__iter__'):
if len(r_0) in [number_of_layers, number_of_layers + 1]:
r_0.reverse()
else:
for r_1 in r_0:
if hasattr(r_1, '__iter__'):
if len(r_1) in [number_of_layers, number_of_layers + 1]:
r_1.reverse()
else:
for r_2 in r_1:
if hasattr(r_2, '__iter__'):
if len(r_2) in [number_of_layers, number_of_layers + 1]:
r_2.reverse()
# import pdb; pdb.set_trace()
out = dict(zip(['swuflx','swdflx','swhr','swuflxc','swdflxc','swhrc','lwuflx','lwdflx','lwhr','lwuflxc','lwdflxc','lwhrc','duflx_dt','duflxc_dt'], \
list(_rrtm_radiation_fortran.driver(*[pair[1] for pair in rrtm_inputs])) ))
#
## new_output = (
## output[0][0], # swuflx
## output[1][0], # swdflx
## output[6][0], # lwuflx
## output[7][0], # lwdflx
## output[1][0][-1] - output[0][0][-1], # swToA
## output[7][0][-1] - output[6][0][-1] # lwToA
## )
## return new_output
# get outputs into CliMT-compatible format
for key in out.keys():
out[key] = out[key].transpose() # make level first index
out[key] = out[key][::-1] # indexing goes top to bottom
# fluxes defined positive downward
for key in ['swuflx','swuflxc','lwuflx','lwuflxc']: out[key] = -out[key]
# TOA fluxes
out['LwToa'] = out['lwuflx'][0]+out['lwdflx'][0]
out['SwToa'] = out['swuflx'][0]+out['swdflx'][0]
# output fluxes at layer midpoints:
for key in ['swuflx','swdflx','swuflxc','swdflxc','lwuflx','lwdflx','lwuflxc','lwdflxc']: out[key] = (out[key][1:]+out[key][:-1])/2.
# total fluxes
out['lwflx'] = out['lwuflx']+out['lwdflx']
out['swflx'] = out['swuflx']+out['swdflx']
out['hr'] = out['swhr']+out['lwhr']
return tuple([out[key] for key in OUTPUTS])
|
python
|
from typing import List
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
N = len(nums)
min_num = max_num = float('inf')
for i in range(1, N):
if nums[i] < nums[i-1]:
min_num = min(nums[i:])
break
for i in reversed(range(N-1)):
if nums[i] > nums[i+1]:
max_num = max(nums[:i+1])
break
if min_num == float('inf'):
return 0
left = right = 0
for i in range(N):
if nums[i] > min_num:
left = i
break
for i in reversed(range(N)):
if nums[i] < max_num:
right = i
break
return right-left+1
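

# Quick check (illustrative): in [2, 6, 4, 8, 10, 9, 15] the shortest window that
# must be sorted is [6, 4, 8, 10, 9], so the expected answer is 5.
if __name__ == '__main__':
    print(Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15]))  # 5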
|
python
|
from unittest import main
from tests.base import BaseTestCase
class AppTestCase(BaseTestCase):
"""This class represents the test cases to see if the app is up"""
# App runs ----------------------------------------
def test_app_is_running(self):
# make request
res = self.client().get('/')
# assert
self.assertEqual(res.status_code, 200)
self.assertTrue("Welcome to Limbook Api" in res.get_data(as_text=True))
# Make the tests conveniently executable
if __name__ == "__main__":
main()
|
python
|
amount = int(input())
count = 0
if int(amount/100):
count+=int(amount/100)
amount -= int(amount/100)*100
if int(amount/20):
count+=int(amount/20)
amount -= int(amount/20)*20
if int(amount/10):
count+=int(amount/10)
amount -= int(amount/10)*10
if int(amount/5):
count+=int(amount/5)
amount -= int(amount/5)*5
count+=amount
print(count)
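
# Equivalent greedy count written as a reusable sketch (illustrative):
def count_notes(amount, denominations=(100, 20, 10, 5, 1)):
    total = 0
    for d in denominations:
        used, amount = divmod(amount, d)
        total += used
    return total
# e.g. count_notes(148) == 1 + 2 + 0 + 1 + 3 == 7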
|
python
|
from db import db
class BookModel(db.Model):
__tablename__ = 'books'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(80))
author = db.Column(db.String(80))
isbn = db.Column(db.String(40))
release_date = db.Column(db.String(10))
price = db.Column(db.Float(precision=2))
store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
store = db.relationship('StoreModel')
def __init__(self, title, price, store_id, author, isbn, release_date):
self.title = title
self.price = price
self.store_id = store_id
self.author = author
self.isbn = isbn
self.release_date = release_date
def json(self):
return {'title': self.title, 'price': self.price, 'author': self.author, 'isbn': self.isbn, 'release_date': self.release_date}
@classmethod
def find_by_title(cls, title):
return cls.query.filter_by(title=title).first()
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
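

# Usage sketch (illustrative; requires an application context, an initialized
# `db`, and an existing store row with id=1 -- the book data below is made up):
def demo_book_model():
    book = BookModel('Dune', 9.99, store_id=1, author='Frank Herbert',
                     isbn='9780441013593', release_date='1965-08-01')
    book.save_to_db()
    return BookModel.find_by_title('Dune').json()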
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Archerx
# @time: 2019/4/15 11:10 AM
from xadmin import views
import xadmin
from . import models
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm)
# from xadmin import PermissionModelMultipleChoiceField
# from xadmin import Fieldset, Main, Side, Row
from xadmin.plugins.auth import UserAdmin
from django.utils.translation import ugettext as _
class GlobalSetting(object):
    # menu_style = 'accordion'  # show the menu groups collapsed (accordion style)
site_title = 'SDUTCtf'
site_footer = 'ctf.sdutsec.cn'
xadmin.site.register(views.CommAdminView, GlobalSetting)  # register with the global admin application
class BaseSetting(object):
enable_themes = True
use_bootswatch = True
xadmin.site.register(views.BaseAdminView, BaseSetting)
class UserDisplay(UserAdmin):
change_user_password_template = None
list_display = ('id','user_phone','username', 'user_major', 'user_number', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
style_fields = {'user_permissions': 'm2m_transfer'}
model_icon = 'fa fa-user'
relfield_style = 'fk-ajax'
def get_model_form(self, **kwargs):
if self.org_obj is None:
self.form = UserCreationForm
else:
self.form = UserChangeForm
return super(UserDisplay, self).get_model_form(**kwargs)
# def get_form_layout(self):
# if self.org_obj:
# self.form_layout = (
# Main(
# Fieldset('',
# 'username', 'password',
# css_class='unsort no_title'
# ),
# Fieldset(_('Personal info'),
# Row('first_name', 'last_name'),
# 'email'
# ),
# Fieldset(_('Permissions'),
# 'groups', 'user_permissions'
# ),
# Fieldset(_('Important dates'),
# 'last_login', 'date_joined'
# ),
# ),
# Side(
# Fieldset(_('Status'),
# 'is_active', 'is_staff', 'is_superuser',
# ),
# )
# )
# return super(UserDisplay, self).get_form_layout()
# xadmin.site.unregister(models.UserProfile)
xadmin.site.register(models.UserProfile, UserDisplay)
class VerifyCodeDisplay(object):
list_display = ('id','code','mobile','add_time')
ordering = ['-add_time']
list_per_page = 10
xadmin.site.register(models.VerifyCode, VerifyCodeDisplay)
class UserLogDisply(object):
    list_display = ('user', 'user_login_time', 'user_login_ip', 'user_login_agent', 'user_login_os')
    list_filter = ('user_login_time', 'user_login_agent', 'user_login_os')
    search_fields = ('user__username', 'user_login_ip', 'user_login_agent', 'user_login_os')
    ordering = ['-user_login_time']
    list_per_page = 10
xadmin.site.register(models.UserLoginLog, UserLogDisply)
|
python
|
from flask import ( g, redirect, url_for )
from tmc.db import get_db
# Insert relation tool_x_technique
def insert_tool_x_techn(table, tool_id, technique_id):
try:
author_id = g.user['id']
except (NameError, TypeError) as error:
author_id = 1
g.db = get_db()
query='INSERT INTO {} ({}, {}, {}) VALUES (?, ?, ?)'.format(table, 'author_id', 'tool_id', 'technique_id')
result = g.db.execute(query, (author_id, tool_id, technique_id))
g.db.commit()
element_id = result.lastrowid
return element_id
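

# Usage sketch (illustrative; must run inside a request/app context and the
# table name has to match the application's schema):
#   new_id = insert_tool_x_techn('tool_x_technique', tool_id=3, technique_id=7)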
|
python
|
from django.urls import path
from . import views
TYPE = "stream"
urlpatterns = [
path('', views.view_gallery_stream, name=f'gallery-{TYPE}'),
path('edit/<int:pk>/', views.video_stream_edit, name=f"{TYPE}-update"),
path('remove/<int:pk>/', views.removeStream, name=f"{TYPE}-delete"),
]
|
python
|
import pandas as pd
import re
from django.core.management import BaseCommand
from django.conf import settings
from Styling.models import Garments, ImageURLs, Images, ProductCategories
class SanitizeData:
def __init__(self):
        self.csv_path = settings.GARMENTS_DATA_URL + '\\garment_items.jl'
self.garments = pd.read_json(self.csv_path, lines=True)
def sanitize_garment_data(self):
"""There are no empty cells in this data so I didn't bother writing code to account for them"""
self.garments['brand'] = self.garments.brand.astype('category')
self.garments['gender'] = self.garments.gender.astype('category')
self.garments['price'] = self.garments.price.str.replace(',', '').astype(float)
return self.garments
class Command(BaseCommand):
    def __init__(self):
        super().__init__()
        self.sanitize = SanitizeData()
def execute(self, *args, **options):
garments = self.sanitize.sanitize_garment_data()[:1000]
pattern = re.compile('[^a-zA-Z]')
for garment in garments.iterrows():
gar = Garments(
product_id=garment[1].product_id,
brand=pattern.sub('', garment[1].brand).upper(),
gender=garment[1].gender,
price=garment[1].price,
product_description=garment[1].product_description,
product_title=garment[1].product_title,
source=garment[1].source,
url=garment[1].url,
)
gar.save()
for url in garment[1].image_urls:
img_urls = ImageURLs(
image_url=url,
garment=gar
)
img_urls.save()
for product_category in garment[1].product_categories:
product_category = ProductCategories(
product_category=pattern.sub('', product_category).upper(),
garment=gar
)
product_category.save()
for image in garment[1].images:
image = Images(
url=image['url'],
path=image['path'],
checksum=image['checksum'],
garment=gar
)
image.save()
|
python
|
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
import pickle
import xlsxwriter
def extract_regulatory_genes():
"""
    The EXTRACT_REGULATORY_GENES operation extracts, from the set of Transcription Factors associated with a gene, the list of its candidate regulatory genes, i.e., the genes that encode those TFs. Intermediate results files are exported locally during the execution of the function, while the final mapping is returned as a Python dictionary (dict_RegulGenes.p), where each target gene (set as key) is associated with the list of its candidate regulatory genes (set as value).
:return: a Python dictionary
Example::
import genereg as gr
reg_genes_dict = gr.RegulatoryGenes.extract_regulatory_genes()
"""
# Starting from the dictionary containing for each gene of interest the TFs that bind to its promoters,
# extract the names of the genes encoding the TFs in order to identify the candidate regulatory genes of each gene of interest
dict_GeneTF = pickle.load(open('./1_Transcription_Factors/dict_GeneTF.p', 'rb'))
TFs_interest = []
for key, value in dict_GeneTF.items():
TFs = value[:-2] # the TFs are all the elements of the value list, except for the last two
for tf in TFs:
if tf not in TFs_interest:
TFs_interest.append(tf)
# Import the gene-TFs mapping dataframe
Mapping_df = pd.read_excel('./0_Genes_Mapping/Genes_Mapping.xlsx',sheetname='Sheet1',header=0,converters={'ENTREZ_GENE_ID':str,'HGNC_ID':str})
for index, row in Mapping_df.iterrows():
tfs_str = row['TF']
if isinstance(tfs_str,str):
tfs_list = tfs_str.split(', ')
else:
tfs_list = []
Mapping_df.set_value(index,'TF',tfs_list)
# Extract in a list all the names of the TFs contained in the mapping dataframe
mapping_df_TFs = []
for index, row in Mapping_df.iterrows():
tfs = row['TF']
if len(tfs) != 0:
for t in tfs:
if t not in mapping_df_TFs:
mapping_df_TFs.append(t)
# Create a reduced dataframe with all the distinct TFs and their encoding genes, filtering only the TFs of interest previously extracted
distinct_TFs = []
for index, row in Mapping_df.iterrows():
tfs = row['TF']
if len(tfs) != 0:
for t in tfs:
if t in TFs_interest:
if t not in distinct_TFs:
distinct_TFs.append(t)
from collections import defaultdict
dict_tf_gene = defaultdict(list)
for t in distinct_TFs:
dict_tf_gene[t] = []
for index, row in Mapping_df.iterrows():
tf = row['TF']
gene = row['GENE_SYMBOL']
for t in tf:
if t in distinct_TFs:
dict_tf_gene[t].append(gene)
TF_Gene_df = pd.DataFrame(list(dict_tf_gene.items()), columns=['TF_NAME', 'GENE_SYMBOL'])
for index, row in TF_Gene_df.iterrows():
genes = row['GENE_SYMBOL']
if len(genes) == 1:
new_gene = ''.join(genes)
TF_Gene_df.set_value(index,'GENE_SYMBOL',new_gene)
# Create a new empty dictionary with lists as values for each key (gene)
from collections import defaultdict
dict_RegulGenes = defaultdict(list)
# Set the keys and initialize their values as empty lists
for v in dict_GeneTF.keys():
dict_RegulGenes[v] = []
# Get the TFs of each target gene and extract the names of the genes encoding them from the mapping dataframe
for key, value in dict_GeneTF.items():
TFs = value[:-2] # the TFs are all the elements of the value list, except for the last two
for tf in TFs:
# for each TF, search in the mapping dataframe for the name of the encoding gene
if tf in mapping_df_TFs:
# get the name (GENE_SYMBOL) of the gene encoding the transcription factor "tf"
gene_name = TF_Gene_df.loc[TF_Gene_df['TF_NAME'] == tf, 'GENE_SYMBOL'].iloc[0]
# add the regulatory gene in correspondence of the proper gene in the dictionary
dict_RegulGenes[key].append(gene_name)
# in case the transcription factor considered is not mapped in the dataframe,
# then the name of its encoding gene is unknown ('n/a')
else: dict_RegulGenes[key].append('n/a')
# SUMMARY TABLE summarizing for each gene of interest the TFs binding to its promoters and their corresponding encoding genes:
# Each row of the table is indexed by the Gene Symbols of the genes of interest and progressive integers representing the number of TFs for each gene
genes_of_interest = []
for k in dict_GeneTF.keys():
genes_of_interest.append(k)
# Extract the highest number of regulatory genes for a single gene of interest
highest_n = 0
for k, v in dict_RegulGenes.items():
        n = len(v)
if n > highest_n:
highest_n = n
top_range = highest_n + 100
# Define the number of rows in the table for each gene of interest
num_lst = []
for i in list(range(1,top_range)):
num_lst.append(i)
# Cartesian product to generate tuples for multi-indexing
import itertools
tuples = []
for i in itertools.product(genes_of_interest,num_lst):
tuples.append(i)
# Set the multiple indexes to be used in the dataframe
index = pd.MultiIndex.from_tuples(tuples, names=['GENE_SYMBOL', '#'])
# Create the dataframe and initialize the empty cells as empty strings
info_genes_of_interest = pd.DataFrame('', index = index, columns = ['Transcription Factors','Regulatory Genes','Entrez_Gene_IDs','ENTREZ_GENE_ID','GENE_SET','#TFs','#RegulatoryGenes (distinct)'])
# Set the correct Entrez Gene ID for each gene of interest
for index, row in info_genes_of_interest.iterrows():
sym = index[0]
n = index[1]
if n == 1:
eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == sym, 'ENTREZ_GENE_ID'].iloc[0]
info_genes_of_interest.loc[(sym, n),'ENTREZ_GENE_ID'] = eid
# Set the gene sets
for key, value in dict_GeneTF.items():
    # get the list of gene sets associated to gene 'key'
    # (i.e. the last element of the list related to gene 'key')
    sets = value[-1]
    # write each gene set on its own row of the block belonging to gene 'key'
    for i, s in enumerate(sets, start=1):
        info_genes_of_interest.loc[(key, i), 'GENE_SET'] = s
# Set the TFs
for key, value in dict_GeneTF.items():
# get the TFs (i.e. the list of values except for the last two elements)
tfs = value[:-2]
# set the list of TFs to the correct cell in the dataframe (in correspondence of index 'key')
for i in num_lst:
if i <= len(tfs):
info_genes_of_interest.loc[(key, i),'Transcription Factors'] = tfs[i-1]
# Set the regulatory genes (both with their Gene Symbols and Entrez Gene IDs)
for key, value in dict_RegulGenes.items():
# the set of regulatory genes is the list 'value' corresponding to each key (gene).
# Set the list of regulatory genes to the correct cell in the dataframe (in correspondence of index 'key')
for i in num_lst:
if i <= len(value):
info_genes_of_interest.loc[(key, i),'Regulatory Genes'] = value[i-1]
if value[i-1] == 'n/a':
eid = 'n/a'
else:
# get the Entrez Gene ID of the regulatory gene
eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == value[i-1], 'ENTREZ_GENE_ID'].iloc[0]
info_genes_of_interest.loc[(key, i),'Entrez_Gene_IDs'] = eid
# Remove the empty rows in the dataframe
empty_rows = info_genes_of_interest[(info_genes_of_interest['Transcription Factors'] == '') &
                                    (info_genes_of_interest['GENE_SET'] == '')].index
info_genes_of_interest.drop(empty_rows, inplace=True)
# Extract the distinct candidate regulatory genes for each gene of interest:
# rebuild the dictionary so that each gene of interest maps only to its distinct candidate regulatory genes (duplicates and unmapped 'n/a' entries removed)
for v in dict_GeneTF.keys():
dict_RegulGenes[v] = []
for key, value in dict_GeneTF.items():
TFs = value[:-2]
for tf in TFs:
if tf in mapping_df_TFs:
gene_name = TF_Gene_df.loc[TF_Gene_df['TF_NAME'] == tf, 'GENE_SYMBOL'].iloc[0]
if gene_name not in dict_RegulGenes[key]:
dict_RegulGenes[key].append(gene_name)
# So, the general form of this second dictionary containing the information about regulatory genes is the following:
# dict_RegulGenes = {key: value, ...} = {GENE_SYMBOL: [REG_GENE1, REG_GENE2, REG_GENE3, ...]}, where each regulatory gene is identified by its GENE_SYMBOL
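# e.g. (hypothetical entry) dict_RegulGenes['GENE_A'] = ['GENE_B', 'GENE_C'] if the promoters of GENE_A are bound by TFs encoded by GENE_B and GENE_C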
# Export the dictionary of genes of interest and their regulatory genes:
# Save the dictionary into a pickle file
pickle.dump(dict_RegulGenes, open('./2_Regulatory_Genes/dict_RegulGenes.p', 'wb'))
# For clarity only, sort alphabetically the list of candidate regulatory genes of each gene of interest
dict_RegulGenes_ord = dict_RegulGenes.copy()
for k in dict_RegulGenes_ord.keys():
old = dict_RegulGenes_ord[k]
sorted_genes = sorted(old)
dict_RegulGenes_ord[k] = sorted_genes
# Save the dictionary as a .xlsx file
workbook = xlsxwriter.Workbook('./2_Regulatory_Genes/dict_RegulGenes.xlsx')
worksheet = workbook.add_worksheet()
# Set the headers of the columns
worksheet.write(0,0,'GENE_SYMBOL')
worksheet.write(0,1,'ENTREZ_GENE_ID')
worksheet.write(0,2,'Distinct Regulatory Genes - GENE_SYMBOL')
worksheet.write(0,3,'Distinct Regulatory Genes - ENTREZ_GENE_ID')
row = 1
col = 0
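# Write one block of rows per gene of interest: the gene symbol and its Entrez Gene ID on the first row, and each distinct regulatory gene (with its own Entrez Gene ID) on consecutive rows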
for key in dict_RegulGenes_ord.keys():
row += 1
worksheet.write(row, col, key)
# get the Entrez Gene ID of the gene of interest
eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == key, 'ENTREZ_GENE_ID'].iloc[0]
worksheet.write(row, col + 1, ''.join(eid))
for item in dict_RegulGenes_ord[key]:
worksheet.write(row, col + 2, ''.join(item))
# get the Entrez Gene ID of the regulatory gene
if item == 'PTRF':
entrez_id = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == 'CAVIN1', 'ENTREZ_GENE_ID'].iloc[0]
else:
entrez_id = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == item, 'ENTREZ_GENE_ID'].iloc[0]
worksheet.write(row, col + 3, ''.join(entrez_id))
row += 1
workbook.close()
# Save the dictionary as a .txt file
with open ('./2_Regulatory_Genes/dict_RegulGenes.txt', 'w') as fp:
for p in dict_RegulGenes_ord.items():
fp.write('%s : %s\n\n' % p)
# Count the number of TFs and distinct regulatory genes for each gene of interest:
# Store the number of TFs and distinct regulatory genes for each gene of interest in two dictionaries
from collections import defaultdict
dict_TFs_genes = defaultdict(int)
dict_regul_genes = defaultdict(int)
for k in dict_GeneTF.keys():
dict_TFs_genes[k] = 0
for k in dict_RegulGenes.keys():
dict_regul_genes[k] = 0
for k in dict_GeneTF.keys():
transcription_factors = dict_GeneTF[k][:-2]
number_TFs = len(transcription_factors)
dict_TFs_genes[k] = number_TFs
for k in dict_RegulGenes.keys():
genes = dict_RegulGenes[k]
number_genes = len(genes)
dict_regul_genes[k] = number_genes
# Create a table summarizing for each gene of interest the number of TFs binding to its promoters and the number of distinct genes encoding them
TFs_genes_df = pd.DataFrame(list(dict_TFs_genes.items()), columns=['GENE_SYMBOL', '#TFs'])
TFs_genes_df.set_index('GENE_SYMBOL', inplace=True)
regul_genes_df = pd.DataFrame(list(dict_regul_genes.items()), columns=['GENE_SYMBOL', '#RegulatoryGenes (distinct)'])
regul_genes_df.set_index('GENE_SYMBOL', inplace=True)
# Join the two dataframes into a single one to have both information together
TFs_regul_genes_df = TFs_genes_df.join(regul_genes_df)
TFs_regul_genes_df['GENE_SYMBOL'] = TFs_regul_genes_df.index
TFs_regul_genes_df.index = range(len(TFs_regul_genes_df)) # set a new progressive index for this table
# Add to the dataframe a column for storing also the Entrez Gene ID of each gene, besides the already present Gene Symbol
TFs_regul_genes_df['ENTREZ_GENE_ID'] = ''
# Add the correct Entrez Gene ID for each gene
for index, row in TFs_regul_genes_df.iterrows():
    sym = row['GENE_SYMBOL']
    eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == sym, 'ENTREZ_GENE_ID'].iloc[0]
    TFs_regul_genes_df.at[index, 'ENTREZ_GENE_ID'] = eid
TFs_regul_genes_df_final = TFs_regul_genes_df[['GENE_SYMBOL','ENTREZ_GENE_ID','#TFs','#RegulatoryGenes (distinct)']].copy()
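# Copy the per-gene counts into the summary table, on the first row of each gene's block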
for index, row in TFs_regul_genes_df_final.iterrows():
gene = row['GENE_SYMBOL']
n_tfs = row['#TFs']
n_genes_reg = row['#RegulatoryGenes (distinct)']
info_genes_of_interest.loc[(gene, 1),'#TFs'] = n_tfs
info_genes_of_interest.loc[(gene, 1),'#RegulatoryGenes (distinct)'] = n_genes_reg
# Export the dataframe as a .xlsx file
writer = ExcelWriter('./2_Regulatory_Genes/Full_TFs-RegulatoryGenes_SUMMARY_Table.xlsx')
info_genes_of_interest.to_excel(writer, sheet_name='Sheet1')
writer.close()
return dict_RegulGenes