seq_id
stringlengths 7
11
| text
stringlengths 156
1.7M
| repo_name
stringlengths 7
125
| sub_path
stringlengths 4
132
| file_name
stringlengths 4
77
| file_ext
stringclasses 6
values | file_size_in_byte
int64 156
1.7M
| program_lang
stringclasses 1
value | lang
stringclasses 38
values | doc_type
stringclasses 1
value | stars
int64 0
24.2k
⌀ | dataset
stringclasses 1
value | pt
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|---|
33962354417
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from random import randrange
from math import ceil, floor
# Load the data set from a text file.
data = np.genfromtxt('данные двумерная модель.txt', skip_header=1)  # skip the header row with the column names
# Split the data into the factors (X) and the dependent variable (y).
X = data[:, :-1]  # every column except the last (the factor columns)
y = data[:, -1]  # the last column
# Create and fit the classification model.
model = LogisticRegression()
model.fit(X, y)
# Print the fitted coefficients (the printed strings are user-facing Russian).
print("Коэффициенты регрессии:")
print("a1, a2:", [round(x, 3) for x in model.coef_[0]])
print("b (пересечение):", round(model.intercept_[0], 3))
# Find the per-column minimum and maximum over all rows of X.
# NOTE(review): hard-coded for exactly two factor columns — confirm the
# input file always has two factors plus the target column.
X_min = [float('inf'), float('inf')]
X_max = [float('-inf'), float('-inf')]
for x in X:
    for i in range(len(x)):
        if x[i] < X_min[i]:
            X_min[i] = x[i]
        if x[i] > X_max[i]:
            X_max[i] = x[i]
# Draw a random integer for each factor within its observed range.
R_X = []
for i in range(len(X_min)):
    R_X.append(randrange(ceil(X_min[i]), floor(X_max[i])))
print(R_X)
# Predict a new y value for the randomly drawn factor values.
x_new = np.array([R_X])
y_pred = model.predict(x_new)
print(f"Прогнозное значение y для новых данных: {y_pred[0]:.2f}")
print(f'{model.coef_[0][0]:.3f} {model.coef_[0][1]:.3f}, {model.intercept_[0]:.3f}')
|
IlnazMmm/RKA
|
3 lab/prog2.py
|
prog2.py
|
py
| 1,708 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
72742546747
|
# -*- coding: utf-8 -*-
# @Author : yxn
# @Date : 2022/1/27 20:46
# @IDE : PyCharm(2021.3.1) Python3.98
def toStr(n, base):
    """Recursively convert a non-negative integer to its string form in the
    given base (2..16), using uppercase hex digits."""
    digits = "0123456789ABCDEF"
    if n < base:
        return digits[n]
    quotient, remainder = divmod(n, base)
    return toStr(quotient, base) + digits[remainder]
if __name__ == '__main__':
    print(toStr(100, 16))  # 100 decimal -> "64" in hex
    # Inspect the interpreter's maximum recursion depth.
    import sys
    print(sys.getrecursionlimit())  # 1000 by default
|
yxn4065/Data-structure-and-algorithm-Python-
|
13_递归-转换任意进制.py
|
13_递归-转换任意进制.py
|
py
| 468 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6253113584
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from _polytope_ import Polytope, Face
import utilities as utils
from collections import OrderedDict
import numpy as np
import time
import copy
import convex_adversarial.convex_adversarial as ca
import full_lp as flp
class PLNN(nn.Module):
    """ Simple piecewise-linear neural net: fully connected layers and ReLUs
    only.

    The network is Linear -> ReLU -> ... -> Linear (no ReLU after the last
    layer), so it is piecewise linear in its input; every region of input
    space is identified by the on/off "configuration" of its ReLUs.
    """
    # TODO: determine if building net addition was necessary
    #       add some explanations for some methods

    def __init__(self, layer_sizes=None, bias=True, dtype=torch.FloatTensor):
        """
        ARGS:
            layer_sizes : list of ints - sizes of each layer, input first and
                          logits last; defaults to [32, 64, 128, 64, 32, 10]
            bias : bool - if True the linear layers carry bias terms
            dtype : torch tensor type the parameters are cast to
        """
        super(PLNN, self).__init__()

        if layer_sizes is None:
            layer_sizes = [32, 64, 128, 64, 32, 10]
        self.layer_sizes = layer_sizes
        self.dtype = dtype
        self.fcs = []          # the nn.Linear layers, in order
        self.bias = bias
        self.net = self.build_network(layer_sizes)

    def build_network(self, layer_sizes):
        """ Builds the nn.Sequential of Linear/ReLU pairs (no trailing ReLU)
        and records each nn.Linear in self.fcs.
        """
        layers = OrderedDict()
        num = 1
        for size, next_size in zip(layer_sizes, layer_sizes[1:]):
            layer = nn.Linear(size, next_size, bias=self.bias).type(self.dtype)
            layers[str(num)] = layer
            self.fcs.append(layer)
            num = num + 1
            layers[str(num)] = nn.ReLU()
            num = num + 1
        del layers[str(num - 1)]  # No ReLU for the last (logit) layer

        net = nn.Sequential(layers).type(self.dtype)
        print(self.layer_sizes)
        return net

    def get_parameters(self):
        """ Returns a flat list of the parameters of all linear layers. """
        params = []
        for fc in self.fcs:
            params.extend(fc.parameters())
        return params

    def config_str_to_config_list(self, config_str):
        """ Given str of 0/1 configs, converts to list of torch tensors of
        right layer sizes (one tensor per hidden layer).
        """
        assert isinstance(config_str, str)
        assert len(config_str) == sum(self.layer_sizes[1:-1])
        splits = []
        running_idx = 0
        for layer_size in self.layer_sizes[1:-1]:
            layer_config = config_str[running_idx:running_idx + layer_size]
            layer_config = torch.Tensor([float(c) for c in layer_config])
            # Do some cuda nonsense here?
            splits.append(layer_config)
            running_idx += layer_size
        return splits

    def relu_config(self, x, return_pre_relus=True):
        """ Computes the ReLU on/off configuration (1.0 = active) for input x,
        optionally also returning the pre-ReLU activations per layer. """
        pre_relus = self.forward_by_layer(x)
        configs = [(pre_relu.squeeze() > 0).type(torch.float32)
                   for pre_relu in pre_relus]
        if return_pre_relus:
            return pre_relus, configs
        else:
            return configs

    def make_adversarial_constraints(self, polytope, true_label,
                                     domain):
        """ Given a config computes the linear map in terms of this config
        for all neurons INCLUDING the output neurons (logits) and generates
        the polytope constraints for the neuron config and
        constraints for each of the decision boundaries

        configs - as usual
        true_label - index of the logit that should stay maximal
        """
        # Make all the adversarial_constraints:
        # if f(x) = Ax + b (in R^#logits)
        # adversarial constraints are:
        #   f_true(x) - f_j(x) = 0  (for all j != true)
        #  ~ which is ~
        #   <a_true, x> + b_true - <a_j, x> - b_j = 0
        #  ~ which is ~
        #   <a_true - a_j, x> = b_j - b_true
        total_a = polytope.linear_map['A']
        total_b = polytope.linear_map['b']

        num_logits = total_a.shape[0]
        facets = []
        true_a = total_a[true_label]
        true_b = total_b[true_label]

        for i in range(num_logits):
            if i == true_label:
                continue
            dec_bound = {'A': true_a - total_a[i],
                         'b': total_b[i] - true_b}
            new_facet = polytope.facet_constructor(None, facet_type='decision',
                                                   extra_tightness=dec_bound)
            # only keep decision boundaries that can intersect the domain
            if new_facet.fast_domain_check():
                facets.append(new_facet)
        return facets

    def compute_polytope_config(self, configs, comparison_form_flag=False,
                                uncertain_constraints=None, as_tensor=False):
        """ Given a ReLU configuration, composes the affine maps layer by
        layer and returns the polytope (A, b) on which the net realizes that
        configuration, plus the total affine map on that polytope. """
        lambdas = [torch.diag(config) for config in configs]
        js = [torch.diag(-2 * config + 1) for config in configs]

        # Compute Z_k = W_k * x + b_k for each layer
        wks = [self.fcs[0].weight]
        bks = [self.fcs[0].bias]
        for (i, fc) in enumerate(self.fcs[1:]):
            current_wk = wks[-1]
            current_bk = bks[-1]
            current_lambda = lambdas[i]
            precompute = fc.weight.matmul(current_lambda)
            wks.append(precompute.matmul(current_wk))
            bks.append(precompute.matmul(current_bk) + fc.bias)

        a_stack = []
        b_stack = []
        for j, wk, bk in zip(js, wks, bks):
            a_stack.append(j.matmul(wk))
            b_stack.append(-j.matmul(bk))

        if as_tensor:
            return {'a_stack': a_stack,
                    'b_stack': b_stack,
                    'total_a': wks[-1],
                    'total_b': bks[-1]}

        polytope_A = utils.as_numpy(torch.cat(a_stack, dim=0))
        polytope_b = utils.as_numpy(torch.cat(b_stack, dim=0))

        if(comparison_form_flag):
            polytope_A, polytope_b = utils.comparison_form(polytope_A, polytope_b)

        return {'poly_a': polytope_A,
                'poly_b': polytope_b,
                'configs': configs,
                'total_a': wks[-1],
                'total_b': bks[-1]
                }

    def compute_polytope(self, x, comparison_form_flag=False, as_tensor=False):
        """ Convenience wrapper: computes the config at x, then the polytope
        for that config; also attaches the pre-ReLU activations. """
        pre_relus, configs = self.relu_config(x, return_pre_relus=True)
        poly_out = self.compute_polytope_config(configs, comparison_form_flag,
                                                as_tensor=as_tensor)
        poly_out['pre_relus'] = pre_relus
        return poly_out

    def compute_matrix(self, configs):
        """ Composes the weight matrices masked by the given configs into the
        single linear map the net applies on that region (no bias terms). """
        M = torch.eye(self.layer_sizes[0])

        for config, fc, layer_size in zip(configs, self.fcs, self.layer_sizes):
            nullifier = torch.Tensor([config.numpy() for _ in range(0, layer_size)])
            M_layer_prime = fc.weight * torch.transpose(nullifier, 0, 1)
            M = torch.matmul(M_layer_prime, M)

        M = torch.matmul(self.fcs[-1].weight, M)
        return M

    def forward_by_layer(self, x):
        """ Returns the list of pre-ReLU activations, one per hidden layer
        (the final logit layer is not included). """
        pre_relus = []

        x = x.view(-1, self.layer_sizes[0])
        for fc in self.fcs[:-1]:
            x = fc(x)
            pre_relus.append(x.clone())
            x = F.relu(x)
        return pre_relus

    def forward(self, x):
        """ Standard forward pass; returns the logits. """
        x = x.view(-1, self.layer_sizes[0])
        for fc in self.fcs[:-1]:
            x = F.relu(fc(x))
        return self.fcs[-1](x)  # No ReLu on the last one

    def compute_interval_bounds(self, domain_obj, compute_logit_bounds=False,
                                as_tensor=False):
        """ For each neuron computes a bound for the range of values each
        pre-ReLU can take.
        ARGS:
            domain_obj : Domain - object used to hold bounding boxes
            compute_logit_bounds : boolean - if True, the final (logit)
                                   layer is bounded as well
            as_tensor : boolean - if True return torch tensors, else numpy
        RETURNS:
            returned_bounds : list of tensors giving pre-Relu bounds;
            list of length (# fully connected layers - 1), where each element
            is of shape (num_neurons, 2) giving (low, high) per pre-ReLU
        """
        box = domain_obj.box_to_tensor()

        # setup + asserts (each low must be <= its high)
        assert all(box[:, 0] <= box[:, 1])

        returned_bounds = []
        current_low, current_high = box[:, 0], box[:, 1]
        if compute_logit_bounds:
            layers_to_check = self.fcs
        else:
            layers_to_check = self.fcs[:-1]
        for fc in layers_to_check:
            weight, bias = fc.weight, fc.bias
            # interval arithmetic: positive weights map lows->lows and
            # highs->highs; negative weights swap them
            weight_pos, weight_neg = utils.split_tensor_pos(weight)
            new_high = (torch.matmul(weight_pos, current_high) +
                        torch.matmul(weight_neg, current_low))
            new_low = (torch.matmul(weight_pos, current_low) +
                       torch.matmul(weight_neg, current_high))
            if bias is not None:
                new_high += bias
                new_low += bias
            returned_bounds.append(torch.stack([new_low, new_high], dim=1))
            # push the bounds through the ReLU before the next layer
            current_low = F.relu(new_low)
            current_high = F.relu(new_high)

        if as_tensor:
            return returned_bounds
        else:
            return [utils.as_numpy(_) for _ in returned_bounds]

    def compute_improved_ia_bounds(self, domain_obj):
        """ Implements the improved interval bounds as presented here:
        https://arxiv.org/pdf/1809.03008.pdf (appendix C)
        [also done with gradients pushed through so we can build RS loss ]
        # CODE HEAVILY BORROWED FROM https://github.com/MadryLab/relu_stable/blob/master/models/MNIST_improved_ia.py
        # (but we're transposed from that code)
        """
        box = domain_obj.box_to_tensor()
        init_lows = box[:, 0]
        init_highs = box[:, 1]
        assert all(init_lows <= init_highs)  # assert lows less than highs

        # define the recursive call
        def recurs(layer_num, lows, highs, weights, biases):
            # lows/highs/weights/biases each hold one entry per
            # already-bounded layer, most recent first
            assert len(lows) == len(highs) == len(weights) == len(biases) == layer_num
            # current layer
            low = lows[0]
            high = highs[0]
            weight = weights[0]
            bias = biases[0]

            # Base case: bound directly against the input box
            if layer_num == 1:
                weight_pos, weight_neg = utils.split_tensor_pos(weight)
                next_low = (torch.matmul(weight_pos, init_lows) +
                            torch.matmul(weight_neg, init_highs) + bias)
                # BUG FIX: was `toch.matmul` (typo) which raised NameError
                next_high = (torch.matmul(weight_pos, init_highs) +
                             torch.matmul(weight_neg, init_lows) + bias)
                return next_low, next_high

            # Recursive case
            prev_weight = weights[1]
            prev_bias = biases[1]
            # Compute W_A (columns of provably-active neurons) and W_N (rest)
            # (need to zero out COLUMNS here)
            w_a = torch.matmul(weight, (low > 0).diag_embed())
            w_n = weight - w_a
            w_n_pos, w_n_neg = utils.split_tensor_pos(w_n)
            # fold the active columns into the previous layer's affine map
            w_prod = torch.matmul(w_a, prev_weight)
            b_prod = torch.matmul(w_a, prev_bias)

            # Compute this layer's contribution from non-active neurons
            prev_low = (torch.matmul(w_n_pos, low) +
                        torch.matmul(w_n_neg, high) + bias)
            prev_high = (torch.matmul(w_n_pos, high) +
                         torch.matmul(w_n_neg, low) + bias)

            # Recurse on the composed map
            deeper_lows, deeper_highs = recurs(layer_num - 1, lows[1:], highs[1:],
                                               [w_prod] + weights[2:],
                                               [b_prod] + biases[2:])
            return (prev_low + deeper_lows, prev_high + deeper_highs)

        # compute the lower and upper bounds for all neurons, layer by layer
        running_lows = [init_lows]
        running_highs = [init_highs]
        running_weights = [self.fcs[0].weight]
        running_biases = [self.fcs[0].bias]

        for layer_num, layer in enumerate(self.fcs[:-1]):
            new_lows, new_highs = recurs(layer_num + 1, running_lows, running_highs,
                                         running_weights, running_biases)
            running_lows = [new_lows] + running_lows
            running_highs = [new_highs] + running_highs
            # BUG FIX: the original overwrote these lists with a bare tensor,
            # which broke the length assert in recurs on the next iteration;
            # prepend the next layer's parameters instead so all four running
            # lists stay aligned
            running_weights = [self.fcs[layer_num + 1].weight] + running_weights
            running_biases = [self.fcs[layer_num + 1].bias] + running_biases

        return running_lows[::-1], running_highs[::-1]

    def compute_full_lp_bounds(self, domain_obj):
        """ Compute the full linear program values.
        Code here is in a different file
        """
        return flp.compute_full_lp_bounds(self, domain_obj)

    def compute_dual_lp_bounds(self, domain_obj):
        """ Use KW to actually find the bounds. Uses L_inf bounds to help
        get better bounds
        """
        low_bounds = torch.Tensor(domain_obj.box_low)
        high_bounds = torch.Tensor(domain_obj.box_high)
        midpoint = ((low_bounds + high_bounds) / 2.0).view(1, -1)
        box_bounds = (low_bounds, high_bounds)

        dual_net = ca.DualNetwork(self.net, midpoint, domain_obj.linf_radius,
                                  box_bounds=box_bounds).dual_net
        # NOTE: dead_set is collected but only bounds are returned, matching
        # the original behavior
        bounds, dead_set = [], []
        for el in dual_net:
            if isinstance(el, ca.DualReLU):
                bounds.append(torch.cat((el.zl.view(-1, 1), el.zu.view(-1, 1)),
                                        dim=1))
                dead_set.append(~el.I.squeeze())
        return bounds

    def compute_dual_ia_bounds(self, domain_obj):
        """ Use both interval analysis and dual bounds to get best bounds """
        ia = self.compute_interval_bounds(domain_obj)
        dd = self.compute_dual_lp_bounds(domain_obj)
        bounds = []
        for i, d in zip(ia, dd):
            # elementwise tightest: max of lows, min of highs
            stacked = torch.stack((i, d))
            new_lows = torch.max(stacked[:, :, 0], dim=0)[0]
            new_highs = torch.min(stacked[:, :, 1], dim=0)[0]
            new_bounds = torch.stack((new_lows, new_highs), dim=1)
            bounds.append(new_bounds)
        return bounds

    def fast_lip_all_vals(self, x, l_q, on_off_neurons):
        """ Does the fast_lip value for all possible c's (one per non-true
        logit), returning the c vectors and their lipschitz values. """
        num_logits = self.fcs[-1].out_features
        if not isinstance(x, torch.Tensor):
            true_label = self(torch.Tensor(x)).max(1)[1].item()
        else:
            true_label = self(x).max(1)[1].item()

        c_vecs, lip_values = [], []
        for i in range(num_logits):
            if true_label == i:
                continue
            c_vec = torch.zeros(num_logits)
            c_vec[true_label] = 1.0
            c_vec[i] = -1.0
            lip_value = self.fast_lip(c_vec, l_q, on_off_neurons)
            c_vecs.append(c_vec)
            lip_values.append(lip_value)
        return c_vecs, lip_values

    def fast_lip(self, c_vector, l_q, on_off_neurons):
        """
        Pytorch implementation of fast_lip. Might be buggy? Who knows?
        see : https://arxiv.org/pdf/1804.09699.pdf for details
        INPUTS:
            c_vector: tensor that multiplies the output vector:
                      we compute gradient of c^Tf(x)
            l_q : int - q_norm of lipschitzness that we compute
                        (is dual norm: e.g. if bounds come from an l_inf box,
                         this should be 1)
            on_off_neurons : list of LongTensors (entries in -1, 0 or 1)
                             corresponding to the set of
                             (off, uncertain, on, respectively) neurons
                             inside the domain
        RETURNS:
            upper bound on lipschitz constant
        """
        ######################################################################
        #   First generate inputs needed by fast_lip algorithm               #
        ######################################################################

        # --- split off active and uncertain neurons
        # -1 means off (don't care)
        #  0 means UNCERTAIN
        #  1 means ACTIVE
        active_neuron_list, uncertain_neuron_list = [], []
        for neuron_by_layer in on_off_neurons:
            active_neuron_list.append((neuron_by_layer == 1))
            uncertain_neuron_list.append((neuron_by_layer == 0))

        # --- get list of weights, initialize placeholders
        weights = [layer.weight for layer in self.fcs[:-1]]
        weights.append(c_vector.matmul(self.fcs[-1].weight).view(1, -1))

        constant_term = weights[0]
        lowers = [torch.zeros_like(constant_term)]
        uppers = [torch.zeros_like(constant_term)]

        ######################################################################
        #   Loop through layers using the _bound_layers_grad subroutine      #
        ######################################################################
        for i in range(len(weights) - 1):
            subroutine_out = self._bound_layers_grad(constant_term, lowers[-1],
                                                     uppers[-1],
                                                     weights[i + 1],
                                                     active_neuron_list[i],
                                                     uncertain_neuron_list[i])
            constant_term, upper, lower = subroutine_out
            lowers.append(lower)
            uppers.append(upper)

        ######################################################################
        #   Finalize and return the output                                   #
        ######################################################################
        low_bound = (constant_term + lowers[-1]).abs()
        upp_bound = (constant_term + uppers[-1]).abs()
        layerwise_max = torch.where(low_bound > upp_bound, low_bound, upp_bound)
        return torch.norm(layerwise_max, p=l_q).item()

    def _bound_layers_grad(self, constant_term, lower, upper, weight,
                           active_neurons, uncertain_neurons):
        """ Subroutine for fast_lip.
        Assume weight has shape [m, n]
        ARGS: (let's make sure the types and shapes all mesh)
            constant_term: floatTensor shape (n, n_0)
            lower: floatTensor shape (n, n_0)
            upper: floatTensor shape (n, n_0)
            weight: floatTensor shape (m, n)
            active_neurons: torch.Tensor shape (n,)
            uncertain_neurons: torch.Tensor shape (n,)
        RETURNS:
            new constant term, upper, and lower, each with shape (m, n_0)
        """
        # ASSERTS ON SHAPES FOR DEBUGGING
        n_0 = self.layer_sizes[0]
        n = weight.shape[1]
        assert constant_term.shape == (n, n_0)
        assert lower.shape == (n, n_0)
        assert upper.shape == (n, n_0)
        assert active_neurons.shape == (n,)
        assert uncertain_neurons.shape == (n,)

        # Make diagonals and split weights by +/-
        active_diag = torch.diag(active_neurons).float()
        uncertain_diag = torch.diag(uncertain_neurons).float()
        pos_weight, neg_weight = utils.split_tensor_pos(weight)

        # Compute the new constant_term (only provably-active neurons
        # propagate the exact gradient)
        new_constant_term = weight.matmul(active_diag).matmul(constant_term)

        # Make new upper bounds/lower bounds
        cons_low = constant_term + lower
        _, neg_cons_low = utils.split_tensor_pos(cons_low)
        cons_upp = constant_term + upper
        pos_cons_upp, _ = utils.split_tensor_pos(cons_upp)

        new_upper = (pos_weight.matmul(active_diag).matmul(upper) +
                     neg_weight.matmul(active_diag).matmul(lower) +
                     neg_weight.matmul(uncertain_diag).matmul(neg_cons_low) +
                     pos_weight.matmul(uncertain_diag).matmul(pos_cons_upp))
        new_lower = (pos_weight.matmul(active_diag).matmul(lower) +
                     neg_weight.matmul(active_diag).matmul(upper) +
                     pos_weight.matmul(uncertain_diag).matmul(neg_cons_low) +
                     neg_weight.matmul(uncertain_diag).matmul(pos_cons_upp))
        return new_constant_term, new_upper, new_lower
class PLNN_seq(PLNN):
    """ Simple piecewise neural net built from an existing nn.Sequential.
    Fully connected layers and ReLus only; the Sequential's nn.Linear layers
    become self.fcs and the Sequential itself becomes self.net.
    """

    def __init__(self, sequential, layer_sizes, dtype=torch.FloatTensor):
        """
        ARGS:
            sequential : nn.Sequential - network to wrap
            layer_sizes : list of ints - layer sizes matching `sequential`
            dtype : torch tensor type for the parent-built parameters
        """
        # BUG FIX: the parent signature is (layer_sizes, bias, dtype); passing
        # dtype positionally made it land in `bias`.  Pass it by keyword.
        super(PLNN_seq, self).__init__(layer_sizes, dtype=dtype)
        self.fcs = [layer for layer in sequential if isinstance(layer, nn.Linear)]
        self.net = sequential
class LinearRegionCollection(object):
    """ Builds a LinearRegion for every polytope seen in a GeoCertReturn
    object and stores them, keyed on their sign-config strings.
    """

    def __init__(self, plnn_obj, return_obj, objective_vec=None,
                 do_setup=False):
        self.plnn_obj = plnn_obj
        self.return_obj = return_obj
        self.collection = {
            sign_config: LinearRegion(plnn_obj, sign_config,
                                      return_obj=return_obj,
                                      objective_vec=objective_vec,
                                      do_setup=do_setup)
            for sign_config in return_obj.seen_polytopes
        }

    def get_maximum_lipschitz_constant(self):
        """ Largest lipschitz constant over every stored region. """
        return max(region.get_lipschitz_constant()
                   for region in self.collection.values())

    def gradient_angle_list(self):
        """ Gets the gradient angles between neighboring linear regions """
        angles = {}
        for left, right in self.return_obj.polytope_graph.keys():
            grad_left = self.collection[left].get_gradient()
            grad_right = self.collection[right].get_gradient()
            angles[(left, right)] = utils.angle(grad_left, grad_right)
        return angles

    def gradient_magnitude_diff_list(self, grad_fxn=None):
        """ Gets the magnitude of gradient difference
            between neighboring linear regions
        """
        if grad_fxn is None:
            grad_fxn = lambda u, v: torch.norm(u - v).item()
        diffs = {}
        for left, right in self.return_obj.polytope_graph.keys():
            grad_left = self.collection[left].get_gradient()
            grad_right = self.collection[right].get_gradient()
            diffs[(left, right)] = grad_fxn(grad_left, grad_right)
        return diffs

    def get_greedy_lipschitz_components(self):
        """ Returns dict of str -> [str1, ..., ] mapping locally maximal
            linear regions to the set of regions that will greedily
            approach this local max
        """
        def best_uphill_neighbor(region):
            # Neighbor with the highest lipschitz constant, or None if no
            # neighbor beats the current region.
            here = region.get_lipschitz_constant()
            scored = [(nbr, nbr.get_lipschitz_constant())
                      for nbr in region.get_neighbors()]
            top, top_val = max(scored, key=lambda pair: pair[1])
            return top if top_val > here else None

        def climb(start_config):
            # Greedy ascent from start_config; returns the sign config of
            # the locally-maximal region reached.
            region = self.collection[start_config]
            while True:
                uphill = best_uphill_neighbor(region)
                if uphill is None:
                    return region.sign_config
                region = uphill

        components = {}
        for config in self.collection.keys():
            parent = climb(config)
            components.setdefault(parent, []).append(config)
        return components
class LinearRegion(object):
    """ Holds info and shortcuts to work with linear regions.

    Expensive quantities (polytope, linear map, jacobian, top singular value)
    are computed lazily and cached on first access.
    """

    @classmethod
    def process_return_obj(cls, plnn_obj, return_obj, objective_vec=None,
                           do_setup=False):
        """ Given a GeoCertReturn object, builds a LinearRegion for every
        'seen polytope' and returns them in a dict keyed on the sign configs.
        """
        return {sign_config: cls(plnn_obj, sign_config,
                                 return_obj=return_obj,
                                 objective_vec=objective_vec,
                                 do_setup=do_setup)
                for sign_config in return_obj.seen_polytopes}

    def __init__(self, plnn_obj, sign_config, return_obj=None,
                 objective_vec=None, do_setup=False):
        """ Initializes a Linear Region object
        ARGS:
            plnn_obj - the network this region is linear for
            sign_config - the neuron configuration of the region (binary str)
            return_obj : GeoCertReturn object - if not None is an
                         output of GeoCert which contains info about
                         the linear regions.
        """
        super(LinearRegion, self).__init__()
        self.plnn_obj = plnn_obj
        self.sign_config = sign_config
        self.hex_config = hex(int(self.sign_config, 2))
        self.return_obj = return_obj
        self.objective_vec = objective_vec

        # lazily-computed caches, filled by the getters below
        self._polytope_config = None
        self.polytope = None
        self.linear_map = None
        self.jacobian = None
        self.largest_sv = None

        if do_setup:
            self.setup()

    def __repr__(self):
        return "LinearRegion: %s" % self.hex_config

    def get_neighbors(self):
        """ Requires self.return_obj to be set (else asserts).  Returns the
        LinearRegions sharing a polytope-graph edge with this one.
        """
        assert self.return_obj is not None
        neighbor_configs = []
        for edge in self.return_obj.polytope_graph:
            # pick the other endpoint of any edge touching this region
            if self.sign_config == edge[0]:
                neighbor_configs.append(edge[1])
            elif self.sign_config == edge[1]:
                neighbor_configs.append(edge[0])
        return [LinearRegion(self.plnn_obj, cfg,
                             return_obj=self.return_obj,
                             objective_vec=self.objective_vec)
                for cfg in neighbor_configs]

    def _get_polytope_config(self):
        # Cache the (expensive) polytope-config computation.
        if self._polytope_config is None:
            config_list = self.plnn_obj.config_str_to_config_list(self.sign_config)
            self._polytope_config = self.plnn_obj.compute_polytope_config(config_list)
        return self._polytope_config

    def setup(self):
        """ Eagerly computes and caches every derived quantity. """
        self.get_polytope()
        self.get_linear_map()
        self.get_jacobian()
        self.get_largest_singular_value()

    def get_polytope(self):
        """ Returns {'A': ..., 'b': ...}: the polytope on which the net
        satisfies this region's neuron configuration.
        """
        if self.polytope is None:
            cfg = self._get_polytope_config()
            self.polytope = {'A': cfg['poly_a'],
                             'b': cfg['poly_b']}
        return self.polytope

    def get_linear_map(self):
        """ Returns a torch.nn.Linear implementing the net's affine map on
        this region.
        """
        if self.linear_map is None:
            cfg = self._get_polytope_config()
            weight = nn.Parameter(cfg['total_a'])
            bias = nn.Parameter(cfg['total_b'])
            mapping = nn.Linear(*weight.shape)
            mapping.weight = weight
            mapping.bias = bias
            self.linear_map = mapping
        return self.linear_map

    def get_jacobian(self):
        """ Returns the jacobian of the net on this linear piece (the weight
        of the region's linear map).
        """
        if self.jacobian is None:
            self.jacobian = self.get_linear_map().weight
        return self.jacobian

    def get_largest_singular_value(self):
        """ Returns the largest singular value of this region's jacobian. """
        if self.largest_sv is None:
            self.largest_sv = self.get_jacobian().svd().S[0].item()
        return self.largest_sv

    def get_gradient(self):
        """ Gradient of <objective_vec, f(x)> on this region. """
        assert self.objective_vec is not None
        return self.objective_vec.matmul(self.get_jacobian())

    def get_lipschitz_constant(self):
        """ Norm of the objective-projected gradient if an objective is set,
        otherwise the jacobian's top singular value.
        """
        if self.objective_vec is not None:
            return self.objective_vec.matmul(self.get_jacobian()).norm().item()
        return self.get_largest_singular_value()
|
revbucket/geometric-certificates
|
plnn.py
|
plnn.py
|
py
| 29,208 |
python
|
en
|
code
| 40 |
github-code
|
6
|
71645960508
|
from django.urls import path
from . import views

# URL namespace: reverse these as 'blog:<name>' (e.g. {% url 'blog:index' %}).
app_name = 'blog'

urlpatterns = [
    path('', views.index, name="index"),
    path('detalhes/<int:pk>/<slug:slug>', views.detail, name="details"),
    path('post/novo/', views.post, name="new_post"),
    path('editar/post/<int:pk>', views.edit, name="edit"),
    path('filter/<int:pk>/<str:username>', views.filter, name="filter"),
    path('search/', views.search, name="search"),
    path('delete/<int:pk>', views.delete, name="delete_post"),
    path('contato/', views.contato, name="contato"),
]
|
eduardoferreira97/Blog
|
blog/urls.py
|
urls.py
|
py
| 553 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43371048993
|
import pygame
from pygame.locals import KEYDOWN, K_ESCAPE, QUIT
from os import path
import parameters.enums as en
from Objects.player import Player, Wall
from Objects.map import Map
from Objects.robot import Robot
import numpy as np
from Objects.machinery import Machinery, Destiny
import sys
import torch
from Objects.utils import GameBuffer
import pickle
class Game(pygame.sprite.Sprite):
    """Top-level game object: owns the pygame window, the sprite groups and
    the main loop driving the player/robot simulation."""

    # Running score shared as a class attribute.
    # NOTE(review): presumably the sprites bump the score when the part is
    # delivered — confirm against the Objects.* modules.
    SCORE = 0

    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((en.WIDTH, en.HEIGHT))
        self.clock = pygame.time.Clock()
        self.load_data()
        # Resume the robot from a saved checkpoint file if one exists.
        self.checkpoint = "robotCheckpoint" if path.exists("robotCheckpoint") else None

    def load_data(self):
        """Load the tile map stored next to this script."""
        game_folder = path.dirname(__file__)
        self.map = Map(game_folder + "/Static/map.txt")

    def new(self):
        """Initialize all variables."""
        self.buffer = GameBuffer()
        self.allSprites = pygame.sprite.Group()
        self.machineryParts = pygame.sprite.Group()
        self.destination = pygame.sprite.Group()
        self.robots = pygame.sprite.Group()
        self.walls = pygame.sprite.Group()
        self.part = Machinery(self)
        # Build the world from the map characters:
        # "1" = wall, "D" = destiny, "P" = player, "R" = robot.
        for j, row in enumerate(self.map.data):
            for i, col in enumerate(row):
                if col == "1":
                    Wall(self, i, j)
                elif col == "D":
                    self.destiny = Destiny(self, i, j)
                elif col == "P":
                    self.player = Player(self, self.buffer, i, j)
                elif col == "R":
                    self.robot = Robot(self, self.buffer, i, j, self.checkpoint)

    def quit(self):
        """Shut pygame down and exit the process."""
        pygame.quit()
        sys.exit()

    def events(self):
        """Handle window events; save the robot before quitting."""
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    self.robot.save()
                    self.quit()
            if event.type == QUIT:
                self.robot.save()
                self.quit()

    def get_screen(self):
        """Return the current frame as a (3, 60, 60) torch tensor
        (downscaled, channels-first)."""
        screen = pygame.transform.scale(self.screen, (60, 60))
        screen = np.array(pygame.surfarray.array3d(screen))
        screen = screen.transpose((2, 1, 0))  # (w, h, c) -> (c, h, w)
        return torch.from_numpy(screen)

    def run(self):
        """Main loop: tick the clock, handle events, update sprites, draw;
        train the robot whenever the score changes."""
        self.playing = True
        prevScore = self.SCORE
        while self.playing:
            self.dt = self.clock.tick(en.FPS) / 1000  # seconds since last frame
            screenMatrix = self.get_screen()
            self.events()
            self.updates(windowPixel=screenMatrix)
            self.draw()
            if prevScore != self.SCORE:
                self.robot.train()
                prevScore = self.SCORE

    def updates(self, **args):
        """Update all sprites; Robot/Player additionally receive the frame."""
        for sprite in self.allSprites:
            if isinstance(sprite, Robot) or isinstance(sprite, Player):
                sprite.update(args["windowPixel"])
            else:
                sprite.update()

    def draw_grid(self):
        """Draw the tile grid lines over the background."""
        for x in range(0, en.WIDTH, en.TILE_SIZE):
            pygame.draw.line(self.screen, en.LIGHTGREY, (x, 0), (x, en.HEIGHT))
        for y in range(0, en.HEIGHT, en.TILE_SIZE):
            pygame.draw.line(self.screen, en.LIGHTGREY, (0, y), (en.WIDTH, y))

    def draw(self):
        """Redraw the background, grid and sprites, then flip the display."""
        self.screen.fill(en.BGCOLOR)
        self.draw_grid()
        self.allSprites.draw(self.screen)
        pygame.display.flip()
# Entry point: create the game, then restart a fresh episode each time one
# ends (run() returns when self.playing goes False; quit() exits the process).
g = Game()
while True:
    g.new()
    g.run()
|
anfego22/rele
|
main.py
|
main.py
|
py
| 3,394 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74479990587
|
import json
from random import uniform
import matplotlib.pyplot as plt
class Scanner:
    """ Scans a JSON listing of apartments and ranks those priced below the
    average of their nearest neighbours.
    """

    def __init__(self, data_filename, n_neighbours):
        """
        ARGS:
            data_filename : path to a JSON file holding a list of apartment
                            dicts, each with "coordinates" ([x, y]) and "price"
            n_neighbours : int - how many nearest apartments (the apartment
                           itself included, at distance 0) to average against
        """
        self.data_filename = data_filename
        self.n_neighbours = n_neighbours

    def scanner(self, visualize_data=False):
        """ Returns [[apartment, neigh_diff, area_diff], ...] sorted by
        neigh_diff descending, for apartments cheaper than their neighbour
        average.  neigh_diff = avg neighbour price - price;
        area_diff = avg area price - price (profitability).
        """
        # FIX: use a context manager so the data file is always closed
        # (the original leaked the open file handle).
        with open(self.data_filename, encoding="utf-8") as f:
            data = json.load(f)
        data = sorted(data, key=lambda x: x["coordinates"])
        avg_area_price = 0
        result = []
        differences = []
        for apartment in data:
            neighbours = self.find_neighbours(apartment, data)
            # average over the n nearest (division by n_neighbours preserved
            # even if fewer listings exist, matching original behavior)
            avg_neigh_price = sum(n["price"] for n in neighbours) / self.n_neighbours
            if apartment["price"] < avg_neigh_price:
                # append apartment data and difference between
                # average prices of neighbours and apartment price
                result.append([apartment, avg_neigh_price - apartment["price"]])
            avg_area_price += apartment["price"]
            differences.append([apartment, neighbours])
        avg_area_price /= len(data)
        for el in result:
            # append difference between average price in search area
            # and apartment price (profitability)
            el.append(avg_area_price - el[0]["price"])
        # sort data by apartment profitability (largest neighbour gap first)
        result.sort(key=lambda x: x[1], reverse=True)
        if visualize_data:
            self.visualize_n_neighbours(differences)
        return result

    def find_neighbours(self, apartment, data):
        """ Returns the n_neighbours listings closest (euclidean distance on
        coordinates) to the given apartment — itself included.
        """
        data = sorted(
            data,
            key=lambda x: (
                (apartment["coordinates"][0] - x["coordinates"][0]) ** 2
                + (apartment["coordinates"][1] - x["coordinates"][1]) ** 2
            )
            ** 0.5,
        )
        return data[: self.n_neighbours]

    @staticmethod
    def visualize_n_neighbours(differences):
        """ Plots each apartment connected to its neighbours, one colour per
        apartment, with small jitter so the lines do not overlap.
        """
        colors = ["b", "g", "r", "c", "m", "y", "k"]
        colors_i = 0
        for diff in differences:
            color = colors[colors_i % len(colors)]
            colors_i += 1
            el_coord = diff[0]["coordinates"]
            for neighbour in diff[1]:
                diff_coord = neighbour["coordinates"]
                # add small number to coordinates so that lines on plot do not overlap
                d = uniform(0.003, 0.005)
                plt.plot(
                    [el_coord[0] + d, diff_coord[0] + d],
                    [el_coord[1] + d, diff_coord[1] + d],
                    "-ro",
                    color=color,
                )
        plt.show()
|
SergeyBurik/profitable_apartments_parser
|
profitable_apartments_parser/scanner/scanner.py
|
scanner.py
|
py
| 2,832 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44531751826
|
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: build the column spec, fetch the matching rows,
    and return both to frappe."""
    columns = get_columns()
    data = get_data(filters, columns)
    return columns, data
def get_columns():
    """Return the column definitions for the Bank Loan report.

    Each spec row is (label, fieldname, fieldtype, options, width); options
    is None for non-Link columns and is omitted from the resulting dict.
    """
    specs = [
        ("Loan", "name", "Link", "Bank Loan", 120),
        ("Provider Bank", "loan_type", "Link", "Loan Type", 120),
        ("Drawn Bank", "bank_account", "Link", "Bank", 100),
        ("Status", "status", "Data", None, 120),
        ("Loan Amount", "loan_amount", "Currency", None, 100),
        ("Posting Date", "posting_date", "Date", None, 120),
        ("Start Date", "repayment_start_date", "Date", None, 120),
        ("Payment Freq", "payment_frequency", "Data", None, 120),
        ("Repayment Amount", "repayment_amount", "Currency", None, 120),
        ("Rate Of Interest %", "rate_of_interest", "Float", None, 100),
        ("Periods / Paid", "periods_paid", "Data", None, 100),
        ("Administrative Expenses Amount", "administrative_expenses_amount",
         "Currency", None, 120),
        ("Total Principal Payable", "total_principal_payable", "Currency",
         None, 120),
        ("Total Principal Paid", "total_principal_paid", "Currency", None, 120),
        ("Total Interest Payable", "total_interest_payable", "Currency",
         None, 120),
        ("Total Interest Paid", "total_interest_paid", "Currency", None, 120),
        ("Total Payable Amount", "total_payment", "Currency", None, 120),
        ("Total Amount Paid", "total_amount_paid", "Currency", None, 120),
    ]
    columns = []
    for label, fieldname, fieldtype, options, width in specs:
        column = {
            "label": _(label),
            "fieldname": fieldname,
            "fieldtype": fieldtype,
            "width": width,
        }
        if options is not None:
            column["options"] = options
        columns.append(column)
    return columns
def get_data(filters, columns):
    """Return the report rows for the given filters.

    ``columns`` is accepted to match the standard frappe report entry-point
    signature but is not used when building the rows.

    The original body initialised ``item_price_qty_data = []`` and then
    immediately overwrote it — the redundant assignment is removed.
    """
    return get_item_price_qty_data(filters)
def get_item_price_qty_data(filters):
    """Query submitted Bank Loans and shape them into report rows.

    filters: frappe dict with optional ``from_date`` / ``to_date`` /
        ``loan_type`` keys, each adding a WHERE condition.
    Returns a list of plain dicts keyed like the report columns.

    Changes vs. original: dead commented-out price-map code removed, and the
    field-by-field copy loop replaced by one authoritative field list (the
    original repeated every field name twice, inviting drift from the SQL).
    """
    conditions = ""
    if filters.get("from_date"):
        conditions += " and a.posting_date>=%(from_date)s"
    if filters.get("to_date"):
        conditions += " and a.posting_date<=%(to_date)s"
    if filters.get("loan_type"):
        conditions += " and a.loan_type=%(loan_type)s"
    item_results = frappe.db.sql("""
        select
            a.name as name,
            a.loan_type as loan_type,
            a.bank_account as bank_account,
            a.status as status,
            a.loan_amount as loan_amount,
            a.posting_date as posting_date,
            a.repayment_start_date as repayment_start_date,
            a.payment_frequency as payment_frequency,
            a.repayment_amount as repayment_amount,
            a.rate_of_interest as rate_of_interest,
            a.administrative_expenses_amount as administrative_expenses_amount,
            a.total_principal_payable as total_principal_payable,
            a.total_principal_paid as total_principal_paid,
            a.total_interest_payable as total_interest_payable,
            a.total_interest_paid as total_interest_paid,
            a.total_payment as total_payment,
            CONCAT_WS(" / ",a.no_of_repayments, (select count(name) from `tabBank Loan Repayment Schedule` b where b.parent = a.name and b.is_paid =1)) as periods_paid,
            a.total_amount_paid as total_amount_paid
        from `tabBank Loan` a
        where
            docstatus =1
            {conditions}
        """
        .format(conditions=conditions), filters, as_dict=1)
    # Field order mirrors the original row-dict insertion order.
    fields = (
        'name', 'loan_type', 'bank_account', 'status', 'loan_amount',
        'posting_date', 'repayment_start_date', 'payment_frequency',
        'repayment_amount', 'rate_of_interest', 'administrative_expenses_amount',
        'total_principal_payable', 'total_principal_paid',
        'total_interest_payable', 'total_interest_paid', 'total_payment',
        'periods_paid', 'total_amount_paid',
    )
    return [{field: row.get(field) for field in fields} for row in item_results]
def get_price_map(price_list_names, buying=0, selling=0):
    """Map Item Price record names to their price list and rate.

    price_list_names: iterable of Item Price names to look up.
    buying/selling: flags selecting which side's prices (and result keys) to use.
    Returns {name: {"<side> Price List": ..., "<side> Rate": ...}}.
    """
    if not price_list_names:
        return {}
    rate_key = "Buying Rate" if buying else "Selling Rate"
    price_list_key = "Buying Price List" if buying else "Selling Price List"
    filters = {"name": ("in", price_list_names)}
    if buying:
        filters["buying"] = 1
    else:
        filters["selling"] = 1
    pricing_details = frappe.get_all("Item Price",
        fields = ["name", "price_list", "price_list_rate"], filters=filters)
    return {
        d["name"]: {
            price_list_key: d["price_list"],
            rate_key: d["price_list_rate"],
        }
        for d in pricing_details
    }
|
erpcloudsystems/ecs_bank_loans
|
ecs_bank_loans/ecs_bank_loans/report/bank_loan_report/bank_loan_report.py
|
bank_loan_report.py
|
py
| 5,926 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36848412423
|
import hashlib
import random
import sqlite3
from typing import List, Optional
import more_itertools
import numpy as np
import pandas as pd
import scipy.spatial
import skimage.transform
from carla_real_traffic_scenarios import DT
from carla_real_traffic_scenarios.ngsim import DatasetMode
from carla_real_traffic_scenarios.opendd.dataset import OpenDDDataset, Place
from carla_real_traffic_scenarios.utils.carla import RealTrafficVehicle, find_best_matching_model
from carla_real_traffic_scenarios.utils.transforms import Transform, Vector3, Vector2
def extract_utm_trajectory_from_df(df) -> List[Transform]:
    """Convert the UTM columns of *df* into a list of Transforms.

    Each row's (UTM_X, UTM_Y) becomes the position (z = 0) and UTM_ANGLE is
    turned into a unit heading vector.
    """
    transforms = []
    for x, y, angle in df[['UTM_X', 'UTM_Y', 'UTM_ANGLE']].values:
        heading = Vector2(np.cos(angle), np.sin(angle))
        transforms.append(Transform(Vector3(x, y, 0), heading))
    return transforms
class Utm2CarlaMapper:
    """Affine coordinate mappings between image pixels, UTM and CARLA frames.

    Built from a :class:`Place`: pixel<->UTM comes from the place's world-file
    parameters; UTM->CARLA recenters on the map center, reflects over the Y
    axis and applies the place's correction transform.
    """
    def __init__(self, place: Place):
        # NOTE(review): image_middle is computed but never used — confirm it
        # can be removed.
        image_middle = np.array(place.image_size) // 2
        # World-file parameters form the pixel->UTM affine matrix.
        pix2utm_transform = skimage.transform.AffineTransform(np.array(
            [[place.world_params[0], place.world_params[2], place.world_params[4]],
             [place.world_params[1], place.world_params[3], place.world_params[5]],
             [0, 0, 1]]))
        self.pix2utm_transformer = pix2utm_transform
        # Inverse mapping: UTM -> pixel.
        utm2pix_transform = skimage.transform.AffineTransform(pix2utm_transform._inv_matrix)
        self.utm2pix_transformer = utm2pix_transform
        map_center_utm = np.array(place.map_center_utm.as_numpy()[:2])
        reflect_matrix = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]], dtype='float32')  # reflect over Y axis
        # UTM -> CARLA: translate to map center, flip Y, then apply the
        # per-place correction transform (composed left-to-right).
        self.utm2carla_transformer = skimage.transform.AffineTransform(translation=-map_center_utm) + \
                                     skimage.transform.AffineTransform(matrix=reflect_matrix) + \
                                     place.correction_transform
    def utm2pix(self, transform: Transform):
        """Map a UTM Transform to pixel coordinates."""
        return self._transform_with_convert(transform, self.utm2pix_transformer)
    def pix2utm(self, transform: Transform):
        """Map a pixel-space Transform to UTM coordinates."""
        return self._transform_with_convert(transform, self.pix2utm_transformer)
    def utm2carla(self, transform: Transform):
        """Map a UTM Transform to CARLA world coordinates."""
        return self._transform_with_convert(transform, self.utm2carla_transformer)
    def _transform_with_convert(self, transform: Transform, transformer: skimage.transform.AffineTransform):
        """Apply *transformer* to a Transform, returning a new Transform (z=0)."""
        position = transform.position.as_numpy()[:2]
        position = position.reshape(-1, 2)
        orientation = transform.orientation.as_numpy()
        orientation = orientation.reshape(-1, 2)
        position, orientation = self.transform(position, orientation, transformer)
        position = Vector2.from_numpy(position.squeeze()).to_vector3(0)
        orientation = Vector2.from_numpy(orientation.squeeze())
        return Transform(position, orientation)
    def transform(self, positions: np.ndarray, orientations: np.ndarray,
                  transformer: skimage.transform.AffineTransform):
        """Apply an affine transform to positions and direction vectors.

        Orientations are direction vectors, so they are transformed as
        (position + orientation) and re-based afterwards to stay translation
        invariant.
        """
        orientations = positions + orientations
        positions = transformer(positions)
        orientations = transformer(orientations)
        orientations = orientations - positions
        return positions, orientations
class OpenDDVehicle:
    """A single recorded vehicle: its dataframe slice plus precomputed
    UTM and CARLA trajectories, replayed frame by frame via :meth:`step`."""
    def __init__(self, df, transformer) -> None:
        # df: per-vehicle rows (OBJID, WIDTH, LENGTH, V, TIMESTAMP, UTM_*).
        self._df = df
        self.id = int(df.OBJID.iloc[0])
        self.width_m = float(df.WIDTH.iloc[0])
        self.length_m = float(df.LENGTH.iloc[0])
        # Pick the CARLA vehicle blueprint closest to the recorded footprint.
        self._model = find_best_matching_model(self.width_m, self.length_m)
        self._frame = 0
        self._transformer = transformer
        self._max_frame = len(df)
        self.trajectory_utm = extract_utm_trajectory_from_df(self._df)
        self.trajectory_carla = self._map_trajectory_to_carla(self.trajectory_utm)
    def set_end_of_trajectory_timestamp(self, timestamp_end_s):
        """Truncate the trajectory to rows strictly before *timestamp_end_s*."""
        df = self._df
        df = df[df.TIMESTAMP < timestamp_end_s]
        self._max_frame = len(df)
        self.trajectory_utm = extract_utm_trajectory_from_df(df)
        self.trajectory_carla = self._map_trajectory_to_carla(self.trajectory_utm)
        self._df = df
    def step(self):
        """Advance the replay by one frame."""
        self._frame += 1
    @property
    def type_id(self):
        # CARLA blueprint id of the matched model.
        return self._model.type_id
    @property
    def speed_mps(self):
        # Recorded speed at the current frame.
        return self._df.V.iloc[self._frame]
    @property
    def velocity(self):
        # Heading unit vector scaled by speed, lifted to 3d with z=0.
        return (self.transform_carla.orientation * self.speed_mps).to_vector3(0)
    @property
    def transform_utm(self):
        return self.trajectory_utm[self._frame]
    @property
    def transform_carla(self):
        return self.trajectory_carla[self._frame]
    @property
    def has_finished(self) -> bool:
        # True when the current frame is the last one of the trajectory.
        return self._frame >= self._max_frame - 1
    def as_real_traffic_car(self):
        """Snapshot the current frame as a RealTrafficVehicle."""
        timestamp = self._df.TIMESTAMP.iloc[self._frame]
        debug_string = f'id={self.id} fm={self._frame} ts={timestamp:0.2f}'
        return RealTrafficVehicle(self.id, self.type_id, timestamp,
                                  self.width_m, self.length_m, self.transform_carla,
                                  self.speed_mps,
                                  debug_string)
    def _map_trajectory_to_carla(self, trajectory_utm) -> List[Transform]:
        """Map a UTM trajectory to CARLA, pinning z to the model's z offset."""
        trajectory_carla = []
        for transform_utm in trajectory_utm:
            transform_carla = self._transformer.utm2carla(transform_utm)
            transform_carla = \
                Transform(transform_carla.position.with_z(self._model.z_offset), transform_carla.orientation)
            trajectory_carla.append(transform_carla)
        return trajectory_carla
MIN_EPISODE_LENGTH_STEPS = 10 / DT
def _resample_df(df, target_timedelta_s):
    """Resample per-vehicle rows onto a fixed *target_timedelta_s* time grid.

    Rows are bucketed by (time bin, OBJID) and the first observation of each
    bucket is kept; TIMESTAMP is rewritten to the bin's time in seconds.
    """
    # create timedelta index from TIMESTAMP column (pd.Grouper uses it)
    df = df.set_index(pd.TimedeltaIndex(df.TIMESTAMP, 's'))
    # group by OBJID and resample TimedeltaIndex to target fps
    freq_ms = int(target_timedelta_s * 1000)
    grouper = df.groupby([pd.Grouper(freq=f'{freq_ms}ms'), 'OBJID'])
    df = grouper.first()  # take first observation from grouped bins
    df = df.reset_index(level=['OBJID'])  # recover OBJID column
    df['TIMESTAMP'] = df.index.to_series().dt.total_seconds()
    return df
def _find_ego_vehicle_with_time_frame(place, session_df, ego_id=None):
    """Pick an ego vehicle and the time window of its roundabout pass.

    When *ego_id* is given it is used as-is (returned even without a valid
    entry/exit window, with None timestamps); otherwise vehicle ids are
    sampled at random until one with a valid entry-to-exit window is found.

    Returns (ego_id, timestamp_start_s, timestamp_end_s).

    NOTE(review): the random branch loops forever if no vehicle in the
    session ever passes an entry and an exit — confirm sessions always
    contain at least one valid pass.
    """
    all_objids = list(set(session_df.OBJID.to_list()))
    explicit_ego_id = ego_id is not None
    while True:
        ego_id = ego_id if explicit_ego_id else random.choice(all_objids)
        obj_df = session_df[session_df.OBJID == ego_id]
        start_idx, stop_idx = _trim_trajectory_utm_to_entry_end_exit(place, obj_df)
        if not explicit_ego_id and (start_idx is None or stop_idx is None or start_idx >= stop_idx):
            continue
        timestamp_start_s = obj_df.iloc[start_idx].TIMESTAMP if start_idx is not None else None
        timestamp_end_s = obj_df.iloc[stop_idx].TIMESTAMP if stop_idx is not None else None
        return ego_id, timestamp_start_s, timestamp_end_s
def _trim_trajectory_utm_to_entry_end_exit(place, obj_df):
exits_utm = np.array([exit.as_numpy() if exit else np.zeros(2) for entry, exit in place.roads_utm])
entries_utm = np.array([entry.as_numpy() if exit else np.zeros(2) for entry, exit in place.roads_utm])
trajectory_utm = obj_df[['UTM_X', 'UTM_Y']].values
dm_entries = scipy.spatial.distance_matrix(entries_utm, trajectory_utm)
entries_distances_m = np.min(dm_entries, axis=1)
nearest_entry_idx = np.argmin(entries_distances_m) # idx of nearest entry
# trajectory idx where vehicle pass nearest roundabout entry
trajectory_start_idx = np.argmin(dm_entries[nearest_entry_idx])
min_distance_from_nearest_entry = dm_entries[nearest_entry_idx][trajectory_start_idx]
MAX_DISTANCE_FROM_WP_M = 2
PRE_ENTRY_DISTANCE_M = 20
# ensure that it passes entry not more than MAX_DISTANCE_FROM_WP_M
if min_distance_from_nearest_entry > MAX_DISTANCE_FROM_WP_M:
trajectory_start_idx = None
elif trajectory_start_idx > 0:
# take 1st index from part of trajectory distanced not more than PRE_ENTRY_DISTANCE_M
trajectory_start_idx = np.where(
dm_entries[nearest_entry_idx][:trajectory_start_idx] < PRE_ENTRY_DISTANCE_M
)[0][0]
dm_exits = scipy.spatial.distance_matrix(exits_utm, trajectory_utm)
exit_distances_m = np.min(dm_exits, axis=1)
nearest_exit_idx = np.argmin(exit_distances_m)
trajectory_end_idx = np.argmin(dm_exits[nearest_exit_idx])
min_distance_from_nearest_exit = dm_exits[nearest_exit_idx][trajectory_end_idx]
# ensure that it passes exit not more than MAX_DISTANCE_FROM_WP_M
if min_distance_from_nearest_exit > MAX_DISTANCE_FROM_WP_M:
trajectory_end_idx = None
else:
trajectory_end_idx = trajectory_end_idx + np.where(
dm_exits[nearest_exit_idx][trajectory_end_idx:] < PRE_ENTRY_DISTANCE_M
)[0][-1]
return trajectory_start_idx, trajectory_end_idx
def _determine_split(session_name, ego_id, start, stop) -> DatasetMode:
    """Deterministically assign a roundabout pass to TRAIN (80%) or VALIDATION.

    The split is a stable hash of (session, ego id, rounded start/stop), so
    the same pass always lands in the same split across runs.
    """
    split_frac = 0.8
    start = int(round(start, 0))
    stop = int(round(stop, 0))
    key = f'{session_name},{ego_id},{start},{stop}'.encode('utf-8')
    bucket = int(hashlib.sha1(key).hexdigest(), 16) % 100
    if bucket / 100 < split_frac:
        return DatasetMode.TRAIN
    return DatasetMode.VALIDATION
class OpenDDRecording():
    """Replays an openDD session: loads it from sqlite, resamples it to the
    simulation timestep, picks an ego pass in the requested train/validation
    split and yields the per-frame traffic vehicles via :meth:`step`."""
    def __init__(self, *, dataset: OpenDDDataset, timedelta_s: float = DT,
                 dataset_mode: DatasetMode = DatasetMode.TRAIN) -> None:
        self._dataset = dataset
        self._dataset_mode = dataset_mode
        # vehicle_id -> OpenDDVehicle currently alive in the replay
        self._env_vehicles = {}
        self._df: Optional[pd.DataFrame] = None
        self._frame = 0
        self._timedelta_s = timedelta_s
        self._timestamps = []
        self._session_name: Optional[str] = None
        self._transformer: Optional[Utm2CarlaMapper] = None
    def reset(self, session_name, seed=None):
        """Load *session_name* and pick an ego pass matching the dataset mode.

        Returns (ego_id, timestamp_start_s, timestamp_end_s) and positions the
        replay at the frame following timestamp_start_s.
        """
        if self._df is not None:
            # Release the previous session's (potentially large) dataframe.
            del self._df
        self._session_name = session_name
        with sqlite3.connect(self._dataset.db_path) as conn:
            df = pd.read_sql(f'select * from {session_name}', conn)
        # for now do not extract pedestrians, bicycles and trailers
        df = df[~df.CLASS.str.contains('Pedestrian|Bicycle|Trailer')]
        df = _resample_df(df, self._timedelta_s)
        self._timestamps = np.arange(df.TIMESTAMP.min(),
                                     df.TIMESTAMP.max() + self._timedelta_s,
                                     self._timedelta_s)
        self._df = df
        # search for train/validation roundabout pass
        dataset_mode = None
        if seed is not None:
            random.seed(seed)
        while dataset_mode != self._dataset_mode:
            ego_id, timestamp_start_s, timestamp_end_s = _find_ego_vehicle_with_time_frame(self.place, self._df)
            dataset_mode = _determine_split(session_name, ego_id, timestamp_start_s, timestamp_end_s)
        # Start one frame after the ego's entry timestamp.
        self._frame = np.where(np.isclose(self._timestamps, timestamp_start_s, 0.0001))[0][0] + 1
        self._env_vehicles = {}
        self._transformer = Utm2CarlaMapper(self.place)
        return ego_id, timestamp_start_s, timestamp_end_s
    def step(self) -> List[RealTrafficVehicle]:
        """Advance one frame and return snapshots of all active vehicles."""
        timestamp_s = self._timestamps[self._frame]
        vehicles_current_ids = self._df[
            np.isclose(self._df.TIMESTAMP, timestamp_s)
        ].OBJID.to_list()
        # Spawn replay objects for vehicles first seen at this timestamp.
        for vehicle_id in vehicles_current_ids:
            if vehicle_id not in self._env_vehicles:
                # TODO: check if x/y smoothing is not required (in ngsim dataset there is smoothing in 15 frames wnd)
                new_vehicle_df = self._df[
                    (self._df.OBJID == vehicle_id) &
                    ((self._df.TIMESTAMP >= timestamp_s) | np.isclose(self._df.TIMESTAMP, timestamp_s))
                ]
                self._env_vehicles[vehicle_id] = OpenDDVehicle(new_vehicle_df, self._transformer)
        # Drop vehicles whose trajectories are exhausted.
        self._env_vehicles = {k: v for k, v in self._env_vehicles.items() if not v.has_finished}
        real_traffic_vehicles = [v.as_real_traffic_car() for v in self._env_vehicles.values()]
        if real_traffic_vehicles:
            # Sanity checks: all snapshots share one timestamp and it matches the grid.
            if len(real_traffic_vehicles) > 1:
                assert all([
                    np.isclose(v1.timestamp_s, v2.timestamp_s)
                    for v1, v2 in more_itertools.windowed(real_traffic_vehicles, 2)
                ]), (
                    self._session_name,
                    [v.debug for v in real_traffic_vehicles],
                )
            assert np.isclose(real_traffic_vehicles[0].timestamp_s, timestamp_s), \
                (real_traffic_vehicles[0].timestamp_s, timestamp_s)
        self._frame += 1
        for v in self._env_vehicles.values():
            v.step()
        return real_traffic_vehicles
    def close(self):
        """No resources to release; kept for interface symmetry."""
        pass
    @property
    def place(self) -> Place:
        # Session names are prefixed with the place name, e.g. 'rdb1_...'.
        place_name = self._session_name.split('_')[0]
        return self._dataset.places[place_name]
    @property
    def session_name(self) -> str:
        return self._session_name
    @property
    def timestamp_s(self) -> float:
        # Timestamp of the frame that the next step() will replay.
        return self._timestamps[self._frame]
    @property
    def transformer(self):
        return self._transformer
    def get_df_by_objid(self, ego_id):
        """Return all rows of the session belonging to one vehicle id."""
        return self._df[self._df.OBJID == ego_id]
    @property
    def has_finished(self):
        # True when the replay has reached the last timestamp of the session.
        return self._frame >= len(self._timestamps) - 1
|
deepsense-ai/carla-real-traffic-scenarios
|
carla_real_traffic_scenarios/opendd/recording.py
|
recording.py
|
py
| 13,416 |
python
|
en
|
code
| 67 |
github-code
|
6
|
35539138268
|
from django.http import JsonResponse
from .models import Task
def _get_all_tasks():
    """Return up to 30 tasks, each serialized via ``get_as_dict``."""
    return [task.get_as_dict() for task in Task.objects.all()[:30]]
def index(request):
    """Task collection endpoint: GET lists tasks, POST creates one.

    POST expects ``task[description]``, ``task[category]`` and
    ``task[dateAdded]`` form fields and answers with a JSON envelope
    {'task': ..., 'code': ..., 'error': ...}.
    """
    if request.method == 'GET':
        return get_all(request)
    if request.method == 'POST':
        description = request.POST.get('task[description]')
        category = request.POST.get('task[category]')
        date_added = request.POST.get('task[dateAdded]')
        # All three fields are required.
        if description is None or category is None or date_added is None:
            return JsonResponse({
                'task': None,
                'code': 400,
                'error': 'No task data submitted',
            })
        task = Task()
        task.description = description
        task.category = category
        task.date_added = date_added
        task.save()
        return JsonResponse({
            'task': task.get_as_dict(),
            'code': 200,
            'error': None,
        })
def get_all(request):
    """Return all (up to 30) tasks wrapped in the standard JSON envelope."""
    return JsonResponse({
        "tasks": _get_all_tasks(),
        "code": 200,
    })
def update_task(request, task_id):
    """Update one task's description, category and completed flag.

    Expects ``task[description]``, ``task[category]`` and ``task[completed]``
    form fields. Responds with {'task': ..., 'code': ..., 'error': ...} using
    code 404 for an unknown id and 400 for missing fields.
    """
    code = 200
    error = None
    task_for_response = None
    # Bug fix: Task.objects.get() raises DoesNotExist instead of returning
    # None, so the 404 branch below was unreachable and an unknown id caused
    # a 500. filter().first() returns None as the check expects.
    task = Task.objects.filter(id=task_id).first()
    if task is None:
        code = 404
        error = f'No task found for id {task_id}'
    else:
        task_description = request.POST.get('task[description]')
        task_category = request.POST.get('task[category]')
        task_completed = request.POST.get('task[completed]')
        if (task_description is None) or (task_category is None)\
                or (task_completed is None):
            code = 400
            error = 'No task data submitted'
        else:
            task.description = task_description
            task.category = task_category
            # The checkbox state arrives as a string; normalize to bool.
            task.completed = task_completed in ('true', 'True', '1')
            task.save()
            task_for_response = task.get_as_dict()
    response = {
        'task': task_for_response,
        'code': code,
        'error': error
    }
    return JsonResponse(response)
def delete_completed(request):
    """Delete all completed tasks, then return the remaining tasks as JSON.

    NOTE(review): this mutates on any HTTP method (no POST/DELETE guard) —
    confirm the URLconf restricts how it can be reached.
    """
    Task.objects.filter(completed=True).delete()
    return get_all(request)
|
bluepostit/django-js-todo
|
todos/views.py
|
views.py
|
py
| 2,483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16373544563
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense , Dropout , Lambda, Flatten
from keras.optimizers import Adam ,RMSprop
from sklearn.model_selection import train_test_split
from sklearn import cross_validation
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers.core import Lambda , Dense, Flatten, Dropout
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing import image
import tensorflow as tf
##Reading Data
def contrast(x):
    """CSV-converter: boost contrast and rescale a pixel value to [0, 1].

    Values above 205 saturate to 1, values below 50 clip to 0, everything in
    between is divided by 255. Accepts anything float() can parse.
    """
    value = float(x)
    if value > 205:
        return 1
    if value < 50:
        return 0
    return value / 255
train = pd.read_csv('I:\\Centrale\\Machine Learning\\Kaggle-3-MNIST\\train.csv', converters = dict([(i+1, contrast) for i in range(28*28)]))
#train = pd.read_csv('I:\\Centrale\\Machine Learning\\Kaggle-3-MNIST\\train.csv')
print(train.shape)
train.head()
test= pd.read_csv('I:\\Centrale\\Machine Learning\\Kaggle-3-MNIST\\test.csv', converters = dict([(i, contrast) for i in range(28*28)]))
#test= pd.read_csv('I:\\Centrale\\Machine Learning\\Kaggle-3-MNIST\\test.csv')
print(test.shape)
test.head()
##Data Preprocessing
X_train = (train.iloc[:,1:].values).astype('float32') # all pixel values
y_train = train.iloc[:,0].values.astype('int32') # only labels i.e targets digits
X_test = test.values.astype('float32')
##Display data repartition
"""repartition =[0 for i in range(10)]
for i in range(y_train.shape[0]):
repartition[list(y_train[i,:]).index(max(y_train[i,:]))]+=1
sections=[i for i in range(10)]
plt.bar(sections, repartition, align='center', alpha=0.7)
plt.show()"""
##Data augmentation
def convert_2d(x):
    """Reshape flattened square gray-scale images to (m, height, width, 1).

    x: 1d (pixels,) array for a single image, or 2d (m, pixels) array for a
    batch. The image side length is inferred as sqrt(pixels).
    Returns a 4d array with a single channel axis.
    """
    if len(x.shape) == 1:
        count = 1
        side = int(np.sqrt(x.shape[0]))
    else:
        count = x.shape[0]
        side = int(np.sqrt(x.shape[1]))
    return np.reshape(x, (count, side, side, 1))
def crop_image(x, y, min_scale):
    """Zoom-augment images by cropping a random central box and resizing back.

    x: 2d (m, n) numpy array of flattened gray-scale images.
    y: labels — unused; output row i corresponds to input row i, so labels
       can be reused by index.
    min_scale: float. Minimum crop scale; the actual scale is drawn from
       [min_scale, min(min_scale + 0.05, 1)] per image.
    Returns a 2d (m, n) array of zoomed images.

    NOTE(review): uses the TensorFlow 1.x placeholder/Session API.
    """
    # convert the data to 2-d image. images should be a m*h*w*c numpy array.
    images = convert_2d(x)
    # m is the number of images. Since this is a gray-scale image scale from 0 to 255, it only has one channel.
    m, height, width, channel = images.shape
    # tf tensor for original images
    img_tensor = tf.placeholder(tf.int32, [1, height, width, channel])
    # tf tensor for 4 coordinates for corners of the cropped image
    box_tensor = tf.placeholder(tf.float32, [1, 4])
    box_idx = [0]
    crop_size = np.array([height, width])
    # crop and resize the image tensor
    cropped_img_tensor = tf.image.crop_and_resize(img_tensor, box_tensor, box_idx, crop_size)
    # numpy array for the cropped image
    cropped_img = np.zeros((m, height, width, 1))
    with tf.Session() as sess:
        for i in range(m):
            # randomly select a scale between [min_scale, min(min_scale + 0.05, 1)]
            rand_scale = np.random.randint(min_scale * 100, np.minimum(min_scale * 100 + 5, 100)) / 100
            # calculate the 4 coordinates
            x1 = y1 = 0.5 - 0.5 * rand_scale
            x2 = y2 = 0.5 + 0.5 * rand_scale
            # lay down the cropping area
            box = np.reshape(np.array([y1, x1, y2, x2]), (1, 4))
            # save the cropped image
            cropped_img[i:i + 1, :, :, :] = sess.run(cropped_img_tensor, feed_dict={img_tensor: images[i:i + 1], box_tensor: box})
    # flat the 2d image
    cropped_img = np.reshape(cropped_img, (m, -1))
    return cropped_img
def translate(x, y, dist):
    """Shift-augment images in four directions (left, right, up, down).

    x: 2d (m, n) numpy array of flattened square images.
    y: labels — unused; kept for signature compatibility with the other
       augmentation helpers. Output row i corresponds to input row i % m,
       so labels can be recovered by index downstream.
    dist: fraction of height/width to shift by.
    Returns a (4*m, n) array: all images shifted one direction, then the
    next, etc. Vacated pixels are filled with 0.

    Change vs. original: the dead ``y = np.tile(...)`` assignment (computed
    but never returned) is removed.
    """
    # convert the 1d image data to a m*h*w*c array
    images = convert_2d(x)
    m, height, width, channel = images.shape
    # Each anchor: (top, bottom, left, right) source crop followed by
    # (new_top, new_bottom, new_left, new_right) destination, i.e.
    # new_img[new_top:new_bottom, new_left:new_right] = img[top:bottom, left:right]
    anchors = [
        (0, height, int(dist * width), width, 0, height, 0, width - int(dist * width)),
        (0, height, 0, width - int(dist * width), 0, height, int(dist * width), width),
        (int(dist * height), height, 0, width, 0, height - int(dist * height), 0, width),
        (0, height - int(dist * height), 0, width, int(dist * height), height, 0, width),
    ]
    # new_images: d*m*h*w*c array. The first dimension is the 4 directions.
    new_images = np.zeros((4, m, height, width, channel))
    for i, (top, bottom, left, right, new_top, new_bottom, new_left, new_right) in enumerate(anchors):
        # shift the image
        new_images[i, :, new_top:new_bottom, new_left:new_right, :] = images[:, top:bottom, left:right, :]
    return np.reshape(new_images, (4 * m, -1))
def add_noise(x, y, noise_lvl):
    """Replace a random subset of pixels in each image with uniform noise.

    x: 2d (m, n) array of flattened images. NOTE: mutated in place.
    y: labels — unused; kept for signature symmetry with the other helpers.
    noise_lvl: fraction of the n pixels to overwrite per image.
    Returns a copy of the (mutated) array cast to int.
    """
    m, n = x.shape
    # Number of pixels to corrupt per image.
    noise_num = int(noise_lvl * n)
    for row in range(m):
        # Draw n random ints and argsort them: the first noise_num positions
        # form a random sample of pixel indices without replacement.
        noise_idx = np.random.randint(0, n, n).argsort()[:noise_num]
        x[row, noise_idx] = np.random.randint(0, 255, noise_num)
    return x.astype("int")
def rotate_image(x, y, max_angle):
    """Rotation-augment images by a random angle in [max_angle/2, max_angle].

    x: 2d (m, n) numpy array of flattened images.
    y: labels — unused; output row i corresponds to input row i.
    max_angle: int. Maximum rotation in degrees; half the images are rotated
       clockwise, the other half counter-clockwise.
    Returns a 2d (m, n) array of rotated images.

    NOTE(review): uses tf.contrib (TensorFlow 1.x only).
    """
    images = convert_2d(x)
    m, height, width, channel = images.shape
    img_tensor = tf.placeholder(tf.float32, [m, height, width, channel])
    # half of the images are rotated clockwise. The other half counter-clockwise
    # positive angle: [max/2, max]
    # negative angle: [360-max/2, 360-max]
    rand_angle_pos = np.random.randint(max_angle / 2, max_angle, int(m / 2))
    rand_angle_neg = np.random.randint(-max_angle, -max_angle / 2, m - int(m / 2)) + 360
    rand_angle = np.transpose(np.hstack((rand_angle_pos, rand_angle_neg)))
    np.random.shuffle(rand_angle)
    # convert the degree to radian
    rand_angle = rand_angle / 180 * np.pi
    # rotate the images
    rotated_img_tensor = tf.contrib.image.rotate(img_tensor, rand_angle)
    with tf.Session() as sess:
        rotated_imgs = sess.run(rotated_img_tensor, feed_dict={img_tensor: images})
    rotated_imgs = np.reshape(rotated_imgs, (m, -1))
    return rotated_imgs
print("Augment the data...")
cropped_imgs = crop_image(X_train, y_train, 0.9)
translated_imgs = translate(X_train, y_train, 0.1)
noisy_imgs = add_noise(X_train, y_train, 0.1)
rotated_imgs = rotate_image(X_train, y_train, 10)
augmented = np.vstack((X_train, cropped_imgs, translated_imgs, noisy_imgs, rotated_imgs))
#augmented is the new data composed with : the basic dataset, the cropped dataset, the shifted dataset (1 for each 4 directions), the noisy dataset and the rotated dataset
print("Done!", X_train.shape)
##Save
header = 'label'
for i in range(784):
header+=',pixel'+str(i)
np.savetxt('C:\\Users\\Nicolas\\Desktop\\augmented_data.csv', np.array([[int(y_train[i%X_train.shape[0]])]+list(augmented[i,:]) for i in range(augmented.shape[0])]),fmt='%i', delimiter = ',',newline='\n', header = header, comments='')
##Save2
pd.DataFrame(augmented).to_csv('I:\\Centrale\\Machine Learning\\Kaggle-3-MNIST\\augmented_data.csv', index = False)
|
nicogab34/MNIST--Neural-Networks
|
augment_data.py
|
augment_data.py
|
py
| 9,196 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10900131686
|
from flask import Flask
# NOTE(review): the line above is redundant — Flask is re-imported below
# (and render_template appears unused); kept as-is pending confirmation.
from flask import Flask, request, render_template, send_file
# Single Flask application instance for this demo service.
app = Flask(__name__)
@app.route('/cookiestealer/', methods=['GET'])
def cookieStealer():
    """Serve an image while logging the caller's cookies to stdout.

    Demonstration endpoint for a security lab: a page that embeds this image
    cross-site leaks its cookies to this server (tracking-pixel / XSS
    demonstration). Do not deploy outside the exercise.
    """
    filename = 'cookiemonster.jpg'
    print("This is the cookie: \n")
    print(request.cookies)
    print("")
    return send_file(filename, mimetype='image/jpeg')
# Run the demo server on port 3100 when executed directly.
if __name__ == '__main__':
    app.run(port=3100, debug=True)
|
FelixDryselius/SecureDataSystemsGroup17
|
lab1/xss/3rd_party_cookie_stealer.py
|
3rd_party_cookie_stealer.py
|
py
| 420 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22200579634
|
#!/usr/bin/env python
from __future__ import absolute_import
import apache_beam as beam
import argparse
import json
import logging
import sys
import urllib
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from google.cloud import bigquery
from google.cloud import storage
from raxxla_transforms import data_transforms
from raxxla_transforms import table_schemas
# Global vars
# Module-level logger shared by all helper functions below.
logger = logging.getLogger('raxxla_loader')
formatter = '%(asctime)s %(message)s'
logging.basicConfig(level=logging.INFO, format=formatter)
# EDSM dump names; each doubles as the BigQuery table name and GCS object name.
table_list = ['bodies', 'systems', 'powerplay', 'population', 'stations']
arg_list = table_list + ['all']
runner_list = ['DataflowRunner', 'DirectRunner']
# Configure flags
# NOTE(review): args is parsed at import time, which breaks importing this
# module from tests or other tools — confirm it is only ever run as a script.
flags = argparse.ArgumentParser(description='Initialize Raxxla BigQuery tables from EDSM data.')
flags.add_argument('--project', help='ID of the Google Cloud project to use.')
flags.add_argument('--dataset', help='Name of the BigQuery dataset to store EDSM data in.')
flags.add_argument('--bucket', help='Name of the GCS bucket to store EDSM data in.')
flags.add_argument('--runner', help='Name of the Beam runner type to use for the pipeline.', choices=runner_list)
flags.add_argument('--upload_to_gcs', help='Upload EDSM files to GCS from local download.', choices=arg_list, nargs='+')
flags.add_argument('--delete', help='Delete tables from BigQuery.', choices=arg_list, nargs='+')
flags.add_argument('--download', help='Download files from EDSM into Google Cloud Storage.', choices=arg_list, nargs='+')
flags.add_argument('--upload_to_bq', help='Write converted values to BigQuery. Requires files to be staged in GCS.', choices=arg_list, nargs='+')
args = flags.parse_args()
class remove_blank_rows(beam.DoFn):
    """Beam DoFn that drops None elements from a PCollection."""
    def process(self, element):
        if element is not None:
            yield element
def delete_bq_data(tables, project_id, dataset_id):
    """Delete the given EDSM tables from a BigQuery dataset.

    tables: iterable of table names; duplicates are removed.
    Exits the process on the first failure.
    """
    client = bigquery.Client()
    dataset_ref = client.dataset(dataset_id)
    tables = set(tables)
    try:
        for table in tables:
            client.delete_table(dataset_ref.table(table))
            logger.info('Deleted ' + project_id + '.' + dataset_id + '.' + table)
    except Exception as e:
        logger.error('Unable to delete EDSM BQ tables: ' + str(e))
        sys.exit()
def download_edsm_files(files):
    """Download the requested EDSM dump files to /tmp.

    files: iterable of dump names (see table_list); duplicates are removed.
    Each file is saved as /tmp/<name>. Exits the process on the first failure.
    """
    # Bug fix: urllib.urlretrieve exists only in Python 2; on Python 3 it
    # raised AttributeError, which the broad except turned into a misleading
    # "Unable to download" exit. Import the right symbol for either version.
    try:
        from urllib.request import urlretrieve
    except ImportError:
        from urllib import urlretrieve
    edsm_urls = {
        'bodies': 'https://www.edsm.net/dump/bodies.json',
        'systems': 'https://www.edsm.net/dump/systemsWithCoordinates.json',
        'powerplay': 'https://www.edsm.net/dump/powerPlay.json',
        'population': 'https://www.edsm.net/dump/systemsPopulated.json',
        'stations': 'https://www.edsm.net/dump/stations.json'
    }
    files = set(files)
    try:
        for file in files:
            logger.info('Downloading ' + file + ' file from EDSM...')
            download_url = edsm_urls[file]
            download_path = '/tmp/' + file
            urlretrieve(download_url, download_path)
    except Exception as e:
        logger.error('Unable to download EDSM files: ' + str(e))
        sys.exit()
def upload_to_bigquery(files, project_id, dataset_id, bucket_id, runner, pipeline_options):
    """Run one Beam pipeline per file, parsing EDSM JSON lines into BigQuery.

    files: iterable of dump names; duplicates are removed.
    runner: 'DataflowRunner' reads the staged GCS copy; 'DirectRunner' reads
        the local /tmp download.
    Exits the process on any failure.
    """
    files = set(files)
    try:
        for file in files:
            import_string = 'Importing ' + file + ' file into BigQuery...'
            logger.info(import_string)
            table_spec = project_id + ':' + dataset_id + '.' + str(file)
            # Pick the input path matching where the file was staged.
            if runner == 'DataflowRunner':
                file_path = 'gs://' + str(bucket_id) + '/' + str(file)
            elif runner == 'DirectRunner':
                file_path = '/tmp/' + str(file)
            with beam.Pipeline(options=pipeline_options) as p:
                json_lines = p | beam.io.ReadFromText(file_path)
                # Select the schema and transform matching this dump type.
                # NOTE(review): schema/rows from a previous loop iteration
                # would leak into an unmatched file; argparse 'choices'
                # currently guarantees every name matches a branch.
                if file == 'bodies':
                    schema = table_schemas.bodies
                    rows = json_lines | beam.Map(data_transforms.transform_bodies)
                elif file == 'systems':
                    schema = table_schemas.systems
                    rows = json_lines | beam.Map(data_transforms.transform_systems)
                elif file == 'powerplay':
                    schema = table_schemas.powerplay
                    rows = json_lines | beam.Map(data_transforms.transform_powerplay)
                elif file == 'population':
                    schema = table_schemas.population
                    rows = json_lines | beam.Map(data_transforms.transform_population)
                elif file == 'stations':
                    schema = table_schemas.stations
                    rows = json_lines | beam.Map(data_transforms.transform_stations)
                if schema and rows:
                    # Drop None rows, then batch-write into the target table.
                    bq_loader = rows | beam.ParDo(remove_blank_rows()) | beam.io.WriteToBigQuery(
                        table_spec,
                        schema=schema,
                        write_disposition=beam.io.BigQueryDisposition.WRITE_EMPTY,
                        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
                        method='DEFAULT',
                        batch_size=500)
                else:
                    raise Exception('Unable to assemble rows for upload. Check upstream repo for schema updates.')
    except Exception as e:
        bq_upload_error_string = 'Unable to load EDSM files: ' + str(e)
        logger.error(bq_upload_error_string)
        sys.exit()
def upload_to_gcs(files, project_id, bucket):
    """Upload locally downloaded EDSM files from /tmp to a GCS bucket.

    files: iterable of dump names (each expected at /tmp/<name>).
    bucket: google.cloud.storage bucket object to upload into.
    Exits the process on the first failure.
    """
    try:
        for file in files:
            logger.info('Uploading ' + file + ' to GCS.')
            bucket.blob(file).upload_from_filename('/tmp/' + str(file))
    except Exception as e:
        logger.error('Unable to upload EDSM files to GCS: ' + str(e))
        sys.exit()
def main(argv=None):
    """CLI entry point: optionally delete, download, and upload EDSM data.

    NOTE(review): no options are registered on the parser in this excerpt,
    yet `--project`, `--dataset`, `--bucket`, `--delete`, `--download`,
    `--upload_to_gcs`, `--upload_to_bq` and `--runner` are all read below --
    confirm the add_argument calls exist upstream of this chunk.
    """
    parser = argparse.ArgumentParser()
    # Bug fix: the parsed namespace was bound to `known_args` but every read
    # below used the undefined name `args`, raising NameError at runtime.
    args, pipeline_args = parser.parse_known_args(argv)
    project_id = args.project
    dataset_id = args.dataset
    bucket_id = args.bucket
    if args.delete:
        if 'all' in args.delete:
            delete_bq_data(table_list, project_id, dataset_id)
        else:
            delete_bq_data(args.delete, project_id, dataset_id)
    if args.download:
        if 'all' in args.download:
            download_edsm_files(table_list)
        else:
            download_edsm_files(args.download)
    if args.upload_to_gcs:
        storage_client = storage.Client()
        try:
            gcs_bucket = storage_client.get_bucket(bucket_id)
        except Exception as e:
            logger.warning('GCS bucket not found, creating...')
            gcs_bucket = storage_client.create_bucket(bucket_id)
        if 'all' in args.upload_to_gcs:
            upload_to_gcs(table_list, project_id, gcs_bucket)
        else:
            upload_to_gcs(args.upload_to_gcs, project_id, gcs_bucket)
    if args.upload_to_bq:
        # Dataflow needs GCS staging/temp locations appended to the
        # pass-through pipeline arguments.
        staging_location = '--staging_location=gs://' + bucket_id + '/staging'
        temp_location = '--temp_location=gs://' + bucket_id + '/temp'
        pipeline_args.extend([
            '--job_name=raxxla-loader',
            '--setup_file=./setup.py',
            staging_location,
            temp_location,
        ])
        pipeline_options = PipelineOptions(pipeline_args)
        pipeline_options.view_as(SetupOptions).save_main_session = True
        runner = args.runner
        if 'all' in args.upload_to_bq:
            upload_to_bigquery(table_list, project_id, dataset_id, bucket_id, runner, pipeline_options)
        else:
            upload_to_bigquery(args.upload_to_bq, project_id, dataset_id, bucket_id, runner, pipeline_options)


if __name__ == '__main__':
    main()
|
mjcastner/edsm_bq
|
beam_parser.py
|
beam_parser.py
|
py
| 8,112 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73739270268
|
from buildbot.worker import Worker
from maxscale.config.workers import WORKER_CREDENTIALS
def workerConfiguration():
    """Build the list of Worker objects used in the BuildBot master configuration."""
    return [
        Worker(entry["name"], entry["password"])
        for entry in WORKER_CREDENTIALS
    ]
def workerNames(host=""):
    """List worker names whose host string contains `host` (all workers by default)."""
    return [
        entry["name"]
        for entry in WORKER_CREDENTIALS
        if host in entry["host"]
    ]
def workersOnHosts(*hosts):
    """
    Create a list of names of the workers that run on the specified hosts.

    hosts (list): host names to match exactly
    """
    return [
        entry["name"]
        for entry in WORKER_CREDENTIALS
        if entry["host"] in hosts
    ]
def workerToHostMap():
    """Map each worker name to the host it runs on."""
    return {entry["name"]: entry["host"] for entry in WORKER_CREDENTIALS}
|
dA505819/maxscale-buildbot
|
master/maxscale/workers.py
|
workers.py
|
py
| 1,208 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13321588984
|
from airflow.models import Variable
import datetime
from .test_utils import create_test_database, db_connect
from dags.rock.rock_content_items import ContentItem
from dags.rock.rock_content_items_connections import ContentItemConnection
import vcr
# Provision the throwaway test database once, when this test module is imported.
create_test_database()
def test_run_fetch_and_save_content_item_connections(monkeypatch):
    """End-to-end test: fetch content items, build their connections, delete one.

    Rock API traffic is replayed from VCR cassettes and Airflow's
    `Variable.get` is stubbed out so the test runs fully offline.
    """
    def mock_get(config, deserialize_json=True, default_var=None):
        # Stand-in for airflow.models.Variable.get keyed on the variable name.
        if default_var:
            return default_var
        if "_rock_api" in config:
            return "https://rock.apollos.app/api"
        if "_rock_token" in config:
            return "ASZjZWdf3IqrbZX9sedtB4wb"
        # Bug fix: the original tested the bare (always truthy) string literal
        # `if "_rock_config":`, making this branch unconditional.
        if "_rock_config" in config:
            return {
                "CONTENT_MAPPINGS": {
                    "ContentSeriesContentItem": {"ContentChannelTypeId": [6]},
                    "DevotionalContentItem": {"ContentChannelId": [7]},
                    "WeekendContentItem": {"ContentChannelId": [5]},
                },
                "PERSONA_CATEGORY_ID": 186,
                "SERIES_CATEGORY_ORIGIN_IDS": [4, 33],
            }

    monkeypatch.setattr(
        Variable,
        "get",
        mock_get,
    )

    content_item = ContentItem(
        {
            "client": "test",
            "execution_date": datetime.datetime(
                2005, 7, 14, 12, 30, tzinfo=datetime.timezone.utc
            ),
            "do_backfill": True,
        }
    )

    content_item_connection = ContentItemConnection(
        {
            "client": "test",
            "execution_date": datetime.datetime(
                2005, 7, 14, 12, 30, tzinfo=datetime.timezone.utc
            ),
            "do_backfill": True,
        }
    )

    # Route both tasks' Postgres hooks at the local test database.
    monkeypatch.setattr(
        content_item.pg_hook,
        "get_conn",
        db_connect,
    )

    monkeypatch.setattr(
        content_item_connection.pg_hook,
        "get_conn",
        db_connect,
    )

    with vcr.use_cassette(
        "tests/cassettes/content_item_connections/content_items.yaml"
    ):
        content_item.run_fetch_and_save_content_items()

    with vcr.use_cassette(
        "tests/cassettes/content_item_connections/initial_content_item_connections.yaml"
    ):
        content_item_connection.run_fetch_and_save_content_items_connections()

    conn = db_connect()
    with conn:
        with conn.cursor() as curs:
            # Check for initial parent content item
            curs.execute("SELECT id FROM content_item")
            parent_item_id = curs.fetchone()[0]

            # Check that initial content item connections are correct
            curs.execute(
                """
                SELECT parent_id, origin_id FROM content_item_connection;
            """
            )
            initial_content_item_connections = curs.fetchall()
            assert len(initial_content_item_connections) == 3
            expected = [
                (parent_item_id, "20"),
                (parent_item_id, "18"),
                (parent_item_id, "19"),
            ]
            i = 0
            for connection in initial_content_item_connections:
                assert connection == expected[i]
                i += 1

            # Delete content item connection
            with vcr.use_cassette(
                "tests/cassettes/content_item_connections/delete_content_item_connection.yaml"
            ):
                content_item_connection.run_delete_content_item_connections()
            curs.execute("SELECT parent_id, origin_id FROM content_item_connection;")
            content_item_connections_with_deletion = curs.fetchall()
            assert len(content_item_connections_with_deletion) == 2
            expected = [
                (parent_item_id, "20"),
                (parent_item_id, "18"),
            ]
            i = 0
            for connection in content_item_connections_with_deletion:
                assert connection == expected[i]
                i += 1
    conn.close()
|
CrossingsCommunityChurch/apollos-shovel
|
tests/test_rock_content_item_connections.py
|
test_rock_content_item_connections.py
|
py
| 3,945 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74918976186
|
import networkx as nx
import re
def read_file(file):
    """Parse AoC day-7 dependency lines into a DiGraph.

    Returns (graph, starting steps), where the starting steps are those that
    appear as a prerequisite but never as a dependent.
    """
    pattern = re.compile("Step ([A-Z]) must be finished before step ([A-Z]) can begin.")
    graph = nx.DiGraph()
    prerequisites = set()
    dependents = set()
    with open(file) as handle:
        for raw in handle.readlines():
            match = pattern.match(raw.strip())
            before = match.group(1)
            after = match.group(2)
            if before not in graph:
                graph.add_node(before)
            if after not in graph:
                graph.add_node(after)
            if not graph.has_edge(before, after):
                graph.add_edge(before, after)
            prerequisites.add(before)
            dependents.add(after)
    return (graph, prerequisites - dependents)
def duration(step):
    """Seconds needed for a step: 60 plus its alphabet position (A=1 -> 61)."""
    return ord(step) - ord('A') + 61
def day7(file):
    """Solve AoC 2018 day 7: return (step order, total seconds with 5 workers)."""
    G,starter = read_file(file)
    # --- Part 1: topological order with alphabetical tie-breaking ---
    path = list()
    to_visit = sorted(starter,reverse=True)
    while len(to_visit) > 0:
        node = to_visit.pop()
        path.append(node)
        neighbours = G[node]
        for n in neighbours:
            if not n in to_visit and not n in path:
                # A successor becomes available only once all of its
                # prerequisites are already in `path`.
                allCompleted = True
                for u,v in G.in_edges(nbunch=n):
                    if not u in path:
                        allCompleted = False
                if allCompleted:
                    to_visit.append(n)
        # Keep candidates sorted descending so pop() yields the alphabetically first.
        to_visit = sorted(to_visit,reverse=True)
    #print("".join(path))
    work_route = "".join(path)
    end_letter = path[-1]
    # --- Part 2: replay the schedule with 5 parallel workers ---
    path = list()
    to_visit = sorted(starter,reverse=True)
    second = 0
    workers = list()
    # Each worker is [current step, second at which it finishes] ('.' = idle).
    workers.append(['.',0])
    workers.append(['.',0])
    workers.append(['.',0])
    workers.append(['.',0])
    workers.append(['.',0])
    def full_workers(workers):
        # True when no worker is idle.
        full = True
        for w in workers:
            if w[0] == ".":
                full = False
        return full
    end = False
    while not end:
        # Advance time only when nothing can be assigned right now.
        if len(to_visit) == 0 or full_workers(workers):
            second += 1
        for i in range(0,len(workers)):
            if workers[i][1] <= second:
                if workers[i][0] != ".":
                    # Worker finished its step: record it and unlock successors.
                    path.append(workers[i][0])
                    neighbours = G[workers[i][0]]
                    for n in neighbours:
                        if not n in to_visit and not n in path:
                            allCompleted = True
                            for u,v in G.in_edges(nbunch=n):
                                if not u in path:
                                    allCompleted = False
                            if allCompleted:
                                to_visit.append(n)
                    to_visit = sorted(to_visit,reverse=True)
                    if workers[i][0] == end_letter:
                        #print("Finish point")
                        #print("Seconds: %d" % second)
                        end = True
                if len(to_visit) > 0:
                    # Assign the next available step to this worker.
                    node = to_visit.pop()
                    workers[i][1] = second+duration(node)
                    workers[i][0] = node
                else:
                    workers[i][0] = "."
    return work_route,second
|
aarroyoc/advent-of-code-2018
|
python/day7/day7.py
|
day7.py
|
py
| 3,076 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33548088967
|
import os
from django.core.management.base import BaseCommand, CommandError
from main.settings import BASE_DIR, DEBUG
from costcenter.models import Fund, Source, CostCenter, FundCenter, FinancialStructureManager
from lineitems.models import LineForecast, LineItem
class Command(BaseCommand):
    """
    A class to be used only for development purposes. It serves to fill in some funds, sources, fund centers and cost centers. Values fed in the database should match what is expected from the data to be used when running uploadtocsv which also uses test encumbrance report data.
    """

    def handle(self, *args, **options):
        # Entry point: wipe the existing financial test data, then repopulate.
        # Guarded by DEBUG so it can never run against a production database.
        if DEBUG:
            LineForecast.objects.all().delete()
            LineItem.objects.all().delete()
            CostCenter.objects.all().delete()
            Source.objects.all().delete()
            Fund.objects.all().delete()
            FundCenter.objects.all().delete()
            self.set_fund()
            self.set_source()
            self.set_fund_center()
            self.set_cost_center()
        else:
            print("This capability is only available when DEBUG is True")

    def set_fund(self):
        # Create the sample funds, skipping any that already exist.
        items = [
            {"fund": "C113", "name": "National Procurement", "vote": "1"},
            {"fund": "C116", "name": "Kitchen Procurement", "vote": "5"},
            {"fund": "C523", "name": "Basement Procurement", "vote": "1"},
            {"fund": "CXXX", "name": "Bedroom Procurement", "vote": "1"},
        ]
        for item in items:
            try:
                found = Fund.objects.get(fund=item["fund"])
                if found:
                    print(f"Fund {found} exists")
            except Fund.DoesNotExist:
                new_item = Fund.objects.create(**item)
                print(f"Created fund {new_item}")

    def set_source(self):
        # Create the single sample source, skipping it if already present.
        items = [{"source": "Kitchen"}]
        for item in items:
            try:
                found = Source.objects.get(source=item["source"])
                if found:
                    print(f"Source {found} exists")
            except Source.DoesNotExist:
                new_item = Source.objects.create(**item)
                print(f"Created Source {new_item}")

    def set_fund_center(self):
        # Build a three-level fund-center tree: root 1111AA, children
        # 1111AB/1111AC, and grandchildren 2222BA/2222BB under 1111AB.
        # Create root FC
        fc = {"fundcenter": "1111AA", "shortname": "root", "parent": None}
        new_item = FundCenter.objects.create(**fc)
        root = FundCenter.objects.filter(fundcenter="1111AA").first()
        print(f"Created Fund Center {root}, sequence {root.sequence}")
        root_children = [
            {"fundcenter": "1111AB", "shortname": "AB", "parent": root},
            {"fundcenter": "1111AC", "shortname": "AC", "parent": root},
        ]
        for item in root_children:
            try:
                found = FundCenter.objects.get(fundcenter=item["fundcenter"])
                if found:
                    print(f"Fund Center {found} exists")
            except FundCenter.DoesNotExist:
                # Sequence numbers are assigned by the financial structure
                # manager relative to the parent.
                item["sequence"] = FinancialStructureManager().set_parent(fundcenter_parent=root)
                new_item = FundCenter.objects.create(**item)
                print(f"Created Fund Center {new_item}, sequence {new_item.sequence}")
        ab = FundCenter.objects.filter(fundcenter="1111AB").first()
        ab_children = [
            {"fundcenter": "2222BA", "shortname": "BA", "parent": ab},
            {"fundcenter": "2222BB", "shortname": "BB", "parent": ab},
        ]
        for item in ab_children:
            try:
                found = FundCenter.objects.get(fundcenter=item["fundcenter"])
                if found:
                    print(f"Fund Center {found} exists")
            except FundCenter.DoesNotExist:
                item["sequence"] = FinancialStructureManager().set_parent(fundcenter_parent=ab)
                new_item = FundCenter.objects.create(**item)
                print(f"Created Fund Center {new_item}")

    def set_cost_center(self):
        # Attach sample cost centers to the fund centers created above; all
        # share the C113 fund and the Kitchen source.
        fund = Fund.objects.get(fund="C113")
        source = Source.objects.get(source="Kitchen")
        ab = FundCenter.objects.get(fundcenter="1111AB")
        ac = FundCenter.objects.get(fundcenter="1111AC")
        FSM = FinancialStructureManager()
        items = [
            {
                "costcenter": "8486B1",
                "shortname": "Utensils",
                "fund": fund,
                "source": source,
                "isforecastable": True,
                "isupdatable": True,
                "note": "",
                "parent": ac,
            },
            {
                "costcenter": "8486C1",
                "shortname": "Food and drink",
                "fund": fund,
                "source": source,
                "isforecastable": True,
                "isupdatable": True,
                "note": "A quick and short note for 1234FF",
                "parent": ab,
            },
            {
                "costcenter": "8486C2",
                "shortname": "Basement Stuff",
                "fund": fund,
                "source": source,
                "isforecastable": True,
                "isupdatable": True,
                "note": "",
                "parent": ab,
            },
        ]
        for item in items:
            try:
                found = CostCenter.objects.get(costcenter=item["costcenter"])
                if found:
                    print(f"Cost Center {found} exists")
            except CostCenter.DoesNotExist:
                item["sequence"] = FSM.set_parent(item["parent"], item)
                new_item = CostCenter.objects.create(**item)
                print(f"Created Cost Center {new_item}")
|
mariostg/bft
|
encumbrance/management/commands/populate.py
|
populate.py
|
py
| 5,663 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13931896191
|
from odoo import models, fields, api
from odoo.exceptions import ValidationError
from odoo.http import request
from odoo.addons.resident_management.enum import STATUS_TYPES, VEHICLE_TYPES, USER_GROUP_CODE, RELATIONSHIP_TYPES
str_bql = USER_GROUP_CODE[2][0]
str_bqt = USER_GROUP_CODE[3][0]
class tb_vehicle(models.Model):
    """Odoo model for resident vehicles, with per-role record visibility.

    Non-admin users (ids other than 1 and 2) only see projects/buildings they
    are linked to through tb_users_blockhouse_res_groups_rel, filtered by the
    BQT (board of management) and BQL (management board) group codes.
    """
    _name = 'tb_vehicle'
    _description = 'Phương tiện'

    # Vehicle identity and paperwork images (field labels are Vietnamese UI strings).
    license_plates = fields.Char(string='Biển số xe', copy=False)
    vehicle_color = fields.Char(string='Màu xe', copy=False)
    vehicle_brand = fields.Char(string='Nhãn hiệu xe', copy=False)
    image = fields.Image(string='Ảnh phương tiện', copy=False)
    image_citizen_identification_font = fields.Image(string='CMND/CCCD mặt trước', copy=False)
    image_citizen_identification_back = fields.Image(string='CMND/CCCD mặt sau', copy=False)
    image_vehicle_registration_certificate_font = fields.Image(string='Đăng ký xe mặt trước', copy=False)
    image_vehicle_registration_certificate_back = fields.Image(string='Đăng ký xe mặt sau', copy=False)
    # Owner details.
    name = fields.Char(string='Tên chủ xe', required=True, copy=False)
    # NOTE(review): label reads 'Tên chủ xe' (owner name) but the field is
    # date_of_birth -- looks like a copy-paste label; confirm intended text.
    date_of_birth = fields.Char(string='Tên chủ xe', copy=False)
    phone = fields.Char(string='Điện thoại', copy=False)
    citizen_identification = fields.Char(string='CMND / CCCD')
    relationship_type = fields.Selection(string='Quan hệ với chủ hộ', selection=RELATIONSHIP_TYPES,
                                         default=RELATIONSHIP_TYPES[0][0])
    vehicle_type = fields.Selection(string='Loại xe', selection=VEHICLE_TYPES, default=VEHICLE_TYPES[0][0])
    note = fields.Text(string='Ghi chú', copy=False, help='')
    status = fields.Selection(string='Trạng thái', selection=STATUS_TYPES, default=STATUS_TYPES[0][0])
    # Location chain: project -> building -> apartment, each narrowing the next.
    blockhouse_id = fields.Many2one(comodel_name='tb_blockhouse', string="Dự án",
                                    domain=lambda self: self._domain_blockhouse_id(),
                                    ondelete="cascade")
    building_id = fields.Many2one(comodel_name='tb_building', string="Toà nhà",
                                  domain="[('is_active', '=', True), ('blockhouse_id', '!=', None), ('blockhouse_id', '=', blockhouse_id)]",
                                  ondelete="cascade")
    building_house_id = fields.Many2one(comodel_name='tb_building_house', string="Căn hộ",
                                        domain="[('is_active', '=', True), ('building_id', '!=', None), ('building_id', '=', building_id)]",
                                        ondelete="cascade")
    user_id = fields.Many2one(comodel_name='res.users', string="Chủ hộ",
                              domain=lambda self: self._domain_user_id(),
                              ondelete="cascade")

    def set_status_active(self):
        # Approve: mark the selected vehicles as active.
        for item in self:
            item.status = 'ACTIVE'

    def set_status_reject(self):
        # Reject the selected vehicles.
        for item in self:
            item.status = 'REJECT'

    @api.model
    def _domain_user_id(self):
        """Domain for the owner field: restrict to linked users for non-admins."""
        user = request.env.user
        if user and user.id != 1 and user.id != 2:
            # NOTE(review): this matches building_id against
            # building_house_id -- confirm the intended relation.
            user_ids = (self.env['tb_users_blockhouse_res_groups_rel'].sudo()
                        .search([('building_id', 'in', self.building_house_id)])).user_id.ids
            return ["&", ("active", "=", True), ("id", "in", user_ids)]
        else:
            return [("active", "=", True)]

    @api.model
    def _domain_blockhouse_id(self):
        """Domain for the project field: only projects the user manages (BQT/BQL)."""
        user = request.env.user
        bqt_bh_id = []  # board of management (BQT) - blockhouse ids
        bqt_bd_id = []  # board of management (BQT) - building ids
        bql_bh_id = []  # management board (BQL) - blockhouse ids
        bql_bd_id = []  # management board (BQL) - building ids
        if user and user.id != 1 and user.id != 2:
            for item in user.tb_users_blockhouse_res_groups_rel_ids:
                if item.group_id.name and str_bqt in item.user_group_code:
                    bqt_bh_id.append(int(item.blockhouse_id.id))
                    bqt_bd_id.append(int(item.building_id.id))
                if item.group_id.name and str_bql in item.user_group_code:
                    bql_bh_id.append(int(item.blockhouse_id.id))
                    bql_bd_id.append(int(item.building_id.id))
            return ["&", ("is_active", "=", True), ("id", "in", list(set(bqt_bh_id + bql_bh_id)))]
        else:
            return [("is_active", "=", True)]

    @api.model
    def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        # Narrow grouped reads to the buildings/projects the user manages.
        user = request.env.user
        bqt_bh_id = []  # board of management (BQT) - blockhouse ids
        bqt_bd_id = []  # board of management (BQT) - building ids
        bql_bh_id = []  # management board (BQL) - blockhouse ids
        bql_bd_id = []  # management board (BQL) - building ids
        if user and user.id != 1 and user.id != 2:
            for item in user.tb_users_blockhouse_res_groups_rel_ids:
                if item.group_id.name and str_bqt in item.user_group_code:
                    bqt_bh_id.append(int(item.blockhouse_id.id))
                    bqt_bd_id.append(int(item.building_id.id))
                if item.group_id.name and str_bql in item.user_group_code:
                    bql_bh_id.append(int(item.blockhouse_id.id))
                    bql_bd_id.append(int(item.building_id.id))
            domain.append(('building_id', 'in', list(set(bqt_bd_id + bql_bd_id))))
            domain.append(('blockhouse_id', 'in', list(set(bqt_bh_id + bql_bh_id))))
        res = super(tb_vehicle, self).read_group(domain, fields, groupby, offset=offset, limit=limit,
                                                 orderby=orderby, lazy=lazy)
        return res

    @api.model
    def search_read(self, domain=None, fields=None, offset=0, limit=10, order=None):
        # Same visibility filtering as read_group, applied to list reads.
        user = request.env.user
        bqt_bh_id = []  # board of management (BQT) - blockhouse ids
        bqt_bd_id = []  # board of management (BQT) - building ids
        bql_bh_id = []  # management board (BQL) - blockhouse ids
        bql_bd_id = []  # management board (BQL) - building ids
        if user and user.id != 1 and user.id != 2:
            for item in user.tb_users_blockhouse_res_groups_rel_ids:
                if item.group_id.name and str_bqt in item.user_group_code:
                    bqt_bh_id.append(int(item.blockhouse_id.id))
                    bqt_bd_id.append(int(item.building_id.id))
                if item.group_id.name and str_bql in item.user_group_code:
                    bql_bh_id.append(int(item.blockhouse_id.id))
                    bql_bd_id.append(int(item.building_id.id))
            domain.append(('building_id', 'in', list(set(bqt_bd_id + bql_bd_id))))
            domain.append(('blockhouse_id', 'in', list(set(bqt_bh_id + bql_bh_id))))
        res = super(tb_vehicle, self).search_read(domain, fields, offset, limit, order)
        return res

    def open_edit_form(self):
        # Open the edit form if the user holds the write permission.
        per_name = 'perm_write_vehicle'
        error_messenger = 'Bạn chưa được phân quyền này!'
        can_do = self.check_permission(per_name, raise_exception=False)
        if can_do:
            return {
                'type': 'ir.actions.act_window',
                'name': 'Sửa phương tiện',
                'res_model': 'tb_vehicle',
                'res_id': self.id,
                'view_type': 'form',
                'view_mode': 'form',
                'view_id': self.env.ref('apartment_service_support.view_tb_vehicle_form').id,
                'context': {'form_view_initial_mode': 'edit'},
                'target': 'current',
            }
        raise ValidationError(error_messenger)

    def open_edit_approve_form(self):
        # Open the approval variant of the edit form, permission-gated.
        per_name = 'perm_approve_vehicle'
        error_messenger = 'Bạn chưa được phân quyền này!'
        can_do = self.check_permission(per_name, raise_exception=False)
        if can_do:
            return {
                'type': 'ir.actions.act_window',
                'name': 'Sửa phương tiện',
                'res_model': 'tb_vehicle',
                'res_id': self.id,
                'view_type': 'form',
                'view_mode': 'form',
                'view_id': self.env.ref('apartment_service_support.view_tb_vehicle_approve_form').id,
                'context': {'form_view_initial_mode': 'edit'},
                'target': 'current',
            }
        raise ValidationError(error_messenger)

    def confirm_delete(self):
        # Show a confirmation dialog before deletion, permission-gated.
        per_name = 'perm_delete_vehicle'
        error_messenger = 'Bạn chưa được phân quyền này!'
        can_do = self.check_permission(per_name, raise_exception=False)
        if can_do:
            message = """Bạn có chắc muốn xóa bản ghi này?"""
            value = self.env['dialog.box.confirm'].sudo().create({'message': message})
            return {
                'type': 'ir.actions.act_window',
                'name': 'Xóa bản ghi',
                'res_model': 'dialog.box.confirm',
                'view_type': 'form',
                'view_mode': 'form',
                'target': 'new',
                'res_id': value.id
            }
        raise ValidationError(error_messenger)

    def del_record(self):
        # Permanently delete the selected records.
        for record in self:
            record.unlink()
        pass
|
cntt0901taizero/residential-adminapp
|
src/resident_management/models/tb_vehicle.py
|
tb_vehicle.py
|
py
| 9,234 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12477031144
|
import argparse
import os
import importlib.util
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import json
from collections import defaultdict
import utils
import transformers
# Command-line interface shared by every task below; parsed at import time.
parser = argparse.ArgumentParser()
parser.add_argument('--task')
parser.add_argument('--model')
parser.add_argument('--dataset')
parser.add_argument('--k', default='0')
parser.add_argument('--mode', default='all')
parser.add_argument('--prompt', default='qa')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--repeats', default=1, type=int)
parser.add_argument('--output', default='plot.png')
parser.add_argument('--device', default='cpu')
args = parser.parse_args()

# Expose the chosen device to downstream modules via the environment.
os.environ["DEVICE"] = args.device

# Check if submission module is present. If it is not, then main() will not be executed.
use_submission = importlib.util.find_spec('submission') is not None
if use_submission:
    import submission
def plot():
    """Plot fine-tuning (LoRA) vs in-context-learning scores on xsum.

    Reads metric values from submission/results/{ft,icl}/*.json and saves the
    figure to the path given by --output.
    """
    dataset = 'xsum'
    model = 'med'
    series = defaultdict(lambda: defaultdict(list))
    support_sizes = set()

    def collect(subdir, variant, ks):
        # Accumulate (k, score) points for one run variant from its result files.
        curve_id = '_'.join([model, dataset, variant])
        for k in ks:
            result_file = '_'.join([model, dataset, str(k), variant])
            with open(f'submission/results/{subdir}/{result_file}.json', 'r') as f:
                metric = json.load(f)['metric']
            series[curve_id]['x'].append(k)
            support_sizes.add(k)
            series[curve_id]['y'].append(metric)

    collect('ft', 'lora16', [0, 1, 8, 128])
    collect('icl', 'tldr', [0, 1, 4])

    for label, points in series.items():
        plt.plot(points['x'], points['y'], label=label)
    if max(support_sizes) > 4:
        # Large k ranges read better on a symmetric-log axis.
        plt.xscale('symlog')
    axes = plt.gca()
    axes.xaxis.set_major_formatter(mticker.ScalarFormatter())
    axes.xaxis.set_ticks(sorted(support_sizes))
    plt.legend()
    plt.title(dataset)
    plt.ylabel(utils.metric_for_dataset(dataset))
    plt.xlabel('Number of support examples')
    plt.savefig(args.output, bbox_inches='tight')
# Download all models and datasets required by the grader
def cache():
    """Pre-fetch every model and dataset the grader needs into local caches."""
    model_specs = [
        {'name': 'bert-tiny', 'type': transformers.AutoModelForSequenceClassification, 'num_labels': 5},
        {'name': 'bert-tiny', 'type': transformers.AutoModelForCausalLM},
        {'name': 'med', 'type': transformers.AutoModelForCausalLM},
    ]
    for spec in model_specs:
        if 'num_labels' in spec:
            utils.get_model_and_tokenizer(spec['name'], spec['type'], num_labels=spec['num_labels'])
        else:
            utils.get_model_and_tokenizer(spec['name'], spec['type'])

    dataset_specs = [
        {'name': 'amazon', 'n_train': 1, 'n_val': 125},
        {'name': 'xsum', 'n_train': 8, 'n_val': 125},
    ]
    for spec in dataset_specs:
        utils.get_dataset(dataset=spec['name'], n_train=spec['n_train'], n_val=spec['n_val'])
def run():
    """Dispatch the requested --task using the parsed command-line arguments."""
    ks = [int(k) for k in args.k.split(',')]
    task = args.task
    if task == 'run_ft':
        submission.run_ft(args.model.split(','), args.dataset.split(','), ks, args.mode.split(','), args.debug, args.repeats)
    elif task == 'run_icl':
        submission.run_icl(args.model.split(','), args.dataset.split(','), ks, args.prompt.split(','), args.debug, args.repeats)
    elif task == 'plot_ft':
        submission.plot_ft(args.model.split(','), args.dataset.split(','), ks, args.mode.split(','), args.output)
    elif task == 'plot_icl':
        assert ',' not in args.dataset, "Only one dataset at a time for plotting"
        submission.plot_icl(args.model.split(','), args.dataset, ks, args.prompt.split(','), args.output)
    elif task == 'plot':
        plot()
    elif task == 'cache':
        cache()


if __name__ == '__main__':
    run()
|
mariopenglee/llm-metalearning
|
src/main.py
|
main.py
|
py
| 3,965 |
python
|
en
|
code
| 0 |
github-code
|
6
|
89691500
|
import wordgenerator

# Ask the user how many words to generate and how long each word should be.
NUMBER_OF_WORDS = int(input("How many random words do you want? "))
NUMBER_OF_LETTERS = int(input("How many letters do you want the random words to have? "))

# Collect one choice per letter position: vowel, consonant, or any letter.
user_options = list()
for i in range(0 , NUMBER_OF_LETTERS):
    user_choice = input("What letter " + str(i + 1) + " do you want? Enter 'v' for vowels, 'c' for consonants, 'l' for any letter: ")
    user_options.append(user_choice)

# Generate and print the requested number of words built from those choices.
for i in range(0, NUMBER_OF_WORDS):
    word = wordgenerator.assemble_word(user_options)
    print(word)
|
caiopg/random-text-generator
|
randomtextgenerator.py
|
randomtextgenerator.py
|
py
| 519 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44343669715
|
import sys
# Redirect stdin to the bundled sample input for local testing; the numbers
# in the trailing comment are presumably the sample answers -- confirm.
sys.stdin = open('input/19236-1.txt') # 33, 43, 76, 39
import copy
def find_fish(x, info):
    """Return the (row, col) of fish number `x` on the 4x4 board, or (-1, -1)."""
    for row in range(4):
        for col in range(4):
            if info[row][col][0] == x:
                return row, col
    return -1, -1
def bfs(x, y, info, total):
    """Recursively simulate the shark eating the fish at (x, y).

    `total` accumulates eaten fish numbers (stored 0-based, restored via +1);
    the global `answer` keeps the best total over all move sequences.
    """
    global answer
    total += info[x][y][0] + 1
    direction = info[x][y][1]
    # Mark the shark's square: no fish here anymore.
    info[x][y] = [-1, -1]
    # Move every remaining fish in increasing fish-number order.
    for i in range(16):
        fx, fy = find_fish(i, info)
        if (fx, fy) == (-1, -1):
            continue
        for _ in range(8):
            # Rotate the fish (direction index +1) until its target square is
            # on the board and not occupied by the shark.
            dx, dy = fx + delta[info[fx][fy][1]][0], fy + delta[info[fx][fy][1]][1]
            if not (0 <= dx < 4 and 0 <= dy < 4) or (dx == x and dy == y):
                info[fx][fy][1] = (info[fx][fy][1] + 1) % 8
                continue
            # Swap positions with whatever occupies the target square.
            info[fx][fy], info[dx][dy] = info[dx][dy], info[fx][fy]
            break
    # Try each of the up to 3 squares the shark can jump to in its direction;
    # each branch gets its own deep-copied board.
    for j in range(1, 4):
        move = False
        ni, nj = x + delta[direction][0] * j, y + delta[direction][1] * j
        if 0 <= ni < 4 and 0 <= nj < 4 and info[ni][nj][0] != -1:
            bfs(ni, nj, copy.deepcopy(info), total)
            move = True
        if not move:
            answer = max(total, answer)
# Board state: info[r][c] = [fish number, direction index], both stored 0-based.
info = [[0] * 4 for _ in range(4)]
for i in range(4):
    temp = list(map(int, input().split()))
    for j in range(4):
        # Input pairs (fish, direction) are 1-based; subtract 1 for storage.
        info[i][j] = [temp[2 * j] - 1, temp[2 * j + 1] - 1]

# Eight movement deltas as (row, col), starting from up and going counter-clockwise.
delta = ((-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1))
answer = 0

# The shark starts by eating the fish at (0, 0).
bfs(0, 0, info, 0)
print(answer)
|
nayeonkinn/algorithm
|
baekjoon/[G2] 19236. 청소년 상어.py
|
[G2] 19236. 청소년 상어.py
|
py
| 1,467 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14391947993
|
import glob
import json
def LoadTweets(directory):
    """Load, sort, and deduplicate tweets from the first 100 *.json files in `directory`.

    Each file must contain a JSON list of tweet dicts. Tweets lacking both
    'id' and 'id_str' are dropped, as are tweets below the id cutoff
    656971539691257900; both 'id' and 'id_str' are filled in on every result,
    and consecutive duplicates (same id) are removed.
    """
    files = glob.glob(directory + "/*json")[:100]
    twts = [a for fl in files for a in json.load(open(fl))]
    twts.sort(key=lambda x: x['id'] if 'id' in x else int(x['id_str']) if 'id_str' in x else 0)
    # Bug fix: the original tested the misspelled key 'str_id' here, which
    # silently discarded tweets that only carried 'id_str'.
    twts = [a for a in twts if 'id' in a or 'id_str' in a]
    twts = [a for a in twts
            if (('id' in a and a['id'] >= 656971539691257900) or
                ('id_str' in a and int(a['id_str']) >= 656971539691257900))]
    res = []
    prev_id = 0
    for tw in twts:
        # Normalize: make sure both id representations are present.
        if 'id' not in tw: tw['id'] = int(tw['id_str'])
        if 'id_str' not in tw: tw['id_str'] = str(tw['id'])
        # Skip consecutive duplicates (the list is sorted by id).
        if tw['id'] != prev_id: res.append(tw)
        prev_id = tw['id']
    return res
def LoadUsers(directory):
    """Read user names from every *.txt file in `directory`.

    Names are comma-separated per line; surrounding '@' characters are
    stripped. Returns (set of names, list of (name, source_file) pairs).
    """
    usrfiles = glob.glob(directory + "/*txt")
    users = []
    for fl in usrfiles:
        for nms in open(fl):
            for raw_name in nms.strip().split(","):
                users.append((raw_name.strip("@"), fl))
    usrset = set(name for name, _ in users)
    return usrset, users
|
datumkg/electweet
|
ElecTweet/TweetLoader.py
|
TweetLoader.py
|
py
| 1,092 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1518966554
|
from argparse import ArgumentParser, RawTextHelpFormatter
from glob import glob
from subprocess import check_call
import os
from shutil import rmtree
def compile_clm():
    """Create a CESM/CLM case on Piz Daint, write build_cesm.bash, and optionally run it.

    Parses its own command line (see --help). The generated bash script loads
    the requested PrgEnv modules, creates a new case under ${SCRATCH}, applies
    SourceMods, builds, and copies the resulting cesm.exe to --output.
    """
    # Define and parse command line arguments
    # ---------------------------------------
    dsc = "Compile CLM on Piz Daint. A case will be created in a subfolder of your ${SCRATCH}.\n"\
          " WARNING: tool has to be run from the default Prg-Env-cray environment"
    parser = ArgumentParser(description=dsc, formatter_class=RawTextHelpFormatter)
    parser.add_argument('cesm_trunk', help="path to the CESM directory")
    parser.add_argument('--clm_version', choices=['4.0', '4.5'], default='4.0', help="CLM version")
    parser.add_argument('-c', '--compiler', help="compiler to use (default: pgi)", default='pgi')
    parser.add_argument('-v', '--compiler_version', help="switch to this version of the compiler\n"\
                        "This is not recommended by CSCS")
    parser.add_argument('-d', '--debug', help="compile in debug mode (default: false)",
                        action='store_true')
    parser.add_argument('--src_mod', action='append',
                        help="path to additionnal/modified sources (e.g. oasis interface)\n"\
                        "has to be a folder containing src.xxx subfolders, can be specified several times")
    parser.add_argument('-o', '--output', help="output executable file path (default: ./cesm.exe)",
                        default='./cesm.exe')
    parser.add_argument('--no_exe', help="do not execute build_cesm.bash, leave it to any suited modification before actual compilation.",
                        action='store_false', dest='execute')
    opts = parser.parse_args()

    # Init some variables
    # -------------------
    CESM_TRUNK = opts.cesm_trunk
    EXP = 'clm{:s}_bld'.format(opts.clm_version)
    CASEDIR = os.path.join(os.environ['SCRATCH'], EXP)
    if os.path.exists(CASEDIR):
        rmtree(CASEDIR)
    RES = '1.9x2.5_gx1v6'
    COMP = 'ITEST'
    MACH = 'daint'
    if opts.clm_version == '4.5':
        COMP += 'CLM45'
    out_exe = os.path.abspath(opts.output)
    # Bug fix: `--src_mod` uses action='append' whose default is None, so the
    # original comprehension raised TypeError whenever --src_mod was omitted.
    sourcemods = [os.path.abspath(src_dir) for src_dir in (opts.src_mod or [])]
    # NOTE(review): the compiler passed to create_newcase is hard-coded to
    # 'pgi_oas' regardless of --compiler -- confirm this is intentional.
    create_case_fmt = '{:s}/scripts/create_newcase -res {:s} -compset {:s} -mach {:s} -compiler pgi_oas -case {:s}'
    create_case_cmd = create_case_fmt.format(CESM_TRUNK, RES, COMP, MACH, CASEDIR)

    # Build compiling script
    # ----------------------
    with open('build_cesm.bash', mode='w') as script:
        script.write('#!/bin/bash\n')
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Modules\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        if opts.compiler == 'pgi':
            script.write('module switch PrgEnv-cray PrgEnv-pgi\n')
            if opts.compiler_version is not None:
                script.write('module switch pgi pgi/{:s}\n'.format(opts.compiler_version))
        elif opts.compiler == 'intel':
            script.write('module switch PrgEnv-cray PrgEnv-intel\n')
            if opts.compiler_version is not None:
                script.write('module switch intel intel/{:s}\n'.format(opts.compiler_version))
        elif opts.compiler == 'cray' and opts.compiler_version is not None:
            script.write('module switch cce cce/{:s}\n'.format(opts.compiler_version))
        script.write('\n')
        script.write('module load cray-netcdf\n')
        script.write('module load daint-gpu\n')
        script.write('\n')
        script.write('module list\n')
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Create case\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        script.write('{:s}\n'.format(create_case_cmd))
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Setup case\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        script.write('cd {:s}\n'.format(CASEDIR))
        script.write('\n')
        # Bug fix: these two annotation lines were written without a leading
        # '#', producing invalid commands in the generated bash script.
        script.write('# switch off river routing\n')
        script.write('./xmlchange RTM_MODE="NULL"\n')
        script.write('\n')
        script.write('# set transient CO2\n')
        script.write('./xmlchange CCSM_BGC=CO2A,CLM_CO2_TYPE=diagnostic\n')
        if opts.debug:
            script.write('# activate debug mode\n')
            script.write('./xmlchange -file env_build.xml -id DEBUG -val "TRUE"\n')
        script.write('\n')
        script.write('./cesm_setup\n')
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Add source additions/modifications\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        for src_dir in sourcemods:
            print(src_dir)
            for comp in glob('{:s}/src.*'.format(src_dir)):
                print(comp)
                script.write('rsync -avrL {:s} SourceMods\n'.format(comp))
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Build\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        script.write('{:s}.build\n'.format(EXP))
        script.write('rsync -avr bld/cesm.exe {:s}\n'.format(out_exe))
    os.chmod('build_cesm.bash', 0o755)

    # Execute compiling script
    # ------------------------
    if opts.execute:
        check_call(['./build_cesm.bash'])
|
COSMO-RESM/COSMO_CLM2_tools
|
COSMO_CLM2_tools/compile_clm.py
|
compile_clm.py
|
py
| 5,769 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34276132086
|
""" Initializes Pickly Files"""
import pickle
import json
import requests
import urllib.request
def guiinit(sub):
    """Fetch the 20 newest posts of subreddit `sub` and pickle their fields.

    Writes three pickle files in the working directory -- title.obj, url.obj
    and perma.obj -- each holding a list of 20 strings.

    NOTE(review): assumes the listing returns at least 20 posts; fewer raises
    IndexError -- confirm whether that is acceptable for the GUI.
    """
    # Fetch the newest-posts listing from the Reddit JSON API.
    r = urllib.request.urlopen(r'http://www.reddit.com/r/' + sub + '/new/.json', timeout=60).read().decode("utf-8")
    data = json.loads(r)
    # Collect the fields of interest from each post.
    titlelist = []
    urllist = []
    permalinklist = []
    for i in range(0, 20):
        post = data['data']['children'][i]['data']
        titlelist.append(post['title'])  # post title
        urllist.append(post['url'])  # linked URL
        permalinklist.append("http://www.reddit.com" + post['permalink'])  # comments page
    # Context managers guarantee the handles are closed even if a dump fails
    # (the original opened all three files up front and leaked them on error).
    with open("title.obj", 'wb') as title:
        pickle.dump(titlelist, title)
    with open("url.obj", 'wb') as url:
        pickle.dump(urllist, url)
    with open("perma.obj", 'wb') as permalink:
        pickle.dump(permalinklist, permalink)
|
picklesforfingers/FeedMeReddit
|
pickleinit.py
|
pickleinit.py
|
py
| 1,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7720740035
|
import sys
#f= open(sys.argv[1], 'r')
# for line in f:
# print(line.strip())
#
#
#f2= open(sys.argv[2],'r')
# for line in f2:
# print(line.strip())
# Build a gene-name -> protein-id lookup table from the first input file
# (tab-separated, two columns per line).
f=open(sys.argv[1])
#create an empty dictionary
flydict= dict()
for line in f:
    fields= line.strip().split('\t')
    #assign gene name and protein id to the dictionary
    flydict[fields[0]]= fields[1]
# Second file: tab-separated records whose column index 8 is looked up in
# the table built above.
# NOTE(review): assumes column 8 holds the gene name — matches the lookup
# keys from file 1; confirm against the actual input format.
f2=open(sys.argv[2],'r')
for line in f2:
    fields=line.strip().split('\t')
    print(fields)
    gene_name=fields[8]
    if gene_name in flydict:
        # Known gene: substitute the protein id in place and echo both
        # the mapping and the rewritten record.
        protein_id= flydict[gene_name]
        fields[8]=protein_id
        print(gene_name, protein_id)
        print(fields)
    else:
        # Unknown gene: use the optional 3rd CLI argument as a default
        # label when given, otherwise skip the record entirely.
        if len(sys.argv)==4:
            fields[8]=sys.argv[3]
            print(fields)
        else:
            continue
|
Hkhaira1/qbb2021-answers
|
day2-lunch/day2-lunch.py
|
day2-lunch.py
|
py
| 775 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9078103274
|
import rospy
from geometry_msgs.msg import Point
import VariableManager
class MasterRecorder:
    """Logs the master Point data to ``recorded_data.txt`` while the global
    recording flag is set, truncating the file each time a new recording
    starts."""

    def __init__(self):
        # Snapshot of the global recording flag; used in main() to detect
        # the rising edge (recording just switched on).
        self.recording = VariableManager.recording
        self.file = None  # kept for backward compatibility with callers
        self.data_to_write = VariableManager.master_data

    def main(self):
        """Poll once: reset the file on a recording start, then log data."""
        # Rising edge: recording turned on since the last poll -> fresh file.
        if (not self.recording) and VariableManager.recording:
            self.clear_recorded_data()
        self.recording = VariableManager.recording
        if self.recording:
            self.record_data()

    def clear_recorded_data(self):
        """Truncate recorded_data.txt."""
        # Opening in "w" mode truncates; the context manager closes the
        # handle even on error (the original left it to a manual close()).
        with open("recorded_data.txt", "w") as f:
            f.write("")

    def record_data(self):
        """Append the current master point as an "x,y,z" line."""
        # "a" mode creates the file if it does not exist, so the original
        # try/except fallback to "w" was dead code — and its bare except
        # could silently swallow genuine I/O errors.
        self.data_to_write = VariableManager.master_data
        with open("recorded_data.txt", "a") as f:
            f.write(f"{self.data_to_write.x},{self.data_to_write.y},{self.data_to_write.z}\n")
|
adi232004/FDP-Project
|
MasterDataRecorder.py
|
MasterDataRecorder.py
|
py
| 1,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25860754920
|
from typing import Any
from random import randint, random
import pygame as py
from engine.game_engine import Pygame
from engine.color import Color
from engine.game_objects import IGameObject
from engine.game_objects.modules import IAnimationModule, ICollisionModule
from engine.image import SpriteSheet
from engine.ui.text import TypeWriterText
from engine.map import LoopingMap
from src.player.player import Player
class UI_NPC_Text(TypeWriterText):
def __init__(self, game: Pygame = None):
super().__init__("We need to push forward! We need to push forward!", (200, 200, 200), game.fonts["8bit"], (135, 590), 10, game=game)
class UI_NPC_Text_Box(IGameObject):
def __init__(self, game: Pygame = None):
super().__init__("ui", game)
self.pos = (120, 577)
self.add_layer(f"{self.game.game_dir}\\data\\images\\ui\\ui_npc_text_box.png")
self.rect = self.get_layer_image(0).get_rect(topleft=self.pos)
class UI_NPC_Box(IGameObject):
def __init__(self, game: Pygame = None):
super().__init__("ui", game)
self.pos = (10, 665)
# self.add_layer(f"{self.game.game_dir}/data/images/npc_board.png")
self.add_layer(py.Surface((100, 100), flags=py.SRCALPHA))
self.add_layer(py.Surface((100, 100), flags=py.SRCALPHA))
self.add_layer(py.Surface((100, 100), flags=py.SRCALPHA))
# self.add_layer(f"{self.game.game_dir}/data/images/npc_box.png")
self.npc = ""
self.npcs = {
"red": py.image.load(f"{self.game.game_dir}/data/images/ui/npcs/red_npc.png")
}
self.anim_atlas = SpriteSheet(f"{self.game.game_dir}/data/images/ui/noise_atlas.png", 100, 100).get_image_array()
self.rect = self.get_layer_image(1).get_rect(bottomleft=self.pos)
self.set_module("animation", IAnimationModule(self, False))
self.get_module("animation").add_animation_by_dict(
"noise",
{
"layer": 1,
"frames": [self.anim_atlas[0], self.anim_atlas[1], self.anim_atlas[2], self.anim_atlas[3],
self.anim_atlas[4], self.anim_atlas[5], self.anim_atlas[6], self.anim_atlas[7]],
"frame_time": 40,
"loop": False,
"callback": self.noise_anim_ended
}
)
self.get_module("animation").add_animation_by_dict(
"noise_reversed",
{
"layer": 1,
"frames": [self.anim_atlas[7], self.anim_atlas[6], self.anim_atlas[5], self.anim_atlas[4],
self.anim_atlas[3], self.anim_atlas[2], self.anim_atlas[1], self.anim_atlas[1]],
"frame_time": 40,
"loop": False,
"callback": self.noise__reversed_anim_ended
}
)
self.text = UI_NPC_Text(self.game)
self.border = UI_NPC_Text_Box(self.game)
self.game.objects.add("ui_npc_text", self.text)
self.game.objects.add("ui_npc_text_box", self.border)
self.get_module("animation").should_animate = True
self.get_module("animation").play("noise")
def noise_anim_ended(self):
self.npc = "red"
if self.npc != "":
self.set_layer(self.npcs[self.npc], layer_id=0)
self.set_layer(self.anim_atlas[8], layer_id=1)
def noise__reversed_anim_ended(self):
self.npc = ""
self.set_layer(py.Surface((100, 100), py.SRCALPHA), layer_id=0)
self.set_layer(py.Surface((100, 100), py.SRCALPHA), layer_id=1)
class UI_BG(IGameObject):
def __init__(self, game=None):
super().__init__("ui", game)
self.pos = (0, 675)
self.add_layer(f"{self.game.game_dir}/data/images/ui_bg.png")
self.rect = self.get_layer_image(0).get_rect(bottomleft=self.pos)
# Rock Object #
class Rock(IGameObject):
def __init__(self, game: Pygame):
super(Rock, self).__init__('env', game)
self.set_module("collision", ICollisionModule(self, False))
self.pos = (self.game.screen.get_width(), randint(0, self.game.screen.get_height()))
self.vel = (randint(-2,-1), randint(-1, 1) * (random() * 0.5))
spr = py.image.load(f"{self.game.game_dir}/data/images/rock.png").convert_alpha()
scale = max(25, random() * 48)
spr = py.transform.scale(spr, (scale, scale))
spr = py.transform.rotate(spr, randint(-25, 25))
self.add_layer(spr)
self.rect = self.get_layer_image(self.primary_layer).get_rect(center=self.pos)
self.mask = py.mask.from_surface(self.get_layer_image(0))
def update(self, *args, **kwargs) -> None:
self.pos = (self.pos[0] + self.vel[0], self.pos[1] + self.vel[1])
self.rect = self.get_layer_image(0).get_rect(center=self.pos)
super(Rock, self).update()
class RockSpawner(IGameObject):
def __init__(self, group: str = "handler", game: Pygame = None):
super().__init__(group, game)
self.spawn_timing = 2000
self.last_spawn_time = self.game.time
def update(self, *args: Any, **kwargs: Any) -> None:
if self.last_spawn_time + self.spawn_timing <= self.game.time:
self.game.objects.add(f"rock_{randint(0, 999999)}", Rock(self.game))
self.last_spawn_time = self.game.time
return super().update(*args, **kwargs)
# MINIMAL RUNNING EXAMPLE #
# Main Game Engine Object #
class Game(Pygame):
def __init__(self):
super(Game, self).__init__(1200, 675, "Space Game", fps_target=60)
self.add_group("handler")
self.add_group("map")
self.add_group("player")
self.add_group("player_projectile")
self.add_group("env")
self.add_group("enemy")
self.add_group('ui')
self.fonts["8bit"] = self.get_font("8-BIT WONDER.ttf", 17)
# SETUP GAME AXIS CONTROLS (IControlModule().get_axis("move_left"))
self.axis = {'move_left': {py.K_a: -1, py.K_d: 1}, 'move_up': {py.K_w: -1, py.K_s: 1}}
py.event.set_grab(True)
py.mouse.set_pos((100, self.screen.get_height() / 2))
py.mouse.set_visible(False)
self.load_data()
self.start_game_loop()
def load_data(self):
super(Game, self).load_data() # Required to set the base dir of the game for easy access in objects without recalculating where to split the path (self.game_dir)
self.objects['map'] = LoopingMap(self, "data/images/background.png", [50, 0]) # Looping map uses the images size to calculate its looping. You may need to rescale your image to fit your game area (Window scaling will handle it after as long as it covers the initial screen). Supports vertical or horizontal but not both (Hopefully in future revisions)
self.objects["player"] = Player((0, self.screen.get_height() / 2), self) # Adds a GameObject to the ObjectHandler so that update and draw calls are triggered correctly
self.objects.add("rock_spawner", RockSpawner(game=self))
self.objects["ui_bg"] = UI_BG(self)
self.objects['ui_npc_box'] = UI_NPC_Box(self)
# self.objects['ui_npc_text'] = UI_NPC_Text(self)
def draw(self):
self.screen.fill(Color(1, 1, 1, 1).RGB) # self.screen.fill((255, 255, 255)). Color class is used mostly for storing colors to easily recall but may get more features later
super(Game, self).draw() # Required to call the draw function for registered objects
# UNCOMMENT FOR RECT DEBUGGING
if self.debug:
for group in self.groups.keys():
for sprite in self.groups[group]:
self.debug.debug_collision(sprite)
super(Game, self).render_display() # Required to call the render update of the display (py.display.flip())
def update(self):
super(Game, self).update()
for event in py.event.get():
# COMMENT TO REMOVE MANUAL WINDOW RESIZING SUPPORT
if event.type == py.VIDEORESIZE:
self.windowSize = (event.w, event.h)
py.display._resize_event(event)
if event.type == py.QUIT:
self.quit()
if event.type == py.KEYDOWN:
# UNCOMMENT TO TOGGLE DEBUGGING
if event.key == py.K_c: self.debug.set_debug(not self.debug._debug)
if event.key == py.K_p:
npc_ui = self.objects.get("ui_npc_box")
if npc_ui.npc == "":
npc_ui.get_module("animation").play("noise")
else:
self.objects["ui_npc_box"].get_module("animation").play("noise_reversed")
if event.key == py.K_ESCAPE:
self.quit()
if __name__ == '__main__':
g = Game()
|
XCPika/Pygame-Extension-Framework
|
main.py
|
main.py
|
py
| 8,838 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29310445886
|
from tkinter import *
# Create the root window and configure its basic geometry.
window = Tk() #need a way of keeping the window on the screen
window.title("First GUI Program")
window.minsize(width = 500, height = 300)
window.config(padx=20,pady=20) #adds more space for our widgets
#Label
my_label = Label(text="taco",font =("Papyrus",20))
# Three equivalent ways of changing the label text; the last wins.
my_label["text"] = "cat"
my_label.config(text="tacocat")
my_label.config(padx=50,pady=50)
my_label.grid(column=0,row=0) #packs our label onto the screen
#button
def button_clicked():
    # Copy the current Entry contents into the label.
    my_label["text"] = input.get()
button = Button(text="Click me",command=button_clicked)
button.grid(column=1,row=1)
#Entry
# NOTE(review): "input" shadows the built-in input() for the rest of
# this script — consider renaming (e.g. text_entry).
input = Entry(width = 10)
input.grid(column=3,row=2)
# Runs once at startup, before mainloop — the entry is still empty here.
print(input.get())
# NOTE(review): rebinding "button" discards the reference to the first
# button widget (the widget itself stays on screen via grid).
button = Button(text="No Click me",command=button_clicked)
button.grid(column=2,row=0)
#keeps our window on the screen
window.mainloop()
|
RoccoPic/100-Days-of-Code
|
Day-27/main.py
|
main.py
|
py
| 818 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6323470596
|
import typing
from typing import (
Union,
Optional,
List,
)
import asyncio
import logging
from datetime import datetime
from hikari import ActionRowComponent, Embed, MessageCreateEvent, embeds
from hikari import ButtonStyle
from hikari.impl.special_endpoints import MessageActionRowBuilder, LinkButtonBuilder
from hikari.events import InteractionCreateEvent
import lightbulb
import lightbulb.utils as lightbulb_utils
from lightbulb import commands, context
from lightbulb import OptionModifier as OM
from lightbulb.context import Context
import hikari
from matplotlib.style import available
from numpy import full, isin
from fuzzywuzzy import fuzz
from utils import Colors, Human, Paginator, crumble
from core import getLogger, Inu
log = getLogger(__name__)
plugin = lightbulb.Plugin("Voice commands")
@plugin.command
@lightbulb.add_checks(
lightbulb.has_role_permissions(hikari.Permissions.MOVE_MEMBERS),
lightbulb.bot_has_role_permissions(hikari.Permissions.MOVE_MEMBERS),
lightbulb.guild_only,
)
# @lightbulb.option(
# "member",
# "a person who is in the current voice channel. normally you",
# type=hikari.Member,
# default=None,
# )
@lightbulb.option(
"from-voice-channel",
"the voice channel where move peaple of",
type=hikari.GuildChannel,
default=None,
)
@lightbulb.option(
"voice-channel",
"the voice channel where you want to move to",
type=hikari.GuildChannel,
)
@lightbulb.command(
"move-all",
"moves all members from a current voice channel into another",
aliases=["move"]
)
@lightbulb.implements(commands.SlashCommand, commands.PrefixCommand)
async def move_all(ctx: Context):
target_channel: hikari.InteractionChannel = ctx.options["voice-channel"]
if not target_channel.type == hikari.ChannelType.GUILD_VOICE:
await ctx.respond(f"{target_channel} is not a voice channel", flags=hikari.MessageFlag.EPHEMERAL)
return None
if not ctx.options["from-voice-channel"]:
member = ctx.member
states = ctx.bot.cache.get_voice_states_view_for_guild(ctx.guild_id)
voice_state = [state for state in states.values() if state.user_id == member.id]
if not voice_state:
await ctx.respond(f"{member.display_name} needs to be in a voice channel")
return None
channel_id = voice_state[0].channel_id
user_ids = [state.user_id for state in states.values() if state.channel_id == channel_id]
else:
user_ids = [
state.user_id for state in ctx.bot.cache.get_voice_states_view_for_guild(ctx.guild_id).values()
if state.channel_id == ctx.options["from-voice-channel"].id
]
tasks = [
asyncio.create_task(
ctx.bot.rest.edit_member(
guild=ctx.guild_id,
user=user_id,
voice_channel=target_channel.id
)
)
for user_id in user_ids
]
await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
await ctx.respond(
f"Moved {Human.list_([f'<@{user_id}>' for user_id in user_ids], with_a_or_an=False)} to `{target_channel.name}`"
)
@move_all.autocomplete("voice-channel")
async def tag_name_auto_complete(
option: hikari.AutocompleteInteractionOption,
interaction: hikari.AutocompleteInteraction
) -> List[str]:
vcs = []
guild = interaction.get_guild()
if not guild:
return []
for ch in guild.get_channels().values():
if not isinstance(ch, hikari.GuildVoiceChannel):
continue
if lightbulb_utils.permissions_in(ch, interaction.member) & hikari.Permissions.CONNECT:
vcs.append(f"{ch.id} | {ch.name}")
return vcs[:24]
def load(bot: Inu):
bot.add_plugin(plugin)
|
zp33dy/inu
|
inu/ext/commands/voice.py
|
voice.py
|
py
| 3,792 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33245759934
|
import time
from collections import OrderedDict
from collections.abc import Callable, Sequence
from typing import Any, NamedTuple
DB = "timeseries"
TABLE_RAW = "paii_raw"
TIME_FIELD = "paii_time"
class Field(NamedTuple):
json_key: str
store_flag: bool
db_name: str
data_type: str
convert: Callable[[dict[str, Any], str], Any] | None = None
# conversions
def fahrenheit2celsius(data: dict[str, Any], key: str) -> float:
    """Read the Fahrenheit value stored under *key* and return it
    converted to Celsius, rounded to two decimal places."""
    fahrenheit = data[key]
    celsius = (fahrenheit - 32) * 5 / 9
    return round(celsius, 2)
def get_response_date(data: dict[str, Any], key: str) -> int:
    """Return the response date stored under *key*, falling back to the
    current Unix timestamp when the field is absent."""
    if key in data:
        return data[key]
    return time.time()
def missing_int(data: dict[str, Any], key: str) -> int:
    """Return the integer stored under *key*, or -1 when it is absent."""
    try:
        return data[key]
    except KeyError:
        return -1
fields = [
# inserted by the sceduler
Field(TIME_FIELD, True, TIME_FIELD, "TIMESTAMPTZ NOT NULL"),
# from the device
Field("SensorId", False, "SensorId", "VARCHAR"), # eg "84:f3:eb:7b:c8:ee"
Field("DateTime", False, "DateTime", "VARCHAR"), # eg "2020/08/23T10:44:39z"
Field("Geo", True, "geo", "VARCHAR"), # eg "PurpleAir-c8ee"
Field("Mem", False, "Mem", "VARCHAR"), # eg 18936
Field("memfrag", False, "memfrag", "VARCHAR"), # eg 8
Field("memfb", False, "memfb", "VARCHAR"), # eg 17568
Field("memcs", False, "memcs", "VARCHAR"), # eg 896
Field("Id", False, "Id", "VARCHAR"), # eg 4177
Field("lat", True, "lat", "DOUBLE PRECISION"), # eg -37.8484
Field("lon", True, "lon", "DOUBLE PRECISION"), # eg 145.177399
Field("Adc", True, "adc", "DOUBLE PRECISION"), # eg 0.05
Field("loggingrate", False, "loggingrate", "VARCHAR"), # eg 15
Field("place", True, "place", "VARCHAR"), # eg "outside"
Field("version", False, "version", "VARCHAR"), # eg "6.01"
Field("uptime", False, "uptime", "VARCHAR"), # eg 242296
Field("rssi", False, "rssi", "VARCHAR"), # eg -59
Field("period", True, "period", "INTEGER"), # eg 120
Field("httpsuccess", False, "httpsuccess", "VARCHAR"), # eg 12169
Field("httpsends", False, "httpsends", "VARCHAR"), # eg 12182
Field("hardwareversion", True, "hardwareversion", "VARCHAR"), # eg "2.0"
Field(
"hardwarediscovered",
False,
"hardwarediscovered",
"VARCHAR",
), # eg "2.0+BME280+PMSX003-B+PMSX003-A"
Field(
"current_temp_f",
True,
"current_temp_c",
"DOUBLE PRECISION",
fahrenheit2celsius,
), # eg 52
Field("current_humidity", True, "current_humidity", "DOUBLE PRECISION"), # eg 55
Field(
"current_dewpoint_f",
True,
"current_dewpoint_c",
"DOUBLE PRECISION",
fahrenheit2celsius,
), # eg 36
Field("pressure", True, "pressure", "DOUBLE PRECISION"), # eg 1005.28
Field("p25aqic_b", False, "p25aqic_b", "VARCHAR"), # eg "rgb(0,228,0)"
Field("pm2.5_aqi_b", False, "pm2.5_aqi_b", "VARCHAR"), # eg_aqi_b": 5
Field("pm1_0_cf_1_b", True, "pm1_0_cf_1_b", "DOUBLE PRECISION"), # eg 0.39
Field("p_0_3_um_b", False, "p_0_3_um_b", "VARCHAR"), # eg 261.79
Field("pm2_5_cf_1_b", True, "pm2_5_cf_1_b", "DOUBLE PRECISION"), # eg 1.3 **** µg/m3
Field("p_0_5_um_b", False, "p_0_5_um_b", "VARCHAR"), # eg 72.35
Field("pm10_0_cf_1_b", True, "pm10_0_cf_1_b", "DOUBLE PRECISION"), # eg 1.72
Field("p_1_0_um_b", False, "p_1_0_um_b", "VARCHAR"), # eg 13.05
Field("pm1_0_atm_b", False, "pm1_0_atm_b", "VARCHAR"), # eg 0.39
Field("p_2_5_um_b", False, "p_2_5_um_b", "VARCHAR"), # eg 2.42
Field("pm2_5_atm_b", False, "pm2_5_atm_b", "VARCHAR"), # eg 1.3
Field("p_5_0_um_b", False, "p_5_0_um_b", "VARCHAR"), # eg 0.7
Field("pm10_0_atm_b", False, "pm10_0_atm_b", "VARCHAR"), # eg 1.72
Field("p_10_0_um_b", False, "p_10_0_um_b", "VARCHAR"), # eg 0.0
Field("p25aqic", False, "p25aqic", "VARCHAR"), # eg "rgb(0,228,0)"
Field("pm2.5_aqi", False, "pm2.5_aqi", "VARCHAR"), # eg_aqi": 1
Field("pm1_0_cf_1", True, "pm1_0_cf_1", "DOUBLE PRECISION"), # eg 0.14
Field("p_0_3_um", False, "p_0_3_um", "VARCHAR"), # eg 163.63
Field("pm2_5_cf_1", True, "pm2_5_cf_1", "DOUBLE PRECISION"), # eg 0.33 **** µg/m3
Field("p_0_5_um", False, "p_0_5_um", "VARCHAR"), # eg 45.77
Field("pm10_0_cf_1", True, "pm10_0_cf_1", "DOUBLE PRECISION"), # eg 0.42
Field("p_1_0_um", False, "p_1_0_um", "VARCHAR"), # eg 7.79
Field("pm1_0_atm", False, "pm1_0_atm", "VARCHAR"), # eg 0.14
Field("p_2_5_um", False, "p_2_5_um", "VARCHAR"), # eg 0.56
Field("pm2_5_atm", False, "pm2_5_atm", "VARCHAR"), # eg 0.33
Field("p_5_0_um", False, "p_5_0_um", "VARCHAR"), # eg 0.18
Field("pm10_0_atm", False, "pm10_0_atm", "VARCHAR"), # eg 0.42
Field("p_10_0_um", False, "p_10_0_um", "VARCHAR"), # eg 0.0
Field("pa_latency", False, "pa_latency", "VARCHAR"), # eg 631
Field("response", False, "response", "VARCHAR"), # eg 201
Field(
"response_date",
True,
"response_date",
"INTEGER",
get_response_date,
), # eg 1598179477
Field("latency", True, "latency", "INTEGER", missing_int), # eg 1459
Field("key1_response", False, "key1_response", "VARCHAR"), # eg 200
Field("key1_response_date", False, "key1_response_date", "VARCHAR"), # eg 1598179467
Field("key1_count", False, "key1_count", "VARCHAR"), # eg 79205
Field("ts_latency", False, "ts_latency", "VARCHAR"), # eg 1198
Field("key2_response", False, "key2_response", "VARCHAR"), # eg 200
Field("key2_response_date", False, "key2_response_date", "VARCHAR"), # eg 1598179470
Field("key2_count", False, "key2_count", "VARCHAR"), # eg 79212
Field("ts_s_latency", False, "ts_s_latency", "VARCHAR"), # eg 1141
Field("key1_response_b", False, "key1_response_b", "VARCHAR"), # eg 200
Field(
"key1_response_date_b",
False,
"key1_response_date_b",
"VARCHAR",
), # eg 1598179472
Field("key1_count_b", False, "key1_count_b", "VARCHAR"), # eg 79213
Field("ts_latency_b", False, "ts_latency_b", "VARCHAR"), # eg 1133
Field("key2_response_b", False, "key2_response_b", "VARCHAR"), # eg 200
Field(
"key2_response_date_b",
False,
"key2_response_date_b",
"VARCHAR",
), # eg 1598179474
Field("key2_count_b", False, "key2_count_b", "VARCHAR"), # eg 79217
Field("ts_s_latency_b", False, "ts_s_latency_b", "VARCHAR"), # eg 1136
Field("wlstate", False, "wlstate", "VARCHAR"), # eg "Connected"
Field("status_0", True, "status_0", "INTEGER"), # eg 2
Field("status_1", True, "status_1", "INTEGER"), # eg 2
Field("status_2", True, "status_2", "INTEGER"), # eg 2
Field("status_3", True, "status_3", "INTEGER"), # eg 2
Field("status_4", True, "status_4", "INTEGER"), # eg 2
Field("status_5", True, "status_5", "INTEGER"), # eg 2
Field("status_6", True, "status_6", "INTEGER", missing_int), # eg 2
Field("status_7", True, "status_7", "INTEGER"), # eg 0
Field("status_8", True, "status_8", "INTEGER"), # eg 2
Field("status_9", True, "status_9", "INTEGER"), # eg 2
Field("ssid", False, "ssid", "VARCHAR"), # eg "apocalypse
]
def gen_stored(fields: list[Field] = fields):
    """Yield only the Field specs whose store_flag is set (i.e. the ones
    that become database columns)."""
    for spec in fields:
        if spec.store_flag:
            yield spec
def compose_create(
    table_name: str,
    time_field: str,
    fields: list[Field] = fields,
) -> str:
    """Compose a CREATE TABLE statement covering the stored fields.

    Args:
        table_name (str): database table name.
        time_field (str): column used as the primary key (timestamp).
        fields (list[Field], optional): field specifications; only those
            with store_flag set become columns. Defaults to the
            module-level list.

    Returns:
        str: CREATE TABLE IF NOT EXISTS SQL.
    """
    # Bug fix: the ``fields`` argument was previously ignored because
    # gen_stored() was called without it (always using the global list).
    fdesc = ",\n".join(f"{f.db_name} {f.data_type}" for f in gen_stored(fields))
    sql = f"""CREATE TABLE IF NOT EXISTS {table_name} (
    {fdesc},
    PRIMARY KEY({time_field})
    );
    """
    return sql
def compose_insert(field_names: Sequence, table_name: str) -> str:
    """Compose a parameterized INSERT statement.

    Args:
        field_names (Sequence): database table field names.
        table_name (str): database table name.

    Returns:
        str: insert SQL with $1..$N positional placeholders.
    """
    columns = ", ".join(field_names)
    placeholders = ", ".join(f"${n}" for n in range(1, len(field_names) + 1))
    return f"INSERT INTO {table_name} ({columns}) values ({placeholders})"
def convert_data(data: dict[str, Any], fields: list[Field] = fields) -> OrderedDict:
    """Return filtered and ordered device data.

    Args:
        data (dict[str, Any]): raw dictionary directly from device.
        fields (list[Field], optional): field specifications.

    Returns:
        OrderedDict: {db_name: converted_value, ...}. items() will return
        in the same order as SQL commands assuming they are all based on
        the same field list.
    """
    missing: list[str] = []

    def convert(data: dict[str, Any], field: Field) -> Any:
        # Field-specific converter takes precedence; converters are
        # responsible for handling a missing key themselves.
        if field.convert:
            return field.convert(data, field.json_key)
        if field.json_key in data:
            return data[field.json_key]
        missing.append(field.json_key)
        return None

    # Bug fix: ``fields`` is now passed through to gen_stored(); the
    # parameter used to be silently ignored in favor of the global list.
    # (The redundant missing.clear() on a freshly created list was dropped.)
    result = OrderedDict((f.db_name, convert(data, f)) for f in gen_stored(fields))
    # if missing:
    #     print(f"fields were missing: {missing}")
    return result
|
PaulSorenson/purpleair_sensor
|
paii/purple_data.py
|
purple_data.py
|
py
| 9,065 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28176871934
|
import os
import time
import subprocess
import shlex
import os
from delete_big_files import deleteBigFilesFor1000experiment
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
train_dir = '/home/ubuntu/robust_transfer_learning/'
sample_size_to_number_of_seeds_epochs_and_log_freq = {
400 : (3, 150, 5),
1600 : (2, 150, 5),
6400 : (1, 150, 5),
25600: (1, 150, 5),
-1 : (1, 150, 5),
}
target_ds_list = ['food101']
eps_levels = [0, 0.05, 0.25, 1]
num_unfrozen_blocks_list = [3]
polling_delay_seconds = 1
concurrent_commands = 4
commands_to_run = []
def poll_process(process):
    """Sleep one polling interval, then return the subprocess's exit
    status (None while it is still running)."""
    # Throttles the busy-wait loop in the caller below.
    time.sleep(polling_delay_seconds)
    return process.poll()
for t in target_ds_list:
for ub in num_unfrozen_blocks_list:
for n, tup in sample_size_to_number_of_seeds_epochs_and_log_freq.items():
num_seeds, ne, li = tup
seed_list = [20000000 + 100000*(i) for i in range(num_seeds)]
for s in seed_list:
for e in eps_levels:
command = f'python train.py -e {e} -t {t} -ub {ub} -n {n} -s {s} -ne {ne} -li {li} -d True'
commands_to_run.append(command)
for start_idx in range(0, len(commands_to_run), concurrent_commands):
os.chdir(train_dir)
processes = []
rng = range(start_idx, min(len(commands_to_run), start_idx + concurrent_commands))
print(rng)
for i in rng:
os.environ["CUDA_VISIBLE_DEVICES"] = str(i%4)
processes.append(subprocess.Popen(shlex.split(commands_to_run[i])))
print(f'Starting command: {commands_to_run[i]}')
for process in processes:
while poll_process(process) is None:
pass
deleteBigFilesFor1000experiment()
|
utrerf/robust_transfer_learning
|
tools/batch.py
|
batch.py
|
py
| 1,717 |
python
|
en
|
code
| 11 |
github-code
|
6
|
2559703297
|
import os
from flask import Flask
from flask_jwt_extended import JWTManager
from flask_login import LoginManager
from .auth import ldap_handler
from .db import database
from .db.models import *
from .messages import messages
from .mocks import fake_ldap_handler
configuration_switch = {
"default": "backend.config.DevConfig", # Development configuration (fake LDAP)
"staging": "backend.config.StagingConfig", # Staging configuration (should be as close as possible to prod)
"production": "backend.config.ProductionConfig", # Production configuration
}
ENV = os.environ.get("ENV", "default")
# SET UP =====================================
LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.login_view = "auth.login"
LOGIN_MANAGER.login_message = messages.LOGIN_MANAGER_MESSAGE
LDAP = fake_ldap_handler.FAKE_LDAP if ENV == "default" else ldap_handler.LDAP
DB = database.DB
JWT_MANAGER = JWTManager()
# ===================================================
def create_app(test_configuration=None, test_db=None):
    """Application factory method.

    Args:
        test_configuration: optional dict of config overrides; when given,
            it replaces the ENV-selected configuration object.
        test_db: optional database extension to initialize instead of the
            module-level DB (used by tests).

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__, static_folder="../build/static", template_folder="../build")
    # Configure the application
    if test_configuration:
        app.config.update(test_configuration)
    else:
        app.config.from_object(configuration_switch[ENV])
    # Register extensions ################################
    #     |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  | #
    ######################################################
    LDAP.init_app(app)
    LOGIN_MANAGER.init_app(app)
    JWT_MANAGER.init_app(app)
    # Tests get their own database object so production DB is untouched.
    if test_configuration:
        test_db.init_app(app)
    else:
        DB.init_app(app)
    ############################################################
    # Imported here to avoid circular imports at module load time.
    from .views import auth, index, home, configuration, plugin
    app.register_blueprint(auth.bp)
    app.register_blueprint(index.bp)
    app.register_blueprint(home.bp)
    app.register_blueprint(configuration.bp)
    app.register_blueprint(plugin.bp)
    return app
|
elliecherrill/diligent
|
backend/__init__.py
|
__init__.py
|
py
| 2,021 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9975379557
|
from django.conf.urls import url
from .views import *
urlpatterns = [
    # Course list
    url(r'^list/$', CourseListView.as_view(), name='list'),
    # Course detail
    url(r'^detail/(?P<course_id>\d+)/$', DetailView.as_view(), name='detail'),
    # Course video information
    url(r'^info/(?P<course_id>\d+)/$', InfoView.as_view(), name='info'),
    # Course comments
    url(r'^comment/(?P<course_id>\d+)/$', CommentView.as_view(), name='comment'),
    # Add a comment
    url(r'^addcomment/$', AddComment.as_view(), name='addcomment'),
    # Video playback
    url(r'^video/(?P<video_id>\d+)/$', VideoView.as_view(), name='video'),
]
|
Liyb5/web
|
EduOnline/BlueSky/apps/courses/urls.py
|
urls.py
|
py
| 635 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5549915372
|
from django.shortcuts import render, redirect
from django.utils import timezone
from .forms import ActivateSertificateForm
from programs.models import Category
from .models import Sertificate
# Create your views here.
def activate_sertificate(request):
    """Activate a gift certificate by number for the logged-in user.

    POST: look up the submitted certificate number. If it exists and is
    not yet active, bind it to request.user, stamp the activation time,
    and redirect to the success page; an already-active certificate
    redirects to 'outdated/', an unknown number to 'unsuccess/'.
    GET renders an empty activation form.
    """
    if request.method == "POST":
        form = ActivateSertificateForm(request.POST)
        if form.is_valid():
            # Unsaved model instance; only its `number` is used for lookup.
            sert = form.save(commit=False)
            try:
                sertificate = Sertificate.objects.get(number = sert.number)
                if not sertificate.is_active:
                    # Bind the certificate to the current user and mark
                    # it as used.
                    sertificate.user = request.user
                    sertificate.activation_date = timezone.now()
                    sertificate.is_active = True
                    sertificate.save()
                    return redirect('success/')
                else:
                    # Certificate was already activated earlier.
                    return redirect('outdated/')
            except Exception as e:
                # NOTE(review): broad except — presumably meant to catch
                # Sertificate.DoesNotExist; confirm and narrow.
                return redirect('unsuccess/')
    else:
        form = ActivateSertificateForm()
    categories = Category.objects.all();
    return render(request, 'activation/check_sertificate.html', {'form': form, 'categories': categories })
def return_success_message(request):
    """Render the 'certificate activated successfully' page."""
    context = {'categories': Category.objects.all()}
    return render(request, 'activation/success.html', context)
def return_unsuccess_message(request):
    """Render the 'certificate number not found' page."""
    context = {'categories': Category.objects.all()}
    return render(request, 'activation/unsuccess.html', context)
def return_outdated_message(request):
    """Render the 'certificate already activated' page."""
    context = {'categories': Category.objects.all()}
    return render(request, 'activation/outdated.html', context)
|
vladisgrig/babeo
|
activation/views.py
|
views.py
|
py
| 1,655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14254128596
|
from __future__ import absolute_import, division, print_function, unicode_literals
from _GTW import GTW
from _MOM import MOM
from _TFL import TFL
import _GTW._OMP._PAP
from _MOM._Graph.Spec import Attr, Child, ET, IS_A, Role, Skip
import _MOM._Graph.Command
import _MOM._Graph.Entity
from _TFL._D2 import Cardinal_Direction as CD
from _TFL.I18N import _, _T
import _TFL._Meta.Once_Property
def graph (app_type) :
"""Class/association graph displaying PAP partial object model"""
ag_p = hasattr (GTW.OMP.PAP, "Adhoc_Group")
ass_p = hasattr (GTW.OMP.PAP, "Association")
le_p = hasattr (GTW.OMP.PAP, "Legal_Entity")
shp_off = CD.S
result = MOM.Graph.Spec.Graph \
( app_type
, ET.PAP.Subject_has_Property
( Role.left
( Child.PAP.Person
( offset = CD.N
)
, offset = CD.W
)
, Role.right
( Child.PAP.Address
( ET.PAP.Address_Position
( offset = CD.S if (ag_p or le_p) else CD.W
)
, offset = shp_off + 3 * CD.W
)
, Child.PAP.Email
( offset = shp_off + 2 * CD.W
)
, Child.PAP.Phone
( offset = shp_off + 1 * CD.W
)
, Child.PAP.Url
( offset = shp_off
)
, offset = CD.E
)
)
, desc = _T
("Class/association graph displaying PAP partial object model")
, title = _T ("PAP graph")
)
if hasattr (GTW.OMP.PAP, "Group") :
g_args = ()
if hasattr (GTW.OMP.PAP, "Person_in_Group") :
g_args = \
( ET.PAP.Person_in_Group
( Role.left ()
, Role.right ()
, offset = CD.N
)
,
)
result ["PAP.Subject"]._add \
( Child.PAP.Group
( * g_args
, offset = CD.W
)
)
if le_p :
result ["PAP.Group"]._add \
( Child.PAP.Legal_Entity
( offset = CD.W
, source_side = "E"
, target_side = "W"
)
)
if ass_p :
result ["PAP.Legal_Entity"]._add \
( Child.PAP.Association
( offset = CD.N
)
)
if hasattr (GTW.OMP.PAP, "Company") :
result ["PAP.Legal_Entity"]._add \
( Child.PAP.Company
( offset = CD.S
)
)
if ag_p :
result ["PAP.Group"]._add \
( Child.PAP.Adhoc_Group
( offset = CD.W + CD.S *
(-1 if not ass_p else (2 if le_p else 0))
, source_side = "E"
, target_side = "W"
)
)
if hasattr (GTW.OMP.PAP, "IM_Handle") :
result ["PAP.Property"]._add \
( Child.PAP.IM_Handle
( offset = shp_off + CD.E
)
)
if hasattr (GTW.OMP.PAP, "Nickname") :
result ["PAP.Property"]._add \
( Child.PAP.Nickname
( offset = CD.E
, source_side = "W"
, target_side = "E"
)
)
if hasattr (GTW.OMP.PAP, "Person_has_Account") :
result ["PAP.Person"]._add \
( ET.PAP.Person_has_Account
( Role.left (guide_offset = 1.0)
, Role.right
( offset = CD.E
)
, offset = CD.E
)
)
return result
# end def graph
class Command (MOM.Graph.Command) :
@TFL.Meta.Class_and_Instance_Once_Property
def PNS (self) :
return GTW.OMP.PAP
# end def PNS
def import_all (self) :
import _GTW._OMP._Auth
self.__super.import_all ()
GTW.OMP.Auth._Import_All ()
# end def import_all
# end class Command
if __name__ != "__main__" :
GTW.OMP.PAP._Export_Module ()
else :
Command () ()
### __END__ GTW.OMP.PAP.graph
|
xiaochang91/tapyr
|
_GTW/_OMP/_PAP/graph.py
|
graph.py
|
py
| 4,661 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31535390686
|
from functools import wraps
def vowel_filter(function):
    """Decorator: call *function* (which returns an iterable of strings)
    and return only the items that are vowels, case-insensitively, with
    'y' counted as a vowel. Original casing is preserved.
    """
    vowels = {"a", "e", "i", "o", "u", "y"}

    @wraps(function)
    def wrapper():
        # Bug fix: the result list used to live in the decorator's
        # closure, so every call kept appending to the same list and the
        # output grew on repeated calls. Building it per call makes the
        # wrapped function idempotent.
        return [ch for ch in function() if ch.lower() in vowels]

    return wrapper
@vowel_filter
def get_letters():
    """Return a sample list of letters; the decorator filters the result
    down to the vowels it contains."""
    return ["a", "b", "c", "d", "e"]
print(get_letters())
|
iliyan-pigeon/Soft-uni-Courses
|
pythonProjectOOP/decorators/vowels_filter.py
|
vowels_filter.py
|
py
| 416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7438577752
|
from pathlib import Path
from vesper.tests.test_case import TestCase
from vesper.util.preference_manager import PreferenceManager
import vesper.tests.test_utils as test_utils
_DATA_DIR_PATH = Path(test_utils.get_test_data_dir_path(__file__))
_PREFERENCE_FILE_PATH = _DATA_DIR_PATH / 'Preferences.yaml'
_EMPTY_PREFERENCE_FILE_PATH = _DATA_DIR_PATH / 'Empty Preferences.yaml'
_NON_MAPPING_PREFERENCE_FILE_PATH = \
_DATA_DIR_PATH / 'Non Mapping Preferences.yaml'
_MALFORMED_PREFERENCE_FILE_PATH = _DATA_DIR_PATH / 'Malformed Preferences.yaml'
class PreferenceManagerTests(TestCase):
    """Tests for PreferenceManager: dotted-name lookup, defaults, and
    graceful handling of empty, malformed, non-mapping, and missing
    preference files (each should yield an empty preference set)."""

    def test_get(self):
        # Each case pairs a (possibly dotted) preference name with its
        # expected value; dotted names traverse nested categories.
        preferences = _get_preferences(_PREFERENCE_FILE_PATH)
        cases = (
            ('one', 1),
            ('category_a.two', 2),
            ('category_a.three', 'three'),
            ('category_a.category_b.forty_five', 45),
            ('category_a.category_b.fifty six', 56),
            ('category_a.category_b', {'forty_five': 45, 'fifty six': 56})
        )
        # Both file-loaded and YAML-loaded preferences must agree.
        for p in preferences:
            for name, value in cases:
                self.assertTrue(name in p)
                self.assertEqual(p[name], value)
                self.assertEqual(p.get(name), value)

    def test_get_of_nonexistent_preferences(self):
        # Missing names: `in` is False, [] raises KeyError, get() falls
        # back to None or the supplied default.
        preferences = _get_preferences(_PREFERENCE_FILE_PATH)
        cases = (
            'bobo',
            'category_a.bobo'
        )
        for p in preferences:
            for name in cases:
                self.assertFalse(name in p)
                self.assertRaises(KeyError, p.__getitem__, name)
                self.assertIsNone(p.get(name))
                self.assertEqual(p.get(name, 10), 10)

    def test_empty_preference_file(self):
        self._test_bad_preference_file(_EMPTY_PREFERENCE_FILE_PATH)

    def _test_bad_preference_file(self, file_path):
        # Helper: a bad file must produce zero preferences, not an error.
        preferences = _get_preferences(file_path)
        for p in preferences:
            self.assertEqual(len(p), 0)

    def test_malformed_preference_file(self):
        self._test_bad_preference_file(_MALFORMED_PREFERENCE_FILE_PATH)

    def test_non_mapping_preference_file(self):
        self._test_bad_preference_file(_NON_MAPPING_PREFERENCE_FILE_PATH)

    def test_nonexistent_preference_file(self):
        # Loading a path that does not exist must leave the manager empty.
        manager = PreferenceManager()
        manager.load_preferences_from_file('nonexistent')
        self.assertEqual(len(manager.preferences), 0)
def _get_preferences(file_path):
    """Build two equivalent preference objects for *file_path*: one
    loaded directly from the file and one parsed from its YAML text,
    so tests can verify both construction paths agree."""
    from_file = PreferenceManager.create_for_file(file_path).preferences
    with open(file_path) as yaml_file:
        yaml_text = yaml_file.read()
    from_yaml = PreferenceManager.create_for_yaml(yaml_text).preferences
    return (from_file, from_yaml)
|
HaroldMills/Vesper
|
vesper/util/tests/test_preference_manager.py
|
test_preference_manager.py
|
py
| 2,906 |
python
|
en
|
code
| 47 |
github-code
|
6
|
72935917627
|
import os
from celery import Celery
from celery.schedules import crontab

# Point Celery at the Django settings module before creating the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')

app = Celery('backend')
# Read all CELERY_* options from Django's settings.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Beat schedule: run the e-mail date check once a day at 01:00.
app.conf.beat_schedule = {
    'check_mail_everyday': {
        'task': 'emailService.task.check_dates_task',
        'schedule': crontab(hour=1, minute=00),
    }
}

# Auto-discover `tasks.py` modules in all installed Django apps.
app.autodiscover_tasks()
|
anunayajoshi/futureme
|
backend/backend/celery.py
|
celery.py
|
py
| 426 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33702704860
|
import openpyxl as xl
import xlwings as xw
from Worksheet import Worksheet,QPreviewItem
from Workcell import *
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from copy import copy
#import time
import datetime
##################################################
# class for PS sheet handling
##################################################
UP = 0
DOWN = 1
nn = 0
class PSsheet(Worksheet):
    """Worksheet specialisation for "PS" sheets.

    Builds Qt preview models (status / subject matter / container name /
    xmlname columns) from the sheet contents and wraps row-manipulation
    and protection operations on the underlying xlwings worksheet via
    the Excel COM API.

    NOTE(review): this code is Python-2 era (`.next()`, `long` in a
    commented line, `map` consumed as a list) — confirm the runtime.
    """

    def __init__(self, sheet=None, sheet_wr=None):
        super(PSsheet, self).__init__(sheet, sheet_wr)
        # Cached preview rows; rebuilt by update_model().
        self._preview_model_list = []
        self._extended_preview_model_list = None
        self.init_ps_sheet()
        self.init_ps_model()

    def __del__(self):
        super(PSsheet, self).__del__()
        del self._preview_model
        del self._extended_preview_model_list

    def init_ps_sheet(self):
        """Locate the PS-specific header columns by their header text."""
        if self._xmlname != None:
            self._status = self.search_header_by_value(u'Status(POR,INIT,PREV)')
            self._subject_matter = self.search_header_by_value(u'Subject Matter/\nFunctional Area')
            self._container_name = self.search_header_by_value(u'Container Name\nTechnical Specification')

    def init_ps_model(self):
        """Create an empty 4-column Qt preview model with headers."""
        self._preview_model = QStandardItemModel()
        self._preview_model.setColumnCount(4)
        self._preview_model.setHeaderData(0, Qt.Horizontal, 'status')
        self._preview_model.setHeaderData(1, Qt.Horizontal, 'subject matter')
        self._preview_model.setHeaderData(2, Qt.Horizontal, 'container name')
        self._preview_model.setHeaderData(3, Qt.Horizontal, 'xmlname')
        self._extended_preview_model = None

    def update_model(self):
        """Rebuild the preview model from the current sheet contents.

        Returns 'error' when any row fails to build.
        NOTE(review): the bare except silently hides the failing row's
        exception — consider narrowing/logging.
        """
        super(PSsheet, self).update_model()
        self._preview_model_list = []
        self.init_ps_model()
        self.init_ps_sheet()
        if self._xmlname != None:
            try:
                for xml_name in self.xml_names():
                    item_status = QPreviewItem(self._status.get_item_by_xmlname(xml_name))
                    item_subject_matter = QPreviewItem(self._subject_matter.get_item_by_xmlname(xml_name))
                    item_container_name = QPreviewItem(self._container_name.get_item_by_xmlname(xml_name))
                    item_xml_name = QPreviewItem(xml_name)
                    self._preview_model.appendRow((item_status, item_subject_matter, item_container_name, item_xml_name))
                    self._preview_model_list.append((item_status.value, item_subject_matter.value, item_container_name.value, item_xml_name.value))
            except:
                return 'error'

    def status(self):
        """Return Status wrappers for the non-empty cells of the status
        column (trailing empty cells are trimmed)."""
        # `.next()` on the iter_cols generator is Python-2 syntax.
        cells = list(self._worksheet.iter_cols(min_col=self._status.col, min_row=self._status.row+1, max_col=self._status.col, max_row=self.max_row).next())
        while cells[-1].value == None:
            cells.pop()
        return map(lambda x: Status(x, self._worksheet_wr), cells)

    def cell(self, row, col):
        # Read via openpyxl (the xlwings variant is kept for reference).
        #return self._worksheet_wr.range(row,col)
        return self._worksheet.cell(row=row, column=col)

    def auto_fit(self, cols):
        """Auto-fit the width of each column in *cols* via COM."""
        for col in cols:
            for i in range(1):
                self._worksheet_wr.api.Columns(col).AutoFit()

    def add_row(self, start_pos, offset, orientation):
        """Insert *offset* rows at *start_pos*.

        NOTE(review): *orientation* is accepted but unused; -4121 is an
        Excel COM shift constant (presumably xlShiftDown) — confirm.
        """
        loop = offset
        while loop > 0:
            self._worksheet_wr.api.Rows[start_pos].Insert(-4121)
            loop -= 1

    def delete_row(self, start_pos, offset):
        """Delete *offset* rows starting at *start_pos* (1-based)."""
        self._worksheet_wr.range('%d:%d' % (start_pos, start_pos+offset-1)).api.Delete()

    def lock_row(self, row, status):
        # NOTE(review): *status* is unused; the row is always locked.
        self._worksheet_wr.api.Rows[row-1].Locked = True

    def lock_sheet(self):
        # Enable worksheet protection (locks take effect only then).
        self._worksheet_wr.api.Protect()

    def lock_sheet_status(self):
        # True when worksheet protection is currently enabled.
        return self._worksheet_wr.api.ProtectContents

    def unlock_sheet(self):
        self._worksheet_wr.api.Unprotect()

    def unlock_all_cells(self):
        self._worksheet_wr.api.Cells.Locked = False

    def extended_preview_model(self):
        """Lazily build a Qt model mirroring every cell of the sheet
        (unreadable cells become empty strings)."""
        if self._extended_preview_model == None:
            self._extended_preview_model = QStandardItemModel()
            self._extended_preview_model.setColumnCount(self._worksheet.max_column)
            for row in self._worksheet.rows:
                item_row = []
                for cell in row:
                    try:
                        if cell.value == None:
                            item = QStandardItem('')
                        else:
                            item = QStandardItem(cell.value)
                    except:
                        item = QStandardItem('')
                    item_row.append(item)
                self._extended_preview_model.appendRow(item_row)
        return self._extended_preview_model

    @property
    def extended_preview_model_list(self):
        """Lazily build a list-of-lists snapshot of the sheet as strings.

        Datetime cells are rendered as 'Y-M-D' shifted forward one day —
        presumably to compensate for an Excel date offset; confirm.
        """
        if self._extended_preview_model_list == None:
            self._extended_preview_model_list = []
            for row in self._worksheet.iter_rows(min_row=self.min_row, max_row=self.max_row, min_col=self.min_col, max_col=self.max_col):
                item_row = []
                for cell in row:
                    #item_row.append(cell.value if cell.value is not None else '')
                    try:
                        item_row.append('' if cell.value is None else str('%s-%s-%s' % ((cell.value+datetime.timedelta(days=1)).timetuple().tm_year, (cell.value+datetime.timedelta(days=1)).timetuple().tm_mon, (cell.value+datetime.timedelta(days=1)).timetuple().tm_mday)) if type(cell.value) is datetime.datetime else str(cell.value))
                    except:
                        item_row.append('')
                self._extended_preview_model_list.append(item_row)
        return self._extended_preview_model_list

    @property
    def preview_model(self):
        # Plain-tuple snapshot built by update_model().
        return self._preview_model_list
|
DericGitHub/excel-operator
|
model/PSsheet.py
|
PSsheet.py
|
py
| 6,063 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11004554798
|
class Solution:
    def splitListToParts(self, root: ListNode, k: int) -> List[ListNode]:
        """Split the linked list headed at *root* into *k* consecutive parts.

        Parts are as equal in length as possible: the first ``total % k``
        parts get one extra node. Trailing entries are None when k exceeds
        the list length.

        Fixes in this revision: the original took the remainder modulo 3
        instead of k, referenced an undefined name ``m``, and advanced one
        node too far before cutting (producing parts of size+1).
        """
        # Measure the list length.
        total = 0
        node = root
        while node:
            total += 1
            node = node.next
        # Base part size and how many leading parts get one extra node.
        base, extra = divmod(total, k)
        res = []
        cur = root
        for i in range(k):
            res.append(cur)
            size = base + (1 if i < extra else 0)
            if cur:
                # Walk to the last node of this part, then detach it.
                for _ in range(size - 1):
                    cur = cur.next
                nxt = cur.next
                cur.next = None
                cur = nxt
        return res
|
xixihaha1995/CS61B_SP19_SP20
|
temp/toy/python/725. Split Linked List in Parts.py
|
725. Split Linked List in Parts.py
|
py
| 601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27831010517
|
import numpy as np
import netCDF4 as nc4
import net_radiation
import atmospheric_parameters
import wind_shear_velocity
import datetime
# Using TerraClimate
# 2.5 arcminute (1/24 degree) resolution: ~5 km N-S
# Import step
# ... load files here or with a CLI
years = range(1958, 2019)        # full TerraClimate record
months_zero_indexed = range(12)  # 0 = January ... 11 = December
TerraClimateDir = '/media/andy/data1/TerraClimate/'  # local data root
def extract_data(varnc, varname, varmonth_zero_indexed=None):
    """Pull a variable out of a netCDF dataset as a plain ndarray,
    replacing fill values with NaN.

    When *varmonth_zero_indexed* is given, only that (zero-indexed)
    month slice is extracted; otherwise the full variable is returned.
    """
    variable = varnc.variables[varname]
    masked = (variable[:] if varmonth_zero_indexed is None
              else variable[varmonth_zero_indexed])
    raw = masked.data
    raw[raw == masked.fill_value] = np.nan
    return raw
# Get lats and lons from one file
srad_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_srad_1958.nc')
lats = extract_data(srad_nc, 'lat')
lons = extract_data(srad_nc, 'lon')
#LONS, LATS = np.meshgrid (lons, lats)
# Shear velocity of winds: tool to compute from velocity
ustar_interp = wind_shear_velocity.create_lookup_table_one_step()
# Elevation (GEBCO bathymetry/topography resampled to the same grid);
# flipped N-S to match the TerraClimate latitude ordering.
elevation_nc = nc4.Dataset(TerraClimateDir+'Gebco_2020_2_point_5_arcminute.nc')
elevation = extract_data(elevation_nc, 'value')
elevation = elevation[::-1]
# Heat capacity of air
specific_heat_capacity_of_air = 1.005 # approx. constant at 1 atm
# Humidity minor impact below 40C or so
# But this is an approximation!
cp = specific_heat_capacity_of_air # Easier
# Water density
rho_w = 1000.
# Latent heat of vaporization for water
Lv = 2.5E6
DeltaH_vap = Lv # to make me happier
# Ratio of molecular weight of water vapor to dry air
epsilon = 0.622
# Days in month, for weighting (Feb averaged over leap years)
days_in_month = [31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# Evaporation array (accumulated over years, then averaged)
E = np.zeros(elevation.shape)
years = years[:13] # 1970 and before: before so much GW
# Accumulate daily-weighted evaporation over every year and month.
for year in years:
    print(year)
    # Incoming solar radiation (monthly average)
    srad_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_srad_'+str(year)+'.nc')
    # Maximum daily temperature (monthly average)
    tmax_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_tmax_'+str(year)+'.nc')
    # Minimum daily temperature (monthly average)
    tmin_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_tmin_'+str(year)+'.nc')
    # Wind speed (monthly average)
    ws_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_ws_'+str(year)+'.nc')
    # Vapor pressure (monthly average)
    vap_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_vap_'+str(year)+'.nc')
    # Now compute for each month
    for month_zi in months_zero_indexed:
        print(month_zi+1)
        # Data
        srad = extract_data(srad_nc, 'srad', month_zi)
        tmax = extract_data(tmax_nc, 'tmax', month_zi)
        tmin = extract_data(tmin_nc, 'tmin', month_zi)
        ws = extract_data(ws_nc, 'ws', month_zi)
        vap = extract_data(vap_nc, 'vap', month_zi) * 1000. # kPa to Pa
        # Average radiation on the midday of the month; could be more precise
        date = datetime.date(year, month_zi+1, int(np.round(days_in_month[month_zi]/2.)))
        #elevation = 2000. # placeholder
        #julian_day = 205 # placeholder
        #vap = .03*101325 # placeholder
        albedo = 0.06
        # Calculations:
        # Net Radiation
        Rn = net_radiation.computeNetRadiation(elevation, date, lats, len(lons),
                                               tmax, tmin, vap, srad, albedo)
        # Shear velocity of winds
        ustar = ustar_interp(ws)
        # Vapor-pressure deficit
        # We don't have max and min humidity
        VPD = atmospheric_parameters.compute_vpd( (tmax+tmin)/2., vap )
        # Atmospheric pressure
        P = atmospheric_parameters.compute_atmospheric_pressure(elevation)
        # Atmospheric density (ignoring temperature + humidity effects)
        rho_a = atmospheric_parameters.compute_atmospheric_density(elevation,
                                                                   (tmax + tmin)/2.)
        # Clausius-Clayperon phase-change slope
        Delta = ( atmospheric_parameters.compute_Delta_e_sat( tmax )
                  + atmospheric_parameters.compute_Delta_e_sat( tmin ) ) / 2.
        # Penman-style evaporation estimate for this month.
        _E = (Rn + cp*rho_a*ustar**2/(Delta*ws) * VPD) \
             / ( rho_w*Lv + P*cp*rho_w/epsilon )
        _E[_E<0] = 0 # ignore condensation; I think it's spurious (Antarctica?)
        # Weight by the number of days in the month.
        E += _E*days_in_month[month_zi]

# Convert the day-weighted sum into a long-term mean rate.
E /= (365.25*len(years))
# Export
# NOTE(review): these re-imports shadow the module-level nc4/np imports;
# harmless but redundant.
from netCDF4 import Dataset
import numpy
import time
#path to the file you want to open or create
location_string="evaporation_002p5_arcmin.nc"
# Create nc file
rootgrp = Dataset(location_string,"w",format="NETCDF4")
# Dimensions
lat=rootgrp.createDimension("lat", len(lats))
lon=rootgrp.createDimension("lon", len(lons))
value = rootgrp.createDimension("evaporation", None)
# Values
latitudes = rootgrp.createVariable("lat", "f4", ("lat",))
longitudes = rootgrp.createVariable("lon", "f4", ("lon",))
values = rootgrp.createVariable("value", "f4" , ("lat", "lon",))
latitudes[:] = lats
longitudes[:] = lons
values[:] = E
# Units
latitudes.units = "degrees north"
longitudes.units = "degrees east"
values.units = "metres per second"
# Metadata
rootgrp.description = "Evaporation derived from TerraClimate data products (see https://github.com/umn-earth-surface/TerraClimate-potential-open-water-evaporation)."
rootgrp.history = "created" + time.ctime(time.time())
values.Long_Name = "Open-water evaporation"
# Save
rootgrp.close()
|
MNiMORPH/TerraClimate-potential-open-water-evaporation
|
penman.py
|
penman.py
|
py
| 5,465 |
python
|
en
|
code
| 2 |
github-code
|
6
|
23380015513
|
from gensim.models.doc2vec import Doc2Vec
import pickle
def get_most_similar_docs(test_data, model_path):
    """Find the five documents most similar to *test_data* using a
    trained gensim Doc2Vec model, print them, and return their tags.

    :param test_data: whitespace-separated document text
    :param model_path: path to a saved Doc2Vec model
    :return: list of the 5 most-similar document tags
    """
    # Load the Doc2Vec model
    model = Doc2Vec.load(model_path)
    # Split the test_data string into a list of words
    test_data_words = test_data.split()
    # Infer the vector for the test document
    inferred_vector = model.infer_vector(test_data_words)
    # NOTE(review): hard-coded relative path — assumes the process is
    # started from the project root; confirm.
    with open('./homework_6/data/tagged_data.pkl', 'rb') as file:
        loaded_tagged_data = pickle.load(file)
    # Get the 5 most similar documents based on the inferred vector
    sims = model.dv.most_similar([inferred_vector], topn=5)
    idx = [sims[i][0] for i in range(5)]
    # Print the most similar documents
    print('Test Document: «{}»\n'.format(' '.join(test_data_words)))
    print(u'SIMILAR DOCS PER MODEL %s:\n' % model)
    for label, index in [('1', 0), ('2', 1), ('3', 2), ('4', 3), ('5', 4)]:
        print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(loaded_tagged_data[int(sims[index][0])].words)))
    return idx
# Example invocation with a pre-tokenised wine-review snippet.
test_data = 'exotic yellow spice note meet lean lime pith light crisp nose old vine expression historic winery . meyer lemon rind juice show brightly palate grippy chalkiness complement rich lemon curd flavor'
get_most_similar_docs(test_data, './homework_6/models/doc2vec.model')
|
Tokarevmm/homework5
|
homework_6/recommend.py
|
recommend.py
|
py
| 1,287 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44502183300
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms.validators import DataRequired, Length, ValidationError
from urllib.parse import urlparse
from app.models import Business
def business_name_exists(form, field):
    """WTForms validator: reject a business name that is already taken."""
    existing = Business.query.filter(Business.name == field.data).first()
    if existing:
        raise ValidationError("Business with this name already exists.")
def valid_zip_code(form, field):
    """WTForms validator: a ZIP code must be exactly five digits."""
    zip_code = field.data
    if len(zip_code) != 5 or not zip_code.isdigit():
        raise ValidationError("Invalid ZIP Code.")
def valid_phone_number(form, field):
    """WTForms validator: a phone number must be exactly ten digits."""
    number = field.data
    if len(number) != 10 or not number.isdigit():
        raise ValidationError("Invalid phone number.")
# def valid_url(form, field):
# try:
# result = urlparse(field.data)
# if not all([result.scheme, result.netloc]):
# raise ValueError()
# except ValueError:
# raise ValidationError("Invalid URL.")
class BusinessForm(FlaskForm):
    """Form for creating/editing a Business record.

    NOTE(review): the uniqueness, ZIP, phone, and URL validators are all
    commented out below — confirm whether they should be re-enabled.
    """
    # class Meta:
    #     csrf = False
    name = StringField('Business Name', validators=[
        Length(min=1, max=50),
        DataRequired(),
        # business_name_exists
    ])
    address = StringField('Address', validators=[
        Length(min=1, max=255),
        DataRequired()
    ])
    city = StringField('City', validators=[
        Length(min=1, max=50),
        DataRequired()
    ])
    state = StringField('State', validators=[
        Length(min=1, max=25),
        DataRequired()
    ])
    zip_code = StringField('Zip Code', validators=[
        Length(min=1, max=10),
        DataRequired(),
        # valid_zip_code
    ])
    phone_number = StringField('Phone Number', validators=[
        Length(min=1, max=30),
        DataRequired(),
        # valid_phone_number
    ])
    # Foreign keys to the category and owning user.
    category_id = IntegerField('Category ID', validators=[DataRequired()])
    owner_id = IntegerField('Owner ID', validators=[DataRequired()])
    website = StringField('Website', validators=[
        Length(min=1, max=255),
        DataRequired(),
        # valid_url
    ])
    about = StringField('About', validators=[
        Length(min=1, max=500),
        DataRequired()
    ])
    type = StringField('Type', validators=[
        Length(min=1, max=255),
        DataRequired()
    ])
|
stroud91/ReactFlaskProject
|
app/forms/bussiness_form.py
|
bussiness_form.py
|
py
| 2,324 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73535922429
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
def output_result(path, matrix):
    """Write *matrix* to *path*: the first line is the row count, then
    one row per line as comma-separated values (the Python list repr
    without its surrounding brackets).
    """
    # Context manager guarantees the file is closed even on error; the
    # original opened with 'w+' (read mode unused) and closed manually.
    with open(path, "w") as f:
        f.write(str(len(matrix)) + '\n')
        for row in matrix:
            # str(row)[1:-1] strips the '[' and ']' from the repr.
            f.write(str(row)[1:-1])
            f.write('\n')
def read_buildings(path_to_buildings):
    """Read building rectangles from a text file.

    Each line holds four floats ``x1 y1 x2 y2`` in image coordinates;
    the y values are flipped (``1 - y``) into math coordinates so the
    returned tuple is (x1, y1, x2, y2) with y1 <= y2.

    :param path_to_buildings: path to the text file of coordinates
    :return: list of (x1, y1, x2, y2) tuples
    """
    buildings = []
    # `with` fixes the original's leaked file handle; split() with no
    # argument tolerates repeated whitespace, and blank lines are
    # skipped instead of crashing float().
    with open(path_to_buildings) as building_file:
        for line in building_file:
            parts = line.split()
            if not parts:
                continue
            x1, y2, x2, y1 = (float(parts[0]), 1 - float(parts[1]),
                              float(parts[2]), 1 - float(parts[3]))
            buildings.append((x1, y1, x2, y2))
    return buildings
def isInsideBuildings(xx, yy, buildings):
    """Return True when point (xx, yy) lies OUTSIDE every building.

    Note: despite the name, this is an "is free space" predicate — it
    yields False when the point falls inside (or on the edge of) any
    building rectangle.
    """
    return not any(
        x1 <= xx <= x2 and y1 <= yy <= y2
        for (x1, y1, x2, y2) in buildings
    )
def get_cond_check_func(buildings):
    """Bind *buildings* into a predicate ``(x, y) -> bool`` that is
    True when the point lies outside all of the given buildings."""
    def outside(x, y):
        return isInsideBuildings(x, y, buildings)
    return outside
class ConvectionDiffusion:
    """Explicit finite-difference solver for a 2-D convection-diffusion
    problem on the unit square, with rectangular obstacles excluded via
    *cond_func* (True = free space).

    NOTE(review): `iteration`, `init_matrix` boundary choice, and the
    use of the module-level global ``N`` (instead of ``self.N``) inside
    `iteration` all assume the script's ``__main__`` setup — confirm
    before reusing this class elsewhere.
    """

    def __init__(self, max_t, l1, l2, k, N, cond_func, eps):
        self.max_t = max_t            # iteration cap
        self.l1 = l1                  # convection velocity, x
        self.l2 = l2                  # convection velocity, y
        self.k = k                    # diffusion coefficient
        self.N = N                    # grid subdivisions per side
        self.h = 1.0 / N              # grid spacing
        self.eps = eps                # convergence threshold
        self.cond_func = cond_func
        # Time step chosen for stability of the explicit scheme.
        self.tau = 1 / (4 * k * N * N)
        self.U = np.zeros((N + 1, N + 1))
        # Stencil coefficients, ordered [centre, x+1, x-1, y+1, y-1].
        self.coeffs = [1 - 4 * self.tau * k / (self.h * self.h),
                       self.tau * (k / (self.h * self.h) - l1 / (2 * self.h)),
                       self.tau * (k / (self.h * self.h) + l1 / (2 * self.h)),
                       self.tau * (k / (self.h * self.h) - l2 / (2 * self.h)),
                       self.tau * (k / (self.h * self.h) + l2 / (2 * self.h))]

    def check_correctness(self, x, y):
        # True when (x, y) is an interior grid point the scheme updates.
        return 0 <= x < self.N and 0 < y < self.N

    def iteration(self):
        '''
        One iteration of the simple iteration methods
        :return: error
        '''
        # Neighbour offsets matching self.coeffs order.
        dx = [0, 1, -1, 0, 0]
        dy = [0, 0, 0, 1, -1]
        # NOTE(review): uses the module-level global N, not self.N —
        # works only because the script keeps them equal; confirm.
        new_U = np.zeros((N + 1, N + 1))
        for i in range(self.N + 1):
            for j in range(self.N + 1):
                new_U[i, j] = self.U[i, j]
                if not self.cond_func(i / N, j / N):
                    # Inside an obstacle: value carried over unchanged.
                    continue
                else:
                    new_U[i, j] *= self.coeffs[0]
                    for f in range(1, 5):
                        x = i + dx[f]
                        y = j + dy[f]
                        if self.cond_func(x / N, y / N) and self.check_correctness(x, y):
                            new_U[i, j] += self.U[x, y] * self.coeffs[f]
                        else:
                            # Obstacle/boundary neighbour: reflect own value.
                            new_U[i, j] += self.U[i, j] * self.coeffs[f]
        old_U = self.U
        self.U = new_U
        # Max absolute change, scaled by the source amplitude (100).
        return np.max(np.abs((old_U / 100 - new_U / 100)))

    def init_matrix(self):
        # Dirichlet source: the second column is held at 100.
        self.U[:, 1] = 100

    def solve(self):
        '''
        Iterate the explicit scheme until convergence or max_t.
        :return: U and image
        '''
        self.init_matrix()
        for f in range(0, self.max_t):
            error = self.iteration()
            print(error)
            if error < self.eps:
                break
        fig = plt.imshow(self.U / 100)
        plt.colorbar(fig)
        plt.show()
        return self.U / 100

    def optimized_solve(self):
        """Same scheme as `solve`, but assembled once as a sparse matrix
        so each iteration is a single matvec; also records animation
        frames every 100 iterations.

        :return: (solution grid, list of frame arrays)
        """
        vars = (self.N + 1) * (self.N + 1)
        dx = [0, 1, -1, 0, 0]
        dy = [0, 0, 0, 1, -1]
        # lil_matrix is efficient for incremental assembly; converted to
        # csc below for fast multiplication.
        A = sparse.lil_matrix((vars, vars))
        b = np.zeros(vars)
        frames = []
        for i in range(vars):
            # Flattened index -> grid coordinates (row-major by y).
            y = i // (self.N + 1)
            x = i % (self.N + 1)
            A[i, i] = self.coeffs[0]
            if not self.cond_func(x / (self.N + 1), y / (self.N + 1)) or not self.check_correctness(x, y):
                continue
            if x == 0:
                # Source column: identity row keeps the boundary value.
                A[i, i] = 1
                continue
            for j in range(1, 5):
                xx = x + dx[j]
                yy = y + dy[j]
                if xx == 0:
                    # Neighbour is the source (value 1 after scaling).
                    b[i] += self.coeffs[j]
                    continue
                if not self.cond_func(xx / (self.N + 1), yy / (self.N + 1)) or not self.check_correctness(xx, yy):
                    A[i, i] += self.coeffs[j]
                else:
                    A[i, yy * (self.N + 1) + xx] = self.coeffs[j]
        A = sparse.csc_matrix(A)
        x = np.zeros(vars)
        for i in range(self.N + 1):
            x[i * (self.N + 1)] = 0
        for f in range(self.max_t):
            x_new = A @ x + b
            # Relative max-change convergence criterion.
            error = np.max(np.abs((x_new - x) / np.maximum(1, x)))
            if f % 100 == 0:
                frames.append(x_new.reshape((self.N + 1, self.N + 1)))
            print(error)
            if error < self.eps:
                x = x_new
                break
            x = x_new
        answer = x.reshape((self.N + 1, self.N + 1))
        fig = plt.imshow(answer)
        plt.colorbar(fig)
        plt.show()
        return answer, frames
def animate(frames):
    """Render *frames* (list of 2-D arrays) as an mp4 animation.

    NOTE(review): requires ffmpeg to be installed and on PATH.
    """
    fig = plt.figure()
    ims = []
    from matplotlib import animation
    i = 0
    for frame in frames:
        print(frame.shape)
        im = plt.imshow(frame, animated=True)
        ims.append([im])
        i += 1
    plt.colorbar(im)
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=60, metadata=dict(artist='Me'), bitrate=260)
    ani = animation.ArtistAnimation(fig, ims, interval=17, blit=True,
                                    repeat_delay=1000)
    # ani.save('diffusion.html')
    ani.save('diffusion.mp4', writer=writer)
    plt.show()
if __name__ == "__main__":
    # Solver configuration: iteration cap, convection velocities
    # (l_1, l_2), diffusion coefficient k, grid resolution N, and
    # convergence tolerance eps.
    max_t = 100000
    l_1 = 1
    l_2 = 0.0
    k = 0.5
    N = 300
    eps = 1e-6
    buildings = read_buildings("buildings.txt")
    cond_func = get_cond_check_func(buildings)
    solver = ConvectionDiffusion(max_t, l_1, l_2, k, N, cond_func, eps)
    u, frames = solver.optimized_solve()
    animate(frames)
    output_result("output.txt", u)
|
arsee2/numerical_modelling_diffusion_convection_process
|
nm_project.py
|
nm_project.py
|
py
| 6,194 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23066216205
|
import wild_pokemon, wondertrade, pokemon
def index_species(symbols, rom, proj, ps):
""" Creates an index for all species
Parameters:
symbols : The symbol table
rom : The rom
proj : The pymap project
ps : The Pstring parser
Returns: A list of lists containing IndexEntry instances """
species_index = [[] for _ in proj.constants.values("species")]
wild_pokemon.index_wild_pokemon(species_index, symbols, rom, proj, ps, wild_pokemon_table_symbol="wild_pokemon")
wondertrade.index_wondertrade(species_index, symbols, rom)
for i, species in enumerate(proj.constants.values("species")):
for entry in species_index[i]:
print("Species", pokemon.species_to_name(i, rom, proj, ps, symbols), entry.__dict__)
|
Fredd40/Violet_Sources
|
tools/indexer/core.py
|
core.py
|
py
| 766 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18004211915
|
import os
import numpy as np
import torch
import transforms3d
def plane2pose(plane_parameters):
    """Convert plane parameters (nx, ny, nz, d) into a 4x4 float32 pose
    matrix whose third rotation row is the plane normal and whose z
    translation is d."""
    normal = plane_parameters[:3]
    # Choose a second axis orthogonal to the normal, picking the more
    # numerically stable branch based on the dominant normal component.
    second = np.zeros_like(normal)
    if normal[2] * normal[2] <= 0.5:
        second[0], second[1], second[2] = -normal[1], normal[0], 0
    else:
        second[0], second[1], second[2] = -normal[2], 0, normal[0]
    first = np.cross(second, normal)
    pose = np.zeros([4, 4], dtype=np.float32)
    pose[0, :3] = first
    pose[1, :3] = second
    pose[2, :3] = normal
    pose[2, 3] = plane_parameters[3]
    pose[3, 3] = 1
    return pose
def plane2euler(plane_parameters, axes='sxyz'):
    """Convert plane parameters to a translation and Euler angles.

    :param plane_parameters: (nx, ny, nz, d) plane description
    :param axes: transforms3d Euler-axis convention string
    :return: (T, euler) — translation vector and Euler-angle tuple
    """
    pose = plane2pose(plane_parameters)
    # Decompose the affine pose into translation T, rotation R, zoom Z
    # and shear S; only T and R are used here.
    T, R, Z, S = transforms3d.affines.decompose(pose)
    euler = transforms3d.euler.mat2euler(R, axes=axes)
    return T, euler
|
PKU-EPIC/UniDexGrasp
|
dexgrasp_policy/dexgrasp/utils/data_info.py
|
data_info.py
|
py
| 669 |
python
|
en
|
code
| 63 |
github-code
|
6
|
12697223783
|
from telegram import Update
from telegram.ext import (
Updater,
CallbackContext,
run_async,
CommandHandler,
)
from utils import Config
from pkgutil import walk_packages
from types import ModuleType
from typing import Dict
from utils import get_filter
# Discover and import every command module that lives inside this
# package; maps module name -> loaded module object.
submodules: Dict[str, ModuleType] = {
    module_name: loader.find_module(module_name).load_module(module_name)
    for loader, module_name, is_package in walk_packages(__path__)
}
def describe():
    # Help text for this "list all commands" command itself.
    return "列出所有的指令, 需注意列出的指令在当前的环境内不一定可用"


def run(update: Update, context: CallbackContext):
    """Reply with every registered command and its description."""
    update.effective_message.reply_text(
        text="所有指令如下:\n"
        + "\n".join(
            [f"/{command}: {description}"
             for command, description in commands_list]
        ),
        quote=True
    )


# (command name, description) pairs for every submodule, plus this
# listing command itself.
commands_list = tuple(
    (name, module.describe())
    for name, module in submodules.items()
) + (
    (__name__, describe()),
)
def register(updater: Updater):
    """Register every submodule's handlers plus this module's own
    command handler, then publish the command list to Telegram."""
    for module in submodules.values():
        module.register(updater)
    dp = updater.dispatcher
    # Restrict this command to configured watchers only.
    dp.add_handler(CommandHandler(
        __name__, run, filters=get_filter(Config.watchers), run_async=True))
    # dp.add_handler(CommandHandler(__name__, run, filters=Filters.all)) # DEBUG
    # * Unavailable until all commands are implemented (or at least their describe methods return a string with len > 3)
    updater.bot.set_my_commands(commands_list)
|
finall1008/telegram-pusher-bot
|
commands/__init__.py
|
__init__.py
|
py
| 1,483 |
python
|
en
|
code
| 5 |
github-code
|
6
|
30980411340
|
from matplotlib.pyplot import draw
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()

# set screen resolution
resolution = (725,725)
# open a screen of above resolution
screen = pygame.display.set_mode(resolution)

# defining palette colours (global variables) as dictionary
# 'dG'/'dR' are the darker hover variants of green/red.
gameColours={
    'green': (101, 155, 94),
    'dG': (73, 113, 69),
    'red': (200, 70,48),
    'dR' : (135, 47, 31)
}

# storing screen variable values
width = screen.get_width()
height = screen.get_height()
# text_on_screen() not affected by mouse position variables
# Game main menu, with start and exit buttons
def main_menu():
    """Draw the main menu's Play and Quit buttons, darkening whichever
    button the mouse currently hovers over."""
    mx, my = pygame.mouse.get_pos()
    play_rect = pygame.Rect(width/7, (height/2), 200, 100)
    quit_rect = pygame.Rect(((width/7)+200+width/7), (height/2), 200, 100)
    # Hover state selects the darker shade of each button's colour.
    play_colour = gameColours['dG'] if play_rect.collidepoint((mx, my)) else gameColours['green']
    pygame.draw.rect(screen, play_colour, play_rect)
    quit_colour = gameColours['dR'] if quit_rect.collidepoint((mx, my)) else gameColours['red']
    pygame.draw.rect(screen, quit_colour, quit_rect)
# tableGen() not affected by mouse position variables
# getCatPath() not affected by mouse position variables
# updateScore() not affected by mouse position variables
# displayScore() not affected by mouse position variables
# Begins the "lets roll" screen of game, with button to start
def gameTime(score):
    """Draw the "let's roll" screen's roll button with hover shading."""
    mx, my = pygame.mouse.get_pos()
    roll_rect = pygame.Rect(width/3, (height-100), 225, 70)
    # Darker red while hovered, plain red otherwise.
    roll_colour = gameColours['dR'] if roll_rect.collidepoint((mx, my)) else gameColours['red']
    pygame.draw.rect(screen, roll_colour, roll_rect)
# checkWinner() not affected by mouse position variables
# getDice() not affected by mouse position variables
# showDice() not affected by mouse position variables
# gameLogic() not affected by mouse position variables
# user won
def winScreen(die1, die2, num, score):
    """Draw the win screen's Play Again / Quit buttons, with the hovered
    button rendered in its darker shade."""
    mx, my = pygame.mouse.get_pos()
    again_rect = pygame.Rect(width/7, (height-70), 225, 50)
    quit_rect = pygame.Rect(width/7+300, (height-70), 225, 50)
    again_colour = gameColours['dG'] if again_rect.collidepoint((mx, my)) else gameColours['green']
    pygame.draw.rect(screen, again_colour, again_rect)
    quit_colour = gameColours['dR'] if quit_rect.collidepoint((mx, my)) else gameColours['red']
    pygame.draw.rect(screen, quit_colour, quit_rect)
# screen for when user loses
def loseScreen(die1, die2, num, score):
    """Draw the lose screen's Play Again / Quit buttons, with the
    hovered button rendered in its darker shade."""
    mx, my = pygame.mouse.get_pos()
    again_rect = pygame.Rect(width/7, (height-70), 225, 50)
    quit_rect = pygame.Rect(width/7+300, (height-70), 225, 50)
    again_colour = gameColours['dG'] if again_rect.collidepoint((mx, my)) else gameColours['green']
    pygame.draw.rect(screen, again_colour, again_rect)
    quit_colour = gameColours['dR'] if quit_rect.collidepoint((mx, my)) else gameColours['red']
    pygame.draw.rect(screen, quit_colour, quit_rect)
# screen for when computer and user dice are equal
def drawScreen(die1, num, score):
    """Draw the tie screen's Play Again / Quit buttons, with the
    hovered button rendered in its darker shade."""
    mx, my = pygame.mouse.get_pos()
    again_rect = pygame.Rect(width/7, (height-70), 225, 50)
    quit_rect = pygame.Rect(width/7+300, (height-70), 225, 50)
    again_colour = gameColours['dG'] if again_rect.collidepoint((mx, my)) else gameColours['green']
    pygame.draw.rect(screen, again_colour, again_rect)
    quit_colour = gameColours['dR'] if quit_rect.collidepoint((mx, my)) else gameColours['red']
    pygame.draw.rect(screen, quit_colour, quit_rect)
|
jessica-leishman/high-rollers
|
analysis_static/manual slices/hrStatic3.py
|
hrStatic3.py
|
py
| 4,288 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21207331986
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import *
from .filters import PostFilter
from datetime import datetime
from .forms import PostForm
class PostList(ListView):
    """Paginated list of all posts, ordered by title."""
    model = Post
    ordering = 'title'
    template_name = 'news.html'
    context_object_name = 'news'
    paginate_by = 10

    # Filtering was moved to PostSearch; kept here for reference.
    # def get_queryset(self):
    #     queryset = super().get_queryset()
    #     self.filterset = PostFilter(self.request.GET, queryset)
    #     return self.filterset.qs
    #
    # def get_context_data(self, **kwargs):
    #     context = super().get_context_data(**kwargs)
    #     # context['time_now'] = datetime.utcnow()
    #     # context['next_sale'] = None
    #     # context['sorted_posts'] = Post.objects.filter().order_by('-dateCreation')
    #     context['filterset'] = self.filterset
    #     return context
class PostDetail(DetailView):
    """Single-post detail page."""
    model = Post
    template_name = 'post.html'
    context_object_name = 'post'
class PostSearch(ListView):
    """Filterable post list; exposes the PostFilter in the template
    context so the search form can be rendered."""
    model = Post
    ordering = ['dateCreation']
    template_name = 'search.html'
    context_object_name = 'news'
    paginate_by = 10
    form_class = PostForm

    def get_queryset(self):
        # Apply the GET-parameter filter on top of the base queryset.
        queryset = super().get_queryset()
        self.filterset = PostFilter(self.request.GET, queryset)
        return self.filterset.qs

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Expose the filter so the template can render its form fields.
        context['filterset'] = self.filterset
        # context['']
        return context
# class PostSearch(ListView):
# model = Post
# template_name = "search.html"
# context_object_name = "news"
# ordering = ["dateCreation"]
# paginate_by = 10
# form_class = PostForm
#
# def get_filter(self):
# return PostFilter(self.request.GET, queryset=super().get_queryset())
#
# def get_queryset(self):
# return self.get_filter().qs
#
# def get_context_data(self, *args, **kwargs):
# return {
# **super().get_context_data(*args, **kwargs),
# "filterset": self.get_filter(),
# }
# def create_post(request):
# if request.method == 'POST':
# form = PostForm(request.POST)
# form.save()
# return HttpResponseRedirect('/news/')
# form = PostForm()
# return render(request, 'post_edit.html', {'form':form})
class PostCreate(PermissionRequiredMixin, CreateView):
    """Create a post; requires news.add_post (403 when missing, since
    raise_exception is set)."""
    permission_required = ('news.add_post',)
    raise_exception = True
    form_class = PostForm
    model = Post
    template_name = 'post_edit.html'

    # def form_valid(self, form):
    #     product = form.save(commit=False)
    #     post.categoryType.choices = 'NW'
    #     return super().form_valid(form)
class PostUpdate(PermissionRequiredMixin, UpdateView):
    """Edit a post; requires the news.change_post permission."""
    permission_required = ('news.change_post',)
    form_class = PostForm
    model = Post
    template_name = 'post_edit.html'
class PostDelete(PermissionRequiredMixin, DeleteView):
    """Delete a post; requires the news.delete_post permission."""
    permission_required = ('news.delete_post',)
    model = Post
    template_name = 'post_delete.html'
    success_url = reverse_lazy('post_list')
|
AlexAlexG/SF_lessons
|
NewsPaper/news/views.py
|
views.py
|
py
| 3,331 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31535896406
|
from exam_python_OOP_22_august_2020.project.rooms.room import Room
class Everland:
    """Hotel-style registry of Room instances with monthly billing."""

    def __init__(self):
        self.rooms = []

    def add_room(self, room: "Room"):
        """Register *room* unless it is already registered."""
        if room not in self.rooms:
            self.rooms.append(room)

    def get_monthly_consumptions(self):
        """Return the formatted total of every room's rent plus expenses."""
        total = sum(room.room_cost + room.expenses for room in self.rooms)
        return f"Monthly consumption: {total:.2f}$."

    def pay(self):
        """Charge each room; evict rooms that cannot cover their expenses.

        Iterates over a snapshot of ``self.rooms`` so that evicting
        (removing) a room does not skip the following room — the
        original mutated the list while iterating it, which silently
        skipped the element after every eviction.
        """
        result = []
        for room in list(self.rooms):
            if room.expenses <= room.budget:
                new_budget = room.budget - (room.expenses + room.room_cost)
                room.budget = new_budget
                result.append(f"{room.family_name} paid {room.expenses + room.room_cost}$ and have {new_budget}$ left.")
            else:
                self.rooms.remove(room)
                result.append(f"{room.family_name} does not have enough budget and must leave the hotel.")
        return '\n'.join(result)

    def status(self):
        """Build a per-room status report; returned as a list of lines."""
        result = []
        all_people_in_hotel = sum(r.members_count for r in self.rooms)
        result.append(f"Total population: {all_people_in_hotel:.2f}")
        for room in self.rooms:
            result.append(f"{room.name} with {room.members_count} members. Budget:"
                          f" {room.budget:.2f}$, Expenses: {room.expenses:.2f}$")
            # Per-child costs are tracked only on YoungCoupleWithChildren.
            if room.__class__.__name__ == "YoungCoupleWithChildren":
                for child_number, ch in enumerate(room.children, start=1):
                    monthly_cost = ch.cost * 30
                    result.append(f"--- Child {child_number} monthly cost: {monthly_cost:.2f}$")
            if room.appliances:
                all_appliances_cost = sum(ap.get_monthly_expense for ap in room.appliances)
                result.append(f"--- Appliances monthly cost: {all_appliances_cost}$")
        return result
|
iliyan-pigeon/Soft-uni-Courses
|
pythonProjectOOP/exam_python_OOP_22_august_2020/project/everland.py
|
everland.py
|
py
| 2,108 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28427023060
|
# import os
# import time
# import random
# # 练习1:在屏幕上显示跑马灯文字。
# def main():
# content = "北京欢迎你为你开天辟地…………"
# while True:
# # 清理屏幕上的输出
# # os.system('cls')
# # os.system('clear')
# print(content)
# # 休眠200毫秒
# time.sleep(0.1)
# content = content[1:] + content[0]
#
#
# if __name__ == '__main__':
# main()
# 练习2:设计一个函数产生指定长度的验证码,验证码由大小写字母和数字构成。
# def generate_code(code_len=4):
# """
# 生成指定长度的验证码
# :param code_len: 验证码的长度(默认4个字符)
# :return: 由大小写英文字母和数字构成的随机验证码
# """
# all_chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# last_pos = len(all_chars) - 1
# print(last_pos)
# code = ''
# for _ in range(code_len):
# index = random.randint(0, last_pos) # random.randint() 选取随机数
# print(all_chars[index])
# code += all_chars[index]
# print(code)
# return code
#
#
# print(generate_code())
# 练习3:设计一个函数返回给定文件名的后缀名
def get_suffix(filename, has_dot=False):
    """Return the extension of *filename*.

    :param filename: file name to inspect
    :param has_dot: when True, keep the leading dot in the result
    :return: the extension, or '' when there is no usable extension
    """
    # rfind returns the index of the last '.' or -1 when absent.
    pos = filename.rfind('.')
    # A valid extension requires the dot strictly inside the name:
    # not first ('.bashrc' style hidden files) and not last ('name.').
    if 0 < pos < len(filename) - 1:
        # Bug fix: the original ignored has_dot and always stripped the dot;
        # debug print() calls were removed as well.
        index = pos if has_dot else pos + 1
        return filename[index:]
    return ''
print(get_suffix("012txtdd"))
|
sunhuimoon/Python100Days
|
day07/day0710.py
|
day0710.py
|
py
| 1,929 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
30056116646
|
def find2NumbersThatAddTo(arr, expectedSum, lo, hi):
    """Two-pointer scan of the sorted slice arr[lo..hi].

    Returns the pair of indices (low, high) whose values sum to
    *expectedSum*, or None when no such pair exists in the range.
    """
    while lo < hi:
        pair_sum = arr[lo] + arr[hi]
        if pair_sum < expectedSum:
            lo += 1
        elif pair_sum > expectedSum:
            hi -= 1
        else:
            return lo, hi
    return None
def find3NumbersThatAddTo(arr, expectedSum):
    """Return indices (i, j, k), i < j < k, of three entries of the sorted,
    positive list *arr* summing to *expectedSum*; assumes a triple exists."""
    hi = len(arr) - 1
    # Skip entries that alone already exceed the target.
    while arr[hi] > expectedSum:
        hi -= 1
    # Fix the largest element, then two-pointer the prefix for the rest.
    while True:
        pair = find2NumbersThatAddTo(arr, expectedSum - arr[hi], 0, hi - 1)
        if pair is not None:
            low_idx, mid_idx = pair
            return low_idx, mid_idx, hi
        hi -= 1
def main():
    """Read one integer per line from input1.txt and print the three entries
    summing to 2020 plus their product (Advent of Code 2020, day 1 part 2)."""
    # Bug fix: the original leaked the file handle; 'with' guarantees close.
    with open("input1.txt", "r") as f:
        inp = [int(token) for token in f.read().split('\n')]
    inp.sort()
    lo, mid, hi = find3NumbersThatAddTo(inp, 2020)
    print(inp[lo])
    print(inp[mid])
    print(inp[hi])
    print(inp[lo] * inp[mid] * inp[hi])


if __name__ == '__main__':
    main()
# def find3NumbersThatAddTo(arr, expectedSum):
# lo = 0
# mid = int(len(arr) / 2)
# hi = len(arr) - 1
# while lo < hi:
# sum = arr[lo] + arr[mid] + arr[hi]
# if sum == expectedSum:
# return lo, mid, hi
# elif sum < expectedSum:
# diff1 = arr[lo + 1] - arr[lo]
# diff2 = arr[mid + 1] - arr[mid]
# if diff1 > diff2:
# mid = mid + 1
# if mid == hi: hi = hi + 1
# else:
# lo = lo + 1
# if mid == lo: mid = mid + 1
# if mid == hi: hi = hi + 1
#
# else:
# diff1 = arr[hi] - arr[hi - 1]
# diff2 = arr[mid] - arr[mid - 1]
#
# if diff1 > diff2:
# mid = mid - 1
# if mid == lo: lo = lo - 1
# else:
# hi = hi - 1
# if mid == hi: mid = mid - 1
# if mid == lo: lo = lo - 1
|
nithish-thomas/adventOfCode2020
|
aoc20/day1/day1_2.py
|
day1_2.py
|
py
| 1,943 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39349977880
|
# Definition for double singly-linked list.
# Definition for double singly-linked list.
class DbListNode(object):
    """Node of a doubly linked list holding a key/value pair."""

    def __init__(self, x, y):
        self.key = x
        self.val = y
        # Neighbour links; None until the node is spliced into a list.
        self.next = None
        self.prev = None


class LRUCache(object):
    '''
    leet code: 146
    Least-recently-used cache.

    get(key)        -> value if present else -1 (marks the key most recent).
    put(key, value) -> insert or update; when capacity is reached the least
                       recently used entry is evicted first.

    Implementation: hash map (O(1) lookup) + doubly linked list ordered
    most-recent-first (O(1) reorder and eviction).

    Fixes over the original:
    - put() on an existing key now updates the stored value (the original
      only moved the node to the front, leaving the stale value).
    - The unlink / push-to-front splice, duplicated three times, is
      extracted into _unlink() / _push_front().
    '''

    def __init__(self, capacity: int):
        self.cap = capacity
        self.hkeys = {}
        # self.top and self.tail are sentinel nodes so splicing never has
        # to special-case the list ends.
        self.top = DbListNode(None, -1)
        self.tail = DbListNode(None, -1)
        self.top.next = self.tail
        self.tail.prev = self.top

    def _unlink(self, node):
        """Detach *node* from the list."""
        node.prev.next = node.next
        node.next.prev = node.prev

    def _push_front(self, node):
        """Insert *node* right after the head sentinel (most-recent slot)."""
        first = self.top.next
        self.top.next = node
        node.prev = self.top
        node.next = first
        first.prev = node

    def get(self, key: int) -> int:
        """Return the value stored under *key*, or -1 when absent."""
        if key not in self.hkeys:
            return -1
        cur = self.hkeys[key]
        self._unlink(cur)
        self._push_front(cur)
        return cur.val

    def put(self, key: int, value: int) -> None:
        """Store *value* under *key*; evict the LRU entry when full."""
        if key in self.hkeys:
            cur = self.hkeys[key]
            cur.val = value  # bug fix: refresh the value on update
            self._unlink(cur)
            self._push_front(cur)
            return
        if len(self.hkeys) == self.cap:
            # Evict the least recently used node (just before the tail).
            lru = self.tail.prev
            self.hkeys.pop(lru.key)
            self._unlink(lru)
        cur = DbListNode(key, value)
        self.hkeys[key] = cur
        self._push_front(cur)

    def __repr__(self):
        """Most-recent-first rendering of cached values, e.g. '4->3'."""
        vals = []
        p = self.top.next
        while p.next:
            vals.append(str(p.val))
            p = p.next
        return '->'.join(vals)
if __name__ == '__main__':
    # Smoke test mirroring the LeetCode 146 example sequence.
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    print(cache)
    cache.get(1)  # returns 1
    print(cache)
    cache.put(3, 3)  # this operation evicts key 2
    print(cache)
    cache.get(2)  # returns -1 (not found)
    cache.put(4, 4)  # this operation evicts key 1
    print(cache)
    cache.get(1)  # returns -1 (not found)
    cache.get(3)  # returns 3
    print(cache)
    cache.get(4)  # returns 4
    print(cache)
|
smdzz/Geek_Point
|
data_structure_and_algorithm/python/linked_list/LRUCache.py
|
LRUCache.py
|
py
| 4,050 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
10137589364
|
'''
File name: /ciphers/block_ciphers/anu/cipher.py
Author: Cesar Cruz
Project: cryptofeather
Python Version: 2.7
'''
import numpy
from constants import BLOCK_LENGTH, KEY_LENGTH, NUMBER_OF_ROUNDS, SBOX, PBOX, SBOX_INV
from utils.logic_operations import xor
from utils.crypto import sbox_operation, permutation_layer, generate_pboxinv
from utils.others import pretty_print
from utils.latex.table_generator import generate_table
def _cipher(plaintext, keys):
    """Encrypt one block with the ANU lightweight cipher.

    :param plaintext: bit list of length BLOCK_LENGTH
    :param keys: per-round key bit lists, one entry per round
    :return: ciphertext bit list (left half + right half)
    """
    # NOTE(review): BLOCK_LENGTH / 2 yields a float in Python 3; this slicing
    # only works under Python 2 — confirm the intended interpreter version.
    pl = plaintext[:(BLOCK_LENGTH / 2)]
    pr = plaintext[(BLOCK_LENGTH / 2):]
    for i in range(NUMBER_OF_ROUNDS):
        # Round function derives two halves from the left block.
        f1, f2 = f_function(pl)
        pt = xor(f1, pr)
        pt = xor(pt, xor(f2, keys[i]))
        # Permute both halves for the next round.
        pr = permutation_layer(PBOX, pl)
        pl = permutation_layer(PBOX, pt)
    return pl + pr
def _cipher_latex(plaintext, keys):
pl = plaintext[:(BLOCK_LENGTH / 2)]
pr = plaintext[(BLOCK_LENGTH / 2):]
rows = []
for i in range(NUMBER_OF_ROUNDS):
RK = pretty_print(keys[i], len(keys[i]))
f1, f2 = f_function(pl)
f1_lat = pretty_print(f1, len(f1))
f2_lat = pretty_print(f2, len(f2))
pt = xor(f1, pr)
A = pretty_print(pt, len(pt))
pt = xor(pt, xor(f2, keys[i]))
B = pretty_print(pt, len(pt))
pr = permutation_layer(PBOX, pl)
C = pretty_print(pr, len(pr))
pl = permutation_layer(PBOX, pt)
D = pretty_print(pl, len(pl))
row = [i, RK, f1_lat, f2_lat, A, B, C, D]
rows.append(row)
header_row1 = ["Ronda", "RK", "F1", "F2", "A", "B", "C", "D"]
generate_table("ANU Cifrado", header_row1, rows, "anuCifrado")
return pl + pr
def _decipher(ciphertext, keys):
    """Decrypt one ANU block by undoing the rounds of _cipher in reverse.

    :param ciphertext: bit list of length BLOCK_LENGTH
    :param keys: the same per-round key bit lists used for encryption
    :return: plaintext bit list (left half + right half)
    """
    # NOTE(review): BLOCK_LENGTH / 2 yields a float in Python 3; this slicing
    # only works under Python 2 — confirm the intended interpreter version.
    pl = ciphertext[:(BLOCK_LENGTH / 2)]
    pr = ciphertext[(BLOCK_LENGTH / 2):]
    pbox_inv = generate_pboxinv(PBOX)
    for i in range(NUMBER_OF_ROUNDS - 1, -1, -1):
        # Swap the halves
        tmp_pl = pl
        pl = pr
        pr = tmp_pl
        # Inverse permutation layer
        pl = permutation_layer(pbox_inv, pl)
        pr = permutation_layer(pbox_inv, pr)
        # Round function
        f1, f2 = f_function(pl)
        # Undo the X-OR operations
        pr = xor(pr, xor(f2, keys[i]))
        pr = xor(pr, f1)
    return pl + pr
def _decipher_latex(ciphertext, keys):
pl = ciphertext[:(BLOCK_LENGTH / 2)]
pr = ciphertext[(BLOCK_LENGTH / 2):]
pbox_inv = generate_pboxinv(PBOX)
rows = []
for i in range(NUMBER_OF_ROUNDS - 1, -1, -1):
RK = pretty_print(keys[i], len(keys[i]))
# Swap
tmp_pl = pl
pl = pr
pr = tmp_pl
A = pretty_print(pl, len(pl))
B = pretty_print(pr, len(pr))
# Capa de permutacion
pl = permutation_layer(pbox_inv, pl)
pr = permutation_layer(pbox_inv, pr)
C = pretty_print(pl, len(pl))
D = pretty_print(pr, len(pr))
# Funcion f
f1, f2 = f_function(pl)
F1 = pretty_print(f1, len(f1))
F2 = pretty_print(f2, len(f2))
# Operaciones X-OR
pr = xor(pr, xor(f2, keys[i]))
E = pretty_print(pr, len(pr))
pr = xor(pr, f1)
F = pretty_print(pr, len(pr))
row = [i, RK, A, B, C, D, F1, F2, E, F]
rows.append(row)
header_row1 = ["Ronda", "RK", "A", "B", "C", "D","F1", "F2", "E", "F"]
generate_table("ANU Decifrado", header_row1, rows, "anuDecifrado")
return pl + pr
def f_function(pl):
    """ANU round function: two rotations of the left half, each passed
    through the S-box; returns the pair (f1, f2)."""
    f1 = list(numpy.roll(pl, -3))  # rotate left by 3 bits
    f2 = list(numpy.roll(pl, 8))   # rotate right by 8 bits
    f1 = sbox_operation(SBOX, f1)
    f2 = sbox_operation(SBOX, f2)
    return f1, f2
|
ccruz182/Lightweight-Cryptography
|
cryptofeather/ciphers/block_ciphers/anu/cipher.py
|
cipher.py
|
py
| 3,372 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70943725628
|
import json
import os
class FolderWalker:
"""
Check folder with results. Walk through the folders and define paths to various files.
If any values are not counted in one of the frameworks, they will be excluded in competitors.
Thus, the class ensures consistency of results in the analysis.
"""
def __init__(self, working_dir: str):
self.working_dir = os.path.abspath(working_dir)
path_to_config_json = os.path.join(self.working_dir, 'configuration.json')
with open(path_to_config_json) as file:
config_info = json.load(file)
self.datasets = config_info['Datasets']
self.launches = config_info['Launches']
self.libraries = config_info['Libraries to compare']
self.clip_border = config_info['Clip border']
self.forecast_files = {}
self.timeout_files = {}
self.additional_files = {}
for dataset in self.datasets:
for launch in range(self.launches):
for library in self.libraries:
launch_number = f'launch_{launch}'
case_id = f'{dataset}|{launch}|{library}'
validation_case_path = os.path.join(self.working_dir, dataset, launch_number, library)
all_forecasted_paths = self.find_files(validation_case_path,
search_pattern='forecast_vs_actual.csv')
self.forecast_files.update({case_id: all_forecasted_paths})
all_timeouts_paths = self.find_files(validation_case_path,
search_pattern='timeouts.json')
self.timeout_files.update({case_id: all_timeouts_paths})
all_additional_paths = self.find_additional_files(validation_case_path)
if all_additional_paths is not None:
self.additional_files.update({case_id: all_additional_paths})
self.exclude_mismatched_results()
def exclude_mismatched_results(self):
"""
In some cases it is not possible to get results for some cases (dataset -
launch number - library - time series - forecast horizon). So there is a
need to exclude cases without calculations
"""
for dataset in self.datasets:
# First cycle - collect information
dataset_execution_time = []
dataset_forecast = []
for launch in range(self.launches):
for library in self.libraries:
case_id = f'{dataset}|{launch}|{library}'
ex_time_files = set(map(lambda x: os.path.basename(x), self.timeout_files[case_id]))
forecast_files = set(map(lambda x: os.path.basename(x), self.forecast_files[case_id]))
dataset_execution_time.append(ex_time_files)
dataset_forecast.append(forecast_files)
# Find intersection for all cases
dataset_execution_time = set.intersection(*dataset_execution_time)
dataset_forecast = set.intersection(*dataset_forecast)
# Second cycle - update info
for launch in range(self.launches):
for library in self.libraries:
case_id = f'{dataset}|{launch}|{library}'
ex_time_file = self.timeout_files[case_id][0]
current_path = os.path.dirname(ex_time_file)
upd_time_paths = add_path_to_files(current_path, dataset_execution_time)
upd_forecasts = add_path_to_files(current_path, dataset_forecast)
self.timeout_files.update({case_id: upd_time_paths})
self.forecast_files.update({case_id: upd_forecasts})
@staticmethod
def find_files(folder_with_files: str, search_pattern: str):
""" Find all files in the folder and return full paths """
files = os.listdir(folder_with_files)
files.sort()
all_paths = []
for file in files:
if search_pattern in file:
all_paths.append(os.path.join(folder_with_files, file))
return all_paths
@staticmethod
def find_additional_files(folder_with_files: str):
""" Search for unusual files in saved folder - additional info """
files = os.listdir(folder_with_files)
files.sort()
extra_paths = []
for file in files:
if 'timeouts.json' not in file and 'forecast_vs_actual.csv' not in file:
extra_paths.append(os.path.join(folder_with_files, file))
if len(extra_paths) == 0:
return None
return extra_paths
def add_path_to_files(current_path: str, files: set):
""" In set with file names for each file add folder path """
updated_data = []
for file in files:
updated_data.append(os.path.join(current_path, file))
updated_data.sort()
return updated_data
|
ITMO-NSS-team/pytsbe
|
pytsbe/report/walk.py
|
walk.py
|
py
| 5,028 |
python
|
en
|
code
| 30 |
github-code
|
6
|
25018507922
|
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
current_dir = os.path.dirname(os.path.abspath(__file__))
def encrypt_data(data, key_path='public.pem'):
    """RSA-OAEP(SHA-256) encrypt *data* with the public key stored next to
    this module and return the ciphertext as a base64 string."""
    pem_path = os.path.join(current_dir, key_path)
    # Load the PEM-encoded public key from disk.
    with open(pem_path, 'rb') as handle:
        pub = serialization.load_pem_public_key(
            handle.read(),
            backend=default_backend()
        )
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None
    )
    ciphertext = pub.encrypt(data.encode('utf-8'), oaep)
    # Base64-encode so the result is a plain ASCII string.
    return base64.b64encode(ciphertext).decode('utf-8')
def decrypt_data(encoded_encrypted_data, key_path='private.pem'):
    """Decrypt a base64 RSA-OAEP(SHA-256) ciphertext with the private key
    stored next to this module and return the plaintext string."""
    pem_path = os.path.join(current_dir, key_path)
    # Load the PEM-encoded (unencrypted) private key from disk.
    with open(pem_path, 'rb') as handle:
        priv = serialization.load_pem_private_key(
            handle.read(),
            password=None,
            backend=default_backend()
        )
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None
    )
    plaintext = priv.decrypt(base64.b64decode(encoded_encrypted_data), oaep)
    return plaintext.decode('utf-8')
|
ivana-dodik/Blockchain
|
EP --zadatak 02/crypto.py
|
crypto.py
|
py
| 1,898 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36273987017
|
from random import randint
from ..engine.enemy import Enemy
class Barrel(Enemy):
    """Slow, high-health barrel enemy."""

    def __init__(self, level):
        super(Barrel, self).__init__(level)
        self.health = self.max_health = 100
        self.speed = 0.35
        self.set_die_sound('assets/sound/barrelguy/die.ogg')
        self.set_sprite('barrel')
        # Loot drop of 6 +/- 2 (uniform over 4..8).
        self.drop = randint(4, 8)
        self.name = 'barrelguy'
|
Timtam/ggj17
|
wavomizer/enemies/barrel.py
|
barrel.py
|
py
| 406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15298986512
|
#!/usr/bin/python3.6
import requests, json, datetime
from time import sleep
try:
    while True:
        # Public Mercado Bitcoin ticker; 'buy' holds the current ask price.
        # A timeout keeps the loop from hanging forever on a dead connection.
        req = requests.get('https://www.mercadobitcoin.net/api/BTC/ticker/', timeout=10)
        cot = json.loads(req.text)
        d = datetime.datetime.now()
        print(d.strftime('%c'))
        print('BTC:', cot['ticker']['buy'][:8])
        sleep(10)
        print('')
except KeyboardInterrupt:
    # Let Ctrl-C stop the loop quietly instead of claiming a network failure.
    pass
except Exception as exc:
    # Bug fix: the original bare 'except:' swallowed every error (including
    # KeyboardInterrupt) and always printed a misleading fixed message.
    print("Failed to establish a connection:", exc)
|
andreMarqu3s/bit_value
|
cotacao.py
|
cotacao.py
|
py
| 387 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72810210107
|
def create_piechart():
# Importamos las dependencias
import pandas as pd
import matplotlib.pyplot as plt
from config import engine
from sqlalchemy.orm import sessionmaker
engine = engine
# Intentamos leer desde la base de datos, la tabla "tabla_1"
try:
with engine.connect() as connection:
df = pd.read_sql_table('tabla_1', connection)
except Exception as e:
print('Ocurrió un error al intentar conectar o leer la base de datos:')
print(str(e))
return
# Va recorrer la columna "sentiment" de nuestro dataframe y sumará la cantidad de veces que se repite cada valor disponible(negative, positive y neutral)
sentiment_counts = df['sentiment'].value_counts()
# Le indicamos a matplotlib las características con las que debe crear nuestro piechart
plt.figure(figsize=(10, 5))
plt.pie(sentiment_counts, labels=sentiment_counts.index, autopct='%1.1f%%')
plt.title('Sentiment Analysis Pie Chart')
# Guardamos en la carpeta raíz del proyecto el piechart generado en formato png
plt.savefig("piechart.png")
# Esto sirve para mostrar en pantalla el piechart generado, por defecto está deshabilitado
# plt.show()
|
NebyX1/data-science-engineering-end-to-end-project-bootcamp-milei-twitter-scraping
|
piechart_script.py
|
piechart_script.py
|
py
| 1,239 |
python
|
es
|
code
| 0 |
github-code
|
6
|
43755877766
|
'''
Measures the square area of colonies in an image file.
Written by George Walters-Marrah
Last updated: 6/26/2019
'''
# import needed packages
import imageio
import matplotlib.pyplot as plt
import scipy.ndimage as ndi
import numpy as np
from skimage import morphology as morph
import os.path
from os import path
def remove_large_objects(ar, max_size=64, connectivity=1, in_place=False):
    """Remove connected components larger than the specified size.

    Expects ar to be an array with labeled objects, and removes objects
    larger than max_size. If `ar` is bool, the image is first labeled.
    This leads to potentially different behavior for bool and 0-and-1
    arrays.

    Parameters
    ----------
    ar : ndarray (arbitrary shape, int or bool type)
        The array containing the objects of interest. If the array type is
        int, the ints must be non-negative.
    max_size : int, optional (default: 64)
        The largest allowable object size.
    connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
        The connectivity defining the neighborhood of a pixel. Used during
        labelling if `ar` is bool.
    in_place : bool, optional (default: False)
        If ``True``, remove the objects in the input array itself.
        Otherwise, make a copy.

    Raises
    ------
    ValueError
        If the input array contains negative values.

    Returns
    -------
    out : ndarray, same shape and type as input `ar`
        The input array with large connected components removed.

    Examples
    --------
    >>> a = np.array([[0, 0, 0, 1, 0],
    ...               [1, 1, 1, 0, 0],
    ...               [1, 1, 1, 0, 1]], bool)
    >>> b = remove_large_objects(a, 3)
    >>> b
    array([[False, False, False,  True, False],
           [False, False, False, False, False],
           [False, False, False, False,  True]], dtype=bool)

    Notes
    -----
    Adapted from skimage.morphology.remove_small_objects. Two fixes over
    the original adaptation: the docstring examples described the
    small-object variant, and an ``if max_size == 0: return out`` shortcut
    (correct only for *small*-object removal) wrongly kept every object.
    """
    out = ar if in_place else ar.copy()
    if out.dtype == bool:
        selem = ndi.generate_binary_structure(ar.ndim, connectivity)
        ccs = np.zeros_like(ar, dtype=np.int32)
        ndi.label(ar, selem, output=ccs)
    else:
        ccs = out
    try:
        component_sizes = np.bincount(ccs.ravel())
    except ValueError:
        raise ValueError("Negative value labels are not supported. Try "
                         "relabeling the input with `scipy.ndimage.label` or "
                         "`skimage.morphology.label`.")
    too_big = component_sizes > max_size
    # Never blank the background (label 0): only real objects are subject
    # to the size limit. (Writing 0 over background pixels was a no-op in
    # the original, but this makes the intent explicit.)
    too_big[0] = False
    too_big_mask = too_big[ccs]
    out[too_big_mask] = 0
    return out
def measure(imFolder, imVector, imStrain, imPlate, imRepNum, imType , firstMask, secondMaskLow, secondMaskHigh, smallSize, largeSize, stdThreshold, results = True, manual = False, stdManual = 1):
# make an object with the filepath to the image you want to analysis
imName = imVector + '_' + imStrain + '_' + imPlate + '_' + imRepNum
imGenericName = imVector + '_' + imStrain + '_' + imPlate
imPath = imFolder + '/' + imName + imType
# check if the path exists
if path.exists(imPath):
pass
else:
raise ValueError('The PATH specified does not exist. Change PATH and try again.')
# read in plate picture as an uint8 *only works with uint8 dtypes*
im = imageio.imread(imPath)
# prints the dtype and min/max. Values should be: dtype = uint8, min = ~0, max = ~255
dtype = im.dtype
if results:
print('Data type:', dtype)
print('Min. value:', im.min())
print('Max value:', im.max())
print('')
# raises error of image type isn't uint8
if dtype != 'uint8':
raise ValueError(imPath + ' must be uint8. Change image file to uint8 then try again.')
# Gets rid pure white regions of the image
mask = im < firstMask
im_mask = np.where(mask, im, 0)
# show images
if results:
fig, axes = plt.subplots(1,2)
axes[0].imshow(im, cmap = 'gray')
plt.axis('off')
axes[1].imshow(im_mask, cmap = 'gray')
plt.axis('off')
plt.show()
# Uniforms the photo to make the edges clearer and easier to detect
im_filt = ndi.uniform_filter(im_mask, size=3)
# searches for the gray areas (where colonies are).
col_mask1 = im_filt > secondMaskLow
col_mask2 = im_filt < secondMaskHigh
col_mask = col_mask1 & col_mask2
im_colonies = np.where(col_mask, im, 0)
# Creates label objects
labels, nlabels = ndi.label(col_mask)
# Get initial amount of objects found by mask
bboxinitial = ndi.find_objects(labels)
if results:
print('Objects found in initial mask for ' + imPath + ': ', len(bboxinitial))
print('')
# show images
if results:
fig, axes = plt.subplots(1,2)
axes[0].imshow(im_filt, cmap = 'gray')
plt.axis('off')
axes[1].imshow(im_colonies, cmap = 'gray')
plt.axis('off')
plt.show()
# Removes abnormally small or large objects
cols_cleaned1 = morph.remove_small_objects(labels, smallSize)
cols_cleaned2 = remove_large_objects(cols_cleaned1, largeSize)
bboxes = ndi.find_objects(cols_cleaned2)
# shows images
if results:
fig, axes = plt.subplots(1,2)
axes[0].imshow(im_colonies, cmap = 'gray')
plt.axis('off')
axes[1].imshow(cols_cleaned2, cmap = 'rainbow')
plt.axis('off')
plt.show()
# Calculates the colony size
col_size_init = []
for index in range(len(bboxes)):
# excludes colonies with abnormal morphology
npixel = 0
dpixel = 6.45*6.45
colony = cols_cleaned2[bboxes[index]]
std = np.std(colony.shape[:2])
if (std <= stdThreshold):
for image in colony:
for pixel in image:
if pixel > 0:
npixel += 1
meas = npixel*dpixel
measFin = np.around(meas, 2)
col_size_init.append(measFin)
else: pass
# allows you to manually discard bad data points.
if manual:
np_col_size_init = np.array(col_size_init)
col_size = []
for index in range(len(bboxes)):
# excludes colonies with abnormal morphology and perfect squares
size_std = np.std(np_col_size_init)
size_median = np.median(np_col_size_init)
npixel = 0
dpixel = 6.45*6.45
colony = cols_cleaned2[bboxes[index]]
std = np.std(colony.shape[:2])
if (std <= stdThreshold):
for image in colony:
for pixel in image:
if pixel > 0:
npixel += 1
meas = npixel*dpixel
measFin = np.around(meas, 2)
else:
measFin = False
# allows to manually sift through outliers
if measFin == False:
pass
elif measFin < size_median - stdManual * size_std or measFin > size_median + stdManual * size_std:
plt.imshow(im_colonies[bboxes[index]], cmap = 'gray')
plt.axis('off')
plt.show()
ques = input('Do you want to analyze that colony from ' + imName + '(' + imFolder + ')' + '? If yes, type Y. If no, type N:')
if ques == 'Y' or ques == 'y':
col_size.append(measFin)
print('Colony analyzed.')
elif ques == 'N' or ques == 'n':
print('Colony skipped.')
else:
doubleCheck = input('Did you mean to put N?:')
if doubleCheck == 'N' or doubleCheck == 'n':
col_size.append(measFin)
print('Colony analyzed.')
else:
print('Colony skipped.')
else:
col_size.append(measFin)
np_col_size = np.array(col_size)
else:
np_col_size = np.array(col_size_init)
# contains all the calculated diameter values and summarized data
colMean = np.around(np.mean(np_col_size),2)
colMedian = np.around(np.median(np_col_size),2)
colStd = np.around(np.std(np_col_size),2)
data = [imGenericName, colMean, colMedian, colStd, imFolder, imVector, imStrain, imPlate, imRepNum, np_col_size]
# prints out a summary of the results
if results:
print('Data for', imName, '(' + imFolder + ')')
print('Final amount of colonies measured:', len(np_col_size))
print('Mean of data: ', colMean)
print('Median of data: ', colMedian)
print('Standard deviation of data: ', colStd)
print('')
print(imName, 'area calculated.')
print('')
return data
def main():
measure('', '', '', '', '', '', firstMask = 190, secondMaskLow = 50, secondMaskHigh = 185, smallSize = 2, largeSize = 235, stdThreshold = 1)
if __name__ == '__main__': main()
|
gwmarrah/colony-measurer
|
colSizeMeasurer.py
|
colSizeMeasurer.py
|
py
| 9,835 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23039179017
|
import cv2
import numpy as np
from hand import HandRecognizer
from args import OLD_FONT_THRESHOLD
class OldRecognizer(HandRecognizer):
def __init__(self, imname, already_read=False):
super(OldRecognizer, self).__init__(imname, already_read)
self.cal_result()
def loop_process(self, func):
pad = 0.05
for x, y, w, h in self.recs:
x0, y0, x1, y1 = map(int, (x + pad * w, y + pad * h, x + w - pad * w, y + h - pad * h))
single = cv2.cvtColor(self.raw_sudoku_im[y0:y1, x0:x1], cv2.COLOR_BGR2GRAY)
self.result.append(func(single))
def single_recognize(self, im):
ret, im = cv2.threshold(im, OLD_FONT_THRESHOLD, 255, cv2.THRESH_BINARY)
# self._debug(im)
r, c = im.shape
edges = cv2.Canny(im, 20, 50)
black, mid_area = 0, 0
for i in range(r):
for j in range(c):
if edges[i, j] == 255:
black += 1
if 1 / 3 * c < j < 2 / 3 * c:
mid_area += 1
return mid_area / black # 图像中央部分黑色像素的比例
def cal_result(self):
# print(*zip(self.result, range(9)), sep='\n')
self.result = sorted(zip(self.result, range(9)), reverse=True)[0][1]
if __name__ == '__main__':
print(OldRecognizer('test_im/raw_old.jpg').result)
|
sjdeak/RoboMasters2017-RuneDetector
|
old.py
|
old.py
|
py
| 1,379 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10719487359
|
import pathlib
import random
import shutil
import time
import uuid
import xml.etree.ElementTree as Et
from zipfile import ZipFile
def name_for_ik():
    """
    :return: a random name of the form ###-###-###-###-### in lowercase hex
             (8-4-4-4-12 groups) for integration envelopes
    """
    # The original hand-built exactly the UUID text format from
    # randint()+hex() slices; uuid4 yields the identical 8-4-4-4-12 pattern
    # with cryptographically stronger randomness and less code.
    return str(uuid.uuid4())
def atribute_generator(char_value):  # 17 chars for an ED file name, 9 for a contract number
    """Return a pseudo-random digit string derived from the current clock.

    :param char_value: slice width; the result is between 1 and
        char_value - 1 digits long
    :return: trailing digits of the high-resolution timestamp
    """
    timestamp_digits = str(int(time.time() * 10000000))
    offset = random.randint(1, char_value - 1)
    start = len(timestamp_digits) - char_value + offset
    return timestamp_digits[start:]
def envelope_change_attrib(namespaceprefix, namespaceuri, xml_source_file_path, tags, paramreplace, path_to_save_xml):
"""
Изменение аттрибутов в файле Envelope
:param namespaceprefix: префикс пространства имен в файле envelope (igr)
:param namespaceuri: ссылка пространства имен в envelope
:param xml_source_file_path: путь к файлу envelope
:param tags: теги, по которым идет поиск
:param paramreplace: словарь из параметров тегов и их новых значений
:param path_to_save_xml: путь и имя для готового файла
:return: запись в файл в том же каталоге
"""
Et.register_namespace(namespaceprefix, namespaceuri) # для записи в файле необходимо передать prefix и uri
tree = Et.parse(xml_source_file_path) # открываем xml файл и парсим
root = tree.getroot()
for tag in tags:
for element in root.findall('.//*[@{' + namespaceuri + '}' + tag + ']'): #
for key, value in paramreplace.items():
if element.attrib['{' + namespaceuri + '}' + tag] in 'Document':
element.attrib['{' + namespaceuri + '}fileName'] = value
if element.attrib['{' + namespaceuri + '}' + tag] in key:
if len(str(element.text).strip()) > 0:
if element.text is None:
element.attrib['{' + namespaceuri + '}fileIdentity'] = value
else:
element.text = value
else:
element.attrib['{' + namespaceuri + '}fileIdentity'] = value
tree.write(path_to_save_xml)
def ed421_change_attrib(namespaceprefix, namespaceuri, xml_source_file_path, path_to_save_xml, **kwargs):
"""
Изменение аттрибутов в файле ED421
:param namespaceprefix: префикс пространства имен в файле ED421 (пусто)
:param namespaceuri: ссылка пространства имен в файле ED421
:param xml_source_file_path: путь к файлу ED421
:param path_to_save_xml: путь и имя для готового файла
:param kwargs: аттрибуты тега и их новые значения
:return:
"""
Et.register_namespace(namespaceprefix, namespaceuri)
tree = Et.parse(xml_source_file_path)
root = tree.getroot()
for key, value in kwargs.items():
if root.findall(f'.[@{key}]'): # поиск атрибута в корневом элементе
root.attrib[key] = value
elif root.findall(f'.//*[@{key}]'): # поиск атрибута в дочерних элементах
root.find(f'.//*[@{key}]').set(key, value)
tree.write(path_to_save_xml, encoding='UTF-8', xml_declaration=True) # сохранение xml файла
def routeinfo_change_attrib(namespaceprefix, namespaceuri, xml_source_file_path, path_to_save_xml, new_text):
"""
Редактирование RouteInfo
:param namespaceprefix: префикс пространства имен в файле ED421 (igr)
:param namespaceuri: ссылка пространства имен в файле ED421
:param xml_source_file_path: путь к файлу
:param path_to_save_xml: путь и имя для готового файла
:param new_text: текст, который будет записан между тегами
:return: запись в xml-файл
"""
Et.register_namespace(namespaceprefix, namespaceuri)
tree = Et.parse(xml_source_file_path)
root = tree.getroot()
root.find('{' + namespaceuri + '}DocumentPackID').text = new_text
tree.write(path_to_save_xml) # сохранение xml файла
def create_new_directory(path_to_new_directory, directory_name):
    """Create (if necessary) and return a subdirectory.

    :param path_to_new_directory: parent directory path
    :param directory_name: name of the directory to create
    :return: pathlib.Path of the (now existing) directory; an already
        existing directory is reused rather than recreated
    """
    target = pathlib.Path(path_to_new_directory) / directory_name
    target.mkdir(exist_ok=True)
    return target
def get_arhive(path, *files):
"""
:param path: путь, где будет создан архив
:param files: файлы, которые будут помещаться в архив
:return:
"""
with ZipFile(path, 'w') as new_zip: # добавить после path функцию вызова нового имени
for arg in files:
filename = arg.name
new_zip.write(arg, arcname=filename)
pathlib.Path(arg).unlink()
def move_files(copy_from, copy_to):
"""
:param copy_from: полный путь к файлу, который будет перемещен
:param copy_to: каталог, в который будет перемещен файл
:return: перемещает файл, переданный из copy_from в каталог copy_to
"""
shutil.move(copy_from, copy_to)
# -----------------------------------------------------------
start_path = pathlib.Path.cwd()
envelope_path = start_path.joinpath('sample/envelope.xml')
routeinfo_path = start_path.joinpath('sample/RouteInfo.xml')
ed421_path = start_path.joinpath('sample/ED421.xml')
# -----------------------------------------------------------
# создать каталоги temp, converts внутри каталога
temp_path = create_new_directory(start_path, 'temp')
convert_path = create_new_directory(start_path, 'converts')
# -----------------------------------------------------------
# переменные
prefix_for_routeinfo_envelope = 'igr'
prefix_ed421 = ''
uri_for_routeinfo_envelope = 'http://www.cbr.ru/igr/'
uri_for_ed421 = 'urn:cbr-ru:elk:v2021.1.0'
text_for_sign_file = 'test signature file'
tags_attrib = ['name', 'fileType'] # теги для функции generate_xml_envelope
# -----------------------------------------------------------
# сгенерировать имена для файлов
def create_ik(iteration_count):
"""
:param iteration_count:
:return:
"""
for i in range(1, iteration_count + 1):
arhive_name = name_for_ik() # имя для архива, в который будут упакованы все файлы
ed421_name_for_arh = name_for_ik() # имя для архива, в котором лежит ed421
routeinfo_name = name_for_ik() # имя для routeinfo
sign_name = name_for_ik() # имя для файла с ЭП
# -----------------------------------------------------------
file_name_ed421 = pathlib.Path('ED421' + atribute_generator(17) + '.xml')
new_name_ed421 = temp_path.joinpath(file_name_ed421)
new_name_routeinfo = temp_path.joinpath(routeinfo_name)
new_name_envelope = temp_path.joinpath('envelope.xml')
# -----------------------------------------------------------
# создать файл с подписью
with open(temp_path.joinpath(sign_name), 'w') as sign_file:
sign_file.write(text_for_sign_file)
# заполнение словаря значениями
tags_dictionary = dict(RouteInfo=routeinfo_name,
Document=ed421_name_for_arh,
Sign=sign_name,
AssociatedFileIdentity=ed421_name_for_arh,
fileName='ED421' + atribute_generator(17) + '.xml')
attributes_and_values = dict(EDNo=atribute_generator(8),
EDDate='testEDDate',
ReqNum=atribute_generator(10),
ReqDateTime='testReqDateTime',
GrantDate='testGrantDate',
ApplicationSum=atribute_generator(17))
# изменение значений в ED421 и сохранение в другом каталоге
ed421_change_attrib(prefix_ed421,
uri_for_ed421,
ed421_path,
new_name_ed421,
**attributes_and_values)
# изменение значений в RouteInfo и сохранение в другом каталоге
routeinfo_change_attrib(prefix_for_routeinfo_envelope,
uri_for_routeinfo_envelope,
routeinfo_path,
new_name_routeinfo,
arhive_name)
# изменение значений в RouteInfo и сохранение в другом каталоге
envelope_change_attrib(prefix_for_routeinfo_envelope,
uri_for_routeinfo_envelope,
envelope_path,
tags_attrib,
tags_dictionary,
new_name_envelope)
# добавление ED421 в архив
get_arhive(temp_path.joinpath(ed421_name_for_arh),
new_name_ed421)
# формирование целого конверта
get_arhive(temp_path.joinpath(pathlib.Path(arhive_name + '.zip')),
temp_path.joinpath(ed421_name_for_arh),
new_name_routeinfo,
new_name_envelope,
temp_path.joinpath(sign_name))
# переместить конверт
move_files(temp_path.joinpath(pathlib.Path(arhive_name + '.zip')), convert_path)
# после того как все операции выполнены удалить каталог temp без проверки содержимого (наличия подкаталогов)
shutil.rmtree(temp_path, ignore_errors=True)
if __name__ == '__main__':
create_ik(2)
# TODO добавить изменение даты в трех местах в ED421
|
Steelglowhawk/updateTool
|
generator_func.py
|
generator_func.py
|
py
| 11,827 |
python
|
ru
|
code
| 1 |
github-code
|
6
|
8267514816
|
from __future__ import annotations
from kombu.pools import producers
from .queues import task_exchange
priority_to_routing_key = {
'high': 'hipri',
'mid': 'midpri',
'low': 'lopri',
}
def send_as_task(connection, fun, args=(), kwargs=None, priority='mid'):
    """Publish *fun* as a task message on the shared task exchange.

    :param connection: kombu connection used to acquire a pooled producer
    :param fun: callable to execute on the worker side
    :param args: positional arguments for *fun*
    :param kwargs: keyword arguments for *fun* (a fresh dict by default)
    :param priority: 'high', 'mid' or 'low'; mapped to a routing key
    """
    # BUGFIX: the original used a mutable default (kwargs={}), which is
    # shared across calls; use None and build a fresh dict per call.
    if kwargs is None:
        kwargs = {}
    payload = {'fun': fun, 'args': args, 'kwargs': kwargs}
    routing_key = priority_to_routing_key[priority]
    with producers[connection].acquire(block=True) as producer:
        producer.publish(payload,
                         serializer='pickle',
                         compression='bzip2',
                         exchange=task_exchange,
                         declare=[task_exchange],
                         routing_key=routing_key)
if __name__ == '__main__':
from kombu import Connection
from .tasks import hello_task
connection = Connection('amqp://guest:guest@localhost:5672//')
send_as_task(connection, fun=hello_task, args=('Kombu',), kwargs={},
priority='high')
|
celery/kombu
|
examples/simple_task_queue/client.py
|
client.py
|
py
| 994 |
python
|
en
|
code
| 2,643 |
github-code
|
6
|
41172517370
|
# -*- coding: utf-8 -*-
import numpy as np
def abs_error(X, y):
    """Mean absolute error between column 1 of *X* and the predictions *y*.

    *X* is a px2 matrix whose second column holds the observed values;
    *y* is the length-p sequence of predicted values.
    """
    predicted = np.matrix(y).transpose()
    observed = X[:, 1]
    n_samples = len(observed)
    return float((1 / n_samples) * sum(abs(observed - predicted)))
def mean_error(X, y):
    """Mean squared error between column 1 of *X* and the predictions *y*.

    *X* is a px2 matrix whose second column holds the observed values;
    *y* is the length-p sequence of predicted values.
    """
    predicted = np.matrix(y).transpose()
    observed = X[:, 1]
    n_samples = len(observed)
    return float((1 / n_samples) * sum(np.square(observed - predicted)))
# Import X matrix
X = "(2, -2), (5, 6), (-4, -4), (-7, 1), (8, 14)"
# Wrangle data to correct form
X = X.replace("(", "")
X = X.replace("),", ";")
X = X.replace(")", "")
# Convert to np.matrix
X = np.matrix(X)
# Parameters for hypothesis
theta = [2, 1.2]
# Predict y values
y = []
for i in X[:, 0]:
y.append(theta[1]*int(i)+theta[0])
# Calculate Absolute error
e_1 = abs_error(X, y)
print(e_1)
# Calculate Mean error
e_2 = mean_error(X, y)
print(e_2)
|
SThornewillvE/Udacity-DataScience-Nanodegree
|
01_supervised-models_learning-materials/linreg/01_calculate_cost.py
|
01_calculate_cost.py
|
py
| 1,014 |
python
|
en
|
code
| 29 |
github-code
|
6
|
22113638832
|
import numpy as np
from grabscreen import grab_screen
import cv2
import time
from directkeys import PressKey, ReleaseKey, W, A, S, D
from grabkeys import key_check
import os
from keras.models import load_model
from scanner import process_img
#loading model
model = load_model('model.h5')
#W key press-time bounds
PRESS = 0.23
RELEASE = 0.30
def auto_pilot(direction):
    """Translate a model prediction into steering key presses.

    direction 1 steers left, 2 steers right; any other value releases
    both steering keys so the car runs straight.
    """
    if direction == 1:
        # Left turn: make sure the opposite key is up first.
        ReleaseKey(D)
        PressKey(A)
        return
    if direction == 2:
        # Right turn.
        ReleaseKey(A)
        PressKey(D)
        return
    # Straight ahead: no steering input at all.
    ReleaseKey(A)
    ReleaseKey(D)
def drive(times):
    """Pulse the W key so the car keeps a steady forward speed.

    :param times: [elapsed_time, press_start, loop] where
        elapsed_time is the time since the last full release cycle,
        press_start is when W was last pressed (0.0 if it is not down),
        loop is the period of the caller's main loop (currently unused here).
    :return: [elapsed_time, press_start] updated for the next iteration
    """
    elapsed_time = times[0]  # Period of time since last W-key full release
    press_start = times[1]   # Last time W-key was pressed (0.0 = not held)
    loop = times[2]          # Period of the while loop
    press_time = time.time() - press_start  # How long W has been held
    if elapsed_time < PRESS:
        if not press_start:
            press_start = time.time()
        PressKey(W)
        return [elapsed_time, press_start]
    elif elapsed_time < RELEASE:
        ReleaseKey(W)
        # BUGFIX: the original wrote "(press_time > 0.25 or press_time) < 0.15",
        # which compares True (or press_time) against 0.15 instead of testing
        # press_time against both bounds.
        if press_start and (press_time > 0.25 or press_time < 0.15):
            print('Warning: Press_time ' + str(press_time) + ' is out of bounds. Consider tuning PRESS/RELEASE parameters if the error occurs frequently.')
        return [elapsed_time, 0.0]
    else:
        ReleaseKey(W)
        # BUGFIX: same precedence error as above.
        if press_start and (press_time > 0.25 or press_time < 0.15):
            print('Warning: Press_time ' + str(press_time) + ' is out of bounds. Consider tuning PRESS/RELEASE parameters if the error occurs frequently.')
        return [0.0, 0.0]
def main():
for i in list(range(3))[::-1]:
print(str(i+1))
time.sleep(1)
last_time = time.time()
elapser = 0.0
start = 0.0
pause = False
while True:
keys = key_check()
if 'Q' in keys:
break
if 'P' in keys:
if pause:
pause = False
time.sleep(1)
print('UNPAUSED')
else:
pause = True
time.sleep(1)
print('PAUSED')
if not pause:
loop = time.time() - last_time
elapser = elapser + loop
elapser, start = drive([elapser,start,loop])
last_time = time.time()
screen = grab_screen(region=(0,40,640,480))
proc_img = process_img(screen)
sample = proc_img.reshape(-1,100,100,1)
sample = sample.astype('float32')
sample /= 255
pred = model.predict(sample)
auto_pilot(np.argmax(pred))
main()
|
pashok3d/GTA_AutoPilot
|
predictor.py
|
predictor.py
|
py
| 2,723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15805513471
|
from tkinter import N
import pygame
from pygame.locals import *
from Car import Car
import sys
import neat
import time
def on_init():
pygame.init()
on_init()
screen_width = 1920
screen_height = 1080
_running = True
screen = pygame.display.set_mode((screen_width, screen_height), pygame.HWSURFACE | pygame.DOUBLEBUF)
screen.fill("WHITE")
# Number of ms in 1 time unit
# Needed for acceleration
time_unit = 15
pygame.key.set_repeat(time_unit)
FPS = 60
fpsClock = pygame.time.Clock()
# Load game assests
# map as background image
background_image = pygame.image.load("images/map.png").convert_alpha()
# Car image used from : https://github.com/NeuralNine/ai-car-simulation/blob/master/car.png
car_image = pygame.image.load("images/car.png").convert_alpha()
car_image = pygame.transform.scale(car_image, (100, 50))
car_1 = Car(car_image, 881, 800, 0)
#car_1 = Car(car_image, 500, 500, 0)
def on_event(event):
if event.type == QUIT:
on_cleanup()
# if pygame.key.get_pressed()[K_UP]:
# car_1.move_forward()
# if pygame.key.get_pressed()[K_DOWN]:
# car_1.move_backward()
# if pygame.key.get_pressed()[K_LEFT]:
# car_1.move_left()
# if pygame.key.get_pressed()[K_RIGHT]:
# car_1.move_right()
def on_loop():
pass
def on_render():
screen.blit(background_image, (0, 0))
car_1.get_data()
car_1.draw(screen)
pygame.display.flip()
def on_cleanup():
pygame.display.quit()
pygame.quit()
sys.exit()
def run_simulation(genomes, config):
# Empty Collections For Nets and Cars
nets = []
cars = []
# For All Genomes Passed Create A New Neural Network
for i, g in genomes:
net = neat.nn.FeedForwardNetwork.create(g, config)
nets.append(net)
g.fitness = 0
cars.append(Car(car_image, 881, 800, 0))
# timeout = time.time() + 60*5 # 5 minutes from now
#timeout = time.time() + 15 # 10 seconds from now
timeout = time.time() + 15 # 15 seconds after current time
while(_running):
# End the game when the X is pressed
for event in pygame.event.get():
on_event(event)
# For Each Car see if its alive
# Get the action it should take
# Draw the car
screen.blit(background_image, (0, 0))
cars_alive = 0
for i, car in enumerate(cars):
if car.is_alive:
cars_alive += 1
genomes[i][1].fitness = car.get_fitness()
output = nets[i].activate(car.get_data())
#### This needs to be tested
choice = output.index(max(output))
if choice == 0:
car.move_forward()
elif choice == 1:
car.move_backward()
elif choice == 2:
car.move_left()
else:
car.move_right()
car.draw(screen)
pygame.display.flip()
if cars_alive==0:
break
if time.time()>timeout:
break
# if time.time()>timeout:
# break
#on_loop()
#on_render()
fpsClock.tick(FPS)
# Load Config
config_path = "config.txt"
config = neat.config.Config(neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
config_path)
# Create Population And Add Reporters
population = neat.Population(config)
population.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
population.add_reporter(stats)
# Run Simulation For A Maximum of 1000 Generations
population.run(run_simulation, 10000)
on_cleanup()
# Use this to save genomes
# https://github.com/CodeReclaimers/neat-python/blob/master/neat/checkpoint.py
# Use this to visualize the network
# https://ai.stackexchange.com/questions/13948/library-for-rendering-neural-network-neat
|
styyxofficial/NEAT-AI-Racecar
|
Moving_Car.py
|
Moving_Car.py
|
py
| 4,216 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11615238643
|
import random as r
i = 1; c = 0; p = 0; d = 0  # round counter, losses, wins, draws
while i <= 10:
    rand = r.randint(1, 10)
    # Map the random number onto the computer's move:
    # 1-2 -> snake, 3-5 -> water, 6-10 -> gun (same odds as the original).
    if rand < 3:
        comp = 's'
    elif rand < 6:
        comp = 'w'
    else:
        comp = 'g'
    inp = input('Enter "S" for snake "W" for water and "G" for gun\n')
    # NOTE(review): the comparison is lower-case only; an upper-case entry
    # falls through uncounted, same as the original — consider inp.lower().
    if comp == inp:
        print('Try again! You both Choosed the Same')
        d = d + 1
    elif (comp, inp) == ('s', 'w'):
        print('You Lost! Snake drank all the Water\nYour score is ', i)
        c = c + 1
    elif (comp, inp) == ('w', 'g'):
        print('You lost! Your Gun fell down into the water\nYour score is ', i)
        c = c + 1
    elif (comp, inp) == ('g', 's'):
        # BUGFIX: this outcome was missing in the original, which silently
        # skipped the round whenever the computer chose the gun.
        print('You Lost! Your Snake was killed by the Gun\nYour score is ', i)
        c = c + 1
    elif (comp, inp) == ('s', 'g'):
        print('You won! You killed the Snake with the gun')
        p = p + 1
    elif (comp, inp) == ('w', 's'):
        print('You won! Snake drank all the Water')
        p = p + 1
    elif (comp, inp) == ('g', 'w'):
        # BUGFIX: also missing in the original (duplicated s/w and s/g
        # branches stood where these two cases should have been).
        print('You won! The Gun fell down into the Water')
        p = p + 1
    i = i + 1
print(f'You won {p} Times Lost {c} Times and Draw {d} Times')
|
Coder-X27/Python-CWH
|
playlist exercise/exercise-6.py
|
exercise-6.py
|
py
| 1,109 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12700893052
|
n = int(input())
numbers = sorted(map(int, input().split()))
def isGood(numbersWithoutTheNum, theNum):
    """Return True if two entries of the (ascending) list sum to theNum.

    Classic two-pointer scan: walk inward from both ends, advancing the
    side that moves the pair sum toward the target.
    """
    lo, hi = 0, len(numbersWithoutTheNum) - 1
    while lo < hi:
        pair_sum = numbersWithoutTheNum[lo] + numbersWithoutTheNum[hi]
        if pair_sum == theNum:
            return True
        if pair_sum < theNum:
            lo += 1
        else:
            hi -= 1
    return False
count = 0
for i in range(len(numbers)):
if isGood(numbers[:i] + numbers[i+1:], numbers[i]): count += 1
print(count)
|
MinChoi0129/Algorithm_Problems
|
BOJ_Problems/1253.py
|
1253.py
|
py
| 555 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39525459958
|
import pyttsx3
from gtts import gTTS
import os
# Male voice: pyttsx3 drives the platform's local/offline TTS engine,
# so this part works without network access.
engine = pyttsx3.init()
engine.say("Hello there")
engine.runAndWait()
# Female voice: gTTS renders speech through Google's TTS web service
# (network required), saves it as an mp3, then plays it with mpg321.
mytext = 'You are welcome to Roles Academy Madam.'
language = 'en'
myobj = gTTS(text=mytext, lang=language, slow=False)
myobj.save("welcome.mp3")
# NOTE(review): assumes the mpg321 player is installed on the host — confirm.
os.system("mpg321 welcome.mp3")
|
adesolasamuel/EqualityMachine
|
texttospeech.py
|
texttospeech.py
|
py
| 312 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17040080901
|
import requests
import time
from bs4 import BeautifulSoup
def ProfessorLunkedInScrapper(
    ProfessorName, CollegeName
):
    """Search Google for a professor's LinkedIn profile URL.

    :param ProfessorName: full name of the professor
    :param CollegeName: institution name, used to narrow the search
    :return: the first LinkedIn profile URL found in the result page,
        or None when no LinkedIn link is present
    """
    query = 'https://google.com/search?q=site:linkedin.com/in AND "'+ProfessorName+'" AND "'+CollegeName+'"'
    response = requests.get(query)
    soup = BeautifulSoup(response.text, 'html.parser')
    for anchor in soup.find_all('a'):
        url = anchor["href"]
        if 'https://www.linkedin.com/' in url:
            # Google wraps results as "/url?q=<target>&sa=..."; strip the wrapper.
            url = url[7:url.find('&')]
            # Be polite to the endpoint before returning.
            time.sleep(1)
            # BUGFIX: return as soon as a LinkedIn link is found. The original
            # kept looping and returned the LAST anchor's href (possibly an
            # unrelated link), and raised UnboundLocalError on an empty page.
            return url
    return None
print(ProfessorLunkedInScrapper("Steven Pinker","Harvard"))
|
brucema94/Expertfinder
|
LinkedinUrl_From_Name.py
|
LinkedinUrl_From_Name.py
|
py
| 664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10424840901
|
#-*- coding: utf-8 -*-
"""
@author: Martí Congost
@contact: [email protected]
@organization: Whads/Accent SL
@since: September 2009
"""
from cocktail.events import when
from cocktail.translations import (
translations,
set_language
)
from cocktail import schema
from cocktail.persistence import datastore
from woost.models import Extension, Configuration, Controller
translations.define("ShopExtension",
ca = u"Botiga",
es = u"Tienda",
en = u"Shop"
)
translations.define("ShopExtension-plural",
ca = u"Botigues",
es = u"Tiendas",
en = u"Shops"
)
class ShopExtension(Extension):
def __init__(self, **values):
Extension.__init__(self, **values)
self.extension_author = u"Whads/Accent SL"
self.set("description",
u"""Proporciona els elements necessaris per implementar una botiga
electrònica.""",
"ca"
)
self.set("description",
u"""Proporciona los elementos necesarios para implementar una
tienda electrónica.""",
"es"
)
self.set("description",
u"""Supplies the building blocks required to implement an online
shop.""",
"en"
)
def _load(self):
from woost.extensions import shop
from woost.extensions.shop import (
strings,
product,
productcategory,
shoporder,
shoporderentry,
pricing,
basket,
userfilter
)
for module, keys in (
(product, ("Product",)),
(productcategory, ("ProductCategory",)),
(shoporder, ("ShopOrder",)),
(shoporderentry, ("ShopOrderEntry",)),
(pricing, (
"PricingPolicy",
"Discount",
"PriceOverride",
"RelativeDiscount",
"PercentageDiscount",
"FreeUnitsDiscount",
"ShippingCost",
"ShippingCostOverride",
"CumulativeShippingCost",
"Tax",
"CumulativeTax",
"PercentageTax"
)),
(basket, ("Basket",))
):
for key in keys:
setattr(shop, key, getattr(module, key))
ShopExtension.add_member(
schema.Collection("discounts",
items = schema.Reference(type = pricing.Discount),
related_end = schema.Reference()
)
)
ShopExtension.add_member(
schema.Collection("shipping_costs",
items = schema.Reference(type = pricing.ShippingCost),
related_end = schema.Reference()
)
)
ShopExtension.add_member(
schema.Collection("taxes",
items = schema.Reference(type = pricing.Tax),
related_end = schema.Reference()
)
)
from tpv import (
Currency,
Payment,
PaymentItem,
PaymentNotFoundError
)
from woost.extensions.payments import PaymentsExtension
from woost.extensions.payments.paymentgateway import PaymentGateway
from woost.extensions.payments.transactionnotifiedtrigger \
import launch_transaction_notification_triggers
payments_ext = PaymentsExtension.instance
def get_payment(self, payment_id):
order = shoporder.ShopOrder.get_instance(int(payment_id))
if order is None:
raise PaymentNotFoundError(payment_id)
payment = Payment()
payment.id = order.id
payment.amount = order.cost
payment.shop_order = order
payment.currency = Currency(payments_ext.payment_gateway.currency)
for entry in order.entries:
payment.add(PaymentItem(
reference = str(entry.product.id),
description = translations(entry.product),
units = entry.quantity,
price = entry.cost
))
return payment
PaymentGateway.get_payment = get_payment
def receive_order_payment(event):
payment = event.payment
shop_order = payment.shop_order
set_language(shop_order.language)
shop_order.status = payment.status
shop_order.gateway_parameters = payment.gateway_parameters
def commit_order_payment(event):
datastore.commit()
events = PaymentGateway.transaction_notified
pos = events.index(launch_transaction_notification_triggers)
events.insert(pos, receive_order_payment)
events.insert(pos + 2, commit_order_payment)
self.install()
def _install(self):
# Create the product controller
controller = Controller()
controller.qname = "woost.product_controller"
for language in Configuration.instance.languages:
value = translations(
"woost.extensions.shop Product controller title",
language
)
if value:
controller.set("title", value, language)
controller.python_name = \
"woost.extensions.shop.productcontroller.ProductController"
controller.insert()
|
marticongost/woost
|
woost/extensions/shop/__init__.py
|
__init__.py
|
py
| 5,539 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35423067505
|
def isValid(puzzle: list, index: tuple, guess: int) -> bool:
    """Check whether *guess* may be placed at *index* in the puzzle.

    The placement conflicts if the same digit already appears in the
    cell's row, its column, or its 3x3 box.
    """
    row, col = index
    # Row conflict?
    if guess in puzzle[row]:
        return False
    # Column conflict?
    if any(puzzle[r][col] == guess for r in range(9)):
        return False
    # 3x3 box conflict: locate the box's top-left corner via floor division.
    box_row = (row // 3) * 3
    box_col = (col // 3) * 3
    for r in range(box_row, box_row + 3):
        for c in range(box_col, box_col + 3):
            if puzzle[r][c] == guess:
                return False
    # No clash anywhere: the guess is admissible.
    return True
def solve(puzzle: list) -> bool:
    """Solve the sudoku *puzzle* in place by iterative backtracking.

    Empty cells are the ones holding 0. The solver walks the list of
    empty cells with a cursor: for the current cell it tries the next
    candidate digit; when all digits 1..9 fail it resets the cell to 0
    and steps the cursor back to retry the previous cell.

    :param puzzle: 9x9 list of lists of ints, mutated in place
    :return: True when solved, False when the puzzle has no solution
    """
    # Record the coordinates of every empty (0) cell, in scan order.
    indexesOfEmptyCells = []
    for i in range(9):
        for j in range(9):
            if puzzle[i][j] == 0:
                indexesOfEmptyCells.append((i, j))
    # BUGFIX: a puzzle with no empty cells is already solved; the original
    # indexed into the empty list and raised IndexError.
    if not indexesOfEmptyCells:
        return True
    currentIndex = 0
    while True:
        r, c = indexesOfEmptyCells[currentIndex]
        # 0 for a fresh cell; the previous guess when we backtracked here,
        # so the search resumes from the next digit rather than restarting.
        value = puzzle[r][c]
        while True:
            if value < 9:
                value += 1  # try the next candidate digit (covers the 0 case too)
            else:
                # All digits 1..9 failed: clear the cell and backtrack.
                puzzle[r][c] = 0
                currentIndex -= 1
                break
            if isValid(puzzle, indexesOfEmptyCells[currentIndex], value):
                puzzle[r][c] = value
                currentIndex += 1
                break
        if currentIndex >= len(indexesOfEmptyCells):
            return True   # every empty cell filled: solved
        elif currentIndex <= -1:
            return False  # backtracked past the first cell: unsolvable
def main():
# main function to start executing
puzzle = []
# the puzzle will be list of lists
# each list inside the puzzle will have 9 integers
# 0s are empty cells and we have to fill them with 1 -> 9
# Comment the next few lines if you already defined the puzzle------#
puzzleFile = open("puzzle.txt", "r")
lines = puzzleFile.readlines()
for line in lines:
integers = line[:-1].split(', ')
readList = []
for i in integers:
readList.append(int(i))
puzzle.append(readList)
puzzleFile.close()
#-------------------------------------------------------------------#
if solve(puzzle):
# save the solution in text file
answerFile = open("answer.txt", "w")
for row in puzzle:
for i in row:
answerFile.write(str(i) + ", ")
answerFile.write("\n")
answerFile.close()
# print the puzzle after we solve it
for i in range(9):
for value in puzzle[i]:
print(value, end=", ")
print(end='\n')
else:
print("Unsolvable puzzle")
main()
|
MartinaNaeem/SudokuSolver
|
Solve.py
|
Solve.py
|
py
| 5,616 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8317691335
|
# A script for checking triangular arbitrage opportunities (Forward + Reverse)
# Using a general formula. (Pairs NEED to match the formula)
# ETH/USDT, BTC/USDT, BTC/ETH
# a/b, c/b, c/a
import ccxt
# Insert exchange
testexchange = ccxt.kucoin({
'enableRateLimit': True,
})
# Choose whatever 3 pairs match the general formula.
# If changing pairs, Change the fetch_order_book input parameter and the print statement
def get_a_b_bid_ask():
    # Fetch the top of book for the a/b leg and return (best_bid, best_ask).
    # NOTE(review): the symbol fetched is "BTC/USDT" but the printed label
    # says "ETH/USDT", and the file header defines a/b as ETH/USDT — one of
    # the two is wrong; confirm which pair this leg is meant to be.
    order_book = testexchange.fetch_order_book("BTC/USDT")
    # Best bid/ask sit at index 0; None when that side of the book is empty.
    a_b_bid = order_book['bids'][0][0] if len (order_book['bids']) > 0 else None
    a_b_ask = order_book['asks'][0][0] if len (order_book['asks']) > 0 else None
    print(f'(Kucoin test ETH/USDT) The best bid is {a_b_bid}, the best ask is {a_b_ask}')
    return a_b_bid, a_b_ask
def get_c_b_ask_bid():
    # Fetch the top of book for the c/b leg and return (best_ask, best_bid).
    # NOTE(review): fetches "ETH/USDT" but the log label says "BTC/USDT";
    # mirrored mismatch with get_a_b_bid_ask — verify the intended symbols.
    order_book = testexchange.fetch_order_book("ETH/USDT")
    # Best ask/bid sit at index 0; None when that side of the book is empty.
    c_b_ask = order_book['asks'][0][0] if len (order_book['asks']) > 0 else None
    c_b_bid = order_book['bids'][0][0] if len (order_book['bids']) > 0 else None
    print(f'(Kucoin test BTC/USDT) The best ask is {c_b_ask}, the best bid is {c_b_bid}')
    return c_b_ask, c_b_bid
def get_c_a_bid_ask():
    # Fetch the top of book for the c/a leg and return (best_bid, best_ask).
    # NOTE(review): fetches "ETH/BTC" but the log label says "BTC/ETH" —
    # the quote direction matters for the arb formula; confirm orientation.
    order_book = testexchange.fetch_order_book("ETH/BTC")
    # Best bid/ask sit at index 0; None when that side of the book is empty.
    c_a_bid = (order_book['bids'][0][0]) if len (order_book['bids']) > 0 else None
    c_a_ask = (order_book['asks'][0][0]) if len (order_book['asks']) > 0 else None
    print(f'(Kucoin test BTC/ETH) The best bid is {c_a_bid}, the best ask is {c_a_ask}')
    return c_a_bid, c_a_ask
# General formula for the forward arb rate:
# a: the coin to be targeted for arbitrage
def calculate_forward_arb_rate(a_b_bid, c_b_ask, c_a_bid):
    """Compute the forward triangular-arbitrage rate (b -> a -> c -> b).

    A rate above 1.0 means the cycle is profitable before fees.

    :param a_b_bid: best bid of the a/b pair
    :param c_b_ask: best ask of the c/b pair
    :param c_a_bid: best bid of the c/a pair
    :return: the raw forward rate (the original returned None, which made
        the function unusable programmatically; returning the value is
        backward compatible since callers ignored the result)
    """
    forward_rate = a_b_bid * (1/c_b_ask) * c_a_bid
    print(f"The forward arbitrage percent is {(forward_rate-1) *100}%")
    return forward_rate
# General formula for the reverse arb rate:
# a: the coin to be targeted for arbitrage
def calculate_reverse_arb_rate(c_a_ask, c_b_bid, a_b_ask):
    """Compute the reverse triangular-arbitrage rate (b -> c -> a -> b).

    A rate above 1.0 means the cycle is profitable before fees.

    :param c_a_ask: best ask of the c/a pair
    :param c_b_bid: best bid of the c/b pair
    :param a_b_ask: best ask of the a/b pair
    :return: the raw reverse rate (the original returned None; returning
        the value is backward compatible since callers ignored the result)
    """
    reverse_rate = (1/c_a_ask)*(c_b_bid)*(1/a_b_ask)
    print(f"The reverse arbitrage percent is {(reverse_rate-1) *100}%")
    return reverse_rate
#ETH/USDT
a_b_bid, a_b_ask = get_a_b_bid_ask()
#BTC/USDT
c_b_ask, c_b_bid = get_c_b_ask_bid()
#BTC/ETH
c_a_bid, c_a_ask = get_c_a_bid_ask()
calculate_forward_arb_rate(a_b_bid, c_b_ask, c_a_bid)
calculate_reverse_arb_rate(c_a_ask, c_b_bid, a_b_ask)
#print(ccxt.exchanges)
|
AgenP/AgenP-triangular-arb-cex-scanner-v1
|
arb_ku_test.py
|
arb_ku_test.py
|
py
| 2,318 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7795403561
|
from data_loader import SimpleDataset
import string
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
import numpy as np
# from run_experiment import save_top_ranks, graph_top_ranks, save_model
# Outputs image features from words
class GRU_REG(nn.Module):
def __init__(self, vocab_size, loss_fn=None, hidden_layer_dim=256, embedding_space=150, use_cuda=False, n_layers=1):
super().__init__()
self.hidden_layer_dim = hidden_layer_dim
self.n_layers = n_layers
self.embedding_space = embedding_space
self.embeddings = nn.Embedding(vocab_size, embedding_space)
self.gru = nn.GRU(embedding_space, hidden_layer_dim, n_layers, batch_first=True)
self.output_layer = nn.Linear(hidden_layer_dim, 2048)
self.use_cuda = use_cuda
self.float_type = torch.FloatTensor
self.long_type = torch.LongTensor
if use_cuda:
print("Using cuda")
self.float_type = torch.cuda.FloatTensor
self.long_type = torch.cuda.LongTensor
self.cuda()
if loss_fn is None:
# self.loss_fn = torch.nn.SmoothL1Loss(size_average=True)
self.loss_fn = torch.nn.MSELoss(size_average=True)
else:
self.loss_fn = loss_fn
def forward(self, sentences, sentences_mask):
batch_size = sentences.data.shape[0]
sequence_size = sentences.data.shape[1]
embeds = self.embeddings(sentences)
packed_embedding = pack_padded_sequence(embeds.view(batch_size, -1, self.embedding_space), sentences_mask, batch_first=True)
outputs, h_gru = self.gru(packed_embedding)
## unpacking: notice that: last_out == h_gru[0,:,:]
# outputs_pad, output_lengths = pad_packed_sequence(outputs, batch_first=True)
# output_lengths = Variable(torch.LongTensor(output_lengths))
# last_out = torch.gather(outputs_pad, 1, output_lengths.view(-1, 1, 1).expand(batch_size, 1, self.hidden_layer_dim)-1).view(batch_size, self.hidden_layer_dim)
predicted_image_features = self.output_layer(F.selu(h_gru[0,:,:]))
return predicted_image_features
def format_sample_into_tensors(self, sample_batch, sample_batch_length, w2i):
# Forward and backward pass per image, text is fixed
b_index = 0
#Padding
sentence_max_length = 0
sentences_mask = []
for sample in sample_batch:
temp_sentence_length = len(sample["processed_word_inputs"])
sentences_mask.append(temp_sentence_length)
if temp_sentence_length > sentence_max_length:
sentence_max_length = temp_sentence_length
word_inputs = np.zeros((sample_batch_length, sentence_max_length)) #Padding zeros
outputs = np.zeros((sample_batch_length, 2048))
for sample in sample_batch:
for index, x in enumerate(sample["processed_word_inputs"]):
word_inputs[b_index][index] = w2i[x]
outputs[b_index] = sample["target_img_features"] #torch.from_numpy().type(self.float_type)
b_index +=1
#Sort
sorted_index = len_value_argsort(sentences_mask)
word_inputs = [word_inputs[i] for i in sorted_index]
word_inputs = torch.from_numpy(np.array(word_inputs, dtype=np.int64))
inputs = Variable(word_inputs.type(self.long_type))
outputs = [outputs[i] for i in sorted_index]
outputs = torch.from_numpy(np.array(outputs))
outputs = Variable(outputs.type(self.float_type))
sentences_mask = [sentences_mask[i] for i in sorted_index]
return inputs, sentences_mask, outputs, sorted_index
def top_rank_accuracy(self, predictions, dataset, sorted_index, top_param=3, val=False, print_failed=False):
# if self.use_cuda:
# predictions = predictions.cpu()
total_size = len(predictions)
correct = 0
correct_cos = 0
dataset = [dataset[i] for i in sorted_index]
for index, prediction in enumerate(predictions):
sample = dataset[index]
actual_slice = np.zeros(10)
prediction_slice = np.zeros(10) #loss from each image
similarity_slice = np.zeros(10)
b_index = 0
for image_id in sample['img_list']:
image_features = sample['img_features'][image_id]
image_features_tensor = Variable(
torch.from_numpy(
image_features).type(self.float_type))
image_loss_from_prediction = self.loss_fn(prediction, image_features_tensor)
image_similarity_from_prediction = F.cosine_similarity(prediction, image_features_tensor, dim=0)
prediction_slice[b_index] = 1.0 - image_loss_from_prediction.data[0]
similarity_slice[b_index] = image_similarity_from_prediction.data[0]
if image_id == sample['target_img_id']:
actual_slice[b_index] = 1.0
b_index += 1
#do argmax on n (top_param) indexes
prediction_indexes = prediction_slice.flatten().argsort()[-top_param:][::-1]
similarity_indexes = similarity_slice.flatten().argsort()[-top_param:][::-1]
if actual_slice[prediction_indexes].any():
correct += 1
if actual_slice[similarity_indexes].any():
correct_cos += 1
else:
if print_failed:
print("INCORRECT")
print(sample)
if val == True:
print(f"{correct} correct out of {total_size} using loss")
print(f"{correct_cos} correct out of {total_size} using cosine similarity")
return float(correct_cos) / total_size
def train_gru_reg_network(dataset,
validation_dataset,
loss_fn=None,
embedding_space=150,
num_epochs=15,
batch_size=32,
save_model=False,
learning_rate = 0.0001,
hidden_layer_dim=256,
use_cuda=False):
if loss_fn is None:
# loss_fn = torch.nn.SmoothL1Loss(size_average=True)
loss_fn = torch.nn.MSELoss(size_average=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=lambda x: x,
# shuffle=False)
shuffle=True)
# Actually make the model
model = GRU_REG(dataset.vocab_size, loss_fn=loss_fn,
embedding_space=embedding_space,
hidden_layer_dim=hidden_layer_dim, use_cuda=use_cuda)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
train_loss = 0.0
top_rank_1_arr = np.zeros(num_epochs)
top_rank_3_arr = np.zeros(num_epochs)
top_rank_5_arr = np.zeros(num_epochs)
for ITER in range(num_epochs):
print(f"Training Loss for {ITER} : {train_loss}")
train_loss = 0.0
count = 0
t_rank_1 = 0
for sample_batch in dataloader:
# Forward and backward pass per image, text is fixed
inputs, sentences_mask, outputs, sorted_index = model.format_sample_into_tensors(sample_batch, batch_size, dataset.w2i)
count += batch_size
prediction = model(inputs, sentences_mask)
loss = model.loss_fn(prediction, outputs)
if use_cuda:
loss = loss.cuda()
train_loss += loss.data[0]
print(f"Loss : {loss.data[0]} \t Count: {count}", end="\r")
# backward pass
model.zero_grad()
# loss.backward(retain_graph=True)
loss.backward()
# update weights
optimizer.step()
print("\n")
validation_loss, top_rank_1, top_rank_3, top_rank_5 = validate_gru_reg_model(
dataset.vocab_size,
dataset.w2i,
validation_dataset,
model=model)
top_rank_1_arr[ITER] = top_rank_1
top_rank_3_arr[ITER] = top_rank_3
top_rank_5_arr[ITER] = top_rank_5
print(f"Top 1: {top_rank_1}")
print(f"Top 3: {top_rank_3}")
print(f"Top 5: {top_rank_5}")
if save_model:
torch.save(model.state_dict(), "data/gru_reg.pt")
return model, top_rank_1_arr, top_rank_3_arr, top_rank_5_arr
def validate_gru_reg_model(vocab_size, w2i, validation_dataset, model_filename="gru_reg.pt",
model=None, embedding_space = 150, print_failed=False):
print("Evaluating model on validation set")
if model is None:
print("Loading Saved Model: " + model_filename)
model = GRU_REG(vocab_size, 2048, hidden_layer_dim=256)
if not use_cuda:
#loading a model compiled with gpu on a machine that does not have a gpu
model.load_state_dict(torch.load("data/"+model_filename, map_location=lambda storage, loc: storage))
else:
model.load_state_dict(torch.load("data/"+model_filename))
model = model.cuda()
val_dl = torch.utils.data.DataLoader(validation_dataset, batch_size=64, collate_fn=lambda x: x)
predictions = None
outputs = None
sorted_index = []
word_inputs, sentences_mask, outputs, sorted_index = model.format_sample_into_tensors(validation_dataset, len(validation_dataset), w2i)
for i in range(0, len(validation_dataset), 64):
words = word_inputs[i:i+64]
mask = sentences_mask[i:i+64]
pred = model(words, mask)
if predictions is None:
predictions = pred
else:
predictions = torch.cat((predictions, pred), dim=0)
loss = model.loss_fn(predictions, outputs)
print(f"Validation Loss : {loss.data[0]}")
top_rank_1 = model.top_rank_accuracy(predictions, validation_dataset, sorted_index, top_param=1, val=True)
top_rank_3 = model.top_rank_accuracy(predictions, validation_dataset, sorted_index, top_param=3, val=True)
top_rank_5 = model.top_rank_accuracy(predictions, validation_dataset, sorted_index, top_param=5, val=True, print_failed=print_failed)
return loss.data[0], top_rank_1, top_rank_3, top_rank_5
def len_value_argsort(seq):
    """Return the indices of `seq` ordered by their values, largest first."""
    indices = list(range(len(seq)))
    indices.sort(key=seq.__getitem__, reverse=True)
    return indices
if __name__ == "__main__":
    # Train and evaluate the GRU regression model on the "easy" IR split.
    use_cuda = torch.cuda.is_available()
    # Preprocessed datasets are cached under the given filenames.
    dataset = SimpleDataset(
        training_file="IR_train_easy.json",
        preprocessing=True,
        preprocessed_data_filename="easy_training_processed_with_questions"
    )
    validation_dataset = SimpleDataset(
        training_file="IR_val_easy.json",
        preprocessing=True,
        preprocessed_data_filename="easy_val_processed_with_questions"
    )
    # Train; returns the model plus per-epoch top-k accuracy arrays.
    model, top_rank_1_arr, \
    top_rank_3_arr, top_rank_5_arr = train_gru_reg_network(
        dataset,
        validation_dataset,
        num_epochs=50,
        batch_size=256,
        embedding_space=300,
        hidden_layer_dim=256,
        learning_rate=0.001,
        use_cuda=use_cuda)
    # Persist weights and hyperparameters for later evaluation.
    save_model("GRU_REG_EASY",
               hidden_layer_dim=256,
               embedding_space=300,
               learning_rate=0.001,
               loss_fn_name="mse",
               model=model)
    save_top_ranks(top_rank_1_arr, top_rank_3_arr, top_rank_5_arr, "./results_gru_reg_easy_with_questions.p")
    # graph_top_ranks(top_rank_1_arr, top_rank_3_arr, top_rank_5_arr)
|
geenen124/nlp_project
|
gru_regression.py
|
gru_regression.py
|
py
| 12,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40677441703
|
"""empty message
Revision ID: 37bd12af762a
Revises: fa12c537244a
Create Date: 2022-09-06 21:29:41.287889
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '37bd12af762a'
down_revision = 'fa12c537244a'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the CPI-related columns from the ``cbsds`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in (
        'antenna_beamwidth_deg',
        'cpi_digital_signature',
        'horizontal_accuracy_m',
        'antenna_model',
        'eirp_capability_dbm_mhz',
        'antenna_azimuth_deg',
        'antenna_downtilt_deg',
    ):
        op.drop_column('cbsds', column_name)
    # ### end Alembic commands ###
def downgrade():
    """Re-create the CPI-related columns removed by :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    restored_columns = (
        ('antenna_downtilt_deg', sa.INTEGER()),
        ('antenna_azimuth_deg', sa.INTEGER()),
        ('eirp_capability_dbm_mhz', sa.INTEGER()),
        ('antenna_model', sa.VARCHAR()),
        ('horizontal_accuracy_m', postgresql.DOUBLE_PRECISION(precision=53)),
        ('cpi_digital_signature', sa.TEXT()),
        ('antenna_beamwidth_deg', sa.INTEGER()),
    )
    for name, column_type in restored_columns:
        op.add_column('cbsds', sa.Column(name, column_type, autoincrement=False, nullable=True))
    # ### end Alembic commands ###
|
magma/magma
|
dp/cloud/python/magma/db_service/migrations/versions/020_remove_cpi_related_fields.py
|
020_remove_cpi_related_fields.py
|
py
| 1,747 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
11245689019
|
import torch
from torch.utils.data import DataLoader
from transformers import AdamW
from core.qa.utils import (
read_squad,
add_end_idx,
add_token_positions,
tokenizer,
model,
)
# Load SQuAD-style training triples and tokenize context/question pairs.
train_contexts, train_questions, train_answers = read_squad(
    "squad-style-answers.json"
)
train_encodings = tokenizer(
    train_contexts, train_questions, truncation=True, padding=True
)
# Annotate answers with character end indices, then map both ends to
# token positions inside the encodings (mutated in place).
add_end_idx(train_answers, train_contexts)
add_token_positions(train_encodings, train_answers)
class SquadDataset(torch.utils.data.Dataset):
    """Wraps tokenized SQuAD encodings for consumption by a DataLoader."""

    def __init__(self, encodings):
        # `encodings` behaves like a mapping of field name -> per-example
        # lists and also exposes an `input_ids` attribute (tokenizer output).
        self.encodings = encodings

    def __getitem__(self, idx):
        # Build one sample: each field becomes a tensor for example `idx`.
        sample = {}
        for field, values in self.encodings.items():
            sample[field] = torch.tensor(values[idx])
        return sample

    def __len__(self):
        return len(self.encodings.input_ids)
train_dataset = SquadDataset(train_encodings)

# Use the GPU when one is available, otherwise fall back to CPU.
device = (
    torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
)
model.to(device)
model.train()

train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
optim = AdamW(model.parameters(), lr=5e-5)

# Fine-tune for 3 epochs over the whole training set.
for epoch in range(3):
    for batch in train_loader:
        optim.zero_grad()
        input_ids = batch["input_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)
        start_positions = batch["start_positions"].to(device)
        end_positions = batch["end_positions"].to(device)
        outputs = model(
            input_ids,
            attention_mask=attention_mask,
            start_positions=start_positions,
            end_positions=end_positions,
        )
        # When start/end positions are supplied the model returns the loss
        # as the first element of its output tuple.
        loss = outputs[0]
        loss.backward()
        optim.step()

model.eval()
# Persist the fine-tuned weights for the QA service.
torch.save(model.state_dict(), "core/qa/saved_models/model_weights.pth")
|
expectopatronm/FAQ-Generation-and-SQuaD-Style-QA-Answering-System
|
core/qa/trainer.py
|
trainer.py
|
py
| 1,760 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1528634365
|
from random import randint
import cProfile
ARRAY_LENGTH = 10000
def insertion_sort(array):
    """Sort `array` in place in ascending order and return it."""
    for pos in range(1, len(array)):
        current = array[pos]
        scan = pos - 1
        # Shift every larger element one slot to the right until the
        # correct position for `current` opens up.
        while scan >= 0 and array[scan] > current:
            array[scan + 1] = array[scan]
            scan -= 1
        array[scan + 1] = current
    return array
if __name__=="__main__":
    # Profile insertion sort on a random array of ARRAY_LENGTH elements.
    array = [randint(0, ARRAY_LENGTH) for i in range(ARRAY_LENGTH)]
    # cProfile.run evaluates the string in this module's globals.
    cProfile.run('insertion_sort(array)')
|
Harsh188/SSP-KVS
|
week1/a1_hc/insertionSort.py
|
insertionSort.py
|
py
| 456 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30165255260
|
# Name: Milou Bisseling
# Studentnumber: 10427538
'''
This program converts CSV to JSON
'''
import csv
import json
import sys
inputfile = 'totalbirths.csv'
outputfile = 'totalbirths.json'
fieldnames = ("Perioden", "Enkelvoudige geboorten", "Tweelinggeboorten", "Drie- of meervoudige geboorten")

# Read all CSV rows and serialize them as a JSON array of objects keyed by
# `fieldnames`. `with` guarantees both files are closed even if reading,
# serializing, or writing raises (the original explicit close() calls were
# skipped on any exception, leaking the handles).
with open(inputfile, 'r') as csvfile:
    reader = csv.DictReader(csvfile, fieldnames)
    data = json.dumps([row for row in reader])

with open(outputfile, 'w') as jsonfile:
    jsonfile.write(data)
|
miloubis/DataProcessing
|
Homework/week-6/convertCSV2JSON.py
|
convertCSV2JSON.py
|
py
| 559 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11594315825
|
import TypeImg
from TypeImg import *
class WrapImg(TypeImage):
    """Perspective-warps the detected document region of an image.

    NOTE(review): `cv2`, `np`, `widthImg` and `heightImg` come from the
    ``from TypeImg import *`` star import — confirm against TypeImg.
    """
    def __init__(self,img,imgThreshold=None,contours=None,biggest=None,max_area=None):
        super().__init__(img,imgThreshold,contours,biggest,max_area)
        # Outputs of the warp steps, filled lazily by the Create_* methods.
        self.imgWarpColored=self.imgWarpGray =None
    def Repair_Biggest_Contour(self):
        """Reorder the 4 corner points of `self.biggest` into a fixed order:
        top-left, top-right, bottom-left, bottom-right."""
        myPoints = self.biggest.reshape((4, 2))
        myPointsNew = np.zeros((4, 1, 2), dtype=np.int32)
        add = myPoints.sum(1) #sum x+y per point
        diff = np.diff(myPoints, axis=1) #discrete difference per point
        # min sum -> top-left, min diff -> top-right,
        # max diff -> bottom-left, max sum -> bottom-right
        temp=[np.argmin(add),np.argmin(diff),np.argmax(diff),np.argmax(add)]
        for i in range(len(temp)):
            myPointsNew[i] = myPoints[temp[i]]
        self.biggest=myPointsNew
    def Create_WarpColor(self):
        """Warp the source image so the document fills a widthImg x heightImg
        canvas; stores and returns the result."""
        self.Repair_Biggest_Contour()
        pts1 = np.float32(self.biggest) # PREPARE POINTS FOR WARP
        pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
        # Returns the 3x3 perspective transform for the 4 corresponding point
        # pairs: maps the 4 source corners onto the 4 destination corners.
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        # Apply the perspective transformation to the image.
        imgWarpColored = cv2.warpPerspective(self.img, matrix, (widthImg, heightImg))
        #Remove 20 pixels
        imgWarpColored=imgWarpColored[20:imgWarpColored.shape[0] - 20, 20:imgWarpColored.shape[1] - 20]
        imgWarpColored = cv2.resize(imgWarpColored,(widthImg,heightImg))
        self.imgWarpColored= imgWarpColored
        return self.imgWarpColored
    def Create_WarpGray(self):
        """Store and return a grayscale version of the warped document."""
        imgWarpGray = cv2.cvtColor(self.imgWarpColored,cv2.COLOR_BGR2GRAY)
        imgAdaptiveThre= cv2.adaptiveThreshold(imgWarpGray, 255, 1, 1, 7, 2)
        imgAdaptiveThre = cv2.bitwise_not(imgAdaptiveThre)
        imgAdaptiveThre=cv2.medianBlur(imgAdaptiveThre,3)
        # NOTE(review): the adaptive-threshold result above is computed but
        # discarded; only the plain grayscale image is kept — confirm intended.
        self.imgWarpGray=imgWarpGray
        return self.imgWarpGray
|
tvanh239/Document-Scanner
|
ImgWarp.py
|
ImgWarp.py
|
py
| 2,041 |
python
|
vi
|
code
| 0 |
github-code
|
6
|
74370505788
|
from .api_doc import ApiDoc
from .media_type import MediaType
class Request(ApiDoc):
    """Describes an API request body for an OpenAPI document."""

    def __init__(
        self, content: MediaType, description: str = "", required: bool = False
    ):
        self.content = content
        self.description = description
        self.required = required

    def to_doc(self) -> dict:
        """Render this request as an OpenAPI requestBody mapping; the
        description key is emitted only when a description was set."""
        document = {
            "content": self.content.to_doc(),
            "required": self.required,
        }
        if self.description:
            document["description"] = self.description
        return document
__all__ = ["Request"]
|
dkraczkowski/opyapi
|
opyapi/api/request.py
|
request.py
|
py
| 525 |
python
|
en
|
code
| 6 |
github-code
|
6
|
41859484302
|
# Convert the Ensembl plants dump into FASTA-style records.
# Open the input file for reading
with open("1601456737847_Plants_Release42_triticum_dicoccoides.txt", "r") as input_file:
    # Open the output file for writing
    with open("output.fa", "w") as output_file:
        # Read the contents of the input file
        content = input_file.readlines()
        # Loop over the lines in the input file
        for i, line in enumerate(content):
            # If the line starts with ">", it is a sequence identifier
            if line.startswith(">"):
                # Write the sequence identifier to the output file
                output_file.write(line)
            # Otherwise, it is a genomic sequence
            else:
                # NOTE(review): this writes a synthetic ">sequence_<i>" header
                # for every sequence line even when an original header was
                # already emitted above, and `line` still carries its own
                # "\n" so an extra blank line is added — confirm intended.
                output_file.write(">sequence_" + str(i) + "\n")
                output_file.write(line + "\n")
|
sejyoti/Questions-of-python-for-bioinformatics
|
Problem_1_solution/convert_to_fasta.py
|
convert_to_fasta.py
|
py
| 863 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75051539388
|
from stratego.location import Location
from stratego.piece import Color, Piece, Rank
from stratego.printer import Printer
from testing_utils import build_test_board
# pylint: disable=protected-access
# noinspection PyProtectedMember
def test_printer_piece_movement():
    r""" Verify piece movement as part of the \p Printer """
    brd = build_test_board(5, 5)
    # One red rank-1 piece at (2,1), one blue rank-2 piece at (3,2);
    # only red pieces are visible.
    p = Printer(brd, {Piece(Color.RED, Rank(1), Location(2, 1))},
                {Piece(Color.BLUE, Rank(2), Location(3, 2))}, Printer.Visibility.RED)
    assert p._is_loc_empty(Location(0, 0))
    assert not p._is_loc_empty(Location(3, 2))
    # p.move_piece(Location(3, 2), Location(2, 4))
    # assert not p._is_loc_empty(Location(2, 4))
    # assert p._is_loc_empty(Location(3, 2))
    #
    # p.delete_piece(Location(2, 4))
    # assert p._is_loc_empty(Location(2, 4))
    # Deleting the blue piece must leave its square empty.
    p.delete_piece(Location(3, 2))
    assert p._is_loc_empty(Location(3, 2))
# noinspection PyProtectedMember
def test_printer_visibility():
    r""" Verify the visibility settings of the \p Printer class """
    brd = build_test_board(4, 4)
    # NONE hides both colors.
    p = Printer(brd, set(), set(), Printer.Visibility.NONE)
    assert not p._is_visible(Color.RED)
    assert not p._is_visible(Color.BLUE)
    # RED shows only red.
    p = Printer(brd, set(), set(), Printer.Visibility.RED)
    assert p._is_visible(Color.RED)
    assert not p._is_visible(Color.BLUE)
    # ALL shows both colors.
    p = Printer(brd, set(), set(), Printer.Visibility.ALL)
    assert p._is_visible(Color.RED)
    assert p._is_visible(Color.BLUE)
|
ZaydH/stratego
|
src/tests/test_printer.py
|
test_printer.py
|
py
| 1,498 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73979398907
|
#!/usr/bin/env python3
import telebot
from telebot import types
import sqlite3
sqll = [0]  # NOTE(review): appears unused in this file — confirm.
bot = telebot.TeleBot("TOKEN", parse_mode=None)
# Single shared SQLite connection; check_same_thread=False because telebot
# handlers may run on a different thread than the one that opened it.
conn = sqlite3.connect('SQLdb.db', check_same_thread=False)
cursor = conn.cursor()
def updateUserBalance (id: int, balans: int):
    """Overwrite the stored balance for user `id` with `balans`."""
    cursor.execute('UPDATE users SET balans=? WHERE id=?', (balans, id))
    conn.commit()
def createUser (id: int, user_name: str, user_login: str, balans: int):
    """Insert a new row into `users` for a first-time Telegram user."""
    cursor.execute('INSERT INTO users (id, user_name, user_login, balans) VALUES (?, ?, ?, ?)', (id, user_name, user_login, balans))
    conn.commit()
def getUserBalans (id: int):
    """Return the stored balance for user `id`.

    NOTE(review): raises TypeError if the user does not exist (fetchone()
    returns None) — callers create the user first.
    """
    balans = cursor.execute('SELECT balans FROM users WHERE id = ?', (id,))
    conn.commit()
    return balans.fetchone()[0]
def getUserName (id: int):
    """Return the stored display name for user `id` (first column only)."""
    userData = cursor.execute('SELECT user_name FROM users WHERE id = ?', (id,))
    conn.commit()
    return userData.fetchone()[0]
def selectAll_id ():
    """Return all user ids as a list of 1-tuples, as fetchall() produces."""
    all_id = cursor.execute('SELECT id FROM users')
    conn.commit()
    return all_id.fetchall()
def idINFOMRER (ID): # pull the list of IDs out of the sqlite row tuples
    """Flatten the DB id rows into a plain list of ints and return it.

    NOTE(review): the `ID` argument is only printed for debugging, never
    used for filtering — confirm that is intended.
    """
    allin = selectAll_id()
    print(allin)
    num = 0
    # usId = call.from_user.id
    print(ID, '- user')
    new_base = []
    for el in allin:
        print(num)
        print(ID, allin[num][0])
        new_base.insert(num, allin[num][0]) # one way to extract the ID from a tuple
        num = num+1
    print(new_base)
    print('==========================================')
    return new_base
def select_all_base ():
    """Return every row of the `users` table as a list of tuples."""
    all_base = cursor.execute('SELECT * FROM users')
    conn.commit()
    return all_base.fetchall()
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    """Handle /start and /help: greet the user, send a sticker, and show
    the main inline-keyboard menu."""
    bot.reply_to(message, "Этот бот призван читать круче Кендрика Ламара.")
    stick = open('sticker.webp', 'rb')
    bot.send_sticker(message.chat.id, stick)
    # NOTE(review): the ReplyKeyboardRemove markup is immediately replaced
    # by the InlineKeyboardMarkup below and never sent — confirm intended.
    markup = types.ReplyKeyboardRemove(selective=False)
    markup = types.InlineKeyboardMarkup(row_width=1)
    itembtm1 = types.InlineKeyboardButton('Правила', callback_data='inc_1')
    itembtm2 = types.InlineKeyboardButton('Пользовательское соглашение', callback_data='inc_2')
    itembtm3 = types.InlineKeyboardButton('Пополнение баланса', callback_data='inc_3')
    itembtm4 = types.InlineKeyboardButton('Личный кабинет', callback_data='inc_4')
    markup.add(itembtm1, itembtm2, itembtm3, itembtm4)
    bot.send_message(message.chat.id, "Меню:", reply_markup=markup)
"""--------------------------- Обработчик кнопок -----------------------------------------------------------"""
@bot.callback_query_handler(func=lambda call: True)
def test_callback(call):
    """Dispatch every inline-keyboard callback by its `call.data` tag.

    Each menu branch first registers the user in the DB if unseen
    (the idINFOMRER/createUser dance), then edits the menu message.
    """
    if call.message:
        if call.data == "inc_0":
            # Back to the main menu.
            markup = types.InlineKeyboardMarkup(row_width=1)
            itembtm1 = types.InlineKeyboardButton('Правила', callback_data='inc_1')
            itembtm2 = types.InlineKeyboardButton('Пользовательское соглашение', callback_data='inc_2')
            itembtm3 = types.InlineKeyboardButton('Пополнение баланса', callback_data='inc_3')
            itembtm4 = types.InlineKeyboardButton('Личный кабинет', callback_data='inc_4')
            markup.add(itembtm1, itembtm2, itembtm3, itembtm4)
            bot.edit_message_text("Меню:", call.message.chat.id, call.message.message_id, reply_markup=markup)
        elif call.data == "inc_1":
            # "Rules" screen; register the user on first contact.
            user_ID = call.from_user.id
            if user_ID in idINFOMRER(user_ID):
                print('Hello, friend')
            else:
                us_id = call.from_user.id
                us_name = call.from_user.first_name
                us_sname = call.from_user.username
                createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
                print('new user')
                # bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
            markup = types.InlineKeyboardMarkup(row_width=1)
            item1 = types.InlineKeyboardButton('Отправить отзыв', callback_data='la_2')
            item2 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
            markup.add(item1, item2)
            bot.edit_message_text("Мы тут не в игры играем, никаких правил", call.message.chat.id, call.message.message_id, reply_markup=markup)
        elif call.data == "inc_2":
            # "User agreement" screen.
            user_ID = call.from_user.id
            if user_ID in idINFOMRER(user_ID):
                print('Hello, friend')
            else:
                us_id = call.from_user.id
                us_name = call.from_user.first_name
                us_sname = call.from_user.username
                createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
                print('new user')
                # bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
            markup = types.InlineKeyboardMarkup(row_width=1)
            item_for_block_1 = types.InlineKeyboardButton('Инверсировать игру', callback_data='item_block_2')
            item_for_block_2 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
            markup.add(item_for_block_1, item_for_block_2)
            bot.edit_message_text("Можем просто заблокировать, если ты нам не понравишься", call.message.chat.id, call.message.message_id, reply_markup=markup)
        elif call.data == "inc_3":
            # "Top up balance": adds a flat 100 to the user's balance.
            user_ID = call.from_user.id
            if user_ID in idINFOMRER(user_ID):
                print('Hello, friend')
            else:
                us_id = call.from_user.id
                us_name = call.from_user.first_name
                us_sname = call.from_user.username
                createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
                # bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
                print('new user')
            us_id = call.from_user.id
            us_name = call.from_user.first_name
            us_sname = call.from_user.username
            currentUserBalance = getUserBalans(us_id)
            # print(currentUserBalance.fetchone()[0])
            currentUserBalance = currentUserBalance+100
            updateUserBalance(id=us_id, balans=currentUserBalance)
            markup = types.InlineKeyboardMarkup(row_width=1)
            balansbtn = types.InlineKeyboardButton('Назад', callback_data='inc_0')
            markup.add(balansbtn)
            bot.edit_message_text("Баланс пополнен!", call.message.chat.id, call.message.message_id, reply_markup=markup)
        elif call.data == "inc_4":
            # "Personal cabinet": shows name/balance; admins get a DB button.
            user_ID = call.from_user.id
            if user_ID in idINFOMRER(user_ID):
                print('Hello, friend')
            else:
                us_id = call.from_user.id
                us_name = call.from_user.first_name
                us_sname = call.from_user.username
                createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
                print('new user')
                # bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
            # Hard-coded admin Telegram ids.
            if user_ID == 795675764 or user_ID == 5510951877:
                markup = types.InlineKeyboardMarkup(row_width=1)
                lkbtn1 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
                lkbtn2 = types.InlineKeyboardButton('База данных', callback_data='admin_base')
                markup.add(lkbtn1, lkbtn2)
                us_id = call.from_user.id
                bot.edit_message_text('Привет, создатель!\n'+'Игрок: '+str(getUserName(us_id))+'\nБаланс: '+str(getUserBalans(us_id)),
                                      call.message.chat.id, call.message.message_id, reply_markup=markup)
            else:
                markup = types.InlineKeyboardMarkup(row_width=1)
                lkbtn1 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
                markup.add(lkbtn1)
                us_id = call.from_user.id
                bot.edit_message_text('Игрок: '+str(getUserName(us_id))+'\nБаланс: '+str(getUserBalans(us_id)),
                                      call.message.chat.id, call.message.message_id, reply_markup=markup)
        elif call.data == "la_2":
            # "Send feedback": shows the contact address.
            markup = types.InlineKeyboardMarkup(row_width=1)
            otzyv = types.InlineKeyboardButton('Назад', callback_data='inc_0')
            markup.add(otzyv)
            bot.edit_message_text("[email protected]", call.message.chat.id, call.message.message_id, reply_markup=markup)
        elif call.data == 'item_block_2':
            markup = types.InlineKeyboardMarkup(row_width=1)
            inv_1 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
            markup.add(inv_1)
            bot.edit_message_text("Инверсированно", call.message.chat.id, call.message.message_id, reply_markup=markup)
        elif call.data == 'admin_base':
            # Admin view: dump every users row into a single message.
            my_list = []
            for x in select_all_base():
                my_list.append(''.join(str(x))) #https://ru.stackoverflow.com/questions/1178388
            my_str = '\n'.join(my_list)
            # print(select_all_base())
            # for item in select_all_base():
            # print(item)
            markup = types.InlineKeyboardMarkup(row_width=1)
            button_back = types.InlineKeyboardButton('Назад', callback_data='inc_0')
            markup.add(button_back)
            bot.edit_message_text('База данных\n'+my_str, call.message.chat.id, call.message.message_id, reply_markup=markup)
"""------------------------- Обработчики текста и стикеров --------------------------------------------------"""
@bot.message_handler(content_types=['text'])
def text_up(message):
    """Echo free-form text back and nudge the user toward the buttons."""
    bot.reply_to(message, message.text)
    bot.send_message(message.chat.id, "Давай без самодеятельности. Мы для кого кнопки сделали?")
@bot.message_handler(content_types=['sticker'])
def text_down(message):
    """Acknowledge any sticker the user sends."""
    bot.send_message(message.chat.id, "Козырный стикер!")
"""--------------------------------- Start ------------------------------------------------------------------"""
bot.infinity_polling()
|
thebilderberg/telegram_bot_github
|
star_bot.py
|
star_bot.py
|
py
| 10,747 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
71916477308
|
# coding:utf-8
from PyQt5.QtWidgets import QWidget, QGridLayout, QVBoxLayout, QSizePolicy, QListWidgetItem, QAbstractItemView
from PyQt5.QtCore import pyqtSignal
from qfluentwidgets import ListWidget, PrimaryPushButton, PillPushButton, FluentIcon, InfoBar
from common.style_sheet import StyleSheet
from common.config import cfg
from globalvar.vars import set_value, get_value
from threads.pkthread import PKThread
from utils.logger import Logger
from view.frame import Frame
from view.widget.operate_toolbar import OperateToolBar
class OperateInterface(QWidget):
    """ View interface.

    Lets the user split the available workbook columns into "test" (x) and
    "independent" (y) variable lists, then run PK calculation/comparison in
    a background PKThread.
    """
    calculate_started_signal = pyqtSignal(str)
    calculate_finished_signal = pyqtSignal(str)
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.setObjectName("operate_interface")
        self.widget = QWidget(self)
        self.gridLayout = QGridLayout(self)
        self.verticalLayout = QVBoxLayout()
        self.toolBar = OperateToolBar(self.tr("Variables"), self.tr("Calculate and compare the PK values of the variables."), self)
        self.listWidget_all = ListWidget(self)
        self.listWidget_y = ListWidget(self)
        self.listWidget_x = ListWidget(self)
        self.frame_all = Frame(self)
        self.frame_x = Frame(self)
        self.frame_y = Frame(self)
        self.pushButton_y = PrimaryPushButton(self.tr("Add"), self)
        self.pushButton_all = PrimaryPushButton(self.tr("Add all"), self)
        self.pushButton_x = PrimaryPushButton(self.tr("Add"), self)
        self.variables_all = PillPushButton(self.tr("Available variables"), self, FluentIcon.TAG)
        self.variables_x = PillPushButton(self.tr("Test variables"), self, FluentIcon.TAG)
        self.variables_y = PillPushButton(self.tr("Independent variables"), self, FluentIcon.TAG)
        # Column names available in the loaded workbook.
        self.list_all = []
        # True -> the corresponding button adds items; False -> it removes.
        self.add_to_x = True
        self.add_to_y = True
        self.pkThread = PKThread()
        self.logger = Logger().get_logger()
        self.__initWidget()
        self.__initListWidgets()
        self.__initConnects()
    def __initWidget(self):
        """Lay out the toolbar, list frames, labels and buttons in the grid."""
        sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.listWidget_y.sizePolicy().hasHeightForWidth())
        self.listWidget_y.setSizePolicy(sizePolicy)
        self.frame_all.addWidget(self.listWidget_all)
        self.frame_x.addWidget(self.listWidget_x)
        self.frame_y.addWidget(self.listWidget_y)
        self.gridLayout.addWidget(self.toolBar, 0, 0, 1, 3)
        self.gridLayout.addWidget(self.frame_all, 2, 0, 4, 1)
        self.gridLayout.addWidget(self.pushButton_y, 3, 1, 1, 1)
        self.gridLayout.addWidget(self.frame_y, 3, 2, 1, 1)
        self.gridLayout.addWidget(self.variables_all, 1, 0, 1, 1)
        self.gridLayout.addWidget(self.variables_y, 1, 2, 1, 1)
        self.gridLayout.addWidget(self.variables_x, 4, 2, 1, 1)
        self.verticalLayout.addWidget(self.pushButton_all)
        self.verticalLayout.addWidget(self.pushButton_x)
        self.gridLayout.addLayout(self.verticalLayout, 5, 1, 1, 1)
        self.gridLayout.addWidget(self.frame_x, 5, 2, 1, 1)
        self.variables_all.setCheckable(False)
        self.variables_y.setCheckable(False)
        self.variables_x.setCheckable(False)
        # Allow multi-select on the "all" and "x" lists; "y" holds one item.
        self.listWidget_all.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.listWidget_x.setSelectionMode(QAbstractItemView.ExtendedSelection)
        StyleSheet.GALLERY_INTERFACE.apply(self)
    def __initListWidgets(self):
        """Disable the move buttons until data is loaded and reset lists."""
        self.pushButton_x.setEnabled(False)
        self.pushButton_y.setEnabled(False)
        self.pushButton_all.setEnabled(False)
        self.resetLists()
    def __initButtons(self):
        """Re-enable the move buttons and restore their 'Add' semantics."""
        self.pushButton_x.setEnabled(True)
        self.pushButton_y.setEnabled(True)
        self.pushButton_all.setEnabled(True)
        self.pushButton_x.setText(self.tr("Add"))
        self.pushButton_y.setText(self.tr("Add"))
        self.pushButton_all.setText(self.tr("Add all"))
        self.add_to_x = True
        self.add_to_y = True
    def enbaleAllButtons(self, enabled):
        """Enable/disable every action button (used while a task runs).

        NOTE(review): name keeps the original typo — callers use it as-is.
        """
        self.pushButton_all.setEnabled(enabled)
        self.pushButton_x.setEnabled(enabled)
        self.pushButton_y.setEnabled(enabled)
        self.toolBar.compareButton.setEnabled(enabled)
        self.toolBar.calcaulateButton.setEnabled(enabled)
        self.toolBar.resetButton.setEnabled(enabled)
    def __initConnects(self):
        """Wire widget and worker-thread signals to their handlers."""
        self.toolBar.resetButton.clicked.connect(self.resetLists)
        self.listWidget_all.clicked.connect(self.__initButtons)
        self.listWidget_x.clicked.connect(self.remove_from_x)
        self.listWidget_y.clicked.connect(self.remove_from_y)
        self.pushButton_x.clicked.connect(self.clicked_button_x)
        self.pushButton_y.clicked.connect(self.clicked_button_y)
        self.pushButton_all.clicked.connect(self.clicked_button_all)
        self.toolBar.calcaulateButton.clicked.connect(self.calculate)
        self.toolBar.compareButton.clicked.connect(self.compare)
        self.pkThread.finished_signal.connect(self.calculate_compare_finished)
        self.pkThread.error_signal.connect(self.error_occurred)
        self.pkThread.warn_signal.connect(self.warn_occurred)
        self.pkThread.success_signal.connect(self.success_occurred)
        self.pkThread.task_percentage_changed_signal.connect(self.toolBar.progressbar.setValue)
        self.pkThread.task_percentage_changed_signal.connect(self.toolBar.update_percentage)
    def resetLists(self):
        """Empty the x/y lists and refill the 'all' list from list_all."""
        self.setList(self.listWidget_x, [])
        self.setList(self.listWidget_y, [])
        self.setList(self.listWidget_all, self.list_all)
    def setList(self, list_widget, list_content):
        """Replace a list widget's items with `list_content` (stringified)."""
        while list_widget.count() > 0:
            list_widget.takeItem(0)
        for content in list_content:
            if not isinstance(content, str):
                content = str(content)
            list_widget.addItem(QListWidgetItem(content))
        list_widget.clearSelection()
    def updateList(self):
        """Refresh variables from the current workbook and clear PK caches."""
        df = get_value("current_workbook")
        self.list_all = df.columns
        self.resetLists()
        self.__initButtons()
        set_value("pk", None)
        set_value("pk_dict", {})
        set_value("pk_name_dict", {})
        set_value("pk_n", 0)
        set_value("pks", None)
        set_value("pks_dict", {})
        set_value("pks_name_dict", {})
        set_value("pks_n", 0)
        self.logger.info(self.tr("Update the available variables in the data. All the storage cache has been reset."))
        self.logger.info(self.tr("The available variables list as {0}").format(self.list_all))
    def remove_from_x(self):
        """Clicking inside the x list flips its buttons into 'Remove' mode."""
        self.pushButton_x.setText(self.tr("Remove"))
        self.pushButton_all.setText(self.tr("Remove all"))
        self.add_to_x = False
    def remove_from_y(self):
        """Clicking inside the y list flips its button into 'Remove' mode."""
        self.pushButton_y.setText(self.tr("Remove"))
        self.add_to_y = False
    def exchange_selected(self, source, destination):
        """Move the selected items from `source` to `destination`.

        Indices are removed from the highest down so earlier removals do not
        shift the remaining positions.
        """
        selected = source.selectedIndexes()
        idx = [x.row() for x in selected]
        idx.sort(reverse=True)
        for num in idx:
            it = source.takeItem(num)
            destination.addItem(it)
        source.clearSelection()
        destination.clearSelection()
    def remove_all(self, source, destination):
        """Move every item from `source` to `destination`."""
        while source.count() > 0 :
            it = source.takeItem(0)
            destination.addItem(it)
    def clicked_button_x(self):
        """Add to or remove from the x list depending on the current mode."""
        if self.add_to_x:
            self.exchange_selected(self.listWidget_all, self.listWidget_x)
        else:
            self.exchange_selected(self.listWidget_x, self.listWidget_all)
    def clicked_button_y(self):
        """Add to/remove from the y list; y accepts at most one variable."""
        if self.add_to_y:
            if self.listWidget_y.count() == 0 and len(self.listWidget_all.selectedItems()) == 1:
                self.exchange_selected(self.listWidget_all, self.listWidget_y)
            else:
                pass
        else:
            self.exchange_selected(self.listWidget_y, self.listWidget_all)
    def clicked_button_all(self):
        """Move every variable between the 'all' and x lists (mode-aware)."""
        if self.add_to_x:
            self.remove_all(self.listWidget_all, self.listWidget_x)
        else:
            self.remove_all(self.listWidget_x, self.listWidget_all)
    def collect_xy(self):
        """Publish the chosen x/y variable names and output dir as globals."""
        x = []
        y = []
        n = self.listWidget_x.count()
        for i in range(n):
            x.append(self.listWidget_x.item(i).text())
        n = self.listWidget_y.count()
        for i in range(n):
            y.append(self.listWidget_y.item(i).text())
        set_value("x_names", x)
        set_value("y_names", y)
        set_value("output_dir", cfg.get(cfg.outputFolder))
        self.logger.info(self.tr("The test variables include the following:"))
        self.logger.info(x)
        self.logger.info(self.tr("The independent variable includes the following:"))
        self.logger.info(y)
    def calculate(self):
        """Start the PK calculation task on the worker thread."""
        self.logger.info(self.tr("Start calculating PKs."))
        self.toolBar.textButton.setText(self.tr("Calculating"))
        self.enbaleAllButtons(False)
        self.collect_xy()
        self.pkThread.set_work_type("PK")
        self.pkThread.start()
        self.calculate_started_signal.emit(self.tr("Calculating PKs"))
    def compare(self):
        """Start the PK comparison task on the worker thread."""
        self.logger.info(self.tr("Start comparing PKs."))
        self.toolBar.textButton.setText(self.tr("Comparing"))
        self.enbaleAllButtons(False)
        self.collect_xy()
        self.pkThread.set_work_type("PKC")
        self.pkThread.start()
        self.calculate_started_signal.emit(self.tr("Comparing PKs"))
    def calculate_compare_finished(self):
        """Worker finished: re-enable the UI and announce the output file."""
        self.enbaleAllButtons(True)
        self.toolBar.createTopLeftInfoBar(self.tr("Success!"), self.tr("The operation success and write the results to files finished!! Please refer the output for details."), InfoBar.success, 2000)
        self.calculate_finished_signal.emit(self.tr("Open the file {0}").format(get_value("last_work_file")))
    def success_occurred(self, str):
        # NOTE(review): parameter shadows the builtin `str` (original code).
        self.toolBar.createTopLeftInfoBar(self.tr("Success!"), str, InfoBar.success)
    def error_occurred(self, str):
        """Worker reported an error: re-enable the UI and show it."""
        self.enbaleAllButtons(True)
        self.toolBar.createTopRightInfoBar(self.tr("Error!"), str,InfoBar.error)
    def warn_occurred(self, str):
        """Worker reported a warning: show it without stopping the task."""
        self.toolBar.createTopRightInfoBar(self.tr("Warn!"), str,InfoBar.warning)
|
xfz329/pk4adi_calculator
|
view/operate_interface.py
|
operate_interface.py
|
py
| 10,462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18128239741
|
from django.conf.urls import url, include
from . import views
#app_name = 'dmlpolls'
# URL routes for the polls app (legacy Django url() regex syntax).
urlpatterns = [
    url(r'^$', views.index, name='poll_index'),
    url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='poll_detail'),
    url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='poll_results'),
    url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='poll_vote'),
    url(r'^polls/', include('dmlpolls.urls')),
    # url(r'^admin/', admin.site.urls),
]
|
Yobmod/dmlsite
|
dmlpolls/urls_old.py
|
urls_old.py
|
py
| 457 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26529448636
|
# "New Employee" (BOJ 1946), greedy solution
import sys
input = sys.stdin.readline

t = int(input())
for _ in range(t):
    grade = []
    n = int(input())
    cnt = n
    # Each applicant: (document rank, interview rank); lower is better.
    for _ in range(n):
        paper, interview = map(int, input().split())
        grade.append([paper, interview])
    # Sort by document rank; an applicant is rejected only when someone with
    # a better document rank also has a better interview rank seen so far.
    grade = sorted(grade, key= lambda a: a[0])
    temp = grade[0][1]
    for i in range(1, n):
        if grade[i][1] > temp:
            cnt -= 1
        else:
            # New best interview rank among better-documented applicants.
            temp = grade[i][1]
    print(cnt)
|
Jaeheon-So/baekjoon-algorithm
|
그리디/1946.py
|
1946.py
|
py
| 468 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26327096081
|
import money
from exceptions import *
from tkinter import *
from datetime import *
from decimal import Decimal
import math
import re
from tkinter import messagebox
from dateutil.rrule import *
from parkomat_interface import ParkomatInterface
class ParkomatFunctions:
""" Klasa realizująca funkcjonalności programu """
__global_date = datetime.now() # zmienna przechowująca aktualnie ustawioną datę w parkomacie
__departure_time = __global_date # zmienna przechowująca czas wyjazdu
__previous_time = 0 # zmienna przechowująca poprzednio zwrócony czas w sekundach dla wrzuconych pieniędzy dla metody seconds_for_money
__inserted_money_by_user = Decimal("0.00") # zmienna przechowująca liczbę wrzuconych pieniędzy przez aktualnego użytkownika
def __init__(self):
self.__window = Tk() # Toplevel widget reprezentujący główne okno programu
self.__interface = ParkomatInterface(self.window) # interfejs programu
self.__moneyHolder = self.interface.moneyHolder # instancja przechowywacza pieniędzy
self.buttons_onclick() # metoda dodające wydarzenia do przycisków
self.actual_date() # metoda aktualizująca datę parkomatu oraz wyjazdu
@property
def window(self):
""" Getter zwracający Toplevel widget reprezentujący główne okno programu """
return self.__window
@window.setter
def window(self, window):
""" Setter ustawiający Toplevel widget reprezentujący główne okno programu """
self.__window = window
@property
def interface(self):
""" Getter zwracający odwołanie do interfejsu programu """
return self.__interface
@interface.setter
def interface(self, interface):
""" Setter ustawiające odwołanie do interfejsu programu """
self.__interface = interface
@property
def moneyHolder(self):
""" Getter zwracający przechowywacz pieniędzy """
return self.__moneyHolder
@moneyHolder.setter
def moneyHolder(self, moneyHolder):
""" Setter ustawiający przechowywacz pieniędzy """
self.__moneyHolder = moneyHolder
@property
def global_date(self):
""" Getter zwracający aktualnie ustawioną datę w parkomacie """
return self.__global_date
@property
def departure_time(self):
""" Getter zwracający datę wyjazdu """
return self.__departure_time
@global_date.setter
def global_date(self, global_date):
""" Setter ustawiający aktualną datę w parkomacie """
self.__global_date = global_date
@departure_time.setter
def departure_time(self, departure_time):
""" Setter ustawiający datę wyjazdu """
self.__departure_time = departure_time
@property
def previous_time(self):
""" Getter zwracający poprzednio dodany czas """
return self.__previous_time
@previous_time.setter
def previous_time(self, previous_time):
""" SSetter ustawiający poprzednio dodany czas """
self.__previous_time = previous_time
@property
def inserted_money_by_user(self):
""" Getter zwracający poprzednio dodany czas """
return self.__inserted_money_by_user
    @inserted_money_by_user.setter
    def inserted_money_by_user(self, inserted_money_by_user):
        """Set the total amount of money inserted by the user."""
        self.__inserted_money_by_user = inserted_money_by_user
    def main_loop(self):
        """Run the Tk event loop; blocks until the main window is closed."""
        self.window.mainloop()
def buttons_onclick(self):
""" Metoda obsługująca wydarzenia, gdy przycisk zostanie wciśnięty """
self.interface.window.button1.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[0]))
self.interface.window.button2.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[1]))
self.interface.window.button3.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[2]))
self.interface.window.button4.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[3]))
self.interface.window.button5.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[4]))
self.interface.window.button6.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[5]))
self.interface.window.button7.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[6]))
self.interface.window.button8.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[7]))
self.interface.window.button9.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[8]))
self.interface.window.button10.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[9]))
self.interface.window.button11.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[10]))
self.interface.window.button12.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[11]))
self.bind_button_confirm(lambda event: self.button_confirm(event))
self.bind_change_actual_time(lambda event: self.button_change_actual_time(event))
    def actual_date(self):
        """Tick the parkomat clock and the departure time by one second.

        Re-schedules itself every second via Tk's after(), so both labels
        update in real time.
        """
        self.global_date = self.global_date + timedelta(seconds=1)  # advance the parkomat clock
        self.departure_time = self.departure_time + timedelta(seconds=1)  # keep the departure time in step
        # display the current parkomat date
        self.interface.window.actual_date_label.config(text=self.global_date.strftime("%Y-%m-%d %H:%M"))
        # display the departure date
        self.interface.window.date_of_departure_label.config(text=self.departure_time.strftime("%Y-%m-%d %H:%M"))
        # repeat actual_date() every second
        self.interface.window.actual_date_label.after(1000, self.actual_date)
    def button_confirm(self, event):
        """Handle a click on the confirm button; show any error in a dialog."""
        try:
            self.confirm()
        except Exception as err:
            messagebox.showerror("Błąd", str(err))
    def button_change_actual_time(self, event):
        """Handle a click on the change-time button; show any error in a dialog."""
        try:
            self.change_actual_time()
        except Exception as err:
            messagebox.showerror("Błąd", str(err))
    def bind_button_confirm(self, f):
        """Bind callback *f* to the confirm button's release event."""
        self.interface.window.confirm_button.bind("<ButtonRelease-1>", f)
    def bind_change_actual_time(self, f):
        """Bind callback *f* to the change-time button's release event."""
        self.interface.window.change_actual_date_button.bind("<ButtonRelease-1>", f)
def change_actual_time(self):
""" Metoda ustawiająca godzinę wprowadzoną przez użytkownika """
# sprawdzenie, czy wpisano poprawnie czas
if self.inserted_money_by_user != Decimal("0.00"):
messagebox.showerror("Error", "Nie można zmienić czasu, gdy wrzucono już pieniądze.")
else:
if self.interface.window.hour_entry.get().isdigit() is False or self.interface.window.minute_entry.get().isdigit() is False or int(
self.interface.window.hour_entry.get()) < 0 or int(
self.interface.window.hour_entry.get()) > 23 or int(
self.interface.window.minute_entry.get()) < 0 or int(
self.interface.window.minute_entry.get()) > 59:
raise IncorrectTime("Wpisano niepoprawny czas.")
else:
h1 = int(self.interface.window.hour_entry.get()) # pobranie godziny z entry i przekonwertowanie na int
m1 = int(self.interface.window.minute_entry.get()) # pobranie minuty z entry i przekonwertowanie na int
self.global_date = self.global_date.replace(hour=h1, minute=m1) # ustawienie nowego czasy dla parkomatu
self.departure_time = self.global_date # przypisanie aktualnej daty parkomatu do daty wyjazdu
self.previous_time = 0 # reset wcześniejszego czasu, gdy zmieniamy czas
    def add_number_of_money(self, value: Decimal):
        """Insert the chosen number of coins/bills of denomination *value*.

        Reads the count from the count entry, adds that many Coin/Bill objects
        to the money holder, then refreshes the inserted-sum label and the
        departure date (always, via ``finally``).
        """
        number_of_money = self.interface.window.number_of_money_entry.get()  # user-entered count
        try:
            # NOTE(review): the first operand compares the *widget* (not its text)
            # to "" — harmless because isdigit() already rejects "", but it was
            # likely meant to be number_of_money == ""; confirm before changing.
            if self.interface.window.number_of_money_entry == "" or number_of_money.isdigit() is False:  # missing or non-numeric count
                raise IncorrectValueError
            else:
                number_of_money = int(number_of_money)
                if value < 10:  # denominations below 10 are coins
                    for x in range(number_of_money):
                        self.moneyHolder.add_money(money.Coin(value))  # store the coin in the holder
                        self.inserted_money_by_user += value  # track the total inserted by the user
                else:  # denominations of 10 and above are bills
                    for x in range(number_of_money):
                        self.moneyHolder.add_money(money.Bill(value))  # store the bill in the holder
                        self.inserted_money_by_user += value  # track the total inserted by the user
        except IncorrectValueError:  # invalid count typed in
            messagebox.showerror("Error", "Wpisz poprawną liczbę pieniędzy którą chcesz wrzucić.")
        except TooMuchCoinsError as err:  # denomination limit exceeded
            messagebox.showerror("Error", str(err))
        finally:  # always refresh the inserted amount and the departure date
            self.interface.window.sum_of_money_label.config(text=self.inserted_money_by_user)  # inserted amount
            self.departure_date()  # recompute the departure date
def input_validator(self):
""" Metoda walidująca numer rejestracyjny """
# porównanie numeru do wyrażenia regularnego
pattern = re.match("^[A-Z0-9]+$", self.interface.window.registration_number_entry.get())
if self.interface.window.registration_number_entry.get() == "": # błąd jeśli nie wpisano numeru rejestracyjnego
raise RegistrationNumberError("Wpisz numer rejestracyjny.")
elif bool(pattern) is False: # błąd, jeśli numer nie pasuje do wyrażenia regularnego
raise RegistrationNumberError("Numer rejestracyjny może składać się tylko z wielkich liter od A do Z i cyfr")
    def confirmation_of_payment(self):
        """Show a message box confirming the purchase (plate, purchase time, departure time)."""
        messagebox.showinfo("Potwierdzenie opłacenia parkingu",
                            "Numer rejestracyjny: {} \n\nCzas zakupu: {} \n\nTermin wyjazdu: {}"
                            .format(self.interface.window.registration_number_entry.get(),
                                    self.interface.window.actual_date_label.cget("text"),
                                    self.interface.window.date_of_departure_label.cget("text")))
    def rules(self, departure_date, seconds):
        """Paid-parking-zone rules: paid Mon-Fri between 08:00 and 20:00.

        Builds a dateutil rrule that steps *seconds* at a time through
        paid-zone times only and returns the first occurrence strictly after
        *departure_date* — i.e. the moment the purchased time runs out,
        skipping nights and weekends.
        """
        rr = rrule(SECONDLY, byweekday=(MO, TU, WE, TH, FR), byhour=(8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
                   dtstart=departure_date, interval=seconds)
        return rr.after(departure_date)
    def seconds_for_money(self, amount: Decimal):
        """Convert *amount* (PLN) into newly purchased parking seconds.

        Tariff: 1st hour 2 PLN, 2nd hour 4 PLN, each further hour 5 PLN;
        partial amounts buy a proportional slice of the current hour. Because
        the user may insert money in several steps, only the seconds bought
        *since the previous call* are returned; ``previous_time`` keeps the
        running total across calls.
        """
        total_seconds = 0  # running total of purchased seconds
        grosz_1h = 60 * 60 / 200  # seconds per grosz during the first hour (2 PLN = 200 gr)
        grosz_2h = 60 * 60 / 400  # seconds per grosz during the second hour (4 PLN = 400 gr)
        grosz_xh = 60 * 60 / 500  # seconds per grosz from the third hour on (5 PLN = 500 gr)
        if total_seconds < 3600:  # first hour (always true here: total starts at 0)
            if amount >= 2:  # enough for the whole first hour
                total_seconds += 3600  # add one hour
                amount -= 2  # subtract the first hour's price
            else:
                seconds = amount * 100 * Decimal(grosz_1h)  # proportional slice of the hour
                total_seconds += seconds  # add the computed seconds to the total
                amount = 0  # money used up
        if total_seconds < 7200:  # second hour
            if amount >= 4:  # enough for the whole second hour
                total_seconds += 3600  # add one hour
                amount -= 4  # subtract the second hour's price
            else:
                seconds = amount * 100 * Decimal(grosz_2h)  # proportional slice of the hour
                total_seconds += seconds  # add the computed seconds to the total
                amount = 0  # money used up
        while amount > 0:  # spend whatever is left on hour three and later
            if total_seconds >= 7200:  # two full hours already purchased
                if amount >= 5:  # enough for at least one more full hour
                    total_seconds += math.floor((amount / 5)) * 60 * 60  # add the whole hours covered
                    amount -= 5 * math.floor((amount / 5))  # subtract their price
                else:
                    seconds = amount * 100 * Decimal(grosz_xh)  # proportional slice of the hour
                    total_seconds += seconds  # add the computed seconds to the total
                    amount = 0  # money used up
        temp_seconds = total_seconds
        total_seconds -= self.previous_time  # keep only the newly purchased part
        self.previous_time = temp_seconds  # remember the new running total
        return int(total_seconds)
    def departure_date(self):
        """Recompute and display the departure date for the money inserted so far.

        Paid time only accrues Mon-Fri 08:00-20:00: if the current departure
        time falls on a weekend or a free hour, it is first moved forward to
        the next paid period, then advanced through paid-zone time via rules().
        """
        free_hours = [x for x in range(0, 24) if x not in range(8, 20)]  # hours outside the paid zone
        amount = self.inserted_money_by_user  # total money inserted so far
        seconds_paid = self.seconds_for_money(amount)  # newly purchased seconds
        if seconds_paid > 0:  # only move the clock if new time was bought
            if self.departure_time.weekday() == 5:  # Saturday -> jump to Monday 08:00
                self.departure_time = self.departure_time.replace(hour=8, minute=00) + timedelta(days=2)
            elif self.departure_time.weekday() == 6:  # Sunday -> jump to Monday 08:00
                self.departure_time = self.departure_time.replace(hour=8, minute=00) + timedelta(days=1)
            elif self.departure_time.hour in free_hours:  # weekday but currently a free hour
                if self.departure_time.hour > 19:  # after 19:59 -> next day 08:00
                    self.departure_time = self.departure_time.replace(hour=8, minute=00) + timedelta(days=1)
                else:  # between 00:00 and 08:00 -> today 08:00
                    self.departure_time = self.departure_time.replace(hour=8, minute=00)
            # advance through paid-zone time only, then refresh the label
            self.departure_time = self.rules(self.departure_time, seconds_paid)
            self.interface.window.date_of_departure_label.config(text=self.departure_time.strftime("%Y-%m-%d %H:%M"))
def confirm(self):
""" Funkcja włączająca się przy kliknięciu przycisku 'Zatwierdź' """
self.input_validator() # sprawdzenie walidacji numeru rejestracyjnego
if self.inserted_money_by_user > 0: # wykonanie, jeśli suma monet jest większa od 0
self.confirmation_of_payment() # wykonanie funkcji potwierdzającej płatność
self.reset() # po potwierdzeniu rezerwacji reset parkomatu do stanu początkowego
else: # w przeciwnym wypadku wyświetl błąd
raise NotInsertedMoney("Nie wrzucono pieniędzy.")
    def reset(self):
        """Restore the parkomat UI and internal state to the initial configuration."""
        self.interface.window.registration_number_entry.delete(0, "end")  # clear the registration number
        self.interface.window.sum_of_money_label.config(text="0.00")  # clear the inserted-money label
        self.interface.window.date_of_departure_label.config(text="")  # clear the departure-date label
        self.global_date = datetime.now()  # reset the parkomat clock to "now"
        self.departure_time = self.global_date  # departure time restarts from "now"
        self.interface.window.number_of_money_entry.delete(0, "end")  # clear the count entry
        self.interface.window.number_of_money_entry.insert(0, "1")  # restore the default count
        self.interface.window.hour_entry.delete(0, "end")  # clear the hour entry
        self.interface.window.hour_entry.insert(0, "0")  # restore the default hour
        self.interface.window.minute_entry.delete(0, "end")  # clear the minute entry
        self.interface.window.minute_entry.insert(0, "0")  # restore the default minute
        self.previous_time = 0  # forget previously purchased seconds
        self.inserted_money_by_user = Decimal("0.00")  # forget money inserted by the user
|
DZietara/parkomat
|
main/parkomat_functions.py
|
parkomat_functions.py
|
py
| 18,188 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
37446576919
|
from subprocess import call
from metux.util.specobject import SpecError
def rsync_ssh(username, hostname, source, path):
    """Mirror *source* to ``hostname:/path`` over rsync+ssh.

    Returns True when the rsync process exits with status 0.
    """
    command = [
        'rsync',
        '--progress',
        '--rsh=ssh',
        '-r',
        source + "/",
        username + "@" + hostname + ":/" + path,
    ]
    return call(command) == 0
def run_upload(param):
    """Dispatch an upload described by *param* to the matching transport.

    Only 'rsync+ssh' is supported; any other protocol raises SpecError.
    """
    if param['protocol'] != 'rsync+ssh':
        raise SpecError("unknown upload protocol: %s" % param['protocol'])
    return rsync_ssh(param['username'], param['hostname'], param['source'], param['path'])
|
LibreZimbra/librezimbra
|
deb_autopkg/util/upload.py
|
upload.py
|
py
| 517 |
python
|
en
|
code
| 4 |
github-code
|
6
|
38463867669
|
from django.urls import path, re_path
from app import views
# URL routing table for the app.
urlpatterns = [
    # Matches any html file - to be used for gentella
    # Avoid using your .html in your resources.
    # Or create a separate django app.
    re_path(r'^.*\.html', views.gentella_html, name='index'),
    # The home page
    path('', views.index, name='index'),
    path('outliers', views.outliers, name='outliers'),
    # NOTE(review): three spellings of the same route are registered below with
    # the same name; the backslash variant ('data_fresh\\') looks like a typo
    # for the trailing-slash form — confirm whether it is ever requested.
    path('data_fresh', views.data_fresh, name="data_fresh"),
    path('data_fresh\\', views.data_fresh, name="data_fresh"),
    path('data_fresh/', views.data_fresh, name="data_fresh"),
    path('data_fresh_tem_table', views.data_table_tem_fresh_with_pred, name='data_table_tem_fresh'),
    path('data_live_tem', views.live_tem, name='data_live_tem'),
]
|
pennng/Django-gentella
|
app/urls.py
|
urls.py
|
py
| 752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72532296829
|
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
from contextlib import contextmanager
from typing import Any, AsyncIterable, Callable, Iterator
from unittest.mock import AsyncMock
import pytest
from faker import Faker
from fastapi import FastAPI, status
from httpx import HTTPError, Response
from models_library.sidecar_volumes import VolumeCategory, VolumeStatus
from pydantic import AnyHttpUrl, parse_obj_as
from pytest import LogCaptureFixture, MonkeyPatch
from pytest_mock import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
from simcore_service_director_v2.core.settings import AppSettings
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._errors import (
ClientHttpError,
UnexpectedStatusError,
)
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import (
SidecarsClient,
get_sidecars_client,
)
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import (
setup as api_client_setup,
)
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import (
shutdown,
)
from simcore_service_director_v2.modules.dynamic_sidecar.errors import (
EntrypointContainerNotFoundError,
)
@pytest.fixture
def dynamic_sidecar_endpoint() -> AnyHttpUrl:
    """Endpoint at a non-resolvable host so every real request fails/times out."""
    return parse_obj_as(AnyHttpUrl, "http://missing-host:1111")
@pytest.fixture
def mock_env(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> None:
    """Blank out external-service env vars so AppSettings can be built in isolation."""
    monkeypatch.setenv("S3_ACCESS_KEY", "")
    monkeypatch.setenv("S3_SECRET_KEY", "")
    monkeypatch.setenv("S3_BUCKET_NAME", "")
    monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO")
    monkeypatch.setenv("POSTGRES_HOST", "")
    monkeypatch.setenv("POSTGRES_USER", "")
    monkeypatch.setenv("POSTGRES_PASSWORD", "")
    monkeypatch.setenv("POSTGRES_DB", "")
    # reduce number of retries to make more reliable
    monkeypatch.setenv("DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S", "3")
    monkeypatch.setenv("S3_ENDPOINT", "")
@pytest.fixture
async def sidecars_client(
    mock_env: EnvVarsDict, faker: Faker
) -> AsyncIterable[SidecarsClient]:
    """Yield a SidecarsClient wired to a minimal FastAPI app; tears the client down afterwards."""
    app = FastAPI()
    app.state.settings = AppSettings.create_from_envs()
    # WARNING: pytest gets confused with 'setup', use instead alias 'api_client_setup'
    await api_client_setup(app)
    yield get_sidecars_client(app, faker.uuid4())
    await shutdown(app)
@pytest.fixture
def request_timeout() -> int:
    """Request timeout in seconds used by the raise_request_timeout fixture."""
    # below refer to exponential wait step duration
    return 1 + 2
@pytest.fixture
def raise_request_timeout(
    monkeypatch: MonkeyPatch, request_timeout: int, mock_env: EnvVarsDict
) -> None:
    """Shrink the client's request timeout so timeout code paths trigger quickly."""
    monkeypatch.setenv("DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S", f"{request_timeout}")
@pytest.fixture
def get_patched_client(
    sidecars_client: SidecarsClient, mocker: MockerFixture
) -> Callable:
    """Context-manager factory: patch one ThinSidecarsClient method, yield the client."""

    @contextmanager
    def wrapper(
        method: str,
        return_value: Any | None = None,
        side_effect: Callable | None = None,
    ) -> Iterator[SidecarsClient]:
        # Patch the thin (transport) layer so the public client under test
        # sees the mocked response/exception.
        mocker.patch(
            f"simcore_service_director_v2.modules.dynamic_sidecar.api_client._thin.ThinSidecarsClient.{method}",
            return_value=return_value,
            side_effect=side_effect,
        )
        yield sidecars_client

    return wrapper
@pytest.mark.parametrize("is_healthy", [True, False])
@pytest.mark.parametrize("with_retry", [True, False])
async def test_is_healthy(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    is_healthy: bool,
    with_retry: bool,
) -> None:
    """is_healthy mirrors the 'is_healthy' flag returned by the health endpoint."""
    mock_json = {"is_healthy": is_healthy}
    with get_patched_client(
        "get_health" if with_retry else "get_health_no_retry",
        return_value=Response(status_code=status.HTTP_200_OK, json=mock_json),
    ) as client:
        assert (
            await client.is_healthy(dynamic_sidecar_endpoint, with_retry=with_retry)
            == is_healthy
        )
async def test_is_healthy_times_out(
    raise_request_timeout: None,
    sidecars_client: SidecarsClient,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    caplog_info_level: LogCaptureFixture,
) -> None:
    """A timed-out health check reports unhealthy and logs each retry/timeout."""
    assert await sidecars_client.is_healthy(dynamic_sidecar_endpoint) is False
    # check if the right amount of messages was captured by the logs
    unexpected_counter = 1
    for log_message in caplog_info_level.messages:
        if log_message.startswith("Retrying"):
            assert "as it raised" in log_message
            continue
        assert log_message.startswith(f"Request timed-out after {unexpected_counter}")
        unexpected_counter += 1
@pytest.mark.parametrize(
    "side_effect",
    [
        pytest.param(
            UnexpectedStatusError(
                Response(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    content="some mocked error",
                    request=AsyncMock(),
                ),
                status.HTTP_200_OK,
            ),
            id="UnexpectedStatusError",
        ),
        pytest.param(
            ClientHttpError(HTTPError("another mocked error")), id="HTTPError"
        ),
    ],
)
async def test_is_healthy_api_error(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    side_effect: Exception,
) -> None:
    """is_healthy must report False (not raise) when the thin client errors out."""
    with get_patched_client(
        "get_health",
        side_effect=side_effect,
    ) as client:
        # 'is False' rather than '== False': asserts both value and type (E712)
        assert await client.is_healthy(dynamic_sidecar_endpoint) is False
async def test_containers_inspect(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """containers_inspect passes through the JSON body returned by the sidecar."""
    expected = {"ok": "data"}
    mocked_response = Response(status_code=status.HTTP_200_OK, json=expected)
    with get_patched_client("get_containers", return_value=mocked_response) as client:
        result = await client.containers_inspect(dynamic_sidecar_endpoint)
        assert result == expected
async def test_containers_docker_status_api_ok(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """containers_docker_status passes through the sidecar's JSON on success."""
    mock_json = {"container_id": {"ok": "data"}}
    with get_patched_client(
        "get_containers",
        return_value=Response(status_code=status.HTTP_200_OK, json=mock_json),
    ) as client:
        assert (
            await client.containers_docker_status(dynamic_sidecar_endpoint) == mock_json
        )
async def test_containers_docker_status_api_error(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """containers_docker_status degrades to an empty dict when the thin client raises."""
    with get_patched_client(
        "get_containers",
        side_effect=UnexpectedStatusError(
            Response(
                status_code=status.HTTP_400_BAD_REQUEST,
                content="some mocked error",
                request=AsyncMock(),
            ),
            status.HTTP_200_OK,
        ),
    ) as client:
        assert await client.containers_docker_status(dynamic_sidecar_endpoint) == {}
async def test_disable_service_ports_io(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """disable_service_ports_io completes (returns None) on a 204 from the sidecar."""
    no_content = Response(status_code=status.HTTP_204_NO_CONTENT)
    with get_patched_client("patch_containers_ports_io", return_value=no_content) as client:
        result = await client.disable_service_ports_io(dynamic_sidecar_endpoint)
        assert result is None
async def test_enable_service_ports_io(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """enable_service_ports_io completes (returns None) on a 204 from the sidecar."""
    with get_patched_client(
        "patch_containers_ports_io",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert await client.enable_service_ports_io(dynamic_sidecar_endpoint) is None
@pytest.mark.parametrize("outputs_labels", [{}, {"ok": "data"}])
async def test_service_outputs_create_dirs(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    outputs_labels: dict[str, Any],
) -> None:
    """service_outputs_create_dirs completes for both empty and non-empty labels."""
    with get_patched_client(
        "post_containers_ports_outputs_dirs",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            await client.service_outputs_create_dirs(
                dynamic_sidecar_endpoint, outputs_labels
            )
            is None
        )
@pytest.mark.parametrize("dynamic_sidecar_network_name", ["a_test_network"])
async def test_get_entrypoint_container_name_ok(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    dynamic_sidecar_network_name: str,
) -> None:
    """get_entrypoint_container_name returns the container name from the sidecar."""
    with get_patched_client(
        "get_containers_name",
        return_value=Response(status_code=status.HTTP_200_OK, json="a_test_container"),
    ) as client:
        assert (
            await client.get_entrypoint_container_name(
                dynamic_sidecar_endpoint, dynamic_sidecar_network_name
            )
            == "a_test_container"
        )
@pytest.mark.parametrize("dynamic_sidecar_network_name", ["a_test_network"])
async def test_get_entrypoint_container_name_api_not_found(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    dynamic_sidecar_network_name: str,
) -> None:
    """A 404 from the sidecar is mapped to EntrypointContainerNotFoundError."""
    with get_patched_client(
        "get_containers_name",
        side_effect=UnexpectedStatusError(
            Response(status_code=status.HTTP_404_NOT_FOUND, request=AsyncMock()),
            status.HTTP_204_NO_CONTENT,
        ),
    ) as client:
        with pytest.raises(EntrypointContainerNotFoundError):
            await client.get_entrypoint_container_name(
                dynamic_sidecar_endpoint, dynamic_sidecar_network_name
            )
@pytest.mark.parametrize("network_aliases", [[], ["an-alias"], ["alias-1", "alias-2"]])
async def test_attach_container_to_network(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    network_aliases: list[str],
) -> None:
    """_attach_container_to_network completes for any number of network aliases."""
    with get_patched_client(
        "post_containers_networks_attach",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            # pylint:disable=protected-access
            await client._attach_container_to_network(
                dynamic_sidecar_endpoint,
                container_id="container_id",
                network_id="network_id",
                network_aliases=network_aliases,
            )
            is None
        )
async def test_detach_container_from_network(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
) -> None:
    """_detach_container_from_network completes on a 204 from the sidecar."""
    with get_patched_client(
        "post_containers_networks_detach",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            # pylint:disable=protected-access
            await client._detach_container_from_network(
                dynamic_sidecar_endpoint,
                container_id="container_id",
                network_id="network_id",
            )
            is None
        )
@pytest.mark.parametrize("volume_category", VolumeCategory)
@pytest.mark.parametrize("volume_status", VolumeStatus)
async def test_update_volume_state(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    volume_category: VolumeCategory,
    volume_status: VolumeStatus,
) -> None:
    """update_volume_state completes for every category/status combination."""
    with get_patched_client(
        "put_volumes",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            await client.update_volume_state(
                dynamic_sidecar_endpoint,
                volume_category=volume_category,
                volume_status=volume_status,
            )
            is None
        )
|
ITISFoundation/osparc-simcore
|
services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_public.py
|
test_modules_dynamic_sidecar_client_api_public.py
|
py
| 11,510 |
python
|
en
|
code
| 35 |
github-code
|
6
|
41163745392
|
import os.path
from compiler import compile_file
from interpreter import VirtualMachine
import sys
if __name__ == '__main__':
    # Require at least one CLI argument: a source file path or --version.
    if len(sys.argv) < 2:
        raise RuntimeError("Not enough argument to start the compiler")
    if sys.argv[1] == "--version":
        print("0.2.0-dev")
    else:
        # Compile the given file, then execute its bytecode on a fresh VM.
        bytecode = compile_file(sys.argv[1])
        machine = VirtualMachine()
        machine.load_bytecode(bytecode, path=os.path.abspath(sys.argv[1]))
        machine.init_eval_loop()
|
blitpxl/soil
|
soil/soil.py
|
soil.py
|
py
| 509 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73500508668
|
import argparse
import copy
import csv
import os
import warnings
import numpy
import torch
import tqdm
import yaml
from torch.utils import data
from nets import nn
from utils import util
from utils.dataset import Dataset
warnings.filterwarnings("ignore")
def learning_rate(args, params):
    """Build the LambdaLR multiplier: linear decay from 1.0 at epoch 0 down to
    params['lrf'] at epoch args.epochs."""

    def schedule(epoch):
        # Linear interpolation between 1.0 and the final factor params['lrf'].
        fraction = epoch / args.epochs
        return (1 - fraction) * (1.0 - params['lrf']) + params['lrf']

    return schedule
def train(args, params):
    """Train a YOLOv5-nano model on COCO train2017.

    args: CLI namespace (input_size, batch_size, epochs, world_size, local_rank).
    params: hyper-parameter dict loaded from YAML (lr0, lrf, momentum, loss
    gains, warmup settings, class names).

    Supports single-GPU and DDP (world_size > 1); only rank 0 keeps the EMA,
    runs validation, writes weights/step.csv and saves checkpoints.
    """
    # Model
    model = nn.yolo_v5_n(len(params['names'].values())).cuda()
    # Model attributes
    params['box'] *= 3 / model.head.nl
    params['obj'] *= (args.input_size / 640) ** 2 * 3 / model.head.nl
    params['cls'] *= len(params['names'].values()) / 80 * 3 / model.head.nl
    # Optimizer
    accumulate = max(round(64 / (args.batch_size * args.world_size)), 1)
    params['weight_decay'] *= args.batch_size * args.world_size * accumulate / 64
    # Three parameter groups: p[0] conv/linear weights (decayed),
    # p[1] batch-norm weights (no decay), p[2] biases (no decay).
    p = [], [], []
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, torch.nn.Parameter):
            p[2].append(v.bias)
        if isinstance(v, torch.nn.BatchNorm2d):
            p[1].append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, torch.nn.Parameter):
            p[0].append(v.weight)
    optimizer = torch.optim.SGD(p[2], params['lr0'], params['momentum'], nesterov=True)
    optimizer.add_param_group({'params': p[0], 'weight_decay': params['weight_decay']})
    optimizer.add_param_group({'params': p[1]})
    del p
    # Scheduler
    lr = learning_rate(args, params)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr, last_epoch=-1)
    # EMA
    ema = util.EMA(model) if args.local_rank == 0 else None
    filenames = []
    with open('../Dataset/COCO/train2017.txt') as reader:
        for filename in reader.readlines():
            filename = filename.rstrip().split('/')[-1]
            filenames.append('../Dataset/COCO/images/train2017/' + filename)
    dataset = Dataset(filenames, args.input_size, params, True)
    if args.world_size <= 1:
        sampler = None
    else:
        sampler = data.distributed.DistributedSampler(dataset)
    loader = data.DataLoader(dataset, args.batch_size, sampler is None, sampler,
                             num_workers=8, pin_memory=True, collate_fn=Dataset.collate_fn)
    util.check_anchors(dataset, model, args, params)
    model.half().float()  # pre-reduce anchor precision
    if args.world_size > 1:
        # DDP mode
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(module=model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    # Start training
    best = 0
    num_batch = len(loader)
    amp_scale = torch.cuda.amp.GradScaler()
    criterion = util.ComputeLoss(model, params)
    num_warmup = max(round(params['warmup_epochs'] * num_batch), 100)
    with open('weights/step.csv', 'w') as f:
        # NOTE(review): `writer` is only defined on rank 0, matching its use
        # below, which is also guarded by local_rank == 0.
        if args.local_rank == 0:
            writer = csv.DictWriter(f, fieldnames=['epoch', 'mAP@50', 'mAP'])
            writer.writeheader()
        for epoch in range(args.epochs):
            model.train()
            m_loss = util.AverageMeter()
            if args.world_size > 1:
                sampler.set_epoch(epoch)
            p_bar = enumerate(loader)
            if args.local_rank == 0:
                print(('\n' + '%10s' * 3) % ('epoch', 'memory', 'loss'))
            if args.local_rank == 0:
                p_bar = tqdm.tqdm(p_bar, total=num_batch)  # progress bar
            optimizer.zero_grad()
            for i, (samples, targets, _) in p_bar:
                x = i + num_batch * epoch  # number of iterations
                samples = samples.cuda().float() / 255
                targets = targets.cuda()
                # Warmup: linearly ramp lr (and momentum) over the first
                # num_warmup iterations, per param group.
                if x <= num_warmup:
                    xp = [0, num_warmup]
                    fp = [1, 64 / (args.batch_size * args.world_size)]
                    accumulate = max(1, numpy.interp(x, xp, fp).round())
                    for j, y in enumerate(optimizer.param_groups):
                        if j == 0:
                            fp = [params['warmup_bias_lr'], y['initial_lr'] * lr(epoch)]
                        else:
                            fp = [0.0, y['initial_lr'] * lr(epoch)]
                        y['lr'] = numpy.interp(x, xp, fp)
                        if 'momentum' in y:
                            fp = [params['warmup_momentum'], params['momentum']]
                            y['momentum'] = numpy.interp(x, xp, fp)
                # Forward
                with torch.cuda.amp.autocast():
                    outputs = model(samples)  # forward
                    loss = criterion(outputs, targets)
                m_loss.update(loss.item(), samples.size(0))
                loss *= args.batch_size  # loss scaled by batch_size
                loss *= args.world_size  # gradient averaged between devices in DDP mode
                # Backward
                amp_scale.scale(loss).backward()
                # Optimize: step only every `accumulate` iterations (gradient accumulation)
                if x % accumulate == 0:
                    amp_scale.step(optimizer)  # optimizer.step
                    amp_scale.update()
                    optimizer.zero_grad()
                    if ema:
                        ema.update(model)
                # Log
                if args.local_rank == 0:
                    memory = f'{torch.cuda.memory_reserved() / 1E9:.3g}G'  # (GB)
                    s = ('%10s' * 2 + '%10.4g') % (f'{epoch + 1}/{args.epochs}', memory, m_loss.avg)
                    p_bar.set_description(s)
            # Scheduler
            scheduler.step()
            if args.local_rank == 0:
                # mAP
                last = test(args, params, ema.ema)
                writer.writerow({'mAP': str(f'{last[1]:.3f}'),
                                 'epoch': str(epoch + 1).zfill(3),
                                 'mAP@50': str(f'{last[0]:.3f}')})
                f.flush()
                # Update best mAP
                if last[1] > best:
                    best = last[1]
                # Save model
                ckpt = {'model': copy.deepcopy(ema.ema).half()}
                # Save last, best and delete
                torch.save(ckpt, './weights/last.pt')
                if best == last[1]:
                    torch.save(ckpt, './weights/best.pt')
                del ckpt
    if args.local_rank == 0:
        util.strip_optimizer('./weights/best.pt')  # strip optimizers
        util.strip_optimizer('./weights/last.pt')  # strip optimizers
    torch.cuda.empty_cache()
@torch.no_grad()
def test(args, params, model=None):
    """Evaluate a model on COCO val2017 and return (mAP@50, mAP@50:95).

    When *model* is None the best checkpoint is loaded from disk; otherwise
    the given model (e.g. the EMA copy during training) is evaluated in fp16.
    """
    filenames = []
    with open('../Dataset/COCO/val2017.txt') as reader:
        for filename in reader.readlines():
            filename = filename.rstrip().split('/')[-1]
            filenames.append('../Dataset/COCO/images/val2017/' + filename)
    dataset = Dataset(filenames, args.input_size, params, False)
    loader = data.DataLoader(dataset, 4, False, num_workers=4,
                             pin_memory=True, collate_fn=Dataset.collate_fn)
    if model is None:
        model = torch.load('./weights/best.pt', map_location='cuda')['model']
        model.half()
    # Configure
    model.eval()
    iou_v = torch.linspace(0.5, 0.95, 10).cuda()  # iou vector for [email protected]:0.95
    n_iou = iou_v.numel()
    m_pre = 0.
    m_rec = 0.
    map50 = 0.
    mean_ap = 0.
    metrics = []
    p_bar = tqdm.tqdm(loader, desc=('%10s' * 3) % ('precision', 'recall', 'mAP'))
    for samples, targets, shapes in p_bar:
        samples = samples.cuda()
        targets = targets.cuda()
        samples = samples.half()  # uint8 to fp16/32
        samples = samples / 255.0  # 0 - 255 to 0.0 - 1.0
        _, _, h, w = samples.shape  # batch size, channels, height, width
        # Inference
        outputs = model(samples)
        # NMS
        targets[:, 2:] *= torch.tensor((w, h, w, h)).cuda()  # to pixels
        outputs = util.non_max_suppression(outputs, 0.001, 0.6)
        # Metrics: per image, build a (num_detections, n_iou) boolean matrix of
        # correct predictions at each IoU threshold.
        for i, output in enumerate(outputs):
            labels = targets[targets[:, 0] == i, 1:]
            correct = torch.zeros(output.shape[0], n_iou, dtype=torch.bool).cuda()
            if output.shape[0] == 0:
                if labels.shape[0]:
                    metrics.append((correct, *torch.zeros((3, 0)).cuda()))
                continue
            detections = output.clone()
            util.scale(detections[:, :4], samples[i].shape[1:], shapes[i][0], shapes[i][1])
            # Evaluate
            if labels.shape[0]:
                tbox = labels[:, 1:5].clone()  # target boxes (xywh -> xyxy below)
                tbox[:, 0] = labels[:, 1] - labels[:, 3] / 2  # top left x
                tbox[:, 1] = labels[:, 2] - labels[:, 4] / 2  # top left y
                tbox[:, 2] = labels[:, 1] + labels[:, 3] / 2  # bottom right x
                tbox[:, 3] = labels[:, 2] + labels[:, 4] / 2  # bottom right y
                util.scale(tbox, samples[i].shape[1:], shapes[i][0], shapes[i][1])
                correct = numpy.zeros((detections.shape[0], iou_v.shape[0]))
                correct = correct.astype(bool)
                t_tensor = torch.cat((labels[:, 0:1], tbox), 1)
                iou = util.box_iou(t_tensor[:, 1:], detections[:, :4])
                correct_class = t_tensor[:, 0:1] == detections[:, 5]
                for j in range(len(iou_v)):
                    x = torch.where((iou >= iou_v[j]) & correct_class)
                    if x[0].shape[0]:
                        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1)
                        matches = matches.cpu().numpy()
                        if x[0].shape[0] > 1:
                            # keep each label/detection matched at most once,
                            # preferring the highest-IoU pairs
                            matches = matches[matches[:, 2].argsort()[::-1]]
                            matches = matches[numpy.unique(matches[:, 1], return_index=True)[1]]
                            matches = matches[numpy.unique(matches[:, 0], return_index=True)[1]]
                        correct[matches[:, 1].astype(int), j] = True
                correct = torch.tensor(correct, dtype=torch.bool, device=iou_v.device)
            metrics.append((correct, output[:, 4], output[:, 5], labels[:, 0]))
    # Compute metrics
    metrics = [torch.cat(x, 0).cpu().numpy() for x in zip(*metrics)]  # to numpy
    if len(metrics) and metrics[0].any():
        tp, fp, m_pre, m_rec, map50, mean_ap = util.compute_ap(*metrics)
    # Print results
    print('%10.3g' * 3 % (m_pre, m_rec, mean_ap))
    # Return results
    model.float()  # for training
    return map50, mean_ap
@torch.no_grad()
def demo(args):
    """Run live webcam inference and draw detection boxes on each frame.

    Loads ./weights/best.pt onto the GPU in half precision, reads frames
    from camera 0, letterboxes them to args.input_size, runs the model and
    NMS, and shows the annotated stream until 'q' is pressed.
    """
    import cv2
    # Load the trained checkpoint and switch to half-precision eval mode.
    model = torch.load('./weights/best.pt', map_location='cuda')['model'].float()
    model.half()
    model.eval()
    camera = cv2.VideoCapture(0)
    # Check if camera opened successfully
    if not camera.isOpened():
        print("Error opening video stream or file")
    # Read until video is completed
    while camera.isOpened():
        # Capture frame-by-frame
        success, frame = camera.read()
        if success:
            image = frame.copy()
            shape = image.shape[:2]
            # First resize so the longest side equals args.input_size.
            r = args.input_size / max(shape[0], shape[1])
            if r != 1:
                resample = cv2.INTER_LINEAR if r > 1 else cv2.INTER_AREA
                image = cv2.resize(image, dsize=(int(shape[1] * r), int(shape[0] * r)), interpolation=resample)
            height, width = image.shape[:2]
            # Scale ratio (new / old)
            r = min(1.0, args.input_size / height, args.input_size / width)
            # Compute padding needed to letterbox to a square network input.
            pad = int(round(width * r)), int(round(height * r))
            w = (args.input_size - pad[0]) / 2
            h = (args.input_size - pad[1]) / 2
            if (width, height) != pad:  # resize
                image = cv2.resize(image, pad, interpolation=cv2.INTER_LINEAR)
            top, bottom = int(round(h - 0.1)), int(round(h + 0.1))
            left, right = int(round(w - 0.1)), int(round(w + 0.1))
            image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT)  # add border
            # Convert HWC to CHW, BGR to RGB
            x = image.transpose((2, 0, 1))[::-1]
            x = numpy.ascontiguousarray(x)
            x = torch.from_numpy(x)
            x = x.unsqueeze(dim=0)
            x = x.cuda()
            x = x.half()
            x = x / 255
            # Inference
            outputs = model(x)
            # Non-maximum suppression (confidence 0.25, IoU 0.7)
            outputs = util.non_max_suppression(outputs, 0.25, 0.7)
            for output in outputs:
                # Undo letterbox padding/scaling to map boxes back to frame coords.
                output[:, [0, 2]] -= w  # x padding
                output[:, [1, 3]] -= h  # y padding
                output[:, :4] /= min(height / shape[0], width / shape[1])
                output[:, 0].clamp_(0, shape[1])  # x1
                output[:, 1].clamp_(0, shape[0])  # y1
                output[:, 2].clamp_(0, shape[1])  # x2
                output[:, 3].clamp_(0, shape[0])  # y2
                for box in output:
                    box = box.cpu().numpy()
                    x1, y1, x2, y2, score, index = box
                    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
            cv2.imshow('Frame', frame)
            # Press Q on keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        # Break the loop when no frame could be read
        else:
            break
    # When everything done, release the video capture object
    camera.release()
    # Closes all the frames
    cv2.destroyAllWindows()
def main():
    """Entry point: parse CLI flags, set up the (possibly distributed) runtime,
    then dispatch to train / test / demo as requested."""
    parser = argparse.ArgumentParser()
    # Integer-valued options, registered uniformly.
    for flag, default in (('--input-size', 640),
                          ('--batch-size', 32),
                          ('--local_rank', 0),
                          ('--epochs', 600)):
        parser.add_argument(flag, default=default, type=int)
    # Mode switches.
    for flag in ('--train', '--test', '--demo'):
        parser.add_argument(flag, action='store_true')
    args = parser.parse_args()

    # Rank and world size come from the launcher's environment, not the CLI.
    args.local_rank = int(os.getenv('LOCAL_RANK', 0))
    args.world_size = int(os.getenv('WORLD_SIZE', 1))
    if args.world_size > 1:
        torch.cuda.set_device(device=args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
    # Only rank 0 creates the checkpoint directory.
    if args.local_rank == 0 and not os.path.exists('weights'):
        os.makedirs('weights')

    util.setup_seed()
    util.setup_multi_processes()

    # Hyper-parameters live in utils/args.yaml.
    with open(os.path.join('utils', 'args.yaml'), errors='ignore') as f:
        params = yaml.safe_load(f)

    if args.train:
        train(args, params)
    if args.test:
        test(args, params)
    if args.demo:
        demo(args)


if __name__ == "__main__":
    main()
|
jahongir7174/YOLOv5-pt
|
main.py
|
main.py
|
py
| 14,941 |
python
|
en
|
code
| 4 |
github-code
|
6
|
36464449846
|
#!/usr/bin/env python
"""
Usage:
python detectface.py -i image.jpg
"""
from argparse import ArgumentParser
import boto3
from pprint import pprint
import sys
def get_client(endpoint):
    """Build a boto3 Rekognition client.

    BUG FIX: the ``endpoint`` argument (wired to -e/--endpoint) was accepted
    but silently ignored. It is now forwarded as ``endpoint_url`` when
    provided; the default behaviour (endpoint is None) is unchanged.
    """
    if endpoint:
        return boto3.client('rekognition', endpoint_url=endpoint)
    return boto3.client('rekognition')
def get_args():
    """Parse the command-line options (-e/--endpoint, -i/--image)."""
    arg_parser = ArgumentParser(description='Detect faces')
    for short_flag, long_flag in (('-e', '--endpoint'), ('-i', '--image')):
        arg_parser.add_argument(short_flag, long_flag)
    return arg_parser.parse_args()
if __name__ == '__main__':
    args = get_args()
    # The image argument is mandatory; bail out with usage help otherwise.
    if (args.image is None):
        print('''
Usage:
  python detectface.py --help
        ''')
        sys.exit(-1)
    client = get_client(args.endpoint)
    # Send the raw image bytes to Rekognition and request all face attributes.
    with open(args.image, 'rb') as image:
        response = client.detect_faces(Image={'Bytes': image.read()},Attributes=['ALL'])
    pprint(response)
|
wwwins/aws-utils
|
detectface.py
|
detectface.py
|
py
| 830 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6849069742
|
# Third-party libraries: xlrd / xlwt / xlutils
import xlrd
import xlwt
import xlutils
# Open the workbook (the path is a runtime string and must stay as-is).
wb = xlrd.open_workbook('table/阿里巴巴2020年股票数据.xls')
# List the names of all worksheets:
# print(wb.sheet_names())
# sheet1 = wb.sheet_names('表格1') # look up a worksheet by name
sheet = wb.sheet_by_index(0) # get a worksheet by its index
# Row and column counts of the worksheet:
# print(sheet.nrows,sheet.ncols)
# Walk every cell and print the sheet as a tab-separated table.
for i in range(sheet.nrows):
    for j in range(sheet.ncols):
        value1 = sheet.cell(i,j).value
        if i >0 :
            # Data rows: numeric cells formatted with two decimals.
            print(f"{value1:.2f}", end='\t')
        else:
            # Header row: print values as-is.
            print(value1, end=' \t')
    print()
    # print(sheet.row(i)[j].value , end=' ')
|
twlaladelala/pytest
|
办公自动化.py
|
办公自动化.py
|
py
| 732 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
20289463636
|
import math
def isLeapYear(x):
    """Return True when year *x* is a leap year under the Gregorian rules."""
    # Divisible by 4, except century years, unless also divisible by 400.
    return x % 4 == 0 and (x % 100 != 0 or x % 400 == 0)
# Prompt for a year and report whether it is a leap year.
try:
    y = int(input("Enter number to test: "))
    # Negative years are rejected the same way as non-numeric input.
    if (y < 0):
        raise ValueError()
except ValueError:
    print("Input is not an positive integer.")
else:
    if (isLeapYear(y)):
        print("{} is a leap year.".format(y))
    else:
        print("{} is not a leap year.".format(y))
finally:
    # Keep the console window open until the user confirms.
    input("Press ENTER to exit")
|
steffebr/cs362HW3
|
leapyearV2.py
|
leapyearV2.py
|
py
| 461 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72592324348
|
import configparser
import os
from core.constants import CONFIG_FILE_PATH
__all__ = [
'BotConfigurator'
]
class BotConfigurator(object):
    """Application configuration singleton.

    Reads settings from the INI file at CONFIG_FILE_PATH when it exists,
    otherwise assembles an equivalent mapping from environment variables.
    """

    def __new__(cls, *args, **kwargs):
        # Lazily create and reuse a single shared instance.
        if not hasattr(cls, 'instance'):
            cls.instance = super(BotConfigurator, cls).__new__(cls)
        return cls.instance

    def __init__(self):
        path = os.path.join(CONFIG_FILE_PATH)
        if os.path.isfile(path):
            parser = configparser.ConfigParser()
            parser.read(path)
            self._app_config = parser
        else:
            # No config file: build the same section layout from the environment.
            telegram_section = {
                'token': os.environ['TELEGRAM_TOKEN'],
                'proxy_url': os.environ.get('PROXY_URL', None)
            }
            dialog_flow_section = {
                'token': os.environ['DIALOG_FLOW_TOKEN'],
                'lang': os.environ['DIALOG_FLOW_LANG'],
                'session_id': os.environ['DIALOG_FLOW_SESSION_ID']
            }
            self._app_config = {
                'telegram': telegram_section,
                'dialog_flow': dialog_flow_section
            }

    @property
    def app_config(self):
        """The parsed ConfigParser, or the env-derived dict fallback."""
        return self._app_config
|
balandin-nick/smart-telegram-bot
|
core/configurator.py
|
configurator.py
|
py
| 1,174 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31622032831
|
import os
from flask import Flask, flash, request, redirect, url_for, send_from_directory, jsonify
from werkzeug.utils import secure_filename
from excel import Excel
from translator import Translator
# Folder uploaded .txt files are written to, and the folder generated
# .xlsx files are served from.
UPLOAD_FOLDER = './text_files'
DOWNLOAD_FOLDER = './excel_files'
ALLOWED_EXTENSIONS = {'txt'}
# Serve the pre-built React client as static files from the app root.
app = Flask(__name__, static_folder='./client/build', static_url_path='/')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
# Secret key comes from the environment (None when unset).
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/')
def index():
    # Serve the client application's entry point from the static build folder.
    return app.send_static_file('index.html')
@app.route('/api/', methods=["POST"])
def create_terminology():
    """Create a bilingual terminology Excel file from an uploaded .txt file.

    Expects a multipart upload under the 'file' key. Returns the generated
    .xlsx as an attachment, or a (message, 400) response on invalid input.
    """
    # BUG FIX: the original returned `code, msg` tuples, i.e. (400, "..."),
    # but Flask view tuples are (body, status) — the status code ended up
    # in the body and the message in the status slot.
    if 'file' not in request.files:
        return "file not in request", 400
    file = request.files['file']
    if file.filename == '':
        return "file name empty", 400
    if not (file and allowed_file(file.filename)):
        # BUG FIX: this path previously fell through and returned None.
        return "Please provide a valid '.txt' file.", 400

    filename = secure_filename(file.filename)
    upload_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(upload_path)
    # BUG FIX: close the handle instead of leaking it (was open(...).read()).
    with open(upload_path, 'r') as f:
        text = f.read()
    if len(text) == 0:
        os.remove(upload_path)  # don't leave the empty upload behind
        return "File is empty, please provide a valid '.txt' file.", 400

    # Detect the language from a short prefix, then tokenize and translate.
    translator = Translator(text)
    translator.detect_source_language(" ".join(text.split(" ")[:5]))
    translator.set_stop_words()
    tokenized_text = translator.tokenize_text()
    words = translator.parse_words_alpha(tokenized_text)
    terms = translator.translate(words)
    source_language, target_language = translator.get_source_and_target()

    # Write the terminology workbook and stream it back as a download.
    base_name = os.path.splitext(filename)[0]
    terminology_excel = Excel(base_name)
    terminology_excel.write_worksheet(terms, source_language, target_language)
    terminology_excel.close_workbook()
    response = send_from_directory(
        app.config['DOWNLOAD_FOLDER'], f'{base_name}.xlsx', as_attachment=True)
    # Clean up both the uploaded text and the generated workbook.
    os.remove(upload_path)
    os.remove(os.path.join(app.config['DOWNLOAD_FOLDER'], f'{base_name}.xlsx'))
    return response
if __name__ == "__main__":
    # Bind on all interfaces; honour the platform-provided PORT (default 5000).
    app.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
|
atakanzen/terminolator.web
|
app.py
|
app.py
|
py
| 2,579 |
python
|
en
|
code
| 2 |
github-code
|
6
|
19880677789
|
# cython:language_level=3
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
# Module-level parameters read (and rebound by main()) through `global` in fx():
deltas = 2000  # target area difference — presumably in pixel^2, TODO confirm
a = 215  # chord offset from the circle centre
r = 290  # circle radius
def fx(n):
    """Residual between the target difference `deltas` and the segment-area
    difference produced by shifting the chord by +/- n around offset `a`."""
    global deltas, a, r
    upper_segment = area(a - n, r)
    lower_segment = area(a + n, r)
    return deltas - (upper_segment - lower_segment)
def fx1(n):
    """Segment-area difference for the fixed chord offset 215 and radius 290
    (same model as fx, but with hard-coded parameters and no target offset)."""
    chord, radius = 215, 290
    return area(chord - n, radius) - area(chord + n, radius)
def area(x, r):
    """Area of the circular segment of radius *r* cut off by a chord at
    (signed) distance *x* from the centre; requires |x| <= r."""
    sector_term = (r ** 2) * np.arccos(x / r)
    triangle_term = x * np.sqrt(r ** 2 - x ** 2)
    return sector_term - triangle_term
# x = [x * 2.77 for x in range(11)]
# x1 = [x * 0.02 for x in range(0, 11)]
# y = []
# for i in range(11):
# a = x[i]
# y.append(fx1(a)+4909)
#
# print(y)
#
# plt.plot(x1,y)
# plt.xlabel('offset_pixel')
# plt.ylabel('pixel_different')
# x_ticks = np.arange(0,0.23,0.02)
# plt.xticks(x_ticks)
# plt.show()
# plt.savefig('plot_fig.jpg')
# print(fx1(2.56))
# print(area(0, 290))
# print(fx(0))
def main(x1, x2, x3):
    """Solve fx(n) = 0 by bisection on [0, 30] and print the root.

    The arguments are written into the module-level globals consumed by fx():
    x1 -> deltas (target area difference), x2 -> a (chord offset),
    x3 -> r (circle radius).
    """
    global deltas, a, r
    deltas = x1
    a = x2
    r = x3
    root = optimize.bisect(fx, 0, 30)
    print(root)
    # Convert the root from pixels to physical units
    # (0.0024 per pixel — scale factor TODO confirm).
    root = root * 0.0024
    print(root)
|
rzyfrank/Internship
|
cal_deltaArea.py
|
cal_deltaArea.py
|
py
| 1,006 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23515104620
|
# BOJ 2675: for each of T test cases "R S", repeat every character of S
# exactly R times and print the resulting string.
T = int(input())
RS_list = []
for _ in range(T):
    R, S = input().split()
    # Repeat each character of S exactly R times, preserving order.
    temp = [i*int(R) for i in list(S)]
    RS_list.append(''.join(temp))
# Print all answers after every test case has been read.
for i in RS_list:
    print(i)
|
Soohee410/Algorithm-in-Python
|
BOJ/Bronze/2675.py
|
2675.py
|
py
| 182 |
python
|
en
|
code
| 6 |
github-code
|
6
|
7796423185
|
# ############################################################################ #
# This is part of the PPLT project. #
# #
# Copyright (C) 2003-2006 Hannes Matuschek <[email protected]> #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public #
# License as published by the Free Software Foundation; either #
# version 2.1 of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #
# ############################################################################ #
import S7Register;
import S7Message;
import pyDCPU;
import struct;
class Object(pyDCPU.MasterObject):
    """Master module for Siemens Simatic S7 PLCs.

    Translates PPLT value reads and writes into S7 protocol messages and
    decodes the PLC's responses.
    """

    def setup(self):
        """Initialise the module; always reports success."""
        self.Logger.info("Setup Simatic-S7")
        return True

    def connect(self, AddrStr):
        """Parse *AddrStr* into an S7 register and return a typed ValueConnection.

        Raises pyDCPU.ModuleError for malformed or unsupported addresses.
        """
        RegAddr = S7Register.S7Register(AddrStr)
        self.Logger.debug("MkConnect for %s(%s)" % (AddrStr, str(RegAddr)))
        if not RegAddr:
            raise pyDCPU.ModuleError("Can't split Address \"%s\", my wrong format or not supported address range." % AddrStr)
        # Map S7 register types to PPLT type names:
        if RegAddr.GetType() in (S7Register.S7Bit,):
            Type = pyDCPU.TBool
        elif RegAddr.GetType() in (S7Register.S7Byte, S7Register.S7Word, S7Register.S7DWord):
            Type = pyDCPU.TInteger
        else:
            # Robustness fix: an unmatched type previously left `Type` unbound
            # and crashed below with a confusing UnboundLocalError.
            raise pyDCPU.ModuleError("Unsupported S7 register type for address \"%s\"." % AddrStr)
        Connection = pyDCPU.ValueConnection(self, Type, RegAddr)
        return Connection

    def write(self, Connection, Value):
        """Write *Value* to the register behind *Connection*.

        Returns True on success; raises pyDCPU.ModuleError when the PLC
        reports an error.
        """
        if Value is None:
            raise pyDCPU.ModuleError("No value given to write!")
        Data = Value2Raw(Value, Connection.Address.GetType())  # convert Value to raw byte-data
        DataSet = S7Message.S7DataSet(Data, Connection.Address)  # assemble data set
        CommSet = S7Message.S7CommandSet(S7Message.S7FunctionWrite, Connection.Address)  # assemble command set
        Message = S7Message.S7Message(CommSet, DataSet)  # assemble message
        self.Logger.debug("Will send a %i byte message." % len(Message.GetString()))
        self.Connection.flush()
        # Send the command message, then read and decode the response:
        self.Connection.write(Message.GetString())
        MsgString = self.Connection.read_seq()
        Message = S7Message.S7Message(MsgString=MsgString)
        CommSet = Message.GetCommandSet()
        DataSet = Message.GetDataSet()
        # NOTE(review): GetErrCode() is compared against int 0xff here but fed
        # to ord() below, which needs a 1-char string — confirm its return type.
        if DataSet.GetErrCode() == 0xff:
            return True  # 0xff means: all ok.
        self.Logger.error("S7 returned error code: %x" % ord(DataSet.GetErrCode()))
        raise pyDCPU.ModuleError("S7 returned error code: %x" % ord(DataSet.GetErrCode()))

    def read(self, Connection, Len=None):
        """Read and decode the current value of the register behind *Connection*.

        *Len* is accepted for interface compatibility and ignored.
        """
        self.Logger.debug("Read... Function %s, Addr: %s" % (str(S7Message.S7FunctionRead), str(Connection.Address)))
        CommSet = S7Message.S7CommandSet(S7Message.S7FunctionRead, Connection.Address)
        Message = S7Message.S7Message(CommSet)
        self.Connection.flush()
        self.Connection.write(Message.GetString())
        MsgString = self.Connection.read_seq()
        Message = S7Message.S7Message(MsgString=MsgString)
        CommSet = Message.GetCommandSet()
        DataSet = Message.GetDataSet()
        if DataSet.GetErrCode() == 0xff:
            return Raw2Value(DataSet.GetDataString(), Connection.Address.GetType())
        # BUG FIX: the original error branch called the non-existent name
        # DataSetGetErrorCode() (missing dots); the accessor is DataSet.GetErrCode().
        self.Logger.error("S7 returned error-code: %x" % ord(DataSet.GetErrCode()))
        raise pyDCPU.ModuleError("S7 returned error-code: %x" % ord(DataSet.GetErrCode()))
def Raw2Value(Data, Type):
    """Decode the raw byte string *Data* into an integer for the S7 register *Type*.

    NOTE(review): the struct formats use native byte order; S7 is big-endian
    on the wire — confirm ordering is handled elsewhere.
    """
    if Type in (S7Register.S7Bit, S7Register.S7Byte):
        fmt = "B"
    elif Type == S7Register.S7Word:
        fmt = "H"
    elif Type == S7Register.S7DWord:
        fmt = "I"
    (value,) = struct.unpack(fmt, Data)
    return value
def Value2Raw(Value, Type):
    """Encode integer *Value* into the raw byte string for the S7 register *Type*.

    Returns None for unknown types (same fall-through as the original).
    """
    if Type in (S7Register.S7Bit, S7Register.S7Byte):
        fmt = "B"
    elif Type == S7Register.S7Word:
        fmt = "H"
    elif Type == S7Register.S7DWord:
        fmt = "I"
    else:
        return None
    return struct.pack(fmt, int(Value))
|
BackupTheBerlios/pplt-svn
|
PPLT/Modules/Core/Master/Device/S7/SimaticS7.py
|
SimaticS7.py
|
py
| 5,265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13864700033
|
#!/usr/bin/python3.5
# -*-coding:Utf-8 -*
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.decomposition import PCA
from matplotlib.ticker import FormatStrFormatter
from RBFKernelPCA import RBF_Kernel_PCA
# Build a two-half-moons dataset and project it onto 1 principal
# component with the RBF kernel PCA implementation.
values, classes = make_moons(n_samples = 100, random_state = 123)
kernel_pca_values, lambdas = RBF_Kernel_PCA(values, gamma = 15, n_components = 1)
# Treat the 26th sample as if it were a brand-new point to project.
new_value = values[25]
print('New value: {}'.format(new_value))
original_projected_value = kernel_pca_values[25]
print('Original projection: {}'.format(original_projected_value))
# Define a projection function for new, unseen samples.
def project_value(new_value, values, gamma, kernel_pca_values, lambdas):
    """Project *new_value* onto the kernel principal components.

    Computes the RBF kernel between the new sample and every training
    sample, then weights the eigenvalue-normalised projections by those
    similarities.
    """
    squared_distances = np.array([np.sum((new_value - row) ** 2) for row in values])
    rbf_similarities = np.exp(-gamma * squared_distances)
    normalized_projections = kernel_pca_values / lambdas
    return rbf_similarities.dot(normalized_projections)
# Re-project the 26th point with the new function; it should land on its
# original projection.
new_projected_value = project_value(new_value, values, 15, kernel_pca_values, lambdas)
print('New projection: {}'.format(new_projected_value))
# Visualize both classes on the first principal component, plus the
# original and remapped projections of the test point.
plt.scatter(kernel_pca_values[classes == 0, 0], np.zeros((50)), color = 'red', marker = '^',alpha = 0.5)
plt.scatter(kernel_pca_values[classes == 1, 0], np.zeros((50)), color = 'blue', marker = 'o', alpha = 0.5)
plt.scatter(original_projected_value, 0, color = 'black', label = 'original projection of point X[25]', marker = '^', s = 100)
plt.scatter(new_projected_value, 0, color = 'green', label = 'remapped point X[25]', marker = 'x', s = 500)
plt.legend(scatterpoints = 1)
plt.show()
|
PiggyGenius/MachineLearning
|
NoLibraries/RBFKernelPCA/ProjectNewDataPoints.py
|
ProjectNewDataPoints.py
|
py
| 1,809 |
python
|
en
|
code
| 0 |
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.